text stringlengths 0 1.05M | meta dict |
|---|---|
"""All models for the extension."""
from django.conf import settings
from django.dispatch import receiver
from django.db import models
from model_utils.models import StatusModel, TimeStampedModel
from geokey.projects.models import Project
from .base import STATUS, FORMAT
from .managers import WebResourceManager
class WebResource(StatusModel, TimeStampedModel):
    """Store a single web resource attached to a project."""

    STATUS = STATUS
    FORMAT = FORMAT
    name = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    dataformat = models.CharField(max_length=10, null=False, choices=FORMAT)
    url = models.URLField(max_length=250)
    # Position of the resource when ordered alongside its siblings.
    order = models.IntegerField(default=0)
    colour = models.TextField(default='#0033ff')
    symbol = models.ImageField(
        upload_to='webresources/symbols',
        max_length=500,
        null=True,
        blank=True
    )
    project = models.ForeignKey(
        'projects.Project',
        related_name='webresources'
    )
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)

    objects = WebResourceManager()

    class Meta:
        """Model meta."""

        ordering = ['order']

    def delete(self, *args, **kwargs):
        """Soft-delete the web resource by setting its status to `deleted`.

        Accepts (and ignores) the arguments Django's ``Model.delete``
        receives (e.g. ``using``, ``keep_parents``) so callers that pass
        them do not break; the original signature took no arguments.
        """
        self.status = self.STATUS.deleted
        self.save()
@receiver(models.signals.post_save, sender=Project)
def post_save_project(sender, instance, **kwargs):
    """Soft-delete all web resources of a project once it is deleted."""
    if instance.status != 'deleted':
        return
    WebResource.objects.filter(project=instance).delete()
| {
"repo_name": "ExCiteS/geokey-webresources",
"path": "geokey_webresources/models.py",
"copies": "1",
"size": "1612",
"license": "mit",
"hash": 1488738725783795200,
"line_mean": 27.2807017544,
"line_max": 76,
"alpha_frac": 0.6892059553,
"autogenerated": false,
"ratio": 4.133333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5322539288633333,
"avg_score": null,
"num_lines": null
} |
"""All models for the extension."""
import sys
import json
import csv
from osgeo import ogr
from django.conf import settings
from django.dispatch import receiver
from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.postgres.fields import ArrayField
from django.contrib.gis.db import models as gis
try:
from django.contrib.postgres.fields import JSONField
except ImportError:
from django_pgjson.fields import JsonBField as JSONField
from model_utils.models import StatusModel, TimeStampedModel
from geokey.projects.models import Project
from geokey.categories.models import Category, Field
from geokey_dataimports.helpers.model_helpers import import_from_csv
from .helpers import type_helpers
from .base import STATUS, FORMAT
from .exceptions import FileParseError
from .managers import DataImportManager
class DataImport(StatusModel, TimeStampedModel):
    """Store a single data import."""

    STATUS = STATUS
    FORMAT = FORMAT
    name = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    dataformat = models.CharField(max_length=10, null=False, choices=FORMAT)
    file = models.FileField(
        upload_to='dataimports/files',
        max_length=500
    )
    keys = ArrayField(models.CharField(max_length=100), null=True, blank=True)
    project = models.ForeignKey(
        'projects.Project',
        related_name='dataimports'
    )
    category = models.ForeignKey(
        'categories.Category',
        null=True,
        blank=True
    )
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)

    objects = DataImportManager()

    def delete(self, *args, **kwargs):
        """Soft-delete the data import by marking its status as `deleted`."""
        self.status = self.STATUS.deleted
        self.save()

    def get_lookup_fields(self):
        """Return a mapping of field key to field for all lookup fields."""
        return {
            field.key: field
            for field in self.category.fields.all()
            if field.fieldtype == 'LookupField'
        }
@receiver(models.signals.post_save, sender=DataImport)
def post_save_dataimport(sender, instance, created, **kwargs):
    """Map data fields and data features when the data import gets created.

    Reads the uploaded file (KML, GeoJSON or CSV), infers candidate field
    types for each property, extracts geometries, and persists the result
    as `DataField`/`DataFeature` objects. If any entry has no usable
    geometry, the import is soft-deleted and `FileParseError` is raised.
    """
    if created:
        datafields = []
        datafeatures = []
        fields = []
        features = []
        errors = []

        if instance.dataformat == FORMAT.KML:
            driver = ogr.GetDriverByName('KML')
            reader = driver.Open(instance.file.path)
            # NOTE(review): ExportToJson() returns a JSON *string*, yet the
            # loop below subscripts each feature like a dict — confirm how
            # the KML path is decoded before relying on it.
            for layer in reader:
                for feature in layer:
                    features.append(feature.ExportToJson())
        else:
            csv.field_size_limit(sys.maxsize)
            # Context manager guarantees the handle is closed (it was
            # previously leaked). Mode 'rU' was removed in Python 3.11;
            # plain 'r' already applies universal newlines on Python 3.
            with open(instance.file.path, 'r') as file_obj:
                if instance.dataformat == FORMAT.GeoJSON:
                    reader = json.load(file_obj)
                    features = reader['features']
                if instance.dataformat == FORMAT.CSV:
                    import_from_csv(
                        features=features,
                        fields=fields,
                        file_obj=file_obj)

        for feature in features:
            geometries = {}
            for key, value in feature['properties'].items():
                # Find (or lazily register) the field record for this key.
                field = None
                for existing_field in fields:
                    if existing_field['name'] == key:
                        field = existing_field
                        break
                if field is None:
                    fields.append({
                        'name': key,
                        'good_types': set(['TextField', 'LookupField']),
                        'bad_types': set([])
                    })
                    field = fields[-1]

                fieldtype = None
                if 'geometry' not in feature:
                    # A value may encode the geometry as WKT; a failed
                    # parse just means this property is not a geometry.
                    try:
                        geometry = ogr.CreateGeometryFromWkt(str(value))
                        geometry = geometry.ExportToJson()
                    except Exception:  # narrowed from a bare `except:`
                        geometry = None
                    fieldtype = 'GeometryField'
                    if geometry is not None:
                        if fieldtype not in field['bad_types']:
                            field['good_types'].add(fieldtype)
                            geometries[field['name']] = json.loads(geometry)
                    else:
                        field['good_types'].discard(fieldtype)
                        field['bad_types'].add(fieldtype)
                        fieldtype = None

                if fieldtype is None:
                    # Numeric check.
                    fieldtype = 'NumericField'
                    if type_helpers.is_numeric(value):
                        if fieldtype not in field['bad_types']:
                            field['good_types'].add(fieldtype)
                    else:
                        field['good_types'].discard(fieldtype)
                        field['bad_types'].add(fieldtype)
                    # Date / datetime checks.
                    fieldtypes = ['DateField', 'DateTimeField']
                    if type_helpers.is_date(value):
                        for fieldtype in fieldtypes:
                            if fieldtype not in field['bad_types']:
                                field['good_types'].add(fieldtype)
                    else:
                        for fieldtype in fieldtypes:
                            field['good_types'].discard(fieldtype)
                            field['bad_types'].add(fieldtype)
                    # Time check.
                    fieldtype = 'TimeField'
                    if type_helpers.is_time(value):
                        if fieldtype not in field['bad_types']:
                            field['good_types'].add(fieldtype)
                    else:
                        field['good_types'].discard(fieldtype)
                        field['bad_types'].add(fieldtype)

            if 'geometry' not in feature and len(geometries) == 0:
                errors.append({
                    'line': feature['line'],
                    'messages': ['The entry has no geometry set.']
                })
            else:
                feature['geometries'] = geometries

        # The first all-geometry field becomes the geometry source; the
        # rest become regular data fields.
        geometryfield = None
        for field in fields:
            if 'GeometryField' not in field['good_types']:
                datafields.append({
                    'name': field['name'],
                    'types': list(field['good_types'])
                })
            elif geometryfield is None:
                geometryfield = field['name']

        for feature in features:
            geometry = None
            if 'geometry' in feature:
                geometry = feature['geometry']
            elif 'geometries' in feature:
                if not geometryfield:
                    errors.append({
                        'line': feature['line'],
                        'messages': ['The file has no valid geometry field.']
                    })
                else:
                    geometries = feature['geometries']
                    if geometryfield in geometries:
                        geometry = geometries[geometryfield]
            if geometry:
                datafeatures.append({
                    'geometry': geometry,
                    'properties': feature['properties']
                })

        if errors:
            # Roll back: soft-delete the import and report all problems.
            instance.delete()
            raise FileParseError('Failed to read file.', errors)
        else:
            for datafield in datafields:
                if datafield['name']:
                    DataField.objects.create(
                        name=datafield['name'],
                        types=list(datafield['types']),
                        dataimport=instance
                    )
            for datafeature in datafeatures:
                DataFeature.objects.create(
                    geometry=json.dumps(datafeature['geometry']),
                    properties=datafeature['properties'],
                    dataimport=instance
                )
class DataField(TimeStampedModel):
    """Store a single data field."""

    name = models.CharField(max_length=100)
    key = models.CharField(max_length=100, null=True, blank=True)
    types = ArrayField(models.CharField(max_length=100), null=True, blank=True)
    dataimport = models.ForeignKey(
        'DataImport',
        related_name='datafields'
    )

    def convert_to_field(self, name, fieldtype):
        """
        Convert data field to regular GeoKey field.

        Parameters
        ----------
        name : str
            The name of the field.
        fieldtype : str
            The field type.

        Returns
        -------
        geokey.categories.models.Field
            The field created (or the existing field matched by key).
        """
        category = self.dataimport.category
        field = None
        if self.key:
            try:
                field = category.fields.get(key=self.key)
            except Field.DoesNotExist:
                # Fixed: `category.fields.get` raises Field.DoesNotExist,
                # not Category.DoesNotExist, so the original handler could
                # never catch the miss and the lookup crashed instead of
                # falling through to field creation.
                pass
        proposed_key = slugify(self.name)
        suggested_key = proposed_key
        if field:
            suggested_key = field.key
        else:
            # Generate a unique key within the category by suffixing a
            # counter until no clash remains.
            count = 1
            while category.fields.filter(key=suggested_key).exists():
                suggested_key = '%s-%s' % (proposed_key, count)
                count += 1
            self.key = suggested_key
            self.save()
            field = Field.create(
                name,
                self.key,
                '', False,
                category,
                fieldtype
            )
        for datafeature in self.dataimport.datafeatures.all():
            properties = datafeature.properties
            if self.name in properties:
                # Edge case: if field type is set as text but original value
                # is a number - import will fail, because a method
                # create_search_index within geokey/contributions/models.py
                # will try to use number within regular expression. So the
                # fix is to make sure value of such field type is always
                # stringified.
                if field.fieldtype == 'TextField':
                    properties[self.name] = str(properties[self.name])
                # If field key has changed - it needs to be reflected on feature
                # properties too.
                if self.key != self.name:
                    properties[self.key] = properties.pop(self.name)
                datafeature.properties = properties
                datafeature.save()
        return field
class DataFeature(TimeStampedModel):
    """Store a single data feature."""

    # Whether the feature has already been imported as a contribution.
    imported = models.BooleanField(default=False)
    geometry = gis.GeometryField(geography=True)
    # Use the `dict` callable rather than a literal `{}`: a literal is a
    # single shared mutable instance used as the default for every row,
    # which Django explicitly warns against for JSON fields.
    properties = JSONField(default=dict)
    dataimport = models.ForeignKey(
        'DataImport',
        related_name='datafeatures'
    )
@receiver(models.signals.post_save, sender=Project)
def post_save_project(sender, instance, **kwargs):
    """Soft-delete all data imports of a project once it is deleted."""
    if instance.status != 'deleted':
        return
    DataImport.objects.filter(project=instance).delete()
@receiver(models.signals.post_save, sender=Category)
def post_save_category(sender, instance, **kwargs):
    """Soft-delete all data imports of a category once it is deleted."""
    if instance.status != 'deleted':
        return
    DataImport.objects.filter(category=instance).delete()
| {
"repo_name": "ExCiteS/geokey-dataimports",
"path": "geokey_dataimports/models.py",
"copies": "1",
"size": "11423",
"license": "mit",
"hash": -5647674630128379000,
"line_mean": 33.406626506,
"line_max": 80,
"alpha_frac": 0.5383874639,
"autogenerated": false,
"ratio": 4.919465977605513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5957853441505513,
"avg_score": null,
"num_lines": null
} |
"""All models for the queue.
Important: Changes here need to be followed by `make refresh`.
"""
from sqlalchemy import asc
from sqlalchemy import desc
from flask import current_app
from werkzeug.local import LocalProxy
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import PasswordType
from sqlalchemy_utils import ArrowType
from sqlalchemy_utils.types.choice import ChoiceType
from passlib.context import CryptContext
from quupod.defaults import default_queue_settings
import flask_login
import arrow
from flask_script import Manager
from flask_migrate import Migrate
from flask import g, request
from quupod.views import url_for
from quupod.utils import strfdelta
from quupod.utils import Nil
from quupod.utils import str2lst
from wtforms import Form
#################
# PARENT MODELS #
#################
# Shared Flask extension instances. They are constructed unbound here and
# attached to the application elsewhere (via their `init_app`/`init` calls).
db = SQLAlchemy()
migrate = Migrate()
migration_manager = Manager()
toolbar = DebugToolbarExtension()
class Base(db.Model):
    """Base Model for all other models.

    Provides an integer primary key, audit timestamps, an `is_active`
    flag, and convenience persistence/time-zone helpers.
    """

    __abstract__ = True
    __access_token = None
    # Password-hashing context shared by all models (pbkdf2_sha512).
    __context = CryptContext(schemes=['pbkdf2_sha512'])
    id = db.Column(db.Integer, primary_key=True)
    updated_at = db.Column(ArrowType)
    updated_by = db.Column(db.Integer)
    # Fixed: the default must be a *callable*. The original passed
    # `arrow.now('US/Pacific')`, an arrow instance evaluated once at
    # import time, so every row received the process start time instead
    # of its creation time.
    created_at = db.Column(ArrowType, default=lambda: arrow.now('US/Pacific'))
    created_by = db.Column(db.Integer)
    is_active = db.Column(db.Boolean, default=True)

    @property
    def entity(self) -> str:
        """Return entity name (lowercased class name)."""
        return self.__class__.__name__.lower()

    @staticmethod
    def random_hash() -> str:
        """Generate random hash."""
        return Base.hash(str(arrow.utcnow()))

    @staticmethod
    def hash(value: str) -> str:
        """Hash value.

        :param value: The value to hash.
        """
        return Base.__context.encrypt(value)

    @classmethod
    def from_request(cls):
        """Create and save an object from the current request's form data."""
        return cls(**dict(request.form.items())).save()

    def modify_time(self, *fields, act=lambda t: t) -> db.Model:
        """Modify times.

        :param *fields: The fields to change to local times.
        :param act: A function to modify all time values.
        """
        for field in fields:
            setattr(self, field, act(getattr(self, field)))
        return self

    def to_local(self, *fields) -> db.Model:
        """Convert all to local times.

        :param *fields: The fields to change to local times.
        """
        return self.modify_time(
            *fields,
            act=lambda t: t.to(current_app.config['TZ'] or 'local'))

    def to_utc(self, *fields) -> db.Model:
        """Convert all to UTC times.

        :param *fields: The fields to change to UTC time.
        """
        return self.modify_time(*fields, act=lambda t: t.to('utc'))

    def set_tz(self, *fields, tz: str) -> db.Model:
        """Set timezones of current times to be a specific tz.

        :param *fields: The fields to change to the specified timezone.
        :param tz: The timezone.
        """
        return self.modify_time(
            *fields,
            act=lambda t: t.replace(tzinfo=tz))

    def set_local(self, *fields) -> db.Model:
        """Set timezones of current times to be local time.

        :param *fields: The fields to change to local time.
        """
        from dateutil import tz as t
        return self.set_tz(
            *fields,
            tz=t.gettz(current_app.config['TZ']) or t.tzlocal())

    def update(self, **kwargs) -> db.Model:
        """Update object attributes in memory (does not save)."""
        for k, v in kwargs.items():
            setattr(self, k, v)
        return self

    def save(self) -> db.Model:
        """Save object, retrying after a rollback on failure.

        NOTE(review): the retry is unbounded — a persistent database
        error recurses until the stack overflows. Kept as-is to preserve
        behavior; consider a bounded retry or re-raise.
        """
        try:
            db.session.add(self)
            db.session.commit()
            return self
        except Exception:  # narrowed from a bare `except:`
            db.session.rollback()
            return self.save()

    def setting(
            self,
            name: str,
            dynamic: bool=False,
            default=Nil) -> db.Model:
        """Get Setting by name.

        :param name: The name of the setting to fetch.
        :param dynamic: Set to true if the setting is not expected to exist in
            default
        :param default: Fallback returned when the setting cannot be loaded.
        """
        assert name in self.__defaultsettings__ or dynamic, \
            'Not a valid setting'
        key = {'%s_id' % self.entity: self.id}
        setting = self.__settingclass__.query.filter_by(
            name=name,
            **key).one_or_none()
        if not setting:
            setting = self.load_setting(name, default)
        return setting

    def load_setting(self, name: str, default=Nil) -> db.Model:
        """Create and save a setting from its default definition."""
        try:
            key = {'%s_id' % self.entity: self.id}
            key.update(self.__defaultsettings__[name])
            key.setdefault('name', name)
            return self.__settingclass__(
                **key).save()
        except KeyError:
            if default == Nil:
                raise UserWarning('No such setting "%s"' % name)
            return default

    def load_settings(self, *names) -> [db.Model]:
        """Load a series of settings."""
        return [self.load_setting(n) for n in names]

    def deactivate(self) -> db.Model:
        """Deactivate the object."""
        self.is_active = False
        return self.save()

    def activate(self) -> db.Model:
        """Activate the object."""
        self.is_active = True
        return self.save()

    def load_roles(self, roles: [str]) -> db.Model:
        """Create any missing role rows for this entity from `roles`."""
        RoleClass = {
            'queue': QueueRole
        }[self.entity]
        for role in roles:
            filt = {
                'name': role['name'],
                '%s_id' % self.entity: self.id
            }
            if not RoleClass.query.filter_by(**filt).one_or_none():
                role = role.copy()
                role.setdefault('%s_id' % self.entity, self.id)
                RoleClass(**role).save()
        return self
class Setting(Base):
    """Base setting model."""

    __abstract__ = True
    name = db.Column(db.String(100))
    # Human-readable title shown in the settings UI.
    label = db.Column(db.String(100))
    description = db.Column(db.Text)
    # Stored as text; callers parse the value as needed.
    value = db.Column(db.Text)
    # Whether the setting can be switched on/off independently of its value.
    toggable = db.Column(db.Boolean, default=False)
    enabled = db.Column(db.Boolean, default=True)
    enable_description = db.Column(db.Text)
    # Form widget used to edit the value (e.g. 'text').
    input_type = db.Column(db.String(50), default='text')
class Role(Base):
    """Base role model (<- hahaha. punny)."""

    __abstract__ = True
    name = db.Column(db.String(100))
    # Comma-separated permission names, or '*' for all (see User.can).
    permissions = db.Column(db.Text)
############
# ENTITIES #
############
class QueueRole(Role):
    """Roles for a queue."""

    __tablename__ = 'queue_role'
    queue_id = db.Column(db.Integer, db.ForeignKey('queue.id'))

    @staticmethod
    def get_by_name(role_name: str) -> db.Model:
        """Get a role by name for the global current queue."""
        lookup = QueueRole.query.filter_by(
            queue_id=g.queue.id,
            name=role_name)
        return lookup.one()
class QueueSetting(Setting):
    """Settings for the queue application."""

    __tablename__ = 'queue_setting'
    # Owning queue; `Setting.entity`-based lookups build '%s_id' keys from it.
    queue_id = db.Column(db.Integer, db.ForeignKey('queue.id'))
class Queue(Base):
    """Model for all queues."""

    __tablename__ = 'queue'
    __settingclass__ = QueueSetting
    __defaultsettings__ = default_queue_settings
    name = db.Column(db.String(50))
    description = db.Column(db.Text)
    url = db.Column(db.String(50), unique=True)
    category = db.Column(db.String(50))
    settings = db.relationship("QueueSetting", backref="queue")

    @property
    def cleaned_settings(self) -> [Setting]:
        """Retrieve list of all settings.

        This will check that each setting is valid and then assign each setting
        attributes from the default settings file.

        NOTE(review): reads `g.queue`/`g.participant` rather than `self` —
        presumably only used for the current request's queue; confirm.
        """
        for setting in g.queue.settings:
            if setting.name in default_queue_settings:
                setting.description = \
                    default_queue_settings[setting.name]['description']
        if g.participant.role.name.lower() != 'owner':
            return [
                s for s in g.queue._sorted_settings if s.name != 'whitelist']
        return g.queue._sorted_settings

    @property
    def _sorted_settings(self) -> [Setting]:
        """Return settings sorted by name."""
        return sorted(g.queue.settings, key=lambda s: s.name)

    def allowed_assignment(self, request: LocalProxy, form: Form) -> bool:
        """Return if assignment is allowed, per settings.

        :param request: The request context object.
        :param form: form to check
        """
        lst = self.setting('assignments').value
        assignment = request.form['assignment']
        category = request.form.get('category', None)
        if ':' in lst:
            # Per-category allow-lists, one 'category:assignments' per line.
            datum = dict(l.split(':') for l in lst.splitlines())
            lst = datum.get(category, '*')
        if lst == '*':
            return True
        if assignment not in str2lst(lst):
            prefix = 'Assignment'
            if category:
                prefix = 'For "%s" inquiries, assignment' % category
            form.errors.setdefault('assignment', []) \
                .append(
                    '%s "%s" is not allowed. Only the following '
                    'assignments are: %s' % (prefix, assignment, lst))
            return False
        return True

    def get_num_owners(self) -> int:
        """Get number of owners for the global, current queue."""
        return Participant.query.join(QueueRole).filter(
            QueueRole.name == 'Owner',
            Participant.queue_id == g.queue.id).count()

    def get_code_for_role(self, role_name: str) -> str:
        """Get promotion code for the provided role."""
        return self.get_roles_to_codes()[role_name]

    def get_roles_for_promotion(self) -> [str]:
        """Return a list of all role names available for promotion."""
        return list(self.get_roles_to_codes().keys())

    def get_roles_to_codes(self) -> dict:
        """Return a dictionary mapping roles to promotion codes."""
        promotion_setting = g.queue.setting(
            name='self_promotion',
            default=None)
        if not promotion_setting:
            return {}
        mapping = {}
        valid_role_names = set(role.name.lower() for role in g.queue.roles)
        for line in promotion_setting.value.splitlines():
            # Fixed: the original called map(iterable, function) — the
            # arguments were reversed, raising a TypeError on every
            # non-empty self_promotion setting.
            role_name, code = (part.strip() for part in line.split(':'))
            if role_name.lower() in valid_role_names:
                mapping[role_name] = code
        return mapping

    def is_promotion_valid(self, role_name: str, code: str) -> bool:
        """Check if the provided promotion credentials are valid."""
        correct_code = self.get_roles_to_codes()[role_name]
        return correct_code == '*' or correct_code == code

    def is_valid_assignment(self, request: LocalProxy, form: Form) -> bool:
        """Check if the assignment is valid, based on settings.

        :param request: The request context object.
        :param form: form to check
        """
        return not self.setting('assignments').enabled or \
            self.allowed_assignment(request, form)

    def present_staff(self) -> {db.Model}:
        """Fetch all staff members with a resolution in the last 3 hours."""
        resolutions = Resolution.query.join(Inquiry).filter(
            Resolution.resolved_at >= arrow.utcnow().replace(hours=-3),
            Inquiry.queue_id == self.id).all()
        staff = set()
        for resolution in resolutions:
            user = User.query.get(resolution.user_id)
            user.resolution = resolution
            # Average handling time over the user's last 6 hours.
            ns = [
                res.resolved_at - res.created_at
                for res in Resolution.query.filter(
                    Resolution.resolved_at >= arrow.utcnow().replace(hours=-6),
                    Resolution.user_id == user.id)]
            if ns:
                total = ns[0]
                for n in ns[1:]:
                    total = n + total
                user.average = total/len(ns)
            else:
                user.average = 'n/a'
            current = Resolution.query.filter_by(
                user_id=user.id,
                resolved_at=None).first()
            user.status = 'free' if not current else 'busy'
            staff.add(user)
        return staff

    def show_inquiry_types(self) -> bool:
        """Whether or not to show inquiry types."""
        return self.setting('inquiry_types').enabled and \
            self.setting('inquiry_type_selection').enabled

    def ttr(self) -> str:
        """Compute average time until resolution over the last 3 hours."""
        resolutions = Resolution.query.join(Inquiry).filter(
            Resolution.created_at >= arrow.utcnow().replace(hours=-3),
            Inquiry.queue_id == self.id).all()
        ns = [res.created_at - res.inquiry.created_at for res in resolutions]
        if ns:
            total = ns[0]
            for n in ns[1:]:
                total = n + total
            return strfdelta(total/len(ns))
        return '00:00:00'

    @property
    def roles(self) -> [Role]:
        """Return all available roles for this queue (cached per instance).

        Fixed: the original probed `getattr(self, '__role')` but assigned
        `self.__role`, which Python name-mangles to `_Queue__role` inside
        the class body — the probe never matched, so the cache was never
        hit and the query ran on every access.
        """
        if not getattr(self, '_roles_cache', None):
            self._roles_cache = QueueRole.query.filter_by(
                queue_id=self.id).all()
        return self._roles_cache
class Resolution(Base):
    """Model for a resolution object.

    A resolution object is created any time an inquiry it closed; it is NOT
    created if the inquiry is re-queued however.
    """

    __tablename__ = 'resolution'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    inquiry_id = db.Column(
        db.Integer,
        db.ForeignKey('inquiry.id'),
        index=True)
    resolved_at = db.Column(ArrowType, index=True)
    comment = db.Column(db.Text)

    @property
    def inquiry(self) -> db.Model:
        """Return the inquiry this resolution belongs to."""
        related_inquiry = Inquiry.query.get(self.inquiry_id)
        return related_inquiry

    @property
    def staff(self) -> db.Model:
        """Return the staff member handling this resolution."""
        staff_member = User.query.get(self.user_id)
        return staff_member

    def close(self) -> db.Model:
        """Stamp the resolution as resolved now and persist it."""
        now = arrow.utcnow()
        self.resolved_at = now
        return self.save()
class User(Base, flask_login.UserMixin):
    """Queue system user."""

    __tablename__ = 'user'
    inquiries = db.relationship('Inquiry', backref='owner', lazy='dynamic')
    name = db.Column(db.String(100))
    email = db.Column(db.String(100), unique=True)
    username = db.Column(db.String(50), unique=True)
    # Hashed transparently by sqlalchemy_utils PasswordType.
    password = db.Column(PasswordType(schemes=['pbkdf2_sha512']))
    image_url = db.Column(db.Text)
    google_id = db.Column(db.String(30), unique=True)
    @property
    def role(self) -> Role:
        """Get user role for the current queue (`g.queue`).

        Returns None when the user has no active participation.
        """
        # NOTE(review): filter_by after the join applies to Participant;
        # confirm the queue_id/user_id/is_active filters target it.
        return QueueRole.query.join(Participant).filter_by(
            queue_id=g.queue.id,
            user_id=self.id,
            is_active=True).one_or_none()
    @staticmethod
    def get_home(user: db.Model, **kwargs) -> str:
        """Return home URL for given user.

        Admins of the current queue land on the admin home; other
        queue visitors on the queue home; no queue context falls back
        to the dashboard.
        """
        if getattr(g, 'queue', None):
            if user and user.can('admin'):
                return url_for('admin.home', **kwargs)
            return url_for('queue.home', **kwargs)
        return url_for('dashboard.home')
    @staticmethod
    def get_num_current_requests(name: str):
        """Get user's number of queued requests for the current queue.

        Matches authenticated users by email; anonymous users by the
        provided name.
        """
        if flask_login.current_user.is_authenticated:
            filter_id = User.email == flask_login.current_user.email
        else:
            filter_id = User.name == name
        return Inquiry.query.join(User).filter(
            filter_id,
            Inquiry.status == 'unresolved',
            Inquiry.queue_id == g.queue.id).count()
    def set_role(self, role: str) -> db.Model:
        """Set role for user in the current queue, creating a participation
        row when none exists."""
        part = Participant.query.filter_by(
            queue_id=g.queue.id,
            user_id=self.id,
            is_active=True).one_or_none()
        role_id = QueueRole.query.filter_by(
            queue_id=g.queue.id,
            name=role).one().id
        if part:
            part.role_id = role_id
            return part.save()
        return Participant(
            queue_id=g.queue.id,
            user_id=self.id,
            role_id=role_id).save()
    def join(
            self,
            queue: Queue,
            role: str=None,
            role_id: str=None) -> db.Model:
        """Join a queue.

        Either `role` (a name) or `role_id` must be provided.
        """
        assert queue.id, 'Save queue object first'
        assert isinstance(queue, Queue), 'Can only join group.'
        role_id = role_id or QueueRole.query.filter_by(
            name=role,
            queue_id=queue.id).one().id
        return Participant(
            user_id=self.id,
            queue_id=queue.id,
            role_id=role_id).save()
    def queues(self) -> [Queue]:
        """Return all queues for this user."""
        return (
            Queue
            .query
            .join(Participant)
            .filter_by(user_id=self.id)
            .all())
    def can(self, *permission) -> bool:
        """Check permissions for this user.

        A role with permissions '*' grants everything; otherwise the
        comma-separated permission list must contain one of the names.
        """
        if not g.queue:
            return False
        role = self.role
        if role and \
                (role.permissions == '*' or
                 any(p in role.permissions.split(',') for p in permission)):
            return True
        return False
class Inquiry(Base):
    """Inquiry placed in queue."""

    __tablename__ = 'inquiry'
    STATUSES = (
        ('unresolved', 'has not yet been addressed'),
        ('resolving', 'being addressed by admin'),
        ('resolved', 'addressed and closed'),
        ('closed', 'closed without resolution - end of session, MIA etc.'))
    status = db.Column(
        ChoiceType(STATUSES),
        default='unresolved',
        index=True)
    name = db.Column(db.String(50))
    comments = db.Column(db.Text)
    assignment = db.Column(db.String(25))
    problem = db.Column(db.String(25))
    location = db.Column(db.String(25))
    owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    category = db.Column(db.String(25), default='question')
    queue_id = db.Column(db.Integer, db.ForeignKey('queue.id'), index=True)

    @property
    def owner(self) -> User:
        """The user that filed the inquiry."""
        return User.query.get(self.owner_id)

    @property
    def queue(self) -> Queue:
        """Return associated queue."""
        return Queue.query.get(self.queue_id)

    @property
    def resolution(self) -> Resolution:
        """Return the resolution associated with this inquiry.

        While unresolved, only open (unstamped) resolutions count; once
        resolved, any resolution row matches.
        """
        if self.status != 'resolved':
            return Resolution.query.filter_by(
                resolved_at=None,
                inquiry_id=self.id).first()
        else:
            return Resolution.query.filter_by(inquiry_id=self.id).first()

    @staticmethod
    def clear_all_inquiries():
        """Clear all inquiries for the current queue."""
        Inquiry.query.filter_by(
            status='unresolved',
            queue_id=g.queue.id).update({'status': 'closed'})
        Inquiry.query.filter_by(
            status='resolving',
            queue_id=g.queue.id).update({'status': 'closed'})
        db.session.commit()

    @staticmethod
    def get_current_or_latest(**kwargs):
        """Return the current inquiry if one exists. Otherwise, return latest.

        This method is used when fetching the latest inquiry on the help
        screen. In this case, we would like to have the staff member resume
        unresolved inquiries before resolving new ones.
        """
        current_inquiry = Inquiry.get_current()
        if current_inquiry:
            return current_inquiry
        return Inquiry.get_latest(**kwargs)

    @staticmethod
    def get_current() -> db.Model:
        """Return current resolution for the logged-in, staff user.

        This is an inquiry that the logged-in staff user is currently working
        on. This is not for the inquirer.
        """
        resolution = Resolution.query.join(Inquiry).filter(
            Inquiry.queue_id == g.queue.id,
            Inquiry.status == 'resolving',
            Resolution.user_id == flask_login.current_user.id,
            Resolution.resolved_at == None).first()
        if resolution:
            return resolution.inquiry

    @staticmethod
    def get_current_asking() -> db.Model:
        """Return current resolution for the logged-in, non-staff user."""
        return Inquiry.query.filter_by(
            owner_id=flask_login.current_user.id,
            status='unresolved',
            queue_id=g.queue.id).first()

    @staticmethod
    def get_current_user_inquiries(limit: int=10) -> [db.Model]:
        """Return list of all inquiries associated with the current user.

        Fixed: the original filtered `Inquiry.id == user.id`, comparing an
        inquiry primary key against a user id; inquiries are owned via
        `owner_id`.
        """
        user = flask_login.current_user
        if user.is_authenticated:
            return Inquiry.query.filter_by(
                owner_id=user.id).limit(limit).all()
        return Inquiry.query.filter_by(name=user.name).limit(limit).all()

    @staticmethod
    def get_earliest(**kwargs) -> db.Model:
        """Return earliest unresolved inquiry for the current queue."""
        kwargs = {k: v for k, v in kwargs.items() if v}
        # NOTE(review): ordering by desc(created_at) yields the *newest*
        # inquiry, contradicting the name; `get_latest` is symmetrically
        # inverted. Confirm the intended queue semantics before changing.
        return Inquiry.query.filter_by(
            status='unresolved',
            queue_id=g.queue.id,
            **kwargs).order_by(desc(Inquiry.created_at)).first()

    @staticmethod
    def get_inquiries(status: str, limit: int) -> [db.Model]:
        """Get all inquiries, along with resolutions, for the current queue."""
        return (
            Inquiry
            .query
            .join(Resolution)
            .filter(
                Inquiry.status == status,
                Inquiry.queue_id == g.queue.id)
            .order_by(desc(Resolution.created_at))
            .limit(limit)
            .all())

    @staticmethod
    def get_latest(**kwargs) -> db.Model:
        """Return latest unresolved inquiry for the current queue.

        This will filter the keyword arguments provided. Specifically, it will:
        - Remove all filters will falsey values.
        - This will remove the 'category' field if the category is 'all'.
        """
        kwargs = {k: v for k, v in kwargs.items() if v}
        if kwargs.get('category', None) == 'all':
            kwargs.pop('category')
        return Inquiry.query.filter_by(
            status='unresolved',
            queue_id=g.queue.id,
            **kwargs).order_by(asc(Inquiry.created_at)).first()

    @staticmethod
    def get_categories_unresolved(**kwargs) -> int:
        """Return categories that have unresolved inquiries."""
        return Inquiry.get_unresolved(
            str2lst(g.queue.setting('inquiry_types').value),
            **kwargs)

    @staticmethod
    def get_unresolved(
            categories: [str],
            key: str='category',
            **filters) -> [(str, int)]:
        """Return list of (category, number unresolved).

        This only returns categories that have a non-zero number of unresolved
        inquiries.
        """
        lst = []
        for category in categories:
            filters[key] = category
            num = Inquiry.get_num_unresolved(**filters)
            if num > 0:
                lst.append((category, num))
        return lst

    @staticmethod
    def get_num_unresolved(**kwargs) -> int:
        """Return number of unresolved inquiries for a specific queue."""
        filters = {'status': 'unresolved', 'queue_id': g.queue.id}
        filters.update(kwargs)
        return Inquiry.query.filter_by(**filters).count()

    @staticmethod
    def maybe_unlock_delayed() -> None:
        """Unlock delayed inquiry if a delayed inquiry is found.

        Note that an inquiry is delayed by passing the id of the inquiry in the
        query parameters of a URL.
        """
        delayed_id = request.args.get('delayed_id', None)
        if delayed_id:
            Inquiry.query.get(delayed_id).unlock()

    def current_position(self) -> int:
        """Fetch current position of this inquiry in the current queue."""
        return Inquiry.query.filter(
            Inquiry.status == 'unresolved',
            Inquiry.queue_id == g.queue.id,
            Inquiry.created_at <= self.created_at).count()

    def close(self) -> db.Model:
        """Close an inquiry, marking as unresolved.

        Fixed: the original wrote status 'close', which is not one of the
        declared STATUSES; 'closed' matches the choices tuple and the
        value used by `clear_all_inquiries`.
        """
        return self.update(status='closed').save()

    def get_similar_inquiries(self):
        """Fetch all similar inquiries.

        For now, "similar" inquiries are those that share identical assignment
        names and problem numbers.
        """
        return Inquiry.query.filter(
            Inquiry.status == 'unresolved',
            Inquiry.queue_id == g.queue.id,
            Inquiry.assignment == self.assignment,
            Inquiry.problem == self.problem,
            Inquiry.owner_id != self.owner_id).all()

    def get_wait_time(self, fmt: str='%h:%m:%s') -> str:
        """Return the wait time delta object as a string."""
        return strfdelta(self.resolution.created_at-self.created_at, fmt)

    def is_owned_by_current_user(self) -> bool:
        """Check if current inquiry is owned by the current user.

        Fixed: the original branches were swapped — authenticated users
        were checked for the *absence* of an owner, while anonymous users
        were compared against an `id` attribute they do not have.
        """
        user = flask_login.current_user
        if user.is_authenticated:
            return self.owner_id == user.id
        # Anonymous inquiries carry no owner id.
        return not self.owner_id

    def lock(self) -> db.Model:
        """Lock an inquiry.

        This is so no other staff members can attempt to resolve.
        """
        self.status = 'resolving'
        return self.save()

    def maybe_lock(self) -> db.Model:
        """Lock an inquiry if the inquiry has not already been locked."""
        if not self.resolution:
            self.lock().link(flask_login.current_user)
        return self

    def link(self, user: User) -> Resolution:
        """Link inquiry to a user via an open resolution row."""
        filters = {
            'user_id': user.id,
            'inquiry_id': self.id,
            'resolved_at': None
        }
        if not Resolution.query.filter_by(**filters).one_or_none():
            return Resolution(**filters).save()

    def resolved(self) -> db.Model:
        """Close an inquiry, marking as resolved."""
        return self.update(status='resolved').save()

    def unlock(self) -> db.Model:
        """Unlock Inquiry and re-enqueue request."""
        self.status = 'unresolved'
        if self.resolution:
            self.resolution.close()
        return self.save()
##############################
# MANY-TO-MANY RELATIONSHIPS #
##############################
class Participant(Base):
    """A participant represents a user for a queue.

    This may be a staff member, a member requesting help, or even the
    owner.
    """

    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    queue_id = db.Column(db.Integer, db.ForeignKey('queue.id'))
    role_id = db.Column(db.Integer, db.ForeignKey('queue_role.id'))

    @property
    def role(self) -> Role:
        """Return the role associated with this participant."""
        return QueueRole.query.get(self.role_id)

    @staticmethod
    def update_or_create(user: User, role_name: str) -> db.Model:
        """Update an existing participant or create a new participant."""
        role = QueueRole.get_by_name(role_name)
        existing = Participant.get_from_user(user)
        if not existing:
            return Participant.create_from_user_and_role(user, role)
        return existing.update(role_id=role.id).save()

    @staticmethod
    def get_from_user(user: User):
        """Fetch participant using provided user."""
        lookup = {'user_id': user.id, 'queue_id': g.queue.id}
        return Participant.query.filter_by(**lookup).one_or_none()

    @staticmethod
    def create_from_user_and_role(user: User, role: Role) -> db.Model:
        """Create a new participant from the provided user and role."""
        participant = Participant(
            user_id=user.id,
            queue_id=g.queue.id,
            role_id=role.id)
        return participant.save()
| {
"repo_name": "alvinwan/quupod",
"path": "quupod/models.py",
"copies": "2",
"size": "28166",
"license": "apache-2.0",
"hash": 2107427806458085400,
"line_mean": 32.4910820452,
"line_max": 79,
"alpha_frac": 0.578392388,
"autogenerated": false,
"ratio": 3.927216954824317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5505609342824317,
"avg_score": null,
"num_lines": null
} |
"""All models for the WeGovNow extension."""
import os
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from geokey.core.models import get_class_name, cross_check_fields
from geokey_wegovnow.base import LOG_MODELS, WATCHED_FIELDS
from geokey_wegovnow.logger import make_event, send_events
@receiver(pre_save)
def log_on_pre_save(sender, instance, **kwargs):
    """Collect WeGovNow events before a model instance is saved.

    Makes a 'removed' event when the status flips to "deleted", an
    'updated' event when any watched field changed. Events are stashed
    on the instance and dispatched later by the post_save handler.
    """
    if 'TRAVIS' in os.environ:
        return
    pending = []
    instance._class_name = get_class_name(sender)
    instance._logged = False
    if sender.__name__ in LOG_MODELS:
        if instance.status == 'deleted':
            # Status changed to "deleted" - record a removal event
            pending.append(make_event(
                instance._class_name,
                instance,
                'removed'))
            instance._logged = True
        else:
            # The old instance is needed for checking changed fields;
            # it does not exist yet when the instance is being created
            try:
                previous = sender.objects.get(pk=instance.pk)
            except sender.DoesNotExist:
                previous = None
            if previous is not None:
                watched_changes = [
                    field for field in cross_check_fields(instance, previous)
                    if field.get('field') in WATCHED_FIELDS
                ]
                if watched_changes:
                    # A watched field changed - record an update event
                    pending.append(make_event(
                        instance._class_name,
                        instance,
                        'updated'))
                    instance._logged = True
    # Do not send events just yet - keep them on the instance for post_save
    instance._events = pending
@receiver(post_save)
def log_on_post_save(sender, instance, created, **kwargs):
    """Finalise event logging after a model instance was saved.

    Adds a 'created' event when a new instance was stored and nothing
    was logged in pre_save, then dispatches all collected events.
    """
    if 'TRAVIS' in os.environ:
        return
    should_log_creation = (
        created and
        not instance._logged and
        sender.__name__ in LOG_MODELS
    )
    if should_log_creation:
        instance._events.append(make_event(
            instance._class_name,
            instance,
            'created'))
    # Dispatch everything collected by the pre_save handler
    send_events(instance._events)
| {
"repo_name": "ExCiteS/geokey-wegovnow",
"path": "geokey_wegovnow/models.py",
"copies": "1",
"size": "2468",
"license": "mit",
"hash": -4216709030191537000,
"line_mean": 35.2941176471,
"line_max": 80,
"alpha_frac": 0.5490275527,
"autogenerated": false,
"ratio": 4.647834274952919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5696861827652919,
"avg_score": null,
"num_lines": null
} |
""" all models shared in tests
class names should be unique across whole project
"""
from mongoengine import (
Document, DynamicDocument, DynamicEmbeddedDocument, EmbeddedDocument,
fields
)
class DumbDocument(Document):
    """Minimal top-level document with one string and one int field."""

    name = fields.StringField()
    foo = fields.IntField()
class IntIdDocument(Document):
    """Document whose primary key is an explicit integer ``id``."""

    id = fields.IntField(primary_key=True)
    name = fields.StringField()
    foo = fields.IntField()
class DumbEmbedded(EmbeddedDocument):
    """Plain embedded sub-record used across the test suite."""

    name = fields.StringField()
    foo = fields.IntField()

    def __str__(self):
        return "<DumbEmbedded {0} {1}>".format(self.name, self.foo)
class OtherEmbedded(EmbeddedDocument):
    """Embedded document whose fields are both mandatory."""

    name = fields.StringField(required=True)
    bar = fields.IntField(required=True)
class DumbDynamicEmbedded(DynamicEmbeddedDocument):
    """Dynamic embedded document; accepts arbitrary extra fields."""

    name = fields.StringField()
    foo = fields.IntField()

    def __str__(self):
        return "<DumbDynamicEmbedded {0} {1}>".format(self.name, self.foo)
class DumbDynamic(DynamicDocument):
    """Dynamic top-level document; accepts arbitrary extra fields."""

    name = fields.StringField()
    foo = fields.IntField()
class EmbeddingDynamic(DynamicDocument):
    """Dynamic document embedding a fixed-schema ``DumbEmbedded``."""

    name = fields.StringField()
    foo = fields.IntField()
    embedded = fields.EmbeddedDocumentField(DumbEmbedded)
class DocumentEmbeddingDynamic(Document):
    """Static document embedding a dynamic ``DumbDynamicEmbedded``."""

    name = fields.StringField()
    foo = fields.IntField()
    embedded = fields.EmbeddedDocumentField(DumbDynamicEmbedded)
| {
"repo_name": "umutbozkurt/django-rest-framework-mongoengine",
"path": "tests/models.py",
"copies": "2",
"size": "1374",
"license": "mit",
"hash": -7786634229698272000,
"line_mean": 23.5357142857,
"line_max": 73,
"alpha_frac": 0.711790393,
"autogenerated": false,
"ratio": 3.9710982658959537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5682888658895954,
"avg_score": null,
"num_lines": null
} |
# Bug fix: ('ModManager') is just a parenthesised string, not a tuple, so
# ``from ... import *`` would iterate over its characters. __all__ must be
# a sequence of names.
__all__ = ('ModManager',)
import os
import json
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.unitmanager import UnitManager
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.entities.event import *
from pocketthrone.managers.pipe import L
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.entities.event import *
from pocketthrone.entities.mod import Mod
class ModManager:
    """Discovers, loads and selects game mods from the mods folder."""

    _tag = "[ModManager] "
    # primary Mod collection holders
    mods = []
    mods_by_name = {}
    _last_mod_id = -1
    # manager flags
    selected_mod = None
    is_initialized = False

    def __init__(self, mod_name="base"):
        self.load_mods()
        self.set_selected_mod(mod_name)
        self.is_initialized = True

    def next_mod_id(self):
        '''returns new unique mod id'''
        self._last_mod_id += 1
        return self._last_mod_id

    def load_mods(self):
        '''loads mod entities from mods folder'''
        # Bug fix: previously the loaded mods were appended to self.mods via
        # _add_mod() and then self.mods/self.mods_by_name were overwritten
        # with empty local lists, discarding everything just loaded.
        # Start from fresh instance-level collections instead.
        self.mods = []
        self.mods_by_name = {}
        subfolder_names = []
        # iterate through folder names in $APPROOT/mods/ for overview
        for foldername in os.listdir(FileManager.mod_root()):
            subfolder_names.append(foldername)
            print(self._tag + "mod folder name " + foldername)
        # iterate through folder names in $APPROOT/mods/ for Mod loading
        for basename in subfolder_names:
            # make Mod entity & set id
            mod = self.get_mod(basename)
            mod._id = self.next_mod_id()
            # append Mod to holder collections
            self._add_mod(mod)

    def get_mod(self, basename):
        '''load a single mod from disk by mod basename'''
        # load json from the mod's <basename>.json file
        json_file_path = FileManager.mod_root() + basename + "/" + basename + ".json"
        mod_json_content = FileManager.read_file(json_file_path)
        mod = Mod(basename, disabled=True)
        # mod folder has no json file - return an empty dummy
        if not mod_json_content:
            mod.is_empty = True
            return mod
        # load Mod properties from json, with placeholder defaults
        mod_json = json.loads(mod_json_content)
        mod_name = mod_json.get("name", "<unnamed>")
        mod_author = mod_json.get("author", "<unauthorized>")
        mod_desc = mod_json.get("desc", "<missing description>")
        mod_disabled = mod_json.get("disabled", False)
        # make new Mod entity from the parsed properties
        mod = Mod(basename, name=mod_name, desc=mod_desc, author=mod_author, disabled=mod_disabled)
        return mod

    def _add_mod(self, mod):
        '''adds a mod entity to ModManager; system method'''
        self.mods.append(mod)
        self.mods_by_name[mod.basename] = mod
        print(self._tag + "mod ADDED " + repr(mod))

    def add_mod(self, basename, name, desc="", desc_de=None):
        '''adds a mod entity to ModManager'''
        mod = Mod(basename, name=name, desc=desc, desc_de=desc_de)
        self._add_mod(mod)

    def get_mods(self):
        '''returns mod entity list when initialized'''
        if self.is_initialized:
            return self.mods
        else:
            return None

    def get_selected_mod(self):
        '''returns selected mod'''
        return self.selected_mod

    def set_selected_mod(self, basename):
        '''sets selected mod by basename'''
        self.selected_mod = self.get_mod(basename)
        print(self._tag + "selected mod is now " + repr(self.get_selected_mod()))
| {
"repo_name": "herrschr/pocket-throne",
"path": "pocketthrone/managers/modmanager.py",
"copies": "2",
"size": "3388",
"license": "bsd-2-clause",
"hash": -7578796371876426000,
"line_mean": 29.25,
"line_max": 93,
"alpha_frac": 0.7042502952,
"autogenerated": false,
"ratio": 3.1168353265869366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.968073204767129,
"avg_score": 0.0280707148231292,
"num_lines": 112
} |
__all__ = "Mod"
from pocketthrone.managers.filemanager import FileManager
class Mod:
    """A single game mod loaded from $APPROOT/mods/<basename>/."""

    # engine identifiers
    _id = -1
    _tag = "[Mod] "
    basename = None
    # entity strings
    name = None
    desc = ""
    desc_de = ""
    author = ""
    # entity flags
    is_initialized = False
    is_disabled = False
    is_empty = False

    def __init__(self, basename, name="<nameless>", desc="", desc_de=None, disabled=False, author="unknown"):
        # set optional properties OR defaults
        self.basename = basename
        self.author = author
        self.is_disabled = disabled
        self.name = name
        # descriptions
        self.desc = desc
        self.desc_de = desc_de
        # file paths
        self.json_path = None
        self.folder_path = None
        # initialize
        self.initialize()

    def initialize(self):
        '''initializes mod entity'''
        # make absolute mod .json file path
        folder_path = FileManager.mod_root() + self.basename
        json_path = folder_path + "/" + self.basename + ".json"
        # update entity-wide
        self.folder_path = folder_path
        self.json_path = json_path
        # warn when no German translation is found
        if not self.desc_de:
            self.desc_de = "<DE>" + self.desc
        # flag mod as initialized
        self.is_initialized = True

    def allowed(self):
        '''returns whether this mod is initialized and enabled'''
        if self.is_initialized and self.is_enabled():
            return True
        return False

    def set_enabled(self):
        '''flags this mod as enabled'''
        self.is_disabled = False

    def set_disabled(self):
        '''flags this mod as disabled'''
        self.is_disabled = True

    def is_enabled(self):
        '''returns whether this mod is enabled'''
        return not self.is_disabled

    # Bug fix: the former ``is_disabled()`` method was removed. It was
    # shadowed by the boolean instance attribute of the same name set in
    # __init__, so ``mod.is_disabled()`` raised "bool is not callable";
    # read the ``is_disabled`` attribute or use ``is_enabled()`` instead.

    def get_name(self):
        '''returns english mod name'''
        if not self.allowed():
            return "<uninitialized>"
        return self.name

    def get_author(self):
        '''returns mod author name'''
        return self.author

    def validate(self):
        '''flags mod as validated'''
        self.is_valid = True

    def __repr__(self):
        '''returns xml like mod representation'''
        return "<Mod basename=" + self.basename + " name=" + self.get_name() + " allowed=" + str(self.allowed()) + ">"
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/entities/mod.py",
"copies": "2",
"size": "2138",
"license": "bsd-2-clause",
"hash": 1407042309772149500,
"line_mean": 23.0224719101,
"line_max": 112,
"alpha_frac": 0.6697848457,
"autogenerated": false,
"ratio": 3.2344931921331317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9726437902208152,
"avg_score": 0.035568027124996106,
"num_lines": 89
} |
__all__ = ["Mofa"]
import numpy as np
import matplotlib.pyplot as pl
from scipy.cluster.vq import kmeans
from scipy.linalg import inv
from matplotlib.patches import Ellipse
from . import _algorithms
class Mofa(object):
    """
    Mixture of Factor Analyzers, fit with the EM algorithm.

    internal variables:

    `K`: Number of components
    `M`: Latent dimensionality
    `D`: Data dimensionality
    `N`: Number of data points

    `data`: (N,D) array of observations
    `latents`: (K,M,N) array of latent variables
    `latent_covs`: (K,M,M,N) array of latent covariances
    `lambdas`: (K,M,D) array of loadings
    `psis`: (K,D) array of diagonal variance values
    `rs`: (K,N) array of responsibilities
    `amps`: (K) array of component amplitudes

    Py3 fixes applied: ``xrange`` -> ``range``, ``np.Inf`` -> ``np.inf``
    (``np.Inf`` was removed in NumPy 2.0), ``!= None`` -> ``is not None``.
    """
    def __init__(self, data, K, M,
                 PPCA=False, lock_psis=False,
                 rs_clip=0.0,
                 max_condition_number=1.e6,
                 init=True, init_ppca=True):
        # required
        self.K = K
        self.M = M
        self.data = np.atleast_2d(data)
        self.dataT = self.data.T  # INSANE DATA DUPLICATION
        self.N = self.data.shape[0]
        self.D = self.data.shape[1]

        # options
        self.PPCA = PPCA
        self.lock_psis = lock_psis
        self.rs_clip = rs_clip
        self.max_condition_number = float(max_condition_number)
        assert rs_clip >= 0.0

        # empty arrays to be filled
        self.betas = np.zeros((self.K, self.M, self.D))
        self.latents = np.zeros((self.K, self.M, self.N))
        self.latent_covs = np.zeros((self.K, self.M, self.M, self.N))
        self.kmeans_rs = np.zeros(self.N, dtype=int)
        self.rs = np.zeros((self.K, self.N))

        # initialize
        if init:
            self._initialize(init_ppca)

    def _initialize(self, init_ppca, maxiter=200, tol=1e-4):
        """Initialize means (K-means), loadings, psis and amplitudes."""
        # Run K-means
        # This is crazy, but DFM's kmeans returns nans/infs
        # for some initializations
        self.means = kmeans(self.data, self.K)[0]
        self.run_kmeans()

        # Randomly assign factor loadings
        self.lambdas = np.random.randn(self.K, self.D, self.M) / \
            np.sqrt(self.max_condition_number)

        # Set (high rank) variance to variance of all data, along a dimension
        self.psis = np.tile(np.var(self.data, axis=0)[None, :], (self.K, 1))

        # Set initial covs
        self.covs = np.zeros((self.K, self.D, self.D))
        self.inv_covs = 0. * self.covs
        self._update_covs()

        # Randomly assign the amplitudes.
        self.amps = np.random.rand(self.K)
        self.amps /= np.sum(self.amps)

        if init_ppca:
            # for each cluster, run a PPCA
            for k in range(self.K):
                ind = self.kmeans_rs == k
                self.rs[k, ind] = 1
                sumrs = np.sum(self.rs[k])
                # run em
                L = None
                for i in range(maxiter):
                    self._one_component_E_step(k)
                    newL = self._log_sum(
                        self._log_multi_gauss(k, self.data[ind]))
                    newL = np.sum(newL)
                    self._one_component_M_step(k, sumrs, True)
                    self._update_covs()
                    if L is not None:
                        dL = np.abs((newL - L) / L)
                        if i > 5 and dL < tol:
                            break
                    L = newL

    def run_kmeans(self, maxiter=200, tol=1e-4, verbose=True):
        """
        Run the K-means algorithm using the C extension.

        :param maxiter:
            The maximum number of iterations to try.

        :param tol:
            The tolerance on the relative change in the loss function that
            controls convergence.

        :param verbose:
            Print all the messages?
        """
        iterations = _algorithms.kmeans(self.data, self.means,
                                        self.kmeans_rs, tol, maxiter)
        if verbose:
            if iterations < maxiter:
                print("K-means converged after {0} iterations."
                      .format(iterations))
            else:
                print("K-means *didn't* converge after {0} iterations."
                      .format(iterations))

    def run_em(self, maxiter=400, tol=1e-4, verbose=True):
        """
        Run the EM algorithm.

        :param maxiter:
            The maximum number of iterations to try.

        :param tol:
            The tolerance on the relative change in the loss function that
            controls convergence.

        :param verbose:
            Print all the messages?
        """
        L = None
        for i in range(maxiter):
            self._E_step()
            newL = self.logLs.sum()
            if i == 0 and verbose:
                print("Initial NLL =", -newL)
            self._M_step()
            if L is not None:
                dL = np.abs((newL - L) / L)
                if i > 5 and dL < tol:
                    break
            L = newL
        if i < maxiter - 1:
            if verbose:
                print("EM converged after {0} iterations".format(i))
                print("Final NLL = {0}".format(-newL))
        else:
            print("Warning: EM didn't converge after {0} iterations"
                  .format(i))

    def take_EM_step(self):
        """
        Do one E step and then do one M step. Duh!
        """
        self._E_step()
        self._M_step()

    def _E_step(self):
        """
        Expectation step. See docs for details.
        """
        # resposibilities and likelihoods
        self.logLs, self.rs = self._calc_probs()
        for k in range(self.K):
            self._one_component_E_step(k)

    def _M_step(self):
        """
        Maximization step. See docs for details.

        This assumes that `_E_step()` has been run.
        """
        sumrs = np.sum(self.rs, axis=1)

        # maximize for each component
        for k in range(self.K):
            self._one_component_M_step(k, sumrs[k], self.PPCA)
            self.amps[k] = sumrs[k] / self.N

        if self.lock_psis:
            # share a single (responsibility-weighted) psi across components
            psi = np.dot(sumrs, self.psis) / np.sum(sumrs)
            for k in range(self.K):
                self.psis[k] = psi

        self._update_covs()

    def _one_component_E_step(self, k):
        """
        Calculate the E step for one component.
        """
        # beta
        self.betas[k] = np.dot(self.lambdas[k].T, self.inv_covs[k])

        # latent values
        zeroed = self.dataT - self.means[k, :, None]
        self.latents[k] = np.dot(self.betas[k], zeroed)

        # latent empirical covariance
        step1 = self.latents[k, :, None, :] * self.latents[k, None, :, :]
        step2 = np.dot(self.betas[k], self.lambdas[k])
        self.latent_covs[k] = np.eye(self.M)[:, :, None] - step2[:, :, None] + step1

    def _one_component_M_step(self, k, sumrs, PPCA):
        """
        Calculate the M step for one component.
        """
        # means
        lambdalatents = np.dot(self.lambdas[k], self.latents[k])
        self.means[k] = np.sum(self.rs[k] * (self.dataT - lambdalatents),
                               axis=1) / sumrs

        # lambdas
        zeroed = self.dataT - self.means[k, :, None]
        self.lambdas[k] = np.dot(np.dot(zeroed[:, None, :] *
                                        self.latents[k, None, :, :], self.rs[k]),
                                 inv(np.dot(self.latent_covs[k], self.rs[k])))

        # psis
        # hacking a floor for psis to bound the covariance condition number
        psis = np.dot((zeroed - lambdalatents) * zeroed, self.rs[k]) / sumrs
        maxpsi = np.max(psis)
        maxlam = np.max(np.sum(self.lambdas[k] * self.lambdas[k], axis=0))
        minpsi = np.max([maxpsi, maxlam]) / self.max_condition_number
        psis = np.clip(psis, minpsi, np.inf)
        if PPCA:
            # PPCA constrains all diagonal variances to be equal
            psis = np.mean(psis) * np.ones(self.D)
        self.psis[k] = psis

    def _update_covs(self):
        """
        Update self.cov for responsibility, logL calc
        """
        for k in range(self.K):
            self.covs[k] = np.dot(self.lambdas[k], self.lambdas[k].T) + \
                np.diag(self.psis[k])
            self.inv_covs[k] = self._invert_cov(k)

    def _calc_probs(self):
        """
        Calculate log likelihoods, responsibilites for each datum
        under each component.
        """
        logrs = np.zeros((self.K, self.N))
        for k in range(self.K):
            logrs[k] = np.log(self.amps[k]) + self._log_multi_gauss(k, self.data)

        # here lies some ghetto log-sum-exp...
        # nothing like a little bit of overflow to make your day better!
        L = self._log_sum(logrs)
        logrs -= L[None, :]
        if self.rs_clip > 0.0:
            logrs = np.clip(logrs, np.log(self.rs_clip), np.inf)
        return L, np.exp(logrs)

    def _log_multi_gauss(self, k, D):
        """
        Gaussian log likelihood of the data for component k.
        """
        sgn, logdet = np.linalg.slogdet(self.covs[k])
        assert sgn > 0
        X1 = (D - self.means[k]).T
        X2 = np.dot(self.inv_covs[k], X1)
        p = -0.5 * np.sum(X1 * X2, axis=0)
        return -0.5 * np.log(2 * np.pi) * self.D - 0.5 * logdet + p

    def _log_sum(self, loglikes):
        """
        Calculate sum of log likelihoods
        """
        loglikes = np.atleast_2d(loglikes)
        a = np.max(loglikes, axis=0)
        return a + np.log(np.sum(np.exp(loglikes - a[None, :]), axis=0))

    def _invert_cov(self, k):
        """
        Calculate inverse covariance of mofa or ppca model,
        using inversion lemma
        """
        psiI = inv(np.diag(self.psis[k]))
        lam = self.lambdas[k]
        lamT = lam.T
        step = inv(np.eye(self.M) + np.dot(lamT, np.dot(psiI, lam)))
        step = np.dot(step, np.dot(lamT, psiI))
        step = np.dot(psiI, np.dot(lam, step))
        return psiI - step

    def plot_2d_ellipses(self, d1, d2, **kwargs):
        """
        Make a 2D plot of the model projected onto axes
        d1 and d2.
        """
        for k in range(self.K):
            mean = self.means[k, (d1, d2)]
            cov = self.covs[k][((d1, d2), (d1, d2)), ((d1, d1), (d2, d2))]
            self._plot_2d_ellipse(mean, cov, **kwargs)

    def _plot_2d_ellipse(self, mu, cov, ax=None, **kwargs):
        """
        Plot the error ellipse at a point given it's covariance matrix.
        """
        # some sane defaults
        facecolor = kwargs.pop('facecolor', 'none')
        edgecolor = kwargs.pop('edgecolor', 'k')

        x, y = mu
        U, S, V = np.linalg.svd(cov)
        theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
        ellipsePlot = Ellipse(xy=[x, y],
                              width=2 * np.sqrt(S[0]),
                              height=2 * np.sqrt(S[1]),
                              angle=theta,
                              facecolor=facecolor, edgecolor=edgecolor, **kwargs)

        if ax is None:
            ax = pl.gca()
        ax.add_patch(ellipsePlot)
| {
"repo_name": "rossfadely/mofa",
"path": "mofa/mofa.py",
"copies": "1",
"size": "11258",
"license": "mit",
"hash": 8948368724834086000,
"line_mean": 30.3593314763,
"line_max": 81,
"alpha_frac": 0.5037306804,
"autogenerated": false,
"ratio": 3.406354009077156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4410084689477156,
"avg_score": null,
"num_lines": null
} |
__all__ = ['MongoBackend', 'CachedMongoBackend']
import string
from copy import copy
from ..utils import randutils
from ..utils.cacheutils import memoize_with_expiry
from base import BaseBackend
class MongoBackend(BaseBackend):
    '''
    Collection implementation backed by a MongoDB database.
    '''

    idField = '_id'

    def _id2idfield(self, data):
        # rename the public 'id' key to the backend's id field name
        if not data or 'id' not in data or self.idField == 'id':
            return data
        data = copy(data)
        data[self.idField] = data.pop('id')
        return data

    def _idfield2id(self, data):
        # rename the backend's id field back to the public 'id' key
        if not data or self.idField not in data or self.idField == 'id':
            return data
        data = copy(data)
        data['id'] = data.pop(self.idField)
        return data

    def __init__(self, colName,
                 mongo=None, host="localhost:27017",
                 user="", passwd="", dbName="collections"):
        from pymongo import MongoClient
        if mongo:
            self.mongo = mongo
        else:
            # build a mongodb:// uri, with credentials when provided
            auth = '%s:%s@' % (user, passwd) if user else ''
            uri = 'mongodb://' + auth + host + "/" + dbName
            self.mongo = MongoClient(host=uri)
        self.dbName = dbName
        super(MongoBackend, self).__init__(colName)

    # Backend functions -----------------------

    def makeId(self, model):
        '''
        Creates a random uuid for an Id.
        '''
        alphabet = string.ascii_lowercase + string.digits
        return unicode(randutils.gen_random_str(12, alphabet))

    def add(self, model):
        '''
        Adds a model to this collection and the database. Relies on the
        super class' save functionality to assign an id.
        '''
        return model.save()

    def saveModel(self, model):
        return self.mongo[self.dbName][self.colName].save(
            self._id2idfield(model))

    def getItem(self, modelId):
        '''
        Fetch a single model by id. Note this hits the db on every call;
        see CachedMongoBackend for a memoized variant.
        '''
        found = self.mongo[self.dbName][self.colName].find_one(modelId)
        return self._idfield2id(found)

    def delete(self, model):
        return self.mongo[self.dbName][self.colName].remove(model.id)

    def len(self):
        return self.mongo[self.dbName][self.colName].count()

    def iter(self):
        for record in self.mongo[self.dbName][self.colName].find():
            yield record

    def find(self, query, limit=None):
        cursor = self.mongo[self.dbName][self.colName].find(
            self._id2idfield(query))
        if limit:
            cursor.limit(limit)
        for record in cursor:
            yield self._idfield2id(record)
class CachedMongoBackend(MongoBackend):
    '''
    Implements a collection using a mongo database backend, with
    ``getItem`` wrapped in an expiring memoization cache.
    '''
    # class-level cache dict shared with the memoization wrapper
    cache = {}

    def __init__(self, cachettl=300, **kwargs):
        # cachettl: lifetime of a cache entry in seconds
        super(CachedMongoBackend, self).__init__(**kwargs)
        # replace the bound getItem with a memoized version on this instance
        wrapper = memoize_with_expiry(self.cache, expiry_time=cachettl)
        self.getItem = wrapper(self.getItem)
| {
"repo_name": "colevscode/quickdata",
"path": "quickdata/backends/mongo.py",
"copies": "1",
"size": "3198",
"license": "mit",
"hash": 3956779924346384000,
"line_mean": 26.5689655172,
"line_max": 78,
"alpha_frac": 0.5769230769,
"autogenerated": false,
"ratio": 3.99250936329588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506943244019588,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
class Monitor(Wrapper):
    """Gym env wrapper that records per-episode reward, length and time.

    Episode statistics are attached to ``info['episode']`` on episode end
    and, when *filename* is given, appended to a ``*.monitor.csv`` file.
    """
    EXT = "monitor.csv"
    f = None

    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename:
            self.results_writer = ResultsWriter(filename,
                header={"t_start": time.time(), 'env_id' : env.spec and env.spec.id},
                extra_keys=reset_keywords + info_keywords
            )
        else:
            self.results_writer = None
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()

    def reset(self, **kwargs):
        """Reset the env; requires every declared reset keyword to be passed."""
        self.reset_state()
        for k in self.reset_keywords:
            v = kwargs.get(k)
            if v is None:
                raise ValueError('Expected you to pass kwarg %s into reset'%k)
            self.current_reset_info[k] = v
        return self.env.reset(**kwargs)

    def reset_state(self):
        """Clear per-episode bookkeeping, enforcing the early-reset policy."""
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False

    def step(self, action):
        """Step the env and record the transition."""
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.update(ob, rew, done, info)
        return (ob, rew, done, info)

    def update(self, ob, rew, done, info):
        """Record one step; on episode end attach stats to ``info``."""
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
            for k in self.info_keywords:
                epinfo[k] = info[k]
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            self.episode_times.append(time.time() - self.tstart)
            epinfo.update(self.current_reset_info)
            if self.results_writer:
                self.results_writer.write_row(epinfo)
            # Bug fix: an `assert isinstance(info, dict)` immediately
            # followed by `if isinstance(info, dict)` was contradictory
            # (and the assert vanishes under -O); keep only the guard.
            if isinstance(info, dict):
                info['episode'] = epinfo
        self.total_steps += 1

    def close(self):
        """Close the environment and release any open log file handles."""
        super(Monitor, self).close()
        if self.f is not None:
            self.f.close()
        # Bug fix: the csv file handle lives on the ResultsWriter, not on
        # ``self.f``, so it was leaked before.
        if self.results_writer is not None and self.results_writer.f is not None:
            self.results_writer.f.close()

    def get_total_steps(self):
        """Return the total number of environment steps taken."""
        return self.total_steps

    def get_episode_rewards(self):
        """Return the list of cumulative rewards, one per finished episode."""
        return self.episode_rewards

    def get_episode_lengths(self):
        """Return the list of episode lengths (in steps)."""
        return self.episode_lengths

    def get_episode_times(self):
        """Return wall-clock elapsed time (since wrapper creation) per episode."""
        return self.episode_times
class LoadMonitorResultsError(Exception):
    """Raised when no monitor log files can be loaded from a directory."""
    pass
class ResultsWriter(object):
    """Appends episode statistics to a ``*.monitor.csv`` file."""

    def __init__(self, filename, header='', extra_keys=()):
        self.extra_keys = extra_keys
        assert filename is not None
        # normalise the target path so it always ends with the monitor
        # extension (a directory gets a monitor.csv placed inside it)
        if not filename.endswith(Monitor.EXT):
            if osp.isdir(filename):
                filename = osp.join(filename, Monitor.EXT)
            else:
                filename = "{}.{}".format(filename, Monitor.EXT)
        self.f = open(filename, "wt")
        # a dict header is serialised as a commented json first line
        if isinstance(header, dict):
            header = '# {} \n'.format(json.dumps(header))
        self.f.write(header)
        fieldnames = ('r', 'l', 't') + tuple(extra_keys)
        self.logger = csv.DictWriter(self.f, fieldnames=fieldnames)
        self.logger.writeheader()
        self.f.flush()

    def write_row(self, epinfo):
        """Write one episode-info dict and flush it to disk immediately."""
        if self.logger:
            self.logger.writerow(epinfo)
            self.f.flush()
def get_monitor_files(dir):
    """Return paths of all monitor csv files directly inside *dir*."""
    pattern = osp.join(dir, "*" + Monitor.EXT)
    return glob(pattern)
def load_results(dir):
    """Load every monitor log under *dir* into one pandas DataFrame.

    Supports both the current csv format (commented json header line)
    and the deprecated json-lines format. Timestamps are shifted so that
    t=0 corresponds to the earliest recorded start time.
    """
    import pandas

    def _read_csv(handle):
        # current format: '#<json header>' line, then csv rows
        firstline = handle.readline()
        if not firstline:
            return None  # empty file - nothing to load
        assert firstline[0] == '#'
        return json.loads(firstline[1:]), pandas.read_csv(handle, index_col=None)

    def _read_json(handle):
        # deprecated format: json header line, then one json episode per line
        lines = handle.readlines()
        episodes = [json.loads(line) for line in lines[1:]]
        return json.loads(lines[0]), pandas.DataFrame(episodes)

    monitor_files = (
        glob(osp.join(dir, "*monitor.json")) +
        glob(osp.join(dir, "*monitor.csv")))  # get both csv and (old) json files
    if not monitor_files:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
    dfs = []
    headers = []
    for fname in monitor_files:
        with open(fname, 'rt') as fh:
            if fname.endswith('csv'):
                parsed = _read_csv(fh)
                if parsed is None:
                    continue
            elif fname.endswith('json'):
                parsed = _read_json(fh)
            else:
                assert 0, 'unreachable'
            header, frame = parsed
            headers.append(header)
            frame['t'] += header['t_start']
            dfs.append(frame)
    df = pandas.concat(dfs)
    df.sort_values('t', inplace=True)
    df.reset_index(inplace=True)
    df['t'] -= min(header['t_start'] for header in headers)
    df.headers = headers # HACK to preserve backwards compatibility
    return df
| {
"repo_name": "openai/baselines",
"path": "baselines/bench/monitor.py",
"copies": "1",
"size": "5741",
"license": "mit",
"hash": 1770098491501087000,
"line_mean": 34.2208588957,
"line_max": 174,
"alpha_frac": 0.5688904372,
"autogenerated": false,
"ratio": 3.87643484132343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9935790476263227,
"avg_score": 0.0019069604520407454,
"num_lines": 163
} |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import csv
import json
import os
import time
from glob import glob
from typing import Tuple, Dict, Any, List, Optional
import gym
import pandas
import numpy as np
class Monitor(gym.Wrapper):
    """
    A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.

    :param env: (gym.Env) The environment
    :param filename: (Optional[str]) the location to save a log file, can be None for no log
    :param allow_early_resets: (bool) allows the reset of the environment before it is done
    :param reset_keywords: (tuple) extra keywords for the reset call, if extra parameters are needed at reset
    :param info_keywords: (tuple) extra information to log, from the information return of environment.step
    """
    # file extension used for all monitor log files
    EXT = "monitor.csv"
    file_handler = None

    def __init__(self,
                 env: gym.Env,
                 filename: Optional[str],
                 allow_early_resets: bool = True,
                 reset_keywords=(),
                 info_keywords=()):
        super(Monitor, self).__init__(env=env)
        self.t_start = time.time()
        if filename is None:
            self.file_handler = None
            self.logger = None
        else:
            # normalise the path so it always ends with the monitor
            # extension (a directory gets a monitor.csv placed inside it)
            if not filename.endswith(Monitor.EXT):
                if os.path.isdir(filename):
                    filename = os.path.join(filename, Monitor.EXT)
                else:
                    filename = filename + "." + Monitor.EXT
            self.file_handler = open(filename, "wt")
            # first line is a commented json blob with start time and env id
            self.file_handler.write('#%s\n' % json.dumps({"t_start": self.t_start, 'env_id': env.spec and env.spec.id}))
            self.logger = csv.DictWriter(self.file_handler,
                                         fieldnames=('r', 'l', 't') + reset_keywords + info_keywords)
            self.logger.writeheader()
            self.file_handler.flush()
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        self.current_reset_info = {}  # extra info about the current episode, that was passed in during reset()

    def reset(self, **kwargs) -> np.ndarray:
        """
        Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True

        :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords
        :return: (np.ndarray) the first observation of the environment
        """
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, "
                               "wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False
        # every declared reset keyword must be supplied; they are logged
        # with the episode statistics
        for key in self.reset_keywords:
            value = kwargs.get(key)
            if value is None:
                raise ValueError('Expected you to pass kwarg {} into reset'.format(key))
            self.current_reset_info[key] = value
        return self.env.reset(**kwargs)

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict[Any, Any]]:
        """
        Step the environment with the given action

        :param action: (np.ndarray) the action
        :return: (Tuple[np.ndarray, float, bool, Dict[Any, Any]]) observation, reward, done, information
        """
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        observation, reward, done, info = self.env.step(action)
        self.rewards.append(reward)
        if done:
            # episode finished: aggregate stats, log them and expose them
            # to the caller via info['episode']
            self.needs_reset = True
            ep_rew = sum(self.rewards)
            eplen = len(self.rewards)
            ep_info = {"r": round(ep_rew, 6), "l": eplen, "t": round(time.time() - self.t_start, 6)}
            for key in self.info_keywords:
                ep_info[key] = info[key]
            self.episode_rewards.append(ep_rew)
            self.episode_lengths.append(eplen)
            self.episode_times.append(time.time() - self.t_start)
            ep_info.update(self.current_reset_info)
            if self.logger:
                self.logger.writerow(ep_info)
                self.file_handler.flush()
            info['episode'] = ep_info
        self.total_steps += 1
        return observation, reward, done, info

    def close(self):
        """
        Closes the environment
        """
        super(Monitor, self).close()
        if self.file_handler is not None:
            self.file_handler.close()

    def get_total_steps(self) -> int:
        """
        Returns the total number of timesteps

        :return: (int)
        """
        return self.total_steps

    def get_episode_rewards(self) -> List[float]:
        """
        Returns the rewards of all the episodes

        :return: ([float])
        """
        return self.episode_rewards

    def get_episode_lengths(self) -> List[int]:
        """
        Returns the number of timesteps of all the episodes

        :return: ([int])
        """
        return self.episode_lengths

    def get_episode_times(self) -> List[float]:
        """
        Returns the runtime in seconds of all the episodes

        :return: ([float])
        """
        return self.episode_times
class LoadMonitorResultsError(Exception):
    """Error raised when the monitor log files cannot be loaded."""
def get_monitor_files(path: str) -> List[str]:
    """
    Collect every monitor log file (``*`` + ``Monitor.EXT``) inside a folder.

    :param path: (str) the logging folder
    :return: ([str]) paths of the matching log files
    """
    pattern = os.path.join(path, "*" + Monitor.EXT)
    return glob(pattern)
def load_results(path: str) -> pandas.DataFrame:
    """
    Load all Monitor logs from a given directory path matching ``*monitor.csv`` and ``*monitor.json``

    :param path: (str) the directory path containing the log file(s)
    :return: (pandas.DataFrame) the logged data
    """
    # get both csv and (old) json files
    monitor_files = (glob(os.path.join(path, "*monitor.json")) + get_monitor_files(path))
    if not monitor_files:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, path))
    data_frames = []
    headers = []
    for file_name in monitor_files:
        with open(file_name, 'rt') as file_handler:
            if file_name.endswith('csv'):
                # CSV logs begin with a '#'-prefixed JSON metadata line
                # (t_start, env id, ...) followed by ordinary CSV rows.
                first_line = file_handler.readline()
                assert first_line[0] == '#'
                header = json.loads(first_line[1:])
                data_frame = pandas.read_csv(file_handler, index_col=None)
                headers.append(header)
            elif file_name.endswith('json'):  # Deprecated json format
                # One JSON object per line; the first line is the run header.
                episodes = []
                lines = file_handler.readlines()
                header = json.loads(lines[0])
                headers.append(header)
                for line in lines[1:]:
                    episode = json.loads(line)
                    episodes.append(episode)
                data_frame = pandas.DataFrame(episodes)
            else:
                assert 0, 'unreachable'
            # Convert the per-file relative episode end times to absolute times.
            data_frame['t'] += header['t_start']
        data_frames.append(data_frame)
    data_frame = pandas.concat(data_frames)
    data_frame.sort_values('t', inplace=True)
    data_frame.reset_index(inplace=True)
    # Re-base 't' on the earliest run start across all files.
    data_frame['t'] -= min(header['t_start'] for header in headers)
    # data_frame.headers = headers  # HACK to preserve backwards compatibility
    return data_frame
| {
"repo_name": "hill-a/stable-baselines",
"path": "stable_baselines/bench/monitor.py",
"copies": "1",
"size": "7798",
"license": "mit",
"hash": 6931061600118927000,
"line_mean": 36.4903846154,
"line_max": 120,
"alpha_frac": 0.5846370864,
"autogenerated": false,
"ratio": 4.067814293166406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008910885542952053,
"num_lines": 208
} |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
from os import path
import time
from glob import glob
try:
import ujson as json # Not necessary for monitor writing, but very useful for monitor loading
except ImportError:
import json
class Monitor(Wrapper):
    """Gym env wrapper that logs one JSON object per completed episode
    (reward, length, elapsed time); the first log line is a JSON header
    with run metadata."""
    EXT = "monitor.json"
    f = None  # log file handle; stays None when logging is disabled
    def __init__(self, env, filename, allow_early_resets=False):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename is None:
            # No filename: stats are still tracked, nothing is written to disk.
            self.f = None
            self.logger = None
        else:
            if not filename.endswith(Monitor.EXT):
                filename = filename + "." + Monitor.EXT
            self.f = open(filename, "wt")
            self.logger = JSONLogger(self.f)
            # Header line: start time plus environment identification.
            self.logger.writekvs({"t_start": self.tstart, "gym_version": gym.__version__,
                "env_id": env.spec.id if env.spec else 'Unknown'})
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.total_steps = 0
        self.current_metadata = {} # extra info that gets injected into each log entry
        # Useful for metalearning where we're modifying the environment externally
        # But want our logs to know about these modifications
    def __getstate__(self): # XXX
        # Pickling support: the open file handle and logger are not
        # picklable, so record the file name and episode count instead.
        d = self.__dict__.copy()
        if self.f:
            del d['f'], d['logger']
            d['_filename'] = self.f.name
            d['_num_episodes'] = len(self.episode_rewards)
        else:
            d['_filename'] = None
        return d
    def __setstate__(self, d):
        # Unpickling: reopen the log and truncate it to exactly the lines
        # recorded at pickle time (1 header line + 1 line per episode).
        filename = d.pop('_filename')
        self.__dict__ = d
        if filename is not None:
            nlines = d.pop('_num_episodes') + 1
            self.f = open(filename, "r+t")
            for _ in range(nlines):
                self.f.readline()
            self.f.truncate()
            self.logger = JSONLogger(self.f)
    def reset(self):
        """Reset the env; rejected mid-episode unless allow_early_resets."""
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False
        return self.env.reset()
    def step(self, action):
        """Step the env; on episode end, log reward/length/elapsed time."""
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            epinfo = {"r": eprew, "l": eplen, "t": round(time.time() - self.tstart, 6)}
            epinfo.update(self.current_metadata)
            if self.logger:
                self.logger.writekvs(epinfo)
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            info['episode'] = epinfo
        self.total_steps += 1
        return (ob, rew, done, info)
    def close(self):
        """Close the log file, if one was opened."""
        if self.f is not None:
            self.f.close()
    def get_total_steps(self):
        """Total env steps taken across all episodes."""
        return self.total_steps
    def get_episode_rewards(self):
        """Cumulative reward of each completed episode."""
        return self.episode_rewards
    def get_episode_lengths(self):
        """Length (in steps) of each completed episode."""
        return self.episode_lengths
class JSONLogger(object):
    """Writes key/value records as one JSON object per line to a file handle."""

    def __init__(self, file):
        self.file = file

    def writekvs(self, kvs):
        """Serialize `kvs` as a single JSON line and flush immediately."""
        for key, value in kvs.items():
            # numpy scalars expose `.dtype`; coerce them to plain Python
            # floats so json.dumps can serialize the record.
            if hasattr(value, 'dtype'):
                kvs[key] = float(value.tolist())
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()
class LoadMonitorResultsError(Exception):
    """Raised when loading the monitor log files fails."""
    pass
def get_monitor_files(dir):
    """Return the paths of all monitor log files directly inside `dir`."""
    pattern = path.join(dir, "*" + Monitor.EXT)
    return glob(pattern)
def load_results(dir):
    """Load every JSON monitor log under `dir` and merge them chronologically.

    Returns a dict with env info, sorted episode end times, lengths, rewards
    and the earliest reset time across all logs.
    Raises LoadMonitorResultsError when no log files are found.
    """
    fnames = get_monitor_files(dir)
    if not fnames:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
    episodes = []
    headers = []
    for fname in fnames:
        with open(fname, 'rt') as fh:
            lines = fh.readlines()
            # The first line of each log is a JSON header with run metadata.
            header = json.loads(lines[0])
            headers.append(header)
            for line in lines[1:]:
                episode = json.loads(line)
                # Convert the episode-relative end time into an absolute time
                # so episodes from different log files can be interleaved.
                episode['abstime'] = header['t_start'] + episode['t']
                del episode['t']
                episodes.append(episode)
    header0 = headers[0]
    # All merged logs must come from the same environment.
    for header in headers[1:]:
        assert header['env_id'] == header0['env_id'], "mixing data from two envs"
    # Chronological order across all logs.
    episodes = sorted(episodes, key=lambda e: e['abstime'])
    return {
        'env_info': {'env_id': header0['env_id'], 'gym_version': header0['gym_version']},
        'episode_end_times': [e['abstime'] for e in episodes],
        'episode_lengths': [e['l'] for e in episodes],
        'episode_rewards': [e['r'] for e in episodes],
        # (outer min over a one-element list is a no-op; this is simply the
        # earliest t_start across all headers)
        'initial_reset_time': min([min(header['t_start'] for header in headers)])
    }
| {
"repo_name": "learnercys/baselines",
"path": "baselines/bench/monitor.py",
"copies": "3",
"size": "5053",
"license": "mit",
"hash": 9182285160155120000,
"line_mean": 33.6095890411,
"line_max": 174,
"alpha_frac": 0.5711458539,
"autogenerated": false,
"ratio": 3.7374260355029585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032163139420530906,
"num_lines": 146
} |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
class Monitor(Wrapper):
    """Gym env wrapper logging one CSV row per completed episode (reward,
    length, elapsed time, plus any `reset_keywords` values); the first file
    line is a '#'-prefixed JSON header with run metadata."""
    EXT = "monitor.csv"
    f = None  # log file handle; stays None when logging is disabled
    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename is None:
            # No filename: stats are still tracked, nothing is written to disk.
            self.f = None
            self.logger = None
        else:
            if not filename.endswith(Monitor.EXT):
                if osp.isdir(filename):
                    filename = osp.join(filename, Monitor.EXT)
                else:
                    filename = filename + "." + Monitor.EXT
            self.f = open(filename, "wt")
            # Metadata header as a commented JSON line, then the CSV header row.
            self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, "gym_version": gym.__version__,
                "env_id": env.spec.id if env.spec else 'Unknown'}))
            self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords)
            self.logger.writeheader()
        self.reset_keywords = reset_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.total_steps = 0
        self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
    # NOTE(review): `_reset`/`_step` appear to be override hooks of an older
    # gym Wrapper API -- confirm against the gym version this repo pins.
    def _reset(self, **kwargs):
        """Reset the env; every name in `reset_keywords` must be supplied as a
        kwarg and is recorded into each subsequent episode's log row."""
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False
        for k in self.reset_keywords:
            v = kwargs.get(k)
            if v is None:
                raise ValueError('Expected you to pass kwarg %s into reset'%k)
            self.current_reset_info[k] = v
        return self.env.reset(**kwargs)
    def _step(self, action):
        """Step the env; on episode end, write a CSV row with the stats."""
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
            epinfo.update(self.current_reset_info)
            if self.logger:
                self.logger.writerow(epinfo)
                self.f.flush()
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            info['episode'] = epinfo
        self.total_steps += 1
        return (ob, rew, done, info)
    def close(self):
        """Close the log file, if one was opened."""
        if self.f is not None:
            self.f.close()
    def get_total_steps(self):
        """Total env steps taken across all episodes."""
        return self.total_steps
    def get_episode_rewards(self):
        """Cumulative reward of each completed episode."""
        return self.episode_rewards
    def get_episode_lengths(self):
        """Length (in steps) of each completed episode."""
        return self.episode_lengths
class LoadMonitorResultsError(Exception):
    """Raised when loading the monitor log files fails."""
    pass
def get_monitor_files(dir):
    """Return all monitor csv log files located directly under `dir`."""
    pattern = osp.join(dir, "*" + Monitor.EXT)
    return glob(pattern)
def load_results(dir):
    """Load all Monitor logs (csv and deprecated json) under `dir` into a
    single time-sorted pandas DataFrame; 't' is rebased on the earliest run.

    Raises LoadMonitorResultsError when no log files are found.
    """
    import pandas
    monitor_files = glob(osp.join(dir, "*monitor.*")) # get both csv and (old) json files
    if not monitor_files:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
    dfs = []
    headers = []
    for fname in monitor_files:
        with open(fname, 'rt') as fh:
            if fname.endswith('csv'):
                # CSV logs start with a '#'-prefixed JSON metadata line.
                firstline = fh.readline()
                assert firstline[0] == '#'
                header = json.loads(firstline[1:])
                df = pandas.read_csv(fh, index_col=None)
                headers.append(header)
            elif fname.endswith('json'): # Deprecated json format
                # One JSON object per line; the first line is the run header.
                episodes = []
                lines = fh.readlines()
                header = json.loads(lines[0])
                headers.append(header)
                for line in lines[1:]:
                    episode = json.loads(line)
                    episodes.append(episode)
                df = pandas.DataFrame(episodes)
            # NOTE(review): no else-branch -- a matched file with another
            # extension would reuse the previous iteration's df/header.
            # Convert per-file relative times to absolute times.
            df['t'] += header['t_start']
        dfs.append(df)
    df = pandas.concat(dfs)
    df.sort_values('t', inplace=True)
    # Re-base 't' on the earliest run start across all files.
    df['t'] -= min(header['t_start'] for header in headers)
    df.headers = headers # HACK to preserve backwards compatibility
    return df | {
"repo_name": "brain-research/mirage-rl-bpttv",
"path": "baselines/bench/monitor.py",
"copies": "1",
"size": "4553",
"license": "mit",
"hash": 4052522776231246000,
"line_mean": 36.0243902439,
"line_max": 174,
"alpha_frac": 0.5695146058,
"autogenerated": false,
"ratio": 3.845439189189189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4914953794989189,
"avg_score": null,
"num_lines": null
} |
__all__ = ['montage2d']
import numpy as np
from .. import exposure
EPSILON = 1e-6
def montage2d(arr_in, fill='mean', rescale_intensity=False, grid_shape=None):
    """Tile an ensemble of equally shaped 2-D images into one 2-D montage.

    Parameters
    ----------
    arr_in : ndarray, shape=[n_images, height, width]
        Stack of ``n_images`` images of identical shape.
    fill : float or 'mean', optional
        Value used for the padding tiles when the grid has more cells than
        images. ``'mean'`` (default) uses ``arr_in.mean()``.
    rescale_intensity : bool, optional
        If True, rescale each image's intensity to [0, 1] first.
    grid_shape : tuple, optional
        Desired montage grid as ``(tiles_y, tiles_x)``; defaults to the
        smallest square grid able to hold all images.

    Returns
    -------
    arr_out : ndarray
        The assembled montage, shape ``(tiles_y * height, tiles_x * width)``.
    """
    assert arr_in.ndim == 3
    n_images, height, width = arr_in.shape
    arr_in = arr_in.copy()  # never modify the caller's data

    if rescale_intensity:
        for idx in range(n_images):
            arr_in[idx] = exposure.rescale_intensity(arr_in[idx])

    # Grid dimensions: caller-specified, or the smallest enclosing square.
    if grid_shape:
        tiles_y, tiles_x = grid_shape
    else:
        tiles_y = tiles_x = int(np.ceil(np.sqrt(n_images)))

    # Pad the stack with constant tiles so it exactly fills the grid.
    if fill == 'mean':
        fill = arr_in.mean()
    n_missing = int(tiles_y * tiles_x - n_images)
    padding = np.ones((n_missing, height, width), dtype=arr_in.dtype) * fill
    stacked = np.vstack((arr_in, padding))

    # Rearrange (tiles_y, tiles_x, height, width) -> 2-D mosaic.
    mosaic = stacked.reshape(tiles_y, tiles_x, height, width)
    mosaic = mosaic.swapaxes(1, 2)
    return mosaic.reshape(tiles_y * height, tiles_x * width)
| {
"repo_name": "SamHames/scikit-image",
"path": "skimage/util/montage.py",
"copies": "3",
"size": "3204",
"license": "bsd-3-clause",
"hash": 1939502593469311700,
"line_mean": 28.1272727273,
"line_max": 77,
"alpha_frac": 0.534019975,
"autogenerated": false,
"ratio": 3.204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00017152658662092623,
"num_lines": 110
} |
__all__ = ['montage2d']
import numpy as np
from .. import exposure
EPSILON = 1e-6
def montage2d(arr_in, fill='mean', rescale_intensity=False):
    """Tile an ensemble of equally shaped 2-D images into one 2-D montage.

    Parameters
    ----------
    arr_in : ndarray, shape=[n_images, height, width]
        Stack of ``n_images`` images of identical shape. Note: when
        ``rescale_intensity`` is True the input array is modified in place.
    fill : float or 'mean', optional
        Value used for the padding tiles when ``sqrt(n_images)`` is not an
        integer. ``'mean'`` (default) uses ``arr_in.mean()``.
    rescale_intensity : bool, optional
        If True, rescale each image's intensity to [0, 1] first.

    Returns
    -------
    arr_out : ndarray
        The assembled montage, shape ``(alpha * height, alpha * width)``
        where ``alpha = ceil(sqrt(n_images))``.
    """
    assert arr_in.ndim == 3
    n_images, height, width = arr_in.shape

    if rescale_intensity:
        for idx in range(n_images):
            arr_in[idx] = exposure.rescale_intensity(arr_in[idx])

    # Smallest square grid able to hold every image.
    side = int(np.ceil(np.sqrt(n_images)))

    # Pad the stack with constant tiles so it exactly fills the grid.
    if fill == 'mean':
        fill = arr_in.mean()
    n_missing = int((side**2.) - n_images)
    padding = np.ones((n_missing, height, width), dtype=arr_in.dtype) * fill
    mosaic = np.vstack((arr_in, padding))

    # Rearrange (side, side, height, width) -> 2-D mosaic.
    mosaic = mosaic.reshape(side, side, height, width)
    mosaic = mosaic.swapaxes(1, 2)
    return mosaic.reshape(side * height, side * width)
| {
"repo_name": "emmanuelle/scikits.image",
"path": "skimage/util/montage.py",
"copies": "2",
"size": "2613",
"license": "bsd-3-clause",
"hash": -8174177404243759000,
"line_mean": 26.21875,
"line_max": 76,
"alpha_frac": 0.5545350172,
"autogenerated": false,
"ratio": 3.270337922403004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4824872939603004,
"avg_score": null,
"num_lines": null
} |
__all__ = ["MonthBoxOffice"]
import tushare as ts
from flask import request,json
from flask_restful import Resource
def get_month_boxoffice(month=None):
    """Fetch one month of movie box-office data from tushare as a JSON string.

    :param month: month as a 'YYYY-M' string; when None, tushare's default
        (the previous month) is used.
    :return: JSON string with the box-office data, or a JSON error payload
        when tushare rejects the month value.
    """
    if month is None:  # `is None`, not `== None`: identity test for the singleton
        result = ts.month_boxoffice().to_json()
    else:
        try:
            result = ts.month_boxoffice(month).to_json()
        except Exception:
            # tushare raises on malformed dates; surface a structured error
            # instead of propagating the exception to the API layer.
            result = json.dumps({"error":True,"message":"can not get the data, format date as YYYY-M"})
    return result
class MonthBoxOffice(Resource):
    """Monthly movie box-office data; defaults to the previous month, or pass
    a month parameter to fetch a specific month.

    Query parameters:
        date: year-month (YYYY-MM); defaults to the previous month.

    Response fields:
        Irank         rank
        MovieName     movie title
        WomIndex      word-of-mouth index
        avgboxoffice  average ticket price
        avgshowcount  average attendance per screening
        box_pro       share of the month's total box office
        boxoffice     box office for the month (10k CNY)
        days          days shown within the month
        releaseTime   release date
    """
    def get(self):
        # Optional ?date=YYYY-MM query parameter; None falls back to default.
        date = request.args.get("date")
        return get_month_boxoffice(date)
| {
"repo_name": "FinaceInfo/Chinese-box-office-info",
"path": "api/boxoffice/month_boxoffice.py",
"copies": "1",
"size": "1040",
"license": "mit",
"hash": -7809324577735618000,
"line_mean": 22.2972972973,
"line_max": 103,
"alpha_frac": 0.6357308585,
"autogenerated": false,
"ratio": 2.455840455840456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3591571314340456,
"avg_score": null,
"num_lines": null
} |
__all__ = ["MouseEvent", \
"TouchEvent", \
"KeyEvent", \
"kPressedMouseEventType", \
"kReleasedMouseEventType", \
"kMovedMouseEventType", \
"kLeftMouseButton", \
"kMiddleMouseButton", \
"kRightMouseButton", \
"kNoneMouseButton", \
"kTouchStart", \
"kTouchEnd", \
"kTouchMove", \
"kKeyDownEventType", \
"kKeyUpEventType", \
"kRawKeyDownEventType", \
"kCharEventType", \
"kAltKeyModifierMask", \
"kControlKeyModifierMask", \
"kMetaKeyModifierMask", \
"kShiftKeyModifierMask", \
"kNumLockKeyModifierMask"]
# Specifies the type of the mouse event.
kPressedMouseEventType = 0
kReleasedMouseEventType = 1
kMovedMouseEventType = 2
# Specifies the mouse buttons.
kLeftMouseButton = 0
kMiddleMouseButton = 1
kRightMouseButton = 2
kNoneMouseButton = 3
# Specifies the type of the touch event.
kTouchStart = 0
kTouchEnd = 1
kTouchMove = 2
# Specifies the type of the keyboard event.
kKeyDownEventType = 0
kKeyUpEventType = 1
kRawKeyDownEventType = 2
kCharEventType = 3
# Specifies modifier keys as stated in
# third_party/WebKit/Source/WebCore/inspector/Inspector.json.
# Notice: |kNumLockKeyModifierMask| is for usage in the key_converter.cc
# and keycode_text_conversion_x.cc only, not for inspector.
# These are single-bit masks and may be combined with bitwise OR.
kAltKeyModifierMask = 1 << 0
kControlKeyModifierMask = 1 << 1
kMetaKeyModifierMask = 1 << 2
kShiftKeyModifierMask = 1 << 3
kNumLockKeyModifierMask = 1 << 4
class MouseEvent(object):
    """Value object describing a single mouse action to dispatch."""

    def __init__(self, mouse_event_type, mouse_button, x, y, modifiers, click_count):
        self.typer = mouse_event_type
        self.button = mouse_button
        self.x = x
        self.y = y
        self.modifiers = modifiers
        self.click_count = click_count  # should not be negative
class TouchEvent(object):
    """Value object describing a single touch action at position (x, y)."""

    def __init__(self, touch_event_type, x, y):
        # BUG FIX: the constructor was spelled `__init_` (one trailing
        # underscore), so Python never recognized it and
        # TouchEvent(type, x, y) raised a TypeError.
        self.typer = touch_event_type
        self.x = x
        self.y = y
class KeyEvent(object):
    """Value object describing a single keyboard action to dispatch."""

    def __init__(self, key_event_type, modifiers, modified_text, unmodified_text, key_code):
        self.typer = key_event_type
        self.modifiers = modifiers
        self.modified_text = modified_text
        self.unmodified_text = unmodified_text
        self.key_code = key_code
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/ui_events.py",
"copies": "1",
"size": "2339",
"license": "bsd-3-clause",
"hash": 6284396824353492000,
"line_mean": 27.1807228916,
"line_max": 90,
"alpha_frac": 0.6524155622,
"autogenerated": false,
"ratio": 3.4858420268256336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9527762938184344,
"avg_score": 0.022098930168257813,
"num_lines": 83
} |
__all__ = ['MPI_allocator']
import os
import commands
import logging
from openmdao.main.mp_support import OpenMDAO_Manager, register
from openmdao.main.resource import FactoryAllocator, \
HOME_DIRECTORY, WORKING_DIRECTORY
from openmdao.main.objserverfactory import ObjServer
from openmdao.main.rbac import get_credentials, set_credentials, rbac
class MPI_Allocator(FactoryAllocator):
"""
A resource allocator for jobs running in an MPI environment
such as Torque or PBS.
"""
_MPI = True
def __init__(self,name='MPI_Allocator',machines=None, accounting_id='no-default-set',
authkey=None, allow_shell=True):
super(MPI_Allocator, self).__init__(name, authkey, allow_shell)
self.factory.manager_class = _ServerManager
self.factory.server_classname = 'mpiallocator_mpiallocator_MPI_Server'
self.accounting_id = accounting_id
self.machines = machines
#command = 'echo $PBS_VERSION'
command = 'echo $PBS_NODEFILE'
IS_PBS=commands.getoutput(command) != ''
self.job_id = 0
if not IS_PBS:
print 'Warning: This is not an mpi environment'
# really only used for testing
MPI_Allocator._MPI = False
# get job information from PBS environment variables
if machines is None:
self.machines = []
self.machines=commands.getoutput('cat $PBS_NODEFILE').split('\n')
self.n_cpus = 0
self.workers=[]
for host in self.machines:
print 'appending node',host,'to allocator'
self.workers.append({'hostname':host,'state':1})
self.n_cpus+=1
self.max_cpus = len(self.workers)
def configure(self, cfg):
"""
Configure allocator from :class:`ConfigParser` instance.
Normally only called during manager initialization.
cfg: :class:`ConfigParser`
Configuration data is located under the section matching
this allocator's `name`.
Allows modifying `accounting_id` and factory options.
"""
super(PBS_Allocator, self).configure(cfg)
if cfg.has_option(self.name, 'accounting_id'):
self.accounting_id = cfg.get(self.name, 'accounting_id')
self._logger.debug(' accounting_id: %s', self.accounting_id)
@rbac('*')
def max_servers(self, resource_desc):
"""
Return the maximum number of servers which could be deployed for
`resource_desc`.
resource_desc: dict
Description of required resources.
"""
retcode, info = self.check_compatibility(resource_desc)
if retcode != 0:
return (0, info)
elif 'min_cpus' in resource_desc:
return (self.max_cpus / resource_desc['min_cpus'], {})
else:
return (self.max_cpus, {})
@rbac('*')
def time_estimate(self, resource_desc):
"""
Return ``(estimate, criteria)`` indicating how well this resource
allocator can satisfy the `resource_desc` request. The estimate will
be:
- >0 for an estimate of walltime (seconds).
- 0 for no estimate.
- -1 for no resource at this time.
- -2 for no support for `resource_desc`.
The returned criteria is a dictionary containing information related
to the estimate, such as hostnames, load averages, unsupported
resources, etc.
resource_desc: dict
Description of required resources.
"""
hostnames=[]
criteria = {
'hostnames':hostnames,
}
if 'name' in resource_desc:
if resource_desc['name'] != self.name:
return (-2,criteria)
if 'min_cpus' in resource_desc:
n_cpus = resource_desc['min_cpus']
else:
return (-2,criteria)
if self._qstat() < n_cpus:
return (-1,criteria)
nh = 0
for host in self.workers:
if nh == n_cpus: break
if host['state'] == 1:
hostnames.append(host['hostname'])
nh+=1
criteria = {
'hostnames':hostnames,
}
return (0,criteria)
def check_compatibility(self, resource_desc):
"""
Check compatibility with resource attributes.
resource_desc: dict
Description of required resources.
Returns ``(retcode, info)``. If Compatible, then `retcode` is zero
and `info` is empty. Otherwise `retcode` will be -2 and `info` will
be a single-entry dictionary whose key is the incompatible key in
`resource_desc` and value provides data regarding the incompatibility.
"""
retcode, info = \
super(MPI_Allocator, self).check_compatibility(resource_desc)
if retcode != 0:
return (retcode, info)
for key in info:
value = resource_desc[key]
if key == 'localhost':
if value:
return (-2, {key: 'requested local host'})
elif key == 'min_cpus':
self.n_cpus = self._qstat()
if self.n_cpus < value:
return (-2, {'min_cpus': 'want %s, have %s'
% (value, self.n_cpus)})
elif key == 'max_cpus':
pass
else:
return (-2, {key: 'unrecognized key'})
return (0, {})
@rbac('*')
def deploy(self,name, resource_desc,criteria):
"""
Deploy a server suitable for `resource_desc`.
Returns a proxy to the deployed server.
name: string
Name for server.
resource_desc: dict
Description of required resources.
criteria: dict
The dictionary returned by :meth:`time_estimate`.
"""
hostnames = []
n_cpus=resource_desc['min_cpus']
nh = 0
for i,worker in enumerate(self.workers):
if nh == n_cpus: break
if worker['state'] == 1:
worker['state'] = 0
hostnames.append(worker['hostname'])
nh+=1
print 'allocating hosts',hostnames
credentials = get_credentials()
allowed_users = {credentials.user: credentials.public_key}
try:
server = self.factory.create(typname='', allowed_users=allowed_users,
name=name)
# overwrite the server's host list with the assigned hosts
server.host = hostnames[0]
server.mpi_resources = hostnames
return server
# Shouldn't happen...
except Exception as exc: #pragma no cover
self._logger.error('create failed: %r', exc)
return None
def release(self,server):
"""
"""
print 'releasing hosts',server.mpi_resources
for worker in self.workers:
for host in server.host:
if host == worker['hostname']:
worker['state'] = 1
self.factory.release(server)
def _qstat(self):
"""check status of the workers and return number of free nodes"""
free=0
for i in range(len(self.workers)):
free+=self.workers[i]['state']
return free
#def shutdown(self):
# """ todo: shut down MPIallocator cluster """
# pass
class MPI_Server(ObjServer):
    """
    Server that knows how to execute an MPI job with mpirun given a
    resource description containing a list of hosts to execute the job on.
    """
    @rbac('owner')
    def configure(self, accounting_id):
        """
        Initialize the server's host list.
        accounting_id: string
            Accepted for interface compatibility but currently unused here;
            this method only resets ``mpi_resources`` (which is later
            overwritten by MPI_Allocator.deploy with the assigned hosts).
        """
        self.mpi_resources = None
    # NOTE(review): the command-submission logic below is unfinished and
    # intentionally left commented out.
    #  @rbac('owner')
    #  def execute_command(self,resource_desc)
    #      """
    #      Submit command based on `resource_desc`.
    #      resource_desc: dict
    #          Description of command and required resources.
    #      Necessary resource keys:
    #      ========================= ===========================
    #      Resource Key              Description
    #      ========================= ===========================
    #      remote_command            name of executable
    #      ------------------------- ---------------------------
    #      hostnames                 list of hosts
    #      ------------------------- ---------------------------
    #      The job will be submitted in the following manner:
    #      mpirun [-np X] [-host <hostnames>] <remote_command>
    #      """
    #      env = None
    #      command=[]
    #      command.extend(self.mpi_path)
    #      # put together execute command from resource_desc
    #      # first the execute command, probably mpirun
    #      if 'hostnames' in resource_desc:
    #          np = len(resource_desc['hostnames'])
    #          if np > 0:
    #              self.command.extend(('-np',str(np)))
    #              command.extend(('-host',str(resource_desc['hostnames'])))
    #          else:
    #              raise ValueError('%s: np must be > 0, got %d'
    #                               % (self.name, np))
    #      else:
    #          raise ValueError('"hostnames" key must be specified in resource_desc')
    #      if 'remote_command' in resource_desc:
    #          command.extend(resource_desc['remote_command'])
    #      try:
    #          process = ShellProc(command, DEV_NULL, 'qsub.out', STDOUT, env)
class _ServerManager(OpenMDAO_Manager):
    """
    A :class:`multiprocessing.Manager` which manages :class:`MPI_Server`.
    """
    pass
# Make MPI_Server creatable through _ServerManager proxies under the
# 'mpiallocator.mpiallocator' typeid.
register(MPI_Server, _ServerManager, 'mpiallocator.mpiallocator')
| {
"repo_name": "fzahle/MPI_allocator",
"path": "src/mpiallocator/mpiallocator.py",
"copies": "1",
"size": "9866",
"license": "apache-2.0",
"hash": 4613777893670449000,
"line_mean": 30.2215189873,
"line_max": 89,
"alpha_frac": 0.5450030407,
"autogenerated": false,
"ratio": 4.3196147110332745,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5364617751733274,
"avg_score": null,
"num_lines": null
} |
__all__ = ['MultiLabelsFeatureSelector']
from .labels import MultiLabelsClassifier
from ..utils import get_class_from_module_path
import numpy as np
class MultiLabelsFeatureSelector(MultiLabelsClassifier):
    """Feature 'selector' that scores each feature against every binarized
    label column and keeps, per feature, the maximum score over all labels."""
    def __init__(self, score_func=None, **kwargs):
        # NOTE(review): super() is called with MultiLabelsClassifier as the
        # *first* argument, which skips MultiLabelsClassifier.__init__ in the
        # MRO and runs its parent's __init__ instead. Possibly intentional --
        # confirm against MultiLabelsClassifier's constructor requirements.
        super(MultiLabelsClassifier, self).__init__(**kwargs)
        # score_func may be given as a dotted module path; resolve it to the
        # actual callable.
        self.score_func = get_class_from_module_path(score_func)
    #end def
    def fit_binarized(self, X_featurized, Y_binarized, **kwargs):
        """Score every feature against every binarized label column and store
        per-(feature, label) scores/p-values plus per-feature max scores."""
        scores = np.zeros((X_featurized.shape[1], Y_binarized.shape[1]))
        pval = np.zeros((X_featurized.shape[1], Y_binarized.shape[1]))
        # score_func is called per label column and is expected to return a
        # (scores, p-values) pair, one value per feature.
        for j in range(Y_binarized.shape[1]):
            scores[:, j], pval[:, j] = self.score_func(X_featurized, Y_binarized[:, j])
        self.labels_scores_ = scores
        self.labels_pvals_ = pval
        # A feature's overall score is its best score over all labels.
        self.scores_ = np.max(scores, axis=1)
        return self
    #end def
    def feature_select(self, X_featurized, Y_labels, **kwargs):
        """Fit on the given data and return the per-feature scores."""
        self.fit(X_featurized, Y_labels, **kwargs)
        return self.scores_
    #end def
    def transform(self, X, **kwargs):
        # Identity transform: scoring only, no columns are dropped here.
        return X
#end class
| {
"repo_name": "skylander86/ycml",
"path": "ycml/classifiers/feature_selector.py",
"copies": "1",
"size": "1124",
"license": "apache-2.0",
"hash": -9061495526740382000,
"line_mean": 29.3783783784,
"line_max": 87,
"alpha_frac": 0.6387900356,
"autogenerated": false,
"ratio": 3.2769679300291545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9370752295623485,
"avg_score": 0.009001134001134,
"num_lines": 37
} |
__all__ = ['mvee']
import math
import numpy as np
try:
    from scipy.spatial import ConvexHull
except ImportError:
    # scipy unavailable: fall back to using every input point. Still correct
    # for mvee, just slower since the iteration runs over all N points.
    def _getConvexHull(points):
        return points
else:
    def _getConvexHull(points):
        # Reduce the input to the unique vertices of its convex hull before
        # running the (iterative) ellipsoid fit.
        hull = ConvexHull(points)
        return points[np.unique(hull.simplices)]
def mvee(points, tol=1.e-4, limits=10000):
    """
    Finds the minimum volume enclosing ellipsoid (MVEE) of a set of data points
    in the M-dimentional space.

    Parameters
    ----------
    points : (N, M) array_like
        A array of N points in the M-dimentional space. N must be larger than M.
    tol : float, optional
        Error in the solution with respect to the optimal value.
    limits : int, optional
        Maximal number of iteration.

    Returns
    -------
    A : (M,M) ndarray
        The matrix of the ellipse equation in the 'center form':
        (x-c)^{T} A^{-1} (x-c) = 1,
        where the eigenvalues of A are the squares of the semiaxes.
    c : (M,) ndarray
        The center of the ellipse.

    Notes
    -----
    This function is ported from the MATLAB routine
    ``Minimum Volume Enclosing Ellipsoid'' (see [1]_ and [2]_)
    by Nima Moshtagh (nima@seas.upenn.edu) at University of Pennsylvania.
    Note that the output matrix A here is different from the original MATLAB
    routine, where it returns A^{-1} instead.

    References
    ----------
    .. [1] http://www.mathworks.com/matlabcentral/fileexchange/9542-minimum-volume-enclosing-ellipsoid/content/MinVolEllipse.m
    .. [2] http://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
    """
    # Only the hull vertices (or all points, without scipy) feed the iteration.
    P = _getConvexHull(points)
    N, d = P.shape
    if N <= d:
        raise ValueError("The number of points must be larger than the number of dimensions.")
    dp1_inv = 1./float(d+1)
    # Lift the points to homogeneous coordinates: Q is (d+1, N).
    Q = np.vstack((P.T, np.ones(N)))
    err = tol + 1.
    # u is a weight vector over the points; starts uniform and is kept
    # normalized (sums to 1) at each step.
    u = np.ones(N)/float(N)
    while err > tol and limits > 0:
        # X = Q diag(u) Q^T ; M[i] measures how far point i sticks out of the
        # current ellipsoid estimate, so the argmax is the worst-fitting point.
        X_inv = np.linalg.inv(np.einsum('ij,j,kj', Q, u, Q))
        M = np.einsum('ji,jk,ki->i', Q, X_inv, Q)
        j = np.argmax(M)
        # Step size from the update rule of the referenced MinVolEllipse code;
        # weight is shifted toward the worst-fitting point.
        step_size = (1.-d/(M[j]-1.))*dp1_inv
        u[j] -= 1.
        # Convergence measure: magnitude of the change applied to u.
        err = math.sqrt((u*u).sum())*math.fabs(step_size)
        u *= (1.-step_size)
        u[j] += 1.
        u /= u.sum()
        limits -= 1
    # Center is the u-weighted mean of the points; A is the ellipsoid matrix
    # in center form (scaled weighted covariance).
    c = np.dot(u, P)
    A = (np.einsum('ji,j,jk', P, u, P) - np.outer(c,c)) * float(d)
    return A, c
| {
"repo_name": "manodeep/yymao-helpers",
"path": "helpers/mvee.py",
"copies": "1",
"size": "2395",
"license": "mit",
"hash": 1497706374908923400,
"line_mean": 31.8082191781,
"line_max": 126,
"alpha_frac": 0.5949895616,
"autogenerated": false,
"ratio": 3.1806108897742362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4275600451374236,
"avg_score": null,
"num_lines": null
} |
__all__ = ["n3_bias_field_correction", "n3_bias_field_correction2", "n4_bias_field_correction", "abp_n4"]
from . import process_args as pargs
from .get_mask import get_mask
from .iMath import iMath
from ..core import ants_image as iio
from .. import utils
def n3_bias_field_correction(image, downsample_factor=3):
    """
    N3 Bias Field Correction

    ANTsR function: `n3BiasFieldCorrection`

    Arguments
    ---------
    image : ANTsImage
        image to be bias corrected
    downsample_factor : scalar
        how much to downsample image before performing bias correction

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read( ants.get_ants_data('r16') )
    >>> image_n3 = ants.n3_bias_field_correction(image)
    """
    # The library call writes its result into a pre-allocated clone.
    corrected = image.clone()
    call_args = pargs._int_antsProcessArguments(
        [image.dimension, image, corrected, downsample_factor]
    )
    utils.get_lib_fn("N3BiasFieldCorrection")(call_args)
    return corrected
def n3_bias_field_correction2(
    image,
    mask=None,
    shrink_factor=4,
    convergence={"iters": 50, "tol": 1e-07},
    spline_param=200,
    number_of_fitting_levels=4,
    return_bias_field=False,
    verbose=False,
    weight_mask=None,
):
    """
    N3 Bias Field Correction

    ANTsR function: `n3BiasFieldCorrection2`

    Arguments
    ---------
    image : ANTsImage
        image to bias correct
    mask : ANTsImage
        input mask, if one is not passed one will be made
    shrink_factor : scalar
        Shrink factor for multi-resolution correction, typically integer less than 4
    convergence : dict w/ keys `iters` and `tol`
        iters : maximum number of iterations
        tol : the convergence tolerance.
    spline_param : float or vector
        Parameter controlling number of control points in spline. Either single value, indicating the spacing in each direction, or vector with one entry per dimension of image, indicating the mesh size.
    number_of_fitting_levels : integer
        Number of fitting levels per iteration.
    return_bias_field : boolean
        Return bias field instead of bias corrected image.
    verbose : boolean
        enables verbose output.
    weight_mask : ANTsImage (optional)
        antsImage of weight mask

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> image = ants.image_read( ants.get_ants_data('r16') )
    >>> image_n3 = ants.n3_bias_field_correction2(image)
    """
    if image.pixeltype != "float":
        image = image.clone("float")
    iters = convergence["iters"]
    tol = convergence["tol"]
    if mask is None:
        mask = get_mask(image)
    N3_CONVERGENCE_1 = "[%i,%.10f]" % (iters, tol)
    N3_SHRINK_FACTOR_1 = str(shrink_factor)
    # A one-element list/tuple means a single spacing value; unwrap it so the
    # "%i" formatting below receives a number. (Previously the sequence itself
    # was interpolated with "%i", raising a TypeError.)
    if isinstance(spline_param, (list, tuple)) and len(spline_param) == 1:
        spline_param = spline_param[0]
    if not isinstance(spline_param, (list, tuple)):
        N3_BSPLINE_PARAMS = "[%i,%i]" % (spline_param, number_of_fitting_levels)
    elif len(spline_param) == image.dimension:
        # One mesh-size entry per image dimension, joined ANTs-style with 'x'.
        N3_BSPLINE_PARAMS = "[%s,%i]" % (
            ("x".join([str(sp) for sp in spline_param])),
            number_of_fitting_levels,
        )
    else:
        raise ValueError(
            "Length of splineParam must either be 1 or dimensionality of image"
        )
    if weight_mask is not None:
        if not isinstance(weight_mask, iio.ANTsImage):
            raise ValueError("Weight Image must be an antsImage")
    # The library writes the corrected image and the bias field into these
    # pre-allocated clones, addressed through their pointer strings.
    outimage = image.clone("float")
    outbiasfield = image.clone("float")
    i = utils.get_pointer_string(outimage)
    b = utils.get_pointer_string(outbiasfield)
    output = "[%s,%s]" % (i, b)
    kwargs = {
        "d": outimage.dimension,
        "i": image,
        "w": weight_mask,
        "s": N3_SHRINK_FACTOR_1,
        "c": N3_CONVERGENCE_1,
        "b": N3_BSPLINE_PARAMS,
        "x": mask,
        "o": output,
        "v": int(verbose),
    }
    processed_args = pargs._int_antsProcessArguments(kwargs)
    libfn = utils.get_lib_fn("N3BiasFieldCorrection")
    libfn(processed_args)
    if return_bias_field:
        return outbiasfield
    return outimage
def n4_bias_field_correction(
    image,
    mask=None,
    shrink_factor=4,
    convergence={"iters": [50, 50, 50, 50], "tol": 1e-07},
    spline_param=200,
    return_bias_field=False,
    verbose=False,
    weight_mask=None,
):
    """
    N4 Bias Field Correction

    ANTsR function: `n4BiasFieldCorrection`

    Arguments
    ---------
    image : ANTsImage
        image to bias correct
    mask : ANTsImage
        input mask, if one is not passed one will be made
    shrink_factor : scalar
        Shrink factor for multi-resolution correction, typically integer less than 4
    convergence : dict w/ keys `iters` and `tol`
        iters : vector of maximum number of iterations for each level
        tol : the convergence tolerance.
    spline_param : float or vector
        Parameter controlling number of control points in spline. Either single value, indicating the spacing in each direction, or vector with one entry per dimension of image, indicating the mesh size.
    return_bias_field : boolean
        Return bias field instead of bias corrected image.
    verbose : boolean
        enables verbose output.
    weight_mask : ANTsImage (optional)
        antsImage of weight mask

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> image = ants.image_read( ants.get_ants_data('r16') )
    >>> image_n4 = ants.n4_bias_field_correction(image)
    """
    if image.pixeltype != "float":
        image = image.clone("float")
    iters = convergence["iters"]
    tol = convergence["tol"]
    if mask is None:
        mask = get_mask(image)
    # Per-level iteration counts joined ANTs-style with 'x'.
    N4_CONVERGENCE_1 = "[%s, %.10f]" % ("x".join([str(it) for it in iters]), tol)
    N4_SHRINK_FACTOR_1 = str(shrink_factor)
    # A one-element list/tuple means a single spacing value; unwrap it so the
    # "%i" formatting below receives a number. (Previously the sequence itself
    # was interpolated with "%i", raising a TypeError.)
    if isinstance(spline_param, (list, tuple)) and len(spline_param) == 1:
        spline_param = spline_param[0]
    if not isinstance(spline_param, (list, tuple)):
        N4_BSPLINE_PARAMS = "[%i]" % spline_param
    elif len(spline_param) == image.dimension:
        N4_BSPLINE_PARAMS = "[%s]" % ("x".join([str(sp) for sp in spline_param]))
    else:
        raise ValueError(
            "Length of splineParam must either be 1 or dimensionality of image"
        )
    if weight_mask is not None:
        if not isinstance(weight_mask, iio.ANTsImage):
            raise ValueError("Weight Image must be an antsImage")
    # The library writes the corrected image and the bias field into these
    # pre-allocated clones, addressed through their pointer strings.
    outimage = image.clone("float")
    outbiasfield = image.clone("float")
    i = utils.get_pointer_string(outimage)
    b = utils.get_pointer_string(outbiasfield)
    output = "[%s,%s]" % (i, b)
    kwargs = {
        "d": outimage.dimension,
        "i": image,
        "w": weight_mask,
        "s": N4_SHRINK_FACTOR_1,
        "c": N4_CONVERGENCE_1,
        "b": N4_BSPLINE_PARAMS,
        "x": mask,
        "o": output,
        "v": int(verbose),
    }
    processed_args = pargs._int_antsProcessArguments(kwargs)
    libfn = utils.get_lib_fn("N4BiasFieldCorrection")
    libfn(processed_args)
    if return_bias_field:
        return outbiasfield
    return outimage
def abp_n4(image, intensity_truncation=(0.025, 0.975, 256), mask=None, usen3=False):
    """
    Truncate outlier intensities and bias correct with the N4 algorithm.

    ANTsR function: `abpN4`

    Arguments
    ---------
    image : ANTsImage
        image to correct and truncate
    intensity_truncation : 3-tuple
        quantiles for intensity truncation
    mask : ANTsImage (optional)
        mask for bias correction
    usen3 : boolean
        if True, use N3 bias correction instead of N4

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> image2 = ants.abp_n4(image)
    """
    bad_arg = not isinstance(intensity_truncation, (list, tuple)) or len(
        intensity_truncation
    ) != 3
    if bad_arg:
        raise ValueError("intensity_truncation must be list/tuple with 3 values")
    # Clamp intensity outliers before estimating the bias field.
    lower, upper, nbins = intensity_truncation
    truncated = iMath(image, "TruncateIntensity", lower, upper, nbins)
    if usen3 == True:
        # Two N3 passes at decreasing downsample factors.
        return n3_bias_field_correction(n3_bias_field_correction(truncated, 4), 2)
    return n4_bias_field_correction(truncated, mask)
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/bias_correction.py",
"copies": "1",
"size": "8509",
"license": "apache-2.0",
"hash": -2783060727474430500,
"line_mean": 27.6498316498,
"line_max": 203,
"alpha_frac": 0.6198143143,
"autogenerated": false,
"ratio": 3.567714884696017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4687529198996017,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'AbortHandshake', 'ConnectionClosed', 'DuplicateParameter',
'InvalidHandshake', 'InvalidHeader', 'InvalidHeaderFormat',
'InvalidHeaderValue', 'InvalidMessage', 'InvalidOrigin',
'InvalidParameterName', 'InvalidParameterValue', 'InvalidState',
'InvalidStatusCode', 'InvalidUpgrade', 'InvalidURI', 'NegotiationError',
'PayloadTooBig', 'WebSocketProtocolError',
]
class InvalidHandshake(Exception):
    """
    Base class for errors raised when a handshake request or response is
    invalid.
    """
class AbortHandshake(InvalidHandshake):
    """
    Exception raised to interrupt a handshake and send back a HTTP response.

    The response's status, headers and body are kept on the instance.
    """
    def __init__(self, status, headers, body=b''):
        self.status = status
        self.headers = headers
        self.body = body
        summary = "HTTP {}, {} headers, {} bytes".format(
            status, len(headers), len(body))
        super().__init__(summary)
class InvalidMessage(InvalidHandshake):
    """
    Exception raised when the HTTP message of a handshake request is
    malformed.
    """
class InvalidHeader(InvalidHandshake):
    """
    Exception raised when a HTTP header is missing, empty, or carries an
    unacceptable format or value.
    """
    def __init__(self, name, value):
        message = (
            "Invalid {} header: {}".format(name, value)
            if value
            else "Missing or empty {} header".format(name)
        )
        super().__init__(message)
class InvalidHeaderFormat(InvalidHeader):
    """
    Exception raised when a ``Sec-WebSocket-*`` HTTP header cannot be parsed.
    """
    def __init__(self, name, error, string, pos):
        # Embed the parse position and offending string in the message.
        detail = "{} at {} in {}".format(error, pos, string)
        super().__init__(name, detail)
class InvalidHeaderValue(InvalidHeader):
    """
    Exception raised when a ``Sec-WebSocket-*`` HTTP header carries a wrong
    value.
    """
class InvalidUpgrade(InvalidHeader):
    """
    Exception raised when the Upgrade or Connection header is incorrect.
    """
class InvalidOrigin(InvalidHeader):
    """
    Exception raised when the request's Origin header isn't allowed.
    """
    def __init__(self, origin):
        # Delegate message formatting to InvalidHeader.
        super().__init__('Origin', origin)
class InvalidStatusCode(InvalidHandshake):
    """
    Exception raised when a handshake response carries a status code other
    than 101.

    The offending code is available as the ``status_code`` attribute.
    """
    def __init__(self, status_code):
        self.status_code = status_code
        super().__init__("Status code not 101: {}".format(status_code))
class NegotiationError(InvalidHandshake):
    """
    Exception raised when negotiating an extension fails.
    """
class InvalidParameterName(NegotiationError):
    """
    Exception raised when an extension header contains an invalid parameter
    name, kept on the ``name`` attribute.
    """
    def __init__(self, name):
        self.name = name
        super().__init__("Invalid parameter name: {}".format(name))
class InvalidParameterValue(NegotiationError):
    """
    Exception raised when an extension header contains an invalid parameter
    value; the offending pair is kept on ``name`` and ``value``.
    """
    def __init__(self, name, value):
        self.name = name
        self.value = value
        super().__init__(
            "Invalid value for parameter {}: {}".format(name, value))
class DuplicateParameter(NegotiationError):
    """
    Exception raised when an extension header repeats a parameter name,
    kept on the ``name`` attribute.
    """
    def __init__(self, name):
        self.name = name
        super().__init__("Duplicate parameter: {}".format(name))
class InvalidState(Exception):
    """
    Exception raised when an operation isn't permitted in the connection's
    current state.
    """
# Human-readable explanations for WebSocket close codes (RFC 6455 §7.4.1).
# Codes tagged [internal] are never sent on the wire; they are synthesized
# locally to describe how the connection ended.
CLOSE_CODES = {
    1000: "OK",
    1001: "going away",
    1002: "protocol error",
    1003: "unsupported type",
    # 1004 is reserved
    1005: "no status code [internal]",
    1006: "connection closed abnormally [internal]",
    1007: "invalid data",
    1008: "policy violation",
    1009: "message too big",
    1010: "extension required",
    1011: "unexpected error",
    1015: "TLS failure [internal]",
}
class ConnectionClosed(InvalidState):
    """
    Exception raised when trying to read or write on a closed connection.

    The close code and reason are available as the ``code`` and ``reason``
    attributes respectively.
    """
    def __init__(self, code, reason):
        self.code = code
        self.reason = reason
        # Ranges 3000-3999 and 4000-4999 are reserved by RFC 6455; anything
        # else is looked up in the module-level CLOSE_CODES table.
        if 3000 <= code < 4000:
            explanation = "registered"
        elif 4000 <= code < 5000:
            explanation = "private use"
        else:
            explanation = CLOSE_CODES.get(code, "unknown")
        parts = [
            "WebSocket connection is closed: ",
            "code = {} ({}), ".format(code, explanation),
            "reason = {}".format(reason) if reason else "no reason",
        ]
        super().__init__("".join(parts))
class InvalidURI(Exception):
    """
    Exception raised when a URI isn't a valid websocket URI.
    """
class PayloadTooBig(Exception):
    """
    Exception raised when a frame's payload exceeds the configured maximum
    size.
    """
class WebSocketProtocolError(Exception):
    """
    Internal exception raised when the remote endpoint breaks the protocol.
    """
| {
"repo_name": "gnmiller/craig-bot",
"path": "craig-bot/lib/python3.6/site-packages/websockets/exceptions.py",
"copies": "1",
"size": "5325",
"license": "mit",
"hash": 6457262638596644000,
"line_mean": 24.7246376812,
"line_max": 79,
"alpha_frac": 0.6264788732,
"autogenerated": false,
"ratio": 4.430116472545757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5556595345745757,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'AddCellConnToPoints',
'PointsToTube',
'LonLatToUTM',
'RotatePoints',
'ExtractPoints',
'RotationTool',
'ExtractCellCenters',
'AppendCellCenters',
'IterateOverPoints',
'ConvertUnits',
'BuildSurfaceFromPoints',
]
__displayname__ = 'Point/Line Sets'
from datetime import datetime
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
# improt CreateTensorMesh for its cell string parsing
from ..model_build import CreateTensorMesh
# NOTE: internal import of pyproj in LonLatToUTM
###############################################################################
# ---- Cell Connectivity ----#
class AddCellConnToPoints(FilterBase):
    """This filter will add linear cell connectivity between scattered points.
    You have the option to add ``VTK_Line`` or ``VTK_PolyLine`` connectivity.
    ``VTK_Line`` connectivity makes a straight line between the points in order
    (either in the order by index or using a nearest neighbor calculation).
    The ``VTK_PolyLine`` adds a poly line connectivity between all points as
    one spline (either in the order by index or using a nearest neighbor
    calculation). Type map is specified in `vtkCellType.h`.
    **Cell Connectivity Types:**
    - 4: Poly Line
    - 3: Line
    """
    __displayname__ = 'Add Cell Connectivity to Points'
    __category__ = 'filter'
    def __init__(self, **kwargs):
        # One vtkPolyData in, one vtkPolyData out.
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkPolyData',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )
        # Parameters (all overridable via kwargs)
        self.__cell_type = kwargs.get('cell_type', vtk.VTK_POLY_LINE)
        self.__usenbr = kwargs.get('nearest_nbr', False)
        self.__close_loop = kwargs.get('close_loop', False)
        self.__keep_vertices = kwargs.get('keep_vertices', False)
        self.__unique = kwargs.get('unique', False)
    def _connect_cells(self, pdi, pdo, log_time=False):
        """Internal helper to perform the connection.

        Copies the input points, optionally de-duplicates and reorders them
        (nearest-neighbor shortest path), then writes line or poly-line cells
        into ``pdo``. Point arrays are carried over unchanged.
        """
        # NOTE: Type map is specified in vtkCellType.h
        cell_type = self.__cell_type
        if log_time:
            start_time = datetime.now()
        # Get the Points over the NumPy interface
        pdi = pyvista.wrap(pdi)
        points = np.copy(
            pdi.points
        )  # New NumPy array of points so we don't destroy the input
        if self.__unique:
            # Remove repeated points, preserving first-occurrence order
            indexes = np.unique(points, return_index=True, axis=0)[1]
            points = np.array(points[sorted(indexes)])
        def _find_min_path(points):
            # Brute-force search: for every start point, walk the KD-tree
            # ordering and keep the ordering with the smallest total length.
            try:
                # sklearn's KDTree is faster: use it if available
                from sklearn.neighbors import KDTree as Tree
            except ImportError:
                from scipy.spatial import cKDTree as Tree
            _compute_dist = lambda pt0, pt1: np.linalg.norm(pt0 - pt1)
            ind, min_dist = None, np.inf
            tree = Tree(points)
            for pt in points:
                cur_ind = tree.query([pt], k=len(points))[1].ravel()
                dist = 0.0
                for i in range(len(cur_ind) - 1):
                    dist += _compute_dist(points[cur_ind[i]], points[cur_ind[i + 1]])
                if dist < min_dist:
                    ind = cur_ind
                    min_dist = dist
            return ind.ravel()
        if self.__usenbr:
            ind = _find_min_path(points)
        else:
            # Keep the points in their original (index) order
            ind = np.arange(len(points), dtype=int)
        if self.__keep_vertices:
            # Constructing from points directly keeps one vertex cell per point
            poly = pyvista.PolyData(np.copy(points))
        else:
            poly = pyvista.PolyData()
            poly.points = np.copy(points)
        if cell_type == vtk.VTK_LINE:
            # One 2-point line cell per consecutive pair: [2, i, j] rows
            lines = np.c_[np.full(len(ind) - 1, 2), ind[0:-1], ind[1:]]
            if self.__close_loop:
                # Append a final segment from the last point back to the first
                app = np.append(
                    lines,
                    [
                        [2, ind[-1], ind[0]],
                    ],
                    axis=0,
                )
                lines = app
            poly.lines = lines
        elif cell_type == vtk.VTK_POLY_LINE:
            # A single poly-line cell threading all points
            cells = vtk.vtkCellArray()
            cell = vtk.vtkPolyLine()
            if self.__close_loop:
                cell.GetPointIds().SetNumberOfIds(len(ind) + 1)
            else:
                cell.GetPointIds().SetNumberOfIds(len(ind))
            for i in ind:
                cell.GetPointIds().SetId(i, ind[i])
            if self.__close_loop:
                # NOTE(review): `i` here is the *last value* of `ind`, not
                # len(ind)-1; when nearest_nbr reorders points this may write
                # the closing id at an interior position — confirm intended.
                cell.GetPointIds().SetId(i + 1, ind[0])
            cells.InsertNextCell(cell)
            poly.SetLines(cells)
        else:
            raise _helpers.PVGeoError('Cell type ({}) not supported'.format(cell_type))
        # Carry input point arrays over to the output
        for key, val in pdi.point_arrays.items():
            poly.point_arrays[key] = val
        pdo.DeepCopy(poly)
        if log_time:
            print("exectuted in {}".format(datetime.now() - start_time))
        return pdo
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output data object"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Perform task
        self._connect_cells(pdi, pdo)
        return 1
    #### Setters and Getters ####
    def set_cell_type(self, cell_type):
        """Set the cell type by the integer id as specified in `vtkCellType.h`"""
        if cell_type != self.__cell_type:
            self.__cell_type = cell_type
            self.Modified()
    def set_use_nearest_nbr(self, flag):
        """Set a flag on whether to a KDTree nearest neighbor
        algorithm to sort the points to before adding linear connectivity.
        """
        if flag != self.__usenbr:
            self.__usenbr = flag
            self.Modified()
    def set_use_unique_points(self, flag):
        """Set a flag on whether to only use unique points"""
        if flag != self.__unique:
            self.__unique = flag
            self.Modified()
###############################################################################
class PointsToTube(AddCellConnToPoints):
    """Takes points from a vtkPolyData object and constructs a line of those
    points then builds a polygonal tube around that line with some specified
    radius and number of sides.
    """
    __displayname__ = 'Points to Tube'
    __category__ = 'filter'

    def __init__(self, num_sides=20, radius=10.0, capping=False, **kwargs):
        AddCellConnToPoints.__init__(self, **kwargs)
        # Tube geometry parameters.
        # NOTE: CellType should remain vtk.VTK_POLY_LINE (4) connection
        self.__numSides = num_sides
        self.__radius = radius
        self.__capping = capping

    def _connect_cells(self, pdi, pdo, log_time=False):
        """Build the connected line via the parent, then wrap it in a tube."""
        AddCellConnToPoints._connect_cells(self, pdi, pdo, log_time=log_time)
        tube = vtk.vtkTubeFilter()
        tube.SetInputData(pdo)
        # Apply user-defined tube parameters
        tube.SetNumberOfSides(self.__numSides)
        tube.SetRadius(self.__radius)
        tube.SetCapping(self.__capping)
        tube.Update()
        pdo.ShallowCopy(tube.GetOutput())
        return pdo

    #### Setters and Getters ####

    def set_radius(self, radius):
        """Set the radius of the tube"""
        if self.__radius != radius:
            self.__radius = radius
            self.Modified()

    def set_number_of_sides(self, num):
        """Set the number of sides (resolution) for the tube"""
        if self.__numSides != num:
            self.__numSides = num
            self.Modified()

    def set_capping(self, flag):
        """Set a boolean flag on whether or not to cap the ends of the tube"""
        if self.__capping != flag:
            self.__capping = flag
            self.Modified()
###############################################################################
# ---- LonLat to Cartesian ----#
class LonLatToUTM(FilterPreserveTypeBase):
    """Converts point coordinates from longitude/latitude to UTM."""

    __displayname__ = 'Lat Lon To UTM'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterPreserveTypeBase.__init__(self, inputType='vtkDataSet', **kwargs)
        # Defaults; immediately overridden (with validation) below.
        # FIX: was `(11,)` — a tuple — instead of the integer zone number.
        self.__zone = 11
        self.__ellps = 'WGS84'
        self.set_zone(kwargs.get('zone', 11))  # User defined
        self.set_ellps(kwargs.get('ellps', 'WGS84'))  # User defined

    @staticmethod
    def get_available_ellps(idx=None):
        """Return the available ellipsoid names, WGS84 first.

        If ``idx`` is given, return only the name at that index.
        """
        import pyproj
        # FIX: ``dict.keys()`` has no ``.index``/``.insert``/``.pop`` in
        # Python 3 — materialize a list before reordering.
        ellps = list(pyproj.pj_ellps.keys())
        # Migrate WGSXX entries to the front so that WGS84 is always default
        for name in ['WGS60', 'WGS66', 'WGS72', 'WGS84']:
            ellps.insert(0, ellps.pop(ellps.index(name)))
        if idx is not None:
            return ellps[idx]
        return ellps

    def __convert_2d(self, lon, lat, elev):
        """Convert lon/lat arrays to XY UTM coordinates; elevation unchanged."""
        import pyproj
        p = pyproj.Proj(proj='utm', zone=self.__zone, ellps=self.__ellps)
        utm_x, utm_y = p(lon, lat)
        return np.c_[utm_x, utm_y, elev]

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = pyvista.wrap(self.GetInputData(inInfo, 0, 0))
        pdo = self.GetOutputData(outInfo, 0)
        if not hasattr(pdi, 'points'):
            raise _helpers.PVGeoError(
                'Input data object does not have points to convert.'
            )
        coords = pdi.points.copy()  # copy so we don't destroy the input
        # Convert the points: columns are (lon, lat, elev)
        points = self.__convert_2d(coords[:, 0], coords[:, 1], coords[:, 2])
        output = pdi.copy()
        output.points = points
        pdo.DeepCopy(output)
        return 1

    def set_zone(self, zone):
        """Set the UTM zone number (1-60).

        Raises
        ------
        PVGeoError
            If ``zone`` falls outside the valid range.
        """
        if zone < 1 or zone > 60:
            raise _helpers.PVGeoError('Zone (%d) is invalid.' % zone)
        if self.__zone != zone:
            self.__zone = int(zone)
            self.Modified()

    def set_ellps(self, ellps):
        """Set the ellipsoid type by name, or by index into the list
        returned by :meth:`get_available_ellps`.
        """
        if isinstance(ellps, int):
            ellps = self.get_available_ellps(idx=ellps)
        if not isinstance(ellps, str):
            raise _helpers.PVGeoError('Ellps must be a string.')
        if self.__ellps != ellps:
            self.__ellps = ellps
            self.Modified()
###############################################################################
class RotationTool(object):
    """A set of methods/tools for performing and estimating coordinate
    rotations on the XY plane.
    """

    __displayname__ = 'Rotation Tool'
    __category__ = 'filter'

    def __init__(self, decimals=6):
        # Angular search resolution (radians) and rounding precision used by
        # the angle/spacing estimation routines.
        self.RESOLUTION = np.pi / 3200.0
        self.DECIMALS = decimals

    @staticmethod
    def _get_rotation_matrix(theta):
        """Build the 2D rotation matrix for angle ``theta`` (radians).

        If ``theta`` is an array, returns one 2x2 matrix per angle with
        shape ``(len(theta), 2, 2)``.
        """
        xx = np.cos(theta)
        xy = -np.sin(theta)
        yx = np.sin(theta)
        yy = np.cos(theta)
        if not isinstance(theta, np.ndarray):
            return np.array([[xx, xy], [yx, yy]])
        # Otherwise populate the stacked array manually
        mat = np.zeros((len(theta), 2, 2))
        mat[:, 0, 0] = xx
        mat[:, 0, 1] = xy
        mat[:, 1, 0] = yx
        mat[:, 1, 1] = yy
        return mat

    @staticmethod
    def rotate_around(pts, theta, origin):
        """Rotate XY points around an origin by ``theta`` radians."""
        xarr, yarr = pts[:, 0], pts[:, 1]
        ox, oy = origin[0], origin[1]
        qx = ox + np.cos(theta) * (xarr - ox) - np.sin(theta) * (yarr - oy)
        qy = oy + np.sin(theta) * (xarr - ox) + np.cos(theta) * (yarr - oy)
        return np.vstack((qx, qy)).T

    @staticmethod
    def rotate(pts, theta):
        """Rotate XY points around (0, 0) by ``theta`` radians (scalar or
        array of angles)."""
        rot = RotationTool._get_rotation_matrix(theta)
        rotated = pts.dot(rot)
        if not isinstance(theta, np.ndarray):
            return rotated
        return np.swapaxes(rotated, 0, 1)

    @staticmethod
    def distance_between(pts):
        """Euclidean XY distance between the two points in ``pts`` (or in
        each pair, for a stacked ``(n, 2, 2)`` array)."""
        if pts.ndim < 3:
            return np.sqrt((pts[0, 0] - pts[1, 0]) ** 2 + (pts[0, 1] - pts[1, 1]) ** 2)
        return np.sqrt(
            (pts[:, 0, 0] - pts[:, 1, 0]) ** 2 + (pts[:, 0, 1] - pts[:, 1, 1]) ** 2
        )

    @staticmethod
    def cos_between(pts):
        """Angle (arccos of x-offset over distance) between two points."""
        if pts.ndim < 3:
            xdiff = abs(pts[0, 0] - pts[1, 0])
            dist = RotationTool.distance_between(pts)
            return np.arccos(xdiff / dist)
        # Otherwise we have a stacked set of point pairs
        xdiff = abs(pts[:, 0, 0] - pts[:, 1, 0])
        dist = RotationTool.distance_between(pts)
        return np.arccos(xdiff / dist)

    @staticmethod
    def sin_between(pts):
        """Angle (arcsin of y-offset over distance) between two points."""
        ydiff = abs(pts[0, 1] - pts[1, 1])
        dist = RotationTool.distance_between(pts)
        return np.arcsin(ydiff / dist)

    @staticmethod
    def rotation_matrix(vector_orig, vector_fin):
        """Calculate the rotation matrix required to rotate from one vector to another.
        For the rotation of one vector to another, there are an infinit series of rotation matrices
        possible. Due to axially symmetry, the rotation axis can be any vector lying in the symmetry
        plane between the two vectors. Hence the axis-angle convention will be used to construct the
        matrix with the rotation axis defined as the cross product of the two vectors. The rotation
        angle is the arccosine of the dot product of the two unit vectors.
        Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
        the rotation matrix R is::
            |  1 + (1-cos(a))*(x*x-1)   -z*sin(a)+(1-cos(a))*x*y   y*sin(a)+(1-cos(a))*x*z |
        R = |  z*sin(a)+(1-cos(a))*x*y   1 + (1-cos(a))*(y*y-1)   -x*sin(a)+(1-cos(a))*y*z |
            | -y*sin(a)+(1-cos(a))*x*z   x*sin(a)+(1-cos(a))*y*z   1 + (1-cos(a))*(z*z-1)  |
        Args:
            vector_orig (numpy array, len 3): The unrotated vector defined in the reference frame.
            vector_fin (numpy array, len 3): The rotated vector defined in the reference frame.
        Note:
            This code was adopted from `printipi`_ under the MIT license.
        .. _printipi: https://github.com/Wallacoloo/printipi/blob/master/util/rotation_matrix.py
        """
        from math import acos, cos, sin
        from numpy import cross, dot
        from numpy.linalg import norm

        R = np.zeros((3, 3))
        # Convert the vectors to unit vectors.
        vector_orig = vector_orig / norm(vector_orig)
        vector_fin = vector_fin / norm(vector_fin)
        # The rotation axis (normalised).
        axis = cross(vector_orig, vector_fin)
        axis_len = norm(axis)
        if axis_len != 0.0:
            axis = axis / axis_len
        # Alias the axis coordinates.
        x = axis[0]
        y = axis[1]
        z = axis[2]
        # The rotation angle.
        angle = acos(dot(vector_orig, vector_fin))
        # Trig functions (only need to do this maths once!).
        ca = cos(angle)
        sa = sin(angle)
        # Calculate the rotation matrix elements.
        R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
        R[0, 1] = -z * sa + (1.0 - ca) * x * y
        R[0, 2] = y * sa + (1.0 - ca) * x * z
        R[1, 0] = z * sa + (1.0 - ca) * x * y
        R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
        R[1, 2] = -x * sa + (1.0 - ca) * y * z
        R[2, 0] = -y * sa + (1.0 - ca) * x * z
        R[2, 1] = x * sa + (1.0 - ca) * y * z
        R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
        return R

    def _converge_angle(self, pt1, pt2):
        """Internal use only: pts should only be a two neighboring points.

        Sweeps rotation angles in ``[0, pi/2)`` and returns the axis flag
        (0 = x, 1 = y), the angle, and the point spacing at convergence.
        """
        # Make the theta range up to 90 degrees to rotate points through
        angles = np.arange(0.0, np.pi / 2, self.RESOLUTION)
        pts = np.vstack((pt1, pt2))
        rotated = self.rotate(pts, angles)  # Points rotated for all angles
        cosbtw = self.cos_between(rotated)
        distbtw = self.distance_between(rotated)
        # Find angles where the pair aligns with the x axis or the y axis
        xmin = np.argwhere(
            np.abs(cosbtw - np.pi / 2.0) < (1 * 10 ** -self.DECIMALS)
        ).flatten()
        ymin = np.argwhere(np.abs(cosbtw - 0.0) < (1 * 10 ** -self.DECIMALS)).flatten()
        # Protection to make sure we can converge
        if len(xmin) == 0 and len(ymin) == 0:
            # No hit at this precision — retry with lower precision
            self.DECIMALS -= 1
            if self.DECIMALS < 0:
                self.DECIMALS = 0
                raise _helpers.PVGeoError('No angle found.')
            return self._converge_angle(pt1, pt2)
        # Figure out if the two points share the x axis or y axis and return
        if len(xmin) > 0 and len(ymin) > 0:
            raise RuntimeError('Invalid solution')
        elif len(xmin) > 0:
            xidx = np.mean(xmin, dtype=int)
            return 0, angles[xidx], distbtw[xidx]
        elif len(ymin) > 0:
            yidx = np.mean(ymin, dtype=int)
            return 1, angles[yidx], distbtw[yidx]
        # No solution found.
        raise _helpers.PVGeoError('No angle found. Precision too low/high.')

    def _estimate_angle_and_spacing(self, pts, sample=0.5):
        """Internal use only: estimate the rotation angle and the x/y cell
        spacings of a (possibly rotated) regular grid of points."""
        try:
            # sklearn's KDTree is faster: use it if available
            from sklearn.neighbors import KDTree as Tree
        except ImportError:
            from scipy.spatial import cKDTree as Tree
        # Create the indexing range for searching the points:
        num = len(pts)
        rng = np.linspace(0, num - 1, num=num, dtype=int)
        N = int(num * sample) + 1
        rng = np.random.choice(rng, N)
        angles = np.zeros(len(rng))
        tree = Tree(pts)
        distances = [[], []]
        # Find each point's nearest neighbor
        distall, ptsiall = tree.query(pts, k=2)
        pt1all, pt2all = pts[ptsiall[:, 0]], pts[ptsiall[:, 1]]
        idx = 0
        for i in rng:
            ax, angles[idx], dist = self._converge_angle(pt1all[i], pt2all[i])
            distances[ax].append(dist)
            idx += 1
        dx, dy = distances[0], distances[1]
        # If all sampled neighbors aligned with one axis, assume isotropic
        if len(dx) == 0:
            dx = dy
        elif len(dy) == 0:
            dy = dx
        TOLERANCE = np.min(np.append(dx, dy)) / 2.0
        angle = np.average(np.unique(angles))
        dx = np.unique(np.around(dx / TOLERANCE)) * TOLERANCE
        dy = np.unique(np.around(dy / TOLERANCE)) * TOLERANCE
        # Now round to decimals
        dx = np.around(dx, self.DECIMALS)
        # FIX: previously rounded ``dx`` a second time, discarding the
        # computed y-spacing entirely.
        dy = np.around(dy, self.DECIMALS)
        return angle, dx[0], dy[0]

    def estimate_and_rotate(self, x, y, z):
        """Estimate the rotation of a set of gridded points and correct that
        rotation on the XY plane.

        Returns ``(x, y, z, dx, dy, angle)`` with the de-rotated coordinates.
        """
        if not (len(x) == len(y) == len(z)):
            raise AssertionError(
                'Must have same number of coordinates for all components.'
            )
        # Estimate on a single constant-z slice
        idxs = np.argwhere(z == z[0])
        pts = np.hstack((x[idxs], y[idxs]))
        angle, dx, dy = self._estimate_angle_and_spacing(pts)
        inv = self.rotate(np.vstack((x, y)).T, angle)
        return inv[:, 0], inv[:, 1], z, dx, dy, angle
# ---- Coordinate Rotations ----#
class RotatePoints(FilterBase):
    """Rotates XYZ coordinates in `vtkPolyData` around an origin at a given
    angle on the XY plane.
    """
    __displayname__ = 'Rotate Points'
    __category__ = 'filter'
    def __init__(self, angle=45.0, origin=None, use_corner=True):
        # vtkPolyData in -> vtkPolyData out
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkPolyData',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )
        # Parameters
        # NOTE(review): the angle is stored as given here, but
        # ``set_rotation_degrees`` converts degrees to radians before storing
        # — confirm the intended unit for the constructor's ``angle``.
        self.__angle = angle
        if origin is None:
            origin = [0.0, 0.0]
        self.__origin = origin
        self.__use_corner = use_corner
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output."""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Get the Points over the NumPy interface
        wpdi = dsa.WrapDataObject(pdi)  # NumPy wrapped input
        points = np.array(
            wpdi.Points
        )  # New NumPy array of points so we don't destroy the input
        origin = self.__origin
        if self.__use_corner:
            # Use the point with the smallest X as the rotation origin
            idx = np.argmin(points[:, 0])
            origin = [points[idx, 0], points[idx, 1]]
        points[:, 0:2] = RotationTool.rotate_around(
            points[:, 0:2], self.__angle, origin
        )
        # Copy the input, then overwrite its point coordinates in place
        pdo.DeepCopy(pdi)
        pts = pdo.GetPoints()
        for i, pt in enumerate(points):
            pts.SetPoint(i, pt)
        return 1
    def set_rotation_degrees(self, theta):
        """Sets the rotational angle in degrees."""
        theta = np.deg2rad(theta)
        if self.__angle != theta:
            self.__angle = theta
            self.Modified()
    def set_origin(self, xo, yo):
        """Sets the origin to perform the rotate around."""
        if self.__origin != [xo, yo]:
            self.__origin = [xo, yo]
            self.Modified()
    def set_use_corner(self, flag):
        """A flag to use a corner of the input data set as the rotational
        origin.
        """
        if self.__use_corner != flag:
            self.__use_corner = flag
            self.Modified()
###############################################################################
class ExtractPoints(FilterBase):
    """Extracts XYZ coordinates and point/cell data from an input ``vtkDataSet``"""
    __displayname__ = 'Extract Points'
    __category__ = 'filter'
    def __init__(self):
        # Any vtkDataSet in -> vtkPolyData (point cloud) out
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Get the Points over the NumPy interface
        wpdi = dsa.WrapDataObject(pdi)  # NumPy wrapped input
        if not hasattr(wpdi, 'Points'):
            raise _helpers.PVGeoError('Input data object does not have XYZ points.')
        points = np.array(
            wpdi.Points
        )  # New NumPy array of points so we don't destroy the input
        # Interpolate cell data onto the points so everything transfers as
        # point data on the output point cloud
        f = vtk.vtkCellDataToPointData()
        f.SetInputData(pdi)
        f.Update()
        d = f.GetOutput()
        pdo.ShallowCopy(interface.points_to_poly_data(points))
        _helpers.copy_arrays_to_point_data(d, pdo, 0)  # 0 is point data
        return 1
class ExtractCellCenters(FilterBase):
    """Produce a ``vtkPolyData`` of the input's cell centers, carrying the
    input's cell data over as point data.
    """

    __displayname__ = 'Extract Cell Centers'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        data_in = self.GetInputData(inInfo, 0, 0)
        data_out = self.GetOutputData(outInfo, 0)
        # Let VTK compute the cell centers.
        centers_filter = vtk.vtkCellCenters()
        centers_filter.SetInputDataObject(data_in)
        centers_filter.Update()
        center_pts = dsa.WrapDataObject(centers_filter.GetOutput()).Points
        # Names of the input's cell-data arrays.
        names = dsa.WrapDataObject(data_in).CellData.keys()
        # Build a poly data of the centers, then attach each cell array
        # as point data (one center per cell).
        data_out.DeepCopy(interface.points_to_poly_data(center_pts))
        for name in names:
            data_out.GetPointData().AddArray(data_in.GetCellData().GetArray(name))
        return 1
class AppendCellCenters(FilterPreserveTypeBase):
    """Append each cell's center coordinates to the input as a cell-data
    array named ``'Cell Centers'``, preserving the input data type.
    """

    __displayname__ = 'Append Cell Centers'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterPreserveTypeBase.__init__(self, **kwargs)

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        data_in = self.GetInputData(inInfo, 0, 0)
        data_out = self.GetOutputData(outInfo, 0)
        # Let VTK compute the cell centers.
        centers_filter = vtk.vtkCellCenters()
        centers_filter.SetInputDataObject(data_in)
        centers_filter.Update()
        # The dataset adapter / numpy interface makes the points easy to grab.
        center_pts = dsa.WrapDataObject(centers_filter.GetOutput()).Points
        centers_arr = interface.convert_array(center_pts)
        centers_arr.SetName('Cell Centers')
        # Copy the input and attach the centers as a tuple (3-component) array.
        data_out.DeepCopy(data_in)
        data_out.GetCellData().AddArray(centers_arr)
        return 1
class IterateOverPoints(FilterBase):
    """Iterate over points in a time varying manner."""
    __displayname__ = 'Iterate Over Points'
    __category__ = 'filter'
    def __init__(self, dt=1.0):
        """Initialize with a time-step interval of ``dt`` seconds."""
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkPolyData',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )
        # Parameters
        self.__dt = dt
        self.__timesteps = None
        # Placeholder until RequestInformation reads the real point count.
        self.__original = 2
        # Maps a timestep index to a point index in the (decimated) input.
        self.__tindex = None
        self.__n = 2
        # Percent of points to keep (1-100); 100 means use every point.
        self.__decimate = 100
        # The point/normal that gets updated on every iteration
        self.__point = (0.0, 0.0, 0.0)
        self.__normal = (1.0, 0.0, 0.0)
    def _update_time_steps(self):
        """For internal use only"""
        self.__timesteps = _helpers.update_time_steps(self, self.__n, self.__dt)
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        # Get number of points
        pdo = self.GetOutputData(outInfo, 0)
        #### Perfrom task ####
        # Get the Points over the NumPy interface
        # wpdi = dsa.WrapDataObject(pdi) # NumPy wrapped input
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        # Now grab point at this timestep
        pt = pdi.GetPoints().GetPoint(self.__tindex[i])
        # Calculate normal
        # Direction vector from the previously emitted point to this one.
        pts1 = self.__point
        pts2 = pt
        x1, y1, z1 = pts1[0], pts1[1], pts1[2]
        x2, y2, z2 = pts2[0], pts2[1], pts2[2]
        normal = [x2 - x1, y2 - y1, z2 - z1]
        self.__point = pt
        self.__normal = normal
        # Output is a poly data containing just the current point.
        poly = interface.points_to_poly_data(np.array(pt))
        pdo.ShallowCopy(poly)
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to set the time information"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        # Get number of points
        self.__original = pdi.GetNumberOfPoints()
        # Re-apply decimation now that the true point count is known.
        self.set_decimate(self.__decimate)
        # register time:
        self._update_time_steps()
        return 1
    #### Public Getters / Setters ####
    def set_decimate(self, percent):
        """Set the percent (1 to 100) to decimate"""
        # Out-of-range values are silently ignored.
        if percent > 100 or percent < 1:
            return
        self.__decimate = percent
        # Float division keeps this correct under Python 2 as well.
        self.__n = int(self.__original * (percent / 100.0))
        self.__tindex = np.linspace(0, self.__original - 1, self.__n, dtype=int)
        self._update_time_steps()
        self.Modified()
    def set_time_delta(self, dt):
        """
        Set the time step interval in seconds
        """
        if self.__dt != dt:
            self.__dt = dt
            self._update_time_steps()
            self.Modified()
    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps"""
        return self.__timesteps.tolist() if self.__timesteps is not None else None
    def get_point(self):
        """Get the current point"""
        return list(self.__point)
    def get_normal(self):
        """Get the current normal vector"""
        return list(self.__normal)
class ConvertUnits(FilterPreserveTypeBase):
    """Convert points in an input data object to from meters to feet or vice versa.
    This simply uses a ``vtkTransformFilter`` and scales input data object with
    common conversions.
    """

    __displayname__ = 'Convert XYZ Units'
    __category__ = 'filter'

    def __init__(self, conversion='meter_to_feet', **kwargs):
        FilterPreserveTypeBase.__init__(self, **kwargs)
        # Name of the active conversion; a key of ``lookup_conversions()``.
        self.__conversion = conversion

    @staticmethod
    def lookup_conversions(get_keys=False):
        """All Available conversions

        Return:
            dict: dictionary of conversion units (or just the keys when
            ``get_keys`` is True)
        """
        convs = dict(
            meter_to_feet=3.2808399,
            feet_to_meter=1 / 3.2808399,
        )
        if get_keys:
            return convs.keys()
        return convs

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        #### Perfrom task ####
        # Uniformly scale all XYZ coordinates by the conversion factor.
        factor = self.get_conversion()
        filt = vtk.vtkTransformFilter()
        trans = vtk.vtkTransform()
        trans.Scale(factor, factor, factor)
        filt.SetTransform(trans)
        filt.SetInputDataObject(pdi)
        filt.Update()
        scaled = filt.GetOutputDataObject(0)
        pdo.DeepCopy(scaled)
        return 1

    def set_conversion(self, key):
        """Set the conversion via a lookup table

        Args:
            key (str or int): a case-insensitive key of
                ``lookup_conversions()`` or an integer index into those keys.

        Raises:
            PVGeoError: if a string key is not a known conversion.
        """
        convs = self.lookup_conversions()
        if isinstance(key, str):
            # BUG FIX: store the normalized (lowercase) key; previously a
            # mixed-case key passed validation but later broke
            # ``get_conversion`` with a KeyError.
            key = key.lower()
            if key not in convs:
                raise _helpers.PVGeoError('Conversion `%s` not available.' % key)
        elif isinstance(key, int):
            # BUG FIX: dict views are not indexable on Python 3; make a list.
            key = list(convs.keys())[key]
        if self.__conversion != key:
            self.__conversion = key
            self.Modified()
        return 1

    def get_conversion(self):
        """Get the scale factor of the active conversion"""
        convs = self.lookup_conversions()
        return convs[self.__conversion]
class BuildSurfaceFromPoints(FilterBase):
    """From the sorted x, y, and z station locations in the input PolyData,
    create a surface to project down from the line of those points. Use the
    Z cells to control the size of the mesh surface
    """
    __displayname__ = 'Build Surface From Points'
    __category__ = 'filter'
    def __init__(self, **kwargs):
        FilterBase.__init__(
            self, inputType='vtkPolyData', outputType='vtkStructuredGrid', **kwargs
        )
        # Default Z spacings parsed from a UBC-style cell string.
        self.__zcoords = CreateTensorMesh._read_cell_line('0. 50.')
        # NOTE(review): 'zcoords' remains inside **kwargs forwarded to
        # FilterBase.__init__ above -- confirm the base class tolerates it.
        zcoords = kwargs.get('zcoords', self.__zcoords)
        if not isinstance(zcoords, (str, list, tuple, np.ndarray)):
            raise TypeError('zcoords of bad type.')
        if isinstance(zcoords, str):
            self.set_z_coords_str(zcoords)
        else:
            self.set_z_coords(zcoords)
    @staticmethod
    def create_surface(points, z_range):
        """From the sorted x, y, and z station locations, create a surface
        to display a seismic recording/migration on in space. The result is
        defined in the X,Y,Z-z_range 3D space.
        The z_range should be treated as relative coordinates to the values
        given on the third column of the points array. If you want the values
        in the z_range to be treated as the absolute coordinates, simply
        do not pass any Z values in the points array - if points is N by 2,
        then the values in z_range will be inferred as absolute.
        Args:
            points (np.ndarray): array-like of the station x and y locations
                (npts by 2-3) z_range (np.ndarray): The linear space of the z
                dimension. This will be filled out for every station location.
        Return:
            pyvista.StructuredGrid
        """
        if hasattr(points, 'values'):
            # This will extract data from pandas dataframes if those are given
            points = points.values
        points = np.array(points)
        z_range = np.array(z_range)
        xloc = points[:, 0]
        yloc = points[:, 1]
        if points.shape[1] > 2:
            zloc = points[:, 2]
        else:
            # No Z column: treat z_range as absolute elevations, flipped so
            # the surface extends downward from the maximum value.
            val = np.nanmax(z_range)
            z_range = val - np.flip(z_range)
            zloc = np.full(xloc.shape, val)
        if not len(xloc) == len(yloc) == len(zloc):
            raise AssertionError('Coordinate shapes do not match.')
        nt = len(xloc)  # number of stations
        ns = len(z_range)  # number of depth samples per station
        # Extrapolate points to a 2D surface
        # repeat the XY locations across
        points = np.repeat(np.c_[xloc, yloc, zloc], ns, axis=0)
        # repeat the Z locations across
        tp = np.repeat(z_range.reshape((-1, len(z_range))), nt, axis=0)
        # Each station's column of Zs is its elevation minus the z_range.
        tp = zloc[:, None] - tp
        points[:, -1] = tp.ravel()
        # Produce the output
        output = pyvista.StructuredGrid()
        output.points = points
        output.dimensions = [ns, nt, 1]
        return output
    def RequestData(self, request, inInfo, outInfo):
        """Execute on pipeline"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        # Get number of points
        pdo = self.GetOutputData(outInfo, 0)
        #### Perfrom task ####
        data = pyvista.wrap(pdi)
        output = BuildSurfaceFromPoints.create_surface(
            data.points, np.array(self.__zcoords)
        )
        pdo.DeepCopy(output)
        return 1
    def set_z_coords(self, zcoords):
        """Set the spacings for the cells in the Z direction
        Args:
            zcoords (list or np.array(floats)): the spacings along the Z-axis"""
        # Only flag Modified when the coordinates actually change.
        if len(zcoords) != len(self.__zcoords) or not np.allclose(
            self.__zcoords, zcoords
        ):
            self.__zcoords = zcoords
            self.Modified()
    def set_z_coords_str(self, zcoordstr):
        """Set the spacings for the cells in the Z direction
        Args:
            zcoordstr (str) : the spacings along the Z-axis in the UBC style"""
        zcoords = CreateTensorMesh._read_cell_line(zcoordstr)
        self.set_z_coords(zcoords)
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/filters/xyz.py",
"copies": "1",
"size": "36851",
"license": "bsd-3-clause",
"hash": -6156719771944934000,
"line_mean": 34.7083333333,
"line_max": 101,
"alpha_frac": 0.5500529158,
"autogenerated": false,
"ratio": 3.76414708886619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9811077534308692,
"avg_score": 0.0006244940714994757,
"num_lines": 1032
} |
# Public API of this module.
__all__ = [
    'add_if_not_exists_clause',
    'exactly_one',
    'executing',
    'one_or_none',
    'scalar',
]
import contextlib
import sqlalchemy.engine
from sqlalchemy.schema import CreateIndex
from g1.bases.assertions import ASSERT
def add_if_not_exists_clause(index, connectable):
    """Render a CREATE INDEX statement with an "IF NOT EXISTS" clause.

    ``sqlalchemy.Index.create()`` does not accept a ``checkfirst``
    argument like most other schema constructs do, so the rendered SQL
    text is patched instead.
    """
    rendered = str(CreateIndex(index).compile(connectable))
    patched = rendered.replace('CREATE INDEX', 'CREATE INDEX IF NOT EXISTS', 1)
    # Sanity-check that the substitution actually happened.
    ASSERT.in_('IF NOT EXISTS', patched)
    return patched
@contextlib.contextmanager
def executing(connectable, statement):
    """Execute ``statement`` and yield its result, closing it on exit."""
    if isinstance(connectable, sqlalchemy.engine.Connection):
        # A caller-supplied Connection must NOT be closed by us.
        connection_ctx = contextlib.nullcontext(connectable)
    else:
        connection_ctx = connectable.connect()
    with connection_ctx as connection:
        result = connection.execute(statement)
        # ResultProxy is not a context manager; close it by hand.
        try:
            yield result
        finally:
            result.close()
def one_or_none(connectable, statement):
    """Return the single row of ``statement``, or None when it has no rows.

    Asserts that there is at most one row.  (SQLAlchemy only offers this
    helper in the ORM layer, not in core.)
    """
    with executing(connectable, statement) as result:
        first = result.fetchone()
        ASSERT.none(result.fetchone())
    return first
def exactly_one(connectable, statement):
    """Return the single row of ``statement``; assert that one exists."""
    return ASSERT.not_none(one_or_none(connectable, statement))
def scalar(connectable, statement):
    """Return the first column of the single row of ``statement``."""
    row = exactly_one(connectable, statement)
    return row[0]
| {
"repo_name": "clchiou/garage",
"path": "py/g1/databases/g1/databases/utils.py",
"copies": "1",
"size": "1638",
"license": "mit",
"hash": 5760225149889764000,
"line_mean": 26.7627118644,
"line_max": 72,
"alpha_frac": 0.6746031746,
"autogenerated": false,
"ratio": 3.9375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51121031746,
"avg_score": null,
"num_lines": null
} |
# Public API of this package (``from pyads import *``).
__all__ = [
    "AdsClient",
    "AdsConnection",
    "AdsDatatype",
    "AdsDevice",
    "AdsException",
    "AdsState",
    "amspacket",
    "AmsPacket",
    "BinaryParser",
    "SymbolInfo",
    "HexBlock",
    "AdsIO",
    "ProcImage"
]
from pyads.symbolinfo import *
from pyads.adsdatatype import *
from pyads.adsexception import *
from pyads.adsstate import *
from pyads.amspacket import *
from pyads.binaryparser import *
from pyads.adsconnection import *
from pyads.adsclient import *
from pyads.adsdevice import *
from pyads.adsio import *
from pyads.procimage import *
def HexBlock(data, width=8):
    """Format *data* as a classic hex dump.

    Each output line shows up to ``width`` bytes as two-digit hex values,
    followed by a printable-character column where non-printable bytes
    (outside 32..126) are rendered as ``.``.  Accepts ``bytes`` (Python 3)
    or ``str`` (Python 2).
    """
    # Build rows in lists and join once -- repeated ``+=`` on a string is
    # quadratic for large inputs.
    lines = []
    hex_cells = []
    chr_cells = []
    for byte in data:
        # next line, if required
        if len(hex_cells) == width:
            lines.append('%s %s' % (''.join(hex_cells), ''.join(chr_cells)))
            hex_cells = []
            chr_cells = []
        # python2 / python3 - normalize to numeric byte
        char = ord(byte) if isinstance(byte, str) else byte
        # append to lines
        hex_cells.append('%02x ' % char)
        chr_cells.append('.' if (char < 32 or char > 126) else chr(char))
    # append last line
    lines.append('%s %s' % (''.join(hex_cells), ''.join(chr_cells)))
    return '\n'.join(lines)
return result | {
"repo_name": "chwiede/pyads",
"path": "pyads/__init__.py",
"copies": "1",
"size": "1278",
"license": "mit",
"hash": 6288976157838024000,
"line_mean": 23.5961538462,
"line_max": 73,
"alpha_frac": 0.607198748,
"autogenerated": false,
"ratio": 3.435483870967742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4542682618967742,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = [
    'Agent',
    'Connector',
]
import re
import sys
import errno
import base64
import socket
import urlparse
try:
import ssl
except ImportError:
# if import _ssl in sll fails, then subsequent
# import ssl statements would succeed
# workaround that, so other modules have
# a chance to detect ssl support
sys.modules.pop('ssl', None)
ssl = None
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from kitsu.http.errors import *
from kitsu.http.headers import *
from kitsu.http.request import *
from kitsu.http.response import *
from kitsu.http.decoders import *
class HTTPClient(object):
    """Blocking HTTP client speaking over an already-connected socket.

    Sends one request at a time and parses the response incrementally,
    enforcing an optional limit on total bytes read (``sizelimit``) and on
    the decoded body size (``bodylimit``).  Data read past the current
    response is buffered in ``self.data`` for the next request.
    """
    def __init__(self, sock, sizelimit=None, bodylimit=None, packetsize=4096):
        self.sock = sock
        self.data = ''  # buffered input not yet consumed
        self.sizelimit = sizelimit
        self.bodylimit = bodylimit
        self.packetsize = packetsize
    def __del__(self):
        self.close()
    def close(self):
        """Close the underlying socket, if still attached."""
        if self.sock is not None:
            self.sock.close()
    def detach(self):
        """Release and return the underlying socket without closing it."""
        sock, self.sock = self.sock, None
        return sock
    def clear(self):
        """Return and reset any buffered, unconsumed data."""
        data, self.data = self.data, ''
        return data
    def __recv(self):
        data = self.sock.recv(self.packetsize)
        #print "<- %r" % (data,)
        return data
    def __send(self, data):
        #print "-> %r" % (data,)
        return self.sock.sendall(data)
    def __sendBody(self, body):
        """Send a request body given as a string or a file-like object."""
        if body is None:
            return
        if isinstance(body, basestring):
            if not body:
                return
            self.__send(body)
            return
        while True:
            # assume it's a file
            # BUG FIX: was ``body.read(packetsize)`` -- ``packetsize`` is not
            # a local name here; the chunk size lives on the instance.
            data = body.read(self.packetsize)
            if not data:
                break
            self.__send(data)
    def makeRequest(self, request):
        """Send ``request`` and return the parsed response with ``body`` read.

        Raises HTTPDataError if the peer closes before a full response and
        HTTPLimitError when ``sizelimit``/``bodylimit`` is exceeded.
        """
        sizelimit = self.sizelimit
        self.__send(request.toString())
        self.__sendBody(request.body)
        parser = ResponseParser()
        if not self.data:
            self.data = self.__recv()
        # Read until the status line and headers are fully parsed.
        while True:
            if not self.data:
                raise HTTPDataError("not enough data for response")
            response = parser.parse(self.data)
            if sizelimit is not None:
                sizelimit -= len(self.data)
            if response:
                # Bytes past the headers stay buffered for body decoding.
                self.data = parser.clear()
                if sizelimit is not None:
                    sizelimit += len(self.data)
                    if sizelimit < 0:
                        raise HTTPLimitError()
                assert parser.done
                assert len(response) == 1
                response = response[0]
                break
            if sizelimit is not None and sizelimit <= 0:
                raise HTTPLimitError()
            self.data = self.__recv()
        decoder = CompoundDecoder.from_response(request, response)
        if not decoder:
            # response has no body
            response.body = ''
            return response
        response.body = StringIO()
        def process_chunk(chunk):
            # Trailer headers are merged into the response headers;
            # everything else is body data.
            if isinstance(chunk, Headers):
                response.headers.update(chunk, merge=True)
            else:
                response.body.write(chunk)
                if self.bodylimit is not None and response.body.tell() > self.bodylimit:
                    raise HTTPLimitError()
        def process_chunks(chunks):
            for chunk in chunks:
                process_chunk(chunk)
        if not self.data:
            self.data = self.__recv()
        # Decode the body until the decoder reports completion or EOF.
        while True:
            if not self.data:
                break
            process_chunks(decoder.parse(self.data))
            if sizelimit is not None:
                sizelimit -= len(self.data)
            if decoder.done:
                break
            if sizelimit is not None and sizelimit < 0:
                raise HTTPLimitError()
            self.data = self.__recv()
        process_chunks(decoder.finish())
        # Keep any bytes past the body for the next request on this socket.
        self.data = decoder.clear()
        if sizelimit is not None:
            sizelimit += len(self.data)
            if sizelimit < 0:
                raise HTTPLimitError()
        response.body = response.body.getvalue()
        return response
class HTTPProxyClient(object):
    """Socket-like wrapper that tunnels through an HTTP proxy via CONNECT.

    All attribute access (including ``__class__``) is delegated to the
    wrapped socket; ``connect`` performs the CONNECT handshake instead of
    a plain TCP connect.
    """
    __slots__ = (
        '_HTTPProxyClient__sock',
        '_HTTPProxyClient__headers',
        '_HTTPProxyClient__peername',
    )
    def __init__(self, sock, headers=()):
        self.__sock = sock
        self.__headers = Headers(headers)
        self.__peername = None
    @property
    def __class__(self):
        # Masquerade as the wrapped socket's class (isinstance checks).
        return self.__sock.__class__
    def __getattr__(self, name):
        return getattr(self.__sock, name)
    def __setattr__(self, name, value):
        # Our own slots are set locally; everything else goes to the socket.
        if name in self.__slots__:
            return object.__setattr__(self, name, value)
        return setattr(self.__sock, name, value)
    def __delattr__(self, name):
        if name in self.__slots__:
            return object.__delattr__(self, name)
        return delattr(self.__sock, name)
    def __readline(self, limit=65536):
        """Read a line being careful not to read more than needed"""
        s = StringIO()
        while True:
            c = self.__sock.recv(1)
            if not c:
                break
            s.write(c)
            if c == '\n':
                break
            if s.tell() >= limit:
                break
        return s.getvalue()
    def connect(self, address):
        """Issue a CONNECT request for ``address`` through the proxy.

        Raises ``socket.error`` when already connected or when the proxy
        refuses the tunnel (non-200 response).
        """
        if self.__peername is not None:
            raise socket.error(errno.EISCONN, 'Socket is already connected')
        host, port = address
        target = '%s:%s' % (host, port)
        request = Request(method='CONNECT', target=target)
        # The 'Host' header is not strictly needed,
        # it's only added here for consistency
        request.headers['Host'] = target
        request.headers.update(self.__headers)
        self.__sock.sendall(request.toString())
        limit = 65536
        parser = ResponseParser()
        while True:
            data = self.__readline(limit)
            if not data:
                raise HTTPDataError("not enough data for response")
            limit -= len(data)
            response = parser.parse(data)
            if response:
                assert len(response) == 1
                response = response[0]
                assert parser.done
                data = parser.clear()
                assert not data
                break
            if limit <= 0:
                raise HTTPLimitError("CONNECT: response too big")
        if response.code != 200:
            raise socket.error(errno.ECONNREFUSED, '%d %s' % (response.code, response.phrase))
        self.__peername = (host, port)
    def connect_ex(self, *args, **kwargs):
        # BUG FIX: was ``raise NotImplemented`` -- NotImplemented is not an
        # exception class, so raising it is itself a TypeError.
        raise NotImplementedError
    def getpeername(self):
        """Return the tunnelled peer address, emulating a connected socket."""
        # First make sure wrapped socket is connected (raises otherwise).
        self.__sock.getpeername()
        # Emulate connected socket if it is connected
        if self.__peername is None:
            raise socket.error(errno.ENOTCONN, 'Socket is not connected')
        return self.__peername
def create_socket(address=None, timeout=None):
    """Create a TCP socket, optionally applying a timeout and connecting.

    Args:
        address: ``(host, port)`` tuple to connect to, or None to leave
            the socket unconnected.
        timeout: socket timeout in seconds, or None for blocking mode.
    """
    tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    if timeout is not None:
        tcp_sock.settimeout(timeout)
    if address is not None:
        tcp_sock.connect(address)
    return tcp_sock
def wrap_ssl(sock, keyfile=None, certfile=None, **kwargs):
    """Wrap ``sock`` in SSL, handshaking immediately if already connected."""
    if ssl is None:
        # Fallback when the ssl module is unavailable (Python 2 socket.ssl).
        return socket.ssl(sock, keyfile, certfile)
    # Work around http://bugs.python.org/issue5103 on Python 2.6
    sslsock = ssl.wrap_socket(sock, keyfile, certfile, do_handshake_on_connect=False, **kwargs)
    # Work around bug in gevent.ssl, timeout in SSLObject is not inherited
    sslsock.settimeout(sock.gettimeout())
    try:
        # Probe whether the socket is already connected.
        sslsock.getpeername()
    except:
        # Not connected yet; the handshake happens after connect() instead.
        return sslsock # not connected
    sslsock.do_handshake()
    return sslsock
def _parse_netloc(netloc, default_port=None):
index = netloc.find(':')
if index >= 0:
host, port = netloc[:index], netloc[index+1:]
try:
port = int(port)
except ValueError:
port = default_port
else:
host, port = netloc, default_port
return host, port
def _parse_uri(uri):
    """Split a URI into ``(scheme, auth, netloc, path+query, fragment)``.

    Unlike plain ``urlsplit`` this defaults the scheme to http, extracts a
    ``user:pass@`` auth part, and keeps the query glued onto the path.
    """
    if '://' not in uri:
        uri = 'http://' + uri
    scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
    if not netloc and path.startswith('//'):
        # urlsplit ignores netloc for unknown schemes
        # (older Python 2 urlparse behavior); recover it from the path.
        path = path[2:]
        index = path.find('/')
        if index >= 0:
            netloc, path = path[:index], path[index:]
        else:
            netloc, path = path, ''
    # split username:password if any
    index = netloc.find('@')
    if index >= 0:
        auth, netloc = netloc[:index], netloc[index+1:]
    else:
        auth = ''
    if query:
        path = path + '?' + query
    return scheme, auth, netloc, path, fragment
def _make_uri(scheme, auth, netloc, path='', fragment=''):
uri = scheme + '://'
if auth and netloc:
uri += auth + '@' + netloc
else:
uri += auth or netloc
if path:
uri += path
if fragment:
uri += '#' + fragment
return uri
class Agent(object):
    """Stateful HTTP user agent: keeps at most one connection alive for
    reuse, supports an optional proxy (with CONNECT tunnelling when https
    is involved), follows redirects, and enforces size limits.
    """
    # Headers that must not be forwarded when following a redirect.
    no_redirect_headers = (
        'Transfer-Encoding',
        'Content-Length',
        'Content-Range',
        'Content-Type',
        'Authorization',
        'Referer',
        'Expect',
        'Range',
        'Host',
    )
    def __init__(self, proxy=None, headers=(), timeout=30, keepalive=None, sizelimit=None, bodylimit=None, redirectlimit=20):
        # keepalive: True/False forces the Connection header; None lets the
        # response decide.  Limits are in bytes / redirect hops.
        self.proxy = proxy
        self.headers = Headers(headers)
        self.timeout = timeout
        self.keepalive = keepalive
        self.sizelimit = sizelimit
        self.bodylimit = bodylimit
        self.redirectlimit = redirectlimit
        # Address key and client of the current kept-alive connection.
        self.__current_address = None
        self.__current_client = None
        # Overridable factory hooks (module-level defaults).
        self.create_socket = create_socket
        self.wrap_ssl = wrap_ssl
    def close(self):
        """Drop the kept-alive connection, if any."""
        self.__current_address = None
        if self.__current_client is not None:
            self.__current_client.close()
            self.__current_client = None
    def __makeRequest(self, url, method='GET', version=(1, 1), headers=(), body=None, referer=None, keyfile=None, certfile=None, ignore_content_length=False):
        """Perform a single request to ``url`` (no redirect handling)."""
        scheme, auth, netloc, path, fragment = _parse_uri(url)
        scheme = scheme.lower()
        if scheme not in ('http', 'https'):
            raise HTTPError("Unsupported scheme %r: %s" % (scheme, url))
        request = Request(method=method, target=path or '/', version=version, headers=self.headers, body=body)
        request.headers.update(headers)
        if auth and 'Authorization' not in request.headers:
            # Basic auth; strip the newlines base64.encodestring inserts.
            auth = re.sub(r"\s", "", base64.encodestring(auth))
            request.headers['Authorization'] = 'Basic %s' % auth
        if netloc and 'Host' not in request.headers:
            request.headers['Host'] = netloc
        if referer and 'Referer' not in request.headers:
            request.headers['Referer'] = referer
        if ignore_content_length:
            # Content-Length is untrusted; force a one-shot connection.
            if 'Connection' not in request.headers:
                request.headers['Connection'] = 'close'
            request.ignore_content_length = True
        elif self.keepalive is not None and 'Connection' not in request.headers:
            request.headers['Connection'] = self.keepalive and 'keep-alive' or 'close'
        if self.proxy:
            proxytype, proxyauth, proxynetloc, proxypath, proxyfragment = _parse_uri(self.proxy)
            proxytype = proxytype.lower()
            if proxytype not in ('http', 'https'):
                raise HTTPError("Unsupported proxy type %r" % (proxytype,))
            proxyheaders = Headers(self.headers)
            if proxyauth:
                proxyauth = re.sub(r"\s", "", base64.encodestring(proxyauth))
                proxyheaders['Proxy-Authorization'] = 'Basic %s' % proxyauth
            if 'https' in (scheme, proxytype):
                # Tunnel via CONNECT: key on both proxy and final target.
                address = ((proxytype, proxynetloc), (scheme, netloc))
            else:
                # Plain proxying: send the absolute URL to the proxy.
                request.target = url
                request.headers.update(proxyheaders)
                address = ((proxytype, proxynetloc),)
        else:
            address = ((scheme, netloc),)
        if self.__current_address != address:
            # Kept-alive connection points elsewhere; drop it.
            self.close()
        if self.__current_client is None:
            tscheme, tnetloc = address[0]
            sock = self.create_socket(_parse_netloc(tnetloc, tscheme == 'https' and 443 or 80), self.timeout)
            if self.proxy and 'https' in (scheme, proxytype):
                tscheme, tnetloc = address[1]
                sock = HTTPProxyClient(sock, proxyheaders)
                sock.connect(_parse_netloc(tnetloc, tscheme == 'https' and 443 or 80))
            if scheme == 'https':
                sock = self.wrap_ssl(sock, keyfile, certfile)
            client = self.__current_client = HTTPClient(sock, sizelimit=self.sizelimit, bodylimit=self.bodylimit)
            self.__current_address = address
        else:
            client = self.__current_client
            # Refresh limits in case they changed between requests.
            client.sizelimit = self.sizelimit
            client.bodylimit = self.bodylimit
        try:
            response = client.makeRequest(request)
        except:
            # Connection state is unknown after a failure; drop it.
            self.close()
            raise
        # Decide whether the connection may be reused.
        keepalive = response.version >= (1, 1)
        connection = response.headers.get('Connection')
        if connection:
            connection = [value.strip().lower() for value in connection.split(',')]
            if 'keep-alive' in connection:
                keepalive = True
            if 'close' in connection:
                keepalive = False
        if ignore_content_length:
            keepalive = False
        if not keepalive or (not self.keepalive and self.keepalive is not None):
            self.close()
        return response
    def makeRequest(self, url, **kwargs):
        """Perform a request, following redirects up to ``redirectlimit``."""
        url = url.strip()
        urlchain = []
        headers = Headers(kwargs.pop('headers', ()))
        redirectlimit = kwargs.pop('redirectlimit', self.redirectlimit)
        while True:
            response = self.__makeRequest(url, headers=headers, **kwargs)
            urlchain.append(url)
            if response.code in (301, 302, 303, 307) and redirectlimit > 0:
                redirectlimit -= 1
                location = response.headers.getlist('Location')
                if location:
                    location = location[0].strip()
                    if location:
                        # Strip headers that must not leak across redirects.
                        for name in self.no_redirect_headers:
                            headers.poplist(name, None)
                        for name in headers.keys():
                            if name.startswith('If-'):
                                headers.poplist(name, None)
                        kwargs['referer'] = url
                        url = urlparse.urljoin(url, location)
                        # NOTE(review): the method is downgraded to GET for
                        # every redirect code, including 307, which per the
                        # HTTP spec should preserve the method -- confirm
                        # whether this is intentional.
                        kwargs['method'] = 'GET'
                        kwargs['body'] = None
                        continue
            break
        response.urlchain = urlchain
        response.url = url
        return response
class Connector(object):
    """Factory for client sockets, optionally tunnelled through an HTTP
    proxy and/or wrapped in SSL.
    """
    def __init__(self, proxy=None, headers=(), timeout=30):
        self.proxy = proxy
        self.headers = Headers(headers)
        self.timeout = timeout
        # Overridable factory hooks (module-level defaults).
        self.create_socket = create_socket
        self.wrap_ssl = wrap_ssl
    def connect(self, address, ssl=False, keyfile=None, certfile=None):
        """Return a socket connected to ``address``.

        NOTE: the ``ssl`` parameter shadows the module-level ``ssl`` import;
        that import is only used inside ``wrap_ssl``, so this is harmless
        here, but keep it in mind when editing.
        """
        if self.proxy:
            proxytype, proxyauth, proxynetloc, proxypath, proxyfragment = _parse_uri(self.proxy)
            proxytype = proxytype.lower()
            if proxytype not in ('http', 'https'):
                raise HTTPError("Unsupported proxy type %r" % (proxytype,))
            proxyheaders = Headers(self.headers)
            if proxyauth:
                # Basic auth; strip the newlines base64.encodestring inserts.
                proxyauth = re.sub(r"\s", "", base64.encodestring(proxyauth))
                proxyheaders['Proxy-Authorization'] = 'Basic %s' % proxyauth
            sock = self.create_socket(_parse_netloc(proxynetloc, proxytype == 'https' and 443 or 80), self.timeout)
            sock = HTTPProxyClient(sock, proxyheaders)
            sock.connect(address)
        else:
            sock = self.create_socket(address, self.timeout)
        if ssl:
            sock = self.wrap_ssl(sock, keyfile, certfile)
        return sock
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/client.py",
"copies": "1",
"size": "16465",
"license": "mit",
"hash": -9048164586709878000,
"line_mean": 35.1074561404,
"line_max": 158,
"alpha_frac": 0.5551776496,
"autogenerated": false,
"ratio": 4.365058324496289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011286376851181118,
"num_lines": 456
} |
# Public API of this module.
__all__ = ['nagios', 'json']
from check_delivery.timer import Timer
class Output(object):
    """Base container for a check result: a status code, messages, the
    current step and timing data.  Rendering is left to subclasses via
    ``__str__``.
    """

    def __init__(self, *args, **kwargs):
        super(Output, self).__init__(*args, **kwargs)
        self._status = None
        self._message = None
        self._details = None
        self._step = None
        self._expect_res = None
        self._timers = Timer()

    def __str__(self):
        # Rendering is format specific; subclasses must implement it.
        raise NotImplementedError()

    @property
    def status(self):
        """
        An integer, minimum 0.
        """
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def message(self):
        """
        A short message explaining the status.
        """
        return self._message

    @message.setter
    def message(self, value):
        self._message = value

    @property
    def details(self):
        """
        More details explaining the status. Optional, None by default.
        """
        return self._details

    @details.setter
    def details(self, value):
        self._details = value

    @property
    def timers(self):
        """Timing information collected while running the check."""
        return self._timers

    @timers.setter
    def timers(self, value):
        self._timers = value

    @property
    def step(self):
        """Identifier of the step the check is currently in."""
        return self._step

    @step.setter
    def step(self, value):
        self._step = value

    @property
    def expect_res(self):
        """Expected result associated with the current step."""
        return self._expect_res

    @expect_res.setter
    def expect_res(self, value):
        self._expect_res = value
self._expect_res = value | {
"repo_name": "Leryan/check_delivery",
"path": "check_delivery/output/__init__.py",
"copies": "1",
"size": "1523",
"license": "mit",
"hash": -71533911434968330,
"line_mean": 19.0526315789,
"line_max": 70,
"alpha_frac": 0.5528562049,
"autogenerated": false,
"ratio": 4.27808988764045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041118421052631577,
"num_lines": 76
} |
# Public API of this module.
__all__ = [
    'AlertApi',
]
class AlertApi(object):
    """ DEPRECATED: Used the methods in `MonitorApi` instead. """
    def alert(self, query, name=None, message=None, silenced=False,
              notify_no_data=None, timeout_h=None):
        """
        Create a new metric alert for the given *query*. If *name* is unset,
        the alert will be given a name based on the query. The *message* will
        accompany any notifications sent for the alert and can contain the same
        '@' notation as events to alert individual users. The *silenced* flag
        controls whether or not notifications are sent for alert state changes.
        >>> dog_http_api.alert("sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 100")
        """
        body = {
            'query': query,
            'silenced': silenced,
        }
        # Optional fields are only sent when provided.
        if name:
            body['name'] = name
        if message:
            body['message'] = message
        if notify_no_data:
            body['notify_no_data'] = notify_no_data
        if timeout_h:
            body['timeout_h'] = timeout_h
        return self.http_request('POST', '/alert', body,
            response_formatter=lambda x: x['id'],
        )
    def update_alert(self, alert_id, query, name=None, message=None, silenced=False,
                     notify_no_data=None, timeout_h=None, silenced_timeout_ts=None):
        """
        Update the metric alert identified by *alert_id* with the given
        *query*. If *name* is unset, the alert will be given a name based on
        the query. The *message* will accompany any notifications sent for the
        alert and can contain the same '@' notation as events to alert
        individual users. The *silenced* flag controls whether or not
        notifications are sent for alert state changes.
        >>> dog_http_api.update_alert(1234, "sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 100")
        """
        body = {
            'query': query,
            'silenced': silenced,
        }
        if name:
            body['name'] = name
        if message:
            body['message'] = message
        if notify_no_data:
            body['notify_no_data'] = notify_no_data
        if timeout_h:
            body['timeout_h'] = timeout_h
        if silenced_timeout_ts:
            # BUG FIX: was a bare ``body['silenced_timeout_ts']`` subscript
            # expression, which silently discarded the parameter.
            body['silenced_timeout_ts'] = silenced_timeout_ts
        return self.http_request('PUT', '/alert/%s' % alert_id, body,
            response_formatter=lambda x: x['id'],
        )
    def get_alert(self, alert_id):
        """
        Get the details for the metric alert identified by *alert_id*.
        >>> dog_http_api.get_alert(1234)
        """
        return self.http_request('GET', '/alert/%s' % alert_id)
    def delete_alert(self, alert_id):
        """
        Delete the metric alert identified by *alert_id*.
        >>> dog_http_api.delete_alert(1234)
        """
        return self.http_request('DELETE', '/alert/%s' % alert_id)
    def get_all_alerts(self):
        """
        Get the details for all metric alerts.
        >>> dog_http_api.get_all_alert()
        """
        return self.http_request('GET', '/alert',
            response_formatter=lambda x: x['alerts'],
        )
    def mute_alerts(self):
        """
        Mute all alerts.
        >>> dog_http_api.mute_alerts()
        """
        return self.http_request('POST', '/mute_alerts')
    def unmute_alerts(self):
        """
        Unmute all alerts.
        >>> dog_http_api.unmute_alerts()
        """
        return self.http_request('POST', '/unmute_alerts')
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/alerts.py",
"copies": "1",
"size": "3555",
"license": "bsd-3-clause",
"hash": 3993802941278999600,
"line_mean": 30.7410714286,
"line_max": 103,
"alpha_frac": 0.558931083,
"autogenerated": false,
"ratio": 3.868335146898803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4927266229898803,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = [
    'AlertApi',
]
class AlertApi(object):
    """Metric-alert operations of the Datadog HTTP API.

    This mixin expects the concrete client class to supply an
    ``http_request(method, path, body=None, **kwargs)`` method.
    """

    @staticmethod
    def _build_alert_body(query, silenced, name, message, notify_no_data,
                          timeout_h):
        """Assemble the request payload, omitting unset optional fields."""
        payload = {
            'query': query,
            'silenced': silenced,
        }
        optional = (
            ('name', name),
            ('message', message),
            ('notify_no_data', notify_no_data),
            ('timeout_h', timeout_h),
        )
        for field, value in optional:
            # Match the API convention: only send fields that are truthy.
            if value:
                payload[field] = value
        return payload

    def alert(self, query, name=None, message=None, silenced=False,
              notify_no_data=None, timeout_h=None):
        """Create a new metric alert watching *query*.

        If *name* is unset, the alert is named after the query. *message*
        accompanies any notifications sent for the alert and may use the
        same '@' notation as events to alert individual users. *silenced*
        controls whether notifications are sent for alert state changes.

        >>> dog_http_api.alert("sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 100")
        """
        payload = self._build_alert_body(
            query, silenced, name, message, notify_no_data, timeout_h)
        return self.http_request(
            'POST', '/alert', payload,
            response_formatter=lambda response: response['id'],
        )

    def update_alert(self, alert_id, query, name=None, message=None, silenced=False,
                     notify_no_data=None, timeout_h=None):
        """Update the metric alert identified by *alert_id* with *query*.

        If *name* is unset, the alert is named after the query. *message*
        accompanies any notifications sent for the alert and may use the
        same '@' notation as events to alert individual users. *silenced*
        controls whether notifications are sent for alert state changes.

        >>> dog_http_api.update_alert(1234, "sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 100")
        """
        payload = self._build_alert_body(
            query, silenced, name, message, notify_no_data, timeout_h)
        return self.http_request(
            'PUT', '/alert/%s' % alert_id, payload,
            response_formatter=lambda response: response['id'],
        )

    def get_alert(self, alert_id):
        """Fetch the details of the metric alert identified by *alert_id*.

        >>> dog_http_api.get_alert(1234)
        """
        return self.http_request('GET', '/alert/%s' % alert_id)

    def delete_alert(self, alert_id):
        """Remove the metric alert identified by *alert_id*.

        >>> dog_http_api.delete_alert(1234)
        """
        return self.http_request('DELETE', '/alert/%s' % alert_id)

    def get_all_alerts(self):
        """Fetch the details of every metric alert.

        >>> dog_http_api.get_all_alerts()
        """
        return self.http_request(
            'GET', '/alert',
            response_formatter=lambda response: response['alerts'],
        )

    def mute_alerts(self):
        """Silence every alert at once.

        >>> dog_http_api.mute_alerts()
        """
        return self.http_request('POST', '/mute_alerts')

    def unmute_alerts(self):
        """Re-enable notifications for every alert at once.

        >>> dog_http_api.unmute_alerts()
        """
        return self.http_request('POST', '/unmute_alerts')
| {
"repo_name": "edx/dogapi",
"path": "src/dogapi/http/alerts.py",
"copies": "1",
"size": "3391",
"license": "bsd-3-clause",
"hash": 7665095326831591000,
"line_mean": 30.1100917431,
"line_max": 103,
"alpha_frac": 0.5552934238,
"autogenerated": false,
"ratio": 3.884306987399771,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4939600411199771,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'alerts',
]
from collections import namedtuple
from pathlib import Path
import contextlib
import datetime
import enum
import fcntl
import json
import logging
import os
import re
import selectors
import subprocess
import sys
import urllib.error
import urllib.request
from garage import apps
from garage import scripts
from garage.assertions import ASSERT
LOG = logging.getLogger(__name__)
class Levels(enum.Enum):
    """Severity levels attached to alert messages."""

    INFO = enum.auto()
    GOOD = enum.auto()
    WARNING = enum.auto()
    ERROR = enum.auto()
def load_config(args):
    """Load and parse the JSON config file named by ``args.config``."""
    config_path = scripts.ensure_file(args.config)
    return json.loads(config_path.read_text())
class Alerts:
    """Tail all configured log sources and forward alert messages to Slack."""

    @classmethod
    def make(cls, args):
        """Build an ``Alerts`` from the ``alerts`` section of the config file."""
        return cls(load_config(args)['alerts'])

    def __init__(self, config):
        # One SourceLog per entry in config['sources'].
        self._srcs = [
            SourceLog(src_config)
            for src_config in config['sources']
        ]
        # For now, only one destination `slack` is supported; so it has
        # to be present.
        self._dst_slack = DestinationSlack(config['destinations']['slack'])

    def watch(self):
        """Run forever: multiplex over all source pipes and forward alerts."""
        selector = selectors.DefaultSelector()
        with contextlib.ExitStack() as stack:
            for src in self._srcs:
                # `tailing()` spawns a tail process; its stdout pipe is the
                # file object we poll.
                pipe = stack.enter_context(src.tailing())
                # Non-blocking so one chatty source cannot stall the loop.
                self._set_nonblocking(pipe)
                selector.register(pipe, selectors.EVENT_READ, src)
            while True:
                for key, _ in selector.select():
                    src = key.data
                    message = src.parse(key.fileobj)
                    # A source may decide the line is not alert-worthy.
                    if message is not None:
                        self._dst_slack.send(message)

    @staticmethod
    def _set_nonblocking(pipe):
        # NOTE: Use fcntl is not portable to non-Unix platforms.
        fd = pipe.fileno()
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class SourceCollectd:
    """Parse collectd notifications (header block plus body) into messages."""

    # Map collectd severity strings to our alert levels; anything else
    # falls back to INFO.
    SEVERITY_TABLE = {
        'OKAY': Levels.GOOD,
        'WARNING': Levels.WARNING,
        'FAILURE': Levels.ERROR,
    }

    DEFAULT_TITLE = 'collectd notification'

    def parse(self, alert_input):
        """Read one notification from *alert_input* and return a message dict.

        The input format is ``Name: value`` header lines, a blank line,
        then a free-form description body.
        """
        message = {'level': Levels.INFO}
        headers = {}
        while True:
            line = alert_input.readline().strip()
            if line:
                self._parse_header(line, message, headers)
            else:
                break  # No more header fields.
        message['title'] = self._make_title(headers, self.DEFAULT_TITLE)
        message['description'] = alert_input.read()
        return message

    def _parse_header(self, line, message, headers):
        """Parse one ``Name: value`` header into *message* or *headers*."""
        name, value = line.split(':', maxsplit=1)
        name = name.strip()
        value = value.strip()
        if name == 'Host':
            message['host'] = value
        elif name == 'Time':
            value = float(value)
            # NOTE(review): utcfromtimestamp yields a naive datetime and is
            # deprecated since Python 3.12 — consider
            # datetime.fromtimestamp(value, tz=timezone.utc).
            message['timestamp'] = datetime.datetime.utcfromtimestamp(value)
        elif name == 'Severity':
            message['level'] = self.SEVERITY_TABLE.get(value, Levels.INFO)
        elif name == 'Plugin':
            headers['plugin'] = value
        elif name == 'PluginInstance':
            headers['plugin_instance'] = value
        elif name == 'Type':
            headers['type'] = value
        elif name == 'TypeInstance':
            headers['type_instance'] = value
        elif name == 'CurrentValue':
            headers['current_value'] = float(value)
        elif name == 'WarningMin':
            headers['warning_min'] = float(value)
        elif name == 'WarningMax':
            headers['warning_max'] = float(value)
        elif name == 'FailureMin':
            headers['failure_min'] = float(value)
        elif name == 'FailureMax':
            headers['failure_max'] = float(value)
        else:
            LOG.error('unknown collectd notification header: %r', line)

    @staticmethod
    def _make_title(headers, default):
        """Generate title string for certain plugins."""
        plugin = headers.get('plugin')
        plugin_instance = headers.get('plugin_instance', '?')
        type_instance = headers.get('type_instance', '?')
        if plugin == 'cpu':
            who = 'cpu:%s,%s' % (plugin_instance, type_instance)
        elif plugin == 'memory':
            who = 'memory,%s' % type_instance
        elif plugin == 'df':
            who = 'df:%s,%s' % (plugin_instance, type_instance)
        else:
            return default
        # NOTE: We make use of the property that any comparison to NaN
        # is False.
        nan = float('NaN')
        current_value = headers.get('current_value', nan)
        failure_min = headers.get('failure_min', nan)
        failure_max = headers.get('failure_max', nan)
        warning_min = headers.get('warning_min', nan)
        warning_max = headers.get('warning_max', nan)
        # NOTE(review): when warning_max is present, the third branch
        # (`current_value <= warning_max`) shadows the later `< failure_min`
        # and `< warning_min` branches, so those only fire when the warning
        # thresholds are missing (NaN) — confirm this ordering is intended.
        if warning_min <= current_value <= warning_max:
            what = '%.2f%% <= %.2f%% <= %.2f%%' % (
                warning_min, current_value, warning_max)
        elif current_value > failure_max:
            what = '%.2f%% > %.2f%%' % (current_value, failure_max)
        elif current_value > warning_max:
            what = '%.2f%% > %.2f%%' % (current_value, warning_max)
        elif current_value <= warning_max:
            what = '%.2f%% <= %.2f%%' % (current_value, warning_max)
        elif current_value < failure_min:
            what = '%.2f%% < %.2f%%' % (current_value, failure_min)
        elif current_value < warning_min:
            what = '%.2f%% < %.2f%%' % (current_value, warning_min)
        elif current_value >= warning_min:
            what = '%.2f%% >= %.2f%%' % (current_value, warning_min)
        else:
            what = '?'
        return '%s: %s' % (who, what)
class SourceLog:
    """Tail a log file and turn rule-matching lines into alert messages."""

    # pattern: compiled regex; alert: message template dict; skip: drop line.
    Rule = namedtuple('Rule', 'pattern alert skip')

    class Tailing:
        """Context manager running ``tail -F`` on a file, yielding its stdout."""

        def __init__(self, path):
            self.path = path
            self._proc = None

        def __enter__(self):
            ASSERT.none(self._proc)
            # -F follows across rotations; -n0 skips existing content.
            cmd = ['tail', '-Fn0', self.path]
            self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            return self._proc.stdout

        def __exit__(self, *_):
            # Escalate from terminate to kill, waiting up to 2s after each.
            # NOTE(review): wait(2) raises TimeoutExpired on timeout, which
            # would skip the kill() fallback — confirm this is acceptable.
            for kill in (self._proc.terminate, self._proc.kill):
                if self._proc.poll() is None:
                    kill()
                    self._proc.wait(2)
                else:
                    break
            if self._proc.poll() is None:
                raise RuntimeError('cannot stop process: %r' % self._proc)
            self._proc = None

    def __init__(self, config):
        self.path = config['path']
        self._rules = [
            self.Rule(
                pattern=re.compile(rule['pattern']),
                alert=rule.get('alert', {}),
                skip=rule.get('skip', False),
            )
            for rule in config['rules']
        ]
        ASSERT(self._rules, 'expect non-empty rules: %r', config)
        for rule in self._rules:
            ASSERT(
                not (rule.alert and rule.skip),
                'expect either alert or skip, but not both: %r', config,
            )

    def tailing(self):
        """Return a context manager tailing this source's log file."""
        return self.Tailing(self.path)

    def parse(self, alert_input):
        """Read one (bytes) line from the tail pipe; first matching rule wins.

        Returns a message dict, or None when no rule matches or the
        matching rule is a skip rule.
        """
        line = alert_input.readline().decode('utf-8').strip()
        for rule in self._rules:
            match = rule.pattern.search(line)
            if not match:
                pass
            elif rule.skip:
                return None
            else:
                return self._make_message(rule.alert, match, line)
        return None

    def _make_message(self, alert, match, line):
        """Fill the rule's alert templates from the regex's named groups."""
        kwargs = match.groupdict()
        # Defaults used when the pattern does not capture these fields.
        kwargs.setdefault('host', os.uname().nodename)
        kwargs.setdefault('title', self.path)
        kwargs.setdefault('raw_message', line)
        message = {
            'host':
                alert.get('host', '{host}').format(**kwargs),
            'level':
                Levels[alert.get('level', 'INFO').format(**kwargs).upper()],
            'title':
                alert.get('title', '{title}').format(**kwargs),
            'description':
                alert.get('description', '{raw_message}').format(**kwargs),
        }
        timestamp_fmt = alert.get('timestamp')
        if timestamp_fmt is not None:
            value = float(timestamp_fmt.format(**kwargs))
            message['timestamp'] = datetime.datetime.utcfromtimestamp(value)
        return message
class DestinationSlack:
    """Deliver alert messages to a Slack incoming webhook."""

    # Map alert severity to Slack attachment colors.
    COLOR_TABLE = {
        Levels.INFO: '',
        Levels.GOOD: 'good',
        Levels.WARNING: 'warning',
        Levels.ERROR: 'danger',
    }

    @classmethod
    def make(cls, args):
        """Construct a destination from the on-disk config file."""
        config = load_config(args)
        return cls(config['alerts']['destinations']['slack'])

    def __init__(self, config):
        self.webhook = config['webhook']
        self.username = config.get('username', 'ops-onboard')
        self.icon_emoji = config.get('icon_emoji', ':robot_face:')

    def send(self, message):
        """Post *message* to the webhook; HTTP errors are logged, not raised."""
        request = self._make_request(**message)
        try:
            # urlopen checks the HTTP status code for us.
            urllib.request.urlopen(request)
        except urllib.error.HTTPError as exc:
            LOG.warning('cannot send to slack: %r', exc)

    def _make_request(
            self, *,
            host=None,
            timestamp=None,
            level,
            title,
            description):
        """Build the urllib request carrying one Slack attachment."""
        parts = [level.name]
        if host:
            parts.append(host)
        parts.extend([title, description])
        fallback_text = ': '.join(parts)
        fields = []
        if host:
            fields.append({
                'title': 'Host',
                'value': host,
                'short': True,
            })
        attachment = {
            'fallback': fallback_text,
            'color': self.COLOR_TABLE[level],
            'title': title,
            'text': description,
            'fields': fields,
        }
        if timestamp is not None:
            attachment['ts'] = int(timestamp.timestamp())
        payload = {
            'username': self.username,
            'icon_emoji': self.icon_emoji,
            'attachments': [attachment],
        }
        return urllib.request.Request(
            self.webhook,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload).encode('utf-8'),
        )
@apps.with_help('generate alert from collectd notification')
def collectd(args):
    """Read a collectd notification from stdin, convert it, and send it."""
    # At the moment we have only one destination.
    slack = DestinationSlack.make(args)
    slack.username = 'collectd'
    slack.icon_emoji = ':exclamation:'
    notification = SourceCollectd().parse(sys.stdin)
    if notification:
        slack.send(notification)
    return 0
@apps.with_help('send alert')
@apps.with_argument(
    '--host', default=os.uname().nodename,
    help='overwrite host name (default to %(default)s)',
)
@apps.with_argument(
    '--level',
    choices=tuple(level.name.lower() for level in Levels),
    default=Levels.INFO.name.lower(),
    help='set alert level (default to %(default)s)',
)
@apps.with_argument(
    '--systemd-service-result',
    help=(
        'provide service result for deriving alert level, '
        'overwriting `--level`'
    ),
)
@apps.with_argument(
    '--title', required=True,
    help='set title of alert message',
)
@apps.with_argument(
    '--description', required=True,
    help='set alert description',
)
def send(args):
    """Send an alert built from command-line arguments to the destination."""
    # At the moment we have only one destination.
    slack = DestinationSlack.make(args)
    # A systemd service result, when supplied, overrides --level.
    if args.systemd_service_result is None:
        level = Levels[args.level.upper()]
    elif args.systemd_service_result == 'success':
        level = Levels.GOOD
    else:
        level = Levels.ERROR
    slack.send({
        'host': args.host,
        'level': level,
        'title': args.title,
        'description': args.description,
        'timestamp': datetime.datetime.utcnow(),
    })
    return 0
@apps.with_help('watch the system and generate alerts')
def watch(args):
    """Watch the system and generate alerts.

    This is intended to be the most basic layer of the alerting system;
    more sophisticated alerting logic should be implemented at a higher
    level.
    """
    watcher = Alerts.make(args)
    watcher.watch()
    return 0
@apps.with_help('manage alerts')
@apps.with_defaults(
    no_locking_required=True,
    root_allowed=True,
)
@apps.with_argument(
    '--config', type=Path, default='/etc/ops/config.json',
    help='set config file path (default to %(default)s)'
)
@apps.with_apps(
    'operation', 'operation on alerts',
    collectd,
    send,
    watch,
)
def alerts(args):
    """Manage alerts: dispatch to the selected sub-command."""
    # `args.operation` is bound by `apps.with_apps` to one of the
    # sub-commands registered above (collectd, send, or watch).
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/onboard/alerts.py",
"copies": "1",
"size": "12867",
"license": "mit",
"hash": -1643927742996718800,
"line_mean": 28.1108597285,
"line_max": 76,
"alpha_frac": 0.5539752856,
"autogenerated": false,
"ratio": 4.03480714957667,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.508878243517667,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'AlgorithmBase',
'ReaderBaseBase',
'ReaderBase',
'FilterBase',
'FilterPreserveTypeBase',
'TwoFileReaderBase',
'WriterBase',
'InterfacedBaseReader',
]
__displayname__ = 'Base Classes'
import warnings
# Outside Imports:
import vtk # NOTE: This is the first import executed in the package! Keep here!!
import vtk.util.vtkAlgorithm as valg # import VTKPythonAlgorithmBase
import pyvista as pv
from . import _helpers
###############################################################################
class AlgorithmBase(valg.VTKPythonAlgorithmBase):
    """This is a base class to add convenience methods to the
    ``VTKPythonAlgorithmBase`` for all algorithms implemented in ``PVGeo``.
    We implement our algorithms in this manner to harness all of the backend
    support that the ``VTKPythonAlgorithmBase`` class provides for integrating
    custom algorithms on a VTK pipeline. All of the pipeline methods for setting
    inputs, getting outputs, making requests are handled by the super classes.
    For more information on what functionality is available, check out the VTK
    Docs for the `vtkAlgorithm`_ and then check out the following blog posts:
    * `vtkPythonAlgorithm is great`_
    * A VTK pipeline primer `(part 1)`_, `(part 2)`_, and `(part 3)`_
    * `ParaView Python Docs`_
    .. _vtkAlgorithm: https://www.vtk.org/doc/nightly/html/classvtkAlgorithm.html
    .. _vtkPythonAlgorithm is great: https://blog.kitware.com/vtkpythonalgorithm-is-great/
    .. _(part 1): https://blog.kitware.com/a-vtk-pipeline-primer-part-1/
    .. _(part 2): https://blog.kitware.com/a-vtk-pipeline-primer-part-2/
    .. _(part 3): https://blog.kitware.com/a-vtk-pipeline-primer-part-3/
    .. _ParaView Python Docs: https://www.paraview.org/ParaView/Doc/Nightly/www/py-doc/paraview.util.vtkAlgorithm.html
    """

    __displayname__ = 'Algorithm Base'
    __category__ = 'base'

    def __init__(
        self,
        nInputPorts=1,
        inputType='vtkDataSet',
        nOutputPorts=1,
        outputType='vtkTable',
        **kwargs
    ):
        # NOTE(review): extra **kwargs are accepted but not forwarded to
        # VTKPythonAlgorithmBase — confirm silently dropping them is intended.
        valg.VTKPythonAlgorithmBase.__init__(
            self,
            nInputPorts=nInputPorts,
            inputType=inputType,
            nOutputPorts=nOutputPorts,
            outputType=outputType,
        )
        # Add error handler to make errors easier to deal with
        self.__error_observer = _helpers.ErrorObserver()
        self.__error_observer.make_observer(self)

    def GetOutput(self, port=0):
        """A convenience method to get the output data object of this ``PVGeo``
        algorithm, wrapped as a ``pyvista`` data object.
        """
        return pv.wrap(self.GetOutputDataObject(port))

    def error_occurred(self):
        """A convenience method for handling errors on the VTK pipeline

        Return:
            bool: true if an error has occurred since last checked
        """
        return self.__error_observer.error_occurred()

    def get_error_message(self):
        """A convenience method to get the last error message."""
        return self.__error_observer.get_error_message()

    def apply(self):
        """Update the algorithm and get the output data object"""
        self.Update()
        # NOTE(review): GetOutput() already wraps with pv.wrap, so this
        # wraps twice; harmless but redundant.
        return pv.wrap(self.GetOutput())

    def update(self):
        """Alias for self.Update()"""
        return self.Update()

    def get_output(self, port=0):
        """Alias for self.GetOutput()"""
        return self.GetOutput(port=port)
###############################################################################
# Base Base Reader
class ReaderBaseBase(AlgorithmBase):
    """A base class for inherited functionality common to all reader algorithms"""

    __displayname__ = 'Reader Base Base'
    __category__ = 'base'

    def __init__(self, nOutputPorts=1, outputType='vtkTable', **kwargs):
        AlgorithmBase.__init__(
            self,
            nInputPorts=0,
            nOutputPorts=nOutputPorts,
            outputType=outputType,
            **kwargs
        )
        # Attributes are name-mangled to ensure proper setters/getters are used
        # For the reader
        self.__filenames = kwargs.get('filenames', [])
        # To know whether or not the read needs to perform
        self.__need_to_read = True

    def need_to_read(self, flag=None):
        """Ask self if the reader needs to read the files again.

        Args:
            flag (bool): Set the read status

        Return:
            bool: the status of the reader.
        """
        if flag is not None and isinstance(flag, (bool, int)):
            self.__need_to_read = flag
        return self.__need_to_read

    def Modified(self, read_again=True):
        """Call modified if the files need to be read again"""
        if read_again:
            self.__need_to_read = read_again
        AlgorithmBase.Modified(self)

    def modified(self, read_again=True):
        # Snake-case alias for Modified().
        return self.Modified(read_again=read_again)

    #### Methods for performing the read ####
    # These are meant to be overwritten by child classes

    def _get_file_contents(self, idx=None):
        raise NotImplementedError()

    def _read_up_front(self):
        raise NotImplementedError()

    def _get_raw_data(self, idx=0):
        raise NotImplementedError()

    #### Setters and Getters ####

    def clear_file_names(self):
        """Use to clear file names of the reader.

        Note:
            This does not set the reader to need to read again as there are
            no files to read.
        """
        self.__filenames = []

    def AddFileName(self, filename):
        """Use to set the file names for the reader. Handles single string or
        list of strings.

        Args:
            filename (str): The absolute file name with path to read.
        """
        if filename is None:
            return  # do nothing if None is passed by a constructor on accident
        if isinstance(filename, list):
            for f in filename:
                self.AddFileName(f)
        elif filename not in self.__filenames:
            self.__filenames.append(filename)
            self.Modified()

    def add_file_name(self, filename):
        """Snake-case alias: see :meth:`AddFileName`.

        Args:
            filename (str): The absolute file name with path to read.
        """
        return self.AddFileName(filename)

    def get_file_names(self, idx=None):
        """Returns the list of file names, or, given an index, a specified
        timestep's filename.
        """
        if self.__filenames is None or len(self.__filenames) < 1:
            raise _helpers.PVGeoError('File names are not set.')
        if idx is None:
            return self.__filenames
        return self.__filenames[idx]

    def apply(self, filename):
        """Given a file name (or list of file names), perform the read"""
        self.AddFileName(filename)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
# Base filter to preserve input data type
class FilterBase(AlgorithmBase):
    """A base class for implementing filters which holds several convenience
    methods"""

    __displayname__ = 'Filter Base'
    __category__ = 'base'

    def __init__(
        self,
        nInputPorts=1,
        inputType='vtkDataSet',
        nOutputPorts=1,
        outputType='vtkPolyData',
        **kwargs
    ):
        # Simply forward the port configuration to AlgorithmBase.
        AlgorithmBase.__init__(
            self,
            nInputPorts=nInputPorts,
            inputType=inputType,
            nOutputPorts=nOutputPorts,
            outputType=outputType,
            **kwargs
        )

    def apply(self, input_data_object):
        """Run this algorithm on the given input dataset and return the output
        wrapped as a ``pyvista`` data object."""
        self.SetInputDataObject(input_data_object)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
# Base Reader
class ReaderBase(ReaderBaseBase):
    """A base class for inherited functionality common to all reader algorithms
    that need to handle a time series.
    """

    __displayname__ = 'Reader Base: Time Varying'
    __category__ = 'base'

    def __init__(self, nOutputPorts=1, outputType='vtkTable', **kwargs):
        ReaderBaseBase.__init__(
            self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
        )
        # Attributes are name-mangled to ensure proper setters/getters are used
        # For the VTK/ParaView pipeline
        self.__dt = kwargs.get('dt', 1.0)  # time step between files, in seconds
        self.__timesteps = None  # populated once multiple file names are set

    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        if len(self.get_file_names()) > 1:
            self.__timesteps = _helpers.update_time_steps(
                self, self.get_file_names(), self.__dt
            )
        return 1

    #### Algorithm Methods ####

    def RequestInformation(self, request, inInfo, outInfo):
        """This is a convenience method that should be overwritten when needed.
        This will handle setting the timesteps appropriately based on the number
        of file names when the pipeline needs to know the time information.
        """
        self._update_time_steps()
        return 1

    #### Setters and Getters ####

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps on the pipeline."""
        return self.__timesteps.tolist() if self.__timesteps is not None else None

    def set_time_delta(self, dt):
        """An advanced property to set the time step in seconds."""
        if dt != self.__dt:
            self.__dt = dt
            self.Modified()
###############################################################################
# Base filter to preserve input data type
class FilterPreserveTypeBase(FilterBase):
    """A Base class for implementing filters that preserve the data type of
    their arbitrary input.
    """

    __displayname__ = 'Filter Preserve Type Base'
    __category__ = 'base'

    def __init__(self, nInputPorts=1, **kwargs):
        FilterBase.__init__(
            self,
            nInputPorts=nInputPorts,
            inputType=kwargs.pop('inputType', 'vtkDataObject'),
            nOutputPorts=1,
            **kwargs
        )
        self._preserve_port = 0  # This is the port to preserve data object type

    # THIS IS CRUCIAL to preserve data type through filter
    def RequestDataObject(self, request, inInfo, outInfo):
        """There is no need to overwrite this. This method lets the pipeline
        know that the algorithm will dynamically decide the output data type
        based on the input data type.
        """
        self.OutputType = self.GetInputData(
            inInfo, self._preserve_port, 0
        ).GetClassName()
        self.FillOutputPortInformation(0, outInfo.GetInformationObject(0))
        return 1
###############################################################################
# Two File Reader Base
class TwoFileReaderBase(AlgorithmBase):
    """A base class for readers that need to handle two input files.
    One meta-data file and a series of data files.
    """

    __displayname__ = 'Two File Reader Base'
    __category__ = 'base'

    def __init__(self, nOutputPorts=1, outputType='vtkUnstructuredGrid', **kwargs):
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=nOutputPorts, outputType=outputType
        )
        self.__dt = kwargs.get('dt', 1.0)
        self.__timesteps = None
        self.__mesh_filename = kwargs.get('meshfile', None)  # Can only be one!
        modfiles = kwargs.get(
            'model_files', []
        )  # Can be many (single attribute, many timesteps)
        if isinstance(modfiles, str):
            modfiles = [modfiles]
        self.__model_filenames = modfiles
        self.__need_to_read_mesh = True
        self.__need_to_read_models = True

    def __update_time_steps(self):
        """For internal use only"""
        if len(self.__model_filenames) > 0:
            self.__timesteps = _helpers.update_time_steps(
                self, self.__model_filenames, self.__dt
            )
        return 1

    def need_to_readMesh(self, flag=None):
        """Ask self if the reader needs to read the mesh file again.

        Args:
            flag (bool): set the status of the reader for mesh files.
        """
        if flag is not None and isinstance(flag, (bool, int)):
            self.__need_to_read_mesh = flag
        return self.__need_to_read_mesh

    def need_to_readModels(self, flag=None):
        """Ask self if the reader needs to read the model files again.

        Args:
            flag (bool): set the status of the reader for model files.
        """
        if flag is not None and isinstance(flag, (bool, int)):
            self.__need_to_read_models = flag
        return self.__need_to_read_models

    def Modified(self, read_again_mesh=True, read_again_models=True):
        """Call modified if the files need to be read again

        Args:
            read_again_mesh (bool): set the status of the reader for mesh files.
            read_again_models (bool): set the status of the reader for model files.
        """
        if read_again_mesh:
            self.need_to_readMesh(flag=read_again_mesh)
        if read_again_models:
            self.need_to_readModels(flag=read_again_models)
        return AlgorithmBase.Modified(self)

    def modified(self, read_again_mesh=True, read_again_models=True):
        # Snake-case alias for Modified().
        return self.Modified(
            read_again_mesh=read_again_mesh, read_again_models=read_again_models
        )

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle setting up time variance"""
        self.__update_time_steps()
        return 1

    #### Setters and Getters ####

    @staticmethod
    def has_models(model_files):
        """A convenience method to see if a list contains model filenames."""
        if isinstance(model_files, list):
            return len(model_files) > 0
        return model_files is not None

    def this_has_models(self):
        """Ask self if the reader has model filenames set."""
        return TwoFileReaderBase.has_models(self.__model_filenames)

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps"""
        return self.__timesteps.tolist() if self.__timesteps is not None else None

    def set_time_delta(self, dt):
        """An advanced property for the time step in seconds."""
        if dt != self.__dt:
            self.__dt = dt
            self.Modified(read_again_mesh=False, read_again_models=False)

    def clear_mesh(self):
        """Use to clear mesh file name"""
        self.__mesh_filename = None
        self.Modified(read_again_mesh=True, read_again_models=False)

    def clear_models(self):
        """Use to clear data file names"""
        self.__model_filenames = []
        self.Modified(read_again_mesh=False, read_again_models=True)

    def set_mesh_filename(self, filename):
        """Set the mesh file name."""
        if self.__mesh_filename != filename:
            self.__mesh_filename = filename
            self.Modified(read_again_mesh=True, read_again_models=False)

    def add_model_file_name(self, filename):
        """Use to set the file names for the reader. Handles single string or
        list of strings.

        Args:
            filename (str or list(str)): the file name(s) to use for the model data.
        """
        if filename is None:
            return  # do nothing if None is passed by a constructor on accident
        if isinstance(filename, list):
            for f in filename:
                self.add_model_file_name(f)
            # NOTE(review): each recursive call above already triggers
            # Modified(), so this extra call looks redundant — confirm.
            self.Modified(read_again_mesh=False, read_again_models=True)
        elif filename not in self.__model_filenames:
            self.__model_filenames.append(filename)
            self.Modified(read_again_mesh=False, read_again_models=True)
        return 1

    def get_model_filenames(self, idx=None):
        """Returns the list of file names, or, given an index, a specified
        timestep's filename.
        """
        if idx is None or not self.this_has_models():
            return self.__model_filenames
        return self.__model_filenames[idx]

    def get_mesh_filename(self):
        """Get the mesh filename"""
        return self.__mesh_filename

    def apply(self):
        """Perform the read with parameters/file names set during init or by
        setters"""
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class WriterBase(AlgorithmBase):
    """A base class for writer algorithms: consumes a single input data object
    (or a ``vtkMultiBlockDataSet``) and saves it out to disk.
    """

    __displayname__ = 'Writer Base'
    __category__ = 'base'

    def __init__(self, nInputPorts=1, inputType='vtkPolyData', **kwargs):
        AlgorithmBase.__init__(
            self, nInputPorts=nInputPorts, inputType=inputType, nOutputPorts=0
        )
        self.__filename = kwargs.get('filename', None)
        self.__fmt = '%.9e'  # ASCII float format used by subclasses
        # For composite datasets: not always used
        self.__blockfilenames = None
        self.__composite = False

    def FillInputPortInformation(self, port, info):
        """Allows us to save composite datasets as well.

        Note:
            I only care about ``vtkMultiBlockDataSet``
        """
        info.Set(self.INPUT_REQUIRED_DATA_TYPE(), self.InputType)
        info.Append(
            self.INPUT_REQUIRED_DATA_TYPE(), 'vtkMultiBlockDataSet'
        )  # vtkCompositeDataSet
        return 1

    def SetFileName(self, filename):
        """Specify the filename for the output. Writer can only handle a single
        output data object/time step."""
        if not isinstance(filename, str):
            raise RuntimeError(
                'File name must be string. Only single file is supported.'
            )
        if self.__filename != filename:
            self.__filename = filename
            self.Modified()

    def set_file_name(self, filename):
        """Snake-case alias for :meth:`SetFileName`."""
        return self.SetFileName(filename)

    def get_file_name(self):
        """Get the set filename."""
        return self.__filename

    def Write(self, input_data_object=None):
        """Perform the write out."""
        if input_data_object:
            self.SetInputDataObject(input_data_object)
        self.Modified()
        self.Update()

    def write(self, input_data_object=None):
        """Snake-case alias for :meth:`Write`.

        BUG FIX: this previously called ``self.write`` (itself), causing
        infinite recursion; it now delegates to :meth:`Write`.
        """
        return self.Write(input_data_object=input_data_object)

    def perform_write_out(self, input_data_object, filename, object_name):
        """This method must be implemented. This is automatically called by
        ``RequestData`` for single inputs or composite inputs."""
        raise NotImplementedError('perform_write_out must be implemented!')

    def apply(self, input_data_object):
        """Run this writer algorithm on the given input data object"""
        self.SetInputDataObject(input_data_object)
        self.Modified()
        self.Update()

    def set_format(self, fmt):
        """Use to set the ASCII format for the writer; default is ``'%.9e'``"""
        if self.__fmt != fmt and isinstance(fmt, str):
            self.__fmt = fmt
            self.Modified()

    def get_format(self):
        """Get the ASCII format used for floats"""
        return self.__fmt

    #### Following methods are for composite datasets ####

    def use_composite(self):
        """True if input dataset is a composite dataset"""
        return self.__composite

    def set_block_filenames(self, n):
        """Gets a list of filenames based on user input filename and creates a
        numbered list of filenames for the reader to save out. Assumes the
        filename has an extension set already.
        """
        number = n
        count = 0
        # Count the digits of n so block indices are zero-padded evenly.
        while number > 0:
            number = number // 10
            count = count + 1
        count = '%d' % count
        identifier = '_%.' + count + 'd'
        blocknum = [identifier % i for i in range(n)]
        # Check the file extension:
        ext = self.get_file_name().split('.')[-1]
        basename = self.get_file_name().replace('.%s' % ext, '')
        self.__blockfilenames = [
            basename + '%s.%s' % (blocknum[i], ext) for i in range(n)
        ]
        return self.__blockfilenames

    def get_block_filename(self, idx):
        """Get filename for component of a multi block dataset"""
        return self.__blockfilenames[idx]

    def RequestData(self, request, inInfo, outInfo):
        """Subclasses must implement a ``perform_write_out`` method that takes an
        input data object and a filename. This method will automatically handle
        composite data sets.
        """
        inp = self.GetInputData(inInfo, 0, 0)
        if isinstance(inp, vtk.vtkMultiBlockDataSet):
            self.__composite = True
        # Handle composite datasets. NOTE: This only handles vtkMultiBlockDataSet
        if self.__composite:
            num = inp.GetNumberOfBlocks()
            self.set_block_filenames(num)
            for i in range(num):
                data = inp.GetBlock(i)
                name = inp.GetMetaData(i).Get(vtk.vtkCompositeDataSet.NAME())
                if data.IsTypeOf(self.InputType):
                    self.perform_write_out(data, self.get_block_filename(i), name)
                else:
                    warnings.warn(
                        'Input block %d of type(%s) not saveable by writer.'
                        % (i, type(data))
                    )
        # Handle single input dataset
        else:
            self.perform_write_out(inp, self.get_file_name(), None)
        return 1
###############################################################################
class InterfacedBaseReader(ReaderBase):
    """A general base reader for interfacing with libraries that already
    have file I/O methods and VTK data object interfaces. This provides a
    routine for using an external library to handle all I/O and produce the
    VTK data objects."""

    __displayname__ = 'Interfaced Base Reader'

    def __init__(self, **kwargs):
        ReaderBase.__init__(self, **kwargs)
        # One converted VTK data object per input file; filled by
        # ``_read_up_front``.
        self.__objects = []

    # THIS IS CRUCIAL to dynamically decide the output type
    def RequestDataObject(self, request, inInfo, outInfo):
        """Do not override. This method lets us dynamically decide the
        output data type based on the read meshes.
        Note: they all have to be the same VTK type.
        """
        # Reading happens here (not in RequestData) because the output type
        # is only known after the files have been parsed.
        self._read_up_front()
        self.FillOutputPortInformation(0, outInfo.GetInformationObject(0))
        return 1

    @staticmethod
    def _read_file(filename):
        """OVERRIDE: Read ``filename`` with the interfaced library and return
        an object in that library's own format."""
        raise NotImplementedError()

    @staticmethod
    def _get_vtk_object(obj):
        """OVERRIDE: Given an object in the interfaced library's type, return
        a converted VTK data object."""
        raise NotImplementedError()

    def _read_up_front(self):
        """Do not override. A predefined routine for reading the files up front."""
        filenames = self.get_file_names()
        self.__objects = []
        for f in filenames:
            mesh = self._read_file(f)
            obj = self._get_vtk_object(mesh)
            self.__objects.append(obj)
        # Now check that all objects in list are same type and set output type
        # NOTE(review): raises IndexError when no file names are set --
        # confirm callers always configure at least one file first.
        typ = type(self.__objects[0])
        if not all(isinstance(x, typ) for x in self.__objects):
            raise _helpers.PVGeoError('Input VTK objects are not all of the same type.')
        self.OutputType = self.__objects[0].GetClassName()

    def _get_object_at_index(self, idx=None):
        """Internal helper to get the data object at the specified index
        (falls back to the first object when ``idx`` is None)."""
        if idx is not None:
            return self.__objects[idx]
        return self.__objects[0]

    def RequestData(self, request, inInfo, outInfo):
        """Do not override. Used by pipeline to get data for current timestep
        and populate the output data object.
        """
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        # Get output:
        output = self.GetOutputData(outInfo, 0)
        # Shallow copy: the cached object keeps ownership of the data arrays.
        output.ShallowCopy(self._get_object_at_index(idx=i))
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Do not override. Used by pipeline to set extents and time info."""
        # Call parent to handle time stuff
        ReaderBase.RequestInformation(self, request, inInfo, outInfo)
        # Now set whole output extent
        info = outInfo.GetInformationObject(0)
        obj = self.__objects[0]  # Get first grid to set output extents
        # Set WHOLE_EXTENT: This is absolutely necessary
        ext = obj.GetExtent()
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1
###############################################################################
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/base.py",
"copies": "1",
"size": "25248",
"license": "bsd-3-clause",
"hash": -1424093899177424100,
"line_mean": 34.7620396601,
"line_max": 118,
"alpha_frac": 0.5944233207,
"autogenerated": false,
"ratio": 4.304859335038363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399282655738363,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Aligner',
'PrefixComparer',
'SuffixComparer',
'hamming_sphere',
'hamming_environment',
'edit_environment',
'edit_distance',
]
from typing import Iterator, Tuple
from cutadapt._align import Aligner, PrefixComparer, SuffixComparer
# flags for global alignment
# The interpretation of the first flag is:
# An initial portion of seq1 may be skipped at no cost.
# This is equivalent to saying that in the alignment,
# gaps in the beginning of seq2 are free.
#
# The other flags have an equivalent meaning.
START_WITHIN_SEQ1 = 1
START_WITHIN_SEQ2 = 2
STOP_WITHIN_SEQ1 = 4
STOP_WITHIN_SEQ2 = 8
# Use this to get regular semiglobal alignment
# (all gaps in the beginning or end are free)
SEMIGLOBAL = START_WITHIN_SEQ1 | START_WITHIN_SEQ2 | STOP_WITHIN_SEQ1 | STOP_WITHIN_SEQ2
def edit_distance(s: str, t: str) -> int:
    """
    Return the edit distance between the strings s and t.

    The edit distance is the sum of the numbers of insertions, deletions,
    and mismatches that is minimally necessary to transform one string
    into the other.
    """
    # Single-row dynamic program: row[i] holds the distance between s[:i]
    # and the prefix of t processed so far.
    row = list(range(len(s) + 1))
    for j, tc in enumerate(t, start=1):
        diagonal = row[0]
        row[0] = j
        for i, sc in enumerate(s, start=1):
            substitution = diagonal + (0 if sc == tc else 1)
            deletion = row[i] + 1
            insertion = row[i - 1] + 1
            diagonal = row[i]
            row[i] = min(substitution, deletion, insertion)
    return row[-1]
def hamming_sphere(s: str, k: int) -> Iterator[str]:
    """
    Yield all strings t for which the hamming distance between s and t is exactly k,
    assuming the alphabet is A, C, G, T.
    """
    assert k >= 0
    if k == 0:
        yield s
        return
    length = len(s)
    # Vary position i first; the remaining k - 1 substitutions are applied
    # to the suffix only, so each string is generated exactly once.
    for i in range(length - k + 1):
        head, original, tail = s[:i], s[i], s[i + 1:]
        for replacement in 'ACGT':
            if replacement == original:
                continue
            for changed_tail in hamming_sphere(tail, k - 1):
                candidate = head + replacement + changed_tail
                assert len(candidate) == length
                yield candidate
def hamming_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t for which the hamming distance between s and t is at most k,
    assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, m), where e is the hamming distance between s and t and
    m is the number of matches (equal to len(t) - e).
    """
    length = len(s)
    # Enumerate spheres of increasing radius up to and including k.
    for distance in range(k + 1):
        for variant in hamming_sphere(s, distance):
            yield variant, distance, length - distance
def naive_edit_environment(s: str, k: int) -> Iterator[str]:
    """
    Apply all possible edits up to edit distance k to string s.

    A string may be returned more than once.
    """
    yield s
    if k == 0:
        return
    # Apply one further edit to every string reachable with at most k - 1
    # edits.
    for base in naive_edit_environment(s, k - 1):
        length = len(base)
        for letter in "ACGT":
            for pos in range(length):
                start = base[:pos] + letter
                yield start + base[pos:]      # insertion before pos
                yield start + base[pos + 1:]  # substitution at pos
            yield base + letter  # insertion into final position
        # all single-character deletions
        for pos in range(length):
            yield base[:pos] + base[pos + 1:]
def edit_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t for which the edit distance between s and t is at most k,
    assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, m), where e is the edit distance between s and t and
    m is the number of matches in the optimal alignment.
    """
    rate = k / len(s) if s else 0
    aligner = Aligner(s, max_error_rate=rate, flags=0, min_overlap=len(s))
    # De-duplicate the naive enumeration while preserving first-seen order.
    emitted = set()
    for candidate in naive_edit_environment(s, k):
        if candidate in emitted:
            continue
        emitted.add(candidate)
        alignment = aligner.locate(candidate)
        matches, errors = alignment[-2:]  # type: ignore
        yield candidate, errors, matches
def slow_edit_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t for which the edit distance between s and t is at most k,
    assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, m), where e is the edit distance between s and t and
    m is the number of matches in the optimal alignment.

    Candidate strings are enumerated character by character (depth-first via
    an explicit work stack); each stack entry carries one row of the
    edit-distance dynamic-programming matrix for its prefix.
    """
    n = len(s)
    alphabet = "TGCA"
    # Each entry is (prefix t, DP cost row for t vs. s, matching row of
    # per-cell match counts).
    work_stack = [(
        "",
        list(range(n + 1)),
        [0] * (n + 1),
    )]
    while work_stack:
        # t is the current prefix
        # costs is a row at index len(t) in the DP matrix
        # matches is a row in the corresponding matrix of the no. of matches
        t, costs, matches = work_stack.pop()
        # The row is the last row of the DP matrix for aligning t against s
        i = len(t)
        if costs[-1] <= k:
            # The costs of an optimal alignment of t against s are at most k,
            # so t is within the edit environment.
            yield t, costs[-1], matches[-1]
        if i == n + k:
            # Last row reached; longer strings cannot be within distance k.
            continue
        # Runtime heuristic: The entries in the DP matrix cannot get lower
        # in subsequent rows, so do not try longer suffixes if all entries
        # are greater than k.
        if min(costs) > k:
            continue
        # compute next row in DP matrix for all characters of the alphabet
        for ch in alphabet:
            # create a new DP matrix row for each character of the alphabet
            next_costs = [0] * (n + 1)
            # Aligning t + ch against the empty prefix of s costs len(t) + 1.
            next_costs[0] = len(t) + 1
            next_matches = [0] * (n + 1)
            for j in range(1, n + 1):
                match = 0 if s[j - 1] == ch else 1
                assert j > 0
                diag = costs[j-1] + match
                left = next_costs[j-1] + 1
                up = costs[j] + 1
                # Tie-break in favor of the diagonal so the match count is
                # propagated along the same path as the cost.
                if diag <= left and diag <= up:
                    c, m = diag, matches[j-1] + (1 - match)
                elif left <= up:
                    c, m = left, next_matches[j-1]
                else:
                    c, m = up, matches[j]
                next_costs[j] = c
                next_matches[j] = m
            work_stack.append((t + ch, next_costs, next_matches))
| {
"repo_name": "marcelm/cutadapt",
"path": "src/cutadapt/align.py",
"copies": "1",
"size": "6234",
"license": "mit",
"hash": 7054827613049480000,
"line_mean": 30.4747474747,
"line_max": 88,
"alpha_frac": 0.542843389,
"autogenerated": false,
"ratio": 3.594002306805075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46368456958050747,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'allow_tags',
'humanized',
'filter_class',
'filter_by',
'order_by',
'sorter_class',
)
def allow_tags(func):
    """Allows HTML tags to be returned from resource without escaping"""
    target = func.fget if isinstance(func, property) else func
    target.allow_tags = True
    return target
def humanized(humanized_func, **humanized_func_kwargs):
    """Sets 'humanized' function to method or property."""
    def decorator(func):
        target = func.fget if isinstance(func, property) else func
        def _humanized_func(*args, **kwargs):
            # Decorator-time kwargs take precedence over call-time kwargs.
            kwargs.update(humanized_func_kwargs)
            return humanized_func(*args, **kwargs)
        target.humanized = _humanized_func
        return target
    return decorator
def filter_class(filter_class):
    """Sets 'filter' class (this attribute is used inside grid and rest)."""
    def decorator(func):
        target = func.fget if isinstance(func, property) else func
        target.filter = filter_class
        return target
    return decorator
def filter_by(field_name):
    """Sets 'field name' (this is used for grid filtering)"""
    def decorator(func):
        target = func.fget if isinstance(func, property) else func
        target.filter_by = field_name
        return target
    return decorator
def order_by(field_name):
    """Sets 'field name' (this is used for grid ordering)"""
    def decorator(func):
        target = func.fget if isinstance(func, property) else func
        target.order_by = field_name
        return target
    return decorator
def sorter_class(sorter_class):
    """Sets 'sorter' class (this attribute is used inside grid and rest)."""
    def decorator(func):
        target = func.fget if isinstance(func, property) else func
        target.sorter = sorter_class
        return target
    return decorator
| {
"repo_name": "druids/django-pyston",
"path": "pyston/utils/decorators.py",
"copies": "1",
"size": "1840",
"license": "bsd-3-clause",
"hash": -701576403707325700,
"line_mean": 25.2857142857,
"line_max": 76,
"alpha_frac": 0.6092391304,
"autogenerated": false,
"ratio": 4.088888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 70
} |
__all__ = ['NameVerDetectionError', 'Artifact', 'LocalArtifact', 'LocalRpmArtifact', 'RemoteArtifact']
import six.moves.urllib.parse
import itertools
import re
import os
import logging
logger = logging.getLogger(__name__)
class ArtifactError(Exception):
    """Base class for all artifact-related errors in this module."""
    pass
class NameVerDetectionError(ArtifactError):
    """Raised when name/version/extension cannot be derived from a file name."""
    pass
class Artifact(object):
    """
    Generic class describing an artifact
    """
    def __init__(self, group, artifact='', version='', classifier='', extension=''):
        self.group = group
        self.artifact = artifact
        self.version = version
        self.classifier = classifier
        self.extension = extension

    def get_coordinates_string(self):
        """Return the colon-separated coordinates of this artifact."""
        return '{0}:{1}:{2}:{3}:{4}'.format(
            self.group, self.artifact, self.version, self.classifier,
            self.extension)

    def __repr__(self):
        return self.get_coordinates_string()
class LocalArtifact(Artifact):
    """
    Artifact for upload to repository.

    Coordinates that are not given explicitly are auto-detected from the
    basename of ``local_path``.
    """
    def __init__(self, group, local_path, artifact='', version='', classifier='', extension=''):
        self.local_path = local_path
        artifact_detected, version_detected, extension_detected = self.detect_name_ver_ext()
        # Explicit arguments win over the auto-detected values.
        if not artifact:
            artifact = artifact_detected
        if not version:
            version = version_detected
        if not extension:
            extension = extension_detected
        super(LocalArtifact, self).__init__(group=group, artifact=artifact, version=version, classifier=classifier,
                                            extension=extension)

    def detect_name_ver_ext(self):
        """Detect ``(name, version, extension)`` from the file's basename.

        Expects names like ``foo-bar-1.2.3.tar``: everything before the first
        dash that is followed by a digit is the name, the remainder up to the
        final dot is the version, and the final suffix is the extension.

        :raises NameVerDetectionError: if the basename does not match.
        """
        base_name = os.path.basename(self.local_path)
        # Raw string: '\d' / '\.' in a plain literal are invalid escape
        # sequences and warn on modern Pythons.
        result = re.match(r'^(?# name)(.*?)-(?=\d)(?# version)(\d.*)\.(?# extension)([^.]+)$', base_name)
        if result is None:
            # BUG FIX: the path must be %-formatted into the message; passing
            # it as a second exception argument produced a tuple-like message.
            raise NameVerDetectionError(
                'Automatic detection of name and/or version failed for %s' % self.local_path)
        name, version, extension = result.group(1), result.group(2), result.group(3)
        logger.debug('name: %s, version: %s, extension: %s', name, version, extension)
        return name, version, extension
class LocalRpmArtifact(LocalArtifact):
    """
    Special case of local artifact, which can detect its coordinates from RPM metadata
    """
    @staticmethod
    def get_artifact_group(url):
        """Derive a group id (reversed domain, without 'www') from the package URL.

        :raises Exception: if the RPM metadata contains no URL tag.
        """
        if url is None:
            raise Exception('Web pages of the package not present in RPM metadata, please fill the URL tag in specfile')
        parts = six.moves.urllib.parse.urlsplit(url).netloc.split(".")
        # BUG FIX: itertools.ifilter exists only on Python 2; a generator
        # expression works on both Python 2 and 3.
        return ".".join(part for part in reversed(parts) if part != "www")

    def __init__(self, local_path, group=None):
        try:
            import rpm
        except ImportError:
            raise ArtifactError("Can't import rpm module to detect name and version")
        ts = rpm.ts()
        fdno = os.open(local_path, os.O_RDONLY)
        try:
            headers = ts.hdrFromFdno(fdno)
        finally:
            # Always release the descriptor, even if header parsing fails.
            os.close(fdno)
        if not group:
            group = self.get_artifact_group(headers['url'])
        artifact = headers['name']
        version = '{v}-{r}'.format(v=headers['version'], r=headers['release'])
        super(LocalRpmArtifact, self).__init__(group=group, artifact=artifact, version=version, local_path=local_path)
class RemoteArtifact(Artifact):
"""
Artifact in repository
"""
def __init__(self, group=None, artifact='', version='', classifier='', extension='', url=None, repo_id=None):
super(RemoteArtifact, self).__init__(group=group, artifact=artifact, version=version, classifier=classifier,
extension=extension)
self.repo_id = repo_id
self.url = url
@classmethod
def from_repo_id_and_coordinates(cls, repo_id, coordinates):
"""
:param repo_id:
:param coordinates: e.g. 'com.fooware:foo:1.0.0'
:return:
"""
fields = coordinates.split(':')
if len(fields) < 3:
raise ArtifactError('Incorrect coordinates, at least group, artifact and version are obligatory')
group, artifact, version = fields[0], fields[1], fields[2]
classifier = extension = ''
if len(fields) > 3:
classifier = fields[3]
if len(fields) > 4:
extension = fields[4]
return cls(group=group, artifact=artifact, version=version, classifier=classifier, extension=extension,
repo_id=repo_id) | {
"repo_name": "packagemgmt/repositorytools",
"path": "repositorytools/lib/artifact.py",
"copies": "2",
"size": "4777",
"license": "apache-2.0",
"hash": -1681071178715789800,
"line_mean": 34.1323529412,
"line_max": 120,
"alpha_frac": 0.5897006489,
"autogenerated": false,
"ratio": 4.338782924613987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5928483573513987,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'AnimateTBM',
]
import vtk
from ..base import AlgorithmBase
class AnimateTBM(AlgorithmBase):
    """This filter analyzes a vtkTable containing position information about a
    Tunnel Boring Machine (TBM). This filter iterates over each row of the
    table as a timestep and uses the XYZ coordinates of the three different
    parts of the TBM to generate a tube that represents the TBM."""

    def __init__(self):
        AlgorithmBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkPolyData',
        )
        # Parameters
        # BUG FIX: the diameter was a one-element tuple ``(17.45,)``, which
        # made ``self.__diameter / 2`` below raise TypeError; it must be a
        # scalar (meters).
        self.__diameter = 17.45
        # Time (in seconds) between two consecutive table rows.
        self.__dt = 1.0

    def RequestData(self, request, inInfo, outInfo):
        """Build the tube representing the TBM at the requested timestep."""
        from vtk.numpy_interface import dataset_adapter as dsa
        import PVGeo._helpers as inputhelp
        from PVGeo.filters import pointsToTube
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Grab input arrays to process from drop down menus
        # - Grab all fields for input arrays:
        # BUG FIX: nine arrays (three TBM parts x X/Y/Z) are consumed below,
        # so nine field associations must be collected -- not three.
        fields = []
        for i in range(9):
            fields.append(inputhelp.get_selected_array_field(self, i))
        # - Simply grab the names
        names = []
        for i in range(9):
            names.append(inputhelp.get_selected_array_name(self, i))
        # Pass array names and associations on to process
        # Get the input arrays
        wpdi = dsa.WrapDataObject(pdi)
        arrs = []
        for i in range(9):
            arrs.append(inputhelp.get_array(wpdi, fields[i], names[i]))
        # grab coordinates for each part of boring machine at time idx as row
        executive = self.GetExecutive()
        outInfo = executive.GetOutputInformation(0)
        idx = int(outInfo.Get(executive.UPDATE_TIME_STEP()) / self.__dt)
        pts = []
        for i in range(3):
            x = arrs[i * 3][idx]
            y = arrs[i * 3 + 1][idx]
            z = arrs[i * 3 + 2][idx]
            pts.append((x, y, z))
        # now execute a points-to-tube filter
        vtk_pts = vtk.vtkPoints()
        for pt in pts:
            vtk_pts.InsertNextPoint(pt[0], pt[1], pt[2])
        poly = vtk.vtkPolyData()
        poly.SetPoints(vtk_pts)
        pointsToTube(
            poly, radius=self.__diameter / 2, numSides=20, nrNbr=False, pdo=pdo
        )
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Advertise one pipeline timestep per table row."""
        import numpy as np
        executive = self.GetExecutive()
        outInfo = executive.GetOutputInformation(0)
        # Calculate list of timesteps here
        # - Get number of rows in table and use that for num time steps
        nrows = int(self.GetInput().GetColumn(0).GetNumberOfTuples())
        xtime = np.arange(0, nrows * self.__dt, self.__dt, dtype=float)
        outInfo.Remove(executive.TIME_STEPS())
        for t in xtime:
            outInfo.Append(executive.TIME_STEPS(), t)
        # Remove and set time range info
        outInfo.Remove(executive.TIME_RANGE())
        outInfo.Append(executive.TIME_RANGE(), xtime[0])
        outInfo.Append(executive.TIME_RANGE(), xtime[-1])
        return 1
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/tunneling/animate.py",
"copies": "1",
"size": "3253",
"license": "bsd-3-clause",
"hash": -5531578759989260000,
"line_mean": 36.8255813953,
"line_max": 285,
"alpha_frac": 0.5976022133,
"autogenerated": false,
"ratio": 3.688208616780045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4785810830080045,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'Annotation',
'InputAnnotation',
'Vars',
#'WrapsMixin',
#'InputWraps',
'InputFlush',
'Test',
'Inhibitor',
'OutputAnnotation',
'Expr',
#'OutputWraps',
'OutputFlush',
'Flush',
)
import abc
import collections
import itertools
from .black_token import BlackToken
from .errors import AnnotationError
from .expression import EName, EList, ETuple, make_expression
from .net_element import NetElement
from .token_list import TokenList
# from .node import BasePlace, BaseTransition
class Annotation(NetElement, metaclass=abc.ABCMeta):
    """Abstract base class for arc annotations."""
    @abc.abstractmethod
    def label(self):
        """Return the string used to label this annotation in a net drawing."""
        raise NotImplementedError()
class InputAnnotation(Annotation):
    """Abstract base for annotations on input arcs: they declare which
    variables they bind and how tokens are matched and consumed."""

    @abc.abstractmethod
    def variables(self):
        """Return the tuple of variable names bound by this annotation."""
        raise NotImplementedError()

    @abc.abstractmethod
    def filter_substitutions(self, tokens):
        """Yield variable -> token substitutions compatible with ``tokens``."""
        raise NotImplementedError()

    def _remove_token(self, tokens, token):
        # Consume a single token from the place's token collection.
        tokens.remove(token)

    def remove_substitution(self, tokens, substitution):
        """Consume from ``tokens`` every token bound by ``substitution``."""
        for variable in self.variables():
            self._remove_token(tokens, substitution[variable])

    def _iter_tokens(self, tokens):
        # BUG FIX: use the public accessor instead of reaching into the
        # subclass attribute ``self._variables``, which only Vars defines.
        variables = self.variables()
        if len(variables) > 1:
            # Every ordered selection of len(variables) distinct tokens.
            for combination in itertools.combinations(tokens, len(variables)):
                for permutation in itertools.permutations(combination):
                    yield permutation
        else:
            # Single variable: yield each token on its own.  (An unused
            # local ``variable = variables[0]`` was removed here.)
            for token in tokens:
                yield (token, )

    def _iter_substitutions(self, tokens):
        # Pair the variable names with each candidate token tuple.
        for subst_tokens in self._iter_tokens(tokens):
            yield collections.OrderedDict(zip(self.variables(), subst_tokens))
class Vars(InputAnnotation):
    """Input annotation binding one variable (``Vars('x')``) or several
    distinct variables (a list/tuple expression), with an optional guard
    expression evaluated against each candidate substitution."""
    def __init__(self, expression, guard=None):
        super().__init__()
        self._variables = None
        self._guard = make_expression(guard)
        self._expression = make_expression(expression)
        expression = self._expression
        variables = []
        # Accept a single name, or a list/tuple of distinct names; anything
        # else is rejected with a descriptive error.
        if isinstance(expression, EName):
            variables.append(expression.name)
        elif isinstance(expression, (EList, ETuple)):
            for subexpression in expression.elements:
                if isinstance(subexpression, EName):
                    variable = subexpression.name
                    if variable in variables:
                        raise AnnotationError("invalid input annotation {!r}: repeated variable {!r}".format(str(expression), variable))
                    variables.append(variable)
                else:
                    raise AnnotationError("invalid input annotation {!r}: {!r} is not a variable name".format(str(expression), str(subexpression)))
        else:
            raise AnnotationError("invalid input annotation {!r}: not a variable name".format(str(expression)))
        self._variables = tuple(variables)
    @property
    def guard(self):
        # The guard expression (or None) used to filter substitutions.
        return self._guard
    def variables(self):
        """Return the tuple of variable names bound by this annotation."""
        return self._variables
    def __repr__(self):
        args = [repr(str(self._expression))]
        if self._guard is not None:
            args.append("guard={!r}".format(str(self._guard)))
        return "{}({})".format(self.__class__.__name__, ', '.join(args))
    def label(self):
        """Render as the expression, with the guard in brackets if present."""
        label = str(self._expression)
        if self._guard is not None:
            label += " [{}]".format(str(self._guard))
        return label
    def filter_substitutions(self, tokens):
        """Yield substitutions over ``tokens``; the guard (if any) filters them."""
        guard = self.guard
        if guard is None:
            return self._iter_substitutions(tokens)
        else:
            return filter(lambda dct: guard.evaluate(globals_d=self._net.globals_d, locals_d=dct), self._iter_substitutions(tokens))
class WrapsMixin(object):
    """Mixin for annotations that delegate to a wrapped annotation.

    Subclasses set ``__wrapped_class__`` to the annotation base class that
    the wrapped object must be an instance of.
    """
    __wrapped_class__ = None
    def __init__(self, annotation):
        # Called explicitly (WrapsMixin.__init__) by the wrapper classes.
        if not isinstance(annotation, self.__wrapped_class__):
            raise AnnotationError("invalid object {!r} for {}: not an {}".format(
                annotation, self.__class__.__name__, self.__wrapped_class__.__name__))
        self._wrapped_annotation = annotation
    @property
    def wrapped_annotation(self):
        # The annotation this wrapper delegates to.
        return self._wrapped_annotation
    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self._wrapped_annotation)
class InputWraps(InputAnnotation, WrapsMixin):
    """Input annotation that forwards every operation to a wrapped
    InputAnnotation; base class for the Test and InputFlush wrappers."""
    __wrapped_class__ = InputAnnotation
    def __init__(self, annotation):
        super().__init__()
        WrapsMixin.__init__(self, annotation=annotation)
    def bind(self, net):
        # Bind both the wrapper and the wrapped annotation to the net.
        super().bind(net)
        self._wrapped_annotation.bind(net)
    def variables(self):
        return self._wrapped_annotation.variables()
    def filter_substitutions(self, tokens):
        yield from self._wrapped_annotation.filter_substitutions(tokens)
    def _remove_token(self, tokens, token):
        return self._wrapped_annotation._remove_token(tokens, token)
    def remove_substitution(self, tokens, substitution):
        return self._wrapped_annotation.remove_substitution(tokens, substitution)
    def _iter_tokens(self, tokens):
        yield from self._wrapped_annotation._iter_tokens(tokens)
    def _iter_substitutions(self, tokens):
        yield from self._wrapped_annotation._iter_substitutions(tokens)
class InputFlush(InputWraps):
    """Flush variant of an input annotation: binds its single variable to a
    TokenList holding *all* matching tokens instead of one token at a time."""
    def __init__(self, annotation):
        super().__init__(annotation)
        # Flushing collects everything into one variable, so exactly one
        # bound variable is required.
        if len(self._wrapped_annotation.variables()) != 1:
            raise AnnotationError("invalid annotation {!r} for {}: a single variable is needed".format(
                self._wrapped_annotation, self.__class__.__name__))
    def label(self):
        return self._wrapped_annotation.label() + " !"
    def remove_substitution(self, tokens, substitution):
        # The bound value is a collection of tokens; remove each one.
        for variable in self.variables():
            values = substitution[variable]
            for value in values:
                self._wrapped_annotation._remove_token(tokens, value)
    def filter_substitutions(self, tokens):
        # Yield one substitution binding the variable to all matched tokens.
        variable = self.variables()[0]
        token_list = TokenList()
        for substitution in self._wrapped_annotation.filter_substitutions(tokens):
            token_list.add(substitution[variable])
        yield collections.OrderedDict([(variable, token_list)])
class Test(InputWraps):
    """Read-arc annotation: matches tokens like the wrapped annotation but
    never consumes them."""
    def label(self):
        return self._wrapped_annotation.label() + " ?"
    def _remove_token(self, tokens, token):
        # Intentionally a no-op: tested tokens stay in the place.
        pass
    def remove_substitution(self, tokens, substitution):
        # Intentionally a no-op: tested tokens stay in the place.
        pass
class Inhibitor(InputAnnotation):
    """Inhibitor arc: enabled only when the place holds no tokens."""
    def filter_substitutions(self, tokens):
        # Yield a single (empty) substitution iff ``tokens`` is empty;
        # iterating avoids requiring a __len__ on the token collection.
        it = iter(tokens)
        try:
            next(it)
        except StopIteration:
            yield None
    def remove_substitution(self, tokens, substitution):
        # Nothing to consume: an inhibitor matches the absence of tokens.
        pass
    def variables(self):
        # An inhibitor binds no variables.
        return ()
    def label(self):
        return "<>"
class OutputAnnotation(Annotation):
    """Abstract base for annotations on output arcs: they produce the token
    added to the target place's collection."""
    def add_token(self, tokens, token):
        # Default: add the produced token as a single element.
        tokens.add(token)
    @abc.abstractmethod
    def produce_token(self, substitution):
        """Return the token produced under the given substitution."""
        raise NotImplementedError()
class Expr(OutputAnnotation):
    """Output annotation producing the value of an expression, evaluated in
    the net's globals with the firing substitution as locals."""
    def __init__(self, expression):
        super().__init__()
        self._expression = make_expression(expression)
    @property
    def expression(self):
        # The compiled expression object.
        return self._expression
    def produce_token(self, substitution):
        return self._expression.evaluate(globals_d=self._net.globals_d, locals_d=substitution)
    def label(self):
        return str(self._expression)
    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, str(self._expression))
class OutputWraps(OutputAnnotation, WrapsMixin):
    """Output annotation that forwards every operation to a wrapped
    OutputAnnotation; base class for the OutputFlush wrapper."""
    __wrapped_class__ = OutputAnnotation
    def __init__(self, annotation):
        super().__init__()
        WrapsMixin.__init__(self, annotation=annotation)
    def bind(self, net):
        # Bind both the wrapper and the wrapped annotation to the net.
        super().bind(net)
        self._wrapped_annotation.bind(net)
    def add_token(self, tokens, token):
        return self._wrapped_annotation.add_token(tokens, token)
    def produce_token(self, substitution):
        return self._wrapped_annotation.produce_token(substitution)
class OutputFlush(OutputWraps):
    """Flush variant of an output annotation: the produced value is an
    iterable whose elements are each added to the place individually."""
    def add_token(self, tokens, token):
        # ``token`` is a collection here; unpack it into the place.
        for t in token:
            self._wrapped_annotation.add_token(tokens, t)
    def label(self):
        return self._wrapped_annotation.label() + ' !'
def Flush(annotation):
    """Wrap *annotation* in the matching flush wrapper (input or output)."""
    if isinstance(annotation, InputAnnotation):
        wrapper = InputFlush
    elif isinstance(annotation, OutputAnnotation):
        wrapper = OutputFlush
    else:
        raise TypeError("cannot flush object {!r}".format(annotation))
    return wrapper(annotation)
| {
"repo_name": "simone-campagna/petra",
"path": "petra/annotation.py",
"copies": "1",
"size": "8637",
"license": "apache-2.0",
"hash": 7436855474642049000,
"line_mean": 30.4072727273,
"line_max": 147,
"alpha_frac": 0.6274169272,
"autogenerated": false,
"ratio": 4.433778234086242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004061746446921746,
"num_lines": 275
} |
__all__ = [
'Anomaly',
'Bandit',
'Burst',
'Classifier',
'Clustering',
'NearestNeighbor',
'Recommender',
'Regression',
'Stat',
'Graph',
'Weight',
]
class _EmbeddedUnavailable(object):
def __init__(self, *args, **kwargs):
raise RuntimeError('Embedded Jubatus Python module is not installed.')
# Prefer the native embedded_jubatus extension; when it is not installed,
# every exported name degrades to a placeholder class that raises
# RuntimeError on construction, so importing this module always succeeds.
try:
    from embedded_jubatus import Anomaly
    from embedded_jubatus import Bandit
    from embedded_jubatus import Burst
    from embedded_jubatus import Classifier
    from embedded_jubatus import Clustering
    from embedded_jubatus import NearestNeighbor
    from embedded_jubatus import Recommender
    from embedded_jubatus import Regression
    from embedded_jubatus import Stat
    from embedded_jubatus import Weight
    from embedded_jubatus import Graph
except ImportError:
    # embedded_jubatus is an optional dependency.
    Anomaly = _EmbeddedUnavailable
    Bandit = _EmbeddedUnavailable
    Burst = _EmbeddedUnavailable
    Classifier = _EmbeddedUnavailable
    Clustering = _EmbeddedUnavailable
    NearestNeighbor = _EmbeddedUnavailable
    Recommender = _EmbeddedUnavailable
    Regression = _EmbeddedUnavailable
    Stat = _EmbeddedUnavailable
    Graph = _EmbeddedUnavailable
    Weight = _EmbeddedUnavailable
| {
"repo_name": "jubatus/jubatus-python-client",
"path": "jubatus/embedded.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": 118139915532590820,
"line_mean": 28.380952381,
"line_max": 78,
"alpha_frac": 0.7155591572,
"autogenerated": false,
"ratio": 4.269896193771626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005423280423280424,
"num_lines": 42
} |
__all__ = [
'Application',
'HttpError',
'Request',
'Response',
'ResponseClosed',
]
import collections.abc
import dataclasses
import enum
import logging
import typing
import urllib.parse
from g1.asyncs.bases import locks
from g1.asyncs.bases import queues
from g1.asyncs.bases import servers
from g1.asyncs.bases import streams
from g1.asyncs.bases import tasks
from g1.bases import contexts
from g1.bases.assertions import ASSERT
from . import consts
LOG = logging.getLogger(__name__)
class HttpError(Exception):
    """HTTP error response (3xx through 5xx) raised by handlers."""

    @classmethod
    def redirect(cls, status, message, location):
        """Build a 3xx error carrying a Location header."""
        ASSERT.in_range(status, (300, 400))
        return cls(status, message, {consts.HEADER_LOCATION: location})

    # The headers argument can be either dict or pairs.  Note that while
    # HTTP headers can be duplicated, we still use a dict to represent
    # headers because here we are a producer rather than a parser of
    # HTTP headers.
    def __init__(self, status, message, headers=None, content=b''):
        super().__init__(message)
        self.status = ASSERT.in_range(_cast_status(status), (300, 600))
        header_dict = dict(headers) if headers is not None else {}
        self.headers = ASSERT.predicate(
            header_dict,
            lambda hdrs: all(
                isinstance(key, str) and isinstance(value, str)
                for key, value in hdrs.items()
            ),
        )
        self.content = ASSERT.isinstance(content, bytes)

    @property
    def location(self):
        # The Location header value, or None when absent.
        return self.headers.get(consts.HEADER_LOCATION)
@dataclasses.dataclass(frozen=True)
class Request:
    """Immutable HTTP request: a WSGI environ plus a mutable per-request
    context object."""

    # WSGI environ mapping for this request.
    environ: typing.Mapping[str, str]
    #
    # NOTE: Handlers are expected to mutate context content directly.
    # Although the context object supports a hierarchical interface (and
    # thus no mutation on the content), mutation is preferred because,
    # with mutation, handlers do not have to be fitted into a hierarchy,
    # and sibling handlers may see context changes made by each other.
    #
    # Of course, when a handler passes the context to a non-handler, and
    # you are worried that the non-handler code might "corrupt" the
    # context content, the handler may use context's hierarchy interface
    # to isolate context changes made by the non-handler code.
    #
    context: contexts.Context

    def get_header(self, name, default=None):
        """Return the value of request header ``name``, or ``default``."""
        # WSGI exposes request headers as HTTP_<NAME>, upper-cased, with
        # dashes replaced by underscores.
        environ_name = 'HTTP_' + name.replace('-', '_').upper()
        return self.environ.get(environ_name, default)

    @property
    def method(self):
        # Request method, e.g. 'GET'.
        return self.environ['REQUEST_METHOD']

    @property
    def path_str(self):
        # Raw request path string.
        return self.environ['PATH_INFO']

    @property
    def path(self):
        # Request path wrapped in the project's UrlPath type.
        return consts.UrlPath(self.path_str)

    @property
    def query_str(self):
        # Raw query string (possibly empty).
        return self.environ['QUERY_STRING']

    @property
    def query(self):
        # Query parameters as a dict of name -> list of values.
        return urllib.parse.parse_qs(self.query_str)

    @property
    def query_list(self):
        # Query parameters as a list of (name, value) pairs.
        return urllib.parse.parse_qsl(self.query_str)

    @property
    def content(self):
        # Request body stream (WSGI input file-like object).
        return self.environ['wsgi.input']
class ResponseClosed(ValueError):
    """Raised when writing to a closed response object.

    This inherits from ValueError as file-like objects raise ValueError
    when writing to a closed file.
    """
# A proxy object to expose only public interface.
class Response:
    """Public facade delegating to the private response implementation."""
    def __init__(self, private):
        self._private = private
    @property
    def status(self):
        # HTTP status of the underlying response.
        return self._private.status
    @status.setter
    def status(self, status):
        self._private.status = status
    @property
    def headers(self):
        # Header mapping of the underlying response (mutable until commit).
        return self._private.headers
    def commit(self):
        """Freeze status and headers; the body may still be written."""
        return self._private.commit()
    async def write(self, data):
        """Write a chunk of the response body."""
        return await self._private.write(data)
    def sendfile(self, file):
        """Delegate sending the response body from a file object."""
        return self._private.sendfile(file)
    def close(self):
        """Finish the response."""
        return self._private.close()
@enum.unique
class _SendMechanisms(enum.Enum):
    """How the response body is transmitted; UNDECIDED until the first
    write/sendfile call commits to one mechanism."""
    UNDECIDED = enum.auto()
    SEND = enum.auto()
    SENDFILE = enum.auto()
class _Response:
"""Response object.
A response is in one of three states:
* UNCOMMITTED:
A response starts in this state, and transitions to COMMITTED if
`commit` is called, and to CLOSED if `close` is called. Users
may set status code and headers and write to the response body
when response is in this state.
* COMMITTED:
A response transitions to CLOSED if `close` is called. In this
state, users may only write to the response body, may read
response data.
* CLOSED:
A response is read-only in this state.
"""
class Headers(collections.abc.MutableMapping):
def __init__(self, is_uncommitted):
self._is_uncommitted = is_uncommitted
self._headers = {}
def __len__(self):
return len(self._headers)
def __iter__(self):
return iter(self._headers)
def __getitem__(self, header):
return self._headers[header]
def __setitem__(self, header, value):
ASSERT.true(self._is_uncommitted())
ASSERT.isinstance(header, str)
ASSERT.isinstance(value, str)
self._headers[header] = value
def __delitem__(self, header):
ASSERT.true(self._is_uncommitted())
del self._headers[header]
@classmethod
def _make_precommit(cls):
return streams.BytesStream()
def __init__(self, start_response, is_sendfile_supported):
self._start_response = start_response
self._status = consts.Statuses.OK
self.headers = self.Headers(self.is_uncommitted)
self._precommit = self._make_precommit()
# Set capacity to 1 to prevent excessive buffering.
self._body = queues.Queue(capacity=1)
self.file = None
self._send_mechanism = _SendMechanisms.UNDECIDED
self._send_mechanism_decided = locks.Event()
if not is_sendfile_supported:
self._set_send_mechanism(_SendMechanisms.SEND)
def is_uncommitted(self):
return self._precommit is not None and not self._body.is_closed()
def reset(self):
"""Reset response status, headers, and content."""
ASSERT.true(self.is_uncommitted())
self._status = consts.Statuses.OK
# Do NOT call `self.headers.clear`, but replace it with a new
# headers object instead because application code might still
# keep a reference to the old headers object, and clearing it
# could cause confusing results.
self.headers = self.Headers(self.is_uncommitted)
# It is safe to replace `_precommit` on uncommitted response.
self._precommit.close()
self._precommit = self._make_precommit()
    def commit(self):
        """Commit the response.

        Once the response is committed, you cannot change its status or
        headers, but the response is not done yet, and you may continue
        writing its content until it is closed.  Committing twice is a
        no-op.
        """
        if not self.is_uncommitted():
            return
        self._start_response(
            self._format_status(self._status),
            list(self.headers.items()),
        )
        # Move any buffered pre-commit body data into the body queue.
        # Non-closed BytesStream returns None when it is empty.
        data = self._precommit.read_nonblocking()
        if data is not None:
            self._body.put_nonblocking(data)
        # Dropping `_precommit` is what marks the response committed
        # (see `is_uncommitted`).
        self._precommit.close()
        self._precommit = None
    def cancel(self, exc):
        """Turn the response into a 503 after ``exc`` aborted the handler.

        If still uncommitted, the response is reset to 503 with a
        Retry-After header.  Otherwise the body is force-closed and
        ``start_response`` is re-invoked with ``exc_info`` so the server
        can handle the already-started response.
        """
        status = consts.Statuses.SERVICE_UNAVAILABLE
        headers = [(consts.HEADER_RETRY_AFTER, '60')]
        if self.is_uncommitted():
            self.reset()
            self.status = status
            self.headers.update(headers)
            return
        # Drop remaining body data; it will never be sent.
        self._body.close(graceful=False)
        self._start_response(
            self._format_status(status),
            headers,
            (exc.__class__, exc, exc.__traceback__),
        )
    def err_after_commit(self, exc):
        """Record exception raised after commit.

        This first closes the response, dropping remaining body data,
        and then calls start_response with HTTP 5xx and exc_info.  If
        the WSGI server has not yet started sending response, it resets
        the response to HTTP 500; otherwise it re-raises the exception.
        """
        ASSERT.false(self.is_uncommitted())
        # Drop body data that was never sent.
        self._body.close(graceful=False)
        # Calling start_response again with exc_info lets the server
        # decide whether to replace the status or to re-raise.
        self._start_response(
            self._format_status(consts.Statuses.INTERNAL_SERVER_ERROR),
            [],
            (exc.__class__, exc, exc.__traceback__),
        )
@staticmethod
def _format_status(status):
return '{status.value} {status.phrase}'.format(status=status)
    @property
    def status(self):
        """Response status (a ``consts.Statuses`` member)."""
        return self._status

    @status.setter
    def status(self, status):
        # Status may only be changed while the response is uncommitted.
        ASSERT.true(self.is_uncommitted())
        self._status = _cast_status(status)
    async def read(self):
        """Dequeue the next chunk of body data.

        Returns ``b''`` once the response is closed (end of stream).
        """
        try:
            return await self._body.get()
        except queues.Closed:
            return b''
    async def write(self, data):
        """Append ``data`` to the response body; return bytes written.

        Raises ``ResponseClosed`` when the response is already closed.
        """
        if self._body.is_closed():
            raise ResponseClosed('response is closed')
        # Writing body data and sendfile are mutually exclusive.
        ASSERT.is_not(self._send_mechanism, _SendMechanisms.SENDFILE)
        self._set_send_mechanism(_SendMechanisms.SEND)
        if not data:
            return 0
        if self.is_uncommitted():
            # Before commit, data goes to the pre-commit buffer.
            return self._precommit.write_nonblocking(data)
        try:
            await self._body.put(data)
        except queues.Closed:
            # Re-raise ValueError like other file-like classes.
            raise ResponseClosed('response is closed') from None
        return len(data)
    def sendfile(self, file):
        """Send ``file`` as the body via the server's file wrapper."""
        if self._body.is_closed():
            raise ResponseClosed('response is closed')
        # sendfile can be called only once.
        ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
        ASSERT.not_none(file)
        self._set_send_mechanism(_SendMechanisms.SENDFILE)
        self.file = file
    def close(self):
        """Commit (if not yet committed) and close the response body."""
        try:
            self.commit()
        finally:
            # Although unlikely, add `finally` in case commit errs out.
            self._send_mechanism_decided.set()
            self._body.close()
    def _set_send_mechanism(self, mechanism):
        """Fix the send mechanism and wake waiters of the decision."""
        ASSERT.is_not(mechanism, _SendMechanisms.UNDECIDED)
        self._send_mechanism = mechanism
        self._send_mechanism_decided.set()
    async def wait_send_mechanism_decided(self):
        """Block until the send mechanism (send vs sendfile) is known."""
        await self._send_mechanism_decided.wait()
class Application:
    """Adapt an async request handler to the WSGI application interface."""

    def __init__(self, handler):
        self._handler = handler
        # Tracks in-flight handler tasks so they can be supervised.
        self._handler_queue = tasks.CompletionQueue()

    async def serve(self):
        """Supervise handler tasks until the queue is shut down."""
        await servers.supervise_server(self._handler_queue, ())

    def shutdown(self):
        """Stop accepting new requests."""
        self._handler_queue.close()

    async def __call__(self, environ, start_response):
        """WSGI entry point: spawn the handler and return the body iterable."""
        ASSERT.false(self._handler_queue.is_closed())
        request = Request(environ=environ, context=contexts.Context())
        file_wrapper = environ.get('wsgi.file_wrapper')
        response = _Response(start_response, file_wrapper is not None)
        # Handler task may linger on after application completes.  You
        # could do tricks with this feature.
        self._handler_queue.spawn(self._run_handler(request, response))
        # Wait until we know whether to stream chunks or to sendfile.
        await response.wait_send_mechanism_decided()
        if response.file is None:
            return self._iter_content(response)
        else:
            return file_wrapper(response.file)

    async def _run_handler(self, request, response):
        """Run the user handler and translate its outcome."""
        try:
            await self._handler(request, Response(response))
        except ResponseClosed:
            # The peer closed the response; nothing more to do.
            pass
        except Exception as exc:
            await self._on_handler_error(request, response, exc)
        except BaseException as exc:
            # Most likely a task cancellation, not really an error.
            response.cancel(exc)
            raise
        finally:
            response.close()

    @staticmethod
    async def _on_handler_error(request, response, exc):
        """Convert a handler exception into an HTTP error response."""
        if not response.is_uncommitted():
            response.err_after_commit(exc)
            return
        response.reset()
        log_args = (
            request.method,
            request.path,
            '?' if request.query_str else '',
            request.query_str,
        )
        if not isinstance(exc, HttpError):
            # Unexpected crash: log loudly and answer with a plain 500.
            LOG.error(
                '%s %s%s%s context=%r: '
                'handler crashes before commits response',
                *log_args,
                request.context,
                exc_info=exc,
            )
            # TODO: What headers should we set in this case?
            response.status = consts.Statuses.INTERNAL_SERVER_ERROR
            return
        # HttpError carries the intended status/headers/content; choose
        # the log level by status class.
        log_args += (exc.status.value, exc.status.phrase)
        if 300 <= exc.status < 400:
            LOG.debug(
                '%s %s%s%s -> %d %s %s ; reason: %s', \
                *log_args, exc.location, exc
            )
        elif 400 <= exc.status < 500:
            LOG.info('%s %s%s%s -> %d %s ; reason: %s', *log_args, exc)
        elif exc.status == 503:
            LOG.warning('%s %s%s%s -> %d %s ; reason: %s', *log_args, exc)
        else:
            LOG.warning('%s %s%s%s -> %d %s', *log_args, exc_info=exc)
        response.status = exc.status
        response.headers.update(exc.headers)
        if exc.content:
            await response.write(exc.content)

    @staticmethod
    async def _iter_content(response):
        """Yield body chunks until end of stream; always close."""
        try:
            while True:
                data = await response.read()
                if not data:
                    break
                yield data
        finally:
            # There are two code paths that may reach here.  One is when
            # handler returns, and _run_handler closes the response.
            # The other is when this async generator is cancelled.  In
            # the latter case, you must call `response.close` to notify
            # handler.
            response.close()
def _cast_status(status):
    """Coerce a bare status code into a ``consts.Statuses`` member."""
    # pylint: disable=no-value-for-parameter
    return consts.Statuses(status)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/webs/g1/webs/wsgi_apps.py",
"copies": "1",
"size": "14211",
"license": "mit",
"hash": -5161332765299994000,
"line_mean": 29.6271551724,
"line_max": 76,
"alpha_frac": 0.6064316375,
"autogenerated": false,
"ratio": 4.113169319826339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5219600957326338,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ArchiveTypes',
'Compressors',
'assert_command_exist',
'check_command_exist',
'export_path',
'get_url_path',
'guess_archive_type',
'guess_compressor',
'remove_archive_suffix',
]
import enum
import logging
import os
import shutil
import urllib.parse
from pathlib import Path
from g1.bases.assertions import ASSERT
from . import bases
LOG = logging.getLogger(__name__)
class ArchiveTypes(enum.Enum):
    """Archive container formats recognized from filename suffixes."""
    UNKNOWN = enum.auto()
    TAR = enum.auto()
    ZIP = enum.auto()
class Compressors(enum.Enum):
    """Compression schemes recognized from filename suffixes."""
    UNKNOWN = enum.auto()
    # The file is an archive but not compressed (e.g., plain ``.tar``).
    UNCOMPRESSED = enum.auto()
    BZIP2 = enum.auto()
    GZIP = enum.auto()
    XZ = enum.auto()
    ZIP = enum.auto()
def assert_command_exist(command):
    """Assert that ``command`` can be found on PATH."""
    ASSERT.predicate(command, shutil.which, message='expect command exist: {}')
def check_command_exist(command):
    """Log a warning (but do not fail) when ``command`` is not on PATH."""
    if shutil.which(command) is not None:
        return
    LOG.warning(
        'command %s does not exist; some features are unavailable', command
    )
def export_path(var, path):
    """Prepend path to a PATH-like environment variable."""
    current = os.environ.get(var)
    if current:
        updated = '%s:%s' % (path, current)
    else:
        updated = str(path)
    LOG.info('prepend %s: %r', var, updated)
    # In dry-run mode, only log what would have been done.
    if not bases.get_dry_run():
        os.environ[var] = updated
def get_url_path(url):
    """Return the path component of ``url`` as a ``Path`` object."""
    parsed = urllib.parse.urlparse(url)
    return Path(parsed.path)
# Suffix table mapping filename endings to (archive type, compressor).
# Entries are matched first-to-last with ``str.endswith``.
_SUFFIXES = (
    ('.tar', ArchiveTypes.TAR, Compressors.UNCOMPRESSED),
    ('.tar.bz2', ArchiveTypes.TAR, Compressors.BZIP2),
    ('.tbz2', ArchiveTypes.TAR, Compressors.BZIP2),
    ('.tar.gz', ArchiveTypes.TAR, Compressors.GZIP),
    ('.tgz', ArchiveTypes.TAR, Compressors.GZIP),
    ('.tar.xz', ArchiveTypes.TAR, Compressors.XZ),
    ('.txz', ArchiveTypes.TAR, Compressors.XZ),
    ('.zip', ArchiveTypes.ZIP, Compressors.ZIP),
    # Put non-archive suffixes last as they overlap suffixes above.
    ('.bz2', ArchiveTypes.UNKNOWN, Compressors.BZIP2),
    ('.gz', ArchiveTypes.UNKNOWN, Compressors.GZIP),
    ('.xz', ArchiveTypes.UNKNOWN, Compressors.XZ),
)
def guess_archive_type(filename):
    """Guess the archive container type from ``filename``'s suffix."""
    archive_type, _ = _guess(filename)
    return archive_type
def guess_compressor(filename):
    """Guess the compression scheme from ``filename``'s suffix."""
    _, compressor = _guess(filename)
    return compressor
def _guess(filename):
    """Return the (archive type, compressor) pair for ``filename``."""
    matches = (
        (archive_type, compressor)
        for suffix, archive_type, compressor in _SUFFIXES
        if filename.endswith(suffix)
    )
    # Fall back to UNKNOWN/UNKNOWN when no suffix matches.
    return next(matches, (ArchiveTypes.UNKNOWN, Compressors.UNKNOWN))
def remove_archive_suffix(filename):
    """Strip a recognized archive suffix; return ``filename`` unchanged
    when no archive suffix matches."""
    for suffix, archive_type, _ in _SUFFIXES:
        # Compressor-only suffixes (UNKNOWN archive type) do not count.
        if archive_type is ArchiveTypes.UNKNOWN:
            continue
        if filename.endswith(suffix):
            return filename[:-len(suffix)]
    return filename
| {
"repo_name": "clchiou/garage",
"path": "py/g1/scripts/g1/scripts/utils.py",
"copies": "1",
"size": "2637",
"license": "mit",
"hash": 2980067654430274600,
"line_mean": 24.3557692308,
"line_max": 79,
"alpha_frac": 0.6533940083,
"autogenerated": false,
"ratio": 3.3464467005076144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9499840708807614,
"avg_score": 0,
"num_lines": 104
} |
__all__ = [
'arguments',
'example',
'keyword',
'seealso',
'table',
'underline'
]
class Parser:
    """Base class for documentation line parsers."""

    def __init__(self, pctxt):
        # Keep the parsing context for use by subclasses.
        self.pctxt = pctxt

    def parse(self, line):
        """Default implementation: return the line unmodified."""
        return line
class PContext:
    """Cursor over a list of content lines being parsed."""

    def __init__(self, templates=None):
        self.set_content_list([])
        self.templates = templates

    def set_content(self, content):
        """Load content given as a single newline-separated string."""
        self.set_content_list(content.split("\n"))

    def set_content_list(self, content):
        """Load content given as a list of lines and rewind the cursor."""
        self.lines = content
        self.nblines = len(self.lines)
        self.i = 0
        self.stop = False

    def get_lines(self):
        """Return the underlying list of lines."""
        return self.lines

    def eat_lines(self):
        """Advance past consecutive non-blank lines; return the count."""
        count = 0
        while self.has_more_lines():
            if not self.lines[self.i].strip():
                break
            count += 1
            self.next()
        return count

    def eat_empty_lines(self):
        """Advance past consecutive blank lines; return the count."""
        count = 0
        while self.has_more_lines():
            if self.lines[self.i].strip():
                break
            count += 1
            self.next()
        return count

    def next(self, count=1):
        """Move the cursor forward by ``count`` lines."""
        self.i += count

    def has_more_lines(self, offset=0):
        """Tell whether a line exists at the cursor plus ``offset``."""
        return (self.i + offset) < self.nblines

    def get_line(self, offset=0):
        """Return the line at the cursor plus ``offset``, right-stripped."""
        return self.lines[self.i + offset].rstrip()
# Get the indentation of a line
def get_indent(line):
    """Return the number of leading space characters in ``line``."""
    # lstrip(' ') removes only spaces, matching the original per-char scan.
    return len(line) - len(line.lstrip(' '))
# Remove unneeded indentation
def remove_indent(list):
    """Shift all lines left by the smallest indentation found (in place).

    Blank lines are ignored when measuring the minimum indentation but
    are still sliced along with every other line.
    """
    # Detect the minimum indentation among non-blank lines.
    min_indent = -1
    for line in list:
        if not line.strip():
            continue
        indent = get_indent(line)
        if min_indent < 0 or indent < min_indent:
            min_indent = indent
    # Dedent every line by that amount, mutating the list in place.
    if min_indent > 0:
        list[:] = [line[min_indent:] for line in list]
| {
"repo_name": "cbonte/haproxy-dconv",
"path": "parser/__init__.py",
"copies": "1",
"size": "1941",
"license": "apache-2.0",
"hash": -5776088203086298000,
"line_mean": 22.962962963,
"line_max": 71,
"alpha_frac": 0.561566203,
"autogenerated": false,
"ratio": 3.828402366863905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4889968569863905,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ArrayMath',
'NormalizeArray',
'PercentThreshold',
'ArraysToRGBA',
]
__displayname__ = 'Math Operations'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
###############################################################################
# ---- ArrayMath ----#
class ArrayMath(FilterPreserveTypeBase):
"""This filter allows the user to select two input data arrays on which to
    perform math operations. The input arrays are used in their order of
selection for the operations.
Args:
multiplier (float) : a static shifter/scale factor across the array
after normalization.
new_name (str): The new array's string name
operation (str, int, or callable): The operation as a string key, int
index, or callable method
**Available Math Operations:**
- `add`: This adds the two data arrays together
- `subtract`: This subtracts input array 2 from input array 1
- `multiply`: Multiplies the two data arrays together
- `divide`: Divide input array 1 by input array 2 (arr1/arr2)
- `correlate`: Use `np.correlate(arr1, arr2, mode='same')`
"""
__displayname__ = 'Array Math'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterPreserveTypeBase.__init__(self)
# Parameters:
self.__multiplier = kwargs.get('multiplier', 1.0)
self.__new_name = kwargs.get('new_name', 'Mathed Up')
self.__input_array_1 = [None, None]
self.__input_array_2 = [None, None]
# Convert operation to callable method
op = kwargs.get('operation', 'add')
if isinstance(op, (str, int)):
op = self.get_operation(op)
self.__operation = op
@staticmethod
def _correlate(arr1, arr2):
"""Use ``np.correlate()`` on ``mode='same'`` on two selected arrays
from one input.
"""
return np.correlate(arr1, arr2, mode='same')
    @staticmethod
    def _multiply(arr1, arr2):
        """Multiplies two input NumPy arrays element-wise"""
        return arr1 * arr2
@staticmethod
def _divide(arr1, arr2):
"""Divides two input NumPy arrays"""
return arr1 / arr2
@staticmethod
def _add(arr1, arr2):
"""Adds two input NumPy arrays"""
return arr1 + arr2
@staticmethod
def _subtract(arr1, arr2):
"""Subtracts two input NumPy arrays"""
return arr1 - arr2
@staticmethod
def get_operations():
"""Returns the math operation methods as callable objects in a
dictionary
"""
ops = dict(
add=ArrayMath._add,
subtract=ArrayMath._subtract,
multiply=ArrayMath._multiply,
divide=ArrayMath._divide,
correlate=ArrayMath._correlate,
)
return ops
@staticmethod
def get_operation_names():
"""Gets a list of the math operation keys
Return:
list(str): the keys for getting the math operations
"""
ops = ArrayMath.get_operations()
return list(ops.keys())
@staticmethod
def get_operation(idx):
"""Gets a math operation based on an index in the keys
Return:
callable: the math operation method
"""
if isinstance(idx, str):
return ArrayMath.get_operations()[idx]
n = ArrayMath.get_operation_names()[idx]
return ArrayMath.get_operations()[n]
def _math_up(self, pdi, pdo):
"""Make sure to pass array names and integer associated fields.
Use helpers to get these properties.
"""
if pdo is None:
# TODO: test this
pdo = pdi.DeepCopy()
# Get the input arrays
field1, name1 = self.__input_array_1[0], self.__input_array_1[1]
field2, name2 = self.__input_array_2[0], self.__input_array_2[1]
wpdi = dsa.WrapDataObject(pdi)
arr1 = _helpers.get_numpy_array(wpdi, field1, name1)
arr2 = _helpers.get_numpy_array(wpdi, field2, name2)
# Perform Math Operation
carr = self.__operation(arr1, arr2)
# apply the multiplier
carr *= self.__multiplier
# If no name given for data by user, use operator name
new_name = self.__new_name
if new_name == '':
new_name = 'Mathed Up'
# Convert to a VTK array
c = interface.convert_array(carr, name=new_name)
# Build output
pdo.DeepCopy(pdi)
pdo = _helpers.add_array(pdo, field1, c)
return pdo
#### Algorithm Methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to perfrom operation and generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perfrom task
self._math_up(pdi, pdo)
return 1
#### Seters and Geters ####
def _set_input_array_1(self, field, name):
"""Set 1st input array by name and field"""
if self.__input_array_1[0] != field:
self.__input_array_1[0] = field
self.Modified()
if self.__input_array_1[1] != name:
self.__input_array_1[1] = name
self.Modified()
def _set_input_array_2(self, field, name):
"""Set 2nd input array by name and field"""
if self.__input_array_2[0] != field:
self.__input_array_2[0] = field
self.Modified()
if self.__input_array_2[1] != name:
self.__input_array_2[1] = name
self.Modified()
def SetInputArrayToProcess(self, idx, port, connection, field, name):
"""Used to set the input array(s)
Args:
idx (int): the index of the array to process
port (int): input port (use 0 if unsure)
connection (int): the connection on the port (use 0 if unsure)
field (int): the array field (0 for points, 1 for cells, 2 for
field, and 6 for row)
name (int): the name of the array
"""
if idx == 0:
self._set_input_array_1(field, name)
elif idx == 1:
self._set_input_array_2(field, name)
else:
raise _helpers.PVGeoError(
'SetInputArrayToProcess() do not know how to handle idx: %d' % idx
)
return 1
def apply(self, input_data_object, array_name_0, array_name_1):
"""Run the algorith on an input data object, specifying array names"""
self.SetInputDataObject(input_data_object)
arr0, field0 = _helpers.search_for_array(input_data_object, array_name_0)
arr1, field1 = _helpers.search_for_array(input_data_object, array_name_1)
self.SetInputArrayToProcess(0, 0, 0, field0, array_name_0)
self.SetInputArrayToProcess(1, 0, 0, field1, array_name_1)
self.Update()
return pv.wrap(self.GetOutput())
def set_multiplier(self, val):
"""This is a static shifter/scale factor across the array after
normalization.
"""
if self.__multiplier != val:
self.__multiplier = val
self.Modified()
def get_multiplier(self):
"""Return the set multiplier/scalar"""
return self.__multiplier
def set_new_array_name(self, name):
"""Give the new array a meaningful name."""
if self.__new_name != name:
self.__new_name = name
self.Modified()
def get_new_array_name(self):
"""Get the name used for the new array"""
return self.__new_name
def set_operation(self, op):
"""Set the math operation to perform
Args:
op (str, int, or callable): The operation as a string key, int
index, or callable method
Note:
This can accept a callable method to set a custom operation as long
as its signature is: ``<callable>(arr1, arr2)``
"""
if isinstance(op, str):
op = ArrayMath.get_operations()[op]
elif isinstance(op, int):
op = ArrayMath.get_operation(op)
if self.__operation != op:
self.__operation = op
self.Modified()
###############################################################################
# ---- Normalizations ----#
class NormalizeArray(FilterPreserveTypeBase):
"""This filter allows the user to select an array from the input data set
to be normalized. The filter will append another array to that data set for
the output. The user can specify how they want to rename the array, can
choose a multiplier, and can choose from several types of common
normalizations (more functionality added as requested).
Args:
multiplier (float) : a static shifter/scale factor across the array
after normalization.
new_name (str): The new array's string name
absolute (bool):
normalization (str, int, or callable): The operation as a string key,
integer index, or callable method
**Normalization Types:**
- `feature_scale`: Feature Scale
    - `standard_score`: Standard Score
    - `log10`: Log Base 10
    - `natural_log`: Natural Log
- `just_multiply`: Only Multiply by Multiplier
"""
__displayname__ = 'Normalize Array'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterPreserveTypeBase.__init__(self)
# Parameters:
self.__multiplier = kwargs.get('multiplier', 1.0)
self.__new_name = kwargs.get('new_name', 'Normalized')
self.__absolute = kwargs.get('absolute', False)
self.__input_array = [None, None]
# Convert operation to callable method
op = kwargs.get('normalization', 'feature_scale')
if isinstance(op, (str, int)):
op = self.get_normalization(op)
self.__normalization = op
self.__shift = 0.0
#### Array normalization methods ####
@staticmethod
def _pass_array(arr):
"""Cast an input array as a NumPy array"""
return np.array(arr)
@staticmethod
def _feature_scale(arr, rng=None):
"""Returns feature scale normalization of input array"""
# TODO: implement ability to use custom range
if rng is not None:
mi = rng[0]
ma = rng[1]
else:
mi = np.nanmin(arr)
ma = np.nanmax(arr)
return (arr - mi) / (ma - mi)
    @staticmethod
    def _standard_score(arr):
        """Returns standard score (z-score) normalization of input array"""
        return (arr - np.mean(arr)) / (np.std(arr))
@staticmethod
def _log10(arr):
"""Returns log base 10 of input array"""
return np.log10(arr)
@staticmethod
def _log_nat(arr):
"""Returns natural logarithm of input array"""
return np.log(arr)
@staticmethod
def get_normalizations():
"""All Available normalizations
Return:
dict: dictionary of callable methods for normalizing an array
"""
ops = dict(
feature_scale=NormalizeArray._feature_scale,
standard_score=NormalizeArray._standard_score,
log10=NormalizeArray._log10,
natural_log=NormalizeArray._log_nat,
just_multiply=NormalizeArray._pass_array,
)
return ops
@staticmethod
def get_normalization_names():
"""Gets a list of the normalization keys
Return:
list(str): the keys for getting the normalizations
"""
ops = NormalizeArray.get_normalizations()
return list(ops.keys())
@staticmethod
def get_normalization(idx):
"""Gets a normalization based on an index in the keys
Return:
callable: the normalization method
"""
if isinstance(idx, str):
return NormalizeArray.get_normalizations()[idx]
n = NormalizeArray.get_normalization_names()[idx]
return NormalizeArray.get_normalizations()[n]
@staticmethod
def get_array_range(pdi, field, name):
"""Returns a tuple of the range for a ``vtkDataArray`` on a
``vtkDataObject``.
"""
wpdi = dsa.WrapDataObject(pdi)
arr = _helpers.get_numpy_array(wpdi, field, name)
arr = np.array(arr)
return (np.nanmin(arr), np.nanmax(arr))
def _normalize(self, pdi, pdo):
"""Perform normalize on a data array for any given VTK data object."""
# Get input array
field, name = self.__input_array[0], self.__input_array[1]
# self.__range = NormalizeArray.get_array_range(pdi, field, name)
wpdi = dsa.WrapDataObject(pdi)
arr = _helpers.get_numpy_array(wpdi, field, name)
arr = np.array(arr, dtype=float)
# Take absolute value?
if self.__absolute:
arr = np.abs(arr)
arr += self.__shift
# Perform normalization scheme
arr = self.__normalization(arr)
# apply the multiplier
arr *= self.__multiplier
# If no name given for data by user, use operator name
new_name = self.__new_name
if new_name == '':
new_name = 'Normalized ' + name
# Convert to VTK array
c = interface.convert_array(arr, name=new_name)
# Build output
pdo.DeepCopy(pdi)
pdo = _helpers.add_array(pdo, field, c)
return pdo
#### Algorithm Methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perfrom task
self._normalize(pdi, pdo)
return 1
#### Seters and Geters ####
def SetInputArrayToProcess(self, idx, port, connection, field, name):
"""Used to set the input array(s)
Args:
idx (int): the index of the array to process
port (int): input port (use 0 if unsure)
connection (int): the connection on the port (use 0 if unsure)
field (int): the array field (0 for points, 1 for cells, 2 for
field, and 6 for row)
name (int): the name of the array
"""
if self.__input_array[0] != field:
self.__input_array[0] = field
self.Modified()
if self.__input_array[1] != name:
self.__input_array[1] = name
self.Modified()
return 1
def apply(self, input_data_object, array_name):
"""Run the algorithm on an input data object, specifying the array"""
self.SetInputDataObject(input_data_object)
arr, field = _helpers.search_for_array(input_data_object, array_name)
self.SetInputArrayToProcess(0, 0, 0, field, array_name)
self.Update()
return pv.wrap(self.GetOutput())
def set_multiplier(self, val):
"""This is a static shifter/scale factor across the array after
normalization.
"""
if self.__multiplier != val:
self.__multiplier = val
self.Modified()
def get_multiplier(self):
"""Return the set multiplier/scalar"""
return self.__multiplier
def set_new_array_name(self, name):
"""Give the new array a meaningful name."""
if self.__new_name != name:
self.__new_name = name
self.Modified()
def get_new_array_name(self):
"""Get the name of the new array"""
return self.__new_name
def set_take_absolute_value(self, flag):
"""This will take the absolute value of the array before normalization."""
if self.__absolute != flag:
self.__absolute = flag
self.Modified()
def set_normalization(self, norm):
"""Set the normalization operation to perform
Args:
norm (str, int, or callable): The operation as a string key, int
index, or callable method
Note:
This can accept a callable method to set a custom operation as long
as its signature is: ``<callable>(arr)``
"""
if isinstance(norm, str):
norm = NormalizeArray.get_normalizations()[norm]
elif isinstance(norm, int):
norm = NormalizeArray.get_normalization(norm)
if self.__normalization != norm:
self.__normalization = norm
self.Modified()
def set_shift(self, sft):
"""Set a static shifter to the input data array"""
if self.__shift != sft:
self.__shift = sft
self.Modified()
###############################################################################
class PercentThreshold(FilterBase):
"""Allows user to select a percent of the data range to threshold.
This will find the data range of the selected input array and remove the
bottom percent. This can be reversed using the invert property.
"""
__displayname__ = 'Percent Threshold'
__category__ = 'filter'
def __init__(self, percent=50, invert=False, **kwargs):
FilterBase.__init__(
self, inputType='vtkDataSet', outputType='vtkUnstructuredGrid', **kwargs
)
self.__invert = invert
if percent < 1.0:
percent *= 100
self.__percent = percent # NOTE: not decimal percent
self.__filter = vtk.vtkThreshold()
self.__input_array = [None, None]
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline for execution"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
self.__filter.SetInputDataObject(pdi)
pdo = self.GetOutputData(outInfo, 0)
# Get Input Array
field, name = self.__input_array[0], self.__input_array[1]
wpdi = dsa.WrapDataObject(pdi)
arr = _helpers.get_numpy_array(wpdi, field, name)
dmin, dmax = np.nanmin(arr), np.nanmax(arr)
val = dmin + (self.__percent / 100.0) * (dmax - dmin)
if self.__invert:
self.__filter.ThresholdByLower(val)
else:
self.__filter.ThresholdByUpper(val)
self.__filter.Update()
filt = self.__filter.GetOutputDataObject(0)
pdo.ShallowCopy(filt)
return 1
def SetInputArrayToProcess(self, idx, port, connection, field, name):
"""Used to set the input array(s)
Args:
idx (int): the index of the array to process
port (int): input port (use 0 if unsure)
connection (int): the connection on the port (use 0 if unsure)
field (int): the array field (0 for points, 1 for cells, 2 for
field, and 6 for row)
name (int): the name of the array
"""
if self.__input_array[0] != field or self.__input_array[1] != name:
self.__input_array[0] = field
self.__input_array[1] = name
self.__filter.SetInputArrayToProcess(idx, port, connection, field, name)
self.Modified()
return 1
def set_percent(self, percent):
"""Set the percent for the threshold in range (0, 100).
Any values falling beneath the set percent of the total data range
will be removed."""
if self.__percent != percent:
self.__percent = percent
self.Modified()
def set_use_continuous_cell_range(self, flag):
"""If this is on (default is off), we will use the continuous
interval [minimum cell scalar, maxmimum cell scalar] to intersect
the threshold bound , rather than the set of discrete scalar
values from the vertices"""
return self.__filter.SetUseContinuousCellRange(flag)
def set_invert(self, flag):
"""Use to invert the threshold filter"""
if self.__invert != flag:
self.__invert = flag
self.Modified()
def apply(self, input_data_object, array_name):
"""Run the algorithm on an input data object, specifying the array"""
self.SetInputDataObject(input_data_object)
arr, field = _helpers.search_for_array(input_data_object, array_name)
self.SetInputArrayToProcess(0, 0, 0, field, array_name)
self.Update()
return pv.wrap(self.GetOutput())
###############################################################################
class ArraysToRGBA(FilterPreserveTypeBase):
"""Use arrays from input data object to set an RGBA array. Sets colors and
transparencies.
"""
__displayname__ = 'Arrays To RGBA'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterPreserveTypeBase.__init__(self, **kwargs)
self.__use_trans = False
self.__r_array = [None, None]
self.__g_array = [None, None]
self.__b_array = [None, None]
self.__a_array = [None, None]
self.__field = None
self.__mask = -9999
def _get_arrays(self, wpdi):
"""Internal helper to fetch RGBA arrays"""
# Get Red
fieldr, name = self.__r_array[0], self.__r_array[1]
r_arr = _helpers.get_numpy_array(wpdi, fieldr, name)
# Get Green
fieldg, name = self.__g_array[0], self.__g_array[1]
g_arr = _helpers.get_numpy_array(wpdi, fieldg, name)
# Get Blue
fieldb, name = self.__b_array[0], self.__b_array[1]
b_arr = _helpers.get_numpy_array(wpdi, fieldb, name)
# Get Trans
fielda, name = self.__a_array[0], self.__a_array[1]
a_arr = _helpers.get_numpy_array(wpdi, fielda, name)
if fieldr != fieldg != fieldb: # != fielda
raise _helpers.PVGeoError('Data arrays must be of the same field.')
self.__field = fieldr
return r_arr, g_arr, b_arr, a_arr
def _mask_arrays(self, r_arr, g_arr, b_arr, a_arr):
"""Internal helper to mask RGBA arrays"""
r_arr = np.ma.masked_where(r_arr == self.__mask, r_arr)
g_arr = np.ma.masked_where(g_arr == self.__mask, g_arr)
b_arr = np.ma.masked_where(b_arr == self.__mask, b_arr)
a_arr = np.ma.masked_where(a_arr == self.__mask, a_arr)
return r_arr, g_arr, b_arr, a_arr
def RequestData(self, request, inInfo, outInfo):
"""Execute on pipeline"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
wpdi = dsa.WrapDataObject(pdi)
# Get number of points
pdo = self.GetOutputData(outInfo, 0)
# Get the arrays for the RGB values
r_arr, g_arr, b_arr, a_arr = self._get_arrays(wpdi)
r_arr, g_arr, b_arr, a_arr = self._mask_arrays(r_arr, g_arr, b_arr, a_arr)
        # normalize each color array between 0 and 255
r_arr = NormalizeArray._feature_scale(r_arr, [0, 255])
g_arr = NormalizeArray._feature_scale(g_arr, [0, 255])
b_arr = NormalizeArray._feature_scale(b_arr, [0, 255])
# Now concatenate the arrays
if self.__use_trans:
a_arr = NormalizeArray._feature_scale(a_arr, [0, 255])
col = np.array(np.c_[r_arr, g_arr, b_arr, a_arr], dtype=np.uint8)
else:
col = np.array(np.c_[r_arr, g_arr, b_arr], dtype=np.uint8)
colors = interface.convert_array(col, name='Colors')
# Set the output
pdo.DeepCopy(pdi)
# Add new color array
_helpers.add_array(pdo, self.__field, colors)
return 1
#### Seters and Geters ####
def set_use_transparency(self, flag):
"""Set a boolean flag on whether or not to use a transparency component"""
if self.__use_trans != flag:
self.__use_trans = flag
self.Modified()
def set_mask_value(self, val):
"""Set the value to mask in the RGBA arrays"""
if self.__mask != val:
self.__mask = val
self.Modified()
def _set_input_array_red(self, field, name):
"""Set field and name of red array"""
if self.__r_array[0] != field:
self.__r_array[0] = field
self.Modified()
if self.__r_array[1] != name:
self.__r_array[1] = name
self.Modified()
def _set_input_array_green(self, field, name):
"""Set field and name of green array"""
if self.__g_array[0] != field:
self.__g_array[0] = field
self.Modified()
if self.__g_array[1] != name:
self.__g_array[1] = name
self.Modified()
def _set_input_array_blue(self, field, name):
"""Set field and name of blue array"""
if self.__b_array[0] != field:
self.__b_array[0] = field
self.Modified()
if self.__b_array[1] != name:
self.__b_array[1] = name
self.Modified()
def _set_input_array_trans(self, field, name):
"""Set field and name of transparency array"""
if self.__a_array[0] != field:
self.__a_array[0] = field
self.Modified()
if self.__a_array[1] != name:
self.__a_array[1] = name
self.Modified()
def SetInputArrayToProcess(self, idx, port, connection, field, name):
"""Used to set the input array(s)
Args:
idx (int): the index of the array to process
port (int): input port (use 0 if unsure)
connection (int): the connection on the port (use 0 if unsure)
field (int): the array field (0 for points, 1 for cells, 2 for
field, and 6 for row)
name (int): the name of the array
"""
if idx == 0:
self._set_input_array_red(field, name)
elif idx == 1:
self._set_input_array_green(field, name)
elif idx == 2:
self._set_input_array_blue(field, name)
elif idx == 3:
self._set_input_array_trans(field, name)
else:
raise _helpers.PVGeoError(
'SetInputArrayToProcess() do not know how to handle idx: %d' % idx
)
return 1
def apply(self, input_data_object, r_array, g_array, b_array, a_array=None):
    """Run the algorithm on an input data object, specifying RGBA array names.

    The named arrays are located on the input and registered as the red,
    green, blue and (optionally) alpha input channels before updating the
    pipeline.
    """
    self.SetInputDataObject(input_data_object)
    # Locate each named array; the second return value is the field
    # association (points/cells, see SetInputArrayToProcess) that must be
    # forwarded. NOTE(review): the array objects (r_arr/g_arr/b_arr) are
    # unused here -- only the field associations matter.
    r_arr, rField = _helpers.search_for_array(input_data_object, r_array)
    g_arr, gField = _helpers.search_for_array(input_data_object, g_array)
    b_arr, bField = _helpers.search_for_array(input_data_object, b_array)
    if a_array is not None:
        # Optional alpha channel: register it and enable transparency.
        a_arr, aField = _helpers.search_for_array(input_data_object, a_array)
        self.SetInputArrayToProcess(3, 0, 0, aField, a_array)
        self.set_use_transparency(True)
    self.SetInputArrayToProcess(0, 0, 0, rField, r_array)
    self.SetInputArrayToProcess(1, 0, 0, gField, g_array)
    self.SetInputArrayToProcess(2, 0, 0, bField, b_array)
    self.Update()
    # Wrap the algorithm's output before returning (pv presumably is
    # PyVista -- confirm the module-level import alias).
    return pv.wrap(self.GetOutput())
###############################################################################
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/filters/math.py",
"copies": "1",
"size": "27308",
"license": "bsd-3-clause",
"hash": 2288933896356771800,
"line_mean": 34.1907216495,
"line_max": 84,
"alpha_frac": 0.5700527318,
"autogenerated": false,
"ratio": 3.9485251590514747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044963388412428806,
"num_lines": 776
} |
# Public API of the package: mostly re-exports from the native ._v8
# extension, plus the pure-Python converters defined below.
__all__ = [
    'Array',
    'Context',
    'GlobalContext',
    'HandleScope',
    'Isolate',
    'JavaScriptError',
    'Object',
    'Script',
    'UNDEFINED',
    'UndefinedType',
    'Value',
    'from_python',
    'to_python',
    'run',
    'shutdown',
]
import atexit
import collections.abc
import logging
# Disable warning as pylint cannot infer native extension.
# pylint: disable=no-name-in-module
from ._v8 import initialize as _initialize
# Re-export these.
from ._v8 import Array
from ._v8 import Context
from ._v8 import GlobalContext
from ._v8 import HandleScope
from ._v8 import Isolate
from ._v8 import Object
from ._v8 import Script
from ._v8 import UNDEFINED
from ._v8 import UndefinedType
from ._v8 import Value
from ._v8 import shutdown
# Library convention: install a NullHandler so the importing application
# controls logging configuration.
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Types the converters pass through unchanged. NOTE: str must be checked
# before collections.abc.Sequence (str is itself a Sequence).
_PRIMITIVE_TYPES = (UndefinedType, type(None), bool, int, float, str)


class JavaScriptError(Exception):
    """Handed to the native layer via _initialize(); presumably raised
    when JavaScript execution fails -- confirm in the extension."""
    pass
def run(context, code, name='<main>'):
    """Compile *code* as a script named *name* and execute it in *context*."""
    return Script(context, name, code).run(context)
def from_python(context, py_obj):
    """Recursively convert a Python value to its JavaScript counterpart.

    This will be trapped in an infinite loop if there are self
    references.
    """
    # str is a Sequence subclass, so primitives must be tested first.
    if isinstance(py_obj, _PRIMITIVE_TYPES):
        return py_obj
    if isinstance(py_obj, collections.abc.Sequence):
        js_array = Array(context)
        for element in py_obj:
            js_array.append(from_python(context, element))
        return js_array
    if isinstance(py_obj, collections.abc.Mapping):
        js_object = Object(context)
        for key, value in py_obj.items():
            if not isinstance(key, str):
                raise TypeError('expect str key: {!r}'.format(key))
            js_object[key] = from_python(context, value)
        return js_object
    raise TypeError('unsupported type: {!r}'.format(py_obj))
def to_python(
    js_obj,
    *,
    sequence_type=list,
    undefined_to_none=True,
):
    """Recursively convert a JavaScript value to a plain Python value.

    This will be trapped in an infinite loop if there are self
    references.
    """

    def convert(node):
        # UNDEFINED maps to None unless the caller opted out.
        if node is UNDEFINED and undefined_to_none:
            return None
        if isinstance(node, _PRIMITIVE_TYPES):
            return node
        if isinstance(node, Array):
            return sequence_type(convert(item) for item in node)
        if isinstance(node, Object):
            return {name: convert(node[name]) for name in node}
        raise TypeError('unsupported type: {!r}'.format(node))

    return convert(js_obj)
# Register the Python exception class with the native extension so it can
# be raised for JavaScript errors.
_initialize(JavaScriptError)

# Is it really necessary to call shutdown on process exit?
atexit.register(shutdown)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/v8/v8/__init__.py",
"copies": "1",
"size": "2800",
"license": "mit",
"hash": -1627891428961286100,
"line_mean": 24.4545454545,
"line_max": 70,
"alpha_frac": 0.6475,
"autogenerated": false,
"ratio": 3.830369357045144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4977869357045144,
"avg_score": null,
"num_lines": null
} |
# Public API: the Assertions helper class and a ready-made default instance.
__all__ = [
    'ASSERT',
    'Assertions',
]
class Assertions:
    """Fluent runtime checks that raise a configurable exception type.

    Every check returns the value it validated, so checks can be used
    inline inside larger expressions.
    """

    def __init__(self, exc_type):
        # Exception class instantiated whenever a check fails.
        self._exc_type = exc_type

    def __call__(self, cond, message, *args):
        """Raise ``exc_type(message % args)`` unless *cond* is truthy."""
        if not cond:
            raise self._exc_type(message % args)
        return cond

    def fail(self, message, *args):
        """Unconditionally raise the configured exception."""
        raise self._exc_type(message % args)

    def true(self, value):
        """Require *value* to be truthy."""
        self(value, 'expect truth instead of %r', value)
        return value

    def false(self, value):
        """Require *value* to be falsy."""
        self(not value, 'expect falsehood instead of %r', value)
        return value

    def is_(self, value, expected):
        """Require identity with *expected*."""
        self(value is expected, 'expect %r is %r', value, expected)
        return value

    def is_not(self, value, expected):
        """Require non-identity with *expected*."""
        self(value is not expected, 'expect %r is not %r', value, expected)
        return value

    def none(self, value):
        """Require *value* to be None."""
        self(value is None, 'expect None instead of %r', value)
        return value

    def not_none(self, value):
        """Require *value* not to be None."""
        self(value is not None, 'expect non-None value')
        return value

    def type_of(self, value, type_):
        """Require *value* to be an instance of *type_*."""
        self(
            isinstance(value, type_),
            'expect %r-typed value instead of %r', type_, value,
        )
        return value

    def not_type_of(self, value, type_):
        """Require *value* not to be an instance of *type_*."""
        self(
            not isinstance(value, type_),
            'expect not %r-typed value instead of %r', type_, value,
        )
        return value

    def in_(self, member, container):
        """Require membership; return the member."""
        self(member in container, 'expect %r in %r', member, container)
        return member

    def not_in(self, member, container):
        """Require non-membership; return the member."""
        self(member not in container, 'expect %r not in %r', member, container)
        return member

    def equal(self, value, expected):
        self(value == expected, 'expect %r == %r', value, expected)
        return value

    def not_equal(self, value, expected):
        self(value != expected, 'expect %r != %r', value, expected)
        return value

    def greater(self, value, expected):
        self(value > expected, 'expect %r > %r', value, expected)
        return value

    def greater_or_equal(self, value, expected):
        self(value >= expected, 'expect %r >= %r', value, expected)
        return value

    def less(self, value, expected):
        self(value < expected, 'expect %r < %r', value, expected)
        return value

    def less_or_equal(self, value, expected):
        self(value <= expected, 'expect %r <= %r', value, expected)
        return value


# Default instance raising plain AssertionError.
ASSERT = Assertions(AssertionError)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/assertions.py",
"copies": "1",
"size": "2548",
"license": "mit",
"hash": 8132271697843178000,
"line_mean": 27,
"line_max": 79,
"alpha_frac": 0.5784929356,
"autogenerated": false,
"ratio": 3.92,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9998492935600001,
"avg_score": 0,
"num_lines": 91
} |
# Public names exported by this threading-utilities module.
__all__ = [
    'AtomicInt',
    'AtomicSet',
    'Priority',
    'generate_names',
    'make_get_thread_local',
    'set_pthread_name',
]
import collections.abc
import functools
import logging
import threading
from garage.assertions import ASSERT
LOG = logging.getLogger(__name__)
class AtomicInt:
    """Integer whose read-modify-write operations are guarded by a lock."""

    def __init__(self, value=0):
        self._lock = threading.Lock()
        self._value = value

    def get_and_set(self, new_value):
        """Atomically replace the value; return the previous value."""
        with self._lock:
            previous, self._value = self._value, new_value
        return previous

    def get_and_add(self, add_to):
        """Atomically add *add_to*; return the value before the addition."""
        with self._lock:
            previous = self._value
            self._value = previous + add_to
        return previous
class AtomicSet:
    """Set with lock-guarded membership test and test-and-add."""

    def __init__(self):
        self._lock = threading.Lock()
        self._items = set()

    def __contains__(self, item):
        with self._lock:
            return item in self._items

    def check_and_add(self, item):
        """Add *item* if absent; return whether it was already present."""
        with self._lock:
            already_present = item in self._items
            if not already_present:
                self._items.add(item)
            return already_present
@functools.total_ordering
class Priority:
    """A wrapper class that supports lowest/highest priority sentinels,
    which should be handy when used with Python's heap.

    This is an immutable value class.

    NOTE: Python's heap[0] is the smallest item; so we will have the
    highest priority be the smallest.
    """

    def __init__(self, priority):
        # Priorities are hashed (__hash__ below), so require hashability.
        ASSERT.type_of(priority, collections.abc.Hashable)
        self._priority = priority

    def __str__(self):
        if self is Priority.LOWEST:
            return 'Priority.LOWEST'
        elif self is Priority.HIGHEST:
            return 'Priority.HIGHEST'
        else:
            return 'Priority(%r)' % (self._priority,)

    __repr__ = __str__

    def __hash__(self):
        return hash(self._priority)

    def __eq__(self, other):
        # Fix: return NotImplemented for non-Priority operands instead of
        # raising AttributeError on other._priority; Python then falls back
        # to identity comparison (so `Priority(1) == 1` is simply False).
        if not isinstance(other, Priority):
            return NotImplemented
        return self._priority == other._priority

    def __lt__(self, other):
        if not isinstance(other, Priority):
            return NotImplemented
        # NOTE: Smaller = higher priority!
        # Identical objects (including sentinel vs itself) are never "<".
        if self is other:
            return False
        # LOWEST is never smaller; nothing is smaller than HIGHEST.
        if self is Priority.LOWEST or other is Priority.HIGHEST:
            return False
        # Everything is smaller than LOWEST; HIGHEST is smaller than all.
        if other is Priority.LOWEST or self is Priority.HIGHEST:
            return True
        return self._priority < other._priority


# Sentinels: LOWEST sorts after everything; HIGHEST sorts before everything.
Priority.LOWEST = Priority(object())
Priority.HIGHEST = Priority(object())
def generate_names(*, name_format='{name}-{serial:02d}', **kwargs):
    """Yield an endless stream of serial-numbered names (e.g. for actors)."""
    counter = kwargs.pop('serial', None) or AtomicInt(1)
    while True:
        serial = counter.get_and_add(1)
        yield name_format.format(serial=serial, **kwargs)
def make_get_thread_local(name, make):
    """Return a getter that lazily creates one ``make()`` object per thread."""
    def get_thread_local():
        store = make_get_thread_local.local
        try:
            return getattr(store, name)
        except AttributeError:
            obj = make()
            setattr(store, name, obj)
            return obj
    return get_thread_local


# One threading.local shared by all getters; entries are keyed by *name*.
make_get_thread_local.local = threading.local()
# NOTE: This function is a hack; don't expect it to always work.
def set_pthread_name(thread, name):
    """Best-effort: set the OS-level pthread name of *thread* to *name*."""
    # A thread that has not started has no ident to target.
    if not thread.ident:
        import warnings
        warnings.warn('no thread.ident for %r' % name)
        return
    name = name.encode('utf-8')
    # Per the check below, names longer than 15 bytes are rejected.
    if len(name) > 15:
        import warnings
        warnings.warn('pthread name longer than 16 char: %r' % name)
        return
    # Lazily resolve pthread_setname_np once and cache it on the function.
    if not hasattr(set_pthread_name, 'pthread_setname_np'):
        import ctypes
        import ctypes.util
        try:
            pthread = ctypes.CDLL(ctypes.util.find_library('pthread'))
        except FileNotFoundError:
            LOG.warning('cannot load lib pthread', exc_info=True)
            # Fallback stub that always reports failure (-1).
            pthread_setname_np = lambda *_: -1
        else:
            pthread_setname_np = pthread.pthread_setname_np
            # XXX: Evil: Use long for pthread_t, which is not quite true.
            pthread_setname_np.argtypes = [ctypes.c_long, ctypes.c_char_p]
            pthread_setname_np.restype = ctypes.c_int
        set_pthread_name.pthread_setname_np = pthread_setname_np
    # XXX: Evil: Use thread.ident as a shortcut of pthread_self().
    err = set_pthread_name.pthread_setname_np(thread.ident, name)
    if err:
        LOG.warning('cannot set pthread name (err=%d)', err)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/threads/utils.py",
"copies": "1",
"size": "4652",
"license": "mit",
"hash": 2057758160163037200,
"line_mean": 27.1939393939,
"line_max": 74,
"alpha_frac": 0.5902837489,
"autogenerated": false,
"ratio": 3.8669991687448046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49572829176448047,
"avg_score": null,
"num_lines": null
} |
# Packaging metadata for the triconf distribution; consumed by setup.py.
__all__ = [
    '__author__', '__author_email__', '__classifiers__', '__desc__', '__license__',
    '__package_name__', '__scripts__', '__team__', '__url__', '__version__',
]

__author__ = 'Luke Powers'
__author_email__ = 'luke.powers@openx.com'
__classifiers__ = ['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Utilities',
                   'Private :: Do Not Upload'  # Do not allow this package to be uploaded to pypi.python.org
                   ]
__desc__ = '''Configuration module intended to work as a namespace holder for configuration values.'''
__license__ = 'Apache Software License 2.0'
__package_exclude__ = ['tests']
__package_name__ = 'triconf'
# Minimum runtime requirements (install_requires).
__requires__ = [
    'argparse>=1.1',
    'configobj>=5.0.6'
]
__scripts__ = []
__team__ = 'autoeng'
# Internal GitHub Enterprise location of the project.
__url__ = 'http://github.op.dc.openx.org/%s/%s' % ('autoeng', 'openx_python_triconf')
__version__ = '1.0.1'
| {
"repo_name": "luke-powers/triconf",
"path": "src/triconf/__about__.py",
"copies": "1",
"size": "1153",
"license": "bsd-3-clause",
"hash": -3862259556438485000,
"line_mean": 40.1785714286,
"line_max": 107,
"alpha_frac": 0.5394622723,
"autogenerated": false,
"ratio": 3.7679738562091503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48074361285091505,
"avg_score": null,
"num_lines": null
} |
# Packaging metadata for the NSCollector distribution; consumed by setup.py.
__all__ = [
    '__author__', '__classifiers__', '__desc__', '__license__',
    '__package_name__', '__scripts__', '__url__', '__version__',
]

__author__ = 'Luke Powers'
__author_email__ = 'luke-powers@users.noreply.github.com'

# For more classifiers, see http://goo.gl/zZQaZ
__classifiers__ = [
    'Development Status :: 3 - Alpha',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Utilities',
    'Private :: Do Not Upload'
    # Do not allow this package to be uploaded to pypi.python.org
]
__desc__ = '''Tool to scrape Nation States for stats'''
# Console entry point: `ns-collector` runs nscollector.__main__:main.
__entry_points__ = {
    'console_scripts':
    [
        'ns-collector=nscollector.__main__:main',
    ]
}
__license__ = 'Apache Software License 2.0'
__package_exclude__ = ['tests']
__package_name__ = 'NSCollector'
__requires__ = [
    'pip>=9.0',
    'bs4',
    'django',
    'requests',
]
__scripts__ = []
# NOTE(review): this yields 'Luke-Powers/NSCollector'; GitHub owner names
# are often lowercase -- confirm the generated URL resolves.
__url__ = 'http://github.com/%s/%s' % (__author__.replace(' ', '-'), __package_name__)
__version__ = '0.0.1'
| {
"repo_name": "luke-powers/NSCollector",
"path": "src/nscollector/__about__.py",
"copies": "1",
"size": "1147",
"license": "apache-2.0",
"hash": 2738054058965365000,
"line_mean": 29.1842105263,
"line_max": 86,
"alpha_frac": 0.5693112467,
"autogenerated": false,
"ratio": 3.383480825958702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44527920726587017,
"avg_score": null,
"num_lines": null
} |
# Public base classes exported by this module.
__all__ = [
    'AutoInitAndCloseable',
    'Disposable',
    'NoReentrantContext',
    'DisposableContext',
]
class AutoInitAndCloseable(object):
    """
    Mixin providing an idempotent :meth:`init` / :meth:`close` lifecycle.

    Subclasses implement :meth:`_init` and :meth:`_close`. Calling
    :meth:`init` repeatedly runs ``_init()`` only once (until ``close()``
    resets the flag), so any public method may call ``init()`` first to get
    lazy auto-initialization. Entering the context manager calls ``init()``
    and exiting calls ``close()``.
    """

    # Whether _init() has run and has not yet been undone by _close().
    _initialized = False

    def _init(self):
        """Subclass hook: build the internal states."""
        raise NotImplementedError()

    def _close(self):
        """Subclass hook: destroy the internal states."""
        raise NotImplementedError()

    def init(self):
        """Run :meth:`_init` unless it has already run."""
        if self._initialized:
            return
        self._init()
        self._initialized = True

    def close(self):
        """Run :meth:`_close` if initialized; always clear the flag."""
        if not self._initialized:
            return
        try:
            self._close()
        finally:
            self._initialized = False

    def __enter__(self):
        """Ensure the internal states are initialized."""
        self.init()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Cleanup the internal states."""
        self.close()
class Disposable(object):
    """
    Mixin for objects that may be consumed at most once.
    """

    # Flipped to True by the first (and only permitted) use.
    _already_used = False

    def _check_usage_and_set_used(self):
        """
        Raise :class:`RuntimeError` if this object was already used;
        otherwise mark it as used.
        """
        if self._already_used:
            raise RuntimeError('Disposable object cannot be used twice: {!r}.'.
                               format(self))
        self._already_used = True
class NoReentrantContext(object):
    """
    Base class for contexts which are not reentrant (i.e., if there is
    a context opened by ``__enter__``, and it has not called ``__exit__``,
    the ``__enter__`` cannot be called again).
    """

    # True while between __enter__ and __exit__.
    _is_entered = False

    def _enter(self):
        """Subclass hook invoked by ``__enter__``; its result is returned."""
        raise NotImplementedError()

    def _exit(self, exc_type, exc_val, exc_tb):
        """Subclass hook invoked by ``__exit__``."""
        raise NotImplementedError()

    def _require_entered(self):
        """
        Raise :class:`RuntimeError` unless the context is currently entered.
        """
        if not self._is_entered:
            raise RuntimeError('Context is required be entered: {!r}.'.
                               format(self))

    def __enter__(self):
        if self._is_entered:
            raise RuntimeError('Context is not reentrant: {!r}.'.
                               format(self))
        result = self._enter()
        self._is_entered = True
        return result

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._is_entered:
            return None
        self._is_entered = False
        return self._exit(exc_type, exc_val, exc_tb)
class DisposableContext(NoReentrantContext):
    """
    Base class for contexts which can only be entered once.
    """

    # Latches to True on the first successful __enter__ and never resets.
    _has_entered = False

    def __enter__(self):
        if self._has_entered:
            raise RuntimeError(
                'A disposable context cannot be entered twice: {!r}.'.
                format(self))
        # Latch only after the parent __enter__ succeeds, so a failed
        # _enter() leaves the context usable (matching the original order).
        result = super(DisposableContext, self).__enter__()
        self._has_entered = True
        return result
| {
"repo_name": "korepwx/tfsnippet",
"path": "tfsnippet/utils/concepts.py",
"copies": "1",
"size": "3955",
"license": "mit",
"hash": 5872066902974685000,
"line_mean": 28.0808823529,
"line_max": 79,
"alpha_frac": 0.5716814159,
"autogenerated": false,
"ratio": 4.479048697621744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5550730113521745,
"avg_score": null,
"num_lines": null
} |
# Sole public export of this module.
__all__ = ["NavigationTracker"]
from status import *
from devtools_event_listener import DevToolsEventListener
# Tracks the navigation state of the page.
class NavigationTracker(DevToolsEventListener):
    """Tracks the navigation (loading) state of the page by listening to
    DevTools ``Page.*`` events on the attached client."""

    # Loading-state constants.
    kUnknown = 0
    kLoading = 1
    kNotLoading = 2

    def __init__(self, client, known_state=0):
        self.client = client
        self.client.AddListener(self)
        self.loading_state = known_state
        # Ids of frames with a pending scheduled navigation (delay <= 1s).
        self.scheduled_frame_set = set()

    # return status and is_pending<bool>
    def IsPendingNavigation(self, frame_id):
        """Return ``(status, is_pending)`` for the specified frame.

        |frame_id| may be empty to signify the main frame.
        """
        if self.loading_state == NavigationTracker.kUnknown:
            # If the loading state is unknown (which happens after first
            # connecting), force loading to start and set the state to
            # loading. This will cause a frame start event to be received,
            # and the frame stop event will not be received until all frames
            # are loaded. Loading is forced to start by attaching a
            # temporary iframe. Forcing loading to start is not necessary
            # if the main frame is not yet loaded.
            kStartLoadingIfMainFrameNotLoading = \
                "var isLoaded = document.readyState == 'complete' ||"\
                " document.readyState == 'interactive';"\
                "if (isLoaded) {"\
                " var frame = document.createElement('iframe');"\
                " frame.src = 'about:blank';"\
                " document.body.appendChild(frame);"\
                " window.setTimeout(function() {"\
                " document.body.removeChild(frame);"\
                " }, 0);"\
                "}"
            params = {}
            params["expression"] = kStartLoadingIfMainFrameNotLoading
            result = {}
            status = self.client.SendCommandAndGetResult(
                "Runtime.evaluate", params, result)
            if status.IsError():
                return (Status(kUnknownError,
                               "cannot determine loading status"), False)
            # Between the time the JavaScript is evaluated and
            # SendCommandAndGetResult returns, OnEvent may have received
            # info about the loading state. This is only possible during a
            # nested command. Only set the loading state if it is still
            # unknown.
            if self.loading_state == NavigationTracker.kUnknown:
                self.loading_state = NavigationTracker.kLoading
        is_pending = (self.loading_state == NavigationTracker.kLoading)
        if not frame_id:
            # Main frame: any scheduled navigation counts as pending.
            is_pending |= (len(self.scheduled_frame_set) > 0)
        else:
            is_pending |= (frame_id in self.scheduled_frame_set)
        return (Status(kOk), is_pending)

    def OnConnected(self, client):
        """Reset tracked state and enable Page-domain notifications."""
        self.loading_state = NavigationTracker.kUnknown
        self.scheduled_frame_set = set()
        # Enable page domain notifications to allow tracking navigating state
        return self.client.SendCommand("Page.enable", {})

    def OnEvent(self, client, method, params):
        """Update the tracked loading state from a DevTools event."""
        # Xwalk does not send Page.frameStoppedLoading until all frames have
        # run their onLoad handlers (including frames created during the
        # handlers). When it does, it only sends one stopped event for all
        # frames.
        if method == "Page.frameStartedLoading":
            self.loading_state = NavigationTracker.kLoading
        elif method == "Page.frameStoppedLoading":
            self.loading_state = NavigationTracker.kNotLoading
        elif method == "Page.frameScheduledNavigation":
            delay = params.get("delay", None)
            if delay is None:
                return Status(kUnknownError, "missing or invalid 'delay'")
            frame_id = params.get("frameId", None)
            # NOTE(review): json-decoded params may yield unicode strings on
            # Python 2, which this strict type check would reject -- confirm
            # how params are decoded upstream.
            if type(frame_id) != str:
                return Status(kUnknownError, "missing or invalid 'frameId'")
            # WebDriver spec says to ignore redirects over 1s.
            if delay > 1.0:
                return Status(kOk)
            self.scheduled_frame_set.add(frame_id)
        elif method == "Page.frameClearedScheduledNavigation":
            frame_id = params.get("frameId", None)
            if type(frame_id) != str:
                return Status(kUnknownError, "missing or invalid 'frameId'")
            # Fix: use discard(), not remove(). The frame may never have been
            # tracked (e.g. its scheduled navigation had delay > 1s and was
            # ignored above), and an unexpected clear event must not raise
            # KeyError.
            self.scheduled_frame_set.discard(frame_id)
        elif method == "Page.frameNavigated":
            # Note: in some cases Page.frameNavigated may be received for
            # subframes without a frameStoppedLoading (for example cnn.com).
            # If the main frame just navigated, discard any pending scheduled
            # navigations. For some reasons at times the cleared event is not
            # received when navigating. See crbug.com/180742.
            # (Fix: `in` replaces Python-2-only dict.has_key().)
            if "parentId" not in params["frame"]:
                self.scheduled_frame_set = set()
        elif method == "Inspector.targetCrashed":
            self.loading_state = NavigationTracker.kNotLoading
            self.scheduled_frame_set = set()
        return Status(kOk)

    def OnCommandSuccess(self, client, method):
        """After Page.navigate succeeds, decide whether a load is expected."""
        if method == "Page.navigate" and \
                self.loading_state != NavigationTracker.kLoading:
            # At this point the browser has initiated the navigation, but
            # besides that, it is unknown what will happen.
            #
            # There are a few cases (perhaps more):
            # 1 The RenderViewHost has already queued ViewMsg_Navigate and
            #   loading will start shortly.
            # 2 The RenderViewHost has already queued ViewMsg_Navigate and
            #   loading will never start because it is just an in-page
            #   fragment navigation.
            # 3 The RenderViewHost is suspended and hasn't queued
            #   ViewMsg_Navigate yet. This happens for cross-site
            #   navigations. The RenderViewHost will not queue
            #   ViewMsg_Navigate until it is ready to unload the previous
            #   page (after running unload handlers and such).
            #
            # To determine whether a load is expected, do a round trip to the
            # renderer to ask what the URL is.
            # If case #1, by the time the command returns, the frame started
            # to load event will also have been received, since the DevTools
            # command will be queued behind ViewMsg_Navigate.
            # If case #2, by the time the command returns, the navigation
            # will have already happened, although no frame start/stop events
            # will have been received.
            # If case #3, the URL will be blank if the navigation hasn't been
            # started yet. In that case, expect a load to happen in the
            # future.
            self.loading_state = NavigationTracker.kUnknown
            params = {}
            params["expression"] = "document.URL"
            result = {}
            status = self.client.SendCommandAndGetResult(
                "Runtime.evaluate", params, result)
            url = result["result"].get("value", None)
            if status.IsError() or type(url) != str:
                return Status(kUnknownError,
                              "cannot determine loading status", status)
            if self.loading_state == NavigationTracker.kUnknown and not url:
                self.loading_state = NavigationTracker.kLoading
        return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/navigation_tracker.py",
"copies": "1",
"size": "6720",
"license": "bsd-3-clause",
"hash": 7160231154555095000,
"line_mean": 45.993006993,
"line_max": 86,
"alpha_frac": 0.6808035714,
"autogenerated": false,
"ratio": 4.0653357531760435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5246139324576043,
"avg_score": null,
"num_lines": null
} |
# Public OAuth API of this module: the two flow helpers plus the
# exception types that finish() can raise.
__all__ = [
    'BadRequestException',
    'BadStateException',
    'CsrfException',
    'DropboxOAuth2Flow',
    'DropboxOAuth2FlowNoRedirect',
    'NotApprovedException',
    'ProviderException',
]
import base64
import os
import six
import sys
import urllib
from .dropbox import Dropbox
from .session import pinned_session
# Alias urllib helpers so the rest of the module is Python 2/3 agnostic.
if six.PY3:
    url_path_quote = urllib.parse.quote
    url_encode = urllib.parse.urlencode
else:
    url_path_quote = urllib.quote
    url_encode = urllib.urlencode

# API version marker embedded in OAuth request paths (see build_path).
OAUTH_ROUTE_VERSION = '1'
class DropboxOAuth2FlowBase(object):
    """Shared machinery for the OAuth 2 flows: authorize-URL construction
    and the authorization-code-for-token exchange."""

    def __init__(self, consumer_key, consumer_secret, locale=None):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.locale = locale
        self.requests_session = pinned_session()
        # The API domain may be overridden via the environment
        # (e.g. for testing).
        self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)

    def _get_authorize_url(self, redirect_uri, state):
        """Build the user-facing /oauth2/authorize URL.

        ``redirect_uri`` and ``state`` are omitted from the query when None
        (the no-redirect flow passes None for both).
        """
        params = dict(response_type='code',
                      client_id=self.consumer_key)
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri
        if state is not None:
            params['state'] = state
        return self.build_url(Dropbox.HOST_WEB, '/oauth2/authorize', params)

    def _finish(self, code, redirect_uri):
        """Exchange an authorization *code* for ``(access_token, user_id)``.

        Raises an HTTP error (via ``raise_for_status``) if the token
        endpoint responds with a non-success status.
        """
        url = self.build_url(Dropbox.HOST_API, '/oauth2/token')
        params = {'grant_type': 'authorization_code',
                  'code': code,
                  'client_id': self.consumer_key,
                  'client_secret': self.consumer_secret,
                  }
        if self.locale is not None:
            params['locale'] = self.locale
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri

        resp = self.requests_session.post(url, data=params)
        resp.raise_for_status()

        d = resp.json()

        access_token = d["access_token"]
        user_id = d["uid"]
        return access_token, user_id

    def build_path(self, target, params=None):
        """Build the path component for an API URL.

        This method urlencodes the parameters, adds them
        to the end of the target url, and puts a marker for the API
        version in front.

        :param str target: A target url (e.g. '/files') to build upon.
        :param dict params: Optional dictionary of parameters (name to value).
        :return: The path and parameters components of an API URL.
        :rtype: str
        """
        if six.PY2 and isinstance(target, six.text_type):
            target = target.encode('utf8')

        target_path = url_path_quote(target)

        params = params or {}
        # Copy so the caller's dict is not mutated by the locale insertion.
        params = params.copy()

        if self.locale:
            params['locale'] = self.locale

        if params:
            query_string = _params_to_urlencoded(params)
            return "/%s%s?%s" % (OAUTH_ROUTE_VERSION, target_path, query_string)
        else:
            return "/%s%s" % (OAUTH_ROUTE_VERSION, target_path)

    def build_url(self, host, target, params=None):
        """Build an API URL.

        This method adds scheme and hostname to the path
        returned from build_path.

        :param str target: A target url (e.g. '/files') to build upon.
        :param dict params: Optional dictionary of parameters (name to value).
        :return: The full API URL.
        :rtype: str
        """
        return "https://%s.%s%s" % (host, self._domain, self.build_path(target, params))
class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper for apps that can't provide a redirect URI
    (such as the command-line example apps).

    Example::

        from dropbox import DropboxOAuth2FlowNoRedirect

        auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)

        authorize_url = auth_flow.start()
        print "1. Go to: " + authorize_url
        print "2. Click \\"Allow\\" (you might have to log in first)."
        print "3. Copy the authorization code."
        auth_code = raw_input("Enter the authorization code here: ").strip()

        try:
            access_token, user_id = auth_flow.finish(auth_code)
        except Exception, e:
            print('Error: %s' % (e,))
            return

        dbx = Dropbox(access_token)
    """

    def __init__(self, consumer_key, consumer_secret, locale=None):
        """
        Construct an instance.

        Parameters
        :param str consumer_key: Your API app's "app key".
        :param str consumer_secret: Your API app's "app secret".
        :param str locale: The locale of the user of your application. For
            example "en" or "en_US". Some API calls return localized data and
            error messages; this setting tells the server which locale to use.
            By default, the server uses "en_US".
        """
        super(DropboxOAuth2FlowNoRedirect, self).__init__(consumer_key,
                                                          consumer_secret,
                                                          locale)

    def start(self):
        """
        Starts the OAuth 2 authorization process.

        :return: The URL for a page on Dropbox's website.  This page will let
            the user "approve" your app, which gives your app permission to
            access the user's Dropbox account. Tell the user to visit this URL
            and approve your app.
        """
        # No redirect URI and no CSRF state exist in the no-redirect flow.
        return self._get_authorize_url(None, None)

    def finish(self, code):
        """
        If the user approves your app, they will be presented with an
        "authorization code".  Have the user copy/paste that authorization code
        into your app and then call this method to get an access token.

        :param str code: The authorization code shown to the user when they
            approved your app.
        :return: A pair of ``(access_token, user_id)``.  ``access_token`` is a
            string that can be passed to Dropbox.  ``user_id`` is the
            Dropbox user ID (string) of the user that just approved your app.
        :raises: The same exceptions as :meth:`DropboxOAuth2Flow.finish()`.
        """
        # The redirect URI must likewise be omitted when exchanging the code.
        return self._finish(code, None)
class DropboxOAuth2Flow(DropboxOAuth2FlowBase):
"""
OAuth 2 authorization helper. Use this for web apps.
OAuth 2 has a two-step authorization process. The first step is having the
user authorize your app. The second involves getting an OAuth 2 access
token from Dropbox.
Example::
from dropbox import DropboxOAuth2Flow
def get_dropbox_auth_flow(web_app_session):
redirect_uri = "https://my-web-server.org/dropbox-auth-finish"
return DropboxOAuth2Flow(
APP_KEY, APP_SECRET, redirect_uri, web_app_session,
"dropbox-auth-csrf-token")
# URL handler for /dropbox-auth-start
def dropbox_auth_start(web_app_session, request):
authorize_url = get_dropbox_auth_flow(web_app_session).start()
redirect_to(authorize_url)
# URL handler for /dropbox-auth-finish
def dropbox_auth_finish(web_app_session, request):
try:
access_token, user_id, url_state = \\
get_dropbox_auth_flow(web_app_session).finish(
request.query_params)
except BadRequestException, e:
http_status(400)
except BadStateException, e:
# Start the auth flow again.
redirect_to("/dropbox-auth-start")
except CsrfException, e:
http_status(403)
except NotApprovedException, e:
flash('Not approved? Why not?')
return redirect_to("/home")
except ProviderException, e:
logger.log("Auth error: %s" % (e,))
http_status(403)
"""
def __init__(self, consumer_key, consumer_secret, redirect_uri, session,
             csrf_token_session_key, locale=None):
    """
    Construct an instance.

    :param str consumer_key: Your API app's "app key".
    :param str consumer_secret: Your API app's "app secret".
    :param str redirect_uri: The URI that the Dropbox server will redirect
        the user to after the user finishes authorizing your app.  This URI
        must be HTTPS-based and pre-registered with the Dropbox servers,
        though localhost URIs are allowed without pre-registration and can
        be either HTTP or HTTPS.
    :param dict session: A dict-like object that represents the current
        user's web session (will be used to save the CSRF token).
    :param str csrf_token_session_key: The key to use when storing the CSRF
        token in the session (for example: "dropbox-auth-csrf-token").
    :param str locale: The locale of the user of your application.  For
        example "en" or "en_US". Some API calls return localized data and
        error messages; this setting tells the server which locale to use.
        By default, the server uses "en_US".
    """
    super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret, locale)
    self.redirect_uri = redirect_uri
    # The session is written in start() (CSRF token) and read back in
    # finish() for CSRF validation.
    self.session = session
    self.csrf_token_session_key = csrf_token_session_key
def start(self, url_state=None):
    """
    Starts the OAuth 2 authorization process.

    This function builds an "authorization URL".  You should redirect your
    user's browser to this URL, which will give them an opportunity to
    grant your app access to their Dropbox account.  When the user
    completes this process, they will be automatically redirected to the
    ``redirect_uri`` you passed in to the constructor.

    This function will also save a CSRF token to
    ``session[csrf_token_session_key]`` (as provided to the constructor).
    This CSRF token will be checked on :meth:`finish()` to prevent request
    forgery.

    :param str url_state: Any data that you would like to keep in the URL
        through the authorization process.  This exact value will be
        returned to you by :meth:`finish()`.
    :return: The URL for a page on Dropbox's website.  This page will let
        the user "approve" your app, which gives your app permission to
        access the user's Dropbox account. Tell the user to visit this URL
        and approve your app.
    """
    csrf_token = base64.urlsafe_b64encode(os.urandom(16))
    if isinstance(csrf_token, bytes):
        # Fix: on Python 3, urlsafe_b64encode returns bytes, which breaks
        # the str concatenation below and stores a bytes token that can
        # never equal the str 'state' parameter compared in finish().
        # Base64 output is pure ASCII, so decoding is always safe.
        csrf_token = csrf_token.decode('ascii')
    state = csrf_token
    if url_state is not None:
        # The portion after '|' is round-tripped back to finish().
        state += "|" + url_state
    self.session[self.csrf_token_session_key] = csrf_token

    return self._get_authorize_url(self.redirect_uri, state)
def finish(self, query_params):
    """
    Call this after the user has visited the authorize URL (see
    :meth:`start()`), approved your app and was redirected to your redirect
    URI.

    :param dict query_params: The query parameters on the GET request to
        your redirect URI.
    :return: A tuple of ``(access_token, user_id, url_state)``.
        ``access_token`` can be used to construct a
        :class:`dropbox.dropbox.Dropbox`. ``user_id`` is the Dropbox user
        ID (string) of the user that just approved your app. ``url_state``
        is the value you originally passed in to :meth:`start()`.
    :raises: :class:`BadRequestException` If the redirect URL was missing
        parameters or if the given parameters were not valid.
    :raises: :class:`BadStateException` If there's no CSRF token in the
        session.
    :raises: :class:`CsrfException` If the ``state`` query parameter
        doesn't contain the CSRF token from the user's session.
    :raises: :class:`NotApprovedException` If the user chose not to
        approve your app.
    :raises: :class:`ProviderException` If Dropbox redirected to your
        redirect URI with some unexpected error identifier and error message.
    """
    # Check well-formedness of request.
    state = query_params.get('state')
    if state is None:
        raise BadRequestException("Missing query parameter 'state'.")
    error = query_params.get('error')
    error_description = query_params.get('error_description')
    code = query_params.get('code')
    # Exactly one of 'code' (success) or 'error' (failure) must be present.
    if error is not None and code is not None:
        raise BadRequestException(
            "Query parameters 'code' and 'error' are both set; "
            "only one must be set.")
    if error is None and code is None:
        raise BadRequestException(
            "Neither query parameter 'code' or 'error' is set.")
    # Check CSRF token
    if self.csrf_token_session_key not in self.session:
        raise BadStateException('Missing CSRF token in session.')
    csrf_token_from_session = self.session[self.csrf_token_session_key]
    # start() stores the base64 form of 16 random bytes (24 characters);
    # anything this short indicates a corrupted/forged session value.
    if len(csrf_token_from_session) <= 20:
        raise AssertionError('CSRF token unexpectedly short: %r' %
                             csrf_token_from_session)
    # state is "<csrf>" or "<csrf>|<url_state>" (see start()).
    split_pos = state.find('|')
    if split_pos < 0:
        given_csrf_token = state
        url_state = None
    else:
        given_csrf_token = state[0:split_pos]
        url_state = state[split_pos+1:]
    # _safe_equals is a constant-time comparison (scans full length), so the
    # comparison itself does not leak where the first mismatch occurs.
    if not _safe_equals(csrf_token_from_session, given_csrf_token):
        raise CsrfException('expected %r, got %r' %
                            (csrf_token_from_session, given_csrf_token))
    # The CSRF token is single-use: remove it before acting on the request.
    del self.session[self.csrf_token_session_key]
    # Check for error identifier
    if error is not None:
        if error == 'access_denied':
            # The user clicked "Deny"
            if error_description is None:
                raise NotApprovedException(
                    'No additional description from Dropbox')
            else:
                raise NotApprovedException(
                    'Additional description from Dropbox: %s' %
                    error_description)
        else:
            # All other errors
            full_message = error
            if error_description is not None:
                full_message += ": " + error_description
            raise ProviderException(full_message)
    # If everything went ok, make the network call to get an access token.
    access_token, user_id = self._finish(code, self.redirect_uri)
    return access_token, user_id, url_state
class BadRequestException(Exception):
    """Raised when the redirect request was missing parameters or carried
    invalid ones.

    The recommended action is to show an HTTP 400 error page.
    """
class BadStateException(Exception):
    """Raised when all parameters are correct but the session holds no CSRF
    token — which usually means the session expired.

    The recommended action is to redirect the user's browser to try the
    approval process again.
    """
class CsrfException(Exception):
    """Raised when the 'state' parameter does not carry the CSRF token from
    the user's session; the request is blocked to prevent CSRF attacks.

    The recommended action is to respond with an HTTP 403 error page.
    """
class NotApprovedException(Exception):
    """Raised when the user chose not to approve your app."""
class ProviderException(Exception):
    """Raised when Dropbox redirected to your redirect URI with an
    unexpected error identifier and error message.

    The recommended action is to log the error, tell the user something
    went wrong, and let them try again.
    """
def _safe_equals(a, b):
if len(a) != len(b): return False
res = 0
for ca, cb in zip(a, b):
res |= ord(ca) ^ ord(cb)
return res == 0
def _params_to_urlencoded(params):
    """
    Returns an application/x-www-form-urlencoded ``str`` representing the
    key/value pairs in ``params``.

    Keys and values are ``str()``'d before calling ``urllib.urlencode``,
    with the exception of unicode objects, which are utf8-encoded.
    """
    def encode(value):
        if isinstance(value, six.text_type):
            return value.encode('utf8')
        return str(value)

    encoded_pairs = dict((encode(k), encode(v)) for k, v in six.iteritems(params))
    return url_encode(encoded_pairs)
| {
"repo_name": "ewjoachim/dropbox-sdk-python",
"path": "dropbox/oauth.py",
"copies": "1",
"size": "16593",
"license": "mit",
"hash": 6121741112827093000,
"line_mean": 36.2040358744,
"line_max": 88,
"alpha_frac": 0.6071837522,
"autogenerated": false,
"ratio": 4.304280155642023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5411463907842022,
"avg_score": null,
"num_lines": null
} |
# BUG FIX: the commas after 'drops' and 'scoredisplayhd' were missing, so
# Python's implicit string-literal concatenation produced the bogus names
# 'dropsreplay' and 'scoredisplayhdservice', silently dropping 'replay'
# and 'service' from the public API list.
__all__ = [
    'ballsave',
    'ballsearch',
    'drops',
    'replay',
    'scoredisplay',
    'scoredisplayhd',
    'service',
    'trough',
    'OSC',
    'dmdhelper',
    'attract',
    'tilt',
    'switchmonitor',
    'rgbshow'
    ]
from ballsave import *
from ballsearch import *
from drops import *
from replay import *
from score_display import *
from score_display_hd import *
from trough import *
from service import *
from osc import *
from dmdhelper import *
from attract import *
from tilt import *
from switchmonitor import *
from rgbshow import *
from ..game import Mode
class TransitionOutHelperMode(Mode):
    """Short-lived helper mode that runs a layer's outgoing transition and
    removes itself from the mode queue once the transition completes."""

    def __init__(self, game, priority, transition, layer):
        super(TransitionOutHelperMode, self).__init__(game=game, priority=priority)
        self.layer = layer
        self.layer.transition = transition
        out_transition = self.layer.transition
        out_transition.in_out = 'out'
        out_transition.completed_handler = self.transition_completed

    def mode_started(self):
        # Kick off the outgoing transition as soon as this mode is added.
        self.layer.transition.start()

    def transition_completed(self):
        # The transition finished; this helper's job is done.
        self.game.modes.remove(self)
class SwitchSequenceRecognizer(Mode):
    """Listens to switch events to detect and act upon sequences.

    Sequences are registered with :meth:`add_sequence`; whenever the tail of
    the recently-seen switch numbers matches a registered sequence, its
    handler is invoked.
    """

    # NOTE: the class previously also declared `switches = {}` and
    # `switch_log = []` at class level.  Mutable class-level attributes are
    # shared across all instances and are a well-known pitfall; since
    # __init__ always creates fresh per-instance containers, the class-level
    # declarations were redundant and have been removed.

    def __init__(self, game, priority):
        super(SwitchSequenceRecognizer, self).__init__(game=game, priority=priority)
        # Maps tuple-of-switch-numbers -> handler callable.
        self.switches = {}
        # Switch numbers seen so far, oldest first.
        self.switch_log = []

    def add_sequence(self, sequence, handler):
        """Register `handler` to fire when `sequence` (an ordered list of
        switch objects) is observed."""
        unique_switch_names = list(set(map(lambda sw: sw.name, sequence)))
        sequence_switch_nums = map(lambda sw: sw.number, sequence)
        self.switches[tuple(sequence_switch_nums)] = handler
        for sw in unique_switch_names:
            # No concern about duplicate switch handlers, as add_switch_handler() protects against this.
            self.add_switch_handler(name=sw, event_type='active', delay=None, handler=self.switch_active)

    def reset(self):
        """Resets the remembered sequence."""
        self.switch_log = []

    def switch_active(self, sw):
        # Record the switch, then fire every handler whose registered
        # sequence matches the tail of the log.
        self.switch_log.append(sw.number)
        log_tuple = tuple(self.switch_log)
        for sequence, handler in self.switches.items():
            if log_tuple[-len(sequence):] == sequence:
                handler()
| {
"repo_name": "mjocean/PyProcGameHD-SkeletonGame",
"path": "procgame/modes/__init__.py",
"copies": "1",
"size": "2381",
"license": "mit",
"hash": 2914828939698002000,
"line_mean": 29.9220779221,
"line_max": 105,
"alpha_frac": 0.6442671147,
"autogenerated": false,
"ratio": 3.827974276527331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4972241391227331,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'BaseDatadog',
]
import os
import logging
import re
import socket
import time
from contextlib import contextmanager
from pprint import pformat
try:
import simplejson as json
except ImportError:
import json
http_log = logging.getLogger('dd.dogapi.http')
log = logging.getLogger('dd.dogapi')
from dogapi.exceptions import *
from dogapi.constants import *
from dogapi.common import *
if is_p3k():
import http.client as http_client
from urllib.parse import urlencode
else:
import httplib as http_client
from urllib import urlencode
# NOTE(review): this re-declares __all__ with the same single entry as the
# declaration near the top of the module; one of the two is redundant and
# could be removed.
__all__ = [
    'BaseDatadog'
]
class BaseDatadog(object):
    """Shared HTTP transport for the dogapi client classes.

    Responsibilities: build request URLs (attaching ``api_key`` /
    ``application_key`` as query parameters), JSON-encode dict bodies,
    JSON-decode responses, optionally swallow errors into return values,
    and back off for ``backoff_period`` seconds after ``max_timeouts``
    consecutive timeouts.
    """

    def __init__(self, api_key=None, application_key=None, api_version='v1', api_host=None, timeout=2, max_timeouts=3, backoff_period=300, swallow=True, use_ec2_instance_id=False, json_responses=False):
        self.http_conn_cls = http_client.HTTPSConnection
        self._api_host = None
        # Assigning api_host goes through the property below, which strips
        # the URL scheme and selects the HTTP vs HTTPS connection class.
        self.api_host = api_host or os.environ.get('DATADOG_HOST', 'https://app.datadoghq.com')
        # http transport params
        self.backoff_period = backoff_period
        self.max_timeouts = max_timeouts
        self._backoff_timestamp = None
        self._timeout_counter = 0
        self.api_key = api_key
        self.api_version = api_version
        self.application_key = application_key
        self.timeout = timeout
        self.swallow = swallow
        self._default_host = socket.gethostname()
        self._use_ec2_instance_id = None
        # Assigning through the property below may replace _default_host
        # with the EC2 instance id.
        self.use_ec2_instance_id = use_ec2_instance_id
        self.json_responses = json_responses

    def http_request(self, method, path, body=None, response_formatter=None, error_formatter=None, **params):
        """Perform one API request and return the decoded response.

        :param method: HTTP method string, e.g. 'GET' or 'POST'.
        :param path: API path; appended to ``/api/<api_version>/``.
        :param body: optional request body; a dict is JSON-encoded and sent
            with a Content-Type of application/json.
        :param response_formatter: applied to the decoded response unless
            ``json_responses`` is set.
        :param error_formatter: applied to error payloads when errors are
            swallowed (``swallow`` is true).
        :param params: extra query-string parameters.
        """
        try:
            # Check if it's ok to submit
            if not self._should_submit():
                # _backoff_status() returns (elapsed, remaining); the "{1}"
                # placeholder selects the remaining backoff time.
                raise HttpBackoff("Too many timeouts. Won't try again for {1} seconds.".format(*self._backoff_status()))

            # Construct the url
            if self.api_key:
                params['api_key'] = self.api_key
            if self.application_key:
                params['application_key'] = self.application_key
            url = "/api/%s/%s?%s" % (self.api_version, path.lstrip('/'), urlencode(params))

            try:
                conn = self.http_conn_cls(self.api_host, timeout=self.timeout)
            except TypeError:
                # timeout= parameter is only supported 2.6+
                conn = self.http_conn_cls(self.api_host)

            # Construct the body, if necessary
            headers = {}
            if isinstance(body, dict):
                body = json.dumps(body)
                headers['Content-Type'] = 'application/json'

            try:
                start_time = time.time()

                # Make the request
                try:
                    conn.request(method, url, body, headers)
                except timeout_exceptions:
                    # Keep a count of the timeouts to know when to back off
                    self._timeout_counter += 1
                    raise HttpTimeout('%s %s timed out after %d seconds.' % (method, url, self.timeout))
                except socket.error as e:
                    # Translate the low level socket error into a more
                    # descriptive one
                    raise ClientError("Could not request %s %s%s: %s" % (method, self.api_host, url, e))

                # If the request succeeded, reset the timeout counter
                self._timeout_counter = 0

                # Parse the response as json
                response = conn.getresponse()
                duration = round((time.time() - start_time) * 1000., 4)
                log.info("%s %s %s (%sms)" % (response.status, method, url, duration))
                response_str = response.read()
                if response_str:
                    try:
                        if is_p3k():
                            response_obj = json.loads(response_str.decode('utf-8'))
                        else:
                            response_obj = json.loads(response_str)
                    except ValueError:
                        raise ValueError('Invalid JSON response: {0}'.format(response_str))

                    # A payload containing an 'errors' key is treated as an
                    # API-level failure even though the HTTP call succeeded.
                    if response_obj and 'errors' in response_obj:
                        raise ApiError(response_obj)
                else:
                    response_obj = None

                if response_obj is None and self.json_responses:
                    response_obj = {}
                if self.json_responses or response_formatter is None:
                    return response_obj
                else:
                    return response_formatter(response_obj)
            finally:
                conn.close()
        except ClientError as e:
            if self.swallow:
                log.error(str(e))
                if self.json_responses or error_formatter is None:
                    return {'errors': e.args[0]}
                else:
                    return error_formatter({'errors': e.args[0]})
            else:
                raise
        except ApiError as e:
            if self.swallow:
                for error in e.args[0]['errors']:
                    log.error(str(error))
                if self.json_responses or error_formatter is None:
                    return e.args[0]
                else:
                    return error_formatter(e.args[0])
            else:
                raise

    # The two "properties" below use the locals() trick: the enclosing
    # function is called once and its local fget/fset/fdel functions are
    # passed as keyword arguments to property().

    def use_ec2_instance_id():
        def fget(self):
            return self._use_ec2_instance_id

        def fset(self, value):
            self._use_ec2_instance_id = value

            # Keep the default reported hostname in sync with the flag.
            if value:
                self._default_host = get_ec2_instance_id()
            else:
                self._default_host = socket.gethostname()

        def fdel(self):
            del self._use_ec2_instance_id

        return locals()
    use_ec2_instance_id = property(**use_ec2_instance_id())

    def api_host():
        def fget(self):
            return self._api_host

        def fset(self, value):
            # Accept either a bare host or an http(s):// URL; the scheme
            # (if any) selects the connection class and is stripped from
            # the stored host.
            match = re.match('^(https?)://(.*)', value)
            http_conn_cls = http_client.HTTPSConnection

            if match:
                host = match.group(2)
                if match.group(1) == 'http':
                    http_conn_cls = http_client.HTTPConnection
            else:
                host = value

            self._api_host = host
            self.http_conn_cls = http_conn_cls

        return locals()
    api_host = property(**api_host())

    # Private functions

    def _should_submit(self):
        """ Returns True if we're in a state where we should make a request
        (backoff expired, no backoff in effect), false otherwise.
        """
        now = time.time()
        should_submit = False

        # If we're not backing off, but the timeout counter exceeds the max
        # number of timeouts, then enter the backoff state, recording the time
        # we started backing off
        if not self._backoff_timestamp and self._timeout_counter >= self.max_timeouts:
            log.info("Max number of dogapi timeouts exceeded, backing off for {0} seconds".format(self.backoff_period))
            self._backoff_timestamp = now
            should_submit = False

        # If we are backing off but the we've waiting sufficiently long enough
        # (backoff_retry_age), exit the backoff state and reset the timeout
        # counter so that we try submitting metrics again
        elif self._backoff_timestamp:
            backed_off_time, backoff_time_left = self._backoff_status()
            if backoff_time_left < 0:
                log.info("Exiting backoff state after {0} seconds, will try to submit metrics again".format(backed_off_time))
                self._backoff_timestamp = None
                self._timeout_counter = 0
                should_submit = True
            else:
                log.info("In backoff state, won't submit metrics for another {0} seconds".format(backoff_time_left))
                should_submit = False
        else:
            should_submit = True

        return should_submit

    def _backoff_status(self):
        """Return (seconds spent backing off so far, seconds remaining)."""
        now = time.time()
        backed_off_time = now - self._backoff_timestamp
        backoff_time_left = self.backoff_period - backed_off_time
        return round(backed_off_time, 2), round(backoff_time_left, 2)
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/base.py",
"copies": "2",
"size": "8358",
"license": "bsd-3-clause",
"hash": 2502027126064669000,
"line_mean": 36.3125,
"line_max": 202,
"alpha_frac": 0.5516870065,
"autogenerated": false,
"ratio": 4.35539343408025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.590708044058025,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'BaseForm',
'Form',
)
class BaseForm(object):
    """
    Base Form Class. Provides core behaviour like field construction,
    validation, and data and error proxying.
    """
    # NOTE: this module targets Python 2 — dict iteration uses
    # itervalues()/iteritems() throughout.

    def __init__(self, fields, prefix=''):
        """
        :param fields:
            A dict or sequence of 2-tuples of partially-constructed fields.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        """
        # Append a dash unless the prefix already ends in a separator char.
        if prefix and prefix[-1] not in '-_;:/.':
            prefix += '-'
        self._prefix = prefix
        self._errors = None
        self._fields = {}
        if hasattr(fields, 'items'):
            fields = fields.items()

        translations = self._get_translations()

        for name, unbound_field in fields:
            field = unbound_field.bind(form=self, name=name, prefix=prefix, translations=translations)
            self._fields[name] = field

    def __iter__(self):
        """ Iterate form fields in arbitrary order """
        return self._fields.itervalues()

    def __contains__(self, name):
        """ Returns `True` if the named field is a member of this form. """
        return (name in self._fields)

    def __getitem__(self, name):
        """ Dict-style access to this form's fields."""
        return self._fields[name]

    def __setitem__(self, name, value):
        """ Bind a field to this form. """
        self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)

    def __delitem__(self, name):
        """ Remove a field from this form. """
        del self._fields[name]

    def _get_translations(self):
        """
        Override in subclasses to provide alternate translations factory.

        Must return an object that provides gettext() and ngettext() methods.
        """
        return None

    def populate_obj(self, obj):
        """
        Populates the attributes of the passed `obj` with data from the form's
        fields.

        :note: This is a destructive operation; Any attribute with the same name
               as a field will be overridden. Use with caution.
        """
        for name, field in self._fields.iteritems():
            field.populate_obj(obj, name)

    def process(self, formdata=None, obj=None, **kwargs):
        """
        Take form, object data, and keyword arg input and have the fields
        process them.

        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST` or
            equivalent.
        :param obj:
            If `formdata` is empty or not provided, this object is checked for
            attributes matching form field names, which will be used for field
            values.
        :param `**kwargs`:
            If `formdata` is empty or not provided and `obj` does not contain
            an attribute named the same as a field, form will assign the value
            of a matching keyword argument to the field, if one exists.
        """
        # Adapt webob-style multidicts (which spell getlist as getall).
        if formdata is not None and not hasattr(formdata, 'getlist'):
            if hasattr(formdata, 'getall'):
                formdata = WebobInputWrapper(formdata)
            else:
                raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")

        # Value precedence per field: obj attribute, then kwargs, then
        # formdata alone.
        for name, field, in self._fields.iteritems():
            if obj is not None and hasattr(obj, name):
                field.process(formdata, getattr(obj, name))
            elif name in kwargs:
                field.process(formdata, kwargs[name])
            else:
                field.process(formdata)

    def validate(self, extra_validators=None):
        """
        Validates the form by calling `validate` on each field.

        :param extra_validators:
            If provided, is a dict mapping field names to a sequence of
            callables which will be passed as extra validators to the field's
            `validate` method.

        Returns `True` if no errors occur.
        """
        self._errors = None
        success = True
        for name, field in self._fields.iteritems():
            if extra_validators is not None and name in extra_validators:
                extra = extra_validators[name]
            else:
                extra = tuple()
            # Validate every field even after a failure so all errors are
            # collected.
            if not field.validate(self, extra):
                success = False
        return success

    @property
    def data(self):
        # Mapping of field name -> field data value.
        return dict((name, f.data) for name, f in self._fields.iteritems())

    @property
    def errors(self):
        # Lazily computed mapping of field name -> list of errors; cleared
        # by validate().
        if self._errors is None:
            self._errors = dict((name, f.errors) for name, f in self._fields.iteritems() if f.errors)
        return self._errors
class FormMeta(type):
    """
    The metaclass for `Form` and any subclasses of `Form`.

    `FormMeta`'s responsibility is to create the `_unbound_fields` list, which
    is a list of `UnboundField` instances sorted by their order of
    instantiation. The list is created at the first instantiation of the form.
    If any fields are added/removed from the form, the list is cleared to be
    re-generated on the next instantiaton.

    Any properties which begin with an underscore or are not `UnboundField`
    instances are ignored by the metaclass.
    """
    def __init__(cls, name, bases, attrs):
        type.__init__(cls, name, bases, attrs)
        # None acts as the "needs rebuilding" sentinel for __call__ below.
        cls._unbound_fields = None

    def __call__(cls, *args, **kwargs):
        """
        Construct a new `Form` instance, creating `_unbound_fields` on the
        class if it is empty.
        """
        if cls._unbound_fields is None:
            fields = []
            # dir() includes inherited attributes, so fields defined on
            # base classes are picked up too; the _formfield marker
            # identifies unbound fields.
            for name in dir(cls):
                if not name.startswith('_'):
                    unbound_field = getattr(cls, name)
                    if hasattr(unbound_field, '_formfield'):
                        fields.append((name, unbound_field))
            # We keep the name as the second element of the sort
            # to ensure a stable sort.
            fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
            cls._unbound_fields = fields
        return type.__call__(cls, *args, **kwargs)

    def __setattr__(cls, name, value):
        """
        Add an attribute to the class, clearing `_unbound_fields` if needed.
        """
        if not name.startswith('_') and hasattr(value, '_formfield'):
            cls._unbound_fields = None
        type.__setattr__(cls, name, value)

    def __delattr__(cls, name):
        """
        Remove an attribute from the class, clearing `_unbound_fields` if
        needed.
        """
        if not name.startswith('_'):
            cls._unbound_fields = None
        type.__delattr__(cls, name)
class Form(BaseForm):
    """
    Declarative Form base class. Extends BaseForm's core behaviour allowing
    fields to be defined on Form subclasses as class attributes.

    In addition, form and instance input data are taken at construction time
    and passed to `process()`.
    """
    __metaclass__ = FormMeta  # Python 2 metaclass declaration.

    def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
        """
        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST` or
            equivalent.
        :param obj:
            If `formdata` is empty or not provided, this object is checked for
            attributes matching form field names, which will be used for field
            values.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        :param `**kwargs`:
            If `formdata` is empty or not provided and `obj` does not contain
            an attribute named the same as a field, form will assign the value
            of a matching keyword argument to the field, if one exists.
        """
        # _unbound_fields is built by the FormMeta metaclass on first
        # instantiation.
        super(Form, self).__init__(self._unbound_fields, prefix=prefix)
        for name, field in self._fields.iteritems():
            # Set all the fields to attributes so that they obscure the class
            # attributes with the same names.
            setattr(self, name, field)
        self.process(formdata, obj, **kwargs)

    def __iter__(self):
        """ Iterate form fields in their order of definition on the form. """
        for name, _ in self._unbound_fields:
            if name in self._fields:
                yield self._fields[name]

    def __setitem__(self, name, value):
        # Declarative forms are fixed after class creation.
        raise TypeError('Fields may not be added to Form instances, only classes.')

    def __delitem__(self, name):
        # Also reset the instance attribute created in __init__.
        del self._fields[name]
        setattr(self, name, None)

    def __delattr__(self, name):
        try:
            self.__delitem__(name)
        except KeyError:
            super(Form, self).__delattr__(name)

    def validate(self):
        """
        Validates the form by calling `validate` on each field, passing any
        extra `Form.validate_<fieldname>` validators to the field validator.
        """
        extra = {}
        for name in self._fields:
            inline = getattr(self.__class__, 'validate_%s' % name, None)
            if inline is not None:
                extra[name] = [inline]
        return super(Form, self).validate(extra)
class WebobInputWrapper(object):
    """
    Adapt a webob ``MultiDict`` to the small multidict API WTForms fields
    expect as `formdata`.

    WTForms supports the subset of the multidict API shared by
    cgi.FieldStorage, Django's QueryDict and Werkzeug's MultiDict.  Webob's
    multidict spells the "all values for a key" accessor ``getall`` instead
    of ``getlist``; this thin wrapper bridges only that difference instead
    of emulating every multidict method (a full wrapper would invite subtle
    compatibility bugs).
    """

    def __init__(self, multidict):
        self._multidict = multidict

    def __iter__(self):
        return iter(self._multidict)

    def __len__(self):
        return len(self._multidict)

    def __contains__(self, name):
        return name in self._multidict

    def getlist(self, name):
        return self._multidict.getall(name)
| {
"repo_name": "BarcampBangalore/Barcamp-Bangalore-Android-App",
"path": "gcm_flask/wtforms/form.py",
"copies": "15",
"size": "10228",
"license": "apache-2.0",
"hash": -6870739214644900000,
"line_mean": 34.2689655172,
"line_max": 113,
"alpha_frac": 0.5913179507,
"autogenerated": false,
"ratio": 4.521662245800177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'BASE_IMAGE_RELEASE_CODE_NAME',
# Pod.
'PodConfig',
'generate_machine_name',
'generate_pod_id',
'validate_pod_id',
'validate_pod_name',
'validate_pod_version',
# XAR.
'validate_xar_name',
# App.
'validate_app_name',
# Image.
'validate_image_id',
'validate_image_name',
'validate_image_tag',
'validate_image_version',
# Host system.
'machine_id_to_pod_id',
'pod_id_to_machine_id',
'read_host_machine_id',
'read_host_pod_id',
]
import dataclasses
import re
import typing
import uuid
from pathlib import Path
from g1.bases.assertions import ASSERT
BASE_IMAGE_RELEASE_CODE_NAME = 'focal'
# Allowed values for PodConfig.App.type.  These mirror systemd service
# Type= settings; None means "not specified" (leave it to the default).
_SERVICE_TYPES = frozenset((
    'simple',
    'exec',
    'forking',
    'oneshot',
    'dbus',
    'notify',
    'idle',
    None,
))
@dataclasses.dataclass(frozen=True)
class PodConfig:
    """Immutable description of a pod: its apps, images, and filesystem
    setup.  All validation happens eagerly in the __post_init__ hooks via
    ASSERT, so constructing an instance with bad data raises immediately.
    """

    @dataclasses.dataclass(frozen=True)
    class App:
        """Descriptor of systemd unit file of container app."""
        name: str
        exec: typing.List[str] = dataclasses.field(default_factory=list)
        type: typing.Optional[str] = None
        user: str = 'nobody'
        group: str = 'nogroup'
        # Advanced usage for overriding the entire service section
        # generation.
        service_section: typing.Optional[str] = None

        # TODO: Support ".timer" and ".socket" unit file.

        def __post_init__(self):
            validate_app_name(self.name)
            # Either give exec/type, or override the whole service
            # section — never both.
            if self.service_section is None:
                ASSERT.not_empty(self.exec)
                ASSERT.in_(self.type, _SERVICE_TYPES)
            else:
                ASSERT.empty(self.exec)
                ASSERT.none(self.type)

    @dataclasses.dataclass(frozen=True)
    class Image:
        """Reference to an image by exactly one of: id, name+version, tag."""
        id: typing.Optional[str] = None
        name: typing.Optional[str] = None
        version: typing.Optional[str] = None
        tag: typing.Optional[str] = None

        def __post_init__(self):
            # Exactly one addressing scheme must be used; name and
            # version must be given together.
            ASSERT.only_one((self.id, self.name or self.version, self.tag))
            ASSERT.not_xor(self.name, self.version)
            if self.id:
                validate_image_id(self.id)
            elif self.name:
                validate_image_name(self.name)
                validate_image_version(self.version)
            else:
                validate_image_tag(self.tag)

    @dataclasses.dataclass(frozen=True)
    class Mount:
        """Configure a bind mount."""
        source: str
        target: str
        read_only: bool = True

        def __post_init__(self):
            # Empty source path means host's /var/tmp.
            if self.source:
                ASSERT.predicate(Path(self.source), Path.is_absolute)
            ASSERT.predicate(Path(self.target), Path.is_absolute)

    @dataclasses.dataclass(frozen=True)
    class Overlay:
        """Configure an overlay.

        This is more advanced and flexible than ``Mount`` above.
        """
        sources: typing.List[str]
        target: str
        read_only: bool = True

        def __post_init__(self):
            ASSERT.not_empty(self.sources)
            for i, source in enumerate(self.sources):
                # Empty source path means host's /var/tmp.
                if source:
                    ASSERT.predicate(Path(source), Path.is_absolute)
                else:
                    # An empty source is only allowed in the last position.
                    ASSERT.equal(i, len(self.sources) - 1)
            ASSERT.predicate(Path(self.target), Path.is_absolute)

    name: str
    version: str
    apps: typing.List[App]
    # Image are ordered from low to high.
    images: typing.List[Image]
    # NOTE: defaults are immutable empty tuples (despite the List
    # annotation) to avoid the shared-mutable-default pitfall.
    mounts: typing.List[Mount] = ()
    overlays: typing.List[Overlay] = ()

    def __post_init__(self):
        validate_pod_name(self.name)
        validate_pod_version(self.version)
        ASSERT.not_empty(self.images)
        ASSERT.unique(app.name for app in self.apps)
        # Mount and overlay targets must not collide with each other.
        ASSERT.unique(
            [mount.target for mount in self.mounts] + \
            [overlay.target for overlay in self.overlays]
        )
# Generic name and version pattern.
# For now, let's only allow a restrictive set of names.
_NAME_PATTERN = re.compile(r'[a-z0-9]+(-[a-z0-9]+)*')
_VERSION_PATTERN = re.compile(r'[a-z0-9]+((?:-|\.)[a-z0-9]+)*')


def validate_name(name):
    """Assert ``name`` is dash-separated lowercase alphanumerics; return it."""
    return ASSERT.predicate(name, _NAME_PATTERN.fullmatch)


def validate_version(version):
    """Assert ``version`` is dash/dot-separated lowercase alphanumerics; return it."""
    return ASSERT.predicate(version, _VERSION_PATTERN.fullmatch)
# For now these are just an alias of the generic validator.
validate_pod_name = validate_name
validate_pod_version = validate_version
validate_app_name = validate_name
validate_image_name = validate_name
validate_image_version = validate_version
validate_image_tag = validate_name

# Canonical lowercase-hex UUID text form: groups of 8-4-4-4-12 characters.
_UUID_PATTERN = re.compile(
    r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
)
def validate_pod_id(pod_id):
    """Assert ``pod_id`` is a lowercase-hex UUID string; return it."""
    return ASSERT.predicate(pod_id, _UUID_PATTERN.fullmatch)
def generate_pod_id():
    """Generate a fresh random pod id (a version-4 UUID string)."""
    return validate_pod_id(str(uuid.uuid4()))
def generate_machine_name(pod_id):
    """Derive the machine name for a pod by prefixing its id with "pod-"."""
    return 'pod-{}'.format(pod_id)
# Allow xar names like "foo_bar.sh".
_XAR_NAME_PATTERN = re.compile(r'[\w\-.]+')


def validate_xar_name(name):
    """Assert ``name`` contains only word chars, dashes, and dots; return it."""
    return ASSERT.predicate(name, _XAR_NAME_PATTERN.fullmatch)
# SHA-256.
_IMAGE_ID_PATTERN = re.compile(r'[0-9a-f]{64}')


def validate_image_id(image_id):
    """Assert ``image_id`` is a 64-char lowercase-hex (SHA-256) digest; return it."""
    return ASSERT.predicate(image_id, _IMAGE_ID_PATTERN.fullmatch)
def machine_id_to_pod_id(machine_id):
    """Insert dashes into a 32-character machine id to form a pod id (UUID)."""
    ASSERT.equal(len(machine_id), 32)
    # Split into the canonical 8-4-4-4-12 UUID groups.
    groups = (
        machine_id[0:8],
        machine_id[8:12],
        machine_id[12:16],
        machine_id[16:20],
        machine_id[20:32],
    )
    return '-'.join(groups)
def pod_id_to_machine_id(pod_id):
    """Strip the dashes from a pod id (UUID) to get the 32-char machine id."""
    return ''.join(pod_id.split('-'))
def read_host_machine_id():
    """Read this host's machine id from /etc/machine-id (stripped)."""
    return Path('/etc/machine-id').read_text().strip()
def read_host_pod_id():
    """Return the host's machine id formatted as a pod id (UUID form)."""
    return machine_id_to_pod_id(read_host_machine_id())
| {
"repo_name": "clchiou/garage",
"path": "py/g1/containers/g1/containers/models.py",
"copies": "1",
"size": "5857",
"license": "mit",
"hash": -5538121317481386000,
"line_mean": 25.1473214286,
"line_max": 75,
"alpha_frac": 0.6003073246,
"autogenerated": false,
"ratio": 3.3430365296803655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44433438542803655,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'BasePlace',
'Place',
)
from .node import BasePlace
from .token_list import TokenList
class Place(BasePlace):
    """A place node that holds a typed list of tokens."""

    def __init__(self, name, *, net=None, tokens=None, token_type=None):
        super().__init__(name=name, net=net)
        self._tokens = None
        self.update(tokens=tokens, token_type=token_type)

    @property
    def tokens(self):
        """The TokenList currently stored in this place."""
        return self._tokens

    @property
    def token_type(self):
        # BUG FIX: this previously returned ``self._token_type``, an
        # attribute that is never assigned anywhere in this class, so
        # reading the property raised AttributeError.  The token type is
        # carried by the TokenList — update() below already reads it via
        # ``self._tokens.token_type``.
        return self._tokens.token_type

    def update(self, *, tokens=None, token_type=None):
        """Replace the token list; unset arguments keep their current value."""
        self._notify_on_change()
        if self._tokens is not None:
            if tokens is None:
                tokens = self._tokens
            if token_type is None:
                token_type = self._tokens.token_type
        self._tokens = TokenList(tokens, token_type=token_type)

    def add_token(self, token, *, count=1):
        """Add ``count`` copies of ``token`` to this place."""
        self._notify_on_change()
        self._tokens.add(token, count=count)

    def _notify_on_change(self):
        # Let every output node react before this place's tokens change.
        for output in self.outputs():
            output.notify_input_changed()

    def label(self):
        """Node label: the base label plus the token set, e.g. "{1, 2}"."""
        return super().label() + '\n' + '{{{}}}'.format(', '.join(repr(token) for token in self._tokens))
| {
"repo_name": "simone-campagna/petra",
"path": "petra/place.py",
"copies": "1",
"size": "1186",
"license": "apache-2.0",
"hash": -3807733241480469000,
"line_mean": 27.9268292683,
"line_max": 105,
"alpha_frac": 0.5809443508,
"autogenerated": false,
"ratio": 3.694704049844237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9769416616628284,
"avg_score": 0.0012463568031906734,
"num_lines": 41
} |
__all__ = [
'BaseSession',
'Request',
'Response',
'Sender',
]
import functools
import itertools
import json
import logging
import urllib.parse
import lxml.etree
import requests
import requests.adapters
import requests.cookies
import urllib3.exceptions
import urllib3.util.ssl_
from g1.asyncs.bases import adapters
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
from g1.bases import classes
from g1.bases import collections as g1_collections
from g1.bases.assertions import ASSERT
from g1.threads import executors
from . import policies
from . import recvfiles
LOG = logging.getLogger(__name__)
class Sender:
"""Request sender with local cache, rate limit, and retry."""
def __init__(
    self,
    send,
    *,
    cache_size=8,
    circuit_breakers=None,
    rate_limit=None,
    retry=None,
):
    """Wrap ``send`` with caching, circuit breaking, rate limiting, and retry.

    :param send: awaitable callable ``(request, **kwargs) -> response``.
    :param cache_size: capacity of the bounded LRU cache used by
        ``cache_key``.
    :param circuit_breakers: per-netloc breaker policy; defaults to the
        no-op ``policies.NO_BREAK``.
    :param rate_limit: awaitable rate limiter; defaults to
        ``policies.unlimited``.
    :param retry: callable mapping retry count to a backoff (or None to
        stop retrying); defaults to ``policies.no_retry``.
    """
    self._send = send
    # Bounded cache for cache_key; unbounded ("sticky") cache for sticky_key.
    self._cache = g1_collections.LruCache(cache_size)
    self._unbounded_cache = {}
    self._circuit_breakers = circuit_breakers or policies.NO_BREAK
    self._rate_limit = rate_limit or policies.unlimited
    self._retry = retry or policies.no_retry
async def __call__(self, request, **kwargs):
    """Send a request and return a response.

    If argument ``cache_key`` is not ``None``, session will check
    its cache before sending the request.  For now, we don't support
    setting ``cache_key`` in ``request``.

    ``sticky_key`` is similar to ``cache_key`` except that it refers
    to an unbounded cache (thus the name "sticky").

    If argument ``cache_revalidate`` is evaluated to true, session
    will revalidate the cache entry.
    """
    # Pop the per-call options so they are not forwarded to the send
    # function.
    cache_key = kwargs.pop('cache_key', None)
    sticky_key = kwargs.pop('sticky_key', None)
    cache_revalidate = kwargs.pop('cache_revalidate', None)
    if cache_key is not None and sticky_key is not None:
        raise AssertionError(
            'expect at most one: cache_key=%r, sticky_key=%r' %
            (cache_key, sticky_key)
        )
    if cache_key is not None:
        return await self._try_cache(
            self._cache,
            cache_key,
            cache_revalidate,
            request,
            kwargs,
        )
    if sticky_key is not None:
        return await self._try_cache(
            self._unbounded_cache,
            sticky_key,
            cache_revalidate,
            request,
            kwargs,
        )
    # One breaker per destination host (netloc of the request URL).
    breaker = self._circuit_breakers.get(
        urllib.parse.urlparse(request.url).netloc
    )
    for retry_count in itertools.count():
        # Check rate limit out of the breaker async-with context to
        # avoid adding extra delay in the context so that, when the
        # breaker is in YELLOW state, another request may "go" into
        # the context as soon as the previous one completes.
        await self._rate_limit()
        async with breaker:
            response, backoff = await self._loop_body(
                request, kwargs, breaker, retry_count
            )
            if response is not None:
                return response
        # Call `sleep` out of the breaker async-with context for the
        # same reason above.
        await timers.sleep(ASSERT.not_none(backoff))
    ASSERT.unreachable('retry loop should not break')
async def _try_cache(self, cache, key, revalidate, request, kwargs):
task = cache.get(key)
if task is None:
task = cache[key] = tasks.spawn(self(request, **kwargs))
result = 'miss'
elif revalidate:
task = cache[key] = tasks.spawn(self(request, **kwargs))
result = 'revalidate'
else:
result = 'hit'
LOG.debug(
'send: cache %s: key=%r, %r, kwargs=%r', \
result, key, request, kwargs,
)
# Here is a risk that, if all task waiting for this task get
# cancelled before this task completes, this task might not
# be joined, but this risk is probably too small.
return await task.get_result()
async def _loop_body(self, request, kwargs, breaker, retry_count):
if retry_count:
LOG.warning('retry %d times: %r', retry_count, request)
try:
response = await self._send(request, **kwargs)
except (
requests.RequestException,
urllib3.exceptions.HTTPError,
) as exc:
status_code = self._get_status_code(exc)
if status_code is not None and 400 <= status_code < 500:
# From the perspective of circuit breaker, a 4xx is
# considered a "success".
breaker.notify_success()
# It does not seem to make sense to retry on 4xx errors
# as our request was explicitly rejected by the server.
raise
breaker.notify_failure()
backoff = self._retry(retry_count)
if backoff is None:
raise
LOG.warning(
'http error: status_code=%s, request=%r, exc=%r',
status_code,
request,
exc,
)
return None, backoff
except Exception:
breaker.notify_failure()
raise
else:
breaker.notify_success()
return response, None
@staticmethod
def _get_status_code(exc):
# requests.Response defines __bool__ that returns to true when
# status code is less than 400; so we have to explicitly check
# `is None` here, rather than `if not response:`.
response = getattr(exc, 'response', None)
if response is None:
return None
return response.status_code
class BaseSession:
    """Base session.

    All this does is backing an HTTP session with an executor; this does
    not provide rate limit nor retry.  You use this as a building block
    for higher level session types.
    """

    # One SSL context shared by all connections (see __init__ below).
    _SSL_CONTEXT = urllib3.util.ssl_.create_urllib3_context()
    _SSL_CONTEXT.load_default_certs()

    def __init__(
        self,
        *,
        executor=None,
        num_pools=0,
        num_connections_per_pool=0,
    ):
        """Initialize the session.

        Args:
            executor: Executor that runs the blocking HTTP calls; when
                omitted, a daemonic one is created internally.
            num_pools: Number of connection pools; 0 keeps the
                ``requests`` adapter default.
            num_connections_per_pool: Max connections per pool; 0 keeps
                the ``requests`` adapter default.
        """
        # If you do not provide an executor, I will just make one for
        # myself, but to save you the effort to shut down the executor,
        # I will also make it daemonic.  This is mostly fine since if
        # the process is exiting, you probably do not care much about
        # unfinished HTTP requests in the executor (if it is not fine,
        # you may always provide an executor to me, and properly shut it
        # down on process exit).
        self._executor = executor or executors.Executor(daemon=True)
        self._session = requests.Session()
        adapter_kwargs = {}
        if num_pools > 0:
            adapter_kwargs['pool_connections'] = num_pools
        if num_connections_per_pool > 0:
            # BUG FIX: this previously assigned ``num_pools``, silently
            # ignoring the requested per-pool connection limit.
            adapter_kwargs['pool_maxsize'] = num_connections_per_pool
        if adapter_kwargs:
            LOG.info(
                'config session: num_pools=%d num_connections_per_pool=%d',
                num_pools,
                num_connections_per_pool,
            )
            self._session.mount(
                'https://', requests.adapters.HTTPAdapter(**adapter_kwargs)
            )
            self._session.mount(
                'http://', requests.adapters.HTTPAdapter(**adapter_kwargs)
            )
        # Make all connections share one SSL context to reduce memory
        # footprint.
        adapter = self._session.get_adapter('https://')
        adapter.poolmanager.connection_pool_kw['ssl_context'] = \
            self._SSL_CONTEXT

    @property
    def headers(self):
        """Default headers sent with every request."""
        return self._session.headers

    @property
    def cookies(self):
        """The underlying session's cookie jar."""
        return self._session.cookies

    def update_cookies(self, cookie_dict):
        """Update cookies with a dict-like object."""
        requests.cookies.cookiejar_from_dict(
            cookie_dict, self._session.cookies
        )

    async def send(self, request, **kwargs):
        """Send an HTTP request and return a response.

        If argument ``priority`` is not ``None``, the request is sent
        with priority (this requires ``PriorityExecutor``).  For now, we
        do not support setting ``priority`` in ``request``.
        """
        priority = kwargs.pop('priority', None)
        if priority is None:
            future = self._executor.submit(
                self.send_blocking, request, **kwargs
            )
        else:
            LOG.debug(
                'send: priority=%r, %r, kwargs=%r', priority, request, kwargs
            )
            future = self._executor.submit_with_priority(
                priority, self.send_blocking, request, **kwargs
            )
        # Ensure the response is closed even if the caller abandons the
        # future.
        future.set_finalizer(lambda response: response.close())
        return await adapters.FutureAdapter(future).get_result()

    def send_blocking(self, request, **kwargs):
        """Send a request in a blocking manner.

        If ``stream`` is set to true, we will return the original
        response object, and will NOT copy-then-close it to our response
        class.  In this case, the caller is responsible for closing the
        response object.

        This does not implement rate limit nor retry.
        """
        LOG.debug('send: %r, kwargs=%r', request, kwargs)
        # ``requests.Session.get`` and friends do a little more than
        # ``requests.Session.request``; so let's use the former.
        method = getattr(self._session, request.method.lower())
        # ``kwargs`` may overwrite ``request._kwargs``.
        final_kwargs = request._kwargs.copy()
        final_kwargs.update(kwargs)
        source = method(request.url, **final_kwargs)
        stream = final_kwargs.get('stream')
        if stream:
            response = source
        else:
            try:
                response = Response(
                    source,
                    source.content,  # Force consuming the content.
                )
            finally:
                source.close()
        try:
            response.raise_for_status()
        except Exception:
            # Force consuming the content.  In case caller sets
            # stream=True, this ensures that exc.response.content is not
            # empty.
            response.content  # pylint: disable=pointless-statement
            # On error, close the original response for the caller since
            # the caller usually forgets to do this.
            response.close()
            raise
        return response
class Request:
    """A lightweight HTTP request: method, url, plus ``send`` kwargs."""

    def __init__(self, method, url, **kwargs):
        self.method = method
        self.url = url
        self._kwargs = kwargs

    __repr__ = classes.make_repr(
        '{method} {self.url} kwargs={self._kwargs!r}',
        method=lambda self: self.method.upper(),
    )

    @property
    def headers(self):
        """Per-request headers dict, created on first access."""
        kwargs = self._kwargs
        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        return kwargs['headers']

    def copy(self):
        """Return a shallow copy (the kwargs dict is copied, not values)."""
        return Request(self.method, self.url, **self._kwargs)
class Response:
    """HTTP response.

    This class provides an interface that is mostly compatible with
    ``requests`` Response class.

    We do this because if a ``requests`` Response object is not closed
    (doc does not seem to suggest explicitly closing responses?), it
    will not release the connection back to the connection pool.
    """

    def __init__(self, source, content, *, _copy_history=True):
        """Make a "copy" from a ``requests`` Response object.

        Note that this consumes the content of the ``source`` object,
        which forces ``source`` to read the whole response body from the
        server (and so we do not need to do this in the Sender class).
        """
        self._content = content
        self.status_code = source.status_code
        self.headers = source.headers
        self.url = source.url
        if _copy_history:
            self.history = [
                Response(
                    r,
                    # TODO: Should we load r.content?
                    None,
                    # TODO: In some rare cases, history seems to have
                    # loops.  We probably should try to detect loops,
                    # but for now, let us only go into one level of the
                    # history.
                    _copy_history=False,
                ) for r in source.history
            ]
        else:
            # Make it non-iterable so that if user (accidentally)
            # iterates this, it will err out.
            self.history = None
        self.encoding = source.encoding
        self.reason = source.reason
        self.cookies = source.cookies
        self.elapsed = source.elapsed
        # We do not copy source.request for now.

    __repr__ = classes.make_repr(
        'status_code={self.status_code} url={self.url}',
    )

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # Nothing to do here; just for interface compatibility.
        pass

    def raise_for_status(self):
        """Raise ``requests.HTTPError`` on 4xx/5xx status codes."""
        if not 400 <= self.status_code < 600:
            return
        if isinstance(self.reason, bytes):
            # Try utf-8 first because some servers choose to localize
            # their reason strings.  If the string is not utf-8, fall
            # back to iso-8859-1.
            try:
                reason = self.reason.decode('utf-8')
            except UnicodeDecodeError:
                reason = self.reason.decode('iso-8859-1')
        else:
            reason = self.reason
        raise requests.HTTPError(
            '%s %s error: %s %s' % (
                self.status_code,
                'client' if 400 <= self.status_code < 500 else 'server',
                reason,
                self.url,
            ),
            response=self,
        )

    @property
    def content(self):
        """The response body as bytes (loaded at construction time)."""
        return self._content

    @classes.memorizing_property
    def text(self):
        # NOTE: Unlike ``requests``, we do NOT fall back to
        # auto-detected encoding.
        return self.content.decode(ASSERT.not_none(self.encoding))

    def json(self, **kwargs):
        """Parse response as a JSON document."""
        return json.loads(self.content, **kwargs)

    #
    # Interface that ``requests.Response`` does not provide (we will
    # monkey-patch it below).
    #

    def html(self, encoding=None, errors=None):
        """Parse response as an HTML document.

        Caller may pass ``encoding`` and ``errors`` to instructing us
        how to decode response content.  This is useful because lxml's
        default is to **silently** skip the rest of the document when
        there is any encoding error in the middle.

        lxml's strict-but-silent policy is counterproductive because web
        is full of malformed documents, and it should either be lenient
        about the error, or raise it to the caller, not a mix of both as
        it is right now.
        """
        if encoding and errors:
            # Decode ourselves so ``errors`` is honored; the parser then
            # receives already-decoded text.
            string = self.content.decode(encoding=encoding, errors=errors)
            parser = _get_html_parser(None)
        else:
            ASSERT.none(errors)
            string = self.content
            parser = _get_html_parser(
                encoding or ASSERT.not_none(self.encoding)
            )
        # Check whether fromstring returns None because apparently
        # HTMLParser is more lenient than XMLParser and may cause
        # fromstring to return None on some malformed HTML input.
        return ASSERT.not_none(lxml.etree.fromstring(string, parser))

    def xml(self):
        """Parse response as an XML document."""
        return lxml.etree.fromstring(self.content, _XML_PARSER)
@functools.lru_cache(maxsize=8)
def _get_html_parser(encoding):
    # Cache one parser per encoding; only a handful of encodings are
    # expected in practice, so a small cache suffices.
    return lxml.etree.HTMLParser(encoding=encoding)
# A single module-level XML parser shared by all ``Response.xml`` calls.
_XML_PARSER = lxml.etree.XMLParser()

#
# Monkey-patch ``requests.Response`` so that the original response class
# gains the same convenience methods as our Response copy.
#

# Just to make sure we do not accidentally override them.
ASSERT.false(hasattr(requests.Response, 'recvfile'))
requests.Response.recvfile = recvfiles.recvfile

ASSERT.false(hasattr(requests.Response, 'html'))
requests.Response.html = Response.html

ASSERT.false(hasattr(requests.Response, 'xml'))
requests.Response.xml = Response.xml
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/clients/g1/http/clients/bases.py",
"copies": "1",
"size": "16547",
"license": "mit",
"hash": 6069847564674486000,
"line_mean": 32.5638945233,
"line_max": 77,
"alpha_frac": 0.5799238533,
"autogenerated": false,
"ratio": 4.398458266879319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5478382120179319,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'BaseTransition',
'Transition',
)
import collections
import itertools
from .errors import AnnotationError
from .expression import make_expression
from .iterable import Iterable
from .node import BaseTransition
from .token_list import TokenList
class Substitution(collections.OrderedDict):
    """Ordered variable bindings used as ``locals_d`` for guard evaluation."""
    pass
class Transition(BaseTransition):
    """A net transition with an optional guard expression."""

    def __init__(self, name, *, net=None, guard=None):
        super().__init__(name=name, net=net)
        self._guard = None
        self.update(guard=guard)

    @property
    def guard(self):
        """The compiled guard expression, or ``None`` when unguarded."""
        return self._guard

    def _add_input_arc(self, arc):
        """Attach an input arc, rejecting variable redefinitions.

        Raises:
            AnnotationError: if ``arc`` binds a variable already bound
                by another input arc of this transition.
        """
        used_variables = set()
        for input_arc in self.input_arcs():
            used_variables.update(input_arc.annotation.variables())
        invalid_variables = used_variables.intersection(arc.annotation.variables())
        if invalid_variables:
            raise AnnotationError("{!r}: cannot add {!r}: variable redefinition: {}".format(
                self, arc, ', '.join(repr(variable) for variable in sorted(invalid_variables))))
        super()._add_input_arc(arc)

    def update(self, *, guard=None):
        """Replace the guard with a newly compiled expression."""
        self._guard = make_expression(guard)

    def iter_substitutions(self):
        """Yield every substitution enabled by the input arcs.

        Each substitution merges one binding per input arc; it is
        yielded only when the guard (if any) evaluates to true.
        """
        input_iterators = []
        for arc in self.input_arcs():
            # FIX: ``filter_substitutions`` was previously called twice
            # per arc (once for the None check, once for the value);
            # reuse the first result.
            substitutions = arc.filter_substitutions()
            if substitutions is not None:
                input_iterators.append(substitutions)
        for dct_tuple in itertools.product(*input_iterators):
            substitution = Substitution()
            for dct in dct_tuple:
                if dct is not None:  # inhibitor arcs contribute None
                    substitution.update(dct)
            # FIX: removed leftover debug print() of each substitution.
            if self._guard is None or self._guard.evaluate(
                    globals_d=self._net.globals_d, locals_d=substitution):
                yield substitution

    def substitutions(self):
        """Return an Iterable over the enabled substitutions."""
        return Iterable(self.iter_substitutions())

    def first_substitution(self):
        """Return one enabled substitution, or ``None`` if none exists."""
        return next(iter(self.iter_substitutions()), None)

    def fire(self, substitution=None):
        """Fire the transition once.

        When ``substitution`` is omitted, the first enabled one is used.

        Returns:
            bool: ``True`` if the transition fired, ``False`` when no
            substitution is enabled.
        """
        if substitution is None:
            substitution = self.first_substitution()
            if substitution is None:
                return False
        # Compute all output arc expressions before touching the
        # marking, so an expression failure leaves the net unchanged.
        produced = []
        for arc in self.output_arcs():
            produced.append((arc, arc.produce_token(substitution)))
        # Produce all output tokens (debug print() removed).
        for arc, result in produced:
            arc.add_token(result)
        # Remove all input tokens.
        for arc in self.input_arcs():
            arc.remove_substitution(substitution)
        # Notify observers of the firing.
        self._net.notify_transition_fired(self)
        return True

    def label(self):
        """Node label; includes the guard text when present."""
        label = super().label()
        if self._guard is not None:
            label += '\n' + str(self._guard)
        return label
| {
"repo_name": "simone-campagna/petra",
"path": "petra/transition.py",
"copies": "1",
"size": "3240",
"license": "apache-2.0",
"hash": 844382075591611800,
"line_mean": 31.0792079208,
"line_max": 113,
"alpha_frac": 0.5978395062,
"autogenerated": false,
"ratio": 4.396200814111262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5494040320311262,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'binnig_update',
'binning',
'uniform_mapping',
'greedy_max_entropy_mapping',
'almost_uniform_mapping'
]
import numpy as np
from .special import COUNT_T, RGB_T, RAW_T, BIN_T
from .special import binning_rgb
from .special import binning_raw
from .utils import wrong_dtype_exception
def binnig_update(img, out, mapping=None):
    """Accumulate per-pixel bin counts of ``img`` into ``out`` in place.

    Dispatches on ``img.dtype`` (RGB vs raw) and returns ``out``.
    NOTE: the misspelled name is kept as-is; it is exported in
    ``__all__`` and callers depend on it.
    """
    n_bins = out.shape[-1]
    dtype = img.dtype
    if dtype == RGB_T:
        channels = img.shape[0]
        binning_rgb(
            img.reshape(channels, -1),
            mapping,
            out.reshape(channels, -1, n_bins),
        )
    elif dtype == RAW_T:
        # Raw images are treated as a single channel.
        binning_raw(img.reshape(1, -1), mapping, out.reshape(1, -1, n_bins))
    else:
        raise wrong_dtype_exception(dtype)
    return out
def binning(run, mapping, counts=None):
    """Accumulate binned counts over every image of ``run``.

    Args:
        run: iterable of images; must also provide ``get_img(0)`` when
            ``counts`` is not supplied, to size the output array.
        mapping: value -> bin index array; ``np.max(mapping) + 1`` bins.
        counts: optional pre-allocated accumulator; allocated when None.

    Returns:
        The (possibly newly allocated) counts array.
    """
    if counts is None:
        # FIX: dropped the dead ``counts or`` guard; inside this branch
        # ``counts`` is always None, so it only obscured the allocation.
        n_channels, width, height = run.get_img(0).shape
        counts = np.zeros(
            shape=(n_channels, width, height, np.max(mapping) + 1),
            dtype=COUNT_T,
        )
    for img in run:
        binnig_update(img, counts, mapping=mapping)
    return counts
def uniform_mapping(max_value=None, run=None, bins=32):
    """Map intensities ``0..max_value`` onto ``bins`` equal-width bins.

    When ``max_value`` is omitted (or falsy) it is derived from the
    run's image type: 10-bit range for raw images, 8-bit otherwise.
    """
    assert max_value is not None or run is not None, 'You must specify either `max_value` or `run` parameters!'
    if not max_value:
        max_value = 2 ** 10 - 1 if run.image_type == 'raw' else 2 ** 8 - 1
    values = np.arange(max_value + 1)
    bin_width = (max_value + 1) / bins
    return (values / bin_width).astype(BIN_T)
def almost_uniform_mapping(bincount, minimal_bin_range=16, bin_minimal=(1.0 / 32)):
    """Build a value->bin mapping with roughly equal probability mass.

    Bins span at least ``minimal_bin_range`` consecutive values and are
    widened until they hold at least ``bin_minimal`` of the total mass
    (an int ``bin_minimal`` is interpreted as an absolute count).

    FIX: the original ``type(bin_minimal) in [int, long]`` raised
    NameError on Python 3 (``long`` no longer exists); the bare
    ``except:`` is narrowed to the exceptions actually raised here.
    """
    total = np.sum(bincount)
    if isinstance(bin_minimal, int):
        bin_minimal = float(bin_minimal) / total
    fractions = bincount.astype('float64') / total
    mapping = np.ndarray(shape=bincount.shape, dtype=BIN_T)
    current_i = 0
    current_bin = 0
    while current_i < bincount.shape[0]:
        bin_prob = np.cumsum(fractions[current_i:])
        try:
            if bin_prob[minimal_bin_range - 1] < bin_minimal:
                # Widen the bin until it reaches the minimal mass.
                i_range = np.min(np.where(bin_prob >= bin_minimal)[0]) + 1
            else:
                i_range = minimal_bin_range
            mapping[current_i:(current_i + i_range)] = current_bin
            current_i += i_range
            current_bin += 1
        except (IndexError, ValueError):
            # Tail too small for another bin (IndexError from the probe,
            # ValueError from ``np.min`` of an empty selection): merge
            # the remainder into the previous bin.
            mapping[current_i:] = current_bin - 1
            current_i = bincount.shape[0]
    return mapping
def greedy_max_entropy_mapping(bincount=None, run=None, bins=32, max_value=None):
    """Greedily split the histogram into ``bins`` of near-equal mass.

    Either ``bincount`` (a value histogram) or ``run`` (an iterable of
    images from which the histogram is accumulated) must be given.

    FIX: the bare ``except:`` clauses (which also swallowed
    KeyboardInterrupt) are replaced by an explicit empty-selection
    check; ``np.max`` on an empty array raises ValueError, which was
    the only error path.
    """

    def get_bin(bc, bins):
        # Largest index whose cumulative count still fits the per-bin
        # target; 0 when even the first value exceeds the target.
        target = np.sum(bc) / bins
        candidates = np.arange(bc.shape[0])[np.cumsum(bc) <= target]
        if candidates.size == 0:
            return 0
        return int(np.max(candidates))

    assert bincount is not None or run is not None, 'You must specify either `bincount` or `run` parameters!'
    if bincount is None:
        bincounts = [np.bincount(img.reshape(-1)) for img in run]
        max_shape = (
            max_value + 1 if max_value is not None else
            np.max([bc.shape[0] for bc in bincounts])
        )
        bincount = np.zeros(shape=max_shape, dtype='int64')
        for bc in bincounts:
            # NOTE(review): assumes every per-image histogram fits into
            # ``max_shape`` when ``max_value`` is given — verify callers.
            bincount[:bc.shape[0]] += bc
    mapping = np.zeros_like(bincount, dtype=BIN_T)
    left = 0
    for i in range(bins):
        right = get_bin(bincount[left:], bins - i)
        mapping[left:(left + right + 1)] = i
        left += right + 1
    return mapping
| {
"repo_name": "yandexdataschool/crayimage",
"path": "crayimage/imgutils/binning.py",
"copies": "1",
"size": "3078",
"license": "mit",
"hash": 308721026403810750,
"line_mean": 27.7663551402,
"line_max": 109,
"alpha_frac": 0.6452241715,
"autogenerated": false,
"ratio": 3.0235756385068764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4168799810006876,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'BufferHandler',
'DirHandler',
'FileHandler',
'make_buffer_handler',
'make_dir_handler',
'make_file_handler',
# Context.
'LOCAL_PATH',
]
import mimetypes
from g1.bases import labels
from .. import consts
from .. import wsgi_apps
from . import composers
from . import etags
def make_dir_handler(local_dir_path):
    """Build a handler chain serving files under ``local_dir_path``."""
    handler = DirHandler(local_dir_path)
    router = composers.MethodRouter({
        consts.METHOD_HEAD: handler.head,
        consts.METHOD_GET: handler.get,
    })
    # Run the path check first so out-of-scope requests fail early.
    return composers.Chain([handler.check, router])
def make_file_handler(local_file_path, headers=()):
    """Build a handler chain serving the single file ``local_file_path``."""
    handler = FileHandler(local_file_path, headers=headers)
    router = composers.MethodRouter({
        consts.METHOD_HEAD: handler.head,
        consts.METHOD_GET: handler.get,
    })
    return composers.Chain([router])
def make_buffer_handler(filename, content, headers=()):
    """Build a handler chain serving ``content`` under ``filename``."""
    handler = BufferHandler(filename, content, headers=headers)
    router = composers.MethodRouter({
        consts.METHOD_HEAD: handler.head,
        consts.METHOD_GET: handler.get,
    })
    return composers.Chain([router])
def get_local_path(request, local_dir_path):
    """Map the request path to a regular file under ``local_dir_path``.

    Raises:
        wsgi_apps.HttpError: 404 when the resolved path escapes
            ``local_dir_path`` or is not a regular file.
    """
    path_str = composers.get_path_str(request)
    # We use ``resolve`` to normalize path, which also follows symlinks.
    # A side effect is that this handler rejects any symlink to file
    # that is out of scope, which may be not bad.
    local_path = (local_dir_path / path_str.lstrip('/')).resolve()
    try:
        local_path.relative_to(local_dir_path)
    except ValueError:
        raise wsgi_apps.HttpError(
            consts.Statuses.NOT_FOUND,
            'out of scope: %s vs %s' % (local_path, local_dir_path),
        ) from None
    if not local_path.is_file():
        # We don't want this to be a generic dir handler, and so we do
        # not handle directories.
        raise wsgi_apps.HttpError(
            consts.Statuses.NOT_FOUND, 'not a file: %s' % local_path
        )
    return local_path
# Overrides applied on top of ``mimetypes.guess_type`` results.
_CONTENT_TYPE_FIXES = {
    # Although RFC4329 obsoletes text/javascript and recommends
    # application/javascript (and the stdlib correctly implements the
    # RFC), the HTML spec still chooses text/javascript (for
    # compatibility reason).  For more details:
    # https://html.spec.whatwg.org/multipage/infrastructure.html#dependencies
    'application/javascript': 'text/javascript',
}
def guess_content_type(filename):
    """Guess a Content-Type header value from a file name."""
    guessed, file_encoding = mimetypes.guess_type(filename)
    if not guessed:
        # Unknown media type: fall back to the encoding, if any.
        if file_encoding:
            return 'application/x-' + file_encoding
        return 'application/octet-stream'
    content_type = _CONTENT_TYPE_FIXES.get(guessed, guessed)
    if file_encoding:
        content_type = '%s+%s' % (content_type, file_encoding)
    return content_type
# Request-context key under which DirHandler.check stores the resolved
# local file path for later handlers.
LOCAL_PATH = labels.Label(__name__, 'local_path')
def _make_headers(path, file):
    """Build content-type/length/etag response headers for a local file.

    NOTE: computing the ETag reads ``file``; the caller is responsible
    for rewinding it afterwards.
    """
    headers = {}
    headers[consts.HEADER_CONTENT_TYPE] = guess_content_type(path.name)
    headers[consts.HEADER_CONTENT_LENGTH] = str(path.stat().st_size)
    headers[consts.HEADER_ETAG] = etags.compute_etag_from_file(file)
    return headers
class DirHandler:
    """Serve files under the given directory.

    NOTE: It does NOT re-calculate cached response headers even when
    file content is changed after handler initialization.
    """

    def __init__(self, local_dir_path):
        if not mimetypes.inited:
            mimetypes.init()
        self._local_dir_path = local_dir_path.resolve()
        # Per-path header cache (see the class NOTE about staleness).
        self._headers_cache = {}

    async def check(self, request, response):
        """Check whether request path is under the given directory.

        Use this to pre-check for DirHandler when it is wrapped deeply
        inside other handlers.
        """
        del response  # Unused.
        request.context.set(
            LOCAL_PATH, get_local_path(request, self._local_dir_path)
        )

    def _prepare(self, request, response):
        # Prefer the path resolved earlier by ``check``; resolve here
        # when ``check`` was not run.
        local_path = request.context.get(LOCAL_PATH)
        if local_path is None:
            local_path = get_local_path(request, self._local_dir_path)
        file = local_path.open('rb')
        response.status = consts.Statuses.OK
        response.headers.update(self._get_headers(local_path, file))
        try:
            etags.maybe_raise_304(request, response)
        except Exception:
            # Do not leak the file handle when an exception (e.g. a 304
            # signal) aborts the normal send path.
            file.close()
            raise
        return file

    def _get_headers(self, local_path, file):
        headers = self._headers_cache.get(local_path)
        if headers is None:
            headers = self._headers_cache[local_path] = _make_headers(
                local_path, file
            )
            # _make_headers read the file to compute the ETag; rewind so
            # the caller can send it from the beginning.
            file.seek(0)
        return headers

    async def head(self, request, response):
        # Headers only; close the file immediately.
        self._prepare(request, response).close()

    async def get(self, request, response):
        response.sendfile(self._prepare(request, response))

    __call__ = get
class FileHandler:
    """Serve a single local file.

    Response headers (content type, length, ETag) are computed once at
    construction time and NOT refreshed if the file changes afterwards.
    """

    def __init__(self, local_file_path, headers=()):
        if not mimetypes.inited:
            mimetypes.init()
        self._path = local_file_path
        with self._path.open('rb') as fp:
            computed = _make_headers(self._path, fp)
        computed.update(headers)
        self._headers = computed

    async def head(self, request, response):
        response.status = consts.Statuses.OK
        response.headers.update(self._headers)
        etags.maybe_raise_304(request, response)

    async def get(self, request, response):
        await self.head(request, response)
        response.commit()
        # ``sendfile`` takes ownership of the handle.
        response.sendfile(self._path.open('rb'))

    __call__ = get
class BufferHandler:
    """Serve an in-memory byte buffer as if it were a file."""

    def __init__(self, filename, content, headers=()):
        if not mimetypes.inited:
            mimetypes.init()
        self._content = content
        self._headers = {}
        self._headers[consts.HEADER_CONTENT_TYPE] = \
            guess_content_type(filename)
        self._headers[consts.HEADER_CONTENT_LENGTH] = str(len(content))
        self._headers[consts.HEADER_ETAG] = etags.compute_etag(content)
        self._headers.update(headers)

    async def head(self, request, response):
        response.status = consts.Statuses.OK
        response.headers.update(self._headers)
        etags.maybe_raise_304(request, response)

    async def get(self, request, response):
        await self.head(request, response)
        response.commit()
        await response.write(self._content)

    __call__ = get
| {
"repo_name": "clchiou/garage",
"path": "py/g1/webs/g1/webs/handlers/files.py",
"copies": "1",
"size": "6805",
"license": "mit",
"hash": 1321641003932177400,
"line_mean": 29.7918552036,
"line_max": 77,
"alpha_frac": 0.6240999265,
"autogenerated": false,
"ratio": 3.8598979013045946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4983997827804595,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'build_image',
]
import contextlib
import dataclasses
import json
import logging
import tempfile
from pathlib import Path
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models as ctr_models
from g1.containers import scripts as ctr_scripts
import shipyard2
from . import utils
LOG = logging.getLogger(__name__)
@scripts.using_sudo()
def build_image(
    *,
    parameters,
    builder_id,
    builder_images,
    name,
    version,
    rules,
    output,
):
    """Run the build rules inside a builder pod and export the result.

    Generates a pod config, runs the pod (which executes foreman inside
    it), exports the pod's overlay as a rootfs, and builds/imports the
    intermediate builder image at ``output``.
    """
    # Although it is tempting to mount source repos under the drydock
    # directory rather than /usr/src, this is not possible because the
    # base image does not have /home/plumber/drydock directory yet, and
    # so systemd-nspawn will reject mounting source repos under drydock.
    root_host_paths = parameters['//bases:roots']
    builder_config = _generate_builder_config(
        name=name,
        version=version,
        apps=_get_apps(
            builder_images,
            root_host_paths,
            rules,
        ),
        images=_get_images(
            builder_images,
            ASSERT.not_none(parameters['//images/bases:base/version']),
        ),
        mounts=_get_mounts(
            parameters['//releases:shipyard-data'],
            name,
            rules,
        ),
        overlays=_get_overlays(root_host_paths),
    )
    with contextlib.ExitStack() as stack:
        # Keep scratch files next to ``output`` so they live on the
        # same filesystem.
        tempdir_path = Path(
            stack.enter_context(
                tempfile.TemporaryDirectory(dir=output.parent)
            )
        )
        builder_config_path = tempdir_path / 'builder.json'
        builder_config_path.write_text(json.dumps(builder_config))
        if shipyard2.is_debug():
            LOG.debug('builder config: %s', builder_config_path.read_text())
        # The builder pod might not be cleaned up when `ctr pods run`
        # fails; so let's always do `ctr pods remove` on our way out.
        stack.callback(ctr_scripts.ctr_remove_pod, builder_id)
        LOG.info('start builder pod')
        ctr_scripts.ctr_run_pod(builder_id, builder_config_path)
        LOG.info('export intermediate builder image to: %s', output)
        rootfs_path = tempdir_path / 'rootfs'
        stack.callback(scripts.rm, rootfs_path, recursive=True)
        ctr_scripts.ctr([
            'pods',
            'export-overlay',
            builder_id,
            rootfs_path,
        ])
        ctr_scripts.ctr_build_image(
            utils.get_builder_name(name), version, rootfs_path, output
        )
        ctr_scripts.ctr_import_image(output)
def _generate_builder_config(name, version, apps, images, mounts, overlays):
    """Assemble the pod config document for the builder pod."""
    config = {
        'name': utils.get_builder_name(name),
        'version': version,
    }
    config['apps'] = apps
    config['images'] = images
    config['mounts'] = mounts
    config['overlays'] = overlays
    return config
# Shell commands that bootstrap a fresh builder pod; run only when
# there is no intermediate builder image to start from (see _get_apps).
_INITIALIZE_BUILDER = (
    # pylint: disable=line-too-long
    'adduser --disabled-password --gecos "" plumber',
    'echo "plumber ALL=(ALL:ALL) NOPASSWD: ALL" > /etc/sudoers.d/99-plumber',
    'chmod 440 /etc/sudoers.d/99-plumber',
    'apt-get --yes install software-properties-common',
    # Clear the default repositories from `ctr images build-base` as
    # they conflict with mime.
    'echo -n > /etc/apt/sources.list',
    'add-apt-repository --yes "deb http://us.archive.ubuntu.com/ubuntu/ %s main restricted universe"'
    % ctr_models.BASE_IMAGE_RELEASE_CODE_NAME,
    'add-apt-repository --yes "deb http://us.archive.ubuntu.com/ubuntu/ %s-updates main restricted universe"'
    % ctr_models.BASE_IMAGE_RELEASE_CODE_NAME,
    'add-apt-repository --yes "deb http://security.ubuntu.com/ubuntu/ %s-security main restricted universe"'
    % ctr_models.BASE_IMAGE_RELEASE_CODE_NAME,
    'apt-get --yes update',
    'apt-get --yes full-upgrade',
    # foreman needs at least python3; let's use 3.8 to be safe.
    'apt-get --yes install python3.8',
    'update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1',
    'update-alternatives --set python3 /usr/bin/python3.8',
    # pylint: enable=line-too-long
)
def _get_apps(builder_images, root_host_paths, rules):
    """Compose the pod app entry that runs the build inside the pod."""
    script_lines = []
    if not builder_images:
        LOG.info('no intermediate builder images; initialize builder')
        script_lines.extend(_INITIALIZE_BUILDER)
    if rules:
        # Run foreman as the unprivileged plumber user.
        command = [
            'sudo',
            '-u', 'plumber',
            '-g', 'plumber',
            '/usr/src/garage/shipyard2/scripts/foreman.sh',
            'build',
        ]
        if shipyard2.is_debug():
            command.append('--debug')
        command.extend(_foreman_make_path_args(root_host_paths))
        command.extend(['--parameter', '//bases:inside-builder-pod=true'])
        command.extend(str(rule) for rule in rules)
        script_lines.append(' '.join(command))
    ASSERT.not_empty(script_lines)
    app = {
        'name': 'builder',
        'type': 'oneshot',
        'exec': ['/bin/bash', '-c', '; '.join(script_lines)],
        'user': 'root',
        'group': 'root',
    }
    return [app]
def _foreman_make_path_args(root_host_paths):
    """Yield foreman ``--path``/``--parameter`` arguments for the roots."""
    target_paths = [_root_host_to_target(p) for p in root_host_paths]
    for target_path in target_paths:
        yield '--path'
        yield str(target_path / 'shipyard2' / 'rules')
    yield '--parameter'
    yield '//bases:roots=%s' % ','.join(str(p) for p in target_paths)
def _get_images(builder_images, base_version):
    """List pod images: base, base builder, then intermediate builders."""
    images = [
        {
            'name': shipyard2.BASE,
            'version': base_version,
        },
        {
            'name': utils.get_builder_name(shipyard2.BASE),
            'version': base_version,
        },
    ]
    images.extend(dataclasses.asdict(image) for image in builder_images)
    return images
def _get_mounts(shipyard_data_path, name, rules):
    """Return the read-only image-data mount when it is relevant."""
    if shipyard_data_path is None:
        return []
    image_data_path = shipyard_data_path / 'image-data'
    if not _should_mount_image_data(image_data_path, name, rules):
        return []
    return [{
        'source': str(image_data_path),
        'target': '/usr/src/image-data',
        'read_only': True,
    }]
def _should_mount_image_data(image_data_path, name, rules):
    """True if we should mount image-data directory.

    Check presence of the following directories:
    * <image-data>/images/<image-path>/<image-name>.
    * <image-data>/<rule-path>.
    """
    if (image_data_path / foreman.get_relpath() / name).is_dir():
        return True
    return any((image_data_path / rule.path).is_dir() for rule in rules)
def _get_overlays(root_host_paths):
    """Mount each source root read-write under /usr/src via an overlay."""
    overlays = []
    for host_path in root_host_paths:
        overlays.append({
            'sources': [str(host_path), ''],
            'target': str(_root_host_to_target(host_path)),
            'read_only': False,
        })
    return overlays
def _root_host_to_target(root_host_path):
return Path('/usr/src') / root_host_path.name
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/rules/images/build_image.py",
"copies": "1",
"size": "6975",
"license": "mit",
"hash": 46870623332532470,
"line_mean": 30.995412844,
"line_max": 109,
"alpha_frac": 0.592688172,
"autogenerated": false,
"ratio": 3.560490045941807,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653178217941807,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Cache',
'NULL_CACHE',
]
import collections
import contextlib
import dataclasses
import hashlib
import io
import logging
import random
import shutil
import tempfile
import threading
from pathlib import Path
import g1.files
from g1.bases import timers
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# By default we keep 80% of entries post eviction.
POST_EVICTION_SIZE_RATIO = 0.8
class CacheInterface:
    """Abstract interface implemented by both Cache and NullCache."""

    @dataclasses.dataclass(frozen=True)
    class Stats:
        # Lookup hit/miss counters reported by ``get_stats``.
        num_hits: int
        num_misses: int

    # Sentinel distinguishing "no default given" from ``default=None``.
    _SENTINEL = object()

    def get_stats(self):
        """Return a ``Stats`` snapshot of the hit/miss counters."""
        raise NotImplementedError

    def estimate_size(self):
        """Return an estimate of the number of cached entries."""
        raise NotImplementedError

    def evict(self):
        """Evict entries and return how many were evicted."""
        raise NotImplementedError

    def get(self, key, default=None):
        """Return the cached value for ``key``, or ``default``."""
        raise NotImplementedError

    def get_file(self, key, default=None):
        """Return a file object for ``key``'s value, or ``default``."""
        raise NotImplementedError

    def set(self, key, value):
        """Store ``value`` under ``key``."""
        raise NotImplementedError

    def setting_file(self, key):
        """Context manager yielding a writable file for ``key``'s value."""
        raise NotImplementedError

    def pop(self, key, default=_SENTINEL):
        """Remove and return ``key``'s value.

        Raises KeyError when ``key`` is absent and no default is given.
        """
        raise NotImplementedError
class NullCache(CacheInterface):
    """A cache that stores nothing; every lookup is a miss."""

    def __init__(self):
        self._num_misses = 0

    def get_stats(self):
        return self.Stats(
            num_hits=0,
            num_misses=self._num_misses,
        )

    def estimate_size(self):
        return 0

    def evict(self):
        return 0

    def _miss(self, default):
        # Every lookup misses; only the miss counter is maintained.
        self._num_misses += 1
        return default

    def get(self, key, default=None):
        del key  # Unused.
        return self._miss(default)

    def get_file(self, key, default=None):
        del key  # Unused.
        return self._miss(default)

    def set(self, key, value):
        pass  # Values are deliberately dropped.

    @contextlib.contextmanager
    def setting_file(self, key):
        # Hand out a throwaway buffer; whatever is written is discarded.
        yield io.BytesIO()

    def pop(self, key, default=CacheInterface._SENTINEL):
        if default is self._SENTINEL:
            raise KeyError(key)
        return default
# Shared stateless-enough singleton for callers that want caching off.
NULL_CACHE = NullCache()
class Cache(CacheInterface):
    """File-based LRU cache.

    Cache keys and values are bytes objects.  A cache value is stored in
    its own file, whose path is derived from the MD5 hash of its key,
    with the first two hexadecimal digits as the directory name, and the
    rest as the file name.  This two-level structure should prevent any
    directory from growing too big.
    """

    @staticmethod
    def _get_relpath(key):
        # Map a key to its two-level relative path, e.g. "ab/cdef...".
        hasher = hashlib.md5()
        hasher.update(key)
        digest = hasher.hexdigest()
        return Path(digest[:2]) / digest[2:]

    def __init__(
        self,
        cache_dir_path,
        capacity,
        *,
        post_eviction_size=None,
        executor=None,  # Use this to evict in the background.
    ):
        self._lock = threading.Lock()
        self._cache_dir_path = ASSERT.predicate(cache_dir_path, Path.is_dir)
        self._capacity = ASSERT.greater(capacity, 0)
        # Evict below capacity so evictions amortize instead of being
        # triggered again by the very next set.
        self._post_eviction_size = (
            post_eviction_size if post_eviction_size is not None else
            int(self._capacity * POST_EVICTION_SIZE_RATIO)
        )
        ASSERT(
            0 <= self._post_eviction_size <= self._capacity,
            'expect 0 <= post_eviction_size <= {}, not {}',
            self._capacity,
            self._post_eviction_size,
        )
        self._executor = executor
        # Most-recently-used paths are kept at the front; values are
        # access counts.  By the way, if cache cold start is an issue,
        # we could store and load this table from a file.
        self._access_log = collections.OrderedDict()
        self._num_hits = 0
        self._num_misses = 0
        # It's safe to call these methods after this point.
        self._eviction_countdown = self._estimate_eviction_countdown()
        self._maybe_evict()

    def get_stats(self):
        """Return hit/miss counters."""
        return self.Stats(
            num_hits=self._num_hits,
            num_misses=self._num_misses,
        )

    def _log_access(self, path):
        # Although this is a LRU cache, let's keep access counts, which
        # could be useful in understanding cache performance.
        self._access_log[path] = self._access_log.get(path, 0) + 1
        self._access_log.move_to_end(path, last=False)

    def _make_get_recency(self):
        # Snapshot recency ranks (0 == most recent); paths that were
        # never logged sort last, so they are evicted first.
        recency_table = dict((p, r) for r, p in enumerate(self._access_log))
        least_recency = len(self._access_log)
        return lambda path: recency_table.get(path, least_recency)

    def estimate_size(self):
        """Estimate the number of cached entries without a full scan."""
        dir_paths = list(_iter_dirs(self._cache_dir_path))
        if not dir_paths:
            return 0
        # Estimate the size of the cache by multiplying the two, given
        # that MD5 yields a uniform distribution.
        return len(dir_paths) * _count_files(random.choice(dir_paths))

    def _estimate_eviction_countdown(self):
        # Just a guess of how far away we are from the next eviction.
        return self._capacity - self.estimate_size()

    def _should_evict(self):
        return (
            len(self._access_log) > self._capacity
            or self._eviction_countdown < 0
        )

    def _maybe_evict(self):
        with self._lock:
            if self._should_evict():
                self._evict_require_lock_by_caller()

    def evict(self):
        """Force an eviction pass; return the number of entries evicted."""
        with self._lock:
            return self._evict_require_lock_by_caller()

    def _evict_require_lock_by_caller(self):
        stopwatch = timers.Stopwatch()
        stopwatch.start()
        num_evicted = self._evict()
        stopwatch.stop()
        LOG.info(
            'evict %d entries in %f seconds: %s',
            num_evicted,
            stopwatch.get_duration(),
            self._cache_dir_path,
        )
        return num_evicted

    def _evict(self):
        # BUG FIX: guard against an empty cache directory; the division
        # below would raise ZeroDivisionError when evict() is called
        # before anything has been cached.
        num_dirs = _count_dirs(self._cache_dir_path)
        if num_dirs == 0:
            self._eviction_countdown = self._estimate_eviction_countdown()
            return 0
        # Estimate post-eviction size per directory, given that MD5
        # yields a uniform distribution of sizes.
        #
        # NOTE: It might "over-evict" when post_eviction_size is less
        # than 256, since in which case target_size_per_dir is likely 0.
        target_size_per_dir = int(self._post_eviction_size / num_dirs)
        get_recency = self._make_get_recency()
        num_evicted = 0
        for dir_path in _iter_dirs(self._cache_dir_path):
            num_evicted += self._evict_dir(
                dir_path, target_size_per_dir, get_recency
            )
        self._eviction_countdown = self._estimate_eviction_countdown()
        return num_evicted

    def _evict_dir(self, dir_path, target_size, get_recency):
        # Keep the target_size most-recently-used files; remove the rest.
        num_evicted = 0
        paths = list(_iter_files(dir_path))
        paths.sort(key=get_recency)
        for path in paths[target_size:]:
            path.unlink()
            count = self._access_log.pop(path, 0)
            LOG.debug('evict: %d %s', count, path)
            num_evicted += 1
        g1.files.remove_empty_dir(dir_path)
        return num_evicted

    def _get_path(self, key):
        return self._cache_dir_path / self._get_relpath(key)

    def get(self, key, default=None):
        """Return the cached bytes for ``key``, or ``default`` on a miss."""
        with self._lock:
            return self._get_require_lock_by_caller(
                key, default, Path.read_bytes
            )

    def get_file(self, key, default=None):
        """Get cache entry as a pair of file object and its size.

        The caller has to close the file object.  Note that even if this
        cache entry is removed or evicted, the file will only be removed
        by the file system when the file is closed.
        """
        with self._lock:
            return self._get_require_lock_by_caller(
                key,
                default,
                lambda path: (path.open('rb'), path.stat().st_size),
            )

    def _get_require_lock_by_caller(self, key, default, getter):
        path = self._get_path(key)
        if not path.exists():
            self._num_misses += 1
            return default
        value = getter(path)
        self._log_access(path)
        self._num_hits += 1
        return value

    def set(self, key, value):
        """Store ``value`` (bytes) under ``key``, evicting if needed."""
        with self._lock:
            return self._set_require_lock_by_caller(
                key, lambda path: path.write_bytes(value)
            )

    @contextlib.contextmanager
    def setting_file(self, key):
        """Set a cache entry via a file-like object."""
        # We use mktemp (which is unsafe in general) because we want to
        # rename it on success, but NamedTemporaryFile's file closer
        # raises FileNotFoundError.  I think in our use case here,
        # mktemp is safe enough.
        value_tmp_path = Path(tempfile.mktemp())
        try:
            with value_tmp_path.open('wb') as value_file:
                yield value_file
            with self._lock:
                # Use shutil.move because /tmp might be in another file
                # system than the cache directory.  (shutil.move detects
                # this and uses os.rename when they are in the same file
                # system.)
                self._set_require_lock_by_caller(
                    key,
                    lambda path: shutil.move(value_tmp_path, path),
                )
        finally:
            value_tmp_path.unlink(missing_ok=True)

    def _set_require_lock_by_caller(self, key, setter):
        path = self._get_path(key)
        if not path.exists():
            path.parent.mkdir(exist_ok=True)
            self._eviction_countdown -= 1
        setter(path)
        self._log_access(path)
        if self._should_evict():
            if self._executor:
                # Evict in the background; _maybe_evict re-acquires the
                # lock, so it must run on another thread.
                self._executor.submit(self._maybe_evict)
            else:
                self._evict_require_lock_by_caller()

    def pop(self, key, default=CacheInterface._SENTINEL):
        """Remove and return the entry's bytes.

        Raises ``KeyError`` when absent and no ``default`` is given.
        """
        with self._lock:
            return self._pop_require_lock_by_caller(key, default)

    def _pop_require_lock_by_caller(self, key, default):
        path = self._get_path(key)
        if not path.exists():
            if default is self._SENTINEL:
                raise KeyError(key)
            return default
        value = path.read_bytes()
        path.unlink()
        g1.files.remove_empty_dir(path.parent)
        self._access_log.pop(path, None)
        self._eviction_countdown += 1
        return value
def _iter_dirs(dir_path):
    """Iterate over the immediate subdirectories of ``dir_path``."""
    return (child for child in dir_path.iterdir() if child.is_dir())
def _iter_files(dir_path):
    """Iterate over the regular files directly under ``dir_path``."""
    return (child for child in dir_path.iterdir() if child.is_file())
def _count_dirs(dir_path):
    """Count the immediate subdirectories of ``dir_path``."""
    return sum(1 for entry in dir_path.iterdir() if entry.is_dir())
def _count_files(dir_path):
    """Count the regular files directly under ``dir_path``."""
    return sum(1 for entry in dir_path.iterdir() if entry.is_file())
| {
"repo_name": "clchiou/garage",
"path": "py/g1/files/g1/files/caches.py",
"copies": "1",
"size": "10465",
"license": "mit",
"hash": -7733741168624707000,
"line_mean": 29.1585014409,
"line_max": 76,
"alpha_frac": 0.5855709508,
"autogenerated": false,
"ratio": 3.760330578512397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845901529312397,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'_calculate_time_range',
'update_time_steps',
'get_requested_time',
'get_input_time_steps',
'get_combined_input_time_steps',
]
import collections
import numpy as np
def _calculate_time_range(nt, dt=1.0):
    """Discretize the time axis: ``nt`` steps of ``dt`` seconds each."""
    stop = nt * dt
    return np.arange(0, stop, dt, dtype=float)
def update_time_steps(algorithm, nt, dt=1.0, explicit=False):
    """Handles setting up the timesteps on on the pipeline for a file series reader.

    Args:
        algorithm (vtkDataObject): The data object (Proxy) on the pipeline
            (pass `self` from algorithm subclasses)
        nt (int or list): Number of timesteps (Pass a list to use length of
            that list)
        dt (float): The discrete value in seconds for the time step.
        explicit (boolean): if true, this will treat the nt argument as the exact
            timestep values to use

    Return:
        numpy.array : Returns the timesteps as an array
    """
    # ``collections.Iterable`` was removed in Python 3.10; import the
    # ABC from its ``collections.abc`` home instead.
    from collections.abc import Iterable
    if explicit and isinstance(nt, Iterable):
        timesteps = nt
    else:
        if isinstance(nt, Iterable):
            nt = len(nt)
        # BUG FIX: the ``dt`` argument used to be ignored here (the call
        # hard-coded ``dt=1.0``); honor the caller's step size.
        timesteps = _calculate_time_range(nt, dt=dt)
    if len(timesteps) < 1:
        # NOTE: we may want to raise a warning here on the dev side.
        # if developing a new algorithm that uses this, you may want to
        # know exactly when this failse to update
        #'update_time_steps() is not updating because passed time step values are NULL.'
        return None
    executive = algorithm.GetExecutive()
    oi = executive.GetOutputInformation(0)
    # Clear previously advertised time info before appending fresh values.
    oi.Remove(executive.TIME_STEPS())
    oi.Remove(executive.TIME_RANGE())
    for t in timesteps:
        oi.Append(executive.TIME_STEPS(), t)
    # TIME_RANGE is just the [first, last] pair.
    oi.Append(executive.TIME_RANGE(), timesteps[0])
    oi.Append(executive.TIME_RANGE(), timesteps[-1])
    return timesteps
def get_requested_time(algorithm, outInfo, idx=0):
    """Return the index of the timestep closest to the pipeline's
    requested update time.

    Args:
        algorithm (vtkDataObject) : The data object (Proxy) on the pipeline
            (pass `self` from algorithm subclasses)
        outInfo (vtkInformationVector) : The output information for the
            algorithm
        idx (int) : the index for the output port

    Return:
        int : the index of the requested time

    Example:
        >>> # Get requested time index
        >>> i = _helpers.get_requested_time(self, outInfo)
    """
    executive = algorithm.GetExecutive()
    timesteps = algorithm.get_time_step_values()
    outInfo = outInfo.GetInformationObject(idx)
    if timesteps is None or len(timesteps) == 0:
        # Not a time-varying source; there is only index 0.
        return 0
    elif outInfo.Has(executive.UPDATE_TIME_STEP()) and len(timesteps) > 0:
        # Snap the requested update time to the nearest known timestep.
        utime = outInfo.Get(executive.UPDATE_TIME_STEP())
        return np.argmin(np.abs(np.array(timesteps) - utime))
    else:
        # if we cant match the time, give first
        if not len(timesteps) > 0:
            # NOTE(review): this branch looks unreachable -- the empty
            # case already returned above.
            raise AssertionError('Number of timesteps must be greater than 0')
        return 0
def get_input_time_steps(algorithm, port=0, idx=0):
    """Get the timestep values for the algorithm's input

    Args:
        algorithm (vtkDataObject) : The data object (Proxy) on the pipeline
            (pass `self` from algorithm subclasses)
        port (int) : the input port
        idx (int) : optional : the connection index on the input port

    Return:
        list : the time step values of the input (if there arn't any, returns ``None``)
    """
    executive = algorithm.GetExecutive()
    # Read the TIME_STEPS information key of the upstream connection.
    ii = executive.GetInputInformation(port, idx)
    return ii.Get(executive.TIME_STEPS())
def get_combined_input_time_steps(algorithm, idx=0):
    """This will iterate over all input ports and combine their unique timesteps
    for an output algorithm to have.

    Args:
        algorithm (vtkDataObject) : The data object (Proxy) on the pipeline
            (pass `self` from algorithm subclasses)

    Return:
        np.ndarray : a 1D array of all the unique timestep values (empty array if no time variance)
    """
    executive = algorithm.GetExecutive()
    tsteps = []
    for port in range(executive.GetNumberOfInputPorts()):
        ii = executive.GetInputInformation(port, idx)
        ti = ii.Get(executive.TIME_STEPS())
        if ti is None:
            # Inputs without time variance contribute nothing.
            ti = np.array([])
        tsteps.append(ti)
    # BUG FIX: np.concatenate raises ValueError on an empty sequence, so
    # return the documented empty array when there are no input ports.
    if not tsteps:
        return np.array([])
    return np.unique(np.concatenate(tsteps, 0))
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/_helpers/timeseries.py",
"copies": "1",
"size": "4533",
"license": "bsd-3-clause",
"hash": -3240956809086617600,
"line_mean": 34.6929133858,
"line_max": 99,
"alpha_frac": 0.6463710567,
"autogenerated": false,
"ratio": 3.8513169073916735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4997687964091674,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'category',
'entry',
'sequence',
'HD_InitialEntryMode'
]
from category import *
from entry import *
from sequence import *
from hd_initialentry import *
import time
import locale
from .. import dmd
def generate_highscore_frames(categories, width, height):
    """Utility function that returns a sequence of :class:`~procgame.dmd.Frame` objects
    describing the current high scores in each of the *categories* supplied.

    *categories* should be a list of :class:`HighScoreCategory` objects.
    """
    markup = dmd.MarkupFrameGenerator(width, height)
    frames = list()
    for category in categories:
        for index, score in enumerate(category.scores):
            # locale.format() was deprecated in Python 3.7 and removed
            # in 3.12; format_string() yields identical grouped output.
            score_str = locale.format_string("%d", score.score, True)  # Add commas to the score.
            if score.score == 1:
                score_str += category.score_suffix_singular
            else:
                score_str += category.score_suffix_plural
            text = '[%s]\n#%s#\n[%s]' % (category.titles[index], score.inits, score_str)
            frame = markup.frame_for_markup(markup=text, y_offset=4)
            frames.append(frame)
    return frames
def get_highscore_data(categories):
    """Utility function that returns a list of high score dictionaries.

    Each list entry contains: a category, player, and score.
    """
    data = list()
    for category in categories:
        for index, score in enumerate(category.scores):
            # locale.format() was deprecated in Python 3.7 and removed
            # in 3.12; format_string() yields identical grouped output.
            score_str = locale.format_string("%d", score.score, True)  # Add commas to the score.
            if score.score == 1:
                score_str += category.score_suffix_singular
            else:
                score_str += category.score_suffix_plural
            data.append({'category': category.titles[index], 'player': score.inits, 'score': score_str})
    return data
| {
"repo_name": "mjocean/PyProcGameHD-SkeletonGame",
"path": "procgame/highscore/__init__.py",
"copies": "1",
"size": "1835",
"license": "mit",
"hash": -7657306159784471000,
"line_mean": 35.7,
"line_max": 103,
"alpha_frac": 0.629972752,
"autogenerated": false,
"ratio": 4.068736141906873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5198708893906874,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'Category',
)
class Category(tuple):
    """
    A tuple whose equality also requires an exact type match.

    Two instances compare equal only when they are of the same type
    *and* hold the same items, which makes it easy to avoid collisions
    when common values (like strings) are used as keys in a dict or as
    a Wiring :term:`specification`.

    Example::

        from wiring import Graph, Category

        class Public(Category):
            pass

        class Secret(Category):
            pass

        graph = Graph()
        graph.register_instance(Public('database', 1), 'db://public/1')
        graph.register_instance(Secret('database', 1), 'db://private/1')

        assert Public('database', 1) != Secret('database', 1)
        assert (
            graph.get(Public('database', 1))
            != graph.get(Secret('database', 1))
        )
    """

    def __new__(cls, *args):
        # Positional arguments become the tuple's items.
        return super(Category, cls).__new__(cls, args)

    def __repr__(self):
        # E.g. Public('database', 1): the concrete class name plus the
        # plain tuple repr of the items.
        return type(self).__name__ + tuple.__repr__(self)

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        # Types must match exactly; being a subclass is not enough.
        if type(self) != type(other):
            return False
        return tuple.__eq__(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Mix the concrete type into the hash so that equal-valued
        # instances of different subclasses hash differently.
        return hash((
            type(self),
            tuple.__hash__(self),
        ))
| {
"repo_name": "msiedlarek/wiring",
"path": "wiring/categories.py",
"copies": "1",
"size": "1412",
"license": "apache-2.0",
"hash": 8547945390414328000,
"line_mean": 24.6727272727,
"line_max": 79,
"alpha_frac": 0.540368272,
"autogenerated": false,
"ratio": 4.189910979228487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5230279251228487,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'check_numpy',
]
__displayname__ = 'Version Verifier'
try:
from ._helpers import PVGeoError
except ImportError:
PVGeoError = RuntimeError
def check_numpy(alert='print'):
    """A method to check the active environment's version of NumPy for
    compatibility with PVGeo.

    Args:
        alert (str): raise a ``'warn'`` (warning) or an ``'error'`` (PVGeoError) if NumPy is not at a satisfactory version.

    Returns:
        bool: True if NumPy is at version 1.10 or newer, False otherwise.
    """
    import numpy as np
    import warnings
    v = np.array(np.__version__.split('.')[0:2], dtype=int)
    # Require >= 1.10.  BUG FIX: the old test ``v[0] >= 1 and v[1] >= 10``
    # wrongly rejected any major release whose minor version is below 10
    # (e.g. NumPy 2.0).
    if v[0] > 1 or (v[0] == 1 and v[1] >= 10):
        return True
    msg = (
        'WARNING: Your version of NumPy is below 1.10.x (you are using %s), please update the NumPy module used in ParaView for performance enhancement. Some filters/readers may be unavailable or crash otherwise.'
        % np.__version__
    )
    if alert == 'error':
        raise PVGeoError(msg)
    elif alert == 'warn':
        warnings.warn(msg)
    elif alert == 'print':
        print(msg)
    return False
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/version.py",
"copies": "1",
"size": "1027",
"license": "bsd-3-clause",
"hash": 2440664839277276700,
"line_mean": 27.5277777778,
"line_max": 213,
"alpha_frac": 0.6192794547,
"autogenerated": false,
"ratio": 3.7481751824817517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9865685551997683,
"avg_score": 0.00035381703681372056,
"num_lines": 36
} |
__all__ = [
'ClientBase',
]
import capnp
from garage.asyncs import futures
from garage.asyncs import queues
from garage.asyncs.messaging import reqrep
class ClientBase:
"""Abstract base class for implementing reqrep client."""
def _parse_response(self, request, response_message):
"""Parse response object (and may raise error)."""
raise NotImplementedError
def __init__(self, request_queue, *, packed=False):
self.__request_queue = request_queue
if packed:
self.__from_bytes = capnp.MessageReader.from_packed_bytes
self.__to_bytes = capnp.MessageBuilder.to_packed_bytes
else:
self.__from_bytes = capnp.MessageReader.from_bytes
self.__to_bytes = capnp.MessageBuilder.to_bytes
async def _transact(self, request):
"""Make a transaction.
This is intended to be called by subclass.
"""
raw_request = self.__to_bytes(request._message)
async with futures.Future() as raw_resposne_future:
try:
await self.__request_queue.put((
raw_request,
raw_resposne_future.promise(),
))
except queues.Closed:
raise reqrep.Terminated from None
raw_response = await raw_resposne_future.result()
return self._parse_response(
request,
self.__from_bytes(raw_response),
)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/messaging/utils.py",
"copies": "1",
"size": "1471",
"license": "mit",
"hash": -1604131539618571500,
"line_mean": 27.8431372549,
"line_max": 69,
"alpha_frac": 0.5927940177,
"autogenerated": false,
"ratio": 4.214899713467049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 51
} |
__all__ = [
'Client',
'ServerTimeoutError',
]
import logging
import nng
import nng.asyncs
from g1.bases import classes
from g1.bases import collections
from g1.bases.assertions import ASSERT
from . import utils
LOG = logging.getLogger(__name__)
# This is just an alias for now.
ServerTimeoutError = nng.errors.Errors.ETIMEDOUT
class Client:
    """RPC client over an nng REQ0 socket.

    Exposes one awaitable :class:`Method` per member of the request
    union type, collected in the ``m`` namespace.
    """

    def __init__(self, request_type, response_type, wiredata):
        self.socket = nng.asyncs.Socket(nng.Protocols.REQ0)
        self.transceive = Transceiver(self.socket, response_type, wiredata)
        method_table = {
            method_name: Method(method_name, request_type, self.transceive)
            for method_name in request_type.m
        }
        self.m = collections.Namespace(**method_table)

    __repr__ = classes.make_repr('{self.socket!r}')

    def __enter__(self):
        # Delegate resource management to the underlying socket.
        self.socket.__enter__()
        return self

    def __exit__(self, *args):
        return self.socket.__exit__(*args)
class Transceiver:
    """One request/response round trip over the shared REQ0 socket."""

    def __init__(self, socket, response_type, wiredata):
        self._socket = socket
        self._response_type = response_type
        self._wiredata = wiredata

    async def __call__(self, request):
        # A fresh nng context per call keeps concurrent transactions
        # isolated on the shared socket.
        with nng.asyncs.Context(ASSERT.not_none(self._socket)) as context:
            await context.send(self._wiredata.to_lower(request))
            wire_response = await context.recv()
        return self._wiredata.to_upper(self._response_type, wire_response)
class Method:
    """Callable proxy for one method of the request union type."""

    # Distinguishes "no timeout fallback configured" from a None fallback.
    _SENTINEL = object()

    def __init__(
        self, name, request_type, transceive, *, on_timeout_return=_SENTINEL
    ):
        self._name = name
        self._request_type = request_type
        self._transceive = transceive
        self._on_timeout_return = on_timeout_return

    def _make_args(self):
        return self._name, self._request_type, self._transceive

    def on_timeout_return(self, on_timeout_return):
        """Return a copy of this method that yields ``on_timeout_return``
        instead of raising on server timeout."""
        return Method(*self._make_args(), on_timeout_return=on_timeout_return)

    async def __call__(self, **kwargs):
        # Wrap the keyword arguments into the union member for this method.
        request = self._request_type(
            args=self._request_type.m[self._name](**kwargs)
        )
        try:
            response = await self._transceive(request)
        except ServerTimeoutError:
            if self._on_timeout_return is self._SENTINEL:
                raise
            LOG.debug('server timeout: request=%r', request)
            return self._on_timeout_return
        if response.error is not None:
            # Re-raise the error carried in the response union.
            raise utils.select(response.error)[1]
        return getattr(response.result, self._name)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/reqrep/clients.py",
"copies": "1",
"size": "2550",
"license": "mit",
"hash": -6277805485287667000,
"line_mean": 27.3333333333,
"line_max": 78,
"alpha_frac": 0.6137254902,
"autogenerated": false,
"ratio": 3.811659192825112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4925384683025112,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"CLOEXEC",
"NONBLOCK",
"ACCESS",
"MODIFY",
"ATTRIB",
"CLOSE_WRITE",
"CLOSE_NOWRITE",
"CLOSE",
"OPEN",
"MOVED_FROM",
"MOVED_TO",
"MOVE",
"CREATE",
"DELETE",
"DELETE_SELF",
"MOVE_SELF",
"UNMOUNT",
"Q_OVERFLOW",
"IGNORED",
"ONLYDIR",
"DONT_FOLLOW",
"ADD_MASK",
"ISDIR",
"ONESHOT",
"ALL_EVENTS",
"event",
"init",
"add_watch",
"rm_watch",
"unpack_event",
"unpack_events",
]
import ctypes
import ctypes.util
import errno
import os
import select
import struct
# Constants mirrored from <sys/inotify.h>; values must match the kernel
# ABI exactly.  See inotify(7).

# Flags for init() / inotify_init1().
CLOEXEC = 0o02000000
NONBLOCK = 0o00004000

# Event masks for add_watch() and event.mask.
ACCESS = 0x00000001
MODIFY = 0x00000002
ATTRIB = 0x00000004
CLOSE_WRITE = 0x00000008
CLOSE_NOWRITE = 0x00000010
CLOSE = CLOSE_WRITE | CLOSE_NOWRITE
OPEN = 0x00000020
MOVED_FROM = 0x00000040
MOVED_TO = 0x00000080
MOVE = MOVED_FROM | MOVED_TO
CREATE = 0x00000100
DELETE = 0x00000200
DELETE_SELF = 0x00000400
MOVE_SELF = 0x00000800

# Special flags and kernel-reported bits (see inotify(7)).
UNMOUNT = 0x00002000
Q_OVERFLOW = 0x00004000
IGNORED = 0x00008000
ONLYDIR = 0x01000000
DONT_FOLLOW = 0x02000000
ADD_MASK = 0x20000000
ISDIR = 0x40000000
ONESHOT = 0x80000000

ALL_EVENTS = ACCESS | MODIFY | ATTRIB | CLOSE | OPEN | MOVE | CREATE | DELETE | DELETE_SELF | MOVE_SELF
class event(object):
    """ See inotify(7) man page. """

    # Slots keep per-event memory small; one read() may yield many events.
    __slots__ = (
        "wd",
        "mask",
        "cookie",
        "name",
    )

    def __init__(self, wd, mask, cookie, name):
        # Assign each field by name so the slot order stays authoritative.
        for attr, value in zip(self.__slots__, (wd, mask, cookie, name)):
            setattr(self, attr, value)

    def __repr__(self):
        return "inotify.event(wd=%d, mask=0x%x, cookie=%d, name=%r)" % (self.wd, self.mask, self.cookie, self.name)
def errcheck(result, func, arguments):
    """ctypes errcheck hook: raise OSError from errno on negative return."""
    del func, arguments  # Required by the ctypes errcheck signature.
    if result >= 0:
        return result
    err = ctypes.get_errno()
    raise OSError(err, os.strerror(err))
# Load libc and wire up the inotify entry points with proper argument
# types and error checking.
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)

try:
    libc.inotify_init1
except AttributeError:
    # Old libc without inotify_init1: fall back to inotify_init, which
    # accepts no flags.
    libc.inotify_init.argtypes = []
    libc.inotify_init.errcheck = errcheck

    def init(flags=0):
        """ See inotify_init(2) man page. """
        assert flags == 0
        return libc.inotify_init()
else:
    libc.inotify_init1.argtypes = [ctypes.c_int]
    libc.inotify_init1.errcheck = errcheck

    def init(flags=0):
        """ See inotify_init1(2) man page. """
        return libc.inotify_init1(flags)

libc.inotify_add_watch.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_uint32]
libc.inotify_add_watch.errcheck = errcheck

def add_watch(fd, name, mask):
    """ See inotify_add_watch(2) man page. """
    # The C API takes bytes; encode the (str) path for the caller.
    return libc.inotify_add_watch(fd, name.encode(), mask)

libc.inotify_rm_watch.argtypes = [ctypes.c_int, ctypes.c_int]
libc.inotify_rm_watch.errcheck = errcheck

def rm_watch(fd, wd):
    """ See inotify_rm_watch(2) man page. """
    libc.inotify_rm_watch(fd, wd)
def unpack_event(buf):
    """ Returns the first event from buf and the rest of the buf. """
    # Wire format (struct inotify_event): int32 wd, uint32 mask,
    # uint32 cookie, uint32 len, then `len` bytes of NUL-padded name
    # (len may be zero for events on the watched object itself).
    headsize = 16
    wd, mask, cookie, namesize = struct.unpack("iIII", buf[:headsize])
    name = buf[headsize:headsize + namesize]
    if isinstance(name, str):
        # Text buffer (Python 2): strip the NUL padding directly.
        name = name.rstrip("\0")
    else:
        # Bytes buffer (Python 3): trim trailing zero bytes by hand.
        n = len(name)
        while n > 0 and name[n - 1] == 0:
            n -= 1
        name = name[:n]
    # An all-padding name becomes None rather than an empty string.
    ev = event(wd, mask, cookie, name or None)
    buf = buf[headsize + namesize:]
    return ev, buf
def unpack_events(buf):
    """ Returns the events from buf as a list. """
    events = []
    remaining = buf
    # Consume the buffer event by event until nothing is left.
    while remaining:
        ev, remaining = unpack_event(remaining)
        events.append(ev)
    return events
class Instance(object):
    """Object wrapper around a single inotify file descriptor."""

    def __init__(self, flags=0):
        self.fd = init(flags)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.close()

    def __iter__(self):
        # Blocks indefinitely, yielding events as they arrive.
        while True:
            try:
                r, _, _ = select.select([self.fd], [], [])
            except select.error as e:
                # Retry when select() is interrupted by a signal.
                if e.args[0] == errno.EINTR:
                    continue
                raise
            if r:
                for event in self.read_events():
                    yield event

    def add_watch(self, name, mask):
        return add_watch(self.fd, name, mask)

    def rm_watch(self, wd):
        rm_watch(self.fd, wd)

    def read_events(self, bufsize=65536):
        # A single read() may return multiple packed events.
        return unpack_events(os.read(self.fd, bufsize))

    def close(self):
        # Idempotent: safe to call more than once.
        if self.fd is not None:
            try:
                os.close(self.fd)
            finally:
                self.fd = None
| {
"repo_name": "tsavola/pyinotify-basic",
"path": "inotify/__init__.py",
"copies": "1",
"size": "4503",
"license": "mit",
"hash": -4917993005044944000,
"line_mean": 19.9441860465,
"line_max": 115,
"alpha_frac": 0.5782811459,
"autogenerated": false,
"ratio": 3.153361344537815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9230938205157777,
"avg_score": 0.00014085705600763428,
"num_lines": 215
} |
__all__ = [
'cloudinit',
]
from pathlib import Path
import logging
# Prefer ruamel.yaml over PyYAML.
try:
    from ruamel.yaml import YAML
    import io

    _YAML = YAML()

    def yaml_dump(data):
        """Serialize ``data`` to a YAML string via ruamel.yaml."""
        with io.StringIO() as output:
            _YAML.dump(data, output)
            return output.getvalue()

    yaml_load = _YAML.load
except ImportError:
    import yaml

    yaml_dump = yaml.dump
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; the templates loaded here are local files.
    yaml_load = yaml.load
from garage import apps
from garage import scripts
from garage.assertions import ASSERT
from . import keys
LOG = logging.getLogger(__name__)
@apps.with_prog('gen-user-data')
@apps.with_help('generate user data')
@apps.with_argument(
    '--ssh-host-key',
    nargs=3, metavar=('ALGORITHM', 'PRIVATE_KEY', 'PUBLIC_KEY'),
    action='append', required=True,
    help='add SSH host key for authenticating server',
)
@apps.with_argument(
    '--ssh-authorized-key',
    metavar='PATH', type=Path, action='append', required=True,
    help='add SSH authorized key for authenticating client',
)
@apps.with_argument(
    '--local-vm',
    nargs=4, metavar=('HOSTNAME', 'FQDN', 'INTERFACE', 'IP_ADDRESS'),
    help='''set additional data for local VirtualBox machine, which
            are: hostname, FQDN, host-only network interface, and
            its IP address
         '''
)
@apps.with_argument(
    '--password',
    help='set login password, which should only be used in testing',
)
@apps.with_argument(
    'output', type=Path,
    help='set output YAML file path',
)
def generate_user_data(args):
    """Generate cloud-init user data.

    SSH host key is a tuple of algorithm, private key file, and public
    key file.  "algorithm" is what you chose when generating the key
    pair, and should be one of dsa, ecdsa, ed25519, or rsa.

    SSH authorized key is your public key for password-less login.

    Returns 0 on success; 1 on an unsupported key algorithm.
    """
    templates_dir = Path(__file__).parent / 'templates'
    # Start from the template and patch fields into it below.
    user_data = yaml_load((templates_dir / 'user-data.yaml').read_text())
    key_algorithms = frozenset(algo for algo, _ in keys.HOST_KEYS)
    # Insert `ssh_keys`
    ssh_keys = user_data['ssh_keys']
    for algorithm, private_key, public_key in args.ssh_host_key:
        if algorithm not in key_algorithms:
            LOG.error('unsupported ssh key algorithm: %s', algorithm)
            return 1
        private_key = scripts.ensure_file(private_key)
        public_key = scripts.ensure_file(public_key)
        # Just a sanity check
        if private_key.suffix != '':
            LOG.warning('private key file has suffix: %s', private_key)
        if public_key.suffix != '.pub':
            LOG.warning('public key file suffix not .pub: %s', public_key)
        ssh_keys.update({
            ('%s_private' % algorithm): private_key.read_text(),
            ('%s_public' % algorithm): public_key.read_text(),
        })
    # Insert `ssh-authorized-keys` to account plumber
    ASSERT.equal(len(user_data['users']), 1)
    plumber = user_data['users'][0]
    ASSERT.equal(plumber['name'], 'plumber')
    public_keys = plumber['ssh-authorized-keys']
    for public_key in args.ssh_authorized_key:
        public_key = scripts.ensure_file(public_key)
        if public_key.suffix != '.pub':
            LOG.warning('public key file suffix not .pub: %s', public_key)
        public_keys.append(public_key.read_text())
    if args.local_vm:
        # Insert fields only for local VirtualBox virtual machine
        hostname, fqdn, interface, ip_address = args.local_vm
        user_data['hostname'] = hostname
        user_data['fqdn'] = fqdn
        # Insert host-only network configuration file
        #
        # I need this because I couldn't configure host-only network
        # interface from cloud-init metadata.  Also, note that you
        # should not set `gateway` for the host-only interface.
        cfg = (templates_dir / '99-host-only.yaml').read_text().format(
            interface=interface,
            ip_address=ip_address,
        )
        user_data.setdefault('write_files', []).append({
            'path': '/etc/netplan/99-host-only.yaml',
            'owner': 'root:root',
            'permissions': '0644',
            'content': cfg,
        })
        # Do this for the first boot in case the host-only interface is
        # not brought up.
        user_data['runcmd'].append('netplan generate')
        user_data['runcmd'].append('netplan apply')
    else:
        # No local VM: drop the template's VM-only fields entirely.
        user_data.pop('hostname')
        user_data.pop('fqdn')
    if args.password:
        LOG.warning('use password login, which is insecure')
        user_data['chpasswd'] = {
            'list': 'plumber:%s\n' % args.password,
            'expire': False,
        }
    user_data_yaml = yaml_dump(user_data)
    if args.output.exists():
        LOG.warning('attempt to overwrite: %s', args.output)
    scripts.ensure_contents(
        args.output,
        '#cloud-config\n\n' + user_data_yaml,
    )
    if args.local_vm:
        # Bundle the user data into an ISO that VirtualBox can attach.
        scripts.execute([
            'cloud-localds', '--verbose',
            args.output.with_suffix('.iso'),
            args.output,
        ])
    return 0
@apps.with_prog('cloud-init')
@apps.with_help('manage cloud-init data')
@apps.with_apps(
    'operation', 'operation on cloud-init data',
    generate_user_data,
)
def cloudinit(args):
    """Manage cloud-init data.

    Dispatches to the sub-command selected on the command line.
    """
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/mob/cloudinit.py",
"copies": "1",
"size": "5364",
"license": "mit",
"hash": -5575308496075629000,
"line_mean": 29.3050847458,
"line_max": 74,
"alpha_frac": 0.6137211037,
"autogenerated": false,
"ratio": 3.701863354037267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4815584457737267,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'cmd_build',
'cmd_release',
'cmd_unrelease',
]
import json
import logging
from pathlib import Path
import foreman
from g1 import scripts
from g1.bases import argparses
from g1.bases.assertions import ASSERT
import shipyard2
from . import repos
REPO_ROOT_PATH = Path(__file__).parent.parent.parent.parent
ASSERT.predicate(REPO_ROOT_PATH / '.git', Path.is_dir)
LOG = logging.getLogger(__name__)
select_env_argument = argparses.argument(
'--env',
default='production',
help='provide environment (default: %(default)s)',
)
select_label_argument = argparses.argument(
'label',
type=foreman.Label.parse,
help='provide pod label',
)
select_version_argument = argparses.argument(
'version',
help='provide build artifact version',
)
@argparses.begin_parser(
    'build',
    **argparses.make_help_kwargs('build pod or image'),
)
@argparses.argument(
    '--also-release',
    action=argparses.StoreBoolAction,
    default=True,
    help='also set pod release version (default: %(default_string)s)',
)
@select_env_argument
@argparses.argument(
    '--args-file',
    type=Path,
    action='append',
    required=True,
    help='add json file of foreman build command-line arguments',
)
@argparses.argument(
    'rule',
    type=foreman.Label.parse,
    help='provide pod or image build rule',
)
@select_version_argument
@argparses.end
def cmd_build(args):
    """Build a pod, xar, or image via foreman; optionally release it."""
    LOG.info('build: %s %s', args.rule, args.version)
    scripts.run([
        REPO_ROOT_PATH / 'shipyard2' / 'scripts' / 'foreman.sh',
        'build',
        *(('--debug', ) if shipyard2.is_debug() else ()),
        *_read_args_file(args.args_file or ()),
        *('--parameter', '//bases:inside-builder-pod=false'),
        *(
            # Inject the version parameter alongside the build rule.
            '--parameter',
            '//%s:%s=%s' % (
                args.rule.path,
                args.rule.name.with_name('version'),
                args.version,
            ),
        ),
        args.rule,
    ])
    if args.also_release:
        if _look_like_pod_rule(args.rule):
            release = _get_envs_dir(args).release_pod
        elif _look_like_xar_rule(args.rule):
            release = _get_envs_dir(args).release_xar
        else:
            # Images are built but never "released" to an env.
            ASSERT.predicate(args.rule, _look_like_image_rule)
            release = None
        if release:
            label = _guess_label_from_rule(args.rule)
            LOG.info('release: %s %s to %s', label, args.version, args.env)
            release(args.env, label, args.version)
    return 0
def _read_args_file(args_file_paths):
    """Yield foreman command-line arguments from each JSON file."""
    for path in args_file_paths:
        # Each file must contain a JSON array of argument strings.
        yield from ASSERT.isinstance(json.loads(path.read_text()), list)
def _look_like_pod_rule(rule):
    # True when the rule lives under the release "pods" directory.
    return rule.path.parts[0] == shipyard2.RELEASE_PODS_DIR_NAME
def _look_like_xar_rule(rule):
    # True when the rule lives under the release "xars" directory.
    return rule.path.parts[0] == shipyard2.RELEASE_XARS_DIR_NAME
def _look_like_image_rule(rule):
    # True when the rule lives under the release "images" directory.
    return rule.path.parts[0] == shipyard2.RELEASE_IMAGES_DIR_NAME
def _guess_label_from_rule(rule):
    """Guess pod, xar, or image label from build rule.

    For example, //pod/foo:bar/build becomes //foo:bar.
    """
    name_parts = rule.name.parts
    # Only "<artifact>/build" rule names can be mapped back to a label.
    ASSERT(
        len(name_parts) == 2 and name_parts[1] == 'build',
        'expect pod, xar, or image build rule: {}',
        rule,
    )
    # Drop the leading type directory (pods/xars/images) from the path.
    return foreman.Label.parse(
        '//%s:%s' % ('/'.join(rule.path.parts[1:]), name_parts[0])
    )
@argparses.begin_parser(
    'release',
    **argparses.make_help_kwargs('release pod at given version'),
)
@select_env_argument
@argparses.argument(
    'type',
    choices=('pods', 'xars'),
    help='provide build artifact type',
)
@select_label_argument
@select_version_argument
@argparses.end
def cmd_release(args):
    """Point the env's release of the given label at the given version."""
    LOG.info('release: %s %s to %s', args.label, args.version, args.env)
    if args.type == 'pods':
        release = _get_envs_dir(args).release_pod
    else:
        ASSERT.equal(args.type, 'xars')
        release = _get_envs_dir(args).release_xar
    release(args.env, args.label, args.version)
    return 0
@argparses.begin_parser(
    'unrelease',
    **argparses.make_help_kwargs('undo pod release'),
)
@select_env_argument
@select_label_argument
@argparses.end
def cmd_unrelease(args):
    """Remove the env's release entry for the given label."""
    LOG.info('unrelease: %s from %s', args.label, args.env)
    _get_envs_dir(args).unrelease(args.env, args.label)
    return 0
@argparses.begin_parser(
    'remove',
    **argparses.make_help_kwargs('remove build artifact'),
)
@argparses.argument(
    'type',
    choices=('pods', 'xars', 'builder-images', 'images', 'volumes'),
    help='provide build artifact type',
)
@select_label_argument
@select_version_argument
@argparses.end
def cmd_remove(args):
    """Delete one versioned build artifact from the release repo.

    Returns 1 (and removes nothing) when some env still releases the
    given pod/xar label.

    NOTE(review): unlike its siblings, this command is not listed in
    ``__all__``; confirm whether the omission is intentional.
    """
    # Map the artifact type to its directory-object wrapper.
    if args.type == 'pods':
        dir_object_type = repos.PodDir
    elif args.type == 'xars':
        dir_object_type = repos.XarDir
    elif args.type == 'builder-images':
        dir_object_type = repos.BuilderImageDir
    elif args.type == 'images':
        dir_object_type = repos.ImageDir
    else:
        ASSERT.equal(args.type, 'volumes')
        dir_object_type = repos.VolumeDir
    if args.type == 'pods' or args.type == 'xars':
        # Refuse to remove an artifact that some env still releases.
        envs_dir = _get_envs_dir(args)
        for env in envs_dir.envs:
            if envs_dir.has_release(env, args.label):
                LOG.warning(
                    'skip: remove: %s %s %s',
                    args.type,
                    args.label,
                    args.version,
                )
                return 1
    dir_object = dir_object_type.from_relpath(
        args.release_repo,
        args.label.path / args.label.name / args.version,
    )
    LOG.info('remove: %s %s %s', args.type, args.label, args.version)
    dir_object.remove()
    return 0
def _get_envs_dir(args):
    """Return the EnvsDir for the release repository given on the CLI."""
    return repos.EnvsDir(args.release_repo)
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/releases/build.py",
"copies": "1",
"size": "5770",
"license": "mit",
"hash": 4848672242771401000,
"line_mean": 25.5898617512,
"line_max": 75,
"alpha_frac": 0.6116117851,
"autogenerated": false,
"ratio": 3.230683090705487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43422948758054875,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'cmd_cleanup',
]
import collections
import logging
import foreman
from g1.bases import argparses
from g1.bases.assertions import ASSERT
from . import repos
LOG = logging.getLogger(__name__)
@argparses.begin_parser(
    'cleanup',
    **argparses.make_help_kwargs('clean up build artifacts'),
)
@argparses.argument(
    '--also-base',
    action=argparses.StoreBoolAction,
    default=False,
    help=(
        'also clean up the base image - '
        'you mostly should set this to false because you usually need '
        'the base image even when no pod refers to it temporarily '
        '(default: %(default_string)s)'
    ),
)
@argparses.argument(
    '--also-builder',
    action=argparses.StoreBoolAction,
    default=False,
    help=(
        'also clean up builder images - '
        'you mostly should set this to false because builder images '
        'are not referenced by pods and thus will all be removed in a '
        'cleanup (default: %(default_string)s)'
    ),
)
@argparses.argument(
    'keep',
    type=int,
    help='keep these latest versions (0 to remove all)',
)
@argparses.end
def cmd_cleanup(args):
    """Remove old artifact versions, keeping the latest ``args.keep``.

    Versions that are currently released (pods/xars) or still referenced
    by pods/xars in the repository (images/volumes) are never removed.
    Returns 0.
    """
    ASSERT.greater_or_equal(args.keep, 0)
    LOG.info('clean up pods')
    _cleanup(
        args.keep,
        _get_current_pod_versions(args.release_repo),
        repos.PodDir.group_dirs(args.release_repo),
    )
    LOG.info('clean up xars')
    _cleanup(
        args.keep,
        _get_current_xar_versions(args.release_repo),
        repos.XarDir.group_dirs(args.release_repo),
    )
    if args.also_builder:
        LOG.info('clean up builder images')
        _cleanup(
            args.keep,
            # Builder images are not referenced by pods and thus do not
            # have current versions.
            {},
            repos.BuilderImageDir.group_dirs(args.release_repo),
        )
    LOG.info('clean up images')
    groups = repos.ImageDir.group_dirs(args.release_repo)
    if not args.also_base:
        # Exclude the base image from cleanup unless explicitly requested.
        groups.pop(foreman.Label.parse('//bases:base'), None)
    _cleanup(
        args.keep,
        _get_current_image_versions(args.release_repo),
        groups,
    )
    LOG.info('clean up volumes')
    _cleanup(
        args.keep,
        _get_current_volume_versions(args.release_repo),
        repos.VolumeDir.group_dirs(args.release_repo),
    )
    return 0
def _cleanup(to_keep, current_versions, groups):
    """Remove old versions per label, keeping the latest ``to_keep``.

    ``current_versions`` maps label -> collection of versions that must
    be preserved regardless of age; ``groups`` maps label -> list of
    directory objects.
    """
    for label, dir_objects in groups.items():
        current_version_set = current_versions.get(label, ())
        to_remove = len(dir_objects) - to_keep
        # NOTE(review): assumes dir_objects is ordered so that .pop()
        # yields the oldest candidates first - confirm against the
        # ordering produced by repos.*.group_dirs.
        while to_remove > 0 and dir_objects:
            dir_object = dir_objects.pop()
            if dir_object.version not in current_version_set:
                LOG.info('remove: %s %s', label, dir_object.version)
                dir_object.remove()
            # NOTE(review): the counter is decremented even when a
            # current version is skipped, so fewer than ``to_remove``
            # directories may actually be removed - presumably
            # intentional; confirm.
            to_remove -= 1
def _get_current_pod_versions(repo_path):
    """Map pod label -> set of versions released to any environment."""
    return _get_current_versions_from_envs(
        repo_path, repos.EnvsDir.iter_pod_dirs
    )
def _get_current_xar_versions(repo_path):
    """Map xar label -> set of versions released to any environment."""
    return _get_current_versions_from_envs(
        repo_path, repos.EnvsDir.iter_xar_dirs
    )
def _get_current_versions_from_envs(repo_path, iter_dir_objects):
    """Map label -> set of versions released in any environment."""
    envs_dir = repos.EnvsDir(repo_path)
    versions_by_label = collections.defaultdict(set)
    for env in envs_dir.envs:
        for dir_object in iter_dir_objects(envs_dir, env):
            versions_by_label[dir_object.label].add(dir_object.version)
    return dict(versions_by_label)
def _get_current_image_versions(repo_path):
    """Map image label -> set of versions referenced by any pod or xar."""
    pod_images = _get_pod_dep_versions(repo_path, repos.PodDir.iter_image_dirs)
    xar_images = _get_xar_dep_versions(repo_path)
    merged = collections.defaultdict(set)
    for mapping in (pod_images, xar_images):
        for label, versions in mapping.items():
            merged[label].update(versions)
    return dict(merged)
def _get_current_volume_versions(repo_path):
    """Map volume label -> set of versions referenced by any pod."""
    return _get_pod_dep_versions(repo_path, repos.PodDir.iter_volume_dirs)
def _get_pod_dep_versions(repo_path, iter_dir_objects):
    """Map dependency label -> set of versions referenced by any pod."""
    versions_by_label = collections.defaultdict(set)
    for pod_dir in repos.PodDir.iter_dirs(repo_path):
        for dep_dir in iter_dir_objects(pod_dir):
            versions_by_label[dep_dir.label].add(dep_dir.version)
    return dict(versions_by_label)
def _get_xar_dep_versions(repo_path):
    """Map image label -> set of versions referenced by xars."""
    versions_by_label = collections.defaultdict(set)
    for xar_dir in repos.XarDir.iter_dirs(repo_path):
        image_dir = xar_dir.get_image_dir()
        # Some xars have no image; skip those.
        if image_dir is None:
            continue
        versions_by_label[image_dir.label].add(image_dir.version)
    return dict(versions_by_label)
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/releases/cleanup.py",
"copies": "1",
"size": "4640",
"license": "mit",
"hash": 7606659219471721000,
"line_mean": 29.3267973856,
"line_max": 74,
"alpha_frac": 0.6439655172,
"autogenerated": false,
"ratio": 3.4808702175543886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624835734754389,
"avg_score": 0,
"num_lines": 153
} |
__all__ = [
'cmd_init',
'get_repo_path',
# Command-line arguments.
'grace_period_arguments',
'make_grace_period_kwargs',
# App-specific helpers.
'chown_app',
'chown_root',
'make_dir',
'rsync_copy',
'setup_file',
]
import logging
import shutil
from pathlib import Path
from g1 import scripts
from g1.apps import parameters
from g1.bases import argparses
from g1.bases import datetimes
from g1.bases import oses
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# Runtime-configurable parameters for the g1.containers package.
PARAMS = parameters.define(
    'g1.containers',
    parameters.Namespace(
        repository=parameters.Parameter(
            Path('/var/lib/g1/containers'),
            'path to the repository directory',
            convert=Path,
            validate=Path.is_absolute,
            format=str,
        ),
        application_group=parameters.Parameter(
            'plumber',
            'set application group',
            validate=bool,  # Check not empty.
        ),
        xar_runner_script_directory=parameters.Parameter(
            Path('/usr/local/bin'),
            'path to the xar runner script directory',
            convert=Path,
            validate=Path.is_absolute,
            format=str,
        ),
    ),
)
# Appended to the repository root by get_repo_path(); bump when the
# on-disk repository layout changes incompatibly.
REPO_LAYOUT_VERSION = 'v1'
def cmd_init():
    """Initialize the repository.

    Requires root privilege and an existing application group; fails
    early otherwise.
    """
    oses.assert_group_exist(PARAMS.application_group.get())
    # For rsync_copy.
    scripts.check_command_exist('rsync')
    oses.assert_root_privilege()
    # Root-owned, group-readable repository root.
    make_dir(get_repo_path(), 0o750, chown_app, parents=True)
def get_repo_path():
    """Return the versioned repository root directory path."""
    return PARAMS.repository.get() / REPO_LAYOUT_VERSION
#
# Command-line arguments.
#
# Reusable ``--grace-period`` argument decorator; the value is parsed by
# argparses.parse_timedelta.
grace_period_arguments = argparses.argument(
    '--grace-period',
    type=argparses.parse_timedelta,
    default='24h',
    help='set grace period (default to %(default)s)',
)
def make_grace_period_kwargs(args):
    """Convert ``--grace-period`` into an ``expiration`` cutoff kwarg."""
    return {'expiration': datetimes.utcnow() - args.grace_period}
#
# App-specific helpers.
#
def chown_app(path):
    """Change owner to root and group to the application group."""
    # ASSERT.true rejects an empty group name from configuration.
    shutil.chown(path, 'root', ASSERT.true(PARAMS.application_group.get()))
def chown_root(path):
    """Set both the owner and the group of ``path`` to root."""
    shutil.chown(path, user='root', group='root')
def make_dir(path, mode, chown, *, parents=False, exist_ok=True):
    """Create a directory with ``mode`` and apply ``chown(path)`` to it."""
    LOG.info('create directory: %s', path)
    path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
    # Apply ownership even when the directory already existed.
    chown(path)
def setup_file(path, mode, chown):
    """Set permission bits on an existing file, then apply ``chown(path)``."""
    path.chmod(mode)
    chown(path)
def rsync_copy(src_path, dst_path, rsync_args=()):
    """Copy a directory tree with rsync, preserving owner/group/mode.

    ``rsync_args`` are extra flags inserted before the paths.
    """
    # We do NOT use ``shutil.copytree`` because shutil's file copy
    # functions in general do not preserve the file owner/group.
    LOG.info('copy: %s -> %s', src_path, dst_path)
    scripts.run([
        'rsync',
        '--archive',
        *rsync_args,
        # Trailing slash is an rsync trick: copy src_path's contents,
        # not the directory itself.
        '%s/' % src_path,
        dst_path,
    ])
| {
"repo_name": "clchiou/garage",
"path": "py/g1/containers/g1/containers/bases.py",
"copies": "1",
"size": "2931",
"license": "mit",
"hash": 5028007680462324000,
"line_mean": 23.0245901639,
"line_max": 75,
"alpha_frac": 0.6236779256,
"autogenerated": false,
"ratio": 3.485136741973841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9608814667573842,
"avg_score": 0,
"num_lines": 122
} |
__all__ = [
'columnar_arguments',
'make_columnar_kwargs',
]
from g1.bases import argparses
from g1.bases import functionals
from g1.bases.assertions import ASSERT
from . import Formats
def columnar_arguments(columns, default_columns):
    """Compose argparse decorators for ``--format``/``--header``/``--columns``.

    Args:
        columns: all column names the caller supports.
        default_columns: columns selected when ``--columns`` is omitted.
    """
    return functionals.compose(
        argparses.argument(
            '--format',
            action=argparses.StoreEnumAction,
            default=Formats.TEXT,
            help='set output format (default: %(default_string)s)',
        ),
        argparses.argument(
            '--header',
            action=argparses.StoreBoolAction,
            default=True,
            help='enable/disable header output (default: %(default_string)s)',
        ),
        argparses.begin_argument(
            '--columns',
            # Accept a comma-separated list; every name must be a known column.
            type=lambda columns_str: ASSERT.all(
                list(filter(None, columns_str.split(','))),
                columns.__contains__,
            ),
            default=','.join(default_columns),
            help=(
                'set output columns that are comma separated '
                'from available columns: %(columns)s '
                '(default: %(default)s)'
            ),
        ),
        argparses.apply(
            # Expose the sorted column names for the %(columns)s help text.
            lambda action:
            setattr(action, 'columns', ', '.join(sorted(columns)))
        ),
        argparses.end,
    )
def make_columnar_kwargs(args):
    """Extract columnar-output keyword arguments from parsed args."""
    return {
        key: getattr(args, key)
        for key in ('format', 'header', 'columns')
    }
| {
"repo_name": "clchiou/garage",
"path": "py/g1/texts/g1/texts/columns/argparses.py",
"copies": "1",
"size": "1483",
"license": "mit",
"hash": 5087140734717455000,
"line_mean": 26.9811320755,
"line_max": 78,
"alpha_frac": 0.5387727579,
"autogenerated": false,
"ratio": 4.2250712250712255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 53
} |
__all__ = (
'Column', 'TextColumn', 'NumberColumn',
)
class Column(object):
    """Represents a single column of a table.

    ``verbose_name`` defines a display name for this column used for output.

    ``name`` is the internal name of the column. Normally you don't need to
    specify this, as the attribute that you make the column available under
    is used. However, in certain circumstances it can be useful to override
    this default, e.g. when using ModelTables if you want a column to not
    use the model field name.

    ``default`` is the default value for this column. If the data source
    does provide ``None`` for a row, the default will be used instead. Note
    that whether this affects ordering might depend on the table type (model
    or normal). Also, you can specify a callable, which will be passed a
    ``BoundRow`` instance and is expected to return the default to be used.

    Additionally, you may specify ``data``. It works very much like
    ``default``, except its effect does not depend on the actual cell
    value. When given a function, it will always be called with a row object,
    expected to return the cell value. If given a string, that name will be
    used to read the data from the source (instead of the column's name).
    Note the interaction with ``default``. If ``default`` is specified as
    well, it will be used whenever ``data`` yields a None value.

    You can use ``visible`` to flag the column as hidden by default.
    However, this can be overridden by the ``visibility`` argument to the
    table constructor. If you want to make the column completely unavailable
    to the user, set ``inaccessible`` to True.

    Setting ``sortable`` to False will result in this column being unusable
    in ordering. You can further change the *default* sort direction to
    descending using ``direction``. Note that this option changes the actual
    direction only indirectly. Normal and reverse order, the terms
    django-tables exposes, now simply mean different things.
    """
    # Sort-direction constants for the ``direction`` option.
    ASC = 1
    DESC = 2
    # Tracks each time a Column instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self, verbose_name=None, name=None, default=None, data=None,
                 visible=True, inaccessible=False, sortable=None,
                 direction=ASC):
        self.verbose_name = verbose_name
        self.name = name
        self.default = default
        self.data = data
        self.visible = visible
        self.inaccessible = inaccessible
        self.sortable = sortable
        self.direction = direction
        self.creation_counter = Column.creation_counter
        Column.creation_counter += 1
    # Setter for the ``direction`` property: accepts 'asc'/'desc' strings
    # or the ASC/DESC constants.
    def _set_direction(self, value):
        if isinstance(value, basestring):
            if value in ('asc', 'desc'):
                self._direction = (value == 'asc') and Column.ASC or Column.DESC
            else:
                raise ValueError('Invalid direction value: %s' % value)
        else:
            self._direction = value
    direction = property(lambda s: s._direction, _set_direction)
class TextColumn(Column):
    """Marker subclass for text-valued columns; adds no behavior."""
    pass
class NumberColumn(Column):
    """Marker subclass for number-valued columns; adds no behavior."""
    pass | {
"repo_name": "icomms/wqmanager",
"path": "apps/django_tables/columns.py",
"copies": "3",
"size": "3196",
"license": "bsd-3-clause",
"hash": -326920432366089400,
"line_mean": 39.4683544304,
"line_max": 80,
"alpha_frac": 0.6736545682,
"autogenerated": false,
"ratio": 4.457461645746164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003252861375224244,
"num_lines": 79
} |
__all__ = [
'CombineTables',
'ReshapeTable',
'ExtractArray',
'SplitTableOnArray',
'AppendTableToCellData',
]
__displayname__ = 'Table Operations'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
###############################################################################
###############################################################################
class CombineTables(FilterBase):
    """Takes two tables and combines them if they have the same number of rows.

    Currently this cannot handle time varying tables as that gets complicated
    real quick if the tables do not have the same timestep values.
    """

    __displayname__ = 'Combine Tables'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=2,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # Parameters... none

    # CRITICAL for multiple input ports
    def FillInputPortInformation(self, port, info):
        """Used by pipeline. Necessary when dealing with multiple input ports"""
        # all are tables so no need to check port
        info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable")
        return 1

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Inputs from different ports:
        pdi0 = self.GetInputData(inInfo, 0, 0)
        pdi1 = self.GetInputData(inInfo, 1, 0)
        pdo = self.GetOutputData(outInfo, 0)
        pdo.DeepCopy(pdi0)
        # Both tables must have the same number of rows to be combined.
        nrows = pdi0.GetNumberOfRows()
        nrows1 = pdi1.GetNumberOfRows()
        if nrows != nrows1:
            raise AssertionError('Tables must have the same number of rows')
        # Append every column of the second table to the output.
        for i in range(pdi1.GetRowData().GetNumberOfArrays()):
            arr = pdi1.GetRowData().GetArray(i)
            pdo.GetRowData().AddArray(arr)
        return 1

    def apply(self, table0, table1):
        """Run the algorithm on the two input tables"""
        self.SetInputDataObject(0, table0)
        self.SetInputDataObject(1, table1)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
# ---- Reshape Table ----#
class ReshapeTable(FilterBase):
    """This filter will take a ``vtkTable`` object and reshape it. This filter
    essentially treats ``vtkTable``s as 2D matrices and reshapes them using
    ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will
    be renamed arbitrarily because VTK data arrays require a name.
    """
    __displayname__ = 'Reshape Table'
    __category__ = 'filter'
    def __init__(self, **kwargs):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # Parameters: target shape, output array names, and reshape order.
        self.__nrows = kwargs.get('nrows', 1)
        self.__ncols = kwargs.get('ncols', 1)
        self.__names = kwargs.get('names', [])
        self.__order = kwargs.get('order', 'F')
    def _reshape(self, pdi, pdo):
        """Internal helper to perform the reshape"""
        # Get number of columns
        cols = pdi.GetNumberOfColumns()
        # Get number of rows
        rows = pdi.GetColumn(0).GetNumberOfTuples()
        # Pad or validate the user-supplied names against ncols.
        if len(self.__names) != 0:
            num = len(self.__names)
            if num < self.__ncols:
                for i in range(num, self.__ncols):
                    self.__names.append('Field %d' % i)
            elif num > self.__ncols:
                raise _helpers.PVGeoError(
                    'Too many array names. `ncols` specified as %d and %d names given.'
                    % (self.__ncols, num)
                )
        else:
            self.__names = ['Field %d' % i for i in range(self.__ncols)]
        # Make a 2D numpy array and fill with data from input table
        data = np.empty((rows, cols))
        for i in range(cols):
            c = pdi.GetColumn(i)
            data[:, i] = interface.convert_array(c)
        # Total element count must be preserved by the reshape.
        if (self.__ncols * self.__nrows) != (cols * rows):
            raise _helpers.PVGeoError(
                'Total number of elements must remain %d. Check reshape dimensions.'
                % (cols * rows)
            )
        # Use numpy.reshape() to reshape data NOTE: only 2D because its a table
        # NOTE: column access of this reshape is not contiguous
        data = np.array(
            np.reshape(data.flatten(), (self.__nrows, self.__ncols), order=self.__order)
        )
        pdo.SetNumberOfRows(self.__nrows)
        # Add new array to output table and assign incremental names (e.g. Field0)
        for i in range(self.__ncols):
            # Make a contiguous array from the column we want
            col = np.array(data[:, i])
            # allow type to be determined by input
            # VTK arrays need a name. Set arbitrarily
            insert = interface.convert_array(
                col, name=self.__names[i]
            )  # array_type=vtk.VTK_FLOAT
            # pdo.AddColumn(insert) # these are not getting added to the output table
            # ... work around:
            pdo.GetRowData().AddArray(insert)  # NOTE: this is in the FieldData
        return pdo
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Perform task
        self._reshape(pdi, pdo)
        return 1
    #### Setters and Getters ####
    def set_names(self, names):
        """Set names using a semicolon (;) separated string or a list of strings

        Args:
            names (string): a string of data array names for the reshaped table
                using a semicolon (;) to separate
        """
        # parse the names (a semicolon separated list of names)
        if isinstance(names, str):
            names = names.split(';')
        if self.__names != names:
            self.__names = names
            self.Modified()
    def add_name(self, name):
        """Use to append a name to the list of data array names for the output
        table.
        """
        self.__names.append(name)
        self.Modified()
    def get_names(self):
        """Returns a list of the names given to the new arrays"""
        return self.__names
    def set_number_of_columns(self, ncols):
        """Set the number of columns for the output ``vtkTable``"""
        if isinstance(ncols, float):
            ncols = int(ncols)
        if self.__ncols != ncols:
            self.__ncols = ncols
            self.Modified()
    def set_number_of_rows(self, nrows):
        """Set the number of rows for the output ``vtkTable``"""
        if isinstance(nrows, float):
            nrows = int(nrows)
        if self.__nrows != nrows:
            self.__nrows = nrows
            self.Modified()
    def set_order(self, order):
        """Set the reshape order (``'C'`` or ``'F'``)"""
        if self.__order != order:
            self.__order = order
            self.Modified()
###############################################################################
class ExtractArray(FilterBase):
    """Extract an array from a ``vtkDataSet`` and make a ``vtkTable`` of it."""
    __displayname__ = 'Extract Array'
    __category__ = 'filter'
    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # [field, name] of the array selected for extraction.
        self.__input_array = [None, None]
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Inputs from different ports:
        pdi = self.GetInputData(inInfo, 0, 0)
        table = self.GetOutputData(outInfo, 0)
        # Note user has to select a single array to save out
        field, name = self.__input_array[0], self.__input_array[1]
        vtkarr = _helpers.get_vtk_array(pdi, field, name)
        table.GetRowData().AddArray(vtkarr)
        return 1
    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (int): the name of the array
        """
        if self.__input_array[0] != field:
            self.__input_array[0] = field
            self.Modified()
        if self.__input_array[1] != name:
            self.__input_array[1] = name
            self.Modified()
        return 1
    def apply(self, input_data_object, array_name):
        """Run the algorithm on the input data object, specifying the array name
        to extract.
        """
        self.SetInputDataObject(input_data_object)
        arr, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class SplitTableOnArray(FilterBase):
    """A filter to separate table data based on the unique values of a given data
    array into a ``vtkMultiBlockDataSet``.
    """
    __displayname__ = 'Split Table On Array'
    __category__ = 'filter'
    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkMultiBlockDataSet',
        )
        # [field, name] of the array used to split the table.
        self.__input_array = [None, None]
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        table = self.GetInputData(inInfo, 0, 0)
        # Get number of points
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        #### Perform task ####
        # Get input array
        field, name = self.__input_array[0], self.__input_array[1]
        wtbl = dsa.WrapDataObject(table)
        spliton = _helpers.get_numpy_array(wtbl, field, name)
        uniq = np.unique(spliton)
        # Split the input data based on indices
        df = interface.table_to_data_frame(table)
        blk = 0
        output.SetNumberOfBlocks(len(uniq))
        # One block per unique value; block name is "<array name><value>".
        for val in uniq:
            temp = interface.data_frame_to_table(df[df[name] == val])
            output.SetBlock(blk, temp)
            output.GetMetaData(blk).Set(
                vtk.vtkCompositeDataSet.NAME(), '{}{}'.format(name, val)
            )
            blk += 1
        return 1
    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (int): the name of the array
        """
        if self.__input_array[0] != field:
            self.__input_array[0] = field
            self.Modified()
        if self.__input_array[1] != name:
            self.__input_array[1] = name
            self.Modified()
        return 1
    def apply(self, input_data_object, array_name):
        """Run the algorithm on the input data object, specifying the array name
        to use for the split.
        """
        self.SetInputDataObject(input_data_object)
        arr, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class AppendTableToCellData(FilterPreserveTypeBase):
    """Takes two inputs, a dataset to preserve and a table of data, where the
    data in the table is appended to the CellData of the input dataset.

    The 0th port is the dataset to preserve and the 1st port is a table whose
    rows will be appended as CellData to the 0th port. The number of rows in
    the table MUST match the number of cells in the input dataset.
    """
    __displayname__ = 'Append Table to Cell Data'
    __category__ = 'filter'
    def __init__(self):
        FilterPreserveTypeBase.__init__(self, nInputPorts=2)
        self._preserve_port = 0  # ensure port 0's type is preserved
        self.__timesteps = None
    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        # Use the inputs' timesteps: this merges the timesteps values
        tsAll = _helpers.get_combined_input_time_steps(self)
        # Use both inputs' time steps
        self.__timesteps = _helpers.update_time_steps(self, tsAll, explicit=True)
        return 1
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Inputs from different ports:
        pdi0 = self.GetInputData(inInfo, 0, 0)  # Keep me!
        table = self.GetInputData(inInfo, 1, 0)  # add my data to the input
        pdo = self.GetOutputData(outInfo, 0)  # The output
        pdo.DeepCopy(pdi0)
        # Each table row becomes a cell value, so counts must match.
        nrows = table.GetNumberOfRows()
        ncells = pdo.GetNumberOfCells()
        if nrows != ncells:
            raise _helpers.PVGeoError(
                'Number rows in table ({}) does not match number of cells ({})'.format(
                    nrows, ncells
                )
            )
        for i in range(table.GetRowData().GetNumberOfArrays()):
            arr = table.GetRowData().GetArray(i)
            pdo.GetCellData().AddArray(arr)
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle time variance"""
        self._update_time_steps()
        return 1
    def apply(self, dataset, table):
        """Update the algorithm and get the output data object

        Args:
            dataset (vtkDataSet): Any dataset with CellData
            table (vtkTable): table of data values that will be appended to
                ``dataset``'s CellData

        Return:
            vtkDataSet: The appended dataset as a new object
        """
        self.SetInputDataObject(0, dataset)
        self.SetInputDataObject(1, table)
        self.Update()
        return pv.wrap(self.GetOutput())
    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps."""
        # if unset, force at least one attempt to set the timesteps
        if self.__timesteps is None:
            self._update_time_steps()
        # self.__timesteps should already be of type list
        return self.__timesteps if self.__timesteps is not None else None
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/filters/tables.py",
"copies": "1",
"size": "15362",
"license": "bsd-3-clause",
"hash": 5322099062648182000,
"line_mean": 34.1533180778,
"line_max": 88,
"alpha_frac": 0.5621663846,
"autogenerated": false,
"ratio": 4.150770062145366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005287764342883839,
"num_lines": 437
} |
__all__ = [
'Command',
]
class Command(object):
    """A GTP command.

    A GTP command contains:
    - An optional sequence number (used for matching up responses with
      commands)
    - A command name, e.g. 'genmove'
    - One or more arguments to the command, e.g. 'black'
    """

    def __init__(self, sequence, name, args):
        self.sequence = sequence
        self.name = name
        self.args = tuple(args)

    def __eq__(self, other):
        # Return NotImplemented for unrelated types instead of raising
        # AttributeError, so comparisons like `cmd == 'x'` work.
        if not isinstance(other, Command):
            return NotImplemented
        return (self.sequence == other.sequence
                and self.name == other.name
                and self.args == other.args)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep Command hashable and consistent with __eq__.
        return hash((self.sequence, self.name, self.args))

    def __repr__(self):
        return 'Command(%r, %r, %r)' % (self.sequence, self.name, self.args)

    def __str__(self):
        return repr(self)
def parse(command_string):
    """Parse a GTP protocol line into a Command object.

    Example:
        >>> parse('999 play white D4')
        Command(999, 'play', ('white', 'D4'))
    """
    tokens = command_string.split()
    # A leading integer, when present, is the optional sequence number.
    sequence = None
    try:
        sequence = int(tokens[0])
    except ValueError:
        pass
    else:
        tokens = tokens[1:]
    return Command(sequence, tokens[0], tokens[1:])
| {
"repo_name": "maxpumperla/betago",
"path": "betago/gtp/command.py",
"copies": "1",
"size": "1284",
"license": "mit",
"hash": -4506880383006361600,
"line_mean": 25.2040816327,
"line_max": 76,
"alpha_frac": 0.5755451713,
"autogenerated": false,
"ratio": 3.7876106194690267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9863155790769027,
"avg_score": 0,
"num_lines": 49
} |
__all__ = [
'ComponentStore',
]
from pathlib import Path
import copy
import hashlib
import json
import logging
import requests
import tempfile
from typing import Callable, Iterable
from . import _components as comp
from .structures import ComponentReference
from ._key_value_store import KeyValueStore
_COMPONENT_FILENAME = 'component.yaml'
class ComponentStore:
    def __init__(
            self, local_search_paths=None, url_search_prefixes=None, auth=None):
        """Instantiates a ComponentStore.

        Args:
            local_search_paths: local directories to search (default: ['.']).
            url_search_prefixes: URL prefixes to search.
            auth: optional auth object passed to ``requests`` when fetching
                components from URLs.
        """
        self.local_search_paths = local_search_paths or ['.']
        self.url_search_prefixes = url_search_prefixes or []
        self._auth = auth
        self._component_file_name = 'component.yaml'
        self._digests_subpath = 'versions/sha256'
        self._tags_subpath = 'versions/tags'
        # On-disk caches live under the system temp directory.
        cache_base_dir = Path(tempfile.gettempdir()) / '.kfp_components'
        self._git_blob_hash_to_data_db = KeyValueStore(cache_dir=cache_base_dir / 'git_blob_hash_to_data')
        self._url_to_info_db = KeyValueStore(cache_dir=cache_base_dir / 'url_to_info')
    def load_component_from_url(self, url):
        """Loads a component from a URL.

        Args:
            url: The url of the component specification.

        Returns:
            A factory function with a strongly-typed signature.
        """
        # Forward the store's auth so private URLs can be fetched.
        return comp.load_component_from_url(url=url, auth=self._auth)
    def load_component_from_file(self, path):
        """Loads a component from a path.

        Args:
            path: The path of the component specification.

        Returns:
            A factory function with a strongly-typed signature.
        """
        return comp.load_component_from_file(path)
    def load_component(self, name, digest=None, tag=None):
        """
        Loads component local file or URL and creates a task factory function

        Search locations:

        * :code:`<local-search-path>/<name>/component.yaml`
        * :code:`<url-search-prefix>/<name>/component.yaml`

        If the digest is specified, then the search locations are:

        * :code:`<local-search-path>/<name>/versions/sha256/<digest>`
        * :code:`<url-search-prefix>/<name>/versions/sha256/<digest>`

        If the tag is specified, then the search locations are:

        * :code:`<local-search-path>/<name>/versions/tags/<digest>`
        * :code:`<url-search-prefix>/<name>/versions/tags/<digest>`

        Args:
            name: Component name used to search and load the component artifact containing the component definition.
                Component name usually has the following form: group/subgroup/component
            digest: Strict component version. SHA256 hash digest of the component artifact file. Can be used to load a specific component version so that the pipeline is reproducible.
            tag: Version tag. Can be used to load component version from a specific branch. The version of the component referenced by a tag can change in future.

        Returns:
            A factory function with a strongly-typed signature.
            Once called with the required arguments, the factory constructs a pipeline task instance (ContainerOp).

        Raises:
            TypeError: if ``name`` is empty.
            ValueError: if the name starts/ends with a slash, or both
                ``digest`` and ``tag`` are given.
            RuntimeError: if the component cannot be found anywhere.
        """
        #This function should be called load_task_factory since it returns a factory function.
        #The real load_component function should produce an object with component properties (e.g. name, description, inputs/outputs).
        #TODO: Change this function to return component spec object but it should be callable to construct tasks.
        component_ref = ComponentReference(name=name, digest=digest, tag=tag)
        component_ref = self._load_component_spec_in_component_ref(component_ref)
        return comp._create_task_factory_from_component_spec(
            component_spec=component_ref.spec,
            component_ref=component_ref,
        )
def _load_component_spec_in_component_ref(
    self,
    component_ref: ComponentReference,
) -> ComponentReference:
    """Takes component_ref, finds the component spec and returns component_ref with .spec set to the component spec.

    See ComponentStore.load_component for the details of the search logic.

    Raises:
        TypeError: If the component name is missing.
        ValueError: If the name starts/ends with a slash, or both tag and
            digest are specified.
        RuntimeError: If the component is not found in any search location.
    """
    if component_ref.spec:
        return component_ref

    component_ref = copy.copy(component_ref)
    if component_ref.url:
        component_ref.spec = comp._load_component_spec_from_url(url=component_ref.url, auth=self._auth)
        return component_ref

    name = component_ref.name
    if not name:
        raise TypeError("name is required")
    if name.startswith('/') or name.endswith('/'):
        raise ValueError('Component name should not start or end with slash: "{}"'.format(name))

    digest = component_ref.digest
    tag = component_ref.tag
    if digest is not None and tag is not None:
        raise ValueError('Cannot specify both tag and digest')
    if digest is not None:
        path_suffix = name + '/' + self._digests_subpath + '/' + digest
    elif tag is not None:
        path_suffix = name + '/' + self._tags_subpath + '/' + tag
        # TODO: Handle symlinks in GIT URLs
    else:
        path_suffix = name + '/' + self._component_file_name

    tried_locations = []

    # Trying local search paths
    for local_search_path in self.local_search_paths:
        component_path = Path(local_search_path, path_suffix)
        tried_locations.append(str(component_path))
        if component_path.is_file():
            # TODO: Verify that the content matches the digest (if specified).
            component_ref._local_path = str(component_path)
            component_ref.spec = comp._load_component_spec_from_file(str(component_path))
            return component_ref

    # Trying URL prefixes
    for url_search_prefix in self.url_search_prefixes:
        url = url_search_prefix + path_suffix
        tried_locations.append(url)
        try:
            response = requests.get(url, auth=self._auth)
            response.raise_for_status()
        except requests.RequestException:
            # Narrowed from a bare ``except``: only network/HTTP failures
            # should fall through to the next search location; anything
            # else (e.g. KeyboardInterrupt) must propagate.
            continue
        if response.content:
            # TODO: Verify that the content matches the digest (if specified).
            component_ref.url = url
            component_ref.spec = comp._load_component_spec_from_yaml_or_zip_bytes(response.content)
            return component_ref

    raise RuntimeError('Component {} was not found. Tried the following locations:\n{}'.format(name, '\n'.join(tried_locations)))
def _load_component_from_ref(self, component_ref: ComponentReference) -> Callable:
    """Resolve *component_ref* to a component spec and build a task factory."""
    resolved_ref = self._load_component_spec_in_component_ref(component_ref)
    return comp._create_task_factory_from_component_spec(
        component_spec=resolved_ref.spec,
        component_ref=resolved_ref,
    )
def search(self, name: str):
    """Searches for components by name in the configured component store.

    Prints the component name and URL for components that match the given name.
    Only components on GitHub are currently supported.

    Example::

        kfp.components.ComponentStore.default_store.search('xgboost')
        # Returns results:
        #     Xgboost train   https://raw.githubusercontent.com/.../components/XGBoost/Train/component.yaml
        #     Xgboost predict https://raw.githubusercontent.com/.../components/XGBoost/Predict/component.yaml
    """
    self._refresh_component_cache()
    query = name.casefold()
    for url in self._url_to_info_db.keys():
        info = json.loads(self._url_to_info_db.try_get_value_bytes(url))
        component_name = info['name']
        # Case-insensitive substring match on the component name.
        if query in component_name.casefold():
            print(component_name + '\t' + url)
def list(self):
    """Print every component in the store (a search with an empty query)."""
    self.search('')
def _refresh_component_cache(self):
    """Crawl the GitHub url_search_prefixes and cache component specs.

    Populates two local databases: git blob hash -> component bytes, and
    component URL -> component info (name, url, hashes).  URLs already in
    the info db are skipped, so repeated calls are incremental.
    """
    for url_search_prefix in self.url_search_prefixes:
        if url_search_prefix.startswith('https://raw.githubusercontent.com/'):
            logging.info('Searching for components in "{}"'.format(url_search_prefix))
            for candidate in _list_candidate_component_uris_from_github_repo(url_search_prefix, auth=self._auth):
                component_url = candidate['url']
                if self._url_to_info_db.exists(component_url):
                    continue
                logging.debug('Found new component URL: "{}"'.format(component_url))

                blob_hash = candidate['git_blob_hash']
                if not self._git_blob_hash_to_data_db.exists(blob_hash):
                    logging.debug('Downloading component spec from "{}"'.format(component_url))
                    response = _get_request_session().get(component_url, auth=self._auth)
                    response.raise_for_status()
                    component_data = response.content

                    # Verifying the hash
                    received_data_hash = _calculate_git_blob_hash(component_data)
                    if received_data_hash.lower() != blob_hash.lower():
                        raise RuntimeError(
                            'The downloaded component ({}) has incorrect hash: "{}" != "{}"'.format(
                                component_url, received_data_hash, blob_hash,
                            )
                        )

                    # Verifying that the component is loadable
                    try:
                        component_spec = comp._load_component_spec_from_component_text(component_data)
                    except:
                        # NOTE(review): deliberately best-effort -- files that
                        # are not valid component specs are silently skipped.
                        continue
                    self._git_blob_hash_to_data_db.store_value_bytes(blob_hash, component_data)
                else:
                    # Cache hit: reuse the stored bytes and re-parse the spec.
                    component_data = self._git_blob_hash_to_data_db.try_get_value_bytes(blob_hash)
                    component_spec = comp._load_component_spec_from_component_text(component_data)

                component_name = component_spec.name
                self._url_to_info_db.store_value_text(component_url, json.dumps(dict(
                    name=component_name,
                    url=component_url,
                    git_blob_hash=blob_hash,
                    digest=_calculate_component_digest(component_data),
                )))
def _get_request_session(max_retries: int = 3):
    """Build a requests.Session that retries transient HTTP failures.

    Args:
        max_retries: Total number of retries per request.

    Returns:
        A requests.Session with retrying adapters mounted for http/https.
    """
    retry_kwargs = dict(
        total=max_retries,
        backoff_factor=0.1,
        status_forcelist=[413, 429, 500, 502, 503, 504],
    )
    try:
        # urllib3 >= 1.26 renamed ``method_whitelist`` to
        # ``allowed_methods``; the old name was removed in urllib3 2.0.
        retry_strategy = requests.packages.urllib3.util.retry.Retry(
            allowed_methods=frozenset(['GET', 'POST']),
            **retry_kwargs,
        )
    except TypeError:
        # Fall back for old urllib3 that only knows ``method_whitelist``.
        retry_strategy = requests.packages.urllib3.util.retry.Retry(
            method_whitelist=frozenset(['GET', 'POST']),
            **retry_kwargs,
        )

    session = requests.Session()
    session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retry_strategy))
    session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retry_strategy))
    return session
def _calculate_git_blob_hash(data: bytes) -> str:
return hashlib.sha1(b'blob ' + str(len(data)).encode('utf-8') + b'\x00' + data).hexdigest()
def _calculate_component_digest(data: bytes) -> str:
return hashlib.sha256(data.replace(b'\r\n', b'\n')).hexdigest()
def _list_candidate_component_uris_from_github_repo(url_search_prefix: str, auth=None) -> Iterable[dict]:
    """Yield candidate component file entries found in a GitHub repo.

    Uses the GitHub code-search API to locate files named like a component
    file under the given raw.githubusercontent.com prefix.

    Args:
        url_search_prefix: Prefix of the form
            ``https://raw.githubusercontent.com/<org>/<repo>/<ref>/<path>/``.
        auth: Optional auth object passed through to ``requests``.

    Yields:
        Dicts with ``url`` (raw content URL), ``path`` and ``git_blob_hash``
        keys.  (Return annotation fixed: the original said Iterable[str]
        but the function has always yielded dicts.)
    """
    (schema, _, host, org, repo, ref, path_prefix) = url_search_prefix.split('/', 6)
    # Reuse one retrying session for all result pages instead of building a
    # fresh session per page.
    session = _get_request_session()
    for page in range(1, 999):
        # NOTE(review): GitHub's search API caps per_page at 100; larger
        # values appear to be clamped server-side -- TODO confirm.
        search_url = (
            'https://api.github.com/search/code?q=filename:{}+repo:{}/{}&page={}&per_page=1000'
        ).format(_COMPONENT_FILENAME, org, repo, page)
        response = session.get(search_url, auth=auth)
        response.raise_for_status()
        result = response.json()
        items = result['items']
        if not items:
            break
        for item in items:
            html_url = item['html_url']
            # Constructing direct content URL
            # There is an API (/repos/:owner/:repo/git/blobs/:file_sha) for
            # getting the blob content, but it requires decoding the content.
            raw_url = html_url.replace(
                'https://github.com/', 'https://raw.githubusercontent.com/'
            ).replace('/blob/', '/', 1)
            if not raw_url.endswith(_COMPONENT_FILENAME):
                # GitHub matches component_test.yaml when searching for filename:"component.yaml"
                continue
            yield dict(
                url=raw_url,
                path=item['path'],
                git_blob_hash=item['sha'],
            )
# Default, process-wide store: looks in the current directory first, then
# falls back to the components/ tree of the kubeflow/pipelines GitHub repo.
ComponentStore.default_store = ComponentStore(
    local_search_paths=[
        '.',
    ],
    url_search_prefixes=[
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/'
    ],
)
| {
"repo_name": "kubeflow/pipelines",
"path": "sdk/python/kfp/components/_component_store.py",
"copies": "1",
"size": "13085",
"license": "apache-2.0",
"hash": 6916295595545179000,
"line_mean": 42.3278145695,
"line_max": 183,
"alpha_frac": 0.6022927016,
"autogenerated": false,
"ratio": 4.302860901019401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030350482205207165,
"num_lines": 302
} |
__all__ = [
'ComponentStore',
]
from pathlib import Path
import copy
import requests
from typing import Callable
from . import _components as comp
from .structures import ComponentReference
class ComponentStore:
    """Locates and loads pipeline components from local paths and URL prefixes."""

    def __init__(self, local_search_paths=None, url_search_prefixes=None):
        """Initializes the store.

        Args:
            local_search_paths: Directories to search; defaults to ``['.']``.
            url_search_prefixes: URL prefixes to search; defaults to ``[]``.
        """
        self.local_search_paths = local_search_paths or ['.']
        self.url_search_prefixes = url_search_prefixes or []

        self._component_file_name = 'component.yaml'
        self._digests_subpath = 'versions/sha256'
        self._tags_subpath = 'versions/tags'

    def load_component_from_url(self, url):
        """Loads a component from a URL and returns a task factory function."""
        return comp.load_component_from_url(url)

    def load_component_from_file(self, path):
        """Loads a component from a local file and returns a task factory function."""
        return comp.load_component_from_file(path)

    def load_component(self, name, digest=None, tag=None):
        """Loads component local file or URL and creates a task factory function

        Search locations:
        <local-search-path>/<name>/component.yaml
        <url-search-prefix>/<name>/component.yaml

        If the digest is specified, then the search locations are:
        <local-search-path>/<name>/versions/sha256/<digest>
        <url-search-prefix>/<name>/versions/sha256/<digest>

        If the tag is specified, then the search locations are:
        <local-search-path>/<name>/versions/tags/<digest>
        <url-search-prefix>/<name>/versions/tags/<digest>

        Args:
            name: Component name used to search and load the component artifact containing the component definition.
                Component name usually has the following form: group/subgroup/component
            digest: Strict component version. SHA256 hash digest of the component artifact file. Can be used to load a specific component version so that the pipeline is reproducible.
            tag: Version tag. Can be used to load component version from a specific branch. The version of the component referenced by a tag can change in future.

        Returns:
            A factory function with a strongly-typed signature.
            Once called with the required arguments, the factory constructs a pipeline task instance (ContainerOp).
        """
        # This function should be called load_task_factory since it returns a factory function.
        # The real load_component function should produce an object with component properties (e.g. name, description, inputs/outputs).
        # TODO: Change this function to return component spec object but it should be callable to construct tasks.
        component_ref = ComponentReference(name=name, digest=digest, tag=tag)
        component_ref = self._load_component_spec_in_component_ref(component_ref)
        return comp._create_task_factory_from_component_spec(
            component_spec=component_ref.spec,
            component_ref=component_ref,
        )

    def _load_component_spec_in_component_ref(
        self,
        component_ref: ComponentReference,
    ) -> ComponentReference:
        """Takes component_ref, finds the component spec and returns component_ref with .spec set to the component spec.

        See ComponentStore.load_component for the details of the search logic.
        """
        if component_ref.spec:
            return component_ref

        component_ref = copy.copy(component_ref)
        if component_ref.url:
            component_ref.spec = comp._load_component_spec_from_url(component_ref.url)
            return component_ref

        name = component_ref.name
        if not name:
            raise TypeError("name is required")
        if name.startswith('/') or name.endswith('/'):
            raise ValueError('Component name should not start or end with slash: "{}"'.format(name))

        digest = component_ref.digest
        tag = component_ref.tag
        if digest is not None and tag is not None:
            raise ValueError('Cannot specify both tag and digest')
        if digest is not None:
            path_suffix = name + '/' + self._digests_subpath + '/' + digest
        elif tag is not None:
            path_suffix = name + '/' + self._tags_subpath + '/' + tag
            # TODO: Handle symlinks in GIT URLs
        else:
            path_suffix = name + '/' + self._component_file_name

        tried_locations = []

        # Trying local search paths
        for local_search_path in self.local_search_paths:
            component_path = Path(local_search_path, path_suffix)
            tried_locations.append(str(component_path))
            if component_path.is_file():
                # TODO: Verify that the content matches the digest (if specified).
                component_ref._local_path = str(component_path)
                component_ref.spec = comp._load_component_spec_from_file(str(component_path))
                return component_ref

        # Trying URL prefixes
        for url_search_prefix in self.url_search_prefixes:
            url = url_search_prefix + path_suffix
            tried_locations.append(url)
            try:
                response = requests.get(url)
                response.raise_for_status()
            except requests.RequestException:
                # Narrowed from a bare ``except``: only network/HTTP
                # failures should fall through to the next location.
                continue
            if response.content:
                # TODO: Verify that the content matches the digest (if specified).
                component_ref.url = url
                component_ref.spec = comp._load_component_spec_from_yaml_or_zip_bytes(response.content)
                return component_ref

        raise RuntimeError('Component {} was not found. Tried the following locations:\n{}'.format(name, '\n'.join(tried_locations)))

    def _load_component_from_ref(self, component_ref: ComponentReference) -> Callable:
        """Resolves *component_ref* and builds a task factory from its spec."""
        component_ref = self._load_component_spec_in_component_ref(component_ref)
        return comp._create_task_factory_from_component_spec(component_spec=component_ref.spec, component_ref=component_ref)
# Default, process-wide store: looks in the current directory first, then
# falls back to the components/ tree of the kubeflow/pipelines GitHub repo.
ComponentStore.default_store = ComponentStore(
    local_search_paths=[
        '.',
    ],
    url_search_prefixes=[
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/'
    ],
)
| {
"repo_name": "kubeflow/kfp-tekton-backend",
"path": "sdk/python/kfp/components/_component_store.py",
"copies": "1",
"size": "6197",
"license": "apache-2.0",
"hash": -1577578450024795600,
"line_mean": 43.2642857143,
"line_max": 183,
"alpha_frac": 0.6496691948,
"autogenerated": false,
"ratio": 4.382602545968883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5532271740768883,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'compute_etag',
'compute_etag_from_file',
'maybe_raise_304',
]
import hashlib
import logging
import re
from .. import consts
from .. import wsgi_apps
LOG = logging.getLogger(__name__)

_CHUNK_SIZE = 8192  # Bytes hashed per read when digesting a file object.
def compute_etag(content):
    """Return a strong ETag (quoted MD5 hex digest) for *content* bytes."""
    return '"%s"' % hashlib.md5(content).hexdigest()
def compute_etag_from_file(content_file, chunk_size=None):
    """Return a strong ETag (quoted MD5 hex digest) for a binary file object.

    Reads *content_file* through ``readinto`` in chunks of *chunk_size*
    bytes (default: ``_CHUNK_SIZE``), so arbitrarily large files are hashed
    without loading them fully into memory.

    Args:
        content_file: Binary file-like object supporting ``readinto``.
        chunk_size: Optional read-buffer size in bytes; new parameter,
            backward-compatible (defaults to the module-wide chunk size).
    """
    if chunk_size is None:
        chunk_size = _CHUNK_SIZE
    hasher = hashlib.md5()
    buffer = memoryview(bytearray(chunk_size))
    while True:
        num_read = content_file.readinto(buffer)
        # Treat None (raw non-blocking stream with no data) like EOF rather
        # than crashing on the ``<= 0`` comparison.
        if num_read is None or num_read <= 0:
            break
        hasher.update(buffer[:num_read])
    return '"%s"' % hasher.hexdigest()
def maybe_raise_304(request, response):
    """Check If-None-Match with ETag and maybe raise 304.

    If the request's If-None-Match header matches the response's ETag
    header, raise an HttpError with status 304 so the framework answers
    "Not Modified" instead of sending the body.
    """
    if request.method not in (consts.METHOD_HEAD, consts.METHOD_GET):
        # Conditional requests are standardized for GET/HEAD; warn for
        # other methods but still honor the header below.
        LOG.warning(
            'check If-None-Match in non-standard request method: %s %s',
            request.method,
            request.path_str,
        )
    if_none_match = request.get_header(consts.HEADER_IF_NONE_MATCH)
    if if_none_match is None:
        return
    etag = response.headers.get(consts.HEADER_ETAG)
    if etag is None:
        return
    # TODO: Handle W/"..." weak validator.
    if etag in _parse_etags(if_none_match):
        raise wsgi_apps.HttpError(
            consts.Statuses.NOT_MODIFIED,
            'etag matches: %s vs %s' % (etag, if_none_match),
            response.headers,
        )
_ETAGS_PATTERN = re.compile(r'((?:W/)?"[^"]+")(?:\s*,\s*)?')
def _parse_etags(etags_str):
if etags_str.strip() == '*':
return _MatchAll()
return frozenset(
match.group(1) for match in _ETAGS_PATTERN.finditer(etags_str)
)
class _MatchAll:
def __contains__(self, _):
return True
| {
"repo_name": "clchiou/garage",
"path": "py/g1/webs/g1/webs/handlers/etags.py",
"copies": "1",
"size": "1787",
"license": "mit",
"hash": -4296764127639388000,
"line_mean": 23.4794520548,
"line_max": 72,
"alpha_frac": 0.5987688864,
"autogenerated": false,
"ratio": 3.4103053435114505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45090742299114506,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ConnectionManager',
]
import collections
import contextlib
import logging
from g1.asyncs.bases import locks
from g1.asyncs.bases import timers
from g1.bases import assertions
from g1.bases.assertions import ASSERT
from g1.operations.databases.bases import interfaces
LOG = logging.getLogger(__name__)

# Assertion helper that raises InvalidRequestError (a client error) rather
# than AssertionError when a check fails.
ASSERT_REQUEST = assertions.Assertions(
    lambda *_: interfaces.InvalidRequestError()
)

_WAIT_FOR_READER = 4  # Unit: seconds.
_WAIT_FOR_WRITER = 4  # Unit: seconds.
_NUM_REMEMBERED = 8  # Completed transaction ids remembered per outcome list.
class ConnectionManager:
    """Connection manager.

    This protects concurrent access to the underlying connection object
    by providing a reader-writer lock interface guarding the connection
    object.  This mimics SQLite's transaction model that is also a
    reader-writer lock (I am not sure if this is a good idea).
    """

    def __init__(self, conn):
        self._conn = conn
        # To implement reader lock.
        self._num_readers = 0
        self._num_readers_gate = locks.Gate()
        # To implement writer lock.  _tx_id == 0 means "no writer".
        self._tx_id = 0
        self._tx_id_gate = locks.Gate()
        self._tx = None
        # Use collections.deque as a bounded list to track completed
        # transaction completion states.  They are a best effort for
        # generating user-friendly error responses.
        self._rollback_tx_ids = collections.deque(maxlen=_NUM_REMEMBERED)
        self._commit_tx_ids = collections.deque(maxlen=_NUM_REMEMBERED)
        self._timeout_tx_ids = collections.deque(maxlen=_NUM_REMEMBERED)

    @property
    def tx_id(self):
        # Id of the current write transaction; 0 when no writer is active.
        return self._tx_id

    def close(self):
        """Roll back any in-flight transaction and close the connection."""
        if self._tx_id != 0:
            LOG.warning('roll back transaction on close: %#016x', self._tx_id)
            self.rollback_due_to_timeout()
        self._conn.close()
        self._conn = None  # Make sure this manager becomes unusable.

    #
    # Reader-writer lock.
    #

    @contextlib.asynccontextmanager
    async def reading(self):
        """Use connection in a read transaction."""
        await self._wait_for_writer()
        self._num_readers += 1
        try:
            yield self._conn
        finally:
            self._num_readers -= 1
            if self._num_readers == 0:
                # Wake any task blocked in _wait_for_reader.
                self._num_readers_gate.unblock()

    @contextlib.asynccontextmanager
    async def writing(self, tx_id):
        """Use connection in a write transaction."""
        ASSERT_REQUEST.greater(tx_id, 0)
        if tx_id != self._tx_id:
            # Distinguish "timed out earlier" from "never existed" so the
            # client gets a more useful error.
            if tx_id in self._timeout_tx_ids:
                raise interfaces.TransactionTimeoutError
            raise interfaces.TransactionNotFoundError
        yield self._conn

    @contextlib.asynccontextmanager
    async def transacting(self):
        """Use connection in a one-shot write transaction."""
        tx_id = interfaces.generate_transaction_id()
        await self.begin(tx_id)
        try:
            yield self._conn
        except BaseException:
            self.rollback(tx_id)
            raise
        else:
            self.commit(tx_id)

    async def _wait_for_reader(self):
        # Block (bounded by _WAIT_FOR_READER) until no readers remain.
        if self._num_readers == 0:
            return
        with timers.timeout_ignore(_WAIT_FOR_READER):
            while self._num_readers != 0:
                await self._num_readers_gate.wait()
        if self._num_readers != 0:
            LOG.warning('wait for reader timeout: %d', self._num_readers)
            raise interfaces.TransactionTimeoutError

    async def _wait_for_writer(self):
        # Block (bounded by _WAIT_FOR_WRITER) until no writer is active.
        if self._tx_id == 0:
            return
        with timers.timeout_ignore(_WAIT_FOR_WRITER):
            while self._tx_id != 0:
                await self._tx_id_gate.wait()
        if self._tx_id != 0:
            LOG.warning('wait for writer timeout: %#016x', self._tx_id)
            raise interfaces.TransactionTimeoutError

    #
    # "begin" transactions.
    #

    async def begin(self, tx_id):
        """Begin a write transaction; idempotent for the same tx_id."""
        ASSERT_REQUEST.greater(tx_id, 0)
        if tx_id == self._tx_id:
            return self._conn  # begin is idempotent.
        await self._wait_for_reader()
        await self._wait_for_writer()
        # It is possible that _wait_for_writer is about to return but
        # this task is not scheduled to execute, and another reader
        # takes place first; so let's check _num_readers again.
        if self._num_readers != 0:
            LOG.warning('another reader preempt the begin: %#016x', tx_id)
            raise interfaces.TransactionTimeoutError
        LOG.info('begin transaction: %#016x', tx_id)
        self._tx_id = tx_id
        self._tx = self._conn.begin()
        return self._conn

    #
    # "end" transactions.
    #

    def rollback(self, tx_id):
        """Roll back transaction *tx_id*; idempotent."""
        if not self._check_end(
            tx_id, self._rollback_tx_ids, self._timeout_tx_ids
        ):
            return
        self._tx.rollback()
        self._end(self._rollback_tx_ids)

    def rollback_due_to_timeout(self):
        """Roll back the current transaction, recording it as timed out."""
        ASSERT.not_equal(self._tx_id, 0)
        self._tx.rollback()
        self._end(self._timeout_tx_ids)

    def commit(self, tx_id):
        """Commit transaction *tx_id*; idempotent."""
        if not self._check_end(tx_id, self._commit_tx_ids):
            return
        self._tx.commit()
        self._end(self._commit_tx_ids)

    def _check_end(self, tx_id, *tx_id_lists):
        """Check preconditions of an "end" call."""
        ASSERT_REQUEST.greater(tx_id, 0)
        # Do not check self._tx_id == 0 to support idempotent end.
        if tx_id == self._tx_id:
            return True
        elif any(tx_id in tx_id_list for tx_id_list in tx_id_lists):
            return False  # end is idempotent.
        else:
            raise interfaces.TransactionNotFoundError

    def _end(self, tx_id_lists):
        # Record the completed id, clear writer state, wake waiters.
        LOG.info('end transaction: %#016x', self._tx_id)
        tx_id_lists.append(self._tx_id)
        self._tx_id = 0
        self._tx = None
        self._tx_id_gate.unblock()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/databases/servers/g1/operations/databases/servers/connections.py",
"copies": "1",
"size": "5880",
"license": "mit",
"hash": -1977415965759700700,
"line_mean": 31.3076923077,
"line_max": 78,
"alpha_frac": 0.5964285714,
"autogenerated": false,
"ratio": 3.863337713534823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959766284934823,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"ConsulError",
"ConflictError",
"NotFound",
"SupportDisabled",
"TransactionError",
"UnauthorizedError"
]
class ConsulError(Exception):
    """Base error for Consul operations.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """

    def __init__(self, msg, *, meta=None):
        self.value = msg
        self.meta = meta or {}
        # The exception message is always text, even for byte payloads.
        text = msg.decode("utf-8") if isinstance(msg, bytes) else msg
        super().__init__(text)
class NotFound(ConsulError):
    """Raised when the requested object does not exist.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """
class ConflictError(ConsulError):
    """Raised when there is a conflict in the agent.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """
class UnauthorizedError(ConsulError):
    """Raised when a session with sufficient rights is required.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """
class SupportDisabled(Exception):
    """Raised when the endpoint is not active on the agent."""
class TransactionError(Exception):
    """Raised when a Consul transaction fails.

    Attributes:
        errors (Mapping): The errors, keyed by the index into *operations*
        operations (Collection): The operations that were attempted
        meta (Meta): meta of the error

    For example, when the token lacks rights to write a key::

        errors = {
            0: {"OpIndex": 0, "What": "Permission denied"}
        }
        operations = [
            {"Verb": "get", "Key": "foo"},
            {"Verb": "set", "Key": "bar", "Value": "YmFy", "Flags": None}
        ]
    """

    def __init__(self, errors, operations, meta, *, msg=None):
        self.errors = errors
        self.operations = operations
        self.meta = meta
        super().__init__(msg or "Transaction failed")
| {
"repo_name": "johnnoone/aioconsul",
"path": "aioconsul/exceptions.py",
"copies": "1",
"size": "1964",
"license": "bsd-3-clause",
"hash": 3017824726051999000,
"line_mean": 22.9512195122,
"line_max": 75,
"alpha_frac": 0.5809572301,
"autogenerated": false,
"ratio": 4.251082251082251,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5332039481182251,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ContextBase',
'DialerBase',
'Protocols',
'SocketBase',
]
import ctypes
import enum
from g1.bases import classes
from g1.bases.assertions import ASSERT
from . import _nng
from . import errors
from . import options
# One enum member per protocol exported by _nng; each member's value is the
# pair of C constructors (open, open_raw) for that protocol.
Protocols = enum.Enum(
    'Protocols',
    [(
        name.upper(),
        (
            _nng.F['nng_%s_open' % name],
            _nng.F['nng_%s_open_raw' % name],
        ),
    ) for name in _nng.PROTOCOLS],
)
class CommonOptions(options.OptionsBase):
    """Option descriptors shared by sockets and endpoints (dialers/listeners)."""

    # Generic options.
    raw = options.make(_nng.Options.NNG_OPT_RAW)
    protocol_id = options.make(_nng.Options.NNG_OPT_PROTO)
    protocol_name = options.make(_nng.Options.NNG_OPT_PROTONAME)
    peer_id = options.make(_nng.Options.NNG_OPT_PEER)
    peer_name = options.make(_nng.Options.NNG_OPT_PEERNAME)
    recv_fd = options.make(_nng.Options.NNG_OPT_RECVFD)
    send_fd = options.make(_nng.Options.NNG_OPT_SENDFD)
    max_recv_size = options.make(_nng.Options.NNG_OPT_RECVMAXSZ)
class ContextOptions(options.OptionsBase):
    """Option descriptors and helpers shared by sockets and contexts."""

    # Generic options.
    recv_timeout = options.make(_nng.Options.NNG_OPT_RECVTIMEO)
    send_timeout = options.make(_nng.Options.NNG_OPT_SENDTIMEO)

    # Protocol "pubsub0" options.

    def subscribe(self, topic):
        """Subscribe to messages whose prefix matches *topic* (sub protocol)."""
        options.setopt_bytes(
            self, _nng.Options.NNG_OPT_SUB_SUBSCRIBE[0], topic
        )

    def unsubscribe(self, topic):
        """Remove a subscription previously added with ``subscribe``."""
        options.setopt_bytes(
            self, _nng.Options.NNG_OPT_SUB_UNSUBSCRIBE[0], topic
        )

    # Protocol "reqrep0" options.
    resend_time = options.make(_nng.Options.NNG_OPT_REQ_RESENDTIME)

    # Protocol "survey0" options.
    survey_time = options.make(_nng.Options.NNG_OPT_SURVEYOR_SURVEYTIME)
class SocketBase(CommonOptions, ContextOptions):
    """Base class wrapping an nng socket handle.

    Owns the underlying ``nng_socket`` handle and tracks the dialers and
    listeners created from it.  Concrete subclasses supply the actual
    dial/send/recv implementations.
    """

    _name = 'socket'

    # Generic options.
    name = options.make(_nng.Options.NNG_OPT_SOCKNAME)
    recv_buffer_size = options.make(_nng.Options.NNG_OPT_RECVBUF)
    send_buffer_size = options.make(_nng.Options.NNG_OPT_SENDBUF)
    max_ttl = options.make(_nng.Options.NNG_OPT_MAXTTL)
    min_reconnect_time = options.make(_nng.Options.NNG_OPT_RECONNMINT)
    max_reconnect_time = options.make(_nng.Options.NNG_OPT_RECONNMAXT)

    # TCP options.
    tcp_nodelay = options.make(_nng.Options.NNG_OPT_TCP_NODELAY)
    tcp_keepalive = options.make(_nng.Options.NNG_OPT_TCP_KEEPALIVE)

    # End of options.

    # Hooks that concrete subclasses must provide.
    _dialer_type = classes.abstract_method
    dial = classes.abstract_method
    send = classes.abstract_method
    recv = classes.abstract_method
    sendmsg = classes.abstract_method
    recvmsg = classes.abstract_method

    def __init__(self, protocol, *, raw=False):
        # In case ``__init__`` raises.
        self._handle = None
        ASSERT.isinstance(protocol, Protocols)
        # protocol.value is the (open, open_raw) pair of C constructors.
        opener = protocol.value[1] if raw else protocol.value[0]
        handle = _nng.nng_socket()
        errors.check(opener(ctypes.byref(handle)))
        self._handle = handle.value
        self.protocol = protocol
        self.dialers = {}
        self.listeners = {}

    __repr__ = classes.make_repr(
        'id={self.id} dialers={self.dialers} listeners={self.listeners}'
    )

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def __del__(self):
        # You have to check whether ``__init__`` raises.
        if self._handle is not None:
            self.close()

    @property
    def id(self):
        return _nng.F.nng_socket_id(self._handle)

    def close(self):
        """Close the socket; closing an already-closed socket is a no-op."""
        try:
            errors.check(_nng.F.nng_close(self._handle))
        except errors.Errors.ECLOSED:
            pass
        self.dialers.clear()
        self.listeners.clear()

    def _dial(self, url, *, flags=0, create_only=False):
        """Create (and, unless create_only, start) a dialer for *url*."""
        handle = _nng.nng_dialer()
        handle_p = ctypes.byref(handle)
        url = _nng.ensure_bytes(url)
        if create_only:
            rv = _nng.F.nng_dialer_create(handle_p, self._handle, url)
        else:
            rv = _nng.F.nng_dial(self._handle, url, handle_p, flags)
        errors.check(rv)
        dialer = self._dialer_type(self, handle.value)
        self.dialers[dialer.id] = dialer
        return dialer

    def listen(self, url, *, create_only=False):
        """Create (and, unless create_only, start) a listener for *url*."""
        handle = _nng.nng_listener()
        handle_p = ctypes.byref(handle)
        url = _nng.ensure_bytes(url)
        if create_only:
            rv = _nng.F.nng_listener_create(handle_p, self._handle, url)
        else:
            rv = _nng.F.nng_listen(self._handle, url, handle_p, 0)
        errors.check(rv)
        listener = Listener(self, handle.value)
        self.listeners[listener.id] = listener
        return listener
class ContextBase(ContextOptions):
    """Base class wrapping an nng context opened on a socket."""

    _name = 'ctx'

    # Hooks that concrete subclasses must provide.
    send = classes.abstract_method
    recv = classes.abstract_method
    sendmsg = classes.abstract_method
    recvmsg = classes.abstract_method

    def __init__(self, socket):
        # In case ``__init__`` raises.
        self._handle = None
        handle = _nng.nng_ctx()
        errors.check(_nng.F.nng_ctx_open(ctypes.byref(handle), socket._handle))
        self.socket = socket
        self._handle = handle

    __repr__ = classes.make_repr('id={self.id} {self.socket}')

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def __del__(self):
        # You have to check whether ``__init__`` raises.
        if self._handle is not None:
            self.close()

    @property
    def id(self):
        return _nng.F.nng_ctx_id(self._handle)

    def close(self):
        """Close the context; closing twice is a no-op."""
        try:
            errors.check(_nng.F.nng_ctx_close(self._handle))
        except errors.Errors.ECLOSED:
            pass
class Endpoint(CommonOptions):
    """Common base for dialers and listeners (nng "endpoints")."""

    # Generic options.
    name = options.make(_nng.Options.NNG_OPT_SOCKNAME, mode='ro')
    recv_buffer_size = options.make(_nng.Options.NNG_OPT_RECVBUF, mode='ro')
    send_buffer_size = options.make(_nng.Options.NNG_OPT_SENDBUF, mode='ro')
    local_address = options.make(_nng.Options.NNG_OPT_LOCADDR)
    remote_address = options.make(_nng.Options.NNG_OPT_REMADDR)
    url = options.make(_nng.Options.NNG_OPT_URL)
    max_ttl = options.make(_nng.Options.NNG_OPT_MAXTTL, mode='ro')

    # TCP options.
    tcp_nodelay = options.make(_nng.Options.NNG_OPT_TCP_NODELAY, mode='ro')
    tcp_keepalive = options.make(_nng.Options.NNG_OPT_TCP_KEEPALIVE, mode='ro')
    tcp_bound_port = options.make(_nng.Options.NNG_OPT_TCP_BOUND_PORT)

    # TLS options.
    tls_auth_mode = options.make(_nng.Options.NNG_OPT_TLS_AUTH_MODE)
    tls_cert_key_file = options.make(_nng.Options.NNG_OPT_TLS_CERT_KEY_FILE)
    tls_ca_file = options.make(_nng.Options.NNG_OPT_TLS_CA_FILE)
    tls_server_name = options.make(_nng.Options.NNG_OPT_TLS_SERVER_NAME)
    tls_verified = options.make(_nng.Options.NNG_OPT_TLS_VERIFIED)

    # WebSocket options.
    ws_request_headers = options.make(_nng.Options.NNG_OPT_WS_REQUEST_HEADERS)
    ws_response_headers = options.make(
        _nng.Options.NNG_OPT_WS_RESPONSE_HEADERS
    )
    ws_request_uri = options.make(_nng.Options.NNG_OPT_WS_REQUEST_URI)
    ws_max_send_frame = options.make(_nng.Options.NNG_OPT_WS_SENDMAXFRAME)
    ws_max_recv_frame = options.make(_nng.Options.NNG_OPT_WS_RECVMAXFRAME)
    ws_protocol = options.make(_nng.Options.NNG_OPT_WS_PROTOCOL)

    def ws_request_get_header(self, name):
        """Read one WebSocket request header by name."""
        name = (
            _nng.Options.NNG_OPT_WS_REQUEST_HEADER[0] +
            _nng.ensure_bytes(name)
        )
        return options.getopt_string(self, name)

    def ws_request_set_header(self, name, value):
        """Set one WebSocket request header; returns *value*."""
        name = (
            _nng.Options.NNG_OPT_WS_REQUEST_HEADER[0] +
            _nng.ensure_bytes(name)
        )
        options.setopt_string(self, name, value)
        return value

    def ws_response_get_header(self, name):
        """Read one WebSocket response header by name."""
        name = (
            _nng.Options.NNG_OPT_WS_RESPONSE_HEADER[0] +
            _nng.ensure_bytes(name)
        )
        return options.getopt_string(self, name)

    def ws_response_set_header(self, name, value):
        """Set one WebSocket response header; returns *value*."""
        name = (
            _nng.Options.NNG_OPT_WS_RESPONSE_HEADER[0] +
            _nng.ensure_bytes(name)
        )
        options.setopt_string(self, name, value)
        return value

    # End of options.

    # Supplied by concrete subclasses (DialerBase / Listener).
    _endpoints = classes.abstract_property
    _get_id = classes.abstract_method
    _close = classes.abstract_method

    def __init__(self, socket, handle):
        self._socket = socket
        self._handle = handle

    __repr__ = classes.make_repr('id={self.id}')

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    @property
    def id(self):
        return self._get_id(self._handle)

    def close(self):
        """Close the endpoint and deregister it from its socket."""
        try:
            errors.check(self._close(self._handle))
        except errors.Errors.ECLOSED:
            pass
        getattr(self._socket, self._endpoints).pop(self.id)
class DialerBase(Endpoint):
    """Endpoint that initiates outgoing connections."""

    _name = 'dialer'

    # Generic options.
    min_reconnect_time = options.make(_nng.Options.NNG_OPT_RECONNMINT)
    max_reconnect_time = options.make(_nng.Options.NNG_OPT_RECONNMAXT)
    # End of options.

    _endpoints = 'dialers'
    _get_id = _nng.F.nng_dialer_id
    _close = _nng.F.nng_dialer_close

    def _start(self, *, flags=0):
        """Start dialing (for dialers created with create_only)."""
        errors.check(_nng.F.nng_dialer_start(self._handle, flags))
class Listener(Endpoint):
    """Endpoint that accepts incoming connections."""

    _name = 'listener'

    # Generic options.
    min_reconnect_time = options.make(
        _nng.Options.NNG_OPT_RECONNMINT, mode='ro'
    )
    max_reconnect_time = options.make(
        _nng.Options.NNG_OPT_RECONNMAXT, mode='ro'
    )
    # End of options.

    _endpoints = 'listeners'
    _get_id = _nng.F.nng_listener_id
    _close = _nng.F.nng_listener_close

    def start(self):
        """Start listening (for listeners created with create_only)."""
        errors.check(_nng.F.nng_listener_start(self._handle, 0))
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/nng/nng/bases.py",
"copies": "1",
"size": "9827",
"license": "mit",
"hash": -4038693703162032000,
"line_mean": 25.4878706199,
"line_max": 79,
"alpha_frac": 0.6151419558,
"autogenerated": false,
"ratio": 3.186446173800259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4301588129600259,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Context',
]
import collections
import collections.abc
from . import classes
from .assertions import ASSERT
class Context(collections.abc.MutableMapping):
    """Context.

    A mutable mapping of contextual values, layered via
    collections.ChainMap so that child contexts can shadow values without
    silently overwriting their parents.
    """

    def __init__(self, content=None, *, _context=None):
        if _context is not None:
            ASSERT.none(content)
            self._context = _context
            return
        self._context = collections.ChainMap({} if content is None else content)

    __repr__ = classes.make_repr('{self._context!r}')

    def make(self, content=None, *, allow_overwrite=False):
        """Create a child context layered on top of this one."""
        content = {} if content is None else content
        if not allow_overwrite:
            ASSERT.isdisjoint(frozenset(content), self._context)
        return Context(_context=self._context.new_child(content))

    def __len__(self):
        return len(self._context)

    def __iter__(self):
        return iter(self._context)

    def __getitem__(self, key):
        return self._context[key]

    def __setitem__(self, key, value):
        self.set(key, value)

    def __delitem__(self, key):
        raise AssertionError('do not support overwrite/delete for now')

    def get(self, key, default=None):
        return self._context.get(key, default)

    def set(self, key, value, *, allow_overwrite=False):
        """Bind *key* to *value*; overwriting requires allow_overwrite=True."""
        if not allow_overwrite:
            ASSERT.not_in(key, self._context)
        self._context[key] = value

    def asdict(self):
        """Return content as a dict; useful for testing."""
        return dict(self._context)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/bases/g1/bases/contexts.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": 3408155465053020000,
"line_mean": 25.3770491803,
"line_max": 71,
"alpha_frac": 0.5978868863,
"autogenerated": false,
"ratio": 4.032581453634085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 61
} |
__all__ = [
'copy_exec',
'get_repo_path',
'get_zipapp_target_path',
'make_dir',
'set_dir_attrs',
'set_exec_attrs',
'set_file_attrs',
]
import logging
import shutil
from pathlib import Path
from g1.apps import parameters
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# Runtime-configurable parameters for the g1.operations tooling.
PARAMS = parameters.define(
    'g1.operations',
    parameters.Namespace(
        # Root of the on-disk package repository; must be an absolute path.
        repository=parameters.Parameter(
            Path('/var/lib/g1/operations'),
            'path to the repository directory',
            convert=Path,
            validate=Path.is_absolute,
            format=str,
        ),
        # Group that owns installed files/directories (used by _chown).
        application_group=parameters.Parameter(
            'plumber',
            'set application group',
            validate=bool,  # Check not empty.
        ),
        # Directory into which zipapp executables are installed.
        zipapp_directory=parameters.Parameter(
            Path('/usr/local/bin'),
            'path to install zipapp',
            convert=Path,
            validate=Path.is_absolute,
            format=str,
        ),
    ),
)
# Bump this when the on-disk repository layout changes (see get_repo_path).
REPO_LAYOUT_VERSION = 'v1'
def get_repo_path():
    """Return the repository root for the current on-disk layout version."""
    return PARAMS.repository.get() / REPO_LAYOUT_VERSION
def get_zipapp_target_path(name):
    """Return the install path for a zipapp named *name*."""
    return PARAMS.zipapp_directory.get() / name
def make_dir(path, *, parents=False):
    """Create directory *path* (optionally with parents) with mode 0o750.

    Idempotent: ``exist_ok=True`` tolerates an existing directory, and the
    attributes are (re)applied afterwards — this also covers ``mkdir``'s mode
    being masked by the process umask.
    """
    LOG.info('create directory: %s', path)
    path.mkdir(mode=0o750, parents=parents, exist_ok=True)
    # Use set_dir_attrs just in case ``path`` is already created.
    set_dir_attrs(path)
def copy_exec(src_path, dst_path):
    """Copy *src_path* to *dst_path* and mark the copy executable.

    ``shutil.copyfile`` copies content only (not permission bits);
    ``set_exec_attrs`` then applies mode 0o755 and the expected ownership.
    """
    shutil.copyfile(src_path, dst_path)
    set_exec_attrs(dst_path)
def set_dir_attrs(path):
    """Set mode 0o750 and root:<application group> ownership on a directory."""
    ASSERT.predicate(path, Path.is_dir)
    path.chmod(0o750)
    _chown(path)
def set_file_attrs(path):
    """Set mode 0o640 and root:<application group> ownership on a regular file."""
    ASSERT.predicate(path, Path.is_file)
    path.chmod(0o640)
    _chown(path)
def set_exec_attrs(path):
    """Set executable mode 0o755 and root:<application group> ownership on a file."""
    ASSERT.predicate(path, Path.is_file)
    path.chmod(0o755)
    _chown(path)
def _chown(path):
    """Change owner to root and group to the configured application group."""
    shutil.chown(path, 'root', PARAMS.application_group.get())
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/bases.py",
"copies": "1",
"size": "1964",
"license": "mit",
"hash": 2235058019174360300,
"line_mean": 21.5747126437,
"line_max": 65,
"alpha_frac": 0.616089613,
"autogenerated": false,
"ratio": 3.3175675675675675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44336571805675673,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"CorruptedMessage",
"Stats",
"UnexpectedCommand",
"UnexpectedEOF",
"UnknownCommand",
"log",
]
import logging
import struct
log = logging.getLogger("offhand")
class UnexpectedEOF(Exception):
    """The peer closed the connection in the middle of the protocol."""

    def __init__(self):
        super(UnexpectedEOF, self).__init__("Connection closed unexpectedly")
class UnknownCommand(Exception):
    """An unrecognized protocol command was received."""

    def __init__(self, command):
        super(UnknownCommand, self).__init__("Unknown command: %r" % command)
class UnexpectedCommand(Exception):
    """A known command arrived at an invalid point in the protocol."""

    def __init__(self, command):
        super(UnexpectedCommand, self).__init__("Unexpected command: %r" % command)
class CorruptedMessage(Exception):
    """A length-prefixed message payload was truncated or malformed."""

    def __init__(self):
        super(CorruptedMessage, self).__init__("Corrupted message")
class Stats(object):
    """Mutable bundle of connection/transaction counters.

    All counters start at zero; ``Stats(other)`` copies another instance.
    An instance is truthy iff any counter is non-zero.
    """

    __slots__ = [
        "connecting",
        "connected",
        "idle",
        "busy",
        "total_engaged",
        "total_canceled",
        "total_rolledback",
        "total_timeouts",
        "total_disconnects",
        "total_errors",
    ]

    def __init__(self, copy=None):
        """Zero every counter, or copy the counters of *copy* if given."""
        for key in self.__slots__:
            setattr(self, key, getattr(copy, key) if copy else 0)

    def __nonzero__(self):
        # Python 2 truthiness hook: True iff any counter is non-zero.
        return any(getattr(self, key) for key in self.__slots__)

    # BUG FIX: Python 3 uses __bool__, not __nonzero__; without this alias an
    # all-zero Stats was always truthy on Python 3.
    __bool__ = __nonzero__

    def __str__(self):
        """Render all counters as space-separated ``key=value`` pairs."""
        return " ".join("%s=%s" % (key, getattr(self, key)) for key in self.__slots__)
def parse_message(data):
    """Split a length-prefixed buffer into its payload parts.

    Each part is a 4-byte little-endian length followed by that many bytes.
    Raises CorruptedMessage if a header or payload is truncated.
    """
    parts = []
    pos = 0
    total = len(data)
    while pos < total:
        if total - pos < 4:
            raise CorruptedMessage()
        part_size, = struct.unpack("<I", data[pos:pos + 4])
        pos += 4
        if total - pos < part_size:
            raise CorruptedMessage()
        parts.append(data[pos:pos + part_size])
        pos += part_size
    return parts
| {
"repo_name": "ninchat/offhand",
"path": "python/offhand/__init__.py",
"copies": "1",
"size": "1817",
"license": "bsd-2-clause",
"hash": 5965481283021618000,
"line_mean": 20.1279069767,
"line_max": 86,
"alpha_frac": 0.5613648872,
"autogenerated": false,
"ratio": 3.932900432900433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993597049584528,
"avg_score": 0.00013365410318096765,
"num_lines": 86
} |
__all__ = [
"create_ants_transform",
"new_ants_transform",
"read_transform",
"write_transform",
"transform_from_displacement_field",
]
import os
import numpy as np
from . import ants_image as iio
from . import ants_transform as tio
from .. import utils
def new_ants_transform(
    precision="float", dimension=3, transform_type="AffineTransform", parameters=None
):
    """
    Create a new ANTsTransform

    ANTsR function: None

    Example
    -------
    >>> import ants
    >>> tx = ants.new_ants_transform()
    """
    # Resolve the precision/dimension-specific native constructor.
    fn_name = "newAntsTransform%s%i" % (utils.short_ptype(precision), dimension)
    libfn = utils.get_lib_fn(fn_name)
    pointer = libfn(precision, dimension, transform_type)
    transform = tio.ANTsTransform(
        precision=precision,
        dimension=dimension,
        transform_type=transform_type,
        pointer=pointer,
    )
    if parameters is not None:
        transform.set_parameters(parameters)
    return transform
def create_ants_transform(
    transform_type="AffineTransform",
    precision="float",
    dimension=3,
    matrix=None,
    offset=None,
    center=None,
    translation=None,
    parameters=None,
    fixed_parameters=None,
    displacement_field=None,
    supported_types=False,
):
    """
    Create and initialize an ANTsTransform

    ANTsR function: `createAntsrTransform`

    Arguments
    ---------
    transform_type : string
        type of transform(s)
    precision : string
        numerical precision
    dimension : integer
        spatial dimension of transform
    matrix : ndarray
        matrix for linear transforms
    offset : tuple/list
        offset for linear transforms
    center : tuple/list
        center for linear transforms
    translation : tuple/list
        translation for linear transforms
    parameters : ndarray/list
        array of parameters
    fixed_parameters : ndarray/list
        array of fixed parameters
    displacement_field : ANTsImage
        multichannel ANTsImage for non-linear transform
    supported_types : boolean
        flag that returns array of possible transforms types

    Returns
    -------
    ANTsTransform or list of ANTsTransform types

    Example
    -------
    >>> import ants
    >>> translation = (3,4,5)
    >>> tx = ants.create_ants_transform( transform_type='Euler3DTransform', translation=translation )
    """

    def _check_arg(arg, dim=1):
        """Normalize an optional array-like argument to a plain (nested) list."""
        if arg is None:
            return [[]] if dim == 2 else []
        if isinstance(arg, np.ndarray):
            return arg.tolist()
        if isinstance(arg, (tuple, list)):
            return list(arg)
        raise ValueError("Incompatible input argument")

    matrix = _check_arg(matrix, dim=2)
    offset = _check_arg(offset)
    center = _check_arg(center)
    translation = _check_arg(translation)
    parameters = _check_arg(parameters)
    fixed_parameters = _check_arg(fixed_parameters)

    # Transforms that derive from itk::MatrixOffsetTransformBase.
    matrix_offset_types = {
        "AffineTransform",
        "CenteredAffineTransform",
        "Euler2DTransform",
        "Euler3DTransform",
        "Rigid3DTransform",
        "Rigid2DTransform",
        "QuaternionRigidTransform",
        "Similarity2DTransform",
        "CenteredSimilarity2DTransform",
        "Similarity3DTransform",
        "CenteredRigid2DTransform",
        "CenteredEuler3DTransform",
    }

    if supported_types:
        return set(list(matrix_offset_types) + ["DisplacementFieldTransform"])

    # Check for valid dimension
    if (dimension < 2) or (dimension > 4):
        raise ValueError("Unsupported dimension: %i" % dimension)

    # Check for valid precision
    precision_types = ("float", "double")
    if precision not in precision_types:
        raise ValueError("Unsupported Precision %s" % str(precision))

    # Check for supported transform type
    if (transform_type not in matrix_offset_types) and (
        transform_type != "DisplacementFieldTransform"
    ):
        raise ValueError("Unsupported type %s" % str(transform_type))

    # Some transform types imply a fixed spatial dimension; the table replaces
    # the original elif chain and overrides the caller-supplied dimension.
    _implied_dimension = {
        "Euler2DTransform": 2,
        "Euler3DTransform": 3,
        "Rigid2DTransform": 2,
        "Rigid3DTransform": 3,
        "QuaternionRigidTransform": 3,
        "CenteredRigid2DTransform": 2,
        "CenteredEuler3DTransform": 3,
        "Similarity2DTransform": 2,
        "Similarity3DTransform": 3,
        "CenteredSimilarity2DTransform": 2,
    }
    dimension = _implied_dimension.get(transform_type, dimension)

    # If displacement field
    if displacement_field is not None:
        # BUG FIX: transform_from_displacement_field() already returns a
        # fully-constructed ANTsTransform; the original re-wrapped it with a
        # non-existent `tio.ants_transform` callable.
        return transform_from_displacement_field(displacement_field)

    # Transforms that derive from itk::MatrixOffsetTransformBase
    libfn = utils.get_lib_fn(
        "matrixOffset%s%i" % (utils.short_ptype(precision), dimension)
    )
    itk_tx = libfn(
        transform_type,
        precision,
        dimension,
        matrix,
        offset,
        center,
        translation,
        parameters,
        fixed_parameters,
    )
    return tio.ANTsTransform(
        precision=precision,
        dimension=dimension,
        transform_type=transform_type,
        pointer=itk_tx,
    )
def transform_from_displacement_field(field):
    """
    Convert deformation field (multiChannel image) to ANTsTransform

    ANTsR function: `antsrTransformFromDisplacementField`

    Arguments
    ---------
    field : ANTsImage
        deformation field as multi-channel ANTsImage

    Returns
    -------
    ANTsTransform

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read(ants.get_ants_data('r16') )
    >>> mi = ants.image_read(ants.get_ants_data('r64') )
    >>> fi = ants.resample_image(fi,(60,60),1,0)
    >>> mi = ants.resample_image(mi,(60,60),1,0) # speed up
    >>> mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = ('SyN') )
    >>> vec = ants.image_read( mytx['fwdtransforms'][0] )
    >>> atx = ants.transform_from_displacement_field( vec )
    """
    if not isinstance(field, iio.ANTsImage):
        raise ValueError("field must be ANTsImage type")
    # Resolve the native converter for this spatial dimension, then hand it a
    # float-precision copy of the field (the ITK call expects float).
    libfn = utils.get_lib_fn("antsTransformFromDisplacementFieldF%i" % field.dimension)
    float_field = field.clone("float")
    tx_pointer = libfn(float_field.pointer)
    return tio.ANTsTransform(
        precision="float",
        dimension=float_field.dimension,
        transform_type="DisplacementFieldTransform",
        pointer=tx_pointer,
    )
def read_transform(filename, dimension=2, precision="float"):
    """
    Read a transform from file

    ANTsR function: `readAntsrTransform`

    Arguments
    ---------
    filename : string
        filename of transform
    dimension : integer
        spatial dimension of transform; kept for backward compatibility —
        the actual dimension is read from the file itself
    precision : string
        numerical precision of transform

    Returns
    -------
    ANTsTransform

    Example
    -------
    >>> import ants
    >>> tx = ants.new_ants_transform(dimension=2)
    >>> tx.set_parameters((0.9,0,0,1.1,10,11))
    >>> ants.write_transform(tx, '~/desktop/tx.mat')
    >>> tx2 = ants.read_transform('~/desktop/tx.mat')
    """
    filename = os.path.expanduser(filename)
    if not os.path.exists(filename):
        raise ValueError("filename does not exist!")
    # Dimension and transform type are taken from the file, not the arguments.
    dimension = utils.get_lib_fn("getTransformDimensionFromFile")(filename)
    transform_type = utils.get_lib_fn("getTransformNameFromFile")(filename)
    reader = utils.get_lib_fn(
        "readTransform%s%i" % (utils.short_ptype(precision), dimension)
    )
    itk_tx = reader(filename, dimension, precision)
    return tio.ANTsTransform(
        precision=precision,
        dimension=dimension,
        transform_type=transform_type,
        pointer=itk_tx,
    )
def write_transform(transform, filename):
    """
    Write ANTsTransform to file

    ANTsR function: `writeAntsrTransform`

    Arguments
    ---------
    transform : ANTsTransform
        transform to save
    filename : string
        filename of transform (file extension is ".mat" for affine transforms)

    Returns
    -------
    N/A

    Example
    -------
    >>> import ants
    >>> tx = ants.new_ants_transform(dimension=2)
    >>> tx.set_parameters((0.9,0,0,1.1,10,11))
    >>> ants.write_transform(tx, '~/desktop/tx.mat')
    >>> tx2 = ants.read_transform('~/desktop/tx.mat')
    """
    target = os.path.expanduser(filename)
    # The writer variant is selected by the transform's precision/dimension suffix.
    writer = utils.get_lib_fn("writeTransform%s" % (transform._libsuffix))
    writer(transform.pointer, target)
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/core/ants_transform_io.py",
"copies": "1",
"size": "9186",
"license": "apache-2.0",
"hash": 7761440627765396000,
"line_mean": 26.0973451327,
"line_max": 91,
"alpha_frac": 0.6257348138,
"autogenerated": false,
"ratio": 3.93573264781491,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506146746161491,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'CreateEvenRectilinearGrid',
'CreateUniformGrid',
'CreateTensorMesh',
]
__displayname__ = 'Grids'
import numpy as np
import vtk
from .. import interface
from ..base import AlgorithmBase
def _makeSpatialCellData(nx, ny, nz):
"""Used for testing"""
arr = np.fromfunction(lambda k, j, i: k * j * i, (nz, ny, nx))
return arr.flatten()
class CreateUniformGrid(AlgorithmBase):
    """Create uniform grid (``vtkImageData``)"""
    __displayname__ = 'Create Uniform Grid'
    __category__ = 'source'
    def __init__(
        self, extent=(10, 10, 10), spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0)
    ):
        # A pure source algorithm: no input ports, one vtkImageData output.
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1, outputType='vtkImageData'
        )
        self.__extent = extent  # number of points along each axis
        self.__spacing = spacing  # point spacing along each axis
        self.__origin = origin  # world-space origin of the grid
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate the output"""
        pdo = self.GetOutputData(outInfo, 0)
        nx, ny, nz = self.__extent[0], self.__extent[1], self.__extent[2]
        sx, sy, sz = self.__spacing[0], self.__spacing[1], self.__spacing[2]
        ox, oy, oz = self.__origin[0], self.__origin[1], self.__origin[2]
        # Setup the ImageData
        pdo.SetDimensions(nx, ny, nz)
        pdo.SetOrigin(ox, oy, oz)
        pdo.SetSpacing(sx, sy, sz)
        # pdo.SetExtent(0,nx-1, 0,ny-1, 0,nz-1)
        # Add CELL data
        data = _makeSpatialCellData(
            nx - 1, ny - 1, nz - 1
        )  # minus 1 b/c cell data not point data
        data = interface.convert_array(data, name='Spatial Cell Data', deep=True)
        # Add the model data to CELL data:
        pdo.GetCellData().AddArray(data)
        # Add Point data
        data = _makeSpatialCellData(nx, ny, nz)
        data = interface.convert_array(data, name='Spatial Point Data', deep=True)
        # Add the model data to POINT data:
        pdo.GetPointData().AddArray(data)
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle output extents"""
        # Now set whole output extent
        ext = [
            0,
            self.__extent[0] - 1,
            0,
            self.__extent[1] - 1,
            0,
            self.__extent[2] - 1,
        ]
        info = outInfo.GetInformationObject(0)
        # Set WHOLE_EXTENT: This is absolutely necessary
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1
    #### Setters / Getters ####
    # NOTE(review): the defaults are tuples, so the first call to any setter
    # always compares unequal to a list and triggers Modified().
    def set_extent(self, nx, ny, nz):
        """Set the extent of the output grid."""
        if self.__extent != [nx, ny, nz]:
            self.__extent = [nx, ny, nz]
            self.Modified()
    def set_spacing(self, dx, dy, dz):
        """Set the spacing for the points along each axial direction."""
        if self.__spacing != [dx, dy, dz]:
            self.__spacing = [dx, dy, dz]
            self.Modified()
    def set_origin(self, x0, y0, z0):
        """Set the origin of the output grid."""
        if self.__origin != [x0, y0, z0]:
            self.__origin = [x0, y0, z0]
            self.Modified()
class CreateEvenRectilinearGrid(AlgorithmBase):
    """This creates a vtkRectilinearGrid where the discretization along a
    given axis is uniformly distributed.
    """
    __displayname__ = 'Create Even Rectilinear Grid'
    __category__ = 'source'
    def __init__(
        self, extent=[10, 10, 10], xrng=[-1.0, 1.0], yrng=[-1.0, 1.0], zrng=[-1.0, 1.0]
    ):
        # A pure source algorithm: no input ports, one vtkRectilinearGrid output.
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1, outputType='vtkRectilinearGrid'
        )
        self.__extent = extent  # number of CELLS along each axis
        self.__xrange = xrng  # (min, max) bounds per axis
        self.__yrange = yrng
        self.__zrange = zrng
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate the output"""
        # Get output of Proxy
        pdo = self.GetOutputData(outInfo, 0)
        # Perfrom task: extent counts cells, so there is one extra point per axis.
        nx, ny, nz = self.__extent[0] + 1, self.__extent[1] + 1, self.__extent[2] + 1
        xcoords = np.linspace(self.__xrange[0], self.__xrange[1], num=nx)
        ycoords = np.linspace(self.__yrange[0], self.__yrange[1], num=ny)
        zcoords = np.linspace(self.__zrange[0], self.__zrange[1], num=nz)
        # CONVERT TO VTK #
        xcoords = interface.convert_array(xcoords, deep=True)
        ycoords = interface.convert_array(ycoords, deep=True)
        zcoords = interface.convert_array(zcoords, deep=True)
        pdo.SetDimensions(nx, ny, nz)
        pdo.SetXCoordinates(xcoords)
        pdo.SetYCoordinates(ycoords)
        pdo.SetZCoordinates(zcoords)
        data = _makeSpatialCellData(nx - 1, ny - 1, nz - 1)
        data = interface.convert_array(data, name='Spatial Data', deep=True)
        # THIS IS CELL DATA! Add the model data to CELL data:
        pdo.GetCellData().AddArray(data)
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle output extents"""
        # Now set whole output extent
        ext = [0, self.__extent[0], 0, self.__extent[1], 0, self.__extent[2]]
        info = outInfo.GetInformationObject(0)
        # Set WHOLE_EXTENT: This is absolutely necessary
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1
    #### Setters / Getters ####
    def set_extent(self, nx, ny, nz):
        """Set the extent of the output grid."""
        if self.__extent != [nx, ny, nz]:
            self.__extent = [nx, ny, nz]
            self.Modified()
    def set_x_range(self, start, stop):
        """Set range (min, max) for the grid in the X-direction."""
        if self.__xrange != [start, stop]:
            self.__xrange = [start, stop]
            self.Modified()
    def set_y_range(self, start, stop):
        """Set range (min, max) for the grid in the Y-direction"""
        if self.__yrange != [start, stop]:
            self.__yrange = [start, stop]
            self.Modified()
    def set_z_range(self, start, stop):
        """Set range (min, max) for the grid in the Z-direction"""
        if self.__zrange != [start, stop]:
            self.__zrange = [start, stop]
            self.Modified()
class CreateTensorMesh(AlgorithmBase):
    """This creates a vtkRectilinearGrid where the discretization along a
    given axis is not uniform. Cell spacings along each axis can be set via
    strings with repeating patterns or explicitly using the ``Set*Cells``
    methods.
    """
    __displayname__ = 'Create Tensor Mesh'
    __category__ = 'source'
    def __init__(
        self,
        origin=[-350.0, -400.0, 0.0],
        data_name='Data',
        xcellstr='200 100 50 20*50.0 50 100 200',
        ycellstr='200 100 50 21*50.0 50 100 200',
        zcellstr='20*25.0 50 100 200',
    ):
        # A pure source algorithm: no input ports, one vtkRectilinearGrid output.
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1, outputType='vtkRectilinearGrid'
        )
        self.__origin = origin  # reference point; see _make_model for adjustment
        # Cell spacings are parsed from UBC-style strings ('N*w' repeats w N times).
        self.__xcells = CreateTensorMesh._read_cell_line(xcellstr)
        self.__ycells = CreateTensorMesh._read_cell_line(ycellstr)
        self.__zcells = CreateTensorMesh._read_cell_line(zcellstr)
        self.__data_name = data_name  # array name used when data is supplied
    @staticmethod
    def _read_cell_line(line):
        """Read cell sizes for each line in the UBC mesh line strings"""
        # OPTIMIZE: work in progress
        # TODO: when optimized, make sure to combine with UBC reader
        line_list = []
        for seg in line.split():
            if '*' in seg:
                # 'N*w' expands to N cells of width w.
                sp = seg.split('*')
                seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1])
            else:
                seg_arr = np.array([float(seg)], dtype=float)
            line_list.append(seg_arr)
        return np.concatenate(line_list)
    def get_extent(self):
        """Get the extent of the created mesh"""
        ne, nn, nz = len(self.__xcells), len(self.__ycells), len(self.__zcells)
        return (0, ne, 0, nn, 0, nz)
    def _make_model(self, pdo):
        """Generates the output data object"""
        ox, oy, oz = self.__origin[0], self.__origin[1], self.__origin[2]
        # Read the cell sizes
        cx = self.__xcells
        cy = self.__ycells
        cz = self.__zcells
        # Invert the indexing of the vector to start from the bottom.
        cz = cz[::-1]
        # Adjust the reference point to the bottom south west corner
        oz = oz - np.sum(cz)
        # Now generate the coordinates for from cell width and origin
        cox = ox + np.cumsum(cx)
        cox = np.insert(cox, 0, ox)
        coy = oy + np.cumsum(cy)
        coy = np.insert(coy, 0, oy)
        coz = oz + np.cumsum(cz)
        coz = np.insert(coz, 0, oz)
        # Set the dims and coordinates for the output
        ext = self.get_extent()
        nx, ny, nz = ext[1] + 1, ext[3] + 1, ext[5] + 1
        pdo.SetDimensions(nx, ny, nz)
        # Convert to VTK array for setting coordinates
        pdo.SetXCoordinates(interface.convert_array(cox, deep=True))
        pdo.SetYCoordinates(interface.convert_array(coy, deep=True))
        pdo.SetZCoordinates(interface.convert_array(coz, deep=True))
        return pdo
    def _add_model_data(self, pdo, data):
        """Add an array to the output data object. If data is None, random
        values will be generated.
        """
        nx, ny, nz = pdo.GetDimensions()
        nx, ny, nz = nx - 1, ny - 1, nz - 1
        # ADD DATA to cells
        if data is None:
            data = np.random.rand(nx * ny * nz)
            data = interface.convert_array(data, name='Random Data', deep=True)
        else:
            data = interface.convert_array(data, name=self.__data_name, deep=True)
        pdo.GetCellData().AddArray(data)
        return pdo
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output data object"""
        # Get input/output of Proxy
        pdo = self.GetOutputData(outInfo, 0)
        # Perform the task
        self._make_model(pdo)
        self._add_model_data(pdo, None)  # TODO: add ability to set input data
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to set output whole extent"""
        # Now set whole output extent
        ext = self.get_extent()
        info = outInfo.GetInformationObject(0)
        # Set WHOLE_EXTENT: This is absolutely necessary
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1
    #### Getters / Setters ####
    def set_origin(self, x0, y0, z0):
        """Set the origin of the output"""
        if self.__origin != [x0, y0, z0]:
            self.__origin = [x0, y0, z0]
            self.Modified()
    def set_x_cells(self, xcells):
        """Set the spacings for the cells in the X direction

        Args:
            xcells (list or np.array(floats)) : the spacings along the X-axis
        """
        if len(xcells) != len(self.__xcells) or not np.allclose(self.__xcells, xcells):
            self.__xcells = xcells
            self.Modified()
    def set_y_cells(self, ycells):
        """Set the spacings for the cells in the Y direction

        Args:
            ycells (list or np.array(floats)) : the spacings along the Y-axis
        """
        if len(ycells) != len(self.__ycells) or not np.allclose(self.__ycells, ycells):
            self.__ycells = ycells
            self.Modified()
    def set_z_cells(self, zcells):
        """Set the spacings for the cells in the Z direction

        Args:
            zcells (list or np.array(floats)): the spacings along the Z-axis
        """
        if len(zcells) != len(self.__zcells) or not np.allclose(self.__zcells, zcells):
            self.__zcells = zcells
            self.Modified()
    def set_x_cells_str(self, xcellstr):
        """Set the spacings for the cells in the X direction

        Args:
            xcellstr (str) : the spacings along the X-axis in the UBC style
        """
        xcells = CreateTensorMesh._read_cell_line(xcellstr)
        self.set_x_cells(xcells)
    def set_y_cells_str(self, ycellstr):
        """Set the spacings for the cells in the Y direction

        Args:
            ycellstr (str) : the spacings along the Y-axis in the UBC style
        """
        ycells = CreateTensorMesh._read_cell_line(ycellstr)
        self.set_y_cells(ycells)
    def set_z_cells_str(self, zcellstr):
        """Set the spacings for the cells in the Z direction

        Args:
            zcellstr (str) : the spacings along the Z-axis in the UBC style
        """
        zcells = CreateTensorMesh._read_cell_line(zcellstr)
        self.set_z_cells(zcells)
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/model_build/grids.py",
"copies": "1",
"size": "12708",
"license": "bsd-3-clause",
"hash": 3741089605596108300,
"line_mean": 35.1022727273,
"line_max": 87,
"alpha_frac": 0.5774315392,
"autogenerated": false,
"ratio": 3.4845078146421717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9558408744911133,
"avg_score": 0.000706121786207798,
"num_lines": 352
} |
__all__ = (
'create_graph',
'draw',
)
import os
import graphviz as gv
def create_graph(net, *, name=None, format=None, engine=None):
    """Build a ``graphviz.Digraph`` view of a Petri net.

    Args:
        net: object exposing ``places()`` and ``transitions()``; each
            transition exposes ``input_arcs()`` and ``output_arcs()``.
        name: graph name passed through to graphviz.
        format: output format (e.g. ``'png'``, ``'svg'``).
        engine: layout engine (e.g. ``'dot'``, ``'neato'``).

    Returns:
        gv.Digraph: places as default-shaped nodes, transitions as squares,
        arcs as labeled edges.
    """
    # BUG FIX: `engine` was hard-coded to None, silently discarding the
    # caller's layout-engine choice; a leftover debug print was removed too.
    graph = gv.Digraph(name=name, format=format, engine=engine)
    for place in net.places():
        graph.node(place.name, label=place.label())
    for transition in net.transitions():
        for arc in transition.input_arcs():
            graph.edge(arc.source.name, transition.name, arc.label())
        graph.node(transition.name, shape='square', label=transition.label())
        for arc in transition.output_arcs():
            graph.edge(transition.name, arc.target.name, label=arc.label())
    return graph
def draw(net, *, filename=None, name=None, format=None, engine=None):
    """Render a Petri net to *filename*, inferring format/name from it."""
    if filename:
        base, ext = os.path.splitext(filename)
        # Derive the output format from the file extension unless given.
        if format is None and ext:
            format = ext[len(os.path.extsep):]
        if name is None:
            name = os.path.basename(base)
    else:
        base = name
    graph = create_graph(net, name=name, format=format, engine=engine)
    graph.render(filename=base, cleanup=True)
| {
"repo_name": "simone-campagna/petra",
"path": "petra/utils/graph.py",
"copies": "1",
"size": "1505",
"license": "apache-2.0",
"hash": 7612651568433648000,
"line_mean": 32.4444444444,
"line_max": 77,
"alpha_frac": 0.6166112957,
"autogenerated": false,
"ratio": 3.4518348623853212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9550252450713705,
"avg_score": 0.0036387414743231164,
"num_lines": 45
} |
__all__ = ['nCube','nCubeMesh','RegularCubeMesh']
from numpy import ndarray,array,asarray,rank,bitwise_xor,eye,hstack,vstack,arange,zeros
from simplex import Simplex
class RegularCubeMesh:
    """
    A regular grid of hypercubes.

    Examples:
        # create a 2x2 cube mesh
        bitmap = ones((2,2),dtype='bool')
        c_mesh = RegularCubeMesh(bitmap)

        # creates a 3x3 cube mesh with a center hole
        bitmap = ones((3,3),dtype='bool')
        bitmap[1,1] = False
        c_mesh = RegularCubeMesh(bitmap)

        # creates a 10x10x10 cube mesh with a center hole
        bitmap = ones((10,10,10),dtype='bool')
        bitmap[5,5,5] = False
        c_mesh = RegularCubeMesh(bitmap)
    """

    def __init__(self, bitmap):
        self.bitmap = asarray(bitmap, dtype='bool')

    def cube_array(self):
        """
        Return a cube array that represents this mesh's bitmap.

        Each row is a cube: its corner coordinates followed by the axis
        indices 0..dim-1.
        """
        # BUG FIX: numpy.rank() was removed in NumPy 1.18; use ndarray.ndim.
        dim = self.bitmap.ndim
        cubes = vstack(self.bitmap.nonzero()).transpose()
        cubes = hstack((cubes, zeros((cubes.shape[0], dim), dtype=cubes.dtype) + arange(dim)))
        return cubes

    def dimension(self):
        """Spatial dimension of the mesh (rank of the bitmap)."""
        return self.bitmap.ndim
class nCube:
    """A single n-dimensional hypercube stored as an array of vertex indices.

    The index array is either a single vertex (shape ``(1,)``) or an
    n-dimensional array of shape ``(2,)*n`` holding the cube's corners.
    """

    def __init__(self, s, dtype='int32'):
        self.indices = array(s, dtype=dtype)
        # BUG FIX: numpy.rank() was removed in NumPy 1.18; use ndarray.ndim.
        assert(self.indices.shape == (1,) or self.indices.shape == (2,) * self.indices.ndim)
        self.compute_corner_simplex()

    def compute_corner_simplex(self):
        """Compute the simplex spanned by the minimal corner and its neighbors.

        The simplex parity encodes the cube's orientation.
        """
        if self.indices.ndim < 2:
            self.corner_simplex = Simplex(self.indices)
        else:
            corner_value = self.indices.min()
            corner_index = (self.indices == corner_value).nonzero()
            # Neighbors of the corner: flip one axis of the corner index at a time.
            rest = self.indices[[tuple(x) for x in bitwise_xor(eye(self.indices.ndim, dtype=int), array(corner_index))]]
            parity = sum(corner_index)[0] % 2
            self.corner_simplex = Simplex([corner_value] + rest.tolist(), parity)

    def __str__(self):
        return 'nCube(' + ndarray.__str__(self.indices) + ')'

    def __hash__(self):
        return hash(self.corner_simplex)

    def __eq__(self, other):
        return self.corner_simplex == other

    def boundary(self):
        raise NotImplementedError

    def relative_orientation(self, other):
        """
        Determine whether two cubes that represent the same
        face have the same orientation or opposite orientations

        Returns:
            False if same orientation
            True if opposite orientation
        """
        if self.corner_simplex != other.corner_simplex:
            # BUG FIX: the original Python 2 `raise E, msg` form is a
            # SyntaxError on Python 3; the call form works on both.
            raise ValueError('Cubes do not share the same vertices')
        return self.corner_simplex.parity ^ other.corner_simplex.parity
class nCubeMesh:
    """An n-cube mesh: an integer cube-index array plus vertex coordinates."""

    def __init__(self, indices, vertices):
        # BUG FIX: the original read the undefined name `simplices`,
        # raising NameError; the parameter is `indices`.
        self.indices = asarray(indices, dtype='i')
        self.vertices = asarray(vertices, dtype='d')

    def manifold_dimension(self):
        """Dimension of the cubes themselves."""
        # BUG FIX: numpy.rank() was removed in NumPy 1.18; use ndarray.ndim.
        if self.indices.ndim >= 2:
            return self.indices.ndim
        else:
            return self.indices.shape[1]

    def embedding_dimension(self):
        """Dimension of the space the vertices live in."""
        return self.vertices.shape[1]
| {
"repo_name": "pkuwwt/pydec",
"path": "pydec/mesh/ncube.py",
"copies": "6",
"size": "3322",
"license": "bsd-3-clause",
"hash": -4973985097254441000,
"line_mean": 25.7903225806,
"line_max": 119,
"alpha_frac": 0.5878988561,
"autogenerated": false,
"ratio": 3.6788482834994465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03174775172426332,
"num_lines": 124
} |
__all__ = [
'customAxisTicks',
'resetAxisTicks',
'scaleAxis',
'setAxisLabelsFromBounds',
]
import vtk
def customAxisTicks(rng, axis=0, uniform=False):
    """Apply custom tick values to an axis of the active render view.

    Args:
        rng (list(float)): the tick values to show on the axis
        axis (int): which axis to target (X=0, Y=1, or Z=2)
        uniform (bool): when True, apply ``rng`` to all three axes
    """
    from paraview.simple import GetActiveViewOrCreate, RenderAllViews

    view = GetActiveViewOrCreate('RenderView')
    if uniform or axis == 0:
        view.AxesGrid.XAxisUseCustomLabels = 1
        view.AxesGrid.XAxisLabels = rng
    if uniform or axis == 1:
        view.AxesGrid.YAxisUseCustomLabels = 1
        view.AxesGrid.YAxisLabels = rng
    if uniform or axis == 2:
        view.AxesGrid.ZAxisUseCustomLabels = 1
        view.AxesGrid.ZAxisLabels = rng
    RenderAllViews()
    return None


customAxisTicks.__displayname__ = 'Custom Axis Ticks'
customAxisTicks.__category__ = 'macro'
def resetAxisTicks(axis):
    """Restore default tick labels on one or more render-view axes.

    Args:
        axis (int or list(int)): axis or axes to reset (X=0, Y=1, or Z=2)
    """
    from paraview.simple import GetActiveViewOrCreate, RenderAllViews

    view = GetActiveViewOrCreate('RenderView')
    targets = axis if isinstance(axis, (list, tuple)) else [axis]
    for ax in targets:
        if ax == 0:
            view.AxesGrid.XAxisLabels = []
            view.AxesGrid.XAxisUseCustomLabels = 0
        elif ax == 1:
            view.AxesGrid.YAxisLabels = []
            view.AxesGrid.YAxisUseCustomLabels = 0
        elif ax == 2:
            view.AxesGrid.ZAxisLabels = []
            view.AxesGrid.ZAxisUseCustomLabels = 0
    RenderAllViews()
    return None


resetAxisTicks.__displayname__ = 'Reset Axis Ticks'
resetAxisTicks.__category__ = 'macro'
def scaleAxis(axis, scale):
    """Visually scale every pipeline source along one axis.

    Args:
        axis (int): which axis to scale (X=0, Y=1, or Z=2)
        scale (float): the scale factor for that axis
    """
    import paraview.simple as pvs

    factors = [1, 1, 1]
    factors[axis] = scale
    for source in pvs.GetSources().values():
        view = pvs.GetActiveViewOrCreate('RenderView')
        display = pvs.GetDisplayProperties(source, view=view)
        # Scale the display plus both axis-grid representations
        display.Scale = factors
        display.DataAxesGrid.Scale = factors
        display.PolarAxes.Scale = factors
    pvs.RenderAllViews()
    pvs.ResetCamera()
    return None


scaleAxis.__displayname__ = 'Scale Axis'
scaleAxis.__category__ = 'macro'
def setAxisLabelsFromBounds(name, num=(10, 10, 5)):
    """Sets the axis labels from a given input data source. Use the num argument
    to control the number of labels along each axis. If num is a scalar, then
    a uniform number of labels is used on each axis.

    Args:
        name (str): The string name of the input source on the data pipeline
        num (tuple(int) or int): the number of labels for each axis

    Example:
        >>> import pvmacros as pvm
        >>> pvm.vis.setAxisLabelsFromBounds('TableToPoints1', num=(5, 10, 2))
    """
    import paraview.simple as pvs
    import paraview.servermanager as sm
    import numpy as np

    # Get the input data and its spatial bounds
    src = pvs.FindSource(name)
    data = sm.Fetch(src)
    xmin, xmax, ymin, ymax, zmin, zmax = data.GetBounds()
    # Normalize `num` to a list of three ints.
    # BUG FIX: a scalar previously hit `list(num)` and raised TypeError,
    # and the int-cast loop assigned into the (immutable) default tuple.
    if not isinstance(num, (tuple, list)):
        num = (num, num, num)
    num = [int(val) for val in num]
    # Uniformly spaced label positions across each axis extent
    xrng = np.linspace(xmin, xmax, num=num[0])
    yrng = np.linspace(ymin, ymax, num=num[1])
    zrng = np.linspace(zmin, zmax, num=num[2])
    # Set the axis labels
    customAxisTicks(xrng, axis=0, uniform=False)
    customAxisTicks(yrng, axis=1, uniform=False)
    customAxisTicks(zrng, axis=2, uniform=False)
    return


setAxisLabelsFromBounds.__displayname__ = 'Set Axis Labels from Bounds'
setAxisLabelsFromBounds.__category__ = 'macro'
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "pvmacros/vis/axes.py",
"copies": "1",
"size": "4099",
"license": "bsd-3-clause",
"hash": -2056325974394472000,
"line_mean": 29.1397058824,
"line_max": 80,
"alpha_frac": 0.6391802879,
"autogenerated": false,
"ratio": 3.5924627519719543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47316430398719544,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DashApi',
]
try:
import simplejson as json
except ImportError:
import json
class DashApi(object):
    """HTTP client mixin for Datadog's dashboard ("dash") endpoints.

    The host class is expected to provide ``http_request(method, path,
    [body], response_formatter=...)``.  See the `dashboard API documentation
    <http://docs.datadoghq.com/api/#dashboard>`_ for the dashboard data
    format.
    """

    @staticmethod
    def _dash_payload(title, description, graphs, template_variables):
        # Shared request body for create/update; `graphs` may be JSON text
        # or an already-parsed structure.
        if isinstance(graphs, str):
            graphs = json.loads(graphs)
        return {
            'title': title,
            'description': description,
            'graphs': graphs,
            'template_variables': template_variables or [],
        }

    def dashboard(self, dash_id):
        """Return the dashboard with the given id."""
        return self.http_request(
            'GET', '/dash/%s' % dash_id,
            response_formatter=lambda res: res['dash'],
        )

    def dashboards(self):
        """Return all of your dashboards."""
        return self.http_request(
            'GET', '/dash',
            response_formatter=lambda res: res['dashes'],
        )

    def create_dashboard(self, title, description, graphs, template_variables=None):
        """Create a dashboard with *title*, *description* and *graphs*; return its id."""
        payload = self._dash_payload(title, description, graphs, template_variables)
        return self.http_request(
            'POST', '/dash', payload,
            response_formatter=lambda res: res['dash']['id'],
        )

    def update_dashboard(self, dash_id, title, description, graphs, template_variables=None):
        """Replace the title/description/graphs of dashboard *dash_id*; return its id."""
        payload = self._dash_payload(title, description, graphs, template_variables)
        return self.http_request(
            'PUT', '/dash/%s' % dash_id, payload,
            response_formatter=lambda res: res['dash']['id'],
        )

    def delete_dashboard(self, dash_id):
        """Delete the dashboard with the given *dash_id*."""
        return self.http_request('DELETE', '/dash/%s' % dash_id)
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/dashes.py",
"copies": "2",
"size": "2573",
"license": "bsd-3-clause",
"hash": -477662402951493950,
"line_mean": 31.5696202532,
"line_max": 104,
"alpha_frac": 0.5713175282,
"autogenerated": false,
"ratio": 4.288333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5859650861533333,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DATABASE_PORT',
'DATABASE_PUBLISHER_PORT',
# Database interface.
'DatabaseInterface',
'DatabaseRequest',
'DatabaseResponse',
# Database watcher interface.
'DatabaseEvent',
# Type aliases.
'Expiration',
'Key',
'LeaseId',
'Revision',
'TransactionId',
'Value',
# Data types.
'KeyValue',
'Lease',
'SortBys',
'Sort',
# Error types.
'DatabaseError',
'InternalError',
'InvalidRequestError',
'KeyNotFoundError',
'LeaseNotFoundError',
'TransactionNotFoundError',
'TransactionTimeoutError',
# Misc.
'generate_lease_id',
'generate_transaction_id',
'next_key',
]
import dataclasses
import enum
import random
import typing
from g1.bases.assertions import ASSERT
from g1.messaging import reqrep
DATABASE_PORT = 2390
DATABASE_PUBLISHER_PORT = 2391
# Type aliases. Integers are 64-bit.
Revision = int
Key = bytes
Value = bytes
LeaseId = int
Expiration = int # Unit: seconds.
TransactionId = int
class DatabaseError(Exception):
    """Base error type of all errors raised by the database interface."""
class InvalidRequestError(DatabaseError):
    """Raised when the server receives a malformed or invalid request."""
class InternalError(DatabaseError):
    """Raised when a non-DatabaseError exception type is not caught by the server."""
class KeyNotFoundError(DatabaseError):
    """Raised when no key-value pair is found for the given key."""
class LeaseNotFoundError(DatabaseError):
    """Raised when no lease is found for the given lease ID."""
class TransactionNotFoundError(DatabaseError):
    """Raised when no transaction is found for the given transaction ID."""
class TransactionTimeoutError(DatabaseError):
    """Raised when a transaction has timed out."""
@dataclasses.dataclass(frozen=True)
class KeyOnly:
    """A key plus its last-modified revision (the result type of scan_keys)."""
    # Last revision at which this key was modified; always positive.
    revision: Revision
    # The key; never empty.
    key: Key
    def __post_init__(self):
        # Enforce the invariants documented above.
        ASSERT.greater(self.revision, 0)
        ASSERT.true(self.key)
@dataclasses.dataclass(frozen=True)
class KeyValue:
    """Represent a key-value pair.

    * The revision field is the last revision at which this pair was
      modified.  Note that this is not the revision of the current
      keyspace.
    * The key field is never an empty string since we do not allow such
      case for now.
    """
    # Last revision at which this pair was modified; always positive.
    revision: Revision
    # The key; never empty.
    key: Key
    value: Value
    def __post_init__(self):
        # Enforce the invariants documented above.
        ASSERT.greater(self.revision, 0)
        ASSERT.true(self.key)
@enum.unique
class SortBys(enum.Enum):
    """Instruct how scan operations sort their results.

    Note that REVISION means the last modified revision of a key, not
    the revision of the keyspace we are scanning.
    """
    NONE = 0  # Leave results in an implementation-defined order.
    REVISION = 1
    KEY = 2
    VALUE = 3
@dataclasses.dataclass(frozen=True)
class Sort:
    """One sort condition for scan operations (see DatabaseInterface.scan)."""
    # Field to sort by; SortBys.NONE leaves the order implementation-defined.
    sort_by: SortBys
    # True sorts ascending; presumably descending otherwise (enforced
    # server-side — confirm against the storage implementation).
    ascending: bool
@dataclasses.dataclass(frozen=True)
class Lease:
    """A lease and the keys currently associated with it."""
    # Lease ID; always positive.
    lease: LeaseId
    # Expiration time in seconds (servers compare it with time.time(),
    # i.e. it is an absolute epoch timestamp); non-negative.
    expiration: Expiration
    keys: typing.List[Key]
    def __post_init__(self):
        # Enforce the invariants documented above.
        ASSERT.greater(self.lease, 0)
        ASSERT.greater_or_equal(self.expiration, 0)
@reqrep.raising(
    InvalidRequestError,
    InternalError,
    TransactionNotFoundError,
    TransactionTimeoutError,
)
class DatabaseInterface:
    """Database interface.

    This is modeled after etcd with a few differences:

    * A transaction covers multiple operations (rather than a special
      transaction request that bundles operations together).
    * All write operations are idempotent.
    * Lease operations do not increment revision.

    NOTE: Although we model the interface after etcd, and offer more in
    certain aspects, our storage implementation is based on SQLite.  So
    in terms of actual supported concurrent operations and performance,
    our server is mostly just a toy compared to etcd.

    Common arguments:

    * revision:
      Scan the keyspace at this revision.  The special value 0 means the
      current revision.

    * limit:
      Return no more results than the given number.  The special value 0
      means returning all results.

    * transaction:
      Execute this request in the given transaction.  The special value
      0 means no transaction.  When it is not 0, methods might raise
      TransactionNotFoundError or TransactionTimeoutError.
    """

    # NOTE(review): presumably this makes the generated request/response
    # types report the public package path; confirm with reqrep.
    __module__ = 'g1.operations.databases'

    #
    # Key-value operations.
    #

    def get_revision(self, *, transaction: TransactionId = 0) -> Revision:
        """Return the revision of the current keyspace."""
        raise NotImplementedError

    def get(
        self,
        *,
        key: Key,
        revision: Revision = 0,
        transaction: TransactionId = 0,
    ) -> typing.Optional[KeyValue]:
        """Get the pair by the given key, at the given revision.

        Return None when no pair matches.

        * key:
          Get the pair by the given key.  This cannot be empty.
        """
        raise NotImplementedError

    def count(
        self,
        *,
        revision: Revision = 0,
        key_start: Key = b'',
        key_end: Key = b'',
        transaction: TransactionId = 0,
    ) -> int:
        """Scan key spaces but only return the count of the results.

        Check scan for descriptions of arguments.
        """
        raise NotImplementedError

    def scan_keys(
        self,
        *,
        revision: Revision = 0,
        key_start: Key = b'',
        key_end: Key = b'',
        sorts: typing.List[Sort] = (),
        limit: int = 0,
        transaction: TransactionId = 0,
    ) -> typing.List[KeyOnly]:
        """Scan key spaces but only return keys of the results.

        Check scan for descriptions of arguments.
        """
        raise NotImplementedError

    def scan(
        self,
        *,
        revision: Revision = 0,
        key_start: Key = b'',
        key_end: Key = b'',
        sorts: typing.List[Sort] = (),
        limit: int = 0,
        transaction: TransactionId = 0,
    ) -> typing.List[KeyValue]:
        """Scan key spaces.

        * key_start, key_end:
          Scan keys of the given range.  An empty byte string means that
          end of the range is unbounded.  Default is to scan the entire
          key space.

        * sorts:
          Sort results by the given conditions.  Default is to sort
          results by an implementation defined order.
        """
        raise NotImplementedError

    def set(
        self,
        *,
        key: Key,
        value: Value,
        transaction: TransactionId = 0,
    ) -> typing.Optional[KeyValue]:
        """Set a key-value pair.

        This increments the revision by 1, and returns the key-value
        pair prior to the update (None when the key did not exist).

        This is idempotent in the sense that if the value is the same as
        the current value, the revision is not incremented.

        * key:
          Set to the pair by the given key.  This cannot be empty.

        * value:
          Set the value of the pair to the given value.  This cannot be
          empty.
        """
        raise NotImplementedError

    def delete(
        self,
        *,
        key_start: Key = b'',
        key_end: Key = b'',
        transaction: TransactionId = 0,
    ) -> typing.List[KeyValue]:
        """Delete pairs of the given key range.

        This increments the revision by 1, and returns the key-value
        pairs prior to the deletion.

        This is idempotent in the sense that if no key is deleted, the
        revision is not incremented.

        Check scan for descriptions of arguments.
        """
        raise NotImplementedError

    #
    # Leases.
    #

    def lease_get(
        self,
        *,
        lease: LeaseId,
        transaction: TransactionId = 0,
    ) -> typing.Optional[Lease]:
        """Get lease (None when the lease does not exist).

        * lease:
          Lease ID to get for.
        """
        raise NotImplementedError

    def lease_count(
        self,
        *,
        lease_start: LeaseId = 0,
        lease_end: LeaseId = 0,
        transaction: TransactionId = 0,
    ) -> int:
        """Count leases.

        Check lease_scan for descriptions of arguments.
        """
        raise NotImplementedError

    def lease_scan(
        self,
        *,
        lease_start: LeaseId = 0,
        lease_end: LeaseId = 0,
        limit: int = 0,
        transaction: TransactionId = 0,
    ) -> typing.List[Lease]:
        """Scan leases.

        * lease_start, lease_end:
          Scan leases of the given range.  The special value 0 means
          that end of the range is unbounded.  Default is to scan all
          leases.
        """
        raise NotImplementedError

    def lease_grant(
        self,
        *,
        lease: LeaseId,
        expiration: Expiration,
        transaction: TransactionId = 0,
    ) -> typing.Optional[Lease]:
        """Grant a new lease or extend the expiration time.

        This returns the lease object prior to the call (None when the
        lease did not exist).

        This is idempotent in the sense that if the lease exists, this
        updates the expiration time.

        * lease:
          The new lease ID.  This cannot be empty.

        * expiration:
          The expiration time of the lease.
        """
        raise NotImplementedError

    @reqrep.raising(
        KeyNotFoundError,
        LeaseNotFoundError,
    )
    def lease_associate(
        self,
        *,
        lease: LeaseId,
        key: Key,
        transaction: TransactionId = 0,
    ) -> Lease:
        """Associate a lease with a key.

        This returns the lease object prior to the call.

        This is idempotent in the sense that if the lease is already
        associated with the key, this is a no-op.

        * lease:
          Lease ID in the association.  It cannot be empty.  This raises
          LeaseNotFoundError if lease does not exist.

        * key:
          Key in the association.  It cannot be empty.  This raises
          KeyNotFoundError if key does not exist.
        """
        raise NotImplementedError

    @reqrep.raising(
        KeyNotFoundError,
        LeaseNotFoundError,
    )
    def lease_dissociate(
        self,
        *,
        lease: LeaseId,
        key: Key,
        transaction: TransactionId = 0,
    ) -> Lease:
        """Dissociate a lease from a key.

        This returns the lease object prior to the call.

        This is idempotent in the sense that if the lease is not
        associated with the key, this is a no-op.

        Check lease_associate for descriptions of arguments.
        """
        raise NotImplementedError

    def lease_revoke(
        self,
        *,
        lease: LeaseId,
        transaction: TransactionId = 0,
    ) -> typing.Optional[Lease]:
        """Revoke the given lease.

        This returns the lease object prior to the call (None when the
        lease did not exist).

        This is idempotent in the sense that if the lease does not
        exist, it is a no-op.

        * lease:
          The lease ID, which cannot be empty.
        """
        raise NotImplementedError

    #
    # Transactions.
    #

    def begin(self, *, transaction: TransactionId):
        """Begin a transaction.

        This raises TransactionTimeoutError if it cannot begin a
        transaction (probably because other transactions are still in
        progress).

        This is idempotent in the sense that if the transaction has
        begun already, this is a no-op.

        NOTE: Strictly speaking, given this interface design, there is
        a chance that two clients begin their transactions with the same
        transaction ID.  Nevertheless, since transaction IDs are 64-bit
        integers, the chance of collision among randomly-generated IDs
        is very, very low in practical terms.
        """
        raise NotImplementedError

    def rollback(self, *, transaction: TransactionId):
        """Roll back a transaction.

        This is idempotent in the sense that if the transaction was
        already rolled back, this is a no-op.
        """
        raise NotImplementedError

    def commit(self, *, transaction: TransactionId):
        """Commit a transaction.

        This is idempotent in the sense that if the transaction was
        already committed, this is a no-op.
        """
        raise NotImplementedError

    #
    # Maintenance.
    #

    def compact(self, *, revision: Revision):
        """Remove key spaces before the given revision.

        This does not remove the current key space even if the given
        revision is greater than the current revision.
        """
        raise NotImplementedError
DatabaseRequest, DatabaseResponse = reqrep.generate_interface_types(
DatabaseInterface, 'Database'
)
@dataclasses.dataclass(frozen=True)
class DatabaseEvent:
    """Event of a key space change.

    ``previous`` is the pair before the change and ``current`` the pair
    after it: ``previous is None`` denotes a creation and
    ``current is None`` a deletion (see the ``is_*`` predicates below).
    """
    __module__ = 'g1.operations.databases'
    # Although these fields are None-able, we strip off typing.Optional
    # annotation from them because our capnp converter does not support
    # typing.Union nor typing.Optional.
    previous: KeyValue
    current: KeyValue
    def __post_init__(self):
        # At least one side of the event must be present.
        ASSERT.any((self.previous is not None, self.current is not None))
        if self.previous is not None and self.current is not None:
            # An update must move forward in revision and keep the key.
            ASSERT.less(self.previous.revision, self.current.revision)
            ASSERT.equal(self.previous.key, self.current.key)
    # It is possible that both values are None (after the key spaces
    # were compacted).
    # NOTE(review): the comment above appears to contradict the
    # ASSERT.any in __post_init__, which rejects previous == current ==
    # None; confirm which one reflects the intended invariant.
    def is_creation(self):
        """True when the event created the key."""
        # NOTE: We could have a "false" creation if the key spaces were
        # compacted.  For now we do not handle this case.
        return self.previous is None and self.current is not None
    def is_update(self):
        """True when the event changed an existing key's value."""
        return self.previous is not None and self.current is not None
    def is_deletion(self):
        """True when the event deleted the key."""
        return self.current is None
def generate_lease_id():
    """Return a random, positive lease ID within the signed 64-bit range."""
    # Stay below 2**63: Python stdlib's sqlite3 module, which SQLAlchemy
    # is based on, does not support unsigned 64-bit integers.
    upper_bound = 1 << 63
    return random.randint(1, upper_bound - 1)
def generate_transaction_id():
    """Return a random, positive transaction ID within the signed 64-bit range."""
    # Stay below 2**63: Python stdlib's sqlite3 module, which SQLAlchemy
    # is based on, does not support unsigned 64-bit integers.
    return 1 + random.randrange((1 << 63) - 1)
def next_key(key):
    """Increment *key* interpreted as a big-endian unsigned integer.

    The result keeps the original length (left-padded with zero bytes)
    and grows by one byte only when the increment carries out of the top
    byte.  Useful for turning a key prefix into an exclusive range end.
    """
    ASSERT.true(key)
    incremented = int.from_bytes(key, 'big') + 1
    width = max(len(key), (incremented.bit_length() + 7) // 8)
    return incremented.to_bytes(width, 'big')
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/databases/bases/g1/operations/databases/bases/interfaces.py",
"copies": "1",
"size": "14289",
"license": "mit",
"hash": 835693825143514100,
"line_mean": 24.9328493648,
"line_max": 74,
"alpha_frac": 0.6164182238,
"autogenerated": false,
"ratio": 4.388513513513513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5504931737313513,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DatabaseServer',
]
import collections
import functools
import logging
import time
import sqlalchemy
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
from g1.bases.assertions import ASSERT
from g1.operations.databases.bases import interfaces
from . import connections
from . import databases
from . import schemas
# I am not sure why pylint cannot lint contextlib.asynccontextmanager
# correctly; let us disable this check for now.
#
# pylint: disable=not-async-context-manager
LOG = logging.getLogger(__name__)
_TRANSACTION_TIMEOUT = 16 # Unit: seconds.
def _make_reader(database_func):
@functools.wraps(database_func)
async def wrapper(self, *, transaction=0, **kwargs):
if transaction == 0:
conn_ctx = self._manager.reading()
else:
conn_ctx = self._manager.writing(transaction)
async with conn_ctx as conn:
return database_func(conn, self._tables, **kwargs)
return wrapper
def _make_writer(database_func, need_tx_revision=False):
@functools.wraps(database_func)
async def wrapper(self, *, transaction=0, **kwargs):
if transaction == 0:
conn_ctx = self._manager.transacting()
if need_tx_revision:
kwargs['tx_revision'] = None
else:
conn_ctx = self._manager.writing(transaction)
if need_tx_revision:
kwargs['tx_revision'] = self._tx_revision
self._update_tx_expiration()
async with conn_ctx as conn:
return database_func(conn, self._tables, **kwargs)
return wrapper
async def _sleep(amount, result):
    """Sleep for *amount* seconds, then return *result*.

    Used to schedule timer callbacks on the completion queue: the queued
    task's result is the callback that _run_timer_tasks invokes when the
    timer fires.
    """
    await timers.sleep(amount)
    return result
class DatabaseServer(interfaces.DatabaseInterface):
    """SQLAlchemy/SQLite-backed implementation of DatabaseInterface.

    Besides serving requests, this publishes DatabaseEvent objects for
    key-space changes and runs timer tasks that expire leases and roll
    back transactions that stay inactive for too long.
    """

    # DatabaseInterface declares methods as non-async, but we define
    # async methods here; so we have to disable this pylint check for
    # now.
    #
    # pylint: disable=invalid-overridden-method

    def __init__(self, engine, publisher):
        self._engine = engine
        self._manager = connections.ConnectionManager(self._engine.connect())
        self._metadata = sqlalchemy.MetaData()
        self._tables = schemas.make_tables(self._metadata)
        # Revision cached at transaction begin; None outside transactions.
        self._tx_revision = None
        # A transaction is automatically rolled back if it is inactive
        # after a certain amount of time.  This is a fail-safe mechanism
        # to prevent deadlocks due to client crashes.
        self._timer_queue = tasks.CompletionQueue()
        self._tx_expiration = time.monotonic()
        # For publishing database events.
        self._publisher = publisher
        # Events buffered during a transaction; published on commit.
        self._pending_events = collections.deque()

    async def serve(self):
        """Schedule timers for existing leases, then run timer tasks forever."""
        await self._check_lease_expiration()
        await self._run_timer_tasks()

    async def _check_lease_expiration(self):
        """Schedule an expiration timer for every lease already in the database."""
        ASSERT.equal(self._manager.tx_id, 0)
        async with self._manager.reading() as conn:
            expirations = databases.lease_scan_expirations(conn, self._tables)
        now = time.time()
        for expiration in expirations:
            self._timer_queue.spawn(
                _sleep(expiration - now, self._lease_expire)
            )

    async def _run_timer_tasks(self):
        """Await each finished timer task and invoke the callback it carries."""
        async for timer_task in self._timer_queue:
            timer_callback = timer_task.get_result_nonblocking()
            await timer_callback()

    def shutdown(self):
        """Cancel all outstanding timer tasks and stop accepting new ones."""
        for timer_task in self._timer_queue.close(graceful=False):
            timer_task.cancel()

    def __enter__(self):
        # Create tables on entry so a fresh database is usable immediately.
        LOG.info('database start')
        self._metadata.create_all(self._engine)
        return self

    def __exit__(self, *args):
        LOG.info('database stop')
        self._manager.close()

    #
    # Transactions.
    #

    async def begin(self, *, transaction):
        """Begin the given transaction and cache the current revision."""
        conn = await self._manager.begin(transaction)
        try:
            self._tx_revision = databases.get_revision(conn, self._tables)
            self._update_tx_expiration()
        except BaseException:
            # Leave no half-begun transaction behind.
            self._rollback(transaction)
            raise

    async def rollback(self, *, transaction):
        self._rollback(transaction)

    def _rollback(self, transaction):
        """Roll back, dropping the cached revision and any buffered events."""
        self._manager.rollback(transaction)
        self._tx_revision = None
        self._pending_events.clear()

    def _rollback_due_to_timeout(self):
        """Like _rollback, but for the fail-safe transaction timeout path."""
        self._manager.rollback_due_to_timeout()
        self._tx_revision = None
        self._pending_events.clear()

    async def commit(self, *, transaction):
        """Commit: bump the revision, then publish events buffered in the transaction."""
        async with self._manager.writing(transaction) as conn:
            databases.increment_revision(
                conn, self._tables, revision=self._tx_revision
            )
        self._manager.commit(transaction)
        self._tx_revision = None
        try:
            for event in self._pending_events:
                self._publisher.publish_nonblocking(event)
        finally:
            self._pending_events.clear()

    def _update_tx_expiration(self):
        """Push the current transaction's deadline out and arm a check timer."""
        if self._manager.tx_id == 0:
            return
        tx_expiration = time.monotonic() + _TRANSACTION_TIMEOUT
        if tx_expiration > self._tx_expiration:
            self._tx_expiration = tx_expiration
        self._timer_queue.spawn(
            _sleep(_TRANSACTION_TIMEOUT, self._check_tx_expiration)
        )

    # Make the signature of this function async to keep it the same with
    # _lease_expire.
    async def _check_tx_expiration(self):
        """Roll back the current transaction if it has passed its deadline."""
        if self._manager.tx_id == 0:
            return
        if self._tx_expiration >= time.monotonic():
            return
        LOG.warning('transaction timeout: %#016x', self._manager.tx_id)
        self._rollback_due_to_timeout()

    #
    # Key-value operations.
    #

    get_revision = _make_reader(databases.get_revision)
    get = _make_reader(databases.get)
    count = _make_reader(databases.count)
    scan_keys = _make_reader(databases.scan_keys)
    scan = _make_reader(databases.scan)
    # set/delete need wrappers (below) to publish change events.
    _set = _make_writer(databases.set_, need_tx_revision=True)
    _delete = _make_writer(databases.delete, need_tx_revision=True)

    async def set(self, *, key, value, transaction=0):
        """Set a key-value pair; publish a change event when the value changed."""
        prior = await self._set(key=key, value=value, transaction=transaction)
        if prior is None or prior.value != value:
            if transaction != 0:
                # Inside a transaction the new pair will carry the
                # incremented revision written at commit time.
                revision = ASSERT.not_none(self._tx_revision) + 1
            else:
                ASSERT.equal(self._manager.tx_id, 0)
                async with self._manager.reading() as conn:
                    revision = databases.get_revision(conn, self._tables)
            self._maybe_publish_events(
                transaction,
                [
                    interfaces.DatabaseEvent(
                        previous=prior,
                        current=interfaces.KeyValue(
                            revision=revision, key=key, value=value
                        ),
                    ),
                ],
            )
        return prior

    async def delete(self, *, key_start=b'', key_end=b'', transaction=0):
        """Delete a key range; publish a deletion event per removed pair."""
        prior = await self._delete(
            key_start=key_start, key_end=key_end, transaction=transaction
        )
        self._maybe_publish_events(
            transaction,
            (
                interfaces.DatabaseEvent(previous=previous, current=None)
                for previous in prior
            ),
        )
        return prior

    def _maybe_publish_events(self, transaction, events):
        """Publish immediately outside a transaction; buffer until commit inside one."""
        if transaction == 0:
            for event in events:
                self._publisher.publish_nonblocking(event)
        else:
            self._pending_events.extend(events)

    #
    # Leases.
    #

    lease_get = _make_reader(databases.lease_get)
    lease_count = _make_reader(databases.lease_count)
    lease_scan = _make_reader(databases.lease_scan)
    _lease_grant = _make_writer(databases.lease_grant)
    lease_associate = _make_writer(databases.lease_associate)
    lease_dissociate = _make_writer(databases.lease_dissociate)
    lease_revoke = _make_writer(databases.lease_revoke)

    async def lease_grant(self, **kwargs):  # pylint: disable=arguments-differ
        """Grant/extend a lease and arm a timer for its expiration."""
        result = await self._lease_grant(**kwargs)
        self._timer_queue.spawn(
            _sleep(kwargs['expiration'] - time.time(), self._lease_expire)
        )
        return result

    async def _lease_expire(self):
        """Timer callback: expire leases, publishing a deletion event per expired pair."""
        prior = ()
        try:
            async with self._manager.transacting() as conn:
                prior = databases.lease_expire(
                    conn, self._tables, current_time=time.time()
                )
        except interfaces.TransactionTimeoutError:
            # Best-effort: a later timer (or restart) will retry expiration.
            LOG.warning('lease_expire: timeout on beginning transaction')
        if prior:
            LOG.info('expire %d pairs', len(prior))
            self._maybe_publish_events(
                0,
                (
                    interfaces.DatabaseEvent(previous=previous, current=None)
                    for previous in prior
                ),
            )

    #
    # Maintenance.
    #

    async def compact(self, **kwargs):  # pylint: disable=arguments-differ
        """Remove key spaces before the given revision (see DatabaseInterface)."""
        async with self._manager.transacting() as conn:
            return databases.compact(conn, self._tables, **kwargs)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/databases/servers/g1/operations/databases/servers/servers.py",
"copies": "1",
"size": "9279",
"license": "mit",
"hash": 8730191180591167000,
"line_mean": 32.0213523132,
"line_max": 78,
"alpha_frac": 0.5982325682,
"autogenerated": false,
"ratio": 4.225409836065574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5323642404265574,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"dedent_block_string_value",
"print_block_string",
"get_block_string_indentation",
]
def dedent_block_string_value(raw_string: str) -> str:
    """Produce the value of a block string from its parsed raw value.

    Similar to CoffeeScript's block string, Python's docstring trim or Ruby's
    strip_heredoc.

    This implements the GraphQL spec's BlockStringValue() static algorithm.

    For internal use only.
    """
    lines = raw_string.splitlines()
    # Strip the common indentation from every line except the first.
    indent = get_block_string_indentation(raw_string)
    if indent:
        lines = lines[:1] + [line[indent:] for line in lines[1:]]
    # Drop leading blank lines...
    first = 0
    last = len(lines)
    while first < last and is_blank(lines[first]):
        first += 1
    # ...and trailing blank lines.
    while last > first and is_blank(lines[last - 1]):
        last -= 1
    # Join the remaining lines with U+000A.
    return "\n".join(lines[first:last])
def is_blank(s: str) -> bool:
    """Check whether string contains only space or tab characters."""
    # Stripping all spaces/tabs leaves an empty string exactly when the
    # string contained nothing else.
    return not s.strip(" \t")
def get_block_string_indentation(value: str) -> int:
    """Get the amount of indentation for the given block string.

    The indentation is the smallest number of leading tab/space
    characters over all non-blank lines after the first one.

    For internal use only.
    """
    common = None
    # Split on bare CR or LF exactly like the spec's character scan; a
    # CRLF pair yields an empty segment, which is skipped as blank.
    for line in value.replace("\r", "\n").split("\n")[1:]:
        content = line.lstrip("\t ")
        if not content:
            # Lines holding only tabs/spaces never define indentation.
            continue
        indent = len(line) - len(content)
        if common is None or indent < common:
            common = indent
    return common or 0
def print_block_string(
    value: str, indentation: str = "", prefer_multiple_lines: bool = False
) -> str:
    """Print a block string in the indented block form.

    Prints a block string in the indented block form by adding a leading and
    trailing blank line. However, if a block string starts with whitespace and
    is a single-line, adding a leading blank line would strip that whitespace.

    For internal use only.
    """
    single_line = "\n" not in value
    leading_blank = value.startswith((" ", "\t"))
    multiline = (
        prefer_multiple_lines
        or not single_line
        or value.endswith(('"', "\\"))
    )
    body = value.replace("\n", "\n" + indentation) if indentation else value
    # A leading newline would strip significant whitespace from an
    # indented single-line value, so it is omitted in that case.
    interior = ""
    if multiline and not (single_line and leading_blank):
        interior = "\n" + indentation
    interior += body
    if multiline:
        interior += "\n"
    escaped = interior.replace('"""', '\\"""')
    return f'"""{escaped}"""'
| {
"repo_name": "graphql-python/graphql-core",
"path": "src/graphql/language/block_string.py",
"copies": "1",
"size": "3198",
"license": "mit",
"hash": -6614283977143846000,
"line_mean": 30.0485436893,
"line_max": 79,
"alpha_frac": 0.6138211382,
"autogenerated": false,
"ratio": 3.895249695493301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5009070833693301,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DEFAULT_FILTERS',
'DEFAULT_XAR_FILTERS',
'merge_image',
]
import contextlib
import logging
import tempfile
from pathlib import Path
from g1 import scripts
from g1.containers import models
from g1.containers import scripts as ctr_scripts
from . import utils
LOG = logging.getLogger(__name__)
DEFAULT_FILTERS = (
# Do not leak any source codes to the application image.
# Keep drydock path in sync with //bases:build.
('exclude', '/home/plumber/drydock'),
('exclude', '/home/plumber/.gradle'),
('exclude', '/home/plumber/.python_history'),
('exclude', '/home/plumber/.wget-hsts'),
('exclude', '/root/.cache'),
('exclude', '/usr/src'),
# Include only relevant files under /etc.
('include', '/etc/'),
# We use distro java at the moment.
('include', '/etc/alternatives/'),
('include', '/etc/alternatives/java'),
('include', '/etc/java*'),
('include', '/etc/java*/**'),
('include', '/etc/group'),
('include', '/etc/group-'),
('include', '/etc/gshadow'),
('include', '/etc/gshadow-'),
('include', '/etc/inputrc'),
('include', '/etc/ld.so.cache'),
('include', '/etc/passwd'),
('include', '/etc/passwd-'),
('include', '/etc/shadow'),
('include', '/etc/shadow-'),
('include', '/etc/ssl/'),
('include', '/etc/ssl/**'),
('include', '/etc/subgid'),
('include', '/etc/subgid-'),
('include', '/etc/subuid'),
('include', '/etc/subuid-'),
('include', '/etc/sudoers.d/'),
('include', '/etc/sudoers.d/**'),
('exclude', '/etc/**'),
# Exclude distro binaries from application image (note that base
# image includes a base set of distro binaries).
('exclude', '/bin'),
('exclude', '/sbin'),
# We use distro java at the moment.
('include', '/usr/bin/'),
('include', '/usr/bin/java'),
('exclude', '/usr/bin/**'),
('exclude', '/usr/bin'),
('exclude', '/usr/sbin'),
# Exclude headers.
('exclude', '/usr/include'),
('exclude', '/usr/local/include'),
# Exclude distro systemd files.
('exclude', '/lib/systemd'),
('exclude', '/usr/lib/systemd'),
# In general, don't exclude distro libraries since we might depend
# on them, except these libraries.
('exclude', '/usr/lib/apt'),
('exclude', '/usr/lib/gcc'),
('exclude', '/usr/lib/git-core'),
('exclude', '/usr/lib/python*'),
('exclude', '/usr/lib/**/*perl*'),
# Exclude these to save more space.
('exclude', '/usr/share/**'),
('exclude', '/var/**'),
)
# For XAR images, we only include a few selected directories, and
# exclude everything else.
#
# To support Python, we include our code under /usr/local in the XAR
# image (like our pod image). An alternative is to use venv to install
# our codebase, but this seems to be too much effort; so we do not take
# this approach for now.
#
# We explicitly remove CPython binaries from /usr/local/bin so that the
# `env` command will not (and should not) resolve to them.
#
# We do not include /usr/bin/java (symlink to /etc/alternatives) for
# now. If you want to use Java, you have to directly invoke it under
# /usr/lib/jvm/...
DEFAULT_XAR_FILTERS = (
('include', '/usr/'),
('include', '/usr/lib/'),
('exclude', '/usr/lib/**/*perl*'),
('include', '/usr/lib/jvm/'),
('include', '/usr/lib/jvm/**'),
('include', '/usr/lib/x86_64-linux-gnu/'),
('include', '/usr/lib/x86_64-linux-gnu/**'),
('include', '/usr/local/'),
('include', '/usr/local/bin/'),
('exclude', '/usr/local/bin/python*'),
('include', '/usr/local/bin/*'),
('include', '/usr/local/lib/'),
('include', '/usr/local/lib/**'),
('exclude', '**'),
)
@scripts.using_sudo()
def merge_image(
    *,
    name,
    version,
    builder_images,
    default_filters,
    filters,
    output,
):
    """Merge builder images' rootfs into one application image.

    Each builder image's rootfs is rsync'ed (subject to the filter
    rules) into a temporary rootfs, which is then packed into the
    application image at *output* via ctr_build_image.

    Args:
      name: application image name; also used to derive its builder
        image's name via utils.get_builder_name.
      version: version of both the application and its builder image.
      builder_images: extra builder images to merge in first.
      default_filters: base (option, pattern) rsync filter pairs.
      filters: caller-supplied filter pairs, placed before (and so able
        to override) default_filters.
      output: path of the image file to create; the temporary rootfs is
        created next to it.
    """
    rootfs_paths = [
        ctr_scripts.ctr_get_image_rootfs_path(image)
        for image in builder_images
    ]
    # The application's own builder image is rsync'ed last (presumably
    # so its files win on conflicts — confirm rsync overwrite behavior).
    rootfs_paths.append(
        ctr_scripts.ctr_get_image_rootfs_path(
            models.PodConfig.Image(
                name=utils.get_builder_name(name),
                version=version,
            )
        )
    )
    filter_rules = _get_filter_rules(default_filters, filters)
    with contextlib.ExitStack() as stack:
        tempdir_path = stack.enter_context(
            tempfile.TemporaryDirectory(dir=output.parent)
        )
        output_rootfs_path = Path(tempdir_path) / 'rootfs'
        # Remove the merged rootfs tree when done (this function runs
        # under sudo, so plain TemporaryDirectory cleanup is not enough).
        stack.callback(scripts.rm, output_rootfs_path, recursive=True)
        LOG.info('generate application image under: %s', output_rootfs_path)
        # NOTE: Do NOT overlay-mount these rootfs (and then rsync from
        # the overlay) because the overlay does not include base and
        # base-builder, and thus some tombstone files may not be copied
        # correctly (I don't know why but rsync complains about this).
        # For now our workaround is to rsync each rootfs sequentially.
        for rootfs_path in rootfs_paths:
            utils.rsync(rootfs_path, output_rootfs_path, filter_rules)
        ctr_scripts.ctr_build_image(name, version, output_rootfs_path, output)
def _get_filter_rules(default_filters, filters):
return [
# Log which files are included/excluded due to filter rules.
'--debug=FILTER2',
# Add filters before default_filters so that the former may
# override the latter. I have a feeling that this "override"
# thing could be brittle, but let's leave this here for now.
*('--%s=%s' % pair for pair in filters),
*('--%s=%s' % pair for pair in default_filters),
]
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/rules/images/merge_image.py",
"copies": "1",
"size": "5627",
"license": "mit",
"hash": -3166568908898312000,
"line_mean": 33.3109756098,
"line_max": 78,
"alpha_frac": 0.5997867425,
"autogenerated": false,
"ratio": 3.5323289391086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46321156816085995,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DEFAULT_USER_AGENT',
'make_connection_pool_params_dict',
'make_params_dict',
'make_rate_limit',
'make_retry',
]
from g1.apps import parameters
from .. import policies
DEFAULT_USER_AGENT = 'Mozilla/5.0'
def make_params_dict(
# Cache.
cache_size=8,
# Circuit breaker.
failure_threshold=0,
failure_period=8,
failure_timeout=8,
success_threshold=2,
# Rate limit.
max_request_rate=0,
max_requests=64,
raise_unavailable=False,
# Retry.
max_retries=0,
backoff_base=1,
):
return dict(
cache_size=parameters.Parameter(
cache_size,
type=int,
validate=(0).__lt__,
),
failure_threshold=parameters.Parameter(
failure_threshold,
type=int,
validate=(0).__le__,
),
failure_period=parameters.Parameter(
failure_period,
type=(int, float),
validate=(0).__lt__,
unit='seconds',
),
failure_timeout=parameters.Parameter(
failure_timeout,
type=(int, float),
validate=(0).__lt__,
unit='seconds',
),
success_threshold=parameters.Parameter(
success_threshold,
type=int,
validate=(0).__lt__,
),
max_request_rate=parameters.Parameter(
max_request_rate,
type=(int, float),
unit='requests/second',
),
max_requests=parameters.Parameter(
max_requests,
type=int,
validate=(0).__lt__,
),
raise_unavailable=parameters.Parameter(
raise_unavailable,
type=bool,
),
max_retries=parameters.Parameter(max_retries, type=int),
backoff_base=parameters.Parameter(
backoff_base,
type=(int, float),
validate=(0).__lt__,
unit='seconds',
),
)
def make_connection_pool_params_dict(
    num_pools=0,
    num_connections_per_pool=0,
):
    """Build the parameter dict describing connection pool sizing."""
    non_negative = (0).__le__
    return {
        'num_pools': parameters.Parameter(
            num_pools, type=int, validate=non_negative),
        'num_connections_per_pool': parameters.Parameter(
            num_connections_per_pool, type=int, validate=non_negative),
    }
def make_circuit_breakers(params):
    """Create tristate circuit breakers, or None when disabled.

    A non-positive ``failure_threshold`` disables the feature.
    """
    threshold = params.failure_threshold.get()
    if threshold > 0:
        return policies.TristateBreakers(
            failure_threshold=threshold,
            failure_period=params.failure_period.get(),
            failure_timeout=params.failure_timeout.get(),
            success_threshold=params.success_threshold.get(),
        )
    return None
def make_rate_limit(params):
    """Create a token-bucket rate limiter, or None when disabled.

    A non-positive ``max_request_rate`` disables rate limiting.
    """
    rate = params.max_request_rate.get()
    if rate > 0:
        return policies.TokenBucket(
            rate,
            params.max_requests.get(),
            params.raise_unavailable.get(),
        )
    return None
def make_retry(params):
    """Create an exponential-backoff retry policy, or None when disabled."""
    retries = params.max_retries.get()
    if retries > 0:
        return policies.ExponentialBackoff(retries, params.backoff_base.get())
    return None
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/clients/g1/http/clients/parts/bases.py",
"copies": "1",
"size": "3262",
"license": "mit",
"hash": 5030547067880569000,
"line_mean": 24.2868217054,
"line_max": 78,
"alpha_frac": 0.5533415083,
"autogenerated": false,
"ratio": 4.062266500622665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115608008922665,
"avg_score": null,
"num_lines": null
} |
# Public API of this macro module.
__all__ = [
    'deleteDownstream',
]
# Human-readable module name (presumably consumed by a docs/UI layer --
# confirm against the project's doc generator).
__displayname__ = 'Pipeline'
def deleteDownstream(input_source=None):
    """Remove filters downstream of ``input_source`` from the pipeline.

    When ``input_source`` is None, every filter (any proxy that has an
    input) is deleted while readers and sources are preserved.

    Args:
        input_source (str): Name of the pipeline object whose direct
            consumers should be deleted. Defaults to None.
    """
    import paraview.simple as pvs
    if input_source is None:
        # No source named: drop everything that has an input property,
        # i.e. every filter, keeping readers and sources intact.
        for proxy in pvs.GetSources().values():
            if proxy.GetProperty("Input") is not None:
                pvs.Delete(proxy)
        return None
    # A source was named: delete only proxies fed directly by it.
    upstream = pvs.FindSource(input_source)
    for proxy in pvs.GetSources().values():
        if proxy.GetPropertyValue("Input") is upstream:
            pvs.Delete(proxy)
    return None
deleteDownstream.__displayname__ = 'Delete Downstream Filters'
deleteDownstream.__category__ = 'macro'
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "pvmacros/pipeline.py",
"copies": "1",
"size": "1263",
"license": "bsd-3-clause",
"hash": 9080383847292226000,
"line_mean": 29.8048780488,
"line_max": 79,
"alpha_frac": 0.5993665875,
"autogenerated": false,
"ratio": 4.048076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
# Build-target module: removes generated regex artifacts.
# Names the build driver looks up on every target module.
__all__ = [
    'depends',
    'config_fields',
    'run'
]
import glob
import os
# Targets that must run before this one (none).
depends = [
]
# Expected types for this target's JSON configuration fields.
config_fields = {
    "locations": list,
}
def run(tracker):
    """Clean every configured location, then the regex directory."""
    locations = tracker.config['locations']
    for location in locations:
        tracker.run_job(job_clean, "Cleaning %s" % location, location)
    # The regex directory keeps its *.regex sources; only *.out is removed.
    tracker.run_job(job_clean_no_remove_regex, "Cleaning regex", "regex")
def job_clean(tracker, location):
    """Delete all generated *.regex and *.out files under ../<location>."""
    base = os.path.join('..', location)
    targets = glob.glob("%s/*.regex" % base) + glob.glob("%s/*.out" % base)
    if not targets:
        tracker.print_string("(nothing to do)")
    for target in targets:
        tracker.print_operation("RM", target)
        try:
            os.remove(target)
        except OSError as err:
            # Report but keep going; one stuck file should not stop cleanup.
            tracker.print_error(err)
            tracker.failure()
def job_clean_no_remove_regex(tracker, location):
    """Delete only *.out files under ../<location>, keeping *.regex sources."""
    out_files = glob.glob("%s/*.out" % os.path.join('..', location))
    if not out_files:
        tracker.print_string("(nothing to do)")
    for out_file in out_files:
        tracker.print_operation("RM", out_file)
        try:
            os.remove(out_file)
        except OSError as err:
            # Report but keep going; one stuck file should not stop cleanup.
            tracker.print_error(err)
            tracker.failure()
| {
"repo_name": "ammongit/ucr-class-map",
"path": "scripts/target_clean.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": 8210196982293180000,
"line_mean": 22.2692307692,
"line_max": 73,
"alpha_frac": 0.5851239669,
"autogenerated": false,
"ratio": 3.6336336336336337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4718757600533634,
"avg_score": null,
"num_lines": null
} |
# Build-target module: removes generated regex artifacts.
# Names the build driver looks up on every target module.
__all__ = [
    'depends',
    'config_fields',
    'run'
]
import glob
import os
# Targets that must run before this one (none).
depends = [
]
# No JSON configuration fields are required by this target.
config_fields = {
}
def run(tracker):
    """Clean the 'js' and 'tests' directories, then the regex directory."""
    for directory in ('js', 'tests'):
        tracker.run_job(job_clean, "Cleaning %s" % directory, directory)
    # The regex directory keeps its *.regex sources; only *.out is removed.
    tracker.run_job(job_clean_no_remove_regex, "Cleaning regex", "regex")
def job_clean(tracker, location):
    """Remove generated *.regex and *.out files for the given location."""
    directory = os.path.join('..', location)
    matches = []
    # Collect .regex artifacts first, then .out files, as separate globs.
    for pattern in ("%s/*.regex" % directory, "%s/*.out" % directory):
        matches.extend(glob.glob(pattern))
    if not matches:
        tracker.print_string("(nothing to do)")
    for path in matches:
        tracker.print_operation("RM", path)
        try:
            os.remove(path)
        except OSError as err:
            # Report but keep going; one stuck file should not stop cleanup.
            tracker.print_error(err)
            tracker.failure()
def job_clean_no_remove_regex(tracker, location):
    """Remove only *.out files for the location, preserving *.regex sources."""
    directory = os.path.join('..', location)
    artifacts = glob.glob("%s/*.out" % directory)
    if not artifacts:
        tracker.print_string("(nothing to do)")
    for artifact in artifacts:
        tracker.print_operation("RM", artifact)
        try:
            os.remove(artifact)
        except OSError as err:
            # Report but keep going; one stuck file should not stop cleanup.
            tracker.print_error(err)
            tracker.failure()
| {
"repo_name": "BradleyCai/ucr-schedule-visualizer",
"path": "scripts/target_clean.py",
"copies": "2",
"size": "1177",
"license": "mit",
"hash": 6005885149075181000,
"line_mean": 21.2075471698,
"line_max": 73,
"alpha_frac": 0.577740017,
"autogenerated": false,
"ratio": 3.599388379204893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177128396204893,
"avg_score": null,
"num_lines": null
} |
# Sample target demonstrating the tracker/job API.
# Names the build driver looks up on every target module.
__all__ = [
    'depends',
    'config_fields',
    'run',
]
import random
depends = [
    # Specify dependencies here. Each of these is a target
    # as specified in a target_* file.
]
config_fields = {
    # Settings can be passed to a target as a JSON file
    # Specify the types of each field here
    # If this dictionary is empty, it tells build.py to
    # not load a config file
}
def run(tracker):
    """Demonstrate the tracker API: jobs, notices, and warnings."""
    tracker.run_job(job_hello_world, "Printing \"hello world\"")
    tracker.print_notice("Important notice here")
    tracker.run_job(job_foo_bar, "Fooing bars")
    tracker.print_warning("Something possibly bad happened")
def job_hello_world(tracker):
    """Print two sample PRINT operations."""
    tracker.print_operation("PRINT", "Hello")
    tracker.print_operation("PRINT", "World")
def job_foo_bar(tracker):
    """Emit sample FOO operations, nested sub-jobs, and random errors."""
    for index in range(11):
        tracker.print_operation("FOO", "bar %d" % index)
        # Every third iteration spawns a nested sample job.
        if index % 3 == 0:
            tracker.run_job(job_crunch_numbers, "Crunching numbers")
        # Roughly one in five iterations reports a sample error.
        if random.random() < 0.2:
            tracker.print_error("Oh noes!")
def job_crunch_numbers(tracker):
    """Print a random number (2..7) of random CALC operations."""
    count = random.randint(2, 7)
    for _ in range(count):
        tracker.print_operation("CALC", str(random.randint(-100, 100)))
| {
"repo_name": "ammongit/ucr-class-map",
"path": "scripts/target_sample.py",
"copies": "1",
"size": "1186",
"license": "mit",
"hash": 3373570609063107000,
"line_mean": 25.3555555556,
"line_max": 71,
"alpha_frac": 0.6374367622,
"autogenerated": false,
"ratio": 3.4080459770114944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9416854577249999,
"avg_score": 0.02572563239229906,
"num_lines": 45
} |
__all__ = [
'depends',
'config_fields',
'run',
]
"""
The purpose of this target is to 'compile' the regular expressions
found in the regex/ directory and inject them into their respective
Javascript sources in the js/ directory. This is done by invoking a
Python script in regex/ that combines multiple *.regex files into
'compiled' *.out files. The purpose of this is to break down these
long and complicated regular expressions into more meaningful parts,
which can be fixed easier. This means that whenever you need to
modify a regular expression, you should only modify the *.regex files
in regex/. The *.out files or the regular expressions in the javascript
sources should not be modified, as they will be overwritten.
"""
import glob
import os
import re
import shutil
DEPENDENCY_REGEX = re.compile(r'%\{.*?\}')
DEPENDENCY_NAME_REGEX = re.compile(r'%\{(.*?)\}')
REGEX_INJECTION_PATTERN = r'this\.%s\s*=\s*\/.\/g;'
REGEX_REPLACE_PATTERN = r'this.%s = /%s/g;'
depends = [
]
config_fields = {
"source-files": list,
"regex-directory": str,
"regex-subdirectories": list,
"copy-to": list,
"inject-file": str,
"to-inject": dict,
}
def run(tracker):
    """Entry point: compile, copy, and inject regexes per subdirectory."""
    directory = tracker.config['regex-directory']
    tracker.print_activity("Switching directory to '%s'" % directory)
    try:
        os.chdir(directory)
    except OSError as err:
        tracker.print_error("Unable to change directory to %s: %s" % (directory, err))
        tracker.terminate()
    # Process each subdirectory in turn, restoring the cwd afterwards.
    for directory in tracker.config['regex-subdirectories']:
        try:
            old_cwd = os.getcwd()
            os.chdir(directory)
        except OSError as err:
            tracker.print_error("Unable to change directory to %s: %s" % (directory, err))
            tracker.terminate()
        # Only re-copy / re-inject artifacts when compilation changed them.
        updated = tracker.run_job(job_compile_regexes, "Compiling regex sources")
        tracker.run_job(job_copy_out_files, "Copying compiled regex artifacts", updated)
        tracker.run_job(job_inject_regex_artifacts, "Injecting compiled regex artifacts", updated)
        os.chdir(old_cwd)
### Defined jobs ###
def job_compile_regexes(tracker):
    """Compile each configured regex source; return True if any changed."""
    sources = tracker.config['source-files']
    if not sources:
        tracker.print_string("(nothing to do)")
    any_updated = False
    for name in sources:
        if tracker.run_job(job_compile_regex, None, name):
            any_updated = True
    return any_updated
def job_compile_regex(tracker, name):
    """Compile one regex source into its .out artifact.

    Returns True when the target needed updating and a write was
    attempted (even if the write itself failed), False otherwise.
    """
    # Resolve the .regex source and .out target names for this unit.
    source, target = get_output_file_name(name)
    modified = get_mtime(tracker, target)
    tracker.print_operation("REGEX", target)
    # Expand %{...} references; needs_update reflects source staleness
    # relative to the existing target.
    compiled, needs_update = tracker.run_job(job_combine_regex, None, source, modified)
    if not needs_update:
        tracker.print_string("(No need to update '%s')" % target)
        return False
    elif compiled is None:
        # Reading the source failed; the error was already reported.
        return False
    if os.path.exists(target):
        if tracker.args.dontoverwrite:
            tracker.print_warning("Not overwriting '%s'." % target)
            return False
        tracker.print_notice("Overwriting '%s'." % target)
    try:
        with open(target, "w") as fh:
            fh.write(compiled)
    except IOError as err:
        tracker.print_error("Unable to write to '%s': %s." % (target, err))
        tracker.failure()
    return True
def job_combine_regex(tracker, source, modified, depends=None):
    """Recursively expand %{...} references in a regex source file.

    Args:
        tracker: Build tracker used for logging and running sub-jobs.
        source: Path of the .regex file to read.
        modified: mtime of the build target, for staleness checks.
        depends: Cache mapping dependency name -> expanded body; shared
            by recursive calls within one top-level compilation only.

    Returns:
        A (body, needs_update) pair: the expanded regex text (or None
        when the source could not be read) and whether the target must
        be rebuilt.
    """
    if depends is None:
        # Fresh cache per top-level call.  A mutable default argument
        # here would leak cached bodies across source files and across
        # subdirectories, serving stale expansions.
        depends = {}
    tracker.print_operation("DEP", source)
    try:
        with open(source, 'r') as fh:
            body = fh.read()
    except (OSError, IOError) as err:
        tracker.print_error("Unable to read from '%s': %s." % (source, err))
        tracker.failure()
        # Callers unpack a 2-tuple; a bare None here would raise
        # TypeError at the call site.  needs_update=True routes the
        # caller into its "compiled is None" failure branch.
        return None, True
    needs_update = False
    for depend in set(DEPENDENCY_REGEX.findall(body)):
        depend = DEPENDENCY_NAME_REGEX.match(depend).group(1)
        if depend not in depends:
            depends[depend], this_needs_update = tracker.run_job(
                job_combine_regex, None, depend + '.regex', modified, depends)
            needs_update |= this_needs_update
        else:
            # Name the cached dependency, not the referencing file.
            tracker.print_operation("DEP", "%s (cached)" % (depend + '.regex'))
        body = body.replace("%%{%s}" % depend, depends[depend])
    needs_update |= (get_mtime(tracker, source) > modified)
    needs_update |= tracker.args.alwaysbuild
    return body.rstrip(), needs_update
def job_copy_out_files(tracker, updated):
    """Copy compiled *.out artifacts into each configured destination.

    Args:
        tracker: Build tracker used for logging.
        updated: Whether the artifacts were rebuilt; when False, files
            already present at a destination are left untouched.
    """
    copied = False
    for filename in glob.glob("*.out"):
        for directory in tracker.config['copy-to']:
            dest = os.path.join(directory, filename)
            # Skip unchanged artifacts that are already in place.
            if not updated and os.path.exists(dest):
                continue
            tracker.print_operation("COPY", "%s -> %s" % (filename, dest))
            try:
                shutil.copy(filename, dest)
            except (OSError, IOError) as err:
                tracker.print_error("Unable to copy file: %s.\n" % err)
                tracker.failure()
            else:
                # Count only copies that actually succeeded (previously
                # the flag was set before the copy was attempted).
                copied = True
    if not copied:
        tracker.print_string("(nothing to do)")
def job_inject_regex_artifacts(tracker, updated):
    """Splice compiled regex artifacts into the Javascript output file.

    Args:
        tracker: Build tracker used for logging.
        updated: Whether artifacts changed; nothing is injected otherwise.
    """
    if not tracker.config['to-inject'] or not updated:
        tracker.print_string("(nothing to do)")
        return
    output_file = tracker.config["inject-file"]
    # Each entry maps an artifact file to the JS field it replaces.
    for input_file, field in tracker.config["to-inject"].items():
        tracker.print_operation("INJ", "%s:%s <- %s" % (output_file, field, input_file))
        # Get artifact
        try:
            with open(input_file, 'r') as fh:
                to_replace = fh.read()
        except IOError as err:
            tracker.print_error("Unable to read from '%s': %s." % (input_file, err))
            tracker.failure()
            continue
        else:
            # Double-escape newline escapes so they survive inside the
            # generated JS regex literal.
            to_replace = to_replace.replace(r"\n", r"\\n").strip()
        # Replace pattern with artifact
        try:
            with open(output_file, 'r') as fh:
                output_text = fh.read()
        except IOError as err:
            tracker.print_error("Unable to read from '%s': %s." % (output_file, err))
            tracker.failure()
            continue
        else:
            output_text = \
                re.sub(REGEX_INJECTION_PATTERN % re.escape(field),
                       REGEX_REPLACE_PATTERN % (field, to_replace),
                       output_text.rstrip())
        # Write result to file
        try:
            with open(output_file, 'w') as fh:
                fh.write(output_text)
        except IOError as err:
            tracker.print_error("Unable to write to '%s': %s." % (output_file, err))
            tracker.failure()
# Helper functions
def get_output_file_name(name):
    """Derive (source, target) file names for a regex compilation unit.

    Args:
        name: Unit name, with or without a '.regex' extension (any case).

    Returns:
        A (source, target) pair: the .regex input path and .out output path.
    """
    # str.endswith takes a *start index* as its second argument, not
    # regex flags -- passing re.IGNORECASE silently checked the wrong
    # substring.  Lower-case the name for a case-insensitive suffix test.
    if name.lower().endswith('.regex'):
        source = name
        target = name[:-6] + '.out'
    else:
        source = name + '.regex'
        target = name + '.out'
    return source, target
def get_mtime(tracker, path):
    """Return the modification time of *path*, or 0 if it does not exist.

    Args:
        tracker: Build tracker used for error reporting.  Call sites
            pass the tracker as the first positional argument, so the
            previous parameter name 'target' was wrong and the body's
            reference to 'tracker' raised NameError on stat errors.
        path: File system path to stat.

    Returns:
        The file's st_mtime, or 0 when the file is missing.
    """
    try:
        stat = os.stat(path)
    except FileNotFoundError:
        # A missing target simply means "older than everything".
        return 0
    except (IOError, OSError) as err:
        tracker.print_error("Unable to stat '%s': %s." % (path, err))
        tracker.terminate()
    return stat.st_mtime
| {
"repo_name": "ammongit/ucr-class-map",
"path": "scripts/target_build.py",
"copies": "1",
"size": "7049",
"license": "mit",
"hash": -995953861065559800,
"line_mean": 31.3348623853,
"line_max": 127,
"alpha_frac": 0.6033479926,
"autogenerated": false,
"ratio": 3.9139367018323155,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9985033358809043,
"avg_score": 0.006450267124654576,
"num_lines": 218
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.