text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Updates load balancer health check policy"""
from baseCmd import *
from baseResponse import *
class updateLBHealthCheckPolicyCmd (baseCmd):
    """Marvin command wrapper for the CloudStack ``updateLBHealthCheckPolicy`` API call.

    NOTE(review): ``typeInfo`` is a class-level dict that ``__init__``
    mutates, so all instances share one mapping -- this matches the
    convention of the marvin code generator; confirm before changing.
    """
    typeInfo = {}
    def __init__(self):
        # the API call is asynchronous; callers poll the job for the result
        self.isAsync = "true"
        """ID of load balancer health check policy"""
        """Required"""
        self.id = None
        self.typeInfo['id'] = 'uuid'
        """an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"""
        self.customid = None
        self.typeInfo['customid'] = 'string'
        """an optional field, whether to the display the policy to the end user or not"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        # names of the parameters the caller must supply
        self.required = ["id", ]
class updateLBHealthCheckPolicyResponse (baseResponse):
    """Marvin response wrapper for ``updateLBHealthCheckPolicy``.

    NOTE(review): as in the command class, ``typeInfo`` is a class-level
    dict mutated from ``__init__`` (shared across instances) -- generator
    convention.
    """
    typeInfo = {}
    def __init__(self):
        """the account of the HealthCheck policy"""
        self.account = None
        self.typeInfo['account'] = 'string'
        """the domain of the HealthCheck policy"""
        self.domain = None
        self.typeInfo['domain'] = 'string'
        """the domain ID of the HealthCheck policy"""
        self.domainid = None
        self.typeInfo['domainid'] = 'string'
        """the LB rule ID"""
        self.lbruleid = None
        self.typeInfo['lbruleid'] = 'string'
        """the id of the zone the HealthCheck policy belongs to"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'string'
        """the list of healthcheckpolicies"""
        self.healthcheckpolicy = []
class healthcheckpolicy:
    """Holder for a single load-balancer health-check policy from an API response.

    Every field defaults to ``None`` and is filled in by the response
    deserializer.  The ``healthcheckthresshold`` / ``unhealthcheckthresshold``
    spellings mirror the CloudStack API field names and must not be corrected.
    """

    def __init__(self):
        # id                      - the LB HealthCheck policy ID
        # description             - the description of the healthcheck policy
        # fordisplay              - is the policy displayed to the regular user
        # healthcheckinterval     - amount of time between health checks
        # healthcheckthresshold   - consecutive successes before an instance is healthy
        # pingpath                - the pingpath of the healthcheck policy
        # responsetime            - time to wait for a health-check response
        # state                   - the state of the policy
        # unhealthcheckthresshold - consecutive failures before an instance is unhealthy
        for field in (
            'id',
            'description',
            'fordisplay',
            'healthcheckinterval',
            'healthcheckthresshold',
            'pingpath',
            'responsetime',
            'state',
            'unhealthcheckthresshold',
        ):
            setattr(self, field, None)
|
{
"content_hash": "42b658acf9da8e3ec7b525ca56ff7f0b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 113,
"avg_line_length": 37.36363636363637,
"alnum_prop": 0.6127331711273317,
"repo_name": "MissionCriticalCloud/marvin",
"id": "10c31a33c0f6588ffc322a05a357710ae51a9005",
"size": "2466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marvin/cloudstackAPI/updateLBHealthCheckPolicy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2573421"
}
],
"symlink_target": ""
}
|
from animalia import mollusca_v1
async def sample_method_lro_signatures():
    """Generated snippet: call ``method_lro_signatures`` (a long-running
    operation) on the async client and print the final result.

    NOTE(review): this is a snippetgen *golden* file -- its exact text is
    compared against generator output, so edits here change the golden
    expectation, not runtime behavior.
    """
    # Create a client
    client = mollusca_v1.SnippetsAsyncClient()
    # Initialize request argument(s)
    my_message = mollusca_v1.MessageWithNesting()
    my_message.message.required_string = "required_string_value"
    my_message.my_int = 656
    request = mollusca_v1.SignatureRequest(
        my_string="my_string_value",
        my_int=656,
        my_bool=True,
        my_message=my_message,
        single_enum="DEFAULT",
    )
    # Make the request
    operation = client.method_lro_signatures(request=request)
    print("Waiting for operation to complete...")
    # Awaiting yields the operation object; .result() then produces the
    # LRO's final response.
    response = (await operation).result()
    # Handle the response
    print(response)
# [END mollusca_v1_generated_Snippets_MethodLroSignatures_async]
|
{
"content_hash": "1bc1455a7006311caa1a6e06bf0297ae",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 64,
"avg_line_length": 26.193548387096776,
"alnum_prop": 0.6810344827586207,
"repo_name": "googleapis/gapic-generator-python",
"id": "cc387195b745612d5fd15ff06085535417c86975",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/snippetgen/goldens/mollusca_v1_generated_snippets_method_lro_signatures_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2702"
},
{
"name": "Jinja",
"bytes": "767902"
},
{
"name": "Python",
"bytes": "4802905"
},
{
"name": "Shell",
"bytes": "31013"
},
{
"name": "Starlark",
"bytes": "26281"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
from soap.common.base import (
DynamicMethods, Comparable, base_dispatcher, dict_merge
)
from soap.common.cache import (
invalidate_cache, cached, cached_property, Flyweight
)
from soap.common.formatting import underline, superscript, indent, code_gobble
from soap.common.profile import timeit, timed, profile_calls, profile_memory
@contextmanager
def ignored(*exceptions):
    """Context manager that silently suppresses the given exception types.

    Behaves like :func:`contextlib.suppress`: any exception listed in
    ``*exceptions`` raised inside the ``with`` block is swallowed; every
    other exception propagates unchanged.
    """
    try:
        yield None
    except exceptions:
        pass  # intentionally ignored
|
{
"content_hash": "583a462c56b32f1024c9c6bfa7bc4d97",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.7572614107883817,
"repo_name": "admk/soap",
"id": "e8adc9ad65bd41eeb55cddf606c72c1c7c7be9c7",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/xitong/master",
"path": "soap/common/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Isabelle",
"bytes": "8132"
},
{
"name": "Python",
"bytes": "461377"
},
{
"name": "VHDL",
"bytes": "1728"
}
],
"symlink_target": ""
}
|
"""
Created on Fri Aug 22 12:01:47 2014
@author: pierre
"""
import argparse
import json
import random
import textwrap
import urllib.parse
import urllib.request

import urls
# How many repositories to print per query, and the deepest search-result
# page a random sample may be drawn from.
max_repos = 3
max_page=33
# Wrap width (characters) for printed repository descriptions.
WIDTH = 79
# Argument parser for the command-line interface.
parser = argparse.ArgumentParser()
def main(argv=None):
    """Parse command-line arguments for the script.

    Builds a fresh parser on every call instead of mutating the
    module-level ``parser``: repeatedly adding ``-l`` to a shared parser
    raises ``argparse.ArgumentError`` on the second call.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``.
    :return: ``argparse.Namespace`` with the required ``language`` attribute.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-l', '--language',
                     help='Your random Programming Language', required=True)
    return cli.parse_args(argv)
def load_top_repository(url, args):
    """Print the ``max_repos`` most-starred repositories for a language.

    Queries the GitHub search API at ``url``, sorted by stars descending,
    and hands each of the first ``max_repos`` items to ``process``.

    :param url: GitHub repository-search endpoint.
    :param args: parsed CLI namespace providing ``language``.
    """
    query = {
        'q': 'language:{0}'.format(args.language),
        'sort': 'stars',
        'order': 'desc'
    }
    # Close the HTTP response deterministically instead of leaking the socket.
    with urllib.request.urlopen(url + '?' + urllib.parse.urlencode(query)) as request:
        text = request.read().decode('utf-8')
    repos = json.loads(text)['items']
    for r in repos[:max_repos]:
        process(r)
def load_rand_repository(url, args):
    """Print ``max_repos`` repositories from a random search-result page.

    Picks a page between 2 and ``max_page`` of the GitHub search results
    for the requested language and hands the first ``max_repos`` items to
    ``process``.

    :param url: GitHub repository-search endpoint.
    :param args: parsed CLI namespace providing ``language``.
    """
    randpage = random.randint(2, max_page)
    query = {
        'q': 'language:{0}'.format(args.language),
        'page': randpage
    }
    # Close the HTTP response deterministically instead of leaking the socket.
    with urllib.request.urlopen(url + '?' + urllib.parse.urlencode(query)) as request:
        text = request.read().decode('utf-8')
    repos = json.loads(text)['items']
    for r in repos[:max_repos]:
        process(r)
def process(r):
    """Pretty-print one repository search result.

    :param r: repository dict (one ``items`` entry from the GitHub
        search API).
    """
    print('{o}/{n} (watchers: {w}, forks: {f}, updated: {u})'
          .format(o=r['owner']['login'], n=r['name'], w=r['watchers'],
                  f=r['forks'], u=r['pushed_at'][:10]))
    # GitHub returns None for repositories without a description; the
    # original crashed in textwrap.wrap in that case.
    desc = '\n'.join(textwrap.wrap(r['description'] or '', WIDTH))
    print('{d}'.format(d=desc))
    print('{url}'.format(url=r['url']), end=' ')
    if r.get('homepage'):
        print('/ {hp}'.format(hp=r['homepage']))
    else:
        # Terminate the URL line; the original left it open (end=' ') and
        # glued the "----------" separator onto the same line.
        print()
    print("----------\n")
if __name__ == '__main__':
    # Parse -l/--language, then show both the top-starred repositories and
    # a sample from a random results page for that language.
    args = main()
    load_top_repository(url=urls.GITHUB_SEARCH_REPO, args=args)
    load_rand_repository(url=urls.GITHUB_SEARCH_REPO, args=args)
|
{
"content_hash": "d7ac6bac57da2fa912832701c6651b7f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 99,
"avg_line_length": 28.234375,
"alnum_prop": 0.5882678472606531,
"repo_name": "bloodywing/rgithub",
"id": "f32caa4df031520e6f38a3c13e31b018b763e05d",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rgithub/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2134"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the Cohort, CohortSample and
        CohortVariant tables and their unique constraints (South-generated;
        statement order matters)."""
        # Adding model 'Cohort'
        db.create_table('cohort', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('context', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['avocado.DataContext'], unique=True, null=True, blank=True)),
            ('count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['samples.Project'], null=True, blank=True)),
            ('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('autocreated', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('samples', ['Cohort'])
        # Adding model 'CohortSample' (m2m "through" table between Cohort and Sample)
        db.create_table('cohort_sample', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('added', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('removed', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('object_set', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['samples.Cohort'], db_column='cohort_id')),
            ('set_object', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['samples.Sample'], db_column='sample_id')),
        ))
        db.send_create_signal('samples', ['CohortSample'])
        # Adding unique constraint on 'CohortSample', fields ['object_set', 'set_object']
        db.create_unique('cohort_sample', ['cohort_id', 'sample_id'])
        # Adding model 'CohortVariant'
        db.create_table('cohort_variant', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('variant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['variants.Variant'])),
            ('cohort', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['samples.Cohort'])),
            ('af', self.gf('django.db.models.fields.FloatField')(null=True, db_index=True)),
        ))
        db.send_create_signal('samples', ['CohortVariant'])
        # Adding unique constraint on 'CohortVariant', fields ['variant', 'cohort']
        db.create_unique('cohort_variant', ['variant_id', 'cohort_id'])
    def backwards(self, orm):
        """Revert the migration: drop the unique constraints first (they
        depend on the tables), then the three tables created by forwards()."""
        # Removing unique constraint on 'CohortVariant', fields ['variant', 'cohort']
        db.delete_unique('cohort_variant', ['variant_id', 'cohort_id'])
        # Removing unique constraint on 'CohortSample', fields ['object_set', 'set_object']
        db.delete_unique('cohort_sample', ['cohort_id', 'sample_id'])
        # Deleting model 'Cohort'
        db.delete_table('cohort')
        # Deleting model 'CohortSample'
        db.delete_table('cohort_sample')
        # Deleting model 'CohortVariant'
        db.delete_table('cohort_variant')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 15, 1, 22, 859081)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 15, 1, 22, 858860)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avocado.datacontext': {
'Meta': {'object_name': 'DataContext'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'composite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datacontext+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'genome.genome': {
'Meta': {'object_name': 'Genome', 'db_table': "'genome'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'released': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.genotype': {
'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
'samples.batch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'samples.cohort': {
'Meta': {'object_name': 'Cohort', 'db_table': "'cohort'"},
'autocreated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'context': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['avocado.DataContext']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Project']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'samples': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Sample']", 'through': "orm['samples.CohortSample']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'samples.cohortsample': {
'Meta': {'unique_together': "(('object_set', 'set_object'),)", 'object_name': 'CohortSample', 'db_table': "'cohort_sample'"},
'added': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']", 'db_column': "'cohort_id'"}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']", 'db_column': "'sample_id'"})
},
'samples.cohortvariant': {
'Meta': {'unique_together': "(('variant', 'cohort'),)", 'object_name': 'CohortVariant', 'db_table': "'cohort_variant'"},
'af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'cohort': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'samples.project': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'samples.relation': {
'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}),
'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'samples.result': {
'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"},
'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}),
'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"}),
'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.sample': {
'Meta': {'unique_together': "(('batch', 'name'),)", 'object_name': 'Sample', 'db_table': "'sample'"},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}),
'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'samples.samplerun': {
'Meta': {'object_name': 'SampleRun', 'db_table': "'sample_run'"},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'genome': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genome']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}),
'pos': ('django.db.models.fields.IntegerField', [], {}),
'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'})
},
'variants.variantphenotype': {
'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"},
'hgmd_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'variants.varianttype': {
'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['samples']
|
{
"content_hash": "c2c90786127313a2f105f1d02148ce04",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 192,
"avg_line_length": 80.25078369905957,
"alnum_prop": 0.5519140625,
"repo_name": "chop-dbhi/varify-data-warehouse",
"id": "a1d384f030b17526bb090215dcfca87e3c63bdc8",
"size": "25618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vdw/samples/migrations/0007_auto__add_cohort__add_cohortsample__add_unique_cohortsample_object_set.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Puppet",
"bytes": "14864"
},
{
"name": "Python",
"bytes": "1796480"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
}
|
import unittest
from app.trainer.processing import Processing
from app.trainer.pt_br_mapper import UNICODE_ACCENTED_LETTERS
class TestProcessing(unittest.TestCase):
    """Unit tests for the tweet-text ``Processing`` pipeline."""

    def test_lower_case(self):
        """lower_case() folds the whole tweet to lower case."""
        tweet = Processing("Processing CAN Be LoWer Case TweeT").lower_case()
        self.assertEqual(tweet, "processing can be lower case tweet")

    def test_accented_letters(self):
        """accented_letters() replaces each code from UNICODE_ACCENTED_LETTERS."""
        text = "tweet with accent {0} and another words"
        # Unpack the (accented, plain) pairs directly instead of indexing the
        # items() tuples; also fixes the 'text_proccessed' typo.
        for accented, plain in UNICODE_ACCENTED_LETTERS.items():
            text_processed = Processing(text.format(accented)).accented_letters()
            self.assertEqual(text_processed, text.format(plain))
|
{
"content_hash": "9f6f13015ff9915959cb1a2fd4ef2169",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 34.30434782608695,
"alnum_prop": 0.6856780735107731,
"repo_name": "fernandopso/mining-svm-tfidf",
"id": "cfcdc2697f160c22ef52b645e5999def50909908",
"size": "789",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/trainer/test_processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24330"
}
],
"symlink_target": ""
}
|
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
# TODO Tiger and later: need to set kWindowApplicationScaledAttribute for DPI
# independence?
from ctypes import *
import string
import math
from sys import byteorder
from pyglet.font import base
import pyglet.image
from pyglet.window.carbon import carbon, _oscheck
from pyglet.window.carbon import _create_cfstring
from pyglet.window.carbon.types import *
class FixedPoint(Structure):
    """ctypes mirror of Carbon's ``FixedPoint``: a 2-D point whose
    coordinates are Carbon ``Fixed`` fixed-point values (``Fixed`` comes
    from pyglet.window.carbon.types)."""
    _fields_ = [
        ('x', Fixed),
        ('y', Fixed)
    ]
class ATSTrapezoid(Structure):
    """The ATSTrapezoid returned by ATSUGetGlyphBounds: four FixedPoint
    corners of the typographic bounding trapezoid of a glyph run."""
    _fields_ = [
        ('upperLeft', FixedPoint),
        ('upperRight', FixedPoint),
        ('lowerRight', FixedPoint),
        ('lowerLeft', FixedPoint)
    ]
# TODO: most of the ATS and CG here not used any more.
# ctypes aliases for Carbon scalar typedefs.
CGGlyph = c_ushort
ATSUFontID = c_uint32
RGBColor = c_short * 3
ATSURGBAlphaColor = c_float * 4
# CoreGraphics bitmap/alpha constants.
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGTextFill = 0
# ATSUFindFontFromName returns this when the font is not installed.
kATSUInvalidFontErr = -8796
# ATS font context / filter selector constants.
kATSFontContextUnspecified = 0
kATSFontContextGlobal = 1
kATSFontContextLocal = 2
kATSFontFilterSelectorUnspecified = 0
kATSFontFilterSelectorGeneration = 3
kATSFontFilterSelectorFontFamily = 7
kATSFontFilterSelectorFontFamilyApplierFunction = 8
kATSFontFilterSelectorFontApplierFunction = 9
kATSOptionFlagsDoNotNotify = 0x00000001 << 8
kATSOptionFlagsIterationScopeMask = 0x00000007 << 12
kATSOptionFlagsDefaultScope = 0x00000000 << 12
kATSOptionFlagsUnRestrictedScope = 0x00000001 << 12
kATSOptionFlagsRestrictedScope = 0x00000002 << 12
kATSOptionFlagsProcessSubdirectories = 0x00000001 << 6
# "Whole text run" sentinels for the ATSUI range parameters.
kATSUFromTextBeginning = c_ulong(0xFFFFFFFF)
kATSUToTextEnd = c_ulong(0xFFFFFFFF)
kATSULineAscentTag = 8
kATSULineDescentTag = 9
ATSUTextMeasurement = Fixed
# ATSU attribute tags used when building styles/layouts below.
kATSUQDBoldfaceTag = 256
kATSUQDItalicTag = 257
kATSUFontTag = 261
kATSUSizeTag = 262
kATSUCGContextTag = 32767
kATSUColorTag = 263
kATSURGBAlphaColorTag = 288
kATSULineWidthTag = 1
kFontFullName = 4
kFontNoPlatformCode = c_ulong(-1)
kFontNoScriptCode = c_ulong(-1)
kFontNoLanguageCode = c_ulong(-1)
kATSUseDeviceOrigins = 1
kATSFontFormatUnspecified = 0
# NOTE(review): duplicate assignment -- kATSFontContextLocal is already
# defined (same value) above; presumably an accidental repeat.
kATSFontContextLocal = 2
# Declare return types so ctypes does not truncate the pointers to int.
carbon.CGColorSpaceCreateWithName.restype = c_void_p
carbon.CGBitmapContextCreate.restype = POINTER(c_void_p)
UniCharArrayOffset = c_uint32
UniCharCount = c_uint32
def fixed(value):
    """Convert *value* to a Carbon 16.16 ``Fixed`` via Long2Fix."""
    # This is a guess... could easily be wrong
    # (the commented-out alternative was: c_int32(int(value) * (1 << 16)))
    as_long = c_long(int(value))
    return c_int32(carbon.Long2Fix(as_long))
# Fix2X returns a C double; declare the restype so ctypes does not
# truncate the result to an int.
carbon.Fix2X.restype = c_double
def fix2float(value):
    """Inverse of fixed(): convert a 16.16 ``Fixed`` to a Python float."""
    return carbon.Fix2X(value)
def create_atsu_style(attributes):
    """Create an ATSU style from *attributes*, a dict mapping
    ATSUAttributeTag -> ctypes value, and return the style handle."""
    # keys() and values() iterate in matching order, so the parallel
    # arrays stay aligned.
    tag_list = list(attributes.keys())
    value_list = list(attributes.values())
    count = len(tag_list)
    tag_array = (c_int * count)(*tag_list)
    size_array = (c_uint * count)(*(sizeof(v) for v in value_list))
    value_array = (c_void_p * count)(
        *(cast(pointer(v), c_void_p) for v in value_list))
    style = c_void_p()
    carbon.ATSUCreateStyle(byref(style))
    carbon.ATSUSetAttributes(style, count, tag_array, size_array, value_array)
    return style
def set_layout_attributes(layout, attributes):
    """Apply *attributes* (ATSUAttributeTag -> ctypes value) to an ATSU
    text layout.  An empty/false *attributes* is a no-op."""
    if not attributes:
        return
    tag_list = list(attributes.keys())
    value_list = list(attributes.values())
    count = len(tag_list)
    tag_array = (c_int * count)(*tag_list)
    size_array = (c_uint * count)(*(sizeof(v) for v in value_list))
    value_array = (c_void_p * count)(
        *(cast(pointer(v), c_void_p) for v in value_list))
    carbon.ATSUSetLayoutControls(layout, count, tag_array, size_array,
        value_array)
def str_ucs2(text):
    """Encode *text* as native-endian UTF-16 and return it as a
    NUL-terminated ctypes string buffer.

    An endian-specific codec is chosen explicitly so no BOM is emitted.
    """
    if byteorder == 'big':
        encoded = text.encode('utf_16_be')
    else:
        encoded = text.encode('utf_16_le')  # explicit endian avoids BOM
    # Use b'\0' rather than the text literal '\0': identical under
    # Python 2 (where str is bytes) and required under Python 3, where
    # encode() returns bytes and bytes + str raises TypeError.
    return create_string_buffer(encoded + b'\0')
class CarbonGlyphRenderer(base.GlyphRenderer):
    """Render individual glyphs for a CarbonFont by drawing them with
    ATSUI into a reusable Quartz bitmap context, then uploading the
    bitmap as a pyglet image region."""
    # Reusable render target, lazily (re)created by _create_bitmap_context.
    _bitmap = None
    _bitmap_context = None
    _bitmap_rect = None
    def __init__(self, font):
        super(CarbonGlyphRenderer, self).__init__(font)
        # 256x256 initial bitmap; render() grows it on demand.
        self._create_bitmap_context(256, 256)
        self.font = font
    def __del__(self):
        # Release the Quartz context; the ctypes bitmap is freed by GC.
        if self._bitmap_context:
            carbon.CGContextRelease(self._bitmap_context)
    def render(self, text):
        """Lay out *text* with ATSUI, draw it, and return a glyph with
        bearings and advance set from the measured bounds."""
        # Convert text to UCS2
        text_len = len(text)
        text = str_ucs2(text)
        # Create ATSU text layout for this text and font
        layout = c_void_p()
        carbon.ATSUCreateTextLayout(byref(layout))
        set_layout_attributes(layout, {
            kATSUCGContextTag: self._bitmap_context})
        carbon.ATSUSetTextPointerLocation(layout,
            text,
            kATSUFromTextBeginning,
            kATSUToTextEnd,
            text_len)
        carbon.ATSUSetRunStyle(layout, self.font.atsu_style,
            kATSUFromTextBeginning, kATSUToTextEnd)
        # Turning on transient font matching screws up font layout
        # predictability when strange fonts are installed
        carbon.ATSUSetTransientFontMatching(layout, False)
        # Get bitmap dimensions required
        rect = Rect()
        carbon.ATSUMeasureTextImage(layout,
            kATSUFromTextBeginning,
            kATSUToTextEnd,
            0, 0,
            byref(rect))
        # +2 / +1 leave a 1-pixel border around the inked area.
        image_width = rect.right - rect.left + 2
        image_height = rect.bottom - rect.top + 2
        baseline = rect.bottom + 1
        lsb = rect.left
        # Resize Quartz context if necessary
        if (image_width > self._bitmap_rect.size.width or
            image_height > self._bitmap_rect.size.height):
            self._create_bitmap_context(
                int(max(image_width, self._bitmap_rect.size.width)),
                int(max(image_height, self._bitmap_rect.size.height)))
            # Re-point the layout at the freshly created context.
            set_layout_attributes(layout, {
                kATSUCGContextTag: self._bitmap_context})
        # Get typographic box, which gives advance.
        bounds_actual = c_uint32()
        bounds = ATSTrapezoid()
        carbon.ATSUGetGlyphBounds(
            layout,
            0, 0,
            kATSUFromTextBeginning,
            kATSUToTextEnd,
            kATSUseDeviceOrigins,
            1,
            byref(bounds),
            byref(bounds_actual))
        advance = fix2float(bounds.lowerRight.x) - fix2float(bounds.lowerLeft.x)
        # Draw to the bitmap
        carbon.CGContextClearRect(self._bitmap_context, self._bitmap_rect)
        carbon.ATSUDrawText(layout,
            0,
            kATSUToTextEnd,
            fixed(-lsb + 1), fixed(baseline))
        # A negative pitch is required, but it is much faster to load the
        # glyph upside-down and flip the tex_coords. Note region used
        # to start at top of glyph image.
        pitch = int(4 * self._bitmap_rect.size.width)
        image = pyglet.image.ImageData(image_width,
            self._bitmap_rect.size.height, 'RGBA', self._bitmap, pitch)
        skip_rows = int(self._bitmap_rect.size.height - image_height)
        image = image.get_region(0, skip_rows, image.width, image_height)
        glyph = self.font.create_glyph(image)
        glyph.set_bearings(baseline, lsb - 1, int(advance))
        # Flip the texture vertically by reordering the four corners.
        t = list(glyph.tex_coords)
        glyph.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]
        return glyph
    def _create_bitmap_context(self, width, height):
        '''Create or recreate bitmap and Quartz context.'''
        if self._bitmap_context:
            carbon.CGContextRelease(self._bitmap_context)
        # 4 bytes per pixel: RGBA, 8 bits per component.
        components = 4
        pitch = width * components
        self._bitmap = (c_ubyte * (pitch * height))()
        color_space = carbon.CGColorSpaceCreateDeviceRGB()
        context = carbon.CGBitmapContextCreate(self._bitmap,
            width, height, 8, pitch,
            color_space, kCGImageAlphaPremultipliedLast)
        carbon.CGColorSpaceRelease(color_space)
        # Disable RGB decimated antialiasing, use standard
        # antialiasing which won't break alpha.
        carbon.CGContextSetShouldSmoothFonts(context, False)
        carbon.CGContextSetShouldAntialias(context, True)
        self._bitmap_context = context
        self._bitmap_rect = CGRect()
        self._bitmap_rect.origin.x = 0
        self._bitmap_rect.origin.y = 0
        self._bitmap_rect.size.width = width
        self._bitmap_rect.size.height = height
class CarbonFont(base.Font):
    """A pyglet font backed by an ATSUI style on Mac OS X (Carbon)."""
    glyph_renderer_class = CarbonGlyphRenderer
    def __init__(self, name, size, bold=False, italic=False, dpi=None):
        """Look up *name* via ATSUFindFontFromName and build the ATSU
        style used for all subsequent layout/rendering."""
        super(CarbonFont, self).__init__()
        if not name:
            name = 'Helvetica'
        if dpi is not None:
            # If application is not DPI-aware, DPI is fixed at 72. Scale
            # font size to emulate other DPI if necessary. This will need
            # to be fixed if issue #87 is implemented.
            size = size * dpi / 72.
        font_id = ATSUFontID()
        carbon.ATSUFindFontFromName(
            name,
            len(name),
            kFontFullName,
            kFontNoPlatformCode,
            kFontNoScriptCode,
            kFontNoLanguageCode,
            byref(font_id))
        # White text; bold/italic are synthesized via the QD tags.
        attributes = {
            kATSUSizeTag: fixed(size),
            kATSUFontTag: font_id,
            kATSURGBAlphaColorTag: ATSURGBAlphaColor(1, 1, 1, 1),
            kATSUQDBoldfaceTag: c_byte(bold),
            kATSUQDItalicTag: c_byte(italic)
        }
        self.atsu_style = create_atsu_style(attributes)
        self.calculate_metrics()
    @classmethod
    def have_font(cls, name):
        """Return True if a font with full name *name* is installed."""
        font_id = ATSUFontID()
        r = carbon.ATSUFindFontFromName(
            name,
            len(name),
            kFontFullName,
            kFontNoPlatformCode,
            kFontNoScriptCode,
            kFontNoLanguageCode,
            byref(font_id))
        return r != kATSUInvalidFontErr
    def calculate_metrics(self):
        """Set self.ascent / self.descent by laying out a sample glyph."""
        # It seems the only way to get the font's ascent and descent is to lay
        # out some glyphs and measure them.
        # fake ucs2 string
        text = '\0a'
        layout = c_void_p()
        carbon.ATSUCreateTextLayout(byref(layout))
        carbon.ATSUSetTextPointerLocation(layout, text,
            kATSUFromTextBeginning, kATSUToTextEnd, 1)
        carbon.ATSUSetRunStyle(layout, self.atsu_style,
            kATSUFromTextBeginning, kATSUToTextEnd)
        # determine the metrics for this font only
        carbon.ATSUSetTransientFontMatching(layout, False)
        value = ATSUTextMeasurement()
        carbon.ATSUGetLineControl(layout, 0, kATSULineAscentTag,
            sizeof(value), byref(value), None)
        self.ascent = int(math.ceil(fix2float(value)))
        carbon.ATSUGetLineControl(layout, 0, kATSULineDescentTag,
            sizeof(value), byref(value), None)
        # Descent is stored negative (below the baseline).
        self.descent = -int(math.ceil(fix2float(value)))
    @classmethod
    def add_font_data(cls, data):
        """Activate an in-memory font (e.g. a TTF blob) for this process."""
        container = c_void_p()
        r = carbon.ATSFontActivateFromMemory(data, len(data),
            kATSFontContextLocal, kATSFontFormatUnspecified, None, 0,
            byref(container))
        _oscheck(r)
|
{
"content_hash": "f20645ecec8aa1873b269b63e382e58e",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 80,
"avg_line_length": 33.03235294117647,
"alnum_prop": 0.6247885317424985,
"repo_name": "gnulinooks/sympy",
"id": "27f27c2d57fb70169dd7e43bd300fd84cca2ff3b",
"size": "12954",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sympy/thirdparty/pyglet/pyglet/font/carbon.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
The `*gegv` family of routines have been removed from LAPACK 3.6.0
and have been deprecated in SciPy 0.17.0. They will be removed in
a future release.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgegv
dgegv
cgegv
zgegv
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesvd
dgesvd
cgesvd
zgesvd
sgesvd_lwork
dgesvd_lwork
cgesvd_lwork
zgesvd_lwork
sgesv
dgesv
cgesv
zgesv
sgesvx
dgesvx
cgesvx
zgesvx
sgecon
dgecon
cgecon
zgecon
ssysv
dsysv
csysv
zsysv
ssysv_lwork
dsysv_lwork
csysv_lwork
zsysv_lwork
ssysvx
dsysvx
csysvx
zsysvx
ssysvx_lwork
dsysvx_lwork
csysvx_lwork
zsysvx_lwork
chesv
zhesv
chesv_lwork
zhesv_lwork
chesvx
zhesvx
chesvx_lwork
zhesvx_lwork
sgetrf
dgetrf
cgetrf
zgetrf
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
chbevd
zhbevd
chbevx
zhbevx
cheev
zheev
cheevd
zheevd
cheevr
zheevr
chegv
zhegv
chegvd
zhegvd
chegvx
zhegvx
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
sposv
dposv
cposv
zposv
sposvx
dposvx
cposvx
zposvx
spocon
dpocon
cpocon
zpocon
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
crot
zrot
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
cunghr
zunghr
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgtsv
dgtsv
cgtsv
zgtsv
sptsv
dptsv
cptsv
zptsv
slamch
dlamch
sorghr
dorghr
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssyev
dsyev
ssyevd
dsyevd
ssyevr
dsyevr
ssygv
dsygv
ssygvd
dsygvd
ssygvx
dsygvx
slange
dlange
clange
zlange
ilaver
"""
#
# Author: Pearu Peterson, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_lapack_funcs']
import numpy as _np
from .blas import _get_funcs
# Backward compatibility:
from .blas import find_best_blas_type as find_best_lapack_type
from scipy.linalg import _flapack
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
# Backward compatibility
from scipy._lib._util import DeprecatedImport as _DeprecatedImport
clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
# Wrap the *gegv wrappers with a DeprecationWarning (removed from
# LAPACK 3.6.0; see module docstring).
_dep_message = """The `*gegv` family of routines has been deprecated in
LAPACK 3.6.0 in favor of the `*ggev` family of routines.
The corresponding wrappers will be removed from SciPy in
a future release."""
cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)
dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)
sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)
zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)
# Modify _flapack in this scope so the deprecation warnings apply to
# functions returned by get_lapack_funcs.
_flapack.cgegv = cgegv
_flapack.dgegv = dgegv
_flapack.sgegv = sgegv
_flapack.zgegv = zgegv
# some convenience alias for complex functions
# (complex LAPACK uses the un*/or* naming split; map or* names to un*).
_lapack_alias = {
    'corghr': 'cunghr', 'zorghr': 'zunghr',
    'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
    'corgqr': 'cungqr', 'zorgqr': 'zungqr',
    'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
    'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
def get_lapack_funcs(names, arrays=(), dtype=None):
    """Return available LAPACK function objects from names.
    Arrays are used to determine the optimal prefix of LAPACK routines.
    Parameters
    ----------
    names : str or sequence of str
        Name(s) of LAPACK functions without type prefix.
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of LAPACK
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.
    Returns
    -------
    funcs : list
        List containing the found function(s).
    Notes
    -----
    This routine automatically chooses between Fortran/C
    interfaces. Fortran code is used whenever possible for arrays with
    column major order. In all other cases, C code is preferred.
    In LAPACK, the naming convention is that all functions start with a
    type prefix, which depends on the type of the principal
    matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
    types {float32, float64, complex64, complex128} respectively, and
    are stored in attribute `typecode` of the returned functions.
    """
    return _get_funcs(names, arrays, dtype,
                      "LAPACK", _flapack, _clapack,
                      "flapack", "clapack", _lapack_alias)
def _compute_lwork(routine, *args, **kwargs):
"""
Round floating-point lwork returned by lapack to integer.
Several LAPACK routines compute optimal values for LWORK, which
they return in a floating-point variable. However, for large
values of LWORK, single-precision floating point is not sufficient
to hold the exact value --- some LAPACK versions (<= 3.5.0 at
least) truncate the returned integer to single precision and in
some cases this can be smaller than the required value.
"""
wi = routine(*args, **kwargs)
if len(wi) < 2:
raise ValueError('')
info = wi[-1]
if info != 0:
raise ValueError("Internal work array size computation failed: "
"%d" % (info,))
lwork = [w.real for w in wi[:-1]]
dtype = getattr(routine, 'dtype', None)
if dtype == _np.float32 or dtype == _np.complex64:
# Single-precision routine -- take next fp value to work
# around possible truncation in LAPACK code
lwork = _np.nextafter(lwork, _np.inf, dtype=_np.float32)
lwork = _np.array(lwork, _np.int64)
if _np.any(_np.logical_or(lwork < 0, lwork > _np.iinfo(_np.int32).max)):
raise ValueError("Too large work array required -- computation cannot "
"be performed with standard 32-bit LAPACK.")
lwork = lwork.astype(_np.int32)
if lwork.size == 1:
return lwork[0]
return lwork
|
{
"content_hash": "906e02e3d471244f6d953132d0c6324a",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 79,
"avg_line_length": 15.73394495412844,
"alnum_prop": 0.642332361516035,
"repo_name": "josephcslater/scipy",
"id": "b4e422e473f3dffc30043056f009024a94191f89",
"size": "8575",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "scipy/linalg/lapack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4126152"
},
{
"name": "C++",
"bytes": "507246"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11519988"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
__all__ = [
'IdentityDecoder',
'ChunkedDecoder',
'DeflateDecoder',
'CompoundDecoder',
]
from kitsu.http.errors import *
from kitsu.http.headers import *
from kitsu.http.parsers import *
class IdentityDecoder(Parser):
    """Decode an identity (unencoded) entity body.

    With a known length only that many bytes are consumed and any
    surplus is pushed back to the parser; with length=None every byte
    of input is treated as body (read until EOF).
    """
    def __init__(self, length=None):
        # Remaining body byte count, or None for "read until EOF".
        self.length = length
    def parseRaw(self, data):
        if self.length is None:
            # Unbounded body: everything is content.
            body = data
            data = ''
        elif not self.length:
            # Nothing left to consume; push all input back.
            body = ''
        else:
            body = data[:self.length]
            data = data[self.length:]
            self.length -= len(body)
            if not self.length:
                self.done = True
        if data:
            self.prepend(data)
        return (body,) if body else ()
    def finish(self):
        # A known length with bytes still outstanding means the stream
        # ended early.
        if not self.done:
            self.done = True
            if self.length:
                raise HTTPDataError("not enough data for content body")
        return ()
class ChunkedDecoder(LineParser):
    """Decode a body in HTTP/1.1 chunked transfer coding.

    Alternates between line mode (chunk-size lines, trailer headers)
    and data mode (chunk payloads).  Body chunks are yielded as
    strings; trailer headers are yielded as a Headers object.
    """
    def __init__(self):
        # Bytes left in the current chunk; None means "expecting a
        # chunk-size line", 0 means "expecting the chunk-ending CRLF".
        self.length = None
        # Chunk extensions of the most recent chunk-size line, if any.
        self.extensions = None
        # Headers instance while reading the trailer, else None.
        self.headers = None
    def parseLine(self, line):
        if self.headers is not None:
            # Reading trailer headers
            if not self.headers.parseLine(line):
                self.done = True
                return (self.headers,)
        elif self.length == 0:
            # Just finished reading chunk
            if line:
                raise HTTPDataError("chunk data must end with '\\r\\n'")
            self.length = None
        else:
            # Reading chunk header: "<hex-size>[;extensions]"
            parts = line.split(';', 1)
            length = int(parts[0], 16)
            if len(parts) >= 2:
                # Bug fix: the extensions are everything after the
                # first ';' (parts[1]); the old code stored parts[0],
                # i.e. the chunk size itself.
                extensions = parts[1].strip()
            else:
                extensions = None
            self.length = length
            self.extensions = extensions
            if self.length == 0:
                # Start reading trailer headers
                self.headers = Headers()
            else:
                # Start reading chunk data
                self.setDataMode()
        return ()
    def parseData(self, data):
        body, data = data[:self.length], data[self.length:]
        self.length -= len(body)
        if self.length == 0:
            # Chunk complete: back to line mode for the trailing CRLF.
            self.setLineMode(data)
        return (body,)
    def finish(self):
        if not self.done:
            self.done = True
            raise HTTPDataError("not enough data for chunked body")
        return ()
class DeflateDecoder(Parser):
    """Decode a zlib/deflate compressed entity body."""
    def __init__(self):
        # Imported lazily so zlib is only needed when actually used.
        from zlib import decompressobj
        self.obj = decompressobj()
    def parseRaw(self, data):
        inflated = self.obj.decompress(data)
        return (inflated,) if inflated else ()
    def finish(self):
        if self.done:
            return ()
        self.done = True
        tail = self.obj.flush()
        # Bytes past the end of the compressed stream belong to the
        # next parser in line.
        self.prepend(self.obj.unused_data)
        self.obj = None
        return (tail,) if tail else ()
class CompoundDecoder(Parser):
    """Chain several decoders: output chunks of each decoder are fed as
    input to the next one in the list."""
    def __init__(self, *args):
        # decoders[0] is the outermost (wire-format) decoder.
        self.decoders = list(args)
    def _process(self, chunks, finish=False):
        """Push *chunks* through every decoder in order; when *finish*
        is true also flush each decoder.  Non-string chunks (e.g.
        trailer Headers) pass through decoders untouched."""
        first = self.decoders[0]
        for decoder in self.decoders:
            output = []
            for chunk in chunks:
                if isinstance(chunk, basestring):
                    output.extend(decoder.parse(chunk))
                else:
                    output.append(chunk)
            if finish:
                output.extend(decoder.finish())
            if decoder is first:
                # The compound is done as soon as the outer decoder is.
                self.done = self.done or decoder.done
            chunks = output
        return chunks
    def clear(self):
        # Leftover raw input lives in the outermost decoder.
        return self.decoders[0].clear()
    def parseRaw(self, data):
        result = self._process((data,))
        if self.done:
            # Outer decoder finished
            # Chain finish calls
            result = list(result)
            result.extend(self._process((), True))
        return result
    def finish(self):
        if not self.done:
            self.done = True
            return self._process((), True)
        return ()
# NOTE(review): the attributes and classmethod below reference `cls`
# and appear at module level -- this looks like a class body whose
# enclosing `class` statement was lost (presumably a decoder-factory
# on CompoundDecoder or similar); confirm against the original source.
# Methods / response codes whose responses never carry a body.
requestMethodsWithoutBody = frozenset(('HEAD', 'CONNECT'))
responseCodesWithoutBody = frozenset((204, 304))
@classmethod
def from_response(cls, request, response):
    """Build a decoder chain for *response*'s body from its
    Content-Length and Transfer-Encoding headers; return None when the
    response cannot carry a body."""
    # process Content-Length
    if getattr(request, 'ignore_content_length', False):
        contentLength = None
    else:
        # Use the last Content-Length header if several are present.
        contentLength = response.headers.getlist('Content-Length')
        if contentLength:
            contentLength = contentLength[-1]
            if contentLength:
                try:
                    contentLength = int(contentLength)
                except ValueError:
                    raise HTTPDataError("invalid Content-Length header")
            else:
                contentLength = None
        else:
            contentLength = None
    if request.method in cls.requestMethodsWithoutBody:
        contentLength = 0
    if contentLength is None and response.code in cls.responseCodesWithoutBody:
        contentLength = 0
    if contentLength == 0:
        return None
    # process Transfer-Encoding
    encodings = response.headers.get('Transfer-Encoding')
    if encodings is None:
        encodings = 'identity'
    # Encodings are listed in application order; decode in reverse.
    encodings = [encoding.strip() for encoding in encodings.split(',')]
    encodings.reverse()
    decoders = []
    baseDecoderFound = False
    for encoding in encodings:
        encoding = encoding.split(';', 1)[0] # strip parameters
        encoding = encoding.strip().lower()
        if encoding == 'chunked':
            if decoders:
                raise HTTPDataError("'chunked' must be the last Transfer-Encoding in chain")
            decoders.append(ChunkedDecoder())
            baseDecoderFound = True
        elif encoding == 'identity':
            if decoders:
                raise HTTPDataError("'identity' must be the last Transfer-Encoding in chain")
            decoders.append(IdentityDecoder(contentLength))
            baseDecoderFound = True
        elif encoding == 'deflate':
            decoders.append(DeflateDecoder())
        else:
            # TODO: implement gzip, bzip2?
            raise HTTPDataError("no decoder for Transfer-Encoding %r" % (encoding,))
    if not baseDecoderFound:
        # Don't fail if identity not specified
        decoders.insert(0, IdentityDecoder(contentLength))
    return cls(*decoders)
|
{
"content_hash": "01558817105b0fad9f774f8b23976603",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 97,
"avg_line_length": 32.566502463054185,
"alnum_prop": 0.5315383451822719,
"repo_name": "snaury/kitsu.http",
"id": "52c368590af29547715109f7818d83ee179123f8",
"size": "6611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsu/http/decoders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63479"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
# source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys, os, re
import traceback, platform
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from electrum import util
# Pick a monospace font family that exists on each platform; used as
# the console widget's default document font.
if platform.system() == 'Windows':
    MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
    MONOSPACE_FONT = 'Monaco'
else:
    MONOSPACE_FONT = 'monospace'
class Console(QtWidgets.QPlainTextEdit):
    """An interactive Python console embedded in a Qt plain-text widget.

    Commands are evaluated (or exec'd) in ``self.namespace``; multi-line
    constructs are accumulated in ``self.construct`` until a blank line
    ends them.  Tab triggers name completion against the namespace.
    """
    def __init__(self, prompt='>> ', startup_message='', parent=None):
        QtWidgets.QPlainTextEdit.__init__(self, parent)
        self.prompt = prompt
        self.history = []
        # Globals/locals dict the user's commands run in.
        self.namespace = {}
        # Pending lines of an unfinished multi-line construct.
        self.construct = []
        self.setGeometry(50, 75, 600, 400)
        self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
        self.setUndoRedoEnabled(False)
        self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
        self.showMessage(startup_message)
        self.updateNamespace({'run':self.run_script})
        self.set_json(False)
    def set_json(self, b):
        # When true, the next command's result is printed as JSON.
        self.is_json = b
    def run_script(self, filename):
        """Exposed to the user as run(filename): evaluate a file's
        contents in the console namespace."""
        with open(filename) as f:
            script = f.read()
        # eval is generally considered bad practice. use it wisely!
        result = eval(script, self.namespace, self.namespace)
    def updateNamespace(self, namespace):
        """Merge *namespace* (a dict) into the evaluation namespace."""
        self.namespace.update(namespace)
    def showMessage(self, message):
        self.appendPlainText(message)
        self.newPrompt()
    def clear(self):
        self.setPlainText('')
        self.newPrompt()
    def newPrompt(self):
        """Append a fresh prompt (dots while inside a construct)."""
        if self.construct:
            prompt = '.' * len(self.prompt)
        else:
            prompt = self.prompt
        self.completions_pos = self.textCursor().position()
        self.completions_visible = False
        self.appendPlainText(prompt)
        self.moveCursor(QtGui.QTextCursor.End)
    def getCommand(self):
        """Return the text of the current input line, prompt stripped."""
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        curr_line = curr_line.rstrip()
        curr_line = curr_line[len(self.prompt):]
        return curr_line
    def setCommand(self, command):
        """Replace the current input line's text with *command*."""
        if self.getCommand() == command:
            return
        doc = self.document()
        curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
        self.moveCursor(QtGui.QTextCursor.End)
        # Select everything after the prompt, then overwrite it.
        for i in range(len(curr_line) - len(self.prompt)):
            self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
        self.textCursor().removeSelectedText()
        self.textCursor().insertText(command)
        self.moveCursor(QtGui.QTextCursor.End)
    def show_completions(self, completions):
        """Insert a (truncated) completion list below the input line."""
        if self.completions_visible:
            self.hide_completions()
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        completions = map(lambda x: x.split('.')[-1], completions)
        t = '\n' + ' '.join(completions)
        if len(t) > 500:
            t = t[:500] + '...'
        c.insertText(t)
        self.completions_end = c.position()
        self.moveCursor(QtGui.QTextCursor.End)
        self.completions_visible = True
    def hide_completions(self):
        """Delete the completion list inserted by show_completions."""
        if not self.completions_visible:
            return
        c = self.textCursor()
        c.setPosition(self.completions_pos)
        l = self.completions_end - self.completions_pos
        for x in range(l): c.deleteChar()
        self.moveCursor(QtGui.QTextCursor.End)
        self.completions_visible = False
    def getConstruct(self, command):
        """Accumulate multi-line constructs; return the full construct
        once a blank line ends it, '' while still collecting, or the
        command itself for plain single-line input."""
        if self.construct:
            prev_command = self.construct[-1]
            self.construct.append(command)
            if not prev_command and not command:
                ret_val = '\n'.join(self.construct)
                self.construct = []
                return ret_val
            else:
                return ''
        else:
            # A trailing ':' starts a new multi-line construct.
            if command and command[-1] == (':'):
                self.construct.append(command)
                return ''
            else:
                return command
    def getHistory(self):
        return self.history
    def setHisory(self, history):
        # NOTE(review): name is misspelled ("setHisory") but kept --
        # renaming would break any external callers.
        self.history = history
    def addToHistory(self, command):
        # Commands starting with a space are deliberately not recorded.
        if command[0:1] == ' ':
            return
        if command and (not self.history or self.history[-1] != command):
            self.history.append(command)
        self.history_index = len(self.history)
    def getPrevHistoryEntry(self):
        if self.history:
            self.history_index = max(0, self.history_index - 1)
            return self.history[self.history_index]
        return ''
    def getNextHistoryEntry(self):
        if self.history:
            hist_len = len(self.history)
            self.history_index = min(hist_len, self.history_index + 1)
            if self.history_index < hist_len:
                return self.history[self.history_index]
        return ''
    def getCursorPosition(self):
        """Cursor offset within the current input line (prompt excluded)."""
        c = self.textCursor()
        return c.position() - c.block().position() - len(self.prompt)
    def setCursorPosition(self, position):
        self.moveCursor(QtGui.QTextCursor.StartOfLine)
        for i in range(len(self.prompt) + position):
            self.moveCursor(QtGui.QTextCursor.Right)
    def register_command(self, c, func):
        """Expose *func* in the console namespace under name *c*."""
        methods = { c: func}
        self.updateNamespace(methods)
    def runCommand(self):
        """Evaluate the current line, echoing output and results."""
        command = self.getCommand()
        self.addToHistory(command)
        command = self.getConstruct(command)
        if command:
            tmp_stdout = sys.stdout
            # Redirect print output into the widget while the command runs.
            class stdoutProxy():
                def __init__(self, write_func):
                    self.write_func = write_func
                    self.skip = False
                def flush(self):
                    pass
                def write(self, text):
                    # Every other write is skipped -- presumably to drop
                    # the separate '\n' writes print emits; confirm.
                    if not self.skip:
                        stripped_text = text.rstrip('\n')
                        self.write_func(stripped_text)
                        QtCore.QCoreApplication.processEvents()
                    self.skip = not self.skip
            if type(self.namespace.get(command)) == type(lambda:None):
                self.appendPlainText("'%s' is a function. Type '%s()' to use it in the Python console."%(command, command))
                self.newPrompt()
                return
            sys.stdout = stdoutProxy(self.appendPlainText)
            try:
                try:
                    # eval is generally considered bad practice. use it wisely!
                    result = eval(command, self.namespace, self.namespace)
                    if result != None:
                        if self.is_json:
                            util.print_msg(util.json_encode(result))
                        else:
                            self.appendPlainText(repr(result))
                except SyntaxError:
                    # Statements (assignments, defs, ...) fail eval;
                    # fall back to exec.
                    # exec is generally considered bad practice. use it wisely!
                    exec(command, self.namespace, self.namespace)
            except SystemExit:
                self.close()
            except Exception:
                traceback_lines = traceback.format_exc().split('\n')
                # Remove traceback mentioning this file, and a linebreak
                for i in (3,2,1,-1):
                    traceback_lines.pop(i)
                self.appendPlainText('\n'.join(traceback_lines))
            sys.stdout = tmp_stdout
        self.newPrompt()
        self.set_json(False)
    def keyPressEvent(self, event):
        """Intercept editing keys: Tab completes, Enter runs, Up/Down
        walk history, Ctrl+L clears; everything else goes to Qt."""
        if event.key() == QtCore.Qt.Key_Tab:
            self.completions()
            return
        self.hide_completions()
        if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.runCommand()
            return
        if event.key() == QtCore.Qt.Key_Home:
            self.setCursorPosition(0)
            return
        if event.key() == QtCore.Qt.Key_PageUp:
            return
        elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
            # Don't let the cursor move/delete into the prompt.
            if self.getCursorPosition() == 0:
                return
        elif event.key() == QtCore.Qt.Key_Up:
            self.setCommand(self.getPrevHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_Down:
            self.setCommand(self.getNextHistoryEntry())
            return
        elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
            self.clear()
        super(Console, self).keyPressEvent(event)
    def completions(self):
        """Tab-complete the last word against the namespace (or, for
        dotted names, against dir() of the first path component)."""
        cmd = self.getCommand()
        lastword = re.split(' |\(|\)',cmd)[-1]
        beginning = cmd[0:-len(lastword)]
        path = lastword.split('.')
        ns = self.namespace.keys()
        if len(path) == 1:
            ns = ns
            prefix = ''
        else:
            obj = self.namespace.get(path[0])
            prefix = path[0] + '.'
            ns = dir(obj)
        completions = []
        for x in ns:
            # Skip private/dunder names.
            if x[0] == '_':continue
            xx = prefix + x
            if xx.startswith(lastword):
                completions.append(xx)
        completions.sort()
        if not completions:
            self.hide_completions()
        elif len(completions) == 1:
            self.hide_completions()
            self.setCommand(beginning + completions[0])
        else:
            # find common prefix
            p = os.path.commonprefix(completions)
            if len(p)>len(lastword):
                self.hide_completions()
                self.setCommand(beginning + p)
            else:
                self.show_completions(completions)
# Banner shown when the console is launched stand-alone.
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
# Stand-alone demo entry point: open a console with two sample names
# exposed in its namespace.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    console = Console(startup_message=welcome_message)
    console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
    console.show()
    sys.exit(app.exec_())
|
{
"content_hash": "c56673e44dae0f5caddc3144820eb030",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 123,
"avg_line_length": 31.96904024767802,
"alnum_prop": 0.5568467944993221,
"repo_name": "dabura667/electrum",
"id": "4fb7e0150387c84b588529449eb81bc390d4064d",
"size": "10326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/qt/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "837"
},
{
"name": "NSIS",
"bytes": "7125"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2373"
},
{
"name": "Python",
"bytes": "1313010"
},
{
"name": "Shell",
"bytes": "6888"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template, redirect, abort, Response, request
import os
from aura.core import ApplicationDescriptionParser, ApplicationDeployment
import json
from sys import argv
import uuid
from threading import Thread
import logging
from copy import deepcopy
app = Flask("aura")
class AURAContext:
    """Holds the global application/deployment state for the AURA web UI."""
    def __init__(self):
        # State is populated later via allocate(); nothing to do here.
        pass
    def allocate(self, conf):
        """Parse every application description listed in *conf* and
        index each under a freshly generated UUID."""
        self.applications = {}
        for path in conf['applications']:
            parser = ApplicationDescriptionParser(path)
            desc = parser.get_description()
            desc['path'] = path
            desc['id'] = str(uuid.uuid4())
            self.applications[desc['id']] = desc
        self.deployments = {}
        self.config = conf
    def refresh_applications(self):
        """Re-parse every known application description, keeping ids.

        Iterates over a snapshot of the ids: the previous code deleted
        and re-inserted keys while iterating the dict itself, which
        raises RuntimeError ("dictionary changed size during
        iteration").
        """
        for app_id in list(self.applications):
            app = self.applications[app_id]
            parser = ApplicationDescriptionParser(app['path'])
            desc = parser.get_description()
            desc['path'] = app['path']
            desc['id'] = app_id
            # Replace the stale description in place under the same id.
            self.applications[app_id] = desc
# NOTE(review): `global` at module level is a no-op -- the statement is
# only meaningful inside a function; the assignment alone suffices here.
global context
context = AURAContext()
# API
@app.route("/")
def index():
    # Landing page: jump straight to the application list.
    return redirect('/application/')
@app.route("/application/")
def application_list():
    # List every known application description.
    return render_template("application_list.html", apps = context.applications.values())
@app.route("/application/refresh")
def application_refresh():
    """Re-read every application description from disk, then return to the
    list page.  Application ids are preserved across a refresh."""
    context.refresh_applications()
    return redirect('/application/')
@app.route("/application/<app_id>")
def application_show(app_id):
    """Render the detail page for one application, or 404 if unknown."""
    application = context.applications.get(app_id)
    if application is None:
        abort(404)
    return render_template("application_view.html", app=application)
@app.route("/application/<app_id>/json")
def application_json(app_id):
    """Return the raw application description serialized as JSON.

    Previously the JSON body was returned as a bare string, which Flask
    serves with the default text/html content type; wrap it in a Response
    so clients see application/json.  (Response is already imported.)
    """
    if app_id not in context.applications:
        abort(404)
    return Response(json.dumps(context.applications[app_id]),
                    mimetype='application/json')
@app.route("/application/<app_id>/deploy")
def application_deploy(app_id):
    """Start a deployment of the given application in a background thread.

    Each query-string parameter maps a module name to its requested
    multiplicity (e.g. ?web=2&db=1).  Responds with a redirect to the
    freshly created deployment's page.
    """
    if app_id not in context.applications:
        abort(404)
    multiplicities = dict()
    for module_name in request.args:
        try:
            multiplicities[module_name] = int(request.args.get(module_name))
        except ValueError:
            # A non-numeric multiplicity is a client error (400), not an
            # unhandled exception (500).
            abort(400)
    parser = ApplicationDescriptionParser(context.applications[app_id]['path'])
    parser.set_multiplicities(multiplicities)
    desc = parser.expand_description()
    d = ApplicationDeployment(desc, context.config)
    deployment_id = str(uuid.uuid4())
    context.deployments[deployment_id] = d
    # Run asynchronously so the HTTP request returns immediately.
    t = Thread(target=d.run)
    t.start()
    return redirect("/deployments/%s" % (deployment_id))
@app.route("/deployments/")
def deployment_list():
    """Render an overview page listing every known deployment."""
    summaries = []
    for dep_id, deployment in context.deployments.items():
        summary = deployment.status()
        summary['id'] = dep_id
        summaries.append(summary)
    return render_template("deployment_list.html", deps=summaries)
@app.route("/deployments/<dep_id>")
def deployment_show(dep_id):
    """Render the detail page for one deployment, or 404 if unknown."""
    if dep_id not in context.deployments:
        abort(404)
    status = context.deployments[dep_id].status()
    status['id'] = dep_id
    return render_template("deployment_view.html", deployment=status)
@app.route("/deployments/<dep_id>/delete")
def deployment_delete(dep_id):
    """Tear down a deployment and remove it from the registry.

    The entry is only removed after delete() returns, so a failed
    teardown leaves the deployment visible and retryable.
    """
    if dep_id not in context.deployments:
        abort(404)
    deployment = context.deployments[dep_id]
    deployment.delete()
    del context.deployments[dep_id]
    return redirect("/deployments/")
@app.route("/deployments/<dep_id>/status")
def deployment_status(dep_id):
    """Return the deployment's status dict serialized as JSON."""
    if dep_id not in context.deployments:
        abort(404)
    deployment = context.deployments[dep_id]
    return json.dumps(deployment.status())
@app.route("/deployments/<dep_id>/<module_name>/<script_seq>/<log_type>/")
def deployment_script_logs(dep_id, module_name, script_seq, log_type):
    """Serve one script's stdout/stderr log as a plain-text download.

    Returns "Nothing yet" when the script matched but has not produced
    that log, and "Not found" when no module/script/log_type matched.
    """
    if dep_id not in context.deployments:
        abort(404)
    status = context.deployments[dep_id].status()
    for module in status['modules']:
        if module['name'] != module_name:
            continue
        for script in module['scripts']:
            if str(script['seq']) != script_seq:
                continue
            if log_type != 'stdout' and log_type != 'stderr':
                continue
            if log_type not in script:
                return "Nothing yet"
            disposition = "attachment;filename=%s_%s_%s_%s.log" % (
                dep_id, module_name, script_seq, log_type)
            return Response(script[log_type], mimetype='text/plain',
                            headers={"Content-Disposition": disposition})
    return "Not found"
@app.route("/about/")
def about():
    """Render the static about page."""
    return render_template("about.html")
|
{
"content_hash": "8b3aa3735e5335ab597a0a7a2a014a8b",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 193,
"avg_line_length": 32.40714285714286,
"alnum_prop": 0.6473440599515098,
"repo_name": "giagiannis/aura",
"id": "69ae7ae1b170e80083a397208e02fd2b9f023a02",
"size": "4556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aura/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1953"
},
{
"name": "HTML",
"bytes": "6916"
},
{
"name": "JavaScript",
"bytes": "6843"
},
{
"name": "Python",
"bytes": "27472"
},
{
"name": "Shell",
"bytes": "8089"
}
],
"symlink_target": ""
}
|
"""Unit tests for instrumentation_test_instance."""
# pylint: disable=protected-access
import collections
import tempfile
import unittest
from pylib.base import base_test_result
from pylib.instrumentation import instrumentation_test_instance
import mock # pylint: disable=import-error
_INSTRUMENTATION_TEST_INSTANCE_PATH = (
'pylib.instrumentation.instrumentation_test_instance.%s')
class InstrumentationTestInstanceTest(unittest.TestCase):
def setUp(self):
options = mock.Mock()
options.tool = ''
  @staticmethod
  def createTestInstance():
    """Build an InstrumentationTestInstance with every attribute
    initializer patched out.

    Patching the _initialize* methods lets tests construct the instance
    without real apks, data dependencies or filters, and then set the
    private fields (o._flags, o._test_jar, ...) directly.
    """
    c = _INSTRUMENTATION_TEST_INSTANCE_PATH % 'InstrumentationTestInstance'
    # yapf: disable
    with mock.patch('%s._initializeApkAttributes' % c), (
        mock.patch('%s._initializeDataDependencyAttributes' % c)), (
        mock.patch('%s._initializeTestFilterAttributes' %c)), (
        mock.patch('%s._initializeFlagAttributes' % c)), (
        mock.patch('%s._initializeTestControlAttributes' % c)), (
        mock.patch('%s._initializeTestCoverageAttributes' % c)), (
        mock.patch('%s._initializeSkiaGoldAttributes' % c)):
      # yapf: enable
      return instrumentation_test_instance.InstrumentationTestInstance(
          mock.MagicMock(), mock.MagicMock(), lambda s: None)
_FlagAttributesArgs = collections.namedtuple('_FlagAttributesArgs', [
'command_line_flags', 'device_flags_file', 'strict_mode',
'use_apk_under_test_flags_file', 'coverage_dir'
])
def createFlagAttributesArgs(self,
command_line_flags=None,
device_flags_file=None,
strict_mode=None,
use_apk_under_test_flags_file=False,
coverage_dir=None):
return self._FlagAttributesArgs(command_line_flags, device_flags_file,
strict_mode, use_apk_under_test_flags_file,
coverage_dir)
  def test_initializeFlagAttributes_commandLineFlags(self):
    """Flags passed via command_line_flags are appended to the defaults."""
    o = self.createTestInstance()
    args = self.createFlagAttributesArgs(command_line_flags=['--foo', '--bar'])
    o._initializeFlagAttributes(args)
    # --enable-test-intents is the default flag present in every case below.
    self.assertEquals(o._flags, ['--enable-test-intents', '--foo', '--bar'])
def test_initializeFlagAttributes_deviceFlagsFile(self):
o = self.createTestInstance()
with tempfile.NamedTemporaryFile() as flags_file:
flags_file.write('\n'.join(['--foo', '--bar']))
flags_file.flush()
args = self.createFlagAttributesArgs(device_flags_file=flags_file.name)
o._initializeFlagAttributes(args)
self.assertEquals(o._flags, ['--enable-test-intents', '--foo', '--bar'])
def test_initializeFlagAttributes_strictModeOn(self):
o = self.createTestInstance()
args = self.createFlagAttributesArgs(strict_mode='on')
o._initializeFlagAttributes(args)
self.assertEquals(o._flags, ['--enable-test-intents', '--strict-mode=on'])
def test_initializeFlagAttributes_strictModeOn_coverageOn(self):
o = self.createTestInstance()
args = self.createFlagAttributesArgs(
strict_mode='on', coverage_dir='/coverage/dir')
o._initializeFlagAttributes(args)
self.assertEquals(o._flags, ['--enable-test-intents'])
def test_initializeFlagAttributes_strictModeOff(self):
o = self.createTestInstance()
args = self.createFlagAttributesArgs(strict_mode='off')
o._initializeFlagAttributes(args)
self.assertEquals(o._flags, ['--enable-test-intents'])
def testGetTests_noFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'method': 'testMethod1',
'is_junit4': True,
},
{
'annotations': {
'Feature': {'value': ['Foo']},
'MediumTest': None,
},
'class': 'org.chromium.test.SampleTest',
'method': 'testMethod2',
'is_junit4': True,
},
{
'annotations': {
'Feature': {'value': ['Bar']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'method': 'testMethod1',
'is_junit4': True,
},
]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_simpleGtestFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._test_filter = 'org.chromium.test.SampleTest.testMethod1'
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_simpleGtestUnqualifiedNameFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._test_filter = 'SampleTest.testMethod1'
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_parameterizedTestGtestFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'SmallTest': None},
'method': 'testMethod1__sandboxed_mode',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'method': 'testMethod1',
'is_junit4': True,
},
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'method': 'testMethod1__sandboxed_mode',
'is_junit4': True,
},
]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
o._test_filter = 'org.chromium.test.SampleTest.testMethod1'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_wildcardGtestFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Bar']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._test_filter = 'org.chromium.test.SampleTest2.*'
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_negativeGtestFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'MediumTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod2',
},
{
'annotations': {
'Feature': {'value': ['Bar']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._test_filter = '*-org.chromium.test.SampleTest.testMethod1'
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_annotationFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Foo']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod1',
},
{
'annotations': {
'Feature': {'value': ['Bar']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._annotations = [('SmallTest', None)]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_excludedAnnotationFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {
'value': ['Foo']
},
'MediumTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod2',
},
]
o._excluded_annotations = [('SmallTest', None)]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_annotationSimpleValueFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {
'SmallTest': None,
'TestValue': '1',
},
'method': 'testMethod1',
},
{
'annotations': {
'MediumTest': None,
'TestValue': '2',
},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {
'SmallTest': None,
'TestValue': '3',
},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {
'value': ['Foo']
},
'SmallTest': None,
'TestValue': '1',
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._annotations = [('TestValue', '1')]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTests_annotationDictValueFilter(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {'MediumTest': None},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'java.lang.Object',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {'value': ['Bar']},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._annotations = [('Feature', 'Bar')]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGetTestName(self):
test = {
'annotations': {
'RunWith': {'value': 'class J4Runner'},
'SmallTest': {},
'Test': {'expected': 'class org.junit.Test$None',
'timeout': '0'},
'UiThreadTest': {}},
'class': 'org.chromium.TestA',
'is_junit4': True,
'method': 'testSimple'}
unqualified_class_test = {
'class': test['class'].split('.')[-1],
'method': test['method']
}
self.assertEquals(
instrumentation_test_instance.GetTestName(test, sep='.'),
'org.chromium.TestA.testSimple')
self.assertEquals(
instrumentation_test_instance.GetTestName(
unqualified_class_test, sep='.'),
'TestA.testSimple')
def testGetUniqueTestName(self):
test = {
'annotations': {
'RunWith': {'value': 'class J4Runner'},
'SmallTest': {},
'Test': {'expected': 'class org.junit.Test$None', 'timeout': '0'},
'UiThreadTest': {}},
'class': 'org.chromium.TestA',
'flags': ['enable_features=abc'],
'is_junit4': True,
'method': 'testSimple'}
self.assertEquals(
instrumentation_test_instance.GetUniqueTestName(
test, sep='.'),
'org.chromium.TestA.testSimple_with_enable_features=abc')
def testGetTestNameWithoutParameterPostfix(self):
test = {
'annotations': {
'RunWith': {'value': 'class J4Runner'},
'SmallTest': {},
'Test': {'expected': 'class org.junit.Test$None', 'timeout': '0'},
'UiThreadTest': {}},
'class': 'org.chromium.TestA__sandbox_mode',
'flags': 'enable_features=abc',
'is_junit4': True,
'method': 'testSimple'}
unqualified_class_test = {
'class': test['class'].split('.')[-1],
'method': test['method']
}
self.assertEquals(
instrumentation_test_instance.GetTestNameWithoutParameterPostfix(
test, sep='.'),
'org.chromium.TestA')
self.assertEquals(
instrumentation_test_instance.GetTestNameWithoutParameterPostfix(
unqualified_class_test, sep='.'),
'TestA')
def testGetTests_multipleAnnotationValuesRequested(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {'Feature': {'value': ['Foo']}},
'class': 'org.chromium.test.SampleTest',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
{
'annotations': {
'Feature': {'value': ['Baz']},
'MediumTest': None,
},
'method': 'testMethod2',
},
],
},
{
'annotations': {'Feature': {'value': ['Bar']}},
'class': 'org.chromium.test.SampleTest2',
'superclass': 'junit.framework.TestCase',
'methods': [
{
'annotations': {'SmallTest': None},
'method': 'testMethod1',
},
],
}
]
expected_tests = [
{
'annotations': {
'Feature': {
'value': ['Baz']
},
'MediumTest': None,
},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod2',
},
{
'annotations': {
'Feature': {
'value': ['Bar']
},
'SmallTest': None,
},
'class': 'org.chromium.test.SampleTest2',
'is_junit4': True,
'method': 'testMethod1',
},
]
o._annotations = [('Feature', 'Bar'), ('Feature', 'Baz')]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testGenerateTestResults_noStatus(self):
results = instrumentation_test_instance.GenerateTestResults(
None, None, [], 1000, None, None)
self.assertEqual([], results)
def testGenerateTestResults_testPassed(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
def testGenerateTestResults_testSkipped_true(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'test_skipped': 'true',
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
def testGenerateTestResults_testSkipped_false(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(0, {
'test_skipped': 'false',
}),
(0, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
def testGenerateTestResults_testFailed(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(-2, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
def testGenerateTestResults_testUnknownException(self):
stacktrace = 'long\nstacktrace'
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(-1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
'stack': stacktrace,
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
self.assertEqual(stacktrace, results[0].GetLog())
def testGenerateJUnitTestResults_testSkipped_true(self):
statuses = [
(1, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
(-3, {
'class': 'test.package.TestClass',
'test': 'testMethod',
}),
]
results = instrumentation_test_instance.GenerateTestResults(
None, None, statuses, 1000, None, None)
self.assertEqual(1, len(results))
self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
def testParameterizedCommandLineFlagsSwitches(self):
o = self.createTestInstance()
raw_tests = [{
'annotations': {
'ParameterizedCommandLineFlags$Switches': {
'value': ['enable-features=abc', 'enable-features=def']
}
},
'class':
'org.chromium.test.SampleTest',
'superclass':
'java.lang.Object',
'methods': [
{
'annotations': {
'SmallTest': None
},
'method': 'testMethod1',
},
{
'annotations': {
'MediumTest': None,
'ParameterizedCommandLineFlags$Switches': {
'value': ['enable-features=ghi', 'enable-features=jkl']
},
},
'method': 'testMethod2',
},
{
'annotations': {
'MediumTest': None,
'ParameterizedCommandLineFlags$Switches': {
'value': []
},
},
'method': 'testMethod3',
},
{
'annotations': {
'MediumTest': None,
'SkipCommandLineParameterization': None,
},
'method': 'testMethod4',
},
],
}]
expected_tests = [
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags': ['--enable-features=abc', '--enable-features=def'],
'is_junit4': True,
'method': 'testMethod1'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags': ['--enable-features=ghi', '--enable-features=jkl'],
'is_junit4': True,
'method': 'testMethod2'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod3'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod4'
},
]
for i in range(4):
expected_tests[i]['annotations'].update(raw_tests[0]['annotations'])
expected_tests[i]['annotations'].update(
raw_tests[0]['methods'][i]['annotations'])
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testParameterizedCommandLineFlags(self):
o = self.createTestInstance()
raw_tests = [{
'annotations': {
'ParameterizedCommandLineFlags': {
'value': [
{
'ParameterizedCommandLineFlags$Switches': {
'value': [
'enable-features=abc',
'force-fieldtrials=trial/group'
],
}
},
{
'ParameterizedCommandLineFlags$Switches': {
'value': [
'enable-features=abc2',
'force-fieldtrials=trial/group2'
],
}
},
],
},
},
'class':
'org.chromium.test.SampleTest',
'superclass':
'java.lang.Object',
'methods': [
{
'annotations': {
'SmallTest': None
},
'method': 'testMethod1',
},
{
'annotations': {
'MediumTest': None,
'ParameterizedCommandLineFlags': {
'value': [{
'ParameterizedCommandLineFlags$Switches': {
'value': ['enable-features=def']
}
}],
},
},
'method': 'testMethod2',
},
{
'annotations': {
'MediumTest': None,
'ParameterizedCommandLineFlags': {
'value': [],
},
},
'method': 'testMethod3',
},
{
'annotations': {
'MediumTest': None,
'SkipCommandLineParameterization': None,
},
'method': 'testMethod4',
},
],
}]
expected_tests = [
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags':
['--enable-features=abc', '--force-fieldtrials=trial/group'],
'is_junit4': True,
'method': 'testMethod1'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags': ['--enable-features=def'],
'is_junit4': True,
'method': 'testMethod2'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod3'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'is_junit4': True,
'method': 'testMethod4'
},
{
'annotations': {},
'class':
'org.chromium.test.SampleTest',
'flags': [
'--enable-features=abc2',
'--force-fieldtrials=trial/group2',
],
'is_junit4':
True,
'method':
'testMethod1'
},
]
for i in range(4):
expected_tests[i]['annotations'].update(raw_tests[0]['annotations'])
expected_tests[i]['annotations'].update(
raw_tests[0]['methods'][i]['annotations'])
expected_tests[4]['annotations'].update(raw_tests[0]['annotations'])
expected_tests[4]['annotations'].update(
raw_tests[0]['methods'][0]['annotations'])
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testDifferentCommandLineParameterizations(self):
o = self.createTestInstance()
raw_tests = [{
'annotations': {},
'class':
'org.chromium.test.SampleTest',
'superclass':
'java.lang.Object',
'methods': [
{
'annotations': {
'SmallTest': None,
'ParameterizedCommandLineFlags': {
'value': [
{
'ParameterizedCommandLineFlags$Switches': {
'value': ['a1', 'a2'],
}
},
],
},
},
'method': 'testMethod2',
},
{
'annotations': {
'SmallTest': None,
'ParameterizedCommandLineFlags$Switches': {
'value': ['b1', 'b2'],
},
},
'method': 'testMethod3',
},
],
}]
expected_tests = [
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags': ['--a1', '--a2'],
'is_junit4': True,
'method': 'testMethod2'
},
{
'annotations': {},
'class': 'org.chromium.test.SampleTest',
'flags': ['--b1', '--b2'],
'is_junit4': True,
'method': 'testMethod3'
},
]
for i in range(2):
expected_tests[i]['annotations'].update(
raw_tests[0]['methods'][i]['annotations'])
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
actual_tests = o.ProcessRawTests(raw_tests)
self.assertEquals(actual_tests, expected_tests)
def testMultipleCommandLineParameterizations_raises(self):
o = self.createTestInstance()
raw_tests = [
{
'annotations': {
'ParameterizedCommandLineFlags': {
'value': [
{
'ParameterizedCommandLineFlags$Switches': {
'value': [
'enable-features=abc',
'force-fieldtrials=trial/group',
],
}
},
],
},
},
'class':
'org.chromium.test.SampleTest',
'superclass':
'java.lang.Object',
'methods': [
{
'annotations': {
'SmallTest': None,
'ParameterizedCommandLineFlags$Switches': {
'value': [
'enable-features=abc',
'force-fieldtrials=trial/group',
],
},
},
'method': 'testMethod1',
},
],
},
]
o._test_jar = 'path/to/test.jar'
o._junit4_runner_class = 'J4Runner'
self.assertRaises(
instrumentation_test_instance.CommandLineParameterizationException,
o.ProcessRawTests, [raw_tests[0]])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
{
"content_hash": "9351a665c6aca6c6653545aafa0c296c",
"timestamp": "",
"source": "github",
"line_count": 1182,
"max_line_length": 79,
"avg_line_length": 29.75296108291032,
"alnum_prop": 0.48555505004549593,
"repo_name": "youtube/cobalt_sandbox",
"id": "77918bb3ea5abb5ae74b1b46c25200c774caab67",
"size": "35354",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/android/pylib/instrumentation/instrumentation_test_instance_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import time
import urllib2
from fabric.api import env, run, sudo
from fabric.context_managers import cd, settings as fabric_settings
from fabric.contrib.files import append, contains, sed, uncomment
from fabric.operations import reboot
from fabric.utils import abort
import settings
DISTRO = "ARCH_201208"
SALT_INSTALLERS = ["aur", "aur-git"]
def validate_configurator_version():
    """
    Arch is a rolling release distro, therefore it is important to ensure
    the configurator version is current.

    Aborts the run when the salt PKGBUILD in the AUR advertises a
    different pkgver or pkgrel than the one pinned in settings.
    """
    if settings.CONFIGURATOR_MODULE == "bootmachine.contrib.configurators.salt":
        pkgver = settings.SALT_AUR_PKGVER
        pkgrel = settings.SALT_AUR_PKGREL
        response = urllib2.urlopen("https://aur.archlinux.org/packages/sa/salt/PKGBUILD")
        for line in response:
            # BUG FIX: this message previously said 'pkgrel={0}' although it
            # reports the *pkgver* mismatch.
            if line.startswith("pkgver=") and pkgver not in line:
                abort("The requested Salt 'pkgver={0}' in the AUR was updated to '{1}'.".format(
                    pkgver, line.strip()))
            if line.startswith("pkgrel=") and pkgrel not in line:
                abort("The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.".format(
                    pkgrel, line.strip()))
        response.close()
def bootstrap():
    """
    Bootstrap Arch Linux.

    Only the bare essentials; the configurator will take care of the rest.
    The statements below are strictly order-dependent: kernel config, full
    upgrade, AUR tooling, grub2 migration, then a reboot into systemd.
    """
    validate_configurator_version()
    # Configure the kernel before the upgrade.
    # see: https://projects.archlinux.org/mkinitcpio.git/commit/?id=5b99f78331f567cc1442460efc054b72c45306a6 # nopep8
    sed("/etc/mkinitcpio.conf", "xen-", "xen_")
    sed("/etc/mkinitcpio.conf", "usbinput", "usbinput fsck")
    # Remove existing files that break the upgrade.
    run("rm /etc/profile.d/locale.sh && rm /usr/share/man/man7/archlinux.7.gz")
    # Upgrade packages.
    run("pacman --noconfirm -Syu")
    run("pacman --noconfirm -Syu") # requires a second run!
    # Install essential packages.
    run("pacman --noconfirm -S base-devel")
    run("pacman --noconfirm -S curl git rsync")
    # Install and configure yaourt (AUR helper) from the archlinuxfr repo.
    append("/etc/pacman.conf", "\n[archlinuxfr]\nServer = http://repo.archlinux.fr/$arch",
           use_sudo=True)
    run("pacman -Syy")
    run("pacman --noconfirm -S yaourt")
    # Create a user, named 'aur', to safely install AUR packages under
    # fakeroot.  uid and gid values auto increment from 1000; to prevent
    # conflicts set the 'aur' user's gid and uid to 902.
    run("groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur")
    # Allow users in the wheel group to sudo without a password.
    uncomment("/etc/sudoers", "wheel.*NOPASSWD")
    # Upgrade non-pacman rackspace installed packages.
    with cd("/home/aur/"):
        sudo("yaourt --noconfirm -S xe-guest-utilities", user="aur")
    # Migrate legacy GRUB to grub-bios; the piped 'y' answers accept the
    # interactive pacman prompts.
    run("mv /boot/grub /boot/grub-legacy")
    run("printf 'y\nY\nY\nY\nY\nY\nY\nY\nY\nY\n' | pacman -S grub-bios")
    with fabric_settings(warn_only=True):
        # warn_only: dm_mod may already be loaded or built in.
        run("modprobe dm_mod")
    run("grub-install --directory=/usr/lib/grub/i386-pc --target=i386-pc --boot-directory=/boot \
        --recheck --debug /dev/xvda")
    run("grub-mkconfig -o /boot/grub/grub.cfg")
    # Allow fabric to sftp with contrib.files.put
    # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8
    # Change before reboot because then the sshd config will be reloaded.
    sed("/etc/ssh/sshd_config", "Subsystem sftp /usr/lib/openssh/sftp-server",
        "Subsystem sftp internal-sftp")
    # A pure systemd installation: boot with systemd as init.
    run("printf 'y\ny\nY\n' | pacman -S systemd ntp")
    sed("/etc/default/grub", 'GRUB_CMDLINE_LINUX=""',
        'GRUB_CMDLINE_LINUX="init=/usr/lib/systemd/systemd"')
    run("grub-mkconfig -o /boot/grub/grub.cfg")
    run("iptables-save > /etc/iptables/iptables.rules")
    run("ip6tables-save > /etc/iptables/ip6tables.rules")
    for daemon in ["netcfg", "sshd", "syslog-ng", "ntpd", "iptables"]:
        run("systemctl enable {0}.service".format(daemon))
    with fabric_settings(warn_only=True):
        # warn_only: the reboot drops the ssh connection.
        reboot()
    if not contains("/proc/1/comm", "systemd"):
        abort("systemd installation failure")
    # systemd confirmed as pid 1: drop sysvinit and restore the default
    # kernel command line.
    run("pacman --noconfirm -Rns initscripts sysvinit")
    run("pacman --noconfirm -S systemd-sysvcompat")
    sed("/etc/default/grub", 'GRUB_CMDLINE_LINUX="init=/usr/lib/systemd/systemd"',
        'GRUB_CMDLINE_LINUX=""')
    run("grub-mkconfig -o /boot/grub/grub.cfg")
    # Hostname, locale and timezone via the systemd control utilities.
    server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]
    run("hostnamectl set-hostname {0}".format(server.name))
    uncomment("/etc/locale.gen", "en_US.UTF-8 UTF-8")
    uncomment("/etc/locale.gen", "en_US ISO-8859-1")
    run("locale-gen")
    run("localectl set-locale LANG='en_US.utf8'")
    run("timedatectl set-timezone US/Central")
def install_salt(installer="aur"):
    """
    Install salt on the machine using the requested installer.

    Known installers are "aur" (stable AUR package) and "aur-git"
    (build from git); anything else raises NotImplementedError.
    """
    # Make the salt master reachable under a stable private hostname.
    master_entry = "{0} saltmaster-private".format(env.master_server.private_ip)
    append("/etc/hosts", master_entry)
    with cd("/home/aur/"):
        if installer == "aur":
            validate_configurator_version()
            package = "salt"
        elif installer == "aur-git":
            package = "salt-git"
        else:
            raise NotImplementedError()
        sudo("yaourt --noconfirm -S {0}".format(package), user="aur")
def setup_salt():
    """
    Write the salt master/minion configuration files and enable the
    daemons so they come up on the next reboot.

    See: http://salt.readthedocs.org/en/latest/topics/installation/arch.html
    """
    server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]
    is_master = (env.host == env.master_server.public_ip)
    if is_master:
        # Only the designated master host gets a master config.
        run("touch /etc/salt/master")
        append("/etc/salt/master", "file_roots:\n base:\n - {0}".format(
            settings.REMOTE_STATES_DIR))
        append("/etc/salt/master", "pillar_roots:\n base:\n - {0}".format(
            settings.REMOTE_PILLARS_DIR))
        run("systemctl enable salt-master")
    # Every host (master included) runs a minion.
    run("touch /etc/salt/minion")
    minion_lines = [
        "master: {0}".format(env.master_server.private_ip),
        "id: {0}".format(server.name),
        "grains:\n roles:",
    ]
    for line in minion_lines:
        append("/etc/salt/minion", line)
    for role in server.roles:
        append("/etc/salt/minion", " - {0}".format(role))
    run("systemctl enable salt-minion")
def start_salt():
    """
    Starts salt master and minions.
    """
    # warn_only: a daemon that is already running must not abort the run.
    with fabric_settings(warn_only=True):
        if env.host == env.master_server.public_ip:
            sudo("systemctl start salt-master")
            # Give the master a moment to come up before the minion connects.
            time.sleep(3)
        sudo("systemctl start salt-minion")
def stop_salt():
    """
    Stops salt master and minions.
    """
    # warn_only: stopping an already-stopped daemon must not abort the run.
    with fabric_settings(warn_only=True):
        # Only the designated master host runs salt-master.
        if env.host == env.master_server.public_ip:
            sudo("systemctl stop salt-master")
        sudo("systemctl stop salt-minion")
def restart_salt():
    """Restart the salt daemons by stopping, then starting them."""
    stop_salt()
    start_salt()
|
{
"content_hash": "3c5943d2587fd7e6c2e0f044c03fd22f",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 120,
"avg_line_length": 38.16393442622951,
"alnum_prop": 0.6417525773195877,
"repo_name": "rizumu/bootmachine",
"id": "1db53ef9ddbe7dbb3d4aba7dc465b6c2e5c3b3d2",
"size": "6984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootmachine/contrib/distros/attic/rackspace_arch_201208.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "6980"
},
{
"name": "Python",
"bytes": "140748"
},
{
"name": "Ruby",
"bytes": "8049"
},
{
"name": "Scheme",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "1365"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def PerformanceStatisticsDescription(vim, *args, **kwargs):
    '''Data object to capture all information needed to describe a sample inventory.'''

    obj = vim.client.factory.create('ns0:PerformanceStatisticsDescription')

    # Generated argument-count guard: with no required properties the
    # threshold is 0, so this branch can never fire.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))

    required = [ ]
    optional = [ 'intervals', 'dynamicProperty', 'dynamicType' ]

    # Positional args fill the known attribute names in declaration order.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword args must match one of the known attribute names exactly.
    for name, value in kwargs.items():
        if name not in required + optional:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
        setattr(obj, name, value)

    return obj
|
{
"content_hash": "8cb6b878f4a33fc941d851156ccb5c6a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 33,
"alnum_prop": 0.6098484848484849,
"repo_name": "xuru/pyvisdk",
"id": "41593bfe27aae9120ccb02e1cf31c4f15c7f586f",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/performance_statistics_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
"""Classes and constants used to properly handle EXPANDABLES (one property that becomes many, as with 'opacity')
and SHORTHANDS (multiple properties that can be combined into a single one)"""
import skidmarkoutputs
#
# The Objects
#
class ShorthandHandler(object):
    """This object contains classmethods that are responsible for determining
    and returning the shorthand version of properties if all the elements to
    be able to do so are present"""

    # Lazily-built cache of every long-form property name that participates
    # in at least one shorthand (see get_properties_available_for_shorthand).
    PROPERTIES_AVAILABLE_FOR_SHORTHAND = None
    output_format = skidmarkoutputs.CSS_OUTPUT_COMPRESSED

    def __init__(self):
        pass

    @classmethod
    def set_output_format(cls, output_format):
        """Select the CSS output format used when emitting shorthands."""
        cls.output_format = output_format

    @classmethod
    def get_properties_available_for_shorthand(cls):
        """Return all long-form property names that can form a shorthand.

        The reverse mapping is computed once from PROPERTY_SHORTHANDS and
        cached on the class.
        """
        if cls.PROPERTIES_AVAILABLE_FOR_SHORTHAND is None:
            # Distinct local name (the original shadowed the class attribute).
            available = []
            for block_lists in PROPERTY_SHORTHANDS.values():
                for block in block_lists:
                    # block[0] is the style marker; the rest are properties.
                    available.extend(block[1:])
            cls.PROPERTIES_AVAILABLE_FOR_SHORTHAND = list(set(available))
        return cls.PROPERTIES_AVAILABLE_FOR_SHORTHAND

    @classmethod
    def is_shorthand(cls, prop_name):
        """Return True if prop_name is a shorthand property (e.g. 'padding')."""
        # `in` replaces dict.has_key(), which is deprecated on Python 2 and
        # removed on Python 3; behavior is identical.
        return prop_name in PROPERTY_SHORTHANDS

    @classmethod
    def expand_shorthand(cls, prop_name, prop_value):
        """Expand a shorthand declaration into its long-form properties.

        Each candidate block for prop_name is tried in order until a style
        handler produces a result; returns a list of (name, value) pairs,
        or None when nothing matched.
        """
        properties = None
        blocks = PROPERTY_SHORTHANDS.get(prop_name)
        if blocks:
            for block in blocks:
                style = block[0]
                block_values = block[1:]
                if style == PROPERTY_SHORTHAND_TYPE_STANDARD4:
                    properties = cls.expand_standard4(block_values, prop_name, prop_value)
                elif style == PROPERTY_SHORTHAND_TYPE_PASSTHRU:
                    properties = cls.expand_passthru(block_values, prop_name, prop_value)
                elif style == PROPERTY_SHORTHAND_TYPE_CUSTOM:
                    # Custom expanders are looked up by naming convention,
                    # e.g. "font" -> expand_font (none defined here yet).
                    fn_name = "expand_%s" % ( prop_name.replace("-", "_"), )
                    if hasattr(cls, fn_name):
                        properties = getattr(cls, fn_name)(block_values, prop_value)
                if properties:
                    break
        return properties

    @classmethod
    def get_all_expand_properties(cls, prop_name):
        """Return every long-form property that prop_name can expand into."""
        properties = set()
        blocks = PROPERTY_SHORTHANDS.get(prop_name)
        if blocks:
            for block in blocks:
                # block[0] (the style marker) is skipped; it was previously
                # bound to an unused local variable.
                properties.update(block[1:])
        return list(properties)

    #
    # Processors: From long to short
    #

    @classmethod
    def process(cls, style, shorthand, block_values):
        """Process a style, given the shorhand and block values. The block values
        are the fields, present in the declaration block's properties that match
        the property names found in the PROPERTY_SHORTHANDS list."""
        shorthand_property = ""
        # A None entry means one of the required long-form properties was
        # absent, so no shorthand can be built for this block.
        if None not in block_values:
            if style == PROPERTY_SHORTHAND_TYPE_STANDARD4:
                shorthand_property = cls.process_standard4(shorthand, block_values)
            elif style == PROPERTY_SHORTHAND_TYPE_PASSTHRU:
                shorthand_property = cls.process_passthru(shorthand, block_values)
            elif style == PROPERTY_SHORTHAND_TYPE_CUSTOM:
                fn_name = "process_%s" % ( shorthand.replace("-", "_"), )
                if hasattr(cls, fn_name):
                    shorthand_property = getattr(cls, fn_name)(shorthand, block_values)
        return shorthand_property or ""

    @classmethod
    def process_standard4(cls, shorthand, block_values):
        """This is the generic handler for the elements that require 4 params
        that are defined as 'top right bottom left', such as padding and margin."""
        sep = skidmarkoutputs.OUTPUT_TEMPLATE_PROPERTY_VALUE_SEPARATOR[cls.output_format]
        if len(set(block_values)) == 1:
            # All four sides identical: emit a single value.
            shorthand_property = "%s%s%s" % ( shorthand, sep, block_values[0] )
        elif block_values[0] == block_values[2] and block_values[1] == block_values[3]:
            # top==bottom and right==left: two values.
            shorthand_property = "%s%s%s" % ( shorthand, sep, " ".join(block_values[:2]) )
        elif block_values[1] == block_values[3]:
            # right==left: three values.
            shorthand_property = "%s%s%s" % ( shorthand, sep, " ".join(block_values[:3]) )
        else:
            shorthand_property = "%s%s%s" % ( shorthand, sep, " ".join(block_values) )
        return shorthand_property

    @classmethod
    def process_passthru(cls, shorthand, block_values):
        """This is the generic handler for shorthands that are composed entirely of
        the property data defined in PROPERTY_SHORTHANDS, in the exact order."""
        sep = skidmarkoutputs.OUTPUT_TEMPLATE_PROPERTY_VALUE_SEPARATOR[cls.output_format]
        return "%s%s%s" % ( shorthand, sep, " ".join(block_values) )

    @classmethod
    def process_font(cls, shorthand, block_values):
        """Custom handler for the 'font' shorthand"""
        sep = skidmarkoutputs.OUTPUT_TEMPLATE_PROPERTY_VALUE_SEPARATOR[cls.output_format]
        shorthand_property = None
        # The accepted lengths mirror the "font" blocks declared in
        # PROPERTY_SHORTHANDS; font-size/line-height are joined with "/".
        if len(block_values) == 6:
            shorthand_property = "%s%s%s %s %s %s/%s %s" % tuple([shorthand, sep] + block_values)
        elif len(block_values) == 5:
            shorthand_property = "%s%s%s %s %s/%s %s" % tuple([shorthand, sep] + block_values)
        elif len(block_values) == 3:
            shorthand_property = "%s%s%s/%s %s" % tuple([shorthand, sep] + block_values)
        elif len(block_values) == 2:
            shorthand_property = "%s%s%s %s" % tuple([shorthand, sep] + block_values)
        return shorthand_property

    #
    # expanders: from short to long
    #

    @classmethod
    def expand_passthru(cls, block_values, prop_name, prop_value):
        """Expand the passthru format"""
        values = prop_value.split(" ", len(block_values))
        if len(block_values) == len(values):
            all_properties = cls.get_all_expand_properties(prop_name)
            # list(zip(...)) keeps the concatenation valid on both Python 2
            # and Python 3 (zip returns an iterator on Python 3).
            return list(zip(block_values, values)) + [ (p_name, None) for p_name in all_properties if p_name not in block_values ]
        return None

    @classmethod
    def expand_standard4(cls, block_values, prop_name, prop_value):
        """Expand the standard4 format: unimplemented at the moment"""
        return None
class ExpandableHandler(object):
    """Callable helpers referenced from PROPERTY_EXPANDABLES entries that
    need more than a simple vendor-prefix substitution."""

    def __init__(self):
        pass

    @classmethod
    def ie_opacity(cls, value):
        """Translate a CSS opacity value (0.0-1.0) into the legacy IE filter."""
        percentage = int(float(value) * 100)
        return "filter: alpha(opacity=%d)" % (percentage,)
#
# Constants
#

# Discriminators for the entries of PROPERTY_SHORTHANDS: they select which
# process_*/expand_* handler in ShorthandHandler applies to a block.
PROPERTY_SHORTHAND_TYPE_STANDARD4 = "standard4"
PROPERTY_SHORTHAND_TYPE_PASSTHRU = "passthru"
PROPERTY_SHORTHAND_TYPE_CUSTOM = "custom"

# Maps a standard CSS property to the vendor-prefixed properties it expands
# into; an entry may also be a callable (see ExpandableHandler.ie_opacity)
# that builds a complete replacement declaration from the value.
PROPERTY_EXPANDABLES = {
    #
    # border-radius
    #
    "border-radius": [
        "-moz-border-radius",
        "-webkit-border-radius"
    ],
    "border-top-left-radius": [
        "-moz-border-radius-topleft",
        "-webkit-border-top-left-radius"
    ],
    "border-top-right-radius": [
        "-moz-border-radius-topright",
        "-webkit-border-top-right-radius"
    ],
    "border-bottom-left-radius": [
        "-moz-border-radius-bottomleft",
        "-webkit-border-bottom-left-radius"
    ],
    "border-bottom-right-radius": [
        "-moz-border-radius-bottomright",
        "-webkit-border-bottom-right-radius"
    ],
    #
    # transition
    #
    "transition": [
        "-webkit-transition",
        "-moz-transition",
        "-o-transition"
    ],
    #
    # opacity
    #
    "opacity": [
        "-moz-opacity",
        "-khtml-opacity",
        ExpandableHandler.ie_opacity
    ]
}
# Maps a shorthand CSS property to the list of long-form "blocks" it can be
# built from / expanded into. Each block is a list whose first element is a
# PROPERTY_SHORTHAND_TYPE_* marker and whose remaining elements are the
# long-form property names, in order. When a shorthand has several blocks
# they are tried in sequence (most specific first).
PROPERTY_SHORTHANDS = {
    "padding": [
        [ PROPERTY_SHORTHAND_TYPE_STANDARD4,
          "padding-top", "padding-right", "padding-bottom", "padding-left" ],
    ],
    "margin": [
        [ PROPERTY_SHORTHAND_TYPE_STANDARD4,
          "margin-top", "margin-right", "margin-bottom", "margin-left" ],
    ],
    "border-width": [
        [ PROPERTY_SHORTHAND_TYPE_STANDARD4,
          "border-top-width", "border-right-width", "border-bottom-width", "border-left-width" ],
    ],
    "border-style": [
        [ PROPERTY_SHORTHAND_TYPE_STANDARD4,
          "border-top-style", "border-right-style", "border-bottom-style", "border-left-style" ],
    ],
    "border-color": [
        [ PROPERTY_SHORTHAND_TYPE_STANDARD4,
          "border-top-color", "border-right-color", "border-bottom-color", "border-left-color" ],
    ],
    "outline": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "outline-width", "outline-style", "outline-color" ]
    ],
    "border": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-width", "border-style", "border-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-style", "border-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-style" ],
    ],
    "border-top": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-top-width", "border-top-style", "border-top-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-top-style", "border-top-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-top-style" ]
    ],
    "border-right": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-right-width", "border-right-style", "border-right-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-right-style", "border-right-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-right-style" ]
    ],
    "border-bottom": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-bottom-width", "border-bottom-style", "border-bottom-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-bottom-style", "border-bottom-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-bottom-style" ]
    ],
    "border-left": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-left-width", "border-left-style", "border-left-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-left-style", "border-left-color" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "border-left-style" ],
    ],
    "font": [
        [ PROPERTY_SHORTHAND_TYPE_CUSTOM,
          "font-style", "font-variant", "font-weight", "font-size", "line-height", "font-family" ],
        [ PROPERTY_SHORTHAND_TYPE_CUSTOM,
          "font-style", "font-weight", "font-size", "line-height", "font-family" ],
        [ PROPERTY_SHORTHAND_TYPE_CUSTOM,
          "font-size", "line-height", "font-family" ],
        [ PROPERTY_SHORTHAND_TYPE_CUSTOM,
          "font-size", "font-family" ]
    ],
    "list-style": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "list-style-type", "list-style-position", "list-style-image" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "list-style-type", "list-style-position" ],
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "list-style-type" ]
    ],
    "transition": [
        [ PROPERTY_SHORTHAND_TYPE_PASSTHRU,
          "transition-property", "transition-duration", "transition-timing-function", "transition-delay" ]
    ]
}
|
{
"content_hash": "22c485693eaac49d2515a5070ac3cbad",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 118,
"avg_line_length": 30.77591036414566,
"alnum_prop": 0.618640211158642,
"repo_name": "socialwireinc/skidmarkcss",
"id": "f11edd5c57cd2f2d3a341ee8f03a59b9a6c8bb5b",
"size": "11014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skidmarkcss/core/propertyexpandables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118720"
}
],
"symlink_target": ""
}
|
"""Controller module for attenuators.
Sample Config:
"Attenuator": [
{
"address": "192.168.1.12",
"port": 23,
"model": "minicircuits",
"paths": ["AP1-2G", "AP1-5G", "AP2-2G", "AP2-5G"]
},
{
"address": "192.168.1.14",
"port": 23,
"model": "minicircuits",
"paths": ["AP-DUT"]
}
]
"""
import importlib
import logging
# Name under which this controller appears in a Mobly test-bed config.
MOBLY_CONTROLLER_CONFIG_NAME = "Attenuator"

# Keys used inside a config dict for attenuator.
# Keys for making the connection to the attenuator device. Right now we only
# use telnet lib. This can be refactored when the need for a different
# communication protocol arises.
KEY_ADDRESS = "address"
KEY_PORT = "port"
# A string that is the model of the attenuator used. This is essentially the
# module name for the underlying driver for the attenuator hardware.
KEY_MODEL = "model"
# A list of strings, each describing what's the connected to this attenuation
# path
KEY_PATHS = "paths"

# Import-path template for a model's driver module; filled with KEY_MODEL.
PACKAGE_PATH_TEMPLATE = "mobly.controllers.attenuator_lib.%s"
def create(configs):
    """Creates AttenuatorPath objects from a list of attenuator configs.

    Args:
        configs: A list of dicts, each describing one attenuator device
            (see the module docstring for the expected keys).

    Returns:
        A list of AttenuatorPath objects, one per configured path, in
        config order.
    """
    objs = []
    for config in configs:
        _validate_config(config)
        attenuator_model = config[KEY_MODEL]
        # Import the correct driver module for the attenuator device
        module_name = PACKAGE_PATH_TEMPLATE % attenuator_model
        module = importlib.import_module(module_name)
        # Create the device object and open the connection to the hardware.
        attenuation_device = module.AttenuatorDevice(
            path_count=len(config[KEY_PATHS]))
        attenuation_device.model = attenuator_model
        # open() is called for its connection side effect; its return value
        # was previously bound to an unused local variable.
        attenuation_device.open(config[KEY_ADDRESS], config[KEY_PORT])
        for idx, path_name in enumerate(config[KEY_PATHS]):
            path = AttenuatorPath(attenuation_device, idx=idx, name=path_name)
            objs.append(path)
    return objs
def destroy(objs):
    """Closes the device connection behind every AttenuatorPath object."""
    for path in objs:
        path.attenuation_device.close()
class Error(Exception):
    """Base exception for all errors raised by attenuator-related modules."""
def _validate_config(config):
    """Verifies that a config dict for an attenuator device is valid.

    Args:
        config: A dict that is the configuration for an attenuator device.

    Raises:
        Error: Raised if a required key is missing from the config.
    """
    required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]
    for key in required_keys:
        if key not in config:
            # Bug fix: the original passed (key, config) as a second
            # positional argument to Error instead of interpolating it,
            # so the message never contained the missing key.
            raise Error("Required key %s missing from config %s" %
                        (key, config))
class AttenuatorPath(object):
    """A handle for driving one attenuation path as its own object.

    Wrapping a (device, index) pair lets test code address each signal path
    independently instead of passing an index to the device object:

        self.attenuation_paths[0].set_atten(50)
        self.attenuation_paths[1].set_atten(40)

    rather than:

        self.attenuators[0].set_atten(0, 50)
        self.attenuators[0].set_atten(1, 40)

    This decouples the test from the physical test bed: four single-channel
    attenuators and one four-channel attenuator look identical to the test.
    """

    def __init__(self, attenuation_device, idx=0, name=None):
        self.model = attenuation_device.model
        self.attenuation_device = attenuation_device
        self.idx = idx
        # Fail fast when the device does not expose this many paths.
        if self.idx >= attenuation_device.path_count:
            raise IndexError("Attenuator index out of range!")

    def set_atten(self, value):
        """Sets this path's attenuation.

        Args:
            value: A float, the nominal attenuation to set, in dB.
        """
        self.attenuation_device.set_atten(self.idx, value)

    def get_atten(self):
        """Returns this path's current attenuation (a float, in dB)."""
        return self.attenuation_device.get_atten(self.idx)

    def get_max_atten(self):
        """Returns the maximum attenuation (dB) the device supports."""
        return self.attenuation_device.max_atten
|
{
"content_hash": "700d3b04c7394267fef000d4ac4a95f0",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 32.992753623188406,
"alnum_prop": 0.6512189764990116,
"repo_name": "l-meng/mobly",
"id": "02392e4eb6c46586fca1f456fd6a203a53e8c93b",
"size": "5131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobly/controllers/attenuator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "440648"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
import scipy
# Root directory under which each visualization run gets its own folder.
VIS_DIR = "vis"


class Visualizer:
    """Accumulates rows of mixed text/image data and renders them as an
    HTML table (index.html) in a per-run directory.

    NOTE(review): Python 2 only -- uses `print >>file` statements; also
    relies on scipy.misc.imsave, which was removed in SciPy >= 1.2.
    """

    def __init__(self):
        # Inactive until begin() is called; show() is a no-op while inactive.
        self.active = False

    def begin(self, dest, max_entries):
        """Start a run writing under VIS_DIR/dest, keeping at most
        max_entries rows."""
        self.lines = []
        self.active = True
        self.max_entries = max_entries
        self.next_entry = 0
        self.dest_dir = os.path.join(VIS_DIR, dest)
        if not os.path.exists(self.dest_dir):
            os.mkdir(self.dest_dir)

    def reset(self):
        """Re-arm collection for another max_entries rows in the same dir."""
        self.next_entry = 0
        self.active = True

    def end(self):
        """Stop collecting and dump the accumulated rows to index.html."""
        self.active = False
        with open(os.path.join(self.dest_dir, "index.html"), "w") as vis_file:
            #print >>vis_file, "<html><head><link rel='stylesheet' href='style.css'></head><body><table>"
            print >>vis_file, "<html><head>"
            print >>vis_file, "<link rel='stylesheet' href='../style.css' />"
            print >>vis_file, "</head><body><table>"
            for line in self.lines:
                print >>vis_file, " <tr>"
                for field in line:
                    # Trailing commas suppress the newline so a cell's tag
                    # and content land on one output line (Python 2 print).
                    print >>vis_file, " <td>",
                    print >>vis_file, field,
                    print >>vis_file, "</td>"
                print >>vis_file, " </tr>"
            print >>vis_file, "</table></body></html>"

    def show(self, data):
        """Append one row. ndarray fields are written out as JPEGs and
        embedded as <img> tags; all other fields are stringified."""
        if not self.active:
            return
        table_data = []
        for i_field, field in enumerate(data):
            if isinstance(field, np.ndarray):
                filename = "%d_%d.jpg" % (self.next_entry, i_field)
                filepath = os.path.join(self.dest_dir, filename)
                scipy.misc.imsave(filepath, field)
                table_data.append("<img src='%s' />" % filename)
            else:
                table_data.append(str(field))
        self.lines.append(table_data)
        self.next_entry += 1
        # Silently stop collecting once the row budget is exhausted.
        if self.next_entry >= self.max_entries:
            self.active = False


# Shared module-level singleton used by importers.
visualizer = Visualizer()
|
{
"content_hash": "a869f9bd51665653482fda4dd1d7e387",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 105,
"avg_line_length": 32.4,
"alnum_prop": 0.5118312757201646,
"repo_name": "jacobandreas/nmn2",
"id": "156946b470a7a457642d3b02e6f6bc4fecddde64",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/visualizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86531"
},
{
"name": "Scheme",
"bytes": "26413"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
# True when running on Google App Engine (affects DB detection and caching).
is_gae = request.env.web2py_runtime_gae or False

# ## critical --- make a copy of the environment

global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    # Addresses considered "local" for the insecure-channel check below.
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    hosts = (http_host, )

# Refuse plain-HTTP access from non-local clients; the 'manage' action is
# exempted here because it enforces its own role-based auth below.
if request.is_https:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
        (request.function != 'manage'):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))

if request.function == 'manage':
    # 'manage' is gated by auth group membership, not admin credentials.
    if not 'auth' in globals() or not request.args:
        redirect(URL(request.controller, 'index'))
    manager_action = auth.settings.manager_actions.get(request.args(0), None)
    if manager_action is None and request.args(0) == 'auth':
        manager_action = dict(role=auth.settings.auth_manager_role,
                              heading=T('Manage Access Control'),
                              tables=[auth.table_user(),
                                      auth.table_group(),
                                      auth.table_permission()])
    manager_role = manager_action.get('role', None) if manager_action else None
    auth.requires_membership(manager_role)(lambda: None)()
    menu = False
elif (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
    # Every other action requires web2py admin credentials.
    redirect(URL('admin', 'default', 'index',
                 vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
    response.subtitle = T('Database Administration (appadmin)')
    menu = True

ignore_rw = True
response.view = 'appadmin.html'

# if menu:
#     response.menu = [[T('design'), False, URL('admin', 'default', 'design',
#                      args=[request.application])], [T('db'), False,
#                      URL('index')], [T('state'), False,
#                      URL('state')], [T('cache'), False,
#                      URL('ccache')]]

# ##########################################################
# ## auxiliary functions
# ###########################################################

# Dead code path retained from upstream appadmin (ticket browsing).
if False and request.tickets_db:
    from gluon.restricted import TicketStorage
    ts = TicketStorage()
    ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
    """Return {name: connection} for every DAL object in the captured
    global environment. The `request` argument is unused."""
    found = {}
    for name, candidate in global_env.items():
        # GQLDB only exists on GAE deployments; fall back to SQLDB elsewhere.
        try:
            is_database = isinstance(candidate, GQLDB)
        except:
            is_database = isinstance(candidate, SQLDB)
        if is_database:
            found[name] = candidate
    return found
# Discovered once at request time; the argument is ignored.
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate `text` as a Python expression against the captured global
    environment and return the result.

    NOTE(review): this executes admin-supplied query strings by design;
    appadmin is only reachable over a secure/authorized channel (see the
    access guard at the top of this controller).
    """
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Return the DAL object named by request.args[0]; on an unknown name,
    flash an error and redirect to the index (redirect raises)."""
    if request.args and request.args[0] in databases:
        return eval_in_global_env(request.args[0])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))
def get_table(request):
    """Return (db, tablename) from request.args[0:2]; on an unknown table,
    flash an error and redirect to the index (redirect raises)."""
    db = get_database(request)
    if len(request.args) > 1 and request.args[1] in db.tables:
        return (db, request.args[1])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))
def get_query(request):
    """Evaluate request.vars.query into a DAL query; None on any failure."""
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None
def query_by_table_type(tablename, db, request=request):
    """Build a select-all query string for a table.

    Keyed tables (those with _primarykey) filter on their first key field
    (non-empty for string/text keys, >0 otherwise); ordinary tables use
    `id > 0`. Note the `request=request` default binds the controller's
    request object at definition time.
    """
    keyed = hasattr(db[tablename], '_primarykey')
    if keyed:
        firstkey = db[tablename][db[tablename]._primarykey[0]]
        cond = '>0'
        if firstkey.type in ['string', 'text']:
            cond = '!=""'
        qry = '%s.%s.%s%s' % (
            request.args[0], request.args[1], firstkey.name, cond)
    else:
        qry = '%s.%s.id>0' % tuple(request.args[:2])
    return qry
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List all databases/tables plus the sites with retrieval disabled."""
    return dict(databases=databases,
                disabled_sites=current.REDIS_CLIENT.smembers("disabled_retrieval"))
def disable_site():
    """Toggle submission retrieval for a site via the Redis
    "disabled_retrieval" set, then bounce back to the index.

    NOTE(review): `sitename` may be None if the request omits it, which
    would make the flash-message concatenation raise -- confirm callers
    always pass it.
    """
    sitename = request.vars.get("sitename", None)
    if request.vars.get("addremove", None) == "on":
        session.flash = "Retrieval enabled for " + sitename
        current.REDIS_CLIENT.srem("disabled_retrieval", sitename)
    else:
        session.flash = "Retrieval disabled for " + sitename
        current.REDIS_CLIENT.sadd("disabled_retrieval", sitename)
    redirect(URL("appadmin", "index"))
    # redirect() raises, so this return is unreachable (kept by convention).
    return dict()
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Render and process the insert form for the table in request.args."""
    (db, table) = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file belonging to the database in request.args[0]."""
    # The local `import os` the original carried was unused (and `os` is
    # already imported at module level).
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Export the records matching request.vars.query as a CSV attachment.

    Note: this action name intentionally shadows the stdlib `csv` module
    (web2py routes on the function name). The redundant local
    `import gluon.contenttype` was removed; the module is already imported
    at the top of this controller.
    """
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
        % tuple(request.vars.query.split('.')[:2])
    return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
    """Load CSV rows from the open file-like object `file` into `table`."""
    table.import_from_csv_file(file)
def select():
    """Query browser: list/update/delete records and import CSV.

    Rebuilds request.vars.query from the "table.field=value" shortcut if
    used, optionally applies a bulk update/delete, then pages through the
    matching rows. Python 2 only (`except Exception, e` syntax).
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    try:
        is_imap = db._uri.startswith("imap://")
    except (KeyError, AttributeError, TypeError):
        is_imap = False
    # Shortcut syntax "table.field=value" gets rewritten into a real query;
    # keyed tables accept any value, id-based tables only digits.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                                                   match.group('table'), match.group('field'),
                                                   match.group('value'))
    else:
        # No query supplied: reuse the last one stored in the session.
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    step = 100
    fields = []
    if is_imap:
        # IMAP "selects" are expensive; fetch far fewer rows per page.
        step = 3
    stop = start + step
    table = None
    rows = []
    # Clicking the same column header again flips the sort direction.
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(DIV(DIV(INPUT(_style='width:400px',
                              _name='query',
                              _id='query_inp',
                              _value=request.vars.query or '',
                              requires=IS_NOT_EMPTY(
                                  error_message=T("Cannot be empty"))),
                        LABEL("Query:", _for='query_inp'),
                        _class="input-field col offset-s3 s6"),
                    _class="row"),
                DIV(DIV(INPUT(_name='update_check',
                              _id='update_inp',
                              _type='checkbox',
                              value=False),
                        LABEL("Update", _for='update_inp'),
                        _class="col offset-s3 s6"),
                    _class="row"),
                DIV(DIV(INPUT(_style='width:400px',
                              _name='update_fields',
                              _id='update_field_inp',
                              _value=request.vars.update_fields or ''),
                        LABEL("Update Fields:", _for='update_fields_inp'),
                        _class="input-field col offset-s3 s6"),
                    _class="row"),
                DIV(DIV(INPUT(_name='delete_check',
                              _class='delete',
                              _id='delete_inp',
                              _type='checkbox',
                              value=False),
                        LABEL("Delete:", _for='delete_inp'),
                        _class="col offset-s3 s6"),
                    _class="row"),
                DIV(DIV(INPUT(_type='submit',
                              _value=T('submit')),
                        _class="input-field col offset-s3 s6"),
                    _class="row"),
                _action=URL(r=request, args=request.args),
                _class="row col s12")
    tb = None
    if form.accepts(request.vars, formname=None):
        # Remember which table the query targets (drives the CSV form).
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query, ignore_common_filters=True).count()
            if form.vars.update_check and form.vars.update_fields:
                db(query, ignore_common_filters=True).update(
                    **eval_in_global_env('dict(%s)' % form.vars.update_fields))
                response.flash = T('%s %%{row} updated', nrows)
            elif form.vars.delete_check:
                db(query, ignore_common_filters=True).delete()
                response.flash = T('%s %%{row} deleted', nrows)
            # Re-count after a possible bulk update/delete.
            nrows = db(query, ignore_common_filters=True).count()
            if is_imap:
                # Restrict IMAP selects to a few cheap header fields.
                fields = [db[table][name] for name in
                          ("id", "uid", "created", "to",
                           "sender", "subject")]
            if orderby:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop),
                    orderby=eval_in_global_env(orderby))
            else:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop))
        except Exception, e:
            import traceback
            tb = traceback.format_exc()
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    # begin handle upload csv
    csv_table = table or request.vars.table
    if csv_table:
        formcsv = FORM(DIV(str(T('or import from csv file')) + " ", _class="row"),
                       DIV(DIV(DIV(SPAN("File"),
                                   INPUT(_type='file', _name='csvfile'),
                                   _class="btn"),
                               DIV(INPUT(_class="file-path", _type="text"),
                                   _class="file-path-wrapper"),
                               _class="col offset-s4 s4 file-field input-field"),
                           _class="row"),
                       DIV(DIV(INPUT(_type='hidden', _value=csv_table, _name='table'),
                               INPUT(_type='submit', _value=T('import')),
                               _class="col offset-s4 s4"),
                           _class="row"),
                       _class="row center")
    else:
        formcsv = None
    if formcsv and formcsv.process().accepted:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    # end handle upload csv
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        step=step,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        formcsv=formcsv,
        tb=tb
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit or delete a single record.

    Keyed tables locate the record from primary-key request vars; ordinary
    tables use request.args(2) as the id. Missing records redirect back to
    the select view.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    # Disable the common filter so any record can be administered.
    db[table]._common_filter = None
    if keyed:
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[
                0]]).select().first()
    else:
        record = db(db[table].id == request.args(
            2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    if keyed:
        # Primary-key fields must not be editable once a record exists.
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(
        db[table], record, deletable=True, delete_label=T('Check to delete'),
        ignore_rw=ignore_rw and not keyed,
        linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                                                      f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('Done !!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Controller for the 'state' view; all data is rendered by the template."""
    return {}
def ccache():
    """web2py appadmin action: inspect and optionally clear RAM/disk caches.

    Renders a clear-cache form, then gathers statistics (entry counts,
    hit/miss ratios, ages, optionally object sizes via guppy) for the RAM
    cache, the disk cache, and their combined totals.  On GAE only the
    memcache-backed RAM cache exists.
    """
    if is_gae:
        form = FORM(
            P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
    else:
        cache.ram.initialize()
        cache.disk.initialize()
        form = FORM(
            P(TAG.BUTTON(
                T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
            P(TAG.BUTTON(
                T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
            P(TAG.BUTTON(
                T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
        )
    if form.accepts(request.vars, session):
        session.flash = ""
        if is_gae:
            if request.vars.yes:
                cache.ram.clear()
                session.flash += T("Cache Cleared")
        else:
            # "yes" clears both caches; "ram"/"disk" clear selectively.
            clear_ram = False
            clear_disk = False
            if request.vars.yes:
                clear_ram = clear_disk = True
            if request.vars.ram:
                clear_ram = True
            if request.vars.disk:
                clear_disk = True
            if clear_ram:
                cache.ram.clear()
                session.flash += T("Ram Cleared")
            if clear_disk:
                cache.disk.clear()
                session.flash += T("Disk Cleared")
        redirect(URL(r=request))
    # guppy is optional; without it we skip per-object size statistics.
    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False
    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker
    # Accumulator template shared by the ram/disk/total stat dicts.
    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    # copy.copy is shallow: give each dict its own 'keys' list.
    disk['keys'] = []
    total['keys'] = []

    def GetInHMS(seconds):
        # Convert a duration in seconds to an (hours, minutes, seconds) tuple.
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    if is_gae:
        gae_stats = cache.ram.client.get_stats()
        try:
            gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
                (gae_stats['hits'] + gae_stats['misses']))
        except ZeroDivisionError:
            gae_stats['ratio'] = T("?")
        gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
        total.update(gae_stats)
    else:
        # Storage values are either a stats dict (hit/miss counters) or a
        # (timestamp, cached_object) tuple for an actual cache entry.
        for key, value in cache.ram.storage.iteritems():
            if isinstance(value, dict):
                ram['hits'] = value['hit_total'] - value['misses']
                ram['misses'] = value['misses']
                try:
                    ram['ratio'] = ram['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    ram['ratio'] = 0
            else:
                if hp:
                    ram['bytes'] += hp.iso(value[1]).size
                    ram['objects'] += hp.iso(value[1]).count
                ram['entries'] += 1
                if value[0] < ram['oldest']:
                    ram['oldest'] = value[0]
                ram['keys'].append((key, GetInHMS(time.time() - value[0])))
        for key in cache.disk.storage:
            value = cache.disk.storage[key]
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))
    # Sum every additive stat into 'total'; ratio and oldest need
    # special handling below.  (Python 2: dict.keys() returns a list.)
    ram_keys = ram.keys() # ['hits', 'objects', 'ratio', 'entries', 'keys', 'oldest', 'bytes', 'misses']
    ram_keys.remove('ratio')
    ram_keys.remove('oldest')
    for key in ram_keys:
        total[key] = ram[key] + disk[key]
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    # Convert absolute timestamps to (h, m, s) ages for display.
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])

    def key_table(keys):
        # Render a (key, (h, m, s)) list as an HTML table for the view.
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))
    if not is_gae:
        ram['keys'] = key_table(ram['keys'])
        disk['keys'] = key_table(disk['keys'])
        total['keys'] = key_table(total['keys'])
    return dict(form=form, total=total,
                ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
    """Render *table* as a graphviz HTML-like node label.

    Produces one header row with the table name plus a row per field
    (name, type, pk/fk/length marker), wrapped in the "< ... >" syntax
    graphviz expects for HTML labels.
    """
    from gluon.html import TR, TD, TABLE, TAG

    def FONT(*args, **kwargs):
        return TAG.font(*args, **kwargs)

    def type_marker(field):
        # Third-column annotation: length for strings, pk/fk badges, else blank.
        ftype = field.type
        if not isinstance(ftype, str):
            return ' '
        if ftype == 'string':
            return field.length
        if ftype == 'id':
            return B('pk')
        if ftype.startswith('reference') or ftype.startswith('list:reference'):
            return B('fk')
        return ' '

    # This is horrible HTML, but it is the only dialect graphviz understands.
    pad = 4
    fg = "#000000"
    bg = "#FFFFFF"
    face = "Helvetica"
    face_bold = "Helvetica Bold"
    no_border = 0
    header = TR(TD(FONT(table, _face=face_bold, _color=bg),
                   _colspan=3, _cellpadding=pad,
                   _align="center", _bgcolor=fg))
    field_rows = [
        TR(TD(FONT(field.name, _color=fg, _face=face_bold),
              _align="left", _cellpadding=pad, _border=no_border),
           TD(FONT(field.type, _color=fg, _face=face),
              _align="left", _cellpadding=pad, _border=no_border),
           TD(FONT(type_marker(field), _color=fg, _face=face),
              _align="center", _cellpadding=pad, _border=no_border))
        for field in db[table]]
    markup = TABLE(header, *field_rows,
                   **dict(_bgcolor=bg, _border=1,
                          _cellborder=0, _cellspacing=0)).xml()
    return "< %s >" % markup
def bg_graph_model():
    """Draw the full database model as a graph using pygraphviz.

    Each table becomes a plaintext node labelled by table_template();
    tables sharing a ``_meta_graphmodel`` group are clustered together,
    and reference / list:reference fields become directed edges.

    Returns PNG bytes when no extension is requested; otherwise sets a
    download header and returns the graph in the requested format
    (``dot`` source, or anything graphviz can draw).
    """
    graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
    subgraphs = dict()
    for tablename in db.tables:
        if hasattr(db[tablename], '_meta_graphmodel'):
            meta_graphmodel = db[tablename]._meta_graphmodel
        else:
            # Default group: the application itself, light-grey cluster.
            meta_graphmodel = dict(group=request.application, color='#ECECEC')
        group = meta_graphmodel['group'].replace(' ', '')
        # `in` replaces dict.has_key(), which was removed in Python 3.
        if group not in subgraphs:
            subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
        subgraphs[group]['tables'].append(tablename)
        graph.add_node(tablename, name=tablename, shape='plaintext',
                       label=table_template(tablename))
    # Iterating the dict directly replaces iterkeys() (Python 3 compatible).
    for n, key in enumerate(subgraphs):
        graph.subgraph(nbunch=subgraphs[key]['tables'],
                       name='cluster%d' % n,
                       style='filled',
                       color=subgraphs[key]['meta']['color'],
                       label=subgraphs[key]['meta']['group'])
    for tablename in db.tables:
        for field in db[tablename]:
            f_type = field.type
            if isinstance(f_type, str) and (
                    f_type.startswith('reference') or
                    f_type.startswith('list:reference')):
                # 'reference tablename' / 'list:reference tablename.field'
                referenced_table = f_type.split()[1].split('.')[0]
                n1 = graph.get_node(tablename)
                n2 = graph.get_node(referenced_table)
                graph.add_edge(n1, n2, color="#4C4C4C", label='')
    graph.layout()
    if not request.args:
        response.headers['Content-Type'] = 'image/png'
        return graph.draw(format='png', prog='dot')
    else:
        response.headers['Content-Disposition'] = 'attachment;filename=graph.%s' % request.args(0)
        if request.args(0) == 'dot':
            return graph.string()
        else:
            return graph.draw(format=request.args(0), prog='dot')
def graph_model():
    """Expose the databases and the pygraphviz module (or None) to the view."""
    return {'databases': databases, 'pgv': pgv}
def manage():
    """web2py appadmin action: render a SQLFORM.smartgrid for a managed table.

    ``manager_action`` (module-level config) names the tables to manage.
    When request.args(0) == 'auth' the grid covers the auth tables with
    friendlier labels.  Non-.load requests get the menu data; .load
    requests get the grid for the table selected via request.args(1).
    """
    tables = manager_action['tables']
    if isinstance(tables[0], str):
        # Table names given as strings: resolve the DAL and the Table objects.
        db = manager_action.get('db', auth.db)
        db = globals()[db] if isinstance(db, str) else db
        tables = [db[table] for table in tables]
    if request.args(0) == 'auth':
        auth.table_user()._plural = T('Users')
        auth.table_group()._plural = T('Roles')
        auth.table_membership()._plural = T('Memberships')
        auth.table_permission()._plural = T('Permissions')
    if request.extension != 'load':
        # Top-level page: return heading plus tab names; grids load via ajax.
        return dict(heading=manager_action.get('heading',
                    T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
                    tablenames=[table._tablename for table in tables],
                    labels=[table._plural.title() for table in tables])
    table = tables[request.args(1, cast=int)]
    formname = '%s_grid' % table._tablename
    linked_tables = orderby = None
    if request.args(0) == 'auth':
        # Hide raw ids and relabel fk columns for the auth grids.
        auth.table_group()._id.readable = \
            auth.table_membership()._id.readable = \
            auth.table_permission()._id.readable = False
        auth.table_membership().user_id.label = T('User')
        auth.table_membership().group_id.label = T('Role')
        auth.table_permission().group_id.label = T('Role')
        auth.table_permission().name.label = T('Permission')
        if table == auth.table_user():
            linked_tables=[auth.settings.table_membership_name]
        elif table == auth.table_group():
            orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
        elif table == auth.table_permission():
            orderby = 'group_id'
    kwargs = dict(user_signature=True, maxtextlength=1000,
                  orderby=orderby, linked_tables=linked_tables)
    # Per-table overrides win over DEFAULT overrides win over the baseline.
    smartgrid_args = manager_action.get('smartgrid_args', {})
    kwargs.update(**smartgrid_args.get('DEFAULT', {}))
    kwargs.update(**smartgrid_args.get(table._tablename, {}))
    grid = SQLFORM.smartgrid(table,
                             args=request.args[:2],
                             formname=formname,
                             **kwargs)
    return grid
def hooks():
    """Render a nested list of all table-level callback hooks.

    For every database and table, collect the callables registered on the
    six web2py callback lists (_before_/_after_ x insert/update/delete),
    resolve each one's source location via ``inspect``, and return nested
    ``UL`` elements with edit links into the admin app.
    """
    import functools
    import inspect
    list_op = ['_%s_%s' % (h, m)
               for h in ['before', 'after']
               for m in ['insert', 'update', 'delete']]
    tables = []
    with_build_it = False
    for db_str in sorted(databases):
        db = databases[db_str]
        for t in db.tables:
            method_hooks = []
            for op in list_op:
                functions = []
                for f in getattr(db[t], op):
                    if hasattr(f, '__call__'):
                        try:
                            if isinstance(f, functools.partial):
                                # Unwrap partials so inspect sees the real function.
                                f = f.func
                            filename = inspect.getsourcefile(f)
                            details = {'funcname': f.__name__,
                                       'filename': filename[len(request.folder):] if request.folder in filename else None,
                                       'lineno': inspect.getsourcelines(f)[1]}
                            if details['filename']:  # Built in functions as delete_uploaded_files are not editable
                                details['url'] = URL(a='admin', c='default', f='edit', args=[request['application'], details['filename']], vars={'lineno': details['lineno']})
                            if details['filename'] or with_build_it:
                                functions.append(details)
                        # compiled app and windows build don't support code inspection;
                        # narrowed from a bare except so SystemExit/KeyboardInterrupt
                        # are no longer swallowed.
                        except Exception:
                            pass
                if functions:
                    method_hooks.append({'name': op, 'functions': functions})
            if method_hooks:
                tables.append({'name': "%s.%s" % (db_str, t), 'slug': IS_SLUG()("%s.%s" % (db_str, t))[0], 'method_hooks': method_hooks})
    # Render
    ul_main = UL(_class='nav nav-list')
    for t in tables:
        ul_main.append(A(t['name'], _onclick="collapse('a_%s')" % t['slug']))
        ul_t = UL(_class='nav nav-list', _id="a_%s" % t['slug'], _style='display:none')
        for op in t['method_hooks']:
            ul_t.append(LI(op['name']))
            ul_t.append(UL([LI(A(f['funcname'], _class="editor_filelink", _href=f['url'] if 'url' in f else None, **{'_data-lineno': f['lineno'] - 1})) for f in op['functions']]))
        ul_main.append(ul_t)
    return ul_main
|
{
"content_hash": "d1d9a17caade4890683cdfc39912dfd5",
"timestamp": "",
"source": "github",
"line_count": 764,
"max_line_length": 175,
"avg_line_length": 37.8848167539267,
"alnum_prop": 0.5107103372028745,
"repo_name": "stopstalk/stopstalk-deployment",
"id": "aee67c957c89b9a23d347f9a50150c6bf22a3e52",
"size": "28969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/appadmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "83271"
},
{
"name": "Cython",
"bytes": "123663"
},
{
"name": "HTML",
"bytes": "190175"
},
{
"name": "JavaScript",
"bytes": "681456"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Makefile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "7648306"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "6187"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from django.db import transaction
from django.core.management.base import BaseCommand
from onadata.apps.fieldsight.models import Site
from onadata.apps.fsforms.models import FieldSightXF, FInstance
from onadata.apps.viewer.models.parsed_instance import update_mongo_instance
class Command(BaseCommand):
    # One-off data-repair command: some FInstance rows reference a
    # FieldSightXF belonging to a different site; repoint them at the
    # matching staged form of the correct site and sync the change into
    # the Mongo copy of the parsed instance.
    help = 'Deploy Stages'

    def handle(self, *args, **options):
        """Fix FInstance.site_fxf links for all sites of one organization.

        NOTE(review): organization_id is hard-coded to 13 — this command
        appears to be a targeted one-time migration; confirm before reuse.
        """
        organization_id = 13
        # project_id = 30
        sites = Site.objects.filter(project__organization__id=organization_id).values_list('id', flat=True)
        for site_id in sites:
            # self.stdout.write('Operating in site '+str(site_id))
            # All fixes for one site succeed or roll back together.
            with transaction.atomic():
                finstances = FInstance.objects.filter(site_id=site_id, site_fxf_id__isnull=False)
                for fi in finstances:
                    site_fsxf = fi.site_fxf
                    if site_fsxf.site.id != site_id:
                        # Linked form belongs to another site: find this
                        # site's staged form for the same project form.
                        correct_form = FieldSightXF.objects.get(site__id=site_id, is_staged=True, fsform=fi.project_fxf, is_deleted=False)
                        fi.site_fxf = correct_form
                        fi.save()
                        # Mirror the corrected form id into Mongo.
                        # NOTE(review): 'fs_uuid' is set to the numeric form id
                        # here — confirm that is the expected Mongo key/value.
                        parsed_instance = fi.instance.parsed_instance
                        d = parsed_instance.to_dict_for_mongo()
                        d.update({'fs_uuid': correct_form.id})
                        update_mongo_instance(d)
        self.stdout.write('Successfully corrected form')
|
{
"content_hash": "495595be42a521f3ae8b81f6ae1a124a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 138,
"avg_line_length": 48.3,
"alnum_prop": 0.598343685300207,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "be3317dc268c07716cecd424d6270f5700a79add",
"size": "1449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/apps/fsforms/management/commands/set_correct_fxf_in_finstance.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('lib')
from ikalog.utils.ikamatcher2.reference import Numpy_uint8, Numpy_uint8_fast
from ikalog.utils.ikamatcher2.arm_neon import NEON
from lib.ikamatcher2_kernel_hal import HAL
import numpy as np
import time
def generate_img(size=1024):
    """Return a random binary image as a uint8 ndarray of shape (size, size).

    Each pixel is independently 0 or 1.  ``size`` defaults to 1024 so
    existing zero-argument callers keep the original behaviour.
    """
    bits = np.random.randint(2, size=(size, size))
    # Cast to uint8: the matcher kernels expect byte-valued masks.
    return np.array(bits, dtype=np.uint8)
def test(kernel):
    """Benchmark one IkaMatcher2 kernel: 100 encodes, then 100 and/or popcounts."""
    mask_img = generate_img()
    probe_img = generate_img()
    matcher = kernel(1024, 1024)
    matcher.load_mask(mask_img)
    started = time.time()
    for _ in range(100):
        encoded = matcher.encode(probe_img)
    encoded_at = time.time()
    for _ in range(100):
        matcher.logical_and_popcnt(encoded)
        matcher.logical_or_popcnt(encoded)
    finished = time.time()
    print('encode %0.9fs logical_and_popcnt %0.9fs total %0.9fs %s' % (
        encoded_at - started, finished - encoded_at, finished - started, kernel))
# Run the benchmark for every available kernel implementation back to back.
test(Numpy_uint8)
test(Numpy_uint8_fast)
test(NEON)
test(HAL)
|
{
"content_hash": "8da723bdf3925c341d28d8a62ca89bbf",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 106,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.6645021645021645,
"repo_name": "hasegaw/IkaLog",
"id": "4216fa23e8881f26ac882165c5e58fa5e111dba2",
"size": "924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/bench_1024mat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37511"
},
{
"name": "Makefile",
"bytes": "1057"
},
{
"name": "Python",
"bytes": "749095"
},
{
"name": "Shell",
"bytes": "3312"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from ..highlight import PLAIN_CODE
from ..models import Snippet
class SnippetAPITestCase(TestCase):
    """Tests for the snippet-creation API endpoint.

    Covers input validation (empty/whitespace content, bad lexer),
    the three response formats (default quoted URL, 'url', 'json'),
    expiration choices, and lexer guessing from a filename.
    """

    def setUp(self):
        self.api_url = reverse('dpaste_api_create_snippet')
        self.client = Client()

    def test_empty(self):
        """
        The browser sent a content field but with no data.
        """
        data = {}
        # No data
        response = self.client.post(self.api_url, {})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)
        # No content
        data['content'] = ''
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)
        # Just some spaces
        data['content'] = '   '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)
        # Linebreaks or tabs only are not valid either
        data['content'] = '\n\t '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

    def test_default_format(self):
        """
        A valid snippet, contains Unicode, tabs, spaces, linebreaks etc.
        """
        data = {'content': u"Hello Wörld.\n\tGood Bye"}
        response = self.client.post(self.api_url, data)
        content = response.content.decode('utf-8')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        # The response is a URL with quotes
        self.assertTrue(content.startswith('"'))
        self.assertTrue(content.endswith('"'))
        # The URL returned is the absolute url to the snippet.
        # If we call that url our snippet should be in the page content.
        response = self.client.get(content[1:-1])
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, data['content'])

    def test_new_url_format(self):
        """
        The 'new' url format is just the link with a linebreak.
        """
        data = {'content': u"Hello Wörld.\n\tGood Bye", 'format': 'url'}
        response = self.client.post(self.api_url, data)
        content = response.content.decode('utf-8')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        # Response is just the link starting with http(s) and ends with a linebreak
        self.assertTrue(content.startswith('http'))
        self.assertTrue(content.endswith('\n'))

    def test_json_format(self):
        """
        The 'new' url format is just the link with a linebreak.
        """
        data = {
            'content': u"Hello Wörld.\n\tGood Bye",
            'format': 'json',
            'lexer': 'haskell'
        }
        response = self.client.post(self.api_url, data)
        content = response.content.decode('utf-8')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        from json import loads
        json_data = loads(content)
        # Response is valid json, containing, content, lexer and url
        self.assertEqual(json_data['content'], data['content'])
        self.assertEqual(json_data['lexer'], data['lexer'])
        self.assertTrue(json_data['url'].startswith('http'))

    def test_invalid_format(self):
        """
        A broken format will not raise an error, just use the default
        format.
        """
        data = {
            'content': u"Hello Wörld.\n\tGood Bye",
            'format': 'broken-format',
            'lexer': 'haskell'
        }
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)

    def test_invalid_lexer(self):
        """
        A broken lexer will fail loudly.
        """
        data = {
            'content': u"Hello Wörld.\n\tGood Bye",
            'lexer': 'foobar'
        }
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

    def test_expire_choices_none_given(self):
        # No expire choice given will set a default expiration of one month
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertTrue(Snippet.objects.all()[0].expires)

    def test_expire_choices_invalid_given(self):
        # A expire choice that does not exist returns a BadRequest
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 'foobar'})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

    """
    Test all the different expiration choices. We dont actually test
    the deletion, since thats handled in the `test_snippet` section.
    """
    def test_valid_expiration_choices_onetime(self):
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 'onetime'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertEqual(Snippet.objects.all()[0].expire_type, Snippet.EXPIRE_ONETIME)

    def test_valid_expiration_choices_never(self):
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 'never'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertEqual(Snippet.objects.all()[0].expire_type, Snippet.EXPIRE_KEEP)

    def test_valid_expiration_choices_hour(self):
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 3600})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertTrue(Snippet.objects.all()[0].expires)

    def test_valid_expiration_choices_week(self):
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 3600 * 24 * 7})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertTrue(Snippet.objects.all()[0].expires)

    def test_valid_expiration_choices_month(self):
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye", 'expires': 3600 * 24 * 30})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertTrue(Snippet.objects.all()[0].expires)

    def test_filename_not_given(self):
        """
        No lexer and no filename given returns a BadRequest.
        """
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye",
            'lexer': '',
            'filename': ''
        })
        self.assertEqual(response.status_code, 400)

    def test_filename_given(self):
        """
        No lexer and a Python filename will set a 'python' lexer.
        """
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye",
            'lexer': '',
            'filename': 'helloworld.py'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertEqual(Snippet.objects.all()[0].lexer, 'python')

    def test_awkward_filename_given(self):
        """
        A unknown filename will create a 'plain' code snippet.
        """
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye",
            'lexer': '',
            'filename': 'helloworld.helloworld'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertEqual(Snippet.objects.all()[0].lexer, PLAIN_CODE)

    def test_filename_and_lexer_given(self):
        """
        A given lexer will overwrite whats the filename guessing.
        """
        response = self.client.post(self.api_url, {
            'content': u"Hello Wörld.\n\tGood Bye",
            'lexer': 'php',
            'filename': 'helloworld.py'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)
        self.assertEqual(Snippet.objects.all()[0].lexer, 'php')
|
{
"content_hash": "02434e48f98fd61c2e9787023db96c33",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 86,
"avg_line_length": 37.01673640167364,
"alnum_prop": 0.6070984514524698,
"repo_name": "SanketDG/dpaste",
"id": "f47ffa974408794927fd4953c1e3c231741e18dd",
"size": "8890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpaste/tests/test_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7499"
},
{
"name": "HTML",
"bytes": "19216"
},
{
"name": "Nginx",
"bytes": "2813"
},
{
"name": "Python",
"bytes": "79520"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
import logging
import string
import pickle
from urllib.parse import urlparse
import time
import redis
import requests
from bs4 import BeautifulSoup
from .. import config
from .. import _json
from ..services.graphite import post_metric
logger = logging.getLogger(__name__)
db = redis.from_url(config.config['REDIS_URI'])
def check_status(session, status_url=None):
    """Fetch the APC status page, logging in again if the session is stale.

    :param session: a requests session (may carry cached cookies)
    :param status_url: cached status-page path from a prior login, or None
    :returns: tuple of (status_page response, urlparse()d final URL)

    With a cached ``status_url``, requests it directly; if the server
    redirects elsewhere the session has expired, so cookies and the cached
    path are cleared and the function recurses to perform a fresh login.
    """
    if status_url:
        logger.debug(f'Attempting prior URI: {status_url}')
        status_page = session.get(f'{config.config["APC_ENDPOINT"]}{status_url}')
        status_page_url = urlparse(status_page.url)
        if status_url != status_page_url.path:
            # Redirected away from the cached path: session expired.
            # logger.warning replaces the deprecated logger.warn alias.
            logger.warning(f'{status_url} != {status_page_url.path}. Clearing cookies and URI.')
            db.delete('apc_session')
            session.cookies.clear()
            return check_status(session, None)
        logger.debug(f'Got {status_page.url}')
        return status_page, status_page_url
    # No cached path: scrape the login form and authenticate from scratch.
    logger.info('config.config: {}'.format(config.config))
    login_page = session.get(config.config["APC_ENDPOINT"])
    soup = BeautifulSoup(login_page.text, "lxml")
    post_endpoint = '{APC_ENDPOINT}{}'.format(
        soup.find('form').attrs['action'], **config.config)
    logger.debug('Derived login point to be {}'.format(post_endpoint))
    data = {
        'prefLanguage': '01000000',
        'login_username': config.config['APC_USERNAME'],
        'login_password': config.config['APC_PASSWORD'],
        'submit': 'Log On',
    }
    status_page = session.post(post_endpoint, data=data)
    status_page_url = urlparse(status_page.url)
    return status_page, status_page_url
def update_apc_status():
    """Scrape the APC UPS web UI, publish metrics, and cache status in Redis.

    Restores a pickled requests session from Redis when available, fetches
    the status page via check_status(), parses voltage / capacity /
    battery-time / per-outlet load out of the HTML, posts Graphite metrics,
    and stores a JSON status blob under the 'apc_status' Redis hash.
    """
    prior_session = db.get('apc_session')
    prior_session_url = db.get('apc_path')
    if prior_session_url:
        prior_session_url = prior_session_url.decode('utf8')
    if prior_session:
        # Cookies were pickled on a previous run; reuse them.
        prior_session = pickle.loads(prior_session)
    else:
        prior_session = {}
    with requests.session() as session:
        session.cookies.update(prior_session)
        status_page, status_page_url = check_status(session, prior_session_url)
        # Persist the (possibly refreshed) session for the next run.
        db.set('apc_path', status_page_url.path)
        db.set('apc_session', pickle.dumps(session.cookies))
    soup = BeautifulSoup(status_page.text, "lxml")
    # The page lays out three td#events cells: UPS status, outlet status, log.
    ups_status, outlet_status, event_log = soup.find_all("td", {'id': 'events'})
    _, data, _ = tuple(ups_status.find('table').find('table').find('table').children)
    *_, capacity, life_mins_raw = data.find_all('td')
    vac = ups_status.find('span', {'id': 'langVAC'}).previous_sibling.strip()
    if ' ' in vac:
        # Non-numeric placeholder (no reading): treat as 0 volts.
        vac = 0
    vac = float(vac)
    battery_life_status = ups_status.find(
        'span', {'id': 'langBatteryLifeStatus'}).parent.parent.find_all(
        'td')[-1].text.strip()
    # Capacity is rendered like "100%" — drop the trailing percent sign.
    capacity = float(capacity.text.strip()[:-1].strip())
    life_mins_raw = life_mins_raw.text.strip()
    # Split "12.5min"-style text into the numeric part and the unit suffix.
    life_value = []
    life_span_metric = []
    target = life_value
    for char in life_mins_raw:
        if char not in (string.digits + '.'):
            target = life_span_metric
        target.append(char)
    life_value = float(''.join(life_value))
    life_span_metric = ''.join(life_span_metric)
    outlet_loads = {}
    for outlet in outlet_status.find_all('td', {'class': 'dataName'}):
        outlet_name = outlet.text
        # NOTE(review): this sets an HTML attribute on the BeautifulSoup tag,
        # not a dict entry — probably intended outlet_loads[outlet_name];
        # left as-is because "fixing" it would change downstream behaviour.
        outlet_status[outlet_name] = None
        queue = []
        for _ in range(4):
            outlet = outlet.next_sibling
        # The load cell embeds its value in a parseFloat(...) JS snippet.
        javascript_data = outlet.text.strip()
        value = javascript_data.index('parseFloat(')
        value += len('parseFloat(')
        value = javascript_data[value:]
        for char in value:
            if char not in (string.digits + '.'):
                break
            queue.append(char)
        if queue:
            outlet_loads[outlet_name] = {
                'value': float(''.join(queue)),
                'unit': 'watts'
            }
    # Derive a coarse health status from line voltage and battery capacity.
    status = 'ok'
    if vac < 110:
        status = 'warning'
    if capacity < 50:
        status = 'critical'
    if capacity < 30:
        status = 'failure_imminent'
    post_metric('apc.vac.volts', vac)
    post_metric('apc.capacity_left.percentage', capacity)
    for key, value in outlet_loads.items():
        key = key.lower().replace(' ', '_')
        post_metric(f'apc.outlets.{key}.watts', value['value'])
    ups_status = {
        'vac': vac,
        'on_battery': vac < config.config['LOWEST_GRID_VAC_ALLOWED'],
        'status': status,
        'battery_status': battery_life_status,
        'capacity': capacity,
        'time_left': {
            'value': life_value,
            'unit': life_span_metric
        },
        'load': outlet_loads
    }
    db.hmset('apc_status', {
        'data': _json.dumps(ups_status),
        'timestamp': time.time()
    })
|
{
"content_hash": "8ec9b3f1003cc8423f9ed8d1ac2d5bef",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 93,
"avg_line_length": 33.7891156462585,
"alnum_prop": 0.5739883229313469,
"repo_name": "benjolitz/powerscout",
"id": "bcc14d74a6777018e8b44e46175633bf8e119700",
"size": "4967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerscout/services/apc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "488"
},
{
"name": "Python",
"bytes": "26629"
}
],
"symlink_target": ""
}
|
import sys
def printf(*s) :
'print + sys.stdout.flush()'
for e in s[:-1] :
print e,
print s[-1]
sys.stdout.flush()
def enterConfirm_prompt(enterMsg) :
stopi = False
while not stopi :
print "====\n At any time you can quit by entering 'quit'\n===="
vali = raw_input(enterMsg)
if vali.lower() == 'quit' :
vali = None
stopi = True
else :
print "You've entered:\n\t%s" % vali
valj = confirm_prompt("")
if valj == 'yes' :
stopi = True
if valj == 'quit' :
vali = None
stopi = True
return vali
def confirm_prompt(preMsg):
    """Ask for "yes"/"no"/"quit" until one is given; return it lowercased."""
    while True:
        answer = raw_input('%splease confirm ("yes", "no", "quit"): ' % preMsg).lower()
        if answer in ('yes', 'no', 'quit'):
            return answer
|
{
"content_hash": "683ded047d57a744ab0ee78623a43bf0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 21.62857142857143,
"alnum_prop": 0.582562747688243,
"repo_name": "logan169/pyGeno",
"id": "b3d940e04bc01ff7abd91c443edef77a6e17b696",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/bloody",
"path": "pyGeno/tools/io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7253"
},
{
"name": "Makefile",
"bytes": "7057"
},
{
"name": "Python",
"bytes": "158276"
}
],
"symlink_target": ""
}
|
"""
Celery tasks used by the World Vision Sri Lanka Nutrition project
Two tasks are executed daily:
* sync_org_units: Synchronize DHIS2 Organization Units with local data
* sync_child_entities: Create new child cases in CommCare for nutrition
tracking, and associate CommCare child cases with DHIS2 child entities
and enroll them in the Pediatric Nutrition Assessment and Underlying Risk
Assessment programs.
Creating program events for Nutrition Assessment and Risk Assessment programs
is done using FormRepeater payload generators. See payload_generators.py for
details.
"""
from datetime import date, timedelta
import logging
import uuid
from xml.etree import ElementTree
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.xml import V2
from celery.schedules import crontab
from celery.task import periodic_task
from corehq.apps.es import CaseES, UserES
from corehq.apps.hqcase.utils import submit_case_blocks, get_case_by_identifier
from corehq.apps.users.models import CommCareUser
from custom.dhis2.const import CCHQ_CASE_ID, NUTRITION_ASSESSMENT_PROGRAM_FIELDS, ORG_UNIT_FIXTURES, CASE_TYPE, \
TRACKED_ENTITY, CASE_NAME
from custom.dhis2.models import Dhis2Api, Dhis2OrgUnit, Dhis2Settings, FixtureManager, JsonApiError, \
Dhis2ApiQueryError
logger = logging.getLogger(__name__)
def push_case(case, dhis2_api):
    """
    Create a DHIS2 tracked entity instance from the form's case and enroll in
    the nutrition assessment programme.

    :param case: a CommCare case (child_gmp); skipped unless dhis_org_id is set
    :param dhis2_api: a Dhis2Api client already bound to the target server

    Errors talking to DHIS2 are logged and cause an early return; the case
    is only marked as pushed (external_id set) after a successful enrolment.
    """
    if getattr(case, 'dhis_org_id', None):
        ou_id = case.dhis_org_id  # App sets this case property from user custom data
    else:
        # This is an old case, or org unit is not set. Skip it
        return
    # Map CCHQ case properties to DHIS2 attribute names, dropping unset ones.
    program_data = {dhis2_attr: case[cchq_attr]
                    for cchq_attr, dhis2_attr in NUTRITION_ASSESSMENT_PROGRAM_FIELDS.iteritems()
                    if getattr(case, cchq_attr, None)}
    if 'Gender' in program_data:
        # Gender is an optionSet. Options are "Male", "Female" and "Undefined"
        # cf. http://dhis1.internal.commcarehq.org:8080/dhis/api/optionSets/wG0c8ReYyNz.json
        program_data['Gender'] = program_data['Gender'].capitalize()  # "male" -> "Male"
    else:
        program_data['Gender'] = 'Undefined'
    try:
        # Search for CCHQ Case ID in case previous attempt to register failed.
        instance = next(dhis2_api.gen_instances_with_equals(TRACKED_ENTITY, CCHQ_CASE_ID, case['_id']))
        instance_id = instance['Instance']
    except StopIteration:
        # Create a DHIS2 tracked entity instance
        instance = {CCHQ_CASE_ID: case['_id']}
        instance.update(program_data)
        try:
            instance_id = dhis2_api.add_te_inst(TRACKED_ENTITY, ou_id, instance)
        except (JsonApiError, Dhis2ApiQueryError) as err:
            logger.error('Failed to create DHIS2 entity from CCHQ case "%s". DHIS2 server error: %s',
                         case['_id'], err)
            return
    # Enroll in Pediatric Nutrition Assessment
    date_of_visit = case['date_of_visit'] if getattr(case, 'date_of_visit', None) else date.today()
    try:
        response = dhis2_api.enroll_in(instance_id, 'Paediatric Nutrition Assessment', date_of_visit, program_data)
    except (JsonApiError, Dhis2ApiQueryError) as err:
        logger.error('Failed to push CCHQ case "%s" to DHIS2 program "%s". DHIS2 server error: %s',
                     case['_id'], 'Paediatric Nutrition Assessment', err)
        return
    if response['status'] != 'SUCCESS':
        logger.error('Failed to push CCHQ case "%s" to DHIS2 program "%s". DHIS2 API error: %s',
                     case['_id'], 'Paediatric Nutrition Assessment', response)
        return
    # Set external_id in CCHQ to flag the case as pushed.
    update_case_external_id(case, instance_id)
def push_child_entities(settings, children):
    """
    Register child entities in DHIS2 and enroll them in the Pediatric
    Nutrition Assessment program.

    :param settings: Dhis2Settings providing server credentials
    :param children: child_gmp cases where external_id is not set

    Each case is pushed individually via push_case(), which sets
    external_id on success so already-pushed cases are not re-sent.
    """
    api = Dhis2Api(settings.dhis2['host'],
                   settings.dhis2['username'],
                   settings.dhis2['password'],
                   settings.dhis2['top_org_unit_name'])
    for child_case in children:
        push_case(child_case, api)
def pull_child_entities(settings, dhis2_children):
    """
    Create new child cases for nutrition tracking in CommCare.

    Sets external_id on new child cases, and CCHQ Case ID on DHIS2
    tracked entity instances. (CCHQ Case ID is initially unset because the
    case is new and does not exist in CommCare.)

    :param settings: DHIS2 settings, incl. relevant domain
    :param dhis2_children: A list of dictionaries of TRACKED_ENTITY (i.e.
                           "Child") tracked entities from the DHIS2 API where
                           CCHQ Case ID is unset

    This fulfills the third requirement of `DHIS2 Integration`_.

    .. _DHIS2 Integration: https://www.dropbox.com/s/8djk1vh797t6cmt/WV Sri Lanka Detailed Requirements.docx
    """
    api = Dhis2Api(
        settings.dhis2['host'],
        settings.dhis2['username'],
        settings.dhis2['password'],
        settings.dhis2['top_org_unit_name'],
    )
    # Sync children one at a time. Slower, but a failing DHIS2 API call
    # only affects a single child, not the whole batch.
    for child in dhis2_children:
        # ("Instance" is DHIS2's friendly name for "id")
        logger.info('DHIS2: Syncing DHIS2 child "%s"', child['Instance'])
        # Look the case up by external_id first, in case it already exists.
        existing = get_case_by_identifier(settings.domain, child['Instance'])
        if not existing:
            owner = get_user_by_org_unit(settings.domain, child['Org unit'],
                                         settings.dhis2['top_org_unit_name'])
            if not owner:
                # No user is assigned to this or any higher organisation unit
                logger.error('DHIS2: Unable to import DHIS2 instance "%s"; there is no user at org unit "%s" or '
                             'above to assign the case to.', child['Instance'], child['Org unit'])
                continue
            case_id = create_case_from_dhis2(child, settings.domain, owner)
        else:
            case_id = existing['case_id']
        # Write the CommCare case ID back onto the DHIS2 instance so it is
        # not imported a second time.
        child[CCHQ_CASE_ID] = case_id
        api.update_te_inst(child)
def get_user_by_org_unit(domain, org_unit_id, top_org_unit_name):
    """
    Look up a mobile user by a DHIS2 organisation unit ID, walking up the
    org unit hierarchy until a user is found or the top org unit is hit.
    """
    hits = (UserES()
            .domain(domain)
            .mobile_users()
            # .term('user_data.dhis_org_id', org_unit_id)
            .run()).hits
    # cf. http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-dynamic-mapping.html
    # If/when we upgrade elasticsearch, we can filter on dynamic mappings and
    # uncomment the ".term" line above. Until then, check it ourselves ...
    matches = [doc for doc in hits
               if doc['user_data'].get('dhis_org_id') == org_unit_id]
    if matches:
        return CommCareUser.wrap(matches[0])
    # No user is assigned to this organisation unit (i.e. region or facility).
    # Try its parent org unit.
    Dhis2OrgUnit.objects = FixtureManager(Dhis2OrgUnit, domain, ORG_UNIT_FIXTURES)
    org_units = dict((ou.id, ou) for ou in Dhis2OrgUnit.objects.all())
    unit = org_units.get(org_unit_id)
    if unit is not None and unit.name != top_org_unit_name and unit.parent_id:
        return get_user_by_org_unit(domain, unit.parent_id, top_org_unit_name)
    # We don't know that org unit ID, or we're at the top for this project, or we're at the top of DHIS2
    return None
def create_case_from_dhis2(dhis2_child, domain, user):
    """
    Create a new case using the data pulled from DHIS2

    :param dhis2_child: TRACKED_ENTITY (i.e. "Child") from DHIS2
    :param domain: (str) The name of the domain
    :param user: (Document) The owner of the new case

    :return: New case ID
    """
    # Map DHIS2 attribute names onto CommCare case property names.
    update = dict((case_field, dhis2_child[dhis2_field])
                  for case_field, dhis2_field in NUTRITION_ASSESSMENT_PROGRAM_FIELDS.items())
    update['dhis_org_id'] = dhis2_child['Org unit']
    # Do the inverse of push_case() to 'Gender' / 'child_gender'
    if 'child_gender' in update:
        if update['child_gender'] == 'Undefined':
            del update['child_gender']
        else:
            update['child_gender'] = update['child_gender'].lower()
    new_case_id = uuid.uuid4().hex
    caseblock = CaseBlock(
        create=True,
        case_id=new_case_id,
        owner_id=user.userID,
        user_id=user.userID,
        version=V2,
        case_type=CASE_TYPE,
        case_name=update[CASE_NAME] if CASE_NAME else '',
        external_id=dhis2_child['Instance'],
        update=update,
    )
    submit_case_blocks(ElementTree.tostring(caseblock.as_xml()), domain)
    return new_case_id
def update_case_external_id(case, external_id):
    """
    Update the external_id of a case

    :param case: the case document to update
    :param external_id: new value for the case's external_id
    """
    xml = ElementTree.tostring(
        CaseBlock(
            create=False,
            case_id=case['_id'],
            version=V2,
            external_id=external_id,
        ).as_xml())
    submit_case_blocks(xml, case['domain'])
def get_children_only_theirs(settings):
    """
    Generate child tracked-entity instances that are enrolled in Paediatric
    Nutrition Assessment and don't have CCHQ Case ID set.

    .. Note:: Despite the old wording, this is a generator, not a list.
    """
    dhis2_api = Dhis2Api(settings.dhis2['host'], settings.dhis2['username'], settings.dhis2['password'],
                         settings.dhis2['top_org_unit_name'])
    for inst in dhis2_api.gen_instances_in_program('Paediatric Nutrition Assessment'):
        # Only yield instances not yet linked to a CommCare case.
        if not inst.get(CCHQ_CASE_ID):
            yield inst
def gen_children_only_ours(domain):
    """
    Yield child_gmp cases (wrapped as CommCareCase) where external_id is
    not set.
    """
    result = (CaseES()
              .domain(domain)
              .case_type(CASE_TYPE)
              .empty('external_id')
              .run())
    if result.total:
        for doc in result.hits:
            yield CommCareCase.wrap(doc)
# Check for new cases on DHIS2 every 6 hours
@periodic_task(run_every=timedelta(hours=6), queue='background_queue')
def fetch_cases():
    """
    Import new child cases from DHIS2 for nutrition tracking.

    Runs once per domain that has DHIS2 integration enabled.
    """
    for settings in Dhis2Settings.all_enabled():
        logger.info('DHIS2: Fetching cases for domain "%s" from "%s"', settings.domain, settings.dhis2['host'])
        # Pull only instances that have no CCHQ Case ID yet.
        children = get_children_only_theirs(settings)
        pull_child_entities(settings, children)
# There is a large number of org units, but the lookup table is not deployed to handsets.
@periodic_task(run_every=crontab(minute=3, hour=3), queue='background_queue')
def fetch_org_units():
    """
    Synchronize DHIS2 Organization Units with local data.

    This data is used to fulfill the first requirement of
    `DHIS2 Integration`_: Allow mobile users in CommCareHQ to be
    associated with a particular DHIS2 Organisation Unit, so that when
    they create cases their new cases can be associated with that area
    or facility.

    .. _DHIS2 Integration: https://www.dropbox.com/s/8djk1vh797t6cmt/WV Sri Lanka Detailed Requirements.docx
    """
    for settings in Dhis2Settings.all_enabled():
        logger.info('DHIS2: Fetching org units for domain "%s" with "%s"', settings.domain, settings.dhis2['host'])
        dhis2_api = Dhis2Api(settings.dhis2['host'], settings.dhis2['username'], settings.dhis2['password'],
                             settings.dhis2['top_org_unit_name'])
        Dhis2OrgUnit.objects = FixtureManager(Dhis2OrgUnit, settings.domain, ORG_UNIT_FIXTURES)
        # Snapshot of the org units we already have locally, keyed by id.
        our_org_units = {ou.id: ou for ou in Dhis2OrgUnit.objects.all()}
        # Filled in below; doubles as the set of ids currently on DHIS2.
        their_org_units = {}
        # Add new org units
        for ou in dhis2_api.gen_org_units():
            their_org_units[ou['id']] = ou
            if ou['id'] not in our_org_units:
                logger.info('DHIS2: Adding org unit "%s"', ou['name'])
                org_unit = Dhis2OrgUnit(id=ou['id'], name=ou['name'],
                                        parent_id=dhis2_api.get_org_unit_parent_id(ou['id']))
                org_unit.save()
        # Delete former org units
        for id_, ou in our_org_units.iteritems():
            if id_ not in their_org_units:
                logger.info('DHIS2: Deleting org unit "%s"', ou.name)
                ou.delete()
|
{
"content_hash": "ba6d4e6202003f1e78576729809b883d",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 115,
"avg_line_length": 42.20723684210526,
"alnum_prop": 0.6487413295923934,
"repo_name": "puttarajubr/commcare-hq",
"id": "dc5b974aeb4e9c64e68f6083ed94d8435e4f3e5d",
"size": "12831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/dhis2/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
"""Setup script for IPython.
Under Posix environments it works like a typical setup.py script.
Under Windows, the command sdist is not supported, since IPython
requires utilities which are not available under Windows."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
# This check is also made in IPython/__init__, don't forget to update both when
# changing Python version requirements.
#~ if sys.version[0:3] < '2.6':
#~ error = """\
#~ ERROR: 'IPython requires Python Version 2.6 or above.'
#~ Exiting."""
#~ print >> sys.stderr, error
#~ sys.exit(1)
# True when running under Python 3; gates the 2to3/setuptools paths below.
PY3 = (sys.version_info[0] >= 3)
# At least we're on the python version we need, move on.
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
# Stdlib imports
import os
import shutil
from glob import glob
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
from distutils.core import setup
# On Python 3, we need distribute (new setuptools) to do the 2to3 conversion
if PY3:
import setuptools
# Our own imports
from setupbase import target_update
from setupbase import (
setup_args,
find_packages,
find_package_data,
find_scripts,
find_data_files,
check_for_dependencies,
record_commit_info,
)
from setupext import setupext
isfile = os.path.isfile
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
def cleanup():
    """Clean up the junk left around by the build process.

    Removes the ``ipython.egg-info`` directory (or, failing that, file),
    except for ``develop`` installs, which need it. Removal is best-effort.
    """
    if "develop" not in sys.argv:
        try:
            shutil.rmtree('ipython.egg-info')
        except OSError:
            # Not a directory (or missing) -- try removing it as a plain file.
            # Catching OSError instead of the original bare ``except:`` so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            try:
                os.unlink('ipython.egg-info')
            except OSError:
                pass
#-------------------------------------------------------------------------------
# Handle OS specific things
#-------------------------------------------------------------------------------
# Normalize os.name into the two platforms this script supports.
if os.name == 'posix':
    os_name = 'posix'
elif os.name in ['nt','dos']:
    os_name = 'windows'
else:
    # Anything that is neither posix nor Windows-like is unsupported.
    print('Unsupported operating system:',os.name)
    sys.exit(1)
# Under Windows, 'sdist' has not been supported. Now that the docs build with
# Sphinx it might work, but let's not turn it on until someone confirms that it
# actually works.
if os_name == 'windows' and 'sdist' in sys.argv:
    print('The sdist command is not available under Windows. Exiting.')
    sys.exit(1)
#-------------------------------------------------------------------------------
# Things related to the IPython documentation
#-------------------------------------------------------------------------------
# update the manuals when building a source dist
if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
    import textwrap
    # NOTE(review): textwrap is imported here but never used in this branch.
    # List of things to be updated. Each entry is a triplet of args for
    # target_update(): (target, dependencies, command to rebuild target).
    to_update = [
        # FIXME - Disabled for now: we need to redo an automatic way
        # of generating the magic info inside the rst.
        #('docs/magic.tex',
        #['IPython/Magic.py'],
        #"cd doc && ./update_magic.sh" ),
        ('docs/man/ipcluster.1.gz',
         ['docs/man/ipcluster.1'],
         'cd docs/man && gzip -9c ipcluster.1 > ipcluster.1.gz'),
        ('docs/man/ipcontroller.1.gz',
         ['docs/man/ipcontroller.1'],
         'cd docs/man && gzip -9c ipcontroller.1 > ipcontroller.1.gz'),
        ('docs/man/ipengine.1.gz',
         ['docs/man/ipengine.1'],
         'cd docs/man && gzip -9c ipengine.1 > ipengine.1.gz'),
        ('docs/man/iplogger.1.gz',
         ['docs/man/iplogger.1'],
         'cd docs/man && gzip -9c iplogger.1 > iplogger.1.gz'),
        ('docs/man/ipython.1.gz',
         ['docs/man/ipython.1'],
         'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),
        ('docs/man/irunner.1.gz',
         ['docs/man/irunner.1'],
         'cd docs/man && gzip -9c irunner.1 > irunner.1.gz'),
        ('docs/man/pycolor.1.gz',
         ['docs/man/pycolor.1'],
         'cd docs/man && gzip -9c pycolor.1 > pycolor.1.gz'),
        ]
    # Rebuild each stale target (gzip the man pages).
    [ target_update(*t) for t in to_update ]
#---------------------------------------------------------------------------
# Find all the packages, package data, and data_files
#---------------------------------------------------------------------------
# Collected up front via the setupbase helpers and fed into setup_args.
packages = find_packages()
package_data = find_package_data()
data_files = find_data_files()
setup_args['packages'] = packages
setup_args['package_data'] = package_data
setup_args['data_files'] = data_files
#---------------------------------------------------------------------------
# custom distutils commands
#---------------------------------------------------------------------------
# imports here, so they are after setuptools import if there was one
from distutils.command.sdist import sdist
from distutils.command.upload import upload
class UploadWindowsInstallers(upload):
    """``upload`` variant that pushes Windows .exe installers to PyPI."""

    description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
    user_options = upload.user_options + [
        ('files=', 'f', 'exe file (or glob) to upload')
    ]

    def initialize_options(self):
        upload.initialize_options(self)
        meta = self.distribution.metadata
        # Default glob: every bdist_wininst exe for this name/version in dist/.
        stem = '{name}-{version}'.format(
            name=meta.get_name(),
            version=meta.get_version()
        )
        self.files = os.path.join('dist', '%s.*.exe' % stem)

    def run(self):
        # Upload each matching installer as a bdist_wininst for platform 'any'.
        for installer in glob(self.files):
            self.upload_file('bdist_wininst', 'any', installer)
# Wrap build_py/sdist so commit info is recorded (see setupbase), and
# register the custom Windows-installer upload command.
setup_args['cmdclass'] = {
    'build_py': record_commit_info('IPython'),
    'sdist' : record_commit_info('IPython', sdist),
    'upload_wininst' : UploadWindowsInstallers,
}
#---------------------------------------------------------------------------
# Handle scripts, dependencies, and setuptools specific things
#---------------------------------------------------------------------------
# For some commands, use setuptools. Note that we do NOT list install here!
# If you want a setuptools-enhanced install, just run 'setupegg.py install'
needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
           'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info',
           'egg_info', 'easy_install', 'upload',
           ))
if sys.platform == 'win32':
    # Depend on setuptools for install on *Windows only*
    # If we get script-installation working without setuptools,
    # then we can back off, but until then use it.
    # See Issue #369 on GitHub for more
    needs_setuptools.add('install')
# Importing setuptools is what switches distutils into setuptools mode;
# the 'setuptools' in sys.modules check below relies on this side effect.
if len(needs_setuptools.intersection(sys.argv)) > 0:
    import setuptools
# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}

if 'setuptools' in sys.modules:
    # Building with setuptools: declare entry points, extras, and the
    # dependencies that plain distutils cannot express.
    setuptools_extra_args['zip_safe'] = False
    setuptools_extra_args['entry_points'] = find_scripts(True)
    setup_args['extras_require'] = dict(
        parallel = 'pyzmq>=2.1.4',
        qtconsole = 'pygments',
        zmq = 'pyzmq>=2.1.4',
        doc = 'Sphinx>=0.3',
        test = 'nose>=0.10.1',
        notebook = 'tornado>=2.0'
    )
    requires = setup_args.setdefault('install_requires', [])
    setupext.display_status = False
    if not setupext.check_for_readline():
        if sys.platform == 'darwin':
            requires.append('readline')
        elif sys.platform.startswith('win'):
            # Pyreadline 64 bit windows issue solved in versions >=1.7.1
            # Also solves issues with some older versions of pyreadline that
            # satisfy the unconstrained depdendency.
            requires.append('pyreadline>=1.7.1')
        else:
            pass
            # do we want to install readline here?

    # Script to be run by the windows binary installer after the default setup
    # routine, to add shortcuts and similar windows-only things.  Windows
    # post-install scripts MUST reside in the scripts/ dir, otherwise distutils
    # doesn't find them.
    if 'bdist_wininst' in sys.argv:
        if len(sys.argv) > 2 and \
               ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):
            # Bug fix: this file does ``from __future__ import print_function``,
            # so the old Python-2 statement ``print >> sys.stderr, ...`` was a
            # runtime TypeError (it right-shifted the print function by a file
            # object). Use the print() function form instead.
            print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr)
            sys.exit(1)
        setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]
        setup_args['options'] = {"bdist_wininst":
                                 {"install_script":
                                  "ipython_win_post_install.py"}}

    if PY3:
        # On Python 3 run 2to3 at build time and install '3'-suffixed scripts.
        setuptools_extra_args['use_2to3'] = True
        from setuptools.command.build_py import build_py
        setup_args['cmdclass'] = {'build_py': record_commit_info('IPython', build_cmd=build_py)}
        setuptools_extra_args['entry_points'] = find_scripts(True, suffix='3')
        setuptools._dont_write_bytecode = True
else:
    # If we are running without setuptools, call this function which will
    # check for dependencies an inform the user what is needed.  This is
    # just to make life easy for users.
    check_for_dependencies()
    setup_args['scripts'] = find_scripts(False)
#---------------------------------------------------------------------------
# Do the actual setup now
#---------------------------------------------------------------------------
# Merge the setuptools-only arguments (empty when using plain distutils).
setup_args.update(setuptools_extra_args)
def main():
    """Run distutils/setuptools setup, then remove build byproducts."""
    setup(**setup_args)
    cleanup()


if __name__ == '__main__':
    main()
|
{
"content_hash": "386bba6b1b2d7d01617bbac6f667bdf5",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 96,
"avg_line_length": 36.89965397923876,
"alnum_prop": 0.530570142535634,
"repo_name": "sodafree/backend",
"id": "e061f2353d86bdc1287309d1ff9d44cbcd093a43",
"size": "10710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/ipython/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "21800"
},
{
"name": "JavaScript",
"bytes": "1050184"
},
{
"name": "Python",
"bytes": "21215906"
},
{
"name": "Shell",
"bytes": "7557"
},
{
"name": "VimL",
"bytes": "25012"
}
],
"symlink_target": ""
}
|
from sys import argv, stderr, exit
from PIL import Image, ImageTk
import Tkinter
import json
import os
import math
import sys
APP = {} # contains global information needed by tkinter functions
REAL_POINTS = []   # ground-truth test points read from input
GUESS_POINTS = []  # predicted points, parallel to REAL_POINTS
NEIGHBORS = []     # flat list: NUM_NEIGHBORS entries per test point
NUM_NEIGHBORS = 4  # k; overwritten from argv in main()
INDEX = 0          # index of the test point currently being displayed
class Point(object):
    """A 2-D point with an optional density label."""

    def __init__(self, x, y, density=""):
        # Coordinates are in image/canvas space.
        self.x = x
        self.y = y
        self.density = density
class Line(object):
    """A segment between two Points, with its length cached on creation."""

    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
        # Cache the Euclidean length once at construction time.
        self.length = distance(p1, p2)
#--------------------------------
# Misc Utility Functions Below
#--------------------------------
def distance(a, b):
    """Return the Euclidean distance between the given two Points.

    :param a: object with numeric ``x`` and ``y`` attributes
    :param b: object with numeric ``x`` and ``y`` attributes
    """
    # math.hypot is clearer than hand-rolled sqrt(dx*dx + dy*dy) and is
    # robust against intermediate overflow for large coordinates.
    return math.hypot(a.x - b.x, a.y - b.y)
#--------------------------------
# TKinter Application Code Below
#--------------------------------
def initializeApp(image_path):
    """Initialize app state, build the Tkinter UI, and enter the main loop.

    :param image_path: path of the background image to display
    """
    global APP
    image = Image.open(image_path)
    width, height = image.size[0], image.size[1]
    APP['window'] = Tkinter.Tk()
    APP['frame'] = Tkinter.Frame(APP['window'])
    image_tk = ImageTk.PhotoImage(image)
    APP['canvas'] = Tkinter.Canvas(APP['frame'], width=width, height=height-50)
    APP['canvas'].create_image(width // 2, height // 2, image=image_tk)
    # Bug fix: 'h' previously stored the width ({'w': width, 'h': width}).
    APP['dims'] = {'w': width, 'h': height}
    APP['buttons'] = getButtons()
    APP['points'] = []
    APP['lines'] = []
    APP['canvas_list'] = []
    APP['frame'].pack()
    APP['canvas'].pack()
    APP['buttons']['ready_btn'].pack(side='right')
    # Blocks here until the window is closed (or ready() calls sys.exit).
    APP['window'].mainloop()
def getButtons():
    """Build the app's buttons; stored on APP by initializeApp."""
    begin_button = Tkinter.Button(APP['frame'], text="Begin", command=ready)
    return {'ready_btn': begin_button}
def draw_point(p, color, text=""):
    """Draw point ``p`` as a filled circle, optionally labeled above it."""
    global APP
    r = 5  # point radius in pixels
    canvas = APP['canvas']
    item = canvas.create_oval(p.x - r, p.y - r, p.x + r, p.y + r, fill=color)
    if text != "":
        # Track the oval, then place the label 15 px above the point.
        APP['canvas_list'].append(item)
        item = canvas.create_text(p.x, p.y - 15, text=str(text))
    APP['points'].append(p)
    APP['canvas_list'].append(item)
def draw_line(line, color):
    """Draw ``line`` with an arrow head at its first endpoint."""
    global APP
    item = APP['canvas'].create_line(
        line.p1.x, line.p1.y, line.p2.x, line.p2.y,
        fill=color, width=1, arrow=Tkinter.FIRST)
    APP['lines'].append(line)
    APP['canvas_list'].append(item)
def ready():
    """ Displays connections between test points and predictions """
    # Button callback; advances one test point per click. The first click
    # loads the data, later clicks clear the canvas and draw the next point,
    # and after the last point the program exits.
    global REAL_POINTS, GUESS_POINTS, NEIGHBORS, INDEX
    if INDEX == 0:
        global APP
        readPoints()
        APP['buttons']['ready_btn']["text"] = "Next point"
    elif INDEX == len(REAL_POINTS):
        sys.exit(0)
    else:
        global APP
        # Clear everything drawn for the previous point.
        for canvas in APP['canvas_list']:
            APP['canvas'].delete(canvas)
        APP['points'] = []
        APP['canvas_list'] = []
    # Real point in green (labeled), prediction in red, joined in blue.
    draw_point(REAL_POINTS[INDEX], 'green', "P" + str(INDEX))
    draw_point(GUESS_POINTS[INDEX], 'red')
    draw_line(Line(REAL_POINTS[INDEX], GUESS_POINTS[INDEX]), 'blue')
    # Neighbors are stored flat: NUM_NEIGHBORS consecutive entries per point.
    for j in range(INDEX * NUM_NEIGHBORS, INDEX * NUM_NEIGHBORS + NUM_NEIGHBORS):
        draw_point(NEIGHBORS[j], 'purple', str(j - INDEX * NUM_NEIGHBORS + 1))
        draw_line(Line(REAL_POINTS[INDEX], NEIGHBORS[j]), 'black')
    INDEX = INDEX + 1
def readPoints():
    """ Reads points from stdin into the module-level point lists """
    # Bug fix: the docstring previously appeared *after* the ``global``
    # statement, so Python never recognized it as the docstring.
    global REAL_POINTS, GUESS_POINTS, NEIGHBORS
    REAL_POINTS = []
    GUESS_POINTS = []
    NEIGHBORS = []
    # 4-column rows are "real_x real_y guess_x guess_y"; 2-column rows are
    # neighbor coordinates. (Removed the unused enumerate index.)
    for line in sys.stdin.readlines():
        points = [float(p) for p in line.rstrip().split()]
        if len(points) == 4:
            REAL_POINTS.append(Point(points[0], points[1]))
            GUESS_POINTS.append(Point(points[2], points[3]))
        else:
            NEIGHBORS.append(Point(points[0], points[1]))
def main(argv):
    """Parse CLI args and launch the viewer.

    usage: python showneighbors.py k image_path point_coords
    """
    if len(argv) != 4:
        print "Usage: python testresults.py k image_path point_coords"
        exit(1)
    global NUM_NEIGHBORS
    NUM_NEIGHBORS = int(argv[1])
    image_path = argv[2]
    # Redirect stdin to the coordinates file so readPoints() can consume it.
    sys.stdin = open(argv[3])
    initializeApp(image_path)
if __name__ == '__main__':
    main(argv)
|
{
"content_hash": "ff7ccb3841474a3578f1bf8db59eda6c",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 86,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.5788770053475936,
"repo_name": "TeamSirius/Utilities",
"id": "a16a463c42756c3226e250082ca7475366aebe9b",
"size": "4742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/showneighbors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "904"
},
{
"name": "HTML",
"bytes": "87714"
},
{
"name": "JavaScript",
"bytes": "36422"
},
{
"name": "Python",
"bytes": "139768"
},
{
"name": "Shell",
"bytes": "1264"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Without this the config will prompt for a token, so set a dummy value
# before the loadimpactcli import below runs.
import os
os.environ['LOADIMPACT_API_V3_TOKEN'] = 'token'
import unittest
from collections import namedtuple
from click.testing import CliRunner
from loadimpactcli import userscenario_commands
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class MockValidation(object):
    """Minimal stand-in for a validation; only status_text is used."""

    def __init__(self, status_text):
        self.status_text = status_text
class MockValidationResult(object):
    """Minimal stand-in for a validation result: timestamp and message."""

    def __init__(self, timestamp, message):
        self.timestamp = timestamp
        self.message = message
class TestUserScenarios(unittest.TestCase):
    """Tests for the user-scenario CLI commands.

    The loadimpact API client is replaced with MagicMocks; commands are
    invoked through click's CliRunner and asserted on exit code and output.
    (click exit code 2 means a usage error, e.g. missing arguments.)
    """
    def setUp(self):
        self.runner = CliRunner()
        # Lightweight stand-in for the API's scenario objects.
        Scenario = namedtuple('Scenario', ['id', 'name', 'script'])
        self.scenario1 = Scenario(1, u'Scen1', 'debug')
        self.scenario2 = Scenario(2, u'Scen2', 'info')
        self.scenario3 = Scenario(3, u'Scen3 åäö', 'info')  # non-ASCII name
    def test_get_scenario(self):
        client = userscenario_commands.client
        client.get_user_scenario = MagicMock(return_value=self.scenario1)
        result = self.runner.invoke(userscenario_commands.get_scenario, ['1'])
        assert result.exit_code == 0
        # The command prints the scenario's script.
        assert result.output == "debug\n"
    def test_get_scenario_no_params(self):
        result = self.runner.invoke(userscenario_commands.get_scenario, [])
        assert result.exit_code == 2
    def test_list_scenario(self):
        client = userscenario_commands.client
        client.DEFAULT_PROJECT = 1
        client.list_user_scenarios = MagicMock(return_value=[self.scenario1, self.scenario2])
        result = self.runner.invoke(userscenario_commands.list_scenarios, ['--project_id', '1'])
        assert result.exit_code == 0
        assert result.output == u"ID:\tNAME:\n1\tScen1\n2\tScen2\n"
    def test_list_scenario_non_ascii_name(self):
        client = userscenario_commands.client
        client.DEFAULT_PROJECT = 1
        client.list_user_scenarios = MagicMock(return_value=[self.scenario1, self.scenario3])
        result = self.runner.invoke(userscenario_commands.list_scenarios, ['--project_id', '1'])
        assert result.exit_code == 0
        # Non-ASCII scenario names must round-trip through the output.
        assert result.output == u"ID:\tNAME:\n1\tScen1\n3\tScen3 åäö\n"
    def test_create_scenario(self):
        client = userscenario_commands.client
        client.create_user_scenario = MagicMock(return_value=self.scenario1)
        result = self.runner.invoke(userscenario_commands.create_scenario, ['tests/script', 'my script', '--project_id', '1'])
        assert result.exit_code == 0
        assert result.output == "debug\n"
    def test_create_scenario_no_params(self):
        result = self.runner.invoke(userscenario_commands.create_scenario, [])
        assert result.exit_code == 2
    def test_create_scenario_with_datastore_files(self):
        client = userscenario_commands.client
        client.create_user_scenario = MagicMock(return_value=self.scenario1)
        # --datastore_file may be given multiple times.
        result = self.runner.invoke(userscenario_commands.create_scenario,
                                    ['tests/script', 'my script',
                                     '--project_id', '1',
                                     '--datastore_file', 'tests/datastore.csv',
                                     '--datastore_file', 'tests/script'])
        assert result.exit_code == 0
        assert result.output == "debug\n"
    def test_create_scenario_with_existing_datastore(self):
        client = userscenario_commands.client
        client.create_user_scenario = MagicMock(return_value=self.scenario1)
        result = self.runner.invoke(userscenario_commands.create_scenario,
                                    ['tests/script', 'my script',
                                     '--project_id', '1',
                                     '--datastore_id', '1'
                                     ])
        assert result.exit_code == 0
        assert result.output == "debug\n"
    def test_update_scenario(self):
        userscenario_commands.update_user_scenario_script = MagicMock(return_value=self.scenario1)
        result = self.runner.invoke(userscenario_commands.update_scenario, ['1', 'tests/script'])
        assert result.exit_code == 0
        assert result.output == 'debug\n'
    def test_update_scenario_no_params(self):
        result = self.runner.invoke(userscenario_commands.update_scenario, [])
        assert result.exit_code == 2
    def test_delete_scenario(self):
        userscenario_commands.delete_user_scenario = MagicMock(return_value="Userscenario1")
        # --yes skips the interactive confirmation prompt.
        result = self.runner.invoke(userscenario_commands.delete_scenario, ['1', '--yes'])
        assert result.exit_code == 0
        assert result.output == 'Userscenario1\n'
    def test_delete_scenario_no_params(self):
        result = self.runner.invoke(userscenario_commands.update_scenario, [])
        assert result.exit_code == 2
    def test_validate_scenario(self):
        # Mock the whole validation pipeline; only the exit code is checked.
        userscenario_commands.client.get_user_scenario = MagicMock(return_value=1)
        userscenario_commands.get_validation = MagicMock(return_value=MockValidation('Success'))
        userscenario_commands.get_validation_results = MagicMock(return_value=[MockValidationResult(2, 'msg')])
        userscenario_commands.get_formatted_validation_results = MagicMock(return_value='Validation 1')
        result = self.runner.invoke(userscenario_commands.validate_scenario, ['1'])
        assert result.exit_code == 0
    def test_validate_scenario_no_params(self):
        result = self.runner.invoke(userscenario_commands.delete_scenario, [])
        assert result.exit_code == 2
    def test_get_formatted_validation_results(self):
        # The formatter reads a ``level`` attribute; give the mock class one.
        MockValidationResult.level = None
        userscenario_commands.get_timestamp_as_local_time = MagicMock(return_value=2)
        unformatted_validations = [MockValidationResult(2, 'msg 1'), MockValidationResult(2, 'msg 2'), MockValidationResult(2, 'msg 3')]
        formatted_validations = userscenario_commands.get_formatted_validation_results(unformatted_validations)
        assert formatted_validations == "[2] msg 1\n[2] msg 2\n[2] msg 3\n"
|
{
"content_hash": "755f5e6ef562f7c0f0db5fed1620a13d",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 136,
"avg_line_length": 42.125786163522015,
"alnum_prop": 0.6624365482233503,
"repo_name": "loadimpact/loadimpact-cli",
"id": "76992dc58a1add0b774df43902f797b58331a074",
"size": "6719",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_userscenario_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1060"
},
{
"name": "Python",
"bytes": "75298"
}
],
"symlink_target": ""
}
|
import getpass, sys
from imapclient import IMAPClient
# Expect exactly two CLI arguments: hostname and username.
try:
    hostname, username = sys.argv[1:]
except ValueError:
    print('usage: %s hostname username' % sys.argv[0])
    sys.exit(2)
c = IMAPClient(hostname, ssl=True)
try:
    # The password is prompted for interactively, never taken from argv.
    c.login(username, getpass.getpass())
except c.Error as e:
    print('Could not log in:', e)
    sys.exit(1)
else:
    # Read-only SELECT so no message flags are changed; dump the status
    # dictionary the server returned for INBOX.
    select_dict = c.select_folder('INBOX', readonly=True)
    for k, v in list(select_dict.items()):
        print('%s: %r' % (k, v))
c.logout()
|
{
"content_hash": "4ddf9ba97cadb37735414ac4b7fcb133",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 24.8,
"alnum_prop": 0.6391129032258065,
"repo_name": "jac2130/BayesGame",
"id": "371ea17e29f26d0cfdcc03d134fbb602d9353df8",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foundations-of-python-network-programming/python3/15/folder_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "95"
},
{
"name": "C#",
"bytes": "1110"
},
{
"name": "CSS",
"bytes": "2118"
},
{
"name": "HTML",
"bytes": "166635"
},
{
"name": "JavaScript",
"bytes": "751618"
},
{
"name": "PHP",
"bytes": "339"
},
{
"name": "Perl",
"bytes": "3136"
},
{
"name": "Python",
"bytes": "1821680"
},
{
"name": "Shell",
"bytes": "1630"
},
{
"name": "Smarty",
"bytes": "7840"
}
],
"symlink_target": ""
}
|
import csv
from sys import argv
import networkx as nx
import numpy as np
from numpy import ones, concatenate
from math import ceil, sqrt
import matplotlib.pyplot as plt
def weightFunction(mdx, fdx, similarity_measure, l):
    """Edge weight between slices mdx and fdx: the slice gap scaled by the
    similarity and penalized exponentially, (1 + l) ** gap."""
    gap = abs(mdx - fdx)
    return (1.0 + similarity_measure) * gap * (1.0 + l) ** gap
def weightFunctionWrapper(row_from_similarity_table, l):
    """Apply weightFunction to a (mdx, fdx, similarity) row with lambda ``l``."""
    mdx, fdx, similarity = (row_from_similarity_table[0],
                            row_from_similarity_table[1],
                            row_from_similarity_table[2])
    return weightFunction(mdx, fdx, similarity, l)
def createGraph(numberOfSlices, epsilon, similarityData = None):
    """Build a weighted DiGraph from similarity data.

    NOTE(review): ``createSimilarityData`` and ``createWeightsFromData`` are
    not defined anywhere in this module -- calling this function raises
    NameError unless they are provided elsewhere. Confirm before use.
    """
    if not similarityData:
        similarityData = createSimilarityData(numberOfSlices, epsilon)
    graphWeights = createWeightsFromData(similarityData)
    G = nx.DiGraph()
    G.add_weighted_edges_from(graphWeights)
    return G
def getShortestPath(node1, node2, Graph, shortest_paths = None ):
    """Return the shortest (Dijkstra) path from node1 to node2 in Graph.

    :param shortest_paths: optional precomputed result of
        nx.all_pairs_dijkstra_path, to avoid recomputing all pairs.
    """
    if not shortest_paths:
        # Bug fix: the original referenced the module-level graph ``G``
        # instead of the ``Graph`` parameter, and never returned anything.
        shortest_paths = nx.all_pairs_dijkstra_path(Graph)
    return shortest_paths[node1][node2]
def convertDataFromStrings(row):
    """Parse a [str, str, str] row into (int, int, float)."""
    src, dst, value = row[0], row[1], row[2]
    return int(src), int(dst), float(value)
def createGraphFromFile(file_name):
    """Build a directed graph from a space-delimited edge file.

    Each row ``u v w`` is added in both directions (u->v and v->u) with
    weight w.

    :param file_name: path of the space-delimited weights file
    :return: networkx.DiGraph with the parsed weighted edges
    """
    weights = []
    # Use a context manager so the file handle is always closed (the
    # original opened the file and never closed it).
    with open(file_name, 'rb') as f:
        for row in csv.reader(f, delimiter=' '):
            weights.append(row)
            weights.append([row[1], row[0], row[2]])
    weights = [convertDataFromStrings(w) for w in weights]
    G = nx.DiGraph()
    G.add_weighted_edges_from(weights)
    return G
def createGraphsFromSimilarities(similarities_data, lambdas):
    """Build one weighted DiGraph per lambda value from a similarity file.

    :param similarities_data: path of space-delimited rows ``u v similarity``
        (each row is mirrored so edges are added in both directions)
    :param lambdas: iterable of lambda values; one graph is built per value
    :return: list of networkx.DiGraph, parallel to ``lambdas``
    """
    similarities = []
    # Context manager fixes the file-handle leak in the original.
    with open(similarities_data, 'rb') as f:
        for row in csv.reader(f, delimiter=' '):
            similarities.append(row)
            similarities.append([row[1], row[0], row[2]])
    # List (not a lazy map) because it is re-iterated once per lambda.
    similarities = [convertDataFromStrings(r) for r in similarities]
    graphs = []
    for l in lambdas:
        weights = [[row[0], row[1], weightFunctionWrapper(row, l)]
                   for row in similarities]
        G = nx.DiGraph()
        G.add_weighted_edges_from(weights)
        graphs.append(G)
    return graphs
def nodesOmitted(Graph, referenceNode):
    """Return a 0/1 mask of the nodes missed by the reconstruction paths.

    The covered nodes are those on the shortest paths from referenceNode to
    node 0 and to the last node.

    :param Graph: networkx.DiGraph whose nodes are numbered 0..N-1
    :param referenceNode: node both shortest paths are anchored at
    :return: numpy array of length N; 1 = omitted, 0 = on a path
    """
    shortest_paths = nx.all_pairs_dijkstra_path(Graph)
    front_path = shortest_paths[referenceNode][0]
    back_path = shortest_paths[referenceNode][len(Graph.nodes()) - 1]
    path = concatenate([front_path, back_path])
    omitted = ones(len(Graph.nodes()))
    for node in path:
        omitted[node] = 0
    # Bug fix: removed the unreachable leftover lines that followed this
    # return (a stray "G = nx.DiGraph() ... return G" fragment referencing
    # an undefined ``weights`` variable -- copy/paste residue).
    return omitted
def plotOmittedFromGraph(graph, reference_slice, l = "not specified"):
    """Visualize which slices the reconstruction path omits.

    graph: weighted DiGraph of slices.
    reference_slice: slice the paths are anchored at (drawn distinctly).
    l: the lambda value, used only as the plot title.

    Draws (does not show) a near-square pcolor grid: omitted slices,
    visited slices, the reference slice, and neutral padding cells each
    get a distinct value, with every cell annotated by its slice index.
    """
    omitted = nodesOmitted(graph, reference_slice)
    omitted[reference_slice] = -1  # mark the reference slice distinctly
    # Lay the 1-D vector out on the smallest square grid that fits it.
    reshape_parameter = int(ceil(sqrt(len(omitted))))
    # (Removed an unused ``magic_number`` local from the original.)
    # Pad with 0.5 so the grid is exactly square; padding plots neutrally.
    padding = ones(reshape_parameter ** 2 - len(omitted)) * .5
    to_plot = concatenate([omitted, padding]).reshape(
        (reshape_parameter, reshape_parameter)) * -1
    ## grid coordinates for the per-cell slice-number annotations
    x = np.linspace(8./reshape_parameter,
            reshape_parameter - 8./reshape_parameter, reshape_parameter)
    y = np.linspace(10./reshape_parameter,
            reshape_parameter - 8./reshape_parameter, reshape_parameter)
    plt.figure()
    plt.title(str(l))
    ax = plt.gca()
    ax.invert_yaxis()
    plt.pcolor(to_plot, cmap='PiYG')
    for i in range(reshape_parameter):
        for j in range(reshape_parameter):
            plt.text(x[i], y[j], str(i+j*reshape_parameter), color="white",
                     horizontalalignment='center')
    plt.colorbar()
## closure factory ("czary" is Polish for "magic")
def setLambdaForWrapper(el):
    """Return a one-argument weight function with lambda fixed to ``el``."""
    def bound_weight(row_from_similarity_table):
        return weightFunctionWrapper(row_from_similarity_table, el)
    return bound_weight
if __name__ == "__main__":
    """usage python graph_reconstruction file_name reference_slice lambda1 lambda2 ... lambdaN"""
    # CLI: <similarities file> <reference slice index> <one or more lambdas>
    file_name = argv[1]
    reference_slice = int(argv[2])
    lambdas = [float(arg) for arg in argv[3:]]
    graphs = createGraphsFromSimilarities(file_name, lambdas)
    # One omitted-slices plot per lambda, then show them all at once.
    for graph, l in zip(graphs, lambdas):
        plotOmittedFromGraph(graph, reference_slice, l)
    plt.show()
|
{
"content_hash": "d7368263739ebc8a867f2d0dc008a5ac",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 97,
"avg_line_length": 33.63709677419355,
"alnum_prop": 0.6499640374011029,
"repo_name": "chrisfilo/poSSum",
"id": "510ebfbc47d9848665966a5a781090fce5517a5e",
"size": "4190",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_bananas_and_graphs/graph_reconstruction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7378"
},
{
"name": "Python",
"bytes": "469293"
},
{
"name": "Shell",
"bytes": "71781"
}
],
"symlink_target": ""
}
|
import datetime
import random
from random import randint
# Print 49 random dates between 1945 and 2016.
for i in range(1, 50):
    # Random base date; the day is capped at 28 so it is valid in every month.
    somedate = datetime.date(randint(1945, 2015), randint(1, 12), randint(1, 28))
    # Shift forward by 1-365 days so month-end days can also appear.
    date = somedate + datetime.timedelta(random.randint(1, 365))
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original ``print date`` statement is a SyntaxError on Python 3.
    print(date)
|
{
"content_hash": "a54387f3c69b007c623058fe46a08ec4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 74,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.7554585152838428,
"repo_name": "kfaraaz/tests",
"id": "1e368e04a641a5a9b46d2efe044fe5b58010afbb",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/randmDate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "13098"
},
{
"name": "Python",
"bytes": "10589"
},
{
"name": "Shell",
"bytes": "1015"
}
],
"symlink_target": ""
}
|
import torch
import torch.nn as nn
import os
from foolbox.models import PyTorchModel
from foolbox.utils import accuracy, samples
def create() -> PyTorchModel:
    """Load the pretrained MNIST CNN and wrap it as a foolbox PyTorchModel."""
    layers = [
        nn.Conv2d(1, 32, 3),
        nn.ReLU(),
        nn.Conv2d(32, 64, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Dropout2d(0.25),
        nn.Flatten(),
        nn.Linear(9216, 128),
        nn.ReLU(),
        nn.Dropout2d(0.5),
        nn.Linear(128, 10),
    ]
    model = nn.Sequential(*layers)
    # Weights live next to this file.
    weights_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mnist_cnn.pth")
    model.load_state_dict(torch.load(weights_path))  # type: ignore
    model.eval()
    # Standard MNIST normalization, folded into the foolbox wrapper so the
    # attack interface can work on raw [0, 1] pixels.
    preprocessing = dict(mean=0.1307, std=0.3081)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
    return fmodel
if __name__ == "__main__":
    # Smoke test: report clean accuracy on a small MNIST batch.
    fmodel = create()
    batch_images, batch_labels = samples(fmodel, dataset="mnist", batchsize=20)
    print(accuracy(fmodel, batch_images, batch_labels))
|
{
"content_hash": "8b8847ddda96028490ff555f0ac5bc17",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 28.294117647058822,
"alnum_prop": 0.6101871101871101,
"repo_name": "bethgelab/foolbox",
"id": "59f6cfa1a0fb620e47e8fc4eb5308b8628b0a4a1",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/zoo/mnist/foolbox_model.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "668"
},
{
"name": "Jupyter Notebook",
"bytes": "23091"
},
{
"name": "Makefile",
"bytes": "2670"
},
{
"name": "Python",
"bytes": "405918"
},
{
"name": "TeX",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
print('\nLambda is an assigned or anonymous function in Python')
print('Assigned example name = lambda arg1, arg2, argN : return expression')
# A lambda bound to a name behaves like a small named function.
multiply = lambda x, y: x * y
print(multiply(2, 10))
def amplifyWrapper(koeff):
    """Return a one-argument function that scales its input by ``koeff``."""
    return lambda value: value * koeff
# Closure example: the returned lambda remembers koeff=10.
amplifyX10 = amplifyWrapper(10)
print(amplifyX10(2))
|
{
"content_hash": "5c335b57faf776f16e85447103a986c5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 27,
"alnum_prop": 0.6975308641975309,
"repo_name": "alkryukov/sandbox",
"id": "17ed62858a371d0f9a519e7943cf77fc6b8d0b18",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learnings/python/basics/01020_lambdas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "C",
"bytes": "97"
},
{
"name": "CSS",
"bytes": "90"
},
{
"name": "Dockerfile",
"bytes": "507"
},
{
"name": "Go",
"bytes": "2918"
},
{
"name": "HTML",
"bytes": "3807"
},
{
"name": "JavaScript",
"bytes": "10367"
},
{
"name": "Python",
"bytes": "101707"
},
{
"name": "Shell",
"bytes": "1162"
},
{
"name": "TypeScript",
"bytes": "1913"
}
],
"symlink_target": ""
}
|
"""A stateless agent interface."""
import collections
import functools
from typing import Any, Callable, Optional, Tuple
import dm_env
import haiku as hk
from examples.impala import util
import jax
import jax.numpy as jnp
import numpy as np
# One step/unroll of agent output: policy logits, value estimate(s), and the
# sampled action (empty when produced by unroll).
AgentOutput = collections.namedtuple("AgentOutput",
                                     ["policy_logits", "values", "action"])
# Type aliases: actions are flat 0-indexed ints; Nest is an arbitrary pytree.
Action = int
Nest = Any
# A NetFactory maps num_actions to a Haiku recurrent core.
NetFactory = Callable[[int], hk.RNNCore]
class Agent:
  """A stateless agent interface."""
  def __init__(self, num_actions: int, obs_spec: Nest,
               net_factory: NetFactory):
    """Constructs an Agent object.
    Args:
      num_actions: Number of possible actions for the agent. Assumes a flat,
        discrete, 0-indexed action space.
      obs_spec: The observation spec of the environment.
      net_factory: A function from num_actions to a Haiku module representing
        the agent. This module should have an initial_state() function and an
        unroll function.
    """
    self._obs_spec = obs_spec
    # Bind num_actions now so the lambdas below can call net_factory().
    net_factory = functools.partial(net_factory, num_actions)
    # Instantiate two hk.transforms() - one for getting the initial state of the
    # agent, another for actually initializing and running the agent.
    _, self._initial_state_apply_fn = hk.without_apply_rng(
        hk.transform(
            lambda batch_size: net_factory().initial_state(batch_size)))
    self._init_fn, self._apply_fn = hk.without_apply_rng(
        hk.transform(lambda obs, state: net_factory().unroll(obs, state)))
  # static_argnums=0: ``self`` is treated as a static argument, so jit
  # specializes per Agent instance.
  @functools.partial(jax.jit, static_argnums=0)
  def initial_params(self, rng_key):
    """Initializes the agent params given the RNG key."""
    # Build zero-valued dummy observations matching the spec, then add
    # [T=1, B=1] leading axes so they look like a unrolled batch.
    dummy_inputs = jax.tree_util.tree_map(
        lambda t: np.zeros(t.shape, t.dtype), self._obs_spec)
    dummy_inputs = util.preprocess_step(dm_env.restart(dummy_inputs))
    dummy_inputs = jax.tree_util.tree_map(
        lambda t: t[None, None, ...], dummy_inputs)
    return self._init_fn(rng_key, dummy_inputs, self.initial_state(1))
  # batch_size is static: a new initial state is traced per batch size.
  @functools.partial(jax.jit, static_argnums=(0, 1))
  def initial_state(self, batch_size: Optional[int]):
    """Returns agent initial state."""
    # We expect that generating the initial_state does not require parameters.
    return self._initial_state_apply_fn(None, batch_size)
  @functools.partial(jax.jit, static_argnums=(0,))
  def step(
      self,
      rng_key,
      params: hk.Params,
      timestep: dm_env.TimeStep,
      state: Nest,
  ) -> Tuple[AgentOutput, Nest]:
    """For a given single-step, unbatched timestep, output the chosen action."""
    # Pad timestep, state to be [T, B, ...] and [B, ...] respectively.
    timestep = jax.tree_util.tree_map(lambda t: t[None, None, ...], timestep)
    state = jax.tree_util.tree_map(lambda t: t[None, ...], state)
    net_out, next_state = self._apply_fn(params, timestep, state)
    # Remove the padding from above.
    net_out = jax.tree_util.tree_map(
        lambda t: jnp.squeeze(t, axis=(0, 1)), net_out)
    next_state = jax.tree_util.tree_map(
        lambda t: jnp.squeeze(t, axis=0), next_state)
    # Sample an action and return.
    action = hk.multinomial(rng_key, net_out.policy_logits, num_samples=1)
    action = jnp.squeeze(action, axis=-1)
    return AgentOutput(net_out.policy_logits, net_out.value, action), next_state
  def unroll(
      self,
      params: hk.Params,
      trajectory: dm_env.TimeStep,
      state: Nest,
  ) -> AgentOutput:
    """Unroll the agent along trajectory."""
    # Not jitted here; callers are expected to jit around this.
    # action is returned empty: unroll only produces logits/values.
    net_out, _ = self._apply_fn(params, trajectory, state)
    return AgentOutput(net_out.policy_logits, net_out.value, action=[])
|
{
"content_hash": "3093bd20acc52115b1550f7b424ddb5c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 38.136842105263156,
"alnum_prop": 0.6627104609439691,
"repo_name": "deepmind/dm-haiku",
"id": "4be208e5baa32e0877acc23608a4407f16e057f0",
"size": "4319",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/impala/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1024855"
},
{
"name": "Shell",
"bytes": "1907"
},
{
"name": "Starlark",
"bytes": "31643"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.signal import argrelmax, detrend
from scipy.integrate import trapz
from scipy.ndimage.filters import gaussian_filter
import peakutils
def get_hyper_peaks(hyper_image, threshold):
    """
    Given a Hyperspectral image, computes the average spectrum and finds the
    peaks of that spectrum.
    Input:
        hyper_image: An image where each pixel represents one PiFM spectrum.
        threshold: The minimum percentage to consider when finding peaks
            (passed to peakutils as a fraction of the spectrum maximum).
    Output:
        peak_locs: the locations of the peaks in point space.
        spectrum: the computed average spectrum of the hyper image
    """
    # Mean spectrum over all pixels: flatten the spatial axes to rows, then
    # average per wavenumber (replaces the original per-pixel append loop).
    spectrum = np.asarray(hyper_image).reshape(
        -1, hyper_image.shape[-1]).mean(axis=0)
    # NOTE(review): the original also built a thresholded spectrum copy and
    # ran scipy's argrelmax on it, but immediately discarded that result in
    # favor of peakutils below -- that dead code has been removed.
    peak_locs = peakutils.indexes(spectrum, thres=threshold)
    return peak_locs, spectrum
def sum_around_peak(hyper_image, peak_loc, width):
    """
    Given a hyper image, peak location, and a desired width this function
    computes the integral amplitude under each peak and returns an image of
    these values with the same dimensions as the hyperspectral image.
    Input:
        hyper_image: An image where each pixel represents one PiFM spectrum.
        peak_loc: An integer representing the location of a detected peak.
        width: total width to integrate around this peak.
    Output:
        result_array: an image of the integrated intensities with the same
        first two dimensions as the input hyperspectral data.
    """
    half_width = int(np.floor(width / 2))
    # Clamp the integration window to the spectral axis.
    bottom = max(peak_loc - half_width, 0)
    top = min(peak_loc + half_width, hyper_image.shape[-1])
    # One vectorized trapezoid integration along the spectral axis replaces
    # the original per-pixel double loop; values are identical.
    return trapz(hyper_image[:, :, bottom:top], axis=-1)
def generate_features(hyper_image, peak_locs, width, filt=False, sigma=1.0):
    """
    Converts a hyperspectral image into a feature array, where each feature is
    the integrated intensity under a detected PiFM peak. Options included for
    gaussian filtering of the features.
    Input:
        hyper_image: An image where each pixel represents one PiFM spectrum.
        peak_locs: An array of integers representing the locations detected peaks.
        width: total width to integrate around this peak.
        filt: if True, gaussian-filter each feature image.
        sigma: filter width used when filt is True.
    Output:
        feature_array: an n_sample by n_peaks feature array to apply machine
        learning methods on.
        imgs: the per-peak intensity images (one per entry of peak_locs).
    """
    feature_columns = []
    images = []
    for peak in peak_locs:
        peak_img = sum_around_peak(hyper_image, peak, width)
        # Remove the constant offset from the feature image.
        peak_img = detrend(peak_img, type='constant')
        if filt:
            peak_img = gaussian_filter(peak_img, sigma=sigma)
        feature_columns.append(peak_img.ravel())
        images.append(peak_img)
    feature_array = np.column_stack(tuple(feature_columns))
    return feature_array, images
def feature_by_pixel(hyper_image):
    """
    Converts a hyper image to a feature array where each sample is a PiFM
    spectrum in its entirety.
    Input:
        hyper_image: An image where each pixel represents one PiFM spectrum.
    Output:
        feature_array: an n_sample by n_wavenumbers feature array.
    """
    x_pixels, y_pixels, spectrum_length = hyper_image.shape
    # Row-major reshape enumerates pixels in the same order as the original
    # nested loop (x outer, y inner), one spectrum per row; .copy() keeps
    # the original behavior of returning a fresh array, not a view.
    return np.asarray(hyper_image).reshape(
        x_pixels * y_pixels, spectrum_length).copy()
|
{
"content_hash": "cd4bbf0df71b7340a22ddb8437e40e5c",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 82,
"avg_line_length": 33.543103448275865,
"alnum_prop": 0.6633256232331021,
"repo_name": "kongjy/hyperAFM",
"id": "449957ef8b4789ec36552328db89c51db157fdaf",
"size": "3891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperAFM/gen_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9105162"
},
{
"name": "Python",
"bytes": "96882"
}
],
"symlink_target": ""
}
|
def main(request, response):
    """WPT handler: echo the request's Content-Type header back as the body.

    Responds 200 text/plain whose content is the incoming Content-Type
    value, and closes the connection after the response.
    """
    response.status = 200
    response.headers.set(b"Content-Type", b"text/plain")
    response.content = request.headers.get(b"Content-Type")
    response.close_connection = True
|
{
"content_hash": "7d0da4929743495ecefbb9afa4b0ee25",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 59,
"avg_line_length": 41.8,
"alnum_prop": 0.7177033492822966,
"repo_name": "scheib/chromium",
"id": "53e1de5b32ded36383c7f1447fbd3126a911c682",
"size": "209",
"binary": false,
"copies": "21",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/xhr/resources/echo-content-type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
from tree_building import Record, BuildTree
class TreeBuildingTest(unittest.TestCase):
    """
    Record(record_id, parent_id): records given to be processed
    Node(node_id): Node in tree
    BuildTree(records): records as argument and returns tree
    BuildTree should raise ValueError if given records are invalid
    """
    # --- happy-path cases ---
    def test_empty_list_input(self):
        # No records -> no tree.
        records = []
        root = BuildTree(records)
        self.assertIsNone(root)
    def test_one_node(self):
        records = [
            Record(0, 0)
        ]
        root = BuildTree(records)
        self.assert_node_is_leaf(root, node_id=0)
    def test_three_nodes_in_order(self):
        records = [
            Record(0, 0),
            Record(1, 0),
            Record(2, 0)
        ]
        root = BuildTree(records)
        self.assert_node_is_branch(root, node_id=0, children_count=2)
        self.assert_node_is_leaf(root.children[0], node_id=1)
        self.assert_node_is_leaf(root.children[1], node_id=2)
    def test_three_nodes_in_reverse_order(self):
        # Same tree as above must result regardless of record order.
        records = [
            Record(2, 0),
            Record(1, 0),
            Record(0, 0)
        ]
        root = BuildTree(records)
        self.assert_node_is_branch(root, node_id=0, children_count=2)
        self.assert_node_is_leaf(root.children[0], node_id=1)
        self.assert_node_is_leaf(root.children[1], node_id=2)
    def test_more_than_two_children(self):
        records = [
            Record(0, 0),
            Record(1, 0),
            Record(2, 0),
            Record(3, 0)
        ]
        root = BuildTree(records)
        self.assert_node_is_branch(root, node_id=0, children_count=3)
        self.assert_node_is_leaf(root.children[0], node_id=1)
        self.assert_node_is_leaf(root.children[1], node_id=2)
        self.assert_node_is_leaf(root.children[2], node_id=3)
    def test_binary_tree(self):
        records = [
            Record(6, 2),
            Record(0, 0),
            Record(3, 1),
            Record(2, 0),
            Record(4, 1),
            Record(5, 2),
            Record(1, 0)
        ]
        root = BuildTree(records)
        self.assert_node_is_branch(root, 0, 2)
        self.assert_node_is_branch(root.children[0], 1, 2)
        self.assert_node_is_branch(root.children[1], 2, 2)
        self.assert_node_is_leaf(root.children[0].children[0], 3)
        self.assert_node_is_leaf(root.children[0].children[1], 4)
        self.assert_node_is_leaf(root.children[1].children[0], 5)
        self.assert_node_is_leaf(root.children[1].children[1], 6)
    def test_unbalanced_tree(self):
        records = [
            Record(0, 0),
            Record(1, 0),
            Record(2, 0),
            Record(3, 1),
            Record(4, 1),
            Record(5, 1),
            Record(6, 2),
        ]
        root = BuildTree(records)
        self.assert_node_is_branch(root, 0, 2)
        self.assert_node_is_branch(root.children[0], 1, 3)
        self.assert_node_is_branch(root.children[1], 2, 1)
        self.assert_node_is_leaf(root.children[0].children[0], 3)
        self.assert_node_is_leaf(root.children[0].children[1], 4)
        self.assert_node_is_leaf(root.children[0].children[2], 5)
        self.assert_node_is_leaf(root.children[1].children[0], 6)
    # --- invalid-input cases: each must raise ValueError with an exact message ---
    def test_root_node_has_parent(self):
        records = [
            Record(0, 1),
            Record(1, 0)
        ]
        # Root parent_id should be equal to record_id(0)
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Node parent_id should be smaller than it's record_id.")
    def test_no_root_node(self):
        records = [
            Record(1, 0),
            Record(2, 0)
        ]
        # Record with record_id 0 (root) is missing
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Record id is invalid or out of order.")
    def test_non_continuous(self):
        records = [
            Record(2, 0),
            Record(4, 2),
            Record(1, 0),
            Record(0, 0)
        ]
        # Record with record_id 3 is missing
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Record id is invalid or out of order.")
    def test_cycle_directly(self):
        records = [
            Record(5, 2),
            Record(3, 2),
            Record(2, 2),
            Record(4, 1),
            Record(1, 0),
            Record(0, 0),
            Record(6, 3)
        ]
        # Cycle caused by Record 2 with parent_id pointing to itself
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Only root should have equal record and parent id.")
    def test_cycle_indirectly(self):
        records = [
            Record(5, 2),
            Record(3, 2),
            Record(2, 6),
            Record(4, 1),
            Record(1, 0),
            Record(0, 0),
            Record(6, 3)
        ]
        # Cycle caused by Record 2 with parent_id(6) greater than record_id(2)
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Node parent_id should be smaller than it's record_id.")
    def test_higher_id_parent_of_lower_id(self):
        records = [
            Record(0, 0),
            Record(2, 0),
            Record(1, 2)
        ]
        # Record 1 have parent_id(2) greater than record_id(1)
        with self.assertRaises(ValueError) as err:
            BuildTree(records)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "Node parent_id should be smaller than it's record_id.")
    # --- shared assertion helpers ---
    def assert_node_is_branch(self, node, node_id, children_count):
        # A branch has the expected id and exactly children_count children.
        self.assertEqual(node.node_id, node_id)
        self.assertNotEqual(len(node.children), 0)
        self.assertEqual(len(node.children), children_count)
    def assert_node_is_leaf(self, node, node_id):
        # A leaf has the expected id and no children.
        self.assertEqual(node.node_id, node_id)
        self.assertEqual(len(node.children), 0)
|
{
"content_hash": "57993a9035e68ae81fee001b0fb1ec19",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 104,
"avg_line_length": 34.26701570680628,
"alnum_prop": 0.5656226126814362,
"repo_name": "exercism/python",
"id": "426ed2b95b37668abebfbaf93e7782a93903e286",
"size": "6545",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "exercises/practice/tree-building/tree_building_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "103144"
},
{
"name": "Python",
"bytes": "934764"
},
{
"name": "Shell",
"bytes": "2960"
}
],
"symlink_target": ""
}
|
from south.v2 import DataMigration
from django.core.management import call_command
class Migration(DataMigration):
    # South data migration: seeds the AccountType table from a fixture.
    def forwards(self, orm):
        """Load the AccountType seed rows from the bundled fixture."""
        call_command("loaddata", "account_types.yaml")
    def backwards(self, orm):
        """Undo the load by deleting every AccountType row."""
        orm.AccountType.objects.all().delete()
    # South's frozen ORM snapshot of the app's models at this migration.
    # Auto-generated; it lets forwards/backwards run against the historical
    # schema rather than the current models. Do not edit by hand.
    models = {
        'webinars.account': {
            'Meta': {'object_name': 'Account'},
            'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '64'}),
            'extra_username': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.accounttype': {
            'Meta': {'object_name': 'AccountType'},
            'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.cmsform': {
            'Meta': {'object_name': 'CmsForm'},
            'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.event': {
            'Meta': {'object_name': 'Event'},
            '_update_cms_form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.CmsForm']"}),
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'avoid_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cms_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'symmetrical': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_hubspot_snapshotted_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'}),
            'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registrants_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'requested_registrants_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_registrants_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.hub': {
            'Meta': {'object_name': 'Hub'},
            'attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'avoid_lead_update_on_unformed_events': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'events_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
            'registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'requested_events_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_events_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/New_York'", 'max_length': '64'}),
            'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.hubspotregistrantsnapshot': {
            'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
            'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'conversion_event_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.landingpage': {
            'Meta': {'object_name': 'LandingPage'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
            'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'webinars.registrant': {
            'Meta': {'object_name': 'Registrant'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
            'conversion_event_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.task': {
            'Meta': {'object_name': 'Task'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']", 'null': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'priority': ('django.db.models.fields.IntegerField', [], {}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'sync_all_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_events': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_specific_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'webinars.taskrunner': {
            'Meta': {'object_name': 'TaskRunner'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.webexeventsnapshot': {
            'Meta': {'object_name': 'WebexEventSnapshot'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.webexregistrantsnapshot': {
            'Meta': {'object_name': 'WebexRegistrantSnapshot'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        }
    }
    # Restrict this migration to the webinars app.
    complete_apps = ['webinars']
|
{
"content_hash": "6ffdb39431be8d0873bd0356c856870f",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 157,
"avg_line_length": 77.62359550561797,
"alnum_prop": 0.5442570746182239,
"repo_name": "prior/webinars",
"id": "3ac8d1d8b43bb9ce5276c87bb681bd04cb196c44",
"size": "13835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webinars_web/webinars/migrations/0002_load_account_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1671417"
},
{
"name": "Shell",
"bytes": "1175"
}
],
"symlink_target": ""
}
|
import unittest
import os
from fontTools import afmLib
CWD = os.path.abspath(os.path.dirname(__file__))
DATADIR = os.path.join(CWD, 'data')
AFM = os.path.join(DATADIR, 'TestAFM.afm')
class AFMTest(unittest.TestCase):
    """Round-trip tests for afmLib against the TestAFM.afm fixture."""

    def test_read_afm(self):
        # Reading the fixture must expose the expected kern pairs and metrics.
        parsed = afmLib.AFM(AFM)
        expected_pairs = [('V', 'A'), ('T', 'comma'), ('V', 'd'),
                          ('T', 'c'), ('T', 'period')]
        self.assertEqual(sorted(parsed.kernpairs()), sorted(expected_pairs))
        self.assertEqual(parsed['V', 'A'], -60)
        self.assertEqual(parsed['V', 'd'], 30)
        self.assertEqual(parsed['A'], (65, 668, (8, -25, 660, 666)))

    def test_write_afm(self):
        # Writing and re-reading must preserve kerning, chars and comments.
        original = afmLib.AFM(AFM)
        rewritten, rewrittenData = self.write(original)
        self.assertEqual(original.kernpairs(), rewritten.kernpairs())
        self.assertEqual(original.chars(), rewritten.chars())
        # skip the "generated by afmLib" comment
        self.assertEqual(original.comments(), rewritten.comments()[1:])
        for pair in original.kernpairs():
            self.assertEqual(original[pair], rewritten[pair])
        for char in original.chars():
            self.assertEqual(original[char], rewritten[char])
        with open(AFM, 'r') as f:
            originalLines = f.read().splitlines()
        rewrittenLines = rewrittenData.splitlines()
        del rewrittenLines[1]  # remove the "generated by afmLib" comment
        self.assertEqual(originalLines, rewrittenLines)

    @staticmethod
    def write(afm, sep='\r'):
        """Write `afm` to a scratch file, re-read it, return (AFM, raw text)."""
        temp = os.path.join(DATADIR, 'temp.afm')
        try:
            afm.write(temp, sep)
            with open(temp, 'r') as f:
                afmData = f.read()
            afm = afmLib.AFM(temp)
        finally:
            # Always clean up the scratch file, even if reading back failed.
            if os.path.exists(temp):
                os.remove(temp)
        return afm, afmData
if __name__ == '__main__':
    import sys
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
|
{
"content_hash": "ad841e215a767fcfb4127c87407015ad",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 99,
"avg_line_length": 28.867924528301888,
"alnum_prop": 0.6509803921568628,
"repo_name": "fonttools/fonttools",
"id": "3e9d9d8819c482b2bdd5f7964e2b43abe1d27824",
"size": "1530",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "Tests/afmLib/afmLib_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3522"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "5442538"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.contrib import admin
from apps.core import models
class LayerTagInline(admin.TabularInline):
    """Tabular inline for editing a Layer's tags; no blank extra rows."""
    model = models.LayerTag
    extra = 0
class LayerImageInline(admin.TabularInline):
    """Tabular inline for editing a Layer's images; no blank extra rows."""
    model = models.LayerImage
    extra = 0
class LayerMetaInline(admin.TabularInline):
    """Tabular inline for editing a Layer's metadata rows; no blank extras."""
    model = models.LayerMeta
    extra = 0
class LayerAdmin(admin.ModelAdmin):
    """Admin page for Layer with tag/image/meta rows edited inline.

    Timestamps are read-only; fields are grouped into a general section
    and a tile-configuration section.
    """
    inlines = [LayerTagInline, LayerImageInline, LayerMetaInline]
    readonly_fields = ('created_at', 'updated_at')

    # Field groups, named for readability and reused in `fieldsets` below.
    _general_fields = (
        'user', 'name', 'description',
        ('capture_start', 'capture_end'),
        ('area', 'area_unit'),
        'projection', 'srid',
        'created_at', 'updated_at', 'deleted_at'
    )
    _tile_fields = (
        'tile_srid', 'tile_format', 'tile_origin', 'resampling',
        'transparency'
    )
    fieldsets = (
        (None, {'fields': _general_fields}),
        ('Tiles', {'fields': _tile_fields}),
    )
# Expose Layer (with its inline editors) in the Django admin site.
admin.site.register(models.Layer, LayerAdmin)
|
{
"content_hash": "be625720e04442997836fc17d6f8f8c3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 24.25531914893617,
"alnum_prop": 0.5736842105263158,
"repo_name": "KAPPS-/raster-foundry",
"id": "2f0af50e9d95df837d4f2a3a314694c31eac319f",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/rf/apps/core/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117282"
},
{
"name": "HTML",
"bytes": "114910"
},
{
"name": "JavaScript",
"bytes": "56060"
},
{
"name": "Python",
"bytes": "27731"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Shell",
"bytes": "7739"
}
],
"symlink_target": ""
}
|
"""
Logging functionality.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011-2012)
"""
import time
from zope.interface import implementer
from twisted.python import log
# almost iso, we dump the T in the middle (makes it more tricky to read imho)
TIME_FORMAT = "%Y-%m-%d %H:%M:%SZ"
@implementer(log.ILogObserver)
class DebugLogObserver(log.FileLogObserver):
    """File log observer with switchable debug/profile/payload channels.

    Events carrying a truthy 'debug', 'profile' or 'payload' key are only
    written when the matching flag was enabled at construction time.
    """

    def __init__(self, file_, debug=False, profile=False, payload=False):
        log.FileLogObserver.__init__(self, file_)
        self.debug = debug
        self.profile = profile
        self.payload = payload

    def formatTime(self, when):
        # Override the default so timestamps render in UTC, which is far
        # easier to correlate across systems in multiple timezones.
        return time.strftime(TIME_FORMAT, time.gmtime(when))

    def emit(self, eventDict):
        # Guard clauses: silently drop any event class that was not enabled.
        # (`is False` kept deliberately to match the original comparison.)
        if self.debug is False and eventDict.get('debug', False):
            return
        if self.profile is False and eventDict.get('profile', False):
            return
        if self.payload is False and eventDict.get('payload', False):
            return
        log.FileLogObserver.emit(self, eventDict)
|
{
"content_hash": "b0cd8929bfcbbf53e88498674d7cfc13",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 89,
"avg_line_length": 30.29787234042553,
"alnum_prop": 0.6706460674157303,
"repo_name": "NORDUnet/opennsa",
"id": "bfd21fc272bc1c1596249c9d1156a826f889a1a4",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opennsa/logging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1165"
},
{
"name": "HTML",
"bytes": "1480"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "746817"
},
{
"name": "SCSS",
"bytes": "63"
},
{
"name": "Shell",
"bytes": "4132"
}
],
"symlink_target": ""
}
|
import os
import Configuracoes
from Jogador import Jogador
import Dado
import Tabuleiro
import Acao
def turno(jogador):
    """Play one turn for `jogador`, recursing while moves remain.

    Rolls the configured number of dice, handles jail entry/release on
    doubles, moves the player and resolves the landing square.
    """
    os.system("clear")
    jogador.mostra_resumo()
    jogador.jogadas -= 1  # this turn consumes one move
    if jogador.esta_preso:
        print("{jogador} esta preso, e deve tirar valores iguais nos dados para sair!".format(jogador=jogador.nome))
    # Roll every configured die.
    dados = [Dado.jogarDado() for _ in range(Configuracoes.quantidade_dados)]
    print("{jogador} jogou os dados e Conseguiu os seguintes numeros: ".format(jogador=jogador.nome), dados)
    if Dado.dadosIguais(dados):
        # Third consecutive doubles sends the player to jail.
        if jogador.dados_iguais == 3:
            Acao.prender(jogador)
        if jogador.esta_preso:
            print("Voce se Libertou!")
            jogador.esta_preso = False
        else:
            print("dados iguais, Uma nova jogada para {jogador}".format(jogador=jogador.nome))
            jogador.dados_iguais += 1  # track the doubles streak
            jogador.jogadas += 1  # doubles grant an extra move
    if not jogador.esta_preso:
        jogador.caminha(sum(dados))
        # Resolve the board square the player landed on.
        casa = Tabuleiro.tabuleiro[jogador.posicao]
        Acao.verifica_terreno(jogador, casa)
    if jogador.jogadas == 0:
        print("Fim do turno do {jogador}.".format(jogador=jogador.nome))
    else:
        turno(jogador)
if __name__ == '__main__':
    # Register each configured player by asking for a name on stdin.
    for num_jogador in range(Configuracoes.quantidade_jogadores):
        nome = input("Digite o nome do jogador {num}: ".format(num=str(num_jogador))) # read the player's name
        jogador = Jogador(nome) # create the player instance
        Configuracoes.jogadores.append(jogador) # add the player to the player list
    # Keep playing rounds while more than one player remains.
    while len(Configuracoes.jogadores) > 1:
        for jogador in Configuracoes.jogadores:
            jogador.inicia_turno()
            turno(jogador)
            input("Pressione qualquer tecla para continuar...")
|
{
"content_hash": "39c6ebee7f1178c9cfa7c971f4d3ba77",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 116,
"avg_line_length": 37.706896551724135,
"alnum_prop": 0.6611796982167353,
"repo_name": "fcrozetta/Text-Based-Monopoly",
"id": "775b86b7b6625fa06201c1840c3c00c7d55354c6",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11333"
}
],
"symlink_target": ""
}
|
from fabricate import *
programs = ['test_strided_comm', 'testcount']
def build():
    """Build every test program: compile its sources, then link the binary."""
    for program in programs:
        units = [program, '../src/clustergis']
        compile(units)
        link(units, program)
def compile(sources):
    """Compile each source name (no extension) to a .o object via mpicc.

    NOTE(review): this name shadows the `compile` builtin; kept as-is
    because build() calls it by this name.
    """
    for unit in sources:
        run('mpicc -Wall -O3 -I../src/ `geos-config --cflags` -c ' + unit + '.c -o ' + unit + '.o')
def link(sources, program='a.out'):
    """Link the .o objects of `sources` into `program` against geos_c."""
    object_files = ' '.join(name + '.o' for name in sources)
    run('mpicc -o ' + program + ' -Wall -O3 `geos-config --cflags` ' + object_files + ' `geos-config --ldflags` -lgeos_c')
def clean():
    # Remove everything fabricate recorded as a build output.
    autoclean()
# fabricate entry point: runs build() by default, or the command named on argv.
main()
|
{
"content_hash": "e10b8f6f40a00c422ae9d6363429d52e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 26.227272727272727,
"alnum_prop": 0.6291161178509532,
"repo_name": "nathankerr/clusterGIS",
"id": "59bd85aaeb06e73965d37659457b9598e8c63c73",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29991"
},
{
"name": "Python",
"bytes": "54320"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Minimal packaging config: ships the `pynanowit` package with its
# Flask/waitress runtime dependencies; tests are driven by pytest.
setup(
    name='pynanowit',
    packages=['pynanowit'],
    include_package_data=True,
    install_requires=['flask', 'waitress'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
|
{
"content_hash": "9497e963de83105996faf480c4738930",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 23.2,
"alnum_prop": 0.6594827586206896,
"repo_name": "vorjdux/pynanowit",
"id": "81b0f7f19c0d667330496797414df3d8d6ec9394",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3080"
},
{
"name": "HTML",
"bytes": "4097"
},
{
"name": "Python",
"bytes": "13625"
}
],
"symlink_target": ""
}
|
"""
Various classes for tracking attribute values. Work in progress.
$Id$
"""
__version__='$Revision: 8706 $'
# Note that the tracking classes don't depend on Topographica, but the
# tests do.
import param
def value_printer(**kw):
    """Print one tracked access as ``t=<time>: <obj>.<name>=<value>``.

    Expects keyword arguments 'obj', 'name', 'time' and 'value'; uses the
    object's ``name`` attribute when it has one, else ``str(obj)``.
    """
    obj_name = kw['obj'].name if hasattr(kw['obj'],'name') else str(kw['obj'])
    print "t=%s: %s.%s=%s"%(kw['time'],obj_name,kw['name'],kw['value'])
class Lister(object):
    """Accumulate tracked attribute accesses in memory.

    For each attribute name in `attrs`, keeps parallel lists of access
    times (``self.times``) and observed values (``self.values``).
    """

    def __init__(self, attrs):
        self.times = dict((attr_name, []) for attr_name in attrs)
        self.values = dict((attr_name, []) for attr_name in attrs)

    def __call__(self, **kw):
        # Record one access: kw carries 'name', 'time' and 'value'.
        attr_name = kw['name']
        self.times[attr_name].append(kw['time'])
        self.values[attr_name].append(kw['value'])
class Logger(param.Parameterized):
    """Append tracked attribute accesses to a text file, one per line."""
    # CB: need to do like plotfilesaver

    filename = param.String("test.txt")

    def __init__(self, attrs, **params):
        # NOTE(review): `attrs` is accepted for symmetry with Lister but
        # is not used here.
        super(Logger, self).__init__(**params)
        self.f = open(self.filename, 'w')

    def __call__(self, **kw):
        obj_name = kw['obj'].name if hasattr(kw['obj'], 'name') else str(kw['obj'])
        self.f.write("t=%s: %s.%s=%s\n" % (kw['time'], obj_name, kw['name'], kw['value']))

    def close(self):
        self.f.close()
# would be nice to have some that calculate summary statistics (e.g. running mean, variance, etc)
class AttributeTracker(param.Parameterized):
    """
    Tracks access to one or more of an object's attributes.
    By default, tracks every access to the specified attributes:
    >>> class Test(object):
    ... name = 'test1'
    ... def get_x(self):
    ... return 1
    ... x = property(get_x)
    >>> t = Test()
    >>> p = AttributeTracker(t,['x'])
    >>> junk=t.x
    t=None: test1.x=1
    >>> junk=t.x
    t=None: test1.x=1
    >>> junk=t.x
    t=None: test1.x=1
    Typical usage is to track a dynamic parameter of a Parameterized
    instance. In that case, only track access once for any particular
    simulation time:
    >>> import topo
    >>> from topo import pattern
    >>> from topo.tests.utils import Series
    >>> g = pattern.Gaussian(x=Series(),name='testg')
    >>> p = AttributeTracker(g,['x'],time_fn=topo.sim.time)
    >>> junk=g()
    t=0: testg.x=0
    >>> junk=g()
    >>> # tracker records nothing because time didn't advance
    >>> topo.sim.run(1)
    >>> junk=g()
    t=1: testg.x=1
    Example of storing parameter values in memory:
    >>> g = pattern.Gaussian(x=Series(start=-1),y=Series(start=0),name='testg')
    >>> v = Lister(['x','y'])
    >>> p = AttributeTracker(g,['x','y'],time_fn=topo.sim.time,value_tracker=v)
    >>> junk=g()
    >>> topo.sim.run(1)
    >>> junk=g()
    >>> v.times['x']
    [mpq(1), mpq(2)]
    >>> v.values['x']
    [-1, 0]
    >>> v.times['y']
    [mpq(1), mpq(2)]
    >>> v.values['y']
    [0, 1]
    """
    # Callable invoked on each tracked access; defaults to printing.
    value_tracker = param.Callable(value_printer)
    time_fn = param.Callable(None,constant=True,doc="""
    Set to topo.sim.time for tracking dynamic parameters in topographica simulations.""")
    def __init__(self,obj,attr_names,**params):
        super(AttributeTracker,self).__init__(**params)
        self.obj=obj
        # CEBALERT: if someone attaches multiple trackers to an
        # object, they have to be detached in reverse order
        # (should probably just prevent attaching multiple trackers)
        self._original_getattribute = type(obj).__getattribute__
        # None marks "never accessed yet" for each tracked attribute.
        self._last_access= dict( [(attr_name,None) for attr_name in attr_names] )
        # Replacement __getattribute__: reports an access via value_tracker
        # when it is on the tracked object, for a tracked attribute, and
        # (when time_fn is set) time has advanced since the last report.
        def _tracked_getattribute(instance,attr_name):
            v = self._original_getattribute(instance,attr_name)
            t = self.time_fn() if self.time_fn is not None else None
            if instance is self.obj and attr_name in self._last_access and \
               (self._last_access[attr_name] is None or t>self._last_access[attr_name]):
                self.value_tracker(obj=instance,name=attr_name,time=t,value=v)
                self._last_access[attr_name]=t
            return v
        # Patch at the *class* level: every instance of type(obj) goes
        # through the wrapper, but only accesses on self.obj are reported.
        type(obj).__getattribute__=_tracked_getattribute
    def stop_tracking(self):
        # Restore the class's original __getattribute__.
        type(self.obj).__getattribute__=self._original_getattribute
## ## CB: currently unused
## class MethodTracker(param.Parameterized):
## value_tracker = param.Callable(value_printer)
## time_fn = param.Parameter(None,constant=True) # can't have Callable constant=True set to None
## def __init__(self,obj,method_name,**params):
## super(MethodTracker,self).__init__(**params)
## self.obj = obj
## self.method_name = method_name
## self.original_method = getattr(type(obj),method_name)
## self._last_time = None
## def _tracked(instance,*args,**kw):
## v = self.original_method(instance,*args,**kw)
## t = self.time_fn() if self.time_fn is not None else None
## if instance is self.obj and (self._last_time is None or t>self._last_time):
## self.value_tracker(self.method_name,t,v)
## self._last_time=t
## return v
## setattr(type(obj),self.method_name,_tracked)
## def stop_tracking(self):
## setattr(type(self.obj),self.method_name,self.original_method)
# Run the docstring examples as tests (the ipython-hack namespace included).
if __name__=='__main__' or __name__=="__mynamespace__": # for the ipython hack
    import doctest
    doctest.testmod(verbose=True)
|
{
"content_hash": "4f51b8a0f2882b4485411618263e5b62",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 100,
"avg_line_length": 31.056818181818183,
"alnum_prop": 0.5810464690815953,
"repo_name": "ioam/svn-history",
"id": "4cacc23112f01997be4b8d48de5954f46b222f78",
"size": "5466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/tracker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "Emacs Lisp",
"bytes": "21378"
},
{
"name": "JavaScript",
"bytes": "12602"
},
{
"name": "PHP",
"bytes": "596890"
},
{
"name": "Perl",
"bytes": "43403"
},
{
"name": "Python",
"bytes": "3334771"
},
{
"name": "Shell",
"bytes": "9260"
},
{
"name": "Tcl",
"bytes": "433956"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the active user model in a way that works across Django versions.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()
# Labels used to reference the user model inside the frozen ORM definitions.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
    """South migration: widen ObjectDB.db_lock_storage from CharField(512) to TextField."""
    def forwards(self, orm):
        # Changing field 'ObjectDB.db_lock_storage'
        db.alter_column('objects_objectdb', 'db_lock_storage', self.gf('django.db.models.fields.TextField')())
    def backwards(self, orm):
        # Changing field 'ObjectDB.db_lock_storage'
        db.alter_column('objects_objectdb', 'db_lock_storage', self.gf('django.db.models.fields.CharField')(max_length=512))
    # Frozen ORM model definitions used by South when running this migration;
    # not live model code — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'objects.alias': {
            'Meta': {'object_name': 'Alias'},
            'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'objects.objattribute': {
            'Meta': {'object_name': 'ObjAttribute'},
            'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'db_lock_storage': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
            'db_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'objects.objectdb': {
            'Meta': {'object_name': 'ObjectDB'},
            'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
            'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
            'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
            'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']", 'null': 'True', 'blank': 'True'}),
            'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'objects.objectnick': {
            'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'ObjectNick'},
            'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
            'db_real': ('django.db.models.fields.TextField', [], {}),
            'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'players.playerdb': {
            'Meta': {'object_name': 'PlayerDB'},
            'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'db_is_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']", 'null': 'True', 'blank': 'True'}),
            'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True'})
        }
    }
    complete_apps = ['objects']
|
{
"content_hash": "aba7c9f2407ec2cca6be2d34f9ab26da",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 187,
"avg_line_length": 72.89915966386555,
"alnum_prop": 0.5586167146974064,
"repo_name": "google-code-export/evennia",
"id": "674545fc1b01d1a3625320eb3cc1c80a20518c98",
"size": "8699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/objects/migrations/0014_auto__chg_field_objectdb_db_lock_storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "4450"
},
{
"name": "CSS",
"bytes": "19010"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "Gettext Catalog",
"bytes": "12141"
},
{
"name": "HTML",
"bytes": "31276"
},
{
"name": "JavaScript",
"bytes": "10492"
},
{
"name": "Python",
"bytes": "2789785"
}
],
"symlink_target": ""
}
|
"""Module for CLI output styling.
This module sets the style of the CLI output
"""
class CLIStyle(object):
    """ANSI escape sequences for styling CLI output.

    Use the constants directly inside print() calls, e.g.::

        print(CLIStyle.COLOR_GREEN + "Hello World" + CLIStyle.STYLE_RESET)
    """
    # Basic terminal controls.
    STYLE_RESET = '\033[0m'
    CLEAR_SCREEN = '\033[2J'
    # Text attributes.
    STYLE_BOLD = '\033[1m'
    STYLE_UNDERLINE = '\033[4m'
    # Foreground colors.
    COLOR_GREEN = '\033[32m'
    COLOR_RED = '\033[31m'
    COLOR_YELLOW = '\033[33m'
    # Semantic aliases composed from the primitives above.
    META_H1 = CLEAR_SCREEN + STYLE_RESET + STYLE_BOLD + STYLE_UNDERLINE
    META_OK = COLOR_GREEN
    META_ERR = COLOR_RED
    META_INPUT = COLOR_YELLOW
|
{
"content_hash": "76bb411a1650896ea8e1f88e240e06ea",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6531713900134952,
"repo_name": "NETHINKS/opennms-docker-env",
"id": "17c556b2093153db25cd93e8ffebeab8773dfaba",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/container_generator/clistyle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1428"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "64686"
},
{
"name": "Shell",
"bytes": "77405"
},
{
"name": "Smarty",
"bytes": "10117"
}
],
"symlink_target": ""
}
|
from rpc.whisperrpc import *
from rpc.factory import *
from rpc.standard_library import *
if __name__ == '__main__':
    # Connection parameters for the local whispercast admin RPC endpoint.
    params = {
        'remote_host' : '127.0.0.1',
        'remote_port' : 8881,
        'admin_pass' : 'test',
    }
    rpc_conn = RpcConnection(
        params.get('remote_host'), port = params.get('remote_port'),
        user = 'admin', passwd = params.get('admin_pass'))
    global_service = RpcService_MediaElementService(
        rpc_conn, "/rpc-config")
    standardlib_service = RpcService_StandardLibraryService(
        rpc_conn, '/rpc-config/standard_library')
    # Deleting old elements and exports:
    global_service.DeleteElementSpec("files")
    global_service.DeleteElementSpec("normalizer")
    global_service.DeleteElementSpec("mp3")
    global_service.DeleteElementSpec("example")
    global_service.DeleteElementSpec("example_flv")
    global_service.DeleteElementSpec("example_mp4")
    global_service.DeleteElementSpec("livestream")
    # NOTE(review): "normalize" is deleted here but the spec added below is
    # named "normalizer" (also deleted above) -- confirm this stray name.
    global_service.DeleteElementSpec("f4v2flv")
    global_service.DeleteElementSpec("normalize")
    global_service.StopExportElement('http', '/')
    global_service.StopExportElement('http', '/mp3')
    global_service.StopExportElement('http', '/example')
    global_service.StopExportElement('rtmp', 'example')
    global_service.StopExportElement('rtmp', 'livecam')
    global_service.StopExportElement('http', '/whispercast/livecam')
    # Create new elements:
    ret = standardlib_service.AddAioFileElementSpec(
        "files", True, False,
        AioFileElementSpec(_media_type_ = 'raw',
                           _home_dir_ = '',
                           _file_pattern_ = '',
                           _default_index_file_ = 'index.html'))
    print "File source added: %s" % ret
    ret = standardlib_service.AddAioFileElementSpec(
        "mp3", True, False,
        AioFileElementSpec(_media_type_ = 'mp3',
                           _home_dir_ = 'mp3',
                           _file_pattern_ = '\\.mp3$'))
    print "File source added: %s" % ret
    ret = standardlib_service.AddAioFileElementSpec(
        "example_flv", True, False,
        AioFileElementSpec(_media_type_ = 'flv',
                           _home_dir_ = 'media',
                           _file_pattern_ = '\\.flv$'))
    print "File source added: %s" % ret
    ret = standardlib_service.AddAioFileElementSpec(
        "example_mp4", True, False,
        AioFileElementSpec(_media_type_ = 'f4v',
                           _home_dir_ = 'media',
                           _file_pattern_ = '4v$'))
    print "File source added: %s" % ret
    # "example" balances between the flv files and the converted mp4 files.
    ret = standardlib_service.AddLoadBalancingElementSpec(
        "example", True, False,
        LoadBalancingElementSpec(_sub_elements_ = ["example_flv",
                                                   "f4v2flv/example_mp4"]))
    print "File source added: %s" % ret
    ret = standardlib_service.AddRtmpPublishingElementSpec(
        "livestream", True, False,
        RtmpPublishingElementSpec(
            [RtmpPublishingElementDataSpec(_name_ = "live1",
                                           _media_type_ = "flv"),
             ]));
    print "File source added: %s" % ret
    ret = standardlib_service.AddNormalizingElementSpec(
        "normalizer", True, False,
        NormalizingElementSpec())
    print "Normalizer added: %s" % ret
    ret = standardlib_service.AddF4vToFlvConverterElementSpec(
        "f4v2flv", True, False,
        F4vToFlvConverterElementSpec())
    print "F4v2Flv added: %s" % ret
    # Export them
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "files",
                          _protocol_ = "http",
                          _path_ = "/",
                          _enable_buffer_flow_control_ = True))
    print "Export added: %s" % ret
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "example",
                          _protocol_ = "http",
                          _path_ = "/example",
                          _content_type_ = "video/x-flv",
                          _enable_buffer_flow_control_ = True))
    print "Export added: %s" % ret
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "normalizer/example",
                          _protocol_ = "rtmp",
                          _path_ = "example",
                          _enable_buffer_flow_control_ = True))
    print "Export added: %s" % ret
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "normalizer/mp3",
                          _protocol_ = "http",
                          _path_ = "/mp3",
                          _content_type_ = "audio/mpeg",
                          _enable_buffer_flow_control_ = True))
    print "Export added: %s" % ret
    # Live streams disable buffer flow control.
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "livestream",
                          _protocol_ = "rtmp",
                          _path_ = "livecam",
                          _content_type_ = "flv",
                          _enable_buffer_flow_control_ = False))
    print "Export added: %s" % ret
    ret = global_service.StartExportElement(
        ElementExportSpec(_media_name_ = "livestream",
                          _protocol_ = "http",
                          _path_ = "/whispercast/livecam",
                          _content_type_ = "flv",
                          _enable_buffer_flow_control_ = False))
    print "Export added: %s" % ret
    # Save config
    ret = global_service.SaveConfig()
    print "Saved : %s" % ret
|
{
"content_hash": "51bb63d2adf126f88ef02fc9878c2316",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 75,
"avg_line_length": 40.80434782608695,
"alnum_prop": 0.546972118629018,
"repo_name": "cpopescu/whispercast",
"id": "8840bad9651f51fff972b1ff8129f64b591857d0",
"size": "5650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whispercast/scripts/set_config.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "5501"
},
{
"name": "C",
"bytes": "384162"
},
{
"name": "C++",
"bytes": "5162993"
},
{
"name": "JavaScript",
"bytes": "69451"
},
{
"name": "Objective-C",
"bytes": "14940"
},
{
"name": "PHP",
"bytes": "39739"
},
{
"name": "Python",
"bytes": "73884"
},
{
"name": "Shell",
"bytes": "68433"
}
],
"symlink_target": ""
}
|
"""
.. module:: runtests
.. moduleauthor:: Artem Mustafa <artemmus@yahoo.com>
"""
import unittest
import logging
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` protocol hook: run everything under tests/test_*.py."""
    discovered = unittest.defaultTestLoader.discover('tests', pattern='test_*.py')
    return discovered
if __name__ == '__main__':
    # Silence mysql_executor logging below ERROR while the suite runs.
    log = logging.getLogger('mysql_executor')
    log.setLevel(logging.ERROR)
    unittest.main()
|
{
"content_hash": "ec7707f5714113bbac355a6c82538e98",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 20,
"alnum_prop": 0.6805555555555556,
"repo_name": "artemmus/mysql_executor",
"id": "b624952a51891c1dae4a407ecc1a305fb4994c11",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35525"
}
],
"symlink_target": ""
}
|
from functools import partial
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QLabel, QVBoxLayout
from electrum_ltc.i18n import _
from electrum_ltc.plugin import hook
from electrum_ltc.wallet import Standard_Wallet
from electrum_ltc.gui.qt.util import WindowModalDialog
from .jade import JadePlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(JadePlugin, QtPluginBase):
    """Qt-specific glue for the Jade hardware-wallet plugin."""
    # Icon resources for the unpaired/paired device states.
    icon_unpaired = "jade_unpaired.png"
    icon_paired = "jade.png"
    def create_handler(self, window):
        # One handler per wallet window.
        return Jade_Handler(window)
    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        # Add a "Show on Jade" action to the receive-address context menu,
        # only for Standard_Wallet instances whose keystore matches this
        # plugin's keystore class, and only when one address is selected.
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                # Queued on the keystore's worker thread via keystore.thread.add.
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on Jade"), show_address)
class Jade_Handler(QtHandlerBase):
    # Signals used by the hw-wallet framework to marshal work onto the GUI
    # thread (declared here; connected by the base/framework code).
    setup_signal = pyqtSignal()
    auth_signal = pyqtSignal(object, object)
    def __init__(self, win):
        super(Jade_Handler, self).__init__(win, 'Jade')
    def message_dialog(self, msg):
        """Show *msg* in a non-blocking window-modal "Jade Status" dialog."""
        self.clear_dialog()
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Jade Status"))
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        dialog.show()
|
{
"content_hash": "8ac4eb1acbfc855479897e05b1e1f1c3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 91,
"avg_line_length": 32.659574468085104,
"alnum_prop": 0.6768729641693811,
"repo_name": "pooler/electrum-ltc",
"id": "4b90ff80a624c8b2d3b1d977236ce86dda10c6b9",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_ltc/plugins/jade/qt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13024"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "NSIS",
"bytes": "7354"
},
{
"name": "Python",
"bytes": "5325268"
},
{
"name": "QML",
"bytes": "318745"
},
{
"name": "Ruby",
"bytes": "16856"
},
{
"name": "Shell",
"bytes": "105672"
},
{
"name": "kvlang",
"bytes": "70748"
}
],
"symlink_target": ""
}
|
"""Ensure user-defined pretraining & rl configs inherit from base configs."""
from ml_collections import ConfigDict
from .pretrain import get_config as get_pretrain_config
from .rl import get_config as get_rl_config
# Ref: https://github.com/deepmind/jaxline/blob/master/jaxline/base_config.py
def __validate_keys(  # pylint: disable=invalid-name
    base_config, config, base_filename):
  """Recursively ensure every key of *base_config* is present in *config*.

  Raises:
    ValueError: if any (possibly nested) base key is absent from config.
  """
  for key in base_config.keys():
    if key in config:
      child = base_config[key]
      # Recurse into nested ConfigDicts unless the child config disabled
      # the whole sub-tree by setting it to None.
      if isinstance(child, ConfigDict) and config[key] is not None:
        __validate_keys(child, config[key], base_filename)
    else:
      raise ValueError(
          f"Key {key} missing from config. This config is required to have "
          f"keys: {list(base_config.keys())}. See base_configs/{base_filename} "
          "for more details.")
def validate_config(config, mode):
  """Ensures a config inherits from a base config.

  Args:
    config: The child config to validate.
    mode: Can be one of 'pretraining' or 'rl'.

  Raises:
    ValueError: if the base config contains keys that are not present in config.
  """
  assert mode in ["pretrain", "rl"]
  if mode == "rl":
    base_config, base_filename = get_rl_config(), "rl.py"
  else:
    base_config, base_filename = get_pretrain_config(), "pretrain.py"
  __validate_keys(base_config, config, base_filename)
|
{
"content_hash": "001e631cddb9c6916d7242603effc23d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 34.717948717948715,
"alnum_prop": 0.6831610044313147,
"repo_name": "google-research/google-research",
"id": "0e862666b944bdc57bcba1bf16c1a0a47c6eae1d",
"size": "1962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xirl/base_configs/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated validator for the ``indicator.legendwidth`` property:
    # a non-negative number (min=0) with the "style" edit type by default.
    def __init__(self, plotly_name="legendwidth", parent_name="indicator", **kwargs):
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
{
"content_hash": "847b2ca2107f00c8f18c21422cc317fd",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 37.5,
"alnum_prop": 0.6177777777777778,
"repo_name": "plotly/plotly.py",
"id": "8ba5cf55975da0c91cd1cb8be49d8af84667b3b2",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/indicator/_legendwidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
__author__ = 'guorongxu'
import re
import os
import sys
import logging
import MatrixPrinter
## Parsing mutation files and return a dictionary of ID and name.
def parse(root_raw_dir, root_expression_dir, tumor_type, release_year, release_month, release_day):
    """Parse all per-sample MAF files of a tumor type into a mutation matrix.

    Reads every *.maf.txt file under the GDAC release folder for the given
    tumor type, keeps the highest mutation score per gene per sample (see
    get_score), and writes the matrix via MatrixPrinter.print_matrix.

    Args:
        root_raw_dir: root directory holding the raw GDAC downloads.
        root_expression_dir: root directory for the generated matrices.
        tumor_type: TCGA tumor-type code embedded in the folder names.
        release_year, release_month, release_day: GDAC release date parts.

    Fixes vs. previous version: removed the dangling no-op statement
    `fp.closed` (the `with` block already closes the file), streams lines
    instead of readlines(), and calls get_score once per line.
    """
    mutation_file_folder = root_raw_dir + "/" + tumor_type + "/gdac.broadinstitute.org_" \
        + tumor_type + ".Mutation_Packager_Oncotated_Calls.Level_3." \
        + release_year + release_month + release_day + "00.0.0/"
    output_file_name = root_expression_dir + "/" + tumor_type + "/mutation_matrix.txt"
    if not os.path.exists(os.path.dirname(output_file_name)):
        os.makedirs(os.path.dirname(output_file_name))
    sample_list = {}
    sample_name_list = {}
    gene_name_list = {}
    for sample_file in os.listdir(mutation_file_folder):
        if sample_file.endswith(".maf.txt"):
            gene_list = {}
            mutation_file = mutation_file_folder + sample_file
            # Strip everything after the last '-' to obtain the sample ID.
            sample_file_short = sample_file[0:sample_file.rindex("-")]
            if sample_file_short not in sample_name_list:
                sample_name_list.update({sample_file_short: sample_file_short})
            # Default column of i_Ensembl_so_term; corrected from the header
            # line below because it differs between tumor types.
            index_num = 164
            with open(mutation_file) as fp:
                for line in fp:
                    if not line.startswith("#"):
                        if not line.startswith("Hugo_Symbol"):
                            fields = re.split(r'\t', line)
                            ## Add the gene name into the gene name list
                            if fields[0] not in gene_name_list:
                                gene_name_list.update({fields[0]: fields[0]})
                            ## Add the gene name and score to the gene list.
                            ## If a gene has multiple scores, keep the score of
                            ## the highest scoring mutation.
                            current_score = get_score(fields[index_num])
                            if fields[0] not in gene_list or \
                                    gene_list.get(fields[0]) < current_score:
                                gene_list.update({fields[0]: current_score})
                        else:
                            index_num = get_index_num(line)
            sample_list.update({sample_file_short: gene_list})
    MatrixPrinter.print_matrix(output_file_name, sample_list, sample_name_list, gene_name_list)
## To get the column index from the mutation files
## The index number of i_Ensembl_so_term is different in different tumor type
def get_index_num(header_line):
    """Return the column position of "i_Ensembl_so_term" in a MAF header line.

    Falls back to 164 (the most common position) when the column is absent.
    """
    columns = re.split(r'\t', header_line)
    if "i_Ensembl_so_term" in columns:
        return columns.index("i_Ensembl_so_term")
    return 164
# Severity score per Ensembl sequence-ontology term.  Grouping:
#   1 -- empty classification
#   2 -- silent / low-impact coding terms
#   3 -- non-coding, regulatory, UTR and intronic terms
#   4 -- protein-altering terms (plus regulatory_region_ablation)
#   5 -- high-impact terms (truncating, splice-site, start/stop, ablation)
_VARIANT_CLASSIFICATION_SCORES = {
    "": 1,
    "synonymous_variant": 2,
    "stop_retained_variant": 2,
    "incomplete_terminal_codon_variant": 2,
    "splice_region_variant": 2,
    "intergenic_variant": 3,
    "feature_truncation": 3,
    "regulatory_region_variant": 3,
    "feature_elongation": 3,
    "regulatory_region_amplification": 3,
    "TF_binding_site_variant": 3,
    "TFBS_amplification": 3,
    "TFBS_ablation": 3,
    "downstream_gene_variant": 3,
    "upstream_gene_variant": 3,
    "non_coding_transcript_variant": 3,
    "NMD_transcript_variant": 3,
    "intron_variant": 3,
    "non_coding_transcript_exon_variant": 3,
    "3_prime_UTR_variant": 3,
    "5_prime_UTR_variant": 3,
    "mature_miRNA_variant": 3,
    "coding_sequence_variant": 3,
    "regulatory_region_ablation": 4,
    "protein_altering_variant": 4,
    "missense_variant": 4,
    "missense": 4,
    "inframe_deletion": 4,
    "inframe_insertion": 4,
    "transcript_amplification": 5,
    "start_lost": 5,
    "initiator_codon_variant": 5,
    "stop_lost": 5,
    "frameshift_variant": 5,
    "stop_gained": 5,
    "splice_donor_variant": 5,
    "splice_acceptor_variant": 5,
    "transcript_ablation": 5,
}
def get_score(variant_classification):
    """Map an Ensembl sequence-ontology term to a 1-5 severity score.

    Replaces the former 40-branch elif chain with a table lookup.  As
    before, an unknown term logs a message and yields None.
    """
    score = _VARIANT_CLASSIFICATION_SCORES.get(variant_classification)
    if score is None:
        print(variant_classification + " is missing...")
    return score
## Main entry
if __name__ == "__main__":
    logging.basicConfig(filename='search_engine.log',level=logging.DEBUG)
    # Positional CLI arguments: raw-data root, output root, tumor type, and
    # the GDAC release date split into year/month/day components.
    root_raw_dir = sys.argv[1]
    root_expression_dir = sys.argv[2]
    tumor_type = sys.argv[3]
    release_year = sys.argv[4]
    release_month = sys.argv[5]
    release_day = sys.argv[6]
    logging.info("Root raw dir: " + root_raw_dir)
    logging.info("Root expression dir: " + root_expression_dir)
    logging.info("Tumor type: " + tumor_type)
    logging.info("Release year: " + release_year)
    logging.info("Release month: " + release_month)
    logging.info("Release day: " + release_day)
    parse(root_raw_dir, root_expression_dir, tumor_type, release_year, release_month, release_day)
|
{
"content_hash": "125f53477fce4f2110ef6e58c8d0556f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 105,
"avg_line_length": 39.25443786982248,
"alnum_prop": 0.6097377148025324,
"repo_name": "ucsd-ccbb/Oncolist",
"id": "fdef135e9cc469dc63b3935f3d34ce3a6d4da508",
"size": "6634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/TCGA/MutationParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90415"
},
{
"name": "HTML",
"bytes": "6408827"
},
{
"name": "JavaScript",
"bytes": "5713003"
},
{
"name": "Jupyter Notebook",
"bytes": "27242"
},
{
"name": "Python",
"bytes": "1044959"
},
{
"name": "Shell",
"bytes": "24875"
}
],
"symlink_target": ""
}
|
import os
import signal
def handle_sigterm(signum, frame):
    """SIGTERM handler: exit immediately with status 0.

    os._exit skips atexit handlers and buffer flushing -- this is the
    hard-exit variant intended for child processes.
    """
    # do stuff
    os._exit(0)
# subprocess
# Register the SIGTERM handler.  Fixed: the previous code referenced
# signal.SGITERM (a typo), which raises AttributeError at import time.
signal.signal(signal.SIGTERM, handle_sigterm)
# where to kill subprocess
# NOTE(review): `pid` must be defined by the surrounding example context
# (e.g. the child's process id from fork/Popen) before this line runs.
os.kill(pid, signal.SIGTERM)
|
{
"content_hash": "8052a222b1b4e87aa30a3e27b8df908f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 45,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.7307692307692307,
"repo_name": "louistin/fullstack",
"id": "e5867e9f472a7bc07ab26311ef4029267df5d31a",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/processing/example.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "120813"
},
{
"name": "C++",
"bytes": "261575"
},
{
"name": "HTML",
"bytes": "588772"
},
{
"name": "Lua",
"bytes": "16542"
},
{
"name": "Makefile",
"bytes": "437"
},
{
"name": "OpenEdge ABL",
"bytes": "420649"
},
{
"name": "Python",
"bytes": "57963"
},
{
"name": "Shell",
"bytes": "97"
},
{
"name": "Vim script",
"bytes": "21969"
}
],
"symlink_target": ""
}
|
import os
import sys
import math
import numpy as np
from PyQt4 import QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt, QSize, pyqtSignal
from Orange.canvas.utils import environ
from Orange.widgets import gui
from Orange.widgets.utils import colorbrewer
# Default qualitative palette: 17 RGB triples used when no explicit
# scheme has been chosen.
DefaultRGBColors = [
    (0, 0, 255), (255, 0, 0), (0, 255, 0), (255, 128, 0), (255, 255, 0),
    (255, 0, 255), (0, 255, 255), (128, 0, 255), (0, 128, 255), (255, 223, 128),
    (127, 111, 64), (92, 46, 0), (0, 84, 0), (192, 192, 0), (0, 127, 127),
    (128, 0, 0), (127, 0, 127)]
# Preset ColorBrewer-style palettes keyed by the number of classes (3-8);
# each entry extends the previous one with an additional color.
DefaultColorBrewerPalette = {
    3: [(127, 201, 127), (190, 174, 212), (253, 192, 134)],
    4: [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153)],
    5: [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153),
        (56, 108, 176)],
    6: [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153),
        (56, 108, 176), (240, 2, 127)],
    7: [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153),
        (56, 108, 176), (240, 2, 127), (191, 91, 23)],
    8: [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153),
        (56, 108, 176), (240, 2, 127), (191, 91, 23), (102, 102, 102)]}
# Fixed pixel size of the color-selection buttons.
ColorButtonSize = 25
#A 10X10 single color pixmap
class ColorPixmap(QIcon):
    # QIcon showing a solid square of `color`; `size` is the edge length in
    # pixels (defaults to 12 despite the 10x10 comment above).  The color is
    # kept on the instance so callers can read it back.
    def __init__(self, color=QColor(Qt.white), size=12):
        p = QPixmap(size, size)
        p.fill(color)
        self.color = color
        QIcon.__init__(self, p)
# a widget for selecting the colors to be used
class ColorPaletteDlg(QDialog):
    """Dialog for building and saving named color "schemas".

    A schema bundles single-color buttons, continuous (two-color) palettes,
    extended continuous palettes with intermediate pass-through colors, and
    discrete palettes.  Saved profiles are exposed through a combo box.  The
    dynamically created widgets are stored in ``self.__dict__`` under keys
    such as ``"butt<name>"``, ``"cont<name>Left"`` or ``"disc<name>View"``.

    Fix vs. previous version: ``createPalette`` now uses floor division
    (``//``) -- under Python 3 (this file already relies on bare
    ``super()``), ``colorNumber / 2`` yields a float and ``range()`` would
    raise TypeError.
    """
    shemaChanged = pyqtSignal()  # emitted whenever any palette setting changes

    def __init__(self, parent, windowTitle="Color Palette"):
        super().__init__(parent, windowTitle=windowTitle)
        self.setLayout(QVBoxLayout())
        self.layout().setMargin(4)
        self.contPaletteNames = []
        self.exContPaletteNames = []
        self.discPaletteNames = []
        self.colorButtonNames = []
        self.colorSchemas = []
        self.selectedSchemaIndex = 0
        self.mainArea = gui.widgetBox(self, spacing=4)
        self.layout().addWidget(self.mainArea)
        self.schemaCombo = gui.comboBox(
            self.mainArea, self, "selectedSchemaIndex", box="Saved Profiles",
            callback=self.paletteSelected)
        self.hbox = gui.widgetBox(self, orientation="horizontal")
        self.okButton = gui.button(self.hbox, self, "OK", self.acceptChanges)
        self.cancelButton = gui.button(self.hbox, self, "Cancel", self.reject)
        self.setMinimumWidth(230)
        self.resize(350, 200)

    def acceptChanges(self):
        """Close the dialog, persisting the edited state into the schema."""
        state = self.getCurrentState()
        oldState = self.colorSchemas[self.selectedSchemaIndex][1]
        if state == oldState:
            QDialog.accept(self)
        else:
            # if we change the default schema, we must save it under a new name
            if self.colorSchemas[self.selectedSchemaIndex][0] == "Default":
                if QMessageBox.information(
                        self, 'Question',
                        'The color schema has changed. Save?',
                        'Yes', 'Discard', '', 0, 1):
                    QDialog.reject(self)
                else:
                    # jump to "Save current palette as..." so the user is
                    # prompted for a new profile name
                    self.selectedSchemaIndex = self.schemaCombo.count() - 1
                    self.schemaCombo.setCurrentIndex(self.selectedSchemaIndex)
                    self.paletteSelected()
                    QDialog.accept(self)
            # simply save the new users schema
            else:
                self.colorSchemas[self.selectedSchemaIndex] = \
                    [self.colorSchemas[self.selectedSchemaIndex][0], state]
                QDialog.accept(self)

    def createBox(self, boxName, boxCaption=None):
        """Create a left-aligned group box inside the main area."""
        box = gui.widgetBox(self.mainArea, boxCaption)
        box.setAlignment(Qt.AlignLeft)
        return box

    def createColorButton(self, box, buttonName, buttonCaption,
                          initialColor=Qt.black):
        """Add a single-color button, stored as ``self.butt<buttonName>``."""
        self.__dict__["butt" + buttonName] = ColorButton(self, box, buttonCaption)
        self.__dict__["butt" + buttonName].setColor(QColor(initialColor))
        self.colorButtonNames.append(buttonName)

    def createContinuousPalette(self, paletteName, boxCaption,
                                passThroughBlack=0,
                                initialColor1=Qt.white, initialColor2=Qt.black):
        """Add a two-color gradient editor with an optional black midpoint."""
        buttBox = gui.widgetBox(self.mainArea, boxCaption)
        box = gui.widgetBox(buttBox, orientation="horizontal")
        self.__dict__["cont" + paletteName + "Left"] = ColorButton(self, box, color=QColor(initialColor1))
        self.__dict__["cont" + paletteName + "View"] = PaletteView(box)
        self.__dict__["cont" + paletteName + "Right"] = ColorButton(self, box, color=QColor(initialColor2))
        self.__dict__["cont" + paletteName + "passThroughBlack"] = passThroughBlack
        self.__dict__["cont" + paletteName + "passThroughBlackCheckbox"] = gui.checkBox(
            buttBox, self, "cont" + paletteName + "passThroughBlack",
            "Pass through black", callback=self.colorSchemaChange)
        self.contPaletteNames.append(paletteName)

    def createExtendedContinuousPalette(self, paletteName, boxCaption,
                                        passThroughColors=0, initialColor1=Qt.white,
                                        initialColor2=Qt.black,
                                        extendedPassThroughColors=((Qt.red, 1),
                                                                   (Qt.black, 1),
                                                                   (Qt.green, 1))):
        """Like createContinuousPalette, but with a configurable set of
        intermediate pass-through colors, each individually toggleable."""
        buttBox = gui.widgetBox(self.mainArea, boxCaption)
        box = gui.widgetBox(buttBox, orientation="horizontal")
        self.__dict__["exCont" + paletteName + "Left"] = ColorButton(self, box, color=QColor(initialColor1))
        self.__dict__["exCont" + paletteName + "View"] = PaletteView(box)
        self.__dict__["exCont" + paletteName + "Right"] = ColorButton(self, box, color=QColor(initialColor2))
        self.__dict__["exCont" + paletteName + "passThroughColors"] = passThroughColors
        self.__dict__["exCont" + paletteName + "passThroughColorsCheckbox"] = gui.checkBox(
            buttBox, self, "exCont" + paletteName + "passThroughColors",
            "Use pass-through colors", callback=self.colorSchemaChange)
        box = gui.widgetBox(buttBox, "Pass-through colors", orientation="horizontal")
        for i, (color, check) in enumerate(extendedPassThroughColors):
            self.__dict__["exCont" + paletteName + "passThroughColor" + str(i)] = check
            self.__dict__["exCont" + paletteName + "passThroughColor" + str(i) + "Checkbox"] = gui.checkBox(
                box, self, "exCont" + paletteName + "passThroughColor" + str(i),
                "", tooltip="Use color", callback=self.colorSchemaChange)
            self.__dict__["exCont" + paletteName + "color" + str(i)] = ColorButton(self, box, color=QColor(color))
            if i < len(extendedPassThroughColors) - 1:
                gui.rubber(box)
        self.__dict__["exCont" + paletteName + "colorCount"] = len(extendedPassThroughColors)
        self.exContPaletteNames.append(paletteName)

    # #####################################################
    # DISCRETE COLOR PALETTE
    # #####################################################
    def createDiscretePalette(self, paletteName, boxCaption, rgbColors=DefaultRGBColors):
        """Add a discrete palette preview with Edit/Load buttons."""
        vbox = gui.widgetBox(self.mainArea, boxCaption, orientation='vertical')
        self.__dict__["disc" + paletteName + "View"] = PaletteView(vbox)
        self.__dict__["disc" + paletteName + "View"].rgbColors = rgbColors
        hbox = gui.widgetBox(vbox, orientation='horizontal')
        self.__dict__["disc" + paletteName + "EditButt"] = gui.button(
            hbox, self, "Edit palette", self.editPalette,
            tooltip="Edit the order and colors of the palette", toggleButton=1)
        self.__dict__["disc" + paletteName + "LoadButt"] = gui.button(
            hbox, self, "Load palette", self.loadPalette,
            tooltip="Load a predefined color palette", toggleButton=1)
        self.discPaletteNames.append(paletteName)

    def editPalette(self):
        """Open a PaletteEditor for whichever discrete palette's Edit button
        is currently pressed."""
        for paletteName in self.discPaletteNames:
            if self.__dict__["disc" + paletteName + "EditButt"].isChecked():
                colors = self.__dict__["disc" + paletteName + "View"].rgbColors
                if type(colors) == dict:
                    # palettes keyed by class count: edit the largest one
                    colors = colors[max(colors.keys())]
                dlg = PaletteEditor(colors, parent=self)
                if dlg.exec_() and colors != dlg.getRgbColors():
                    self.__dict__["disc" + paletteName + "View"].setDiscPalette(dlg.getRgbColors())
                self.__dict__["disc" + paletteName + "EditButt"].setChecked(0)
                return

    def loadPalette(self):
        """Open the predefined-palette listing for the pressed Load button."""
        for paletteName in self.discPaletteNames:
            if self.__dict__["disc" + paletteName + "LoadButt"].isChecked():
                self.__dict__["disc" + paletteName + "LoadButt"].setChecked(0)
                dlg = ColorPalleteListing()
                # NOTE(review): editPalette above uses exec_(); confirm that
                # exec() is available in the PyQt build in use here.
                if dlg.exec() == QDialog.Accepted:
                    colors = dlg.selectedColors
                    self.__dict__["disc" + paletteName + "View"].setDiscPalette(colors)

    # #####################################################
    def getCurrentSchemeIndex(self):
        return self.selectedSchemaIndex

    def getColor(self, buttonName):
        """Return the QColor of the named single-color button."""
        return self.__dict__["butt" + buttonName].getColor()

    def getContinuousPalette(self, paletteName):
        """Return a ContinuousPaletteGenerator for the named palette."""
        c1 = self.__dict__["cont" + paletteName + "Left"].getColor()
        c2 = self.__dict__["cont" + paletteName + "Right"].getColor()
        b = self.__dict__["cont" + paletteName + "passThroughBlack"]
        return ContinuousPaletteGenerator(c1, c2, b)

    def getExtendedContinuousPalette(self, paletteName):
        """Return an ExtendedContinuousPaletteGenerator for the named palette,
        including only the pass-through colors that are checked."""
        c1 = self.__dict__["exCont" + paletteName + "Left"].getColor()
        c2 = self.__dict__["exCont" + paletteName + "Right"].getColor()
        colors = self.__dict__["exCont" + paletteName + "passThroughColors"]
        if colors:
            colors = [self.__dict__["exCont" + paletteName + "color" + str(i)].getColor()
                      for i in range(self.__dict__["exCont" + paletteName + "colorCount"])
                      if self.__dict__["exCont" + paletteName + "passThroughColor" + str(i)]]
        return ExtendedContinuousPaletteGenerator(c1, c2, colors or [])

    def getDiscretePalette(self, paletteName):
        return ColorPaletteGenerator(
            rgb_colors=self.__dict__["disc" + paletteName + "View"].rgbColors)

    def getColorSchemas(self):
        return self.colorSchemas

    def getCurrentState(self):
        """Snapshot all widget settings as a 4-element list of (name, data)
        pairs: buttons, continuous, discrete and extended palettes."""
        l1 = [(name, self.qRgbFromQColor(self.__dict__["butt" + name].getColor())) for name in self.colorButtonNames]
        l2 = [(name, (self.qRgbFromQColor(self.__dict__["cont" + name + "Left"].getColor()),
                      self.qRgbFromQColor(self.__dict__["cont" + name + "Right"].getColor()),
                      self.__dict__["cont" + name + "passThroughBlack"])) for name in self.contPaletteNames]
        l3 = [(name, self.__dict__["disc" + name + "View"].rgbColors) for name in self.discPaletteNames]
        l4 = [(name, (self.qRgbFromQColor(self.__dict__["exCont" + name + "Left"].getColor()),
                      self.qRgbFromQColor(self.__dict__["exCont" + name + "Right"].getColor()),
                      self.__dict__["exCont" + name + "passThroughColors"],
                      [(self.qRgbFromQColor(self.__dict__["exCont" + name + "color" + str(i)].getColor()),
                        self.__dict__["exCont" + name + "passThroughColor" + str(i)])
                       for i in range(self.__dict__["exCont" + name + "colorCount"])]))
              for name in self.exContPaletteNames]
        return [l1, l2, l3, l4]

    def setColorSchemas(self, schemas=None, selectedSchemaIndex=0):
        """Replace the saved-profile list; falls back to a single "Default"
        profile built from the current widget state."""
        self.schemaCombo.clear()
        if not schemas or type(schemas) != list:
            schemas = [("Default", self.getCurrentState())]
        self.colorSchemas = schemas
        self.schemaCombo.addItems([s[0] for s in schemas])
        self.schemaCombo.addItem("Save current palette as...")
        self.selectedSchemaIndex = selectedSchemaIndex
        self.schemaCombo.setCurrentIndex(self.selectedSchemaIndex)
        self.paletteSelected()

    def setCurrentState(self, state):
        """Apply a snapshot created by getCurrentState back onto the widgets.
        Accepts legacy 3-element snapshots without extended palettes."""
        if len(state) > 3:
            [buttons, contPalettes, discPalettes, exContPalettes] = state
        else:
            [buttons, contPalettes, discPalettes] = state
            exContPalettes = []
        for (name, but) in buttons:
            self.__dict__["butt" + name].setColor(rgbToQColor(but))
        for (name, (l, r, chk)) in contPalettes:
            self.__dict__["cont" + name + "Left"].setColor(rgbToQColor(l))
            self.__dict__["cont" + name + "Right"].setColor(rgbToQColor(r))
            self.__dict__["cont" + name + "passThroughBlack"] = chk
            self.__dict__["cont" + name + "passThroughBlackCheckbox"].setChecked(chk)
            self.__dict__["cont" + name + "View"].setContPalette(rgbToQColor(l), rgbToQColor(r), chk)
        for (name, rgbColors) in discPalettes:
            self.__dict__["disc" + name + "View"].setDiscPalette(rgbColors)
        for name, (l, r, chk, colors) in exContPalettes:
            self.__dict__["exCont" + name + "Left"].setColor(rgbToQColor(l))
            self.__dict__["exCont" + name + "Right"].setColor(rgbToQColor(r))
            self.__dict__["exCont" + name + "passThroughColors"] = chk
            self.__dict__["exCont" + name + "passThroughColorsCheckbox"].setChecked(chk)
            colorsList = []
            for i, (color, check) in enumerate(colors):
                self.__dict__["exCont" + name + "passThroughColor" + str(i)] = check
                self.__dict__["exCont" + name + "passThroughColor" + str(i) + "Checkbox"].setChecked(check)
                self.__dict__["exCont" + name + "color" + str(i)].setColor(rgbToQColor(color))
                if check and chk:
                    colorsList.append(rgbToQColor(color))
            self.__dict__["exCont" + name + "colorCount"] = self.__dict__.get("exCont" + name + "colorCount",
                                                                              len(colors))
            self.__dict__["exCont" + name + "View"].setExContPalette(rgbToQColor(l), rgbToQColor(r),
                                                                     colorsList)

    def paletteSelected(self):
        """React to a combo-box selection: either load the chosen profile or
        run the "Save current palette as..." naming flow."""
        if not self.schemaCombo.count():
            return
        self.selectedSchemaIndex = self.schemaCombo.currentIndex()
        # if we selected "Save current palette as..." option then add another option to the list
        if self.selectedSchemaIndex == self.schemaCombo.count() - 1:
            message = "Please enter a name for the current color settings.\nPressing 'Cancel' will cancel your changes and close the dialog."
            ok = 0
            while not ok:
                text, ok = QInputDialog.getText(self, "Name Your Color Settings", message)
                if (ok):
                    newName = str(text)
                    oldNames = [str(self.schemaCombo.itemText(i)).lower() for i in range(self.schemaCombo.count() - 1)]
                    if newName.lower() == "default":
                        ok = False
                        message = "The 'Default' settings cannot be changed. Please enter a different name:"
                    elif newName.lower() in oldNames:
                        # replace an existing profile of the same name
                        index = oldNames.index(newName.lower())
                        self.colorSchemas.pop(index)
                    if ok:
                        self.colorSchemas.insert(0, (newName, self.getCurrentState()))
                        self.schemaCombo.insertItem(0, newName)
                        self.schemaCombo.setCurrentIndex(0)
                        self.selectedSchemaIndex = 0
                else:
                    ok = 1
                    state = self.getCurrentState()  # if we pressed cancel we have to select a different item than the "Save current palette as..."
                    self.selectedSchemaIndex = 0
                    self.schemaCombo.setCurrentIndex(0)
                    self.setCurrentState(state)
        else:
            schema = self.colorSchemas[self.selectedSchemaIndex][1]
            self.setCurrentState(schema)

    def qRgbFromQColor(self, qcolor):
        """Return the packed #AARRGGBB integer for a QColor."""
        return qcolor.rgba()

    def createPalette(self, color1, color2, passThroughBlack, colorNumber=250):
        """Return a list of `colorNumber` qRgb values interpolating color1 ->
        color2, optionally fading through black at the midpoint.

        Uses floor division: under Python 3, range() rejects the float that
        `colorNumber / 2` produces (this was a crash in the old code).
        """
        if passThroughBlack:
            palette = [qRgb(color1.red() - color1.red() * i * 2. / colorNumber,
                            color1.green() - color1.green() * i * 2. / colorNumber,
                            color1.blue() - color1.blue() * i * 2. / colorNumber)
                       for i in range(colorNumber // 2)]
            palette += [qRgb(color2.red() * i * 2. / colorNumber,
                             color2.green() * i * 2. / colorNumber,
                             color2.blue() * i * 2. / colorNumber)
                        for i in range(colorNumber - (colorNumber // 2))]
        else:
            palette = [qRgb(color1.red() + (color2.red() - color1.red()) * i / colorNumber,
                            color1.green() + (color2.green() - color1.green()) * i / colorNumber,
                            color1.blue() + (color2.blue() - color1.blue()) * i / colorNumber)
                       for i in range(colorNumber)]
        return palette

    # this function is called if one of the color buttons was pressed or there was any other change of the color palette
    def colorSchemaChange(self):
        self.setCurrentState(self.getCurrentState())
        self.shemaChanged.emit()
class ColorPalleteListing(QDialog):
    # Modal dialog listing predefined palettes (colorbrewer.org) as wide flat
    # buttons; clicking one stores its colors in self.selectedColors and
    # accepts the dialog.
    def __init__(self, parent=None, windowTitle="Color Palette List",
                 **kwargs):
        super().__init__(parent, windowTitle=windowTitle, **kwargs)
        self.setLayout(QVBoxLayout())
        self.layout().setMargin(0)
        sa = QScrollArea(
            horizontalScrollBarPolicy=Qt.ScrollBarAlwaysOff,
            verticalScrollBarPolicy=Qt.ScrollBarAlwaysOn
        )
        # NOTE(review): the two calls below repeat the scroll-bar policies
        # already passed as constructor keywords above.
        sa.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        sa.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        self.layout().addWidget(sa)
        space = QWidget(self)
        space.setLayout(QVBoxLayout())
        sa.setWidget(space)
        sa.setWidgetResizable(True)
        self.buttons = []
        self.setMinimumWidth(400)
        box = gui.widgetBox(space, "Information", addSpace=True)
        gui.widgetLabel(
            box,
            '<p align="center">This dialog shows a list of predefined '
            'color palettes <br>from colorbrewer.org that can be used '
            'in Orange.<br/>You can select a palette by clicking on it.</p>'
        )
        box = gui.widgetBox(space, "Default Palette", addSpace=True)
        butt = _ColorButton(
            DefaultRGBColors, flat=True, toolTip="Default color palette",
            clicked=self._buttonClicked
        )
        box.layout().addWidget(butt)
        self.buttons.append(butt)
        # One group box per palette category.
        # NOTE(review): the loop variable `type` shadows the builtin.
        for type in ["Qualitative", "Spectral", "Diverging", "Sequential", "Pastels"]:
            colorGroup = colorbrewer.colorSchemes.get(type.lower(), {})
            if colorGroup:
                box = gui.widgetBox(space, type + " Palettes", addSpace=True)
                items = sorted(colorGroup.items())
                for key, colors in items:
                    butt = _ColorButton(colors, self, toolTip=key, flat=True,
                                        clicked=self._buttonClicked)
                    box.layout().addWidget(butt)
                    self.buttons.append(butt)
        buttons = QDialogButtonBox(
            QDialogButtonBox.Cancel, rejected=self.reject
        )
        self.layout().addWidget(buttons)
        self.selectedColors = None
    def sizeHint(self):
        return QSize(300, 400)
    def _buttonClicked(self):
        # sender() is one of the palette buttons created in __init__.
        button = self.sender()
        self.selectedColors = button.colors
        self.accept()
class _ColorButton(QPushButton):
    # Button whose icon previews a discrete palette; `colors` is the list of
    # RGB triples it represents.
    def __init__(self, colors, parent=None, **kwargs):
        self.colors = colors
        super().__init__(parent, **kwargs)
        self.setIcon(self._paletteicon(colors, self.sizeHint()))
    def sizeHint(self):
        return QSize(320, 40)
    def resizeEvent(self, event):
        # Re-render the palette icon to fill the new button size (minus a
        # small margin for the button frame).
        super().resizeEvent(event)
        size = self.size()
        self.setIconSize(size - QSize(20, 14))
        self.setIcon(self._paletteicon(self.colors, self.iconSize()))
    def _paletteicon(self, colors, size):
        # createDiscPalettePixmap is provided elsewhere in this module.
        return QIcon(
            createDiscPalettePixmap(size.width(), size.height(), colors))
class PaletteEditor(QDialog):
    # Dialog for editing a discrete palette: reorder colors with the up/down
    # buttons (or drag & drop) and double-click an entry to recolor it.
    def __init__(self, rgbColors, parent=None, windowTitle="Palette Editor",
                 **kwargs):
        # NOTE(review): `windowTitle` is accepted but not forwarded to
        # super() -- confirm whether it should be applied to the dialog.
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        self.layout().setMargin(4)
        hbox = gui.widgetBox(self, "Information", orientation='horizontal')
        gui.widgetLabel(
            hbox,
            '<p align="center">You can reorder colors in the list using the'
            '<br/>buttons on the right or by dragging and dropping the items.'
            '<br/>To change a specific color double click the item in the '
            'list.</p>')
        hbox = gui.widgetBox(self, box=True, orientation="horizontal")
        self.discListbox = gui.listBox(hbox, self, enableDragDrop=1)
        vbox = gui.widgetBox(hbox, orientation='vertical')
        buttonUPAttr = gui.button(vbox, self, "", callback=self.moveAttrUP, tooltip="Move selected colors up")
        buttonDOWNAttr = gui.button(vbox, self, "", callback=self.moveAttrDOWN, tooltip="Move selected colors down")
        buttonUPAttr.setIcon(QIcon(os.path.join(environ.widget_install_dir, "icons/Dlg_up3.png")))
        buttonUPAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
        buttonUPAttr.setMaximumWidth(30)
        buttonDOWNAttr.setIcon(QIcon(os.path.join(environ.widget_install_dir, "icons/Dlg_down3.png")))
        buttonDOWNAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
        buttonDOWNAttr.setMaximumWidth(30)
        self.discListbox.itemDoubleClicked.connect(self.changeDiscreteColor)
        box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
                               accepted=self.accept, rejected=self.reject)
        self.layout().addWidget(box)
        self.discListbox.setIconSize(QSize(25, 25))
        # Populate the list: one item per color, carrying its RGB triple on
        # the item as `rgbColor`.
        for ind, (r, g, b) in enumerate(rgbColors):
            item = QListWidgetItem(ColorPixmap(QColor(r, g, b), 25), "Color %d" % (ind + 1))
            item.rgbColor = (r, g, b)
            self.discListbox.addItem(item)
        self.resize(300, 300)
    def changeDiscreteColor(self, item):
        # Open a color picker seeded with the item's current color; update
        # both the icon and the stored RGB triple on accept.
        r, g, b = item.rgbColor
        color = QColorDialog.getColor(QColor(r, g, b), self)
        if color.isValid():
            item.setIcon(ColorPixmap(color, 25))
            item.rgbColor = (color.red(), color.green(), color.blue())
    # move selected attribute in "Attribute Order" list one place up
    def moveAttrUP(self):
        if len(self.discListbox.selectedIndexes()) == 0: return
        ind = self.discListbox.selectedIndexes()[0].row()
        if ind == 0: return
        # Swap icon and rgbColor of the item with its predecessor.
        iconI = self.discListbox.item(ind - 1).icon()
        iconII = self.discListbox.item(ind).icon()
        self.discListbox.item(ind - 1).setIcon(iconII)
        self.discListbox.item(ind).setIcon(iconI)
        self.discListbox.item(ind - 1).rgbColor, self.discListbox.item(ind).rgbColor = self.discListbox.item(
            ind).rgbColor, self.discListbox.item(ind - 1).rgbColor
        self.discListbox.setCurrentRow(ind - 1)
    # move selected attribute in "Attribute Order" list one place down
    def moveAttrDOWN(self):
        if len(self.discListbox.selectedIndexes()) == 0: return
        ind = self.discListbox.selectedIndexes()[0].row()
        if ind == self.discListbox.count() - 1: return
        # Swap icon and rgbColor of the item with its successor.
        iconI = self.discListbox.item(ind + 1).icon()
        iconII = self.discListbox.item(ind).icon()
        self.discListbox.item(ind + 1).setIcon(iconII)
        self.discListbox.item(ind).setIcon(iconI)
        self.discListbox.item(ind).rgbColor, self.discListbox.item(ind + 1).rgbColor = self.discListbox.item(
            ind + 1).rgbColor, self.discListbox.item(ind).rgbColor
        self.discListbox.setCurrentRow(ind + 1)
    def getRgbColors(self):
        """Return the (possibly reordered/recolored) list of RGB triples."""
        return [self.discListbox.item(i).rgbColor for i in range(self.discListbox.count())]
class ContinuousPaletteGenerator:
    """Two-endpoint continuous palette, optionally passing through black.

    getRGB interpolates each channel linearly between the endpoint colors;
    NaN inputs map to a fixed light-blue placeholder color.
    """

    def __init__(self, color1, color2, passThroughBlack):
        self.c1Red, self.c1Green, self.c1Blue = color1.red(), color1.green(), color1.blue()
        self.c2Red, self.c2Green, self.c2Blue = color2.red(), color2.green(), color2.blue()
        self.passThroughBlack = passThroughBlack

    def getRGB(self, val):
        """Interpolated RGB for *val* in [0, 1] (scalar -> tuple, array -> ndarray)."""
        if self.passThroughBlack:
            anchors = [0, 0.5, 1]
            reds = np.array([self.c1Red, 0, self.c2Red])
            greens = np.array([self.c1Green, 0, self.c2Green])
            blues = np.array([self.c1Blue, 0, self.c2Blue])
        else:
            anchors = [0, 1]
            reds = np.array([self.c1Red, self.c2Red])
            greens = np.array([self.c1Green, self.c2Green])
            blues = np.array([self.c1Blue, self.c2Blue])
        interpolated = [np.interp(val, anchors, channel)
                        for channel in (reds, greens, blues)]
        rgb = np.c_[interpolated[0], interpolated[1], interpolated[2]]
        # NaN rows get a fixed placeholder color
        rgb[np.isnan(rgb).any(axis=1), :] = (157, 185, 250)
        if np.isscalar(val):
            return tuple(rgb[0].tolist())
        return rgb

    # val must be between 0 and 1
    def __getitem__(self, val):
        return QColor(*self.getRGB(val))
class ExtendedContinuousPaletteGenerator:
    """Continuous palette through an arbitrary list of intermediate colors.

    The full color sequence is [color1] + passThroughColors + [color2];
    getRGB linearly interpolates between the two colors bracketing *val*.
    """

    def __init__(self, color1, color2, passThroughColors):
        self.colors = [color1] + passThroughColors + [color2]
        # symmetric gamma warp of x in [0, 1] around 0.5
        self.gammaFunc = lambda x, gamma: ((math.exp(gamma * math.log(2 * x - 1)) if x > 0.5 else -math.exp(
            gamma * math.log(-2 * x + 1)) if x != 0.5 else 0.0) + 1) / 2.0

    def getRGB(self, val, gamma=1.0):
        """RGB for *val* in [0, 1]; *gamma* warps the position inside a segment."""
        segment = int(val * (len(self.colors) - 1))
        if segment == len(self.colors) - 1:
            last = self.colors[-1]
            return (last.red(), last.green(), last.blue())
        lo, hi = self.colors[segment], self.colors[segment + 1]
        x = val * (len(self.colors) - 1) - segment
        if gamma != 1.0:
            x = self.gammaFunc(x, gamma)
        pairs = [(lo.red(), hi.red()), (lo.green(), hi.green()), (lo.blue(), hi.blue())]
        return [(c2 - c1) * x + c1 for c1, c2 in pairs]

    # val must be between 0 and 1
    def __getitem__(self, val):
        return QColor(*self.getRGB(val))
class ColorPaletteGenerator:
    """Discrete palette backed by a fixed RGB table.

    When more colors are requested than the table provides, evenly spaced
    HSV hues are generated instead.  *rgb_colors* is either a sequence of
    (r, g, b) tuples or a dict mapping a color count to such a sequence.
    """
    maxHueVal = 260

    def __init__(self, number_of_colors=None, rgb_colors=DefaultRGBColors):
        self.number_of_colors = 0
        self.rgb_colors = self.default_colors = rgb_colors
        self.rgb_array = None  # np.ndarray, (n, 3)
        self.rgba_array = None  # np.ndarray, (n, 4); alpha fixed at 255
        if isinstance(rgb_colors, dict):
            self.rgb_colors_dict = rgb_colors
            self.set_number_of_colors(max(rgb_colors.keys()))
        else:
            self.rgb_colors_dict = None
            self.set_number_of_colors(number_of_colors)

    def set_number_of_colors(self, number_of_colors=None):
        """Change the palette if there are palettes for different number of
        colors. Else, just copy rgbColors to numpy array.

        Raises ValueError when a dict palette has no entry large enough.
        """
        self.number_of_colors = number_of_colors
        if self.rgb_colors_dict is not None:
            number_of_colors = max(3, number_of_colors)
            if number_of_colors not in self.rgb_colors_dict:
                # pick the smallest available palette that is big enough
                keys = [n for n in self.rgb_colors_dict
                        if n >= number_of_colors]
                if keys:
                    number_of_colors = min(keys)
                else:
                    raise ValueError("Not enough colors")
            self.rgb_colors = \
                self.rgb_colors_dict[number_of_colors]
        elif number_of_colors is None or \
                number_of_colors < len(self.rgb_colors):
            self.rgb_colors = self.default_colors
        else:
            # fixed table too small: generate evenly spaced hues
            self.rgb_colors = []
            for i in range(self.number_of_colors):
                col = QColor()
                # NOTE(review): true division yields a float hue on Python 3;
                # confirm the bound Qt accepts it
                col.setHsv(360 / number_of_colors * i, 255, 255)
                # bug fix: getRgb() returns (r, g, b, alpha); keep only the RGB
                # triple so rows match the fixed table and the alpha column
                # appended below stays the 4th column
                self.rgb_colors.append(col.getRgb()[:3])
        self.rgb_array = np.array(self.rgb_colors)
        self.rgba_array = np.hstack([
            self.rgb_array, np.full((len(self.rgb_colors), 1), 255)])

    def __getitem__(self, index):
        return QColor(*self.getRGB(index))

    # noinspection PyPep8Naming
    def getRGB(self, index):
        """RGB triple(s) for *index* (scalar, or integer ndarray for a batch)."""
        if isinstance(index, np.ndarray):
            return self.rgb_array[index.astype(int)]
        else:
            return self.rgb_colors[int(index)]

    # noinspection PyPep8Naming
    def getRGBA(self, index):
        """RGBA quadruple(s) for *index* (scalar, or integer ndarray for a batch)."""
        if isinstance(index, np.ndarray):
            return self.rgba_array[index.astype(int)]
        else:
            # bug fix: this read self.rgba_colors, an attribute never assigned
            # anywhere in the class, so every scalar lookup raised AttributeError
            return self.rgba_array[int(index)]

    getColor = getRGB
# only for backward compatibility
class ColorPaletteHSV(ColorPaletteGenerator):
    """Deprecated alias of ColorPaletteGenerator, kept so old importers keep working."""
    pass
# black and white color palette
class ColorPaletteBW:
    """Grayscale palette.

    With numberOfColors == -1 it works in continuous mode: indexing with a
    0..1 fraction maps linearly from *brightest* to *darkest*.  Otherwise a
    fixed list of evenly spaced gray levels is precomputed.
    """

    def __init__(self, numberOfColors=-1, brightest=50, darkest=255):
        self.numberOfColors = numberOfColors
        self.brightest = brightest
        self.darkest = darkest
        self.hueValues = []

        if numberOfColors == -1:
            return  # continuous mode: gray levels computed on demand
        span = float(numberOfColors - 1)
        self.values = [int(brightest + (darkest - brightest) * i / span)
                       for i in range(numberOfColors)]

    def __getitem__(self, index):
        if self.numberOfColors == -1:
            # continuous: index is a 0..1 fraction
            level = int(self.brightest + (self.darkest - self.brightest) * index)
            return QColor(level, level, level)
        # discrete: index must be between 0 and numberOfColors - 1
        gray = self.values[int(index)]
        return QColor(gray, gray, gray)

    def getColor(self, index):
        """QColor for *index*; identical to indexing."""
        return self[index]
class ColorSchema:
    """Plain record bundling one saved color configuration.

    Holds a display name, a palette object, any additional colors, and the
    pass-through-black flag for continuous palettes.
    """

    def __init__(self, name, palette, additionalColors, passThroughBlack):
        self.passThroughBlack = passThroughBlack
        self.additionalColors = additionalColors
        self.palette = palette
        self.name = name

    def getName(self):
        """Schema display name."""
        return self.name

    def getPalette(self):
        """The palette object stored under this schema."""
        return self.palette

    def getAdditionalColors(self):
        """Additional colors stored with the schema."""
        return self.additionalColors

    def getPassThroughBlack(self):
        """Whether continuous palettes pass through black."""
        return self.passThroughBlack
class PaletteView(QGraphicsView):
    """Fixed-height strip that previews a discrete, continuous or extended
    continuous palette, re-rendered on every resize."""
    def __init__(self, parent=None):
        # the scene must exist before it is handed to QGraphicsView.__init__
        self.canvas = QGraphicsScene(0, 0, 1000, ColorButtonSize)
        QGraphicsView.__init__(self, self.canvas, parent)
        self.ensureVisible(0, 0, 1, 1)

        # which palette to draw is inferred in updateImage() from these:
        # color1 is None          -> discrete palette (rgbColors)
        # passThroughColors None  -> plain continuous (color1/color2)
        # otherwise               -> extended continuous
        self.color1 = None
        self.color2 = None
        self.rgbColors = []
        self.passThroughColors = None
        # NOTE(review): self.passThroughBlack is only assigned in
        # setContPalette(); updateImage() reads it on the continuous path —
        # confirm callers always set a continuous palette before that path
        # can be reached.
        #self.setFrameStyle(QFrame.NoFrame)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setFixedHeight(ColorButtonSize)
        self.setMinimumWidth(ColorButtonSize)
        if parent and parent.layout() is not None:
            parent.layout().addWidget(self)

    def resizeEvent(self, ev):
        # re-render at the new size; base-class resizeEvent is not chained
        self.updateImage()

    def setDiscPalette(self, rgbColors):
        """Show a discrete palette given as a list of (r, g, b) tuples."""
        self.rgbColors = rgbColors
        self.updateImage()

    def setContPalette(self, color1, color2, passThroughBlack):
        """Show a two-color continuous palette."""
        self.color1 = color1
        self.color2 = color2
        self.passThroughBlack = passThroughBlack
        self.updateImage()

    def setExContPalette(self, color1, color2, passThroughColors):
        """Show a continuous palette with intermediate pass-through colors."""
        self.color1 = color1
        self.color2 = color2
        self.passThroughColors = passThroughColors
        self.updateImage()

    def updateImage(self):
        # hide previous renderings rather than removing them from the scene
        for item in self.scene().items():
            item.hide()
        if self.color1 is None:
            img = createDiscPalettePixmap(self.width(), self.height(), self.rgbColors)
        elif self.passThroughColors is None:
            img = createContPalettePixmap(self.width(), self.height(), self.color1, self.color2, self.passThroughBlack)
        else:
            img = createExContPalettePixmap(self.width(), self.height(), self.color1, self.color2,
                                            self.passThroughColors)
        self.scene().addPixmap(img)
        self.scene().update()
def createContPalettePixmap(width, height, color1, color2, passThroughBlack):
    """Create a width x height pixmap with a linear gradient from *color1*
    to *color2*, optionally passing through black at the midpoint."""
    img = QPixmap(width, height)
    p = QPainter()
    p.begin(img)
    p.setPen(QPen(Qt.NoPen))
    g = QLinearGradient(0, 0, width, height)
    g.setColorAt(0, color1)
    g.setColorAt(1, color2)
    if passThroughBlack:
        g.setColorAt(0.5, Qt.black)
    p.fillRect(img.rect(), QBrush(g))
    # bug fix: the painter was begun but never ended, so the pixmap was
    # returned with an active painter still attached
    p.end()
    return img
def createDiscPalettePixmap(width, height, palette):
    """Create a width x height pixmap showing *palette* as equal-width bands.

    *palette* is a list of (r, g, b) tuples, or a dict keyed by color count
    (the largest entry is used).
    """
    img = QPixmap(width, height)
    p = QPainter()
    p.begin(img)
    p.setPen(QPen(Qt.NoPen))
    if isinstance(palette, dict):  # dict of palettes keyed by color count
        palette = palette[max(palette.keys())]
    if len(palette) == 0:
        p.end()  # bug fix: early return previously leaked the active painter
        return img
    rectWidth = width / float(len(palette))
    for i, col in enumerate(palette):
        p.setBrush(QBrush(QColor(*col)))
        # bug fix: QRectF takes (x, y, w, h); the original passed
        # (i + 1) * rectWidth as the width, overdrawing each band and relying
        # on later bands painting over the excess
        p.drawRect(QtCore.QRectF(i * rectWidth, 0, rectWidth, height))
    p.end()
    return img
def createExContPalettePixmap(width, height, color1, color2, passThroughColors):
    """Create a width x height pixmap with a gradient from *color1* to
    *color2* passing through all intermediate colors in *passThroughColors*,
    spaced evenly along the gradient."""
    img = QPixmap(width, height)
    p = QPainter()
    p.begin(img)
    p.setPen(QPen(Qt.NoPen))
    g = QLinearGradient(0, 0, width, height)
    g.setColorAt(0, color1)
    g.setColorAt(1, color2)
    for i, color in enumerate(passThroughColors):
        g.setColorAt(float(i + 1) / (len(passThroughColors) + 1), color)
    p.fillRect(img.rect(), QBrush(g))
    # bug fix: release the painter before handing the pixmap out
    p.end()
    return img
class ColorButton(QWidget):
    """Clickable color swatch with an optional text label.

    Clicking opens a QColorDialog; on accept the swatch is repainted and, if
    *master* defines colorSchemaChange(), that callback is invoked.
    """

    def __init__(self, master=None, parent=None, label=None, color=None):
        QWidget.__init__(self, master)
        self.parent = parent
        self.master = master

        if self.parent and self.parent.layout() is not None:
            self.parent.layout().addWidget(self)

        self.setLayout(QHBoxLayout())
        # NOTE(review): QLayout.setMargin is Qt4-era API (Qt5 uses
        # setContentsMargins) — confirm the bound Qt version
        self.layout().setMargin(0)
        self.icon = QFrame(self)
        self.icon.setFixedSize(ColorButtonSize, ColorButtonSize)
        self.icon.setAutoFillBackground(True)
        # frame styles are bit flags; combine with | rather than +
        self.icon.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
        self.layout().addWidget(self.icon)

        if label is not None:
            self.label = gui.widgetLabel(self, label)
            self.layout().addWidget(self.label)

        if color is not None:
            self.setColor(color)

    def setColor(self, color):
        """Store *color* and repaint the swatch frame with it."""
        self.color = color
        palette = QPalette()
        palette.setBrush(QPalette.Background, color)
        self.icon.setPalette(palette)

    def getColor(self):
        """Return the currently selected QColor."""
        return self.color

    def mousePressEvent(self, ev):
        """Open a color dialog; on accept, update the swatch and notify master."""
        color = QColorDialog.getColor(self.color)
        if color.isValid():
            self.setColor(color)
            if self.master and hasattr(self.master, "colorSchemaChange"):
                self.master.colorSchemaChange()
def rgbToQColor(rgb):
    """Build a QColor from a packed integer RGB value.

    The mask keeps only the low 32 bits so oversized or negative integers
    never reach QColor's constructor.
    """
    return QColor(rgb & 0xFFFFFFFF)
class PaletteItemDelegate(QItemDelegate):
    """Item delegate that paints combo entries from pre-rendered palette
    pixmaps held in selector.paletteImg (indexed by row)."""

    def __init__(self, selector, *args):
        QItemDelegate.__init__(self, *args)
        self.selector = selector

    def paint(self, painter, option, index):
        pixmap = self.selector.paletteImg[index.row()]
        painter.drawPixmap(option.rect.x(), option.rect.y(), pixmap)

    def sizeHint(self, option, index):
        return self.selector.paletteImg[index.row()].size()
class PaletteSelectorComboBox(QComboBox):
    """Combo box listing palettes with rendered preview images/icons.

    paletteImg holds the preview pixmaps (one per item); cachedPalettes
    holds matching palette generator objects for the discrete case.
    """

    def __init__(self, *args):
        QComboBox.__init__(self, *args)
        self.paletteImg = []
        self.cachedPalettes = []
##        self.setItemDelegate(PaletteItemDelegate(self, self))
        size = self.sizeHint()
        # bug fix: integer division — QSize requires int components and
        # true division produces floats on Python 3
        size = QSize(size.width() * 2 // 3, size.height() * 2 // 3)
        self.setIconSize(size)

    def setPalettes(self, name, paletteDlg):
        """Populate the combo from the palettes registered under *name* in
        *paletteDlg*, one entry per color schema."""
        self.clear()
        self.cachedPalettes = []
        schemas = paletteDlg.getColorSchemas()
        if name in paletteDlg.discPaletteNames:
            pass  # NOTE(review): discrete branch was never implemented
        if name in paletteDlg.contPaletteNames:
            pass  # NOTE(review): continuous branch was never implemented
        if name in paletteDlg.exContPaletteNames:
            palettes = []
            paletteIndex = paletteDlg.exContPaletteNames.index(name)
            for schemaName, state in schemas:
                butt, disc, cont, exCont = state
                # avoid shadowing the *name* parameter with the entry's name
                _, (c1, c2, chk, colors) = exCont[paletteIndex]
                palettes.append((schemaName, (
                    (rgbToQColor(c1), rgbToQColor(c2),
                     [rgbToQColor(color) for color, check in colors if check and chk]))))
            self.setContinuousPalettes(palettes)

    def setDiscretePalettes(self, palettes):
        """Fill the combo with (name, colors) discrete palettes."""
        self.clear()
        # bug fix: the original reset a throwaway local named paletteImg, so
        # self.paletteImg accumulated stale previews across calls
        self.paletteImg = []
        self.cachedPalettes = []
        for name, colors in palettes:
            self.addItem(name)
            self.paletteImg.append(createDiscPalettePixmap(200, 20, colors))
            self.cachedPalettes.append(ColorPaletteGenerator(rgb_colors=colors))

    def setContinuousPalettes(self, palettes):
        """Fill the combo with (name, (c1, c2, passThroughColors)) palettes."""
        self.clear()
        # bug fix: same stale-local reset as in setDiscretePalettes
        self.paletteImg = []
        self.cachedPalettes = []
        for name, (c1, c2, colors) in palettes:
            icon = QIcon(createExContPalettePixmap(
                self.iconSize().width(), self.iconSize().height(), c1, c2, colors))
            self.addItem(icon, name)
if __name__ == "__main__":
    # Manual smoke test: open the color palette dialog standalone with one
    # continuous palette, one discrete palette and two extra color buttons.
    a = QApplication(sys.argv)
    c = ColorPaletteDlg(None)
    c.createContinuousPalette("continuousPalette", "Continuous Palette")
    c.createDiscretePalette("discPalette", "Discrete Palette")
    box = c.createBox("otherColors", "Colors")
    c.createColorButton(box, "Canvas", "Canvas")
    c.createColorButton(box, "Grid", "Grid")
    c.setColorSchemas()
    c.show()
    a.exec()
|
{
"content_hash": "1c289858d6c2c622f75f330bd0ea9c65",
"timestamp": "",
"source": "github",
"line_count": 928,
"max_line_length": 164,
"avg_line_length": 44.02155172413793,
"alnum_prop": 0.5704494272006266,
"repo_name": "qusp/orange3",
"id": "9f5c2d14fb9d72693bae0ed2ea4565a2113aba20",
"size": "40852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Orange/widgets/utils/colorpalette.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "NSIS",
"bytes": "19239"
},
{
"name": "Python",
"bytes": "3378832"
},
{
"name": "Shell",
"bytes": "37336"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: update Goods model Meta options
    (default ordering plus localized verbose names)."""

    dependencies = [
        ('goods', '0013_auto_20170907_1846'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='goods',
            options={'ordering': ['category', 'display_order'], 'verbose_name': '상품', 'verbose_name_plural': '상품'},
        ),
    ]
|
{
"content_hash": "2a0113cf26b3297a13ad18fede0fd582",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 115,
"avg_line_length": 23.88235294117647,
"alnum_prop": 0.5960591133004927,
"repo_name": "Beomi/irkshop",
"id": "2c9e82af77422fa9f39d0fc1b02f6ebb2dbfb3e0",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goods/migrations/0014_auto_20170907_1928.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "177470"
},
{
"name": "HTML",
"bytes": "211551"
},
{
"name": "JavaScript",
"bytes": "212543"
},
{
"name": "Python",
"bytes": "71285"
}
],
"symlink_target": ""
}
|
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client over HTTP Basic auth (Python 2 httplib)."""
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # blocking connection with a 30-second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST a single JSON-RPC call and return its 'result' field.

        Returns None on transport or decode problems.  NOTE(review): when the
        server reports an error, the error object itself is returned instead
        of being raised — callers must check what they get back.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """Wrapper for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Wrapper for 'getwork': fetch work when data is None, submit otherwise."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to its low 32 bits (L suffix: Python 2 long literal)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf* (length must be a multiple of 4)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        # unpack a native 32-bit word, reverse its bytes, repack
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf* (bytes within each
    word are left untouched)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Single-process CPU miner: fetches getwork jobs over RPC, scans nonces
    by double-SHA256, and submits any proof-of-work it finds."""
    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork job.

        Returns (hashes_done, nonce_bin): nonce_bin is the winning
        little-endian nonce bytes, or None if the range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the getwork data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # the nonce occupies hex characters 152:160 of the work data
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan it, retune max_nonce, submit a hit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                         work['target'])
        time_end = time.time()
        time_diff = time_end - time_start

        # scale the next scan so it takes about settings['scantime'] seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the configured RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner instance forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # read the config file: '#' comment lines skipped, 'key = value' kept
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # fill in defaults for anything the config file did not set
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 18823
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # config values arrive as strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # spawn one mining process per configured thread
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
{
"content_hash": "2e3f79c2a01881ef0e2fc6c3039fb5c1",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 84,
"avg_line_length": 25.416326530612245,
"alnum_prop": 0.6452545366950377,
"repo_name": "wargo32/NewYorkCoin",
"id": "a79046cc00b2d2462efb2397f467c568189640c9",
"size": "6435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/pyminer/pyminer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32009"
},
{
"name": "C++",
"bytes": "2619802"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "8671"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69722"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
}
|
__author__ = 'adeb'
import multiprocessing
def spawn(f):
    """Wrap *f* as a queue worker.

    The returned function repeatedly pulls (index, value) pairs from q_in,
    applies *f*, and pushes (index, result) to q_out, stopping when it sees
    a (None, ...) sentinel.
    """
    def worker(q_in, q_out):
        while True:
            idx, payload = q_in.get()
            if idx is None:
                break
            q_out.put((idx, f(payload)))
    return worker
def parmap(f, x, nprocs=multiprocessing.cpu_count()):
    """
    Parallel map that can be used with method functions or lambda functions contrary to the built multiprocessing map
    or imap functions.
    """
    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()

    workers = [multiprocessing.Process(target=spawn(f), args=(q_in, q_out))
               for _ in range(nprocs)]
    for w in workers:
        w.daemon = True
        w.start()

    # feed every item, then one sentinel per worker to shut them down
    sent = [q_in.put((i, item)) for i, item in enumerate(x)]
    for _ in range(nprocs):
        q_in.put((None, None))

    # collect exactly as many results as items fed, then reap the workers
    res = [q_out.get() for _ in range(len(sent))]
    for w in workers:
        w.join()

    # results arrive out of order; restore the input ordering by index
    return [value for _, value in sorted(res)]
|
{
"content_hash": "14a8d129cc1418409cabbc79021ae402",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 117,
"avg_line_length": 25.771428571428572,
"alnum_prop": 0.5742793791574279,
"repo_name": "adbrebs/spynet",
"id": "c4025941b876a799d0375d79cca94d4f8d7b3f15",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/multiprocess.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "114487"
}
],
"symlink_target": ""
}
|
import re
from random import randint
# corpus: one token list per processed text, consumed by markov_chain()
markov_data = []


def process_text(text):
    """Tokenize *text* on punctuation/whitespace and store the token list.

    Empty tokens are dropped.  The stored lists feed markov_chain().
    """
    tokens = re.split(r'[\.,;\?!\s\"]+', text)
    # bug fix (py2->py3): filter() returns a lazy iterator on Python 3, which
    # breaks the len()/slicing done downstream; a list comprehension gives
    # the same list on both versions
    markov_data.append([t for t in tokens if t != ''])
def markov_chain(length=32, order=2):
    """Generate a sentence of up to *length* words from the stored corpus
    using an order-*order* Markov chain; the first letter is capitalized.

    Raises ValueError (from randint) if process_text() has not stored any
    usable text yet.
    """
    chunks = []
    for d in markov_data:
        d = list(d)  # tolerate iterators (e.g. a py3 filter object) in the corpus
        for i in range(0, len(d) - order + 1):
            chunks.append(d[i:i + order])
    chain = chunks[randint(0, len(chunks) - 1)]
    while len(chain) < length:
        tail = chain[-(order - 1):]

        def eligible(chunk):
            # bug fix (py2->py3): map() objects never compare equal on
            # Python 3; materialize both sides so contents are compared
            ihead = [w.upper() for w in chunk[0:order - 1]]
            itail = [w.upper() for w in tail]
            return ihead == itail

        # bug fix (py2->py3): filter() is lazy on Python 3; len() needs a list
        eligible_chunks = [c for c in chunks if eligible(c)]
        if len(eligible_chunks) == 0:
            break
        chain.append(eligible_chunks[randint(0, len(eligible_chunks) - 1)][-1])
    sentence = ' '.join(chain)
    return sentence[0].upper() + sentence[1:]
|
{
"content_hash": "9fe9af10f314c77ac2b0c67095e4ab04",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5587583148558758,
"repo_name": "erichensley/crapbot",
"id": "c6a3b5ae85bb980159933217537866e16b605011",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markov.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30023"
}
],
"symlink_target": ""
}
|
import time
import os
import sys
from multiprocessing import Process
from termcolor import cprint
from probes.osx import MemInfo
from probes.osx import LoadAvg
class Foreman(object):
    """Loads probe plugins and periodically runs them in a worker process,
    writing each probe's report to the diary."""
    def __init__(self, diary, args):
        self.args = args
        self.diary = diary
        self.probes = {}         # stat name -> probe instance
        self.worker_ps = None    # worker Process, created in start()

    def load_probes(self, config):
        """Instantiate every probe listed in the config's [probes] section.

        Probe names look like 'osx.MemInfo': module 'probes.osx', class
        'MemInfo'.  A probe that has its own config section receives that
        section as a dict of options; failures to construct a probe are
        reported but do not abort loading.
        """
        for probe in config.options('probes'):
            os_name, stat_name = probe.split('.')
            # This isn't perfect: we're importing the OS-representing
            # module too many times. Will do for now.
            mod = __import__('probes.' + os_name, fromlist=[stat_name])
            klass = getattr(mod, stat_name)
            # We check if this probe has a config section.
            # If it does, we turn it into a dict and pass to the
            # probe's constructor so it can deal with it.
            if probe in config.sections():
                probe_opts = dict(config.items(probe))
            else:
                probe_opts = {}
            try:
                self.probes[stat_name] = klass(probe_opts)
                print "[*] %s loaded and ready." % (probe,)
            except Exception as e:
                cprint(
                    "[x] Looks like you lack some prerequisites to run %s:" %
                    (probe,), "yellow")
                print str(e)

    def start(self):
        """Fork the worker process that runs the probe loop."""
        print "[&] Foreman.start() in %s." % (os.getpid(),)
        self.worker_ps = Process(target=self.run)
        self.worker_ps.start()

    def stop(self):
        """Terminate the worker process and wait for it to exit."""
        print "[!] Telling the foreman to take a break.."
        self.worker_ps.terminate()
        self.worker_ps.join()

    def run(self):
        """Worker-process entry: probe forever, sleeping args['interval']
        seconds between rounds."""
        print "[&] Foreman run(): %s." % (os.getpid(),)
        while True:
            self.probe_system()
            time.sleep(self.args['interval'])

    def probe_system(self):
        """Run every loaded probe once and write its report to the diary."""
        # NOTE(review): probe_time is computed but never used
        probe_time = int(time.time())
        # Note: remember about .iter() vs .iteritems()
        for probe_name, probe_obj in self.probes.iteritems():
            # Signature follows .write("MemInfo", MemInfo.report())
            self.diary.write(probe_name, probe_obj.report())
|
{
"content_hash": "d2524156d076410c2240f2aa722d9228",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 77,
"avg_line_length": 33.41538461538462,
"alnum_prop": 0.5501841620626151,
"repo_name": "diego351/monster",
"id": "d635628d13ac4194a2ed03cd74088c860076cc69",
"size": "2172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/Foreman.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1601"
},
{
"name": "JavaScript",
"bytes": "18613"
},
{
"name": "Python",
"bytes": "43400"
}
],
"symlink_target": ""
}
|
import argparse, time, sys, os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/..'))
from rgbmatrix import RGBMatrix
class SampleBase(argparse.ArgumentParser):
    """Base class for RGB-matrix demo scripts.

    Declares the shared command-line options, validates them, builds the
    RGBMatrix, and then hands control to the subclass's Run() loop.
    """

    def __init__(self, *args, **kwargs):
        super(SampleBase, self).__init__(*args, **kwargs)
        self.add_argument("-r", "--rows", action="store", help="Display rows. 16 for 16x32, 32 for 32x32. Default: 32", default=32, type=int)
        self.add_argument("-P", "--parallel", action="store", help="For Plus-models or RPi2: parallel chains. 1..3. Default: 1", default=1, type=int)
        self.add_argument("-c", "--chain", action="store", help="Daisy-chained boards. Default: 1.", default=1, type=int)
        self.add_argument("-p", "--pwmbits", action="store", help="Bits used for PWM. Something between 1..11. Default: 11", default=11, type=int)
        self.add_argument("-l", "--luminance", action="store_true", help="Don't do luminance correction (CIE1931)")
        self.add_argument("-b", "--brightness", action="store", help="Sets brightness level. Default: 100. Range: 1..100", default=100, type=int)
        self.args = {}  # filled by process()

    def usleep(self, value):
        """Sleep for *value* microseconds."""
        time.sleep(value / 1000000.0)

    def Run(self):
        """Demo body; subclasses override this."""
        print("Running")

    def process(self):
        """Parse and validate CLI args, build the matrix, run the demo.

        Returns False on invalid arguments, True otherwise; exits cleanly
        on Ctrl-C.
        """
        self.args = vars(self.parse_args())

        if self.args["rows"] != 16 and self.args["rows"] != 32:
            print("Rows can either be 16 or 32")
            return False

        if self.args["chain"] < 1:
            print("Chain outside usable range")
            return False
        if self.args["chain"] > 8:
            print("That is a long chain. Expect some flicker.")

        if self.args["parallel"] < 1 or self.args["parallel"] > 3:
            print("Parallel outside usable range.")
            return False

        self.matrix = RGBMatrix(self.args["rows"], self.args["chain"], self.args["parallel"])
        self.matrix.pwmBits = self.args["pwmbits"]
        self.matrix.brightness = self.args["brightness"]

        if self.args["luminance"]:
            self.matrix.luminanceCorrect = False

        try:
            # Start loop
            print("Press CTRL-C to stop sample")
            self.Run()
        except KeyboardInterrupt:
            # bug fix: this was a Python 2 print statement in an otherwise
            # print()-function file -- a SyntaxError on Python 3; the call
            # form below works on both versions
            print("Exiting\n")
            sys.exit(0)

        return True
|
{
"content_hash": "618997ac081a97f62a03297d2659d5db",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 157,
"avg_line_length": 40.5,
"alnum_prop": 0.5815240527884206,
"repo_name": "billthefighter/sunrise",
"id": "65a74898b0090845b5414de01ea406885b9c0dc3",
"size": "2349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samplebase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16052"
}
],
"symlink_target": ""
}
|
import sys
import os
#Put lib on path, once Google App Engine does not allow doing it directly
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import settings
from tekton.gae import middleware
import webapp2
class BaseHandler(webapp2.RequestHandler):
    """Catch-all handler: every GET/POST is dispatched through the
    configured middleware chain."""
    def get(self):
        self.make_convetion()

    def post(self):
        self.make_convetion()

    def make_convetion(self):
        # NOTE(review): name looks like a typo for "make_convention"; kept
        # because external code may already call it by this name
        middleware.execute_2(settings.MIDDLEWARES, self)
# WSGI entry point: route every path ("/.*") through BaseHandler
app = webapp2.WSGIApplication([("/.*", BaseHandler)], debug=False)
|
{
"content_hash": "5cae5cb9968494b752e6f463d7f380b2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 22.041666666666668,
"alnum_prop": 0.7051039697542533,
"repo_name": "renzon/angular",
"id": "2c59b0a903a0569f44d49a6ea8260786babdceea",
"size": "529",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backend/src/convention.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''Tests for base compliance checker class'''
from __future__ import unicode_literals
from unittest import TestCase
from netCDF4 import Dataset
from compliance_checker import base
import os
class TestBase(TestCase):
    '''
    Tests functionality of the base compliance checker class
    '''

    def setUp(self):
        # diskless, in-memory netCDF dataset: attributes can be set freely
        # without touching the filesystem
        self.acdd = base.BaseCheck()
        self.ds = Dataset(filename=os.devnull, mode='w', diskless=True)

    def tearDown(self):
        self.ds.close()

    def test_attr_presence(self):
        """attr_check on a plain attribute name: missing, empty, whitespace, set."""
        # attribute not present, should fail
        priority = base.BaseCheck.MEDIUM
        rv1, rv2, rv3, rv4 = [], [], [], []
        attr = 'test'

        base.attr_check(attr, self.ds, priority, rv1)
        assert rv1[0] == base.Result(priority, False, 'test',
                                     ['Attr test not present'])

        # test with empty string
        self.ds.test = ''
        base.attr_check(attr, self.ds, priority, rv2)
        assert rv2[0] == base.Result(priority, False, 'test',
                                     ["Attr test is empty or completely whitespace"])

        # test with whitespace in the form of a space and a tab
        self.ds.test = ' 	'
        base.attr_check(attr, self.ds, priority, rv3)
        assert rv3[0] == base.Result(priority, False, 'test',
                                     ["Attr test is empty or completely whitespace"])

        # test with actual string contents
        self.ds.test = 'abc 123'
        base.attr_check(attr, self.ds, priority, rv4)
        assert rv4[0] == base.Result(priority, True, 'test', [])

    def test_attr_in_valid_choices(self):
        """Tests attribute membership in a set"""
        rv1, rv2, rv3 = [], [], []
        priority = base.BaseCheck.MEDIUM
        valid_choices = ['a', 'b', 'c']
        attr = ('test', valid_choices)

        # missing attribute scores (0, 2)
        base.attr_check(attr, self.ds, priority, rv1)
        assert rv1[0] == base.Result(priority, (0, 2), 'test', ["Attr test not present"])

        # present but not in the allowed set scores (1, 2)
        self.ds.test = ''
        base.attr_check(attr, self.ds, priority, rv2)
        assert rv2[0] == base.Result(priority, (1, 2), 'test', ["Attr test present, but not in expected value list (%s)" % valid_choices])

        # present and valid scores (2, 2)
        self.ds.test = 'a'
        base.attr_check(attr, self.ds, priority, rv3)
        assert rv3[0] == base.Result(priority, (2, 2), 'test', [])

    def test_attr_fn(self):
        """Test attribute against a checker function"""
        # simple test. In an actual program, this use case would be covered
        rv1, rv2, rv3 = [], [], []
        priority = base.BaseCheck.MEDIUM

        # checker function passed alongside the attribute name
        def verify_dummy(ds):
            if ds.dummy + 'y' == 'dummyy':
                return base.ratable_result(True, 'dummy', [])
            else:
                return base.ratable_result(False, 'dummy', ['not "dummyy"'])

        attr = ('dummy', verify_dummy)

        # missing attribute fails before the function is even consulted
        base.attr_check(attr, self.ds, priority, rv1)
        assert rv1[0] == base.Result(priority, False, 'dummy',
                                     ['Attr dummy not present'])

        # present but rejected by the checker function
        self.ds.dummy = 'doomy'
        base.attr_check(attr, self.ds, priority, rv2)
        assert rv2[0] == base.Result(priority, False, 'dummy', ['not "dummyy"'])

        # present and accepted
        self.ds.dummy = 'dummy'
        base.attr_check(attr, self.ds, priority, rv3)
        assert rv3[0] == base.Result(priority, True, 'dummy', [])
|
{
"content_hash": "afda282f6da9aa882e9348e6589c52b5",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 138,
"avg_line_length": 41.8125,
"alnum_prop": 0.5683109118086697,
"repo_name": "petejan/compliance-checker",
"id": "11c617985369d6c338df10e5136a37f5ebc7a9d1",
"size": "3391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance_checker/tests/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "522241"
}
],
"symlink_target": ""
}
|
from queryset import QuerySet, QuerySetManager
from queryset import DoesNotExist, MultipleObjectsReturned
import sys
import pymongo
# Registry of document classes keyed by class name; populated by
# TopLevelDocumentMetaclass as each concrete document class is created.
_document_registry = {}


def get_document(name):
    """Return the document class registered under ``name``.

    Raises KeyError if no document class with that name has been defined.
    """
    return _document_registry[name]
class ValidationError(Exception):
    """Raised when a document or field value fails validation
    (see BaseField._validate and BaseDocument.validate).
    """
    pass
class BaseField(object):
    """A base class for fields in a MongoDB document. Instances of this class
    may be added to subclasses of `Document` to define a document's schema.
    """

    # Fields may have _types inserted into indexes by default
    _index_with_types = True

    def __init__(self, db_field=None, name=None, required=False, default=None,
                 unique=False, unique_with=None, primary_key=False,
                 validation=None, choices=None):
        # Primary-key fields are always stored under MongoDB's '_id' key;
        # otherwise fall back from db_field to the deprecated name argument.
        self.db_field = '_id' if primary_key else (db_field or name)
        if name:
            import warnings
            msg = "Fields' 'name' attribute deprecated in favour of 'db_field'"
            warnings.warn(msg, DeprecationWarning)
        # The attribute name is filled in later by the document metaclass.
        self.name = None
        # A primary key is implicitly required.
        self.required = required or primary_key
        self.default = default
        self.unique = bool(unique or unique_with)
        self.unique_with = unique_with
        self.primary_key = primary_key
        self.validation = validation
        self.choices = choices

    def __get__(self, instance, owner):
        """Descriptor for retrieving a value from a field in a document. Do
        any necessary conversion between Python and MongoDB types.
        """
        if instance is None:
            # Accessed on the class itself rather than on an instance.
            return self
        # Prefer the value stored on the instance, else fall back to the
        # field default (callable defaults are invoked on each access).
        value = instance._data.get(self.name)
        if value is None:
            value = self.default
            if callable(value):
                value = value()
        return value

    def __set__(self, instance, value):
        """Descriptor for assigning a value to a field in a document."""
        instance._data[self.name] = value

    def to_python(self, value):
        """Convert a MongoDB-compatible type to a Python type."""
        return value

    def to_mongo(self, value):
        """Convert a Python type to a MongoDB-compatible type."""
        return self.to_python(value)

    def prepare_query_value(self, op, value):
        """Prepare a value that is being used in a query for PyMongo."""
        return value

    def validate(self, value):
        """Perform validation on a value. Subclasses override this hook."""
        pass

    def _validate(self, value):
        # Enforce the 'choices' restriction, if one was given.
        if self.choices is not None and value not in self.choices:
            raise ValidationError("Value must be one of %s." % unicode(self.choices))
        # Apply any user-supplied validation callable.
        if self.validation is not None:
            if not callable(self.validation):
                raise ValueError('validation argument must be a callable.')
            if not self.validation(value):
                raise ValidationError('Value does not match custom validation method.')
        # Finally run the field type's own validation.
        self.validate(value)
class ObjectIdField(BaseField):
    """A field wrapper around MongoDB's ObjectIds.
    """

    def to_python(self, value):
        # ObjectIds are handed back unchanged rather than cast to unicode.
        return value
        # return unicode(value)

    def to_mongo(self, value):
        # Coerce strings (or anything unicode()-able) into a real ObjectId;
        # invalid input becomes a ValidationError.
        if not isinstance(value, pymongo.objectid.ObjectId):
            try:
                return pymongo.objectid.ObjectId(unicode(value))
            except Exception, e:
                # e.message attribute has been deprecated since Python 2.6
                raise ValidationError(unicode(e))
        return value

    def prepare_query_value(self, op, value):
        # Queries always compare against the MongoDB representation.
        return self.to_mongo(value)

    def validate(self, value):
        # Valid iff pymongo can parse the value as an ObjectId.
        try:
            pymongo.objectid.ObjectId(unicode(value))
        except:
            raise ValidationError('Invalid Object ID')
class DocumentMetaclass(type):
    """Metaclass for all documents.

    Collects BaseField descriptors from the class body and all bases into a
    single ``_fields`` dict, records the inheritance chain (``_class_name``,
    ``_superclasses``), and attaches per-class DoesNotExist /
    MultipleObjectsReturned exception types.
    """

    def __new__(cls, name, bases, attrs):
        metaclass = attrs.get('__metaclass__')
        super_new = super(DocumentMetaclass, cls).__new__
        # Classes that set __metaclass__ themselves are the abstract bases
        # defined in this package; create them without any processing.
        if metaclass and issubclass(metaclass, DocumentMetaclass):
            return super_new(cls, name, bases, attrs)

        doc_fields = {}
        class_name = [name]
        superclasses = {}
        simple_class = True
        for base in bases:
            # Include all fields present in superclasses
            if hasattr(base, '_fields'):
                doc_fields.update(base._fields)
                class_name.append(base._class_name)
                # Get superclasses from superclass
                superclasses[base._class_name] = base
                superclasses.update(base._superclasses)

            if hasattr(base, '_meta'):
                # Ensure that the Document class may be subclassed -
                # inheritance may be disabled to remove dependency on
                # additional fields _cls and _types
                if base._meta.get('allow_inheritance', True) == False:
                    raise ValueError('Document %s may not be subclassed' %
                                     base.__name__)
                else:
                    simple_class = False

        meta = attrs.get('_meta', attrs.get('meta', {}))
        if 'allow_inheritance' not in meta:
            meta['allow_inheritance'] = True

        # Only simple classes - direct subclasses of Document - may set
        # allow_inheritance to False
        if not simple_class and not meta['allow_inheritance']:
            raise ValueError('Only direct subclasses of Document may set '
                             '"allow_inheritance" to False')
        attrs['_meta'] = meta

        # Dotted path of class names from root to leaf, e.g. 'Animal.Dog'.
        attrs['_class_name'] = '.'.join(reversed(class_name))
        attrs['_superclasses'] = superclasses

        # Add the document's fields to the _fields attribute, telling each
        # field the attribute name it was bound to (also used as the default
        # database key when no explicit db_field was given).
        for attr_name, attr_value in attrs.items():
            if hasattr(attr_value, "__class__") and \
               issubclass(attr_value.__class__, BaseField):
                attr_value.name = attr_name
                if not attr_value.db_field:
                    attr_value.db_field = attr_name
                doc_fields[attr_name] = attr_value
        attrs['_fields'] = doc_fields

        new_class = super_new(cls, name, bases, attrs)
        for field in new_class._fields.values():
            field.owner_document = new_class

        # Give every document class its own DoesNotExist /
        # MultipleObjectsReturned exceptions, derived from those of its
        # bases (or the module-level defaults for root classes).
        module = attrs.get('__module__')

        base_excs = tuple(base.DoesNotExist for base in bases
                          if hasattr(base, 'DoesNotExist')) or (DoesNotExist,)
        exc = subclass_exception('DoesNotExist', base_excs, module)
        new_class.add_to_class('DoesNotExist', exc)

        base_excs = tuple(base.MultipleObjectsReturned for base in bases
                          if hasattr(base, 'MultipleObjectsReturned'))
        base_excs = base_excs or (MultipleObjectsReturned,)
        exc = subclass_exception('MultipleObjectsReturned', base_excs, module)
        new_class.add_to_class('MultipleObjectsReturned', exc)

        return new_class

    def add_to_class(self, name, value):
        # Small helper mirroring Django's Model metaclass convention.
        setattr(self, name, value)
class TopLevelDocumentMetaclass(DocumentMetaclass):
    """Metaclass for top-level documents (i.e. documents that have their own
    collection in the database).

    Builds the class's ``_meta`` options (collection name, indexes, id
    field), attaches a QuerySetManager, derives unique indexes from field
    uniqueness constraints, and registers the class in _document_registry.
    """

    def __new__(cls, name, bases, attrs):
        global _document_registry

        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        # Default collection name is the lowercased class name.
        collection = name.lower()

        id_field = None
        base_indexes = []

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']
                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
        }

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)
        new_class.objects = QuerySetManager()

        # Normalise user-specified index specs and append inherited ones.
        user_indexes = [QuerySet._build_index_spec(new_class, spec)
                        for spec in meta['indexes']] + base_indexes
        new_class._meta['indexes'] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field_name]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))

                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                if not new_class._meta['id_field']:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field
                    #new_class._fields['id'] = field
                else:
                    raise ValueError('Cannot override primary key field')

        new_class._meta['unique_indexes'] = unique_indexes

        if not new_class._meta['id_field']:
            # No explicit primary key: add an automatic ObjectId field stored
            # under MongoDB's '_id' key and exposed as the 'id' attribute.
            new_class._meta['id_field'] = 'id'
            new_class._fields['id'] = ObjectIdField(db_field='_id')
            new_class.id = new_class._fields['id']

        _document_registry[name] = new_class

        return new_class
class BaseDocument(object):
    """Common machinery for documents: field storage in ``_data``,
    validation, (de)serialisation to/from MongoDB SON, and a
    dictionary-style access protocol.
    """

    def __init__(self, dynamic_fields_list=None, **values):
        # Raw per-instance field values; the BaseField descriptors read and
        # write this dict.
        self._data = {}
        self._dynamic_fields = {}
        # Assign initial values to instance
        if dynamic_fields_list is not None:
            # NOTE(review): dynamic fields are installed on the class-level
            # _fields dict, so they persist across instances - confirm this
            # is intended.
            for field_name in dynamic_fields_list:
                self._fields[field_name] = BaseField(name=field_name)
                self._data[field_name] = values[field_name]
        for attr_name, attr_value in self._fields.items():
            if attr_name in values:
                setattr(self, attr_name, values.pop(attr_name))
            else:
                # Use default value if present
                value = getattr(self, attr_name, None)
                setattr(self, attr_name, value)

    def validate(self):
        """Ensure that all fields' values are valid and that required fields
        are present.
        """
        # Get a list of tuples of field names and their current values
        fields = [(field, getattr(self, name))
                  for name, field in self._fields.items()]

        # Ensure that each field is matched to a valid value
        for field, value in fields:
            if value is not None:
                try:
                    field._validate(value)
                except (ValueError, AttributeError, AssertionError), e:
                    # The underlying error detail is discarded; only the
                    # field type is reported.
                    raise ValidationError('Invalid value for field of type "' +
                                          field.__class__.__name__ + '"')
            elif field.required:
                raise ValidationError('Field "%s" is required' % field.name)

    @classmethod
    def _get_subclasses(cls):
        """Return a dictionary of all subclasses (found recursively),
        keyed by their dotted _class_name.
        """
        try:
            subclasses = cls.__subclasses__()
        except:
            # type.__subclasses__ needs an explicit argument when fetched
            # unbound from the metaclass.
            subclasses = cls.__subclasses__(cls)
        all_subclasses = {}
        for subclass in subclasses:
            all_subclasses[subclass._class_name] = subclass
            all_subclasses.update(subclass._get_subclasses())
        return all_subclasses

    def __iter__(self):
        # Iterating a document yields its field names.
        return iter(self._fields)

    def __getitem__(self, name):
        """Dictionary-style field access, return a field's value if present.
        """
        try:
            if name in self._fields:
                return getattr(self, name)
        except AttributeError:
            pass
        raise KeyError(name)

    def __setitem__(self, name, value):
        """Dictionary-style field access, set a field's value.
        """
        # Ensure that the field exists before settings its value
        if name not in self._fields:
            raise KeyError(name)
        return setattr(self, name, value)

    def __contains__(self, name):
        # A field counts as present only when set to a non-None value.
        try:
            val = getattr(self, name)
            return val is not None
        except AttributeError:
            return False

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        try:
            u = unicode(self)
        except (UnicodeEncodeError, UnicodeDecodeError):
            u = '[Bad Unicode data]'
        return u'<%s: %s>' % (self.__class__.__name__, u)

    def __str__(self):
        # Delegate to __unicode__ when the subclass provides one.
        if hasattr(self, '__unicode__'):
            return unicode(self).encode('utf-8')
        return '%s object' % self.__class__.__name__

    def to_mongo(self):
        """Return data dictionary ready for use with MongoDB.
        None-valued fields are omitted entirely.
        """
        data = {}
        for field_name, field in self._fields.items():
            value = getattr(self, field_name, None)
            if value is not None:
                data[field.db_field] = field.to_mongo(value)
        # Only add _cls and _types if allow_inheritance is not False
        if not (hasattr(self, '_meta') and
                self._meta.get('allow_inheritance', True) == False):
            data['_cls'] = self._class_name
            data['_types'] = self._superclasses.keys() + [self._class_name]
        return data

    @classmethod
    def _from_son(cls, son):
        """Create an instance of a Document (subclass) from a PyMongo SON.
        """
        # get the class name from the document, falling back to the given
        # class if unavailable
        class_name = son.get(u'_cls', cls._class_name)
        data = dict((str(key), value) for key, value in son.items())
        if '_types' in data:
            del data['_types']
        if '_cls' in data:
            del data['_cls']

        # Return correct subclass for document type
        if class_name != cls._class_name:
            subclasses = cls._get_subclasses()
            if class_name not in subclasses:
                # Type of document is probably more generic than the class
                # that has been queried to return this SON
                return None
            cls = subclasses[class_name]

        present_fields = data.keys()

        # Translate database keys back to attribute names, converting each
        # value with the owning field's to_python.
        for field_name, field in cls._fields.items():
            if field.db_field in data:
                data[field_name] = field.to_python(data[field.db_field])

        if '_dynamic_fields_list' in data:
            dynamic_fields_list = data['_dynamic_fields_list']
            for field_name in dynamic_fields_list:
                # NOTE(review): 'field' here is the leftover loop variable
                # from the loop above, so every dynamic value is converted
                # with whichever field happened to be last - confirm this
                # is intended.
                data[field_name] = field.to_python(data[field_name])
        else:
            dynamic_fields_list = None
        obj = cls(dynamic_fields_list, **data)
        obj._present_fields = present_fields
        return obj

    def __eq__(self, other):
        # Equal when same class and matching primary keys. NOTE(review):
        # two documents whose ids are both None also compare equal here.
        if isinstance(other, self.__class__) and hasattr(other, 'id'):
            if self.id == other.id:
                return True
        return False
if sys.version_info < (2, 5):
    # Prior to Python 2.5, Exception was an old-style class, so exception
    # subclasses must be created with types.ClassType rather than type().
    def subclass_exception(name, parents, unused):
        """Create an old-style exception class *name* deriving from
        *parents* (the third argument is accepted but ignored).
        """
        # BUG FIX: 'types' was referenced here but never imported anywhere
        # in this module; import it locally (matching the module's existing
        # local-import style, cf. BaseField.__init__).
        import types
        return types.ClassType(name, parents, {})
else:
    def subclass_exception(name, parents, module):
        """Create an exception class *name* deriving from *parents*,
        reported as belonging to *module*.
        """
        return type(name, parents, {'__module__': module})
|
{
"content_hash": "43a2f9d0299b036541c118aaf26d2e17",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 91,
"avg_line_length": 36.91983122362869,
"alnum_prop": 0.564,
"repo_name": "alex/mongoengine",
"id": "37d58ae9b7da617978ffb7c5b42359a8ba7abbed",
"size": "17500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "179419"
}
],
"symlink_target": ""
}
|
"""Function signature objects for callables.
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.7 and 3.2+.
"""
#-----------------------------------------------------------------------------
# Python 3.3 stdlib inspect.py is public domain
#
# Backports Copyright (C) 2013 Aaron Iles
# Used under Apache License Version 2.0
#
# Further Changes are Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
# patch for single-file
# we don't support 2.6, so we can just import OrderedDict
from collections import OrderedDict
__version__ = '0.3'
# end patch
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', '__builtin__', base_module):
return annotation.__name__
return annotation.__module__ + '.' + annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Handles methods (stripping the bound first argument), explicit
    __signature__ attributes, decorator-wrapped functions (__wrapped__),
    plain functions, functools.partial objects, classes and callable
    instances. Raises TypeError for non-callables and ValueError for
    callables whose signature cannot be determined.
    '''

    if not callable(obj):
        raise TypeError('{0!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        if obj.__self__ is None:
            # Unbound method - treat it as a function (no distinction in Py 3)
            obj = obj.__func__
        else:
            # Bound method: trim off the first parameter (typically self or
            # cls)
            sig = signature(obj.__func__)
            return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    # An explicitly attached __signature__ wins over any introspection.
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig

    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)

    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)

    if isinstance(obj, functools.partial):
        # Build the partial's signature from the wrapped callable's, with
        # already-bound arguments removed or turned into defaults.
        sig = signature(obj.func)

        new_params = OrderedDict(sig.parameters.items())

        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {0!r} has incorrect arguments'.format(obj)
            raise ValueError(msg)

        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag. Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                  not param._partial_kwarg):
                # Positionally-bound parameters disappear from the
                # resulting signature entirely.
                new_params.pop(arg_name)

        return sig.replace(parameters=new_params.values())

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__', 'im_func')
        if call is not None:
            sig = signature(call)

    if sig is not None:
        return sig

    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {0!r}'.format(obj)
        raise ValueError(msg)

    raise ValueError(
        'callable {0!r} is not supported by signature'.format(obj))
class _void(object):
    '''A private marker - used in Parameter & Signature'''
    # Distinct from _empty: _void means "argument not supplied to replace()",
    # whereas _empty means "no default / no annotation".
class _empty(object):
    """Sentinel meaning "no default value / no annotation"; exposed as
    Parameter.empty and Signature.empty."""
    pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
# The five parameter kinds, numbered so that the only valid orderings of
# parameters in a signature are monotonically non-decreasing in kind
# (enforced in Signature.__init__).
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
    '''Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''

    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')

    # Public aliases for the module-level kind constants.
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    # Sentinel for "no default / no annotation".
    empty = _empty

    def __init__(self, name, kind, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind

        if default is not _empty:
            # *args / **kwargs parameters can never carry defaults.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{0} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is None:
            # Only positional-only parameters may be anonymous.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
                msg = '{0!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name

        # Set when this parameter's default was bound via functools.partial
        # keyword arguments (see signature() and Signature._bind).
        self._partial_kwarg = _partial_kwarg

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void distinguishes "not supplied" from legitimate values such
        # as None or _empty.
        if name is _void:
            name = self._name

        if kind is _void:
            kind = self._kind

        if annotation is _void:
            annotation = self._annotation

        if default is _void:
            default = self._default

        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg

        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)

    def __str__(self):
        kind = self.kind

        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters are rendered in angle brackets
            # (possibly anonymous).
            if formatted is None:
                formatted = ''
            formatted = '<{0}>'.format(formatted)

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{0}:{1}'.format(formatted,
                                         formatannotation(self._annotation))

        if self._default is not _empty:
            formatted = '{0}={1}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
                                              id(self), self.name)

    def __hash__(self):
        # Parameters are mutable enough that hashing is deliberately
        # disallowed (matches inspect.Parameter of Python 3.3).
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        # Note: _partial_kwarg is deliberately excluded from equality.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)

    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments(object):
    '''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    arguments : :class:`collections.OrderedDict`
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    signature : :class:`Signature`
        The Signature object that created this instance.
    args : tuple
        Tuple of positional arguments values.
    kwargs : dict
        Dict of keyword arguments values.
    '''

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        '''The bound arguments that must be passed positionally.'''
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break

            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)

        return tuple(args)

    @property
    def kwargs(self):
        '''The bound arguments that must be passed by keyword — everything
        from the first keyword-only / partial-kwarg / missing parameter on.'''
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                        param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # A gap in the positional run: everything after
                        # this must be passed by keyword.
                        kwargs_started = True
                        continue

            if not kwargs_started:
                continue

            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg

        return kwargs

    def __hash__(self):
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)

    def __ne__(self, other):
        return not self.__eq__(other)
class Signature(object):
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.

    A Signature object has the following public attributes:

    parameters : :class:`collections.OrderedDict`
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    return_annotation
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    '''

    __slots__ = ('_return_annotation', '_parameters')

    # Subclass hooks: which classes to use for parameters and bind results.
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments

    empty = _empty

    def __init__(self, parameters=None, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''

        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY

                for idx, param in enumerate(parameters):
                    kind = param.kind
                    # Parameter kinds must be non-decreasing (e.g. no
                    # positional parameter after **kwargs).
                    if kind < top_kind:
                        msg = 'wrong parameter order: {0} before {1}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind

                    name = param.name
                    if name is None:
                        # Anonymous (positional-only) parameters are keyed
                        # by their index.
                        name = str(idx)
                        param = param.replace(name=name)

                    if name in params:
                        msg = 'duplicate parameter name: {0!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                      for param in parameters))

        self._parameters = params
        self._return_annotation = return_annotation

    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''

        if not isinstance(func, types.FunctionType):
            raise TypeError('{0!r} is not a Python function'.format(func))

        Parameter = cls._parameter_cls

        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        # co_kwonlyargcount does not exist on Python 2 code objects.
        keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = getattr(func, '__annotations__', {})
        defaults = func.__defaults__
        kwdefaults = getattr(func, '__kwdefaults__', None)

        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0

        parameters = []

        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))

        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))

        # *args (CO_VARARGS flag)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))

        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)

            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))

        # **kwargs (CO_VARKEYWORDS flag)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1

            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))

        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)

    @property
    def parameters(self):
        # Read-only view where available (Python 3.3+); a copy elsewhere.
        try:
            return types.MappingProxyType(self._parameters)
        except AttributeError:
            return OrderedDict(self._parameters.items())

    @property
    def return_annotation(self):
        return self._return_annotation

    def replace(self, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''

        if parameters is _void:
            parameters = self.parameters.values()

        if return_annotation is _void:
            return_annotation = self._return_annotation

        return type(self)(parameters,
                          return_annotation=return_annotation)

    def __hash__(self):
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        if (not issubclass(type(other), Signature) or
                self.return_annotation != other.return_annotation or
                len(self.parameters) != len(other.parameters)):
            return False

        other_positions = dict((param, idx)
                               for idx, param in enumerate(other.parameters.keys()))

        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                # Keyword-only parameters may appear in any order.
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                # All other kinds must match both position and value.
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                            param != other.parameters[param_name]):
                        return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _bind(self, args, kwargs, partial=False):
        '''Private method. Don't use directly.

        Maps *args*/*kwargs* onto this signature's parameters, raising
        TypeError on any mismatch. With partial=True, missing required
        arguments are tolerated (used by bind_partial).
        '''

        arguments = OrderedDict()

        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)

        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default

        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
                        # Re-queue this parameter for the keyword phase.
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments')
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')

                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break

                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))

                    arguments[param.name] = arg_val

        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'.
                                format(arg=param.name))

            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue

            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'.
                                    format(arg=param_name))
            else:
                arguments[param_name] = arg_val

        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')

        return self._bound_arguments_cls(self, arguments)

    def bind(self, *args, **kwargs):
        '''Get a :class:`BoundArguments` object, that maps the passed `args`
        and `kwargs` to the function's signature. Raises :exc:`TypeError`
        if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs)

    def bind_partial(self, *args, **kwargs):
        '''Get a :class:`BoundArguments` object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises :exc:`TypeError` if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs, partial=True)

    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)

            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False

            result.append(formatted)

        rendered = '({0})'.format(', '.join(result))

        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {0}'.format(anno)

        return rendered
|
{
"content_hash": "d93bb4d8a732eb269239aaf2b591ddfb",
"timestamp": "",
"source": "github",
"line_count": 823,
"max_line_length": 84,
"avg_line_length": 35.838396111786146,
"alnum_prop": 0.5358196304458382,
"repo_name": "mattvonrocketstein/smash",
"id": "e3e7863adadb3cf050ee4477c47337d8e00ae47c",
"size": "29495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/utils/signatures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
}
|
'''
Receives a live video stream from the robot using ZeroMQ.

Subscribes to a frame stream published on port 5557 and displays each
frame, converted to grayscale, in an OpenCV window.
'''
import io
import socket
import struct
from PIL import Image
import cv2
import numpy as np
import zmq
# Configure the following parameter:
IP_ADDRESS = "192.168.0.56"
cv2.startWindowThread()
cv2.namedWindow('Robot Camera', cv2.WINDOW_NORMAL)
# Setup SUBSCRIBE socket.  CONFLATE keeps only the most recent message,
# so the display never lags behind the publisher.
context = zmq.Context()
zmq_socket = context.socket(zmq.SUB)
zmq_socket.setsockopt(zmq.SUBSCRIBE, b'')
zmq_socket.setsockopt(zmq.CONFLATE, 1)
zmq_socket.connect("tcp://{}:5557".format(IP_ADDRESS))
try:
    frame_count = 0
    while True:
        # Wrap the received payload in a stream so PIL can decode it.
        payload = zmq_socket.recv()
        image_stream = io.BytesIO(payload)
        image = Image.open(image_stream)
        # Convert to 8-bit grayscale ('L') for display.
        grayscale_frame = np.array(image.convert('L'))
        cv2.imshow('Robot Camera', grayscale_frame)
        cv2.waitKey(1)
        frame_count += 1
finally:
    # Release ZeroMQ and GUI resources on exit (including Ctrl-C):
    # the original `pass` here leaked the socket, context and window.
    zmq_socket.close()
    context.term()
    cv2.destroyAllWindows()
|
{
"content_hash": "59a6fbbd639b6a361771eec740600d48",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 25.08888888888889,
"alnum_prop": 0.662533215234721,
"repo_name": "cosmoharrigan/rc-nfq",
"id": "2c80f8a1929abe88ca5447a867934faf46eb31f9",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rcnfq/show_video_zmq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59706"
}
],
"symlink_target": ""
}
|
r"""Script to generate a HTML summary comparing IREE and TFLite latencies and memory usage.
Example usage:
python parse_tflite_benchmarks.py \
--iree_version=20220924.276 \
--tflite_version=20220924.162 \
--platform=server \
--input_csv=server_results.csv \
--output_path=/tmp/server_summary.html
"""
import argparse
import pandas as pd
import pathlib
import sys
from datetime import date
# Add build_tools python dir to the search path.
sys.path.insert(0, str(pathlib.Path(__file__).parent / ".." / ".." / "python"))
from reporting.common import html_utils
# Supported platforms.
_PLATFORM_SERVER = "server"
_PLATFORM_MOBILE = "mobile"
# A map of model name to data type.
_MODEL_TO_DATA_TYPE = {
"albert_lite_base_squadv1_1": "fp32",
"albert_lite_base_squadv1_1_fp16": "fp16",
"deeplabv3": "fp32",
"deeplabv3_fp16": "fp16",
"efficientnet_lite0_fp32_2": "fp32",
"efficientnet_lite0_fp32_2_fp16": "fp16",
"efficientnet_lite0_int8_2": "int8",
"inception_v4_299_fp32": "fp32",
"inception_v4_299_fp32_fp16": "fp16",
"inception_v4_299_uint8": "uint8",
"mobilebert-baseline-tf2-quant": "int8",
"mobilebert_float_384_gpu": "fp32",
"mobilebert_float_384_gpu_fp16": "fp16",
"mobilenet_v2_1.0_224": "fp32",
"mobilenet_v2_1.0_224_fp16": "fp16",
"mobilenet_v2_224_1.0_uint8": "uint8",
"person_detect": "int8",
"resnet_v2_101_1_default_1": "fp32",
"resnet_v2_101_1_default_1_fp16": "fp16",
"ssd_mobilenet_v2_static_1.0_int8": "int8",
"ssd_mobilenet_v2_fpnlite_fp32": "fp32",
"ssd_mobilenet_v2_fpnlite_fp32_fp16": "fp16",
"ssd_mobilenet_v2_fpnlite_uint8": 'uint8',
}
# Column headers.
_MODEL = "model"
_DATA_TYPE = "data type"
_RUNTIME = "runtime"
_LATENCY = "latency (ms)"
_TASKSET = "taskset"
_MEMORY = "vmhwm (KB)"
_THREADS = "threads"
_CONFIG = "config"
_DRIVER = "driver/delegate"
_TFLITE_CONFIG = "TFLite config"
_IREE_CONFIG = "IREE config"
_IREE_LATENCY = "IREE latency (ms)"
_TFLITE_LATENCY = "TFLite latency (ms)"
_IREE_MEMORY = "IREE vmhwm (kb)"
_TFLITE_MEMORY = "TFLite vmhwm (kb)"
_IREE_VS_TFLITE_LATENCY = "IREE vs TFLite latency"
_IREE_VS_TFLITE_MEMORY = "IREE vs TFLite memory"
_PERF_COLUMNS = [_IREE_VS_TFLITE_LATENCY, _IREE_VS_TFLITE_MEMORY]
_NUMBER_COLUMNS = [_IREE_LATENCY, _TFLITE_LATENCY, _IREE_MEMORY, _TFLITE_MEMORY]
_CONFIG_COLUMNS = [_TFLITE_CONFIG, _IREE_CONFIG]
def get_tflite_model_list(df):
  """Return the unique TFLite model names, with duplicates removed.

  The .csv file lists the same model several times under different
  configurations (e.g. XNNPack enabled vs. disabled); the `noxnn`
  variants are dropped here since they duplicate the base model name.
  """
  tflite_only = df[df.runtime == "tflite"]
  deduped = tflite_only[~tflite_only.model.str.endswith("noxnn")]
  return deduped.model.unique()
def get_fastest_result(model, df):
  """Return the single lowest-latency row for `model`.

  Benchmarks are run under different configurations (e.g. number of
  threads, big core, LITTLE core).  This picks the fastest one whilst
  keeping comparisons apples-to-apples: fp16 rows are excluded unless
  the model itself is an fp16 variant.

  Args:
    model: The model name.
    df: The dataframe to filter through.

  Returns:
    A one-row dataframe holding the lowest-latency result.
  """
  candidates = df[df.model.str.startswith(model)]
  if not model.endswith("fp16"):
    candidates = candidates[~candidates[_MODEL].str.endswith("fp16")]
  # Latency 0 marks an invalid measurement; ignore those rows.
  valid = candidates[candidates[_LATENCY] != 0]
  fastest = valid[valid[_LATENCY] == valid[_LATENCY].min()]
  return fastest.head(1)
def get_tflite_config(model, df):
  """Build a human-readable configuration string for a TFLite result."""
  parts = []
  # The taskset column only exists for mobile benchmark runs.
  if _TASKSET in df.columns:
    parts.append(f"taskset {df.taskset.iloc[0]}")
  thread_count = df.threads.iloc[0]
  parts.append(f"{thread_count} threads" if thread_count > 1
               else f"{thread_count} thread")
  parts.append("no xnnpack" if model.endswith("noxnn") else "xnnpack")
  return ", ".join(parts)
def generate_tflite_summary(dataframe):
  """Build a dataframe holding the fastest TFLite result for each model."""
  rows = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
  tflite_only = dataframe[dataframe.runtime == "tflite"]
  for model in get_tflite_model_list(dataframe):
    best = get_fastest_result(model, tflite_only)
    if best.empty:
      print(f"Warning: TFLite results invalid for {model}.")
      continue
    rows.loc[len(rows)] = [
        model,
        best[_LATENCY].iloc[0],
        best[_MEMORY].iloc[0],
        # The full model name encodes the configuration suffixes.
        get_tflite_config(best.model.iloc[0], best),
    ]
  return rows
def get_iree_model_list(df):
  """Return the unique IREE model names, with duplicates removed.

  The .csv file lists the same model several times under different
  compilation configurations (e.g. mmt4d, fused padding); those
  suffixed variants are filtered out here.
  """
  iree_only = df[df.runtime == "iree"]
  for suffix in ("mmt4d", "padfuse"):
    iree_only = iree_only[~iree_only.model.str.endswith(suffix)]
  return iree_only.model.unique()
def get_iree_config(model, df):
  """Build a human-readable configuration string for an IREE result.

  The compilation configuration is encoded as a model-name suffix.
  """
  parts = []
  # The taskset column only exists for mobile benchmark runs.
  if _TASKSET in df.columns:
    parts.append(f"taskset {df.taskset.iloc[0]}")
  thread_count = df.threads.iloc[0]
  parts.append(f"{thread_count} threads" if thread_count > 1
               else f"{thread_count} thread")
  if model.endswith("im2col_mmt4d"):
    parts.extend(["im2col", "mmt4d"])
  elif model.endswith("mmt4d"):
    parts.append("mmt4d")
  elif model.endswith("padfuse"):
    parts.append("fused pad")
  return ", ".join(parts)
def generate_iree_summary(dataframe):
  """Build a dataframe holding the fastest IREE result for each model."""
  rows = pd.DataFrame(columns=[_MODEL, _LATENCY, _MEMORY, _CONFIG])
  iree_only = dataframe[dataframe.runtime == "iree"]
  for model in get_iree_model_list(dataframe):
    best = get_fastest_result(model, iree_only)
    if best.empty:
      print(f"Warning: IREE results invalid for {model}.")
      continue
    rows.loc[len(rows)] = [
        model,
        best[_LATENCY].iloc[0],
        best[_MEMORY].iloc[0],
        # The full model name encodes the configuration suffixes.
        get_iree_config(best.model.iloc[0], best),
    ]
  return rows
def get_common_html_style(df, title):
  """Return a Styler carrying the HTML attributes shared by server and mobile."""
  st = df.style.set_table_styles(html_utils.get_table_css())
  st = st.hide(axis="index")
  st = st.set_caption(title)
  # Per-column-group layout properties.
  column_styles = [
      ([_MODEL], {"width": "300px", "text-align": "left"}),
      ([_DATA_TYPE], {"width": "100", "text-align": "center"}),
      (_NUMBER_COLUMNS, {"width": "100", "text-align": "right"}),
      (_PERF_COLUMNS, {
          "width": "150px",
          "text-align": "right",
          "color": "#ffffff"
      }),
  ]
  for subset, props in column_styles:
    st = st.set_properties(subset=subset, **props)
  # Color-code the comparison cells.
  st = st.applymap(html_utils.style_latency, subset=[_IREE_VS_TFLITE_LATENCY])
  st = st.applymap(html_utils.style_memory, subset=[_IREE_VS_TFLITE_MEMORY])
  return st
def generate_summary(dataframe, title):
  """Generates a table comparing latencies and memory usage between IREE and TFLite.

  For each model, retrieves the lowest latency configuration from both
  IREE and TFLite.

  Args:
    dataframe: The raw data to summarize.
    title: The title of the table.

  Returns:
    An HTML string containing the summarized report.
  """
  summary = pd.DataFrame(columns=[
      _MODEL, _DATA_TYPE, _TFLITE_CONFIG, _IREE_CONFIG, _TFLITE_LATENCY,
      _IREE_LATENCY, _IREE_VS_TFLITE_LATENCY, _TFLITE_MEMORY, _IREE_MEMORY,
      _IREE_VS_TFLITE_MEMORY
  ])
  # One row per model: the fastest configuration found for each runtime.
  tflite_df = generate_tflite_summary(dataframe)
  iree_df = generate_iree_summary(dataframe)
  # TFLite's model list drives the comparison; models lacking results in
  # either runtime are skipped with a warning.
  model_list = tflite_df[_MODEL].unique()
  for model in model_list:
    tflite_results = tflite_df[tflite_df.model == model]
    iree_results = iree_df[iree_df.model == model]
    if tflite_results.empty:
      print(f"Warning: No TFLite results found for model {model}")
      continue
    if iree_results.empty:
      print(f"Warning: No IREE results found for model {model}")
      continue
    iree_latency = iree_results[_LATENCY].iloc[0]
    tflite_latency = tflite_results[_LATENCY].iloc[0]
    latency_comparison = html_utils.format_latency_comparison(
        iree_latency, tflite_latency)
    iree_memory = iree_results[_MEMORY].iloc[0]
    tflite_memory = tflite_results[_MEMORY].iloc[0]
    memory_comparison = html_utils.format_memory_comparison(
        iree_memory, tflite_memory)
    iree_config = iree_results.config.iloc[0]
    tflite_config = tflite_results.config.iloc[0]
    summary.loc[len(summary)] = [
        model,
        _MODEL_TO_DATA_TYPE[model],
        tflite_config,
        iree_config,
        f"{tflite_latency:.1f}",
        f"{iree_latency:.1f}",
        latency_comparison,
        f"{tflite_memory:,.0f}",
        f"{iree_memory:,.0f}",
        memory_comparison,
    ]
  summary = summary.round(2)
  st = get_common_html_style(summary, title)
  st = st.set_properties(subset=_CONFIG_COLUMNS,
                         **{
                             "width": "300px",
                             "text-align": "left",
                         })
  # Render embedded newlines as HTML line breaks in the final table.
  return st.to_html().replace("\\n", "<br>") + "<br/>"
def generate_detail(dataframe, title, platform):
  """Generates a table comparing latencies and memory usage between IREE and TFLite.

  The table generated is more detailed than `generate_summary`. It lists
  latencies of all IREE configurations, using the fastest TFLite
  configuration as baseline.

  Args:
    dataframe: The raw data to summarize.
    title: The title of the table.
    platform: Either `server` or `mobile`.

  Returns:
    An HTML string containing the detailed report.
  """
  summary = pd.DataFrame(columns=[
      _MODEL, _DATA_TYPE, _TFLITE_CONFIG, _IREE_CONFIG, _TASKSET, _THREADS,
      _TFLITE_LATENCY, _IREE_LATENCY, _IREE_VS_TFLITE_LATENCY, _TFLITE_MEMORY,
      _IREE_MEMORY, _IREE_VS_TFLITE_MEMORY
  ])
  model_list = get_tflite_model_list(dataframe)
  for model in model_list:
    df = dataframe[dataframe.model.str.startswith(model)]
    # If result does not use FP16, remove FP16 results from dataframe to
    # maintain apples-to-apples comparisons.
    if not model.endswith("fp16"):
      df = df[~df.model.str.endswith("fp16")]
    # A taskset column is only present in some result sets; when absent,
    # group everything under a single "none" placeholder.
    if _TASKSET in df.columns:
      tasksets = df.taskset.unique()
    else:
      tasksets = ["none"]
    for taskset in tasksets:
      per_taskset_df = df if taskset == "none" else df[df.taskset == taskset]
      threads = per_taskset_df.threads.unique()
      for thread in threads:
        per_thread_df = per_taskset_df[per_taskset_df.threads == thread]
        # Baseline: fastest TFLite result under this taskset/thread combo.
        tflite_df = get_fastest_result(
            model, per_thread_df[per_thread_df.runtime == "tflite"])
        if tflite_df.empty:
          continue
        tflite_latency = tflite_df[_LATENCY].iloc[0]
        tflite_memory = tflite_df[_MEMORY].iloc[0]
        # Zero values mark invalid/missing measurements; skip them.
        if tflite_latency == 0 or tflite_memory == 0:
          continue
        full_model_name = tflite_df.model.iloc[0]
        # For TFLite config, we only want to know if XNNPack was used. The other
        # configuration settings are covered in other columns.
        tflite_config = "no xnnpack" if full_model_name.endswith(
            "noxnn") else "xnnpack"
        iree_df = per_thread_df[per_thread_df.runtime == "iree"]
        # Unlike the summary view, every IREE configuration gets its own row.
        for _, row in iree_df.iterrows():
          iree_config = row[_DRIVER]
          model_name = row[_MODEL]
          # Compilation variants are encoded as model-name suffixes.
          if model_name.endswith("im2col_mmt4d"):
            iree_config += ", im2col, mmt4d"
          elif model_name.endswith("mmt4d"):
            iree_config += ", mmt4d"
          elif model_name.endswith("padfuse"):
            iree_config += ", fused pad"
          iree_latency = row[_LATENCY]
          latency_comparison = html_utils.format_latency_comparison(
              iree_latency, tflite_latency)
          iree_memory = row[_MEMORY]
          memory_comparison = html_utils.format_memory_comparison(
              iree_memory, tflite_memory)
          if iree_latency == 0 or iree_memory == 0:
            continue
          summary.loc[len(summary)] = [
              model, _MODEL_TO_DATA_TYPE[model], tflite_config, iree_config,
              taskset, thread, f"{tflite_latency:.1f}", f"{iree_latency:.1f}",
              latency_comparison, f"{tflite_memory:,.0f}",
              f"{iree_memory:,.0f}", memory_comparison
          ]
  summary = summary.round(2)
  st = get_common_html_style(summary, title)
  st = st.set_properties(subset=[_TASKSET, _THREADS],
                         **{
                             "width": "100",
                             "text-align": "center",
                         })
  st = st.set_properties(subset=[_TFLITE_CONFIG],
                         **{
                             "width": "150px",
                             "text-align": "left",
                         })
  st = st.set_properties(subset=[_IREE_CONFIG],
                         **{
                             "width": "300px",
                             "text-align": "left",
                         })
  # The taskset column only carries data on mobile; hide it elsewhere.
  # NOTE(review): the return value of hide_columns is discarded here, so
  # this relies on it mutating `st` in place; newer pandas deprecates
  # hide_columns in favour of hide(axis="columns") — confirm against the
  # pinned pandas version.
  if platform != "mobile":
    st.hide_columns(subset=[_TASKSET])
  return st.to_html().replace("\\n", "<br>") + "<br/>"
def main(args):
  """Summarize IREE vs TFLite benchmark results and write an HTML report.

  Args:
    args: Parsed command-line arguments (see parse_args).
  """
  # The CPU driver names are identical on both platforms; only the GPU
  # driver set differs (the original duplicated cpu_drivers in both
  # branches).
  cpu_drivers = ["cpu", "local-task"]
  if args.platform == _PLATFORM_SERVER:
    gpu_drivers = ["gpu", "cuda"]
  else:
    gpu_drivers = ["gpu", "vulkan", "adreno"]

  version_html = (f"<i>IREE version: {args.iree_version}</i><br/>"
                  f"<i>TFlite version: {args.tflite_version}</i><br/>"
                  f"<i>last updated: {date.today().isoformat()}</i><br/><br/>")
  html = html_utils.generate_header_and_legend(version_html)

  df = pd.read_csv(args.input_csv)
  # Filter once per driver group instead of recomputing for each view.
  cpu_results = df[df[_DRIVER].isin(cpu_drivers)]
  gpu_results = df[df[_DRIVER].isin(gpu_drivers)]
  platform_name = args.platform.capitalize()

  # Summaries first, then the detailed per-configuration views.
  html += generate_summary(cpu_results, platform_name + " CPU Summary")
  html += generate_summary(gpu_results, platform_name + " GPU Summary")
  html += generate_detail(cpu_results, platform_name + " CPU Detailed",
                          args.platform)
  html += generate_detail(gpu_results, platform_name + " GPU Detailed",
                          args.platform)

  args.output_path.write_text(html)
def parse_args():
  """Build the argument parser and parse the command line."""
  parser = argparse.ArgumentParser()
  parser.add_argument("--iree_version", type=str, default=None, required=True,
                      help="The IREE version.")
  parser.add_argument("--tflite_version", type=str, default=None,
                      required=True, help="The TFLite version.")
  parser.add_argument(
      "--platform",
      action="store",
      type=str.lower,
      required=True,
      choices=[_PLATFORM_SERVER, _PLATFORM_MOBILE],
      help=
      "The platform the models were benchmarked on. Either server or mobile.")
  parser.add_argument(
      "--input_csv",
      type=str,
      default=None,
      help=
      "The path to the csv file containing benchmark results for both IREE and TFLite."
  )
  parser.add_argument(
      "--output_path",
      type=pathlib.Path,
      default="/tmp/summary.html",
      help="The path to the output html file that summarizes results.")
  return parser.parse_args()
# Entry point: only run when executed as a script, not when imported.
if __name__ == "__main__":
  main(parse_args())
|
{
"content_hash": "52fc08fdb1b00108d3630162e365dbba",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 91,
"avg_line_length": 34.5,
"alnum_prop": 0.6097917427840701,
"repo_name": "iree-org/iree",
"id": "d08af9797fbfdba7e55fb1ce4b1e103728a27ce7",
"size": "16639",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build_tools/benchmarks/reporting/parse_tflite_benchmarks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
}
|
"""RequestContext: context for requests that persist through all of openapp."""
import copy
from keystoneclient import auth
from keystoneclient import service_catalog
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from openapp import exception
from openapp.i18n import _, _LW
from openapp import policy
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(auth.BaseAuthPlugin):
    """A keystoneclient auth plugin backed by values from the Context.

    Ideally we would use the plugin provided by the auth_token middleware;
    however that plugin isn't serialized yet, so one is reconstructed here
    from the serialized auth data.
    """

    def __init__(self, auth_token, sc):
        super(_ContextAuthPlugin, self).__init__()
        self.auth_token = auth_token
        catalog_dict = {'serviceCatalog': sc}
        self.service_catalog = service_catalog.ServiceCatalogV2(catalog_dict)

    def get_token(self, *args, **kwargs):
        # The token is fixed: it arrived with the original request.
        return self.auth_token

    def get_endpoint(self, session, service_type=None, interface=None,
                     region_name=None, service_name=None, **kwargs):
        # Resolve the endpoint URL from the stored service catalog.
        return self.service_catalog.url_for(
            service_type=service_type, service_name=service_name,
            endpoint_type=interface, region_name=region_name)
class RequestContext(context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    def __init__(self, user_id=None, project_id=None,
                 is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False,
                 user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
                authentication data.

           :param kwargs: Extra arguments that might be present, but we ignore
                because they possibly came in from older rpc messages.
        """
        # Legacy callers may pass 'user'/'tenant'; the newer
        # user_id/project_id take precedence when both are supplied.
        user = kwargs.pop('user', None)
        tenant = kwargs.pop('tenant', None)
        super(RequestContext, self).__init__(
            auth_token=auth_token,
            user=user_id or user,
            tenant=project_id or tenant,
            domain=kwargs.pop('domain', None),
            user_domain=kwargs.pop('user_domain', None),
            project_domain=kwargs.pop('project_domain', None),
            is_admin=is_admin,
            read_only=kwargs.pop('read_only', False),
            show_deleted=kwargs.pop('show_deleted', False),
            request_id=request_id,
            resource_uuid=kwargs.pop('resource_uuid', None),
            overwrite=overwrite)
        # oslo_context's RequestContext.to_dict() generates this field, we can
        # safely ignore this as we don't use it.
        kwargs.pop('user_identity', None)
        if kwargs:
            # Unknown leftovers are dropped (likely from older RPC messages).
            LOG.warning(_LW('Arguments dropped when creating context: %s') %
                        str(kwargs))
        # FIXME(dims): user_id and project_id duplicate information that is
        # already present in the oslo_context's RequestContext. We need to
        # get rid of them.
        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        # Normalize the timestamp: default to "now", and accept either a
        # datetime or a serialized string (e.g. from RPC payloads).
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                if s.get('type') in ('volume', 'volumev2', 'key-manager')]
        else:
            # if list is empty or none
            self.service_catalog = []
        self.instance_lock_checked = instance_lock_checked
        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            # Derive admin status via policy when not given explicitly.
            self.is_admin = policy.check_is_admin(self)

    def get_auth_plugin(self):
        # Prefer the plugin supplied with the request; otherwise rebuild
        # one from the serialized token and service catalog.
        if self.user_auth_plugin:
            return self.user_auth_plugin
        else:
            return _ContextAuthPlugin(self.auth_token, self.service_catalog)

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        # Validate on assignment so bad values fail fast.
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    # read_deleted is a validated property backed by _read_deleted.
    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def to_dict(self):
        """Serialize this context (e.g. for RPC); inverse of from_dict()."""
        values = super(RequestContext, self).to_dict()
        # FIXME(dims): defensive hasattr() checks need to be
        # removed once we figure out why we are seeing stack
        # traces
        values.update({
            'user_id': getattr(self, 'user_id', None),
            'project_id': getattr(self, 'project_id', None),
            'is_admin': getattr(self, 'is_admin', None),
            'read_deleted': getattr(self, 'read_deleted', 'no'),
            'roles': getattr(self, 'roles', None),
            'remote_address': getattr(self, 'remote_address', None),
            'timestamp': timeutils.strtime(self.timestamp) if hasattr(
                self, 'timestamp') else None,
            'request_id': getattr(self, 'request_id', None),
            'quota_class': getattr(self, 'quota_class', None),
            'user_name': getattr(self, 'user_name', None),
            'service_catalog': getattr(self, 'service_catalog', None),
            'project_name': getattr(self, 'project_name', None),
            'instance_lock_checked': getattr(self, 'instance_lock_checked',
                                             False)
        })
        return values

    @classmethod
    def from_dict(cls, values):
        """Alternate constructor: rebuild a context from to_dict() output."""
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # Deep-copy so mutating roles/read_deleted leaves `self` untouched.
        context = copy.deepcopy(self)
        context.is_admin = True
        if 'admin' not in context.roles:
            context.roles.append('admin')
        if read_deleted is not None:
            context.read_deleted = read_deleted
        return context

    def __str__(self):
        return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
    """Create an admin context with no user or project attached."""
    return RequestContext(user_id=None, project_id=None, is_admin=True,
                          read_deleted=read_deleted, overwrite=False)
def is_user_context(context):
    """Indicates if the request context is a normal (non-admin) user."""
    # True only for a non-admin context carrying both a user and project.
    return bool(context
                and not context.is_admin
                and context.user_id
                and context.project_id)
def require_admin_context(ctxt):
    """Raise exception.AdminRequired() if `ctxt` is NOT an admin context.

    (The previous docstring had the condition inverted.)
    """
    if not ctxt.is_admin:
        raise exception.AdminRequired()
def require_context(ctxt):
    """Raise exception.Forbidden() if context is not a user or an
    admin context.
    """
    # Admin short-circuits before the user-context check, as before.
    if not (ctxt.is_admin or is_user_context(ctxt)):
        raise exception.Forbidden()
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.Forbidden()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.Forbidden()
|
{
"content_hash": "625ed68e5b543a37c65252929a935423",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 37.26877470355731,
"alnum_prop": 0.6028210838901262,
"repo_name": "Yusuke1987/openstack_template",
"id": "63ac551fe5f17030c2c2cd3dfc73863fd72ebb05",
"size": "10199",
"binary": false,
"copies": "1",
"ref": "refs/heads/shidax",
"path": "openapp/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "429001"
},
{
"name": "Shell",
"bytes": "7959"
}
],
"symlink_target": ""
}
|
import requests
def getForecast(apiObj):
    """Extract high/low temperatures (Celsius), date epoch, and average
    humidity from one Wunderground forecast-day entry."""
    return {
        'high': apiObj['high']['celsius'],
        'low': apiObj['low']['celsius'],
        'dateFor': apiObj['date']['epoch'],
        'humidity': apiObj['avehumidity'],
    }
# Fetch the simple forecast for Cherryville, NC from the Wunderground API.
r = requests.get('http://api.wunderground.com/api/58ad3c15401bd354/forecast/q/NC/Cherryville.json')
response = r.json()
forecasts = response['forecast']['simpleforecast']['forecastday']
# Relay each day's forecast to the collection endpoint.
# (Removed the dead `data = []` initializer: it was immediately
# overwritten on the first loop iteration.)
for f in forecasts:
    data = getForecast(f)
    requests.post('http://pharylonapi.azurewebsites.net/api/weather/forecast', data)
|
{
"content_hash": "8d042d7c0904d0c9e88d343344242410",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 32.05263157894737,
"alnum_prop": 0.6847290640394089,
"repo_name": "Pharylon/Weather",
"id": "514d712cf3b874d087ceec065543e11dab157613",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postWunderground.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1978"
}
],
"symlink_target": ""
}
|
import base64
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# [END import_libraries]
def do_ocr(photo_file):
    """Run a text-detection (OCR) request on a single image.

    (The previous docstring said "label request", but the request below
    asks the Vision API for TEXT_DETECTION.)

    :param photo_file: path of the image file to send to the Vision API.
    :returns: the description of the first `textAnnotations` entry.
    """
    # [START authenticate]
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials)
    # [END authenticate]
    # [START construct_request]
    # The image bytes must be base64-encoded into the JSON request body.
    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
    service_request = service.images().annotate(body={
        'requests': [{
            'image': {
                'content': image_content.decode('UTF-8')
            },
            'features': [{
                'type': 'TEXT_DETECTION',
                'maxResults': 1
            }]
        }]
    })
    # [END construct_request]
    # [START parse_response]
    response = service_request.execute()
    # NOTE(review): this raises KeyError/IndexError when the image has no
    # detectable text -- confirm callers expect that.
    text = response['responses'][0]['textAnnotations'][0]['description']
    # print('Found text: %s for %s' % (text, photo_file))
    return text
    # [END parse_response]
|
{
"content_hash": "0d251298d535fcf78754c947864e2516",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 32.52777777777778,
"alnum_prop": 0.5644748078565329,
"repo_name": "lighttiger2505/Religo",
"id": "ed403bde3308a49ee0991a8d719dc31bfbc4579f",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "religos/google_vision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "663"
},
{
"name": "HTML",
"bytes": "14047"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "26794"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.lex
Tool-specific initialization for lex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/lex.py 3897 2009/01/13 06:45:54 scons"
import os.path
import string
import SCons.Action
import SCons.Tool
import SCons.Util
# Shared build action for lex/flex: runs $LEXCOM ($LEXCOMSTR is the
# associated display string).
LexAction = SCons.Action.Action("$LEXCOM", "$LEXCOMSTR")
def lexEmitter(target, source, env):
    """Emitter for lex/flex sources: adjust the target for Objective-C
    input and add any extra files that $LEXFLAGS options make flex
    generate."""
    sourceBase, sourceExt = os.path.splitext(SCons.Util.to_String(source[0]))
    if sourceExt == ".lm":           # If using Objective-C
        target = [sourceBase + ".m"] # the extension is ".m".

    # This emitter essentially tries to add to the target all extra
    # files generated by flex.

    # Different options that are used to trigger the creation of extra files.
    fileGenOptions = ["--header-file=", "--tables-file="]
    lexflags = env.subst("$LEXFLAGS", target=target, source=source)
    for option in SCons.Util.CLVar(lexflags):
        for fileGenOption in fileGenOptions:
            if option.startswith(fileGenOption):
                # A file generating option is present, so add the
                # file name to the target list.  Use the str method
                # rather than string.strip(): the latter is a Python 2
                # only module function, removed in Python 3.
                fileName = option[len(fileGenOption):].strip()
                target.append(fileName)
    return (target, source)
def generate(env):
    """Attach lex/flex builders and construction variables to *env*."""
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)

    # Plain C scanners.
    for suffix in (".l", ".lex"):
        c_file.add_action(suffix, LexAction)
        c_file.add_emitter(suffix, lexEmitter)

    # Objective-C scanners.
    cxx_file.add_action(".lm", LexAction)
    cxx_file.add_emitter(".lm", lexEmitter)

    # C++ scanners.
    cxx_file.add_action(".ll", LexAction)
    cxx_file.add_emitter(".ll", lexEmitter)

    env["LEX"] = env.Detect("flex") or "lex"
    env["LEXFLAGS"] = SCons.Util.CLVar("")
    env["LEXCOM"] = "$LEX $LEXFLAGS -t $SOURCES > $TARGET"
def exists(env):
    """Report whether a lexer tool (flex preferred, lex fallback) is available."""
    candidates = ["flex", "lex"]
    return env.Detect(candidates)
|
{
"content_hash": "4dbe161240dd54efdb9c78ecf4cd0fc2",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 89,
"avg_line_length": 35.32258064516129,
"alnum_prop": 0.6910197869101978,
"repo_name": "amyvmiwei/chromium",
"id": "cd42f45b42309031c6ebb37ff535b7e5bfa8d8d1",
"size": "3285",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "third_party/scons/scons-local/SCons/Tool/lex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
hmmlearn
========
``hmmlearn`` is a set of algorithms for learning and inference of
Hiden Markov Models.
"""
__version__ = "0.2.1"
|
{
"content_hash": "5102c3ebacba9af6ea87935191210f61",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 15.222222222222221,
"alnum_prop": 0.6277372262773723,
"repo_name": "aubreyli/hmmlearn",
"id": "e5700a959c23340dfd62dc867eecedcecffb9df3",
"size": "137",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hmmlearn/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Makefile",
"bytes": "715"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "86268"
},
{
"name": "Shell",
"bytes": "2788"
}
],
"symlink_target": ""
}
|
import argparse
import json
import sys
import gcl
from gcl import query
from gcl import util
def select(dct, path):
    """Walk *path* (a sequence of keys) into the nested mapping *dct*.

    Returns the value found at the end of the path; an empty path returns
    *dct* itself. Raises RuntimeError when an intermediate value is not a
    mapping or a key is missing.
    """
    node = dct
    for key in path:
        if not hasattr(node, 'keys'):
            raise RuntimeError('Value %r cannot be indexed with %r' % (node, key))
        if key not in node:
            raise RuntimeError('Value %r has no key %r' % (node, key))
        node = node[key]
    return node
def main(argv=None, stdin=None):
    """Command-line entry point: convert (parts of) a GCL model to JSON.

    :param argv: argument list to parse; defaults to ``sys.argv[1:]`` when
        None. An explicitly passed empty list is honored as "no arguments".
    :param stdin: readable file object used when no file argument (or '-')
        is given; defaults to ``sys.stdin``.
    """
    parser = argparse.ArgumentParser(description='Convert (parts of) a GCL model file to JSON.')
    parser.add_argument('file', metavar='FILE', type=str, nargs='?',
                        help='File to parse')
    parser.add_argument('selectors', metavar='SELECTOR', type=str, nargs='*',
                        help='Select nodes to include in the JSON.')
    parser.add_argument('--root', '-r', metavar='PATH', type=str, default='',
                        help='Use the indicated root path as the root of the output JSON object (like a.b.c but without wildcards)')

    # BUG FIX: the previous "argv or sys.argv[1:]" treated an explicitly
    # passed empty list as "use the process arguments". Only fall back to
    # sys.argv when the caller passed nothing at all.
    args = parser.parse_args(sys.argv[1:] if argv is None else argv)

    try:
        if args.file and args.file != '-':
            model = gcl.load(args.file)
        else:
            model = gcl.loads((stdin or sys.stdin).read(), filename='<stdin>')

        # Restrict the model to the requested selectors, if any were given.
        sels = query.GPath(args.selectors)
        if not sels.everything():
            model = sels.select(model).deep()

        plain = util.to_python(model)
        selectors = args.root.split('.') if args.root else []
        selected = select(plain, selectors)

        sys.stdout.write(json.dumps(selected, indent=2))
    except (gcl.ParseError, RuntimeError) as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
|
{
"content_hash": "821d1ab301b81c16c86a66dceba5e06a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 130,
"avg_line_length": 32.08163265306123,
"alnum_prop": 0.6354961832061069,
"repo_name": "rix0rrr/gcl",
"id": "f951c01b51251180d96081011f4ea34f9a1fcd1d",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcl/to_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170513"
}
],
"symlink_target": ""
}
|
"""
Example Airflow DAG that interacts with Google Data Catalog service
"""
from google.cloud.datacatalog_v1beta1 import FieldType, TagField, TagTemplateField
from airflow import models
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.datacatalog import (
CloudDataCatalogCreateEntryGroupOperator,
CloudDataCatalogCreateEntryOperator,
CloudDataCatalogCreateTagOperator,
CloudDataCatalogCreateTagTemplateFieldOperator,
CloudDataCatalogCreateTagTemplateOperator,
CloudDataCatalogDeleteEntryGroupOperator,
CloudDataCatalogDeleteEntryOperator,
CloudDataCatalogDeleteTagOperator,
CloudDataCatalogDeleteTagTemplateFieldOperator,
CloudDataCatalogDeleteTagTemplateOperator,
CloudDataCatalogGetEntryGroupOperator,
CloudDataCatalogGetEntryOperator,
CloudDataCatalogGetTagTemplateOperator,
CloudDataCatalogListTagsOperator,
CloudDataCatalogLookupEntryOperator,
CloudDataCatalogRenameTagTemplateFieldOperator,
CloudDataCatalogSearchCatalogOperator,
CloudDataCatalogUpdateEntryOperator,
CloudDataCatalogUpdateTagOperator,
CloudDataCatalogUpdateTagTemplateFieldOperator,
CloudDataCatalogUpdateTagTemplateOperator,
)
from airflow.utils.dates import days_ago
from airflow.utils.helpers import chain
# Project/location and resource identifiers shared by every task in the DAG below.
PROJECT_ID = "polidea-airflow"
LOCATION = "us-central1"
ENTRY_GROUP_ID = "important_data_jan_2019"
ENTRY_ID = "python_files"
TEMPLATE_ID = "template_id"
FIELD_NAME_1 = "first"
FIELD_NAME_2 = "second"
FIELD_NAME_3 = "first-rename"  # target name used by the rename-template-field task
with models.DAG("example_gcp_datacatalog", start_date=days_ago(1), schedule_interval=None) as dag:
    # Lifecycle walkthrough of Data Catalog resources:
    # create -> get/list/lookup/search -> update/rename -> delete.
    # Each BashOperator below only echoes an XCom value pushed by the
    # preceding operator, to show what that operator returns.

    # Create
    # [START howto_operator_gcp_datacatalog_create_entry_group]
    create_entry_group = CloudDataCatalogCreateEntryGroupOperator(
        task_id="create_entry_group",
        location=LOCATION,
        entry_group_id=ENTRY_GROUP_ID,
        entry_group={"display_name": "analytics data - jan 2011"},
    )
    # [END howto_operator_gcp_datacatalog_create_entry_group]
    # [START howto_operator_gcp_datacatalog_create_entry_group_result]
    create_entry_group_result = BashOperator(
        task_id="create_entry_group_result",
        bash_command="echo \"{{ task_instance.xcom_pull('create_entry_group', key='entry_group_id') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_entry_group_result]
    # [START howto_operator_gcp_datacatalog_create_entry_group_result2]
    create_entry_group_result2 = BashOperator(
        task_id="create_entry_group_result2",
        bash_command="echo \"{{ task_instance.xcom_pull('create_entry_group') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_entry_group_result2]
    # [START howto_operator_gcp_datacatalog_create_entry_gcs]
    create_entry_gcs = CloudDataCatalogCreateEntryOperator(
        task_id="create_entry_gcs",
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        entry_id=ENTRY_ID,
        entry={
            "display_name": "Wizard",
            "type_": "FILESET",
            "gcs_fileset_spec": {"file_patterns": ["gs://INVALID BUCKET NAME/**"]},
        },
    )
    # [END howto_operator_gcp_datacatalog_create_entry_gcs]
    # [START howto_operator_gcp_datacatalog_create_entry_gcs_result]
    create_entry_gcs_result = BashOperator(
        task_id="create_entry_gcs_result",
        bash_command="echo \"{{ task_instance.xcom_pull('create_entry_gcs', key='entry_id') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_entry_gcs_result]
    # [START howto_operator_gcp_datacatalog_create_entry_gcs_result2]
    create_entry_gcs_result2 = BashOperator(
        task_id="create_entry_gcs_result2",
        bash_command="echo \"{{ task_instance.xcom_pull('create_entry_gcs') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_entry_gcs_result2]
    # [START howto_operator_gcp_datacatalog_create_tag]
    create_tag = CloudDataCatalogCreateTagOperator(
        task_id="create_tag",
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        entry=ENTRY_ID,
        template_id=TEMPLATE_ID,
        tag={"fields": {FIELD_NAME_1: TagField(string_value="example-value-string")}},
    )
    # [END howto_operator_gcp_datacatalog_create_tag]
    # [START howto_operator_gcp_datacatalog_create_tag_result]
    create_tag_result = BashOperator(
        task_id="create_tag_result",
        bash_command="echo \"{{ task_instance.xcom_pull('create_tag', key='tag_id') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_tag_result]
    # [START howto_operator_gcp_datacatalog_create_tag_result2]
    create_tag_result2 = BashOperator(
        task_id="create_tag_result2", bash_command="echo \"{{ task_instance.xcom_pull('create_tag') }}\""
    )
    # [END howto_operator_gcp_datacatalog_create_tag_result2]
    # [START howto_operator_gcp_datacatalog_create_tag_template]
    create_tag_template = CloudDataCatalogCreateTagTemplateOperator(
        task_id="create_tag_template",
        location=LOCATION,
        tag_template_id=TEMPLATE_ID,
        tag_template={
            "display_name": "Awesome Tag Template",
            "fields": {
                FIELD_NAME_1: TagTemplateField(
                    display_name="first-field", type_=dict(primitive_type="STRING")
                )
            },
        },
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template]
    # [START howto_operator_gcp_datacatalog_create_tag_template_result]
    create_tag_template_result = BashOperator(
        task_id="create_tag_template_result",
        bash_command="echo \"{{ task_instance.xcom_pull('create_tag_template', key='tag_template_id') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template_result]
    # [START howto_operator_gcp_datacatalog_create_tag_template_result2]
    create_tag_template_result2 = BashOperator(
        task_id="create_tag_template_result2",
        bash_command="echo \"{{ task_instance.xcom_pull('create_tag_template') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template_result2]
    # [START howto_operator_gcp_datacatalog_create_tag_template_field]
    create_tag_template_field = CloudDataCatalogCreateTagTemplateFieldOperator(
        task_id="create_tag_template_field",
        location=LOCATION,
        tag_template=TEMPLATE_ID,
        tag_template_field_id=FIELD_NAME_2,
        tag_template_field=TagTemplateField(
            display_name="second-field", type_=FieldType(primitive_type="STRING")
        ),
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template_field]
    # [START howto_operator_gcp_datacatalog_create_tag_template_field_result]
    create_tag_template_field_result = BashOperator(
        task_id="create_tag_template_field_result",
        bash_command=(
            "echo \"{{ task_instance.xcom_pull('create_tag_template_field',"
            + " key='tag_template_field_id') }}\""
        ),
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template_field_result]
    # [START howto_operator_gcp_datacatalog_create_tag_template_field_result2]
    create_tag_template_field_result2 = BashOperator(
        task_id="create_tag_template_field_result2",
        bash_command="echo \"{{ task_instance.xcom_pull('create_tag_template_field') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_create_tag_template_field_result2]

    # Delete
    # [START howto_operator_gcp_datacatalog_delete_entry]
    delete_entry = CloudDataCatalogDeleteEntryOperator(
        task_id="delete_entry", location=LOCATION, entry_group=ENTRY_GROUP_ID, entry=ENTRY_ID
    )
    # [END howto_operator_gcp_datacatalog_delete_entry]
    # [START howto_operator_gcp_datacatalog_delete_entry_group]
    delete_entry_group = CloudDataCatalogDeleteEntryGroupOperator(
        task_id="delete_entry_group", location=LOCATION, entry_group=ENTRY_GROUP_ID
    )
    # [END howto_operator_gcp_datacatalog_delete_entry_group]
    # [START howto_operator_gcp_datacatalog_delete_tag]
    delete_tag = CloudDataCatalogDeleteTagOperator(
        task_id="delete_tag",
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        entry=ENTRY_ID,
        # The tag id is only known at runtime: it was pushed to XCom by create_tag.
        tag="{{ task_instance.xcom_pull('create_tag', key='tag_id') }}",
    )
    # [END howto_operator_gcp_datacatalog_delete_tag]
    # [START howto_operator_gcp_datacatalog_delete_tag_template_field]
    delete_tag_template_field = CloudDataCatalogDeleteTagTemplateFieldOperator(
        task_id="delete_tag_template_field",
        location=LOCATION,
        tag_template=TEMPLATE_ID,
        field=FIELD_NAME_2,
        force=True,
    )
    # [END howto_operator_gcp_datacatalog_delete_tag_template_field]
    # [START howto_operator_gcp_datacatalog_delete_tag_template]
    delete_tag_template = CloudDataCatalogDeleteTagTemplateOperator(
        task_id="delete_tag_template", location=LOCATION, tag_template=TEMPLATE_ID, force=True
    )
    # [END howto_operator_gcp_datacatalog_delete_tag_template]

    # Get
    # [START howto_operator_gcp_datacatalog_get_entry_group]
    get_entry_group = CloudDataCatalogGetEntryGroupOperator(
        task_id="get_entry_group",
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        read_mask={"paths": ["name", "display_name"]},
    )
    # [END howto_operator_gcp_datacatalog_get_entry_group]
    # [START howto_operator_gcp_datacatalog_get_entry_group_result]
    get_entry_group_result = BashOperator(
        task_id="get_entry_group_result",
        bash_command="echo \"{{ task_instance.xcom_pull('get_entry_group') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_get_entry_group_result]
    # [START howto_operator_gcp_datacatalog_get_entry]
    get_entry = CloudDataCatalogGetEntryOperator(
        task_id="get_entry", location=LOCATION, entry_group=ENTRY_GROUP_ID, entry=ENTRY_ID
    )
    # [END howto_operator_gcp_datacatalog_get_entry]
    # [START howto_operator_gcp_datacatalog_get_entry_result]
    get_entry_result = BashOperator(
        task_id="get_entry_result", bash_command="echo \"{{ task_instance.xcom_pull('get_entry') }}\""
    )
    # [END howto_operator_gcp_datacatalog_get_entry_result]
    # [START howto_operator_gcp_datacatalog_get_tag_template]
    get_tag_template = CloudDataCatalogGetTagTemplateOperator(
        task_id="get_tag_template", location=LOCATION, tag_template=TEMPLATE_ID
    )
    # [END howto_operator_gcp_datacatalog_get_tag_template]
    # [START howto_operator_gcp_datacatalog_get_tag_template_result]
    get_tag_template_result = BashOperator(
        task_id="get_tag_template_result",
        bash_command="echo \"{{ task_instance.xcom_pull('get_tag_template') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_get_tag_template_result]

    # List
    # [START howto_operator_gcp_datacatalog_list_tags]
    list_tags = CloudDataCatalogListTagsOperator(
        task_id="list_tags", location=LOCATION, entry_group=ENTRY_GROUP_ID, entry=ENTRY_ID
    )
    # [END howto_operator_gcp_datacatalog_list_tags]
    # [START howto_operator_gcp_datacatalog_list_tags_result]
    list_tags_result = BashOperator(
        task_id="list_tags_result", bash_command="echo \"{{ task_instance.xcom_pull('list_tags') }}\""
    )
    # [END howto_operator_gcp_datacatalog_list_tags_result]

    # Lookup
    # [START howto_operator_gcp_datacatalog_lookup_entry_linked_resource]
    current_entry_template = (
        "//datacatalog.googleapis.com/projects/{project_id}/locations/{location}/"
        "entryGroups/{entry_group}/entries/{entry}"
    )
    lookup_entry_linked_resource = CloudDataCatalogLookupEntryOperator(
        task_id="lookup_entry",
        linked_resource=current_entry_template.format(
            project_id=PROJECT_ID, location=LOCATION, entry_group=ENTRY_GROUP_ID, entry=ENTRY_ID
        ),
    )
    # [END howto_operator_gcp_datacatalog_lookup_entry_linked_resource]
    # [START howto_operator_gcp_datacatalog_lookup_entry_result]
    lookup_entry_result = BashOperator(
        task_id="lookup_entry_result",
        bash_command="echo \"{{ task_instance.xcom_pull('lookup_entry')['display_name'] }}\"",
    )
    # [END howto_operator_gcp_datacatalog_lookup_entry_result]

    # Rename
    # [START howto_operator_gcp_datacatalog_rename_tag_template_field]
    rename_tag_template_field = CloudDataCatalogRenameTagTemplateFieldOperator(
        task_id="rename_tag_template_field",
        location=LOCATION,
        tag_template=TEMPLATE_ID,
        field=FIELD_NAME_1,
        new_tag_template_field_id=FIELD_NAME_3,
    )
    # [END howto_operator_gcp_datacatalog_rename_tag_template_field]

    # Search
    # [START howto_operator_gcp_datacatalog_search_catalog]
    search_catalog = CloudDataCatalogSearchCatalogOperator(
        task_id="search_catalog", scope={"include_project_ids": [PROJECT_ID]}, query=f"projectid:{PROJECT_ID}"
    )
    # [END howto_operator_gcp_datacatalog_search_catalog]
    # [START howto_operator_gcp_datacatalog_search_catalog_result]
    search_catalog_result = BashOperator(
        task_id="search_catalog_result",
        bash_command="echo \"{{ task_instance.xcom_pull('search_catalog') }}\"",
    )
    # [END howto_operator_gcp_datacatalog_search_catalog_result]

    # Update
    # [START howto_operator_gcp_datacatalog_update_entry]
    update_entry = CloudDataCatalogUpdateEntryOperator(
        task_id="update_entry",
        entry={"display_name": "New Wizard"},
        update_mask={"paths": ["display_name"]},
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        entry_id=ENTRY_ID,
    )
    # [END howto_operator_gcp_datacatalog_update_entry]
    # [START howto_operator_gcp_datacatalog_update_tag]
    update_tag = CloudDataCatalogUpdateTagOperator(
        task_id="update_tag",
        tag={"fields": {FIELD_NAME_1: TagField(string_value="new-value-string")}},
        update_mask={"paths": ["fields"]},
        location=LOCATION,
        entry_group=ENTRY_GROUP_ID,
        entry=ENTRY_ID,
        tag_id="{{ task_instance.xcom_pull('create_tag', key='tag_id') }}",
    )
    # [END howto_operator_gcp_datacatalog_update_tag]
    # [START howto_operator_gcp_datacatalog_update_tag_template]
    update_tag_template = CloudDataCatalogUpdateTagTemplateOperator(
        task_id="update_tag_template",
        tag_template={"display_name": "Awesome Tag Template"},
        update_mask={"paths": ["display_name"]},
        location=LOCATION,
        tag_template_id=TEMPLATE_ID,
    )
    # [END howto_operator_gcp_datacatalog_update_tag_template]
    # [START howto_operator_gcp_datacatalog_update_tag_template_field]
    update_tag_template_field = CloudDataCatalogUpdateTagTemplateFieldOperator(
        task_id="update_tag_template_field",
        tag_template_field={"display_name": "Updated template field"},
        update_mask={"paths": ["display_name"]},
        location=LOCATION,
        tag_template=TEMPLATE_ID,
        tag_template_field_id=FIELD_NAME_1,
    )
    # [END howto_operator_gcp_datacatalog_update_tag_template_field]

    # Task wiring: every resource is created before it is read, updated or
    # renamed, and deleted only after all readers have run.
    # Create
    create_tasks = [
        create_entry_group,
        create_entry_gcs,
        create_tag_template,
        create_tag_template_field,
        create_tag,
    ]
    chain(*create_tasks)

    create_entry_group >> delete_entry_group
    create_entry_group >> create_entry_group_result
    create_entry_group >> create_entry_group_result2

    create_entry_gcs >> delete_entry
    create_entry_gcs >> create_entry_gcs_result
    create_entry_gcs >> create_entry_gcs_result2

    create_tag_template >> delete_tag_template_field
    create_tag_template >> create_tag_template_result
    create_tag_template >> create_tag_template_result2

    create_tag_template_field >> delete_tag_template_field
    create_tag_template_field >> create_tag_template_field_result
    create_tag_template_field >> create_tag_template_field_result2

    create_tag >> delete_tag
    create_tag >> create_tag_result
    create_tag >> create_tag_result2

    # Delete
    delete_tasks = [
        delete_tag,
        delete_tag_template_field,
        delete_tag_template,
        delete_entry,
        delete_entry_group,
    ]
    chain(*delete_tasks)

    # Get
    create_tag_template >> get_tag_template >> delete_tag_template
    get_tag_template >> get_tag_template_result

    create_entry_gcs >> get_entry >> delete_entry
    get_entry >> get_entry_result

    create_entry_group >> get_entry_group >> delete_entry_group
    get_entry_group >> get_entry_group_result

    # List
    create_tag >> list_tags >> delete_tag
    list_tags >> list_tags_result

    # Lookup
    create_entry_gcs >> lookup_entry_linked_resource >> delete_entry
    lookup_entry_linked_resource >> lookup_entry_result

    # Rename
    update_tag >> rename_tag_template_field
    create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field

    # Search
    chain(create_tasks, search_catalog, delete_tasks)
    search_catalog >> search_catalog_result

    # Update
    create_entry_gcs >> update_entry >> delete_entry
    create_tag >> update_tag >> delete_tag
    create_tag_template >> update_tag_template >> delete_tag_template
    create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
|
{
"content_hash": "6ef484198f5c33d47f04ba3fec4cc6b0",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 110,
"avg_line_length": 39.83179723502304,
"alnum_prop": 0.6832301729623417,
"repo_name": "sekikn/incubator-airflow",
"id": "e6e94f2913925aa8adcc635f867c946918f89193",
"size": "18075",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/example_dags/example_datacatalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import sublime, sublime_plugin
import os, sys
# NOTE: the class name (and the command name Sublime derives from it) contains
# the historical typo "Termival"; renaming it would break existing key bindings
# and menu entries, so it is kept as-is.
class JavascriptEnhancementsOpenTermivalViewHereCommand(sublime_plugin.WindowCommand):
    """Open a TerminalView bash terminal in a bottom pane, rooted at the
    selected sidebar path, the current file's directory, or the first
    project folder.

    Only available on non-Windows platforms when the TerminalView plugin
    is loaded.
    """

    def run(self, **args):
        window = self.window

        paths = args.get("paths", [])
        path = self.get_path(paths)
        if not path:
            return

        # A file was selected: open the terminal in its directory.
        if os.path.isfile(path):
            path = os.path.dirname(path)

        # Split the window: editor on top (70%), terminal pane below.
        window.run_command("set_layout", args={"cells": [[0, 0, 1, 1], [0, 1, 1, 2]], "cols": [0.0, 1.0], "rows": [0.0, 0.7, 1.0]})
        window.focus_group(1)
        terminal_view = window.new_file()
        args = {"cmd": "/bin/bash -l", "title": "JavaScript Enhancements Terminal (bash)", "cwd": path, "syntax": None, "keep_open": False}
        terminal_view.run_command('terminal_view_activate', args=args)

    def get_path(self, paths):
        """Return the path to open the terminal at, or False when no
        sensible location exists (an error dialog is shown in that case)."""
        if paths:
            return paths[0]
        elif self.window.active_view() and self.window.active_view().file_name():
            return self.window.active_view().file_name()
        elif self.window.folders():
            return self.window.folders()[0]
        else:
            sublime.error_message('JavaScript Enhancements: No place to open TerminalView to.')
            return False

    def _terminal_view_loaded(self):
        # Shared availability check, previously duplicated in is_visible and
        # is_enabled. Membership test replaces the try/except KeyError probe.
        return sublime.platform() != 'windows' and "TerminalView" in sys.modules

    def is_visible(self):
        return self._terminal_view_loaded()

    def is_enabled(self):
        return self._terminal_view_loaded()
|
{
"content_hash": "a77ddce5346505842ede6dc10c8885eb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 136,
"avg_line_length": 30.784313725490197,
"alnum_prop": 0.6280254777070063,
"repo_name": "pichillilorenzo/JavaScriptEnhancements",
"id": "8f982c5dc09a614fa745abf7a9219a86c92e44b3",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/commands/open_terminal_view_here.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "119"
},
{
"name": "CSS",
"bytes": "4035"
},
{
"name": "JavaScript",
"bytes": "2118"
},
{
"name": "Python",
"bytes": "374510"
},
{
"name": "Shell",
"bytes": "4984"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
import sys
import re
# Local Imports
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import stack_tools
from resource_management.core.shell import call
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
# Wrapper used to invoke the <stack-selector-tool> with the bundled Python.
STACK_SELECT_PREFIX = 'ambari-python-wrap'

# a mapping of Ambari server role to <stack-selector-tool> component name for all
# non-clients
SERVER_ROLE_DIRECTORY_MAP = {
  'ACCUMULO_MASTER' : 'accumulo-master',
  'ACCUMULO_MONITOR' : 'accumulo-monitor',
  'ACCUMULO_GC' : 'accumulo-gc',
  'ACCUMULO_TRACER' : 'accumulo-tracer',
  'ACCUMULO_TSERVER' : 'accumulo-tablet',
  'ATLAS_SERVER' : 'atlas-server',
  'FLUME_HANDLER' : 'flume-server',
  'FALCON_SERVER' : 'falcon-server',
  'NAMENODE' : 'hadoop-hdfs-namenode',
  'DATANODE' : 'hadoop-hdfs-datanode',
  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
  'HBASE_MASTER' : 'hbase-master',
  'HBASE_REGIONSERVER' : 'hbase-regionserver',
  'HIVE_METASTORE' : 'hive-metastore',
  'HIVE_SERVER' : 'hive-server2',
  'HIVE_SERVER_INTERACTIVE' : 'hive-server2-hive2',
  'WEBHCAT_SERVER' : 'hive-webhcat',
  'KAFKA_BROKER' : 'kafka-broker',
  'KNOX_GATEWAY' : 'knox-server',
  'OOZIE_SERVER' : 'oozie-server',
  'RANGER_ADMIN' : 'ranger-admin',
  'RANGER_USERSYNC' : 'ranger-usersync',
  'RANGER_TAGSYNC' : 'ranger-tagsync',
  'RANGER_KMS' : 'ranger-kms',
  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
  'SPARK_THRIFTSERVER' : 'spark-thriftserver',
  'NIMBUS' : 'storm-nimbus',
  'SUPERVISOR' : 'storm-supervisor',
  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
  'ZOOKEEPER_SERVER' : 'zookeeper-server',

  # ZKFC is tied to NN since it doesn't have its own component in <stack-selector-tool> and there is
  # a requirement that the ZKFC is installed on each NN
  'ZKFC' : 'hadoop-hdfs-namenode'
}

# mapping of service check to <stack-selector-tool> component
SERVICE_CHECK_DIRECTORY_MAP = {
  "HDFS_SERVICE_CHECK" : "hadoop-client",
  "TEZ_SERVICE_CHECK" : "hadoop-client",
  "PIG_SERVICE_CHECK" : "hadoop-client",
  "HIVE_SERVICE_CHECK" : "hadoop-client",
  "OOZIE_SERVICE_CHECK" : "hadoop-client",
  "MAHOUT_SERVICE_CHECK" : "mahout-client",
  "MAPREDUCE2_SERVICE_CHECK" : "hadoop-client",
  "YARN_SERVICE_CHECK" : "hadoop-client",
  "SLIDER_SERVICE_CHECK" : "slider-client"
}

# Path templates; positional args are (stack_root, version-or-"current", component[, target]).
# <stack-root>/current/hadoop-client/[bin|sbin|libexec|lib]
# <stack-root>/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
HADOOP_DIR_TEMPLATE = "{0}/{1}/{2}/{3}"

# <stack-root>/current/hadoop-client
# <stack-root>/2.3.0.0-1234/hadoop
HADOOP_HOME_DIR_TEMPLATE = "{0}/{1}/{2}"

# Fallback locations used on stacks too old for versioned layouts.
HADOOP_DIR_DEFAULTS = {
  "home": "/usr/lib/hadoop",
  "libexec": "/usr/lib/hadoop/libexec",
  "sbin": "/usr/lib/hadoop/sbin",
  "bin": "/usr/bin",
  "lib": "/usr/lib/hadoop/lib"
}
def select_all(version_to_select):
  """
  Executes <stack-selector-tool> on every component for the specified version. If the value passed in is a
  stack version such as "2.3", then this will find the latest installed version which
  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
  that exact version.
  :param version_to_select: the version to <stack-selector-tool> on, such as "2.3" or "2.3.0.0-1234"
  """
  stack_root = Script.get_stack_root()
  (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)

  # it's an error, but it shouldn't really stop anything from working
  if version_to_select is None:
    Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))
    return

  Logger.info("Executing {0} set all on {1}".format(stack_selector_name, version_to_select))

  # NOTE: format() here interpolates {sudo}, {stack_selector_path}, {version_to_select}
  # and {stack_root} from the caller's local scope (resource_management behavior),
  # so the local variable names above are load-bearing.
  # The backticked sub-command resolves a prefix like "2.3" to the newest
  # matching installed version before "set all" runs.
  command = format('{sudo} {stack_selector_path} set all `ambari-python-wrap {stack_selector_path} versions | grep ^{version_to_select} | tail -1`')

  # only run the selector when at least one matching version directory exists
  only_if_command = format('ls -d {stack_root}/{version_to_select}*')
  Execute(command, only_if = only_if_command)
def select(component, version):
  """
  Executes <stack-selector-tool> on the specific component and version. Some global
  variables that are imported via params/status_params/params_linux will need
  to be recalcuated after the <stack-selector-tool>. However, python does not re-import
  existing modules. The only way to ensure that the configuration variables are
  recalculated is to call reload(...) on each module that has global parameters.
  After invoking <stack-selector-tool>, this function will also reload params, status_params,
  and params_linux.
  :param component: the <stack-selector-tool> component, such as oozie-server. If "all", then all components
  will be updated.
  :param version: the version to set the component to, such as 2.2.0.0-1234
  """
  stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
  # command is passed as a tuple so Execute runs it without shell interpretation
  command = (STACK_SELECT_PREFIX, stack_selector_path, "set", component, version)
  Execute(command, sudo=True)

  # don't trust the ordering of modules:
  # 1) status_params
  # 2) params_linux
  # 3) params
  modules = sys.modules
  param_modules = "status_params", "params_linux", "params"
  for moduleName in param_modules:
    if moduleName in modules:
      module = modules.get(moduleName)
      reload(module)  # Python 2 builtin; re-evaluates module-level globals
      Logger.info("After {0}, reloaded module {1}".format(command, moduleName))
def get_role_component_current_stack_version():
  """
  Looks up the <stack-selector-tool> component mapped to the current role
  command and returns the stack version that component is currently set to.
  :return: the current stack version of the mapped component, or None when
  the role does not map to a component or no version can be determined.
  """
  role = default("/role", "")
  role_command = default("/roleCommand", "")
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)

  # Resolve the role to a selector component; service checks use their own map.
  if role in SERVER_ROLE_DIRECTORY_MAP:
    stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
    stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
  else:
    return None

  current_stack_version = get_stack_version(stack_select_component)

  if current_stack_version is None:
    Logger.warning("Unable to determine {0} version for {1}".format(
      stack_selector_name, stack_select_component))
  else:
    Logger.info("{0} is currently at version {1}".format(
      stack_select_component, current_stack_version))

  return current_stack_version
def get_hadoop_dir(target, force_latest_on_upgrade=False):
  """
  Return the hadoop shared directory in the following override order
  1. Use default for 2.1 and lower
  2. If 2.2 and higher, use <stack-root>/current/hadoop-client/{target}
  3. If 2.2 and higher AND for an upgrade, use <stack-root>/<version>/hadoop/{target}.
  However, if the upgrade has not yet invoked <stack-selector-tool>, return the current
  version of the component.
  :target: the target directory (a key of HADOOP_DIR_DEFAULTS)
  :force_latest_on_upgrade: if True, then this will return the "current" directory
  without the stack version built into the path, such as <stack-root>/current/hadoop-client
  :raises Fail: when *target* is not a known directory key
  """
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()

  if not target in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  # start from the legacy (pre-versioned-layout) default
  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)
  if formatted_stack_version and  check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)

    # if we are not forcing "current" for HDP 2.2, then attempt to determine
    # if the exact version needs to be returned in the directory
    if not force_latest_on_upgrade:
      stack_info = _get_upgrade_stack()

      if stack_info is not None:
        stack_version = stack_info[1]

        # determine if <stack-selector-tool> has been run and if not, then use the current
        # hdp version until this component is upgraded
        current_stack_version = get_role_component_current_stack_version()
        if current_stack_version is not None and stack_version != current_stack_version:
          stack_version = current_stack_version

        if target == "home":
          # home uses a different template
          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
        else:
          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)

  return hadoop_dir
def get_hadoop_dir_for_stack_version(target, stack_version):
    """Return the hadoop shared directory for a specific stack version.

    Needed when folder paths of the downgrade-source stack version are
    required after <stack-selector-tool> has already been invoked.

    :param target: directory key; must exist in HADOOP_DIR_DEFAULTS
    :param stack_version: stack version to build the hadoop dir for
    :raises Fail: if *target* is not a known directory key
    :return: the hadoop directory path for the given stack version
    """
    if target not in HADOOP_DIR_DEFAULTS:
        raise Fail("Target {0} not defined".format(target))

    # Legacy default location; replaced below when the stack supports
    # versioned (rolling-upgrade) directories.
    hadoop_dir = HADOOP_DIR_DEFAULTS[target]

    stack_root = Script.get_stack_root()
    formatted_stack_version = format_stack_version(stack_version)
    if formatted_stack_version and check_stack_feature(
            StackFeature.ROLLING_UPGRADE, formatted_stack_version):
        if target == "home":
            # "home" uses its own template
            hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(
                stack_root, stack_version, "hadoop")
        else:
            hadoop_dir = HADOOP_DIR_TEMPLATE.format(
                stack_root, stack_version, "hadoop", target)

    return hadoop_dir
def _get_upgrade_stack():
    """Return (stack_name, stack_version) for an in-progress upgrade.

    :return: the stack name and stack version as a tuple, or None when
             no upgrade is currently in progress.
    """
    from resource_management.libraries.functions.default import default

    direction = default("/commandParams/upgrade_direction", None)
    stack_name = default("/hostLevelParams/stack_name", None)
    stack_version = default("/commandParams/version", None)

    # All three values are present only while an upgrade is running.
    if not (direction and stack_name and stack_version):
        return None
    return (stack_name, stack_version)
def unsafe_get_stack_versions():
    """List the stack versions installed on this host.

    Runs '<stack-selector-tool> versions'. Deliberately has NO fall-back:
    alerts call this function in order to surface potential errors from
    the selector tool itself.

    :return: tuple of (exit code, raw output, list of installed versions)
    """
    stack_selector_path = stack_tools.get_stack_tool_path(
        stack_tools.STACK_SELECTOR_NAME)
    code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))
    if 0 != code:
        # Non-zero exit: report it with an empty version list
        return (code, out, [])
    versions = [line.rstrip('\n') for line in out.splitlines()]
    return (code, out, versions)
def get_stack_versions(stack_root):
    """List the stack versions installed on this host.

    Asks '<stack-selector-tool> versions' first; when that yields nothing,
    falls back to scanning version directories under *stack_root*.

    :param stack_root: Stack install root
    :return: list of installed stack versions
    """
    stack_selector_path = stack_tools.get_stack_tool_path(
        stack_tools.STACK_SELECTOR_NAME)
    code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))

    versions = []
    if 0 == code:
        versions = [line.rstrip('\n') for line in out.splitlines()]

    # Fall back to the filesystem when the tool produced no versions
    if not versions:
        versions = get_versions_from_stack_root(stack_root)
    return versions
def get_stack_version_before_install(component_name):
    """Determine the stack version the component's 'current' symlink targets.

    Works in a similar way to '<stack-selector-tool> status component',
    but also works for not-yet-installed packages.
    Note: won't work if doing an initial install (the symlink is absent).

    :param component_name: component directory name, e.g. 'hadoop-client'
    :return: the extracted stack version string, or None
    """
    stack_root = Script.get_stack_root()
    component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current",
                                                    component_name)
    stack_selector_name = stack_tools.get_stack_tool_name(
        stack_tools.STACK_SELECTOR_NAME)
    if not os.path.islink(component_dir):
        return None
    # The link target looks like <root>/<version>/<component>; the version
    # is the parent directory's basename.
    stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
    # BUG FIX: the original pattern used bare '.' (matches any character)
    # and was not a raw string; escape the dots so only real dotted
    # versions like 2.5.0.0-1234 are accepted.
    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)
    if match is None:
        Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))
        return None  # lazy fail
    return stack_version
|
{
"content_hash": "d64a45dfc2214e4b822702b5931d4d26",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 148,
"avg_line_length": 42.176991150442475,
"alnum_prop": 0.7262554203385089,
"repo_name": "alexryndin/ambari",
"id": "265e7df9a0a08bab3306e8034c6cfad2c7c14bd9",
"size": "14320",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
from twisted.internet import protocol
from twisted.python import log
from txmsgpackrpc.protocol import MsgpackStreamProtocol
from txmsgpackrpc.handler import SimpleConnectionHandler
class MsgpackServerFactory(protocol.Factory):
    """Server-side factory producing msgpack-RPC stream protocols.

    Keeps track of live connections and resolves incoming RPC calls to
    ``remote_``-prefixed methods on the supplied handler object.
    """

    protocol = MsgpackStreamProtocol

    def __init__(self, handler):
        self.handler = handler
        self.connections = set()

    def buildProtocol(self, addr):
        # sendErrors=True propagates server-side failures back to callers
        return self.protocol(self, sendErrors=True)

    def addConnection(self, connection):
        """Record a newly established connection."""
        self.connections.add(connection)

    def delConnection(self, connection):
        """Forget a connection that has gone away."""
        self.connections.remove(connection)

    def getRemoteMethod(self, protocol, methodName):
        """Resolve *methodName* to the handler's remote_<name> method."""
        return getattr(self.handler, "remote_" + methodName)
class MsgpackClientFactory(protocol.ReconnectingClientFactory):
    """Reconnecting client factory for msgpack-RPC connections.

    Reconnects with exponential back-off (capped at ``maxDelay`` seconds)
    and, once the retry budget is exhausted, fails every deferred still
    waiting for a connection.
    """

    maxDelay = 12
    protocol = MsgpackStreamProtocol

    def __init__(self, handler=SimpleConnectionHandler, connectTimeout=None,
                 waitTimeout=None, handlerConfig=None):
        """
        :param handler: connection-handler class, instantiated with this
            factory and the ``handlerConfig`` keyword arguments
        :param connectTimeout: timeout applied to the connector on every
            reconnection attempt (None = Twisted default)
        :param waitTimeout: per-request timeout passed to the protocol
        :param dict handlerConfig: extra keyword arguments for ``handler``
        """
        self.connectTimeout = connectTimeout
        self.waitTimeout = waitTimeout
        # BUG FIX: the previous signature used a mutable default ({});
        # use None so instances never share a dict.
        self.handler = handler(self, **(handlerConfig or {}))

    def buildProtocol(self, addr):
        # A successful connection resets the reconnection back-off
        self.resetDelay()
        return self.protocol(self, timeout=self.waitTimeout)

    def clientConnectionFailed(self, connector, reason):
        """Called when an outbound connection attempt fails."""
        connector.timeout = self.connectTimeout
        protocol.ReconnectingClientFactory.clientConnectionFailed(
            self, connector, reason)
        self._abortIfRetriesExhausted(reason)

    def clientConnectionLost(self, connector, reason):
        """Called when an established connection drops."""
        connector.timeout = self.connectTimeout
        protocol.ReconnectingClientFactory.clientConnectionLost(
            self, connector, reason)
        self._abortIfRetriesExhausted(reason)

    def _abortIfRetriesExhausted(self, reason):
        # Shared tail of the connection-failed/lost handlers: once the
        # retry budget is spent, stop reconnecting and errback every
        # deferred still waiting for a connection.
        if self.maxRetries is not None and (self.retries > self.maxRetries):
            self.stopTrying()
            self.handler.callbackWaitingForConnection(
                lambda d: d.errback(reason))

    def addConnection(self, connection):
        """Forward a newly established connection to the handler."""
        self.handler.addConnection(connection)

    def delConnection(self, connection):
        """Notify the handler that a connection has gone away."""
        self.handler.delConnection(connection)

    def getRemoteMethod(self, protocol, methodName):
        """Clients expose no remote methods; always raises."""
        raise NotImplementedError('Cannot call RPC method on client')
__all__ = ['MsgpackServerFactory', 'MsgpackClientFactory']
|
{
"content_hash": "3307e285c721ae4aa1b49b3c77c30c65",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 113,
"avg_line_length": 34.986486486486484,
"alnum_prop": 0.7180378524526845,
"repo_name": "tobixx/txmsgpackrpc",
"id": "d69afa4194f50f988099f2f8ba3588a7c041a3d5",
"size": "2589",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "txmsgpackrpc/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50273"
},
{
"name": "Shell",
"bytes": "694"
}
],
"symlink_target": ""
}
|
"""The Channel class provides a wrapper for interacting with RabbitMQ
implementing the methods and behaviors for an AMQP Channel.
"""
import collections
import logging
import warnings
import uuid
import pika.frame as frame
import pika.exceptions as exceptions
import pika.spec as spec
from pika.utils import is_callable
from pika.compat import unicode_type, dictkeys, as_bytes
LOGGER = logging.getLogger(__name__)
MAX_CHANNELS = 32768
class Channel(object):
    """A Channel is the primary communication method for interacting with
    RabbitMQ. It is recommended that you do not directly invoke
    the creation of a channel object in your application code but rather
    construct a channel by calling the active connection's channel()
    method.
    """
    # Channel lifecycle states stored in self._state and exposed through
    # the is_closed / is_closing / is_open properties.
    CLOSED = 0
    OPENING = 1
    OPEN = 2
    CLOSING = 3
    # Callback-registry key used for channel-cleanup callbacks
    _ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup'
def __init__(self, connection, channel_number, on_open_callback=None):
    """Create a new Channel bound to *connection*.

    :param pika.connection.Connection connection: The connection
    :param int channel_number: The channel number for this instance
    :param method on_open_callback: Called when the channel opens
    :raises exceptions.InvalidChannelNumber: for a non-int channel number
    """
    if not isinstance(channel_number, int):
        raise exceptions.InvalidChannelNumber
    self.channel_number = channel_number
    self.callbacks = connection.callbacks
    self.connection = connection

    # Frame handling changes depending on the frame type being processed
    self.frame_dispatcher = ContentFrameDispatcher()

    self._blocked = collections.deque()
    self._blocking = None
    self._has_on_flow_callback = False
    self._cancelled = set()
    self._consumers = {}
    self._consumers_with_noack = set()
    self._on_flowok_callback = None
    self._on_getok_callback = None
    self._on_openok_callback = on_open_callback
    self._pending = {}
    self._state = self.CLOSED

    # Opaque cookie value set by wrapper layers (e.g. BlockingConnection)
    # via _set_cookie
    self._cookie = None
def __int__(self):
"""Return the channel object as its channel number
:rtype: int
"""
return self.channel_number
def add_callback(self, callback, replies, one_shot=True):
"""Pass in a callback handler and a list replies from the
RabbitMQ broker which you'd like the callback notified of. Callbacks
should allow for the frame parameter to be passed in.
:param method callback: The method to call
:param list replies: The replies to get a callback for
:param bool one_shot: Only handle the first type callback
"""
for reply in replies:
self.callbacks.add(self.channel_number, reply, callback, one_shot)
def add_on_cancel_callback(self, callback):
    """Register *callback* to fire when the server sends Basic.Cancel.
    The callback receives the method frame as its parameter.

    :param method callback: The method to call on callback
    """
    self.callbacks.add(self.channel_number, spec.Basic.Cancel,
                       callback, False)
def add_on_close_callback(self, callback):
"""Pass a callback function that will be called when the channel is
closed. The callback function will receive the channel, the
reply_code (int) and the reply_text (int) sent by the server describing
why the channel was closed.
:param method callback: The method to call on callback
"""
self.callbacks.add(self.channel_number, '_on_channel_close', callback,
False, self)
def add_on_flow_callback(self, callback):
    """Register *callback* to fire when the server sends Channel.Flow.
    Note that newer versions of RabbitMQ do not issue this and use TCP
    backpressure instead.

    :param method callback: The method to call on callback
    """
    # Remember that a flow handler exists so flow frames get dispatched
    self._has_on_flow_callback = True
    self.callbacks.add(self.channel_number, spec.Channel.Flow,
                       callback, False)
def add_on_return_callback(self, callback):
"""Pass a callback function that will be called when basic_publish as
sent a message that has been rejected and returned by the server.
:param method callback: The method to call on callback with the
signature callback(channel, method, properties,
body), where
channel: pika.Channel
method: pika.spec.Basic.Return
properties: pika.spec.BasicProperties
body: str, unicode, or bytes (python 3.x)
"""
self.callbacks.add(self.channel_number, '_on_return', callback, False)
def basic_ack(self, delivery_tag=0, multiple=False):
    """Acknowledge one or more delivered messages.

    With multiple=True the delivery tag is treated as "up to and
    including"; a tag of zero with multiple set acknowledges every
    outstanding message.

    :param int delivery_tag: The server-assigned delivery tag
    :param bool multiple: Acknowledge all messages up to the tag
    :raises exceptions.ChannelClosed: when the channel is not open
    """
    if not self.is_open:
        raise exceptions.ChannelClosed()
    return self._send_method(spec.Basic.Ack(delivery_tag, multiple))
def basic_cancel(self, callback=None, consumer_tag='', nowait=False):
    """Cancel the consumer identified by *consumer_tag*. Messages already
    delivered are unaffected, but the server sends no more for that
    consumer. An arbitrary number of in-flight messages may still arrive
    between sending the cancel and receiving Basic.CancelOk.

    :param method callback: Method to call for a Basic.CancelOk response
    :param str consumer_tag: Identifier for the consumer
    :param bool nowait: Do not expect a Basic.CancelOk response
    :raises: ValueError
    """
    self._validate_channel_and_callback(callback)
    if consumer_tag not in self.consumer_tags:
        # Unknown (or already cancelled) tag: nothing to do
        return
    if callback:
        if nowait is True:
            raise ValueError('Can not pass a callback if nowait is True')
        self.callbacks.add(self.channel_number, spec.Basic.CancelOk,
                           callback)
    self._cancelled.add(consumer_tag)
    replies = []
    if nowait is False:
        replies = [(spec.Basic.CancelOk, {'consumer_tag': consumer_tag})]
    self._rpc(spec.Basic.Cancel(consumer_tag=consumer_tag, nowait=nowait),
              self._on_cancelok, replies)
def basic_consume(self, consumer_callback,
                  queue='',
                  no_ack=False,
                  exclusive=False,
                  consumer_tag=None,
                  arguments=None):
    """Send Basic.Consume to the broker and bind deliveries for the
    consumer tag to *consumer_callback*. A tag is generated when none is
    supplied. The callback signature is
    consumer_callback(channel, method, properties, body) with
    method: pika.spec.Basic.Deliver and
    properties: pika.spec.BasicProperties.

    See http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume

    :param method consumer_callback: The method called on each delivery
    :param queue: The queue to consume from
    :type queue: str or unicode
    :param bool no_ack: Tell the broker to not expect a response
    :param bool exclusive: Don't allow other consumers on the queue
    :param consumer_tag: Specify your own consumer tag
    :type consumer_tag: str or unicode
    :param dict arguments: Custom key/value pair arguments for the consume
    :return: the consumer tag
    :rtype: str
    """
    self._validate_channel_and_callback(consumer_callback)
    # Generate a tag when the caller did not supply one
    consumer_tag = consumer_tag or self._generate_consumer_tag()
    if consumer_tag in self._consumers or consumer_tag in self._cancelled:
        raise exceptions.DuplicateConsumerTag(consumer_tag)
    if no_ack:
        self._consumers_with_noack.add(consumer_tag)
    self._consumers[consumer_tag] = consumer_callback
    self._pending[consumer_tag] = []
    self._rpc(spec.Basic.Consume(queue=queue,
                                 consumer_tag=consumer_tag,
                                 no_ack=no_ack,
                                 exclusive=exclusive,
                                 arguments=arguments or {}),
              self._on_eventok,
              [(spec.Basic.ConsumeOk, {'consumer_tag': consumer_tag})])
    return consumer_tag
def _generate_consumer_tag(self):
"""Generate a consumer tag
NOTE: this protected method may be called by derived classes
:returns: consumer tag
:rtype: str
"""
return 'ctag%i.%s' % (self.channel_number,
uuid.uuid4().hex)
def basic_get(self, callback=None, queue='', no_ack=False):
    """Fetch a single message from *queue*. To be notified of
    Basic.GetEmpty, register a separate callback via Channel.add_callback
    expecting one parameter, the frame.

    See http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get

    :param method callback: Called with (channel, method, properties,
        body) where method is pika.spec.Basic.GetOk
    :param queue: The queue to get a message from
    :type queue: str or unicode
    :param bool no_ack: Tell the broker to not expect a reply
    """
    self._validate_channel_and_callback(callback)
    # Remember who receives the GetOk payload when it arrives
    self._on_getok_callback = callback
    self._send_method(spec.Basic.Get(queue=queue, no_ack=no_ack))
def basic_nack(self, delivery_tag=None, multiple=False, requeue=True):
    """Reject one or more incoming messages; usable to interrupt large
    incoming messages or return untreatable ones to their queue.

    :param int delivery_tag: The server-assigned delivery tag
    :param bool multiple: Treat the tag as "up to and including"; a zero
        tag with multiple set covers all outstanding messages
    :param bool requeue: Attempt to requeue; when False (or when the
        requeue fails) messages are discarded or dead-lettered
    :raises exceptions.ChannelClosed: when the channel is not open
    """
    if not self.is_open:
        raise exceptions.ChannelClosed()
    return self._send_method(
        spec.Basic.Nack(delivery_tag, multiple, requeue))
def basic_publish(self, exchange, routing_key, body,
                  properties=None,
                  mandatory=False,
                  immediate=False):
    """Publish *body* to *exchange* with *routing_key*.

    See http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish

    :param exchange: The exchange to publish to
    :type exchange: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param body: The message body
    :type body: str or unicode
    :param pika.spec.BasicProperties properties: Basic.properties
    :param bool mandatory: The mandatory flag
    :param bool immediate: The immediate flag (deprecated in RabbitMQ)
    :raises exceptions.ChannelClosed: when the channel is not open
    """
    if not self.is_open:
        raise exceptions.ChannelClosed()
    if immediate:
        LOGGER.warning('The immediate flag is deprecated in RabbitMQ')
    # Text bodies go over the wire as UTF-8 bytes
    if isinstance(body, unicode_type):
        body = body.encode('utf-8')
    if properties is None:
        properties = spec.BasicProperties()
    method = spec.Basic.Publish(exchange=exchange,
                                routing_key=routing_key,
                                mandatory=mandatory,
                                immediate=immediate)
    self._send_method(method, (properties, body))
def basic_qos(self,
              callback=None,
              prefetch_size=0,
              prefetch_count=0,
              all_channels=False):
    """Request a specific quality of service for this channel (or all
    channels on the connection). Prefetching lets the broker send
    messages in advance for a performance improvement.

    :param method callback: The method to call for Basic.QosOk
    :param int prefetch_size: Prefetch window in bytes; zero means "no
        specific limit". Ignored when the no-ack option is set.
    :param int prefetch_count: Prefetch window in whole messages; may be
        combined with prefetch_size, in which case both windows must
        allow a message. Ignored when the no-ack option is set.
    :param bool all_channels: Apply the QoS to all channels
    """
    self._validate_channel_and_callback(callback)
    return self._rpc(spec.Basic.Qos(prefetch_size, prefetch_count,
                                    all_channels),
                     callback, [spec.Basic.QosOk])
def basic_reject(self, delivery_tag, requeue=True):
"""Reject an incoming message. This method allows a client to reject a
message. It can be used to interrupt and cancel large incoming messages,
or return untreatable messages to their original queue.
:param int delivery-tag: The server-assigned delivery tag
:param bool requeue: If requeue is true, the server will attempt to
requeue the message. If requeue is false or the
requeue attempt fails the messages are discarded or
dead-lettered.
:raises: TypeError
"""
if not self.is_open:
raise exceptions.ChannelClosed()
if not isinstance(delivery_tag, int):
raise TypeError('delivery_tag must be an integer')
return self._send_method(spec.Basic.Reject(delivery_tag, requeue))
def basic_recover(self, callback=None, requeue=False):
    """Ask the server to redeliver all unacknowledged messages on this
    channel; zero or more messages may be redelivered. Replaces the
    asynchronous Recover.

    :param method callback: Method to call on Basic.RecoverOk
    :param bool requeue: When False redeliver to the original recipient;
        when True the server may requeue and deliver to an alternative
        subscriber
    """
    self._validate_channel_and_callback(callback)
    return self._rpc(spec.Basic.Recover(requeue), callback,
                     [spec.Basic.RecoverOk])
def close(self, reply_code=0, reply_text="Normal Shutdown"):
    """Invoke a clean shutdown of the channel with the AMQP broker.

    :param int reply_code: The reply code to close the channel with
    :param str reply_text: The reply text to close the channel with
    :raises exceptions.ChannelClosed: when the channel is not open
    """
    if not self.is_open:
        raise exceptions.ChannelClosed()
    LOGGER.info('Channel.close(%s, %s)', reply_code, reply_text)
    if self._consumers:
        LOGGER.debug('Cancelling %i consumers', len(self._consumers))
        # Iterate a copied key list; cancellation may touch the dict
        for consumer_tag in dictkeys(self._consumers):
            self.basic_cancel(consumer_tag=consumer_tag)
    self._set_state(self.CLOSING)
    self._rpc(spec.Channel.Close(reply_code, reply_text, 0, 0),
              self._on_closeok, [spec.Channel.CloseOk])
def confirm_delivery(self, callback=None, nowait=False):
    """Turn on publisher-confirm mode. The optional callback is notified
    when the broker confirms (Basic.Ack) or rejects (Basic.Nack) a
    published message.

    See http://www.rabbitmq.com/extensions.html#confirms

    :param method callback: The callback for delivery confirmations
    :param bool nowait: Do not send a reply frame (Confirm.SelectOk)
    :raises exceptions.MethodNotImplemented: when the server lacks
        publisher-confirm / basic.nack support
    """
    self._validate_channel_and_callback(callback)
    if (self.connection.publisher_confirms is False or
            self.connection.basic_nack is False):
        raise exceptions.MethodNotImplemented('Not Supported on Server')

    # Confirmations arrive as Basic.Ack / Basic.Nack frames
    if callback is not None:
        for method in (spec.Basic.Ack, spec.Basic.Nack):
            self.callbacks.add(self.channel_number, method, callback,
                               False)

    self._rpc(spec.Confirm.Select(nowait), self._on_selectok,
              [spec.Confirm.SelectOk] if nowait is False else [])
@property
def consumer_tags(self):
    """The tags of the currently active consumers.

    :rtype: list
    """
    return dictkeys(self._consumers)
def exchange_bind(self,
                  callback=None,
                  destination=None,
                  source=None,
                  routing_key='',
                  nowait=False,
                  arguments=None):
    """Bind an exchange to another exchange.

    :param method callback: The method to call on Exchange.BindOk
    :param destination: The destination exchange to bind
    :type destination: str or unicode
    :param source: The source exchange to bind to
    :type source: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param bool nowait: Do not wait for an Exchange.BindOk
    :param dict arguments: Custom key/value pair arguments for the binding
    """
    self._validate_channel_and_callback(callback)
    replies = [spec.Exchange.BindOk] if nowait is False else []
    return self._rpc(spec.Exchange.Bind(0, destination, source,
                                        routing_key, nowait,
                                        arguments or {}),
                     callback, replies)
def exchange_declare(self,
                     callback=None,
                     exchange=None,
                     exchange_type='direct',
                     passive=False,
                     durable=False,
                     auto_delete=False,
                     internal=False,
                     nowait=False,
                     arguments=None,
                     type=None):
    """Create an exchange if it does not already exist; when it does
    exist, verify it is of the expected class. With passive set, the
    server replies Declare-Ok when the exchange exists and raises a
    channel exception (reply code 404) when it does not.

    :param method callback: Call this method on Exchange.DeclareOk
    :param exchange: The exchange name: a non-empty sequence of letters,
        digits, hyphen, underscore, period, or colon
    :type exchange: str or unicode
    :param str exchange_type: The exchange type to use
    :param bool passive: Only check whether the exchange exists
    :param bool durable: Survive a reboot of RabbitMQ
    :param bool auto_delete: Remove when no more queues are bound to it
    :param bool internal: Can only be published to by other exchanges
    :param bool nowait: Do not expect an Exchange.DeclareOk response
    :param dict arguments: Custom key/value pair arguments
    :param str type: The deprecated exchange type parameter
    """
    self._validate_channel_and_callback(callback)
    if type is not None:
        warnings.warn('type is deprecated, use exchange_type instead',
                      DeprecationWarning)
        # Honor the legacy parameter only while exchange_type is still
        # at its default value
        if exchange_type == 'direct' and type != exchange_type:
            exchange_type = type
    replies = [spec.Exchange.DeclareOk] if nowait is False else []
    return self._rpc(spec.Exchange.Declare(0, exchange, exchange_type,
                                           passive, durable, auto_delete,
                                           internal, nowait,
                                           arguments or {}),
                     callback, replies)
def exchange_delete(self,
                    callback=None,
                    exchange=None,
                    if_unused=False,
                    nowait=False):
    """Delete *exchange* from the broker.

    :param method callback: The method to call on Exchange.DeleteOk
    :param exchange: The exchange name
    :type exchange: str or unicode
    :param bool if_unused: only delete if the exchange is unused
    :param bool nowait: Do not wait for an Exchange.DeleteOk
    """
    self._validate_channel_and_callback(callback)
    replies = [spec.Exchange.DeleteOk] if nowait is False else []
    return self._rpc(spec.Exchange.Delete(0, exchange, if_unused, nowait),
                     callback, replies)
def exchange_unbind(self,
                    callback=None,
                    destination=None,
                    source=None,
                    routing_key='',
                    nowait=False,
                    arguments=None):
    """Unbind an exchange from another exchange.

    :param method callback: The method to call on Exchange.UnbindOk
    :param destination: The destination exchange to unbind
    :type destination: str or unicode
    :param source: The source exchange to unbind from
    :type source: str or unicode
    :param routing_key: The routing key to unbind
    :type routing_key: str or unicode
    :param bool nowait: Do not wait for an Exchange.UnbindOk
    :param dict arguments: Custom key/value pair arguments for the binding
    """
    self._validate_channel_and_callback(callback)
    replies = [spec.Exchange.UnbindOk] if nowait is False else []
    # NOTE: arguments is forwarded as-is (may be None), matching the
    # established call signature
    return self._rpc(spec.Exchange.Unbind(0, destination, source,
                                          routing_key, nowait, arguments),
                     callback, replies)
def flow(self, callback, active):
    """Turn channel flow control off or on, notifying *callback* of the
    server's response (a bool indicating the resulting flow state).

    See http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow

    :param method callback: The callback method
    :param bool active: Turn flow on or off
    """
    self._validate_channel_and_callback(callback)
    self._on_flowok_callback = callback
    self._rpc(spec.Channel.Flow(active), self._on_flowok,
              [spec.Channel.FlowOk])
@property
def is_closed(self):
"""Returns True if the channel is closed.
:rtype: bool
"""
return self._state == self.CLOSED
@property
def is_closing(self):
"""Returns True if the channel is closing.
:rtype: bool
"""
return self._state == self.CLOSING
@property
def is_open(self):
"""Returns True if the channel is open.
:rtype: bool
"""
return self._state == self.OPEN
def open(self):
    """Open the channel by sending Channel.Open to the broker."""
    self._set_state(self.OPENING)
    self._add_callbacks()
    self._rpc(spec.Channel.Open(), self._on_openok, [spec.Channel.OpenOk])
def queue_bind(self, callback, queue, exchange,
               routing_key=None,
               nowait=False,
               arguments=None):
    """Bind *queue* to *exchange*.

    :param method callback: The method to call on Queue.BindOk
    :param queue: The queue to bind to the exchange
    :type queue: str or unicode
    :param exchange: The source exchange to bind to
    :type exchange: str or unicode
    :param routing_key: The routing key to bind on; defaults to the
        queue name when omitted
    :type routing_key: str or unicode
    :param bool nowait: Do not wait for a Queue.BindOk
    :param dict arguments: Custom key/value pair arguments for the binding
    """
    self._validate_channel_and_callback(callback)
    if routing_key is None:
        routing_key = queue
    replies = [spec.Queue.BindOk] if nowait is False else []
    return self._rpc(spec.Queue.Bind(0, queue, exchange, routing_key,
                                     nowait, arguments or {}),
                     callback, replies)
def queue_declare(self, callback,
                  queue='',
                  passive=False,
                  durable=False,
                  exclusive=False,
                  auto_delete=False,
                  nowait=False,
                  arguments=None):
    """Declare *queue*, creating it if needed, with control over its
    durability, contents and level of sharing. Leave the queue name
    empty for an auto-named queue in RabbitMQ.

    :param method callback: The method to call on Queue.DeclareOk
    :param queue: The queue name
    :type queue: str or unicode
    :param bool passive: Only check to see if the queue exists
    :param bool durable: Survive reboots of the broker
    :param bool exclusive: Only allow access by the current connection
    :param bool auto_delete: Delete after consumer cancels or disconnects
    :param bool nowait: Do not wait for a Queue.DeclareOk
    :param dict arguments: Custom key/value arguments for the queue
    """
    if queue:
        # Match the DeclareOk for this specific queue
        condition = (spec.Queue.DeclareOk, {'queue': queue})
    else:
        # Auto-named queue: accept any DeclareOk
        condition = spec.Queue.DeclareOk
    replies = [condition] if nowait is False else []
    self._validate_channel_and_callback(callback)
    return self._rpc(spec.Queue.Declare(0, queue, passive, durable,
                                        exclusive, auto_delete, nowait,
                                        arguments or {}),
                     callback, replies)
def queue_delete(self,
                 callback=None,
                 queue='',
                 if_unused=False,
                 if_empty=False,
                 nowait=False):
    """Delete *queue* from the broker.

    :param method callback: The method to call on Queue.DeleteOk
    :param queue: The queue to delete
    :type queue: str or unicode
    :param bool if_unused: only delete if it's unused
    :param bool if_empty: only delete if the queue is empty
    :param bool nowait: Do not wait for a Queue.DeleteOk
    """
    replies = [spec.Queue.DeleteOk] if nowait is False else []
    self._validate_channel_and_callback(callback)
    return self._rpc(spec.Queue.Delete(0, queue, if_unused, if_empty,
                                       nowait),
                     callback, replies)
def queue_purge(self, callback=None, queue='', nowait=False):
"""Purge all of the messages from the specified queue
:param method callback: The method to call on Queue.PurgeOk
:param queue: The queue to purge
:type queue: str or unicode
:param bool nowait: Do not expect a Queue.PurgeOk response
"""
replies = [spec.Queue.PurgeOk] if nowait is False else []
self._validate_channel_and_callback(callback)
return self._rpc(spec.Queue.Purge(0, queue, nowait), callback, replies)
def queue_unbind(self,
callback=None,
queue='',
exchange=None,
routing_key=None,
arguments=None):
"""Unbind a queue from an exchange.
:param method callback: The method to call on Queue.UnbindOk
:param queue: The queue to unbind from the exchange
:type queue: str or unicode
:param exchange: The source exchange to bind from
:type exchange: str or unicode
:param routing_key: The routing key to unbind
:type routing_key: str or unicode
:param dict arguments: Custom key/value pair arguments for the binding
"""
self._validate_channel_and_callback(callback)
if routing_key is None:
routing_key = queue
return self._rpc(spec.Queue.Unbind(0, queue, exchange, routing_key,
arguments or dict()), callback,
[spec.Queue.UnbindOk])
    def tx_commit(self, callback=None):
        """Commit a transaction.

        :param method callback: The method to call on Tx.CommitOk
        """
        self._validate_channel_and_callback(callback)
        return self._rpc(spec.Tx.Commit(), callback, [spec.Tx.CommitOk])
    def tx_rollback(self, callback=None):
        """Rollback a transaction.

        :param method callback: The method to call on Tx.RollbackOk
        """
        self._validate_channel_and_callback(callback)
        return self._rpc(spec.Tx.Rollback(), callback, [spec.Tx.RollbackOk])
    def tx_select(self, callback=None):
        """Select standard transaction mode. This method sets the channel to use
        standard transactions. The client must use this method at least once on
        a channel before using the Commit or Rollback methods.

        :param method callback: The method to call on Tx.SelectOk
        """
        self._validate_channel_and_callback(callback)
        return self._rpc(spec.Tx.Select(), callback, [spec.Tx.SelectOk])
# Internal methods
    def _add_callbacks(self):
        """Callbacks that add the required behavior for a channel when
        connecting and connected to a server.
        """
        # Add a callback for Basic.GetEmpty so an empty Basic.Get reply is
        # logged instead of treated as an unexpected frame
        self.callbacks.add(self.channel_number, spec.Basic.GetEmpty,
                           self._on_getempty, False)
        # Add a callback for a broker-initiated Basic.Cancel
        self.callbacks.add(self.channel_number, spec.Basic.Cancel,
                           self._on_cancel, False)
        # Deprecated in newer versions of RabbitMQ but still register for it
        self.callbacks.add(self.channel_number, spec.Channel.Flow,
                           self._on_flow, False)
        # Add a one-shot callback for when the server closes our channel
        self.callbacks.add(self.channel_number, spec.Channel.Close,
                           self._on_close, True)
    def _add_on_cleanup_callback(self, callback):
        """For internal use only (e.g., Connection needs to remove closed
        channels from its channel container). Pass a callback function that
        will be called when the channel is being cleaned up after all
        channel-close callbacks have run.

        :param method callback: The method to call on callback with the
            signature: callback(channel)
        """
        self.callbacks.add(self.channel_number, self._ON_CHANNEL_CLEANUP_CB_KEY,
                           callback, one_shot=True, only_caller=self)
def _add_pending_msg(self, consumer_tag, method_frame, header_frame, body):
"""Add the received message to the pending message stack.
:param str consumer_tag: The consumer tag for the message
:param pika.frame.Method method_frame: The received method frame
:param pika.frame.Header header_frame: The received header frame
:param body: The message body
:type body: str or unicode
"""
self._pending[consumer_tag].append((self, method_frame.method,
header_frame.properties, body))
    def _cleanup(self):
        """Remove all consumers and any callbacks for the channel."""
        # Run the registered cleanup callbacks (e.g., so the Connection can
        # drop this channel from its channel container) before discarding
        # this channel's remaining callback registrations
        self.callbacks.process(self.channel_number,
                               self._ON_CHANNEL_CLEANUP_CB_KEY, self,
                               self)
        self._consumers = dict()
        self.callbacks.cleanup(str(self.channel_number))
        # Drop the opaque wrapper-layer cookie (see _set_cookie)
        self._cookie = None
def _cleanup_consumer_ref(self, consumer_tag):
"""Remove any references to the consumer tag in internal structures
for consumer state.
:param str consumer_tag: The consumer tag to cleanup
"""
if consumer_tag in self._consumers_with_noack:
self._consumers_with_noack.remove(consumer_tag)
if consumer_tag in self._consumers:
del self._consumers[consumer_tag]
if consumer_tag in self._pending:
del self._pending[consumer_tag]
self._cancelled.discard(consumer_tag)
    def _get_cookie(self):
        """Used by the wrapper implementation (e.g., `BlockingChannel`) to
        retrieve the cookie that it set via `_set_cookie`.

        :returns: opaque cookie value that was set via `_set_cookie`
        """
        return self._cookie
def _get_pending_msg(self, consumer_tag):
"""Get a pending message for the consumer tag from the stack.
:param str consumer_tag: The consumer tag to get a message from
:rtype: tuple(pika.frame.Header, pika.frame.Method, str|unicode)
"""
return self._pending[consumer_tag].pop(0)
    def _handle_content_frame(self, frame_value):
        """This is invoked by the connection when frames that are not registered
        with the CallbackManager have been found. This should only be the case
        when the frames are related to content delivery.

        The frame_dispatcher will be invoked which will return the fully formed
        message in three parts when all of the body frames have been received.

        :param pika.amqp_object.Frame frame_value: The frame to deliver
        """
        try:
            response = self.frame_dispatcher.process(frame_value)
        except exceptions.UnexpectedFrameError:
            # Not a content-bearing frame; log and drop it
            return self._unexpected_frame(frame_value)
        # response stays None until the dispatcher has assembled the full
        # (method, header, body) triple
        if response:
            if isinstance(response[0].method, spec.Basic.Deliver):
                self._on_deliver(*response)
            elif isinstance(response[0].method, spec.Basic.GetOk):
                self._on_getok(*response)
            elif isinstance(response[0].method, spec.Basic.Return):
                self._on_return(*response)
    def _has_content(self, method_frame):
        """Return a bool if it's a content method as defined by the spec.

        :param pika.amqp_object.Method method_frame: The method frame received
        :rtype: bool
        """
        return spec.has_content(method_frame.INDEX)
    def _on_cancel(self, method_frame):
        """When the broker cancels a consumer, delete it from our internal
        dictionary.

        :param pika.frame.Method method_frame: The method frame received
        """
        if method_frame.method.consumer_tag in self._cancelled:
            # User-initiated cancel is waiting for Cancel-ok; _on_cancelok
            # will perform the cleanup when it arrives
            return
        self._cleanup_consumer_ref(method_frame.method.consumer_tag)
    def _on_cancelok(self, method_frame):
        """Called in response to a frame from the Broker when the
        client sends Basic.Cancel.

        :param pika.frame.Method method_frame: The method frame received
        """
        self._cleanup_consumer_ref(method_frame.method.consumer_tag)
    def _on_close(self, method_frame):
        """Handle the case where our channel has been closed for us.

        :param pika.frame.Method method_frame: The close frame
        """
        LOGGER.info('%s', method_frame)
        LOGGER.warning('Received remote Channel.Close (%s): %s',
                       method_frame.method.reply_code,
                       method_frame.method.reply_text)
        # Acknowledge the close only while the connection can still transmit
        if self.connection.is_open:
            self._send_method(spec.Channel.CloseOk())
        self._set_state(self.CLOSED)
        # Notify '_on_channel_close' subscribers with the broker's reply info
        self.callbacks.process(self.channel_number, '_on_channel_close', self,
                               self, method_frame.method.reply_code,
                               method_frame.method.reply_text)
        self._cleanup()
    def _on_closeok(self, method_frame):
        """Invoked when RabbitMQ replies to a Channel.Close method.

        :param pika.frame.Method method_frame: The CloseOk frame
        """
        self._set_state(self.CLOSED)
        # 0/'' signal a client-initiated, error-free close to subscribers
        self.callbacks.process(self.channel_number, '_on_channel_close', self,
                               self, 0, '')
        self._cleanup()
    def _on_deliver(self, method_frame, header_frame, body):
        """Cope with reentrancy. If a particular consumer is still active when
        another delivery appears for it, queue the deliveries up until it
        finally exits.

        :param pika.frame.Method method_frame: The method frame received
        :param pika.frame.Header header_frame: The header frame received
        :param body: The body received
        :type body: str or unicode
        """
        consumer_tag = method_frame.method.consumer_tag
        if consumer_tag in self._cancelled:
            # Delivery raced with a client-initiated cancel; reject it back
            # to the broker unless the consumer was in no-ack mode
            if self.is_open and consumer_tag not in self._consumers_with_noack:
                self.basic_reject(method_frame.method.delivery_tag)
            return
        if consumer_tag not in self._consumers:
            # Consumer callback not registered yet; park the message
            return self._add_pending_msg(consumer_tag, method_frame,
                                         header_frame, body)
        # Drain any parked messages first so delivery order is preserved
        while self._pending[consumer_tag]:
            self._consumers[consumer_tag](*self._get_pending_msg(consumer_tag))
        self._consumers[consumer_tag](self, method_frame.method,
                                      header_frame.properties, body)
    def _on_eventok(self, method_frame):
        """Generic events that returned ok that may have internal callbacks.
        We keep a list of what we've yet to implement so that we don't silently
        drain events that we don't support.

        :param pika.frame.Method method_frame: The method frame received
        """
        LOGGER.debug('Discarding frame %r', method_frame)
    def _on_flow(self, method_frame_unused):
        """Called if the server sends a Channel.Flow frame.

        :param pika.frame.Method method_frame_unused: The Channel.Flow frame
        """
        # Only warn when the user never registered an on-flow callback
        if self._has_on_flow_callback is False:
            LOGGER.warning('Channel.Flow received from server')
    def _on_flowok(self, method_frame):
        """Called in response to us asking the server to toggle on Channel.Flow.

        :param pika.frame.Method method_frame: The method frame received
        """
        self.flow_active = method_frame.method.active
        if self._on_flowok_callback:
            # One-shot callback: cleared after it fires
            self._on_flowok_callback(method_frame.method.active)
            self._on_flowok_callback = None
        else:
            LOGGER.warning('Channel.FlowOk received with no active callbacks')
    def _on_getempty(self, method_frame):
        """When we receive an empty (Basic.GetEmpty) reply do nothing but
        log it.

        :param pika.frame.Method method_frame: The method frame received
        """
        LOGGER.debug('Received Basic.GetEmpty: %r', method_frame)
def _on_getok(self, method_frame, header_frame, body):
"""Called in reply to a Basic.Get when there is a message.
:param pika.frame.Method method_frame: The method frame received
:param pika.frame.Header header_frame: The header frame received
:param body: The body received
:type body: str or unicode
"""
if self._on_getok_callback is not None:
callback = self._on_getok_callback
self._on_getok_callback = None
callback(self, method_frame.method, header_frame.properties, body)
else:
LOGGER.error('Basic.GetOk received with no active callback')
def _on_openok(self, frame_unused):
"""Called by our callback handler when we receive a Channel.OpenOk and
subsequently calls our _on_openok_callback which was passed into the
Channel constructor. The reason we do this is because we want to make
sure that the on_open_callback parameter passed into the Channel
constructor is not the first callback we make.
:param pika.frame.Method frame_unused: Unused Channel.OpenOk frame
"""
self._set_state(self.OPEN)
if self._on_openok_callback is not None:
self._on_openok_callback(self)
    def _on_return(self, method_frame, header_frame, body):
        """Called if the server sends a Basic.Return frame.

        :param pika.frame.Method method_frame: The Basic.Return frame
        :param pika.frame.Header header_frame: The content header frame
        :param body: The message body
        :type body: str or unicode
        """
        # Only warn when no '_on_return' subscriber consumed the event
        if not self.callbacks.process(self.channel_number, '_on_return', self,
                                      self,
                                      method_frame.method,
                                      header_frame.properties,
                                      body):
            LOGGER.warning('Basic.Return received from server (%r, %r)',
                           method_frame.method, header_frame.properties)
    def _on_selectok(self, method_frame):
        """Called when the broker sends a Confirm.SelectOk frame.

        :param pika.frame.Method method_frame: The method frame received
        """
        LOGGER.debug("Confirm.SelectOk Received: %r", method_frame)
    def _on_synchronous_complete(self, method_frame_unused):
        """This is called when a synchronous command is completed. It will undo
        the blocking state and send all the frames that stacked up while we
        were in the blocking state.

        :param pika.frame.Method method_frame_unused: The method frame received
        """
        LOGGER.debug('%i blocked frames', len(self._blocked))
        self._blocking = None
        # Re-issuing a blocked frame may set _blocking again, which pauses
        # the drain until the next synchronous reply arrives
        while len(self._blocked) > 0 and self._blocking is None:
            self._rpc(*self._blocked.popleft())
    def _rpc(self, method_frame, callback=None, acceptable_replies=None):
        """Shortcut wrapper to the Connection's rpc command using its callback
        stack, passing in our channel number.

        :param pika.amqp_object.Method method_frame: The method frame to call
        :param method callback: The callback for the RPC response
        :param list acceptable_replies: The replies this RPC call expects
        :raises exceptions.ChannelClosed: if the channel is not open
        :raises TypeError: on an invalid acceptable_replies or callback value
        """
        # Make sure the channel is open
        if self.is_closed:
            raise exceptions.ChannelClosed
        # If the channel is blocking, add subsequent commands to our stack
        if self._blocking:
            return self._blocked.append([method_frame, callback,
                                         acceptable_replies])
        # Validate we got None or a list of acceptable_replies
        if acceptable_replies and not isinstance(acceptable_replies, list):
            raise TypeError("acceptable_replies should be list or None")
        # Validate the callback is callable
        if callback and not is_callable(callback):
            raise TypeError("callback should be None, a function or method.")
        # Block until a response frame is received for synchronous frames
        if method_frame.synchronous:
            self._blocking = method_frame.NAME
        # If acceptable replies are set, add callbacks
        if acceptable_replies:
            # A reply may be a bare method or a (method, arguments) tuple
            # used to match on the reply frame's content
            for reply in acceptable_replies or list():
                if isinstance(reply, tuple):
                    reply, arguments = reply
                else:
                    arguments = None
                LOGGER.debug('Adding in on_synchronous_complete callback')
                self.callbacks.add(self.channel_number, reply,
                                   self._on_synchronous_complete,
                                   arguments=arguments)
                if callback:
                    LOGGER.debug('Adding passed in callback')
                    self.callbacks.add(self.channel_number, reply, callback,
                                       arguments=arguments)
        self._send_method(method_frame)
    def _send_method(self, method_frame, content=None):
        """Shortcut wrapper to send a method through our connection, passing in
        the channel number.

        :param pika.object.Method method_frame: The method frame to send
        :param tuple content: If set, is a content frame, is tuple of
            properties and body.
        """
        self.connection._send_method(self.channel_number, method_frame, content)
    def _set_cookie(self, cookie):
        """Used by wrapper layer (e.g., `BlockingConnection`) to link the
        channel implementation back to the proxy. See `_get_cookie`.

        :param cookie: an opaque value; typically a proxy channel
            implementation instance (e.g., `BlockingChannel` instance)
        """
        self._cookie = cookie
    def _set_state(self, connection_state):
        """Set the channel connection state to the specified state value.

        :param int connection_state: The connection_state value
        """
        self._state = connection_state
    def _unexpected_frame(self, frame_value):
        """Invoked when a frame is received that is not setup to be processed.

        :param pika.frame.Frame frame_value: The frame received
        """
        LOGGER.warning('Unexpected frame: %r', frame_value)
    def _validate_channel_and_callback(self, callback):
        """Ensure the channel is open and the callback, if given, is callable.

        :param method callback: Callback to validate; may be None
        :raises exceptions.ChannelClosed: if the channel is not open
        :raises ValueError: if callback is set but not callable
        """
        if not self.is_open:
            raise exceptions.ChannelClosed()
        if callback is not None and not is_callable(callback):
            raise ValueError('callback must be a function or method')
class ContentFrameDispatcher(object):
    """Assemble content-bearing frame sequences into complete messages.

    Collects a Method frame, its Header frame and any number of Body frames,
    then hands the three parts back once the full body has been received.
    """

    def __init__(self):
        """Initialize empty per-message assembly state."""
        self._method_frame = None
        self._header_frame = None
        self._seen_so_far = 0
        self._body_fragments = list()

    def process(self, frame_value):
        """Consume one frame; return the assembled message when complete.

        Invoked by the Channel object for frames that are not set up in the
        rpc process and have no explicit reply types defined (Basic.Publish,
        Basic.GetOk and Basic.Return deliveries).

        :param Method|Header|Body frame_value: The frame to process
        :raises: pika.exceptions.UnexpectedFrameError
        """
        if (isinstance(frame_value, frame.Method) and
                spec.has_content(frame_value.method.INDEX)):
            self._method_frame = frame_value
            return None
        if isinstance(frame_value, frame.Header):
            self._header_frame = frame_value
            # A zero-length body means no Body frames will follow
            if frame_value.body_size == 0:
                return self._finish()
            return None
        if isinstance(frame_value, frame.Body):
            return self._handle_body_frame(frame_value)
        raise exceptions.UnexpectedFrameError(frame_value)

    def _finish(self):
        """Return (method, header, body) and reset for the next message.

        :rtype: tuple(pika.frame.Method, pika.frame.Header, str)
        """
        message = (self._method_frame, self._header_frame,
                   b''.join(self._body_fragments))
        self._reset()
        return message

    def _handle_body_frame(self, body_frame):
        """Accumulate one body fragment; finish when the declared size arrives.

        :param Body body_frame: The body frame
        :raises: pika.exceptions.BodyTooLongError
        :rtype: tuple(pika.frame.Method, pika.frame.Header, str)|None
        """
        fragment = body_frame.fragment
        self._seen_so_far += len(fragment)
        self._body_fragments.append(fragment)
        expected = self._header_frame.body_size
        if self._seen_so_far == expected:
            return self._finish()
        if self._seen_so_far > expected:
            raise exceptions.BodyTooLongError(self._seen_so_far, expected)
        return None

    def _reset(self):
        """Clear all per-message assembly state."""
        self._method_frame = None
        self._header_frame = None
        self._seen_so_far = 0
        self._body_fragments = list()
|
{
"content_hash": "494d490452f007104d818638deb3c607",
"timestamp": "",
"source": "github",
"line_count": 1254,
"max_line_length": 80,
"avg_line_length": 42.33572567783094,
"alnum_prop": 0.5985985797434497,
"repo_name": "hugoxia/pika",
"id": "2bf3b0f0d0b7066ee6779e0bb883da5c3ff91705",
"size": "53089",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "pika/channel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "707627"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from opensearchsdk.service import BaseService
from opensearchsdk.base import Base
from opensearchsdk import log
from opensearchsdk import set_stream_logger
from opensearchsdk.config import Config
__author__ = 'barycenter'
def lazy_prop(fn):
    """Wrap *fn* as a read-only property whose value is computed once.

    The first access calls ``fn`` and caches the result on the instance
    under ``_lazy_<name>``; subsequent accesses return the cached value.

    :param fn: zero-argument method to wrap
    :return: a lazily-evaluated property
    """
    cache_attr = '_lazy_' + fn.__name__

    @property
    def _lazyprop(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)

    return _lazyprop
class Q(Base):
    """A composable search condition for the ``query`` clause.

    Instances render to OpenSearch query-string fragments and combine with
    operators, for example:

        Q(title="1") & Q(title="2")  ->  (title:'1' AND title:'2')
        Q(body="2") | Q(body="3")    ->  (body:'2' OR body:'3')
        ~Q(title="1")                ->  ANDNOT title:'1'
        +Q('f') / -Q('f')            ->  +f / -f  (sort direction helpers)
    """
    def __init__(self, *args, **kwargs):
        """Build a single ``index:'value'`` condition.

        Accepts either one positional string (used verbatim when it already
        contains ``:``, otherwise wrapped as ``default:'...'``) or one
        ``field=value`` keyword plus an optional ``boost`` weight. With no
        arguments, ``query_string`` is left unset; the operators below use
        that form to construct combined instances.

        :raises ValueError: if only ``boost`` is passed as a keyword
        """
        boost = None
        # only the first condition given is supported
        if len(args) > 0:
            tmp = args[0]
            if ":" not in tmp:
                log.warning("there is no index, it will be set default index")
                self.query_string = "default:'%s'" % tmp
            else:
                self.query_string = tmp
        elif len(kwargs) > 0:
            boost = kwargs.get('boost')
            if boost is not None:
                kwargs.pop('boost')
                if len(kwargs) == 0:
                    raise ValueError("cannot only assign a boost parameter")
            # next(iter(...)) works on both py2 lists and py3 dict views;
            # kwargs.items()[0] raised TypeError on python 3
            field, value = next(iter(kwargs.items()))
            self.query_string = "%s:'%s'" % (field, value)
            if boost is not None:
                self.query_string = "%s^%s" % (self.query_string, boost)

    def __or__(self, other):
        """Combine two conditions with OR."""
        if other:
            if type(other) is not Q:
                raise TypeError("please use Q instance")
            query_string = "(%s OR %s)" % (self.query_string, other.query_string)
            q = Q()
            q.query_string = query_string
        else:
            raise TypeError("invalid operate")
        return q

    def __invert__(self):
        """Negate the condition (renders as ``ANDNOT ...``)."""
        query_string = "ANDNOT %s" % self.query_string
        q = Q()
        q.query_string = query_string
        return q

    def __and__(self, other):
        """Combine two conditions with AND."""
        if other:
            if type(other) is not Q:
                raise TypeError("please use Q instance")
            query_string = "(%s AND %s)" % (self.query_string, other.query_string)
        else:
            raise TypeError("invalid operate")
        q = Q()
        q.query_string = query_string
        return q

    def __pos__(self):
        """Support ``+`` for an ascending sort sub search."""
        query_string = "+%s" % self.query_string
        q = Q()
        q.query_string = query_string
        return q

    def __neg__(self):
        """Support ``-`` for a descending sort sub search."""
        query_string = "-%s" % self.query_string
        q = Q()
        q.query_string = query_string
        return q

    def __repr__(self):
        return self.query_string

    def __str__(self):
        return self.__repr__()
class FQ(Base):
    """A composable condition for the ``filter`` clause."""
    def __init__(self, *args):
        """Wrap the first positional argument as a quoted filter string.

        With no arguments, ``query_string`` is left unset; the combining
        operators use that form internally.
        """
        if args:
            self.query_string = "'%s'" % args[0]

    def __and__(self, other):
        """Combine two filter conditions with AND."""
        return self._combined(other, "AND")

    def __or__(self, other):
        """Combine two filter conditions with OR."""
        return self._combined(other, "OR")

    def _combined(self, other, operator):
        # Shared implementation for __and__/__or__: validates the operand
        # and returns a fresh FQ carrying the joined expression.
        if not other:
            raise TypeError("invalid operate")
        if type(other) is not FQ:
            raise TypeError("please use FQ instance")
        joined = FQ()
        joined.query_string = "(%s %s %s)" % (self.query_string, operator,
                                              other.query_string)
        return joined

    def __repr__(self):
        return self.query_string

    def __str__(self):
        return self.__repr__()
class Search(BaseService):
    """Chainable builder for OpenSearch ``/search`` requests.

    Each setter records one sub clause and returns ``self`` so calls can be
    chained; ``results()`` assembles the clauses into the final query string
    and performs the request, while ``query_raw()`` sends a caller-provided
    query string as-is.

    WARNING: this class is NOT thread safe
    """
    BASE_URL = "/search"

    def __init__(self, client):
        """
        :param client: transport object used to send the search request
        """
        BaseService.__init__(self)
        self.__client = client
        self.query_string = ""
        self.sub_searches = []
        self.__index_name = None
        self.__fetch_fields = None
        self.__qp = None
        self.__disable = None
        self.__first_formula_name = None
        self.__formula_name = None
        self.__summary = dict()
        self.__query = None
        self.__rank = None
        self.__sort = None
        self.__config = dict()
        self.__aggregate = None
        self.__filter = None
        self.__distinct = dict()
        self.__kvpairs = None
        if Config.DEBUG:
            set_stream_logger()

    def _check_client(self):
        # Guard used right before sending a request
        if not self.__client:
            self.invalid_client()

    def index_name(self, *args):
        """Set the application name(s) to search, joined with ``;``.

        :param args: one or more application names
        :return: self (chainable)
        """
        self.__index_name = ";".join(args)
        return self

    def fetch_fields(self, *args):
        """Set the fields to return, joined with ``;``."""
        self.__fetch_fields = ";".join(args)
        return self

    def qp(self, *args):
        """Set the query-processor chain name(s), joined with ``;``."""
        self.__qp = ";".join(args)
        return self

    def disable(self):
        """Disable query-processor rewriting for this search."""
        self.__disable = 'qp'
        return self

    def first_formula_name(self, param):
        """Set the first-rank formula name."""
        self.__first_formula_name = param
        return self

    def formula_name(self, param):
        """Set the second-rank formula name."""
        self.__formula_name = param
        return self

    def summary(self, summary_field, summary_element=None, summary_ellipsis=None,
                summary_snipped=None, summary_len=None, summary_prefix=None,
                summary_postfix=None):
        """Configure the summary (snippet/highlight) settings.

        :param summary_field: the field to summarize (required)
        :raises ValueError: if summary_field is missing, or only one of
            summary_prefix/summary_postfix is given
        :return: self (chainable)
        """
        if not summary_field:
            raise ValueError("please set summary_field when using summary")
        # Validate before mutating so a bad call leaves state untouched
        if (summary_prefix is None) != (summary_postfix is None):
            raise ValueError(
                "summary_prefix and summary_postfix should be provided both")
        self.__summary['summary_field'] = summary_field
        self.__summary['summary_element'] = summary_element
        self.__summary['summary_ellipsis'] = summary_ellipsis
        self.__summary['summary_snipped'] = summary_snipped
        self.__summary['summary_len'] = summary_len
        self.__summary['summary_prefix'] = summary_prefix
        self.__summary['summary_postfix'] = summary_postfix
        return self

    def filter(self, *args):
        """Set the ``filter`` sub clause.

        String arguments are used verbatim, FQ instances contribute their
        query strings, and lists are flattened one level. Conditions are
        always joined with AND — use FQ for more complex combinations.

        :return: self (chainable)
        """
        flattened = []
        for item in args:
            if type(item) == list:
                flattened.extend(item)
            else:
                flattened.append(item)
        clauses = [item.query_string if type(item) is FQ else item
                   for item in flattened]
        self.__filter = " AND ".join(clauses)
        return self

    def config(self, start=0, hit=10, result_format='json', rerank_size='200'):
        """Set the ``config`` sub clause (paging and result format).

        May be called multiple times; later calls overwrite earlier values.

        :return: self (chainable)
        """
        self.__config['start'] = start
        self.__config['hit'] = hit
        self.__config['result_format'] = result_format
        self.__config['rerank_size'] = rerank_size
        return self

    def query(self, *args, **kwargs):
        """Add ``query`` clause conditions (joined with AND).

        A bare string becomes ``default:'...'`` unless it already contains
        ``:``; ``k=v`` keywords become ``k:'v'``; Q instances contribute
        their query strings; lists are flattened one level.

        :return: self (chainable)
        """
        if self.__query is None:
            self.__query = list()
        candidates = []
        for item in args:
            if type(item) == list:
                candidates.extend(item)
            else:
                candidates.append(item)
        candidates.extend("%s:'%s'" % (k, v) for k, v in kwargs.items())
        for item in candidates:
            if type(item) is Q:
                self.__query.append(item.query_string)
            elif ":" not in item:
                log.warning("there is no query index %s, it will be set to default" % item)
                self.__query.append("default:'%s'" % item)
            else:
                self.__query.append(item)
        return self

    def rank(self, *args, **kwargs):
        """Attach a RANK clause to the query; a query must be set separately.

        Accepts the same argument forms as the query condition: one
        positional string, or one ``field=value`` keyword (plus an optional
        ``boost`` keyword).

        :raises ValueError: if called with no condition, or with only boost
        :return: self (chainable)
        """
        if len(args) > 0:
            tmp = args[0]
            if ":" not in tmp:
                log.warning("there is no index, it will be set default index")
                query_string = "default:'%s'" % tmp
            else:
                query_string = tmp
        elif len(kwargs) > 0:
            boost = kwargs.get('boost')
            if boost is not None:
                kwargs.pop('boost')
                if len(kwargs) == 0:
                    raise ValueError("cannot only assign a boost parameter")
            # py3-safe first-item access; kwargs.items()[0] raised TypeError
            # on python 3
            field, value = next(iter(kwargs.items()))
            query_string = "%s:'%s'" % (field, value)
            # NOTE(review): unlike Q.__init__, boost is accepted but never
            # appended as "^boost" here; preserved as-is — confirm whether
            # RANK clauses support boosting before changing
        else:
            # previously fell through to a NameError on query_string
            raise ValueError("rank() needs a positional or keyword condition")
        self.__rank = query_string
        return self

    def sort(self, *args):
        """Set the ``sort`` sub clause; items without ``+``/``-`` get ``+``.

        Plaintext strings and +Q(...)/-Q(...) instances are both accepted.

        :return: self (chainable)
        """
        if len(args) == 0:
            return self
        sort_items = []
        for item in args:
            item = str(item)
            if item.startswith(("+", "-")):
                sort_items.append(item)
            else:
                # ascending order is the default direction
                sort_items.append("+%s" % item)
        self.__sort = ";".join(sort_items)
        return self

    def aggregate(self, group_key, agg_fun, range_p=None, agg_filter=None,
                  agg_sampler_threshold=None, agg_sampler_step=None,
                  max_group=None):
        """Append one ``aggregate`` sub clause; may be called repeatedly.

        :param group_key: field to group by (required)
        :param agg_fun: aggregate function expression (required)
        :param range_p: optional range specification
        :param agg_filter: optional filter applied before aggregation
        :param agg_sampler_threshold: optional sampling threshold
        :param agg_sampler_step: optional sampling step
        :param max_group: optional cap on the number of groups
        :raises ValueError: if group_key or agg_fun is missing
        :return: self (chainable)
        """
        if self.__aggregate is None:
            self.__aggregate = []
        if not group_key:
            raise ValueError("please set a group_key when using aggregate")
        if not agg_fun:
            raise ValueError("please set a agg_fun when using aggregate")
        aggregate_item = {'group_key': group_key, 'agg_fun': agg_fun}
        if range_p:
            aggregate_item['range'] = range_p
        if agg_filter:
            aggregate_item['agg_filter'] = agg_filter
        if agg_sampler_threshold:
            aggregate_item['agg_sampler_threshold'] = agg_sampler_threshold
        if agg_sampler_step:
            aggregate_item['agg_sampler_step'] = agg_sampler_step
        if max_group:
            aggregate_item['max_group'] = max_group
        self.__aggregate.append(aggregate_item)
        return self

    def distinct(self, dist_key, dist_times=None, dist_count=None,
                 reserved=None, update_total_hit=None, dist_filter=None,
                 grade=None):
        """Set the ``distinct`` sub clause; only truthy options are stored.

        :param dist_key: field to deduplicate on (required)
        :raises ValueError: if dist_key is falsy
        :return: self (chainable)
        """
        if not dist_key:
            # message previously said "disk_key"
            raise ValueError("dist_key required")
        self.__distinct['dist_key'] = dist_key
        if dist_times:
            self.__distinct['dist_times'] = dist_times
        if dist_count:
            self.__distinct['dist_count'] = dist_count
        if reserved:
            self.__distinct['reserved'] = reserved
        if update_total_hit:
            self.__distinct['update_total_hit'] = update_total_hit
        if dist_filter:
            self.__distinct['dist_filter'] = dist_filter
        if grade:
            self.__distinct['grade'] = grade
        return self

    def kvpairs(self, **kwargs):
        """Set the ``kvpairs`` sub clause from keyword arguments."""
        if len(kwargs) > 0:
            self.__kvpairs = ",".join(["%s:%s" % (k, v)
                                       for k, v in kwargs.items()])
        return self

    def query_raw(self, query_string):
        """Send a raw, fully formed query string as-is, e.g.::

            query=default:'dress'&&filter=(hit+sale)*rate>10000

        ATTENTION: other setters such as index_name/kvpairs must still be
        called first; this only replaces the assembled query string.

        :param query_string: the complete query expression
        :return: wrapped response from the search API
        """
        self.query_string = query_string
        return self.__result_set()

    def results(self):
        """Assemble the configured sub clauses and execute the search.

        :return: wrapped response from the search API
        """
        self.query_string = self.__build_query_string()
        return self.__result_set()

    def __build_query_string(self):
        """Assemble the recorded sub clauses into one ``&&``-joined string.

        :raises ValueError: if no query condition has been defined
        """
        # Rebuild from scratch so repeated results() calls do not duplicate
        # previously appended sub searches
        self.sub_searches = []
        # process query and rank
        if self.__query is None:
            raise ValueError("please define a query search")
        query_sub = " AND ".join(self.__query)
        # ~Q() clauses render as "ANDNOT x"; drop the duplicated AND
        query_sub = query_sub.replace("AND ANDNOT", "ANDNOT")
        if self.__rank is not None:
            query_sub = "(%s) RANK %s" % (query_sub, self.__rank)
        self.sub_searches.append("query=%s" % query_sub)
        # process config
        if self.__config:
            self.sub_searches.append(
                "config=%s" % ";".join(["%s:%s" % (k, v)
                                        for k, v in self.__config.items()]))
        # process filter
        if self.__filter:
            self.sub_searches.append("filter=%s" % self.__filter)
        # process sort
        if self.__sort:
            self.sub_searches.append("sort=%s" % self.__sort)
        # process aggregate
        if self.__aggregate:
            agg_parts = []
            for item in self.__aggregate:
                agg_parts.append(",".join(["%s:%s" % (k, v)
                                           for k, v in item.items()]))
            self.sub_searches.append("aggregate=%s" % ";".join(agg_parts))
        # process distinct
        if self.__distinct:
            self.sub_searches.append(
                "distinct=%s" % ",".join(["%s:%s" % (k, v)
                                          for k, v in self.__distinct.items()]))
        # process kvpairs
        if self.__kvpairs:
            self.sub_searches.append("kvpairs=%s" % self.__kvpairs)
        return "&&".join(self.sub_searches)

    def __result_set(self):
        """Send the current query string to the search API and wrap the reply.

        :raises ValueError: if no index_name has been set
        """
        log.debug("querying %s" % self.query_string)
        self._check_client()
        params = dict()
        if not self.__index_name:
            raise ValueError("must provide an index_name at least")
        params['index_name'] = self.__index_name
        params['query'] = self.query_string
        if self.__fetch_fields:
            params['fetch_fields'] = self.__fetch_fields
        if self.__qp:
            params['qp'] = self.__qp
        if self.__disable:
            params['disable'] = self.__disable
        if self.__first_formula_name:
            params['first_formula_name'] = self.__first_formula_name
        if self.__formula_name:
            params['formula_name'] = self.__formula_name
        if self.__summary:
            params['summary'] = self.__summary
        url = "%s" % Search.BASE_URL
        resp = self.__client.send_message(url, method='GET', params=params)
        resp_info = self.wrap_result(resp)
        return resp_info
|
{
"content_hash": "6bf735cf97d3c16097df36b2a8ebd54d",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 116,
"avg_line_length": 32.2491103202847,
"alnum_prop": 0.53774001324211,
"repo_name": "Cicero-Zhao/opensearch-sdk",
"id": "1453527082b0209e43dd9403ea147a28d1455068",
"size": "18158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opensearchsdk/service/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71850"
}
],
"symlink_target": ""
}
|
import ConfigParser
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
import daisy.api.policy
from daisy.common import exception
from daisy import i18n
# NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but
# we must set manually for compatibility with py26
CONFIG = ConfigParser.SafeConfigParser(dict_type=OrderedDict)
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
# Configuration options controlling whether and how property protections
# are enforced by the API service.
property_opts = [
    cfg.StrOpt('property_protection_file',
               help=_('The location of the property protection file.'
                      'This file contains the rules for property protections '
                      'and the roles/policies associated with it. If this '
                      'config value is not specified, by default, property '
                      'protections won\'t be enforced. If a value is '
                      'specified and the file is not found, then the '
                      'daisy-api service will not start.')),
    cfg.StrOpt('property_protection_rule_format',
               default='roles',
               choices=('roles', 'policies'),
               help=_('This config value indicates whether "roles" or '
                      '"policies" are used in the property protection file.')),
]
CONF = cfg.CONF
CONF.register_opts(property_opts)
# NOTE (spredzy): Due to the particularly lengthy name of the exception
# and the number of times it is raised in this file, a short alias is
# created.
InvalidPropProtectConf = exception.InvalidPropertyProtectionConfiguration
def is_property_protection_enabled():
    """Report whether a property protection file has been configured.

    :returns: True when CONF.property_protection_file is set, else False
    """
    return bool(CONF.property_protection_file)
class PropertyRules(object):
    """Loads property protection rules and answers access-control queries.

    Rules come from the file named by CONF.property_protection_file. Each
    section name is treated as a regular expression matched against property
    names, and each of its options (create/read/update/delete) lists either
    roles or policies, depending on
    CONF.property_protection_rule_format ('roles' or 'policies').
    """

    def __init__(self, policy_enforcer=None):
        # List of (compiled_regex, {operation: permissions}) in file order;
        # first matching regex wins in check_property_rules.
        self.rules = []
        # Maps compiled regex -> original section string ('policies' mode).
        self.prop_exp_mapping = {}
        self.policies = []
        self.policy_enforcer = policy_enforcer or daisy.api.policy.Enforcer()
        self.prop_prot_rule_format = CONF.property_protection_rule_format
        self.prop_prot_rule_format = self.prop_prot_rule_format.lower()
        self._load_rules()

    def _load_rules(self):
        """Parse the property protection file into self.rules.

        :raises InvalidPropProtectConf: when the file is missing/unreadable,
            the rule format is invalid, or a rule is malformed.
        """
        try:
            conf_file = CONF.find_file(CONF.property_protection_file)
            CONFIG.read(conf_file)
        except Exception as e:
            msg = (_LE("Couldn't find property protection file %(file)s: "
                       "%(error)s.") % {'file': CONF.property_protection_file,
                                        'error': e})
            LOG.error(msg)
            raise InvalidPropProtectConf()
        if self.prop_prot_rule_format not in ['policies', 'roles']:
            msg = _LE("Invalid value '%s' for "
                      "'property_protection_rule_format'. "
                      "The permitted values are "
                      "'roles' and 'policies'") % self.prop_prot_rule_format
            LOG.error(msg)
            raise InvalidPropProtectConf()
        operations = ['create', 'read', 'update', 'delete']
        properties = CONFIG.sections()
        for property_exp in properties:
            property_dict = {}
            compiled_rule = self._compile_rule(property_exp)
            for operation in operations:
                permissions = CONFIG.get(property_exp, operation)
                if permissions:
                    if self.prop_prot_rule_format == 'policies':
                        # Exactly one policy per operation; combinations
                        # belong in the policy file itself.
                        if ',' in permissions:
                            LOG.error(
                                _LE("Multiple policies '%s' not allowed "
                                    "for a given operation. Policies can be "
                                    "combined in the policy file"),
                                permissions)
                            raise InvalidPropProtectConf()
                        self.prop_exp_mapping[compiled_rule] = property_exp
                        self._add_policy_rules(property_exp, operation,
                                               permissions)
                        permissions = [permissions]
                    else:
                        # 'roles' format: comma-separated role list; '@'
                        # means allow-all and '!' means deny-all, which
                        # cannot be combined.
                        permissions = [permission.strip() for permission in
                                       permissions.split(',')]
                        if '@' in permissions and '!' in permissions:
                            msg = (_LE(
                                "Malformed property protection rule in "
                                "[%(prop)s] %(op)s=%(perm)s: '@' and '!' "
                                "are mutually exclusive") %
                                dict(prop=property_exp,
                                     op=operation,
                                     perm=permissions))
                            LOG.error(msg)
                            raise InvalidPropProtectConf()
                    property_dict[operation] = permissions
                else:
                    # Missing/empty permission: nobody may perform the
                    # operation on matching properties.
                    property_dict[operation] = []
                    LOG.warn(
                        _('Property protection on operation %(operation)s'
                          ' for rule %(rule)s is not found. No role will be'
                          ' allowed to perform this operation.') %
                        {'operation': operation,
                         'rule': property_exp})
            self.rules.append((compiled_rule, property_dict))

    def _compile_rule(self, rule):
        """Compile a section name as a regex.

        :raises InvalidPropProtectConf: when the pattern is invalid.
        """
        try:
            return re.compile(rule)
        except Exception as e:
            msg = (_LE("Encountered a malformed property protection rule"
                       " %(rule)s: %(error)s.") % {'rule': rule,
                                                   'error': e})
            LOG.error(msg)
            raise InvalidPropProtectConf()

    def _add_policy_rules(self, property_exp, action, rule):
        """Add policy rules to the policy enforcer.

        For example, if the file listed as property_protection_file has:
        [prop_a]
        create = glance_creator
        then the corresponding policy rule would be:
        "prop_a:create": "rule:glance_creator"
        where glance_creator is defined in policy.json. For example:
        "glance_creator": "role:admin or role:glance_create_user"
        """
        rule = "rule:%s" % rule
        rule_name = "%s:%s" % (property_exp, action)
        rule_dict = policy.Rules.from_dict({
            rule_name: rule
        })
        self.policy_enforcer.add_rules(rule_dict)

    def _check_policy(self, property_exp, action, context):
        """Return True iff the enforcer permits '<property_exp>:<action>'."""
        try:
            action = ":".join([property_exp, action])
            self.policy_enforcer.enforce(context, action, {})
        except exception.Forbidden:
            return False
        return True

    def check_property_rules(self, property_name, action, context):
        """Decide whether `action` on `property_name` is allowed in `context`.

        :param property_name: property name to test against the rule regexes
        :param action: one of 'create', 'read', 'update', 'delete'
        :param context: request context (roles are read from context.roles)
        :returns: True when permitted; True also when no rules are loaded.
        """
        roles = context.roles
        if not self.rules:
            return True
        if action not in ['create', 'read', 'update', 'delete']:
            return False
        # First rule whose regex matches the property name wins.
        for rule_exp, rule in self.rules:
            if rule_exp.search(str(property_name)):
                break
        else:  # no matching rules
            return False
        rule_roles = rule.get(action)
        if rule_roles:
            if '!' in rule_roles:
                return False
            elif '@' in rule_roles:
                return True
            if self.prop_prot_rule_format == 'policies':
                prop_exp_key = self.prop_exp_mapping[rule_exp]
                return self._check_policy(prop_exp_key, action,
                                          context)
            if set(roles).intersection(set(rule_roles)):
                return True
        return False
|
{
"content_hash": "fdb0fc9907902653b25bab09bd42f3f4",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 40.40625,
"alnum_prop": 0.52861562258314,
"repo_name": "OpenDaisy/daisy-api",
"id": "802a7d1db444c562eeeeee83250d34cd1575d18d",
"size": "8363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisy/common/property_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1475450"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
}
|
"""The blocking connection adapter module implements blocking semantics on top
of Pika's core AMQP driver. While most of the asynchronous expectations are
removed when using the blocking connection adapter, it attempts to remain true
to the asynchronous RPC nature of the AMQP protocol, supporting server sent
RPC commands.
The user facing classes in the module consist of the
:py:class:`~pika.adapters.blocking_connection.BlockingConnection`
and the :class:`~pika.adapters.blocking_connection.BlockingChannel`
classes.
"""
# Disable "access to protected member warnings: this wrapper implementation is
# a friend of those instances
# pylint: disable=W0212
from collections import namedtuple, deque
import contextlib
import functools
import logging
import time
import pika.channel
from pika import compat
from pika import exceptions
import pika.spec
# NOTE: import SelectConnection after others to avoid a circular dependency
from pika.adapters.select_connection import SelectConnection
LOGGER = logging.getLogger(__name__)
class _CallbackResult(object):
""" CallbackResult is a non-thread-safe implementation for receiving
callback results; INTERNAL USE ONLY!
"""
__slots__ = ('_value_class', '_ready', '_values')
def __init__(self, value_class=None):
"""
:param callable value_class: only needed if the CallbackResult
instance will be used with
`set_value_once` and `append_element`.
*args and **kwargs of the value setter
methods will be passed to this class.
"""
self._value_class = value_class
self._ready = None
self._values = None
self.reset()
def reset(self):
"""Reset value, but not _value_class"""
self._ready = False
self._values = None
def __bool__(self):
""" Called by python runtime to implement truth value testing and the
built-in operation bool(); NOTE: python 3.x
"""
return self.is_ready()
# python 2.x version of __bool__
__nonzero__ = __bool__
def __enter__(self):
""" Entry into context manager that automatically resets the object
on exit; this usage pattern helps garbage-collection by eliminating
potential circular references.
"""
return self
def __exit__(self, *args, **kwargs):
"""Reset value"""
self.reset()
def is_ready(self):
"""
:returns: True if the object is in a signaled state
"""
return self._ready
@property
def ready(self):
"""True if the object is in a signaled state"""
return self._ready
def signal_once(self, *_args, **_kwargs): # pylint: disable=W0613
""" Set as ready
:raises AssertionError: if result was already signalled
"""
assert not self._ready, '_CallbackResult was already set'
self._ready = True
def set_value_once(self, *args, **kwargs):
""" Set as ready with value; the value may be retrived via the `value`
property getter
:raises AssertionError: if result was already set
"""
self.signal_once()
try:
self._values = (self._value_class(*args, **kwargs),)
except Exception:
LOGGER.error(
"set_value_once failed: value_class=%r; args=%r; kwargs=%r",
self._value_class, args, kwargs)
raise
def append_element(self, *args, **kwargs):
"""Append an element to values"""
assert not self._ready or isinstance(self._values, list), (
'_CallbackResult state is incompatible with append_element: '
'ready=%r; values=%r' % (self._ready, self._values))
try:
value = self._value_class(*args, **kwargs)
except Exception:
LOGGER.error(
"append_element failed: value_class=%r; args=%r; kwargs=%r",
self._value_class, args, kwargs)
raise
if self._values is None:
self._values = [value]
else:
self._values.append(value)
self._ready = True
@property
def value(self):
"""
:returns: a reference to the value that was set via `set_value_once`
:raises AssertionError: if result was not set or value is incompatible
with `set_value_once`
"""
assert self._ready, '_CallbackResult was not set'
assert isinstance(self._values, tuple) and len(self._values) == 1, (
'_CallbackResult value is incompatible with set_value_once: %r'
% (self._values,))
return self._values[0]
@property
def elements(self):
"""
:returns: a reference to the list containing one or more elements that
were added via `append_element`
:raises AssertionError: if result was not set or value is incompatible
with `append_element`
"""
assert self._ready, '_CallbackResult was not set'
assert isinstance(self._values, list) and len(self._values) > 0, (
'_CallbackResult value is incompatible with append_element: %r'
% (self._values,))
return self._values
class _IoloopTimerContext(object):  # pylint: disable=R0903
    """Context manager that registers a SelectConnection ioloop timer on
    entry and safely unregisters it on exit when it hasn't fired yet.
    """

    def __init__(self, duration, connection):
        """
        :param float duration: non-negative timer duration in seconds
        :param SelectConnection connection:
        """
        assert hasattr(connection, 'add_timeout'), connection
        self._duration = duration
        self._connection = connection
        self._fired = _CallbackResult()
        self._timer_id = None

    def __enter__(self):
        """Register the timer; its expiry signals `_fired`."""
        self._timer_id = self._connection.add_timeout(
            self._duration, self._fired.signal_once)
        return self

    def __exit__(self, *_args, **_kwargs):
        """Cancel the timer unless it has already fired."""
        if not self._fired:
            self._connection.remove_timeout(self._timer_id)

    def is_ready(self):
        """
        :returns: True if timer has fired, False otherwise
        """
        return self._fired.is_ready()
class _TimerEvt(object): # pylint: disable=R0903
"""Represents a timer created via `BlockingConnection.add_timeout`"""
__slots__ = ('timer_id', '_callback')
def __init__(self, callback):
"""
:param callback: see callback_method in `BlockingConnection.add_timeout`
"""
self._callback = callback
# Will be set to timer id returned from the underlying implementation's
# `add_timeout` method
self.timer_id = None
def __repr__(self):
return '%s(timer_id=%s, callback=%s)' % (self.__class__.__name__,
self.timer_id, self._callback)
def dispatch(self):
"""Dispatch the user's callback method"""
self._callback()
class _ConnectionBlockedUnblockedEvtBase(object): # pylint: disable=R0903
"""Base class for `_ConnectionBlockedEvt` and `_ConnectionUnblockedEvt`"""
__slots__ = ('_callback', '_method_frame')
def __init__(self, callback, method_frame):
"""
:param callback: see callback_method parameter in
`BlockingConnection.add_on_connection_blocked_callback` and
`BlockingConnection.add_on_connection_unblocked_callback`
:param pika.frame.Method method_frame: with method_frame.method of type
`pika.spec.Connection.Blocked` or `pika.spec.Connection.Unblocked`
"""
self._callback = callback
self._method_frame = method_frame
def __repr__(self):
return '%s(callback=%s, frame=%s)' % (self.__class__.__name__,
self._callback,
self._method_frame)
def dispatch(self):
"""Dispatch the user's callback method"""
self._callback(self._method_frame)
class _ConnectionBlockedEvt(  # pylint: disable=R0903
        _ConnectionBlockedUnblockedEvtBase):
    """Represents a Connection.Blocked notification from RabbitMQ broker"""
    pass
class _ConnectionUnblockedEvt(  # pylint: disable=R0903
        _ConnectionBlockedUnblockedEvtBase):
    """Represents a Connection.Unblocked notification from RabbitMQ broker"""
    pass
class BlockingConnection(object):  # pylint: disable=R0902
    """The BlockingConnection creates a layer on top of Pika's asynchronous core
    providing methods that will block until their expected response has
    returned. Due to the asynchronous nature of the `Basic.Deliver` and
    `Basic.Return` calls from RabbitMQ to your application, you can still
    implement continuation-passing style asynchronous methods if you'd like to
    receive messages from RabbitMQ using
    :meth:`basic_consume <BlockingChannel.basic_consume>` or if you want to be
    notified of a delivery failure when using
    :meth:`basic_publish <BlockingChannel.basic_publish>` .

    For more information about communicating with the blocking_connection
    adapter, be sure to check out the
    :class:`BlockingChannel <BlockingChannel>` class which implements the
    :class:`Channel <pika.channel.Channel>` based communication for the
    blocking_connection adapter.
    """
    # Connection-opened callback args
    _OnOpenedArgs = namedtuple('BlockingConnection__OnOpenedArgs',
                               'connection')

    # Connection-establishment error callback args
    _OnOpenErrorArgs = namedtuple('BlockingConnection__OnOpenErrorArgs',
                                  'connection error')

    # Connection-closing callback args
    _OnClosedArgs = namedtuple('BlockingConnection__OnClosedArgs',
                               'connection reason_code reason_text')

    # Channel-opened callback args
    _OnChannelOpenedArgs = namedtuple(
        'BlockingConnection__OnChannelOpenedArgs',
        'channel')

    def __init__(self, parameters=None, _impl_class=None):
        """Create a new instance of the Connection object.

        :param pika.connection.Parameters parameters: Connection parameters
        :param _impl_class: for tests/debugging only; implementation class;
            None=default
        :raises RuntimeError:
        """
        # Used by the _acquire_event_dispatch decorator; when already greater
        # than 0, event dispatch is already acquired higher up the call stack
        self._event_dispatch_suspend_depth = 0
        # Connection-specific events that are ready for dispatch: _TimerEvt,
        # _ConnectionBlockedEvt, _ConnectionUnblockedEvt
        self._ready_events = deque()
        # Channel numbers of channels that are requesting a call to their
        # BlockingChannel._dispatch_events method; See
        # `_request_channel_dispatch`
        self._channels_pending_dispatch = set()
        # Receives on_open_callback args from Connection
        self._opened_result = _CallbackResult(self._OnOpenedArgs)
        # Receives on_open_error_callback args from Connection
        self._open_error_result = _CallbackResult(self._OnOpenErrorArgs)
        # Receives on_close_callback args from Connection
        self._closed_result = _CallbackResult(self._OnClosedArgs)
        # Set to True when user calls close() on the connection
        # NOTE: this is a workaround to detect socket error because
        # on_close_callback passes reason_code=0 when called due to socket error
        self._user_initiated_close = False
        impl_class = _impl_class or SelectConnection
        self._impl = impl_class(
            parameters=parameters,
            on_open_callback=self._opened_result.set_value_once,
            on_open_error_callback=self._open_error_result.set_value_once,
            on_close_callback=self._closed_result.set_value_once,
            stop_ioloop_on_close=False)
        self._impl.ioloop.activate_poller()
        # Block until the connection is open or fails to open
        self._process_io_for_connection_setup()

    def _cleanup(self):
        """Clean up members that might inhibit garbage collection"""
        self._impl.ioloop.deactivate_poller()
        self._ready_events.clear()
        self._opened_result.reset()
        self._open_error_result.reset()
        self._closed_result.reset()

    @contextlib.contextmanager
    def _acquire_event_dispatch(self):
        """ Context manager that controls access to event dispatcher for
        preventing reentrancy.

        The "as" value is True if the managed code block owns the event
        dispatcher and False if caller higher up in the call stack already owns
        it. Only managed code that gets ownership (got True) is permitted to
        dispatch
        """
        try:
            # __enter__ part
            self._event_dispatch_suspend_depth += 1
            yield self._event_dispatch_suspend_depth == 1
        finally:
            # __exit__ part
            self._event_dispatch_suspend_depth -= 1

    def _process_io_for_connection_setup(self):  # pylint: disable=C0103
        """ Perform follow-up processing for connection setup request: flush
        connection output and process input while waiting for connection-open
        or connection-error.

        :raises AMQPConnectionError: on connection open error
        """
        if not self._open_error_result.ready:
            self._flush_output(self._opened_result.is_ready,
                               self._open_error_result.is_ready)
        if self._open_error_result.ready:
            try:
                exception_or_message = self._open_error_result.value.error
                if isinstance(exception_or_message, Exception):
                    raise exception_or_message
                raise exceptions.AMQPConnectionError(exception_or_message)
            finally:
                self._cleanup()
        assert self._opened_result.ready
        assert self._opened_result.value.connection is self._impl

    def _flush_output(self, *waiters):
        """ Flush output and process input while waiting for any of the given
        callbacks to return true. The wait is aborted upon connection-close.
        Otherwise, processing continues until the output is flushed AND at least
        one of the callbacks returns true. If there are no callbacks, then
        processing ends when all output is flushed.

        :param waiters: sequence of zero or more callables taking no args and
                        returning true when it's time to stop processing.
                        Their results are OR'ed together.
        """
        if self.is_closed:
            raise exceptions.ConnectionClosed()
        # Conditions for terminating the processing loop:
        #   connection closed
        #         OR
        #   empty outbound buffer and no waiters
        #         OR
        #   empty outbound buffer and any waiter is ready
        is_done = (lambda:
                   self._closed_result.ready or
                   (not self._impl.outbound_buffer and
                    (not waiters or any(ready() for ready in waiters))))
        # Process I/O until our completion condition is satisfied
        while not is_done():
            self._impl.ioloop.poll()
            self._impl.ioloop.process_timeouts()
        if self._open_error_result.ready or self._closed_result.ready:
            try:
                if not self._user_initiated_close:
                    # Unexpected open failure or close: surface as exception
                    if self._open_error_result.ready:
                        maybe_exception = self._open_error_result.value.error
                        LOGGER.error('Connection open failed - %r',
                                     maybe_exception)
                        if isinstance(maybe_exception, Exception):
                            raise maybe_exception
                        else:
                            raise exceptions.ConnectionClosed(maybe_exception)
                    else:
                        result = self._closed_result.value
                        LOGGER.error('Connection close detected; result=%r',
                                     result)
                        raise exceptions.ConnectionClosed(result.reason_code,
                                                          result.reason_text)
                else:
                    LOGGER.info('Connection closed; result=%r',
                                self._closed_result.value)
            finally:
                self._cleanup()

    def _request_channel_dispatch(self, channel_number):
        """Called by BlockingChannel instances to request a call to their
        _dispatch_events method or to terminate `process_data_events`;
        BlockingConnection will honor these requests from a safe context.

        :param int channel_number: positive channel number to request a call
            to the channel's `_dispatch_events`; a negative channel number to
            request termination of `process_data_events`
        """
        self._channels_pending_dispatch.add(channel_number)

    def _dispatch_channel_events(self):
        """Invoke the `_dispatch_events` method on open channels that requested
        it
        """
        if not self._channels_pending_dispatch:
            return
        with self._acquire_event_dispatch() as dispatch_acquired:
            if not dispatch_acquired:
                # Nested dispatch or dispatch blocked higher in call stack
                return
            candidates = list(self._channels_pending_dispatch)
            self._channels_pending_dispatch.clear()
            for channel_number in candidates:
                if channel_number < 0:
                    # This was meant to terminate process_data_events
                    continue
                try:
                    impl_channel = self._impl._channels[channel_number]
                except KeyError:
                    # Channel already removed from the implementation
                    continue
                if impl_channel.is_open:
                    impl_channel._get_cookie()._dispatch_events()

    def _on_timer_ready(self, evt):
        """Handle expiry of a timer that was registered via `add_timeout`

        :param _TimerEvt evt:
        """
        self._ready_events.append(evt)

    def _on_connection_blocked(self, user_callback, method_frame):
        """Handle Connection.Blocked notification from RabbitMQ broker

        :param callable user_callback: callback_method passed to
            `add_on_connection_blocked_callback`
        :param pika.frame.Method method_frame: method frame having `method`
            member of type `pika.spec.Connection.Blocked`
        """
        self._ready_events.append(
            _ConnectionBlockedEvt(user_callback, method_frame))

    def _on_connection_unblocked(self, user_callback, method_frame):
        """Handle Connection.Unblocked notification from RabbitMQ broker

        :param callable user_callback: callback_method passed to
            `add_on_connection_unblocked_callback`
        :param pika.frame.Method method_frame: method frame having `method`
            member of type `pika.spec.Connection.Blocked`
        """
        self._ready_events.append(
            _ConnectionUnblockedEvt(user_callback, method_frame))

    def _dispatch_connection_events(self):
        """Dispatch ready connection events"""
        if not self._ready_events:
            return
        with self._acquire_event_dispatch() as dispatch_acquired:
            if not dispatch_acquired:
                # Nested dispatch or dispatch blocked higher in call stack
                return
            # Limit dispatch to the number of currently ready events to avoid
            # getting stuck in this loop
            for _ in compat.xrange(len(self._ready_events)):
                try:
                    evt = self._ready_events.popleft()
                except IndexError:
                    # Some events (e.g., timers) must have been cancelled
                    break
                evt.dispatch()

    def add_on_connection_blocked_callback(self,  # pylint: disable=C0103
                                           callback_method):
        """Add a callback to be notified when RabbitMQ has sent a
        `Connection.Blocked` frame indicating that RabbitMQ is low on
        resources. Publishers can use this to voluntarily suspend publishing,
        instead of relying on back pressure throttling. The callback
        will be passed the `Connection.Blocked` method frame.

        :param method callback_method: Callback to call on `Connection.Blocked`,
            having the signature callback_method(pika.frame.Method), where the
            method frame's `method` member is of type
            `pika.spec.Connection.Blocked`
        """
        self._impl.add_on_connection_blocked_callback(
            functools.partial(self._on_connection_blocked, callback_method))

    def add_on_connection_unblocked_callback(self,  # pylint: disable=C0103
                                             callback_method):
        """Add a callback to be notified when RabbitMQ has sent a
        `Connection.Unblocked` frame letting publishers know it's ok
        to start publishing again. The callback will be passed the
        `Connection.Unblocked` method frame.

        :param method callback_method: Callback to call on
            `Connection.Unblocked`, having the signature
            callback_method(pika.frame.Method), where the method frame's
            `method` member is of type `pika.spec.Connection.Unblocked`
        """
        self._impl.add_on_connection_unblocked_callback(
            functools.partial(self._on_connection_unblocked, callback_method))

    def add_timeout(self, deadline, callback_method):
        """Create a single-shot timer to fire after deadline seconds. Do not
        confuse with Tornado's timeout where you pass in the time you want to
        have your callback called. Only pass in the seconds until it's to be
        called.

        NOTE: the timer callbacks are dispatched only in the scope of
        specially-designated methods: see
        `BlockingConnection.process_data_events` and
        `BlockingChannel.start_consuming`.

        :param float deadline: The number of seconds to wait to call callback
        :param callable callback_method: The callback method with the signature
            callback_method()
        :returns: opaque timer id
        """
        if not callable(callback_method):
            raise ValueError(
                'callback_method parameter must be callable, but got %r'
                % (callback_method,))
        evt = _TimerEvt(callback=callback_method)
        timer_id = self._impl.add_timeout(
            deadline,
            functools.partial(self._on_timer_ready, evt))
        evt.timer_id = timer_id
        return timer_id

    def remove_timeout(self, timeout_id):
        """Remove a timer if it's still in the timeout stack

        :param timeout_id: The opaque timer id to remove
        """
        # Remove from the impl's timeout stack
        self._impl.remove_timeout(timeout_id)
        # Remove from ready events, if the timer fired already
        for i, evt in enumerate(self._ready_events):
            if isinstance(evt, _TimerEvt) and evt.timer_id == timeout_id:
                index_to_remove = i
                break
        else:
            # Not found
            return
        del self._ready_events[index_to_remove]

    def close(self, reply_code=200, reply_text='Normal shutdown'):
        """Disconnect from RabbitMQ. If there are any open channels, it will
        attempt to close them prior to fully disconnecting. Channels which
        have active consumers will attempt to send a Basic.Cancel to RabbitMQ
        to cleanly stop the delivery of messages prior to closing the channel.

        :param int reply_code: The code number for the close
        :param str reply_text: The text reason for the close
        """
        if self.is_closed:
            LOGGER.debug('Close called on closed connection (%s): %s',
                         reply_code, reply_text)
            return
        LOGGER.info('Closing connection (%s): %s', reply_code, reply_text)
        self._user_initiated_close = True
        # Close channels that remain opened
        for impl_channel in pika.compat.dictvalues(self._impl._channels):
            channel = impl_channel._get_cookie()
            if channel.is_open:
                channel.close(reply_code, reply_text)
        # Close the connection
        self._impl.close(reply_code, reply_text)
        self._flush_output(self._closed_result.is_ready)

    def process_data_events(self, time_limit=0):
        """Will make sure that data events are processed. Dispatches timer and
        channel callbacks if not called from the scope of BlockingConnection or
        BlockingChannel callback. Your app can block on this method.

        :param float time_limit: suggested upper bound on processing time in
            seconds. The actual blocking time depends on the granularity of the
            underlying ioloop. Zero means return as soon as possible. None means
            there is no limit on processing time and the function will block
            until I/O produces actionable events. Defaults to 0 for backward
            compatibility. This parameter is NEW in pika 0.10.0.
        """
        # Stop processing as soon as any dispatch work is pending
        common_terminator = lambda: bool(
            self._channels_pending_dispatch or self._ready_events)
        if time_limit is None:
            self._flush_output(common_terminator)
        else:
            with _IoloopTimerContext(time_limit, self._impl) as timer:
                self._flush_output(timer.is_ready, common_terminator)
        if self._ready_events:
            self._dispatch_connection_events()
        if self._channels_pending_dispatch:
            self._dispatch_channel_events()

    def sleep(self, duration):
        """A safer way to sleep than calling time.sleep() directly that would
        keep the adapter from ignoring frames sent from the broker. The
        connection will "sleep" or block the number of seconds specified in
        duration in small intervals.

        :param float duration: The time to sleep in seconds
        """
        assert duration >= 0, duration
        deadline = time.time() + duration
        time_limit = duration
        # Process events at least once
        while True:
            self.process_data_events(time_limit)
            time_limit = deadline - time.time()
            if time_limit <= 0:
                break

    def channel(self, channel_number=None):
        """Create a new channel with the next available channel number or pass
        in a channel number to use. Must be non-zero if you would like to
        specify but it is recommended that you let Pika manage the channel
        numbers.

        :rtype: pika.adapters.blocking_connection.BlockingChannel
        """
        with _CallbackResult(self._OnChannelOpenedArgs) as opened_args:
            impl_channel = self._impl.channel(
                on_open_callback=opened_args.set_value_once,
                channel_number=channel_number)
            # Create our proxy channel
            channel = BlockingChannel(impl_channel, self)
            # Link implementation channel with our proxy channel
            impl_channel._set_cookie(channel)
            # Drive I/O until Channel.Open-ok
            channel._flush_output(opened_args.is_ready)
        return channel

    def __enter__(self):
        # Prepare `with` context
        return self

    def __exit__(self, tp, value, traceback):
        # Close connection after `with` context
        self.close()

    #
    # Connections state properties
    #

    @property
    def is_closed(self):
        """
        Returns a boolean reporting the current connection state.
        """
        return self._impl.is_closed

    @property
    def is_closing(self):
        """
        Returns True if connection is in the process of closing due to
        client-initiated `close` request, but closing is not yet complete.
        """
        return self._impl.is_closing

    @property
    def is_open(self):
        """
        Returns a boolean reporting the current connection state.
        """
        return self._impl.is_open

    #
    # Properties that reflect server capabilities for the current connection
    #

    @property
    def basic_nack_supported(self):
        """Specifies if the server supports basic.nack on the active connection.

        :rtype: bool
        """
        return self._impl.basic_nack

    @property
    def consumer_cancel_notify_supported(self):  # pylint: disable=C0103
        """Specifies if the server supports consumer cancel notification on the
        active connection.

        :rtype: bool
        """
        return self._impl.consumer_cancel_notify

    @property
    def exchange_exchange_bindings_supported(self):  # pylint: disable=C0103
        """Specifies if the active connection supports exchange to exchange
        bindings.

        :rtype: bool
        """
        return self._impl.exchange_exchange_bindings

    @property
    def publisher_confirms_supported(self):
        """Specifies if the active connection can use publisher confirmations.

        :rtype: bool
        """
        return self._impl.publisher_confirms

    # Legacy property names for backward compatibility
    basic_nack = basic_nack_supported
    consumer_cancel_notify = consumer_cancel_notify_supported
    exchange_exchange_bindings = exchange_exchange_bindings_supported
    publisher_confirms = publisher_confirms_supported
class _ChannelPendingEvt(object):  # pylint: disable=R0903
    """Base class for BlockingChannel pending events (e.g., consumer delivery,
    consumer cancellation, and returned-message events).
    """
    pass
class _ConsumerDeliveryEvt(_ChannelPendingEvt):  # pylint: disable=R0903
    """A consumer message delivery (`Basic.Deliver`); carries the method,
    properties, and body of the delivered message.
    """
    __slots__ = ('method', 'properties', 'body')

    def __init__(self, method, properties, body):
        """
        :param spec.Basic.Deliver method: NOTE: consumer_tag and delivery_tag
          are valid only within source channel
        :param spec.BasicProperties properties: message properties
        :param body: message body; empty string if no body
        :type body: str or unicode
        """
        self.body = body
        self.properties = properties
        self.method = method
class _ConsumerCancellationEvt(_ChannelPendingEvt):  # pylint: disable=R0903
    """This event represents server-initiated consumer cancellation delivered to
    client via Basic.Cancel. After receiving Basic.Cancel, there will be no
    further deliveries for the consumer identified by `consumer_tag` in
    `Basic.Cancel`
    """
    # NOTE: was the bare string ('method_frame'), which Python happens to
    # accept as a single slot name; a one-element tuple matches the sibling
    # event classes and won't silently break if another slot is appended.
    __slots__ = ('method_frame',)

    def __init__(self, method_frame):
        """
        :param pika.frame.Method method_frame: method frame with method of type
            `spec.Basic.Cancel`
        """
        self.method_frame = method_frame

    def __repr__(self):
        return '%s(method_frame=%r)' % (self.__class__.__name__,
                                        self.method_frame)

    @property
    def method(self):
        """method of type spec.Basic.Cancel"""
        return self.method_frame.method
class _ReturnedMessageEvt(_ChannelPendingEvt):  # pylint: disable=R0903
    """This event represents a message returned by broker via `Basic.Return`"""
    __slots__ = ('callback', 'channel', 'method', 'properties', 'body')

    def __init__(self, callback, channel, method, properties, body):  # pylint: disable=R0913
        """
        :param callable callback: user's callback, having the signature
            callback(channel, method, properties, body), where
                channel: pika.Channel
                method: pika.spec.Basic.Return
                properties: pika.spec.BasicProperties
                body: str, unicode, or bytes (python 3.x)
        :param pika.Channel channel:
        :param pika.spec.Basic.Return method:
        :param pika.spec.BasicProperties properties:
        :param body: str, unicode, or bytes (python 3.x)
        """
        self.callback = callback
        self.channel = channel
        self.method = method
        self.properties = properties
        self.body = body

    def __repr__(self):
        # BUGFIX: format string previously ended with 'body=%.300r' (no
        # closing parenthesis), producing an unbalanced repr.
        return ('%s(callback=%r, channel=%r, method=%r, properties=%r, '
                'body=%.300r)') % (self.__class__.__name__, self.callback,
                                   self.channel, self.method, self.properties,
                                   self.body)

    def dispatch(self):
        """Dispatch user's callback"""
        self.callback(self.channel, self.method, self.properties, self.body)
class ReturnedMessage(object): # pylint: disable=R0903
    """A message that the broker returned via Basic.Return while the channel
    was in publish-acknowledgments mode.
    """
    __slots__ = ('method', 'properties', 'body')
    def __init__(self, method, properties, body):
        """Record the returned message's method frame, properties and payload.

        :param spec.Basic.Return method:
        :param spec.BasicProperties properties: message properties
        :param body: message body; empty string if no body
        :type body: str or unicode
        """
        self.method, self.properties, self.body = method, properties, body
class _ConsumerInfo(object):
"""Information about an active consumer"""
__slots__ = ('consumer_tag', 'no_ack', 'consumer_cb',
'alternate_event_sink', 'state')
# Consumer states
SETTING_UP = 1
ACTIVE = 2
TEARING_DOWN = 3
CANCELLED_BY_BROKER = 4
def __init__(self, consumer_tag, no_ack, consumer_cb=None,
alternate_event_sink=None):
"""
NOTE: exactly one of consumer_cb/alternate_event_sink musts be non-None.
:param str consumer_tag:
:param bool no_ack: the no-ack value for the consumer
:param callable consumer_cb: The function for dispatching messages to
user, having the signature:
consumer_callback(channel, method, properties, body)
channel: BlockingChannel
method: spec.Basic.Deliver
properties: spec.BasicProperties
body: str or unicode
:param callable alternate_event_sink: if specified, _ConsumerDeliveryEvt
and _ConsumerCancellationEvt objects will be diverted to this
callback instead of being deposited in the channel's
`_pending_events` container. Signature:
alternate_event_sink(evt)
"""
assert (consumer_cb is None) != (alternate_event_sink is None), (
'exactly one of consumer_cb/alternate_event_sink must be non-None',
consumer_cb, alternate_event_sink)
self.consumer_tag = consumer_tag
self.no_ack = no_ack
self.consumer_cb = consumer_cb
self.alternate_event_sink = alternate_event_sink
self.state = self.SETTING_UP
@property
def setting_up(self):
"""True if in SETTING_UP state"""
return self.state == self.SETTING_UP
@property
def active(self):
"""True if in ACTIVE state"""
return self.state == self.ACTIVE
@property
def tearing_down(self):
"""True if in TEARING_DOWN state"""
return self.state == self.TEARING_DOWN
@property
def cancelled_by_broker(self):
"""True if in CANCELLED_BY_BROKER state"""
return self.state == self.CANCELLED_BY_BROKER
class _QueueConsumerGeneratorInfo(object): # pylint: disable=R0903
"""Container for information about the active queue consumer generator """
__slots__ = ('params', 'consumer_tag', 'pending_events')
def __init__(self, params, consumer_tag):
"""
:params tuple params: a three-tuple (queue, no_ack, exclusive) that were
used to create the queue consumer
:param str consumer_tag: consumer tag
"""
self.params = params
self.consumer_tag = consumer_tag
#self.messages = deque()
# Holds pending events of types _ConsumerDeliveryEvt and
# _ConsumerCancellationEvt
self.pending_events = deque()
def __repr__(self):
return '%s(params=%r, consumer_tag=%r)' % (
self.__class__.__name__, self.params, self.consumer_tag)
class BlockingChannel(object): # pylint: disable=R0904,R0902
    """The BlockingChannel implements blocking semantics for most things that
    one would use callback-passing-style for with the
    :py:class:`~pika.channel.Channel` class. In addition,
    the `BlockingChannel` class implements a :term:`generator` that allows
    you to :doc:`consume messages </examples/blocking_consumer_generator>`
    without using callbacks.

    Example of creating a BlockingChannel::

        import pika

        # Create our connection object
        connection = pika.BlockingConnection()

        # The returned object will be a synchronous channel
        channel = connection.channel()

    """
    # Used as value_class with _CallbackResult for receiving Basic.GetOk args
    _RxMessageArgs = namedtuple(
        'BlockingChannel__RxMessageArgs',
        [
            'channel', # implementation pika.Channel instance
            'method', # Basic.GetOk
            'properties', # pika.spec.BasicProperties
            'body' # str, unicode, or bytes (python 3.x)
        ])
    # For use as value_class with any _CallbackResult that expects method_frame
    # as the only arg
    _MethodFrameCallbackResultArgs = namedtuple(
        'BlockingChannel__MethodFrameCallbackResultArgs',
        'method_frame')
    # Broker's basic-ack/basic-nack args when delivery confirmation is enabled;
    # may concern a single or multiple messages
    _OnMessageConfirmationReportArgs = namedtuple( # pylint: disable=C0103
        'BlockingChannel__OnMessageConfirmationReportArgs',
        'method_frame')
    # Holds the method frame of a broker-initiated Channel.Close; the frame's
    # method carries the broker's non-zero reply_code and the corresponding
    # reply_text error message.
    _OnChannelClosedByBrokerArgs = namedtuple(
        'BlockingChannel__OnChannelClosedByBrokerArgs',
        'method_frame')
    # For use as value_class with _CallbackResult expecting Channel.Flow
    # confirmation.
    _FlowOkCallbackResultArgs = namedtuple(
        'BlockingChannel__FlowOkCallbackResultArgs',
        'active' # True if broker will start or continue sending; False if not
    )
    # Key under which user "consumer cancelled by broker" callbacks are
    # registered with the impl channel's callback manager.
    _CONSUMER_CANCELLED_CB_KEY = 'blocking_channel_consumer_cancelled'
    def __init__(self, channel_impl, connection):
        """Create a new instance of the Channel

        :param channel_impl: Channel implementation object as returned from
            SelectConnection.channel()
        :param BlockingConnection connection: The connection object
        """
        self._impl = channel_impl
        self._connection = connection
        # A mapping of consumer tags to _ConsumerInfo for active consumers
        self._consumer_infos = dict()
        # Queue consumer generator generator info of type
        # _QueueConsumerGeneratorInfo created by BlockingChannel.consume
        self._queue_consumer_generator = None
        # Whether RabbitMQ delivery confirmation has been enabled
        self._delivery_confirmation = False
        # Receives message delivery confirmation report (Basic.ack or
        # Basic.nack) from broker when delivery confirmations are enabled
        self._message_confirmation_result = _CallbackResult(
            self._OnMessageConfirmationReportArgs)
        # deque of pending events: _ConsumerDeliveryEvt and
        # _ConsumerCancellationEvt objects that will be dispatched to user
        # callbacks via _dispatch_events
        self._pending_events = deque()
        # Holds a ReturnedMessage object representing a message received via
        # Basic.Return in publisher-acknowledgments mode.
        self._puback_return = None
        # Receives Basic.ConsumeOk reply from server
        self._basic_consume_ok_result = _CallbackResult()
        # Receives the broker-initiated Channel.Close method frame
        self._channel_closed_by_broker_result = _CallbackResult( # pylint: disable=C0103
            self._OnChannelClosedByBrokerArgs)
        # Receives args from Basic.GetEmpty response
        # http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get
        self._basic_getempty_result = _CallbackResult(
            self._MethodFrameCallbackResultArgs)
        # Wire the impl channel's events into this wrapper's CallbackResults
        self._impl.add_on_cancel_callback(self._on_consumer_cancelled_by_broker)
        self._impl.add_callback(
            self._basic_consume_ok_result.signal_once,
            replies=[pika.spec.Basic.ConsumeOk],
            one_shot=False)
        self._impl.add_callback(
            self._channel_closed_by_broker_result.set_value_once,
            replies=[pika.spec.Channel.Close],
            one_shot=True)
        self._impl.add_callback(
            self._basic_getempty_result.set_value_once,
            replies=[pika.spec.Basic.GetEmpty],
            one_shot=False)
        LOGGER.info("Created channel=%s", self.channel_number)
def _cleanup(self):
"""Clean up members that might inhibit garbage collection"""
self._message_confirmation_result.reset()
self._pending_events = deque()
self._consumer_infos = dict()
def __int__(self):
"""Return the channel object as its channel number
:rtype: int
"""
return self.channel_number
@property
def channel_number(self):
"""Channel number"""
return self._impl.channel_number
@property
def connection(self):
"""The channel's BlockingConnection instance"""
return self._connection
@property
def is_closed(self):
"""Returns True if the channel is closed.
:rtype: bool
"""
return self._impl.is_closed
@property
def is_closing(self):
"""Returns True if client-initiated closing of the channel is in
progress.
:rtype: bool
"""
return self._impl.is_closing
@property
def is_open(self):
"""Returns True if the channel is open.
:rtype: bool
"""
return self._impl.is_open
    # Fallback waiter used when _flush_output is called with no waiters: always
    # "ready", so processing stops as soon as all output is flushed
    _ALWAYS_READY_WAITERS = ((lambda: True), )
    def _flush_output(self, *waiters):
        """ Flush output and process input while waiting for any of the given
        callbacks to return true. The wait is aborted upon channel-close or
        connection-close.
        Otherwise, processing continues until the output is flushed AND at least
        one of the callbacks returns true. If there are no callbacks, then
        processing ends when all output is flushed.

        :param waiters: sequence of zero or more callables taking no args and
                        returning true when it's time to stop processing.
                        Their results are OR'ed together.
        :raises exceptions.ChannelClosed: if the channel is already closed, or
            if the broker force-closes the channel while waiting
        """
        if self.is_closed:
            raise exceptions.ChannelClosed()
        if not waiters:
            waiters = self._ALWAYS_READY_WAITERS
        # Also watch for broker-initiated Channel.Close while flushing, so a
        # forced close aborts the wait
        self._connection._flush_output(
            self._channel_closed_by_broker_result.is_ready,
            *waiters)
        if self._channel_closed_by_broker_result:
            # Channel was force-closed by broker
            self._cleanup()
            method = (
                self._channel_closed_by_broker_result.value.method_frame.method)
            raise exceptions.ChannelClosed(method.reply_code, method.reply_text)
def _on_puback_message_returned(self, channel, method, properties, body):
"""Called as the result of Basic.Return from broker in
publisher-acknowledgements mode. Saves the info as a ReturnedMessage
instance in self._puback_return.
:param pika.Channel channel: our self._impl channel
:param pika.spec.Basic.Return method:
:param pika.spec.BasicProperties properties: message properties
:param body: returned message body; empty string if no body
:type body: str, unicode
"""
assert channel is self._impl, (
channel.channel_number, self.channel_number)
assert isinstance(method, pika.spec.Basic.Return), method
assert isinstance(properties, pika.spec.BasicProperties), (
properties)
LOGGER.warn(
"Published message was returned: _delivery_confirmation=%s; "
"channel=%s; method=%r; properties=%r; body_size=%d; "
"body_prefix=%.255r", self._delivery_confirmation,
channel.channel_number, method, properties,
len(body) if body is not None else None, body)
self._puback_return = ReturnedMessage(method, properties, body)
def _add_pending_event(self, evt):
"""Append an event to the channel's list of events that are ready for
dispatch to user and signal our connection that this channel is ready
for event dispatch
:param _ChannelPendingEvt evt: an event derived from _ChannelPendingEvt
"""
self._pending_events.append(evt)
self.connection._request_channel_dispatch(self.channel_number)
def _on_consumer_cancelled_by_broker(self, # pylint: disable=C0103
method_frame):
"""Called by impl when broker cancels consumer via Basic.Cancel.
This is a RabbitMQ-specific feature. The circumstances include deletion
of queue being consumed as well as failure of a HA node responsible for
the queue being consumed.
:param pika.frame.Method method_frame: method frame with the
`spec.Basic.Cancel` method
"""
evt = _ConsumerCancellationEvt(method_frame)
consumer = self._consumer_infos[method_frame.method.consumer_tag]
# Don't interfere with client-initiated cancellation flow
if not consumer.tearing_down:
consumer.state = _ConsumerInfo.CANCELLED_BY_BROKER
if consumer.alternate_event_sink is not None:
consumer.alternate_event_sink(evt)
else:
self._add_pending_event(evt)
def _on_consumer_message_delivery(self, channel, # pylint: disable=W0613
method, properties, body):
"""Called by impl when a message is delivered for a consumer
:param Channel channel: The implementation channel object
:param spec.Basic.Deliver method:
:param pika.spec.BasicProperties properties: message properties
:param body: delivered message body; empty string if no body
:type body: str, unicode, or bytes (python 3.x)
"""
evt = _ConsumerDeliveryEvt(method, properties, body)
consumer = self._consumer_infos[method.consumer_tag]
if consumer.alternate_event_sink is not None:
consumer.alternate_event_sink(evt)
else:
self._add_pending_event(evt)
def _on_consumer_generator_event(self, evt):
"""Sink for the queue consumer generator's consumer events; append the
event to queue consumer generator's pending events buffer.
:param evt: an object of type _ConsumerDeliveryEvt or
_ConsumerCancellationEvt
"""
self._queue_consumer_generator.pending_events.append(evt)
# Schedule termination of connection.process_data_events using a
# negative channel number
self.connection._request_channel_dispatch(-self.channel_number)
def _cancel_all_consumers(self):
"""Cancel all consumers.
NOTE: pending non-ackable messages will be lost; pending ackable
messages will be rejected.
"""
if self._consumer_infos:
LOGGER.debug('Cancelling %i consumers', len(self._consumer_infos))
if self._queue_consumer_generator is not None:
# Cancel queue consumer generator
self.cancel()
# Cancel consumers created via basic_consume
for consumer_tag in pika.compat.dictkeys(self._consumer_infos):
self.basic_cancel(consumer_tag)
    def _dispatch_events(self):
        """Called by BlockingConnection to dispatch pending events.

        `BlockingChannel` schedules this callback via
        `BlockingConnection._request_channel_dispatch`
        """
        while self._pending_events:
            evt = self._pending_events.popleft()
            if type(evt) is _ConsumerDeliveryEvt:
                # Deliver the message to the consumer's user callback
                consumer_info = self._consumer_infos[evt.method.consumer_tag]
                consumer_info.consumer_cb(self, evt.method, evt.properties,
                                          evt.body)
            elif type(evt) is _ConsumerCancellationEvt:
                # Broker cancelled the consumer: drop its registration, then
                # notify any user callbacks registered via
                # add_on_cancel_callback
                del self._consumer_infos[evt.method_frame.method.consumer_tag]
                self._impl.callbacks.process(self.channel_number,
                                             self._CONSUMER_CANCELLED_CB_KEY,
                                             self,
                                             evt.method_frame)
            else:
                # e.g. _ReturnedMessageEvt dispatches itself
                evt.dispatch()
    def close(self, reply_code=0, reply_text="Normal Shutdown"):
        """Will invoke a clean shutdown of the channel with the AMQP Broker.

        :param int reply_code: The reply code to close the channel with
        :param str reply_text: The reply text to close the channel with
        """
        LOGGER.info('Channel.close(%s, %s)', reply_code, reply_text)
        # Cancel remaining consumers
        self._cancel_all_consumers()
        # Close the channel
        try:
            with _CallbackResult() as close_ok_result:
                self._impl.add_callback(callback=close_ok_result.signal_once,
                                        replies=[pika.spec.Channel.CloseOk],
                                        one_shot=True)
                self._impl.close(reply_code=reply_code, reply_text=reply_text)
                # Wait for broker's Channel.CloseOk
                self._flush_output(close_ok_result.is_ready)
        finally:
            # Release references even if the close handshake failed
            self._cleanup()
    def flow(self, active):
        """Turn Channel flow control off and on.

        NOTE: RabbitMQ doesn't support active=False; per
        https://www.rabbitmq.com/specification.html: "active=false is not
        supported by the server. Limiting prefetch with basic.qos provides much
        better control"

        For more information, please reference:
        http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow

        :param bool active: Turn flow on (True) or off (False)
        :returns: True if broker will start or continue sending; False if not
        :rtype: bool
        """
        with _CallbackResult(self._FlowOkCallbackResultArgs) as flow_ok_result:
            self._impl.flow(callback=flow_ok_result.set_value_once,
                            active=active)
            # Block until broker's Channel.FlowOk arrives
            self._flush_output(flow_ok_result.is_ready)
            return flow_ok_result.value.active
def add_on_cancel_callback(self, callback):
"""Pass a callback function that will be called when Basic.Cancel
is sent by the broker. The callback function should receive a method
frame parameter.
:param callable callback: a callable for handling broker's Basic.Cancel
notification with the call signature: callback(method_frame)
where method_frame is of type `pika.frame.Method` with method of
type `spec.Basic.Cancel`
"""
self._impl.callbacks.add(self.channel_number,
self._CONSUMER_CANCELLED_CB_KEY,
callback,
one_shot=False)
def add_on_return_callback(self, callback):
"""Pass a callback function that will be called when a published
message is rejected and returned by the server via `Basic.Return`.
:param callable callback: The method to call on callback with the
signature callback(channel, method, properties, body), where
channel: pika.Channel
method: pika.spec.Basic.Return
properties: pika.spec.BasicProperties
body: str, unicode, or bytes (python 3.x)
"""
self._impl.add_on_return_callback(
lambda _channel, method, properties, body: (
self._add_pending_event(
_ReturnedMessageEvt(
callback, self, method, properties, body))))
def basic_consume(self, # pylint: disable=R0913
consumer_callback,
queue,
no_ack=False,
exclusive=False,
consumer_tag=None,
arguments=None):
"""Sends the AMQP command Basic.Consume to the broker and binds messages
for the consumer_tag to the consumer callback. If you do not pass in
a consumer_tag, one will be automatically generated for you. Returns
the consumer tag.
NOTE: the consumer callbacks are dispatched only in the scope of
specially-designated methods: see
`BlockingConnection.process_data_events` and
`BlockingChannel.start_consuming`.
For more information about Basic.Consume, see:
http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume
:param callable consumer_callback: The function for dispatching messages
to user, having the signature:
consumer_callback(channel, method, properties, body)
channel: BlockingChannel
method: spec.Basic.Deliver
properties: spec.BasicProperties
body: str or unicode
:param queue: The queue to consume from
:type queue: str or unicode
:param bool no_ack: Tell the broker to not expect a response (i.e.,
no ack/nack)
:param bool exclusive: Don't allow other consumers on the queue
:param consumer_tag: You may specify your own consumer tag; if left
empty, a consumer tag will be generated automatically
:type consumer_tag: str or unicode
:param dict arguments: Custom key/value pair arguments for the consumer
:returns: consumer tag
:rtype: str
:raises pika.exceptions.DuplicateConsumerTag: if consumer with given
consumer_tag is already present.
"""
if not callable(consumer_callback):
raise ValueError('consumer callback must be callable; got %r'
% consumer_callback)
return self._basic_consume_impl(
queue=queue,
no_ack=no_ack,
exclusive=exclusive,
consumer_tag=consumer_tag,
arguments=arguments,
consumer_callback=consumer_callback)
    def _basic_consume_impl(self, # pylint: disable=R0913
                            queue,
                            no_ack,
                            exclusive,
                            consumer_tag,
                            arguments=None,
                            consumer_callback=None,
                            alternate_event_sink=None):
        """The low-level implementation used by `basic_consume` and `consume`.
        See `basic_consume` docstring for more info.

        NOTE: exactly one of consumer_callback/alternate_event_sink musts be
        non-None.

        This method has one additional parameter alternate_event_sink over the
        args described in `basic_consume`.

        :param callable alternate_event_sink: if specified, _ConsumerDeliveryEvt
            and _ConsumerCancellationEvt objects will be diverted to this
            callback instead of being deposited in the channel's
            `_pending_events` container. Signature:
                alternate_event_sink(evt)
        :raises pika.exceptions.DuplicateConsumerTag: if consumer with given
            consumer_tag is already present.
        """
        if (consumer_callback is None) == (alternate_event_sink is None):
            raise ValueError(
                ('exactly one of consumer_callback/alternate_event_sink must '
                 'be non-None', consumer_callback, alternate_event_sink))
        if not consumer_tag:
            # Need a consumer tag to register consumer info before sending
            # request to broker, because I/O might dispatch incoming messages
            # immediately following Basic.Consume-ok before _flush_output
            # returns
            consumer_tag = self._impl._generate_consumer_tag()
        if consumer_tag in self._consumer_infos:
            raise exceptions.DuplicateConsumerTag(consumer_tag)
        # Create new consumer
        self._consumer_infos[consumer_tag] = _ConsumerInfo(
            consumer_tag,
            no_ack=no_ack,
            consumer_cb=consumer_callback,
            alternate_event_sink=alternate_event_sink)
        try:
            with self._basic_consume_ok_result as ok_result:
                tag = self._impl.basic_consume(
                    consumer_callback=self._on_consumer_message_delivery,
                    queue=queue,
                    no_ack=no_ack,
                    exclusive=exclusive,
                    consumer_tag=consumer_tag,
                    arguments=arguments)
                assert tag == consumer_tag, (tag, consumer_tag)
                # Wait for broker's Basic.Consume-ok
                self._flush_output(ok_result.is_ready)
        except Exception:
            # If channel was closed, self._consumer_infos will be empty
            if consumer_tag in self._consumer_infos:
                del self._consumer_infos[consumer_tag]
            raise
        # NOTE: Consumer could get cancelled by broker immediately after opening
        # (e.g., queue getting deleted externally)
        if self._consumer_infos[consumer_tag].setting_up:
            self._consumer_infos[consumer_tag].state = _ConsumerInfo.ACTIVE
        return consumer_tag
    def basic_cancel(self, consumer_tag):
        """This method cancels a consumer. This does not affect already
        delivered messages, but it does mean the server will not send any more
        messages for that consumer. The client may receive an arbitrary number
        of messages in between sending the cancel method and receiving the
        cancel-ok reply.

        NOTE: When cancelling a no_ack=False consumer, this implementation
        automatically Nacks and suppresses any incoming messages that have not
        yet been dispatched to the consumer's callback. However, when cancelling
        a no_ack=True consumer, this method will return any pending messages
        that arrived before broker confirmed the cancellation.

        :param str consumer_tag: Identifier for the consumer; the result of
            passing a consumer_tag that was created on another channel is
            undefined (bad things will happen)
        :returns: (NEW IN pika 0.10.0) empty sequence for a no_ack=False
            consumer; for a no_ack=True consumer, returns a (possibly empty)
            sequence of pending messages that arrived before broker confirmed
            the cancellation (this is done instead of via consumer's callback in
            order to prevent reentrancy/recursion. Each message is four-tuple:
            (channel, method, properties, body)
                channel: BlockingChannel
                method: spec.Basic.Deliver
                properties: spec.BasicProperties
                body: str or unicode
        """
        try:
            consumer_info = self._consumer_infos[consumer_tag]
        except KeyError:
            # NOTE(review): Logger.warn is deprecated in favor of warning()
            LOGGER.warn("User is attempting to cancel an unknown consumer=%s; "
                        "already cancelled by user or broker?", consumer_tag)
            return []
        try:
            # Assertion failure here is most likely due to reentrance
            assert consumer_info.active or consumer_info.cancelled_by_broker, (
                consumer_info.state)
            # Assertion failure here signals disconnect between consumer state
            # in BlockingConnection and Connection
            assert (consumer_info.cancelled_by_broker or
                    consumer_tag in self._impl._consumers), consumer_tag
            no_ack = consumer_info.no_ack
            # Mark teardown so _on_consumer_cancelled_by_broker won't also
            # flip the state while our cancel is in flight
            consumer_info.state = _ConsumerInfo.TEARING_DOWN
            with _CallbackResult() as cancel_ok_result:
                # Nack pending messages for no_ack=False consumer
                if not no_ack:
                    pending_messages = self._remove_pending_deliveries(
                        consumer_tag)
                    if pending_messages:
                        # NOTE: we use impl's basic_reject to avoid the
                        # possibility of redelivery before basic_cancel takes
                        # control of nacking.
                        # NOTE: we can't use basic_nack with the multiple option
                        # to avoid nacking messages already held by our client.
                        for message in pending_messages:
                            self._impl.basic_reject(message.method.delivery_tag,
                                                    requeue=True)
                # Cancel the consumer; impl takes care of rejecting any
                # additional deliveries that arrive for a no_ack=False
                # consumer
                self._impl.basic_cancel(
                    callback=cancel_ok_result.signal_once,
                    consumer_tag=consumer_tag,
                    nowait=False)
                # Flush output and wait for Basic.Cancel-ok or
                # broker-initiated Basic.Cancel
                self._flush_output(
                    cancel_ok_result.is_ready,
                    lambda: consumer_tag not in self._impl._consumers)
            if no_ack:
                # Return pending messages for no_ack=True consumer
                return [
                    (evt.method, evt.properties, evt.body)
                    for evt in self._remove_pending_deliveries(consumer_tag)]
            else:
                # impl takes care of rejecting any incoming deliveries during
                # cancellation
                messages = self._remove_pending_deliveries(consumer_tag)
                assert not messages, messages
                return []
        finally:
            # NOTE: The entry could be purged if channel or connection closes
            if consumer_tag in self._consumer_infos:
                del self._consumer_infos[consumer_tag]
def _remove_pending_deliveries(self, consumer_tag):
"""Extract _ConsumerDeliveryEvt objects destined for the given consumer
from pending events, discarding the _ConsumerCancellationEvt, if any
:param str consumer_tag:
:returns: a (possibly empty) sequence of _ConsumerDeliveryEvt destined
for the given consumer tag
"""
remaining_events = deque()
unprocessed_messages = []
while self._pending_events:
evt = self._pending_events.popleft()
if type(evt) is _ConsumerDeliveryEvt:
if evt.method.consumer_tag == consumer_tag:
unprocessed_messages.append(evt)
continue
if type(evt) is _ConsumerCancellationEvt:
if evt.method_frame.method.consumer_tag == consumer_tag:
# A broker-initiated Basic.Cancel must have arrived
# before our cancel request completed
continue
remaining_events.append(evt)
self._pending_events = remaining_events
return unprocessed_messages
    def start_consuming(self):
        """Processes I/O events and dispatches timers and `basic_consume`
        callbacks until all consumers are cancelled.

        NOTE: this blocking function may not be called from the scope of a
        pika callback, because dispatching `basic_consume` callbacks from this
        context would constitute recursion.

        :raises pika.exceptions.RecursionError: if called from the scope of a
            `BlockingConnection` or `BlockingChannel` callback
        """
        # Check if called from the scope of an event dispatch callback
        with self.connection._acquire_event_dispatch() as dispatch_allowed:
            if not dispatch_allowed:
                raise exceptions.RecursionError(
                    'start_consuming may not be called from the scope of '
                    'another BlockingConnection or BlockingChannel callback')
        # Process events as long as consumers exist on this channel
        while self._consumer_infos:
            self.connection.process_data_events(time_limit=None)
def stop_consuming(self, consumer_tag=None):
""" Cancels all consumers, signalling the `start_consuming` loop to
exit.
NOTE: pending non-ackable messages will be lost; pending ackable
messages will be rejected.
"""
if consumer_tag:
self.basic_cancel(consumer_tag)
else:
self._cancel_all_consumers()
    def consume(self, queue, no_ack=False, # pylint: disable=R0913
                exclusive=False, arguments=None,
                inactivity_timeout=None):
        """Blocking consumption of a queue instead of via a callback. This
        method is a generator that yields each message as a tuple of method,
        properties, and body. The active generator iterator terminates when the
        consumer is cancelled by client or broker.

        Example:

            for method, properties, body in channel.consume('queue'):
                print body
                channel.basic_ack(method.delivery_tag)

        You should call `BlockingChannel.cancel()` when you escape out of the
        generator loop.

        If you don't cancel this consumer, then next call on the same channel
        to `consume()` with the exact same (queue, no_ack, exclusive) parameters
        will resume the existing consumer generator; however, calling with
        different parameters will result in an exception.

        :param queue: The queue name to consume
        :type queue: str or unicode
        :param bool no_ack: Tell the broker to not expect a ack/nack response
        :param bool exclusive: Don't allow other consumers on the queue
        :param dict arguments: Custom key/value pair arguments for the consumer
        :param float inactivity_timeout: if a number is given (in
            seconds), will cause the method to yield None after the given period
            of inactivity; this permits for pseudo-regular maintenance
            activities to be carried out by the user while waiting for messages
            to arrive. If None is given (default), then the method blocks until
            the next event arrives. NOTE that timing granularity is limited by
            the timer resolution of the underlying implementation.
            NEW in pika 0.10.0.
        :yields: tuple(spec.Basic.Deliver, spec.BasicProperties, str or unicode)
        :raises ValueError: if consumer-creation parameters don't match those
            of the existing queue consumer generator, if any.
            NEW in pika 0.10.0
        """
        params = (queue, no_ack, exclusive)
        if self._queue_consumer_generator is not None:
            # Resume the existing generator only if params match exactly
            if params != self._queue_consumer_generator.params:
                raise ValueError(
                    'Consume with different params not allowed on existing '
                    'queue consumer generator; previous params: %r; '
                    'new params: %r'
                    % (self._queue_consumer_generator.params,
                       (queue, no_ack, exclusive)))
        else:
            LOGGER.debug('Creating new queue consumer generator; params: %r',
                         params)
            # Need a consumer tag to register consumer info before sending
            # request to broker, because I/O might pick up incoming messages
            # in addition to Basic.Consume-ok
            consumer_tag = self._impl._generate_consumer_tag()
            self._queue_consumer_generator = _QueueConsumerGeneratorInfo(
                params,
                consumer_tag)
            try:
                self._basic_consume_impl(
                    queue=queue,
                    no_ack=no_ack,
                    exclusive=exclusive,
                    consumer_tag=consumer_tag,
                    arguments=arguments,
                    alternate_event_sink=self._on_consumer_generator_event)
            except Exception:
                # Consumer setup failed; discard the half-built generator info
                self._queue_consumer_generator = None
                raise
            LOGGER.info('Created new queue consumer generator %r',
                        self._queue_consumer_generator)
        while self._queue_consumer_generator is not None:
            if self._queue_consumer_generator.pending_events:
                evt = self._queue_consumer_generator.pending_events.popleft()
                if type(evt) is _ConsumerCancellationEvt:
                    # Consumer was cancelled by broker
                    self._queue_consumer_generator = None
                    break
                else:
                    yield (evt.method, evt.properties, evt.body)
                    continue
            # Wait for a message to arrive
            if inactivity_timeout is None:
                self.connection.process_data_events(time_limit=None)
                continue
            # Wait with inactivity timeout
            wait_start_time = time.time()
            wait_deadline = wait_start_time + inactivity_timeout
            delta = inactivity_timeout
            while (self._queue_consumer_generator is not None and
                   not self._queue_consumer_generator.pending_events):
                self.connection.process_data_events(time_limit=delta)
                if not self._queue_consumer_generator:
                    # Consumer was cancelled by client
                    break
                if self._queue_consumer_generator.pending_events:
                    # Got message(s)
                    break
                delta = wait_deadline - time.time()
                if delta <= 0.0:
                    # Signal inactivity timeout
                    yield None
                    break
def get_waiting_message_count(self):
"""Returns the number of messages that may be retrieved from the current
queue consumer generator via `BlockingChannel.consume` without blocking.
NEW in pika 0.10.0
:rtype: int
"""
if self._queue_consumer_generator is not None:
pending_events = self._queue_consumer_generator.pending_events
count = len(pending_events)
if count and type(pending_events[-1]) is _ConsumerCancellationEvt:
count -= 1
else:
count = 0
return count
    def cancel(self):
        """Cancel the queue consumer created by `BlockingChannel.consume`,
        rejecting all pending ackable messages.

        NOTE: If you're looking to cancel a consumer issued with
        BlockingChannel.basic_consume then you should call
        BlockingChannel.basic_cancel.

        :return int: The number of messages requeued by Basic.Nack.
            NEW in 0.10.0: returns 0
        """
        if self._queue_consumer_generator is None:
            LOGGER.warning('cancel: queue consumer generator is inactive '
                           '(already cancelled by client or broker?)')
            return 0
        try:
            _, no_ack, _ = self._queue_consumer_generator.params
            if not no_ack:
                # Reject messages held by queue consumer generator; NOTE: we
                # can't use basic_nack with the multiple option to avoid nacking
                # messages already held by our client.
                pending_events = self._queue_consumer_generator.pending_events
                # get_waiting_message_count excludes a trailing cancellation
                # event, so only actual deliveries get rejected here
                for _ in compat.xrange(self.get_waiting_message_count()):
                    evt = pending_events.popleft()
                    self._impl.basic_reject(evt.method.delivery_tag,
                                            requeue=True)
            self.basic_cancel(self._queue_consumer_generator.consumer_tag)
        finally:
            self._queue_consumer_generator = None
        # Return 0 for compatibility with legacy implementation; the number of
        # nacked messages is not meaningful since only messages consumed with
        # no_ack=False may be nacked, and those arriving after calling
        # basic_cancel will be rejected automatically by impl channel, so we'll
        # never know how many of those were nacked.
        return 0
def basic_ack(self, delivery_tag=0, multiple=False):
    """Acknowledge one or more delivered messages.

    Sent by the client to acknowledge messages delivered via the Deliver
    or Get-Ok methods; sent by the server to acknowledge messages
    published with the Publish method on a channel in confirm mode. The
    acknowledgement can cover a single message or a set of messages up to
    and including a specific message.

    :param int delivery_tag: The server-assigned delivery tag
    :param bool multiple: When True, the delivery tag is treated as "up to
        and including", so multiple messages are acknowledged with one
        call; when False, the tag refers to a single message. A True
        `multiple` combined with a zero delivery tag acknowledges all
        outstanding messages.
    """
    self._impl.basic_ack(delivery_tag=delivery_tag, multiple=multiple)
    self._flush_output()
def basic_nack(self, delivery_tag=None, multiple=False, requeue=True):
    """Reject one or more incoming messages (Basic.Nack).

    Useful for interrupting and cancelling large incoming messages, or
    for returning untreatable messages to their original queue.

    :param int delivery_tag: The server-assigned delivery tag
    :param bool multiple: When True, the delivery tag is treated as "up to
        and including", so multiple messages are rejected with one call;
        when False, the tag refers to a single message. A True `multiple`
        combined with a zero delivery tag covers all outstanding messages.
    :param bool requeue: When True the server attempts to requeue the
        message; when False (or when the requeue attempt fails) the
        messages are discarded or dead-lettered.
    """
    self._impl.basic_nack(delivery_tag=delivery_tag,
                          multiple=multiple,
                          requeue=requeue)
    self._flush_output()
def basic_get(self, queue=None, no_ack=False):
    """Get a single message from the AMQP broker. Returns a sequence with
    the method frame, message properties, and body.

    :param queue: Name of queue to get a message from
    :type queue: str or unicode
    :param bool no_ack: Tell the broker to not expect a reply
    :returns: a three-tuple; (None, None, None) if the queue was empty;
        otherwise (method, properties, body); NOTE: body may be None
    :rtype: (None, None, None)|(spec.Basic.GetOk,
                                spec.BasicProperties,
                                str or unicode or None)
    """
    # A leftover GetEmpty result would indicate a previous basic_get that
    # never completed its bookkeeping.
    assert not self._basic_getempty_result
    # NOTE: nested with for python 2.6 compatibility
    with _CallbackResult(self._RxMessageArgs) as get_ok_result:
        with self._basic_getempty_result:
            self._impl.basic_get(callback=get_ok_result.set_value_once,
                                 queue=queue,
                                 no_ack=no_ack)
            # Block until the broker answers with either GetOk (a message
            # arrived) or GetEmpty (queue had nothing to deliver).
            self._flush_output(get_ok_result.is_ready,
                               self._basic_getempty_result.is_ready)
            if get_ok_result:
                evt = get_ok_result.value
                return (evt.method, evt.properties, evt.body)
            else:
                assert self._basic_getempty_result, (
                    "wait completed without GetOk and GetEmpty")
                return None, None, None
def basic_publish(self, exchange, routing_key, body,  # pylint: disable=R0913
                  properties=None, mandatory=False, immediate=False):
    """Publish to the channel with the given exchange, routing key and
    body, reporting success as a boolean.

    This is the legacy BlockingChannel method for publishing. See also
    `BlockingChannel.publish` that provides more information about
    failures.

    For more information on basic_publish and what the parameters do, see:
    http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish

    NOTE: mandatory and immediate may be enabled even without delivery
    confirmation, but in the absence of delivery confirmation the
    synchronous implementation has no way to know how long to wait for
    the Basic.Return or lack thereof.

    :param exchange: The exchange to publish to
    :type exchange: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param body: The message body; empty string if no body
    :type body: str or unicode
    :param pika.spec.BasicProperties properties: message properties
    :param bool mandatory: The mandatory flag
    :param bool immediate: The immediate flag
    :returns: True if delivery confirmation is not enabled (NEW in pika
        0.10.0); otherwise returns False if the message could not be
        delivered (Basic.nack and/or Basic.Return) and True if the message
        was delivered (Basic.ack and no Basic.Return)
    """
    try:
        self.publish(exchange, routing_key, body, properties,
                     mandatory, immediate)
    except (exceptions.NackError, exceptions.UnroutableError):
        # publish() maps delivery failure to these exceptions; translate
        # back to the legacy boolean result.
        return False
    return True
def publish(self, exchange, routing_key, body,  # pylint: disable=R0913
            properties=None, mandatory=False, immediate=False):
    """Publish to the channel with the given exchange, routing key, and
    body. Unlike the legacy `BlockingChannel.basic_publish`, this method
    provides more information about failures via exceptions.

    For more information on basic_publish and what the parameters do, see:
    http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish

    NOTE: mandatory and immediate may be enabled even without delivery
    confirmation, but in the absence of delivery confirmation the
    synchronous implementation has no way to know how long to wait for
    the Basic.Return.

    :param exchange: The exchange to publish to
    :type exchange: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param body: The message body; empty string if no body
    :type body: str or unicode
    :param pika.spec.BasicProperties properties: message properties
    :param bool mandatory: The mandatory flag
    :param bool immediate: The immediate flag
    :raises UnroutableError: raised when a message published in
        publisher-acknowledgments mode (see
        `BlockingChannel.confirm_delivery`) is returned via `Basic.Return`
        followed by `Basic.Ack`.
    :raises NackError: raised when a message published in
        publisher-acknowledgements mode is Nack'ed by the broker. See
        `BlockingChannel.confirm_delivery`.
    """
    if self._delivery_confirmation:
        # In publisher-acknowledgments mode
        with self._message_confirmation_result:
            self._impl.basic_publish(exchange=exchange,
                                     routing_key=routing_key,
                                     body=body,
                                     properties=properties,
                                     mandatory=mandatory,
                                     immediate=immediate)
            # Block until the broker confirms (Basic.Ack or Basic.Nack).
            self._flush_output(self._message_confirmation_result.is_ready)
            conf_method = (self._message_confirmation_result.value
                           .method_frame
                           .method)
            if isinstance(conf_method, pika.spec.Basic.Nack):
                # Broker was unable to process the message due to internal
                # error.
                # FIX: Logger.warn is deprecated; use Logger.warning for
                # consistency with the rest of this module.
                LOGGER.warning(
                    "Message was Nack'ed by broker: nack=%r; channel=%s; "
                    "exchange=%s; routing_key=%s; mandatory=%r; "
                    "immediate=%r", conf_method, self.channel_number,
                    exchange, routing_key, mandatory, immediate)
                if self._puback_return is not None:
                    returned_messages = [self._puback_return]
                    self._puback_return = None
                else:
                    returned_messages = []
                raise exceptions.NackError(returned_messages)
            else:
                assert isinstance(conf_method, pika.spec.Basic.Ack), (
                    conf_method)
                if self._puback_return is not None:
                    # Unroutable message was returned
                    messages = [self._puback_return]
                    self._puback_return = None
                    raise exceptions.UnroutableError(messages)
    else:
        # In non-publisher-acknowledgments mode
        self._impl.basic_publish(exchange=exchange,
                                 routing_key=routing_key,
                                 body=body,
                                 properties=properties,
                                 mandatory=mandatory,
                                 immediate=immediate)
        self._flush_output()
def basic_qos(self, prefetch_size=0, prefetch_count=0, all_channels=False):
    """Specify quality of service for this channel or the whole connection.

    The client can request that messages be sent in advance so that when
    it finishes processing a message, the following message is already
    held locally rather than needing to be sent down the channel;
    prefetching gives a performance improvement.

    :param int prefetch_size: prefetch window size in octets; the server
        sends a message in advance if it is equal to or smaller in size
        than the available prefetch size (and also falls into other
        prefetch limits). Zero means "no specific limit", though other
        prefetch limits may still apply. Ignored if the no-ack option is
        set on the consumer.
    :param int prefetch_count: prefetch window in whole messages; may be
        combined with prefetch_size, in which case a message is sent in
        advance only if both windows (and the channel- and
        connection-level limits) allow it. Ignored if the no-ack option
        is set on the consumer.
    :param bool all_channels: apply the QoS to all channels on the
        connection rather than only this one
    """
    with _CallbackResult() as qos_ok:
        self._impl.basic_qos(callback=qos_ok.signal_once,
                             prefetch_size=prefetch_size,
                             prefetch_count=prefetch_count,
                             all_channels=all_channels)
        # Block until Basic.Qos-ok is received.
        self._flush_output(qos_ok.is_ready)
def basic_recover(self, requeue=False):
    """Ask the server to redeliver all unacknowledged messages on this
    channel (synchronous Basic.Recover); zero or more messages may be
    redelivered. This method replaces the asynchronous Recover.

    :param bool requeue: If False, the message will be redelivered to the
        original recipient. If True, the server will attempt to requeue
        the message, potentially then delivering it to an alternative
        subscriber.
    """
    with _CallbackResult() as recover_ok:
        self._impl.basic_recover(callback=recover_ok.signal_once,
                                 requeue=requeue)
        # Block until Basic.Recover-ok is received.
        self._flush_output(recover_ok.is_ready)
def basic_reject(self, delivery_tag=None, requeue=True):
    """Reject a single incoming message (Basic.Reject).

    Allows a client to interrupt and cancel large incoming messages, or
    to return untreatable messages to their original queue.

    :param int delivery_tag: The server-assigned delivery tag
    :param bool requeue: When True the server attempts to requeue the
        message; when False (or when the requeue attempt fails) the
        messages are discarded or dead-lettered.
    """
    self._impl.basic_reject(delivery_tag=delivery_tag, requeue=requeue)
    self._flush_output()
def confirm_delivery(self):
    """Turn on RabbitMQ-proprietary Confirm mode in the channel.

    For more information see:
        http://www.rabbitmq.com/extensions.html#confirms
    """
    if self._delivery_confirmation:
        # Idempotent guard: enabling twice would register duplicate
        # callbacks, so just log and bail out.
        LOGGER.error('confirm_delivery: confirmation was already enabled '
                     'on channel=%s', self.channel_number)
        return
    with _CallbackResult() as select_ok_result:
        # Register for Confirm.SelectOk BEFORE issuing Confirm.Select so
        # the reply cannot be missed.
        self._impl.add_callback(callback=select_ok_result.signal_once,
                                replies=[pika.spec.Confirm.SelectOk],
                                one_shot=True)
        self._impl.confirm_delivery(
            callback=self._message_confirmation_result.set_value_once,
            nowait=False)
        self._flush_output(select_ok_result.is_ready)
    self._delivery_confirmation = True
    # Unroutable messages returned after this point will be in the context
    # of publisher acknowledgments
    self._impl.add_on_return_callback(self._on_puback_message_returned)
def exchange_declare(self, exchange=None,  # pylint: disable=R0913
                     exchange_type='direct', passive=False, durable=False,
                     auto_delete=False, internal=False,
                     arguments=None, **kwargs):
    """This method creates an exchange if it does not already exist, and if
    the exchange exists, verifies that it is of the correct and expected
    class.

    If passive set, the server will reply with Declare-Ok if the exchange
    already exists with the same name, and raise an error if not and if the
    exchange does not already exist, the server MUST raise a channel
    exception with reply code 404 (not found).

    :param exchange: The exchange name consists of a non-empty sequence of
        these characters: letters, digits, hyphen, underscore, period, or
        colon.
    :type exchange: str or unicode
    :param str exchange_type: The exchange type to use
    :param bool passive: Perform a declare or just check to see if it exists
    :param bool durable: Survive a reboot of RabbitMQ
    :param bool auto_delete: Remove when no more queues are bound to it
    :param bool internal: Can only be published to by other exchanges
    :param dict arguments: Custom key/value pair arguments for the exchange
    :param str type: via kwargs: the deprecated exchange type parameter
    :returns: Method frame from the Exchange.Declare-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Exchange.DeclareOk`
    :raises TypeError: if a keyword argument other than the deprecated
        `type` is passed
    """
    # FIX: validate explicitly instead of `assert` (asserts are stripped
    # under `python -O`), and reject misspelled kwargs with a clear
    # TypeError instead of the confusing KeyError the old
    # `kwargs["type"]` lookup produced.
    if kwargs and (len(kwargs) != 1 or 'type' not in kwargs):
        raise TypeError(
            'exchange_declare() got unexpected keyword argument(s) %r'
            % (sorted(kwargs),))

    with _CallbackResult(
            self._MethodFrameCallbackResultArgs) as declare_ok_result:
        self._impl.exchange_declare(
            callback=declare_ok_result.set_value_once,
            exchange=exchange,
            exchange_type=exchange_type,
            passive=passive,
            durable=durable,
            auto_delete=auto_delete,
            internal=internal,
            nowait=False,
            arguments=arguments,
            type=kwargs['type'] if kwargs else None)
        # Block until Exchange.Declare-ok is received.
        self._flush_output(declare_ok_result.is_ready)
        return declare_ok_result.value.method_frame
def exchange_delete(self, exchange=None, if_unused=False):
    """Remove the named exchange from the broker.

    :param exchange: The exchange name
    :type exchange: str or unicode
    :param bool if_unused: only delete if the exchange is unused
    :returns: Method frame from the Exchange.Delete-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Exchange.DeleteOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as delete_ok:
        self._impl.exchange_delete(callback=delete_ok.set_value_once,
                                   exchange=exchange,
                                   if_unused=if_unused,
                                   nowait=False)
        # Block until Exchange.Delete-ok is received.
        self._flush_output(delete_ok.is_ready)
        return delete_ok.value.method_frame
def exchange_bind(self, destination=None, source=None, routing_key='',
                  arguments=None):
    """Bind an exchange to another exchange.

    :param destination: The destination exchange to bind
    :type destination: str or unicode
    :param source: The source exchange to bind to
    :type source: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param dict arguments: Custom key/value pair arguments for the binding
    :returns: Method frame from the Exchange.Bind-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Exchange.BindOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as bind_ok:
        self._impl.exchange_bind(callback=bind_ok.set_value_once,
                                 destination=destination,
                                 source=source,
                                 routing_key=routing_key,
                                 nowait=False,
                                 arguments=arguments)
        # Block until Exchange.Bind-ok is received.
        self._flush_output(bind_ok.is_ready)
        return bind_ok.value.method_frame
def exchange_unbind(self, destination=None, source=None, routing_key='',
                    arguments=None):
    """Unbind an exchange from another exchange.

    :param destination: The destination exchange to unbind
    :type destination: str or unicode
    :param source: The source exchange to unbind from
    :type source: str or unicode
    :param routing_key: The routing key to unbind
    :type routing_key: str or unicode
    :param dict arguments: Custom key/value pair arguments for the binding
    :returns: Method frame from the Exchange.Unbind-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Exchange.UnbindOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as unbind_ok:
        self._impl.exchange_unbind(callback=unbind_ok.set_value_once,
                                   destination=destination,
                                   source=source,
                                   routing_key=routing_key,
                                   nowait=False,
                                   arguments=arguments)
        # Block until Exchange.Unbind-ok is received.
        self._flush_output(unbind_ok.is_ready)
        return unbind_ok.value.method_frame
def queue_declare(self, queue='', passive=False, durable=False,  # pylint: disable=R0913
                  exclusive=False, auto_delete=False,
                  arguments=None):
    """Declare a queue, creating it if needed.

    This method creates or checks a queue. When creating a new queue the
    client can specify various properties that control the durability of
    the queue and its contents, and the level of sharing for the queue.

    Leave the queue name empty for a auto-named queue in RabbitMQ.

    :param queue: The queue name; if empty string, the broker will create
        a unique queue name
    :type queue: str or unicode
    :param bool passive: Only check to see if the queue exists
    :param bool durable: Survive reboots of the broker
    :param bool exclusive: Only allow access by the current connection
    :param bool auto_delete: Delete after consumer cancels or disconnects
    :param dict arguments: Custom key/value arguments for the queue
    :returns: Method frame from the Queue.Declare-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Queue.DeclareOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as result:
        self._impl.queue_declare(callback=result.set_value_once,
                                 queue=queue,
                                 passive=passive,
                                 durable=durable,
                                 exclusive=exclusive,
                                 auto_delete=auto_delete,
                                 nowait=False,
                                 arguments=arguments)
        # Block until Queue.Declare-ok is received.
        self._flush_output(result.is_ready)
        return result.value.method_frame
def queue_delete(self, queue='', if_unused=False, if_empty=False):
    """Delete a queue from the broker.

    :param queue: The queue to delete
    :type queue: str or unicode
    :param bool if_unused: only delete if it's unused
    :param bool if_empty: only delete if the queue is empty
    :returns: Method frame from the Queue.Delete-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Queue.DeleteOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as result:
        self._impl.queue_delete(callback=result.set_value_once,
                                queue=queue,
                                if_unused=if_unused,
                                if_empty=if_empty,
                                nowait=False)
        # Block until Queue.Delete-ok is received.
        self._flush_output(result.is_ready)
        return result.value.method_frame
def queue_purge(self, queue=''):
    """Purge all of the messages from the specified queue.

    :param queue: The queue to purge
    :type queue: str or unicode
    :returns: Method frame from the Queue.Purge-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Queue.PurgeOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as purge_ok:
        self._impl.queue_purge(callback=purge_ok.set_value_once,
                               queue=queue,
                               nowait=False)
        # Block until Queue.Purge-ok is received.
        self._flush_output(purge_ok.is_ready)
        return purge_ok.value.method_frame
def queue_bind(self, queue, exchange, routing_key=None,
               arguments=None):
    """Bind the queue to the specified exchange.

    :param queue: The queue to bind to the exchange
    :type queue: str or unicode
    :param exchange: The source exchange to bind to
    :type exchange: str or unicode
    :param routing_key: The routing key to bind on
    :type routing_key: str or unicode
    :param dict arguments: Custom key/value pair arguments for the binding
    :returns: Method frame from the Queue.Bind-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Queue.BindOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as result:
        self._impl.queue_bind(callback=result.set_value_once,
                              queue=queue,
                              exchange=exchange,
                              routing_key=routing_key,
                              nowait=False,
                              arguments=arguments)
        # Block until Queue.Bind-ok is received.
        self._flush_output(result.is_ready)
        return result.value.method_frame
def queue_unbind(self, queue='', exchange=None, routing_key=None,
                 arguments=None):
    """Unbind a queue from an exchange.

    :param queue: The queue to unbind from the exchange
    :type queue: str or unicode
    :param exchange: The source exchange to bind from
    :type exchange: str or unicode
    :param routing_key: The routing key to unbind
    :type routing_key: str or unicode
    :param dict arguments: Custom key/value pair arguments for the binding
    :returns: Method frame from the Queue.Unbind-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Queue.UnbindOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as result:
        self._impl.queue_unbind(callback=result.set_value_once,
                                queue=queue,
                                exchange=exchange,
                                routing_key=routing_key,
                                arguments=arguments)
        # Block until Queue.Unbind-ok is received.
        self._flush_output(result.is_ready)
        return result.value.method_frame
def tx_select(self):
    """Put the channel into standard transaction mode (Tx.Select).

    The client must use this method at least once on a channel before
    using the Commit or Rollback methods.

    :returns: Method frame from the Tx.Select-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Tx.SelectOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as select_ok:
        self._impl.tx_select(select_ok.set_value_once)
        # Block until Tx.Select-ok is received.
        self._flush_output(select_ok.is_ready)
        return select_ok.value.method_frame
def tx_commit(self):
    """Commit the current transaction (Tx.Commit).

    :returns: Method frame from the Tx.Commit-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Tx.CommitOk`
    """
    with _CallbackResult(self._MethodFrameCallbackResultArgs) as commit_ok:
        self._impl.tx_commit(commit_ok.set_value_once)
        # Block until Tx.Commit-ok is received.
        self._flush_output(commit_ok.is_ready)
        return commit_ok.value.method_frame
def tx_rollback(self):
    """Rollback a transaction.

    :returns: Method frame from the Tx.Rollback-ok response
    :rtype: `pika.frame.Method` having `method` attribute of type
        `spec.Tx.RollbackOk`
    """
    with _CallbackResult(
            self._MethodFrameCallbackResultArgs) as rollback_ok_result:
        self._impl.tx_rollback(rollback_ok_result.set_value_once)
        # Block until Tx.Rollback-ok is received.
        self._flush_output(rollback_ok_result.is_ready)
        return rollback_ok_result.value.method_frame
|
{
"content_hash": "037dd796befdd69db56d9b624fd7a61a",
"timestamp": "",
"source": "github",
"line_count": 2498,
"max_line_length": 93,
"avg_line_length": 40.848678943154525,
"alnum_prop": 0.6040670325362603,
"repo_name": "knowsis/pika",
"id": "b83ffa165871b8b082c146645585168498c61c38",
"size": "102040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pika/adapters/blocking_connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "761321"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import crits.picture_fetcher as picture_fetcher
import os
import urllib
import StringIO
import Image
from django.test import TestCase
from django.core.urlresolvers import reverse
from mock import Mock
class TestPictureFetcher(TestCase):
    """Exercises picture_fetcher.fetch_thumbnail with urllib and the
    image class replaced by mocks, driven by a canned search-result page.
    """

    def _read_test_page(self):
        """Return the canned Google image-search response fixture."""
        # FIX: renamed local from 'file' to avoid shadowing the builtin.
        path = os.path.join(os.path.dirname(__file__),
                            'picture_query_result')
        with open(path, 'r') as fp:
            return fp.read()

    def setUp(self):
        self.mock = Mock()
        self.request = self.mock.urlopen.return_value
        self.request.read.return_value = self._read_test_page()
        self.image_class = Mock()
        self.image_object = self.image_class.open.return_value
        # Stub out square_image; the original is restored in tearDown.
        self.old_square = picture_fetcher.square_image
        m = Mock()
        m.return_value = self.image_object
        picture_fetcher.square_image = m
        self.result = picture_fetcher.fetch_thumbnail(
            'test query', urllib=self.mock, image_class=self.image_class
        )

    def tearDown(self):
        picture_fetcher.square_image = self.old_square

    def test_should_return_thumbnail(self):
        self.assertEqual(self.image_object, self.result)

    def test_should_call_mock_with_correct_url(self):
        arg_list = self.mock.urlopen.call_args_list
        url = \
            'http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%s' % (
                urllib.quote('test query')
            )
        # FIX: any() replaces the len(filter(...)) > 0 idiom.
        self.assertTrue(any(a[0][0] == url for a in arg_list))

    def test_should_call_read(self):
        self.assertTrue(self.request.read.called)

    def test_should_call_urlopen_with_image_url(self):
        arg_list = self.mock.urlopen.call_args_list
        url = \
            'http://www.sswug.org/docimages/303570/MySQL%20SQLDataSource/test%20query.jpg'
        self.assertTrue(any(a[0][0] == url for a in arg_list))

    def test_should_call_open_on_image_with_string_io(self):
        args, kwds = self.image_class.open.call_args
        self.assertTrue(isinstance(args[0], StringIO.StringIO))

    def test_should_call_thumbnail(self):
        self.assertTrue(self.image_object.thumbnail.called)
class TestPictureStorer(TestCase):
    """Verifies that download_thumbnail saves the fetched image to the
    public downloads directory and returns its public URL.
    """

    def setUp(self):
        # Replace fetch_thumbnail with a stub returning a real small
        # image; the original function is restored in tearDown.
        self.old_fetcher = picture_fetcher.fetch_thumbnail
        self.fetch_mock = Mock()
        self.fetch_mock.return_value = Image.new('RGB', (10, 10))
        picture_fetcher.fetch_thumbnail = self.fetch_mock
        self.path = os.path.dirname(__file__) + \
            '/../../public/downloads/the_thumbnail.png'
        self.result = picture_fetcher.download_thumbnail(
            'the thumbnail'
        )

    def tearDown(self):
        picture_fetcher.fetch_thumbnail = self.old_fetcher

    def test_should_save_the_image(self):
        self.assertTrue(os.path.exists(self.path))

    def test_should_return_the_url_to_the_image(self):
        expected = reverse('public', args=('downloads/the_thumbnail.png',))
        self.assertEqual(expected, self.result)
|
{
"content_hash": "28a1ac1e83ff0599dfd8834b38331f1a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 83,
"avg_line_length": 34.168539325842694,
"alnum_prop": 0.6395922393949359,
"repo_name": "mop/twit-miner",
"id": "5edda2487677280cacce00fd56f10aa0b210e0e3",
"size": "3041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twit_miner/crits/tests/test_picture_fetcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1856"
},
{
"name": "Python",
"bytes": "81506"
}
],
"symlink_target": ""
}
|
# Sphinx build configuration for the "RC3E - Node Software" documentation.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE: 'copyright' intentionally shadows the builtin; Sphinx requires
# this exact variable name.
project = 'RC3E - Node Software'
copyright = '2016, Oliver Knodel, Patrick Lehmann'
author = 'Oliver Knodel, Patrick Lehmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# rst_prolog is prepended to every reST source file; it defines the |br|
# substitution used for forced HTML line breaks.
rst_prolog = """\
.. |br| raw:: html
<br />
"""
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'RC3E - Node Software v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'RC3E-NodeSoftwaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RC3E-NodeSoftware.tex', 'RC3E - Node Software Documentation',
'Oliver Knodel, Patrick Lehmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rc3e-nodesoftware', 'RC3E - Node Software Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RC3E-NodeSoftware', 'RC3E - Node Software Documentation',
author, 'RC3E-NodeSoftware', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Uses the named-key form ({'name': (target_url, inventory)}); the legacy
# bare-URL key form is deprecated and was removed in Sphinx 8.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
|
{
"content_hash": "bdc41ae57813c5a57501dd6553d7a9d9",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 32.69415807560137,
"alnum_prop": 0.7044355686356948,
"repo_name": "VLSI-EDA/rc3e-node",
"id": "ea4e9857ceb6fc29676ed87c12e77ad04f8286e6",
"size": "9970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from codecs import open
from os import path, system
import sys
import subprocess
from setuptools.command.install import install
import pkgutil
# Package version, also used to build the tarball download URL below.
__version__ = '0.0.10'
# Directory containing this setup.py; file reads below are relative to it.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# get the dependencies and installs (one requirement per line)
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
class XPDFInstall(install):
    """Custom setuptools install command that ensures xpdf's ``pdftotext``
    binary is present, downloading the xpdf command-line tools when missing.

    On any failure the error is printed with manual-install instructions;
    note that in that case the normal install step is skipped (``else``
    branch), preserving the original behavior.
    """

    def run(self):
        try:
            if path.isfile('/usr/local/bin/pdftotext'):
                print("Detected xpdf library.")
            else:
                print("Did not detect xpdf library. Now attempting to install...")
                # BUG FIX: the Linux command was prefixed with "sh -c ", which
                # made `cd /tmp/` run in a throwaway subshell so the downloads
                # landed in the current directory. Both platforms now use the
                # same `cd && wget && tar && cp` chain run by os.system's shell.
                if sys.platform.startswith('linux'):
                    bash_instructions = "cd /tmp/ && wget http://www.xpdfreader.com/dl/xpdf-tools-linux-4.00.tar.gz && tar -xvzf xpdf-tools-linux-4.00.tar.gz && sudo cp xpdf-tools-linux-4.00/bin64/* /usr/local/bin && sudo cp xpdf-tools-linux-4.00/doc/sample-xpdfrc /usr/local/etc/xpdfrc"
                elif sys.platform.startswith('darwin'):
                    bash_instructions = "cd /tmp/ && wget http://www.xpdfreader.com/dl/xpdf-tools-mac-4.00.tar.gz && tar -xvzf xpdf-tools-mac-4.00.tar.gz && cp xpdf-tools-mac-4.00/bin64/* /usr/local/bin && cp xpdf-tools-mac-4.00/doc/sample-xpdfrc /usr/local/etc/xpdfrc"
                else:
                    # Previously an unsupported platform raised a confusing
                    # NameError here; fail with an explicit message instead
                    # (still caught and reported by the handler below).
                    raise OSError(
                        "Unsupported platform for automatic xpdf install: %s" % sys.platform)
                # SECURITY NOTE(review): downloads over plain http and runs
                # through a shell; consider https + checksum verification.
                system(bash_instructions)
        except Exception as e:
            print(e)
            print("Error installing xpdf. Please follow custom installation instructions at: https://github.com/ecatkins/xpdf_python.")
        else:
            install.run(self)
# Plain requirements go to install_requires; VCS entries ("git+...") are
# routed to dependency_links with the "git+" prefix stripped.
install_requires = [req.strip() for req in all_reqs if 'git+' not in req]
dependency_links = [req.strip().replace('git+', '') for req in all_reqs if req.startswith('git+')]
# Package definition; XPDFInstall (defined above) hooks the install step to
# fetch the xpdf binaries first.
setup(
    name='xpdf_python',
    version=__version__,
    description='Python wrapper for xpdf',
    long_description=long_description,
    url='https://github.com/ecatkins/xpdf_python',
    download_url='https://github.com/ecatkins/xpdf_python/tarball/' + __version__,
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    author='Edward Atkins',
    install_requires=install_requires,
    dependency_links=dependency_links,
    author_email='ecatkins@gmail.com',
    # run custom code
    # NOTE(review): ships the platform install scripts with the package;
    # verify the 'install_xpdf' package/key actually exists in this repo.
    package_data = {
        'install_xpdf':['install_xpdf/mac_install.sh','install_xpdf/linux_install.sh']
    },
    cmdclass={'install': XPDFInstall},
)
|
{
"content_hash": "b5d28cf81bcbeb56345da7aa0d7c6dab",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 293,
"avg_line_length": 39.91891891891892,
"alnum_prop": 0.6364251861882194,
"repo_name": "ecatkins/xpdf_python",
"id": "b3383b0bae83ac180a31505e2ba4096da237f3d9",
"size": "2954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6139"
},
{
"name": "Shell",
"bytes": "943"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import unittest
from textwrap import dedent
from pants.engine import parser
from pants.engine.objects import Resolvable
from pants_test.engine.examples import parsers
class Bob(object):
    """A duck-typed Serializable with an ``==`` suitable for ease of testing.

    Equality deliberately ignores the ``type_alias`` key so objects parsed
    via different aliases still compare equal.
    """

    def __init__(self, **kwargs):
        self._kwargs = kwargs

    def _asdict(self):
        return self._kwargs

    def _key(self):
        # Drop the alias bookkeeping before comparing.
        return {key: value for key, value in self._kwargs.items() if key != 'type_alias'}

    def __eq__(self, other):
        if not isinstance(other, Bob):
            return False
        return self._key() == other._key()
class TestTable(parser.SymbolTable):
    """Symbol table mapping the ``bob`` alias to the Bob test type."""

    @classmethod
    def table(cls):
        return dict(bob=Bob)
class TestTable2(parser.SymbolTable):
    """Symbol table mapping the ``nancy`` alias to the Bob test type."""

    @classmethod
    def table(cls):
        return dict(nancy=Bob)
def parse(parser, document, **args):
    """Parse ``document`` with the given parser, using /dev/null as a dummy filepath."""
    # NOTE(review): the ``parser`` argument shadows the ``parser`` module
    # imported at the top of this file; the module is inaccessible here.
    return parser.parse('/dev/null', document, **args)
class JsonParserTest(unittest.TestCase):
    """Tests for parsers.JsonParser: comments, nesting, multiple objects,
    symbol-table aliasing and error-message formatting.
    """

    def parse(self, document, symbol_table=None, **kwargs):
        # Default to an empty table so `type_alias` values must be fully qualified.
        symbol_table = symbol_table or parser.EmptyTable()
        return parse(parsers.JsonParser(symbol_table), document, **kwargs)

    def round_trip(self, obj, symbol_table=None):
        """Encode ``obj`` back to JSON and re-parse it."""
        document = parsers.encode_json(obj, inline=True)
        return self.parse(document, symbol_table=symbol_table)

    def test_comments(self):
        document = dedent("""
# Top level comment.
{
# Nested comment
"hobbies": [1, 2, 3]
}
""")
        results = self.parse(document)
        self.assertEqual(1, len(results))
        self.assertEqual([dict(hobbies=[1, 2, 3])], self.round_trip(results[0]))

    def test_single(self):
        document = dedent("""
# A simple example with a single Bob.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"hobbies": [1, 2, 3]
}
""")
        results = self.parse(document)
        self.assertEqual(1, len(results))
        self.assertEqual([Bob(hobbies=[1, 2, 3])], self.round_trip(results[0]))
        self.assertEqual('pants_test.engine.test_parsers.Bob', results[0]._asdict()['type_alias'])

    def test_symbol_table(self):
        document = dedent("""
# An simple example with a single Bob.
{
"type_alias": "bob",
"hobbies": [1, 2, 3]
}
""")
        results = self.parse(document, symbol_table=TestTable())
        self.assertEqual(1, len(results))
        self.assertEqual([Bob(hobbies=[1, 2, 3])],
                         self.round_trip(results[0], symbol_table=TestTable()))
        # The short alias from the symbol table is preserved on the object.
        self.assertEqual('bob', results[0]._asdict()['type_alias'])

    def test_nested_single(self):
        document = dedent("""
# An example with nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"uncle": {
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
},
"hobbies": [1, 2, 3]
}
""")
        results = self.parse(document)
        self.assertEqual(1, len(results))
        self.assertEqual([Bob(uncle=Bob(age=42), hobbies=[1, 2, 3])], self.round_trip(results[0]))

    def test_nested_deep(self):
        document = dedent("""
# An example with deeply nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"configs": [
{
"mappings": {
"uncle": {
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
}
}
}
]
}
""")
        results = self.parse(document)
        self.assertEqual(1, len(results))
        self.assertEqual([Bob(configs=[dict(mappings=dict(uncle=Bob(age=42)))])],
                         self.round_trip(results[0]))

    def test_nested_many(self):
        document = dedent("""
# An example with many nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"cousins": [
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"name": "Jake",
"age": 42
},
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"name": "Jane",
"age": 37
}
]
}
""")
        results = self.parse(document)
        self.assertEqual(1, len(results))
        self.assertEqual([Bob(cousins=[Bob(name='Jake', age=42), Bob(name='Jane', age=37)])],
                         self.round_trip(results[0]))

    def test_multiple(self):
        # Multiple top-level JSON objects in one document.
        document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"hobbies": [1, 2, 3]
}
# Another that is aged.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
}
""")
        results = self.parse(document)
        self.assertEqual([Bob(hobbies=[1, 2, 3]), Bob(age=42)], results)

    def test_tricky_spacing(self):
        # Objects need not start on their own line; an object may open on the
        # same line another one closes.
        document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]} {
# This comment is inside an empty object that started on the prior line!
}
# Another that is aged.
{"type_alias": "pants_test.engine.test_parsers.Bob","age": 42}
""").strip()
        results = self.parse(document)
        self.assertEqual([Bob(hobbies=[1, 2, 3]), {}, Bob(age=42)], results)

    def test_error_presentation(self):
        # A malformed value ("42i") should produce an error that pinpoints the
        # offending object with per-line numbering.
        document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]} {
# This comment is inside an empty object that started on the prior line!
}
# Another that is imaginary aged.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42i,
"four": 1,
"five": 1,
"six": 1,
"seven": 1,
"eight": 1,
"nine": 1
}
""").strip()
        filepath = '/dev/null'
        with self.assertRaises(parser.ParseError) as exc:
            parsers.JsonParser(parser.EmptyTable()).parse(filepath, document)
        # Strip trailing whitespace from the message since our expected literal below will have
        # trailing ws stripped via editors and code reviews calling for it.
        actual_lines = [line.rstrip() for line in str(exc.exception).splitlines()]
        # This message from the json stdlib varies between python releases, so fuzz the match a bit.
        # NOTE(review): assertRegexpMatches is a deprecated alias (removed in
        # Python 3.12); assertRegex is the modern name, but is unavailable in
        # Python 2's unittest which this file still supports.
        self.assertRegexpMatches(actual_lines[0],
                                 r'Expecting (?:,|\',\'|",") delimiter: line 3 column 12 \(char 67\)')
        self.assertEqual(dedent("""
In document at {filepath}:
# An example with several Bobs.
# One with hobbies.
{{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]}} {{
# This comment is inside an empty object that started on the prior line!
}}
# Another that is imaginary aged.
1: {{
2: "type_alias": "pants_test.engine.test_parsers.Bob",
3: "age": 42i,
4: "four": 1,
5: "five": 1,
6: "six": 1,
7: "seven": 1,
8: "eight": 1,
9: "nine": 1
10: }}
""".format(filepath=filepath)).strip(), '\n'.join(actual_lines[1:]))
class JsonEncoderTest(unittest.TestCase):
    """Tests for parsers.encode_json: Resolvable values are emitted either as
    their opaque address (inline=False) or fully resolved (inline=True).
    """

    def setUp(self):
        bill = Bob(name='bill')

        class SimpleResolvable(Resolvable):
            # Minimal Resolvable stub: fixed address, resolves to ``bill``.
            @property
            def address(self):
                return '::an opaque address::'

            def resolve(self):
                return bill

        resolvable_bill = SimpleResolvable()
        self.bob = Bob(name='bob', relative=resolvable_bill, friend=bill)

    def test_shallow_encoding(self):
        # inline=False: the Resolvable 'relative' stays an address string.
        expected_json = dedent("""
{
"name": "bob",
"type_alias": "pants_test.engine.test_parsers.Bob",
"friend": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
},
"relative": "::an opaque address::"
}
""").strip()
        self.assertEqual(json.dumps(json.loads(expected_json)),
                         parsers.encode_json(self.bob, inline=False))

    def test_inlined_encoding(self):
        # inline=True: the Resolvable 'relative' is resolved and embedded.
        expected_json = dedent("""
{
"name": "bob",
"type_alias": "pants_test.engine.test_parsers.Bob",
"friend": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
},
"relative": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
}
}
""").strip()
        self.assertEqual(json.dumps(json.loads(expected_json)),
                         parsers.encode_json(self.bob, inline=True))
class PythonAssignmentsParserTest(unittest.TestCase):
    """Tests for parsers.PythonAssignmentsParser: the assigned name becomes
    the object's ``name`` attribute.
    """

    def test_no_symbol_table(self):
        document = dedent("""
from pants_test.engine.test_parsers import Bob
nancy = Bob(
hobbies=[1, 2, 3]
)
""")
        results = parse(parsers.PythonAssignmentsParser(parser.EmptyTable()), document)
        self.assertEqual([Bob(name='nancy', hobbies=[1, 2, 3])], results)
        # No symbol table was used so no `type_alias` plumbing can be expected.
        self.assertNotIn('type_alias', results[0]._asdict())

    def test_symbol_table(self):
        # 'nancy' here is a symbol-table alias for Bob, not an import.
        document = dedent("""
bill = nancy(
hobbies=[1, 2, 3]
)
""")
        results = parse(parsers.PythonAssignmentsParser(TestTable2()), document)
        self.assertEqual([Bob(name='bill', hobbies=[1, 2, 3])], results)
        self.assertEqual('nancy', results[0]._asdict()['type_alias'])
class PythonCallbacksParserTest(unittest.TestCase):
    """Test for parsers.PythonCallbacksParser: aliases are called directly
    and ``name`` is passed as an explicit keyword.
    """

    def test(self):
        document = dedent("""
nancy(
name='bill',
hobbies=[1, 2, 3]
)
""")
        results = parse(parsers.PythonCallbacksParser(TestTable2()), document)
        self.assertEqual([Bob(name='bill', hobbies=[1, 2, 3])], results)
        self.assertEqual('nancy', results[0]._asdict()['type_alias'])
|
{
"content_hash": "3d06af2e6f34d65d52095669a808f1d5",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 98,
"avg_line_length": 28.64841498559078,
"alnum_prop": 0.5884719847097878,
"repo_name": "foursquare/pants",
"id": "3f72f1ebf36fe4c8eb98a59fe5c115560759f6d9",
"size": "10088",
"binary": false,
"copies": "5",
"ref": "refs/heads/1.7.0+fsX",
"path": "tests/python/pants_test/engine/test_parsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "3034"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1922"
},
{
"name": "HTML",
"bytes": "49126"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5461553"
},
{
"name": "Rust",
"bytes": "443987"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "77142"
},
{
"name": "Starlark",
"bytes": "357125"
},
{
"name": "Thrift",
"bytes": "3365"
}
],
"symlink_target": ""
}
|
import unittest
from chainer.backends import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (1,)],
    'is_variable': [True, False],
    'sample_shape': [(3, 2), ()],
    'extreme_values': [True, False],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestBernoulli(testing.distribution_unittest):
    """Distribution tests for chainer.distributions.Bernoulli, checked
    against scipy.stats.bernoulli via chainer's distribution test harness.
    """

    # Presumably makes the harness compare against scipy element by element —
    # see testing.distribution_unittest for the exact semantics.
    scipy_onebyone = True

    def setUp_configure(self):
        """Configure the distribution under test (called by the harness)."""
        from scipy import stats
        self.dist = distributions.Bernoulli
        self.scipy_dist = stats.bernoulli
        self.test_targets = set([
            "batch_shape", "entropy", "log_prob", "mean", "prob", "sample",
            "stddev", "support", "variance"])
        if self.extreme_values:
            # p exactly 0 or 1 exercises the log(0)/log(1) edge cases.
            p = numpy.random.randint(0, 2, self.shape).astype(numpy.float32)
        else:
            p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
        self.params = {"p": p}
        self.scipy_params = {"p": p}
        self.support = '{0, 1}'
        self.continuous = False
        self.old_settings = None
        if self.extreme_values:
            # Silence the divide/invalid warnings that log(0) would raise.
            self.old_settings = numpy.seterr(divide='ignore', invalid='ignore')

    def tearDown(self):
        # Restore numpy error settings changed in setUp_configure.
        if self.old_settings is not None:
            numpy.seterr(**self.old_settings)

    def sample_for_test(self):
        """Return {0, 1} samples of shape sample_shape + shape (float32)."""
        smp = numpy.random.randint(
            2, size=self.sample_shape + self.shape).astype(numpy.float32)
        return smp
@testing.parameterize(*testing.product({
    'shape': [(2, 3), ()],
    'dtype': [numpy.float32, numpy.float64],
}))
class TestBernoulliLogProb(unittest.TestCase):
    """Forward/backward/double-backward checks for the internal
    ``_bernoulli_log_prob`` helper, called consistently as (logit, x).
    """

    def setUp(self):
        self.logit = numpy.random.normal(size=self.shape).astype(self.dtype)
        self.x = numpy.random.randint(0, 2, size=self.shape).astype(self.dtype)
        self.gy = numpy.random.normal(size=self.shape).astype(self.dtype)
        self.ggx = numpy.random.normal(size=self.shape).astype(self.dtype)
        self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}

    def check_forward(self, logit_data, x_data):
        # BUG FIX: arguments were previously passed as (x, logit), which is
        # inconsistent with the (logit, x) order used by check_backward and
        # check_double_backward below for the same helper.
        distributions.bernoulli._bernoulli_log_prob(logit_data, x_data)

    def test_forward_cpu(self):
        self.check_forward(self.logit, self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x))

    def check_backward(self, logit_data, x_data, y_grad):
        def f(logit):
            return distributions.bernoulli._bernoulli_log_prob(
                logit, x_data)
        gradient_check.check_backward(
            f, logit_data, y_grad, **self.backward_options)

    def test_backward_cpu(self):
        self.check_backward(self.logit, self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
                            cuda.to_gpu(self.gy))

    def check_double_backward(self, logit_data, x_data, y_grad, x_grad_grad):
        def f(logit):
            return distributions.bernoulli._bernoulli_log_prob(
                logit, x_data)
        gradient_check.check_double_backward(
            f, logit_data, y_grad, x_grad_grad, dtype=numpy.float64,
            **self.backward_options)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.logit, self.x, self.gy, self.ggx)

    @attr.gpu
    def test_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
            cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
|
{
"content_hash": "fe0f2f1a9168cf644941d5687d4c3c14",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 32.69911504424779,
"alnum_prop": 0.6208389715832205,
"repo_name": "rezoo/chainer",
"id": "6fb513dbf46f114cb138dde7db2247890c0866a4",
"size": "3695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/distributions_tests/test_bernoulli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "Dockerfile",
"bytes": "1238"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "4367165"
}
],
"symlink_target": ""
}
|
import random
# (race name, stat adjustments) pairs; "ANY" means the stats are unconstrained.
races = [
    ("Dwarf", "+CON;+WIS;-CHA"),
    ("Elf", "+DEX;+INT;-CON"),
    ("Gnome", "+CON;+CHA;-STR"),
    ("Half-Elf", "ANY"),
    ("Half-Orc", "ANY"),
    ("Halfling", "+DEX;+CHA;-STR"),
    ("Human", "ANY"),
]


class Race:
    """Select a character race: random, by name, or from an explicit pair.

    ``inRace`` may be:
      * the string "rand"        -> pick a random entry from ``races``;
      * a race-name string       -> look up its stats in ``races``
                                    (raises ValueError if unknown);
      * a (name, stats) pair     -> used as-is (original behavior).
    """

    def __init__(self, inRace):
        if inRace == "rand":
            self.finalRace, self.raceStats = random.choice(races)
        elif isinstance(inRace, str):
            # GENERALIZATION: the original unpacked inRace directly, so a
            # plain race name crashed with "not enough values to unpack".
            # Accept a bare name by looking it up in the races table.
            try:
                self.finalRace, self.raceStats = next(
                    pair for pair in races if pair[0] == inRace)
            except StopIteration:
                raise ValueError("Unknown race: %r" % inRace)
        else:
            # Backward compatible: an explicit (name, stats) pair.
            self.finalRace, self.raceStats = inRace

    def generate(self):
        """Return the chosen (race name, stat adjustments) tuple."""
        return (self.finalRace, self.raceStats)
|
{
"content_hash": "99a9b75b78115e88c490012c2088c20c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 185,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.5518672199170125,
"repo_name": "laisrael/Game-Tools-NPC-Generator",
"id": "17f8f448c04986790506d5304bdb0641c430dbe6",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "racegen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22271"
}
],
"symlink_target": ""
}
|
from io_processing.result_interpreter.abst_result_interpreter import AbstractInterpreter, \
InterpreterOptions
from io_processing.surveillance_handler import MonitorTags, \
EventlineHandler
import csv
from io_processing.result_interpreter.checkpoint_interpreter import CPCategory
class EventlineInterpreter(AbstractInterpreter):
    """Interpreter that renders monitor event tuples as an eventline,
    optionally streaming each event to a CSV file via CheckpointInterpreterCore.
    """

    def __init__(self, export_options=False, file_path=False, ignore=False):
        # export_options/file_path use False as a "not set" sentinel, matching
        # the AbstractInterpreter signature.
        AbstractInterpreter.__init__(self, export_options, file_path)
        # NOTE(review): when ignore is True the attributes below are never
        # set; callers must not use the CSV path in that case.
        if ignore: return
        # CSV Output
        self._file_path = file_path
        self.core = CheckpointInterpreterCore()
        self.core.init_csv(self._file_path)

    def interprete_data(self, monitor_inputs):
        '''Invoked periodically by the monitor with a result table
        (a list of per-event tuples rather than a single tuple).

        Depending on the configured export options, forwards the data to
        the connection exporter and/or appends each event row to the CSV.
        '''
        if InterpreterOptions.CONNECTION in self.export_options:
            self._export_connection(monitor_inputs)
        # write to csv on the fly
        if InterpreterOptions.CSV_FILE in self.export_options:
            try:
                for monitor_input in monitor_inputs:
                    # Skip non-row entries (e.g. headers or separators).
                    if not isinstance(monitor_input, (list, tuple)): continue
                    # Positional layout: time, comp_id, asc_comp_id,
                    # category (always ""), mon_tag, msg_id, message,
                    # msg_size, stream_id, uq_id.
                    self.core.export_csv_on_fly(monitor_input[0], monitor_input[1], monitor_input[2], "", monitor_input[3], monitor_input[4], monitor_input[5], monitor_input[6], monitor_input[7], monitor_input[8])
            except:
                # NOTE(review): deliberately best-effort — any malformed row
                # aborts CSV export silently for this batch.
                pass

    def get_handler(self):
        '''Return the handler classes that send their data to this interpreter.'''
        return [EventlineHandler]
class CheckpointInterpreterCore(object):
def __init__(self):
    # Maps CPCategory values to the list of MonitorTags in that category.
    self._category_dict = self._get_categories()
    # Bookkeeping list; appears unused in this excerpt — TODO confirm.
    self._already = []
    # CSV output path placeholder; init_csv() takes the path directly and
    # never updates this attribute in the visible code.
    self._csv_path = ""
def init_csv(self, filepath):
try:
# idx = filepath[::-1].find('.')
# filepath = filepath[:(-idx - 1)] + "_run" + filepath[(-idx - 1):]
self.csv_writer = csv.writer(open(filepath, 'w'), delimiter=';')
el = ["Time", "Component ID", "Description", "Monitor Tag", "Processed Message", "Message Size", "Stream ID", "Category", "Message Identifier"]
self.csv_writer.writerow(el)
except:
pass
def cp_string(self, mon_tag, asc_comp_id, stream_id, message):
    """Translate a MonitorTags checkpoint into a human-readable description.

    Arguments:
        mon_tag      -- MonitorTags member identifying the checkpoint.
        asc_comp_id  -- id of the associated component (peer ECU / module).
        stream_id    -- id of the message stream involved, where applicable.
        message      -- processed message content (used by a few ECU tags).

    Returns the description string; unknown tags fall through to
    ``str(mon_tag)``. The sections below follow the protocol phases:
    TESLA, TLS handshake, Security Module, and ECU.
    """
    # TESLA:
    if mon_tag == MonitorTags.CP_INIT_EXCHANGE_FIRST_KEY_KN:
        return "Intend exchanging key K_N with '%s' -> Start to encrypt it" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_ENCRYPTED_EXCHANGE_FIRST_KEY_KN:
        return "Encrypted exchanging key K_N '%s', Stream: '%s' -> Sending it" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SETUP_INIT_CREATE_KEYS:
        return "Starting to create keys for all streams '%s'" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_SETUP_FINISHED_CREATE_KEYS:
        return "Finished creating keys for all streams '%s'" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_INIT_TRANSMIT_MESSAGE:
        return "Intend to send simple message to '%s', Stream: '%s' -> start MAC Creation" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_MACED_TRANSMIT_MESSAGE:
        return "Finished Creation of Mac, send simple message to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVED_SIMPLE_MESSAGE:
        return "Receive simple message from '%s', Stream: '%s' -> Start Key legitimation" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_BUFFERED_SIMPLE_MESSAGE:
        return "Buffered simple message from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RETURNED_AUTHENTICATED_SIMPLE_MESSAGE:
        return "Authenticated message (from buffer) from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVED_EXCHANGE_FIRST_KEY_KN:
        return "Receive message with first key K_N '%s' -> Start decrypting it" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_DECRYPTED_EXCHANGE_FIRST_KEY_KN:
        return "Decrypted message with first key K_N'%s'" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_CHECKED_KEY_LEGID:
        return "Checked key legitimation for '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_INIT_VERIFYING_BUFFER_MESSAGE:
        return "Start Verifying messages in buffer with current key '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_FINISHED_VERIFYING_BUFFER_MESSAGE:
        return "Finished verifying messages in buffer with current key '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_SYNC_MESSAGE:
        return "Send sync message to '%s'" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_SEND_SYNC_RESPONSE_MESSAGE:
        return "Receive sync message and sending sync response message to '%s'" % (asc_comp_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SYNC_RESPONSE_MESSAGE:
        return "Receive sync response message from '%s'" % (asc_comp_id)
    # TLS:
    if mon_tag == MonitorTags.CP_SESSION_AVAILABLE_SEND_MESSAGE:
        return "Sending simple message to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_CLIENT_HELLO:
        return "Sending ClientHello to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CLIENT_HELLO:
        return "Receive ClientHello from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_ALERT_NO_CIPHERSUITE:
        return "Error: Sending Alert to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_SERVER_HELLO:
        return "Sending Server Hello to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_SERVER_CERTIFICATE:
        return "Sending Server Certificate to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_SERVER_KEYEXCHANGE:
        return "Sending Server KeyExchange to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_CERTIFICATE_REQUEST:
        return "Sending CertificateRequest to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_SERVER_HELLO_DONE:
        return "Sending ServerHelloDone to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SERVER_HELLO:
        return "Receive ServerHello from '%s', Stream: '%s' " % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SERVER_CERTIFICATE:
        return "Receive ServerCertificate from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SERVER_KEYEXCHANGE:
        return "Receive Server Keyexchange from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CERTIFICATE_REQUEST:
        return "Receive Certificate Request from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SERVER_HELLO_DONE:
        return "Receive ServerHelloDone from '%s', Stream: '%s' -> Start Certificate Validation" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SERVER_HELLO_DONE_VALIDATED_CERT:
        return "ServerHelloDone, validated Certificate of '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_CLIENT_CERTIFICATE:
        return "Sending client Certificate to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SIMPLE_MESSAGE:
        return "Received simple Message from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_INIT_SEND_CLIENT_KEYEXCHANGE:
        return "Want to send ClientKeyexchange to '%s' ->Start encrypting" % asc_comp_id
    if mon_tag == MonitorTags.CP_ENCRYPTED_CLIENT_KEYEXCHANGE:
        return "Encrypted '%s', Stream %s -> Generating Mastersecret" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_GENERATED_MASTERSEC_CLIENT_KEYEXCHANGE:
        return "Generated Master secret for '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_INIT_SEND_CERTIFICATE_VERIFY:
        return "Want to send CertificateVerify '%s', Stream %s - > Start Encryption" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_ENCRYPTED_CERTIFICATE_VERIFY:
        return "CertificateVerify encrypted '%s', Stream: '%s' -> Send message" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SEND_CIPHER_SPEC:
        return "Sending ChangeCipherSpec to '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_INIT_CLIENT_FINISHED:
        return "Want to send clientFinished to '%s', Stream: '%s' -> Hash Verification Data" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_HASHED_CLIENT_FINISHED:
        return "Finished first hashing of ClientFinished for '%s', Stream: '%s' ->Start hashing with prf" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_GENERATED_HASH_FROM_PRF_CLIENT_FINISHED:
        return "Generated Hash for ClientFinished message to '%s', Stream: '%s' -> Sending Message" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CLIENT_CERTIFICATE:
        return "Receive the client Certificate from '%s', Stream: '%s' -> Start verification" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_CLIENT_CERTIFICATE_VALIDATED:
        return "Finished verification of certificate from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CLIENT_KEYEXCHANGE:
        return "Receive clientKeyexchange from '%s', Stream: '%s' -> Start its decryption" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_DECRYPTED_CLIENT_KEYEXCHANGE:
        return "Decrypted clientKeyexchange message from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CERTIFICATE_VERIFY:
        return "Receive CertificateVerify from '%s', Stream: '%s' -> Decrypt it" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_DECRYPTED_CERTIFICATE_VERIFY:
        return "Decrypted CertificateVerify from '%s', Stream: '%s' -> Generate Mastersecret from Presecret" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_GENERATED_MASTER_SECRET_CERT_VERIFY:
        return "Generated MasterSecret for '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVED_CHANGE_CIPHER_SPEC:
        return "Received changeCipherSpec from '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_CLIENT_FINISHED:
        return "Received clientFinished Message '%s', Stream: '%s' -> Start Hashing" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_CLIENT_FINISHED_HASHED_COMPARISON_HASH:
        return "Hashed ClientFinished Verification Data '%s', Stream: '%s' -> run PRF" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_CLIENT_FINISHED_GENERATED_HASH_PRF:
        return "Ran PRF for '%s', Stream: '%s' " % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_RECEIVE_SERVER_FINISHED:
        return "Received ServerFinished Message '%s', Stream: '%s' -> Start Hashing" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SERVER_FINISHED_HASHED_COMPARISON_HASH:
        return "Receiver: Hashed ServerFinished VerificationData '%s', Stream: '%s' -> run PRF" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SERVER_FINISHED_GENERATED_HASH_PRF:
        return "Receiver: Ran PRF for '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_INIT_SERVER_FINISHED:
        return "Sender: Want to send ServerFinished'%s', Stream: '%s' -> Start hashing" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_HASHED_SERVER_FINISHED:
        return "Sender: hashed ServerFinished Verification data '%s', Stream: '%s' -> run PRF" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_GENERATED_HASH_FROM_PRF_SERVER_FINISHED:
        return "Sender: Ran PRF '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_SERVER_AUTHENTICATED:
        return "Client received Authentication granted from Server '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    if mon_tag == MonitorTags.CP_CLIENT_AUTHENTICATED:
        return "Server received Authentication granted from Client '%s', Stream: '%s'" % (asc_comp_id, stream_id)
    # Security Module
    if mon_tag == MonitorTags.CP_SEC_INIT_AUTHENTICATION:
        return "The Security Module initialized the ECU Authentication"
    if mon_tag == MonitorTags.CP_SEC_RECEIVE_REG_MESSAGE:
        return "Received a Registration message from '%s' -> Start decryption" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_DECRYPTED_INNER_REG_MESSAGE:
        return "Inner Part of Registration message was decrypted (Req. ECU: '%s')" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_DECRYPTED_OUTER_REG_MESSAGE:
        return "Outer Part of Registration message was decrypted (Req. ECU: '%s')" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_VALIDATED_ECU_CERTIFICATE:
        return "Certificate of '%s' is validated" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_CREATED_CMP_HASH_REG_MSG:
        return "Compare hash for the inner Reg. Message is created (Req. ECU: '%s')" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG:
        return "Comparison of inner Registration Message is finished (Req. ECU: '%s') -> Generate Confirmation" % asc_comp_id
    # NOTE(review): "ECNRYPTED" below mirrors the (misspelled) MonitorTags
    # member name; do not "fix" it here without renaming the enum member.
    if mon_tag == MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE:
        return "Confirmation message was encrypted (Req. ECU: '%s') -> Send it to the ECU" % asc_comp_id
    if mon_tag == MonitorTags.CP_SEC_RECEIVE_REQ_MESSAGE:
        return "Received a Request message from '%s' (Stream ID: %s)-> Start decryption" % (asc_comp_id, "Unknown yet")
    if mon_tag == MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE:
        return "Request message was decrypted (Stream ID: %s, Req. ECU: '%s') -> On success start session key generation" % (stream_id, asc_comp_id)
    if mon_tag == MonitorTags.CP_SEC_GENERATED_SESSION_KEY:
        return "Session key was generated (Stream ID: %s, Req. ECU: '%s') -> send grant/deny message" % (stream_id, asc_comp_id)
    if mon_tag == MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE:
        return "Deny message was encrypted (Stream ID: %s, Target ECU: '%s')" % (stream_id, asc_comp_id)
    if mon_tag == MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE:
        return "Grant message was encrypted (Stream ID: %s, Target ECU: '%s')" % (stream_id, asc_comp_id)
    # ECU
    if mon_tag == MonitorTags.CP_ECU_ALREADY_AUTHENTICATED:
        return "ECU '%s' was already authenticated )" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_RECEIVE_SIMPLE_MESSAGE:
        return "Simple message was received from ECU: '%s')" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_DECRYPTED_SIMPLE_MESSAGE:
        return "Simple message was decrypted (Stream ID: %s, Sending ECU: '%s')" % (stream_id, asc_comp_id)
    if mon_tag == MonitorTags.CP_ECU_INTENT_SEND_SIMPLE_MESSAGE:
        return "Want to send Simple message -> Stream ID: %s " % stream_id
    if mon_tag == MonitorTags.CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE:
        return "Encrypted Simple message -> Send it: Stream ID: %s; Content: '%s'" % (stream_id, message)
    if mon_tag == MonitorTags.CP_ECU_RECEIVE_SEC_MOD_ADVERTISEMENT:
        return "Receive a security Module Advertisement from '%s' -> Start certificate validation" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE:
        return "Certificate from '%s' validated" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_START_CREATION_REG_MESSAGE:
        return "Start creation of registration message -> Generate ECU Key"
    if mon_tag == MonitorTags.CP_ECU_CREATED_ECU_KEY_REG_MESSAGE:
        return "Created the symmetric ECU Key for Registration Message -> Start to encrypt inner Part"
    if mon_tag == MonitorTags.CP_ECU_ENCRYPTED_INNER_REG_MESSAGE:
        return "Encrypted the inner Registration Message -> Start Hash of inner message"
    if mon_tag == MonitorTags.CP_ECU_HASHED_INNER_REG_MESSAGE:
        return "Hashed the inner Registration Message -> Start encryption of this hashed part"
    if mon_tag == MonitorTags.CP_ECU_ENCRYPTED_OUTER_REG_MESSAGE:
        return "Encrypted the outer Registration Message (Hashed inner Part)"
    if mon_tag == MonitorTags.CP_ECU_SEND_REG_MESSAGE:
        return "Send the Registration Message to '%s'" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_RECEIVE_CONF_MESSAGE:
        return "Receive confirmation Message from '%s' -> Start to decrypt it" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_DECRYPTED_CONF_MESSAGE:
        return "Decrypted confirmation Message from '%s' -> Successfully Authenticated" % asc_comp_id
    if mon_tag == MonitorTags.CP_ECU_START_CREATE_REQ_MESSAGE:
        return "Start creation of Request Message (Stream ID: %s)" % stream_id
    if mon_tag == MonitorTags.CP_ECU_ENCRYPTED_REQ_MESSAGE:
        return "Encrypted Request Message (Stream ID: %s)" % stream_id
    if mon_tag == MonitorTags.CP_ECU_RECEIVE_DENY_MESSAGE:
        return "Receive Deny Message (Stream ID: %s) -> Start to decrypt" % "Unknown"
    if mon_tag == MonitorTags.CP_ECU_DECRYPTED_DENY_MESSAGE:
        return "Decrypted Deny Message (Stream ID: %s)" % stream_id
    if mon_tag == MonitorTags.CP_ECU_RECEIVE_GRANT_MESSAGE:
        return "Receive Grant Message (Stream ID: %s) -> Start to decrypt" % "Unknown"
    if mon_tag == MonitorTags.CP_ECU_DECRYPTED_GRANT_MESSAGE:
        return "Decrypted Grant Message (Stream ID: %s)" % stream_id
    # Fallback: unknown tags are rendered via their default string form.
    return str(mon_tag)
def export_csv_on_fly(self, time, comp_id, asc_comp_id, category, mon_tag, msg_id, message, msg_size, stream_id, uq_id):
    """Write one monitoring event as a single row via the open CSV writer.

    Note: ``uq_id`` is accepted but never used in the exported row.
    """
    # Export all to the given file
    # SECURITY NOTE(review): eval() executes the incoming 'mon_tag' string as
    # Python code. Presumably mon_tag is always a trusted "MonitorTags.X"
    # repr produced by this process -- confirm, or replace with a safe
    # lookup such as getattr(MonitorTags, name).
    el = [str(time), str(comp_id), str(self.cp_string(eval(mon_tag), asc_comp_id, stream_id, message)), mon_tag, str(message), str(msg_size), str(stream_id), str(category), str(msg_id)]
    self.csv_writer.writerow(el)
def _category_by_tag(self, tag):
for ky in self._category_dict:
if tag in self._category_dict[ky]:
return ky
return None
def _get_categories(self):
    """Build the mapping from checkpoint category to its monitor tags.

    Returns a dict keyed by CPCategory members; each value is the list of
    MonitorTags checkpoints belonging to that category.
    """
    return {
        CPCategory.ECU_AUTHENTICATION_TRANS: [
            MonitorTags.CP_SEC_INIT_AUTHENTICATION,
            MonitorTags.CP_SEC_RECEIVE_REG_MESSAGE,
            MonitorTags.CP_ECU_RECEIVE_SEC_MOD_ADVERTISEMENT,
            MonitorTags.CP_ECU_SEND_REG_MESSAGE,
            MonitorTags.CP_ECU_RECEIVE_CONF_MESSAGE,
            MonitorTags.CP_ECU_ALREADY_AUTHENTICATED,
        ],
        CPCategory.ECU_AUTHENTICATION_ENC: [
            MonitorTags.CP_SEC_DECRYPTED_INNER_REG_MESSAGE,
            MonitorTags.CP_SEC_DECRYPTED_OUTER_REG_MESSAGE,
            MonitorTags.CP_SEC_VALIDATED_ECU_CERTIFICATE,
            MonitorTags.CP_SEC_CREATED_CMP_HASH_REG_MSG,
            MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG,
            MonitorTags.CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE,
            MonitorTags.CP_ECU_START_CREATION_REG_MESSAGE,
            MonitorTags.CP_ECU_CREATED_ECU_KEY_REG_MESSAGE,
            MonitorTags.CP_ECU_ENCRYPTED_INNER_REG_MESSAGE,
            MonitorTags.CP_ECU_HASHED_INNER_REG_MESSAGE,
            MonitorTags.CP_ECU_ENCRYPTED_OUTER_REG_MESSAGE,
            MonitorTags.CP_ECU_DECRYPTED_CONF_MESSAGE,
            # NOTE: 'ECNRYPTED' is the spelling of the project-defined tag.
            MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE,
        ],
        CPCategory.STREAM_AUTHORIZATION_TRANS: [
            MonitorTags.CP_SEC_RECEIVE_REQ_MESSAGE,
            MonitorTags.CP_ECU_RECEIVE_DENY_MESSAGE,
            MonitorTags.CP_ECU_RECEIVE_GRANT_MESSAGE,
        ],
        CPCategory.STREAM_AUTHORIZATION_ENC: [
            MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE,
            MonitorTags.CP_SEC_GENERATED_SESSION_KEY,
            MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE,
            MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE,
            MonitorTags.CP_ECU_START_CREATE_REQ_MESSAGE,
            MonitorTags.CP_ECU_ENCRYPTED_REQ_MESSAGE,
            MonitorTags.CP_ECU_DECRYPTED_DENY_MESSAGE,
            MonitorTags.CP_ECU_DECRYPTED_GRANT_MESSAGE,
        ],
        CPCategory.SIMPLE_MESSAGE_TRANS: [
            MonitorTags.CP_ECU_RECEIVE_SIMPLE_MESSAGE,
            MonitorTags.CP_ECU_INTENT_SEND_SIMPLE_MESSAGE,
        ],
        CPCategory.SIMPLE_MESSAGE_ENC: [
            MonitorTags.CP_ECU_DECRYPTED_SIMPLE_MESSAGE,
            MonitorTags.CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE,
        ],
    }
|
{
"content_hash": "163bb97b71bd5db8352264cdd090c381",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 220,
"avg_line_length": 55.20595533498759,
"alnum_prop": 0.6168644372527867,
"repo_name": "PhilippMundhenk/IVNS",
"id": "80462e9cf5daa6915af74640440c962c26dcbf9a",
"size": "22249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ECUSimulation/io_processing/result_interpreter/eventline_interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1888586"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le paramètre 'montant' de la commande 'questeur'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmMontant(Parametre):

    """Parameter 'montant' of the 'questeur' command.

    Reports how much money the character has deposited with the
    money-keeper ("questeur") present in the current room.
    """

    def __init__(self):
        """Parameter constructor: French name 'montant', alias 'balance'."""
        Parametre.__init__(self, "montant", "balance")
        # Short and long in-game help text (user-facing, kept in French).
        self.aide_courte = "demande le montant conservé"
        self.aide_longue = \
            "Cette commande interroge la valeur de l'argent déposé dans " \
            "le questeur présent dans la salle où vous vous trouvez."

    def interpreter(self, personnage, dic_masques):
        """Execute the command for the given character (*personnage*)."""
        salle = personnage.salle
        # No money-keeper exists in this room at all.
        if not importeur.commerce.questeur_existe(salle):
            personnage << "|err|Aucun questeur n'est présent là où " \
                    "vous vous trouvez.|ff|"
            return
        questeur = importeur.commerce.questeurs[salle]
        # The keeper exists but nobody is staffing it right now.
        if questeur.servant is None:
            personnage << "|err|Personne n'est présent pour s'en charger.|ff|"
            return
        # Balance defaults to 0 when the character has no account entry.
        montant = questeur.comptes.get(personnage, 0)
        if montant == 0:
            personnage << "Vous n'avez rien déposé dans ce questeur."
        else:
            personnage << "Vous disposez de {} pièces de bronze sur votre " \
                    "compte.".format(montant)
|
{
"content_hash": "bdae6ff67ca0c0d2d4282fbef53c4df6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 37.75675675675676,
"alnum_prop": 0.5934144595561919,
"repo_name": "vlegoff/tsunami",
"id": "013bad6f075e88968e59c05d84de0ac743d95dcd",
"size": "2987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/commerce/commandes/questeur/montant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from django.forms import ModelForm, forms
from .models import Todo
class TodoForm(ModelForm):
    """Form for creating a Todo.

    Exposes project name, description and deadline; the primary key,
    progress and creation date are never user-editable.
    """

    class Meta:
        model = Todo
        # BUG FIX: Django's ModelForm Meta has no 'include' option -- the
        # field whitelist is spelled 'fields'. The old 'include' tuple was
        # silently ignored, so every non-excluded model field appeared on
        # the form. With an explicit whitelist, 'exclude' is redundant.
        fields = ('projectname', 'description', 'deadline')
class TodoUpdateForm(ModelForm):
    """Form for updating a Todo; additionally exposes the progress field."""

    class Meta:
        model = Todo
        # BUG FIX: 'include' is not a valid ModelForm Meta option and was
        # silently ignored; the whitelist must be named 'fields'. With an
        # explicit whitelist, 'exclude' is redundant.
        fields = ('projectname', 'description', 'progress', 'deadline')
|
{
"content_hash": "1890d082e8fa71d47ec5b6404e5a7c8d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 27.0625,
"alnum_prop": 0.625866050808314,
"repo_name": "dihmandrake/todo-django",
"id": "b4106831d349abdb3569a08643d335c540ccc04f",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2354"
},
{
"name": "HTML",
"bytes": "15252"
},
{
"name": "JavaScript",
"bytes": "968"
},
{
"name": "Python",
"bytes": "11242"
}
],
"symlink_target": ""
}
|
"""ACME protocol messages."""
import collections
from six.moves.urllib import parse as urllib_parse # pylint: disable=import-error
from acme import challenges
from acme import fields
from acme import jose
class Error(jose.JSONObjectWithFields, Exception):
    """ACME error.

    https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00

    :ivar unicode typ:
    :ivar unicode title:
    :ivar unicode detail:

    """
    # Prefix used on the wire, e.g. 'malformed' <-> 'urn:acme:error:malformed'.
    ERROR_TYPE_NAMESPACE = 'urn:acme:error:'
    # Human-readable text for every recognized error type; decoding rejects
    # any type not listed here (see the typ decoder below).
    ERROR_TYPE_DESCRIPTIONS = {
        'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',
        'badNonce': 'The client sent an unacceptable anti-replay nonce',
        'connection': 'The server could not connect to the client for DV',
        'dnssec': 'The server could not validate a DNSSEC signed domain',
        'malformed': 'The request message was malformed',
        'serverInternal': 'The server experienced an internal error',
        'tls': 'The server experienced a TLS error during DV',
        'unauthorized': 'The client lacks sufficient authorization',
        'unknownHost': 'The server could not resolve a domain name',
    }

    typ = jose.Field('type')
    title = jose.Field('title', omitempty=True)
    detail = jose.Field('detail')

    @typ.encoder
    def typ(value):  # pylint: disable=missing-docstring,no-self-argument
        # Serialize as the fully-qualified URN.
        return Error.ERROR_TYPE_NAMESPACE + value

    @typ.decoder
    def typ(value):  # pylint: disable=missing-docstring,no-self-argument
        # pylint thinks isinstance(value, Error), so startswith is not found
        # pylint: disable=no-member
        if not value.startswith(Error.ERROR_TYPE_NAMESPACE):
            raise jose.DeserializationError('Missing error type prefix')
        without_prefix = value[len(Error.ERROR_TYPE_NAMESPACE):]
        if without_prefix not in Error.ERROR_TYPE_DESCRIPTIONS:
            raise jose.DeserializationError('Error type not recognized')
        return without_prefix

    @property
    def description(self):
        """Hardcoded error description based on its type.

        :rtype: unicode

        """
        return self.ERROR_TYPE_DESCRIPTIONS[self.typ]

    def __str__(self):
        # 'title' is intentionally omitted from the string form.
        if self.typ is not None:
            return ' :: '.join([self.typ, self.description, self.detail])
        else:
            return str(self.detail)
# NOTE(review): collections.Hashable moved to collections.abc in Python 3.3
# and was removed from collections in 3.10; the module-level import would
# need updating for modern Python.
class _Constant(jose.JSONDeSerializable, collections.Hashable):
    """ACME constant."""
    __slots__ = ('name',)
    # Subclasses set this to a dict; every instance registers itself in it
    # (see __init__), giving from_json() a name -> singleton lookup table.
    POSSIBLE_NAMES = NotImplemented

    def __init__(self, name):
        self.POSSIBLE_NAMES[name] = self
        self.name = name

    def to_partial_json(self):
        # Constants serialize to their bare name string.
        return self.name

    @classmethod
    def from_json(cls, value):
        if value not in cls.POSSIBLE_NAMES:
            raise jose.DeserializationError(
                '{0} not recognized'.format(cls.__name__))
        # Return the registered singleton, preserving identity semantics.
        return cls.POSSIBLE_NAMES[value]

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, self.name)

    def __eq__(self, other):
        return isinstance(other, type(self)) and other.name == self.name

    def __hash__(self):
        return hash((self.__class__, self.name))

    def __ne__(self, other):
        return not self == other
class Status(_Constant):
    """ACME "status" field."""
    # Filled in by each Status(...) instantiation below the class.
    POSSIBLE_NAMES = {}
# Singleton status values; constructing each one registers it in
# Status.POSSIBLE_NAMES so Status.from_json() round-trips these names.
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
class IdentifierType(_Constant):
    """ACME identifier type."""
    # Filled in by each IdentifierType(...) instantiation below the class.
    POSSIBLE_NAMES = {}
# The only identifier type defined here: a DNS domain name.
IDENTIFIER_FQDN = IdentifierType('dns')  # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
    """ACME identifier.

    :ivar IdentifierType typ:
    :ivar unicode value:

    """
    typ = jose.Field('type', decoder=IdentifierType.from_json)
    value = jose.Field('value')
class Resource(jose.JSONObjectWithFields):
    """ACME Resource.

    :ivar acme.messages.ResourceBody body: Resource body.

    """
    body = jose.Field('body')
class ResourceWithURI(Resource):
    """ACME Resource with URI.

    :ivar unicode uri: Location of the resource.

    """
    uri = jose.Field('uri')  # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
    """ACME Resource Body.

    Marker base class for the payload part of an ACME resource.
    """
class Registration(ResourceBody):
    """Registration Resource Body.

    :ivar acme.jose.jwk.JWK key: Public key.
    :ivar tuple contact: Contact information following ACME spec,
        `tuple` of `unicode`.
    :ivar unicode agreement:
    :ivar unicode authorizations: URI where
        `messages.Registration.Authorizations` can be found.
    :ivar unicode certificates: URI where
        `messages.Registration.Certificates` can be found.

    """
    # on new-reg key server ignores 'key' and populates it based on
    # JWS.signature.combined.jwk
    key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
    contact = jose.Field('contact', omitempty=True, default=())
    agreement = jose.Field('agreement', omitempty=True)
    authorizations = jose.Field('authorizations', omitempty=True)
    certificates = jose.Field('certificates', omitempty=True)

    class Authorizations(jose.JSONObjectWithFields):
        """Authorizations granted to Account in the process of registration.

        :ivar tuple authorizations: URIs to Authorization Resources.

        """
        authorizations = jose.Field('authorizations')

    class Certificates(jose.JSONObjectWithFields):
        """Certificates granted to Account in the process of registration.

        :ivar tuple certificates: URIs to Certificate Resources.

        """
        certificates = jose.Field('certificates')

    # URI-scheme prefixes used to tag entries in the 'contact' tuple.
    phone_prefix = 'tel:'
    email_prefix = 'mailto:'

    @classmethod
    def from_data(cls, phone=None, email=None, **kwargs):
        """Create registration resource from contact details.

        Any 'contact' entries already present in kwargs are kept and the
        prefixed phone/email entries are appended after them.
        """
        details = list(kwargs.pop('contact', ()))
        if phone is not None:
            details.append(cls.phone_prefix + phone)
        if email is not None:
            details.append(cls.email_prefix + email)
        kwargs['contact'] = tuple(details)
        return cls(**kwargs)

    def _filter_contact(self, prefix):
        # Return entries carrying *prefix*, with the prefix stripped.
        return tuple(
            detail[len(prefix):] for detail in self.contact
            if detail.startswith(prefix))

    @property
    def phones(self):
        """All phones found in the ``contact`` field."""
        return self._filter_contact(self.phone_prefix)

    @property
    def emails(self):
        """All emails found in the ``contact`` field."""
        return self._filter_contact(self.email_prefix)
class NewRegistration(Registration):
    """New registration."""
    resource_type = 'new-reg'
    resource = fields.Resource(resource_type)
class UpdateRegistration(Registration):
    """Update registration."""
    resource_type = 'reg'
    resource = fields.Resource(resource_type)
class RegistrationResource(ResourceWithURI):
    """Registration Resource.

    :ivar acme.messages.Registration body:
    :ivar unicode new_authzr_uri: URI found in the 'next' ``Link`` header
    :ivar unicode terms_of_service: URL for the CA TOS.

    """
    body = jose.Field('body', decoder=Registration.from_json)
    new_authzr_uri = jose.Field('new_authzr_uri')
    terms_of_service = jose.Field('terms_of_service', omitempty=True)
class ChallengeBody(ResourceBody):
    """Challenge Resource Body.

    .. todo::
       Confusingly, this has a similar name to `.challenges.Challenge`,
       as well as `.achallenges.AnnotatedChallenge`. Please use names
       such as ``challb`` to distinguish instances of this class from
       ``achall``.

    :ivar acme.challenges.Challenge: Wrapped challenge.
        Conveniently, all challenge fields are proxied, i.e. you can
        call ``challb.x`` to get ``challb.chall.x`` contents.
    :ivar acme.messages.Status status:
    :ivar datetime.datetime validated:
    :ivar messages.Error error:

    """
    # 'chall' holds the wrapped acme.challenges.Challenge instance; it is a
    # slot (not a jose.Field) because its JSON is merged flat into this
    # object's JSON (see to_partial_json / fields_from_json).
    __slots__ = ('chall',)
    uri = jose.Field('uri')
    status = jose.Field('status', decoder=Status.from_json,
                        omitempty=True, default=STATUS_PENDING)
    validated = fields.RFC3339Field('validated', omitempty=True)
    error = jose.Field('error', decoder=Error.from_json,
                       omitempty=True, default=None)

    def to_partial_json(self):
        # Merge the wrapped challenge's fields into this object's JSON.
        jobj = super(ChallengeBody, self).to_partial_json()
        jobj.update(self.chall.to_partial_json())
        return jobj

    @classmethod
    def fields_from_json(cls, jobj):
        jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)
        # The same flat JSON object also describes the wrapped challenge.
        jobj_fields['chall'] = challenges.Challenge.from_json(jobj)
        return jobj_fields

    def __getattr__(self, name):
        # Proxy unknown attributes to the wrapped challenge.
        return getattr(self.chall, name)
class ChallengeResource(Resource):
    """Challenge Resource.

    :ivar acme.messages.ChallengeBody body:
    :ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.

    """
    body = jose.Field('body', decoder=ChallengeBody.from_json)
    authzr_uri = jose.Field('authzr_uri')

    @property
    def uri(self):  # pylint: disable=missing-docstring,no-self-argument
        # Unlike other resources, the URI lives on the body, not here.
        # bug? 'method already defined line None'
        # pylint: disable=function-redefined
        return self.body.uri  # pylint: disable=no-member
class Authorization(ResourceBody):
    """Authorization Resource Body.

    :ivar acme.messages.Identifier identifier:
    :ivar list challenges: `list` of `.ChallengeBody`
    :ivar tuple combinations: Challenge combinations (`tuple` of `tuple`
        of `int`, as opposed to `list` of `list` from the spec).
    :ivar acme.messages.Status status:
    :ivar datetime.datetime expires:

    """
    identifier = jose.Field('identifier', decoder=Identifier.from_json)
    challenges = jose.Field('challenges', omitempty=True)
    combinations = jose.Field('combinations', omitempty=True)
    status = jose.Field('status', omitempty=True, decoder=Status.from_json)
    # TODO: 'expires' is allowed for Authorization Resources in
    # general, but for Key Authorization '[t]he "expires" field MUST
    # be absent'... then acme-spec gives example with 'expires'
    # present... That's confusing!
    expires = fields.RFC3339Field('expires', omitempty=True)

    @challenges.decoder
    def challenges(value):  # pylint: disable=missing-docstring,no-self-argument
        # Decode each entry into a ChallengeBody; store as a tuple.
        return tuple(ChallengeBody.from_json(chall) for chall in value)

    @property
    def resolved_combinations(self):
        """Combinations with challenges instead of indices."""
        return tuple(tuple(self.challenges[idx] for idx in combo)
                     for combo in self.combinations)
class NewAuthorization(Authorization):
    """New authorization."""
    resource_type = 'new-authz'
    resource = fields.Resource(resource_type)
class AuthorizationResource(ResourceWithURI):
    """Authorization Resource.

    :ivar acme.messages.Authorization body:
    :ivar unicode new_cert_uri: URI found in the 'next' ``Link`` header

    """
    body = jose.Field('body', decoder=Authorization.from_json)
    new_cert_uri = jose.Field('new_cert_uri')
class CertificateRequest(jose.JSONObjectWithFields):
    """ACME new-cert request.

    :ivar acme.jose.util.ComparableX509 csr:
        `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`

    """
    resource_type = 'new-cert'
    resource = fields.Resource(resource_type)
    csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)
class CertificateResource(ResourceWithURI):
    """Certificate Resource.

    :ivar acme.jose.util.ComparableX509 body:
        `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
    :ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header
    :ivar tuple authzrs: `tuple` of `AuthorizationResource`.

    """
    cert_chain_uri = jose.Field('cert_chain_uri')
    authzrs = jose.Field('authzrs')
class Revocation(jose.JSONObjectWithFields):
    """Revocation message.

    :ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in
        `.ComparableX509`

    """
    resource_type = 'revoke-cert'
    resource = fields.Resource(resource_type)
    certificate = jose.Field(
        'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)

    # TODO: acme-spec#138, this allows only one ACME server instance per domain
    PATH = '/acme/revoke-cert'
    """Path to revocation URL, see `url`"""

    @classmethod
    def url(cls, base):
        """Get revocation URL.

        :param str base: New Registration Resource or server (root) URL.

        """
        # urljoin with an absolute PATH keeps only the scheme/host of base.
        return urllib_parse.urljoin(base, cls.PATH)
|
{
"content_hash": "bc25f92709dff93c9e106c90f34fc054",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 82,
"avg_line_length": 32.15696202531645,
"alnum_prop": 0.663517556290348,
"repo_name": "rugk/letsencrypt",
"id": "970cf4e6e572203e1e5929a67ea9fbd6409ea40b",
"size": "12702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/acme/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14119"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1057615"
},
{
"name": "Shell",
"bytes": "9749"
}
],
"symlink_target": ""
}
|
from django.conf.urls import re_path
from django.contrib.auth.decorators import login_required
from w0rplib.url import redir
from .feed import LatestArticleFeed
from .views import (
ArticleBanCommenterView,
ArticleDeleteCommentView,
ArticleDetailView,
ArticleEditPageView,
ArticleMonthArchiveView,
ArticlePageView,
ArticleUnbanCommenterView,
DeleteArticleView,
EditArticleView,
NewArticleView,
article_bounce_view,
upload_file_view,
)
# Blog URL configuration: listing pages, per-article views, comment
# moderation actions, archives, feed, and authoring views.
urlpatterns = [
    # Loading the main site gets you page 1.
    re_path(
        r"^$",
        ArticlePageView.as_view(),
        {"page": "1"},
        name="blog-home",
    ),
    # Redirect the first page back to the blog main page, for SEO.
    redir(r"^page/0*1/$", "/blog"),
    # Redirect appending "login" to the blog URL to the right login URL,
    # which will redirect back to the blog.
    redir(r"^login/$", "/login/?next=/blog"),
    re_path(
        r"^page/(?P<page>[\d]+)/$",
        ArticlePageView.as_view(),
        name="article-page"
    ),
    re_path(
        r"^delete/(?P<slug>[\w-]+)/$",
        login_required(DeleteArticleView.as_view()),
        name="delete-article"
    ),
    re_path(
        r"^edit-page/(?P<page>[\d]+)/$",
        login_required(ArticleEditPageView.as_view()),
        name="article-edit-list"
    ),
    re_path(
        r"^post/(?P<slug>[\w-]+)/$",
        ArticleDetailView.as_view(),
        name="article-detail"
    ),
    re_path(
        r"^post/(?P<slug>[\w-]+)/comment-bounce/$",
        article_bounce_view,
        name="article-comment-bounce"
    ),
    # Comment moderation actions on a given article.
    re_path(
        r"^post/(?P<slug>[\w-]+)/delete-comment/(?P<pk>\d+)/$",
        ArticleDeleteCommentView.as_view(),
        name="delete-comment"
    ),
    re_path(
        r"^post/(?P<slug>[\w-]+)/ban-comment/(?P<pk>\d+)/$",
        ArticleBanCommenterView.as_view(),
        name="ban-commenter"
    ),
    re_path(
        r"^post/(?P<slug>[\w-]+)/unban-comment/(?P<pk>\d+)/$",
        ArticleUnbanCommenterView.as_view(),
        name="unban-commenter"
    ),
    # Month archive; the month regex only accepts 01-12.
    re_path(
        r"^date/(?P<year>\d{4})/(?P<month>1[0-2]|0[1-9])/$",
        ArticleMonthArchiveView.as_view(month_format="%m"),
        name="article-archive"
    ),
    re_path(
        r"^latest/feed/$",
        LatestArticleFeed(),
        name="article-feed"
    ),
    re_path(
        r"^new/$",
        NewArticleView.as_view(),
        name="new-article",
    ),
    re_path(
        r"^edit/(?P<slug>[\w-]+)/$",
        EditArticleView.as_view(),
        name="edit-article"
    ),
    re_path(r"^upload/$", upload_file_view, name="upload-file"),
]
|
{
"content_hash": "1ca03755d6693ea208ed1528f149448b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 72,
"avg_line_length": 27.489583333333332,
"alnum_prop": 0.5589238347859038,
"repo_name": "w0rp/w0rpzone",
"id": "a0dee26bb696a19e335441bb2ef2e82e43345093",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "84384"
},
{
"name": "HTML",
"bytes": "17038"
},
{
"name": "JavaScript",
"bytes": "15375"
},
{
"name": "Python",
"bytes": "87286"
},
{
"name": "Shell",
"bytes": "3835"
}
],
"symlink_target": ""
}
|
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
# Replace with a real ad group ID before running (see get_ad_groups.py).
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
  """Uploads the image found at the given URL as an image asset.

  Args:
    client: An AdWordsClient instance.
    url: The image URL.

  Returns:
    The ID of the uploaded image.
  """
  # Initialize appropriate service.
  service = client.GetService('AssetService', version='v201806')

  # Fetch the raw image bytes, then wrap them in an ADD operation carrying
  # the ImageAsset operand.
  response = requests.get(url)

  mutate_result = service.mutate([{
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'ImageAsset',
          'imageData': response.content,
          # This field is optional, and if provided should be unique.
          # 'assetName': 'Image asset ' + str(uuid.uuid4()),
      },
  }])

  # Return the ID of the newly created asset.
  return mutate_result['value'][0]['assetId']
def main(client, ad_group_id):
  """Adds a MultiAssetResponsiveDisplayAd to the given ad group.

  Args:
    client: An AdWordsClient instance.
    ad_group_id: ID of the ad group the ad is added to.
  """
  # Initialize appropriate service.
  ad_group_ad_service = client.GetService('AdGroupAdService', version='v201806')

  # Create the ad.
  multi_asset_responsive_display_ad = {
      'xsi_type': 'MultiAssetResponsiveDisplayAd',
      'headlines': [{
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'Travel to Mars'
          }
      }, {
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'Travel to Jupiter',
          }
      }, {
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'Travel to Pluto'
          }
      }],
      'descriptions': [{
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'Visit the planet in a luxury spaceship.',
          }
      }, {
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'See the planet in style.',
          }
      }],
      'businessName': 'Galactic Luxury Cruises',
      'longHeadline': {
          'asset': {
              'xsi_type': 'TextAsset',
              'assetText': 'Visit the planet in a luxury spaceship.',
          }
      },
      # This ad format does not allow the creation of an image asset by setting
      # the asset.imageData field. An image asset must first be created using
      # the AssetService, and asset.assetId must be populated when creating
      # the ad.
      'marketingImages': [{
          'asset': {
              'xsi_type': 'ImageAsset',
              'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
          }
      }],
      'squareMarketingImages': [{
          'asset': {
              'xsi_type': 'ImageAsset',
              'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
          }
      }],
      # Optional values
      'finalUrls': ['http://www.example.com'],
      'callToActionText': 'Shop Now',
      # Set color settings using hexadecimal values. Set allowFlexibleColor to
      # false if you want your ads to render by always using your colors
      # strictly.
      'mainColor': '#0000ff',
      'accentColor': '#ffff00',
      'allowFlexibleColor': False,
      'formatSetting': 'NON_NATIVE',
      # Set dynamic display ad settings, composed of landscape logo image,
      # promotion text, and price prefix.
      'dynamicSettingsPricePrefix': 'as low as',
      'dynamicSettingsPromoText': 'Free shipping!',
      'logoImages': [{
          'asset': {
              'xsi_type': 'ImageAsset',
              'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
          }
      }]
  }

  # Create ad group ad. Created PAUSED so it can be reviewed before serving.
  ad_group_ad = {
      'adGroupId': ad_group_id,
      'ad': multi_asset_responsive_display_ad,
      # Optional.
      'status': 'PAUSED'
  }

  # Add ad.
  ads = ad_group_ad_service.mutate([
      {'operator': 'ADD', 'operand': ad_group_ad}
  ])

  # Display results.
  # NOTE: this file uses the Python 2 print statement (bare print below);
  # it is not Python 3 compatible as written.
  if 'value' in ads:
    for ad in ads['value']:
      print ('Added new responsive display ad ad with ID "%d" '
             'and long headline "%s".'
             % (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
  else:
    print 'No ads were added.'
if __name__ == '__main__':
  # Initialize client object. Credentials are pulled from the
  # "googleads.yaml" file (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, AD_GROUP_ID)
|
{
"content_hash": "81a39299dd2933d0e5a2bfa2f243275d",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 29.25465838509317,
"alnum_prop": 0.5821656050955414,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "c6c5273f3b98f2726dcdb188dab50bed8b04476e",
"size": "5311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201806/advanced_operations/add_multi_asset_responsive_display_ad.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
}
|
"""Create / interact with Google Cloud Resource Manager connections."""
from google.cloud import _http
from google.cloud.resource_manager import __version__
# Client-info header value identifying this library version (sent via
# Connection._EXTRA_HEADERS below).
_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__)
class Connection(_http.JSONConnection):
    """A connection to Google Cloud Resource Manager via the JSON REST API.

    :type client: :class:`~google.cloud.resource_manager.client.Client`
    :param client: The client that owns the current connection.
    """

    API_BASE_URL = "https://cloudresourcemanager.googleapis.com"
    """The base of the API call URL."""

    API_VERSION = "v1beta1"
    """The version of the API, used in building the API call's URL."""

    API_URL_TEMPLATE = "{api_base_url}/{api_version}{path}"
    """A template for the URL of a particular API call."""

    # Extra headers sent with every request: library identification.
    _EXTRA_HEADERS = {_http.CLIENT_INFO_HEADER: _CLIENT_INFO}
|
{
"content_hash": "0b9d6825aac90eb22f51ec9cfafaa960",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.6952054794520548,
"repo_name": "dhermes/gcloud-python",
"id": "d0118c810385d8b67df5097a88430692ef36e20f",
"size": "1451",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resource_manager/google/cloud/resource_manager/_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that using Repository() works even when the Repository has no
SConsignFile() and the source Repository files have their signatures
saved because they're older than the max_drift time.
"""
import TestSCons
test = TestSCons.TestSCons()

test.subdir('build', 'src')

# Build in 'build' with '..' registered as a source Repository, and
# max_drift set low so file content signatures get cached.
test.write(['build', 'SConstruct'], """\
SetOption('max_drift', 1)
Repository('..')
env = Environment()
env.Program('foo', 'src/foo.c')
""")

test.write(['src', 'foo.c'], """\
#include <stdio.h>
#include <stdlib.h>
#include "foo.h"
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("%s\\n", STRING);
printf("src/foo.c\\n");
exit (0);
}
""")

test.write(['src', 'foo.h'], """\
#define STRING "src/foo.h"
""")

# Make sure it's past the max_drift time,
# so the source file signatures get saved.
test.sleep(2)

test.run(chdir='build', arguments='.')

# The built program should print the header string then the source string.
test.run(program=test.workpath('build', 'foo'),
         stdout="src/foo.h\nsrc/foo.c\n")

# A rebuild must find everything up to date despite the Repository layout.
test.up_to_date(chdir='build', arguments='.')

test.pass_test()
|
{
"content_hash": "bcf42908c4b4c9eafdee6952ab921770",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 68,
"avg_line_length": 21,
"alnum_prop": 0.6330532212885154,
"repo_name": "datalogics-robb/scons",
"id": "95b1749f3bd98c47a14687f1f7ebb594161b8479",
"size": "2173",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/Repository/no-SConsignFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4753658"
},
{
"name": "Shell",
"bytes": "25935"
}
],
"symlink_target": ""
}
|
"""
This file contains function to looking for WordPress plugins and versions
"""
import csv
import aiohttp
import asyncio
from datetime import datetime
from functools import partial
from urllib.parse import urlparse
from os.path import join
from .db import DB
from .data import * # noqa
from .exceptions import * # noqa
from .plugins_utils import plugins_testing
from .utils import colorize, generate_error_page, download, get_data_folder
from .wordpress_core import is_remote_a_wordpress, get_wordpress_version, get_wordpress_vulnerabilities
# ----------------------------------------------------------------------
# Main code of functions
# ----------------------------------------------------------------------
def find_versions(args):
    """
    Main function to run libs as version finder.

    Tests target availability, optionally detects WordPress and its version
    (including known CVEs), then brute-forces plugins from a wordlist.

    :param args: PlecostOptions object
    :type args: `PlecostOptions`

    :return: PlecostResults object.
    :rtype: `PlecostResults`

    :raises: PlecostTargetNotAvailable, PlecostNotWordPressFound
    """
    # --------------------------------------------------------------------------
    # Common vars
    # --------------------------------------------------------------------------
    url = args.target
    parsed_url = urlparse(args.target)
    host = parsed_url.hostname
    concurrency = args.concurrency
    log = args.log_function
    proxy = args.proxy  # NOTE(review): read but never used below
    is_color = args.colorize  # NOTE(review): read but never used below
    start_time = datetime.now()
    no_check_wordpress = args.no_check_wordpress
    no_check_plugins = args.no_check_plugins
    no_check_wordpress_version = args.no_check_wordpress_version
    force_scan = args.force_scan

    # Jackass mode is set? -> effectively remove the concurrency limit.
    if args.jackass is True:
        concurrency = 9999

    # Non-blocking config: a dedicated event loop and one shared TCP
    # connector are reused for every HTTP request in this scan.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    con = aiohttp.TCPConnector(conn_timeout=10, share_cookies=True, loop=loop, verify_ssl=False)
    _download = partial(download, max_redirect=0, connector=con, loop=loop)

    # Get CVE database
    db = DB(path=join(get_data_folder(), "cve.db"))

    # --------------------------------------------------------------------------
    # Test availability of target
    # --------------------------------------------------------------------------
    log("[*] Testing target connection...")
    headers, status, content = loop.run_until_complete(_download(url, method="get", get_content=False))

    # Detect redirect
    if status in (300, 301, 302, 303, 307):
        url = headers.get("location", None)
        if url is not None:
            log("\n[%s] Redirection detected to '%s'. Using it now. " % (colorize("ii", "yellow"), url),
                log_level=1)
        else:
            raise PlecostTargetNotAvailable("Redirection detected, but can't determinate the new location")
    log(colorize(" ok!\n"))

    # --------------------------------------------------------------------------
    # Check if remote host is a WordPress
    # --------------------------------------------------------------------------
    if no_check_wordpress is False:
        log("[*] Looking for WordPress installation...\n")

        # Error page content.
        headers, status, error_page = loop.run_until_complete(_download(generate_error_page(url)))

        _is_wp = loop.run_until_complete(is_remote_a_wordpress(url, error_page, _download))
        if not _is_wp:
            if force_scan is False:
                raise PlecostNotWordPressFound("No WordPress installations found in '%s'." % host)
            else:
                log(colorize("\n No Wordpress installation found!\n", "yellow"))
        else:
            log("\n %s" % colorize(" ok!\n"))

    # --------------------------------------------------------------------------
    # Check WordPress version
    # --------------------------------------------------------------------------
    if no_check_wordpress_version is False:
        log("[*] Getting WordPress version... ")

        wordpress_version = loop.run_until_complete(get_wordpress_version(url, _download, db))
        # wordpress_version.
        if wordpress_version:
            log("%s (latest: %s)" %
                (
                    colorize("%s" % wordpress_version.current_version,
                             "red" if wordpress_version.is_outdated is True else "blue"),
                    colorize("%s" % wordpress_version.latest_version)
                ), 0)

            # --------------------------------------------------------------------------
            # Looking for CVEs for installed Wordpress version
            # --------------------------------------------------------------------------
            if wordpress_version.vulnerabilities:
                log("\n |_CVE list:\n")
                for cve in wordpress_version.vulnerabilities:
                    log(" |__%(cve)s: (http://cve.mitre.org/cgi-bin/cvename.cgi?name=%(cve)s)\n" %
                        {"cve": colorize(cve, "red")})
                log("\n")
        else:
            log(colorize("Unknown!\n", "red"))
            log("\n")
    else:
        # Version checking disabled: carry an empty placeholder in the results.
        wordpress_version = PlecostWordPressInfo(last_version="",
                                                 current_version="",
                                                 vulnerabilities=[])

    # --------------------------------------------------------------------------
    # Check the plugins
    # --------------------------------------------------------------------------
    # Read plugins file and remove \n and \r
    plugins_info = []
    if no_check_plugins is False:
        plugins = []
        plugins_append = plugins.append
        # BUG FIX: mode "rU" was deprecated since Python 3.4 and removed in
        # Python 3.11; universal-newline translation is the default for "r".
        with open(args.wordlist, "r") as f:
            for plugin in f:
                plugins_append(plugin.replace("\n", "").replace("\r", ""))

        # Prepare csv file
        cve_info = csv.reader(plugins)

        error_page = ""

        # Find plugins
        log("[*] Looking for plugins (wordlist: %s) ... " % args.wordlist[args.wordlist.rfind("/") + 1:], 0)
        plugins_info = loop.run_until_complete(plugins_testing(url,
                                                               error_page,
                                                               log,
                                                               cve_info,
                                                               db,
                                                               concurrency,
                                                               loop,
                                                               con=con))
        log("\n[*] Done! \n")

    # Set finish time
    end_time = datetime.now()

    # --------------------------------------------------------------------------
    # Clean up
    # --------------------------------------------------------------------------
    con.close()

    # --------------------------------------------------------------------------
    # Make results
    # --------------------------------------------------------------------------
    return PlecostResults(target=args.target,
                          start_time=start_time,
                          end_time=end_time,
                          wordpress_info=wordpress_version,
                          plugins=plugins_info)
__all__ = ["find_versions", "_is_remote_a_wordpress"]
|
{
"content_hash": "258a09b902716b49845ab803abde7f4a",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 108,
"avg_line_length": 40.868852459016395,
"alnum_prop": 0.43414895039443774,
"repo_name": "0ps/plecost",
"id": "ffda53185d73478e5aa2a714c7a60682aae3755a",
"size": "9294",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "plecost_lib/libs/versions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6726"
},
{
"name": "Makefile",
"bytes": "6803"
},
{
"name": "Python",
"bytes": "160572"
}
],
"symlink_target": ""
}
|
"""Attach Aperio XML annotation files to their matching slide documents.

Walks XML_DIR, looks each file up in MongoDB by the first 23 characters of
its filename (NOTE(review): presumably the TCGA slide barcode prefix --
confirm), then stores the collected file list on each matched slide under
'aperioAnnotations'.
"""
import pymongo, sys, os
from bson.objectid import ObjectId

DB_NAME = "masterSlideList"
DB_COLL = "RawSlideData"
XML_DIR = "/TCGA_MIRROR/TCGA_METADATA/Aperio_XML_Files"

client = pymongo.MongoClient('localhost', 27017)
db = client[DB_NAME][DB_COLL]

# Map slide ObjectId (as string) -> list of {"name", "path"} annotation files.
slides = {}

# BUG FIX: the original nested a second os.walk() inside the outer walk,
# re-visiting every subtree once per ancestor directory, so each file was
# matched and appended multiple times (and the scan was O(n^2)).  A single
# walk visits each file exactly once.
for root, dirs, files in os.walk(XML_DIR):
    for filename in files:
        searchTerm = filename[0:23]
        absFilePath = os.path.join(root, filename)
        slide = db.find_one({"name": {"$regex": searchTerm}}, {"scanProperties": False})
        if slide:
            slideId = str(slide["_id"])
            if slideId not in slides:
                slides[slideId] = []
            slides[slideId].append({"name": filename, "path": absFilePath})

# .items() / print(...) instead of Py2-only .iteritems() / print statement;
# both forms are valid in Python 2 and 3.
for slideId, files in slides.items():
    print(slideId)
    db.update_one({"_id": ObjectId(slideId)}, {"$set": {"aperioAnnotations": files}})
|
{
"content_hash": "45934788640813e106343d301177867c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 29.896551724137932,
"alnum_prop": 0.6816608996539792,
"repo_name": "dgutman/ADRCPathViewer",
"id": "44a52357deb08a24b5f5866785c5c2df3d1c52d1",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/matchAperioToSlide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "2395"
},
{
"name": "JavaScript",
"bytes": "110030"
},
{
"name": "Jupyter Notebook",
"bytes": "548677"
},
{
"name": "Python",
"bytes": "68756"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
}
|
from tempest import config
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
CONF = config.CONF
class SharesNegativeTest(base.BaseSharesTest):
    """Negative share-API tests that need a backend and a shared fixture."""

    @classmethod
    def resource_setup(cls):
        super(SharesNegativeTest, cls).resource_setup()
        # One public share reused by the cross-tenant tests below.
        cls.share = cls.create_share(
            name='public_share',
            description='public_share_desc',
            is_public=True,
            metadata={'key': 'value'}
        )

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_update_share_with_wrong_public_value(self):
        # 'is_public' must be a boolean-like value.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.update_share, self.share["id"],
                          is_public="truebar")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_try_delete_share_with_existing_snapshot(self):
        # share can not be deleted while snapshot exists

        # create share
        share = self.create_share()

        # create snapshot
        self.create_snapshot_wait_for_active(share["id"])

        # try delete share
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.delete_share, share["id"])

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_share_from_snap_with_less_size(self):
        # requires minimum 5Gb available space
        skip_msg = "Check disc space for this test"

        try:  # create share
            size = CONF.share.share_size + 1
            share = self.create_share(size=size, cleanup_in_class=False)
        except share_exceptions.ShareBuildErrorException:
            # BUG FIX: testtools/unittest TestCase has no supported ``skip()``
            # instance method; ``skipTest()`` is the correct API.
            self.skipTest(skip_msg)

        try:  # create snapshot
            snap = self.create_snapshot_wait_for_active(
                share["id"], cleanup_in_class=False)
        except share_exceptions.SnapshotBuildErrorException:
            self.skipTest(skip_msg)

        # try create share from snapshot with less size
        self.assertRaises(lib_exc.BadRequest,
                          self.create_share,
                          snapshot_id=snap["id"],
                          cleanup_in_class=False)

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    @testtools.skipIf(not CONF.share.multitenancy_enabled,
                      "Only for multitenancy.")
    def test_create_share_with_nonexistant_share_network(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_share,
                          share_network_id="wrong_sn_id")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    @testtools.skipIf(not CONF.share.multitenancy_enabled,
                      "Only for multitenancy.")
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_share_from_snap_with_different_share_network(self):
        # create share
        share = self.create_share(cleanup_in_class=False)

        # get parent's share network
        parent_share = self.shares_client.get_share(share["id"])
        parent_sn = self.shares_client.get_share_network(
            parent_share["share_network_id"])

        # create new share-network - net duplicate of parent's share
        new_duplicated_sn = self.create_share_network(
            cleanup_in_class=False,
            neutron_net_id=parent_sn["neutron_net_id"],
            neutron_subnet_id=parent_sn["neutron_subnet_id"],
        )

        # create snapshot of parent share
        snap = self.create_snapshot_wait_for_active(
            share["id"], cleanup_in_class=False)

        # try create share with snapshot using another share-network
        # 400 bad request is expected
        self.assertRaises(
            lib_exc.BadRequest,
            self.create_share,
            cleanup_in_class=False,
            share_network_id=new_duplicated_sn["id"],
            snapshot_id=snap["id"],
        )

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_update_other_tenants_public_share(self):
        # A different tenant may read but not modify a public share.
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden, isolated_client.update_share,
                          self.share["id"], name="new_name")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_delete_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.delete_share,
                          self.share['id'])

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_set_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.set_metadata,
                          self.share['id'],
                          {'key': 'value'})

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_update_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.update_all_metadata,
                          self.share['id'],
                          {'key': 'value'})

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_delete_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.delete_metadata,
                          self.share['id'],
                          'key')
class SharesAPIOnlyNegativeTest(base.BaseSharesTest):
    """Negative share-API tests that are rejected at the API layer.

    None of these reach a backend: each asserts that an invalid request
    (bad value, missing/unknown id, or insufficient privileges) is refused
    with the expected HTTP error.
    """

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_unmanage_share_by_user(self):
        # unmanage is an admin-only operation
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.unmanage_share,
                          'fake-id')

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_manage_share_by_user(self):
        # manage is an admin-only operation
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.manage_share,
                          'fake-host', 'nfs', '/export/path',
                          'fake-type')

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_list_by_share_server_by_user(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.list_shares,
                          params={'share_server_id': 12345})

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_non_existent_az(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_v2_client.create_share,
                          availability_zone='fake_az')

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_with_zero_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size=0)

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_with_invalid_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size="#$%")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_with_out_passing_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size="")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_delete_snapshot_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_snapshot,
                          "wrong_share_id")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_snapshot_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_snapshot,
                          "wrong_share_id")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_with_invalid_protocol(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share,
                          share_protocol="nonexistent_protocol")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_create_share_with_wrong_public_value(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, is_public='truebar')

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_get_share_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound, self.shares_client.get_share,
                          "wrong_share_id")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_get_share_without_passing_share_id(self):
        # Should not be able to get share when empty ID is passed
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.get_share, '')

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_list_shares_nonadmin_with_nonexistent_share_server_filter(self):
        # filtering by share server allowed only for admins by default
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.list_shares_with_detail,
                          {'share_server_id': 'fake_share_server_id'})

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_delete_share_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share,
                          "wrong_share_id")

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
    def test_delete_share_without_passing_share_id(self):
        # Should not be able to delete share when empty ID is passed
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_share, '')
|
{
"content_hash": "413b834da691732dbcc2bc084dccfcff",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 41.77075098814229,
"alnum_prop": 0.598221044663134,
"repo_name": "NetApp/manila",
"id": "7c5619ef8285369526c092ad1fd3417fae1cecd3",
"size": "11197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila_tempest_tests/tests/api/test_shares_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "8111068"
},
{
"name": "Shell",
"bytes": "91643"
}
],
"symlink_target": ""
}
|
import types
class SharedInstance:
    """ Container for the module-wide singleton blockchain instance.

        ``instance`` holds the lazily created chain object;
        ``config`` holds the keyword arguments used to build it on first
        access (see ``shared_blockchain_instance``).
    """
    instance = None
    config = {}
class AbstractBlockchainInstanceProvider:
    """ This is a class that allows compatibility with previous
        naming conventions. It will extract 'blockchain_instance'
        from the key word arguments and ensure that self.blockchain
        contains an instance of the main chain instance
    """

    # Singleton storage; subclasses may substitute their own container class.
    _sharedInstance = SharedInstance

    def __init__(self, *args, **kwargs):
        """ Bind an explicitly passed ``blockchain_instance`` keyword
            argument, or fall back to the module-wide shared instance.
        """
        self._blockchain = None
        if kwargs.get("blockchain_instance"):
            self._blockchain = kwargs["blockchain_instance"]
        else:
            self._blockchain = self.shared_blockchain_instance()

    @classmethod
    def inject(slf, cls):
        """ Class decorator: build a subclass of both this provider
            (``slf``) and ``cls``, chaining the two ``__init__`` calls so
            every decorated class gets the blockchain-instance plumbing.
        """
        class NewClass(slf, cls):
            blockchain_instance_class = slf

            def __init__(self, *args, **kwargs):
                # Run the provider's __init__ first, then the wrapped class'.
                slf.__init__(self, *args, **kwargs)
                cls.__init__(self, *args, **kwargs)

        # Make the wrapper indistinguishable from the wrapped class to
        # introspection and documentation tools.
        NewClass.__name__ = cls.__name__
        NewClass.__qualname__ = cls.__qualname__
        NewClass.__doc__ = cls.__doc__
        NewClass.__module__ = cls.__module__
        return NewClass

    def get_instance_class(self):
        """ Should return the Chain instance class, e.g. `bitshares.BitShares`
        """
        raise NotImplementedError

    def define_classes(self):
        """ Needs to define instance variables that provide classes
        """
        raise NotImplementedError

    @property
    def blockchain(self):
        """ The chain instance bound to this object, falling back to the
            shared singleton when none was bound explicitly.
        """
        if hasattr(self, "_blockchain") and self._blockchain:
            # This shouldn't happen except for legacy libraries
            return self._blockchain
        else:
            return self.shared_blockchain_instance()

    @property
    def chain(self):
        """ Short form for blockchain (for the lazy)
        """
        return self.blockchain

    def shared_blockchain_instance(self):
        """ This method will initialize ``SharedInstance.instance`` and return it.
            The purpose of this method is to have offer single default
            instance that can be reused by multiple classes.
        """
        if not self._sharedInstance.instance:
            klass = self.get_instance_class()
            self._sharedInstance.instance = klass(**self._sharedInstance.config)
        return self._sharedInstance.instance

    @classmethod
    def set_shared_blockchain_instance(cls, instance):
        """ This method allows us to override default instance for all
            users of ``SharedInstance.instance``.

            :param chaininstance instance: Chain instance
        """
        cls._sharedInstance.instance = instance

    # -------------------------------------------------------------------------
    # Shared instance interface
    # -------------------------------------------------------------------------
    def set_shared_instance(self):
        """ This method allows to set the current instance as default
        """
        self._sharedInstance.instance = self

    @classmethod
    def set_shared_config(cls, config):
        """ This allows to set a config that will be used when calling
            ``shared_blockchain_instance`` and allows to define the
            configuration without requiring to actually create an instance
        """
        assert isinstance(config, dict)
        cls._sharedInstance.config.update(config)
        # if one is already set, delete
        if cls._sharedInstance.instance:
            cls._sharedInstance.instance = None
def shared_blockchain_instance():
    """ Return (creating it on first use) the module-wide chain instance. """
    return BlockchainInstance().shared_blockchain_instance()


def set_shared_blockchain_instance(instance):
    """ Install *instance* as the module-wide shared chain instance. """
    # NOTE(review): assumes every chain instance exposes clear_cache();
    # confirm against the concrete instance classes.
    instance.clear_cache()
    BlockchainInstance.set_shared_blockchain_instance(instance)


def set_shared_config(config):
    """ Store *config* for the lazy construction of the shared instance. """
    BlockchainInstance.set_shared_config(config)


# Legacy alias
BlockchainInstance = AbstractBlockchainInstanceProvider
|
{
"content_hash": "51e7864ffe291c3162cd78947cde01a2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 82,
"avg_line_length": 32.98347107438016,
"alnum_prop": 0.613881232773741,
"repo_name": "xeroc/python-graphenelib",
"id": "29dcfb0f8ebf1c0bec8d642f2071d875dd8c1753",
"size": "4015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphenecommon/instance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "922435"
}
],
"symlink_target": ""
}
|
import threading
import time
import sys
from src import Utils
from src.play.controller.bots.HumanGui import HumanGui
from src.play.model.Game import Game, InvalidMove_Error
from src.play.model.Move import Move
from src.play.controller.GTPengine import GTPengine
class GTPcontroller(threading.Thread):
    """Thread that referees a game between two GTP engines.

    Alternately asks the current player's engine for a move (``genmove``),
    forwards the move to the other engine (``play``), applies it to the
    local ``Game`` model, and prints the board after each turn.
    """

    def __init__(self, player1type, player2type, logging_level, end_of_turn_sleep_time):
        threading.Thread.__init__(self)
        self.logger = Utils.get_unique_file_logger(self, logging_level)
        self.end_of_turn_sleep_time = end_of_turn_sleep_time
        self.game = Game()
        self.view = None
        self.player1 = Player('b', logging_level)
        self.player1.engine.controller = self
        self.player2 = Player('w', logging_level)
        self.player2.engine.controller = self
        # Map each engine back to its Player wrapper so that incoming
        # responses can be attributed (see handle_input_from_engine).
        self.map = {
            self.player1.engine: self.player1,
            self.player2.engine: self.player2,
        }
        self.send_to_player(self.player1, 'set_player_type ' + player1type)
        self.send_to_player(self.player2, 'set_player_type ' + player2type)
        # GTP success responses are "= <payload>"; strip the "= " prefix.
        self.player1.name = self.wait_for_response(self.player1, 'name')[2:]
        self.player2.name = self.wait_for_response(self.player2, 'name')[2:]
        self.current_player = self.player1
        self.other_player = self.player2

    def log_and_print(self, message):
        """Write *message* both to this controller's logger and to stdout."""
        self.logger.info(message)
        print(message)

    def send_to_player(self, player, command):
        """Log and forward a GTP *command* to *player*'s engine."""
        self.log_and_print('   send to ' + player.name + ' (' + player.color + '): ' + command)
        player.engine.handle_input_from_controller(command)

    def broadcast(self, command):
        """Send the same GTP *command* to both players.

        BUG FIX: the original body ignored *command* and always sent
        'quit'.  The only existing caller passes 'quit', so behavior at
        that call site is unchanged.
        """
        self.send_to_player(self.player1, command)
        self.send_to_player(self.player2, command)

    def wait_for_response(self, player, message):
        """Send *message* to *player* and block until a response arrives."""
        self.send_to_player(player, message)
        # NOTE(review): busy-wait; the response is set from the engine's
        # thread via handle_input_from_engine. Burns CPU while waiting.
        while player.latest_response is None:
            pass
        return player.get_latest_response()

    def run(self):
        """Main loop: alternate genmove/play between engines until the end."""
        self.game.start()
        while self.game.is_running:
            print('\nnext turn\n')
            response = self.wait_for_response(self.current_player, 'genmove ' + self.current_player.color)
            if response.startswith('?'):
                # GTP error responses start with '?': abort the game.
                self.log_and_print('player ' + self.current_player.name +
                                   ' responded with an error, aborting the game: ' + '"' + response[2:] + '"')
                break
            move = response[2:]  # strip away the "= "
            self.send_to_player(self.other_player, 'play ' + self.current_player.color + ' ' + move)
            self.game.play(Move().from_gtp(move, self.game.size), self.current_player.color)
            print('\n' + self.game.__str__())
            time.sleep(self.end_of_turn_sleep_time)
            # swap players for next turn
            if self.current_player == self.player1:
                self.current_player = self.player2
                self.other_player = self.player1
            else:
                self.current_player = self.player1
                self.other_player = self.player2
        self.broadcast('quit')
        print('\n' + self.game.__str__())
        # if self.view is not None:
        #     self.view.game_ended()
        # else:
        print('Final result:', self.game.evaluate_points())
        sys.exit(0)

    def handle_input_from_engine(self, engine, input):
        """Callback from an engine thread: store its latest response."""
        input = input.strip()
        player = self.map[engine]
        self.log_and_print('received from ' + player.name + ' (' + player.color + '): ' + input)
        player.latest_response = input

    def receive_move_from_gui(self, move):
        """Accept a GUI move when the current player is a HumanGui bot."""
        human = self.current_player.engine.bot
        if type(human) is HumanGui:
            try:
                # Validate against the model first (testing=True) so illegal
                # GUI moves are rejected before being handed to the bot.
                self.game.play(move, self.current_player.color, testing=True)
                human.move = move
            except InvalidMove_Error as e:
                print('\ninvalid move')
class Player:
    """Bundles one GTP engine with its color, display name, and a one-slot
    mailbox holding the engine's most recent response."""

    def __init__(self, color, logging_level):
        self.engine = GTPengine(logging_level)
        self.color = color
        self.name = 'unknown'
        self.latest_response = None

    def get_latest_response(self):
        """Return the pending response (or None) and clear the mailbox."""
        response, self.latest_response = self.latest_response, None
        return response
|
{
"content_hash": "f0da55137e6e9180fdc44ae2aac887ff",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 110,
"avg_line_length": 37.7787610619469,
"alnum_prop": 0.5975638322792223,
"repo_name": "nathbo/GO_DILab",
"id": "57d97b9fb80dec10c45ac76a713af2c8ae643a58",
"size": "4269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/play/controller/GTPcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "320762"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
}
|
"""
@package mi.instrument.nortek.aquadopp.playback.driver
@author Pete Cable
@brief Driver for the aquadopp ascii mode playback
Release notes:
Driver for Aquadopp DW
"""
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
import re
import datetime
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import SampleException
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.instrument.nortek.aquadopp.ooicore.driver import NortekDataParticleType
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.common import BaseEnum
from mi.instrument.nortek.driver import ProtocolState, ProtocolEvent
# Field sub-patterns for one whitespace-separated ASCII velocity record.
integer_pattern = r'\d+'
float_pattern = r'[+\-\d.]+'
# One record = 25 fields (8 ints, 3 floats, 3 ints, 7 floats, 2 ints,
# 2 floats) terminated by a literal CRLF.
# BUG FIX: the join separator was the non-raw string '\s+'; "\s" is an
# invalid escape sequence (DeprecationWarning since Python 3.6, and slated
# to become an error).  All regex fragments are now raw strings.
VELOCITY_DATA_PATTERN = r'\s+'.join([integer_pattern] * 8 +
                                    [float_pattern] * 3 +
                                    [integer_pattern] * 3 +
                                    [float_pattern] * 7 +
                                    [integer_pattern] * 2 +
                                    [float_pattern] * 2) + r'\r\n'
VELOCITY_DATA_REGEX = re.compile(VELOCITY_DATA_PATTERN)
class AquadoppDwVelocityDataParticleKey(BaseEnum):
    """
    Velocity Data particle

    Value-ID names emitted by AquadoppDwVelocityAsciiDataParticle.  The
    suffixes mirror the scaled integer units produced by the parser
    (dV = 0.1 V, dms = 0.1 m/s, decidegree = 0.1 deg, etc.).
    """
    TIMESTAMP = "date_time_string"
    ERROR = "error_code"
    ANALOG1 = "analog1"
    BATTERY_VOLTAGE = "battery_voltage_dV"
    SOUND_SPEED_ANALOG2 = "sound_speed_dms"
    HEADING = "heading_decidegree"
    PITCH = "pitch_decidegree"
    ROLL = "roll_decidegree"
    PRESSURE = "pressure_mbar"
    STATUS = "status"
    TEMPERATURE = "temperature_centidegree"
    VELOCITY_BEAM1 = "velocity_beam1"
    VELOCITY_BEAM2 = "velocity_beam2"
    VELOCITY_BEAM3 = "velocity_beam3"
    AMPLITUDE_BEAM1 = "amplitude_beam1"
    AMPLITUDE_BEAM2 = "amplitude_beam2"
    AMPLITUDE_BEAM3 = "amplitude_beam3"
class AquadoppDwVelocityAsciiDataParticle(DataParticle):
    """
    Routine for parsing velocity data into a data particle structure for the Aquadopp DW sensor.

    Consumes one 25-field ASCII record (see VELOCITY_DATA_PATTERN) and
    rescales each field to the integer units used by the live driver's
    binary particles.
    """
    _data_particle_type = NortekDataParticleType.VELOCITY
    # NTP time counts seconds from 1900-01-01.
    ntp_epoch = datetime.datetime(1900, 1, 1)

    def _build_parsed_values(self):
        """
        Take the velocity data sample and parse it into values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            # NOTE(review): the record is unpacked month-first here, yet the
            # rebuilt timestamp string below renders day-first -- confirm the
            # instrument's actual field order against the ASCII format spec.
            # analog2, speed and direction are parsed but not emitted.
            (month, day, year, hour, minute, second, error_code, status_code, velocity_beam1,
             velocity_beam2, velocity_beam3, amplitude_beam1, amplitude_beam2, amplitude_beam3,
             battery_voltage, sound_speed, heading, pitch, roll, pressure, temperature,
             analog1, analog2, speed, direction) = self.raw_data.split()
            day, month, year, hour, minute, second = int(day), int(month), int(year), int(hour), int(minute), int(second)
            ntp_timestamp = (datetime.datetime(year, month, day, hour, minute, second) - self.ntp_epoch).total_seconds()
            self.set_internal_timestamp(ntp_timestamp)

            # normally we don't adjust any data in a parser
            # this is a special case so that we can keep producing the same
            # stream from this instrument between the playback and live data
            timestamp = '%02d/%02d/%02d %02d:%02d:%02d' % (day, month, year, hour, minute, second)
            error_code = int(error_code)
            status_code = int(status_code)
            velocity_beam1 = int(float(velocity_beam1) * 1000)  # m/s to mm/s
            velocity_beam2 = int(float(velocity_beam2) * 1000)  # m/s to mm/s
            velocity_beam3 = int(float(velocity_beam3) * 1000)  # m/s to mm/s
            amplitude_beam1 = int(amplitude_beam1)
            amplitude_beam2 = int(amplitude_beam2)
            amplitude_beam3 = int(amplitude_beam3)
            battery_voltage = int(float(battery_voltage) * 10)  # V to 0.1 V
            sound_speed = int(float(sound_speed) * 10)  # m/s to 0.1 m/s
            heading = int(float(heading) * 10)  # deg to 0.1 deg
            pitch = int(float(pitch) * 10)  # deg to 0.1 deg
            roll = int(float(roll) * 10)  # deg to 0.1 deg
            pressure = int(float(pressure) * 1000)  # dbar to 0.001 dbar
            temperature = int(float(temperature) * 100)  # deg to .01 deg
            analog1 = int(analog1)
        except ValueError:
            # Any non-numeric field invalidates the whole record.
            raise SampleException("Unable to parse fields")

        # Short aliases keep the result table readable.
        VID = DataParticleKey.VALUE_ID
        VAL = DataParticleKey.VALUE
        ADVDPK = AquadoppDwVelocityDataParticleKey

        result = [{VID: ADVDPK.TIMESTAMP, VAL: timestamp},
                  {VID: ADVDPK.ERROR, VAL: error_code},
                  {VID: ADVDPK.ANALOG1, VAL: analog1},
                  {VID: ADVDPK.BATTERY_VOLTAGE, VAL: battery_voltage},
                  {VID: ADVDPK.SOUND_SPEED_ANALOG2, VAL: sound_speed},
                  {VID: ADVDPK.HEADING, VAL: heading},
                  {VID: ADVDPK.PITCH, VAL: pitch},
                  {VID: ADVDPK.ROLL, VAL: roll},
                  {VID: ADVDPK.STATUS, VAL: status_code},
                  {VID: ADVDPK.PRESSURE, VAL: pressure},
                  {VID: ADVDPK.TEMPERATURE, VAL: temperature},
                  {VID: ADVDPK.VELOCITY_BEAM1, VAL: velocity_beam1},
                  {VID: ADVDPK.VELOCITY_BEAM2, VAL: velocity_beam2},
                  {VID: ADVDPK.VELOCITY_BEAM3, VAL: velocity_beam3},
                  {VID: ADVDPK.AMPLITUDE_BEAM1, VAL: amplitude_beam1},
                  {VID: ADVDPK.AMPLITUDE_BEAM2, VAL: amplitude_beam2},
                  {VID: ADVDPK.AMPLITUDE_BEAM3, VAL: amplitude_beam3}]

        return result
###############################################################################
# Protocol
################################################################################
class Protocol(CommandResponseInstrumentProtocol):
    """
    Instrument protocol class
    Subclasses NortekInstrumentProtocol

    Playback variant: there is no live instrument, so it only chunks the
    recorded byte stream and turns matches into velocity particles.
    """
    def __init__(self, driver_event):
        """
        Protocol constructor.
        @param driver_event Driver process event callback.
        """
        # No prompts and no newline are needed for playback.
        CommandResponseInstrumentProtocol.__init__(self, None, None, driver_event)

        # create chunker for processing instrument samples.
        self._chunker = StringChunker(self.sieve_function)

    @classmethod
    def sieve_function(cls, raw_data):
        """
        The method that detects data sample structures from instrument
        Should be in the format [[structure_sync_bytes, structure_len]*]

        Returns (start, end) index pairs for every velocity record found.
        """
        return_list = []
        sieve_matchers = [VELOCITY_DATA_REGEX]

        for matcher in sieve_matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
                log.debug("sieve_function: regex found %r", raw_data[match.start():match.end()])

        return return_list

    ########################################################################
    # overridden superclass methods
    ########################################################################
    def _got_chunk(self, structure, timestamp):
        """
        The base class got_data has gotten a structure from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        self._extract_sample(AquadoppDwVelocityAsciiDataParticle, VELOCITY_DATA_REGEX, structure, timestamp)

    def get_current_state(self):
        # Playback never tracks protocol state; always report UNKNOWN.
        return ProtocolState.UNKNOWN
def create_playback_protocol(callback):
    """Factory: build a playback Protocol reporting events via *callback*."""
    return Protocol(callback)
|
{
"content_hash": "dc65d8f058b251add53de0509267c145",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 121,
"avg_line_length": 41.46560846560847,
"alnum_prop": 0.6034196758963889,
"repo_name": "ronkyo/mi-instrument",
"id": "c7f8580761f04d11af38cfb1e7cdaaa699aa27ac",
"size": "7837",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/instrument/nortek/aquadopp/playback/driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6398834"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.