Dataset schema (32 columns; ⌀ marks nullable columns):

| column | type | observed range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
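A minimal sketch of how rows with this schema could be loaded and inspected, assuming the
records sit in a local Parquet file (the filename data.parquet is hypothetical):

import pandas as pd

# Load the table; column names match the schema above.
df = pd.read_parquet("data.parquet")

# Nullable (⌀) columns such as max_stars_count hold NaN where no event was recorded.
starred = df[df["max_stars_count"].notna()]

# Comparing content with content_no_comment gives a rough comment-density estimate.
density = 1 - starred["content_no_comment"].str.len() / starred["content"].str.len()
print(density.describe())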
hexsha: 7904bbcf146feddc23a5102280062792e9a7b19d
size: 319 | ext: py | lang: Python
max_stars: repo_path=codes/write_csv.py | repo_name=mukul54/Flipkart-Grid-Challenge | head_hexsha=ae193490304c60cfc074e2f31f4db1a0b8e0e0f4 | licenses=["MIT"] | count=11 | stars_event: 2019-07-05T16:32:12.000Z to 2021-12-06T17:10:18.000Z
max_issues: repo_path=codes/write_csv.py | repo_name=mukul54/Flipkart-Grid-Challenge | head_hexsha=ae193490304c60cfc074e2f31f4db1a0b8e0e0f4 | licenses=["MIT"] | count=null | issues_event: null to null
max_forks: repo_path=codes/write_csv.py | repo_name=mukul54/Flipkart-Grid-Challenge | head_hexsha=ae193490304c60cfc074e2f31f4db1a0b8e0e0f4 | licenses=["MIT"] | count=4 | forks_event: 2020-07-01T17:11:56.000Z to 2021-07-10T10:59:36.000Z
content:
import numpy as np
import pandas as pd
X = np.load('preds.npy')
img = pd.read_csv('test.csv')
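# The next four lines rescale normalized predictions to pixel coordinates; the
# 640x480 image size is an assumption read off the constants used below.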
img['x1'] = X[:,0]*640
img['x2'] = X[:,1]*640
img['y1'] = X[:,2]*480
img['y2'] = X[:,3]*480
""" img['x1'] = 0.05*640
img['x2'] = 0.95*640
img['y1'] = 0.05*480
img['y2'] = 0.95*480 """
img.to_csv('subbigles.csv',index = False)
avg_line_length: 24.538462 | max_line_length: 41 | alphanum_fraction: 0.567398
content_no_comment:
import numpy as np
import pandas as pd
X = np.load('preds.npy')
img = pd.read_csv('test.csv')
img['x1'] = X[:,0]*640
img['x2'] = X[:,1]*640
img['y1'] = X[:,2]*480
img['y2'] = X[:,3]*480
img.to_csv('subbigles.csv',index = False)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904bc1044928c7eaf3de1c8ab22b76106971ace
size: 945 | ext: py | lang: Python
max_stars: repo_path=saw-remote-api/python/tests/saw/test_llvm_array_swap.py | repo_name=msaaltink/saw-script | head_hexsha=2e4fc0603da85bb1b188d4739a3386e25eea50ab | licenses=["BSD-3-Clause"] | count=411 | stars_event: 2015-06-09T22:00:47.000Z to 2022-03-30T11:41:23.000Z
max_issues: repo_path=saw-remote-api/python/tests/saw/test_llvm_array_swap.py | repo_name=msaaltink/saw-script | head_hexsha=2e4fc0603da85bb1b188d4739a3386e25eea50ab | licenses=["BSD-3-Clause"] | count=1,151 | issues_event: 2015-06-12T20:46:31.000Z to 2022-03-23T02:56:32.000Z
max_forks: repo_path=saw-remote-api/python/tests/saw/test_llvm_array_swap.py | repo_name=msaaltink/saw-script | head_hexsha=2e4fc0603da85bb1b188d4739a3386e25eea50ab | licenses=["BSD-3-Clause"] | count=65 | forks_event: 2015-06-10T17:52:26.000Z to 2022-02-10T18:17:06.000Z
content:
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, array, array_ty, void, i32
class ArraySwapContract(Contract):
def specification(self):
a0 = self.fresh_var(i32, "a0")
a1 = self.fresh_var(i32, "a1")
a = self.alloc(array_ty(2, i32),
points_to=array(a0, a1))
self.execute_func(a)
self.points_to(a[0], a1)
self.points_to(a[1], a0)
self.returns(void)
class LLVMArraySwapTest(unittest.TestCase):
def test_llvm_array_swap(self):
connect(reset_server=True)
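        # When this file runs as a script, install a LogResults viewer so
        # verification results are printed as they arrive; under unittest this is skipped.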
if __name__ == "__main__": view(LogResults())
bcname = str(Path('tests','saw','test-files', 'llvm_array_swap.bc'))
mod = llvm_load_module(bcname)
result = llvm_verify(mod, 'array_swap', ArraySwapContract())
self.assertIs(result.is_success(), True)
if __name__ == "__main__":
unittest.main()
avg_line_length: 27.794118 | max_line_length: 76 | alphanum_fraction: 0.639153
content_no_comment:
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, array, array_ty, void, i32
class ArraySwapContract(Contract):
def specification(self):
a0 = self.fresh_var(i32, "a0")
a1 = self.fresh_var(i32, "a1")
a = self.alloc(array_ty(2, i32),
points_to=array(a0, a1))
self.execute_func(a)
self.points_to(a[0], a1)
self.points_to(a[1], a0)
self.returns(void)
class LLVMArraySwapTest(unittest.TestCase):
def test_llvm_array_swap(self):
connect(reset_server=True)
if __name__ == "__main__": view(LogResults())
bcname = str(Path('tests','saw','test-files', 'llvm_array_swap.bc'))
mod = llvm_load_module(bcname)
result = llvm_verify(mod, 'array_swap', ArraySwapContract())
self.assertIs(result.is_success(), True)
if __name__ == "__main__":
unittest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904bc701e609458748ccd4a891047655f178468
size: 12,355 | ext: py | lang: Python
max_stars: repo_path=examples/smartquery.py | repo_name=jonatasleon/sqlalchemy-mixins | head_hexsha=a111e69fc5edc5d81a31dca45755f21c8c512ed1 | licenses=["MIT"] | count=1 | stars_event: 2021-01-29T09:09:26.000Z to 2021-01-29T09:09:26.000Z
max_issues: repo_path=examples/smartquery.py | repo_name=AdamGold/sqlalchemy-mixins | head_hexsha=66e87b0835ef27d504c36a1a27d551cfed551d89 | licenses=["MIT"] | count=null | issues_event: null to null
max_forks: repo_path=examples/smartquery.py | repo_name=AdamGold/sqlalchemy-mixins | head_hexsha=66e87b0835ef27d504c36a1a27d551cfed551d89 | licenses=["MIT"] | count=null | forks_event: null to null
content:
from __future__ import print_function
import os
import datetime
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Query, scoped_session, sessionmaker
from sqlalchemy_mixins import SmartQueryMixin, ReprMixin, JOINED, smart_query
def log(msg):
print('\n{}\n'.format(msg))
#################### setup ######################
Base = declarative_base()
# we also use ReprMixin which is optional
class BaseModel(Base, SmartQueryMixin, ReprMixin):
__abstract__ = True
__repr__ = ReprMixin.__repr__
pass
class User(BaseModel):
__tablename__ = 'user'
__repr_attrs__ = ['name']
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
# to smart query relationship, it should be explicitly set,
# not to be a backref
posts = sa.orm.relationship('Post')
comments = sa.orm.relationship('Comment')
# below relationship will just return query (without executing)
# this query can be customized
# see http://docs.sqlalchemy.org/en/latest/orm/collections.html#dynamic-relationship
#
# we will use this relationship for demonstrating real-life example
# of how smart_query() function works (see 3.2.2)
comments_ = sa.orm.relationship('Comment', lazy="dynamic") # this will return query
class Post(BaseModel):
__tablename__ = 'post'
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
archived = sa.Column(sa.Boolean, default=False)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
comments = sa.orm.relationship('Comment')
@hybrid_property
def public(self):
return not self.archived
@public.expression
def public(cls):
return ~cls.archived
@hybrid_method
def is_commented_by_user(cls, user, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
# from sqlalchemy import exists
# return exists().where((Comment.post_id == mapper.id) & \
# (Comment.user_id == user.id))
return mapper.comments.any(Comment.user_id == user.id)
@hybrid_method
def is_public(cls, value, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
return mapper.public == value
class Comment(BaseModel):
__tablename__ = 'comment'
__repr_attrs__ = ['body']
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
post_id = sa.Column(sa.Integer, sa.ForeignKey('post.id'))
rating = sa.Column(sa.Integer)
created_at = sa.Column(sa.DateTime)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
post = sa.orm.relationship('Post')
#################### setup ORM ######################
db_file = os.path.join(os.path.dirname(__file__), 'test.sqlite')
engine = create_engine('sqlite:///{}'.format(db_file), echo=True)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
BaseModel.set_session(session)
#################### setup some data ######################
u1 = User(name='Bill u1')
session.add(u1)
session.commit()
u2 = User(name='Alex u2')
session.add(u2)
session.commit()
u3 = User(name='Bishop u3')
session.add(u3)
session.commit()
session.commit()
p11 = Post(
id=11,
body='1234567890123',
archived=True,
user=u1
)
session.add(p11)
session.commit()
p12 = Post(
id=12,
body='1234567890',
user=u1
)
session.add(p12)
session.commit()
p21 = Post(
id=21,
body='p21',
user=u2
)
session.add(p21)
session.commit()
p22 = Post(
id=22,
body='p22',
user=u2
)
session.add(p22)
session.commit()
cm11 = Comment(
id=11,
body='cm11',
user=u1,
post=p11,
rating=1,
created_at=datetime.datetime(2014, 1, 1)
)
session.add(cm11)
session.commit()
cm12 = Comment(
id=12,
body='cm12',
user=u2,
post=p12,
rating=2,
created_at=datetime.datetime(2015, 10, 20)
)
session.add(cm12)
session.commit()
cm21 = Comment(
id=21,
body='cm21',
user=u1,
post=p21,
rating=1,
created_at=datetime.datetime(2015, 11, 21)
)
session.add(cm21)
session.commit()
cm22 = Comment(
id=22,
body='cm22',
user=u3,
post=p22,
rating=3,
created_at=datetime.datetime(2016, 11, 20)
)
session.add(cm22)
session.commit()
cm_empty = Comment(
id=29,
# no body
# no user
# no post
# no rating
)
session.add(cm_empty)
session.commit()
#################### Demo ######################
# ['id', 'body', 'user_id', 'archived', # normal columns
# 'user', 'comments', # relations
# 'public', # hybrid attributes
# 'is_public', 'is_commented_by_user' # hybrid methods
# ]
log(Post.filterable_attributes)
#### 1. Filters ####
##### 1.1 filter by hybrid_property 'public' #####
# low-level filter_expr()
log(session.query(Post).filter(*Post.filter_expr(user=u1, public=True)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(user=u1, public=True).all())
# you can unpack dict (in real world app you will do this)
filters = {'user': u1, 'public': True}
log(Post.where(**filters).all())
##### 1.2 filter by hybrid_method 'is_commented_by_user' #####
# low-level filter_expr()
log(session.query(Post).filter(
*Post.filter_expr(is_commented_by_user=u1)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(is_commented_by_user=u1).all())
##### 1.3 operators #####
# rating == None
log(Comment.where(rating=None).all()) # cm_empty
log(Comment.where(rating__isnull=2).all()) # cm_empty
# rating == 2
# when no operator, 'exact' operator is assumed
log(Comment.where(rating=2).all()) # cm12
# assumed
log(Comment.where(rating__exact=2).all()) # cm12
# rating > 2
log(Comment.where(rating__gt=2).all()) # cm22
# rating >= 2
log(Comment.where(rating__ge=2).all()) # cm12, cm22
# rating < 2
log(Comment.where(rating__lt=2).all()) # cm11, cm21
# rating <= 2
log(Comment.where(rating__le=2).all()) # cm11, cm12, cm21
# rating in [1,3]
log(Comment.where(rating__in=[1, 3]).all()) # cm11, cm21, cm22
log(Comment.where(rating__in=(1, 3)).all()) # cm11, cm21, cm22
log(Comment.where(rating__in={1, 3}).all()) # cm11, cm21, cm22
# rating between 2 and 3
log(Comment.where(rating__between=[2, 3]).all()) # cm12, cm22
log(Comment.where(rating__between=(2, 3)).all()) # cm12, cm22
# likes
log(Comment.where(body__like=u'cm12 to p12').all()) # cm12
log(Comment.where(body__like='%cm12%').all()) # cm12
log(Comment.where(body__ilike='%CM12%').all()) # cm12
log(Comment.where(body__startswith='cm1').all()) # cm11, cm12
log(Comment.where(body__istartswith='CM1').all()) # cm11, cm12
log(Comment.where(body__endswith='to p12').all()) # cm12
log(Comment.where(body__iendswith='TO P12').all()) # cm12
# dates
# year
log(Comment.where(created_at__year=2014).all()) # cm11
log(Comment.where(created_at__year=2015).all()) # cm12, cm21
# month
log(Comment.where(created_at__month=1).all()) # cm11
log(Comment.where(created_at__month=11).all()) # cm21, cm22
# day
log(Comment.where(created_at__day=1).all()) # cm11
log(Comment.where(created_at__day=20).all()) # cm12, cm22
# whole date
log(Comment.where(created_at__year=2014, created_at__month=1,
created_at__day=1).all()) # cm11
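# For reference, the operator filters above expand to plain SQLAlchemy roughly as
# (an approximate sketch of the equivalences, using the models defined in this file):
#   Comment.where(rating__gt=2)         -> session.query(Comment).filter(Comment.rating > 2)
#   Comment.where(rating__in=[1, 3])    -> session.query(Comment).filter(Comment.rating.in_([1, 3]))
#   Comment.where(body__ilike='%CM12%') -> session.query(Comment).filter(Comment.body.ilike('%CM12%'))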
##### 1.4 where() with auto-joined relations #####
# when there are no joins, where() is a shortcut for filter_expr()
log(session.query(Comment).filter(
*Comment.filter_expr(rating__gt=2, body__startswith='cm1')).all())
log(Comment.where(rating__gt=2, body__startswith='cm1').all())
# but where() can automatically join relations
# users having posts which are commented by user 2
log(User.where(posts___comments___user_id=u2.id).all())
# comments where user name starts with 'Bi'
# !! ATTENTION !!
# about Comment.post:
# although we have Post.comments relationship,
# it's important to **add relationship Comment.post** too,
# not just use backref !!!
log(Comment.where(user___name__startswith='Bi').all())
# non-public posts commented by user 1
log(Post.where(public=False, is_commented_by_user=u1).all())
#### 2. sort ####
#### 2.1 simple demo ####
##### 2.1.1 low-level order_expr()
# '-rating', 'created_at' means 'ORDER BY rating DESC, created_at ASC'
log(session.query(Comment).order_by(
*Comment.order_expr('-rating', 'created_at')).all())
##### 2.1.2 high-level sort()
log(Comment.sort('-rating', 'created_at'))
# in real world apps, you will keep attrs in list
sort_attrs = ['-rating', 'created_at']
log(Comment.sort(*sort_attrs))
##### 2.1.3 hybrid properties
log(session.query(Post).order_by(*Post.order_expr('-public')).all())
log(Post.sort('-public').all())
#### 2.2 sort() with auto-joined relations ####
# sort by name of user ASC (user relation will be auto-joined), then by
# created_at DESC
log(Comment.sort('user___name', '-created_at').all())
# get comments on public posts first, then order by post user name
# Post and User tables will be auto-joined
log(Comment.sort('-post___public', 'post___user___name').all())
#### 3. smart_query() : combination of where(), sort() and eager load ####
schema = {
'post': {
'user': JOINED
}
}
# schema can use class properties too (see EagerLoadMixin):
# schema = {
# Comment.post: {
# Post.user: JOINED
# }
# }
##### 3.1 high-level smart_query() class method #####
res = Comment.smart_query(
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2 more flexible smart_query() function #####
##### 3.2.1. The same as 3.1
query = Comment.query # could be any query you want
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2.2. Real-life example with lazy='dynamic' relationship
# let's imagine we want to display some user relations
# and flexibly filter, sort and eagerload them
# like this http://www.qopy.me/LwfSCu_ETM6At6el8wlbYA
# (no sort on screenshot, but you've got the idea)
# so we have a user
user = session.query(User).first()
# and we have initial query for his/her comments
# (see User.comments_ relationship)
query = user.comments_
# now we just smartly apply all filters, sorts and eagerload. Perfect!
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm21
##### 3.3 auto eager load in where() and sort() with auto-joined relations ####
"""
Smart_query does auto-joins for filtering/sorting,
so there's a sense to tell sqlalchemy that we alreeady joined that relation
So we test that relations are set to be joinedload
if they were used in smart_query()
"""
##### 3.3.1 where()
# comments on public posts where posted user name like ...
res = Comment.where(post___public=True, post___user___name__like='Bi%').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
##### 3.3.2 sort()
res = Comment.sort('-post___public', 'post___user___name').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
avg_line_length: 28.272311 | max_line_length: 88 | alphanum_fraction: 0.671712
content_no_comment:
from __future__ import print_function
import os
import datetime
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Query, scoped_session, sessionmaker
from sqlalchemy_mixins import SmartQueryMixin, ReprMixin, JOINED, smart_query
def log(msg):
print('\n{}\n'.format(msg))
...
@hybrid_method
def is_commented_by_user(cls, user, mapper=None):
mapper = mapper or cls
return mapper.comments.any(Comment.user_id == user.id)
@hybrid_method
def is_public(cls, value, mapper=None):
mapper = mapper or cls
return mapper.public == value
class Comment(BaseModel):
__tablename__ = 'comment'
__repr_attrs__ = ['body']
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
post_id = sa.Column(sa.Integer, sa.ForeignKey('post.id'))
rating = sa.Column(sa.Integer)
created_at = sa.Column(sa.DateTime)
user = sa.orm.relationship('User')
post = sa.orm.relationship('Post')
...
# in real world apps, you will keep attrs in list
sort_attrs = ['-rating', 'created_at']
log(Comment.sort(*sort_attrs))
##### 2.1.3 hybrid properties
log(session.query(Post).order_by(*Post.order_expr('-public')).all())
log(Post.sort('-public').all())
#### 2.2 sort() with auto-joined relations ####
# sort by name of user ASC (user relation will be auto-joined), then by
# created_at DESC
log(Comment.sort('user___name', '-created_at').all())
# get comments on public posts first, then order by post user name
# Post and User tables will be auto-joined
log(Comment.sort('-post___public', 'post___user___name').all())
#### 3. smart_query() : combination of where(), sort() and eager load ####
schema = {
'post': {
'user': JOINED
}
}
# schema can use class properties too (see EagerLoadMixin):
# schema = {
# Comment.post: {
# Post.user: JOINED
# }
# }
##### 3.1 high-level smart_query() class method #####
res = Comment.smart_query(
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2 more flexible smart_query() function #####
##### 3.2.1. The same as 3.1
query = Comment.query # could be any query you want
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2.2. Real-life example with lazy='dynamic' relationship
# let's imagine we want to display some user relations
# so we have a user
user = session.query(User).first()
# and we have initial query for his/her comments
# (see User.comments_ relationship)
query = user.comments_
# now we just smartly apply all filters, sorts and eagerload. Perfect!
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm21
##### 3.3 auto eager load in where() and sort() with auto-joined relations ####
##### 3.3.1 where()
# comments on public posts where posted user name like ...
res = Comment.where(post___public=True, post___user___name__like='Bi%').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
...
res = Comment.sort('-post___public', 'post___user___name').all()
log(res)
log(res[0].post)
log(res[0].post.user)
log(res[0].post.comments)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904bcf6cc1b735d27febe6bd6936266ac2347d7
size: 6,699 | ext: py | lang: Python
max_stars: repo_path=tests/integration/test_catalog.py | repo_name=fredj/mf-chsdi3 | head_hexsha=79dfb5e850432fad95a34520b002ab0a421170b1 | licenses=["BSD-3-Clause"] | count=null | stars_event: null to null
max_issues: repo_path=tests/integration/test_catalog.py | repo_name=fredj/mf-chsdi3 | head_hexsha=79dfb5e850432fad95a34520b002ab0a421170b1 | licenses=["BSD-3-Clause"] | count=null | issues_event: null to null
max_forks: repo_path=tests/integration/test_catalog.py | repo_name=fredj/mf-chsdi3 | head_hexsha=79dfb5e850432fad95a34520b002ab0a421170b1 | licenses=["BSD-3-Clause"] | count=null | forks_event: null to null
content:
# -*- coding: utf-8 -*-
from tests.integration import TestsBase
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
from chsdi.views.catalog import create_digraph
from chsdi.lib.filters import filter_by_geodata_staging
class TestCatalogService(TestsBase):
def test_nodes_connection(self):
try:
geodata_staging = self.testapp.app.registry.settings['geodata_staging']
session = scoped_session(sessionmaker())
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
query = session.query(Catalog).filter(Catalog.topic == topic)\
.order_by(Catalog.orderKey)
query = filter_by_geodata_staging(query, Catalog.staging, geodata_staging)
rows = query.all()
if (rows):
graph, meta, root_id = create_digraph(rows, 'fr')
nodes = graph.nodes()
if len(nodes) != len(rows):
for row in rows:
if row.id not in nodes:
raise Exception('%s %s %s is unconnected leaf' % (topic, row.category, row.layerBodId))
finally:
if session:
session.close()
def test_catalog_no_params(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', status=200)
self.assertTrue(resp.content_type == 'application/json')
self.assertTrue('root' in resp.json['results'])
self.assertTrue('children' in resp.json['results']['root'])
self.assertTrue('selectedOpen' in resp.json['results']['root']['children'][0])
self.assertTrue('category' in resp.json['results']['root'])
def test_catalog_with_callback(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_catalog_existing_map_no_catalog(self):
self.testapp.get('/rest/services/all/CatalogServer', status=404)
def test_catalog_wrong_map(self):
self.testapp.get('/rest/services/foo/CatalogServer', status=400)
def test_catalog_ordering(self):
resp = self.testapp.get('/rest/services/inspire/CatalogServer', params={'lang': 'en'}, status=200)
self.assertEqual(resp.content_type, 'application/json')
self.assertTrue('AGNES' in resp.json['results']['root']['children'][0]['children'][0]['children'][0]['label'])
self.assertTrue('Geoid in CH1903' in resp.json['results']['root']['children'][0]['children'][0]['children'][1]['label'])
def test_catalog_languages(self):
for lang in ('de', 'fr', 'it', 'rm', 'en'):
link = '/rest/services/ech/CatalogServer?lang=' + lang
resp = self.testapp.get(link)
self.assertEqual(resp.status_int, 200, link)
def test_layersconfig_with_callback(self):
resp = self.testapp.get('/rest/services/blw/MapServer/layersConfig', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_all_catalogs(self):
def existInList(node, l):
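            # recursively verify that every node id in the catalog tree appears in
            # the flat list of DB entries l; prints the first missing id it finds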
found = False
for entry in l:
if entry.id == node.get('id'):
found = True
break
if not found:
print node.get('id')
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
# We fix staging for next calls to prod
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
# Get catalog
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
# Get flat catalog table entries
query = DBSession.query(Catalog).filter(Catalog.topic == topic).filter(Catalog.staging == u'prod')
entries = query.all()
# Check if every node in the catalog is in view_catalog of db
self.assertTrue(existInList(catalog.json['results']['root'], entries))
finally:
# reset staging to previous setting
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
def test_catalogs_with_layersconfig(self):
def existInList(node, l):
if node.get('category') != 'layer':
return True
found = False
for entry in l:
if entry == node.get('layerBodId'):
found = True
break
if not found:
print node.get('layerBodId')
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
# We fix staging for next calls to prod
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
# Get catalog
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
# Get LayersConfig for this topic
layersconf = self.testapp.get('/rest/services/' + topic + '/MapServer/layersConfig', status=200)
# Check if all layers of catalog are in LayersConfig
self.assertTrue(existInList(catalog.json['results']['root'], layersconf.json), 'For Topic: ' + topic)
finally:
# reset staging to previous setting
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
avg_line_length: 44.072368 | max_line_length: 128 | alphanum_fraction: 0.587252
content_no_comment:
from tests.integration import TestsBase
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
from chsdi.views.catalog import create_digraph
from chsdi.lib.filters import filter_by_geodata_staging
class TestCatalogService(TestsBase):
def test_nodes_connection(self):
try:
geodata_staging = self.testapp.app.registry.settings['geodata_staging']
session = scoped_session(sessionmaker())
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
query = session.query(Catalog).filter(Catalog.topic == topic)\
.order_by(Catalog.orderKey)
query = filter_by_geodata_staging(query, Catalog.staging, geodata_staging)
rows = query.all()
if (rows):
graph, meta, root_id = create_digraph(rows, 'fr')
nodes = graph.nodes()
if len(nodes) != len(rows):
for row in rows:
if row.id not in nodes:
raise Exception('%s %s %s is unconnected leaf' % (topic, row.category, row.layerBodId))
finally:
if session:
session.close()
def test_catalog_no_params(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', status=200)
self.assertTrue(resp.content_type == 'application/json')
self.assertTrue('root' in resp.json['results'])
self.assertTrue('children' in resp.json['results']['root'])
self.assertTrue('selectedOpen' in resp.json['results']['root']['children'][0])
self.assertTrue('category' in resp.json['results']['root'])
def test_catalog_with_callback(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_catalog_existing_map_no_catalog(self):
self.testapp.get('/rest/services/all/CatalogServer', status=404)
def test_catalog_wrong_map(self):
self.testapp.get('/rest/services/foo/CatalogServer', status=400)
def test_catalog_ordering(self):
resp = self.testapp.get('/rest/services/inspire/CatalogServer', params={'lang': 'en'}, status=200)
self.assertEqual(resp.content_type, 'application/json')
self.assertTrue('AGNES' in resp.json['results']['root']['children'][0]['children'][0]['children'][0]['label'])
self.assertTrue('Geoid in CH1903' in resp.json['results']['root']['children'][0]['children'][0]['children'][1]['label'])
def test_catalog_languages(self):
for lang in ('de', 'fr', 'it', 'rm', 'en'):
link = '/rest/services/ech/CatalogServer?lang=' + lang
resp = self.testapp.get(link)
self.assertEqual(resp.status_int, 200, link)
def test_layersconfig_with_callback(self):
resp = self.testapp.get('/rest/services/blw/MapServer/layersConfig', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_all_catalogs(self):
def existInList(node, l):
found = False
for entry in l:
if entry.id == node.get('id'):
found = True
break
if not found:
print node.get('id')
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
query = DBSession.query(Catalog).filter(Catalog.topic == topic).filter(Catalog.staging == u'prod')
entries = query.all()
self.assertTrue(existInList(catalog.json['results']['root'], entries))
finally:
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
def test_catalogs_with_layersconfig(self):
def existInList(node, l):
if node.get('category') != 'layer':
return True
found = False
for entry in l:
if entry == node.get('layerBodId'):
found = True
break
if not found:
print node.get('layerBodId')
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
layersconf = self.testapp.get('/rest/services/' + topic + '/MapServer/layersConfig', status=200)
self.assertTrue(existInList(catalog.json['results']['root'], layersconf.json), 'For Topic: ' + topic)
finally:
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 7904be24adff428e290a40dacede3ed68363bedd
size: 3,783 | ext: py | lang: Python
max_stars: repo_path=deepflash2/_nbdev.py | repo_name=adriHei/deepflash2 | head_hexsha=82d2fd56f24a995b5c7e301c9c8b3d7b63430414 | licenses=["Apache-2.0"] | count=null | stars_event: null to null
max_issues: repo_path=deepflash2/_nbdev.py | repo_name=adriHei/deepflash2 | head_hexsha=82d2fd56f24a995b5c7e301c9c8b3d7b63430414 | licenses=["Apache-2.0"] | count=null | issues_event: null to null
max_forks: repo_path=deepflash2/_nbdev.py | repo_name=adriHei/deepflash2 | head_hexsha=82d2fd56f24a995b5c7e301c9c8b3d7b63430414 | licenses=["Apache-2.0"] | count=null | forks_event: null to null
content:
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Config": "00_learner.ipynb",
"energy_score": "00_learner.ipynb",
"EnsemblePredict": "00_learner.ipynb",
"EnsembleLearner": "00_learner.ipynb",
"ARCHITECTURES": "01_models.ipynb",
"ENCODERS": "01_models.ipynb",
"get_pretrained_options": "01_models.ipynb",
"create_smp_model": "01_models.ipynb",
"save_smp_model": "01_models.ipynb",
"load_smp_model": "01_models.ipynb",
"show": "02_data.ipynb",
"preprocess_mask": "02_data.ipynb",
"DeformationField": "02_data.ipynb",
"BaseDataset": "02_data.ipynb",
"RandomTileDataset": "02_data.ipynb",
"TileDataset": "02_data.ipynb",
"Dice": "03_metrics.ipynb",
"Iou": "03_metrics.ipynb",
"Recorder.plot_metrics": "03_metrics.ipynb",
"LOSSES": "05_losses.ipynb",
"FastaiLoss": "05_losses.ipynb",
"WeightedLoss": "05_losses.ipynb",
"JointLoss": "05_losses.ipynb",
"get_loss": "05_losses.ipynb",
"unzip": "06_utils.ipynb",
"install_package": "06_utils.ipynb",
"import_package": "06_utils.ipynb",
"compose_albumentations": "06_utils.ipynb",
"ensemble_results": "06_utils.ipynb",
"plot_results": "06_utils.ipynb",
"iou": "06_utils.ipynb",
"label_mask": "06_utils.ipynb",
"get_candidates": "06_utils.ipynb",
"iou_mapping": "06_utils.ipynb",
"calculate_roi_measures": "06_utils.ipynb",
"export_roi_set": "06_utils.ipynb",
"calc_iterations": "06_utils.ipynb",
"get_label_fn": "06_utils.ipynb",
"save_mask": "06_utils.ipynb",
"save_unc": "06_utils.ipynb",
"rot90": "07_tta.ipynb",
"hflip": "07_tta.ipynb",
"vflip": "07_tta.ipynb",
"BaseTransform": "07_tta.ipynb",
"Chain": "07_tta.ipynb",
"Transformer": "07_tta.ipynb",
"Compose": "07_tta.ipynb",
"Merger": "07_tta.ipynb",
"HorizontalFlip": "07_tta.ipynb",
"VerticalFlip": "07_tta.ipynb",
"Rotate90": "07_tta.ipynb",
"GRID_COLS": "08_gui.ipynb",
"set_css_in_cell_output": "08_gui.ipynb",
"tooltip_css": "08_gui.ipynb",
"ZipUpload": "08_gui.ipynb",
"ItemsPerPage": "08_gui.ipynb",
"BaseParamWidget": "08_gui.ipynb",
"BaseUI": "08_gui.ipynb",
"PathSelector": "08_gui.ipynb",
"PathDownloads": "08_gui.ipynb",
"PathConfig": "08_gui.ipynb",
"GTDataSB": "08_gui.ipynb",
"GTEstSB": "08_gui.ipynb",
"GTEstUI": "08_gui.ipynb",
"TrainDataSB": "08_gui.ipynb",
"TrainModelSB": "08_gui.ipynb",
"TrainValidSB": "08_gui.ipynb",
"LRWidget": "08_gui.ipynb",
"BasePopUpParamWidget": "08_gui.ipynb",
"ParamWidget": "08_gui.ipynb",
"MWWidget": "08_gui.ipynb",
"TrainUI": "08_gui.ipynb",
"PredInputSB": "08_gui.ipynb",
"PredSB": "08_gui.ipynb",
"PredUI": "08_gui.ipynb",
"GUI": "08_gui.ipynb",
"import_sitk": "09_gt.ipynb",
"staple": "09_gt.ipynb",
"m_voting": "09_gt.ipynb",
"msk_show": "09_gt.ipynb",
"GTEstimator": "09_gt.ipynb"}
modules = ["learner.py",
"models.py",
"data.py",
"metrics.py",
"losses.py",
"utils.py",
"tta.py",
"gui.py",
"gt.py"]
doc_url = "https://matjesg.github.io/deepflash2/"
git_url = "https://github.com/matjesg/deepflash2/tree/master/"
def custom_doc_links(name): return None
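A small sketch inverting the autogenerated index above to list the names defined per
notebook (plain dict manipulation; it assumes only the index dict from this file):

from collections import defaultdict

per_notebook = defaultdict(list)
for name, notebook in index.items():
    per_notebook[notebook].append(name)
for notebook, names in sorted(per_notebook.items()):
    print(notebook, "->", len(names), "names")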
avg_line_length: 37.088235 | max_line_length: 62 | alphanum_fraction: 0.560666
content_no_comment:
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Config": "00_learner.ipynb",
"energy_score": "00_learner.ipynb",
"EnsemblePredict": "00_learner.ipynb",
"EnsembleLearner": "00_learner.ipynb",
"ARCHITECTURES": "01_models.ipynb",
"ENCODERS": "01_models.ipynb",
"get_pretrained_options": "01_models.ipynb",
"create_smp_model": "01_models.ipynb",
"save_smp_model": "01_models.ipynb",
"load_smp_model": "01_models.ipynb",
"show": "02_data.ipynb",
"preprocess_mask": "02_data.ipynb",
"DeformationField": "02_data.ipynb",
"BaseDataset": "02_data.ipynb",
"RandomTileDataset": "02_data.ipynb",
"TileDataset": "02_data.ipynb",
"Dice": "03_metrics.ipynb",
"Iou": "03_metrics.ipynb",
"Recorder.plot_metrics": "03_metrics.ipynb",
"LOSSES": "05_losses.ipynb",
"FastaiLoss": "05_losses.ipynb",
"WeightedLoss": "05_losses.ipynb",
"JointLoss": "05_losses.ipynb",
"get_loss": "05_losses.ipynb",
"unzip": "06_utils.ipynb",
"install_package": "06_utils.ipynb",
"import_package": "06_utils.ipynb",
"compose_albumentations": "06_utils.ipynb",
"ensemble_results": "06_utils.ipynb",
"plot_results": "06_utils.ipynb",
"iou": "06_utils.ipynb",
"label_mask": "06_utils.ipynb",
"get_candidates": "06_utils.ipynb",
"iou_mapping": "06_utils.ipynb",
"calculate_roi_measures": "06_utils.ipynb",
"export_roi_set": "06_utils.ipynb",
"calc_iterations": "06_utils.ipynb",
"get_label_fn": "06_utils.ipynb",
"save_mask": "06_utils.ipynb",
"save_unc": "06_utils.ipynb",
"rot90": "07_tta.ipynb",
"hflip": "07_tta.ipynb",
"vflip": "07_tta.ipynb",
"BaseTransform": "07_tta.ipynb",
"Chain": "07_tta.ipynb",
"Transformer": "07_tta.ipynb",
"Compose": "07_tta.ipynb",
"Merger": "07_tta.ipynb",
"HorizontalFlip": "07_tta.ipynb",
"VerticalFlip": "07_tta.ipynb",
"Rotate90": "07_tta.ipynb",
"GRID_COLS": "08_gui.ipynb",
"set_css_in_cell_output": "08_gui.ipynb",
"tooltip_css": "08_gui.ipynb",
"ZipUpload": "08_gui.ipynb",
"ItemsPerPage": "08_gui.ipynb",
"BaseParamWidget": "08_gui.ipynb",
"BaseUI": "08_gui.ipynb",
"PathSelector": "08_gui.ipynb",
"PathDownloads": "08_gui.ipynb",
"PathConfig": "08_gui.ipynb",
"GTDataSB": "08_gui.ipynb",
"GTEstSB": "08_gui.ipynb",
"GTEstUI": "08_gui.ipynb",
"TrainDataSB": "08_gui.ipynb",
"TrainModelSB": "08_gui.ipynb",
"TrainValidSB": "08_gui.ipynb",
"LRWidget": "08_gui.ipynb",
"BasePopUpParamWidget": "08_gui.ipynb",
"ParamWidget": "08_gui.ipynb",
"MWWidget": "08_gui.ipynb",
"TrainUI": "08_gui.ipynb",
"PredInputSB": "08_gui.ipynb",
"PredSB": "08_gui.ipynb",
"PredUI": "08_gui.ipynb",
"GUI": "08_gui.ipynb",
"import_sitk": "09_gt.ipynb",
"staple": "09_gt.ipynb",
"m_voting": "09_gt.ipynb",
"msk_show": "09_gt.ipynb",
"GTEstimator": "09_gt.ipynb"}
modules = ["learner.py",
"models.py",
"data.py",
"metrics.py",
"losses.py",
"utils.py",
"tta.py",
"gui.py",
"gt.py"]
doc_url = "https://matjesg.github.io/deepflash2/"
git_url = "https://github.com/matjesg/deepflash2/tree/master/"
def custom_doc_links(name): return None
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904be815a4da7f8005b8d1074a6f8c7ceb79908
size: 6,290 | ext: py | lang: Python
max_stars: repo_path=tools/codegen/codegen_checker.py | repo_name=MarouenMechtri/accords-platform-1 | head_hexsha=4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | licenses=["Apache-2.0"] | count=1 | stars_event: 2015-02-28T21:25:54.000Z to 2015-02-28T21:25:54.000Z
max_issues: repo_path=tools/codegen/codegen_checker.py | repo_name=MarouenMechtri/accords-platform-1 | head_hexsha=4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | licenses=["Apache-2.0"] | count=null | issues_event: null to null
max_forks: repo_path=tools/codegen/codegen_checker.py | repo_name=MarouenMechtri/accords-platform-1 | head_hexsha=4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | licenses=["Apache-2.0"] | count=null | forks_event: null to null
content:
#!/usr/bin/python
# encoding: utf-8
'''
codegen_checker.codegen -- shortdesc
codegen_checker.codegen is a description
It defines classes_and_methods
@author: user_name
@copyright: 2013 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import glob
import re
# Local imports
import logger
__all__ = []
__version__ = 0.1
__date__ = '2013-03-28'
__updated__ = '2013-03-28'
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def get_class( kls ):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
def readVars(filename):
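    # Build {member_name: declared_type} from lines like "  unsigned int count;".
    # Note all whitespace is stripped from the captured type (e.g. "unsignedint").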
result = {}
# Prepare a whitespace remover
wsr = re.compile(r'\s+')
# Read the file in a line at a time
for line in open(filename):
m = re.match("^\s(.+)\s(\S+);$", line)
if m:
result[m.group(2)] = re.sub(wsr, "", m.group(1))
return result
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by user_name on %s.
Copyright 2013 organization_name. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
EXAMPLE
./tools/codegen/codegen_checker.py -m ./build/tools/strukt_autogen/ -s ./
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]")
parser.add_argument("-m", "--model", dest="model", required=True, help="Model directory")
parser.add_argument("-s", "--src", dest="src", required=True, help="Source directory")
parser.add_argument('-V', '--version', action='version', version=program_version_message)
# Process arguments
args = parser.parse_args()
log = logger.logger(args.verbose)
modelDir = args.model
srcDir = args.src
log.write("Verbose mode on")
# Get the directories in canonical form
modelDir = os.path.abspath(modelDir) + "/"
srcDir = os.path.abspath(srcDir) + "/"
# Find all the model files
modelFiles = []
modelFiles.extend(glob.glob(modelDir+"*.h"))
for modelFile in modelFiles:
# Get the basename
filename = os.path.basename(modelFile)
# Try to find the existing header file in the usual place.
potentialMatches = []
potentialMatches.extend(glob.glob(srcDir + "*/src/" + filename))
# Try to find the existing header file in the usual place but with underscores removed from the file name.
if (len(potentialMatches) == 0):
potentialMatches.extend(glob.glob(srcDir + "*/src/" + re.sub('_', '', filename)))
if (len(potentialMatches) == 0):
print "No matches for " + modelFile
for potentialMatch in potentialMatches:
output = []
# Parse the generated model file
modelVars = readVars(modelFile)
# Parse the header file
headerVars = readVars(potentialMatch)
# Compare variables, first starting with ones that are in the model file.
keysToRemove = []
for modelVar in modelVars:
# Is it in the header file?
if (modelVar in headerVars):
if (modelVars[modelVar] != headerVars[modelVar]):
output.append(" " + "Difference: " + modelFile + ":" + modelVar + " is of type " + modelVars[modelVar] + " but " + potentialMatch + ":" + modelVar + " is of type " + headerVars[modelVar])
keysToRemove.append(modelVar)
# Remove keys that we have processed
for key in keysToRemove:
if (key in modelVars):
del modelVars[key]
if (key in headerVars):
del headerVars[key]
# Output missing vars
for modelVar in modelVars:
output.append(" " + modelFile + ":" + modelVar + " is not in " + potentialMatch)
for headerVar in headerVars:
output.append(" " + potentialMatch + ":" + headerVar + " is not in " + modelFile)
if (len(output) > 0):
print "Comparing " + modelFile + " with " + potentialMatch
for line in output:
print line
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
# except Exception, e:
# indent = len(program_name) * " "
# sys.stderr.write(program_name + ": " + repr(e) + "\n")
# sys.stderr.write(indent + " for help use --help")
# return 2
if __name__ == "__main__":
sys.exit(main())
avg_line_length: 33.636364 | max_line_length: 218 | alphanum_fraction: 0.56248
content_no_comment:
'''
codegen_checker.codegen -- shortdesc
codegen_checker.codegen is a description
It defines classes_and_methods
@author: user_name
@copyright: 2013 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import glob
import re
import logger
__all__ = []
__version__ = 0.1
__date__ = '2013-03-28'
__updated__ = '2013-03-28'
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def get_class( kls ):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
def readVars(filename):
result = {}
wsr = re.compile(r'\s+')
for line in open(filename):
m = re.match("^\s(.+)\s(\S+);$", line)
if m:
result[m.group(2)] = re.sub(wsr, "", m.group(1))
return result
def main(argv=None):
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by user_name on %s.
Copyright 2013 organization_name. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
EXAMPLE
./tools/codegen/codegen_checker.py -m ./build/tools/strukt_autogen/ -s ./
USAGE
''' % (program_shortdesc, str(__date__))
try:
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]")
parser.add_argument("-m", "--model", dest="model", required=True, help="Model directory")
parser.add_argument("-s", "--src", dest="src", required=True, help="Source directory")
parser.add_argument('-V', '--version', action='version', version=program_version_message)
args = parser.parse_args()
log = logger.logger(args.verbose)
modelDir = args.model
srcDir = args.src
log.write("Verbose mode on")
modelDir = os.path.abspath(modelDir) + "/"
srcDir = os.path.abspath(srcDir) + "/"
modelFiles = []
modelFiles.extend(glob.glob(modelDir+"*.h"))
for modelFile in modelFiles:
filename = os.path.basename(modelFile)
potentialMatches = []
potentialMatches.extend(glob.glob(srcDir + "*/src/" + filename))
if (len(potentialMatches) == 0):
potentialMatches.extend(glob.glob(srcDir + "*/src/" + re.sub('_', '', filename)))
if (len(potentialMatches) == 0):
print "No matches for " + modelFile
for potentialMatch in potentialMatches:
output = []
modelVars = readVars(modelFile)
headerVars = readVars(potentialMatch)
keysToRemove = []
for modelVar in modelVars:
if (modelVar in headerVars):
if (modelVars[modelVar] != headerVars[modelVar]):
output.append(" " + "Difference: " + modelFile + ":" + modelVar + " is of type " + modelVars[modelVar] + " but " + potentialMatch + ":" + modelVar + " is of type " + headerVars[modelVar])
keysToRemove.append(modelVar)
for key in keysToRemove:
if (key in modelVars):
del modelVars[key]
if (key in headerVars):
del headerVars[key]
for modelVar in modelVars:
output.append(" " + modelFile + ":" + modelVar + " is not in " + potentialMatch)
for headerVar in headerVars:
output.append(" " + potentialMatch + ":" + headerVar + " is not in " + modelFile)
if (len(output) > 0):
print "Comparing " + modelFile + " with " + potentialMatch
for line in output:
print line
return 0
except KeyboardInterrupt:
        return 0
if __name__ == "__main__":
    sys.exit(main())
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 7904be896d324c4f2ca4912704cfb6ef95503def
size: 1,679 | ext: py | lang: Python
max_stars: repo_path=graphium/graph_management/model/osm_highway_types.py | repo_name=graphium-project/graphium-qgis-plugin | head_hexsha=480e90dc874522b4d4d36b0d7b909ef3144da8b2 | licenses=["Apache-2.0"] | count=1 | stars_event: 2020-07-11T10:28:33.000Z to 2020-07-11T10:28:33.000Z
max_issues: repo_path=graphium/graph_management/model/osm_highway_types.py | repo_name=graphium-project/graphium-qgis-plugin | head_hexsha=480e90dc874522b4d4d36b0d7b909ef3144da8b2 | licenses=["Apache-2.0"] | count=null | issues_event: null to null
max_forks: repo_path=graphium/graph_management/model/osm_highway_types.py | repo_name=graphium-project/graphium-qgis-plugin | head_hexsha=480e90dc874522b4d4d36b0d7b909ef3144da8b2 | licenses=["Apache-2.0"] | count=null | forks_event: null to null
content:
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QGIS plugin 'Graphium'
/***************************************************************************
*
* Copyright 2020 Simon Gröchenig @ Salzburg Research
* eMail graphium@salzburgresearch.at
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************************/
"""
from enum import Enum
class OsmHighwayTypes(Enum):
MOTORWAY = 'motorway'
MOTORWAY_LINK = 'motorway_link'
TRUNK = 'trunk'
TRUNK_LINK = 'trunk_link'
PRIMARY = 'primary'
PRIMARY_LINK = 'primary_link'
SECONDARY = 'secondary'
SECONDARY_LINK = 'secondary_link'
TERTIARY = 'tertiary'
TERTIARY_LINK = 'tertiary_link'
UNCLASSIFIED = 'unclassified'
RESIDENTIAL = 'residential'
LIVING_STREET = 'living_street'
SERVICE = 'service'
PEDESTRIAN = 'pedestrian'
TRACK = 'track'
BUS_GUIDEWAY = 'bus_guideway'
FOOTWAY = 'footway'
BRIDLEWAY = 'bridleway'
STEPS = 'steps'
CORRIDOR = 'corridor'
PATH = 'path'
SIDEWALK = 'sidewalk'
CYCLEWAY = 'cycleway'
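A short usage sketch for the enum above (standard enum behaviour, nothing plugin-specific):

OsmHighwayTypes.MOTORWAY.value # 'motorway'
OsmHighwayTypes('primary') # reverse lookup by OSM tag value gives OsmHighwayTypes.PRIMARY
[t.value for t in OsmHighwayTypes] # all highway tag values the plugin models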
avg_line_length: 31.092593 | max_line_length: 77 | alphanum_fraction: 0.59321
content_no_comment:
from enum import Enum
class OsmHighwayTypes(Enum):
MOTORWAY = 'motorway'
MOTORWAY_LINK = 'motorway_link'
TRUNK = 'trunk'
TRUNK_LINK = 'trunk_link'
PRIMARY = 'primary'
PRIMARY_LINK = 'primary_link'
SECONDARY = 'secondary'
SECONDARY_LINK = 'secondary_link'
TERTIARY = 'tertiary'
TERTIARY_LINK = 'tertiary_link'
UNCLASSIFIED = 'unclassified'
RESIDENTIAL = 'residential'
LIVING_STREET = 'living_street'
SERVICE = 'service'
PEDESTRIAN = 'pedestrian'
TRACK = 'track'
BUS_GUIDEWAY = 'bus_guideway'
FOOTWAY = 'footway'
BRIDLEWAY = 'bridleway'
STEPS = 'steps'
CORRIDOR = 'corridor'
PATH = 'path'
SIDEWALK = 'sidewalk'
CYCLEWAY = 'cycleway'
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904be955bb0568c7d67565f9d85f85bd3467034
size: 1,970 | ext: py | lang: Python
max_stars: repo_path=producer/main.py | repo_name=jasonwyatt/docker-rabbitmq-demo | head_hexsha=0a4f1f99d4dd168be2e97187f8e86c64d28fdfa8 | licenses=["WTFPL"] | count=4 | stars_event: 2017-08-16T01:13:46.000Z to 2018-09-06T13:58:39.000Z
max_issues: repo_path=producer/main.py | repo_name=jasonwyatt/docker-rabbitmq-demo | head_hexsha=0a4f1f99d4dd168be2e97187f8e86c64d28fdfa8 | licenses=["WTFPL"] | count=null | issues_event: null to null
max_forks: repo_path=producer/main.py | repo_name=jasonwyatt/docker-rabbitmq-demo | head_hexsha=0a4f1f99d4dd168be2e97187f8e86c64d28fdfa8 | licenses=["WTFPL"] | count=1 | forks_event: 2018-08-30T15:06:59.000Z to 2018-08-30T15:06:59.000Z
content:
import pika
import os
import logging
import json
import time
import random
import sys
RABBIT_MQ_HOST = os.environ.get('RABBITMQ_PORT_5672_TCP_ADDR')
RABBIT_MQ_PASS = os.environ.get('RABBITMQ_PASS')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug(os.environ)
def start_producing():
logger.info("Connecting to %s:%s" % (RABBIT_MQ_HOST, 5672))
credentials = pika.PlainCredentials('admin', RABBIT_MQ_PASS)
parameters = pika.ConnectionParameters(RABBIT_MQ_HOST,
5672,
'/',
credentials)
start_time = time.time()
while True:
# wait for rabbitmq
try:
connection = pika.BlockingConnection(parameters)
break
except pika.exceptions.AMQPConnectionError:
logger.warn('Cannot connect yet, sleeping 5 seconds.')
time.sleep(5)
if time.time() - start_time > 60:
                logger.error('Could not connect after 60 seconds.')
exit(1)
channel = connection.channel()
channel.queue_declare('jobs_queue', durable=True)
while True:
job = {
'operation': random.choice(['add', 'subtract', 'multiply', 'divide']),
'left': random.choice(range(1000)),
'right': random.choice(range(1,1000)),
}
channel.basic_publish(exchange='',
routing_key='jobs_queue',
body=json.dumps(job),
properties=pika.BasicProperties(
delivery_mode = 2,
))
logger.info("published job: %s" % job)
time.sleep(5)
if __name__ == "__main__":
start_producing()
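A minimal consumer counterpart for the jobs_queue this producer fills, assuming pika 1.x;
the hostname and the on_message name are illustrative, not part of the demo:

import json
import pika

def on_message(channel, method, properties, body):
    # decode a job published by start_producing() and acknowledge it
    job = json.loads(body)
    print("received job: %s" % job)
    channel.basic_ack(delivery_tag=method.delivery_tag)

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare('jobs_queue', durable=True) # match the producer's declaration
channel.basic_consume(queue='jobs_queue', on_message_callback=on_message)
channel.start_consuming()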
avg_line_length: 31.774194 | max_line_length: 85 | alphanum_fraction: 0.58731
content_no_comment:
import pika
import os
import logging
import json
import time
import random
import sys
RABBIT_MQ_HOST = os.environ.get('RABBITMQ_PORT_5672_TCP_ADDR')
RABBIT_MQ_PASS = os.environ.get('RABBITMQ_PASS')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug(os.environ)
def start_producing():
logger.info("Connecting to %s:%s" % (RABBIT_MQ_HOST, 5672))
credentials = pika.PlainCredentials('admin', RABBIT_MQ_PASS)
parameters = pika.ConnectionParameters(RABBIT_MQ_HOST,
5672,
'/',
credentials)
start_time = time.time()
while True:
try:
connection = pika.BlockingConnection(parameters)
break
except pika.exceptions.AMQPConnectionError:
logger.warn('Cannot connect yet, sleeping 5 seconds.')
time.sleep(5)
if time.time() - start_time > 60:
                logger.error('Could not connect after 60 seconds.')
exit(1)
channel = connection.channel()
channel.queue_declare('jobs_queue', durable=True)
while True:
job = {
'operation': random.choice(['add', 'subtract', 'multiply', 'divide']),
'left': random.choice(range(1000)),
'right': random.choice(range(1,1000)),
}
channel.basic_publish(exchange='',
routing_key='jobs_queue',
body=json.dumps(job),
properties=pika.BasicProperties(
delivery_mode = 2,
))
logger.info("published job: %s" % job)
time.sleep(5)
if __name__ == "__main__":
start_producing()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7904bf082a4aa69c4a2e057653c3ffd2a36bdd9b
size: 51,202 | ext: py | lang: Python
max_stars: repo_path=bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | repo_name=LyonsLab/coge | head_hexsha=1d9a8e84a8572809ee3260ede44290e14de3bdd1 | licenses=["BSD-2-Clause"] | count=37 | stars_event: 2015-02-24T18:58:30.000Z to 2021-03-07T21:22:18.000Z
max_issues: repo_path=Bio/Graphics/GenomeDiagram/_CircularDrawer.py | repo_name=sbassi/biopython | head_hexsha=b41975bb8363171add80d19903861f3d8cffe405 | licenses=["PostgreSQL"] | count=12 | issues_event: 2016-06-09T21:57:00.000Z to 2020-09-11T18:48:51.000Z
max_forks: repo_path=Bio/Graphics/GenomeDiagram/_CircularDrawer.py | repo_name=sbassi/biopython | head_hexsha=b41975bb8363171add80d19903861f3d8cffe405 | licenses=["PostgreSQL"] | count=19 | forks_event: 2016-03-26T08:15:17.000Z to 2021-04-12T05:03:29.000Z
content:
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
        o tracklines    Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
        o track_radii   Dictionary of the inner, center and outer radii at
                        which each track is drawn, keyed by track number
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
            o circular      Boolean flag to show whether the passed sequence is
                            circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
        if not circular:            # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
            except Exception:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
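    # Hedged illustration (kept as a comment so the module still imports):
    # for two tracks of heights 1 and 2 on a 400x400 drawable area with
    # track_size=0.75, the arithmetic above gives, approximately:
    #
    #   trackunit_sum    = 3
    #   trackunit_height = 0.5 * 400 / 3            # ~66.67 pixels per unit
    #   track_crop       = 66.67 * (1 - 0.75) / 2.  # ~8.33 pixels
    #   self.track_radii = {1: (8.33, 33.33, 58.33),      # (btm, ctr, top)
    #                       2: (75.0, 133.33, 191.67)}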
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
        # Calculate the radial position of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
            lastx, lasty = x, y
return line_elements
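    # Worked example (comment only): with track_radii giving btm=50, top=150
    # (so trackheight=50 and ctr=100), data spanning minval=0..maxval=10 and
    # graph.center=None, we get midval=5 and resolution=5, hence:
    #
    #   val = 10 -> posheight = 50*(10-5)/5 + 100 = 150   (outer edge)
    #   val =  5 -> posheight = 100                       (center ring)
    #   val =  0 -> posheight = 50                        (inner edge)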
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
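    # Sketch of the expected intermediate_points() shape (an assumption made
    # explicit here - the loop above implies it yields (start, end, value)
    # triples whose boundaries sit midway between neighbouring data points):
    #
    #   data  = [(100, 2.0), (200, 4.0), (300, 1.0)] drawn over 0..400
    #   bins ~= [(0, 150, 2.0), (150, 250, 4.0), (250, 400, 1.0)]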
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
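    # Illustration (comment only) of the reportlab color interpolation used
    # above; with poscolor=red, negcolor=blue, maxval=10 and minval=0:
    #
    #   from reportlab.lib import colors
    #   colors.linearlyInterpolatedColor(colors.red, colors.blue, 10, 0, 10)
    #   # -> pure red (val == maxval)
    #   colors.linearlyInterpolatedColor(colors.red, colors.blue, 10, 0, 5)
    #   # -> an even red/blue mix (val midway between the extremes)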
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
            #range(0,self.end,tickiterval) and then filter out the
            #ones before self.start - but this seems wasteful.
            #Using tickiterval * (self.start//tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
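            # Worked example (comment only): with self.start=2500,
            # self.end=8000 and tickiterval=1000, range() starts at
            # 1000*(2500//1000) = 2000 and the filter leaves ticks at
            # 3000..7000 - anchored to position 0, not to the viewpoint.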
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
                    for limit, x, y in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
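    # Worked examples (comment only) of the 'SInt' label formatting above:
    #
    #   tickpos = 2500000 -> '2 Mbp'    (integer division truncates)
    #   tickpos = 15000   -> '15 Kbp'
    #   tickpos = 500     -> '500'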
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue lines indicating tracks to be drawn, with a green line
down the center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
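    # Worked example (comment only): with sweep=1, start=0 and length=1000,
    # base 250 maps to angle = 2*pi*250/1000 = pi/2, returned together with
    # its cos and sin.  Because angles are measured clockwise from the
    # vertical, a point's x offset is radius*sin(angle) and its y offset is
    # radius*cos(angle), as used throughout the drawing code.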
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
                        strokeWidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
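    # Worked conversion (comment only): this class measures angles clockwise
    # from the vertical in radians, while reportlab measures anti-clockwise
    # from the horizontal in degrees.  For startangle=0, endangle=pi/2 the
    # addArc() calls above therefore receive:
    #
    #   90 - (pi/2 * 180/pi) = 0      (end of our arc)
    #   90 - (0    * 180/pi) = 90     (start of our arc)
    #
    # i.e. the reportlab arc runs from 0 to 90 degrees.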
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
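        # Worked example (comment only): for inner_radius=50, outer_radius=100
        # (middle_radius=75, boxheight=50), head_length_ratio=0.5 and an arc
        # from startangle=0 to endangle=1.0 with orientation='right':
        #
        #   headangle_delta = min(50*0.5/75, 1.0) = ~0.333
        #   headangle       = 1.0 - 0.333         = ~0.667
        #
        # so the final third of the arc is devoted to the arrow head.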
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
                           strokeWidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
                        strokeWidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
                        strokeWidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a small
            #head angle; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
| 44.79615
| 112
| 0.547479
|
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
for locstart, locend in feature.locations:
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None:
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
        # Calculate the radial position of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
            lastx, lasty = x, y
return line_elements
def draw_bar_graph(self, graph):
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
            #range(0,self.end,tickiterval) and then filter out the
            #ones before self.start - but this seems wasteful.
            #Using tickiterval * (self.start//tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None:
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
                    for limit, x, y in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None:
strokecolor = colors.black
elif border is None:
strokecolor = color
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
                        strokeWidth=0)
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None:
strokecolor = colors.black
elif border is None:
strokecolor = color
elif border is not None:
strokecolor = border
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle)
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1,
                           strokeWidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokeLineJoin=1,
                        strokeWidth=0,
**kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0)
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokeLineJoin=1,
                        strokeWidth=0,
**kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0)
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
| true
| true
|
7904bf6f2497e9344071a37a3b124b3545910ba1
| 7,388
|
py
|
Python
|
Fuzzer/src/word.py
|
compsec-snu/difuzz-rtl
|
bff2dee29b175ad1aeff0b88a334d37a91b84b8b
|
[
"BSD-3-Clause"
] | 46
|
2021-03-31T12:07:37.000Z
|
2022-01-24T03:46:53.000Z
|
Fuzzer/src/word.py
|
sangyun0110/difuzz-rtl
|
bff2dee29b175ad1aeff0b88a334d37a91b84b8b
|
[
"BSD-3-Clause"
] | null | null | null |
Fuzzer/src/word.py
|
sangyun0110/difuzz-rtl
|
bff2dee29b175ad1aeff0b88a334d37a91b84b8b
|
[
"BSD-3-Clause"
] | 6
|
2021-05-07T01:31:02.000Z
|
2022-01-23T16:52:36.000Z
|
import os
import random
from riscv_definitions import *
NONE = 0
CF_J = 1
CF_BR = 2
CF_RET = 3
MEM_R = 4
MEM_W = 5
CSR = 6
PREFIX = '_p'
MAIN = '_l'
SUFFIX = '_s'
class Word():
def __init__(self, label: int, insts: list, tpe=NONE, xregs=None, fregs=None, imms=None, symbols=None, populated=False):
# Use None defaults to avoid sharing one mutable list across instances
xregs = [] if xregs is None else xregs
fregs = [] if fregs is None else fregs
imms = [] if imms is None else imms
symbols = [] if symbols is None else symbols
self.label = label
self.tpe = tpe
self.insts = insts
self.len_insts = len(insts)
self.xregs = xregs
self.fregs = fregs
self.imms = imms
self.symbols = symbols
self.operands = xregs + fregs + [ imm[0] for imm in imms ] + symbols
self.populated = populated
self.ret_insts = []
def pop_inst(self, inst, opvals):
for (op, val) in opvals.items():
inst = inst.replace(op, val)
return inst
def populate(self, opvals, part=MAIN):
for op in self.operands:
assert op in opvals.keys(), \
'{} is not in label {} Word opvals'.format(op, self.label)
pop_insts = []
for inst in self.insts:
p_inst = self.pop_inst(inst, opvals)
pop_insts.append(p_inst)
ret_insts = [ '{:<8}{:<42}'.format(part + str(self.label) + ':',
pop_insts.pop(0)) ]
for i in range(len(pop_insts)):
ret_insts.append('{:8}{:<42}'.format('', pop_insts.pop(0)))
self.populated = True
self.ret_insts = ret_insts
def reset_label(self, new_label, part):
old_label = self.label
self.label = new_label
if self.populated:
self.ret_insts[0] = '{:8}{:<42}'.format(part + str(self.label) + ':',
self.ret_insts[0][8:])
return (old_label, new_label)
else:
return None
def repop_label(self, label_map, max_label, part):
if self.populated:
for i in range(len(self.ret_insts)):
inst = self.ret_insts[i]
tmps = inst.split(', ' + part)
if len(tmps) > 1:
label = tmps[1].split(' ')[0]
old = int(label)
new = label_map.get(old, random.randint(self.label + 1, max_label))
new_inst = inst[8:].replace(part + '{}'.format(old), part + '{}'.format(new))
inst = '{:<8}{:<50}'.format(inst[0:8], new_inst)
self.ret_insts[i] = inst
else:
return
def get_insts(self):
assert self.populated, \
'Word is not populated'
return self.ret_insts
def word_jal(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ syntax ]
return (tpe, insts)
def word_jalr(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ 'la xreg1, symbol', syntax ]
symbols.append('symbol')
return (tpe, insts)
# Need to update
def word_branch(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_BR
insts = [ syntax ]
return (tpe, insts)
def word_ret(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_RET
if syntax == 'mret': epc = 'mepc'
elif syntax == 'sret': epc = 'sepc'
else: epc = 'uepc'
insts = [ 'la xreg0, symbol',
'csrrw zero, {}, xreg0'.format(epc),
syntax ]
xregs.append('xreg0')
symbols.append('symbol')
return (tpe, insts)
def word_mem_r(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_R
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_mem_w(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_atomic(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol',
'addi xreg1, xreg1, imm6' ] + \
mask_addr + \
[ syntax ]
if opcode in rv64.keys():
imms.append(('imm6', 8))
else:
imms.append(('imm6', 4))
symbols.append('symbol')
return (tpe, insts)
def word_csr_r(opcode, syntax, xregs, fregs, imms, symbols):
csr = random.choice(csr_names)
if 'pmpaddr' in csr:
tpe = MEM_R
insts = [ 'la xreg1, symbol',
'srai xreg1, xreg1, 1',
syntax.format(csr) ]
symbols.append('symbol')
else:
tpe = CSR
insts = [ 'xor xreg1, xreg1, xreg1']
for i in range(random.randint(0, 3)):
set_bits = random.choice([1, 3])
offset = random.randint(0, 31)
insts = insts + \
['addi xreg{}, zero, {}'.format(i+2, set_bits),
'slli xreg{}, xreg{}, {}'.format(i+2, i+2, offset),
'add xreg1, xreg1, xreg{}'.format(i+2)
]
xregs.append('xreg{}'.format(i+2))
insts.append(syntax.format(csr))
return (tpe, insts)
def word_csr_i(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CSR
csr = random.choice(csr_names)
insts = [ syntax.format(csr) ]
return (tpe, insts)
def word_sfence(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
pt_symbol = random.choice([ 'pt0', 'pt1', 'pt2', 'pt3' ])
imms += [ ('uimm1', 1), ('uimm6', 8) ]
insts = [ 'li xreg0, uimm1',
'la xreg1, {}'.format(pt_symbol),
'addi xreg1, xreg1, uimm6' ] + \
[ syntax ]
return (tpe, insts)
def word_fp(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
# rm = random.choice([ 'rne', 'rtz', 'rdn',
# 'rup', 'rmm', 'dyn'])
# Unset rounding mode testing
rm = 'rne'
insts = [ syntax.format(rm) ]
return (tpe, insts)
""" Opcodes_words
Dictionary of opcodes - word generation functions
to handle opcodes which need special instructions
"""
opcodes_words = {
'jal': (['jal'], word_jal),
'jalr': (['jalr'], word_jalr),
'branch': (list(rv32i_btype.keys()), word_branch),
'ret': (['mret', 'sret', 'uret'], word_ret),
'mem_r': (['lb', 'lh', 'lw', 'ld', 'lbu', 'lhu', 'lwu', \
'flw', 'fld', 'flq'], word_mem_r),
'mem_w': (['sb', 'sh', 'sw', 'sd', 'fsw', 'fsd', 'fsq'], word_mem_w),
'atomic': (list(rv32a.keys()) + list(rv64a.keys()), word_atomic),
'csr_r': (['csrrw', 'csrrs', 'csrrc'], word_csr_r),
'csr_i': (['csrrwi', 'csrrsi', 'csrrci'], word_csr_i),
'sfence': (['sfence.vma'], word_sfence),
'fp': (list(rv32f.keys()) + list(rv64f.keys()) + list(rv32d.keys()) + \
list(rv64d.keys()) + list(rv32q.keys()) + list(rv64q.keys()),
word_fp)
}
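A minimal usage sketch for the Word class above (opcode, operand names, and values here are invented for illustration, not taken from the fuzzer's opcode tables): a word is built from templated operands, populated once with concrete values, then queried for its padded instruction lines.
w = Word(0, ['addi xreg0, xreg1, imm0'],
         xregs=['xreg0', 'xreg1'], imms=[('imm0', 12)])
w.populate({'xreg0': 'a0', 'xreg1': 'a1', 'imm0': '12'}, part=MAIN)
print(w.get_insts())   # ['_l0:    addi a0, a1, 12'] padded to the fixed widths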
| 28.525097
| 116
| 0.525311
|
import os
import random
from riscv_definitions import *
NONE = 0
CF_J = 1
CF_BR = 2
CF_RET = 3
MEM_R = 4
MEM_W = 5
CSR = 6
PREFIX = '_p'
MAIN = '_l'
SUFFIX = '_s'
class Word():
def __init__(self, label: int, insts: list, tpe=NONE, xregs=None, fregs=None, imms=None, symbols=None, populated=False):
xregs = [] if xregs is None else xregs
fregs = [] if fregs is None else fregs
imms = [] if imms is None else imms
symbols = [] if symbols is None else symbols
self.label = label
self.tpe = tpe
self.insts = insts
self.len_insts = len(insts)
self.xregs = xregs
self.fregs = fregs
self.imms = imms
self.symbols = symbols
self.operands = xregs + fregs + [ imm[0] for imm in imms ] + symbols
self.populated = populated
self.ret_insts = []
def pop_inst(self, inst, opvals):
for (op, val) in opvals.items():
inst = inst.replace(op, val)
return inst
def populate(self, opvals, part=MAIN):
for op in self.operands:
assert op in opvals.keys(), \
'{} is not in label {} Word opvals'.format(op, self.label)
pop_insts = []
for inst in self.insts:
p_inst = self.pop_inst(inst, opvals)
pop_insts.append(p_inst)
ret_insts = [ '{:<8}{:<42}'.format(part + str(self.label) + ':',
pop_insts.pop(0)) ]
for i in range(len(pop_insts)):
ret_insts.append('{:8}{:<42}'.format('', pop_insts.pop(0)))
self.populated = True
self.ret_insts = ret_insts
def reset_label(self, new_label, part):
old_label = self.label
self.label = new_label
if self.populated:
self.ret_insts[0] = '{:8}{:<42}'.format(part + str(self.label) + ':',
self.ret_insts[0][8:])
return (old_label, new_label)
else:
return None
def repop_label(self, label_map, max_label, part):
if self.populated:
for i in range(len(self.ret_insts)):
inst = self.ret_insts[i]
tmps = inst.split(', ' + part)
if len(tmps) > 1:
label = tmps[1].split(' ')[0]
old = int(label)
new = label_map.get(old, random.randint(self.label + 1, max_label))
new_inst = inst[8:].replace(part + '{}'.format(old), part + '{}'.format(new))
inst = '{:<8}{:<50}'.format(inst[0:8], new_inst)
self.ret_insts[i] = inst
else:
return
def get_insts(self):
assert self.populated, \
'Word is not populated'
return self.ret_insts
def word_jal(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ syntax ]
return (tpe, insts)
def word_jalr(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ 'la xreg1, symbol', syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_branch(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_BR
insts = [ syntax ]
return (tpe, insts)
def word_ret(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_RET
if syntax == 'mret': epc = 'mepc'
elif syntax == 'sret': epc = 'sepc'
else: epc = 'uepc'
insts = [ 'la xreg0, symbol',
'csrrw zero, {}, xreg0'.format(epc),
syntax ]
xregs.append('xreg0')
symbols.append('symbol')
return (tpe, insts)
def word_mem_r(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_R
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_mem_w(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_atomic(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol',
'addi xreg1, xreg1, imm6' ] + \
mask_addr + \
[ syntax ]
if opcode in rv64.keys():
imms.append(('imm6', 8))
else:
imms.append(('imm6', 4))
symbols.append('symbol')
return (tpe, insts)
def word_csr_r(opcode, syntax, xregs, fregs, imms, symbols):
csr = random.choice(csr_names)
if 'pmpaddr' in csr:
tpe = MEM_R
insts = [ 'la xreg1, symbol',
'srai xreg1, xreg1, 1',
syntax.format(csr) ]
symbols.append('symbol')
else:
tpe = CSR
insts = [ 'xor xreg1, xreg1, xreg1']
for i in range(random.randint(0, 3)):
set_bits = random.choice([1, 3])
offset = random.randint(0, 31)
insts = insts + \
['addi xreg{}, zero, {}'.format(i+2, set_bits),
'slli xreg{}, xreg{}, {}'.format(i+2, i+2, offset),
'add xreg1, xreg1, xreg{}'.format(i+2)
]
xregs.append('xreg{}'.format(i+2))
insts.append(syntax.format(csr))
return (tpe, insts)
def word_csr_i(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CSR
csr = random.choice(csr_names)
insts = [ syntax.format(csr) ]
return (tpe, insts)
def word_sfence(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
pt_symbol = random.choice([ 'pt0', 'pt1', 'pt2', 'pt3' ])
imms += [ ('uimm1', 1), ('uimm6', 8) ]
insts = [ 'li xreg0, uimm1',
'la xreg1, {}'.format(pt_symbol),
'addi xreg1, xreg1, uimm6' ] + \
[ syntax ]
return (tpe, insts)
def word_fp(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
rm = 'rne'
insts = [ syntax.format(rm) ]
return (tpe, insts)
opcodes_words = {
'jal': (['jal'], word_jal),
'jalr': (['jalr'], word_jalr),
'branch': (list(rv32i_btype.keys()), word_branch),
'ret': (['mret', 'sret', 'uret'], word_ret),
'mem_r': (['lb', 'lh', 'lw', 'ld', 'lbu', 'lhu', 'lwu', \
'flw', 'fld', 'flq'], word_mem_r),
'mem_w': (['sb', 'sh', 'sw', 'sd', 'fsw', 'fsd', 'fsq'], word_mem_w),
'atomic': (list(rv32a.keys()) + list(rv64a.keys()), word_atomic),
'csr_r': (['csrrw', 'csrrs', 'csrrc'], word_csr_r),
'csr_i': (['csrrwi', 'csrrsi', 'csrrci'], word_csr_i),
'sfence': (['sfence.vma'], word_sfence),
'fp': (list(rv32f.keys()) + list(rv64f.keys()) + list(rv32d.keys()) + \
list(rv64d.keys()) + list(rv32q.keys()) + list(rv64q.keys()),
word_fp)
}
| true
| true
|
7904bf8b23974c94fc8a8310238c82c560497569
| 16,293
|
py
|
Python
|
glue_vispy_viewers/extern/vispy/gloo/buffer.py
|
jzuhone/glue-vispy-viewers
|
d940705f4ba95f8d7a9a74d37fb68c71080b490a
|
[
"BSD-2-Clause"
] | 3
|
2018-05-09T17:55:53.000Z
|
2019-07-22T09:14:41.000Z
|
glue_vispy_viewers/extern/vispy/gloo/buffer.py
|
jzuhone/glue-vispy-viewers
|
d940705f4ba95f8d7a9a74d37fb68c71080b490a
|
[
"BSD-2-Clause"
] | 19
|
2015-06-16T14:33:22.000Z
|
2015-07-27T21:18:15.000Z
|
graphViz/vispy/gloo/buffer.py
|
onecklam/ethereum-graphviz
|
6993accf0cb85e23013bf7ae6b04145724a6dbd2
|
[
"Apache-2.0"
] | 1
|
2017-09-29T01:24:47.000Z
|
2017-09-29T01:24:47.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from os import path as op
from traceback import extract_stack, format_list
import weakref
from . globject import GLObject
from ..util import logger
from ..ext.six import string_types
# ------------------------------------------------------------ Buffer class ---
class Buffer(GLObject):
""" Generic GPU buffer.
A generic buffer is an interface used to upload data to a GPU array buffer
(ARRAY_BUFFER or ELEMENT_ARRAY_BUFFER). It keeps track of
buffer size but does not have any CPU storage. You can consider it as
write-only.
The `set_data` is a deferred operation: you can call it even if an OpenGL
context is not available. The `update` function is responsible to upload
pending data to GPU memory and requires an active GL context.
The Buffer class only deals with data in terms of bytes; it is not
aware of data type or element size.
Parameters
----------
data : ndarray | None
Buffer data.
nbytes : int | None
Buffer byte size.
"""
def __init__(self, data=None, nbytes=None):
GLObject.__init__(self)
self._views = [] # Views on this buffer (stored using weakrefs)
self._valid = True # To invalidate buffer views
self._nbytes = 0 # Bytesize in bytes, set in resize_bytes()
# Set data
if data is not None:
if nbytes is not None:
raise ValueError("Cannot specify both data and nbytes.")
self.set_data(data, copy=False)
elif nbytes is not None:
self.resize_bytes(nbytes)
@property
def nbytes(self):
""" Buffer size in bytes """
return self._nbytes
def set_subdata(self, data, offset=0, copy=False):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if offset < 0:
raise ValueError("Offset must be positive")
elif (offset + nbytes) > self._nbytes:
raise ValueError("Data does not fit into buffer")
# If the whole buffer is to be written, we clear any pending data
# (because they will be overwritten anyway)
if nbytes == self._nbytes and offset == 0:
self._glir.command('SIZE', self._id, nbytes)
self._glir.command('DATA', self._id, offset, data)
def set_data(self, data, copy=False):
""" Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if nbytes != self._nbytes:
self.resize_bytes(nbytes)
else:
# Use SIZE to discard any previous data setting
self._glir.command('SIZE', self._id, nbytes)
if nbytes: # Only set data if there *is* data
self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size):
""" Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
"""
self._nbytes = size
self._glir.command('SIZE', self._id, size)
# Invalidate any view on this buffer
for view in self._views:
if view() is not None:
view()._valid = False
self._views = []
# -------------------------------------------------------- DataBuffer class ---
class DataBuffer(Buffer):
""" GPU data buffer that is aware of data type and elements size
Parameters
----------
data : ndarray | None
Buffer data.
"""
def __init__(self, data=None):
self._size = 0 # number of elements in buffer, set in resize_bytes()
self._dtype = None
self._stride = 0
self._itemsize = 0
self._last_dim = None
Buffer.__init__(self, data)
def _prepare_data(self, data):
# Can be overridden by subclasses
if not isinstance(data, np.ndarray):
raise TypeError("DataBuffer data must be numpy array.")
return data
def set_subdata(self, data, offset=0, copy=False, **kwargs):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
"""
data = self._prepare_data(data, **kwargs)
offset = offset * self.itemsize
Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
def set_data(self, data, copy=False, **kwargs):
""" Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
"""
data = self._prepare_data(data, **kwargs)
self._dtype = data.dtype
self._stride = data.strides[-1]
self._itemsize = self._dtype.itemsize
Buffer.set_data(self, data=data, copy=copy)
@property
def dtype(self):
""" Buffer dtype """
return self._dtype
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return 0
@property
def stride(self):
""" Stride of data in memory """
return self._stride
@property
def size(self):
""" Number of elements in the buffer """
return self._size
@property
def itemsize(self):
""" The total number of bytes required to store the array data """
return self._itemsize
@property
def glsl_type(self):
""" GLSL declaration strings required for a variable to hold this data.
"""
if self.dtype is None:
return None
dtshape = self.dtype[0].shape
n = dtshape[0] if dtshape else 1
if n > 1:
dtype = 'vec%d' % n
else:
dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
return 'attribute', dtype
def resize_bytes(self, size):
""" Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
"""
Buffer.resize_bytes(self, size)
self._size = size // self.itemsize
def __getitem__(self, key):
""" Create a view on this buffer. """
view = DataBufferView(self, key)
self._views.append(weakref.ref(view))
return view
def __setitem__(self, key, data):
""" Set data (deferred operation) """
# Setting a whole field of the buffer: only allowed if we have CPU
# storage. Note this case (key is string) only happens with the base buffer
if isinstance(key, string_types):
raise ValueError("Cannot set non-contiguous data on buffer")
# Setting one or several elements
elif isinstance(key, int):
if key < 0:
key += self.size
if key < 0 or key > self.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(self.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, self.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
# Contiguous update?
if step != 1:
raise ValueError("Cannot set non-contiguous data on buffer")
# Make sure data is an array
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=self.dtype, copy=False)
# Make sure data is big enough
if data.size < stop - start:
data = np.resize(data, stop - start)
elif data.size > stop - start:
raise ValueError('Data too big to fit GPU data.')
# Set data
offset = start # * self.itemsize
self.set_subdata(data=data, offset=offset, copy=True)
def __repr__(self):
return ("<%s size=%s last_dim=%s>" %
(self.__class__.__name__, self.size, self._last_dim))
class DataBufferView(DataBuffer):
""" View on a sub-region of a DataBuffer.
Parameters
----------
base : DataBuffer
The buffer accessed by this view.
key : str, int, slice, or Ellipsis
The index into the base buffer that defines a sub-region of the buffer
to view. String arguments select a single field from multi-field
dtypes, and other allowed types select a subset of rows.
Notes
-----
It is generally not necessary to instantiate this class manually; use
``base_buffer[key]`` instead.
"""
# Note that this class is a bit evil: it is a subclass of GLObject,
# Buffer and DataBuffer, but none of their __init__'s are called ...
def __init__(self, base, key):
# Note how this never runs the super's __init__,
# all attributes must thus be set here ...
self._base = base
self._key = key
self._stride = base.stride
if isinstance(key, string_types):
self._dtype = base.dtype[key]
self._offset = base.dtype.fields[key][1]
self._nbytes = base.size * self._dtype.itemsize
self._size = base.size
self._itemsize = self._dtype.itemsize
return
if isinstance(key, int):
if key < 0:
key += base.size
if key < 0 or key > base.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(base.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, base.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
if step != 1:
raise ValueError("Cannot access non-contiguous data")
self._itemsize = base.itemsize
self._offset = start * self.itemsize
self._size = stop - start
self._dtype = base.dtype
self._nbytes = self.size * self.itemsize
@property
def glir(self):
return self._base.glir
@property
def id(self):
return self._base.id
@property
def _last_dim(self):
return self._base._last_dim
def set_subdata(self, data, offset=0, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
def set_data(self, data, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return self._offset
@property
def base(self):
"""Buffer base if this buffer is a view on another buffer. """
return self._base
def resize_bytes(self, size):
raise RuntimeError("Cannot resize buffer view.")
def __getitem__(self, key):
raise RuntimeError("Can only access data from a base buffer")
def __setitem__(self, key, data):
raise RuntimeError("Cannot set data on Buffer view")
def __repr__(self):
return ("<DataBufferView on %r at offset=%d size=%d>" %
(self.base, self.offset, self.size))
# ------------------------------------------------------ VertexBuffer class ---
class VertexBuffer(DataBuffer):
""" Buffer for vertex attribute data
Parameters
----------
data : ndarray
Buffer data (optional)
"""
_GLIR_TYPE = 'VertexBuffer'
def _prepare_data(self, data, convert=False):
# Build a structured view of the data if:
# -> it is not already a structured array
# -> shape is 1-D or last dimension is 1, 2, 3 or 4
if isinstance(data, list):
data = np.array(data, dtype=np.float32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if data.dtype.isbuiltin:
if convert is True:
data = data.astype(np.float32)
if data.dtype in (np.float64, np.int64):
raise TypeError('data must be 32-bit not %s'
% data.dtype)
c = data.shape[-1] if data.ndim > 1 else 1
if c in [2, 3, 4]:
if not data.flags['C_CONTIGUOUS']:
logger.warning('Copying discontiguous data for struct '
'dtype:\n%s' % _last_stack_str())
data = data.copy()
else:
c = 1
if self._last_dim and c != self._last_dim:
raise ValueError('Last dimension should be %s not %s'
% (self._last_dim, c))
data = data.view(dtype=[('f0', data.dtype.base, c)])
self._last_dim = c
return data
def _last_stack_str():
"""Print stack trace from call that didn't originate from here"""
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in s[0]:  # check the frame's file
break
return format_list([s])[0]
# ------------------------------------------------------- IndexBuffer class ---
class IndexBuffer(DataBuffer):
""" Buffer for index data
Parameters
----------
data : ndarray | None
Buffer data.
"""
_GLIR_TYPE = 'IndexBuffer'
def __init__(self, data=None):
DataBuffer.__init__(self, data)
self._last_dim = 1
def _prepare_data(self, data, convert=False):
if isinstance(data, list):
data = np.array(data, dtype=np.uint32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if not data.dtype.isbuiltin:
raise TypeError("Element buffer dtype cannot be structured")
else:
if convert:
if data.dtype != np.uint32:
data = data.astype(np.uint32)
else:
if data.dtype not in [np.uint32, np.uint16, np.uint8]:
raise TypeError("Invalid dtype for IndexBuffer: %r" %
data.dtype)
return data
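A short sketch of how these classes fit together (illustrative; all operations below only queue deferred GLIR commands, so no GL context is needed until draw time):
import numpy as np
# Structured vertex data: views expose individual fields of the buffer.
data = np.zeros(4, dtype=[('a_position', np.float32, 3),
                          ('a_color', np.float32, 4)])
vbo = VertexBuffer(data)
pos_view = vbo['a_position']   # DataBufferView with its own offset/stride
vbo[1:3] = data[0:2]           # contiguous element-wise update (copied)
ibo = IndexBuffer([0, 1, 2])   # plain list is converted to uint32 indices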
| 32.651303
| 79
| 0.560302
|
import numpy as np
from os import path as op
from traceback import extract_stack, format_list
import weakref
from . globject import GLObject
from ..util import logger
from ..ext.six import string_types
class Buffer(GLObject):
def __init__(self, data=None, nbytes=None):
GLObject.__init__(self)
self._views = []
self._valid = True
self._nbytes = 0
if data is not None:
if nbytes is not None:
raise ValueError("Cannot specify both data and nbytes.")
self.set_data(data, copy=False)
elif nbytes is not None:
self.resize_bytes(nbytes)
@property
def nbytes(self):
return self._nbytes
def set_subdata(self, data, offset=0, copy=False):
data = np.array(data, copy=copy)
nbytes = data.nbytes
if offset < 0:
raise ValueError("Offset must be positive")
elif (offset + nbytes) > self._nbytes:
raise ValueError("Data does not fit into buffer")
if nbytes == self._nbytes and offset == 0:
self._glir.command('SIZE', self._id, nbytes)
self._glir.command('DATA', self._id, offset, data)
def set_data(self, data, copy=False):
data = np.array(data, copy=copy)
nbytes = data.nbytes
if nbytes != self._nbytes:
self.resize_bytes(nbytes)
else:
self._glir.command('SIZE', self._id, nbytes)
if nbytes:
self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size):
self._nbytes = size
self._glir.command('SIZE', self._id, size)
for view in self._views:
if view() is not None:
view()._valid = False
self._views = []
class DataBuffer(Buffer):
def __init__(self, data=None):
self._size = 0
self._dtype = None
self._stride = 0
self._itemsize = 0
self._last_dim = None
Buffer.__init__(self, data)
def _prepare_data(self, data):
if not isinstance(data, np.ndarray):
raise TypeError("DataBuffer data must be numpy array.")
return data
def set_subdata(self, data, offset=0, copy=False, **kwargs):
data = self._prepare_data(data, **kwargs)
offset = offset * self.itemsize
Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
def set_data(self, data, copy=False, **kwargs):
data = self._prepare_data(data, **kwargs)
self._dtype = data.dtype
self._stride = data.strides[-1]
self._itemsize = self._dtype.itemsize
Buffer.set_data(self, data=data, copy=copy)
@property
def dtype(self):
return self._dtype
@property
def offset(self):
return 0
@property
def stride(self):
return self._stride
@property
def size(self):
return self._size
@property
def itemsize(self):
return self._itemsize
@property
def glsl_type(self):
if self.dtype is None:
return None
dtshape = self.dtype[0].shape
n = dtshape[0] if dtshape else 1
if n > 1:
dtype = 'vec%d' % n
else:
dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
return 'attribute', dtype
def resize_bytes(self, size):
Buffer.resize_bytes(self, size)
self._size = size // self.itemsize
def __getitem__(self, key):
view = DataBufferView(self, key)
self._views.append(weakref.ref(view))
return view
def __setitem__(self, key, data):
if isinstance(key, string_types):
raise ValueError("Cannot set non-contiguous data on buffer")
elif isinstance(key, int):
if key < 0:
key += self.size
if key < 0 or key > self.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(self.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, self.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
if step != 1:
raise ValueError("Cannot set non-contiguous data on buffer")
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=self.dtype, copy=False)
if data.size < stop - start:
data = np.resize(data, stop - start)
elif data.size > stop - start:
raise ValueError('Data too big to fit GPU data.')
offset = start
self.set_subdata(data=data, offset=offset, copy=True)
def __repr__(self):
return ("<%s size=%s last_dim=%s>" %
(self.__class__.__name__, self.size, self._last_dim))
class DataBufferView(DataBuffer):
def __init__(self, base, key):
# Note how this never runs the super's __init__,
self._base = base
self._key = key
self._stride = base.stride
if isinstance(key, string_types):
self._dtype = base.dtype[key]
self._offset = base.dtype.fields[key][1]
self._nbytes = base.size * self._dtype.itemsize
self._size = base.size
self._itemsize = self._dtype.itemsize
return
if isinstance(key, int):
if key < 0:
key += base.size
if key < 0 or key > base.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(base.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, base.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
if step != 1:
raise ValueError("Cannot access non-contiguous data")
self._itemsize = base.itemsize
self._offset = start * self.itemsize
self._size = stop - start
self._dtype = base.dtype
self._nbytes = self.size * self.itemsize
@property
def glir(self):
return self._base.glir
@property
def id(self):
return self._base.id
@property
def _last_dim(self):
return self._base._last_dim
def set_subdata(self, data, offset=0, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
def set_data(self, data, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
@property
def offset(self):
return self._offset
@property
def base(self):
return self._base
def resize_bytes(self, size):
raise RuntimeError("Cannot resize buffer view.")
def __getitem__(self, key):
raise RuntimeError("Can only access data from a base buffer")
def __setitem__(self, key, data):
raise RuntimeError("Cannot set data on Buffer view")
def __repr__(self):
return ("<DataBufferView on %r at offset=%d size=%d>" %
(self.base, self.offset, self.size))
class VertexBuffer(DataBuffer):
_GLIR_TYPE = 'VertexBuffer'
def _prepare_data(self, data, convert=False):
if isinstance(data, list):
data = np.array(data, dtype=np.float32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if data.dtype.isbuiltin:
if convert is True:
data = data.astype(np.float32)
if data.dtype in (np.float64, np.int64):
raise TypeError('data must be 32-bit not %s'
% data.dtype)
c = data.shape[-1] if data.ndim > 1 else 1
if c in [2, 3, 4]:
if not data.flags['C_CONTIGUOUS']:
logger.warning('Copying discontiguous data for struct '
'dtype:\n%s' % _last_stack_str())
data = data.copy()
else:
c = 1
if self._last_dim and c != self._last_dim:
raise ValueError('Last dimension should be %s not %s'
% (self._last_dim, c))
data = data.view(dtype=[('f0', data.dtype.base, c)])
self._last_dim = c
return data
def _last_stack_str():
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in s[0]:
break
return format_list([s])[0]
class IndexBuffer(DataBuffer):
_GLIR_TYPE = 'IndexBuffer'
def __init__(self, data=None):
DataBuffer.__init__(self, data)
self._last_dim = 1
def _prepare_data(self, data, convert=False):
if isinstance(data, list):
data = np.array(data, dtype=np.uint32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if not data.dtype.isbuiltin:
raise TypeError("Element buffer dtype cannot be structured")
else:
if convert:
if data.dtype != np.uint32:
data = data.astype(np.uint32)
else:
if data.dtype not in [np.uint32, np.uint16, np.uint8]:
raise TypeError("Invalid dtype for IndexBuffer: %r" %
data.dtype)
return data
| true
| true
|
7904c130d6a5dbee35a6f38140d14f5568a5751f
| 4,235
|
py
|
Python
|
influxdb_client/service/health_service.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/service/health_service.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
influxdb_client/service/health_service.py
|
rhajek/influxdb-client-python
|
852e6f1b1161df4d67eabc19cdb6b323a46b88e2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from influxdb_client.api_client import ApiClient
class HealthService(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_health(self, **kwargs): # noqa: E501
"""Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_health_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_health_with_http_info(**kwargs) # noqa: E501
return data
def get_health_with_http_info(self, **kwargs): # noqa: E501
"""Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_health" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/health', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HealthCheck', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
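Because this class is generated, its calling convention is uniform across services. A hedged usage sketch (assumes the ApiClient configuration points at a reachable InfluxDB instance):
from influxdb_client.api_client import ApiClient
service = HealthService(ApiClient())
check = service.get_health()                 # synchronous, returns HealthCheck
thread = service.get_health(async_req=True)  # asynchronous variant
check = thread.get()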
| 34.153226
| 132
| 0.629988
|
from __future__ import absolute_import
import re
import six
from influxdb_client.api_client import ApiClient
class HealthService(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_health(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_health_with_http_info(**kwargs)
else:
(data) = self.get_health_with_http_info(**kwargs)
return data
def get_health_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = ['zap_trace_span']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_health" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/health', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HealthCheck',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| true
| true
|
7904c2d96a35d0856793b2bea96eb68c715feaf3
| 18
|
py
|
Python
|
jobs/version.py
|
shi1412/pyspark-oltp-pipeline
|
b3b28f84007ea8af6df0b7dfba760e0f3365ed94
|
[
"MIT"
] | null | null | null |
jobs/version.py
|
shi1412/pyspark-oltp-pipeline
|
b3b28f84007ea8af6df0b7dfba760e0f3365ed94
|
[
"MIT"
] | null | null | null |
jobs/version.py
|
shi1412/pyspark-oltp-pipeline
|
b3b28f84007ea8af6df0b7dfba760e0f3365ed94
|
[
"MIT"
] | null | null | null |
VERSION = 'v0.0.1'
| 18
| 18
| 0.611111
|
VERSION = 'v0.0.1'
| true
| true
|
7904c3c2af9596a4fb10b2032f25d2a2381fb8d3
| 4,553
|
py
|
Python
|
nistats/tests/test_check_events_file_uses_tab_separators.py
|
gifuni/nistats
|
8f0b606f6da6dc7f55e25cc0fa903fdfcc007145
|
[
"BSD-3-Clause"
] | null | null | null |
nistats/tests/test_check_events_file_uses_tab_separators.py
|
gifuni/nistats
|
8f0b606f6da6dc7f55e25cc0fa903fdfcc007145
|
[
"BSD-3-Clause"
] | 2
|
2019-12-18T14:40:01.000Z
|
2020-01-08T15:13:50.000Z
|
nistats/tests/test_check_events_file_uses_tab_separators.py
|
gifuni/nistats
|
8f0b606f6da6dc7f55e25cc0fa903fdfcc007145
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T08:21:30.000Z
|
2022-02-21T08:21:30.000Z
|
import pandas as pd
from nibabel.tmpdirs import InTemporaryDirectory
from nose.tools import (assert_raises,
assert_true,
)
from nistats.utils import _check_events_file_uses_tab_separators
def make_data_for_test_runs():
data_for_temp_datafile = [
['csf', 'constant', 'linearTrend', 'wm'],
[13343.032102491035, 1.0, 0.0, 9486.199545677482],
[13329.224068063204, 1.0, 1.0, 9497.003324892803],
[13291.755627241291, 1.0, 2.0, 9484.012965365506],
]
delimiters = {
'tab': '\t',
'comma': ',',
'space': ' ',
'semicolon': ';',
'hyphen': '-',
}
return data_for_temp_datafile, delimiters
def _create_test_file(temp_csv, test_data, delimiter):
test_data = pd.DataFrame(test_data)
test_data.to_csv(temp_csv, sep=delimiter)
def _run_test_for_invalid_separator(filepath, delimiter_name):
if delimiter_name not in ('tab', 'comma'):
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(events_files=filepath)
else:
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_invalid_separator():
data_for_temp_datafile, delimiters = make_data_for_test_runs()
for delimiter_name, delimiter_char in delimiters.items():
with InTemporaryDirectory():
temp_tsv_file = 'tempfile.{} separated values'.format(
delimiter_name)
_create_test_file(temp_csv=temp_tsv_file,
test_data=data_for_temp_datafile,
delimiter=delimiter_char)
_run_test_for_invalid_separator(filepath=temp_tsv_file,
delimiter_name=delimiter_name)
def test_with_2D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
events_pandas_dataframe = pd.DataFrame(data_for_pandas_dataframe)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_with_1D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
for dataframe_ in data_for_pandas_dataframe:
events_pandas_dataframe = pd.DataFrame(dataframe_)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_for_invalid_filepath():
filepath = 'junk_file_path.csv'
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_pandas_dataframe():
events_pandas_dataframe = pd.DataFrame([['a', 'b', 'c'], [0, 1, 2]])
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_binary_opening_an_image():
img_data = bytearray(
b'GIF87a\x01\x00\x01\x00\xe7*\x00\x00\x00\x00\x01\x01\x01\x02\x02'
b'\x07\x08\x08\x08\x0b\x0b\x0b\x0c\x0c\x0c\r;')
with InTemporaryDirectory():
temp_img_file = 'temp_img.gif'
with open(temp_img_file, 'wb') as temp_img_obj:
temp_img_obj.write(img_data)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_img_file)
def test_binary_bytearray_of_ints_data():
temp_data_bytearray_from_ints = bytearray([0, 1, 0, 11, 10])
with InTemporaryDirectory():
temp_bin_file = 'temp_bin.bin'
with open(temp_bin_file, 'wb') as temp_bin_obj:
temp_bin_obj.write(temp_data_bytearray_from_ints)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_bin_file)
if __name__ == '__main__':
def _run_tests_print_test_messages(test_func):
from pprint import pprint
pprint(['Running', test_func.__name__])
test_func()
pprint('... complete')
def run_test_suite():
tests = [
test_for_invalid_separator,
test_with_2D_dataframe,
test_with_1D_dataframe,
test_for_invalid_filepath,
test_for_pandas_dataframe,
test_binary_opening_an_image,
test_binary_bytearray_of_ints_data,
]
for test_ in tests:
_run_tests_print_test_messages(test_func=test_)
run_test_suite()
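The behaviour pinned down by these tests, in one snippet (file name and columns are illustrative): the checker accepts tab- or comma-separated events files and pandas DataFrames, returning None, and raises ValueError for other separators or binary content.
import pandas as pd
pd.DataFrame({'onset': [0.0], 'duration': [1.0]}).to_csv('events.tsv', sep='\t')
assert _check_events_file_uses_tab_separators(events_files='events.tsv') is None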
| 33.977612
| 78
| 0.664617
|
import pandas as pd
from nibabel.tmpdirs import InTemporaryDirectory
from nose.tools import (assert_raises,
assert_true,
)
from nistats.utils import _check_events_file_uses_tab_separators
def make_data_for_test_runs():
data_for_temp_datafile = [
['csf', 'constant', 'linearTrend', 'wm'],
[13343.032102491035, 1.0, 0.0, 9486.199545677482],
[13329.224068063204, 1.0, 1.0, 9497.003324892803],
[13291.755627241291, 1.0, 2.0, 9484.012965365506],
]
delimiters = {
'tab': '\t',
'comma': ',',
'space': ' ',
'semicolon': ';',
'hyphen': '-',
}
return data_for_temp_datafile, delimiters
def _create_test_file(temp_csv, test_data, delimiter):
test_data = pd.DataFrame(test_data)
test_data.to_csv(temp_csv, sep=delimiter)
def _run_test_for_invalid_separator(filepath, delimiter_name):
if delimiter_name not in ('tab', 'comma'):
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(events_files=filepath)
else:
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_invalid_separator():
data_for_temp_datafile, delimiters = make_data_for_test_runs()
for delimiter_name, delimiter_char in delimiters.items():
with InTemporaryDirectory():
temp_tsv_file = 'tempfile.{} separated values'.format(
delimiter_name)
_create_test_file(temp_csv=temp_tsv_file,
test_data=data_for_temp_datafile,
delimiter=delimiter_char)
_run_test_for_invalid_separator(filepath=temp_tsv_file,
delimiter_name=delimiter_name)
def test_with_2D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
events_pandas_dataframe = pd.DataFrame(data_for_pandas_dataframe)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_with_1D_dataframe():
data_for_pandas_dataframe, _ = make_data_for_test_runs()
for dataframe_ in data_for_pandas_dataframe:
events_pandas_dataframe = pd.DataFrame(dataframe_)
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_for_invalid_filepath():
filepath = 'junk_file_path.csv'
result = _check_events_file_uses_tab_separators(events_files=filepath)
assert_true(result is None)
def test_for_pandas_dataframe():
events_pandas_dataframe = pd.DataFrame([['a', 'b', 'c'], [0, 1, 2]])
result = _check_events_file_uses_tab_separators(
events_files=events_pandas_dataframe)
assert_true(result is None)
def test_binary_opening_an_image():
img_data = bytearray(
b'GIF87a\x01\x00\x01\x00\xe7*\x00\x00\x00\x00\x01\x01\x01\x02\x02'
b'\x07\x08\x08\x08\x0b\x0b\x0b\x0c\x0c\x0c\r;')
with InTemporaryDirectory():
temp_img_file = 'temp_img.gif'
with open(temp_img_file, 'wb') as temp_img_obj:
temp_img_obj.write(img_data)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_img_file)
def test_binary_bytearray_of_ints_data():
temp_data_bytearray_from_ints = bytearray([0, 1, 0, 11, 10])
with InTemporaryDirectory():
temp_bin_file = 'temp_bin.bin'
with open(temp_bin_file, 'wb') as temp_bin_obj:
temp_bin_obj.write(temp_data_bytearray_from_ints)
with assert_raises(ValueError):
_check_events_file_uses_tab_separators(
events_files=temp_bin_file)
if __name__ == '__main__':
def _run_tests_print_test_messages(test_func):
from pprint import pprint
pprint(['Running', test_func.__name__])
test_func()
pprint('... complete')
def run_test_suite():
tests = [
test_for_invalid_separator,
test_with_2D_dataframe,
test_with_1D_dataframe,
test_for_invalid_filepath,
test_for_pandas_dataframe,
test_binary_opening_an_image,
test_binary_bytearray_of_ints_data,
]
for test_ in tests:
_run_tests_print_test_messages(test_func=test_)
run_test_suite()
| true
| true
|
7904c594dc3dcfbe21cff458e72720bb8dbd2a60
| 3,905
|
py
|
Python
|
src/ewatercycle/observation/usgs.py
|
cffbots/ewatercycle
|
29571aace32fcea8f70948259e33a62c9c834808
|
[
"Apache-2.0"
] | 18
|
2021-03-25T08:25:32.000Z
|
2022-03-25T09:23:09.000Z
|
src/ewatercycle/observation/usgs.py
|
cffbots/ewatercycle
|
29571aace32fcea8f70948259e33a62c9c834808
|
[
"Apache-2.0"
] | 323
|
2016-08-11T12:13:58.000Z
|
2022-03-30T11:29:04.000Z
|
src/ewatercycle/observation/usgs.py
|
cffbots/ewatercycle
|
29571aace32fcea8f70948259e33a62c9c834808
|
[
"Apache-2.0"
] | 4
|
2018-06-27T11:47:23.000Z
|
2022-02-02T14:14:13.000Z
|
import os
from datetime import datetime
import numpy as np
import xarray as xr
from pyoos.collectors.usgs.usgs_rest import UsgsRest
from pyoos.parsers.waterml import WaterML11ToPaegan
def get_usgs_data(station_id, start_date, end_date, parameter="00060", cache_dir=None):
"""Get river discharge data from the USGS REST web service.
See `U.S. Geological Survey Water Services
<https://waterservices.usgs.gov/>`_ (USGS)
Parameters
----------
station_id : str
The station id to get
start_date : str
String for start date in the format: 'YYYY-MM-dd', e.g. '1980-01-01'
end_date : str
String for end date in the format: 'YYYY-MM-dd', e.g. '2018-12-31'
parameter : str
The parameter code to get, e.g. '00060' (discharge, cubic feet per second)
cache_dir : str
Directory where files retrieved from the web service are cached.
If set to None then USGS_DATA_HOME env var will be used as cache directory.
Examples
--------
>>> from ewatercycle.observation.usgs import get_usgs_data
>>> data = get_usgs_data('03109500', '2000-01-01', '2000-12-31', cache_dir='.')
>>> data
<xarray.Dataset>
Dimensions: (time: 8032)
Coordinates:
* time (time) datetime64[ns] 2000-01-04T05:00:00 ... 2000-12-23T04:00:00
Data variables:
Streamflow (time) float32 8.296758 10.420501 ... 10.647034 11.694747
Attributes:
title: USGS Data from streamflow data
station: Little Beaver Creek near East Liverpool OH
stationid: 03109500
location: (40.6758974, -80.5406244)
""" # noqa: E501
if cache_dir is None:
cache_dir = os.environ["USGS_DATA_HOME"]
# Check if we have the netcdf data
netcdf = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".nc",
)
if os.path.exists(netcdf):
return xr.open_dataset(netcdf)
# Download the data if needed
out = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".wml",
)
if not os.path.exists(out):
collector = UsgsRest()
collector.filter(
start=datetime.strptime(start_date, "%Y-%m-%d"),
end=datetime.strptime(end_date, "%Y-%m-%d"),
variables=[parameter],
features=[station_id],
)
data = collector.raw()
with open(out, "w") as file:
file.write(data)
collector.clear()
else:
with open(out, "r") as file:
data = file.read()
# Convert the raw data to an xarray
data = WaterML11ToPaegan(data).feature
# We expect only 1 station
if len(data.elements) == 0:
raise ValueError("Data does not contain any station data")
else:
station = data.elements[0]
# Unit conversion from cubic feet per second to cubic meters per second
values = np.array(
[float(point.members[0]["value"]) / 35.315 for point in station.elements],
dtype=np.float32,
)
times = [point.time for point in station.elements]
attrs = {
"units": "cubic meters per second",
}
# Create the xarray dataset
ds = xr.Dataset(
{"streamflow": (["time"], values, attrs)}, coords={"time": times}
)
# Set some nice attributes
ds.attrs["title"] = "USGS Data from streamflow data"
ds.attrs["station"] = station.name
ds.attrs["stationid"] = station.get_uid()
ds.attrs["location"] = (station.location.y, station.location.x)
ds.to_netcdf(netcdf)
return ds
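One detail worth verifying in the function above is the hard-coded divisor 35.315: USGS parameter 00060 reports discharge in cubic feet per second, and since 1 ft = 0.3048 m, one cubic meter is 1 / 0.3048**3 ≈ 35.3147 cubic feet, so dividing by 35.315 converts ft³/s to m³/s to about four significant figures. A one-line sanity check:
assert abs(1 / 0.3048 ** 3 - 35.315) < 1e-2   # ft³ per m³ matches the divisor used above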
| 30.271318
| 89
| 0.576184
|
import os
from datetime import datetime
import numpy as np
import xarray as xr
from pyoos.collectors.usgs.usgs_rest import UsgsRest
from pyoos.parsers.waterml import WaterML11ToPaegan
def get_usgs_data(station_id, start_date, end_date, parameter="00060", cache_dir=None):
if cache_dir is None:
cache_dir = os.environ["USGS_DATA_HOME"]
netcdf = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".nc",
)
if os.path.exists(netcdf):
return xr.open_dataset(netcdf)
out = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".wml",
)
if not os.path.exists(out):
collector = UsgsRest()
collector.filter(
start=datetime.strptime(start_date, "%Y-%m-%d"),
end=datetime.strptime(end_date, "%Y-%m-%d"),
variables=[parameter],
features=[station_id],
)
data = collector.raw()
with open(out, "w") as file:
file.write(data)
collector.clear()
else:
with open(out, "r") as file:
data = file.read()
data = WaterML11ToPaegan(data).feature
if len(data.elements) == 0:
raise ValueError("Data does not contain any station data")
else:
station = data.elements[0]
values = np.array(
[float(point.members[0]["value"]) / 35.315 for point in station.elements],
dtype=np.float32,
)
times = [point.time for point in station.elements]
attrs = {
"units": "cubic meters per second",
}
ds = xr.Dataset(
{"streamflow": (["time"], values, attrs)}, coords={"time": times}
)
ds.attrs["title"] = "USGS Data from streamflow data"
ds.attrs["station"] = station.name
ds.attrs["stationid"] = station.get_uid()
ds.attrs["location"] = (station.location.y, station.location.x)
ds.to_netcdf(netcdf)
return ds
| true
| true
|
7904c5ac3928098f72b6ad5705bf4a2b346d03ae
| 1,621
|
py
|
Python
|
site/social_auth/filters.py
|
776166/yggdrasil-django
|
7ae134ad5a714e0ab9f735348406b32e46c36b3a
|
[
"MIT"
] | null | null | null |
site/social_auth/filters.py
|
776166/yggdrasil-django
|
7ae134ad5a714e0ab9f735348406b32e46c36b3a
|
[
"MIT"
] | 1
|
2020-06-05T19:19:22.000Z
|
2020-06-05T19:19:22.000Z
|
site/social_auth/filters.py
|
776166/yggdrasil-django
|
7ae134ad5a714e0ab9f735348406b32e46c36b3a
|
[
"MIT"
] | null | null | null |
import re
from social_core.backends.oauth import OAuthAuth
NAME_RE = re.compile(r'([^O])Auth')
LEGACY_NAMES = ['username', 'email']
def backend_name(backend):
name = backend.__name__
name = name.replace('OAuth', ' OAuth')
name = name.replace('OpenId', ' OpenId')
name = name.replace('Sandbox', '')
name = NAME_RE.sub(r'\1 Auth', name)
return name
def backend_class(backend):
return backend.name.replace('-', ' ')
def icon_name(name):
return {
'stackoverflow': 'stack-overflow',
'google-oauth': 'google',
'google-oauth2': 'google',
'google-openidconnect': 'google',
'yahoo-oauth': 'yahoo',
'facebook-app': 'facebook',
'email': 'envelope',
'vimeo': 'vimeo-square',
'linkedin-oauth2': 'linkedin',
'vk-oauth2': 'vk',
'live': 'windows',
'username': 'user',
}.get(name, name)
def slice_by(value, items):
return [value[n:n + items] for n in range(0, len(value), items)]
def social_backends(backends):
return filter_backends(
backends,
lambda name, backend: name not in LEGACY_NAMES
)
def legacy_backends(backends):
return filter_backends(
backends,
lambda name, backend: name in LEGACY_NAMES
)
def oauth_backends(backends):
return filter_backends(
backends,
lambda name, backend: issubclass(backend, OAuthAuth)
)
def filter_backends(backends, filter_func):
backends = [item for item in backends.items() if filter_func(*item)]
backends.sort(key=lambda backend: backend[0])
return backends
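Two of these helpers are easiest to read from concrete calls (values are illustrative):
slice_by(['a', 'b', 'c', 'd', 'e'], 2)   # -> [['a', 'b'], ['c', 'd'], ['e']]
icon_name('google-oauth2')               # -> 'google'
icon_name('github')                      # -> 'github' (unmapped names fall through)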
| 23.157143
| 72
| 0.624923
|
import re
from social_core.backends.oauth import OAuthAuth
NAME_RE = re.compile(r'([^O])Auth')
LEGACY_NAMES = ['username', 'email']
def backend_name(backend):
name = backend.__name__
name = name.replace('OAuth', ' OAuth')
name = name.replace('OpenId', ' OpenId')
name = name.replace('Sandbox', '')
name = NAME_RE.sub(r'\1 Auth', name)
return name
def backend_class(backend):
return backend.name.replace('-', ' ')
def icon_name(name):
return {
'stackoverflow': 'stack-overflow',
'google-oauth': 'google',
'google-oauth2': 'google',
'google-openidconnect': 'google',
'yahoo-oauth': 'yahoo',
'facebook-app': 'facebook',
'email': 'envelope',
'vimeo': 'vimeo-square',
'linkedin-oauth2': 'linkedin',
'vk-oauth2': 'vk',
'live': 'windows',
'username': 'user',
}.get(name, name)
def slice_by(value, items):
return [value[n:n + items] for n in range(0, len(value), items)]
def social_backends(backends):
return filter_backends(
backends,
lambda name, backend: name not in LEGACY_NAMES
)
def legacy_backends(backends):
return filter_backends(
backends,
lambda name, backend: name in LEGACY_NAMES
)
def oauth_backends(backends):
return filter_backends(
backends,
lambda name, backend: issubclass(backend, OAuthAuth)
)
def filter_backends(backends, filter_func):
backends = [item for item in backends.items() if filter_func(*item)]
backends.sort(key=lambda backend: backend[0])
return backends
| true
| true
|
7904c5ebb599aec04ae6a086b288bfaba3c63bf2
| 1,326
|
py
|
Python
|
pyrlang/net_kernel.py
|
rlouf/Pyrlang
|
c50e6a52a29128f535f29aeb98ee1a8b333852b8
|
[
"Apache-2.0"
] | 1
|
2020-07-23T13:26:35.000Z
|
2020-07-23T13:26:35.000Z
|
pyrlang/net_kernel.py
|
rlouf/Pyrlang
|
c50e6a52a29128f535f29aeb98ee1a8b333852b8
|
[
"Apache-2.0"
] | null | null | null |
pyrlang/net_kernel.py
|
rlouf/Pyrlang
|
c50e6a52a29128f535f29aeb98ee1a8b333852b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pyrlang.gen_server import GenServer
from pyrlang.node import Node
from term.atom import Atom
LOG = logging.getLogger("pyrlang")
class NetKernel(GenServer):
""" A special process which registers itself as ``net_kernel`` and handles
one specific ``is_auth`` message, which is used by ``net_adm:ping``.
"""
def __init__(self, node) -> None:
""" :param node: pyrlang.node.Node
"""
GenServer.__init__(self,
node_name=node.node_name_,
accepted_calls=['is_auth'])
node.register_name(self, Atom('net_kernel'))
@staticmethod
def is_auth():
return Atom('yes')
__all__ = ['NetKernel']
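# Editor's illustrative sketch (not part of the original module), following
# Pyrlang's documented startup flow; the node name, cookie, and run() call
# are assumptions, not verbatim API guarantees:
#
#     node = Node(node_name="py@127.0.0.1", cookie="COOKIE")
#     NetKernel(node)
#     node.run()  # afterwards `net_adm:ping('py@127.0.0.1').` should return pong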
| 30.837209
| 78
| 0.687029
|
import logging
from pyrlang.gen_server import GenServer
from pyrlang.node import Node
from term.atom import Atom
LOG = logging.getLogger("pyrlang")
class NetKernel(GenServer):
def __init__(self, node) -> None:
GenServer.__init__(self,
node_name=node.node_name_,
accepted_calls=['is_auth'])
node.register_name(self, Atom('net_kernel'))
@staticmethod
def is_auth():
return Atom('yes')
__all__ = ['NetKernel']
| true
| true
|
7904c63ff97ebcd5f9ef6db7145b1e6d5de04ccb
| 1,461
|
py
|
Python
|
mla_game/settings/stage.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/settings/stage.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/settings/stage.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
from .base import *
import os
# how many data points are enough to calculate confidence?
MINIMUM_SAMPLE_SIZE = 3
# original phrase is good enough for export
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
# original phrase needs correction
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
# correction is good enough to award points and export data
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
# correction no longer needs votes and can replace original phrase
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = True
LOG_DIRECTORY = '/home/wgbh/logs'
STATIC_ROOT = '/home/wgbh/webroot/static'
ALLOWED_HOSTS = [
'mlagame-dev.wgbhdigital.org', 'mlagame.wgbhdigital.org',
'fixit.wgbhdigital.org',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
'TEST': {
'NAME': 'mla-test',
},
},
}
GA_CODE = 'null'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| 23.190476
| 66
| 0.615332
|
from .base import *
import os
MINIMUM_SAMPLE_SIZE = 3
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = True
LOG_DIRECTORY = '/home/wgbh/logs'
STATIC_ROOT = '/home/wgbh/webroot/static'
ALLOWED_HOSTS = [
'mlagame-dev.wgbhdigital.org', 'mlagame.wgbhdigital.org',
'fixit.wgbhdigital.org',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
'TEST': {
'NAME': 'mla-test',
},
},
}
GA_CODE = 'null'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| true
| true
|
7904c8bf5fb25baf6061a40684b55286cb37e548
| 407
|
py
|
Python
|
backend/backend/asgi.py
|
CSXLabs/csxlabs.org
|
a51551b0eda149045feea1bb148dcf9ada5566e7
|
[
"MIT"
] | 3
|
2021-09-15T04:02:59.000Z
|
2021-11-03T07:18:35.000Z
|
backend/backend/asgi.py
|
CSXLabs/csxlabs.org
|
a51551b0eda149045feea1bb148dcf9ada5566e7
|
[
"MIT"
] | 36
|
2021-09-22T05:28:14.000Z
|
2021-12-05T18:10:08.000Z
|
backend/backend/asgi.py
|
CSXLabs/csxlabs.org
|
a51551b0eda149045feea1bb148dcf9ada5566e7
|
[
"MIT"
] | 2
|
2021-09-15T04:17:54.000Z
|
2022-01-11T17:13:51.000Z
|
"""
ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = get_asgi_application()
| 23.941176
| 79
| 0.7543
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = get_asgi_application()
| true
| true
|
7904c939a1af4ad8d91b258873e3311af1d0bb47
| 1,649
|
py
|
Python
|
py/test/selenium/webdriver/common/page_load_timeout_tests.py
|
shubhramittal/selenium
|
0359f0a510991d1b5ce9b41d849425349f952a86
|
[
"Apache-2.0"
] | null | null | null |
py/test/selenium/webdriver/common/page_load_timeout_tests.py
|
shubhramittal/selenium
|
0359f0a510991d1b5ce9b41d849425349f952a86
|
[
"Apache-2.0"
] | null | null | null |
py/test/selenium/webdriver/common/page_load_timeout_tests.py
|
shubhramittal/selenium
|
0359f0a510991d1b5ce9b41d849425349f952a86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import TimeoutException
class TestPageLoadTimeout(object):
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testShouldTimeoutOnPageLoadTakingTooLong(self, driver, pages):
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
pages.load("simpleTest.html")
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1309231')
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testClickShouldTimeout(self, driver, pages):
pages.load("simpleTest.html")
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
driver.find_element_by_id("multilinelink").click()
| 40.219512
| 70
| 0.745907
|
import pytest
from selenium.common.exceptions import TimeoutException
class TestPageLoadTimeout(object):
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testShouldTimeoutOnPageLoadTakingTooLong(self, driver, pages):
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
pages.load("simpleTest.html")
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1309231')
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testClickShouldTimeout(self, driver, pages):
pages.load("simpleTest.html")
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
driver.find_element_by_id("multilinelink").click()
| true
| true
|
7904c964ac73898969bc98fc593eb600a723f137
| 10,991
|
py
|
Python
|
examples/ImageRecon/OccNet/architectures.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-10-31T01:08:17.000Z
|
2021-11-08T09:43:17.000Z
|
examples/ImageRecon/OccNet/architectures.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/ImageRecon/OccNet/architectures.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-10T09:19:19.000Z
|
2021-11-12T08:18:17.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
r''' ResNet-18 encoder network for image input.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
use_linear (bool): whether a final linear layer should be used
'''
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
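# Editor annotation: the constants above are the standard ImageNet channel
# means (0.485, 0.456, 0.406) and standard deviations (0.229, 0.224, 0.225);
# inputs are expected as NCHW float images scaled to [0, 1].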
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
''' Returns prior distribution for latent code z.
Args:
device (device): pytorch device
'''
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
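# Editor annotation: with z_dim hard-coded to 0 the prior is a
# zero-dimensional Normal, i.e. the latent code is effectively disabled and
# the decoder is conditioned on the image embedding c alone.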
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
            # nn.GroupNorm1d does not exist in torch.nn; nn.GroupNorm is the
            # closest real call (num_groups=1 here is an editor assumption).
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
        c_dim (int): dimension of latent conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
        ''' Computes the evidence lower bound (ELBO).
Args:
p (tensor): sampled points
occ (tensor): occupancy values for p
inputs (tensor): conditioning input
'''
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
z (tensor): latent code z
c (tensor): latent conditioned code c
'''
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
''' Infers z.
Args:
p (tensor): points tensor
occ (tensor): occupancy values for occ
c (tensor): latent conditioned code c
'''
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
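# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): a minimal
# standalone exercise of the conditional-batch-norm building blocks with
# made-up shapes.  OccupancyNetwork itself is not instantiated here because
# its Resnet18 encoder downloads pretrained weights on construction.
if __name__ == "__main__":
    cbn = CBatchNorm1d(c_dim=8, f_dim=16)
    block = CResnetBlockConv1d(c_dim=8, size_in=16)
    x = torch.randn(4, 16, 32)  # batch x features x points
    c = torch.randn(4, 8)       # batch x conditioning code
    print(cbn(x, c).shape)      # torch.Size([4, 16, 32])
    print(block(x, c).shape)    # torch.Size([4, 16, 32])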
| 31.402857
| 85
| 0.59203
|
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torchvision import models
import torch.distributions as dist
import torch
from torch.nn import Parameter
class Resnet18(nn.Module):
def __init__(self, c_dim, normalize=True, use_linear=True):
super().__init__()
self.normalize = normalize
self.use_linear = use_linear
self.features = models.resnet18(pretrained=True)
self.features.fc = nn.Sequential()
if use_linear:
self.fc = nn.Linear(512, c_dim)
elif c_dim == 512:
self.fc = nn.Sequential()
else:
raise ValueError('c_dim must be 512 if use_linear is False')
def forward(self, x):
if self.normalize:
x = normalize_imagenet(x)
net = self.features(x)
out = self.fc(net)
return out
def normalize_imagenet(x):
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class DecoderCBatchNorm(nn.Module):
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
def get_prior_z(device):
z_dim = 0
p0_z = dist.Normal(
torch.zeros(z_dim, device = device),
torch.ones(z_dim, device = device)
)
return p0_z
class CBatchNorm1d(nn.Module):
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
elif norm_method == 'group_norm':
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
if len(c.size()) == 2:
c = c.unsqueeze(2)
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
net = self.bn(x)
out = gamma * net + beta
return out
class CResnetBlockConv1d(nn.Module):
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class OccupancyNetwork(nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
self.decoder = DecoderCBatchNorm(dim=3, z_dim=0, c_dim=256,
hidden_size=256).to(self.device)
self.encoder = Resnet18(256, normalize=True, use_linear=True).to(self.device)
self.p0_z = get_prior_z(self.device)
def forward(self, p, inputs, sample=True, **kwargs):
batch_size = p.size(0)
c = self.encode_inputs(inputs)
z = self.get_z_from_prior((batch_size,), sample=sample)
p_r = self.decode(p, z, c, **kwargs)
return p_r
def compute_elbo(self, p, occ, inputs, **kwargs):
c = self.encode_inputs(inputs)
q_z = self.infer_z(p, occ, c, **kwargs)
z = q_z.rsample()
p_r = self.decode(p, z, c, **kwargs)
rec_error = -p_r.log_prob(occ).sum(dim=-1)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
elbo = -rec_error - kl
return elbo, rec_error, kl
def encode_inputs(self, inputs):
c = self.encoder(inputs)
return c
def decode(self, p, z, c, **kwargs):
logits = self.decoder(p, z, c, **kwargs)
p_r = dist.Bernoulli(logits=logits)
return p_r
def infer_z(self, p, occ, c, **kwargs):
batch_size = p.size(0)
mean_z = torch.empty(batch_size, 0).to(self.device)
logstd_z = torch.empty(batch_size, 0).to(self.device)
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
if sample:
z = self.p0_z.sample(size).to(self.device)
else:
z = self.p0_z.mean.to(self.device)
z = z.expand(*size, *z.size())
return z
| true
| true
|
7904cabfedf75b5708c9d17ad9e31ceb76acd88b
| 16,140
|
py
|
Python
|
boneless/simulator/sim.py
|
zignig/Boneless-CPU
|
10bb571b4efab015e1bf147c78f0b8b3c93443e4
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
boneless/simulator/sim.py
|
zignig/Boneless-CPU
|
10bb571b4efab015e1bf147c78f0b8b3c93443e4
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
boneless/simulator/sim.py
|
zignig/Boneless-CPU
|
10bb571b4efab015e1bf147c78f0b8b3c93443e4
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
import array
__all__ = ["BonelessSimulator", "BonelessError"]
# Flag functions
# Used to calculate the sign, zero, carry, and overflow flags.
def sign(val):
return int((val & 0x08000) != 0)
def zero(val):
return int(to_unsigned16b(val) == 0)
# Carry and V use 65xx semantics:
# http://www.righto.com/2012/12/the-6502-overflow-flag-explained.html
# http://teaching.idallen.com/dat2343/10f/notes/040_overflow.txt
def carry(val):
return int(val > 65535)
def overflow(a, b, out):
s_a = sign(a)
s_b = sign(b)
s_o = sign(out)
    # Signed overflow on add: both operands share a sign that the result lacks.
    return int((s_a and s_b and not s_o) or (not s_a and not s_b and s_o))
def overflow_sub(a, b, out):
s_a = sign(a)
s_b = sign(b)
s_o = sign(out)
return int((s_a and not s_b and not s_o) or (not s_a and s_b and s_o))
# Works with signed _or_ unsigned math.
def to_unsigned16b(val):
if val < 0:
return val + 65536
elif val >= 65536:
return val - 65536
else:
return val
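# Editor's worked example of the flag helpers above (annotation only):
#   sign(0x8000)                     -> 1  (bit 15 set)
#   zero(0x10000)                    -> 1  (wraps to 0x0000)
#   carry(0xFFFF + 0x0001)           -> 1  (result needs a 17th bit)
#   overflow(0x7FFF, 0x0001, 0x8000) -> 1  (pos + pos gave a negative result)
#   to_unsigned16b(-1)               -> 0xFFFF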
class BonelessSimulator:
"""The Boneless CPU instruction-level simulator object.
Instantiating this object will create a simulator context in
which Boneless CPU code runs, one instruction at a time. A
sample simulation session looks similar to the following:
::
from boneless.simulator import *
from boneless.instr import *
        cpu = BonelessSimulator(start_pc=0x10, mem_size=65536)
program = assemble([MOVL(R0, 0xFF)])
cpu.load_program(program)
with cpu:
cpu.stepi()
print(cpu.regs())
Parameters
----------
start_pc: int, optional
The Program Counter register is set to this value when instantiating
an object of this class.
mem_size: int, optional
Number of 16-bit words that the simulated CPU can access, starting
from address zero. Accessing out-of-bounds memory will cause an
exception.
io_callback: function
Initial I/O callback to use. See
:func:`~boneless_sim.BonelessSimulator.register_io` for usage.
Attributes
----------
sim_active: bool
``True`` if a simulation is in progress, ``False`` otherwise.
window: int
Offset of the register window into memory (Boneless CPU registers
are just memory locations.)
pc: int
Current program counter pointer.
z: int
Current value of the Zero flag, ``1`` for ``True``, or ``0`` for
``False``.
s: int
Current value of the Sign flag, ``1`` for ``True``, or ``0`` for
``False``.
c: int
Current value of the Carry flag, ``1`` for ``True``, or ``0`` for
``False``.
v: int
Current value of the OVerflow flag, ``1`` for ``True``, or ``0`` for
``False``.
mem: array
Contents of the primary address space seen by the simulated CPU. On
object construction this is initialized to all zeroes.
io_callback: function
Reference to the current I/O callback function.
"""
def __init__(self, start_pc=0x10, mem_size=1024, io_callback=None):
def memset():
for i in range(mem_size):
yield 0
self.sim_active = False
self.window = 0
self.pc = start_pc
self.z = 0
self.s = 0
self.c = 0
self.v = 0
self.mem = array.array("H", memset())
self.io_callback = io_callback
def __enter__(self):
self.sim_active = True
return self
def __exit__(self, type, value, traceback):
self.sim_active = False
def regs(self):
"""Return the 8 registers within the current register window.
Returns
-------
array
Array of 16-bit ints representing registers.
"""
return self.mem[self.window:self.window+8]
def read_reg(self, reg):
"""Read the value of a single 16-bit register.
Parameters
----------
reg: int
            Register number to read. ``R[0-7]`` from :mod:`boneless.instr`
is also acceptable.
Returns
-------
int
Current value of the queried register.
"""
return self.mem[self.reg_loc(reg)]
def reg_loc(self, offs):
"""Convenience function to return the address of a register in memory.
A register's location changes when the CPU's :attr:`window` is updated.
Parameters
----------
offs: int
            Register number to read. ``R[0-7]`` from :mod:`boneless.instr`
is also acceptable.
Returns
-------
int
16-bit memory address of the queried register.
"""
return self.window + offs
def set_pc(self, new_pc):
"""Set the program counter to a new value.
The program counter can only be updated using this function when
a simulation is inactive.
Parameters
----------
new_pc: int
            16-bit value (`treated as unsigned`) to write to the PC register. If the
value is out of range, a read to :attr:`mem` will throw an
exception.
"""
if not self.sim_active:
self.pc = new_pc
def write_reg(self, reg, val):
"""Write the value of a single 16-bit register.
Registers can only be updated using this function when a simulation
is inactive.
Parameters
----------
reg: int
            Register number to write. ``R[0-7]`` from :mod:`boneless.instr`
is also acceptable.
val: int
            16-bit value (`treated as unsigned`) to write to a register. If the
value is out of range, the write to :attr:`mem` will throw an
exception.
"""
if not self.sim_active:
self.mem[self.reg_loc(reg)] = val
def load_program(self, contents, start=0x0):
"""Inject program code into the memory space of the simulated CPU.
This function does not distinguish between loading program code and
raw data. Program code can only be loaded using this function when a
simulation is inactive.
Parameters
----------
contents: list of ints
Integer representation of opcodes to load into the memory space of
the simulated CPU. The function :func:`boneless.instr.assemble`
produces a list compatible with this input parameter.
start: int
16-bit int offset representing the starting location in memory
in which to load ``contents``.
"""
if not self.sim_active:
for i, c in enumerate(contents):
self.mem[i + start] = c
def register_io(self, callback):
"""Replace the currently-defined I/O callback with a new one.
The Simulated Boneless CPU needs a way to contact the outside world.
The architecture itself defines a secondary address space for I/O,
similar in idea to x86 port-mapped I/O. When the ``STX`` and ``LDX``
instructions are encountered, the provided callback will execute to
simulate I/O. It is up to the user to decode the I/O address passed
into the callback accordingly.
The I/O callback can only be replaced when a simulation is inactive.
Parameters
----------
callback: function
The callback function, using the following signature:
``fn(addr, data=None)``
* ``addr``: 16-bit int
Virtual I/O address to read/write
            * ``data``: 16-bit int or ``None``
If this I/O access is a read, ``data`` is ``None``. Otherwise,
``data`` contains a value to write to a virtual I/O device.
The callback should return a 16-bit int if the I/O access was a
read and ``None`` if the I/O access was a write (ignored by the
simulator).
"""
if not self.sim_active:
self.io_callback = callback
def stepi(self):
"""Run a single instruction of the simulated CPU.
The state of the CPU will be available through the attributes of
:obj:`~boneless_sim.BonelessSimulator`.
"""
opcode = self.mem[self.pc]
op_class = (0xF800 & opcode) >> 11
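        # Editor annotation: the top five bits select the opcode class --
        # 0x00-0x01 A (ALU), 0x02-0x03 S (shift), 0x04-0x07 M (memory),
        # 0x08-0x0F I (immediate), 0x10-0x1F C (conditional jump).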
if op_class in [0x00, 0x01]:
self._do_a_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x02, 0x03]:
self._do_s_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x04, 0x05, 0x06, 0x07]:
self._do_m_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F]:
pc_incr = self._do_i_class(opcode)
self.pc = to_unsigned16b(self.pc + pc_incr)
else:
pc_incr = self._do_c_class(opcode)
self.pc = to_unsigned16b(self.pc + pc_incr)
# Utility Functions- Do not call directly
def _write_reg(self, reg, val):
self.mem[self.reg_loc(reg)] = val
# Handle Opcode Clases- Do not call directly
def _do_a_class(self, opcode):
dst = (0x0700 & opcode) >> 8
opa = (0x00E0 & opcode) >> 5
opb = (0x001C & opcode) >> 2
typ = (0x0003 & opcode)
code = (0x0800 & opcode) >> 11
val_a = self.read_reg(opa)
val_b = self.read_reg(opb)
if code and (typ in range(3)):
# ADD
if typ == 0x00:
raw = val_a + val_b
self.v = overflow(val_a, val_b, raw)
self._write_reg(dst, to_unsigned16b(raw))
# SUB
elif typ == 0x01:
raw = val_a + to_unsigned16b(~val_b) + 1
self.v = overflow_sub(val_a, val_b, raw)
self._write_reg(dst, to_unsigned16b(raw))
# CMP
else:
raw = val_a + to_unsigned16b(~val_b) + 1
self.v = overflow_sub(val_a, val_b, raw)
self.c = carry(raw)
elif not code and typ in range(3):
# AND
if typ == 0x00:
raw = val_a & val_b
# OR
elif typ == 0x01:
raw = val_a | val_b
# XOR
else:
raw = val_a ^ val_b
self._write_reg(dst, raw)
else:
raise BonelessError("A-class opcode with typ == 0x03 is a reserved instruction.")
self.z = zero(raw)
self.s = sign(raw)
def _do_s_class(self, opcode):
dst = (0x0700 & opcode) >> 8
opa = (0x00E0 & opcode) >> 5
amt = (0x001E & opcode) >> 1
typ = (0x0001 & opcode)
code = (0x0800 & opcode) >> 11
if not code:
# SLL/MOV
if typ == 0:
raw = self.read_reg(opa) << amt
# ROT
else:
# Don't actually rotate, but implement
# in terms of bitshifts.
val = self.read_reg(opa)
hi_mask = ((1 << amt) - 1) << (15 - amt + 1)
lo_mask = (1 << (15 - amt + 1)) - 1
raw_hi = (hi_mask & val) >> (15 - amt + 1)
raw_lo = (lo_mask & val) << amt
raw = raw_hi | raw_lo
else:
# SRL
if typ == 0:
raw = self.read_reg(opa) >> amt
# SRA
else:
val = self.read_reg(opa)
sign_bit = sign(val)
u_shift = self.read_reg(opa) >> amt
if sign_bit:
sign_mask = ((1 << amt) - 1) << (15 - amt + 1)
raw = sign_mask | u_shift
else:
raw = u_shift
self._write_reg(dst, raw & 0x0FFFF)
self.z = zero(raw)
self.s = sign(raw)
def _do_m_class(self, opcode):
def to_signed5b(val):
if val > 16:
return val - 32
else:
return val
code = (0x1800 & opcode) >> 11
srcdst = (0x0700 & opcode) >> 8
adr = (0x00E0 & opcode) >> 5
imm = (0x001F & opcode)
# LD
if code == 0x00:
self._write_reg(srcdst, self.mem[self.read_reg(adr) + to_signed5b(imm)])
# ST
elif code == 0x01:
self.mem[self.read_reg(adr) + to_signed5b(imm)] = self.read_reg(srcdst)
# LDX
elif code == 0x02:
if self.io_callback:
val = self.io_callback(self.read_reg(adr) + to_signed5b(imm), None)
self._write_reg(srcdst, val)
else:
raise BonelessError("LDX instruction encountered but io_callback not set.")
# STX
else:
if self.io_callback:
val = self.read_reg(srcdst)
self.io_callback(self.read_reg(adr) + to_signed5b(imm), val)
else:
raise BonelessError("STX instruction encountered but io_callback not set.")
def _do_i_class(self, opcode):
def to_signed8b(val):
if val > 127:
return val - 256
else:
return val
opc = (0x3800 & opcode) >> 11
srcdst = (0x0700 & opcode) >> 8
imm = (0x00FF & opcode)
pc_incr = 1
# MOVL
if opc == 0x00:
val = imm
# MOVH
elif opc == 0x01:
val = (imm << 8)
# MOVA
elif opc == 0x02:
val = to_unsigned16b(self.pc + 1 + to_signed8b(imm))
# ADDI/SUBI
elif opc == 0x03:
op_a = self.read_reg(srcdst)
op_b = to_signed8b(imm)
# Flags will not be set correctly unless we convert
# op_b to unsigned to force a carry when op_a > op_b.
raw = op_a + to_unsigned16b(op_b)
val = to_unsigned16b(raw)
self.z = zero(raw)
self.s = sign(raw)
self.c = carry(raw)
self.v = overflow(op_a, op_b, raw)
# LDI
elif opc == 0x04:
val = self.mem[to_unsigned16b(self.pc + to_signed8b(imm))]
# STI
elif opc == 0x05:
self.mem[to_unsigned16b(self.pc + to_signed8b(imm))] = self.read_reg(srcdst)
# JAL
elif opc == 0x06:
val = to_unsigned16b(self.pc + 1)
pc_incr = 1 + to_signed8b(imm)
# JR
else:
raw_pc = self.read_reg(srcdst) + to_signed8b(imm)
pc_incr = to_unsigned16b(raw_pc - self.pc)
if opc not in [0x05, 0x07]:
self._write_reg(srcdst, val)
return pc_incr
def _do_c_class(self, opcode):
def to_signed11b(val):
if val > 1023:
return val - 2048
else:
return val
cond = (0x7000 & opcode) >> 12
flag = (0x0800 & opcode) >> 11
offs = (0x7FF & opcode)
# J
if cond == 0x00:
if flag:
raise BonelessError("Unconditional J with flag==1 is a reserved instruction.")
else:
cond_met = True
# JNZ/JNE, JZ/JE
elif cond == 0x01:
cond_met = (self.z == flag)
# JNS, JS
elif cond == 0x02:
cond_met = (self.s == flag)
# JNC/JULT, JC/JUGE
elif cond == 0x03:
cond_met = (self.c == flag)
# JNO, JO
elif cond == 0x04:
cond_met = (self.v == flag)
# JULE, JUGT
elif cond == 0x05:
cond_met = ((not self.c or self.z) == flag)
# JSGE, JSLT
elif cond == 0x06:
cond_met = ((self.s ^ self.v) == flag)
# JSGT, JSLE
elif cond == 0x07:
cond_met = (((self.s ^ self.v) or self.z) == flag)
if cond_met:
pc_incr = to_signed11b(offs) + 1
else:
pc_incr = 1
return pc_incr
class BonelessError(Exception):
"""Exception raised when the CPU simulator doesn't know what to do."""
pass
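# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): wiring a
# minimal I/O callback and single-stepping two hand-assembled instructions.
# The opcode words below were assembled against this simulator's own decoder
# (_do_i_class / _do_m_class), so treat the encodings as assumptions rather
# than an authoritative statement of the Boneless ISA.
if __name__ == "__main__":
    def demo_io(addr, data=None):
        # Reads return a constant; writes are just echoed.
        if data is None:
            return 0x1234
        print("I/O write: addr=%04x data=%04x" % (addr, data))

    cpu = BonelessSimulator(start_pc=0x10, mem_size=1024, io_callback=demo_io)
    cpu.load_program([0x40FF, 0x3100], start=0x10)  # MOVL R0, 0xFF ; LDX R1, [R0+0]
    with cpu:
        cpu.stepi()   # R0 <- 0x00FF
        cpu.stepi()   # R1 <- demo_io(0x00FF) == 0x1234
    print(cpu.regs())  # array('H', [255, 4660, 0, 0, 0, 0, 0, 0])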
| 31.709234
| 94
| 0.53575
|
import array
__all__ = ["BonelessSimulator", "BonelessError"]
def sign(val):
return int((val & 0x08000) != 0)
def zero(val):
return int(to_unsigned16b(val) == 0)
def carry(val):
return int(val > 65535)
def overflow(a, b, out):
s_a = sign(a)
s_b = sign(b)
s_o = sign(out)
    return int((s_a and s_b and not s_o) or (not s_a and not s_b and s_o))
def overflow_sub(a, b, out):
s_a = sign(a)
s_b = sign(b)
s_o = sign(out)
return int((s_a and not s_b and not s_o) or (not s_a and s_b and s_o))
def to_unsigned16b(val):
if val < 0:
return val + 65536
elif val >= 65536:
return val - 65536
else:
return val
class BonelessSimulator:
def __init__(self, start_pc=0x10, mem_size=1024, io_callback=None):
def memset():
for i in range(mem_size):
yield 0
self.sim_active = False
self.window = 0
self.pc = start_pc
self.z = 0
self.s = 0
self.c = 0
self.v = 0
self.mem = array.array("H", memset())
self.io_callback = io_callback
def __enter__(self):
self.sim_active = True
return self
def __exit__(self, type, value, traceback):
self.sim_active = False
def regs(self):
return self.mem[self.window:self.window+8]
def read_reg(self, reg):
return self.mem[self.reg_loc(reg)]
def reg_loc(self, offs):
return self.window + offs
def set_pc(self, new_pc):
if not self.sim_active:
self.pc = new_pc
def write_reg(self, reg, val):
if not self.sim_active:
self.mem[self.reg_loc(reg)] = val
def load_program(self, contents, start=0x0):
if not self.sim_active:
for i, c in enumerate(contents):
self.mem[i + start] = c
def register_io(self, callback):
if not self.sim_active:
self.io_callback = callback
def stepi(self):
opcode = self.mem[self.pc]
op_class = (0xF800 & opcode) >> 11
if op_class in [0x00, 0x01]:
self._do_a_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x02, 0x03]:
self._do_s_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x04, 0x05, 0x06, 0x07]:
self._do_m_class(opcode)
self.pc = to_unsigned16b(self.pc + 1)
elif op_class in [0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F]:
pc_incr = self._do_i_class(opcode)
self.pc = to_unsigned16b(self.pc + pc_incr)
else:
pc_incr = self._do_c_class(opcode)
self.pc = to_unsigned16b(self.pc + pc_incr)
def _write_reg(self, reg, val):
self.mem[self.reg_loc(reg)] = val
def _do_a_class(self, opcode):
dst = (0x0700 & opcode) >> 8
opa = (0x00E0 & opcode) >> 5
opb = (0x001C & opcode) >> 2
typ = (0x0003 & opcode)
code = (0x0800 & opcode) >> 11
val_a = self.read_reg(opa)
val_b = self.read_reg(opb)
if code and (typ in range(3)):
if typ == 0x00:
raw = val_a + val_b
self.v = overflow(val_a, val_b, raw)
self._write_reg(dst, to_unsigned16b(raw))
elif typ == 0x01:
raw = val_a + to_unsigned16b(~val_b) + 1
self.v = overflow_sub(val_a, val_b, raw)
self._write_reg(dst, to_unsigned16b(raw))
else:
raw = val_a + to_unsigned16b(~val_b) + 1
self.v = overflow_sub(val_a, val_b, raw)
self.c = carry(raw)
elif not code and typ in range(3):
if typ == 0x00:
raw = val_a & val_b
elif typ == 0x01:
raw = val_a | val_b
else:
raw = val_a ^ val_b
self._write_reg(dst, raw)
else:
raise BonelessError("A-class opcode with typ == 0x03 is a reserved instruction.")
self.z = zero(raw)
self.s = sign(raw)
def _do_s_class(self, opcode):
dst = (0x0700 & opcode) >> 8
opa = (0x00E0 & opcode) >> 5
amt = (0x001E & opcode) >> 1
typ = (0x0001 & opcode)
code = (0x0800 & opcode) >> 11
if not code:
if typ == 0:
raw = self.read_reg(opa) << amt
else:
# in terms of bitshifts.
val = self.read_reg(opa)
hi_mask = ((1 << amt) - 1) << (15 - amt + 1)
lo_mask = (1 << (15 - amt + 1)) - 1
raw_hi = (hi_mask & val) >> (15 - amt + 1)
raw_lo = (lo_mask & val) << amt
raw = raw_hi | raw_lo
else:
# SRL
if typ == 0:
raw = self.read_reg(opa) >> amt
# SRA
else:
val = self.read_reg(opa)
sign_bit = sign(val)
u_shift = self.read_reg(opa) >> amt
if sign_bit:
sign_mask = ((1 << amt) - 1) << (15 - amt + 1)
raw = sign_mask | u_shift
else:
raw = u_shift
self._write_reg(dst, raw & 0x0FFFF)
self.z = zero(raw)
self.s = sign(raw)
def _do_m_class(self, opcode):
def to_signed5b(val):
if val > 16:
return val - 32
else:
return val
code = (0x1800 & opcode) >> 11
srcdst = (0x0700 & opcode) >> 8
adr = (0x00E0 & opcode) >> 5
imm = (0x001F & opcode)
# LD
if code == 0x00:
self._write_reg(srcdst, self.mem[self.read_reg(adr) + to_signed5b(imm)])
# ST
elif code == 0x01:
self.mem[self.read_reg(adr) + to_signed5b(imm)] = self.read_reg(srcdst)
# LDX
elif code == 0x02:
if self.io_callback:
val = self.io_callback(self.read_reg(adr) + to_signed5b(imm), None)
self._write_reg(srcdst, val)
else:
raise BonelessError("LDX instruction encountered but io_callback not set.")
# STX
else:
if self.io_callback:
val = self.read_reg(srcdst)
self.io_callback(self.read_reg(adr) + to_signed5b(imm), val)
else:
raise BonelessError("STX instruction encountered but io_callback not set.")
def _do_i_class(self, opcode):
def to_signed8b(val):
if val > 127:
return val - 256
else:
return val
opc = (0x3800 & opcode) >> 11
srcdst = (0x0700 & opcode) >> 8
imm = (0x00FF & opcode)
pc_incr = 1
# MOVL
if opc == 0x00:
val = imm
# MOVH
elif opc == 0x01:
val = (imm << 8)
# MOVA
elif opc == 0x02:
val = to_unsigned16b(self.pc + 1 + to_signed8b(imm))
# ADDI/SUBI
elif opc == 0x03:
op_a = self.read_reg(srcdst)
op_b = to_signed8b(imm)
# Flags will not be set correctly unless we convert
# op_b to unsigned to force a carry when op_a > op_b.
raw = op_a + to_unsigned16b(op_b)
val = to_unsigned16b(raw)
self.z = zero(raw)
self.s = sign(raw)
self.c = carry(raw)
self.v = overflow(op_a, op_b, raw)
# LDI
elif opc == 0x04:
val = self.mem[to_unsigned16b(self.pc + to_signed8b(imm))]
# STI
elif opc == 0x05:
self.mem[to_unsigned16b(self.pc + to_signed8b(imm))] = self.read_reg(srcdst)
# JAL
elif opc == 0x06:
val = to_unsigned16b(self.pc + 1)
pc_incr = 1 + to_signed8b(imm)
# JR
else:
raw_pc = self.read_reg(srcdst) + to_signed8b(imm)
pc_incr = to_unsigned16b(raw_pc - self.pc)
if opc not in [0x05, 0x07]:
self._write_reg(srcdst, val)
return pc_incr
def _do_c_class(self, opcode):
def to_signed11b(val):
if val > 1023:
return val - 2048
else:
return val
cond = (0x7000 & opcode) >> 12
flag = (0x0800 & opcode) >> 11
offs = (0x7FF & opcode)
# J
if cond == 0x00:
if flag:
raise BonelessError("Unconditional J with flag==1 is a reserved instruction.")
else:
cond_met = True
# JNZ/JNE, JZ/JE
elif cond == 0x01:
cond_met = (self.z == flag)
# JNS, JS
elif cond == 0x02:
cond_met = (self.s == flag)
# JNC/JULT, JC/JUGE
elif cond == 0x03:
cond_met = (self.c == flag)
# JNO, JO
elif cond == 0x04:
cond_met = (self.v == flag)
# JULE, JUGT
elif cond == 0x05:
cond_met = ((not self.c or self.z) == flag)
# JSGE, JSLT
elif cond == 0x06:
cond_met = ((self.s ^ self.v) == flag)
# JSGT, JSLE
elif cond == 0x07:
cond_met = (((self.s ^ self.v) or self.z) == flag)
if cond_met:
pc_incr = to_signed11b(offs) + 1
else:
pc_incr = 1
return pc_incr
class BonelessError(Exception):
pass
| true
| true
|
7904cb201f269a1de6261f9918c25ed6cc376a26
| 969
|
py
|
Python
|
python01/PythonDecorator.py
|
zhayangtao/HelloPython
|
e0e8b450afba1382f56411344ad54ef9910a5004
|
[
"Apache-2.0"
] | null | null | null |
python01/PythonDecorator.py
|
zhayangtao/HelloPython
|
e0e8b450afba1382f56411344ad54ef9910a5004
|
[
"Apache-2.0"
] | 1
|
2017-09-01T03:59:11.000Z
|
2017-09-01T03:59:11.000Z
|
python01/PythonDecorator.py
|
zhayangtao/HelloPython
|
e0e8b450afba1382f56411344ad54ef9910a5004
|
[
"Apache-2.0"
] | null | null | null |
def now():
print('2017-05-31')
now.__name__
f = now
f.__name__
# Define a decorator that logs each call
def log(func):
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now1():
print('2017-05-31')
# If the decorator itself takes arguments, write a higher-order function that returns a decorator
def log1(text):
def decorator(func):
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
# A parameterized decorator that also preserves the wrapped function's metadata via functools.wraps
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
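# Editor's illustrative usage (an addition; everything referenced is defined above):
if __name__ == '__main__':
    now1()                  # -> call now1():  then  2017-05-31

    @log1('execute')
    def now2():
        print('2017-05-31')

    now2()                  # -> execute now2():  then  2017-05-31
    print(now2.__name__)    # -> wrapper  (log1 does not use functools.wraps)

    @log3('execute')
    def now3():
        print('2017-05-31')

    now3()                  # -> execute now3():  then  2017-05-31
    print(now3.__name__)    # -> now3  (preserved by functools.wraps)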
| 17.618182
| 53
| 0.562436
|
def now():
print('2017-05-31')
now.__name__
f = now
f.__name__
def log(func):
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now1():
print('2017-05-31')
def log1(text):
def decorator(func):
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
| true
| true
|
7904cb9e72caa6c8614489065df4491eae0d07f6
| 1,915
|
py
|
Python
|
app/global/train_cont.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | null | null | null |
app/global/train_cont.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | null | null | null |
app/global/train_cont.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | 2
|
2021-04-04T02:45:59.000Z
|
2022-03-19T09:41:39.000Z
|
from hydroDL import pathSMAP, master
import os
from hydroDL.data import dbCsv
# train one model per continent, plus a global model
contLst = [
'Africa',
'Asia',
'Australia',
'Europe',
'NorthAmerica',
'SouthAmerica',
]
subsetLst = ['Globalv4f1_' + x for x in contLst]
subsetLst.append('Globalv4f1')
outLst = [x + '_v4f1_y1' for x in contLst]
outLst.append('Global_v4f1_y1')
caseLst = ['Forcing', 'Soilm']
cid = 0
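# Editor annotation: cid round-robins the runs across three CUDA devices via
# cudaID=cid % 3 below; each run is detached into a `screen` session named
# after its output directory.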
for k in range(len(subsetLst)):
for case in caseLst:
if case == 'Forcing':
varLst = dbCsv.varForcingGlobal
else:
varLst = dbCsv.varSoilmGlobal
optData = master.default.update(
master.default.optDataSMAP,
rootDB=pathSMAP['DB_L3_Global'],
subset=subsetLst[k],
tRange=[20150401, 20160401],
varT=varLst)
optModel = master.default.optLstm
optLoss = master.default.optLossSigma
optTrain = master.default.optTrainSMAP
out = os.path.join(pathSMAP['Out_L3_Global'], outLst[k] + '_' + case)
masterDict = master.wrapMaster(out, optData, optModel, optLoss,
optTrain)
master.runTrain(masterDict, cudaID=cid % 3, screen=outLst[k])
cid = cid + 1
# master.train(masterDict)
# some of them failed and rerun
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Africa_v4f1_y1_Forcing/',
# cudaID=1,
# screen='Africa_v4f1_y1_Forcing')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Asia_v4f1_y1_Soilm/',
# cudaID=0,
# screen='Asia_v4f1_y1_Soilm')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/NorthAmerica_v4f1_y1_Soilm/',
# cudaID=1,
# screen='NorthAmerica_v4f1_y1_Soilm')
# master.runTrain(
# r'/mnt/sdb/rnnSMAP/Model_SMAPgrid/L3_Global/Global_v4f1_y1_Forcing/',
# cudaID=2,
# screen='Global_v4f1_y1_Forcing')
| 30.887097
| 79
| 0.644386
|
from hydroDL import pathSMAP, master
import os
from hydroDL.data import dbCsv
contLst = [
'Africa',
'Asia',
'Australia',
'Europe',
'NorthAmerica',
'SouthAmerica',
]
subsetLst = ['Globalv4f1_' + x for x in contLst]
subsetLst.append('Globalv4f1')
outLst = [x + '_v4f1_y1' for x in contLst]
outLst.append('Global_v4f1_y1')
caseLst = ['Forcing', 'Soilm']
cid = 0
for k in range(len(subsetLst)):
for case in caseLst:
if case == 'Forcing':
varLst = dbCsv.varForcingGlobal
else:
varLst = dbCsv.varSoilmGlobal
optData = master.default.update(
master.default.optDataSMAP,
rootDB=pathSMAP['DB_L3_Global'],
subset=subsetLst[k],
tRange=[20150401, 20160401],
varT=varLst)
optModel = master.default.optLstm
optLoss = master.default.optLossSigma
optTrain = master.default.optTrainSMAP
out = os.path.join(pathSMAP['Out_L3_Global'], outLst[k] + '_' + case)
masterDict = master.wrapMaster(out, optData, optModel, optLoss,
optTrain)
master.runTrain(masterDict, cudaID=cid % 3, screen=outLst[k])
cid = cid + 1
| true
| true
|
7904cc0d25e5c22180d8dd96475db6a70b610a23
| 13,084
|
py
|
Python
|
tests/profile/test_profile.py
|
williamjr/great_expectations
|
7e3af56476ea9966045172696af316b8537ff4c6
|
[
"Apache-2.0"
] | 2
|
2020-03-04T19:35:57.000Z
|
2020-04-13T21:06:02.000Z
|
tests/profile/test_profile.py
|
noncomposmentis/great_expectations
|
8155b1f20a88aa186745698792856f84d82f33ef
|
[
"Apache-2.0"
] | null | null | null |
tests/profile/test_profile.py
|
noncomposmentis/great_expectations
|
8155b1f20a88aa186745698792856f84d82f33ef
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import json
from collections import OrderedDict
from great_expectations.profile.base import DatasetProfiler
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
from great_expectations.dataset.pandas_dataset import PandasDataset
import great_expectations as ge
from ..test_utils import assertDeepAlmostEqual
from six import PY2
# Tests to write:
# test_cli_method_works -> test_cli
# test context-based profile methods
# test class-based profile methods
# noinspection PyPep8Naming
def test_DataSetProfiler_methods():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
assert DatasetProfiler.validate(1) == False
assert DatasetProfiler.validate(toy_dataset)
with pytest.raises(NotImplementedError) as e_info:
DatasetProfiler.profile(toy_dataset)
# noinspection PyPep8Naming
def test_ColumnsExistProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
expectations_config, evr_config = ColumnsExistProfiler.profile(toy_dataset)
assert len(expectations_config["expectations"]) == 1
assert expectations_config["expectations"][0]["expectation_type"] == "expect_column_to_exist"
assert expectations_config["expectations"][0]["kwargs"]["column"] == "x"
# noinspection PyPep8Naming
def test_BasicDatasetProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
# print(json.dumps(expectations_config, indent=2))
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) > 0
assert expectations_config["data_asset_name"] == "toy_dataset"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at"
}
assert "notes" in expectations_config["meta"]
assert set(expectations_config["meta"]["notes"].keys()) == {"format", "content"}
assert "To add additional notes" in expectations_config["meta"]["notes"]["content"][0]
added_expectations = set()
for exp in expectations_config["expectations"]:
added_expectations.add(exp["expectation_type"])
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
expected_expectations = {
'expect_table_row_count_to_be_between',
'expect_table_columns_to_match_ordered_list',
'expect_column_values_to_be_in_set',
'expect_column_unique_value_count_to_be_between',
'expect_column_proportion_of_unique_values_to_be_between',
'expect_column_values_to_not_be_null',
'expect_column_values_to_be_in_type_list',
'expect_column_values_to_be_unique'}
assert expected_expectations.issubset(added_expectations)
def test_BasicDatasetProfiler_null_column():
"""
    The profiler should determine that null columns have null cardinality and null type, and
    should not generate expectations specific to type and cardinality categories.
    We verify this by running the basic profiler on a Pandas dataset with an all-null column
    and asserting the number of successful results for that column.
"""
toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
# TODO: assert set - specific expectations
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) == 4
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) < \
len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'x' and result['success']])
def test_BasicDatasetProfiler_partially_null_column(dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the ``dataset`` fixture.
    "nulls" is the partially null column in the fixture dataset.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "nulls"])
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the ``non_numeric_low_card_dataset`` fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "lowcardnonnum"])
def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the ``non_numeric_high_card_dataset`` fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "highcardnonnum"])
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the ``numeric_high_card_dataset`` fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_table_row_count_to_be_between", "expect_table_columns_to_match_ordered_list", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == set([expectation['expectation_type'] for expectation in expectations_config["expectations"]])
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_with_context(empty_data_context, filesystem_csv_2):
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
not_so_empty_data_context.create_expectation_suite("my_datasource/f1", "default")
batch_kwargs = not_so_empty_data_context.yield_batch_kwargs("my_datasource/f1")
batch = not_so_empty_data_context.get_batch("my_datasource/f1", "default", batch_kwargs)
expectations_config, validation_results = BasicDatasetProfiler.profile(
batch)
# print(batch.get_batch_kwargs())
# print(json.dumps(expectations_config, indent=2))
assert expectations_config["data_asset_name"] == "my_datasource/default/f1"
assert expectations_config["expectation_suite_name"] == "default"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at", "batch_kwargs"
}
for exp in expectations_config["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert validation_results["meta"]["data_asset_name"] == "my_datasource/default/f1"
assert set(validation_results["meta"].keys()) == {
"great_expectations.__version__", "data_asset_name", "expectation_suite_name", "run_id", "batch_kwargs",
"batch_id"
}
# noinspection PyPep8Naming
def test_context_profiler(empty_data_context, filesystem_csv_2):
"""This just validates that it's possible to profile using the datasource hook, and have
validation results available in the DataContext"""
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
assert not_so_empty_data_context.list_expectation_suite_keys() == []
not_so_empty_data_context.profile_datasource("my_datasource", profiler=BasicDatasetProfiler)
assert len(not_so_empty_data_context.list_expectation_suite_keys()) == 1
profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', "BasicDatasetProfiler")
print(json.dumps(profiled_expectations, indent=2))
for exp in profiled_expectations["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert profiled_expectations["data_asset_name"] == "my_datasource/default/f1"
assert profiled_expectations["expectation_suite_name"] == "BasicDatasetProfiler"
assert "batch_kwargs" in profiled_expectations["meta"]["BasicDatasetProfiler"]
assert len(profiled_expectations["expectations"]) > 0
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_on_titanic():
"""
A snapshot test for BasicDatasetProfiler.
We are running the profiler on the Titanic dataset
and comparing the EVRs to ones retrieved from a
previously stored file.
"""
df = ge.read_csv("./tests/test_sets/Titanic.csv")
suite, evrs = df.profile(BasicDatasetProfiler)
# Check to make sure BasicDatasetProfiler is adding meta.columns with a single "description" field for each column
print(json.dumps(suite["meta"], indent=2))
assert "columns" in suite["meta"]
for k,v in suite["meta"]["columns"].items():
assert v == {"description": ""}
# Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs
evrs = df.validate(result_format="SUMMARY") # ["results"]
# with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
#
# with open('tests/render/fixtures/BasicDatasetProfiler_evrs.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
expected_evrs.pop("meta")
evrs.pop("meta")
# We know that python 2 does not guarantee the order of value_counts, which causes a different
# order for items in the partial_unexpected_value_counts list
# Remove those before test.
for result in evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
for result in expected_evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
# DISABLE TEST IN PY2 BECAUSE OF ORDER ISSUE AND NEAR-EOL
if not PY2:
assertDeepAlmostEqual(expected_evrs, evrs)
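# A minimal sketch (not part of the original suite) of the snapshot-test pattern used
# above: when the profiler's output changes intentionally, the stored EVR fixture can be
# regenerated with a helper like this. Illustrative only; it mirrors the commented-out
# writes above and uses only names already imported in this module.
def _regenerate_titanic_evr_fixture():
    df = ge.read_csv("./tests/test_sets/Titanic.csv")
    suite, evrs = df.profile(BasicDatasetProfiler)
    # Overwrite the stored snapshot that test_BasicDatasetProfiler_on_titanic reads.
    with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w') as f:
        f.write(json.dumps(evrs, indent=2))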
| 48.639405
| 490
| 0.747249
|
import pytest
import json
from collections import OrderedDict
from great_expectations.profile.base import DatasetProfiler
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
from great_expectations.dataset.pandas_dataset import PandasDataset
import great_expectations as ge
from ..test_utils import assertDeepAlmostEqual
from six import PY2
def test_DataSetProfiler_methods():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
assert DatasetProfiler.validate(1) == False
assert DatasetProfiler.validate(toy_dataset)
with pytest.raises(NotImplementedError) as e_info:
DatasetProfiler.profile(toy_dataset)
def test_ColumnsExistProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
expectations_config, evr_config = ColumnsExistProfiler.profile(toy_dataset)
assert len(expectations_config["expectations"]) == 1
assert expectations_config["expectations"][0]["expectation_type"] == "expect_column_to_exist"
assert expectations_config["expectations"][0]["kwargs"]["column"] == "x"
def test_BasicDatasetProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) > 0
assert expectations_config["data_asset_name"] == "toy_dataset"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at"
}
assert "notes" in expectations_config["meta"]
assert set(expectations_config["meta"]["notes"].keys()) == {"format", "content"}
assert "To add additional notes" in expectations_config["meta"]["notes"]["content"][0]
added_expectations = set()
for exp in expectations_config["expectations"]:
added_expectations.add(exp["expectation_type"])
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
expected_expectations = {
'expect_table_row_count_to_be_between',
'expect_table_columns_to_match_ordered_list',
'expect_column_values_to_be_in_set',
'expect_column_unique_value_count_to_be_between',
'expect_column_proportion_of_unique_values_to_be_between',
'expect_column_values_to_not_be_null',
'expect_column_values_to_be_in_type_list',
'expect_column_values_to_be_unique'}
assert expected_expectations.issubset(added_expectations)
def test_BasicDatasetProfiler_null_column():
toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) == 4
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) < \
len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'x' and result['success']])
def test_BasicDatasetProfiler_partially_null_column(dataset):
expectations_config, evr_config = BasicDatasetProfiler.profile(dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "nulls"])
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "lowcardnonnum"])
def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "highcardnonnum"])
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
expectations_config, evr_config = BasicDatasetProfiler.profile(numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_table_row_count_to_be_between", "expect_table_columns_to_match_ordered_list", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == set([expectation['expectation_type'] for expectation in expectations_config["expectations"]])
def test_BasicDatasetProfiler_with_context(empty_data_context, filesystem_csv_2):
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
not_so_empty_data_context.create_expectation_suite("my_datasource/f1", "default")
batch_kwargs = not_so_empty_data_context.yield_batch_kwargs("my_datasource/f1")
batch = not_so_empty_data_context.get_batch("my_datasource/f1", "default", batch_kwargs)
expectations_config, validation_results = BasicDatasetProfiler.profile(
batch)
assert expectations_config["data_asset_name"] == "my_datasource/default/f1"
assert expectations_config["expectation_suite_name"] == "default"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at", "batch_kwargs"
}
for exp in expectations_config["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert validation_results["meta"]["data_asset_name"] == "my_datasource/default/f1"
assert set(validation_results["meta"].keys()) == {
"great_expectations.__version__", "data_asset_name", "expectation_suite_name", "run_id", "batch_kwargs",
"batch_id"
}
def test_context_profiler(empty_data_context, filesystem_csv_2):
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
assert not_so_empty_data_context.list_expectation_suite_keys() == []
not_so_empty_data_context.profile_datasource("my_datasource", profiler=BasicDatasetProfiler)
assert len(not_so_empty_data_context.list_expectation_suite_keys()) == 1
profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', "BasicDatasetProfiler")
print(json.dumps(profiled_expectations, indent=2))
for exp in profiled_expectations["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert profiled_expectations["data_asset_name"] == "my_datasource/default/f1"
assert profiled_expectations["expectation_suite_name"] == "BasicDatasetProfiler"
assert "batch_kwargs" in profiled_expectations["meta"]["BasicDatasetProfiler"]
assert len(profiled_expectations["expectations"]) > 0
def test_BasicDatasetProfiler_on_titanic():
df = ge.read_csv("./tests/test_sets/Titanic.csv")
suite, evrs = df.profile(BasicDatasetProfiler)
print(json.dumps(suite["meta"], indent=2))
assert "columns" in suite["meta"]
for k,v in suite["meta"]["columns"].items():
assert v == {"description": ""}
evrs = df.validate(result_format="SUMMARY") # ["results"]
# with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
#
# with open('tests/render/fixtures/BasicDatasetProfiler_evrs.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
expected_evrs.pop("meta")
evrs.pop("meta")
# We know that python 2 does not guarantee the order of value_counts, which causes a different
# order for items in the partial_unexpected_value_counts list
# Remove those before test.
for result in evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
for result in expected_evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
# DISABLE TEST IN PY2 BECAUSE OF ORDER ISSUE AND NEAR-EOL
if not PY2:
assertDeepAlmostEqual(expected_evrs, evrs)
| true
| true
|
7904cd4143c870386bf7e3c0c60242b60fbe156d
| 9,525
|
py
|
Python
|
extra/release.py
|
jcassette/beets
|
10338c2a601c28289cd30debf2537b3523d95446
|
[
"MIT"
] | 1
|
2022-03-17T22:44:47.000Z
|
2022-03-17T22:44:47.000Z
|
extra/release.py
|
jcassette/beets
|
10338c2a601c28289cd30debf2537b3523d95446
|
[
"MIT"
] | 1
|
2022-03-10T00:41:36.000Z
|
2022-03-10T00:41:36.000Z
|
extra/release.py
|
jcassette/beets
|
10338c2a601c28289cd30debf2537b3523d95446
|
[
"MIT"
] | 1
|
2022-03-10T00:37:26.000Z
|
2022-03-10T00:37:26.000Z
|
#!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
"""A context manager that temporary changes the working directory.
"""
olddir = os.getcwd()
os.chdir(d)
yield
os.chdir(olddir)
@click.group()
def release():
pass
# Locations (filenames and patterns) of the version number.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
"""Update the version number in setup.py, docs config, changelog,
and root module.
"""
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
# Replace the version each place where it lives.
for filename, locations in VERSION_LOCS:
# Read and transform the file.
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
# Check that this version is actually newer.
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
# Insert the new version.
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
# Normal line.
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
# Generate bits to insert into changelog.
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
# Insert into the right place.
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n') # First blank line.
contents = contents[:location] + header + contents[location:]
# Write back.
with open(CHANGELOG, 'w') as f:
f.write(contents)
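# Illustrative effect of the insertion above (a sketch, not executed): bumping to
# version '1.4.5' splices the block
#
#   1.4.5 (in development)
#   ----------------------
#
#   Changelog goes here!
#
# into the changelog at the position of its first blank line.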
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
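# Example of the boundary logic above (illustrative): given a changelog beginning
#
#   1.4.4 (in development)
#   ----------------------
#   New stuff.
#
#   1.4.3 (October 1, 2016)
#   -----------------------
#
# the first underline flips `started` on, lines are collected until the second
# underline appears, the just-collected header of the next section is dropped,
# and the function returns "New stuff.".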
def rst2md(text):
"""Use Pandoc to convert text from ReST to Markdown.
"""
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
# Fix up odd spacing in lists.
return re.sub(r'^-   ', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
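# A worked example of the substitutions above (illustrative, not executed):
#
#   :doc:`/plugins/web`      ->  ``web``
#   :ref:`the config <cfg>`  ->  the config
#   `beet ls` output         ->  ``beet ls`` output
#   :ref:`import-cmd`        ->  ``import``
#   :bug:`2355`              ->  #2355
#   :user:`sampsyo`          ->  @sampsyo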
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
"""Read the current version from the changelog.
"""
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
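# Example of the rewrite above (illustrative): run on October 16, 2014, the header
#
#   1.4.4 (in development)
#   ----------------------
#
# becomes
#
#   1.4.4 (October 16, 2014)
#   ------------------------
#
# with the dashes re-drawn on the following line to match the new header length.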
@release.command()
def prep():
"""Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
@release.command()
def publish():
"""Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release()
| 26.90678
| 77
| 0.526089
|
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
olddir = os.getcwd()
os.chdir(d)
yield
os.chdir(olddir)
@click.group()
def release():
pass
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
for filename, locations in VERSION_LOCS:
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
with open(filename, 'w') as f:
f.write(''.join(out_lines))
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n')
contents = contents[:location] + header + contents[location:]
with open(CHANGELOG, 'w') as f:
f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
bump_version(version)
def get_latest_changelog():
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
if started:
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
def rst2md(text):
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
return re.sub(r'^-   ', '- ', md, flags=re.M)
def changelog_as_markdown():
rst = get_latest_changelog()
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
md = rst2md(rst)
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
@release.command()
def changelog():
print(changelog_as_markdown())
def get_version(index=0):
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
@release.command()
def version():
print(get_version())
@release.command()
def datestamp():
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
@release.command()
def prep():
cur_version = get_version()
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
@release.command()
def publish():
version = get_version(1)
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
version = get_version(1)
tag = 'v' + version
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release()
| true
| true
|
7904cd5db58cc10f04e8b8ed06a0c5b09d965fe6
| 544
|
py
|
Python
|
setup.py
|
akumor/python-rastervectoranalysis
|
33370f8d104d3b69ce4c689783818512e7f864f2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
akumor/python-rastervectoranalysis
|
33370f8d104d3b69ce4c689783818512e7f864f2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
akumor/python-rastervectoranalysis
|
33370f8d104d3b69ce4c689783818512e7f864f2
|
[
"Apache-2.0"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Raster Vector Analysis',
'author': 'Jan Kumor',
'url': 'http://github.com/akumor/python-rastervectoranalysis',
'download_url': 'http://github.com/akumor/python-rastervectoranalysis',
'author_email': 'akumor@users.noreply.github.com',
'version': '0.1',
'install_requires': [''],
'packages': ['rastervectoranalysis'],
'scripts': [],
'name': 'rastervectoranalysis'
}
setup(**config)
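# Typical usage of this setup script (an assumption about the standard setuptools
# workflow, not stated in this file): run `pip install .` from the repository root,
# or `python setup.py sdist` to build a source distribution from this config.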
| 27.2
| 75
| 0.667279
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Raster Vector Analysis',
'author': 'Jan Kumor',
'url': 'http://github.com/akumor/python-rastervectoranalysis',
'download_url': 'http://github.com/akumor/python-rastervectoranalysis',
'author_email': 'akumor@users.noreply.github.com',
'version': '0.1',
'install_requires': [''],
'packages': ['rastervectoranalysis'],
'scripts': [],
'name': 'rastervectoranalysis'
}
setup(**config)
| true
| true
|
7904ce17f721204cfe9cd705d9bb971fa3408ec6
| 5,716
|
py
|
Python
|
sklearn/decomposition/_base.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 50,961
|
2015-01-01T06:06:31.000Z
|
2022-03-31T23:40:12.000Z
|
sklearn/decomposition/_base.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 17,065
|
2015-01-01T02:01:58.000Z
|
2022-03-31T23:48:34.000Z
|
sklearn/decomposition/_base.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 26,886
|
2015-01-01T00:59:27.000Z
|
2022-03-31T18:03:23.000Z
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
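    # A sketch of the identity computed above for a non-whitened, fitted PCA
    # (illustrative; `pca` and `np` are assumed names):
    #
    #   W = pca.components_                       # (n_components, n_features)
    #   d = np.maximum(pca.explained_variance_ - pca.noise_variance_, 0.0)
    #   cov = W.T @ np.diag(d) @ W + pca.noise_variance_ * np.eye(W.shape[1])
    #   np.allclose(cov, pca.get_covariance())    # expected to be True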
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision
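    # The computation above applies the Woodbury matrix identity: writing the
    # covariance as C = W.T D W + s I, with W = components_, D the diagonal
    # matrix of exp_var_diff, and s = noise_variance_,
    #
    #   C^{-1} = I/s - W.T (D^{-1} + W W.T / s)^{-1} W / s**2,
    #
    # so only an (n_components x n_components) matrix is inverted rather than
    # an (n_features x n_features) one.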
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of components.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return (
np.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return np.dot(X, self.components_) + self.mean_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
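# Usage sketch for subclasses of _BasePCA (illustrative; PCA is the concrete
# public subclass in sklearn.decomposition):
#
#   from sklearn.decomposition import PCA
#   import numpy as np
#   X = np.random.RandomState(0).randn(100, 5)
#   pca = PCA(n_components=2).fit(X)
#   Z = pca.transform(X)                # project onto the components
#   X_back = pca.inverse_transform(Z)   # map back to feature space
#   P = pca.get_precision()             # inverse of pca.get_covariance()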
| 34.853659
| 88
| 0.618964
|
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
def get_covariance(self):
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_
return cov
def get_precision(self):
n_features = self.components_.shape[1]
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None): ...
def transform(self, X):
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
if self.whiten:
return (
np.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return np.dot(X, self.components_) + self.mean_
@property
def _n_features_out(self):
return self.components_.shape[0]
| true
| true
|
7904ce86e89bc53ab3adb657e0b83f32a40e61e7
| 91,732
|
py
|
Python
|
storage/tests/unit/test_blob.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
storage/tests/unit/test_blob.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
storage/tests/unit/test_blob.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import io
import json
import os
import unittest
import mock
import six
from six.moves import http_client
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_Blob(unittest.TestCase):
@staticmethod
def _make_one(*args, **kw):
from google.cloud.storage.blob import Blob
properties = kw.pop('properties', None)
blob = Blob(*args, **kw)
blob._properties = properties or {}
return blob
def test_ctor_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
properties = {'key': 'value'}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIs(blob.bucket, bucket)
self.assertEqual(blob.name, BLOB_NAME)
self.assertEqual(blob._properties, properties)
self.assertFalse(blob._acl.loaded)
self.assertIs(blob._acl.blob, blob)
self.assertEqual(blob._encryption_key, None)
def test_ctor_with_encoded_unicode(self):
blob_name = b'wet \xe2\x9b\xb5'
blob = self._make_one(blob_name, bucket=None)
unicode_name = u'wet \N{sailboat}'
self.assertNotIsInstance(blob.name, bytes)
self.assertIsInstance(blob.name, six.text_type)
self.assertEqual(blob.name, unicode_name)
def test_ctor_w_encryption_key(self):
KEY = b'01234567890123456789012345678901' # 32 bytes
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
self.assertEqual(blob._encryption_key, KEY)
def test_chunk_size_ctor(self):
from google.cloud.storage.blob import Blob
BLOB_NAME = 'blob-name'
BUCKET = object()
chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE
blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size)
self.assertEqual(blob._chunk_size, chunk_size)
def test_chunk_size_getter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob.chunk_size)
VALUE = object()
blob._chunk_size = VALUE
self.assertIs(blob.chunk_size, VALUE)
def test_chunk_size_setter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
blob.chunk_size = 20
self.assertEqual(blob._chunk_size, 20)
def test_chunk_size_setter_bad_value(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
with self.assertRaises(ValueError):
blob.chunk_size = 11
def test_acl_property(self):
from google.cloud.storage.acl import ObjectACL
fake_bucket = _Bucket()
blob = self._make_one(u'name', bucket=fake_bucket)
acl = blob.acl
self.assertIsInstance(acl, ObjectACL)
self.assertIs(acl, blob._acl)
def test_path_bad_bucket(self):
fake_bucket = object()
name = u'blob-name'
blob = self._make_one(name, bucket=fake_bucket)
self.assertRaises(AttributeError, getattr, blob, 'path')
def test_path_no_name(self):
bucket = _Bucket()
blob = self._make_one(u'', bucket=bucket)
self.assertRaises(ValueError, getattr, blob, 'path')
def test_path_normal(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME)
def test_path_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/parent%2Fchild')
def test_path_with_non_ascii(self):
blob_name = u'Caf\xe9'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/Caf%C3%A9')
def test_public_url(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.public_url,
'https://storage.googleapis.com/name/%s' %
BLOB_NAME)
def test_public_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(
blob.public_url,
'https://storage.googleapis.com/name/parent%2Fchild')
def test_public_url_with_non_ascii(self):
blob_name = u'winter \N{snowman}'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
expected_url = 'https://storage.googleapis.com/name/winter%20%E2%98%83'
self.assertEqual(blob.public_url, expected_url)
def _basic_generate_signed_url_helper(self, credentials=None):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION,
credentials=credentials)
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
if credentials is None:
EXPECTED_ARGS = (_Connection.credentials,)
else:
EXPECTED_ARGS = (credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_default_method(self):
self._basic_generate_signed_url_helper()
def test_generate_signed_url_w_content_type(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
CONTENT_TYPE = "text/html"
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION,
content_type=CONTENT_TYPE)
self.assertEqual(signed_url, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': CONTENT_TYPE,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_credentials(self):
credentials = object()
self._basic_generate_signed_url_helper(credentials=credentials)
def test_generate_signed_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION)
self.assertEqual(signed_url, URI)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': '/name/parent%2Fchild',
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_method_arg(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION, method='POST')
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'POST',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_exists_miss(self):
NONESUCH = 'nonesuch'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(NONESUCH, bucket=bucket)
self.assertFalse(blob.exists())
def test_exists_hit(self):
BLOB_NAME = 'blob-name'
found_response = ({'status': http_client.OK}, b'')
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(blob.exists())
def test_delete(self):
BLOB_NAME = 'blob-name'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
blob.delete()
self.assertFalse(blob.exists())
self.assertEqual(bucket._deleted, [(BLOB_NAME, None)])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test__make_transport(self, fake_session_factory):
client = mock.Mock(spec=[u'_credentials'])
blob = self._make_one(u'blob-name', bucket=None)
transport = blob._make_transport(client)
self.assertIs(transport, fake_session_factory.return_value)
fake_session_factory.assert_called_once_with(client._credentials)
def test__get_download_url_with_media_link(self):
blob_name = 'something.txt'
bucket = mock.Mock(spec=[])
blob = self._make_one(blob_name, bucket=bucket)
media_link = 'http://test.invalid'
# Set the media link on the blob
blob._properties['mediaLink'] = media_link
download_url = blob._get_download_url()
self.assertEqual(download_url, media_link)
def test__get_download_url_on_the_fly(self):
blob_name = 'bzzz-fly.txt'
bucket = mock.Mock(path='/b/buhkit', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'buhkit/o/bzzz-fly.txt?alt=media')
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_generation(self):
blob_name = 'pretend.txt'
bucket = mock.Mock(path='/b/fictional', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
generation = 1493058489532987
# Set the media link on the blob
blob._properties['generation'] = str(generation)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'fictional/o/pretend.txt?alt=media&generation=1493058489532987')
self.assertEqual(download_url, expected_url)
@staticmethod
def _mock_requests_response(status_code, headers, content=b''):
import requests
response = requests.Response()
response.status_code = status_code
response.headers.update(headers)
response._content = content
response.request = requests.Request(
'POST', 'http://example.com').prepare()
return response
def _mock_download_transport(self):
fake_transport = mock.Mock(spec=['request'])
# Give the transport two fake responses.
chunk1_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 0-2/6'},
content=b'abc')
chunk2_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 3-5/6'},
content=b'def')
fake_transport.request.side_effect = [chunk1_response, chunk2_response]
return fake_transport
def _check_session_mocks(self, client, fake_session_factory,
expected_url, headers=None):
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport = fake_session_factory.return_value
# Check that the transport was called exactly twice.
self.assertEqual(fake_transport.request.call_count, 2)
if headers is None:
headers = {}
# NOTE: bytes=0-2 never shows up because the mock was called with
# **MUTABLE** headers and it was mutated before the
# second request.
headers['range'] = 'bytes=3-5'
call = mock.call(
'GET', expected_url, data=None, headers=headers)
self.assertEqual(fake_transport.request.mock_calls, [call, call])
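    # Why both recorded calls show 'bytes=3-5': the download code passes a single
    # headers dict by reference and mutates it between chunks, so the mock's call
    # history aliases one object. A minimal reproduction (illustrative):
    #
    #   calls = []
    #   headers = {}
    #   calls.append(headers)            # first "request"
    #   headers['range'] = 'bytes=3-5'   # mutated before the second request
    #   calls.append(headers)            # second "request"
    #   assert calls[0] is calls[1]      # both entries show the final value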
def test__do_download_simple(self):
blob_name = 'blob-name'
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Make sure this will not be chunked.
self.assertIsNone(blob.chunk_size)
transport = mock.Mock(spec=['request'])
transport.request.return_value = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
# Make sure the download was as expected.
self.assertEqual(file_obj.getvalue(), b'abcdef')
transport.request.assert_called_once_with(
'GET', download_url, data=None, headers=headers)
def test__do_download_chunked(self):
blob_name = 'blob-name'
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
transport = self._mock_download_transport()
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
# Make sure the download was as expected.
self.assertEqual(file_obj.getvalue(), b'abcdef')
# Check that the transport was called exactly twice.
self.assertEqual(transport.request.call_count, 2)
# ``headers`` was modified (in place) once for each API call.
self.assertEqual(headers, {'range': 'bytes=3-5'})
call = mock.call(
'GET', download_url, data=None, headers=headers)
self.assertEqual(transport.request.mock_calls, [call, call])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_with_failure(self, fake_session_factory):
from google.cloud import exceptions
blob_name = 'blob-name'
transport = mock.Mock(spec=['request'])
bad_response_headers = {
'Content-Length': '9',
'Content-Type': 'text/html; charset=UTF-8',
}
transport.request.return_value = self._mock_requests_response(
http_client.NOT_FOUND, bad_response_headers, content=b'Not found')
fake_session_factory.return_value = transport
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Set the media link on the blob
blob._properties['mediaLink'] = 'http://test.invalid'
file_obj = io.BytesIO()
with self.assertRaises(exceptions.NotFound):
blob.download_to_file(file_obj)
self.assertEqual(file_obj.tell(), 0)
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
# Check that the transport was called once.
transport.request.assert_called_once_with(
'GET', blob.media_link, data=None, headers={})
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_wo_media_link(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
# Make sure the media link is still unknown.
self.assertIsNone(blob.media_link)
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'name/o/blob-name?alt=media')
self._check_session_mocks(client, fake_session_factory, expected_url)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def _download_to_file_helper(self, fake_session_factory, use_chunks=False):
blob_name = 'blob-name'
fake_transport = self._mock_download_transport()
fake_session_factory.return_value = fake_transport
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
if use_chunks:
# Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
else:
# Modify the response.
single_chunk_response = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
fake_transport.request.side_effect = [single_chunk_response]
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
if use_chunks:
self._check_session_mocks(client, fake_session_factory, media_link)
else:
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport.request.assert_called_once_with(
'GET', media_link, data=None, headers={})
def test_download_to_file_default(self):
self._download_to_file_helper()
def test_download_to_file_with_chunk_size(self):
self._download_to_file_helper(use_chunks=True)
def _download_to_filename_helper(self, fake_session_factory, updated=None):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
if updated is not None:
properties['updated'] = updated
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
# Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
if updated is None:
self.assertIsNone(blob.updated)
else:
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(mtime, updated_time)
self.assertEqual(wrote, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename(self, fake_session_factory):
updated = '2014-12-06T13:13:50.690Z'
self._download_to_filename_helper(
fake_session_factory, updated=updated)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_wo_updated(self, fake_session_factory):
self._download_to_filename_helper(fake_session_factory)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_w_key(self, fake_session_factory):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link,
'updated': '2014-12-06T13:13:50.690Z'}
key = b'aa426195405adee2c8081bb9e7e74b19'
blob = self._make_one(
blob_name, bucket=bucket, properties=properties, encryption_key=key)
# Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(wrote, b'abcdef')
self.assertEqual(mtime, updated_time)
header_key_value = 'YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk='
header_key_hash_value = 'V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0='
key_headers = {
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
}
self._check_session_mocks(
client, fake_session_factory, media_link, headers=key_headers)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_as_string(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        # Modify the blob so that there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
fetched = blob.download_as_string()
self.assertEqual(fetched, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
def test__get_content_type_explicit(self):
blob = self._make_one(u'blob-name', bucket=None)
content_type = u'text/plain'
return_value = blob._get_content_type(content_type)
self.assertEqual(return_value, content_type)
def test__get_content_type_from_blob(self):
blob = self._make_one(u'blob-name', bucket=None)
blob.content_type = u'video/mp4'
return_value = blob._get_content_type(None)
self.assertEqual(return_value, blob.content_type)
def test__get_content_type_from_filename(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None, filename='archive.tar')
self.assertEqual(return_value, 'application/x-tar')
def test__get_content_type_default(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None)
self.assertEqual(return_value, u'application/octet-stream')
def test__get_writable_metadata_no_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_with_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
blob.storage_class = 'NEARLINE'
blob.cache_control = 'max-age=3600'
blob.metadata = {'color': 'red'}
object_metadata = blob._get_writable_metadata()
expected = {
'cacheControl': blob.cache_control,
'metadata': blob.metadata,
'name': name,
'storageClass': blob.storage_class,
}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_unwritable_field(self):
name = u'blob-name'
properties = {'updated': '2016-10-16T18:18:18.181Z'}
blob = self._make_one(name, bucket=None, properties=properties)
# Fake that `updated` is in changes.
blob._changes.add('updated')
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_upload_arguments(self):
name = u'blob-name'
key = b'[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO'
blob = self._make_one(name, bucket=None, encryption_key=key)
blob.content_disposition = 'inline'
content_type = u'image/jpeg'
info = blob._get_upload_arguments(content_type)
headers, object_metadata, new_content_type = info
header_key_value = 'W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8='
header_key_hash_value = 'G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg='
expected_headers = {
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
}
self.assertEqual(headers, expected_headers)
expected_metadata = {
'contentDisposition': blob.content_disposition,
'name': name,
}
self.assertEqual(object_metadata, expected_metadata)
self.assertEqual(new_content_type, content_type)
def _mock_transport(self, status_code, headers, content=b''):
fake_transport = mock.Mock(spec=['request'])
fake_response = self._mock_requests_response(
status_code, headers, content=content)
fake_transport.request.return_value = fake_response
return fake_transport
def _do_multipart_success(self, mock_get_boundary, size=None,
num_retries=None):
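        """Helper: exercise ``_do_multipart_upload`` and verify its mocks."""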
bucket = mock.Mock(path='/b/w00t', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
self.assertIsNone(blob.chunk_size)
        # Create transport mocks to be checked after the request is made.
fake_transport = self._mock_transport(http_client.OK, {})
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments.
client = mock.sentinel.client
data = b'data here hear hier'
stream = io.BytesIO(data)
content_type = u'application/xml'
response = blob._do_multipart_upload(
client, stream, content_type, size, num_retries)
# Check the mocks and the returned value.
self.assertIs(response, fake_transport.request.return_value)
if size is None:
data_read = data
self.assertEqual(stream.tell(), len(data))
else:
data_read = data[:size]
self.assertEqual(stream.tell(), size)
blob._make_transport.assert_called_once_with(client)
mock_get_boundary.assert_called_once_with()
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=multipart')
payload = (
b'--==0==\r\n' +
b'content-type: application/json; charset=UTF-8\r\n\r\n' +
b'{"name": "blob-name"}\r\n' +
b'--==0==\r\n' +
b'content-type: application/xml\r\n\r\n' +
data_read +
b'\r\n--==0==--')
headers = {'content-type': b'multipart/related; boundary="==0=="'}
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=headers)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_no_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, size=10)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, num_retries=8)
def test__do_multipart_upload_bad_size(self):
blob = self._make_one(u'blob-name', bucket=None)
data = b'data here hear hier'
stream = io.BytesIO(data)
size = 50
self.assertGreater(size, len(data))
with self.assertRaises(ValueError) as exc_info:
blob._do_multipart_upload(None, stream, None, size, None)
exc_contents = str(exc_info.exception)
self.assertIn(
'was specified but the file-like object only had', exc_contents)
self.assertEqual(stream.tell(), len(data))
def _initiate_resumable_helper(self, size=None, extra_headers=None,
chunk_size=None, num_retries=None):
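        """Helper: exercise ``_initiate_resumable_upload`` under various options."""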
from google.resumable_media.requests import ResumableUpload
bucket = mock.Mock(path='/b/whammy', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.metadata = {'rook': 'takes knight'}
blob.chunk_size = 3 * blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
# Need to make sure **same** dict is used because ``json.dumps()``
# will depend on the hash order.
object_metadata = blob._get_writable_metadata()
blob._get_writable_metadata = mock.Mock(
return_value=object_metadata, spec=[])
        # Create transport mocks to be checked after the request is made.
resumable_url = 'http://test.invalid?upload_id=hey-you'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments and call the method under test.
client = mock.sentinel.client
data = b'hello hallo halo hi-low'
stream = io.BytesIO(data)
content_type = u'text/plain'
upload, transport = blob._initiate_resumable_upload(
client, stream, content_type, size, num_retries,
extra_headers=extra_headers, chunk_size=chunk_size)
# Check the returned values.
self.assertIsInstance(upload, ResumableUpload)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
self.assertEqual(upload.upload_url, upload_url)
if extra_headers is None:
self.assertEqual(upload._headers, {})
else:
self.assertEqual(upload._headers, extra_headers)
self.assertIsNot(upload._headers, extra_headers)
self.assertFalse(upload.finished)
if chunk_size is None:
self.assertEqual(upload._chunk_size, blob.chunk_size)
else:
self.assertNotEqual(blob.chunk_size, chunk_size)
self.assertEqual(upload._chunk_size, chunk_size)
self.assertIs(upload._stream, stream)
if size is None:
self.assertIsNone(upload._total_bytes)
else:
self.assertEqual(upload._total_bytes, size)
self.assertEqual(upload._content_type, content_type)
self.assertEqual(upload.resumable_url, resumable_url)
retry_strategy = upload._retry_strategy
self.assertEqual(retry_strategy.max_sleep, 64.0)
if num_retries is None:
self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
self.assertIsNone(retry_strategy.max_retries)
else:
self.assertIsNone(retry_strategy.max_cumulative_retry)
self.assertEqual(retry_strategy.max_retries, num_retries)
self.assertIs(transport, fake_transport)
# Make sure we never read from the stream.
self.assertEqual(stream.tell(), 0)
# Check the mocks.
blob._get_writable_metadata.assert_called_once_with()
blob._make_transport.assert_called_once_with(client)
payload = json.dumps(object_metadata).encode('utf-8')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
if extra_headers is not None:
expected_headers.update(extra_headers)
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test__initiate_resumable_upload_no_size(self):
self._initiate_resumable_helper()
def test__initiate_resumable_upload_with_size(self):
self._initiate_resumable_helper(size=10000)
def test__initiate_resumable_upload_with_chunk_size(self):
one_mb = 1048576
self._initiate_resumable_helper(chunk_size=one_mb)
def test__initiate_resumable_upload_with_extra_headers(self):
extra_headers = {'origin': 'http://not-in-kansas-anymore.invalid'}
self._initiate_resumable_helper(extra_headers=extra_headers)
def test__initiate_resumable_upload_with_retry(self):
self._initiate_resumable_helper(num_retries=11)
def _make_resumable_transport(self, headers1, headers2,
headers3, total_bytes):
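        """Build a fake transport that replays three resumable-upload responses."""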
from google import resumable_media
fake_transport = mock.Mock(spec=['request'])
fake_response1 = self._mock_requests_response(
http_client.OK, headers1)
fake_response2 = self._mock_requests_response(
resumable_media.PERMANENT_REDIRECT, headers2)
json_body = '{{"size": "{:d}"}}'.format(total_bytes)
fake_response3 = self._mock_requests_response(
http_client.OK, headers3,
content=json_body.encode('utf-8'))
responses = [fake_response1, fake_response2, fake_response3]
fake_transport.request.side_effect = responses
return fake_transport, responses
@staticmethod
def _do_resumable_upload_call0(blob, content_type, size=None):
        # The first mock transport.request() initiates the upload.
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
blob.bucket.path +
'/o?uploadType=resumable')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
payload = json.dumps({'name': blob.name}).encode('utf-8')
return mock.call(
'POST', upload_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call1(blob, content_type, data,
resumable_url, size=None):
        # The second mock transport.request() sends the first chunk.
if size is None:
content_range = 'bytes 0-{:d}/*'.format(blob.chunk_size - 1)
else:
content_range = 'bytes 0-{:d}/{:d}'.format(
blob.chunk_size - 1, size)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[:blob.chunk_size]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call2(blob, content_type, data,
resumable_url, total_bytes):
        # The third mock transport.request() sends the last chunk.
content_range = 'bytes {:d}-{:d}/{:d}'.format(
blob.chunk_size, total_bytes - 1, total_bytes)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[blob.chunk_size:]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
def _do_resumable_helper(self, use_size=False, num_retries=None):
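        """Helper: exercise ``_do_resumable_upload`` across two chunks."""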
bucket = mock.Mock(path='/b/yesterday', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
# Data to be uploaded.
data = b'<html>' + (b'A' * blob.chunk_size) + b'</html>'
total_bytes = len(data)
if use_size:
size = total_bytes
else:
size = None
        # Create transport mocks to be checked after the request is made.
resumable_url = 'http://test.invalid?upload_id=and-then-there-was-1'
headers1 = {'location': resumable_url}
headers2 = {'range': 'bytes=0-{:d}'.format(blob.chunk_size - 1)}
fake_transport, responses = self._make_resumable_transport(
headers1, headers2, {}, total_bytes)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments and call the method under test.
client = mock.sentinel.client
stream = io.BytesIO(data)
content_type = u'text/html'
response = blob._do_resumable_upload(
client, stream, content_type, size, num_retries)
# Check the returned values.
self.assertIs(response, responses[2])
self.assertEqual(stream.tell(), total_bytes)
# Check the mocks.
blob._make_transport.assert_called_once_with(client)
call0 = self._do_resumable_upload_call0(blob, content_type, size=size)
call1 = self._do_resumable_upload_call1(
blob, content_type, data, resumable_url, size=size)
call2 = self._do_resumable_upload_call2(
blob, content_type, data, resumable_url, total_bytes)
self.assertEqual(
fake_transport.request.mock_calls, [call0, call1, call2])
def test__do_resumable_upload_no_size(self):
self._do_resumable_helper()
def test__do_resumable_upload_with_size(self):
self._do_resumable_helper(use_size=True)
def test__do_resumable_upload_with_retry(self):
self._do_resumable_helper(num_retries=6)
def _do_upload_helper(self, chunk_size=None, num_retries=None):
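        """Helper: verify ``_do_upload`` dispatches to the right upload strategy."""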
blob = self._make_one(u'blob-name', bucket=None)
# Create a fake response.
response = mock.Mock(spec=[u'json'])
response.json.return_value = mock.sentinel.json
# Mock **both** helpers.
blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])
if chunk_size is None:
self.assertIsNone(blob.chunk_size)
else:
blob.chunk_size = chunk_size
self.assertIsNotNone(blob.chunk_size)
client = mock.sentinel.client
stream = mock.sentinel.stream
content_type = u'video/mp4'
size = 12345654321
# Make the request and check the mocks.
created_json = blob._do_upload(
client, stream, content_type, size, num_retries)
self.assertIs(created_json, mock.sentinel.json)
response.json.assert_called_once_with()
if chunk_size is None:
blob._do_multipart_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
blob._do_resumable_upload.assert_not_called()
else:
blob._do_multipart_upload.assert_not_called()
blob._do_resumable_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
def test__do_upload_without_chunk_size(self):
self._do_upload_helper()
def test__do_upload_with_chunk_size(self):
chunk_size = 1024 * 1024 * 1024 # 1GB
self._do_upload_helper(chunk_size=chunk_size)
def test__do_upload_with_retry(self):
self._do_upload_helper(num_retries=20)
def _upload_from_file_helper(self, side_effect=None, **kwargs):
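        """Helper: exercise ``upload_from_file`` with a mocked ``_do_upload``."""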
from google.cloud._helpers import UTC
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'updated': '2017-01-01T09:09:09.081Z'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
if side_effect is not None:
blob._do_upload.side_effect = side_effect
# Make sure `updated` is empty before the request.
self.assertIsNone(blob.updated)
data = b'data is here'
stream = io.BytesIO(data)
stream.seek(2) # Not at zero.
content_type = u'font/woff'
client = mock.sentinel.client
ret_val = blob.upload_from_file(
stream, size=len(data), content_type=content_type,
client=client, **kwargs)
# Check the response and side-effects.
self.assertIsNone(ret_val)
new_updated = datetime.datetime(
2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
self.assertEqual(blob.updated, new_updated)
# Check the mock.
num_retries = kwargs.get('num_retries')
blob._do_upload.assert_called_once_with(
client, stream, content_type, len(data), num_retries)
return stream
def test_upload_from_file_success(self):
stream = self._upload_from_file_helper()
assert stream.tell() == 2
@mock.patch('warnings.warn')
def test_upload_from_file_with_retries(self, mock_warn):
from google.cloud.storage import blob as blob_module
self._upload_from_file_helper(num_retries=20)
mock_warn.assert_called_once_with(
blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning)
def test_upload_from_file_with_rewind(self):
stream = self._upload_from_file_helper(rewind=True)
assert stream.tell() == 0
def test_upload_from_file_failure(self):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'Someone is already in this spot.'
response = requests.Response()
response._content = message
response.status_code = http_client.CONFLICT
response.request = requests.Request(
'POST', 'http://example.com').prepare()
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.Conflict) as exc_info:
self._upload_from_file_helper(side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def _do_upload_mock_call_helper(self, blob, client, content_type, size):
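        """Check the single recorded ``_do_upload`` call and return its stream."""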
self.assertEqual(blob._do_upload.call_count, 1)
mock_call = blob._do_upload.mock_calls[0]
call_name, pos_args, kwargs = mock_call
self.assertEqual(call_name, '')
self.assertEqual(len(pos_args), 5)
self.assertEqual(pos_args[0], client)
self.assertEqual(pos_args[2], content_type)
self.assertEqual(pos_args[3], size)
self.assertIsNone(pos_args[4]) # num_retries
self.assertEqual(kwargs, {})
return pos_args[1]
def test_upload_from_filename(self):
from google.cloud._testing import _NamedTemporaryFile
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'metadata': {'mint': 'ice-cream'}}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
# Make sure `metadata` is empty before the request.
self.assertIsNone(blob.metadata)
data = b'soooo much data'
content_type = u'image/svg+xml'
client = mock.sentinel.client
with _NamedTemporaryFile() as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(data)
ret_val = blob.upload_from_filename(
temp.name, content_type=content_type, client=client)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.metadata, created_json['metadata'])
# Check the mock.
stream = self._do_upload_mock_call_helper(
blob, client, content_type, len(data))
self.assertTrue(stream.closed)
self.assertEqual(stream.mode, 'rb')
self.assertEqual(stream.name, temp.name)
def _upload_from_string_helper(self, data, **kwargs):
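        """Helper: exercise ``upload_from_string`` with bytes or text payloads."""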
from google.cloud._helpers import _to_bytes
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'componentCount': '5'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `component_count` is empty before the request.
self.assertIsNone(blob.component_count)
client = mock.sentinel.client
ret_val = blob.upload_from_string(data, client=client, **kwargs)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.component_count, 5)
# Check the mock.
payload = _to_bytes(data, encoding='utf-8')
stream = self._do_upload_mock_call_helper(
blob, client, 'text/plain', len(payload))
self.assertIsInstance(stream, io.BytesIO)
self.assertEqual(stream.getvalue(), payload)
def test_upload_from_string_w_bytes(self):
data = b'XB]jb\xb8tad\xe0'
self._upload_from_string_helper(data)
def test_upload_from_string_w_text(self):
data = u'\N{snowman} \N{sailboat}'
self._upload_from_string_helper(data)
def _create_resumable_upload_session_helper(self, origin=None,
side_effect=None):
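        """Helper: exercise ``create_resumable_upload_session`` and check mocks."""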
bucket = mock.Mock(path='/b/alex-trebek', spec=[u'path'])
blob = self._make_one('blob-name', bucket=bucket)
chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
blob.chunk_size = chunk_size
        # Create transport mocks to be checked after the request is made.
resumable_url = 'http://test.invalid?upload_id=clean-up-everybody'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
if side_effect is not None:
fake_transport.request.side_effect = side_effect
# Create some mock arguments and call the method under test.
content_type = u'text/plain'
size = 10000
client = mock.sentinel.client
new_url = blob.create_resumable_upload_session(
content_type=content_type, size=size,
origin=origin, client=client)
# Check the returned value and (lack of) side-effect.
self.assertEqual(new_url, resumable_url)
self.assertEqual(blob.chunk_size, chunk_size)
# Check the mocks.
blob._make_transport.assert_called_once_with(client)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
payload = b'{"name": "blob-name"}'
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-length': str(size),
'x-upload-content-type': content_type,
}
if origin is not None:
expected_headers['Origin'] = origin
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test_create_resumable_upload_session(self):
self._create_resumable_upload_session_helper()
def test_create_resumable_upload_session_with_origin(self):
self._create_resumable_upload_session_helper(
origin='http://google.com')
def test_create_resumable_upload_session_with_failure(self):
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'5-oh-3 woe is me.'
response = self._mock_requests_response(
content=message, status_code=http_client.SERVICE_UNAVAILABLE,
headers={})
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
self._create_resumable_upload_session_helper(
side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def test_get_iam_policy(self):
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
RETURNED = {
'resourceId': PATH,
'etag': ETAG,
'version': VERSION,
'bindings': [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
after = ({'status': http_client.OK}, RETURNED)
EXPECTED = {
binding['role']: set(binding['members'])
for binding in RETURNED['bindings']}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
policy = blob.get_iam_policy()
self.assertIsInstance(policy, Policy)
self.assertEqual(policy.etag, RETURNED['etag'])
self.assertEqual(policy.version, RETURNED['version'])
self.assertEqual(dict(policy), EXPECTED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
def test_set_iam_policy(self):
import operator
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
BINDINGS = [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
]
RETURNED = {
'etag': ETAG,
'version': VERSION,
'bindings': BINDINGS,
}
after = ({'status': http_client.OK}, RETURNED)
policy = Policy()
for binding in BINDINGS:
policy[binding['role']] = binding['members']
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
returned = blob.set_iam_policy(policy)
self.assertEqual(returned.etag, ETAG)
self.assertEqual(returned.version, VERSION)
self.assertEqual(dict(returned), dict(policy))
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PUT')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
sent = kw[0]['data']
self.assertEqual(sent['resourceId'], PATH)
self.assertEqual(len(sent['bindings']), len(BINDINGS))
key = operator.itemgetter('role')
for found, expected in zip(
sorted(sent['bindings'], key=key),
sorted(BINDINGS, key=key)):
self.assertEqual(found['role'], expected['role'])
self.assertEqual(
sorted(found['members']), sorted(expected['members']))
def test_test_iam_permissions(self):
from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
from google.cloud.storage.iam import STORAGE_BUCKETS_GET
from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
PERMISSIONS = [
STORAGE_OBJECTS_LIST,
STORAGE_BUCKETS_GET,
STORAGE_BUCKETS_UPDATE,
]
ALLOWED = PERMISSIONS[1:]
RETURNED = {'permissions': ALLOWED}
after = ({'status': http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
allowed = blob.test_iam_permissions(PERMISSIONS)
self.assertEqual(allowed, ALLOWED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam/testPermissions' % (PATH,))
self.assertEqual(kw[0]['query_params'], {'permissions': PERMISSIONS})
def test_make_public(self):
from google.cloud.storage.acl import _ACLEntity
BLOB_NAME = 'blob-name'
permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
after = ({'status': http_client.OK}, {'acl': permissive})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_public()
self.assertEqual(list(blob.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME)
self.assertEqual(kw[0]['data'], {'acl': permissive})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_compose_wo_content_type_set(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
with self.assertRaises(ValueError):
destination.compose(sources=[source_1, source_2])
def test_compose_minimal(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
def test_compose_w_additional_property_changes(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.content_language = 'en-US'
destination.metadata = {'my-key': 'my-value'}
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
'contentLanguage': 'en-US',
'metadata': {
'my-key': 'my-value',
}
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
def test_rewrite_response_without_resource(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
def test_rewrite_other_bucket_other_name_no_encryption_partial(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/%s/o/%s' % (
SOURCE_BLOB, DEST_BUCKET, DEST_BLOB)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_rewrite_same_name_no_old_key_new_key_done(self):
import base64
import hashlib
KEY = b'01234567890123456789012345678901' # 32 bytes
KEY_B64 = base64.b64encode(KEY).rstrip().decode('ascii')
KEY_HASH = hashlib.sha256(KEY).digest()
KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
plain = self._make_one(BLOB_NAME, bucket=bucket)
encrypted = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=KEY)
token, rewritten, size = encrypted.rewrite(plain)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertEqual(headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(headers['X-Goog-Encryption-Key'], KEY_B64)
self.assertEqual(headers['X-Goog-Encryption-Key-Sha256'], KEY_HASH_B64)
def test_rewrite_same_name_no_key_new_key_w_token(self):
import base64
import hashlib
SOURCE_KEY = b'01234567890123456789012345678901' # 32 bytes
SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode('ascii')
SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
SOURCE_KEY_HASH_B64 = base64.b64encode(
SOURCE_KEY_HASH).rstrip().decode('ascii')
DEST_KEY = b'90123456789012345678901234567890' # 32 bytes
DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode('ascii')
DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest()
DEST_KEY_HASH_B64 = base64.b64encode(
DEST_KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
source = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
dest = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=DEST_KEY)
token, rewritten, size = dest.rewrite(source, token=TOKEN)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {'rewriteToken': TOKEN})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], SOURCE_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
SOURCE_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], DEST_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], DEST_KEY_HASH_B64)
def test_update_storage_class_invalid(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
with self.assertRaises(ValueError):
blob.update_storage_class(u'BOGUS')
def test_update_storage_class_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
# Blob has no key, and therefore the relevant headers are not sent.
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_update_storage_class_w_encryption_key(self):
import base64
import hashlib
BLOB_NAME = 'blob-name'
BLOB_KEY = b'01234567890123456789012345678901' # 32 bytes
BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode('ascii')
BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
BLOB_KEY_HASH_B64 = base64.b64encode(
BLOB_KEY_HASH).rstrip().decode('ascii')
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
        # Blob has a key, and therefore the relevant headers are sent.
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
BLOB_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], BLOB_KEY_HASH_B64)
def test_cache_control_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CACHE_CONTROL = 'no-cache'
properties = {'cacheControl': CACHE_CONTROL}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
BLOB_NAME = 'blob-name'
CACHE_CONTROL = 'no-cache'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.cache_control)
blob.cache_control = CACHE_CONTROL
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_component_count(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'componentCount': COMPONENT_COUNT})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_component_count_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.component_count)
def test_component_count_string_val(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'componentCount': str(COMPONENT_COUNT)})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
properties = {'contentDisposition': CONTENT_DISPOSITION}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_disposition)
blob.content_disposition = CONTENT_DISPOSITION
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_encoding_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_ENCODING = 'gzip'
properties = {'contentEncoding': CONTENT_ENCODING}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_ENCODING = 'gzip'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_encoding)
blob.content_encoding = CONTENT_ENCODING
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_language_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_LANGUAGE = 'pt-BR'
properties = {'contentLanguage': CONTENT_LANGUAGE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_LANGUAGE = 'pt-BR'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_language)
blob.content_language = CONTENT_LANGUAGE
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_type_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_TYPE = 'image/jpeg'
properties = {'contentType': CONTENT_TYPE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_TYPE = 'image/jpeg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_type)
blob.content_type = CONTENT_TYPE
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_crc32c_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CRC32C = 'DEADBEEF'
properties = {'crc32c': CRC32C}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.crc32c, CRC32C)
def test_crc32c_setter(self):
BLOB_NAME = 'blob-name'
CRC32C = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.crc32c)
blob.crc32c = CRC32C
self.assertEqual(blob.crc32c, CRC32C)
def test_etag(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ETAG = 'ETAG'
properties = {'etag': ETAG}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.etag, ETAG)
def test_generation(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': GENERATION})
self.assertEqual(blob.generation, GENERATION)
def test_generation_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.generation)
def test_generation_string_val(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': str(GENERATION)})
self.assertEqual(blob.generation, GENERATION)
def test_id(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ID = 'ID'
properties = {'id': ID}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.id, ID)
def test_md5_hash_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MD5_HASH = 'DEADBEEF'
properties = {'md5Hash': MD5_HASH}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
BLOB_NAME = 'blob-name'
MD5_HASH = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.md5_hash)
blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_media_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.media_link, MEDIA_LINK)
def test_metadata_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
METADATA = {'foo': 'Foo'}
properties = {'metadata': METADATA}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter(self):
BLOB_NAME = 'blob-name'
METADATA = {'foo': 'Foo'}
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.metadata)
blob.metadata = METADATA
self.assertEqual(blob.metadata, METADATA)
def test_metageneration(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'metageneration': METAGENERATION})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_metageneration_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.metageneration)
def test_metageneration_string_val(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'metageneration': str(METAGENERATION)})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_owner(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
properties = {'owner': OWNER}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
owner = blob.owner
self.assertEqual(owner['entity'], 'project-owner-12345')
self.assertEqual(owner['entityId'], '23456')
def test_self_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
SELF_LINK = 'http://example.com/self/'
properties = {'selfLink': SELF_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.self_link, SELF_LINK)
def test_size(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': SIZE})
self.assertEqual(blob.size, SIZE)
def test_size_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.size)
def test_size_string_val(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': str(SIZE)})
self.assertEqual(blob.size, SIZE)
def test_storage_class_getter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'MULTI_REGIONAL'
properties = {'storageClass': storage_class}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
self.assertEqual(blob.storage_class, storage_class)
def test_storage_class_setter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'COLDLINE'
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.storage_class)
blob.storage_class = storage_class
self.assertEqual(blob.storage_class, storage_class)
self.assertEqual(blob._properties, {'storageClass': storage_class})
def test_time_deleted(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeDeleted': TIME_DELETED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_deleted, TIMESTAMP)
def test_time_deleted_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_deleted)
def test_time_created(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeCreated': TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_created, TIMESTAMP)
def test_time_created_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_created)
def test_updated(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'updated': UPDATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.updated, TIMESTAMP)
def test_updated_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.updated)
class Test__quote(unittest.TestCase):
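    """Unit tests for the ``_quote`` URL-escaping helper."""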
@staticmethod
def _call_fut(value):
from google.cloud.storage.blob import _quote
return _quote(value)
def test_bytes(self):
quoted = self._call_fut(b'\xDE\xAD\xBE\xEF')
self.assertEqual(quoted, '%DE%AD%BE%EF')
def test_unicode(self):
helicopter = u'\U0001f681'
quoted = self._call_fut(helicopter)
self.assertEqual(quoted, '%F0%9F%9A%81')
def test_bad_type(self):
with self.assertRaises(TypeError):
self._call_fut(None)
class Test__maybe_rewind(unittest.TestCase):
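    """Unit tests for the ``_maybe_rewind`` stream helper."""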
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _maybe_rewind
return _maybe_rewind(*args, **kwargs)
def test_default(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_not_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=False)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=True)
self.assertIsNone(ret_val)
stream.seek.assert_called_once_with(0, os.SEEK_SET)
class Test__raise_from_invalid_response(unittest.TestCase):
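    """Unit tests for the ``_raise_from_invalid_response`` error translator."""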
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _raise_from_invalid_response
return _raise_from_invalid_response(*args, **kwargs)
def _helper(self, message, **kwargs):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
response = requests.Response()
response.request = requests.Request(
'GET', 'http://example.com').prepare()
response.status_code = http_client.BAD_REQUEST
response._content = message
error = InvalidResponse(response)
with self.assertRaises(exceptions.BadRequest) as exc_info:
self._call_fut(error, **kwargs)
return exc_info
def test_default(self):
message = b'Failure'
exc_info = self._helper(message)
message_str = message.decode('utf-8')
expected = 'GET http://example.com/: {}'.format(message_str)
self.assertEqual(exc_info.exception.message, expected)
self.assertEqual(exc_info.exception.errors, [])
class _Connection(object):
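    """Fake connection that records requests and replays canned responses."""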
API_BASE_URL = 'http://example.com'
USER_AGENT = 'testing 1.2.3'
credentials = object()
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
self._signed = []
def _respond(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
info, content = self._respond(**kw)
if info.get('status') == http_client.NOT_FOUND:
raise NotFound(info)
return content
class _Bucket(object):
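    """Minimal fake bucket standing in for ``google.cloud.storage.Bucket``."""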
def __init__(self, client=None, name='name'):
if client is None:
connection = _Connection()
client = _Client(connection)
self.client = client
self._blobs = {}
self._copied = []
self._deleted = []
self.name = name
self.path = '/b/' + name
def delete_blob(self, blob_name, client=None):
del self._blobs[blob_name]
self._deleted.append((blob_name, client))
class _Signer(object):
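    """Callable fake that records signing calls and returns a canned signed URL."""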
def __init__(self):
self._signed = []
def __call__(self, *args, **kwargs):
self._signed.append((args, kwargs))
return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=%s' % kwargs.get('expiration'))
class _Client(object):
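    """Minimal fake client exposing connection and credentials attributes."""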
def __init__(self, connection):
self._base_connection = connection
@property
def _connection(self):
return self._base_connection
@property
def _credentials(self):
return self._base_connection.credentials
| 39.369957
| 80
| 0.637804
|
import datetime
import io
import json
import os
import unittest
import mock
import six
from six.moves import http_client
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_Blob(unittest.TestCase):
@staticmethod
def _make_one(*args, **kw):
from google.cloud.storage.blob import Blob
properties = kw.pop('properties', None)
blob = Blob(*args, **kw)
blob._properties = properties or {}
return blob
def test_ctor_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
properties = {'key': 'value'}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIs(blob.bucket, bucket)
self.assertEqual(blob.name, BLOB_NAME)
self.assertEqual(blob._properties, properties)
self.assertFalse(blob._acl.loaded)
self.assertIs(blob._acl.blob, blob)
self.assertEqual(blob._encryption_key, None)
def test_ctor_with_encoded_unicode(self):
blob_name = b'wet \xe2\x9b\xb5'
blob = self._make_one(blob_name, bucket=None)
unicode_name = u'wet \N{sailboat}'
self.assertNotIsInstance(blob.name, bytes)
self.assertIsInstance(blob.name, six.text_type)
self.assertEqual(blob.name, unicode_name)
def test_ctor_w_encryption_key(self):
KEY = b'01234567890123456789012345678901'
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
self.assertEqual(blob._encryption_key, KEY)
def test_chunk_size_ctor(self):
from google.cloud.storage.blob import Blob
BLOB_NAME = 'blob-name'
BUCKET = object()
chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE
blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size)
self.assertEqual(blob._chunk_size, chunk_size)
def test_chunk_size_getter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob.chunk_size)
VALUE = object()
blob._chunk_size = VALUE
self.assertIs(blob.chunk_size, VALUE)
def test_chunk_size_setter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
blob.chunk_size = 20
self.assertEqual(blob._chunk_size, 20)
def test_chunk_size_setter_bad_value(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
with self.assertRaises(ValueError):
blob.chunk_size = 11
def test_acl_property(self):
from google.cloud.storage.acl import ObjectACL
fake_bucket = _Bucket()
blob = self._make_one(u'name', bucket=fake_bucket)
acl = blob.acl
self.assertIsInstance(acl, ObjectACL)
self.assertIs(acl, blob._acl)
def test_path_bad_bucket(self):
fake_bucket = object()
name = u'blob-name'
blob = self._make_one(name, bucket=fake_bucket)
self.assertRaises(AttributeError, getattr, blob, 'path')
def test_path_no_name(self):
bucket = _Bucket()
blob = self._make_one(u'', bucket=bucket)
self.assertRaises(ValueError, getattr, blob, 'path')
def test_path_normal(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME)
def test_path_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/parent%2Fchild')
def test_path_with_non_ascii(self):
blob_name = u'Caf\xe9'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/Caf%C3%A9')
def test_public_url(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.public_url,
'https://storage.googleapis.com/name/%s' %
BLOB_NAME)
def test_public_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(
blob.public_url,
'https://storage.googleapis.com/name/parent%2Fchild')
def test_public_url_with_non_ascii(self):
blob_name = u'winter \N{snowman}'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
expected_url = 'https://storage.googleapis.com/name/winter%20%E2%98%83'
self.assertEqual(blob.public_url, expected_url)
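# Helper for the generate_signed_url tests below: patches the module-level
# google.cloud.storage.blob.generate_signed_url with a _Signer stub, then
# asserts on the exact positional/keyword arguments the blob forwarded to it.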
def _basic_generate_signed_url_helper(self, credentials=None):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION,
credentials=credentials)
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
if credentials is None:
EXPECTED_ARGS = (_Connection.credentials,)
else:
EXPECTED_ARGS = (credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_default_method(self):
self._basic_generate_signed_url_helper()
def test_generate_signed_url_w_content_type(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
CONTENT_TYPE = "text/html"
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION,
content_type=CONTENT_TYPE)
self.assertEqual(signed_url, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': CONTENT_TYPE,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_credentials(self):
credentials = object()
self._basic_generate_signed_url_helper(credentials=credentials)
def test_generate_signed_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION)
self.assertEqual(signed_url, URI)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': '/name/parent%2Fchild',
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_method_arg(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION, method='POST')
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'POST',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_exists_miss(self):
NONESUCH = 'nonesuch'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(NONESUCH, bucket=bucket)
self.assertFalse(blob.exists())
def test_exists_hit(self):
BLOB_NAME = 'blob-name'
found_response = ({'status': http_client.OK}, b'')
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(blob.exists())
def test_delete(self):
BLOB_NAME = 'blob-name'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
blob.delete()
self.assertFalse(blob.exists())
self.assertEqual(bucket._deleted, [(BLOB_NAME, None)])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test__make_transport(self, fake_session_factory):
client = mock.Mock(spec=[u'_credentials'])
blob = self._make_one(u'blob-name', bucket=None)
transport = blob._make_transport(client)
self.assertIs(transport, fake_session_factory.return_value)
fake_session_factory.assert_called_once_with(client._credentials)
def test__get_download_url_with_media_link(self):
blob_name = 'something.txt'
bucket = mock.Mock(spec=[])
blob = self._make_one(blob_name, bucket=bucket)
media_link = 'http://test.invalid'
blob._properties['mediaLink'] = media_link
download_url = blob._get_download_url()
self.assertEqual(download_url, media_link)
def test__get_download_url_on_the_fly(self):
blob_name = 'bzzz-fly.txt'
bucket = mock.Mock(path='/b/buhkit', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'buhkit/o/bzzz-fly.txt?alt=media')
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_generation(self):
blob_name = 'pretend.txt'
bucket = mock.Mock(path='/b/fictional', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
generation = 1493058489532987
blob._properties['generation'] = str(generation)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'fictional/o/pretend.txt?alt=media&generation=1493058489532987')
self.assertEqual(download_url, expected_url)
@staticmethod
def _mock_requests_response(status_code, headers, content=b''):
import requests
response = requests.Response()
response.status_code = status_code
response.headers.update(headers)
response._content = content
response.request = requests.Request(
'POST', 'http://example.com').prepare()
return response
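# Builds a fake transport whose two successive requests return
# PARTIAL_CONTENT chunks (b'abc', then b'def'), simulating a 6-byte object
# downloaded with a 3-byte chunk size.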
def _mock_download_transport(self):
fake_transport = mock.Mock(spec=['request'])
chunk1_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 0-2/6'},
content=b'abc')
chunk2_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 3-5/6'},
content=b'def')
fake_transport.request.side_effect = [chunk1_response, chunk2_response]
return fake_transport
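# Shared assertions for chunked downloads: the session factory was built from
# the client credentials and exactly two ranged GETs were issued. Note that
# both recorded calls share the *same* headers dict, so each shows the final
# 'range' value ('bytes=3-5') written for the second chunk request.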
def _check_session_mocks(self, client, fake_session_factory,
expected_url, headers=None):
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport = fake_session_factory.return_value
self.assertEqual(fake_transport.request.call_count, 2)
if headers is None:
headers = {}
headers['range'] = 'bytes=3-5'
call = mock.call(
'GET', expected_url, data=None, headers=headers)
self.assertEqual(fake_transport.request.mock_calls, [call, call])
def test__do_download_simple(self):
blob_name = 'blob-name'
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.chunk_size)
transport = mock.Mock(spec=['request'])
transport.request.return_value = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
self.assertEqual(file_obj.getvalue(), b'abcdef')
transport.request.assert_called_once_with(
'GET', download_url, data=None, headers=headers)
def test__do_download_chunked(self):
blob_name = 'blob-name'
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
transport = self._mock_download_transport()
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
self.assertEqual(file_obj.getvalue(), b'abcdef')
self.assertEqual(transport.request.call_count, 2)
self.assertEqual(headers, {'range': 'bytes=3-5'})
call = mock.call(
'GET', download_url, data=None, headers=headers)
self.assertEqual(transport.request.mock_calls, [call, call])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_with_failure(self, fake_session_factory):
from google.cloud import exceptions
blob_name = 'blob-name'
transport = mock.Mock(spec=['request'])
bad_response_headers = {
'Content-Length': '9',
'Content-Type': 'text/html; charset=UTF-8',
}
transport.request.return_value = self._mock_requests_response(
http_client.NOT_FOUND, bad_response_headers, content=b'Not found')
fake_session_factory.return_value = transport
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._properties['mediaLink'] = 'http://test.invalid'
file_obj = io.BytesIO()
with self.assertRaises(exceptions.NotFound):
blob.download_to_file(file_obj)
self.assertEqual(file_obj.tell(), 0)
fake_session_factory.assert_called_once_with(client._credentials)
transport.request.assert_called_once_with(
'GET', blob.media_link, data=None, headers={})
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_wo_media_link(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
self.assertIsNone(blob.media_link)
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'name/o/blob-name?alt=media')
self._check_session_mocks(client, fake_session_factory, expected_url)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def _download_to_file_helper(self, fake_session_factory, use_chunks=False):
blob_name = 'blob-name'
fake_transport = self._mock_download_transport()
fake_session_factory.return_value = fake_transport
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
if use_chunks:
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
else:
single_chunk_response = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
fake_transport.request.side_effect = [single_chunk_response]
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
if use_chunks:
self._check_session_mocks(client, fake_session_factory, media_link)
else:
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport.request.assert_called_once_with(
'GET', media_link, data=None, headers={})
def test_download_to_file_default(self):
self._download_to_file_helper()
def test_download_to_file_with_chunk_size(self):
self._download_to_file_helper(use_chunks=True)
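# When the blob carries an 'updated' timestamp, download_to_filename() is
# expected to set the local file's mtime to that time; with no timestamp the
# mtime is left alone. This helper exercises both branches via ``updated``.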
def _download_to_filename_helper(self, fake_session_factory, updated=None):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
if updated is not None:
properties['updated'] = updated
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
if updated is None:
self.assertIsNone(blob.updated)
else:
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(mtime, updated_time)
self.assertEqual(wrote, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename(self, fake_session_factory):
updated = '2014-12-06T13:13:50.690Z'
self._download_to_filename_helper(
fake_session_factory, updated=updated)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_wo_updated(self, fake_session_factory):
self._download_to_filename_helper(fake_session_factory)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_w_key(self, fake_session_factory):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link,
'updated': '2014-12-06T13:13:50.690Z'}
key = b'aa426195405adee2c8081bb9e7e74b19'
blob = self._make_one(
blob_name, bucket=bucket, properties=properties, encryption_key=key)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(wrote, b'abcdef')
self.assertEqual(mtime, updated_time)
header_key_value = 'YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk='
header_key_hash_value = 'V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0='
key_headers = {
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
}
self._check_session_mocks(
client, fake_session_factory, media_link, headers=key_headers)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_as_string(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
fetched = blob.download_as_string()
self.assertEqual(fetched, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
def test__get_content_type_explicit(self):
blob = self._make_one(u'blob-name', bucket=None)
content_type = u'text/plain'
return_value = blob._get_content_type(content_type)
self.assertEqual(return_value, content_type)
def test__get_content_type_from_blob(self):
blob = self._make_one(u'blob-name', bucket=None)
blob.content_type = u'video/mp4'
return_value = blob._get_content_type(None)
self.assertEqual(return_value, blob.content_type)
def test__get_content_type_from_filename(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None, filename='archive.tar')
self.assertEqual(return_value, 'application/x-tar')
def test__get_content_type_default(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None)
self.assertEqual(return_value, u'application/octet-stream')
def test__get_writable_metadata_no_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_with_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
blob.storage_class = 'NEARLINE'
blob.cache_control = 'max-age=3600'
blob.metadata = {'color': 'red'}
object_metadata = blob._get_writable_metadata()
expected = {
'cacheControl': blob.cache_control,
'metadata': blob.metadata,
'name': name,
'storageClass': blob.storage_class,
}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_unwritable_field(self):
name = u'blob-name'
properties = {'updated': '2016-10-16T18:18:18.181Z'}
blob = self._make_one(name, bucket=None, properties=properties)
blob._changes.add('updated')
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_upload_arguments(self):
name = u'blob-name'
key = b'[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO'
blob = self._make_one(name, bucket=None, encryption_key=key)
blob.content_disposition = 'inline'
content_type = u'image/jpeg'
info = blob._get_upload_arguments(content_type)
headers, object_metadata, new_content_type = info
header_key_value = 'W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8='
header_key_hash_value = 'G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg='
expected_headers = {
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
}
self.assertEqual(headers, expected_headers)
expected_metadata = {
'contentDisposition': blob.content_disposition,
'name': name,
}
self.assertEqual(object_metadata, expected_metadata)
self.assertEqual(new_content_type, content_type)
def _mock_transport(self, status_code, headers, content=b''):
fake_transport = mock.Mock(spec=['request'])
fake_response = self._mock_requests_response(
status_code, headers, content=content)
fake_transport.request.return_value = fake_response
return fake_transport
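# Drives a multipart upload and checks the request byte-for-byte: the payload
# is a two-part MIME body (JSON object metadata, then the media bytes) framed
# by the patched '==0==' boundary, POSTed to .../o?uploadType=multipart.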
def _do_multipart_success(self, mock_get_boundary, size=None,
num_retries=None):
bucket = mock.Mock(path='/b/w00t', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
self.assertIsNone(blob.chunk_size)
fake_transport = self._mock_transport(http_client.OK, {})
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
client = mock.sentinel.client
data = b'data here hear hier'
stream = io.BytesIO(data)
content_type = u'application/xml'
response = blob._do_multipart_upload(
client, stream, content_type, size, num_retries)
self.assertIs(response, fake_transport.request.return_value)
if size is None:
data_read = data
self.assertEqual(stream.tell(), len(data))
else:
data_read = data[:size]
self.assertEqual(stream.tell(), size)
blob._make_transport.assert_called_once_with(client)
mock_get_boundary.assert_called_once_with()
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=multipart')
payload = (
b'--==0==\r\n' +
b'content-type: application/json; charset=UTF-8\r\n\r\n' +
b'{"name": "blob-name"}\r\n' +
b'--==0==\r\n' +
b'content-type: application/xml\r\n\r\n' +
data_read +
b'\r\n--==0==--')
headers = {'content-type': b'multipart/related; boundary="==0=="'}
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=headers)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_no_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, size=10)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, num_retries=8)
def test__do_multipart_upload_bad_size(self):
blob = self._make_one(u'blob-name', bucket=None)
data = b'data here hear hier'
stream = io.BytesIO(data)
size = 50
self.assertGreater(size, len(data))
with self.assertRaises(ValueError) as exc_info:
blob._do_multipart_upload(None, stream, None, size, None)
exc_contents = str(exc_info.exception)
self.assertIn(
'was specified but the file-like object only had', exc_contents)
self.assertEqual(stream.tell(), len(data))
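# Exercises _initiate_resumable_upload(): a POST of the JSON object metadata
# to .../o?uploadType=resumable, carrying x-upload-content-type (and, when a
# size is given, x-upload-content-length). The 'location' response header
# becomes the ResumableUpload session URL.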
def _initiate_resumable_helper(self, size=None, extra_headers=None,
chunk_size=None, num_retries=None):
from google.resumable_media.requests import ResumableUpload
bucket = mock.Mock(path='/b/whammy', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.metadata = {'rook': 'takes knight'}
blob.chunk_size = 3 * blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
object_metadata = blob._get_writable_metadata()
blob._get_writable_metadata = mock.Mock(
return_value=object_metadata, spec=[])
resumable_url = 'http://test.invalid?upload_id=hey-you'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
client = mock.sentinel.client
data = b'hello hallo halo hi-low'
stream = io.BytesIO(data)
content_type = u'text/plain'
upload, transport = blob._initiate_resumable_upload(
client, stream, content_type, size, num_retries,
extra_headers=extra_headers, chunk_size=chunk_size)
self.assertIsInstance(upload, ResumableUpload)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
self.assertEqual(upload.upload_url, upload_url)
if extra_headers is None:
self.assertEqual(upload._headers, {})
else:
self.assertEqual(upload._headers, extra_headers)
self.assertIsNot(upload._headers, extra_headers)
self.assertFalse(upload.finished)
if chunk_size is None:
self.assertEqual(upload._chunk_size, blob.chunk_size)
else:
self.assertNotEqual(blob.chunk_size, chunk_size)
self.assertEqual(upload._chunk_size, chunk_size)
self.assertIs(upload._stream, stream)
if size is None:
self.assertIsNone(upload._total_bytes)
else:
self.assertEqual(upload._total_bytes, size)
self.assertEqual(upload._content_type, content_type)
self.assertEqual(upload.resumable_url, resumable_url)
retry_strategy = upload._retry_strategy
self.assertEqual(retry_strategy.max_sleep, 64.0)
if num_retries is None:
self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
self.assertIsNone(retry_strategy.max_retries)
else:
self.assertIsNone(retry_strategy.max_cumulative_retry)
self.assertEqual(retry_strategy.max_retries, num_retries)
self.assertIs(transport, fake_transport)
self.assertEqual(stream.tell(), 0)
blob._get_writable_metadata.assert_called_once_with()
blob._make_transport.assert_called_once_with(client)
payload = json.dumps(object_metadata).encode('utf-8')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
if extra_headers is not None:
expected_headers.update(extra_headers)
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test__initiate_resumable_upload_no_size(self):
self._initiate_resumable_helper()
def test__initiate_resumable_upload_with_size(self):
self._initiate_resumable_helper(size=10000)
def test__initiate_resumable_upload_with_chunk_size(self):
one_mb = 1048576
self._initiate_resumable_helper(chunk_size=one_mb)
def test__initiate_resumable_upload_with_extra_headers(self):
extra_headers = {'origin': 'http://not-in-kansas-anymore.invalid'}
self._initiate_resumable_helper(extra_headers=extra_headers)
def test__initiate_resumable_upload_with_retry(self):
self._initiate_resumable_helper(num_retries=11)
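# Builds a transport scripted with the three responses of a two-chunk
# resumable upload: 200 with a 'location' header (initiate), then 308
# PERMANENT_REDIRECT with a 'range' header (first chunk accepted), then a
# final 200 whose JSON body reports the object size.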
def _make_resumable_transport(self, headers1, headers2,
headers3, total_bytes):
from google import resumable_media
fake_transport = mock.Mock(spec=['request'])
fake_response1 = self._mock_requests_response(
http_client.OK, headers1)
fake_response2 = self._mock_requests_response(
resumable_media.PERMANENT_REDIRECT, headers2)
json_body = '{{"size": "{:d}"}}'.format(total_bytes)
fake_response3 = self._mock_requests_response(
http_client.OK, headers3,
content=json_body.encode('utf-8'))
responses = [fake_response1, fake_response2, fake_response3]
fake_transport.request.side_effect = responses
return fake_transport, responses
@staticmethod
def _do_resumable_upload_call0(blob, content_type, size=None):
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
blob.bucket.path +
'/o?uploadType=resumable')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
payload = json.dumps({'name': blob.name}).encode('utf-8')
return mock.call(
'POST', upload_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call1(blob, content_type, data,
resumable_url, size=None):
if size is None:
content_range = 'bytes 0-{:d}/*'.format(blob.chunk_size - 1)
else:
content_range = 'bytes 0-{:d}/{:d}'.format(
blob.chunk_size - 1, size)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[:blob.chunk_size]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call2(blob, content_type, data,
resumable_url, total_bytes):
content_range = 'bytes {:d}-{:d}/{:d}'.format(
blob.chunk_size, total_bytes - 1, total_bytes)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[blob.chunk_size:]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
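# End-to-end resumable upload: the payload is sized to span exactly two
# chunks, so the mock transport should see the initiating POST followed by
# two PUTs whose content-range headers are rebuilt by the
# _do_resumable_upload_call* helpers above.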
def _do_resumable_helper(self, use_size=False, num_retries=None):
bucket = mock.Mock(path='/b/yesterday', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
data = b'<html>' + (b'A' * blob.chunk_size) + b'</html>'
total_bytes = len(data)
if use_size:
size = total_bytes
else:
size = None
resumable_url = 'http://test.invalid?upload_id=and-then-there-was-1'
headers1 = {'location': resumable_url}
headers2 = {'range': 'bytes=0-{:d}'.format(blob.chunk_size - 1)}
fake_transport, responses = self._make_resumable_transport(
headers1, headers2, {}, total_bytes)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
client = mock.sentinel.client
stream = io.BytesIO(data)
content_type = u'text/html'
response = blob._do_resumable_upload(
client, stream, content_type, size, num_retries)
self.assertIs(response, responses[2])
self.assertEqual(stream.tell(), total_bytes)
blob._make_transport.assert_called_once_with(client)
call0 = self._do_resumable_upload_call0(blob, content_type, size=size)
call1 = self._do_resumable_upload_call1(
blob, content_type, data, resumable_url, size=size)
call2 = self._do_resumable_upload_call2(
blob, content_type, data, resumable_url, total_bytes)
self.assertEqual(
fake_transport.request.mock_calls, [call0, call1, call2])
def test__do_resumable_upload_no_size(self):
self._do_resumable_helper()
def test__do_resumable_upload_with_size(self):
self._do_resumable_helper(use_size=True)
def test__do_resumable_upload_with_retry(self):
self._do_resumable_helper(num_retries=6)
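# _do_upload() is expected to dispatch on chunk_size: multipart when it is
# None, resumable otherwise, returning the decoded JSON of the response in
# both cases.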
def _do_upload_helper(self, chunk_size=None, num_retries=None):
blob = self._make_one(u'blob-name', bucket=None)
response = mock.Mock(spec=[u'json'])
response.json.return_value = mock.sentinel.json
blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])
if chunk_size is None:
self.assertIsNone(blob.chunk_size)
else:
blob.chunk_size = chunk_size
self.assertIsNotNone(blob.chunk_size)
client = mock.sentinel.client
stream = mock.sentinel.stream
content_type = u'video/mp4'
size = 12345654321
created_json = blob._do_upload(
client, stream, content_type, size, num_retries)
self.assertIs(created_json, mock.sentinel.json)
response.json.assert_called_once_with()
if chunk_size is None:
blob._do_multipart_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
blob._do_resumable_upload.assert_not_called()
else:
blob._do_multipart_upload.assert_not_called()
blob._do_resumable_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
def test__do_upload_without_chunk_size(self):
self._do_upload_helper()
def test__do_upload_with_chunk_size(self):
chunk_size = 1024 * 1024 * 1024
self._do_upload_helper(chunk_size=chunk_size)
def test__do_upload_with_retry(self):
self._do_upload_helper(num_retries=20)
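# Helper for upload_from_file(): the stream is deliberately seeked to offset
# 2 so the rewind tests can observe whether upload_from_file() reset it. The
# stream is returned for those tell() assertions; ``side_effect`` lets the
# failure test inject an InvalidResponse.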
def _upload_from_file_helper(self, side_effect=None, **kwargs):
from google.cloud._helpers import UTC
blob = self._make_one('blob-name', bucket=None)
created_json = {'updated': '2017-01-01T09:09:09.081Z'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
if side_effect is not None:
blob._do_upload.side_effect = side_effect
self.assertIsNone(blob.updated)
data = b'data is here'
stream = io.BytesIO(data)
stream.seek(2)
content_type = u'font/woff'
client = mock.sentinel.client
ret_val = blob.upload_from_file(
stream, size=len(data), content_type=content_type,
client=client, **kwargs)
self.assertIsNone(ret_val)
new_updated = datetime.datetime(
2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
self.assertEqual(blob.updated, new_updated)
num_retries = kwargs.get('num_retries')
blob._do_upload.assert_called_once_with(
client, stream, content_type, len(data), num_retries)
return stream
def test_upload_from_file_success(self):
stream = self._upload_from_file_helper()
assert stream.tell() == 2
@mock.patch('warnings.warn')
def test_upload_from_file_with_retries(self, mock_warn):
from google.cloud.storage import blob as blob_module
self._upload_from_file_helper(num_retries=20)
mock_warn.assert_called_once_with(
blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning)
def test_upload_from_file_with_rewind(self):
stream = self._upload_from_file_helper(rewind=True)
assert stream.tell() == 0
def test_upload_from_file_failure(self):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'Someone is already in this spot.'
response = requests.Response()
response._content = message
response.status_code = http_client.CONFLICT
response.request = requests.Request(
'POST', 'http://example.com').prepare()
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.Conflict) as exc_info:
self._upload_from_file_helper(side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
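# Unpacks the single recorded _do_upload() call, checks the client, content
# type, size, and num_retries arguments, and hands back the stream positional
# argument for further assertions.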
def _do_upload_mock_call_helper(self, blob, client, content_type, size):
self.assertEqual(blob._do_upload.call_count, 1)
mock_call = blob._do_upload.mock_calls[0]
call_name, pos_args, kwargs = mock_call
self.assertEqual(call_name, '')
self.assertEqual(len(pos_args), 5)
self.assertEqual(pos_args[0], client)
self.assertEqual(pos_args[2], content_type)
self.assertEqual(pos_args[3], size)
self.assertIsNone(pos_args[4])
self.assertEqual(kwargs, {})
return pos_args[1]
def test_upload_from_filename(self):
from google.cloud._testing import _NamedTemporaryFile
blob = self._make_one('blob-name', bucket=None)
created_json = {'metadata': {'mint': 'ice-cream'}}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
self.assertIsNone(blob.metadata)
data = b'soooo much data'
content_type = u'image/svg+xml'
client = mock.sentinel.client
with _NamedTemporaryFile() as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(data)
ret_val = blob.upload_from_filename(
temp.name, content_type=content_type, client=client)
self.assertIsNone(ret_val)
self.assertEqual(blob.metadata, created_json['metadata'])
stream = self._do_upload_mock_call_helper(
blob, client, content_type, len(data))
self.assertTrue(stream.closed)
self.assertEqual(stream.mode, 'rb')
self.assertEqual(stream.name, temp.name)
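# upload_from_string() should encode text input as UTF-8, wrap the payload in
# a BytesIO stream, and default the content type to 'text/plain'; the bytes
# and text tests below share this helper.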
def _upload_from_string_helper(self, data, **kwargs):
from google.cloud._helpers import _to_bytes
blob = self._make_one('blob-name', bucket=None)
created_json = {'componentCount': '5'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
self.assertIsNone(blob.component_count)
client = mock.sentinel.client
ret_val = blob.upload_from_string(data, client=client, **kwargs)
self.assertIsNone(ret_val)
self.assertEqual(blob.component_count, 5)
payload = _to_bytes(data, encoding='utf-8')
stream = self._do_upload_mock_call_helper(
blob, client, 'text/plain', len(payload))
self.assertIsInstance(stream, io.BytesIO)
self.assertEqual(stream.getvalue(), payload)
def test_upload_from_string_w_bytes(self):
data = b'XB]jb\xb8tad\xe0'
self._upload_from_string_helper(data)
def test_upload_from_string_w_text(self):
data = u'\N{snowman} \N{sailboat}'
self._upload_from_string_helper(data)
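# create_resumable_upload_session() issues the same initiating POST as a
# resumable upload (uploadType=resumable, x-upload-content-* headers, plus an
# Origin header when requested) and returns the session URL from the
# 'location' response header without consuming any data.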
def _create_resumable_upload_session_helper(self, origin=None,
side_effect=None):
bucket = mock.Mock(path='/b/alex-trebek', spec=[u'path'])
blob = self._make_one('blob-name', bucket=bucket)
chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
blob.chunk_size = chunk_size
resumable_url = 'http://test.invalid?upload_id=clean-up-everybody'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
if side_effect is not None:
fake_transport.request.side_effect = side_effect
content_type = u'text/plain'
size = 10000
client = mock.sentinel.client
new_url = blob.create_resumable_upload_session(
content_type=content_type, size=size,
origin=origin, client=client)
self.assertEqual(new_url, resumable_url)
self.assertEqual(blob.chunk_size, chunk_size)
blob._make_transport.assert_called_once_with(client)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
payload = b'{"name": "blob-name"}'
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-length': str(size),
'x-upload-content-type': content_type,
}
if origin is not None:
expected_headers['Origin'] = origin
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test_create_resumable_upload_session(self):
self._create_resumable_upload_session_helper()
def test_create_resumable_upload_session_with_origin(self):
self._create_resumable_upload_session_helper(
origin='http://google.com')
def test_create_resumable_upload_session_with_failure(self):
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'5-oh-3 woe is me.'
response = self._mock_requests_response(
content=message, status_code=http_client.SERVICE_UNAVAILABLE,
headers={})
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
self._create_resumable_upload_session_helper(
side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def test_get_iam_policy(self):
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
RETURNED = {
'resourceId': PATH,
'etag': ETAG,
'version': VERSION,
'bindings': [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
after = ({'status': http_client.OK}, RETURNED)
EXPECTED = {
binding['role']: set(binding['members'])
for binding in RETURNED['bindings']}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
policy = blob.get_iam_policy()
self.assertIsInstance(policy, Policy)
self.assertEqual(policy.etag, RETURNED['etag'])
self.assertEqual(policy.version, RETURNED['version'])
self.assertEqual(dict(policy), EXPECTED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
def test_set_iam_policy(self):
import operator
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
BINDINGS = [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
]
RETURNED = {
'etag': ETAG,
'version': VERSION,
'bindings': BINDINGS,
}
after = ({'status': http_client.OK}, RETURNED)
policy = Policy()
for binding in BINDINGS:
policy[binding['role']] = binding['members']
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
returned = blob.set_iam_policy(policy)
self.assertEqual(returned.etag, ETAG)
self.assertEqual(returned.version, VERSION)
self.assertEqual(dict(returned), dict(policy))
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PUT')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
sent = kw[0]['data']
self.assertEqual(sent['resourceId'], PATH)
self.assertEqual(len(sent['bindings']), len(BINDINGS))
key = operator.itemgetter('role')
for found, expected in zip(
sorted(sent['bindings'], key=key),
sorted(BINDINGS, key=key)):
self.assertEqual(found['role'], expected['role'])
self.assertEqual(
sorted(found['members']), sorted(expected['members']))
def test_test_iam_permissions(self):
from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
from google.cloud.storage.iam import STORAGE_BUCKETS_GET
from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
PERMISSIONS = [
STORAGE_OBJECTS_LIST,
STORAGE_BUCKETS_GET,
STORAGE_BUCKETS_UPDATE,
]
ALLOWED = PERMISSIONS[1:]
RETURNED = {'permissions': ALLOWED}
after = ({'status': http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
allowed = blob.test_iam_permissions(PERMISSIONS)
self.assertEqual(allowed, ALLOWED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam/testPermissions' % (PATH,))
self.assertEqual(kw[0]['query_params'], {'permissions': PERMISSIONS})
def test_make_public(self):
from google.cloud.storage.acl import _ACLEntity
BLOB_NAME = 'blob-name'
permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
after = ({'status': http_client.OK}, {'acl': permissive})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_public()
self.assertEqual(list(blob.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME)
self.assertEqual(kw[0]['data'], {'acl': permissive})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_compose_wo_content_type_set(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
DESTINATION = 'destination'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
with self.assertRaises(ValueError):
destination.compose(sources=[source_1, source_2])
def test_compose_minimal(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
def test_compose_w_additional_property_changes(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.content_language = 'en-US'
destination.metadata = {'my-key': 'my-value'}
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
'contentLanguage': 'en-US',
'metadata': {
'my-key': 'my-value',
}
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
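# rewrite() returns a (token, bytes_rewritten, object_size) triple; the token
# is the server's rewriteToken while the copy is incomplete ('done': False)
# and None once it finishes.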
def test_rewrite_response_without_resource(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
def test_rewrite_other_bucket_other_name_no_encryption_partial(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/%s/o/%s' % (
SOURCE_BLOB, DEST_BUCKET, DEST_BLOB)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
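# The remaining rewrite tests cover customer-supplied encryption keys: the
# source key travels via X-Goog-Copy-Source-Encryption-* headers and the
# destination key via X-Goog-Encryption-*, each as the base64 key plus the
# base64 SHA-256 of the raw key bytes.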
def test_rewrite_same_name_no_old_key_new_key_done(self):
import base64
import hashlib
KEY = b'01234567890123456789012345678901'
KEY_B64 = base64.b64encode(KEY).rstrip().decode('ascii')
KEY_HASH = hashlib.sha256(KEY).digest()
KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
plain = self._make_one(BLOB_NAME, bucket=bucket)
encrypted = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=KEY)
token, rewritten, size = encrypted.rewrite(plain)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertEqual(headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(headers['X-Goog-Encryption-Key'], KEY_B64)
self.assertEqual(headers['X-Goog-Encryption-Key-Sha256'], KEY_HASH_B64)
def test_rewrite_same_name_no_key_new_key_w_token(self):
import base64
import hashlib
SOURCE_KEY = b'01234567890123456789012345678901'
SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode('ascii')
SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
SOURCE_KEY_HASH_B64 = base64.b64encode(
SOURCE_KEY_HASH).rstrip().decode('ascii')
DEST_KEY = b'90123456789012345678901234567890'
DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode('ascii')
DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest()
DEST_KEY_HASH_B64 = base64.b64encode(
DEST_KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
source = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
dest = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=DEST_KEY)
token, rewritten, size = dest.rewrite(source, token=TOKEN)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {'rewriteToken': TOKEN})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], SOURCE_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
SOURCE_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], DEST_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], DEST_KEY_HASH_B64)
def test_update_storage_class_invalid(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
with self.assertRaises(ValueError):
blob.update_storage_class(u'BOGUS')
def test_update_storage_class_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_update_storage_class_w_encryption_key(self):
import base64
import hashlib
BLOB_NAME = 'blob-name'
BLOB_KEY = b'01234567890123456789012345678901'
BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode('ascii')
BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
BLOB_KEY_HASH_B64 = base64.b64encode(
BLOB_KEY_HASH).rstrip().decode('ascii')
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
BLOB_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], BLOB_KEY_HASH_B64)
def test_cache_control_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CACHE_CONTROL = 'no-cache'
properties = {'cacheControl': CACHE_CONTROL}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
BLOB_NAME = 'blob-name'
CACHE_CONTROL = 'no-cache'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.cache_control)
blob.cache_control = CACHE_CONTROL
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_component_count(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'componentCount': COMPONENT_COUNT})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_component_count_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.component_count)
def test_component_count_string_val(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'componentCount': str(COMPONENT_COUNT)})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
properties = {'contentDisposition': CONTENT_DISPOSITION}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_disposition)
blob.content_disposition = CONTENT_DISPOSITION
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_encoding_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_ENCODING = 'gzip'
properties = {'contentEncoding': CONTENT_ENCODING}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_ENCODING = 'gzip'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_encoding)
blob.content_encoding = CONTENT_ENCODING
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_language_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_LANGUAGE = 'pt-BR'
properties = {'contentLanguage': CONTENT_LANGUAGE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_LANGUAGE = 'pt-BR'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_language)
blob.content_language = CONTENT_LANGUAGE
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_type_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_TYPE = 'image/jpeg'
properties = {'contentType': CONTENT_TYPE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_TYPE = 'image/jpeg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_type)
blob.content_type = CONTENT_TYPE
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_crc32c_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CRC32C = 'DEADBEEF'
properties = {'crc32c': CRC32C}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.crc32c, CRC32C)
def test_crc32c_setter(self):
BLOB_NAME = 'blob-name'
CRC32C = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.crc32c)
blob.crc32c = CRC32C
self.assertEqual(blob.crc32c, CRC32C)
def test_etag(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ETAG = 'ETAG'
properties = {'etag': ETAG}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.etag, ETAG)
def test_generation(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': GENERATION})
self.assertEqual(blob.generation, GENERATION)
def test_generation_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.generation)
def test_generation_string_val(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': str(GENERATION)})
self.assertEqual(blob.generation, GENERATION)
def test_id(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ID = 'ID'
properties = {'id': ID}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.id, ID)
def test_md5_hash_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MD5_HASH = 'DEADBEEF'
properties = {'md5Hash': MD5_HASH}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
BLOB_NAME = 'blob-name'
MD5_HASH = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.md5_hash)
blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_media_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.media_link, MEDIA_LINK)
def test_metadata_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
METADATA = {'foo': 'Foo'}
properties = {'metadata': METADATA}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter(self):
BLOB_NAME = 'blob-name'
METADATA = {'foo': 'Foo'}
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.metadata)
blob.metadata = METADATA
self.assertEqual(blob.metadata, METADATA)
def test_metageneration(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'metageneration': METAGENERATION})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_metageneration_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.metageneration)
def test_metageneration_string_val(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'metageneration': str(METAGENERATION)})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_owner(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
properties = {'owner': OWNER}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
owner = blob.owner
self.assertEqual(owner['entity'], 'project-owner-12345')
self.assertEqual(owner['entityId'], '23456')
def test_self_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
SELF_LINK = 'http://example.com/self/'
properties = {'selfLink': SELF_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.self_link, SELF_LINK)
def test_size(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': SIZE})
self.assertEqual(blob.size, SIZE)
def test_size_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.size)
def test_size_string_val(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': str(SIZE)})
self.assertEqual(blob.size, SIZE)
def test_storage_class_getter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'MULTI_REGIONAL'
properties = {'storageClass': storage_class}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
self.assertEqual(blob.storage_class, storage_class)
def test_storage_class_setter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'COLDLINE'
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.storage_class)
blob.storage_class = storage_class
self.assertEqual(blob.storage_class, storage_class)
self.assertEqual(blob._properties, {'storageClass': storage_class})
def test_time_deleted(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeDeleted': TIME_DELETED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_deleted, TIMESTAMP)
def test_time_deleted_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_deleted)
def test_time_created(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeCreated': TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_created, TIMESTAMP)
def test_time_created_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_created)
def test_updated(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'updated': UPDATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.updated, TIMESTAMP)
def test_updated_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.updated)
class Test__quote(unittest.TestCase):
@staticmethod
def _call_fut(value):
from google.cloud.storage.blob import _quote
return _quote(value)
def test_bytes(self):
quoted = self._call_fut(b'\xDE\xAD\xBE\xEF')
self.assertEqual(quoted, '%DE%AD%BE%EF')
def test_unicode(self):
helicopter = u'\U0001f681'
quoted = self._call_fut(helicopter)
self.assertEqual(quoted, '%F0%9F%9A%81')
def test_bad_type(self):
with self.assertRaises(TypeError):
self._call_fut(None)
class Test__maybe_rewind(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _maybe_rewind
return _maybe_rewind(*args, **kwargs)
def test_default(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_not_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=False)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=True)
self.assertIsNone(ret_val)
stream.seek.assert_called_once_with(0, os.SEEK_SET)
class Test__raise_from_invalid_response(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _raise_from_invalid_response
return _raise_from_invalid_response(*args, **kwargs)
def _helper(self, message, **kwargs):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
response = requests.Response()
response.request = requests.Request(
'GET', 'http://example.com').prepare()
response.status_code = http_client.BAD_REQUEST
response._content = message
error = InvalidResponse(response)
with self.assertRaises(exceptions.BadRequest) as exc_info:
self._call_fut(error, **kwargs)
return exc_info
def test_default(self):
message = b'Failure'
exc_info = self._helper(message)
message_str = message.decode('utf-8')
expected = 'GET http://example.com/: {}'.format(message_str)
self.assertEqual(exc_info.exception.message, expected)
self.assertEqual(exc_info.exception.errors, [])
class _Connection(object):
API_BASE_URL = 'http://example.com'
USER_AGENT = 'testing 1.2.3'
credentials = object()
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
self._signed = []
def _respond(self, **kw):
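        # Record the request and pop the next canned response (FIFO).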
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
info, content = self._respond(**kw)
if info.get('status') == http_client.NOT_FOUND:
raise NotFound(info)
return content
class _Bucket(object):
def __init__(self, client=None, name='name'):
if client is None:
connection = _Connection()
client = _Client(connection)
self.client = client
self._blobs = {}
self._copied = []
self._deleted = []
self.name = name
self.path = '/b/' + name
def delete_blob(self, blob_name, client=None):
del self._blobs[blob_name]
self._deleted.append((blob_name, client))
class _Signer(object):
def __init__(self):
self._signed = []
def __call__(self, *args, **kwargs):
self._signed.append((args, kwargs))
return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=%s' % kwargs.get('expiration'))
class _Client(object):
def __init__(self, connection):
self._base_connection = connection
@property
def _connection(self):
return self._base_connection
@property
def _credentials(self):
return self._base_connection.credentials
| true
| true
|
7904cf1ea8f94dd407c6c6e17bc6b9f8c418d059
| 11,807
|
py
|
Python
|
Data-Engineering-with-Databricks/06 - Incremental Data Processing/DE 6.1 - Incremental Data Ingestion with Auto Loader.py
|
databricks-academy/data-engineering-with-databricks
|
619532eddf7d2cce8f48772afc8d69797036890c
|
[
"CC0-1.0"
] | 35
|
2022-01-20T01:26:20.000Z
|
2022-03-30T11:56:23.000Z
|
Data-Engineering-with-Databricks/Solutions/06 - Incremental Data Processing/DE 6.1 - Incremental Data Ingestion with Auto Loader.py
|
databricks-academy/data-engineering-with-databricks
|
619532eddf7d2cce8f48772afc8d69797036890c
|
[
"CC0-1.0"
] | null | null | null |
Data-Engineering-with-Databricks/Solutions/06 - Incremental Data Processing/DE 6.1 - Incremental Data Ingestion with Auto Loader.py
|
databricks-academy/data-engineering-with-databricks
|
619532eddf7d2cce8f48772afc8d69797036890c
|
[
"CC0-1.0"
] | 30
|
2022-01-28T23:53:32.000Z
|
2022-03-31T08:25:27.000Z
|
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC # Incremental Data Ingestion with Auto Loader
# MAGIC
# MAGIC Incremental ETL is important since it allows us to deal solely with new data that has been encountered since the last ingestion. Reliably processing only the new data reduces redundant processing and helps enterprises reliably scale data pipelines.
# MAGIC
# MAGIC The first step for any successful data lakehouse implementation is ingesting into a Delta Lake table from cloud storage.
# MAGIC
# MAGIC Historically, ingesting files from a data lake into a database has been a complicated process.
# MAGIC
# MAGIC Databricks Auto Loader provides an easy-to-use mechanism for incrementally and efficiently processing new data files as they arrive in cloud file storage. In this notebook, you'll see Auto Loader in action.
# MAGIC
# MAGIC Due to the benefits and scalability that Auto Loader delivers, Databricks recommends its use as general **best practice** when ingesting data from cloud object storage.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Execute Auto Loader code to incrementally ingest data from cloud storage to Delta Lake
# MAGIC * Describe what happens when a new file arrives in a directory configured for Auto Loader
# MAGIC * Query a table fed by a streaming Auto Loader query
# MAGIC
# MAGIC ## Dataset Used
# MAGIC This demo uses simplified artificially generated medical data representing heart rate recordings delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to reset the demo and configure required variables and helper functions.
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup-6.1
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Using Auto Loader
# MAGIC
# MAGIC In the cell below, a function is defined to demonstrate using Databricks Auto Loader with the PySpark API. This code includes both a Structured Streaming read and write.
# MAGIC
# MAGIC The following notebook will provide a more robust overview of Structured Streaming. If you wish to learn more about Auto Loader options, refer to the <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">documentation</a>.
# MAGIC
# MAGIC Note that when using Auto Loader with automatic <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader-schema.html" target="_blank">schema inference and evolution</a>, the 4 arguments shown here should allow ingestion of most datasets. These arguments are explained below.
# MAGIC
# MAGIC | argument | what it is | how it's used |
# MAGIC | --- | --- | --- |
# MAGIC | **`data_source`** | The directory of the source data | Auto Loader will detect new files as they arrive in this location and queue them for ingestion; passed to the **`.load()`** method |
# MAGIC | **`source_format`** | The format of the source data | While the format for all Auto Loader queries will be **`cloudFiles`**, the format of the source data should always be specified for the **`cloudFiles.format`** option |
# MAGIC | **`table_name`** | The name of the target table | Spark Structured Streaming supports writing directly to Delta Lake tables by passing a table name as a string to the **`.table()`** method. Note that you can either append to an existing table or create a new table |
# MAGIC | **`checkpoint_directory`** | The location for storing metadata about the stream | This argument is passed to the **`checkpointLocation`** and **`cloudFiles.schemaLocation`** options. Checkpoints keep track of streaming progress, while the schema location tracks updates to the fields in the source dataset |
# MAGIC
# MAGIC **NOTE**: The code below has been streamlined to demonstrate Auto Loader functionality. We'll see in later lessons that additional transformations can be applied to source data before saving them to Delta Lake.
# COMMAND ----------
def autoload_to_table(data_source, source_format, table_name, checkpoint_directory):
query = (spark.readStream
.format("cloudFiles")
.option("cloudFiles.format", source_format)
.option("cloudFiles.schemaLocation", checkpoint_directory)
.load(data_source)
.writeStream
.option("checkpointLocation", checkpoint_directory)
.option("mergeSchema", "true")
.table(table_name))
return query
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC In the following cell, we use the previously defined function and some path variables defined in the setup script to begin an Auto Loader stream.
# MAGIC
# MAGIC Here, we're reading from a source directory of JSON files.
# COMMAND ----------
query = autoload_to_table(data_source = f"{DA.paths.working_dir}/tracker",
source_format = "json",
table_name = "target_table",
checkpoint_directory = f"{DA.paths.checkpoints}/target_table")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Because Auto Loader uses Spark Structured Streaming to load data incrementally, the code above doesn't appear to finish executing.
# MAGIC
# MAGIC We can think of this as a **continuously active query**. This means that as soon as new data arrives in our data source, it will be processed through our logic and loaded into our target table. We'll explore this in just a second.
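# COMMAND ----------
# Illustrative aside (added for this write-up, not part of the original
# lesson): the handle returned above is a standard PySpark StreamingQuery,
# so its public inspection attributes apply.
print(query.isActive)      # True while the stream is still running
print(query.lastProgress)  # metrics for the latest micro-batch (None at first)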
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Helper Function for Streaming Lessons
# MAGIC
# MAGIC Our notebook-based lessons combine streaming functions with batch and streaming queries against the results of those operations. These notebooks are for instructional purposes and intended for interactive, cell-by-cell execution. This pattern is not intended for production.
# MAGIC
# MAGIC Below, we define a helper function that prevents our notebook from executing the next cell just long enough to ensure data has been written out by a given streaming query. This code should not be necessary in a production job.
# COMMAND ----------
def block_until_stream_is_ready(query, min_batches=2):
import time
while len(query.recentProgress) < min_batches:
time.sleep(5) # Give it a couple of seconds
print(f"The stream has processed {len(query.recentProgress)} batchs")
block_until_stream_is_ready(query)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Query Target Table
# MAGIC
# MAGIC Once data has been ingested to Delta Lake with Auto Loader, users can interact with it the same way they would any table.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Note that the **`_rescued_data`** column is added by Auto Loader automatically to capture any data that might be malformed and not fit into the table otherwise.
# MAGIC
# MAGIC While Auto Loader captured the field names for our data correctly, note that it encoded all fields as **`STRING`** type. Because JSON is a text-based format, this is the safest and most permissive type, ensuring that the least amount of data is dropped or ignored at ingestion due to type mismatch.
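# COMMAND ----------
# Illustrative aside (added, not in the original lesson): because every column
# was inferred as STRING, downstream code typically casts. The column names
# below come from the dataset table documented at the top of this notebook.
from pyspark.sql.functions import col
typed_df = (spark.table("target_table")
            .select(col("device_id").cast("int"),
                    col("mrn").cast("long"),
                    col("time").cast("double"),
                    col("heartrate").cast("double")))
typed_df.printSchema()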
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE TABLE target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Use the cell below to define a temporary view that summarizes the recordings in our target table.
# MAGIC
# MAGIC We'll use this view below to demonstrate how new data is automatically ingested with Auto Loader.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW device_counts AS
# MAGIC SELECT device_id, count(*) total_recordings
# MAGIC FROM target_table
# MAGIC GROUP BY device_id;
# MAGIC
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Land New Data
# MAGIC
# MAGIC As mentioned previously, Auto Loader is configured to incrementally process files from a directory in cloud object storage into a Delta Lake table.
# MAGIC
# MAGIC We have configured and are currently executing a query to process JSON files from the location specified by **`source_path`** into a table named **`target_table`**. Let's review the contents of the **`source_path`** directory.
# COMMAND ----------
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC At present, you should see a single JSON file listed in this location.
# MAGIC
# MAGIC The method in the cell below was configured in our setup script to allow us to model an external system writing data to this directory. Each time you execute the cell below, a new file will land in the **`source_path`** directory.
# COMMAND ----------
DA.data_factory.load()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC List the contents of the **`source_path`** again using the cell below. You should see an additional JSON file for each time you ran the previous cell.
# COMMAND ----------
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Tracking Ingestion Progress
# MAGIC
# MAGIC Historically, many systems have been configured to either reprocess all records in a source directory to calculate current results or require data engineers to implement custom logic to identify new data that's arrived since the last time a table was updated.
# MAGIC
# MAGIC With Auto Loader, your table has already been updated.
# MAGIC
# MAGIC Run the query below to confirm that new data has been ingested.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC The Auto Loader query we configured earlier automatically detects and processes records from the source directory into the target table. There is a slight delay as records are ingested, but an Auto Loader query executing with default streaming configuration should update results in near real time.
# MAGIC
# MAGIC The query below shows the table history. A new table version should be indicated for each **`STREAMING UPDATE`**. These update events coincide with new batches of data arriving at the source.
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE HISTORY target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Clean Up
# MAGIC Feel free to continue landing new data and exploring the table results with the cells above.
# MAGIC
# MAGIC When you're finished, run the following cell to stop all active streams and remove created resources before continuing.
# COMMAND ----------
DA.cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.167857
| 315
| 0.720505
|
# MAGIC Due to the benefits and scalability that Auto Loader delivers, Databricks recommends its use as general **best practice** when ingesting data from cloud object storage.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Execute Auto Loader code to incrementally ingest data from cloud storage to Delta Lake
# MAGIC * Describe what happens when a new file arrives in a directory configured for Auto Loader
# MAGIC * Query a table fed by a streaming Auto Loader query
# MAGIC
# MAGIC ## Dataset Used
# MAGIC This demo uses simplified artificially generated medical data representing heart rate recordings delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to reset the demo and configure required variables and helper functions.
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup-6.1
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Using Auto Loader
# MAGIC
# MAGIC In the cell below, a function is defined to demonstrate using Databricks Auto Loader with the PySpark API. This code includes both a Structured Streaming read and write.
# MAGIC
# MAGIC The following notebook will provide a more robust overview of Structured Streaming. If you wish to learn more about Auto Loader options, refer to the <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">documentation</a>.
# MAGIC
# MAGIC Note that when using Auto Loader with automatic <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader-schema.html" target="_blank">schema inference and evolution</a>, the 4 arguments shown here should allow ingestion of most datasets. These arguments are explained below.
# MAGIC
# MAGIC | argument | what it is | how it's used |
# COMMAND ----------
def autoload_to_table(data_source, source_format, table_name, checkpoint_directory):
query = (spark.readStream
.format("cloudFiles")
.option("cloudFiles.format", source_format)
.option("cloudFiles.schemaLocation", checkpoint_directory)
.load(data_source)
.writeStream
.option("checkpointLocation", checkpoint_directory)
.option("mergeSchema", "true")
.table(table_name))
return query
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC In the following cell, we use the previously defined function and some path variables defined in the setup script to begin an Auto Loader stream.
# MAGIC
# MAGIC Here, we're reading from a source directory of JSON files.
query = autoload_to_table(data_source = f"{DA.paths.working_dir}/tracker",
source_format = "json",
table_name = "target_table",
checkpoint_directory = f"{DA.paths.checkpoints}/target_table")
# MAGIC
# MAGIC We can think of this as a **continuously active query**. This means that as soon as new data arrives in our data source, it will be processed through our logic and loaded into our target table. We'll explore this in just a second.
def block_until_stream_is_ready(query, min_batches=2):
    import time
    while len(query.recentProgress) < min_batches:
        time.sleep(5)
    print(f"The stream has processed {len(query.recentProgress)} batches")
block_until_stream_is_ready(query)
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW device_counts AS
# MAGIC SELECT device_id, count(*) total_recordings
# MAGIC FROM target_table
# MAGIC GROUP BY device_id;
# MAGIC
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Land New Data
# MAGIC
# MAGIC As mentioned previously, Auto Loader is configured to incrementally process files from a directory in cloud object storage into a Delta Lake table.
# MAGIC
# MAGIC We have configured and are currently executing a query to process JSON files from the location specified by **`source_path`** into a table named **`target_table`**. Let's review the contents of the **`source_path`** directory.
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
DA.data_factory.load()
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
# MAGIC With Auto Loader, your table has already been updated.
# MAGIC
# MAGIC Run the query below to confirm that new data has been ingested.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC The Auto Loader query we configured earlier automatically detects and processes records from the source directory into the target table. There is a slight delay as records are ingested, but an Auto Loader query executing with default streaming configuration should update results in near real time.
# MAGIC
# MAGIC The query below shows the table history. A new table version should be indicated for each **`STREAMING UPDATE`**. These update events coincide with new batches of data arriving at the source.
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE HISTORY target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Clean Up
# MAGIC Feel free to continue landing new data and exploring the table results with the cells above.
# MAGIC
# MAGIC When you're finished, run the following cell to stop all active streams and remove created resources before continuing.
DA.cleanup()
| true
| true
|
7904d0fddb05ad2b0eb2abb776fcea08505d5f30
| 2,128
|
py
|
Python
|
test-toolkit/integration/__init__.py
|
YYStreet/sagemaker-pytorch-serving-container
|
97ce79900b3fcbc644b4c58c787c84c881d611f9
|
[
"Apache-2.0"
] | null | null | null |
test-toolkit/integration/__init__.py
|
YYStreet/sagemaker-pytorch-serving-container
|
97ce79900b3fcbc644b4c58c787c84c881d611f9
|
[
"Apache-2.0"
] | null | null | null |
test-toolkit/integration/__init__.py
|
YYStreet/sagemaker-pytorch-serving-container
|
97ce79900b3fcbc644b4c58c787c84c881d611f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
mnist_path = os.path.join(resources_path, 'mnist')
data_dir = os.path.join(mnist_path, 'data')
training_dir = os.path.join(data_dir, 'training')
cpu_sub_dir = 'model_cpu'
gpu_sub_dir = 'model_gpu'
eia_sub_dir = 'model_eia'
model_cpu_dir = os.path.join(mnist_path, cpu_sub_dir)
mnist_cpu_script = os.path.join(model_cpu_dir, 'mnist.py')
model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
mnist_1d_script = os.path.join(model_cpu_1d_dir, 'mnist_1d.py')
model_gpu_dir = os.path.join(mnist_path, gpu_sub_dir)
mnist_gpu_script = os.path.join(model_gpu_dir, 'mnist.py')
model_gpu_1d_dir = os.path.join(model_gpu_dir, '1d')
model_eia_dir = os.path.join(mnist_path, eia_sub_dir)
mnist_eia_script = os.path.join(model_eia_dir, 'mnist.py')
call_model_fn_once_script = os.path.join(model_cpu_dir, 'call_model_fn_once.py')
ROLE = 'dummy/unused-role'
DEFAULT_TIMEOUT = 20
PYTHON3 = 'py3'
RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
# These regions have some p2 and p3 instances, but not enough for automated testing
NO_P2_REGIONS = ['ca-central-1', 'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3',
'eu-north-1', 'sa-east-1', 'ap-east-1']
NO_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3', 'eu-north-1',
'sa-east-1', 'ap-east-1']
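# Illustrative helper (added; not part of the original module). It shows how
# the region lists above are typically consulted before scheduling GPU tests;
# the function name and instance-type prefixes are assumptions of this sketch.
def gpu_skip_reason(region, instance_type):
    if instance_type.startswith('ml.p2') and region in NO_P2_REGIONS:
        return 'p2 instances are not available in {}'.format(region)
    if instance_type.startswith('ml.p3') and region in NO_P3_REGIONS:
        return 'p3 instances are not available in {}'.format(region)
    return None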
| 44.333333
| 92
| 0.724624
|
from __future__ import absolute_import
import os
resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
mnist_path = os.path.join(resources_path, 'mnist')
data_dir = os.path.join(mnist_path, 'data')
training_dir = os.path.join(data_dir, 'training')
cpu_sub_dir = 'model_cpu'
gpu_sub_dir = 'model_gpu'
eia_sub_dir = 'model_eia'
model_cpu_dir = os.path.join(mnist_path, cpu_sub_dir)
mnist_cpu_script = os.path.join(model_cpu_dir, 'mnist.py')
model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
mnist_1d_script = os.path.join(model_cpu_1d_dir, 'mnist_1d.py')
model_gpu_dir = os.path.join(mnist_path, gpu_sub_dir)
mnist_gpu_script = os.path.join(model_gpu_dir, 'mnist.py')
model_gpu_1d_dir = os.path.join(model_gpu_dir, '1d')
model_eia_dir = os.path.join(mnist_path, eia_sub_dir)
mnist_eia_script = os.path.join(model_eia_dir, 'mnist.py')
call_model_fn_once_script = os.path.join(model_cpu_dir, 'call_model_fn_once.py')
ROLE = 'dummy/unused-role'
DEFAULT_TIMEOUT = 20
PYTHON3 = 'py3'
RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
NO_P2_REGIONS = ['ca-central-1', 'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3',
'eu-north-1', 'sa-east-1', 'ap-east-1']
NO_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3', 'eu-north-1',
'sa-east-1', 'ap-east-1']
| true
| true
|
7904d1e2e793a6986cfe6cab88aaf8f93ca90613
| 2,122
|
py
|
Python
|
adv_sample.py
|
ssleg/qiwi_module
|
ef203a904e8ccd6e784b25ccded9d56c9719f2de
|
[
"MIT"
] | null | null | null |
adv_sample.py
|
ssleg/qiwi_module
|
ef203a904e8ccd6e784b25ccded9d56c9719f2de
|
[
"MIT"
] | null | null | null |
adv_sample.py
|
ssleg/qiwi_module
|
ef203a904e8ccd6e784b25ccded9d56c9719f2de
|
[
"MIT"
] | null | null | null |
# Qiwi module advanced usage example v1.00
# 17/05/2021
# https://t.me/ssleg © 2021
import logging
import qiwi_module
# Set up the log file test.log; all errors and warnings will be written there.
lfile = logging.FileHandler('test.log', 'a', 'utf-8')
lfile.setFormatter(logging.Formatter('%(levelname)s %(module)-13s [%(asctime)s] %(message)s'))
# noinspection PyArgumentList
logging.basicConfig(level=logging.INFO, handlers=[lfile])
# For the simple usage variant, see the file sample.py.
# If you have configured a custom look for the payment form, the theme code
# must be passed to the module. This is done once, at initialization.
# The code itself and the form settings are on the page https://qiwi.com/p2p-admin/transfers/link
theme_code = 'Ivanov-XX-vvv-k_'
# The module must be initialized once before any use.
qiwi_module.init(theme_code)
# Create an invoice for 1 ruble. On success you get a URL with a payment form for the client.
# On failure, False is returned and the details are written to the log.
# You invent and store invoice identifiers yourself; they must always be unique.
bill_id = 'bill_2021_00000002'
# By default an invoice is valid for 15 minutes, but you can set your own lifetime, e.g. one day and one minute.
valid_hours = 24
valid_minutes = 1
# There is also a comment field that the client sees on the payment form; e.g. order details can go there.
comment = 'Left-hand-threaded screw for Sidorov.'
invoice_url = qiwi_module.create_bill(1.00, bill_id, comment, valid_hours, valid_minutes)
print(invoice_url)
# Check the payment status.
# Returns one of four possible values on success, otherwise False and a log entry.
# 'WAITING'  - the invoice has been issued and is awaiting payment.
# 'PAID'     - the invoice has been paid.
# 'REJECTED' - the invoice was cancelled on your side.
# 'EXPIRED'  - the invoice was not paid and its validity period has expired.
# Can be called every second or less often.
pay_status = qiwi_module.bill_status(bill_id)
print(pay_status)
# Cancel the invoice if you need to.
# Returns 'REJECTED' on success, otherwise False and a log entry.
bill_status = qiwi_module.cancel_bill(bill_id)
print(bill_status)
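# Illustrative polling sketch (added; uses only the qiwi_module calls shown
# above). Poll the status every 10 seconds, for up to a minute, until the
# invoice leaves the 'WAITING' state.
import time
for _ in range(6):
    status = qiwi_module.bill_status(bill_id)
    if status != 'WAITING':
        break
    time.sleep(10)
print(status)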
| 38.581818
| 112
| 0.780396
|
import logging
import qiwi_module
lfile = logging.FileHandler('test.log', 'a', 'utf-8')
lfile.setFormatter(logging.Formatter('%(levelname)s %(module)-13s [%(asctime)s] %(message)s'))
logging.basicConfig(level=logging.INFO, handlers=[lfile])
theme_code = 'Ivanov-XX-vvv-k_'
qiwi_module.init(theme_code)
bill_id = 'bill_2021_00000002'
valid_hours = 24
valid_minutes = 1
comment = 'Left-hand-threaded screw for Sidorov.'
invoice_url = qiwi_module.create_bill(1.00, bill_id, comment, valid_hours, valid_minutes)
print(invoice_url)
pay_status = qiwi_module.bill_status(bill_id)
print(pay_status)
bill_status = qiwi_module.cancel_bill(bill_id)
print(bill_status)
| true
| true
|
7904d2173251d44a6dba2f960b035e3cd19775e1
| 700
|
py
|
Python
|
phc/easy/omics/options/gene_class.py
|
taylordeatri/phc-sdk-py
|
8f3ec6ac44e50c7194f174fd0098de390886693d
|
[
"MIT"
] | 1
|
2020-07-22T12:46:58.000Z
|
2020-07-22T12:46:58.000Z
|
phc/easy/omics/options/gene_class.py
|
taylordeatri/phc-sdk-py
|
8f3ec6ac44e50c7194f174fd0098de390886693d
|
[
"MIT"
] | 54
|
2019-10-09T16:19:04.000Z
|
2022-01-19T20:28:59.000Z
|
phc/easy/omics/options/gene_class.py
|
taylordeatri/phc-sdk-py
|
8f3ec6ac44e50c7194f174fd0098de390886693d
|
[
"MIT"
] | 2
|
2019-10-30T19:54:43.000Z
|
2020-12-03T18:57:15.000Z
|
from enum import Enum
class GeneClass(str, Enum):
PROTEIN_CODING = ("protein coding,nonsense mediated decay",)
PSEUDOGENE = "pseudogene,unprocessed pseudogene,polymorphic pseudogene,unitary pseudogene,transcribed unprocessed pseudogene,transcribed processed pseudogene, IG pseudogene"
MICRO_RNA = "micro RNA"
SHORT_NCRNA = (
"piRNA,rRNA,siRNA,snRNA,snoRNA,tRNA,scaRNA,vaultRNA,sRNA,misc RNA"
)
LONG_NCRNA = "lincRNA,macro IncRNA,prime3 overlapping ncrna,antisense,retained intron,sense intronic,sense overlapping,macro IncRNA,bidirectional IncRNA"
IMMUNOGLOBULIN = "IG C gene,IG D gene,IG J gene,IG V gene"
T_CELL_RECEPTOR = "TR C gene,TR J gene, TR V gene"
| 50
| 177
| 0.76
|
from enum import Enum
class GeneClass(str, Enum):
PROTEIN_CODING = ("protein coding,nonsense mediated decay",)
PSEUDOGENE = "pseudogene,unprocessed pseudogene,polymorphic pseudogene,unitary pseudogene,transcribed unprocessed pseudogene,transcribed processed pseudogene, IG pseudogene"
MICRO_RNA = "micro RNA"
SHORT_NCRNA = (
"piRNA,rRNA,siRNA,snRNA,snoRNA,tRNA,scaRNA,vaultRNA,sRNA,misc RNA"
)
LONG_NCRNA = "lincRNA,macro IncRNA,prime3 overlapping ncrna,antisense,retained intron,sense intronic,sense overlapping,macro IncRNA,bidirectional IncRNA"
IMMUNOGLOBULIN = "IG C gene,IG D gene,IG J gene,IG V gene"
T_CELL_RECEPTOR = "TR C gene,TR J gene, TR V gene"
| true
| true
|
7904d29a91c1be5ca4a0c62b127284cd0e90032d
| 1,405
|
py
|
Python
|
Python3/0943-Find-the-Shortest-Superstring/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0943-Find-the-Shortest-Superstring/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0943-Find-the-Shortest-Superstring/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
from typing import List
import math
class Solution:
    def shortestSuperstring(self, A: List[str]) -> str:
        n = len(A)
        # saved[i][j] = length of the longest suffix of A[i] that is also a
        # prefix of A[j], i.e. how many characters placing j after i saves.
        saved = [[0] * n for _ in range(n)]
        for i in range(n):
            for j in range(n):
                if i == j:
                    saved[i][j] = len(A[i])
                    continue
                wi, wj = A[i], A[j]
                for k in range(min(len(wi), len(wj)), 0, -1):
                    if wi[-k:] == wj[:k]:
                        saved[i][j] = k
                        break
        # dp[state][j] = shortest superstring covering the words in bitmask
        # `state` and ending with word j ('' means not reachable yet).
        m = (1 << n)
        dp = [[''] * n for _ in range(m)]
        for state in range(m):
            for j in range(n):
                if state & (1 << j) == 0:
                    continue
                if state == (1 << j):
                    dp[state][j] = A[j]  # base case: a single word
                else:
                    for k in range(n):
                        if k == j:
                            continue
                        if state & (1 << k):
                            # extend the best string ending at j (state
                            # without k) by the non-overlapping tail of A[k]
                            temp = dp[state ^ (1 << k)][j]
                            temp += A[k][saved[j][k]:]
                            if dp[state][k] == "" or len(dp[state][k]) > len(temp):
                                dp[state][k] = temp
        # answer: shortest string over the full mask, any ending word
        mx = math.inf
        ans = None
        for j in range(n):
            if len(dp[m - 1][j]) < mx:
                mx = len(dp[m - 1][j])
                ans = dp[m - 1][j]
        return ans
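if __name__ == "__main__":
    # Illustrative check (added): the official example for LeetCode 943.
    # One valid answer of length 16 is "gctaagttcatgcatc".
    print(Solution().shortestSuperstring(["catg", "ctaagt", "gcta", "ttca", "atgcatc"]))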
| 36.025641
| 83
| 0.308185
|
from typing import List
import math
class Solution:
def shortestSuperstring(self, A: List[str]) -> str:
n = len(A)
saved = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
if i == j:
saved[i][j] = len(A[i])
continue
wi, wj = A[i], A[j]
for k in range(min(len(wi), len(wj)), 0, -1):
if wi[-k:] == wj[:k]:
saved[i][j] = k
break
m = (1 << n)
dp = [[''] * n for _ in range(m)]
for state in range(m):
for j in range(n):
if state & (1 << j) == 0:
continue
if state == (1 << j):
dp[state][j] = A[j]
else:
for k in range(n):
if k == j:
continue
if state & (1 << k):
temp = dp[state ^ (1 << k)][j]
temp += A[k][saved[j][k]:]
if dp[state][k] == "" or len(dp[state][k]) > len(temp):
dp[state][k] = temp
mx = math.inf
ans = None
for j in range(n):
if len(dp[m - 1][j]) < mx:
mx = len(dp[m - 1][j])
ans = dp[m - 1][j]
return ans
| true
| true
|
7904d3f3a7c04185b608176b6b51803ef508283f
| 4,673
|
py
|
Python
|
AppVoor/tests/split_data_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | 3
|
2020-10-09T06:15:14.000Z
|
2021-04-27T02:04:28.000Z
|
AppVoor/tests/split_data_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | 17
|
2020-09-10T20:22:01.000Z
|
2020-12-21T04:57:03.000Z
|
AppVoor/tests/split_data_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import pandas as pd
import numpy as np
from resources.backend_scripts.is_data import DataEnsurer
from resources.backend_scripts.load_data import LoaderCreator
from resources.backend_scripts.split_data import SplitterReturner
class MyTestCase(unittest.TestCase):
_loader_creator = LoaderCreator()
def test_single_split_columns_match(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
expected_y_len, expected_x_len = df.shape # true prediction and data len with shape method
# shape returns original column value. x doesn't have prediction column, so it must be original value - 1
expected_x_len -= 1
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
x, y = splitter.split_x_y_from_df(df)
# do the values match in both x and y dataframes
self.assertEqual(len(x.columns), expected_x_len)
self.assertEqual(len(y), expected_y_len)
def test_single_split_returns_a_tuple(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
result = DataEnsurer.validate_py_data(data, tuple)
self.assertTrue(result)
def test_single_split_x_and_y_is_a_dataframe_and_numpy_array(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
results = [isinstance(data[0], pd.DataFrame), isinstance(data[-1], np.ndarray)]
# are all outputs True?
for r in results:
self.assertTrue(r)
def test_train_test_split_size_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
with self.assertRaises(ValueError):
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
            _ = splitter.train_and_test_split(x, y, 0.0)  # 0.0 is not a valid test size
def test_train_test_split_size_less_than_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# this should raise a ValueError because size = -0.5 is not a valid number
with self.assertRaises(ValueError):
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
_ = splitter.train_and_test_split(x, y, -0.5) # -0.5 is not a valid value
def test_split_into_x_and_y_is_not_a_valid_dataframe(self):
# dummy dictionary
temp_dict = {'x': [i for i in range(200)]}
# transform dictionary to dataframe
df = pd.DataFrame.from_dict(temp_dict)
        # this should raise a TypeError because the dataframe doesn't meet column requirements
with self.assertRaises(TypeError):
splitter = SplitterReturner()
_, _ = splitter.split_x_y_from_df(df)
if __name__ == '__main__':
unittest.main()
| 45.368932
| 117
| 0.673015
|
import unittest
import pandas as pd
import numpy as np
from resources.backend_scripts.is_data import DataEnsurer
from resources.backend_scripts.load_data import LoaderCreator
from resources.backend_scripts.split_data import SplitterReturner
class MyTestCase(unittest.TestCase):
_loader_creator = LoaderCreator()
def test_single_split_columns_match(self):
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
expected_y_len, expected_x_len = df.shape
expected_x_len -= 1
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
x, y = splitter.split_x_y_from_df(df)
# do the values match in both x and y dataframes
self.assertEqual(len(x.columns), expected_x_len)
self.assertEqual(len(y), expected_y_len)
def test_single_split_returns_a_tuple(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
result = DataEnsurer.validate_py_data(data, tuple)
self.assertTrue(result)
def test_single_split_x_and_y_is_a_dataframe_and_numpy_array(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
results = [isinstance(data[0], pd.DataFrame), isinstance(data[-1], np.ndarray)]
# are all outputs True?
for r in results:
self.assertTrue(r)
def test_train_test_split_size_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
with self.assertRaises(ValueError):
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
            _ = splitter.train_and_test_split(x, y, 0.0)  # 0.0 is not a valid test size
def test_train_test_split_size_less_than_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# this should raise a ValueError because size = -0.5 is not a valid number
with self.assertRaises(ValueError):
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
_ = splitter.train_and_test_split(x, y, -0.5) # -0.5 is not a valid value
def test_split_into_x_and_y_is_not_a_valid_dataframe(self):
# dummy dictionary
temp_dict = {'x': [i for i in range(200)]}
# transform dictionary to dataframe
df = pd.DataFrame.from_dict(temp_dict)
        # this should raise a TypeError because the dataframe doesn't meet column requirements
with self.assertRaises(TypeError):
splitter = SplitterReturner()
_, _ = splitter.split_x_y_from_df(df)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7904d504662d0624447ad55fb6784d57f192f352
| 4,116
|
py
|
Python
|
ABAGAIL_execution/flipflop.py
|
tirthajyoti/Randomized_Optimization
|
396f5092ed21574b8f773ad9493394922b6646b8
|
[
"MIT"
] | 7
|
2018-10-08T09:53:20.000Z
|
2021-10-22T03:31:28.000Z
|
Jython_Codes/flipflop.py
|
tirthajyoti/Randomized_optimization
|
396f5092ed21574b8f773ad9493394922b6646b8
|
[
"MIT"
] | null | null | null |
Jython_Codes/flipflop.py
|
tirthajyoti/Randomized_optimization
|
396f5092ed21574b8f773ad9493394922b6646b8
|
[
"MIT"
] | 7
|
2018-12-03T04:11:15.000Z
|
2021-08-10T11:44:10.000Z
|
"""
Backprop NN training on Madelon data (Feature selection complete)
"""
import os
import csv
import time
import sys
sys.path.append("C:/ABAGAIL/ABAGAIL.jar")
from func.nn.backprop import BackPropagationNetworkFactory
from shared import SumOfSquaresError, DataSet, Instance
from opt.example import NeuralNetworkOptimizationProblem
from func.nn.backprop import RPROPUpdateRule, BatchBackPropagationTrainer
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
from func.nn.activation import RELU
# Network parameters found "optimal" in Assignment 1
INPUT_LAYER = 31
HIDDEN_LAYER1 = 62
HIDDEN_LAYER2 = 62
HIDDEN_LAYER3 = 62
OUTPUT_LAYER = 1
TRAINING_ITERATIONS = 5001
OUTFILE = 'BACKPROP_LOG.txt'
def initialize_instances(infile):
"""Read the m_trg.csv CSV data into a list of instances."""
instances = []
    # Read in the CSV file. The handle must stay open while csv.reader
    # iterates over it, so it is closed after the loop rather than before.
    dat = open(infile, "r")
    reader = csv.reader(dat)
for row in reader:
instance = Instance([float(value) for value in row[:-1]])
if float(row[-1]) < 0:
instance.setLabel(Instance(0))
else:
instance.setLabel(Instance(1))
#instance.setLabel(Instance(0 if float(row[-1]) < 0 else 1))
instances.append(instance)
    dat.close()
    return instances
def errorOnDataSet(network,ds,measure):
N = len(ds)
error = 0.
correct = 0
incorrect = 0
for instance in ds:
network.setInputValues(instance.getData())
network.run()
actual = instance.getLabel().getContinuous()
predicted = network.getOutputValues().get(0)
predicted = max(min(predicted,1),0)
if abs(predicted - actual) < 0.5:
correct += 1
else:
incorrect += 1
output = instance.getLabel()
output_values = network.getOutputValues()
example = Instance(output_values, Instance(output_values.get(0)))
error += measure.value(output, example)
MSE = error/float(N)
acc = correct/float(correct+incorrect)
return MSE,acc
def train(oa, network, oaName, training_ints,validation_ints,testing_ints, measure):
"""Train a given network on a set of instances.
"""
print ("\nError results for {}\n---------------------------".format(oaName))
times = [0]
for iteration in xrange(TRAINING_ITERATIONS):
start = time.clock()
oa.train()
elapsed = time.clock()-start
times.append(times[-1]+elapsed)
if iteration % 10 == 0:
MSE_trg, acc_trg = errorOnDataSet(network,training_ints,measure)
MSE_val, acc_val = errorOnDataSet(network,validation_ints,measure)
MSE_tst, acc_tst = errorOnDataSet(network,testing_ints,measure)
            txt = '{},{},{},{},{},{},{},{}\n'.format(iteration, MSE_trg, MSE_val, MSE_tst, acc_trg, acc_val, acc_tst, times[-1])
print (txt)
#with open(OUTFILE,'a+') as f:
f=open(OUTFILE,'a+')
f.write(txt)
f.close()
def main():
"""Run this experiment"""
training_ints = initialize_instances('m_trg.csv')
testing_ints = initialize_instances('m_test.csv')
validation_ints = initialize_instances('m_val.csv')
factory = BackPropagationNetworkFactory()
measure = SumOfSquaresError()
data_set = DataSet(training_ints)
relu = RELU()
rule = RPROPUpdateRule()
oa_names = ["Backprop"]
classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2,HIDDEN_LAYER3, OUTPUT_LAYER],relu)
train(BatchBackPropagationTrainer(data_set,classification_network,measure,rule), classification_network, 'Backprop', training_ints,validation_ints,testing_ints, measure)
if __name__ == "__main__":
#with open(OUTFILE,'w') as f:
f=open(OUTFILE,'a+')
f.write('{},{},{},{},{},{},{},{}\n'.format('iteration','MSE_trg','MSE_val','MSE_tst','acc_trg','acc_val','acc_tst','elapsed'))
f.close()
main()
| 35.482759
| 173
| 0.670068
|
import os
import csv
import time
import sys
sys.path.append("C:/ABAGAIL/ABAGAIL.jar")
from func.nn.backprop import BackPropagationNetworkFactory
from shared import SumOfSquaresError, DataSet, Instance
from opt.example import NeuralNetworkOptimizationProblem
from func.nn.backprop import RPROPUpdateRule, BatchBackPropagationTrainer
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
from func.nn.activation import RELU
INPUT_LAYER = 31
HIDDEN_LAYER1 = 62
HIDDEN_LAYER2 = 62
HIDDEN_LAYER3 = 62
OUTPUT_LAYER = 1
TRAINING_ITERATIONS = 5001
OUTFILE = 'BACKPROP_LOG.txt'
def initialize_instances(infile):
instances = []
dat = open(infile,"r")
reader = csv.reader(dat)
    for row in reader:
        instance = Instance([float(value) for value in row[:-1]])
        if float(row[-1]) < 0:
            instance.setLabel(Instance(0))
        else:
            instance.setLabel(Instance(1))
        instances.append(instance)
    dat.close()
    return instances
def errorOnDataSet(network,ds,measure):
N = len(ds)
error = 0.
correct = 0
incorrect = 0
for instance in ds:
network.setInputValues(instance.getData())
network.run()
actual = instance.getLabel().getContinuous()
predicted = network.getOutputValues().get(0)
predicted = max(min(predicted,1),0)
if abs(predicted - actual) < 0.5:
correct += 1
else:
incorrect += 1
output = instance.getLabel()
output_values = network.getOutputValues()
example = Instance(output_values, Instance(output_values.get(0)))
error += measure.value(output, example)
MSE = error/float(N)
acc = correct/float(correct+incorrect)
return MSE,acc
def train(oa, network, oaName, training_ints,validation_ints,testing_ints, measure):
print ("\nError results for {}\n---------------------------".format(oaName))
times = [0]
for iteration in xrange(TRAINING_ITERATIONS):
start = time.clock()
oa.train()
elapsed = time.clock()-start
times.append(times[-1]+elapsed)
if iteration % 10 == 0:
MSE_trg, acc_trg = errorOnDataSet(network,training_ints,measure)
MSE_val, acc_val = errorOnDataSet(network,validation_ints,measure)
MSE_tst, acc_tst = errorOnDataSet(network,testing_ints,measure)
txt = '{},{},{},{},{},{},{},{}\n'.format(iteration,MSE_trg,MSE_val,MSE_tst,acc_trg,acc_val,acc_tst,times[-1]);
print (txt)
f=open(OUTFILE,'a+')
f.write(txt)
f.close()
def main():
training_ints = initialize_instances('m_trg.csv')
testing_ints = initialize_instances('m_test.csv')
validation_ints = initialize_instances('m_val.csv')
factory = BackPropagationNetworkFactory()
measure = SumOfSquaresError()
data_set = DataSet(training_ints)
relu = RELU()
rule = RPROPUpdateRule()
oa_names = ["Backprop"]
classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1,HIDDEN_LAYER2,HIDDEN_LAYER3, OUTPUT_LAYER],relu)
train(BatchBackPropagationTrainer(data_set,classification_network,measure,rule), classification_network, 'Backprop', training_ints,validation_ints,testing_ints, measure)
if __name__ == "__main__":
f=open(OUTFILE,'a+')
f.write('{},{},{},{},{},{},{},{}\n'.format('iteration','MSE_trg','MSE_val','MSE_tst','acc_trg','acc_val','acc_tst','elapsed'))
f.close()
main()
| true
| true
|
7904d65eb702ec4d934712289e2375641da3e5d8
| 5,752
|
py
|
Python
|
putty-src/contrib/kh2reg.py
|
dzaki236/putty
|
011a08bf9fd9c3913a7b070acfbfffc0fbc046df
|
[
"MIT"
] | 48
|
2016-06-10T14:12:28.000Z
|
2021-12-27T03:05:50.000Z
|
putty-src/contrib/kh2reg.py
|
dzaki236/putty
|
011a08bf9fd9c3913a7b070acfbfffc0fbc046df
|
[
"MIT"
] | 17
|
2016-06-01T06:49:26.000Z
|
2017-05-28T14:07:27.000Z
|
putty-src/contrib/kh2reg.py
|
dzaki236/putty
|
011a08bf9fd9c3913a7b070acfbfffc0fbc046df
|
[
"MIT"
] | 42
|
2016-10-13T16:01:25.000Z
|
2021-12-01T00:44:20.000Z
|
#! /usr/bin/env python
# Convert OpenSSH known_hosts and known_hosts2 files to "new format" PuTTY
# host keys.
# usage:
# kh2reg.py [ --win ] known_hosts1 2 3 4 ... > hosts.reg
# Creates a Windows .REG file (double-click to install).
# kh2reg.py --unix known_hosts1 2 3 4 ... > sshhostkeys
# Creates data suitable for storing in ~/.putty/sshhostkeys (Unix).
# Line endings are someone else's problem as is traditional.
# Originally developed for Python 1.5.2, but probably won't run on that
# any more.
import fileinput
import base64
import struct
import string
import re
import sys
import getopt
def winmungestr(s):
"Duplicate of PuTTY's mungestr() in winstore.c:1.10 for Registry keys"
candot = 0
r = ""
for c in s:
if c in ' \*?%~' or ord(c)<ord(' ') or (c == '.' and not candot):
r = r + ("%%%02X" % ord(c))
else:
r = r + c
candot = 1
return r
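# e.g. winmungestr(".ssh") == "%2Essh" and winmungestr("a*b") == "a%2Ab"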
def strtolong(s):
"Convert arbitrary-length big-endian binary data to a Python long"
bytes = struct.unpack(">%luB" % len(s), s)
return reduce ((lambda a, b: (long(a) << 8) + long(b)), bytes)
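# e.g. strtolong("\x01\x02") == 258L (big-endian: 0x01 * 256 + 0x02)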
def longtohex(n):
"""Convert long int to lower-case hex.
Ick, Python (at least in 1.5.2) doesn't appear to have a way to
turn a long int into an unadorned hex string -- % gets upset if the
number is too big, and raw hex() uses uppercase (sometimes), and
adds unwanted "0x...L" around it."""
plain=string.lower(re.match(r"0x([0-9A-Fa-f]*)l?$", hex(n), re.I).group(1))
return "0x" + plain
output_type = 'windows'
try:
optlist, args = getopt.getopt(sys.argv[1:], '', [ 'win', 'unix' ])
if filter(lambda x: x[0] == '--unix', optlist):
output_type = 'unix'
except getopt.error, e:
sys.stderr.write(str(e) + "\n")
sys.exit(1)
if output_type == 'windows':
# Output REG file header.
sys.stdout.write("""REGEDIT4
[HKEY_CURRENT_USER\Software\SimonTatham\PuTTY\SshHostKeys]
""")
class BlankInputLine(Exception):
pass
class UnknownKeyType(Exception):
def __init__(self, keytype):
self.keytype = keytype
# Now process all known_hosts input.
for line in fileinput.input(args):
try:
# Remove leading/trailing whitespace (should zap CR and LF)
line = string.strip (line)
# Skip blanks and comments
if line == '' or line[0] == '#':
raise BlankInputLine
# Split line on spaces.
fields = string.split (line, ' ')
# Common fields
hostpat = fields[0]
magicnumbers = [] # placeholder
keytype = "" # placeholder
# Grotty heuristic to distinguish known_hosts from known_hosts2:
# is second field entirely decimal digits?
if re.match (r"\d*$", fields[1]):
# Treat as SSH-1-type host key.
# Format: hostpat bits10 exp10 mod10 comment...
# (PuTTY doesn't store the number of bits.)
magicnumbers = map (long, fields[2:4])
keytype = "rsa"
else:
# Treat as SSH-2-type host key.
# Format: hostpat keytype keyblob64 comment...
sshkeytype, blob = fields[1], base64.decodestring (fields[2])
# 'blob' consists of a number of
# uint32 N (big-endian)
# uint8[N] field_data
subfields = []
while blob:
sizefmt = ">L"
(size,) = struct.unpack (sizefmt, blob[0:4])
size = int(size) # req'd for slicage
(data,) = struct.unpack (">%lus" % size, blob[4:size+4])
subfields.append(data)
blob = blob [struct.calcsize(sizefmt) + size : ]
# The first field is keytype again, and the rest we can treat as
# an opaque list of bignums (same numbers and order as stored
# by PuTTY). (currently embedded keytype is ignored entirely)
magicnumbers = map (strtolong, subfields[1:])
# Translate key type into something PuTTY can use.
if sshkeytype == "ssh-rsa": keytype = "rsa2"
elif sshkeytype == "ssh-dss": keytype = "dss"
else:
raise UnknownKeyType(sshkeytype)
# Now print out one line per host pattern, discarding wildcards.
for host in string.split (hostpat, ','):
if re.search (r"[*?!]", host):
sys.stderr.write("Skipping wildcard host pattern '%s'\n"
% host)
continue
elif re.match (r"\|", host):
sys.stderr.write("Skipping hashed hostname '%s'\n" % host)
continue
else:
m = re.match (r"\[([^]]*)\]:(\d*)$", host)
if m:
(host, port) = m.group(1,2)
port = int(port)
else:
port = 22
# Slightly bizarre output key format: 'type@port:hostname'
# XXX: does PuTTY do anything useful with literal IP[v4]s?
key = keytype + ("@%d:%s" % (port, host))
value = string.join (map (longtohex, magicnumbers), ',')
if output_type == 'unix':
# Unix format.
sys.stdout.write('%s %s\n' % (key, value))
else:
# Windows format.
# XXX: worry about double quotes?
sys.stdout.write("\"%s\"=\"%s\"\n"
% (winmungestr(key), value))
except UnknownKeyType, k:
sys.stderr.write("Unknown SSH key type '%s', skipping\n" % k.keytype)
except BlankInputLine:
pass
| 34.860606
| 79
| 0.546071
|
# Originally developed for Python 1.5.2, but probably won't run on that
import fileinput
import base64
import struct
import string
import re
import sys
import getopt
def winmungestr(s):
"Duplicate of PuTTY's mungestr() in winstore.c:1.10 for Registry keys"
candot = 0
r = ""
for c in s:
if c in ' \*?%~' or ord(c)<ord(' ') or (c == '.' and not candot):
r = r + ("%%%02X" % ord(c))
else:
r = r + c
candot = 1
return r
def strtolong(s):
"Convert arbitrary-length big-endian binary data to a Python long"
bytes = struct.unpack(">%luB" % len(s), s)
return reduce ((lambda a, b: (long(a) << 8) + long(b)), bytes)
def longtohex(n):
"""Convert long int to lower-case hex.
Ick, Python (at least in 1.5.2) doesn't appear to have a way to
turn a long int into an unadorned hex string -- % gets upset if the
number is too big, and raw hex() uses uppercase (sometimes), and
adds unwanted "0x...L" around it."""
plain=string.lower(re.match(r"0x([0-9A-Fa-f]*)l?$", hex(n), re.I).group(1))
return "0x" + plain
output_type = 'windows'
try:
optlist, args = getopt.getopt(sys.argv[1:], '', [ 'win', 'unix' ])
if filter(lambda x: x[0] == '--unix', optlist):
output_type = 'unix'
except getopt.error, e:
sys.stderr.write(str(e) + "\n")
sys.exit(1)
if output_type == 'windows':
sys.stdout.write("""REGEDIT4
[HKEY_CURRENT_USER\Software\SimonTatham\PuTTY\SshHostKeys]
""")
class BlankInputLine(Exception):
pass
class UnknownKeyType(Exception):
def __init__(self, keytype):
self.keytype = keytype
for line in fileinput.input(args):
try:
line = string.strip (line)
if line == '' or line[0] == '#':
raise BlankInputLine
fields = string.split (line, ' ')
hostpat = fields[0]
magicnumbers = []
keytype = ""
if re.match (r"\d*$", fields[1]):
magicnumbers = map (long, fields[2:4])
keytype = "rsa"
else:
# Treat as SSH-2-type host key.
# Format: hostpat keytype keyblob64 comment...
sshkeytype, blob = fields[1], base64.decodestring (fields[2])
# 'blob' consists of a number of
# uint32 N (big-endian)
# uint8[N] field_data
subfields = []
while blob:
sizefmt = ">L"
(size,) = struct.unpack (sizefmt, blob[0:4])
size = int(size) # req'd for slicage
(data,) = struct.unpack (">%lus" % size, blob[4:size+4])
subfields.append(data)
blob = blob [struct.calcsize(sizefmt) + size : ]
magicnumbers = map (strtolong, subfields[1:])
if sshkeytype == "ssh-rsa": keytype = "rsa2"
elif sshkeytype == "ssh-dss": keytype = "dss"
else:
raise UnknownKeyType(sshkeytype)
for host in string.split (hostpat, ','):
if re.search (r"[*?!]", host):
sys.stderr.write("Skipping wildcard host pattern '%s'\n"
% host)
continue
elif re.match (r"\|", host):
sys.stderr.write("Skipping hashed hostname '%s'\n" % host)
continue
else:
m = re.match (r"\[([^]]*)\]:(\d*)$", host)
if m:
(host, port) = m.group(1,2)
port = int(port)
else:
port = 22
key = keytype + ("@%d:%s" % (port, host))
value = string.join (map (longtohex, magicnumbers), ',')
if output_type == 'unix':
sys.stdout.write('%s %s\n' % (key, value))
else:
sys.stdout.write("\"%s\"=\"%s\"\n"
% (winmungestr(key), value))
except UnknownKeyType, k:
sys.stderr.write("Unknown SSH key type '%s', skipping\n" % k.keytype)
except BlankInputLine:
pass
| false
| true
|
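kh2reg.py above walks an SSH-2 key blob as a sequence of big-endian uint32
lengths, each followed by that many bytes of field data. A Python 3 sketch of the
same walk, assuming blob is a bytes object produced by base64.b64decode():

# Python 3 sketch of the length-prefixed field walk in kh2reg.py above.
import struct

def split_ssh2_blob(blob: bytes) -> list:
    """Split an SSH-2 key blob into its raw fields."""
    fields = []
    while blob:
        (size,) = struct.unpack(">L", blob[:4])  # uint32 N, big-endian
        fields.append(blob[4:4 + size])          # uint8[N] field_data
        blob = blob[4 + size:]
    return fields

# The first field is the key type (e.g. b"ssh-rsa"); the remaining fields are
# bignums, and int.from_bytes(field, "big") replaces the manual strtolong().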
7904d7ae495880d6c77746635ec9296ea2f7c6fe
| 176
|
py
|
Python
|
server/asot/manage/__main__.py
|
lun-4/asot
|
24d556af9695f7ac2f059bc7776fc59945a7ec0f
|
[
"BSD-3-Clause"
] | 1
|
2021-08-01T21:20:52.000Z
|
2021-08-01T21:20:52.000Z
|
server/asot/manage/__main__.py
|
lun-4/asot
|
24d556af9695f7ac2f059bc7776fc59945a7ec0f
|
[
"BSD-3-Clause"
] | null | null | null |
server/asot/manage/__main__.py
|
lun-4/asot
|
24d556af9695f7ac2f059bc7776fc59945a7ec0f
|
[
"BSD-3-Clause"
] | null | null | null |
# asot: Localhost tunneling
# Copyright 2021, Luna and asot contributors
# SPDX-License-Identifier: BSD-3-Clause
from .main import main
if __name__ == "__main__":
main()
| 19.555556
| 44
| 0.732955
|
from .main import main
if __name__ == "__main__":
main()
| true
| true
|
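The asot __main__.py above is the standard hook that makes a package runnable
with python -m. A sketch of the same convention for a hypothetical package named
mypkg:

# mypkg/__main__.py -- hypothetical package mirroring the asot layout above.
from .main import main   # the real entry point lives in mypkg/main.py

if __name__ == "__main__":
    main()                # invoked via: python -m mypkg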
7904d8f12d099241d7bd46edba01c296b097eab0
| 4,512
|
py
|
Python
|
examples/process_detail.py
|
hybridlogic/psutil
|
89ba47311d35c9f40ec51a73dc6d10a433360736
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T08:14:33.000Z
|
2019-01-05T08:14:33.000Z
|
examples/process_detail.py
|
hybridlogic/psutil
|
89ba47311d35c9f40ec51a73dc6d10a433360736
|
[
"BSD-3-Clause"
] | null | null | null |
examples/process_detail.py
|
hybridlogic/psutil
|
89ba47311d35c9f40ec51a73dc6d10a433360736
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print detailed information about a process.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
"""
import os
import datetime
import socket
import sys
import psutil
def convert_bytes(n):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
def print_(a, b):
if sys.stdout.isatty() and os.name == 'posix':
fmt = '\x1b[1;32m%-17s\x1b[0m %s' %(a, b)
else:
fmt = '%-15s %s' %(a, b)
# python 2/3 compatibility layer
sys.stdout.write(fmt + '\n')
sys.stdout.flush()
def run(pid):
ACCESS_DENIED = ''
try:
p = psutil.Process(pid)
pinfo = p.as_dict(ad_value=ACCESS_DENIED)
except psutil.NoSuchProcess:
sys.exit(str(sys.exc_info()[1]))
try:
if p.parent:
parent = '(%s)' % p.parent.name
else:
parent = ''
except psutil.Error:
parent = ''
started = datetime.datetime.fromtimestamp(pinfo['create_time']
                                             ).strftime('%Y-%m-%d %H:%M')  # %m is month-of-year (%M would be minutes)
io = pinfo.get('io_counters', None)
mem = '%s%% (resident=%s, virtual=%s) ' % (
round(pinfo['memory_percent'], 1),
convert_bytes(pinfo['memory_info'].rss),
convert_bytes(pinfo['memory_info'].vms))
children = p.get_children()
print_('pid', pinfo['pid'])
print_('name', pinfo['name'])
print_('exe', pinfo['exe'])
print_('parent', '%s %s' % (pinfo['ppid'], parent))
print_('cmdline', ' '.join(pinfo['cmdline']))
print_('started', started)
print_('user', pinfo['username'])
if os.name == 'posix':
print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
print_('terminal', pinfo['terminal'] or '')
if hasattr(p, 'getcwd'):
print_('cwd', pinfo['cwd'])
print_('memory', mem)
print_('cpu', '%s%% (user=%s, system=%s)' % (pinfo['cpu_percent'],
pinfo['cpu_times'].user,
pinfo['cpu_times'].system))
print_('status', pinfo['status'])
print_('niceness', pinfo['nice'])
print_('num threads', pinfo['num_threads'])
if io != ACCESS_DENIED:
print_('I/O', 'bytes-read=%s, bytes-written=%s' % \
(convert_bytes(io.read_bytes),
convert_bytes(io.write_bytes)))
if children:
print_('children', '')
for child in children:
print_('', 'pid=%s name=%s' % (child.pid, child.name))
if pinfo['open_files'] != ACCESS_DENIED:
print_('open files', '')
for file in pinfo['open_files']:
print_('', 'fd=%s %s ' % (file.fd, file.path))
if pinfo['threads']:
print_('running threads', '')
for thread in pinfo['threads']:
print_('', 'id=%s, user-time=%s, sys-time=%s' \
% (thread.id, thread.user_time, thread.system_time))
if pinfo['connections'] != ACCESS_DENIED:
print_('open connections', '')
for conn in pinfo['connections']:
if conn.type == socket.SOCK_STREAM:
type = 'TCP'
elif conn.type == socket.SOCK_DGRAM:
type = 'UDP'
else:
type = 'UNIX'
lip, lport = conn.local_address
if not conn.remote_address:
rip, rport = '*', '*'
else:
rip, rport = conn.remote_address
print_('', '%s:%s -> %s:%s type=%s status=%s' \
% (lip, lport, rip, rport, type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
if __name__ == '__main__':
sys.exit(main())
| 33.422222
| 79
| 0.505098
|
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import datetime
import socket
import sys
import psutil
def convert_bytes(n):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
def print_(a, b):
if sys.stdout.isatty() and os.name == 'posix':
fmt = '\x1b[1;32m%-17s\x1b[0m %s' %(a, b)
else:
fmt = '%-15s %s' %(a, b)
# python 2/3 compatibility layer
sys.stdout.write(fmt + '\n')
sys.stdout.flush()
def run(pid):
ACCESS_DENIED = ''
try:
p = psutil.Process(pid)
pinfo = p.as_dict(ad_value=ACCESS_DENIED)
except psutil.NoSuchProcess:
sys.exit(str(sys.exc_info()[1]))
try:
if p.parent:
parent = '(%s)' % p.parent.name
else:
parent = ''
except psutil.Error:
parent = ''
started = datetime.datetime.fromtimestamp(pinfo['create_time']
                                             ).strftime('%Y-%m-%d %H:%M')
io = pinfo.get('io_counters', None)
mem = '%s%% (resident=%s, virtual=%s) ' % (
round(pinfo['memory_percent'], 1),
convert_bytes(pinfo['memory_info'].rss),
convert_bytes(pinfo['memory_info'].vms))
children = p.get_children()
print_('pid', pinfo['pid'])
print_('name', pinfo['name'])
print_('exe', pinfo['exe'])
print_('parent', '%s %s' % (pinfo['ppid'], parent))
print_('cmdline', ' '.join(pinfo['cmdline']))
print_('started', started)
print_('user', pinfo['username'])
if os.name == 'posix':
print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
print_('terminal', pinfo['terminal'] or '')
if hasattr(p, 'getcwd'):
print_('cwd', pinfo['cwd'])
print_('memory', mem)
print_('cpu', '%s%% (user=%s, system=%s)' % (pinfo['cpu_percent'],
pinfo['cpu_times'].user,
pinfo['cpu_times'].system))
print_('status', pinfo['status'])
print_('niceness', pinfo['nice'])
print_('num threads', pinfo['num_threads'])
if io != ACCESS_DENIED:
print_('I/O', 'bytes-read=%s, bytes-written=%s' % \
(convert_bytes(io.read_bytes),
convert_bytes(io.write_bytes)))
if children:
print_('children', '')
for child in children:
print_('', 'pid=%s name=%s' % (child.pid, child.name))
if pinfo['open_files'] != ACCESS_DENIED:
print_('open files', '')
for file in pinfo['open_files']:
print_('', 'fd=%s %s ' % (file.fd, file.path))
if pinfo['threads']:
print_('running threads', '')
for thread in pinfo['threads']:
print_('', 'id=%s, user-time=%s, sys-time=%s' \
% (thread.id, thread.user_time, thread.system_time))
if pinfo['connections'] != ACCESS_DENIED:
print_('open connections', '')
for conn in pinfo['connections']:
if conn.type == socket.SOCK_STREAM:
type = 'TCP'
elif conn.type == socket.SOCK_DGRAM:
type = 'UDP'
else:
type = 'UNIX'
lip, lport = conn.local_address
if not conn.remote_address:
rip, rport = '*', '*'
else:
rip, rport = conn.remote_address
print_('', '%s:%s -> %s:%s type=%s status=%s' \
% (lip, lport, rip, rport, type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
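convert_bytes() in process_detail.py above maps a byte count onto binary prefixes
(K = 2**10, M = 2**20, and so on). A stand-alone check of its boundary behaviour:

# Self-contained restatement of convert_bytes() plus a few boundary checks.
def convert_bytes(n):
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}
    for s in reversed(symbols):
        if n >= prefix[s]:
            return '%.1f%s' % (float(n) / prefix[s], s)
    return "%sB" % n

assert convert_bytes(1023) == "1023B"        # below 1 KiB: raw byte count
assert convert_bytes(1024) == "1.0K"         # exactly 1 KiB
assert convert_bytes(10 * 1024 ** 2) == "10.0M"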
7904d95f3c654524ab1558185a6ecb5a5bac0bff
| 35
|
py
|
Python
|
controlinverilog/synthesis/__init__.py
|
simoore/control-in-verilog
|
9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb
|
[
"MIT"
] | null | null | null |
controlinverilog/synthesis/__init__.py
|
simoore/control-in-verilog
|
9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb
|
[
"MIT"
] | null | null | null |
controlinverilog/synthesis/__init__.py
|
simoore/control-in-verilog
|
9b00ff48c15c8c56458d1611eaa3fec6f4c94bdb
|
[
"MIT"
] | null | null | null |
from .optimizers import GAOptimizer
| 35
| 35
| 0.885714
|
from .optimizers import GAOptimizer
| true
| true
|
7904d97ef27ac1a1e1a75b8b6a9460cea433affe
| 5,008
|
py
|
Python
|
tests/wallet/test_singleton.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
tests/wallet/test_singleton.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
tests/wallet/test_singleton.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
from clvm_tools import binutils
from chinilla.types.blockchain_format.program import Program, INFINITE_COST
from chinilla.types.announcement import Announcement
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.condition_tools import parse_sexp_to_conditions
from chinilla.wallet.puzzles.load_clvm import load_clvm
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
P2_SINGLETON_MOD = load_clvm("p2_singleton.clvm")
POOL_MEMBER_MOD = load_clvm("pool_member_innerpuz.clvm")
POOL_WAITINGROOM_MOD = load_clvm("pool_waitingroom_innerpuz.clvm")
LAUNCHER_PUZZLE_HASH = LAUNCHER_PUZZLE.get_tree_hash()
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
LAUNCHER_ID = Program.to(b"launcher-id").get_tree_hash()
POOL_REWARD_PREFIX_VANILLANET = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000")
def singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> Program:
return SINGLETON_MOD.curry((SINGLETON_MOD_HASH, (launcher_id, launcher_puzzle_hash)), inner_puzzle)
def p2_singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash)
def singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> bytes32:
return singleton_puzzle(launcher_id, launcher_puzzle_hash, inner_puzzle).get_tree_hash()
def p2_singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32) -> bytes32:
return p2_singleton_puzzle(launcher_id, launcher_puzzle_hash).get_tree_hash()
def test_only_odd_coins():
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
# (SINGLETON_STRUCT INNER_PUZZLE lineage_proof my_amount inner_solution)
# SINGLETON_STRUCT = (MOD_HASH . (LAUNCHER_ID . LAUNCHER_PUZZLE_HASH))
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 200))")),
[0xDEADBEEF, 0xCAFEF00D, 200],
200,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception as e:
assert e.args == ("clvm raise", "80")
else:
assert False
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 201))")),
[0xDEADBEEF, 0xCAFED00D, 210],
205,
0,
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception:
assert False
def test_only_one_odd_coin_created():
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 205))")),
[0xDEADBEEF, 0xCAFEF00D, 411],
411,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception as e:
assert e.args == ("clvm raise", "80")
else:
assert False
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 204) (51 0xdeadbeef 202))")),
[0xDEADBEEF, 0xCAFEF00D, 411],
411,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception:
assert False
def test_p2_singleton():
# create a singleton. This should call driver code.
launcher_id = LAUNCHER_ID
innerpuz = Program.to(1)
singleton_full_puzzle = singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH, innerpuz)
# create a fake coin id for the `p2_singleton`
p2_singleton_coin_id = Program.to(["test_hash"]).get_tree_hash()
expected_announcement = Announcement(singleton_full_puzzle.get_tree_hash(), p2_singleton_coin_id).name()
# create a `p2_singleton` puzzle. This should call driver code.
p2_singleton_full = p2_singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH)
solution = Program.to([innerpuz.get_tree_hash(), p2_singleton_coin_id])
cost, result = p2_singleton_full.run_with_cost(INFINITE_COST, solution)
err, conditions = parse_sexp_to_conditions(result)
assert err is None
p2_singleton_full = p2_singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH)
solution = Program.to([innerpuz.get_tree_hash(), p2_singleton_coin_id])
cost, result = p2_singleton_full.run_with_cost(INFINITE_COST, solution)
assert result.first().rest().first().as_atom() == expected_announcement
assert conditions[0].vars[0] == expected_announcement
| 39.433071
| 115
| 0.720447
|
from clvm_tools import binutils
from chinilla.types.blockchain_format.program import Program, INFINITE_COST
from chinilla.types.announcement import Announcement
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.condition_tools import parse_sexp_to_conditions
from chinilla.wallet.puzzles.load_clvm import load_clvm
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
P2_SINGLETON_MOD = load_clvm("p2_singleton.clvm")
POOL_MEMBER_MOD = load_clvm("pool_member_innerpuz.clvm")
POOL_WAITINGROOM_MOD = load_clvm("pool_waitingroom_innerpuz.clvm")
LAUNCHER_PUZZLE_HASH = LAUNCHER_PUZZLE.get_tree_hash()
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
LAUNCHER_ID = Program.to(b"launcher-id").get_tree_hash()
POOL_REWARD_PREFIX_VANILLANET = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000")
def singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> Program:
return SINGLETON_MOD.curry((SINGLETON_MOD_HASH, (launcher_id, launcher_puzzle_hash)), inner_puzzle)
def p2_singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash)
def singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> bytes32:
return singleton_puzzle(launcher_id, launcher_puzzle_hash, inner_puzzle).get_tree_hash()
def p2_singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32) -> bytes32:
return p2_singleton_puzzle(launcher_id, launcher_puzzle_hash).get_tree_hash()
def test_only_odd_coins():
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 200))")),
[0xDEADBEEF, 0xCAFEF00D, 200],
200,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception as e:
assert e.args == ("clvm raise", "80")
else:
assert False
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 201))")),
[0xDEADBEEF, 0xCAFED00D, 210],
205,
0,
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception:
assert False
def test_only_one_odd_coin_created():
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 205))")),
[0xDEADBEEF, 0xCAFEF00D, 411],
411,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception as e:
assert e.args == ("clvm raise", "80")
else:
assert False
solution = Program.to(
[
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 204) (51 0xdeadbeef 202))")),
[0xDEADBEEF, 0xCAFEF00D, 411],
411,
[],
]
)
try:
cost, result = SINGLETON_MOD.run_with_cost(INFINITE_COST, solution)
except Exception:
assert False
def test_p2_singleton():
launcher_id = LAUNCHER_ID
innerpuz = Program.to(1)
singleton_full_puzzle = singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH, innerpuz)
p2_singleton_coin_id = Program.to(["test_hash"]).get_tree_hash()
expected_announcement = Announcement(singleton_full_puzzle.get_tree_hash(), p2_singleton_coin_id).name()
p2_singleton_full = p2_singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH)
solution = Program.to([innerpuz.get_tree_hash(), p2_singleton_coin_id])
cost, result = p2_singleton_full.run_with_cost(INFINITE_COST, solution)
err, conditions = parse_sexp_to_conditions(result)
assert err is None
p2_singleton_full = p2_singleton_puzzle(launcher_id, LAUNCHER_PUZZLE_HASH)
solution = Program.to([innerpuz.get_tree_hash(), p2_singleton_coin_id])
cost, result = p2_singleton_full.run_with_cost(INFINITE_COST, solution)
assert result.first().rest().first().as_atom() == expected_announcement
assert conditions[0].vars[0] == expected_announcement
| true
| true
|
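The singleton tests above assert an expected clvm failure with a try/except/else
chain ending in assert False. Under pytest the same intent can be written with
pytest.raises; a sketch, with the helper name and arguments chosen here for
illustration:

# Hypothetical pytest-style helper; mod, cost and solution correspond to
# SINGLETON_MOD, INFINITE_COST and the even-amount solution above.
import pytest

def assert_clvm_raise_80(mod, cost, solution):
    with pytest.raises(Exception) as excinfo:
        mod.run_with_cost(cost, solution)
    assert excinfo.value.args == ("clvm raise", "80")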
7904db5a91d997aa77b94f4f1b9ddf22ea17b6fe
| 3,609
|
py
|
Python
|
python/Load_and_Pickle_Scenario.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Load_and_Pickle_Scenario.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Load_and_Pickle_Scenario.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import numpy as np
import pickle
import timeit
from copy import deepcopy
import sys
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from copy import copy
from Solvers.Frank_Wolfe_Solver_Static import construct_igraph
import os
import inspect
import argparse
# Flag that indicates whether we are doing decomposition or not
decomposition_flag = False
# Open the gateway to the Java backend; it is closed again at the end of the script
connection = Java_Connection(decomposition_flag)
if connection.pid is not None:
# Contains local path to input configfile, for the three_links.xml network
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
scenario_name = 'scenario' #Scenario name
configfile = os.path.join(this_folder, os.path.pardir, 'configfiles', scenario_name+'.xml')
print "Loading data for: ",scenario_name
# File where to save the pickled objects
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
outputfile = os.path.join(this_folder, os.path.pardir, 'output', scenario_name + '.pickle')
coefficients = {} #BPR Coefficients
T = 3600 # Time horizon of interest
sim_dt = 0.0 # Duration of one time_step for the traffic model
sampling_dt = 600 # Duration of time_step for the solver, in this case it is equal to sim_dt
model_manager = Link_Model_Manager_class(configfile, "static", connection.gateway, sim_dt, "bpr", coefficients)
#Estimating bpr coefficients with beats
num_links = model_manager.otm_api.get_num_links()
avg_travel_time = np.zeros(num_links)
num_coeff = 5
for i in range(num_links):
fft= (model_manager.otm_api.get_link_with_id(long(i)).getFull_length() \
/ model_manager.otm_api.get_link_with_id(long(i)).get_ffspeed_mps()) / 3600
coefficients[long(i)] = np.zeros(num_coeff)
coefficients[i][0] = copy(fft)
coefficients[i][4] = copy(fft*0.15)
    # If scenario.beats_api is None, it means the configfile provided was not valid for the particular traffic model type
if model_manager.is_valid():
num_steps = T/sampling_dt
# Get the OD Matrix form Model Manager
# OD Matrix can also be initialized from another source, as long as it fits the OD_Matrix class format
OD_Matrix = model_manager.get_OD_Matrix(num_steps, sampling_dt)
if OD_Matrix is not None:
# Construct igraph object
traffic_scenario = model_manager.traffic_model
cost_function = model_manager.cost_function
num_of_links = traffic_scenario.beats_api.get_num_links()
graph_object = construct_igraph(traffic_scenario, cost_function)
# We are going to pickle the model manager, te OD_Matrix and the BPR coefficients
with open(outputfile, "wb") as f:
pickle.dump(num_links, f)
pickle.dump(cost_function, f)
pickle.dump(OD_Matrix, f)
pickle.dump(graph_object,f)
            # Read back the objects to make sure they got saved correctly
start_time1 = timeit.default_timer()
with open(outputfile, "rb") as f:
n_links = pickle.load(f)
c_function = pickle.load(f)
OD_M = pickle.load(f)
g_object = pickle.load(f)
elapsed1 = timeit.default_timer() - start_time1
print ("\nReading from Pickle object took %s seconds" % elapsed1)
connection.close()
| 37.989474
| 121
| 0.692158
|
import numpy as np
import pickle
import timeit
from copy import deepcopy
import sys
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from copy import copy
from Solvers.Frank_Wolfe_Solver_Static import construct_igraph
import os
import inspect
import argparse
decomposition_flag = False
connection = Java_Connection(decomposition_flag)
if connection.pid is not None:
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
scenario_name = 'scenario'
configfile = os.path.join(this_folder, os.path.pardir, 'configfiles', scenario_name+'.xml')
print "Loading data for: ",scenario_name
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
outputfile = os.path.join(this_folder, os.path.pardir, 'output', scenario_name + '.pickle')
coefficients = {}
T = 3600
sim_dt = 0.0
sampling_dt = 600
model_manager = Link_Model_Manager_class(configfile, "static", connection.gateway, sim_dt, "bpr", coefficients)
num_links = model_manager.otm_api.get_num_links()
avg_travel_time = np.zeros(num_links)
num_coeff = 5
for i in range(num_links):
fft= (model_manager.otm_api.get_link_with_id(long(i)).getFull_length() \
/ model_manager.otm_api.get_link_with_id(long(i)).get_ffspeed_mps()) / 3600
coefficients[long(i)] = np.zeros(num_coeff)
coefficients[i][0] = copy(fft)
coefficients[i][4] = copy(fft*0.15)
if model_manager.is_valid():
num_steps = T/sampling_dt
OD_Matrix = model_manager.get_OD_Matrix(num_steps, sampling_dt)
if OD_Matrix is not None:
traffic_scenario = model_manager.traffic_model
cost_function = model_manager.cost_function
num_of_links = traffic_scenario.beats_api.get_num_links()
graph_object = construct_igraph(traffic_scenario, cost_function)
with open(outputfile, "wb") as f:
pickle.dump(num_links, f)
pickle.dump(cost_function, f)
pickle.dump(OD_Matrix, f)
pickle.dump(graph_object,f)
start_time1 = timeit.default_timer()
with open(outputfile, "rb") as f:
n_links = pickle.load(f)
c_function = pickle.load(f)
OD_M = pickle.load(f)
g_object = pickle.load(f)
elapsed1 = timeit.default_timer() - start_time1
print ("\nReading from Pickle object took %s seconds" % elapsed1)
connection.close()
| false
| true
|
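The scenario script above serialises several objects into one pickle file with
consecutive dump() calls and reads them back with load() calls in the same order.
A self-contained sketch of that multi-object round trip:

# Minimal multi-object pickle round trip; load order must match dump order.
import os
import pickle
import tempfile

objs = (42, {"bpr": [1.0, 0, 0, 0, 0.15]}, ["od", "matrix"])
path = os.path.join(tempfile.gettempdir(), "scenario_demo.pickle")
with open(path, "wb") as f:
    for o in objs:
        pickle.dump(o, f)
with open(path, "rb") as f:
    loaded = tuple(pickle.load(f) for _ in objs)
assert loaded == objs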
7904dbb3a92b63a2ae8e4457061dbb94801dc44c
| 589
|
py
|
Python
|
Pdf2TimeTable/test.py
|
SCOTT-HAMILTON/Pdf2TimeTable
|
d9c8b2f1001865a356cdb61776b8b52adc42b2d3
|
[
"MIT"
] | null | null | null |
Pdf2TimeTable/test.py
|
SCOTT-HAMILTON/Pdf2TimeTable
|
d9c8b2f1001865a356cdb61776b8b52adc42b2d3
|
[
"MIT"
] | null | null | null |
Pdf2TimeTable/test.py
|
SCOTT-HAMILTON/Pdf2TimeTable
|
d9c8b2f1001865a356cdb61776b8b52adc42b2d3
|
[
"MIT"
] | null | null | null |
from timetableparser import TimeTableParser
from timetablewriter import TimeTableWriter
parser = TimeTableParser(False)
writer = TimeTableWriter(True)
# parser.decrypt_pdf("test/a.pdf", "out_a.pdf")
# parser.decrypt_pdf("test/b.pdf", "out_b.pdf")
csv_file_a = "test/output_week_a.csv"
csv_file_b = "test/output_week_b.csv"
# parser.extract_table_from_pdf("out_a.pdf", csv_file_a)
# parser.extract_table_from_pdf("out_b.pdf", csv_file_b)
writer.write_excel("Scott", parser.parse_csv(csv_file_a), parser.parse_csv(csv_file_b), "test/output.xlsx")
print("output file is `test/output.xlsx`")
| 42.071429
| 107
| 0.791171
|
from timetableparser import TimeTableParser
from timetablewriter import TimeTableWriter
parser = TimeTableParser(False)
writer = TimeTableWriter(True)
csv_file_a = "test/output_week_a.csv"
csv_file_b = "test/output_week_b.csv"
writer.write_excel("Scott", parser.parse_csv(csv_file_a), parser.parse_csv(csv_file_b), "test/output.xlsx")
print("output file is `test/output.xlsx`")
| true
| true
|
7904dc293da2ec589a63acebe187caea062976c7
| 1,578
|
py
|
Python
|
withPyGAD/ch06/cardTests.py
|
monfared01/GeneticAlgorithmsWithPython
|
1519efc6c87f225c089a84595379f5b682dcee8f
|
[
"Apache-2.0"
] | null | null | null |
withPyGAD/ch06/cardTests.py
|
monfared01/GeneticAlgorithmsWithPython
|
1519efc6c87f225c089a84595379f5b682dcee8f
|
[
"Apache-2.0"
] | null | null | null |
withPyGAD/ch06/cardTests.py
|
monfared01/GeneticAlgorithmsWithPython
|
1519efc6c87f225c089a84595379f5b682dcee8f
|
[
"Apache-2.0"
] | null | null | null |
import pygad
import functools
import operator
import numpy
def fitness_func(genes, solution_idx):
group1Sum = sum(genes[0:5])
group2Product = functools.reduce(operator.mul, genes[5:10])
duplicateCount = (len(genes) - len(set(genes)))
return 1 / ((abs(36 - group1Sum) + abs(360 - group2Product)) + 1) - duplicateCount
geneset = numpy.array([[i + 1 for i in range(10)], [i + 1 for i in range(10)]])
ga_instance = pygad.GA(num_generations=50,
num_parents_mating=1,
sol_per_pop=50,
fitness_func=fitness_func,
initial_population=None,
num_genes=10,
gene_type=int,
init_range_low=1,
init_range_high=10,
parent_selection_type="rank",
keep_parents=-1,
crossover_type=None,
mutation_type="swap",
mutation_percent_genes=40,
gene_space=[i + 1 for i in range(10)],
allow_duplicate_genes=False,
stop_criteria="reach_1")
ga_instance.run()
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print("Parameters of the best solution : {solution}".format(solution=solution))
print("Fitness value of the best solution = {solution_fitness}".format(
solution_fitness=solution_fitness))
print("Solution index of best solution = {solution_idx}".format(
solution_idx=solution_idx))
| 35.863636
| 86
| 0.581749
|
import pygad
import functools
import operator
import numpy
def fitness_func(genes, solution_idx):
group1Sum = sum(genes[0:5])
group2Product = functools.reduce(operator.mul, genes[5:10])
duplicateCount = (len(genes) - len(set(genes)))
return 1 / ((abs(36 - group1Sum) + abs(360 - group2Product)) + 1) - duplicateCount
geneset = numpy.array([[i + 1 for i in range(10)], [i + 1 for i in range(10)]])
ga_instance = pygad.GA(num_generations=50,
num_parents_mating=1,
sol_per_pop=50,
fitness_func=fitness_func,
initial_population=None,
num_genes=10,
gene_type=int,
init_range_low=1,
init_range_high=10,
parent_selection_type="rank",
keep_parents=-1,
crossover_type=None,
mutation_type="swap",
mutation_percent_genes=40,
gene_space=[i + 1 for i in range(10)],
allow_duplicate_genes=False,
stop_criteria="reach_1")
ga_instance.run()
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print("Parameters of the best solution : {solution}".format(solution=solution))
print("Fitness value of the best solution = {solution_fitness}".format(
solution_fitness=solution_fitness))
print("Solution index of best solution = {solution_idx}".format(
solution_idx=solution_idx))
| true
| true
|
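The fitness function above scores a permutation of the cards 1..10: the first
five genes should sum to 36, the last five should multiply to 360, and duplicate
genes are penalised. One known optimum is 2+7+8+9+10 = 36 with 1*3*4*5*6 = 360,
which scores exactly the stop_criteria target of 1:

# Verifies a known optimum of the card-split fitness defined above.
import functools
import operator

def fitness(genes):
    group1_sum = sum(genes[0:5])
    group2_product = functools.reduce(operator.mul, genes[5:10])
    duplicates = len(genes) - len(set(genes))
    return 1 / (abs(36 - group1_sum) + abs(360 - group2_product) + 1) - duplicates

assert fitness([2, 7, 8, 9, 10, 1, 3, 4, 5, 6]) == 1.0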
7904dc38398d706aeabf83211ad92f5c22266c00
| 4,216
|
py
|
Python
|
stratlib/sample_SMA.py
|
bopo/mooquant
|
244a87d4cd8b4d918eec4f16905e0921c3b39f50
|
[
"Apache-2.0"
] | 21
|
2017-09-07T16:08:21.000Z
|
2020-10-15T13:42:21.000Z
|
stratlib/sample_SMA.py
|
bopo/MooQuant
|
244a87d4cd8b4d918eec4f16905e0921c3b39f50
|
[
"Apache-2.0"
] | 209
|
2018-10-09T11:57:39.000Z
|
2021-03-25T21:40:30.000Z
|
stratlib/sample_SMA.py
|
bopo/MooQuant
|
244a87d4cd8b4d918eec4f16905e0921c3b39f50
|
[
"Apache-2.0"
] | 15
|
2018-11-17T20:14:37.000Z
|
2022-02-04T23:55:29.000Z
|
from mooquant import bar, strategy
from mooquant.analyzer import drawdown, returns, sharpe, trades
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
from mooquant.tools import tushare
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
# self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long
# position.
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(
self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
# self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print(bars[self.__instrument].getDateTime(),
bars[self.__instrument].getPrice())
# self.info("buy %s" % (bars.getDateTime()))
def testStrategy():
strat = thrSMA
instrument = '600288'
market = 'SH'
fromDate = '20150101'
toDate = '20150601'
frequency = bar.Frequency.MINUTE
plot = True
paras = [2, 20, 60, 10]
feeds = tushare.build_feed([instrument], 2016, 2017, "histdata/tushare")
strat = strat(feeds, instrument, *paras)
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
strat.run()
    # Sharpe ratio
sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # Maximum drawdown
maxdd = drawDownAnalyzer.getMaxDrawDown()
    # Cumulative return
return_ = retAnalyzer.getCumulativeReturns()[-1]
    # Return curve
return_list = []
for item in retAnalyzer.getCumulativeReturns():
return_list.append(item)
def run_strategy(ticker, account_id, paras):
print(ticker)
print(account_id)
print(paras)
strat = testStrategy()
if __name__ == "__main__":
testStrategy()
| 29.075862
| 99
| 0.617173
|
from mooquant import bar, strategy
from mooquant.analyzer import drawdown, returns, sharpe, trades
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
from mooquant.tools import tushare
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(
self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print(bars[self.__instrument].getDateTime(),
bars[self.__instrument].getPrice())
def testStrategy():
strat = thrSMA
instrument = '600288'
market = 'SH'
fromDate = '20150101'
toDate = '20150601'
frequency = bar.Frequency.MINUTE
plot = True
paras = [2, 20, 60, 10]
feeds = tushare.build_feed([instrument], 2016, 2017, "histdata/tushare")
strat = strat(feeds, instrument, *paras)
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
strat.run()
sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
maxdd = drawDownAnalyzer.getMaxDrawDown()
return_ = retAnalyzer.getCumulativeReturns()[-1]
return_list = []
for item in retAnalyzer.getCumulativeReturns():
return_list.append(item)
def run_strategy(ticker, account_id, paras):
print(ticker)
print(account_id)
print(paras)
strat = testStrategy()
if __name__ == "__main__":
testStrategy()
| true
| true
|
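thrSMA above goes long when the short SMA crosses above the mid SMA
(cross.cross_above) and exits on the opposite cross. A framework-free sketch of
that crossover test over plain Python lists; the price series is made up for the
check:

# Framework-free sketch of the cross-above test used by buyCon1() above.
def sma(prices, n):
    return [sum(prices[i - n + 1:i + 1]) / float(n) if i >= n - 1 else None
            for i in range(len(prices))]

def crossed_above(short, long_):
    # True if the short average moved from at-or-below to above the long one.
    if None in (short[-2], short[-1], long_[-2], long_[-1]):
        return False
    return short[-2] <= long_[-2] and short[-1] > long_[-1]

prices = [10, 10, 10, 9, 9, 10, 14]   # invented series ending in an upward cross
assert crossed_above(sma(prices, 2), sma(prices, 5))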
7904df6a990d890fbcbee5b17e02c4e4dcae8ac9
| 2,496
|
py
|
Python
|
doc/conf.py
|
yt87/pyiapws95
|
5fc7d4cda56a000d1b9de018131012dfc80e11ab
|
[
"0BSD"
] | null | null | null |
doc/conf.py
|
yt87/pyiapws95
|
5fc7d4cda56a000d1b9de018131012dfc80e11ab
|
[
"0BSD"
] | null | null | null |
doc/conf.py
|
yt87/pyiapws95
|
5fc7d4cda56a000d1b9de018131012dfc80e11ab
|
[
"0BSD"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'pyiapws95'
copyright = '2021, George Trojan'
author = 'George Trojan'
# The full version, including alpha/beta/rc tags
release = '0.1.1'
today_fmt = '%Y-%m-%d'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
# 'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
"numpydoc",
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_last_updated_fmt = today_fmt
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"pint": ("https://pint.readthedocs.io/en/stable/", None),
"numba": ("https://numba.pydata.org/numba-doc/latest/", None),
}
| 33.72973
| 79
| 0.658654
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
project = 'pyiapws95'
copyright = '2021, George Trojan'
author = 'George Trojan'
release = '0.1.1'
today_fmt = '%Y-%m-%d'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.napoleon',
"numpydoc",
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_last_updated_fmt = today_fmt
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"pint": ("https://pint.readthedocs.io/en/stable/", None),
"numba": ("https://numba.pydata.org/numba-doc/latest/", None),
}
| true
| true
|
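conf.py above enables sphinx.ext.extlinks but defines no extlinks mapping, so the
extension has nothing to expand. A sketch of what such a mapping could look like;
the alias and target URL are illustrative, not taken from the project:

# Hypothetical extlinks mapping; with it, :issue:`42` renders as a link.
extlinks = {
    "issue": ("https://github.com/yt87/pyiapws95/issues/%s", "issue %s"),
}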
7904dfa05c857c0fb5683c524f642a4c0c164c85
| 5,750
|
py
|
Python
|
photologue/tests/test_sites.py
|
elena/django-photologue
|
2bb2a91073855d7e53c1d4cfb2c704d2ebd7caab
|
[
"BSD-3-Clause"
] | null | null | null |
photologue/tests/test_sites.py
|
elena/django-photologue
|
2bb2a91073855d7e53c1d4cfb2c704d2ebd7caab
|
[
"BSD-3-Clause"
] | null | null | null |
photologue/tests/test_sites.py
|
elena/django-photologue
|
2bb2a91073855d7e53c1d4cfb2c704d2ebd7caab
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from django.contrib.sites.models import Site
from django.utils import unittest
from django.conf import settings
from .factories import GalleryFactory, PhotoFactory
class SitesTest(TestCase):
urls = 'photologue.tests.test_urls'
def setUp(self):
"""
Create two example sites that we can use to test what gets displayed
where.
"""
super(SitesTest, self).setUp()
self.site1, created1 = Site.objects.get_or_create(
domain="example.com", name="example.com")
self.site2, created2 = Site.objects.get_or_create(
domain="example.org", name="example.org")
with self.settings(PHOTOLOGUE_MULTISITE=True):
# Be explicit about linking Galleries/Photos to Sites."""
self.gallery1 = GalleryFactory(slug='test-gallery', sites=[self.site1])
self.gallery2 = GalleryFactory(slug='not-on-site-gallery')
self.photo1 = PhotoFactory(slug='test-photo', sites=[self.site1])
self.photo2 = PhotoFactory(slug='not-on-site-photo')
self.gallery1.photos.add(self.photo1, self.photo2)
            # I'd like to use factory_boy's mute_signal decorator but that
            # will only be available once factory_boy 2.4 is released. Until
            # then we have to remove the site association manually.
self.photo2.sites.clear()
def tearDown(self):
super(SitesTest, self).tearDown()
self.gallery1.delete()
self.gallery2.delete()
self.photo1.delete()
self.photo2.delete()
def test_basics(self):
""" See if objects were added automatically (by the factory) to the current site. """
self.assertEqual(list(self.gallery1.sites.all()), [self.site1])
self.assertEqual(list(self.photo1.sites.all()), [self.site1])
def test_auto_add_sites(self):
"""
Objects should not be automatically associated with a particular site when
``PHOTOLOGUE_MULTISITE`` is ``True``.
"""
with self.settings(PHOTOLOGUE_MULTISITE=False):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [self.site1])
self.assertEqual(list(photo.sites.all()), [self.site1])
photo.delete()
with self.settings(PHOTOLOGUE_MULTISITE=True):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [])
self.assertEqual(list(photo.sites.all()), [])
photo.delete()
def test_gallery_list(self):
response = self.client.get('/ptests/gallerylist/')
self.assertEqual(list(response.context['object_list']), [self.gallery1])
def test_gallery_detail(self):
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(response.context['object'], self.gallery1)
response = self.client.get('/ptests/gallery/not-on-site-gallery/')
self.assertEqual(response.status_code, 404)
def test_photo_list(self):
response = self.client.get('/ptests/photolist/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photo_detail(self):
response = self.client.get('/ptests/photo/test-photo/')
self.assertEqual(response.context['object'], self.photo1)
response = self.client.get('/ptests/photo/not-on-site-photo/')
self.assertEqual(response.status_code, 404)
def test_photo_archive(self):
response = self.client.get('/ptests/photo/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photos_in_gallery(self):
"""
Only those photos are supposed to be shown in a gallery that are
also associated with the current site.
"""
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(list(response.context['object'].public()), [self.photo1])
@unittest.skipUnless('django.contrib.sitemaps' in settings.INSTALLED_APPS,
'Sitemaps not installed in this project, nothing to test.')
def test_sitemap(self):
"""A sitemap should only show objects associated with the current site."""
response = self.client.get('/sitemap.xml')
# Check photos.
self.assertContains(response,
'<url><loc>http://example.com/ptests/photo/test-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/photo/not-on-site-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
# Check galleries.
self.assertContains(response,
'<url><loc>http://example.com/ptests/gallery/test-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/gallery/not-on-site-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
def test_orphaned_photos(self):
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery2.photos.add(self.photo2)
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery1.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
self.photo1.sites.clear()
self.photo2.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
| 41.071429
| 103
| 0.630261
|
from django.test import TestCase
from django.contrib.sites.models import Site
from django.utils import unittest
from django.conf import settings
from .factories import GalleryFactory, PhotoFactory
class SitesTest(TestCase):
urls = 'photologue.tests.test_urls'
def setUp(self):
super(SitesTest, self).setUp()
self.site1, created1 = Site.objects.get_or_create(
domain="example.com", name="example.com")
self.site2, created2 = Site.objects.get_or_create(
domain="example.org", name="example.org")
with self.settings(PHOTOLOGUE_MULTISITE=True):
self.gallery1 = GalleryFactory(slug='test-gallery', sites=[self.site1])
self.gallery2 = GalleryFactory(slug='not-on-site-gallery')
self.photo1 = PhotoFactory(slug='test-photo', sites=[self.site1])
self.photo2 = PhotoFactory(slug='not-on-site-photo')
self.gallery1.photos.add(self.photo1, self.photo2)
            # I'd like to use factory_boy's mute_signal decorator but that
            # will only be available once factory_boy 2.4 is released. Until
            # then we have to remove the site association manually.
self.photo2.sites.clear()
def tearDown(self):
super(SitesTest, self).tearDown()
self.gallery1.delete()
self.gallery2.delete()
self.photo1.delete()
self.photo2.delete()
def test_basics(self):
self.assertEqual(list(self.gallery1.sites.all()), [self.site1])
self.assertEqual(list(self.photo1.sites.all()), [self.site1])
def test_auto_add_sites(self):
with self.settings(PHOTOLOGUE_MULTISITE=False):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [self.site1])
self.assertEqual(list(photo.sites.all()), [self.site1])
photo.delete()
with self.settings(PHOTOLOGUE_MULTISITE=True):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [])
self.assertEqual(list(photo.sites.all()), [])
photo.delete()
def test_gallery_list(self):
response = self.client.get('/ptests/gallerylist/')
self.assertEqual(list(response.context['object_list']), [self.gallery1])
def test_gallery_detail(self):
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(response.context['object'], self.gallery1)
response = self.client.get('/ptests/gallery/not-on-site-gallery/')
self.assertEqual(response.status_code, 404)
def test_photo_list(self):
response = self.client.get('/ptests/photolist/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photo_detail(self):
response = self.client.get('/ptests/photo/test-photo/')
self.assertEqual(response.context['object'], self.photo1)
response = self.client.get('/ptests/photo/not-on-site-photo/')
self.assertEqual(response.status_code, 404)
def test_photo_archive(self):
response = self.client.get('/ptests/photo/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photos_in_gallery(self):
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(list(response.context['object'].public()), [self.photo1])
@unittest.skipUnless('django.contrib.sitemaps' in settings.INSTALLED_APPS,
'Sitemaps not installed in this project, nothing to test.')
def test_sitemap(self):
response = self.client.get('/sitemap.xml')
# Check photos.
self.assertContains(response,
'<url><loc>http://example.com/ptests/photo/test-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/photo/not-on-site-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
# Check galleries.
self.assertContains(response,
'<url><loc>http://example.com/ptests/gallery/test-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/gallery/not-on-site-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
def test_orphaned_photos(self):
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery2.photos.add(self.photo2)
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery1.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
self.photo1.sites.clear()
self.photo2.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
| true
| true
|
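SitesTest above toggles PHOTOLOGUE_MULTISITE through the self.settings() context
manager. Django offers the same override as a decorator, override_settings; a
sketch, not taken from photologue itself:

# Decorator form of the settings override used inline above.
from django.conf import settings
from django.test import TestCase, override_settings

class MultisiteFlagTest(TestCase):
    @override_settings(PHOTOLOGUE_MULTISITE=True)
    def test_flag_is_overridden(self):
        # Objects created here would see the flag exactly as inside
        # `with self.settings(PHOTOLOGUE_MULTISITE=True):` above.
        self.assertTrue(settings.PHOTOLOGUE_MULTISITE)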
7904dfaea529291b602c9162cc56458c2dd79fda
| 3,188
|
py
|
Python
|
dags/jenkins_dag.py
|
shameerb/incubator-airflow
|
a97b440eb989789cef43bf740fbd63d40d4b8f87
|
[
"Apache-2.0"
] | null | null | null |
dags/jenkins_dag.py
|
shameerb/incubator-airflow
|
a97b440eb989789cef43bf740fbd63d40d4b8f87
|
[
"Apache-2.0"
] | null | null | null |
dags/jenkins_dag.py
|
shameerb/incubator-airflow
|
a97b440eb989789cef43bf740fbd63d40d4b8f87
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import DAG
from airflow.contrib.operators.jenkins_job_trigger_operator import JenkinsJobTriggerOperator
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
from six.moves.urllib.request import Request
import jenkins
import logging
from datetime import datetime
from datetime import timedelta
datetime_start_date = datetime(2018, 5, 3)
default_args = {
"owner": "airflow",
"start_date": datetime_start_date,
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
"concurrency": 8,
"max_active_runs": 8
}
dag = DAG("test_jenkins", default_args=default_args, schedule_interval=None)
# This DAG shouldn't be executed and is only here to provide an example of how to use the JenkinsJobTriggerOperator
# (it requires a Jenkins server to run against)
job_trigger = JenkinsJobTriggerOperator(
dag=dag,
task_id="trigger_job",
job_name="red-beta-build-deploy",
parameters={"BRANCH":"origin/master", "USER_ENV":"shameer"},
#parameters="resources/paremeter.json", You can also pass a path to a json file containing your param
jenkins_connection_id="jenkins_nqa" #The connection must be configured first
)
def grabArtifactFromJenkins(**context):
"""
Grab an artifact from the previous job
    The python-jenkins library doesn't expose a method for that,
    but it's entirely possible to build the request manually.
"""
hook = JenkinsHook("jenkins_nqa")
jenkins_server = hook.get_jenkins_server()
url = context['task_instance'].xcom_pull(task_ids='trigger_job')
    #The JenkinsJobTriggerOperator stores the job url in the xcom variable corresponding to the task
#You can then use it to access things or to get the job number
#This url looks like : http://jenkins_url/job/job_name/job_number/
url = url + "artifact/myartifact.xml" #Or any other artifact name
self.log.info("url : %s", url)
request = Request(url)
response = jenkins_server.jenkins_open(request)
self.log.info("response: %s", response)
return response #We store the artifact content in a xcom variable for later use
artifact_grabber = PythonOperator(
task_id='artifact_grabber',
provide_context=True,
python_callable=grabArtifactFromJenkins,
dag=dag)
artifact_grabber.set_upstream(job_trigger)
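# --- Editor's illustrative sketch (not part of the original DAG) ---
# PythonOperator pushes grabArtifactFromJenkins' return value to XCom, so a
# downstream task can consume the artifact. Task ids match this DAG; the
# "processing" below is a placeholder, not code from this repository.
def process_artifact(**context):
    artifact = context['task_instance'].xcom_pull(task_ids='artifact_grabber')
    print("artifact length: %s" % len(artifact or ""))
artifact_processor = PythonOperator(
    task_id='artifact_processor',
    provide_context=True,
    python_callable=process_artifact,
    dag=dag)
artifact_processor.set_upstream(artifact_grabber)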
| 37.505882
| 111
| 0.754391
|
from airflow import DAG
from airflow.contrib.operators.jenkins_job_trigger_operator import JenkinsJobTriggerOperator
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
from six.moves.urllib.request import Request
import jenkins
from datetime import datetime
from datetime import timedelta
datetime_start_date = datetime(2018, 5, 3)
default_args = {
"owner": "airflow",
"start_date": datetime_start_date,
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
"concurrency": 8,
"max_active_runs": 8
}
dag = DAG("test_jenkins", default_args=default_args, schedule_interval=None)
#(it requires a jenkins server to be executed)
job_trigger = JenkinsJobTriggerOperator(
dag=dag,
task_id="trigger_job",
job_name="red-beta-build-deploy",
parameters={"BRANCH":"origin/master", "USER_ENV":"shameer"},
#parameters="resources/paremeter.json", You can also pass a path to a json file containing your param
jenkins_connection_id="jenkins_nqa" #The connection must be configured first
)
def grabArtifactFromJenkins(**context):
hook = JenkinsHook("jenkins_nqa")
jenkins_server = hook.get_jenkins_server()
url = context['task_instance'].xcom_pull(task_ids='trigger_job')
    #The JenkinsJobTriggerOperator stores the job url in the xcom variable corresponding to the task
#You can then use it to access things or to get the job number
#This url looks like : http://jenkins_url/job/job_name/job_number/
url = url + "artifact/myartifact.xml" #Or any other artifact name
self.log.info("url : %s", url)
request = Request(url)
response = jenkins_server.jenkins_open(request)
self.log.info("response: %s", response)
return response #We store the artifact content in a xcom variable for later use
artifact_grabber = PythonOperator(
task_id='artifact_grabber',
provide_context=True,
python_callable=grabArtifactFromJenkins,
dag=dag)
artifact_grabber.set_upstream(job_trigger)
| true
| true
|
7904e024853431a220c615209e7e4d10c1cef2af
| 5,701
|
py
|
Python
|
django/engagementmanager/utils/exception_message_factory.py
|
onap/vvp-engagementmgr
|
8d2108708e7c55cc753b956563c535177f92d0d9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
django/engagementmanager/utils/exception_message_factory.py
|
onap/vvp-engagementmgr
|
8d2108708e7c55cc753b956563c535177f92d0d9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
django/engagementmanager/utils/exception_message_factory.py
|
onap/vvp-engagementmgr
|
8d2108708e7c55cc753b956563c535177f92d0d9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-10-19T15:17:09.000Z
|
2021-10-19T15:17:09.000Z
|
#
# ============LICENSE_START==========================================
# org.onap.vvp/engagementmgr
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the “License”);
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import CommandError
from engagementmanager.utils.vvp_exceptions import VvpObjectNotAvailable, \
VvpGeneralException, VvpBadRequest, VvpConflict
from itsdangerous import SignatureExpired
from requests import ConnectionError
from rest_framework import status
from rest_framework.exceptions import MethodNotAllowed, NotAuthenticated, \
PermissionDenied, NotAcceptable
class ExceptionMessageFactory:
messages_dictionary = {
ObjectDoesNotExist.__name__: {
'msg': 'User or Password does not match',
'include_exception': False,
'status': status.HTTP_404_NOT_FOUND},
MethodNotAllowed.__name__: {
'msg': 'Method not allowed: ',
'include_exception': True,
'status':
status.HTTP_405_METHOD_NOT_ALLOWED},
NotAuthenticated.__name__: {
'msg': 'You must authenticate in order to perform this action: ',
'include_exception': True, 'status': status.HTTP_403_FORBIDDEN},
SignatureExpired.__name__: {
'msg': 'Signature expired for this token: ',
'include_exception': True,
'status':
status.HTTP_405_METHOD_NOT_ALLOWED},
KeyError.__name__: {
'msg': 'KeyError occurred over the backend.',
'include_exception': True,
'include_additional_exc_str': True, 'status':
status.HTTP_400_BAD_REQUEST},
ValueError.__name__: {
'msg': 'ValueError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
ConnectionError.__name__: {
'msg': 'ConnectionError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
ImportError.__name__: {
'msg': 'ImportError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
CommandError.__name__: {
'msg': 'CommandError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
PermissionDenied.__name__: {
'msg': 'PermissionDenied occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_401_UNAUTHORIZED},
VvpObjectNotAvailable.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_410_GONE},
NotAcceptable.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_403_FORBIDDEN},
VvpGeneralException.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
FileExistsError.__name__: {
'msg': 'Not modified due to: ', 'include_exception': True,
'status': status.HTTP_304_NOT_MODIFIED},
VvpBadRequest.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_400_BAD_REQUEST},
VvpConflict.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_409_CONFLICT},
Exception.__name__: {
'msg': 'General error on backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
}
def get_exception_message(self, exception):
if isinstance(exception, ObjectDoesNotExist):
result = self.messages_dictionary[ObjectDoesNotExist.__name__]
elif exception.__class__.__name__ in self.messages_dictionary:
result = self.messages_dictionary[exception.__class__.__name__]
else:
result = self.messages_dictionary[Exception.__name__]
return result
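# --- Editor's illustrative usage (not part of the original module) ---
# Mapping a caught exception to a message/status pair; the surrounding view
# wiring below is a plausible caller sketch, not code from this repository.
# factory = ExceptionMessageFactory()
# try:
#     do_work()
# except Exception as exc:
#     entry = factory.get_exception_message(exc)
#     detail = entry['msg'] + (str(exc) if entry['include_exception'] else '')
#     return Response({'detail': detail}, status=entry['status'])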
| 44.889764
| 77
| 0.641642
|
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import CommandError
from engagementmanager.utils.vvp_exceptions import VvpObjectNotAvailable, \
VvpGeneralException, VvpBadRequest, VvpConflict
from itsdangerous import SignatureExpired
from requests import ConnectionError
from rest_framework import status
from rest_framework.exceptions import MethodNotAllowed, NotAuthenticated, \
PermissionDenied, NotAcceptable
class ExceptionMessageFactory:
messages_dictionary = {
ObjectDoesNotExist.__name__: {
'msg': 'User or Password does not match',
'include_exception': False,
'status': status.HTTP_404_NOT_FOUND},
MethodNotAllowed.__name__: {
'msg': 'Method not allowed: ',
'include_exception': True,
'status':
status.HTTP_405_METHOD_NOT_ALLOWED},
NotAuthenticated.__name__: {
'msg': 'You must authenticate in order to perform this action: ',
'include_exception': True, 'status': status.HTTP_403_FORBIDDEN},
SignatureExpired.__name__: {
'msg': 'Signature expired for this token: ',
'include_exception': True,
'status':
status.HTTP_405_METHOD_NOT_ALLOWED},
KeyError.__name__: {
'msg': 'KeyError occurred over the backend.',
'include_exception': True,
'include_additional_exc_str': True, 'status':
status.HTTP_400_BAD_REQUEST},
ValueError.__name__: {
'msg': 'ValueError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
ConnectionError.__name__: {
'msg': 'ConnectionError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
ImportError.__name__: {
'msg': 'ImportError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
CommandError.__name__: {
'msg': 'CommandError occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
PermissionDenied.__name__: {
'msg': 'PermissionDenied occurred over the backend: ',
'include_exception': True,
'status': status.HTTP_401_UNAUTHORIZED},
VvpObjectNotAvailable.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_410_GONE},
NotAcceptable.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_403_FORBIDDEN},
VvpGeneralException.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
FileExistsError.__name__: {
'msg': 'Not modified due to: ', 'include_exception': True,
'status': status.HTTP_304_NOT_MODIFIED},
VvpBadRequest.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_400_BAD_REQUEST},
VvpConflict.__name__: {
'msg': '', 'include_exception': True,
'status': status.HTTP_409_CONFLICT},
Exception.__name__: {
'msg': 'General error on backend: ',
'include_exception': True,
'status': status.HTTP_500_INTERNAL_SERVER_ERROR},
}
def get_exception_message(self, exception):
if isinstance(exception, ObjectDoesNotExist):
result = self.messages_dictionary[ObjectDoesNotExist.__name__]
elif exception.__class__.__name__ in self.messages_dictionary:
result = self.messages_dictionary[exception.__class__.__name__]
else:
result = self.messages_dictionary[Exception.__name__]
return result
| true
| true
|
7904e0e14640275bde51aabd779455282e94fba6
| 2,323
|
py
|
Python
|
relex/modules/offset_embedders/sine_offset_embedder.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 16
|
2020-04-21T19:04:23.000Z
|
2021-08-03T04:30:43.000Z
|
relex/modules/offset_embedders/sine_offset_embedder.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 3
|
2020-07-25T12:29:21.000Z
|
2021-06-11T02:06:58.000Z
|
relex/modules/offset_embedders/sine_offset_embedder.py
|
DFKI-NLP/RelEx
|
0826c02f793b78bf8b7b7001c2e3fdfdb25c1ad2
|
[
"Apache-2.0"
] | 2
|
2020-06-25T12:50:57.000Z
|
2020-11-01T10:31:04.000Z
|
import torch
import numpy as np
from allennlp.nn import util
from relex.modules.offset_embedders import OffsetEmbedder
def position_encoding_init(n_position: int, embedding_dim: int):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / embedding_dim)
for j in range(embedding_dim)]
if pos != 0 else np.zeros(embedding_dim)
for pos in range(n_position)])
# apply sin on 0th,2nd,4th...embedding_dim
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
# apply cos on 1st,3rd,5th...embedding_dim
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
return torch.from_numpy(position_enc).type(torch.FloatTensor)
@OffsetEmbedder.register("sine")
class SineOffsetEmbedder(OffsetEmbedder):
def __init__(self, n_position: int, embedding_dim: int) -> None:
super(SineOffsetEmbedder, self).__init__()
self._n_position = n_position
self._embedding_dim = embedding_dim
self._embedding = torch.nn.Embedding(2 * n_position + 1,
embedding_dim,
padding_idx=0)
self._embedding.weight.data = position_encoding_init(2 * n_position + 1,
embedding_dim)
# TODO: add zero vector for padding
def get_output_dim(self) -> int:
return self._embedding_dim
def is_additive(self) -> bool:
return True
def forward(self,
inputs: torch.Tensor,
mask: torch.Tensor,
span: torch.Tensor) -> torch.Tensor:
# pylint: disable=arguments-differ
# input -> [B x seq_len x d], offset -> [B x 2]
batch_size, seq_len, _ = inputs.size()
offset = span[:, 0]
position_range = util.get_range_vector(
seq_len, util.get_device_of(inputs)).repeat((batch_size, 1))
relative_positions = (1 + self._n_position
+ position_range
- offset.unsqueeze(dim=1))
# mask padding so it won't receive a positional embedding
relative_positions = relative_positions * mask.long()
return self._embedding(relative_positions)
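if __name__ == "__main__":
    # Editor's sketch (not part of the original module): a shape check only.
    # span[:, 0] is treated as the entity's start offset; positions are shifted
    # by n_position + 1 so that index 0 stays reserved for padding.
    embedder = SineOffsetEmbedder(n_position=10, embedding_dim=8)
    inputs = torch.zeros(2, 5, 8)
    mask = torch.ones(2, 5, dtype=torch.long)
    span = torch.tensor([[1, 3], [0, 2]])
    print(embedder(inputs, mask, span).shape)  # torch.Size([2, 5, 8])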
| 38.081967
| 81
| 0.584158
|
import torch
import numpy as np
from allennlp.nn import util
from relex.modules.offset_embedders import OffsetEmbedder
def position_encoding_init(n_position: int, embedding_dim: int):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / embedding_dim)
for j in range(embedding_dim)]
if pos != 0 else np.zeros(embedding_dim)
for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
return torch.from_numpy(position_enc).type(torch.FloatTensor)
@OffsetEmbedder.register("sine")
class SineOffsetEmbedder(OffsetEmbedder):
def __init__(self, n_position: int, embedding_dim: int) -> None:
super(SineOffsetEmbedder, self).__init__()
self._n_position = n_position
self._embedding_dim = embedding_dim
self._embedding = torch.nn.Embedding(2 * n_position + 1,
embedding_dim,
padding_idx=0)
self._embedding.weight.data = position_encoding_init(2 * n_position + 1,
embedding_dim)
def get_output_dim(self) -> int:
return self._embedding_dim
def is_additive(self) -> bool:
return True
def forward(self,
inputs: torch.Tensor,
mask: torch.Tensor,
span: torch.Tensor) -> torch.Tensor:
batch_size, seq_len, _ = inputs.size()
offset = span[:, 0]
position_range = util.get_range_vector(
seq_len, util.get_device_of(inputs)).repeat((batch_size, 1))
relative_positions = (1 + self._n_position
+ position_range
- offset.unsqueeze(dim=1))
relative_positions = relative_positions * mask.long()
return self._embedding(relative_positions)
| true
| true
|
7904e0f35f99b5e4825d4214348e1dd1ae2ef821
| 1,015
|
py
|
Python
|
rollbar/examples/starlette/app_global_request.py
|
jackton1/pyrollbar
|
eb93f3b6200c624a2986d66ef7418520a6b77504
|
[
"MIT"
] | 177
|
2015-02-02T19:22:15.000Z
|
2022-01-24T07:20:04.000Z
|
rollbar/examples/starlette/app_global_request.py
|
jackton1/pyrollbar
|
eb93f3b6200c624a2986d66ef7418520a6b77504
|
[
"MIT"
] | 293
|
2015-01-04T23:24:56.000Z
|
2022-02-14T18:23:02.000Z
|
rollbar/examples/starlette/app_global_request.py
|
jackton1/pyrollbar
|
eb93f3b6200c624a2986d66ef7418520a6b77504
|
[
"MIT"
] | 121
|
2015-02-06T21:43:51.000Z
|
2022-02-14T11:13:33.000Z
|
#!/usr/bin/env python
# This example uses Uvicorn package that must be installed. However, it can be
# replaced with any other ASGI-compliant server.
#
# NOTE: Python 3.6 requires aiocontextvars package to be installed.
#
# Run: python app_global_request.py
import rollbar
import uvicorn
from rollbar.contrib.starlette import LoggerMiddleware
from starlette.applications import Starlette
from starlette.responses import JSONResponse
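# Editor's note: pyrollbar is normally initialised before the middleware is
# used; the access token below is a placeholder, not a value from this repo.
# rollbar.init('ACCESS_TOKEN', environment='development')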
# Integrate Rollbar with Starlette application
app = Starlette()
app.add_middleware(LoggerMiddleware) # should be added as the last middleware
async def get_user_agent():
# Global access to the current request object
request = rollbar.get_request()
user_agent = request.headers['User-Agent']
return user_agent
# $ curl -i http://localhost:8888
@app.route('/')
async def root(request):
user_agent = await get_user_agent()
return JSONResponse({'user-agent': user_agent})
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8888)
| 26.025641
| 78
| 0.759606
|
import rollbar
import uvicorn
from rollbar.contrib.starlette import LoggerMiddleware
from starlette.applications import Starlette
from starlette.responses import JSONResponse
app = Starlette()
app.add_middleware(LoggerMiddleware)
async def get_user_agent():
request = rollbar.get_request()
user_agent = request.headers['User-Agent']
return user_agent
@app.route('/')
async def root(request):
user_agent = await get_user_agent()
return JSONResponse({'user-agent': user_agent})
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8888)
| true
| true
|
7904e12f4f4b43368459ad027c5fcdb2f23291d1
| 384
|
py
|
Python
|
umetnine/artists/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
umetnine/artists/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
umetnine/artists/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Arts, Comments, Tags, ArtworksTags, Stili, Umetnina, Umetnik
# Register your models here.
admin.site.register(Umetnik)
admin.site.register(Umetnina)
admin.site.register(Stili)
admin.site.register(Arts)
admin.site.register(Comments)
admin.site.register(Tags)
admin.site.register(ArtworksTags)
# admin.site.register(ArtworkLikes)
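# Editor's sketch (not from this repo): a ModelAdmin subclass would customise
# the change list and replace the plain registration above, e.g.:
# @admin.register(Umetnina)
# class UmetninaAdmin(admin.ModelAdmin):
#     list_display = ("id",)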
| 25.6
| 80
| 0.807292
|
from django.contrib import admin
from .models import Arts, Comments, Tags, ArtworksTags, Stili, Umetnina, Umetnik
admin.site.register(Umetnik)
admin.site.register(Umetnina)
admin.site.register(Stili)
admin.site.register(Arts)
admin.site.register(Comments)
admin.site.register(Tags)
admin.site.register(ArtworksTags)
| true
| true
|
7904e17e4ab1e008ef48e5b09f2c8a8c42d9a4d4
| 19,691
|
py
|
Python
|
src/electionguard/encrypt.py
|
john-s-morgan/electionguard-python
|
f0a25b0ac99fac5c8d4e3545055dbdd05968d021
|
[
"MIT"
] | null | null | null |
src/electionguard/encrypt.py
|
john-s-morgan/electionguard-python
|
f0a25b0ac99fac5c8d4e3545055dbdd05968d021
|
[
"MIT"
] | null | null | null |
src/electionguard/encrypt.py
|
john-s-morgan/electionguard-python
|
f0a25b0ac99fac5c8d4e3545055dbdd05968d021
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Optional
from uuid import getnode
from .ballot import (
CiphertextBallot,
CiphertextBallotContest,
CiphertextBallotSelection,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_ballot_contest,
make_ciphertext_ballot_selection,
make_ciphertext_ballot,
)
from .ballot_code import get_hash_for_device
from .election import CiphertextElectionContext
from .elgamal import elgamal_encrypt
from .group import ElementModP, ElementModQ, rand_q
from .logs import log_info, log_warning
from .manifest import (
InternalManifest,
ContestDescription,
ContestDescriptionWithPlaceholders,
SelectionDescription,
)
from .nonces import Nonces
from .serializable import Serializable
from .utils import get_optional, get_or_else_optional_func
class EncryptionDevice(Serializable):
"""
Metadata for encryption device
"""
device_id: int
"""Unique identifier for device"""
session_id: int
"""Used to identify session and protect the timestamp"""
launch_code: int
"""Election initialization value"""
location: str
"""Arbitary string to designate the location of device"""
def __init__(
self,
device_id: int,
session_id: int,
launch_code: int,
location: str,
) -> None:
self.device_id = device_id
self.session_id = session_id
self.launch_code = launch_code
self.location = location
log_info(f": EncryptionDevice: Created: UUID: {device_id} at: {location}")
def get_hash(self) -> ElementModQ:
"""
Get hash for encryption device
:return: Starting hash
"""
return get_hash_for_device(
self.device_id, self.session_id, self.launch_code, self.location
)
# pylint: disable=no-self-use
def get_timestamp(self) -> int:
"""
Get the current timestamp in utc
"""
return int(datetime.utcnow().timestamp())
class EncryptionMediator:
"""
An object for caching election and encryption state.
It composes Elections and Ballots.
"""
_internal_manifest: InternalManifest
_context: CiphertextElectionContext
_encryption_seed: ElementModQ
def __init__(
self,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_device: EncryptionDevice,
):
self._internal_manifest = internal_manifest
self._context = context
self._encryption_seed = encryption_device.get_hash()
def encrypt(self, ballot: PlaintextBallot) -> Optional[CiphertextBallot]:
"""
Encrypt the specified ballot using the cached election context.
"""
log_info(f" encrypt: objectId: {ballot.object_id}")
encrypted_ballot = encrypt_ballot(
ballot, self._internal_manifest, self._context, self._encryption_seed
)
if encrypted_ballot is not None and encrypted_ballot.code is not None:
self._encryption_seed = encrypted_ballot.code
return encrypted_ballot
def generate_device_uuid() -> int:
"""
Get unique identifier for device
:return: Unique identifier
"""
return getnode()
def selection_from(
description: SelectionDescription,
is_placeholder: bool = False,
is_affirmative: bool = False,
) -> PlaintextBallotSelection:
"""
Construct a `BallotSelection` from a specific `SelectionDescription`.
This function is useful for filling selections when a voter undervotes a ballot.
    It is also used to create placeholder representations when generating the `ConstantChaumPedersenProof`.
:param description: The `SelectionDescription` which provides the relevant `object_id`
:param is_placeholder: Mark this selection as a placeholder value
:param is_affirmative: Mark this selection as `yes`
:return: A BallotSelection
"""
return PlaintextBallotSelection(
description.object_id,
vote=1 if is_affirmative else 0,
is_placeholder_selection=is_placeholder,
)
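# Example (editor's illustration): an explicit "yes" vote for a selection
# description `sd` is selection_from(sd, is_affirmative=True), which yields
# PlaintextBallotSelection(sd.object_id, vote=1, is_placeholder_selection=False).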
def contest_from(description: ContestDescription) -> PlaintextBallotContest:
"""
Construct a `BallotContest` from a specific `ContestDescription` with all false fields.
This function is useful for filling contests and selections when a voter undervotes a ballot.
:param description: The `ContestDescription` used to derive the well-formed `BallotContest`
:return: a `BallotContest`
"""
selections: List[PlaintextBallotSelection] = list()
for selection_description in description.ballot_selections:
selections.append(selection_from(selection_description))
return PlaintextBallotContest(description.object_id, selections)
def encrypt_selection(
selection: PlaintextBallotSelection,
selection_description: SelectionDescription,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
is_placeholder: bool = False,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotSelection]:
"""
Encrypt a specific `BallotSelection` in the context of a specific `BallotContest`
:param selection: the selection in the valid input form
:param selection_description: the `SelectionDescription` from the
`ContestDescription` which defines this selection's structure
:param elgamal_public_key: the public key (K) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this selection.
this value can be (or derived from) the BallotContest nonce, but no relationship is required
:param is_placeholder: specifies if this is a placeholder selection
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Validate Input
if not selection.is_valid(selection_description.object_id):
log_warning(f"malformed input selection: {selection}")
return None
selection_description_hash = selection_description.crypto_hash()
nonce_sequence = Nonces(selection_description_hash, nonce_seed)
selection_nonce = nonce_sequence[selection_description.sequence_order]
disjunctive_chaum_pedersen_nonce = next(iter(nonce_sequence))
log_info(
f": encrypt_selection: for {selection_description.object_id} hash: {selection_description_hash.to_hex()}"
)
selection_representation = selection.vote
# Generate the encryption
elgamal_encryption = elgamal_encrypt(
selection_representation, selection_nonce, elgamal_public_key
)
if elgamal_encryption is None:
# will have logged about the failure earlier, so no need to log anything here
return None
# TODO: ISSUE #35: encrypt/decrypt: encrypt the extended_data field
# Create the return object
encrypted_selection = make_ciphertext_ballot_selection(
object_id=selection.object_id,
description_hash=selection_description_hash,
ciphertext=get_optional(elgamal_encryption),
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=disjunctive_chaum_pedersen_nonce,
selection_representation=selection_representation,
is_placeholder_selection=is_placeholder,
nonce=selection_nonce,
)
if encrypted_selection.proof is None:
return None # log will have happened earlier
# optionally, skip the verification step
if not should_verify_proofs:
return encrypted_selection
# verify the selection.
if encrypted_selection.is_valid_encryption(
selection_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_selection
log_warning(
f"mismatching selection proof for selection {encrypted_selection.object_id}"
)
return None
# pylint: disable=too-many-return-statements
def encrypt_contest(
contest: PlaintextBallotContest,
contest_description: ContestDescriptionWithPlaceholders,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotContest]:
"""
Encrypt a specific `BallotContest` in the context of a specific `Ballot`.
This method accepts a contest representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
    selections to represent the number of seats available for a given contest. By adding `placeholder`
    votes, the total count of affirmative selections stays constant, which is what allows the contest's
    limit proof to be generated without revealing how many real selections the voter made.
:param contest: the contest in the valid input form
:param contest_description: the `ContestDescriptionWithPlaceholders`
from the `ContestDescription` which defines this contest's structure
:param elgamal_public_key: the public key (k) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this contest.
this value can be (or derived from) the Ballot nonce, but no relationship is required
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Validate Input
if not contest.is_valid(
contest_description.object_id,
len(contest_description.ballot_selections),
contest_description.number_elected,
contest_description.votes_allowed,
):
log_warning(f"malformed input contest: {contest}")
return None
if not contest_description.is_valid():
log_warning(f"malformed contest description: {contest_description}")
return None
# account for sequence id
contest_description_hash = contest_description.crypto_hash()
nonce_sequence = Nonces(contest_description_hash, nonce_seed)
contest_nonce = nonce_sequence[contest_description.sequence_order]
chaum_pedersen_nonce = next(iter(nonce_sequence))
encrypted_selections: List[CiphertextBallotSelection] = list()
selection_count = 0
# TODO: ISSUE #54 this code could be inefficient if we had a contest
# with a lot of choices, although the O(n^2) iteration here is small
# compared to the huge cost of doing the cryptography.
# Generate the encrypted selections
for description in contest_description.ballot_selections:
has_selection = False
encrypted_selection = None
# iterate over the actual selections for each contest description
# and apply the selected value if it exists. If it does not, an explicit
# false is entered instead and the selection_count is not incremented
# this allows consumers to only pass in the relevant selections made by a voter
for selection in contest.ballot_selections:
if selection.object_id == description.object_id:
# track the selection count so we can append the
# appropriate number of true placeholder votes
has_selection = True
selection_count += selection.vote
encrypted_selection = encrypt_selection(
selection,
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
break
if not has_selection:
# No selection was made for this possible value
# so we explicitly set it to false
encrypted_selection = encrypt_selection(
selection_from(description),
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
if encrypted_selection is None:
return None # log will have happened earlier
encrypted_selections.append(get_optional(encrypted_selection))
# Handle Placeholder selections
# After we loop through all of the real selections on the ballot,
# we loop through each placeholder value and determine if it should be filled in
# Add a placeholder selection for each possible seat in the contest
for placeholder in contest_description.placeholder_selections:
# for undervotes, select the placeholder value as true for each available seat
# note this pattern is used since DisjunctiveChaumPedersen expects a 0 or 1
# so each seat can only have a maximum value of 1 in the current implementation
select_placeholder = False
if selection_count < contest_description.number_elected:
select_placeholder = True
selection_count += 1
encrypted_selection = encrypt_selection(
selection=selection_from(
description=placeholder,
is_placeholder=True,
is_affirmative=select_placeholder,
),
selection_description=placeholder,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
nonce_seed=contest_nonce,
is_placeholder=True,
should_verify_proofs=True,
)
if encrypted_selection is None:
return None # log will have happened earlier
encrypted_selections.append(get_optional(encrypted_selection))
# TODO: ISSUE #33: support other cases such as cumulative voting
# (individual selections being an encryption of > 1)
if (
contest_description.votes_allowed is not None
and selection_count < contest_description.votes_allowed
):
log_warning(
"mismatching selection count: only n-of-m style elections are currently supported"
)
# Create the return object
encrypted_contest = make_ciphertext_ballot_contest(
object_id=contest.object_id,
description_hash=contest_description_hash,
ballot_selections=encrypted_selections,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=chaum_pedersen_nonce,
number_elected=contest_description.number_elected,
nonce=contest_nonce,
)
if encrypted_contest is None or encrypted_contest.proof is None:
return None # log will have happened earlier
if not should_verify_proofs:
return encrypted_contest
# Verify the proof
if encrypted_contest.is_valid_encryption(
contest_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_contest
log_warning(f"mismatching contest proof for contest {encrypted_contest.object_id}")
return None
# TODO: ISSUE #57: add the device hash to the function interface so it can be propagated with the ballot.
# also propagate the seed so that the ballot codes can be regenerated
# by traversing the collection of ballots encrypted by a specific device
def encrypt_ballot(
ballot: PlaintextBallot,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_seed: ElementModQ,
nonce: Optional[ElementModQ] = None,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallot]:
"""
Encrypt a specific `Ballot` in the context of a specific `CiphertextElectionContext`.
This method accepts a ballot representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
selections to represent the number of seats available for a given contest.
    This method also allows a ballot to omit contests for which the voter made no selections.
It will fill missing contests with `False` selections and generate `placeholder` selections that are marked `True`.
:param ballot: the ballot in the valid input form
:param internal_manifest: the `InternalManifest` which defines this ballot's structure
:param context: all the cryptographic context for the election
:param encryption_seed: Hash from previous ballot or starting hash from device
:param nonce: an optional `int` used to seed the `Nonce` generated for this contest
if this value is not provided, the secret generating mechanism of the OS provides its own
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Determine the relevant range of contests for this ballot style
style = internal_manifest.get_ballot_style(ballot.style_id)
# Validate Input
if not ballot.is_valid(style.object_id):
log_warning(f"malformed input ballot: {ballot}")
return None
# Generate a random master nonce to use for the contest and selection nonce's on the ballot
random_master_nonce = get_or_else_optional_func(nonce, lambda: rand_q())
# Include a representation of the election and the external Id in the nonce's used
# to derive other nonce values on the ballot
nonce_seed = CiphertextBallot.nonce_seed(
internal_manifest.manifest_hash,
ballot.object_id,
random_master_nonce,
)
log_info(f": manifest_hash : {internal_manifest.manifest_hash.to_hex()}")
log_info(f": encryption_seed : {encryption_seed.to_hex()}")
encrypted_contests = encrypt_ballot_contests(
ballot, internal_manifest, context, nonce_seed
)
if encrypted_contests is None:
return None
# Create the return object
encrypted_ballot = make_ciphertext_ballot(
ballot.object_id,
ballot.style_id,
internal_manifest.manifest_hash,
encryption_seed,
encrypted_contests,
random_master_nonce,
)
if not encrypted_ballot.code:
return None
if not should_verify_proofs:
return encrypted_ballot
# Verify the proofs
if encrypted_ballot.is_valid_encryption(
internal_manifest.manifest_hash,
context.elgamal_public_key,
context.crypto_extended_base_hash,
):
return encrypted_ballot
return None # log will have happened earlier
def encrypt_ballot_contests(
ballot: PlaintextBallot,
description: InternalManifest,
context: CiphertextElectionContext,
nonce_seed: ElementModQ,
) -> Optional[List[CiphertextBallotContest]]:
"""Encrypt contests from a plaintext ballot with a specific style"""
encrypted_contests: List[CiphertextBallotContest] = []
# Only iterate on contests for this specific ballot style
for ballot_style_contest in description.get_contests_for(ballot.style_id):
use_contest = None
for contest in ballot.contests:
if contest.object_id == ballot_style_contest.object_id:
use_contest = contest
break
# no selections provided for the contest, so create a placeholder contest
if not use_contest:
use_contest = contest_from(ballot_style_contest)
encrypted_contest = encrypt_contest(
use_contest,
ballot_style_contest,
context.elgamal_public_key,
context.crypto_extended_base_hash,
nonce_seed,
)
if encrypted_contest is None:
return None
encrypted_contests.append(get_optional(encrypted_contest))
return encrypted_contests
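# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, runnable example of deriving the starting encryption seed from a
# device; all identifier values are arbitrary placeholders.
if __name__ == "__main__":
    example_device = EncryptionDevice(
        device_id=generate_device_uuid(),
        session_id=12345,
        launch_code=67890,
        location="example-location",
    )
    # This hash seeds ballot chaining; EncryptionMediator then replaces the
    # seed with each encrypted ballot's code.
    print(example_device.get_hash())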
| 37.435361
| 119
| 0.713676
|
from datetime import datetime
from typing import List, Optional
from uuid import getnode
from .ballot import (
CiphertextBallot,
CiphertextBallotContest,
CiphertextBallotSelection,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_ballot_contest,
make_ciphertext_ballot_selection,
make_ciphertext_ballot,
)
from .ballot_code import get_hash_for_device
from .election import CiphertextElectionContext
from .elgamal import elgamal_encrypt
from .group import ElementModP, ElementModQ, rand_q
from .logs import log_info, log_warning
from .manifest import (
InternalManifest,
ContestDescription,
ContestDescriptionWithPlaceholders,
SelectionDescription,
)
from .nonces import Nonces
from .serializable import Serializable
from .utils import get_optional, get_or_else_optional_func
class EncryptionDevice(Serializable):
device_id: int
session_id: int
launch_code: int
location: str
def __init__(
self,
device_id: int,
session_id: int,
launch_code: int,
location: str,
) -> None:
self.device_id = device_id
self.session_id = session_id
self.launch_code = launch_code
self.location = location
log_info(f": EncryptionDevice: Created: UUID: {device_id} at: {location}")
def get_hash(self) -> ElementModQ:
return get_hash_for_device(
self.device_id, self.session_id, self.launch_code, self.location
)
def get_timestamp(self) -> int:
return int(datetime.utcnow().timestamp())
class EncryptionMediator:
_internal_manifest: InternalManifest
_context: CiphertextElectionContext
_encryption_seed: ElementModQ
def __init__(
self,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_device: EncryptionDevice,
):
self._internal_manifest = internal_manifest
self._context = context
self._encryption_seed = encryption_device.get_hash()
def encrypt(self, ballot: PlaintextBallot) -> Optional[CiphertextBallot]:
log_info(f" encrypt: objectId: {ballot.object_id}")
encrypted_ballot = encrypt_ballot(
ballot, self._internal_manifest, self._context, self._encryption_seed
)
if encrypted_ballot is not None and encrypted_ballot.code is not None:
self._encryption_seed = encrypted_ballot.code
return encrypted_ballot
def generate_device_uuid() -> int:
return getnode()
def selection_from(
description: SelectionDescription,
is_placeholder: bool = False,
is_affirmative: bool = False,
) -> PlaintextBallotSelection:
return PlaintextBallotSelection(
description.object_id,
vote=1 if is_affirmative else 0,
is_placeholder_selection=is_placeholder,
)
def contest_from(description: ContestDescription) -> PlaintextBallotContest:
selections: List[PlaintextBallotSelection] = list()
for selection_description in description.ballot_selections:
selections.append(selection_from(selection_description))
return PlaintextBallotContest(description.object_id, selections)
def encrypt_selection(
selection: PlaintextBallotSelection,
selection_description: SelectionDescription,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
is_placeholder: bool = False,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotSelection]:
if not selection.is_valid(selection_description.object_id):
log_warning(f"malformed input selection: {selection}")
return None
selection_description_hash = selection_description.crypto_hash()
nonce_sequence = Nonces(selection_description_hash, nonce_seed)
selection_nonce = nonce_sequence[selection_description.sequence_order]
disjunctive_chaum_pedersen_nonce = next(iter(nonce_sequence))
log_info(
f": encrypt_selection: for {selection_description.object_id} hash: {selection_description_hash.to_hex()}"
)
selection_representation = selection.vote
elgamal_encryption = elgamal_encrypt(
selection_representation, selection_nonce, elgamal_public_key
)
if elgamal_encryption is None:
return None
    encrypted_selection = make_ciphertext_ballot_selection(
object_id=selection.object_id,
description_hash=selection_description_hash,
ciphertext=get_optional(elgamal_encryption),
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=disjunctive_chaum_pedersen_nonce,
selection_representation=selection_representation,
is_placeholder_selection=is_placeholder,
nonce=selection_nonce,
)
if encrypted_selection.proof is None:
return None
if not should_verify_proofs:
return encrypted_selection
if encrypted_selection.is_valid_encryption(
selection_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_selection
log_warning(
f"mismatching selection proof for selection {encrypted_selection.object_id}"
)
return None
def encrypt_contest(
contest: PlaintextBallotContest,
contest_description: ContestDescriptionWithPlaceholders,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotContest]:
if not contest.is_valid(
contest_description.object_id,
len(contest_description.ballot_selections),
contest_description.number_elected,
contest_description.votes_allowed,
):
log_warning(f"malformed input contest: {contest}")
return None
if not contest_description.is_valid():
log_warning(f"malformed contest description: {contest_description}")
return None
contest_description_hash = contest_description.crypto_hash()
nonce_sequence = Nonces(contest_description_hash, nonce_seed)
contest_nonce = nonce_sequence[contest_description.sequence_order]
chaum_pedersen_nonce = next(iter(nonce_sequence))
encrypted_selections: List[CiphertextBallotSelection] = list()
selection_count = 0
    for description in contest_description.ballot_selections:
has_selection = False
encrypted_selection = None
for selection in contest.ballot_selections:
if selection.object_id == description.object_id:
has_selection = True
selection_count += selection.vote
encrypted_selection = encrypt_selection(
selection,
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
break
if not has_selection:
encrypted_selection = encrypt_selection(
selection_from(description),
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
if encrypted_selection is None:
return None
encrypted_selections.append(get_optional(encrypted_selection))
for placeholder in contest_description.placeholder_selections:
select_placeholder = False
if selection_count < contest_description.number_elected:
select_placeholder = True
selection_count += 1
encrypted_selection = encrypt_selection(
selection=selection_from(
description=placeholder,
is_placeholder=True,
is_affirmative=select_placeholder,
),
selection_description=placeholder,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
nonce_seed=contest_nonce,
is_placeholder=True,
should_verify_proofs=True,
)
if encrypted_selection is None:
return None
encrypted_selections.append(get_optional(encrypted_selection))
    if (
        contest_description.votes_allowed is not None
and selection_count < contest_description.votes_allowed
):
log_warning(
"mismatching selection count: only n-of-m style elections are currently supported"
)
encrypted_contest = make_ciphertext_ballot_contest(
object_id=contest.object_id,
description_hash=contest_description_hash,
ballot_selections=encrypted_selections,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=chaum_pedersen_nonce,
number_elected=contest_description.number_elected,
nonce=contest_nonce,
)
if encrypted_contest is None or encrypted_contest.proof is None:
return None
if not should_verify_proofs:
return encrypted_contest
if encrypted_contest.is_valid_encryption(
contest_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_contest
log_warning(f"mismatching contest proof for contest {encrypted_contest.object_id}")
return None
def encrypt_ballot(
    ballot: PlaintextBallot,
    internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_seed: ElementModQ,
nonce: Optional[ElementModQ] = None,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallot]:
style = internal_manifest.get_ballot_style(ballot.style_id)
if not ballot.is_valid(style.object_id):
log_warning(f"malformed input ballot: {ballot}")
return None
random_master_nonce = get_or_else_optional_func(nonce, lambda: rand_q())
# Include a representation of the election and the external Id in the nonce's used
nonce_seed = CiphertextBallot.nonce_seed(
internal_manifest.manifest_hash,
ballot.object_id,
random_master_nonce,
)
log_info(f": manifest_hash : {internal_manifest.manifest_hash.to_hex()}")
log_info(f": encryption_seed : {encryption_seed.to_hex()}")
encrypted_contests = encrypt_ballot_contests(
ballot, internal_manifest, context, nonce_seed
)
if encrypted_contests is None:
return None
encrypted_ballot = make_ciphertext_ballot(
ballot.object_id,
ballot.style_id,
internal_manifest.manifest_hash,
encryption_seed,
encrypted_contests,
random_master_nonce,
)
if not encrypted_ballot.code:
return None
if not should_verify_proofs:
return encrypted_ballot
if encrypted_ballot.is_valid_encryption(
internal_manifest.manifest_hash,
context.elgamal_public_key,
context.crypto_extended_base_hash,
):
return encrypted_ballot
return None
def encrypt_ballot_contests(
ballot: PlaintextBallot,
description: InternalManifest,
context: CiphertextElectionContext,
nonce_seed: ElementModQ,
) -> Optional[List[CiphertextBallotContest]]:
encrypted_contests: List[CiphertextBallotContest] = []
for ballot_style_contest in description.get_contests_for(ballot.style_id):
use_contest = None
for contest in ballot.contests:
if contest.object_id == ballot_style_contest.object_id:
use_contest = contest
break
if not use_contest:
use_contest = contest_from(ballot_style_contest)
encrypted_contest = encrypt_contest(
use_contest,
ballot_style_contest,
context.elgamal_public_key,
context.crypto_extended_base_hash,
nonce_seed,
)
if encrypted_contest is None:
return None
encrypted_contests.append(get_optional(encrypted_contest))
return encrypted_contests
| true
| true
|
7904e2289dcbb2a6732c77a374c819ea5e960ff4
| 1,397
|
py
|
Python
|
games/Flappy.py
|
jayamithun/py-box
|
65617c997982584f5c212e8b8ea9c35ced9d8d7e
|
[
"MIT"
] | 1
|
2022-03-30T09:51:45.000Z
|
2022-03-30T09:51:45.000Z
|
games/Flappy.py
|
jayamithun/py-box
|
65617c997982584f5c212e8b8ea9c35ced9d8d7e
|
[
"MIT"
] | null | null | null |
games/Flappy.py
|
jayamithun/py-box
|
65617c997982584f5c212e8b8ea9c35ced9d8d7e
|
[
"MIT"
] | null | null | null |
# pip install freegames
# Click on screen to control ball
# import modules
from random import *
import turtle as t
from freegames import vector
# Set window title, color and icon
t.title("Flappy Ball")
root = t.Screen()._root
root.iconbitmap("logo-ico.ico")
t.bgcolor('#80ffd4')
bird = vector(0, 0)
balls = []
# Functions
# Move bird up in response to screen tap
def tap(x, y):
up = vector(0, 30)
bird.move(up)
# Return True if point on screen
def inside(point):
return -200 < point.x < 200 and -200 < point.y < 200
# Draw screen objects
def draw(alive):
t.clear()
t.goto(bird.x, bird.y)
if alive:
t.dot(13, 'green')
else:
t.dot(13, 'red')
for ball in balls:
t.goto(ball.x, ball.y)
t.dot(20, '#862d2d')
t.update()
def move():
# Update object positions
bird.y -= 5
for ball in balls:
ball.x -= 3
if randrange(10) == 0:
y = randrange(-199, 199)
ball = vector(199, y)
balls.append(ball)
while len(balls) > 0 and not inside(balls[0]):
balls.pop(0)
if not inside(bird):
draw(False)
return
for ball in balls:
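        # freegames' vector implements subtraction, and abs() returns the
        # Euclidean length, so this checks whether the two dot centers are
        # closer than roughly the sum of their radii.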
if abs(ball - bird) < 15:
draw(False)
return
draw(True)
t.ontimer(move, 50)
t.setup(420, 420, 370, 0)
t.hideturtle()
t.up()
t.tracer(False)
t.onscreenclick(tap)
move()
t.done()
| 16.630952
| 56
| 0.583393
|
from random import *
import turtle as t
from freegames import vector
t.title("Flappy Ball")
root = t.Screen()._root
root.iconbitmap("logo-ico.ico")
t.bgcolor('#80ffd4')
bird = vector(0, 0)
balls = []
def tap(x, y):
up = vector(0, 30)
bird.move(up)
def inside(point):
return -200 < point.x < 200 and -200 < point.y < 200
def draw(alive):
t.clear()
t.goto(bird.x, bird.y)
if alive:
t.dot(13, 'green')
else:
t.dot(13, 'red')
for ball in balls:
t.goto(ball.x, ball.y)
t.dot(20, '#862d2d')
t.update()
def move():
bird.y -= 5
for ball in balls:
ball.x -= 3
if randrange(10) == 0:
y = randrange(-199, 199)
ball = vector(199, y)
balls.append(ball)
while len(balls) > 0 and not inside(balls[0]):
balls.pop(0)
if not inside(bird):
draw(False)
return
for ball in balls:
if abs(ball - bird) < 15:
draw(False)
return
draw(True)
t.ontimer(move, 50)
t.setup(420, 420, 370, 0)
t.hideturtle()
t.up()
t.tracer(False)
t.onscreenclick(tap)
move()
t.done()
| true
| true
|
7904e253c75dce8e51533d6c20a5113efb72bb9d
| 6,357
|
py
|
Python
|
scripts/loadModelDoEntityEmbeddingsUnsorted.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | null | null | null |
scripts/loadModelDoEntityEmbeddingsUnsorted.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | 3
|
2021-12-10T01:22:05.000Z
|
2021-12-14T21:33:16.000Z
|
scripts/loadModelDoEntityEmbeddingsUnsorted.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | null | null | null |
'''
@author: kris
'''
# import modules; set up logging
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from gensim.test.utils import datapath
import numpy as np
import logging, os, sys, gzip
import datetime
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', filename='word2vec.out', level=logging.INFO)
# Path to a file that contains lines with the locations of files
# containing the sentences we want for our Word2Vec model
# Also works with entities that are just stacked line by line
pathsLocator = "./sentencesPaths.txt"
outputPath = "./entity_embeddings.txt"
# Model to load
to_load = '/vol2/cb/crunchbase-201806/embeddings/dim200-iter10-win5/CB_sg1_size200_mincount1_window5_neg15_iter10.wv.vectors.npy'
#'/home/faerberm/mag-training/MAG_sg1_size128_minCount5_window5_neg15_iter10_alpha_cbowMean.wv.vectors.npy'
#'/vol2/cb/crunchbase-201806/embeddings/dim200-iter10-win5/CB_sg1_size200_mincount1_window5_neg15_iter10'
#'MAG_sg1_size128_minCount5_window5_neg15_iter5'
loadKeyedVector = True
#'dbpedia_sg1_size200_mincount1_window5_neg15_iter10'
#'RDF2Vec_sg1_size200_mincount1_window5_neg15_iter20'
#'MAG_sg1_size200_mincount1_window5_neg15_iter15'
#What is the newline character on the machine
newline = '\n'
ignorePrefix = '#'
#What separates one walk from another (aka. one sentence from another)?
walkSeparator = "\t"
#What separates the single 'units' of a given walk?
hopSeparator = '->'
# Mapping dict
entity_mapping_dict = {}
# Mapping file
mapping_file = "/home/noulletk/prog/bmw/dbpedia_full/resources/data/walks/walk_entity_mapping.txt"
mapping_sep = "\t"
hasMapping = False
iterationCounter = {'val': 0}
#Load mappings if there are any
if hasMapping:
for mapping_line in open(mapping_file, mode='rt'):
mapping_tokens = mapping_line.rstrip(newline).split(mapping_sep)
if len(mapping_tokens) == 2:
entity_mapping_dict[mapping_tokens[0]] = mapping_tokens[1]
print("Loaded %s mappings!" % (len(entity_mapping_dict)))
class MySentences:
def __init__(self, iterationCounter):
self.iterationCounter = iterationCounter
def __iter__(self):
print("Running Iteration #%s" % (iterationCounter['val']))
iterationCounter['val'] += 1
# Iterate to find which files are to be read
for fname in open(pathsLocator, mode='rt'): # os.listdir(self.dirname):
sentencesPath = fname.rstrip(newline)
# Ignore commented-out lines
if sentencesPath.startswith(ignorePrefix):
continue
now = datetime.datetime.now()
print("[%s] Grabbing sentences from: %s" % (now.strftime("%Y-%m-%d %H:%M"), sentencesPath))
try:
# Go through all paths
for line in open(sentencesPath, mode='rt'):
# If you're NOT grouping the walks and separating them by tabs
sentence = line.rstrip(newline).split(hopSeparator)
for tokenPos in range(len(sentence)):
token = sentence[tokenPos]
# Give the proper URL for the entity IF it exists, otherwise return the entity itself
sentence[tokenPos] = entity_mapping_dict.get(token, token)
#print(sentence)
yield sentence
except Exception:
print("Failed reading file:")
print(sentencesPath)
#load model
if loadKeyedVector:
print("Loading [KeyedVectors] from: ",to_load)
#model_wv = KeyedVectors.load(to_load, mmap='r')
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True)
#model_wv = KeyedVectors.load_word2vec_format(to_load)
model_wv = KeyedVectors.load(to_load)
#model_wv = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True, unicode_errors='ignore')
else:
print("Loading [MODEL] from: ",to_load)
model_wv = Word2Vec.load(to_load).wv
print("Vocab keys size:",len(model_wv.vocab.keys()))
print("Outputting entity embeddings to: ",outputPath)
sentences = MySentences(iterationCounter)
#Open the output file for the entity embeddings
outFile = open(outputPath, "w")
#Make a dictionary for in-memory aggregation while going over sentences
default_val = None
entity_embeddings_dict = {}
vocab_keys = model_wv.vocab.keys()
displayCounter = 0
maxDisplay = 10
for voc in vocab_keys:
print(voc)
if displayCounter >= maxDisplay:
break
displayCounter+=1
print("Compute entity embeddings (through combination of word embeddings)...")
counter = 0
'''
for sentence in sentences:
entity = sentence[0]
entity_embedding = None
#Sum over all words' embeddings and then output the resulting embedding
for word in sentence:
word_embedding = model.wv[word]
if default_val is None:
#Initialise default_val if it isn't yet
default_val = np.zeros(word_embedding.shape)
if entity_embedding is None:
entity_embedding = np.zeros(word_embedding.shape)
entity_embedding += word_embedding
entity_embeddings_dict[entity] = entity_embeddings_dict.get(entity, default_val) + entity_embedding
if (counter % 1000000 == 0):
print("Combined word embeddings: ",counter)
print("Last one completed: ",entity)
counter+=1
'''
#Go through all sentences to see which entities we want
for sentence in sentences:
    # The idea: the entity appears as the first token of each walk, and since every
    # entity gets the same treatment, its embedding can be looked up directly from the vocab.
entity = sentence[0]
if hasMapping:
entity = entity_mapping_dict.get(entity, entity)
entity_embedding = None
dict_val = entity_embeddings_dict.get(entity, None)
if (dict_val is None):
if entity in vocab_keys:
entity_embedding = model_wv[entity]
entity_embeddings_dict[entity] = entity_embedding
#Encountered first time, so output it
outFile.write("%s" % entity)
for number in entity_embedding:
outFile.write("\t%s" % number)
outFile.write("\n")
if (counter % 1000000 == 0):
print("Lines passed through: ",counter)
print("Current line's entity: ",entity)
print("Embeddings output: ",len(entity_embeddings_dict))
counter+=1
#print("Output computed entity embeddings!")
#for (entity, entity_embedding) in entity_embeddings_dict.items():
# #Output computed embedding
# outFile.write("%s" % entity)
# for number in entity_embedding:
# outFile.write("\t%s" % number)
# outFile.write("\n")
#Close the output file post finishing output operations
outFile.close()
print("Finished outputting entity embeddings")
| 34.737705
| 129
| 0.757747
|
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from gensim.test.utils import datapath
import numpy as np
import logging, os, sys, gzip
import datetime
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', filename='word2vec.out', level=logging.INFO)
pathsLocator = "./sentencesPaths.txt"
outputPath = "./entity_embeddings.txt"
to_load = '/vol2/cb/crunchbase-201806/embeddings/dim200-iter10-win5/CB_sg1_size200_mincount1_window5_neg15_iter10.wv.vectors.npy'
loadKeyedVector = True
newline = '\n'
ignorePrefix = '#'
walkSeparator = "\t"
hopSeparator = '->'
entity_mapping_dict = {}
mapping_file = "/home/noulletk/prog/bmw/dbpedia_full/resources/data/walks/walk_entity_mapping.txt"
mapping_sep = "\t"
hasMapping = False
iterationCounter = {'val': 0}
if hasMapping:
for mapping_line in open(mapping_file, mode='rt'):
mapping_tokens = mapping_line.rstrip(newline).split(mapping_sep)
if len(mapping_tokens) == 2:
entity_mapping_dict[mapping_tokens[0]] = mapping_tokens[1]
print("Loaded %s mappings!" % (len(entity_mapping_dict)))
class MySentences:
def __init__(self, iterationCounter):
self.iterationCounter = iterationCounter
def __iter__(self):
print("Running Iteration #%s" % (iterationCounter['val']))
iterationCounter['val'] += 1
for fname in open(pathsLocator, mode='rt'):
sentencesPath = fname.rstrip(newline)
if sentencesPath.startswith(ignorePrefix):
continue
now = datetime.datetime.now()
print("[%s] Grabbing sentences from: %s" % (now.strftime("%Y-%m-%d %H:%M"), sentencesPath))
try:
for line in open(sentencesPath, mode='rt'):
sentence = line.rstrip(newline).split(hopSeparator)
for tokenPos in range(len(sentence)):
token = sentence[tokenPos]
# Give the proper URL for the entity IF it exists, otherwise return the entity itself
sentence[tokenPos] = entity_mapping_dict.get(token, token)
#print(sentence)
yield sentence
except Exception:
print("Failed reading file:")
print(sentencesPath)
#load model
if loadKeyedVector:
print("Loading [KeyedVectors] from: ",to_load)
#model_wv = KeyedVectors.load(to_load, mmap='r')
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True)
#model_wv = KeyedVectors.load_word2vec_format(to_load)
model_wv = KeyedVectors.load(to_load)
#model_wv = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True, unicode_errors='ignore')
else:
print("Loading [MODEL] from: ",to_load)
model_wv = Word2Vec.load(to_load).wv
print("Vocab keys size:",len(model_wv.vocab.keys()))
print("Outputting entity embeddings to: ",outputPath)
sentences = MySentences(iterationCounter)
#Open the output file for the entity embeddings
outFile = open(outputPath, "w")
#Make a dictionary for in-memory aggregation while going over sentences
default_val = None
entity_embeddings_dict = {}
vocab_keys = model_wv.vocab.keys()
displayCounter = 0
maxDisplay = 10
for voc in vocab_keys:
print(voc)
if displayCounter >= maxDisplay:
break
displayCounter+=1
print("Compute entity embeddings (through combination of word embeddings)...")
counter = 0
#Go through all sentences to see which entities we want
for sentence in sentences:
    # idea is that the entity is in the document, so we check what it is like and,
    # since every entity has 'the same' treatment, we can determine their probabilities based on that
entity = sentence[0]
if hasMapping:
entity = entity_mapping_dict.get(entity, entity)
entity_embedding = None
dict_val = entity_embeddings_dict.get(entity, None)
if (dict_val is None):
if entity in vocab_keys:
entity_embedding = model_wv[entity]
entity_embeddings_dict[entity] = entity_embedding
#Encountered first time, so output it
outFile.write("%s" % entity)
for number in entity_embedding:
outFile.write("\t%s" % number)
outFile.write("\n")
if (counter % 1000000 == 0):
print("Lines passed through: ",counter)
print("Current line's entity: ",entity)
print("Embeddings output: ",len(entity_embeddings_dict))
counter+=1
nt("Finished outputting entity embeddings")
| true
| true
|
7904e327b270f7b8435e2842d22e8031f61c2796
| 8,884
|
py
|
Python
|
plugins/tests/test_helpers.py
|
sul-dlss/folio-airflow
|
befe7097874406e3ab77764d285f1edafa53d4b1
|
[
"Apache-2.0"
] | 2
|
2022-03-02T15:41:43.000Z
|
2022-03-04T19:06:59.000Z
|
plugins/tests/test_helpers.py
|
sul-dlss/folio-airflow
|
befe7097874406e3ab77764d285f1edafa53d4b1
|
[
"Apache-2.0"
] | 40
|
2021-11-30T21:30:52.000Z
|
2022-03-11T00:06:16.000Z
|
plugins/tests/test_helpers.py
|
sul-dlss/folio-airflow
|
befe7097874406e3ab77764d285f1edafa53d4b1
|
[
"Apache-2.0"
] | null | null | null |
import logging
import pytest
import pydantic
import requests
from pymarc import Record, Field
from airflow.models import Variable
from pytest_mock import MockerFixture
from plugins.folio.helpers import (
archive_artifacts,
move_marc_files_check_tsv,
post_to_okapi,
process_marc,
_move_001_to_035,
transform_move_tsvs,
process_records,
setup_data_logging,
)
# Mock xcom messages dict
messages = {}
# Mock xcom
def mock_xcom_push(*args, **kwargs):
key = kwargs["key"]
value = kwargs["value"]
messages[key] = value
class MockTaskInstance(pydantic.BaseModel):
xcom_push = mock_xcom_push
@pytest.fixture
def mock_file_system(tmp_path):
airflow_path = tmp_path / "opt/airflow/"
# Mock source and target dirs
source_dir = airflow_path / "symphony"
source_dir.mkdir(parents=True)
sample_marc = source_dir / "sample.mrc"
sample_marc.write_text("sample")
target_dir = airflow_path / "migration/data/instances/"
target_dir.mkdir(parents=True)
# Mock Results and Archive Directories
results_dir = airflow_path / "migration/results"
results_dir.mkdir(parents=True)
archive_dir = airflow_path / "migration/archive"
archive_dir.mkdir(parents=True)
# mock tmp dir
tmp = tmp_path / "tmp/"
tmp.mkdir(parents=True)
return [
airflow_path,
source_dir,
target_dir,
results_dir,
archive_dir,
tmp
]
def test_move_marc_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
) # noqa
assert not (source_dir / "sample.mrc").exists()
assert messages["marc_only"]
def test_move_tsv_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
sample_csv = source_dir / "sample.tsv"
sample_csv.write_text("sample")
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
) # noqa
assert messages["marc_only"] is False
@pytest.fixture
def mock_dag_run(mocker: MockerFixture):
dag_run = mocker.stub(name="dag_run")
dag_run.run_id = "manual_2022-02-24"
return dag_run
def test_archive_artifacts(mock_dag_run, mock_file_system):
dag = mock_dag_run
airflow_path = mock_file_system[0]
results_dir = mock_file_system[3]
archive_dir = mock_file_system[4]
tmp_dir = mock_file_system[5]
# Create mock Instance JSON file
instance_filename = f"folio_instances_{dag.run_id}_bibs-transformer.json"
instance_file = results_dir / instance_filename
instance_file.write_text("""{ "id":"abcded2345"}""")
tmp_filename = "temp_file.json"
tmp_file = tmp_dir / tmp_filename
tmp_file.write_text("""{ "key":"vaaluue"}""")
target_file = archive_dir / instance_filename
archive_artifacts(dag_run=dag, airflow=airflow_path, tmp_dir=tmp_dir)
assert not instance_file.exists()
assert not tmp_file.exists()
assert target_file.exists()
@pytest.fixture
def mock_okapi_variable(monkeypatch):
def mock_get(key):
return "https://okapi-folio.dev.edu"
monkeypatch.setattr(Variable, "get", mock_get)
@pytest.fixture
def mock_records():
return [
{"id": "de09e01a-6d75-4007-b700-c83a475999b1"},
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"},
]
@pytest.fixture
def mock_okapi_success(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 201
return post_response
monkeypatch.setattr(requests, "post", mock_post)
@pytest.mark.output_capturing
def test_post_to_okapi(
mock_okapi_success, mock_okapi_variable, mock_dag_run, mock_records, caplog
):
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run(),
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
)
assert "Result status code 201 for 2 records" in caplog.text
@pytest.fixture
def mock_okapi_failure(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 422
post_response.text = """{
"errors" : [ {
"message" : "value already exists in table holdings_record: hld100000000027"
} ]
}""" # noqa
return post_response
monkeypatch.setattr(requests, "post", mock_post)
def test_post_to_okapi_failures(
mock_okapi_failure,
mock_okapi_variable,
mock_dag_run,
mock_records,
mock_file_system,
):
airflow_path = mock_file_system[0]
migration_results = mock_file_system[3]
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run,
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
airflow=airflow_path,
)
error_file = (
migration_results / "errors-instance-storage-422-manual_2022-02-24.json" # noqa
)
assert error_file.exists()
def test_process_marc():
assert process_marc
@pytest.fixture
def mock_marc_record():
record = Record()
field_245 = Field(
tag="245",
indicators=["0", "1"],
subfields=[
"a",
"The pragmatic programmer : ",
"b",
"from journeyman to master /",
"c",
"Andrew Hunt, David Thomas.",
],
)
field_001_1 = Field(tag="001", data="a123456789")
field_001_2 = Field(tag="001", data="gls_0987654321")
record.add_field(field_001_1, field_001_2, field_245)
return record
def test_move_001_to_035(mock_marc_record):
record = mock_marc_record
_move_001_to_035(record)
assert record.get_fields("035")[0].get_subfields("a")[0] == "gls_0987654321" # noqa
def test_missing_001_to_034(mock_marc_record):
record = mock_marc_record
record.remove_fields('001')
_move_001_to_035(record)
assert record.get_fields("035") == []
def test_transform_move_tsvs(mock_file_system):
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
# mock sample csv and tsv
symphony_tsv = source_dir / "sample.tsv"
symphony_tsv.write_text(
"CATKEY\tCALL_NUMBER_TYPE\tBARCODE\n123456\tLC 12345\t45677 ")
tsv_directory = airflow_path / "migration/data/items"
tsv_directory.mkdir(parents=True)
sample_tsv = tsv_directory / "sample.tsv"
column_transforms = [("CATKEY", lambda x: f"a{x}"),
("BARCODE", lambda x: x.strip())]
transform_move_tsvs(
airflow=airflow_path,
column_transforms=column_transforms,
source="symphony",
)
f = open(sample_tsv, "r")
assert f.readlines()[1] == "a123456\tLC 12345\t45677\n"
f.close()
def test_process_records(mock_dag_run, mock_file_system):
airflow_path = mock_file_system[0]
tmp = mock_file_system[5]
results_dir = mock_file_system[3]
# mock results file
results_file = results_dir / "folio_instances-manual_2022-02-24.json"
results_file.write_text(
"""{"id": "de09e01a-6d75-4007-b700-c83a475999b1"}
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"}"""
)
num_records = process_records(
prefix="folio_instances",
out_filename="instances",
jobs=1,
dag_run=mock_dag_run,
airflow=str(airflow_path),
tmp=str(tmp),
)
assert num_records == 2
@pytest.fixture
def mock_logger_file_handler(monkeypatch, mocker: MockerFixture):
def mock_file_handler(*args, **kwargs):
file_handler = mocker.stub(name="file_handler")
file_handler.addFilter = lambda x: x
file_handler.setFormatter = lambda x: x
file_handler.setLevel = lambda x: x
return file_handler
monkeypatch.setattr(logging, "FileHandler", mock_file_handler)
class MockFolderStructure(pydantic.BaseModel):
data_issue_file_path = "data-issues-1345.tsv"
class MockTransform(pydantic.BaseModel):
_log = None
folder_structure = MockFolderStructure()
def test_setup_data_logging(mock_logger_file_handler):
transformer = MockTransform()
assert hasattr(logging.Logger, "data_issues") is False
assert len(logging.getLogger().handlers) == 5
setup_data_logging(transformer)
assert hasattr(logging.Logger, "data_issues")
assert len(logging.getLogger().handlers) == 6
# Removes handler otherwise fails subsequent tests
file_handler = logging.getLogger().handlers[-1]
logging.getLogger().removeHandler(file_handler)
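# Minimal sketch (illustrative only) of the xcom-capture pattern used above:
# MockTaskInstance routes xcom_push calls into the module-level messages dict,
# so tests can assert on values a task "pushed".
def test_xcom_push_capture_pattern():
    task_instance = MockTaskInstance()
    task_instance.xcom_push(key="example", value=42)
    assert messages["example"] == 42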
| 26.678679
| 92
| 0.687753
|
import logging
import pytest
import pydantic
import requests
from pymarc import Record, Field
from airflow.models import Variable
from pytest_mock import MockerFixture
from plugins.folio.helpers import (
archive_artifacts,
move_marc_files_check_tsv,
post_to_okapi,
process_marc,
_move_001_to_035,
transform_move_tsvs,
process_records,
setup_data_logging,
)
messages = {}
def mock_xcom_push(*args, **kwargs):
key = kwargs["key"]
value = kwargs["value"]
messages[key] = value
class MockTaskInstance(pydantic.BaseModel):
xcom_push = mock_xcom_push
@pytest.fixture
def mock_file_system(tmp_path):
airflow_path = tmp_path / "opt/airflow/"
source_dir = airflow_path / "symphony"
source_dir.mkdir(parents=True)
sample_marc = source_dir / "sample.mrc"
sample_marc.write_text("sample")
target_dir = airflow_path / "migration/data/instances/"
target_dir.mkdir(parents=True)
results_dir = airflow_path / "migration/results"
results_dir.mkdir(parents=True)
archive_dir = airflow_path / "migration/archive"
archive_dir.mkdir(parents=True)
tmp = tmp_path / "tmp/"
tmp.mkdir(parents=True)
return [
airflow_path,
source_dir,
target_dir,
results_dir,
archive_dir,
tmp
]
def test_move_marc_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
)
assert not (source_dir / "sample.mrc").exists()
assert messages["marc_only"]
def test_move_tsv_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
sample_csv = source_dir / "sample.tsv"
sample_csv.write_text("sample")
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
)
assert messages["marc_only"] is False
@pytest.fixture
def mock_dag_run(mocker: MockerFixture):
dag_run = mocker.stub(name="dag_run")
dag_run.run_id = "manual_2022-02-24"
return dag_run
def test_archive_artifacts(mock_dag_run, mock_file_system):
dag = mock_dag_run
airflow_path = mock_file_system[0]
results_dir = mock_file_system[3]
archive_dir = mock_file_system[4]
tmp_dir = mock_file_system[5]
instance_filename = f"folio_instances_{dag.run_id}_bibs-transformer.json"
instance_file = results_dir / instance_filename
instance_file.write_text("""{ "id":"abcded2345"}""")
tmp_filename = "temp_file.json"
tmp_file = tmp_dir / tmp_filename
tmp_file.write_text("""{ "key":"vaaluue"}""")
target_file = archive_dir / instance_filename
archive_artifacts(dag_run=dag, airflow=airflow_path, tmp_dir=tmp_dir)
assert not instance_file.exists()
assert not tmp_file.exists()
assert target_file.exists()
@pytest.fixture
def mock_okapi_variable(monkeypatch):
def mock_get(key):
return "https://okapi-folio.dev.edu"
monkeypatch.setattr(Variable, "get", mock_get)
@pytest.fixture
def mock_records():
return [
{"id": "de09e01a-6d75-4007-b700-c83a475999b1"},
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"},
]
@pytest.fixture
def mock_okapi_success(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 201
return post_response
monkeypatch.setattr(requests, "post", mock_post)
@pytest.mark.output_capturing
def test_post_to_okapi(
mock_okapi_success, mock_okapi_variable, mock_dag_run, mock_records, caplog
):
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run(),
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
)
assert "Result status code 201 for 2 records" in caplog.text
@pytest.fixture
def mock_okapi_failure(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 422
post_response.text = """{
"errors" : [ {
"message" : "value already exists in table holdings_record: hld100000000027"
} ]
}"""
return post_response
monkeypatch.setattr(requests, "post", mock_post)
def test_post_to_okapi_failures(
mock_okapi_failure,
mock_okapi_variable,
mock_dag_run,
mock_records,
mock_file_system,
):
airflow_path = mock_file_system[0]
migration_results = mock_file_system[3]
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run,
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
airflow=airflow_path,
)
error_file = (
migration_results / "errors-instance-storage-422-manual_2022-02-24.json"
)
assert error_file.exists()
def test_process_marc():
assert process_marc
@pytest.fixture
def mock_marc_record():
record = Record()
field_245 = Field(
tag="245",
indicators=["0", "1"],
subfields=[
"a",
"The pragmatic programmer : ",
"b",
"from journeyman to master /",
"c",
"Andrew Hunt, David Thomas.",
],
)
field_001_1 = Field(tag="001", data="a123456789")
field_001_2 = Field(tag="001", data="gls_0987654321")
record.add_field(field_001_1, field_001_2, field_245)
return record
def test_move_001_to_035(mock_marc_record):
record = mock_marc_record
_move_001_to_035(record)
assert record.get_fields("035")[0].get_subfields("a")[0] == "gls_0987654321"
def test_missing_001_to_034(mock_marc_record):
record = mock_marc_record
record.remove_fields('001')
_move_001_to_035(record)
assert record.get_fields("035") == []
def test_transform_move_tsvs(mock_file_system):
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
symphony_tsv = source_dir / "sample.tsv"
symphony_tsv.write_text(
"CATKEY\tCALL_NUMBER_TYPE\tBARCODE\n123456\tLC 12345\t45677 ")
tsv_directory = airflow_path / "migration/data/items"
tsv_directory.mkdir(parents=True)
sample_tsv = tsv_directory / "sample.tsv"
column_transforms = [("CATKEY", lambda x: f"a{x}"),
("BARCODE", lambda x: x.strip())]
transform_move_tsvs(
airflow=airflow_path,
column_transforms=column_transforms,
source="symphony",
)
f = open(sample_tsv, "r")
assert f.readlines()[1] == "a123456\tLC 12345\t45677\n"
f.close()
def test_process_records(mock_dag_run, mock_file_system):
airflow_path = mock_file_system[0]
tmp = mock_file_system[5]
results_dir = mock_file_system[3]
results_file = results_dir / "folio_instances-manual_2022-02-24.json"
results_file.write_text(
"""{"id": "de09e01a-6d75-4007-b700-c83a475999b1"}
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"}"""
)
num_records = process_records(
prefix="folio_instances",
out_filename="instances",
jobs=1,
dag_run=mock_dag_run,
airflow=str(airflow_path),
tmp=str(tmp),
)
assert num_records == 2
@pytest.fixture
def mock_logger_file_handler(monkeypatch, mocker: MockerFixture):
def mock_file_handler(*args, **kwargs):
file_handler = mocker.stub(name="file_handler")
file_handler.addFilter = lambda x: x
file_handler.setFormatter = lambda x: x
file_handler.setLevel = lambda x: x
return file_handler
monkeypatch.setattr(logging, "FileHandler", mock_file_handler)
class MockFolderStructure(pydantic.BaseModel):
data_issue_file_path = "data-issues-1345.tsv"
class MockTransform(pydantic.BaseModel):
_log = None
folder_structure = MockFolderStructure()
def test_setup_data_logging(mock_logger_file_handler):
transformer = MockTransform()
assert hasattr(logging.Logger, "data_issues") is False
assert len(logging.getLogger().handlers) == 5
setup_data_logging(transformer)
assert hasattr(logging.Logger, "data_issues")
assert len(logging.getLogger().handlers) == 6
file_handler = logging.getLogger().handlers[-1]
logging.getLogger().removeHandler(file_handler)
| true
| true
|
7904e32998681c19d1931c5a6a712a8fc6b22f7e
| 26,232
|
py
|
Python
|
schmidt_funcs.py
|
johnarban/arban
|
dcd2d0838f72c39bf3a52aabfa74d6ea28933d02
|
[
"MIT"
] | null | null | null |
schmidt_funcs.py
|
johnarban/arban
|
dcd2d0838f72c39bf3a52aabfa74d6ea28933d02
|
[
"MIT"
] | null | null | null |
schmidt_funcs.py
|
johnarban/arban
|
dcd2d0838f72c39bf3a52aabfa74d6ea28933d02
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to center position
    accepted centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
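def _example_rectangle_mask():
    # Illustrative sketch only: burn a rotated rectangle into a PIL mask,
    # following the ImageDraw.polygon usage the docstring above describes.
    img = Image.new('L', (64, 64), 0)
    poly = rectangle((32, 32), 30, 12, angle=30)
    ImageDraw.Draw(img).polygon(poly, outline=1, fill=1)
    return np.array(img)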
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
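def _example_llspace():
    # Illustrative sketch only: 0.1 dex log-spaced bins vs. 0.5-wide linear bins.
    log_bins = llspace(0.1, 10., dex=0.1)
    lin_bins = llspace(0., 10., dx=0.5)
    return log_bins, lin_bins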
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
    (statistical) probability density function,
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value is within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
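def _example_pdf_cdf():
    # Illustrative sketch only (made-up lognormal sample): the pdf above
    # integrates to ~1 and the cdf ends at 1.
    vals = np.random.lognormal(0., 1., size=1000)
    bins = llspace(vals.min(), vals.max(), n=20, log=True)
    p, centers = pdf(vals, bins)
    c, edges = cdf(vals, bins)
    return np.sum(p * np.diff(edges)), c[-1]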
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
    Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
                      only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
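def _example_bootstrap():
    # Illustrative sketch only (made-up data): spread of the median under
    # the smooth bootstrap defined above.
    samp = np.random.normal(10., 2., size=500)
    samp_err = np.full(samp.shape, 0.5)
    meds = [np.median(bootstrap(samp, samp_err, smooth=True)) for _ in range(200)]
    return np.mean(meds), np.std(meds)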
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
    call: surfd(X, Xmap, bins,
                Xerr = None, Xmaperr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
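def _example_schmidt_law():
    # Illustrative sketch only: the two-parameter form is a pure power law,
    # while the three-parameter form is zeroed below the threshold Ak0.
    Ak = np.array([0.1, 0.5, 1.0, 2.0])
    return schmidt_law(Ak, (2.0, 1.0)), schmidt_law(Ak, (2.0, 1.0, 0.8))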
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the Schmidt law
    to binned x, log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for you sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
# and emcee sampler type. it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
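def _example_fit_call():
    # Illustrative sketch only: a synthetic "extinction map" and a sample
    # drawn from it; all numbers are made up and the chain is short for speed.
    maps = np.random.lognormal(0., 0.5, size=100000)
    samp = np.random.choice(maps, size=500)
    bins = llspace(0.1, maps.max(), n=10, log=True)
    return fit(bins, samp, None, maps, None, nwalkers=50, nsteps=500,
               burnin=100, verbose=False)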
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
None
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
    # Plot the median model curve
    plt.plot(xln, percent(50), 'k')
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')   # 1st-99th percentile band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 2nd-98th percentile (~2 sigma) band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 16th-84th percentile (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
| 30.970484
| 116
| 0.564692
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
import pdb
import sys
oldsysstdout = sys.stdout
class flushfile():
    def __init__(self, f):
        self.f = f
    def __getattr__(self, name):
        return object.__getattribute__(self.f, name)
    def write(self, x):
        self.f.write(x)
        self.f.flush()
    def flush(self):
        self.f.flush()
def rot_matrix(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
cx, cy = c
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
if center is not True:
if center[0] == 'b':
cy = cy + h / 2.
else:
cy = cy - h / 2.
if center[1] == 'l':
cx = cx + w / 2.
else:
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
c.append((cx + xr, cy + yr))
return c
def comp(arr):
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
diff = np.diff(arr)
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
def shift_bins(arr,phase=0,nonneg=False):
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
    if not nisNone:
        if log and dexisNone:
            dex = (xmax - xmin) / n
        elif (not log) and dxisNone:
            dx = (xmax - xmin) / n
if log:
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h / np.sum(h, dtype=float))
return np.append(0, c), bins
def cdf2(values, bins):
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
if X_err is None:
smooth = False
if n is None:
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
        sfr[Ak < Ak0] = 0
        return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
def model(x, theta):
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
        return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))
    def lnprior(theta):
        if len(theta) == 3:
            beta, kappa, Ak0 = theta
            c3 = 0. < Ak0 <= 5.
            c4 = True
        else:
            beta, kappa = theta
            c3 = True
            c4 = True
        c1 = 0 <= beta <= 6
        c2 = 0 <= kappa
        if c1 and c2 and c3 and c4:
            return 0.0
        return -np.inf
    def lnprob(theta, x, y, yerr):
        lp = lnprior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lp + lnlike(theta, x, y, yerr)
    ndim, nwalkers = len(pos), nwalkers
    pos = [np.array(pos) + np.array(pose) * 0.5 *
           (0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lnprob, args=(x, y, yerr))
    sampler.run_mcmc(pos, nsteps)
    samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
    if verbose: print(sampler.acor)
    if verbose:
        for i, item in enumerate(theta_mcmc):
            j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
            inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
            print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
        pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
        threshold=False, threshold2=False,verbose=True):
    x = avg(bins)
    y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
    bins2 = shift_bins(bins,0.5)
    x2 = avg(bins2)
    y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
    concatx = np.concatenate((x,x2))
    concaty = np.concatenate((y,y2))
    concatyerr = np.concatenate((yerr,yerr2))
    srt = np.argsort(concatx)
    x = concatx[srt]
    y = concaty[srt]
    yerr = concatyerr[srt]
    nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
    y = y[nonzero]
    yerr = yerr[nonzero]
    x = x[nonzero]
    init = alpha(y, x, return_kappa=True, cov=True)
    if pos is None:
        pos = init[:2]
    if pose is None:
        if np.isnan(init[2] + init[3]):
            pose = (1, 1)
        else:
            pose = (init[2], init[3])
    if threshold | threshold2:
        pos = pos + (0.4,)
        pose = pose + (0.2,)
    if threshold2:
        pos = pos + (8.,)
        pose = pose + (.5,)
    pos = np.asarray(pos)
    pose = .1*pos
    if sampler is None:
        if verbose: print('Sampler autocorrelation times . . .')
        sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
                                                  pos=pos, pose=pose,
                                                  nwalkers=nwalkers,
                                                  nsteps=nsteps, burnin=burnin,verbose=verbose)
    else:
        print('Next time don\'t give me a ' + str(type(sampler)) + '.')
    try:
        return sampler, x, y, yerr, theta, theta_std
    except:
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
                          bins=None, scale=None, triangle_plot=True):
    try:
        mpl.style.use('john')
    except:
        None
    if hasattr(sampler,'__getitem__'):
        chain = sampler
        dim = chain.shape[-1]
    else:
        chain = sampler.chain
        dim = sampler.dim
    samples = chain[:, burnin:, :].reshape((-1, dim))
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
    n_params = len(theta_mcmc[:,1])
    for i, item in enumerate(theta_mcmc):
        j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
        inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
        print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    if triangle_plot:
        if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
    # Plot the median model curve
    plt.plot(xln, percent(50), 'k')
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')   # 1st-99th percentile band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 2nd-98th percentile (~2 sigma) band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 16th-84th percentile (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
| true
| true
|
7904e38e4d8be0710b575ef3f2004b920d720924
| 2,827
|
py
|
Python
|
densevid_eval-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py
|
cxqj/5-densevideocaptioning
|
8f1239128ece2d59a063b766fc44911129706314
|
[
"MIT"
] | 150
|
2018-10-06T15:51:30.000Z
|
2022-03-22T08:23:24.000Z
|
densevid_eval-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py
|
xiaoxinlong/DenseVideoCaptioning
|
27f315da7c90f6bb6d7a3fc8038159f7a54ec5bb
|
[
"MIT"
] | 38
|
2018-10-08T07:19:59.000Z
|
2021-05-06T21:13:43.000Z
|
densevid_eval-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py
|
xiaoxinlong/DenseVideoCaptioning
|
27f315da7c90f6bb6d7a3fc8038159f7a54ec5bb
|
[
"MIT"
] | 54
|
2018-10-22T07:33:37.000Z
|
2022-03-23T04:56:25.000Z
|
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.decode().split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
            if k not in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
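if __name__ == '__main__':
    # Minimal usage sketch (illustrative only); it assumes
    # stanford-corenlp-3.4.1.jar sits next to this file and java is on PATH.
    captions = {'vid1': [{'caption': 'A man is cooking.'},
                         {'caption': 'Then he stirs the pot!'}]}
    print(PTBTokenizer().tokenize(captions))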
| 40.971014
| 114
| 0.520693
|
import os
import sys
import subprocess
import tempfile
import itertools
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.decode().split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
            if k not in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
| true
| true
|
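A minimal usage sketch for the PTBTokenizer record above (the import path and
image ids are assumptions; running it needs Java on PATH and
stanford-corenlp-3.4.1.jar next to the module):

from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer  # hypothetical path

captions_for_image = {
    "img_1": [{"caption": "A man riding a horse."},
              {"caption": "Someone rides a brown horse!"}],
}
tokenizer = PTBTokenizer()
tokens = tokenizer.tokenize(captions_for_image)
# expected: {"img_1": ["a man riding a horse", "someone rides a brown horse"]}
print(tokens)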
7904e47c5ed08d37d62f48abed03785720f1cbed
| 1,707
|
py
|
Python
|
.install/.backup/lib/apitools/base/py/util.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
.install/.backup/lib/apitools/base/py/util.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
.install/.backup/lib/apitools/base/py/util.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
"""Assorted utilities shared between parts of apitools."""
import collections
import httplib
import os
import types
import urllib2
from apitools.base.py import exceptions
__all__ = [
'DetectGae',
'DetectGce',
]
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
"""Determine whether or not we're running on GCE.
This is based on:
https://developers.google.com/compute/docs/instances#dmi
Returns:
True iff we're running on a GCE instance.
"""
try:
o = urllib2.urlopen('http://metadata.google.internal')
except urllib2.URLError:
return False
return o.getcode() == httplib.OK
def NormalizeScopes(scope_spec):
"""Normalize scope_spec to a set of strings."""
if isinstance(scope_spec, types.StringTypes):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
| 25.477612
| 74
| 0.68717
|
import collections
import httplib
import os
import types
import urllib2
from apitools.base.py import exceptions
__all__ = [
'DetectGae',
'DetectGce',
]
def DetectGae():
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
try:
o = urllib2.urlopen('http://metadata.google.internal')
except urllib2.URLError:
return False
return o.getcode() == httplib.OK
def NormalizeScopes(scope_spec):
if isinstance(scope_spec, types.StringTypes):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
| true
| true
|
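A short sketch of how the helpers above compose, under Python 2 (the module
imports httplib/urllib2); the scope strings are illustrative:

from apitools.base.py import util

scopes = util.NormalizeScopes("email profile")      # -> set(['email', 'profile'])
scopes |= util.NormalizeScopes(["cloud-platform"])  # any iterable works too
util.Typecheck(scopes, set)                         # returns scopes or raises TypecheckError
if util.DetectGce():                                # probes the GCE metadata server
    print("running on a GCE instance")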
7904e50d3d48ea15da9ed1983a74fb581ff749ea
| 8,480
|
py
|
Python
|
docs/conf.py
|
nicchub/PythonGithub
|
3af974c552f6b0e8a782a1499aba2a16d997b5d1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
nicchub/PythonGithub
|
3af974c552f6b0e8a782a1499aba2a16d997b5d1
|
[
"MIT"
] | 2
|
2015-02-06T02:48:24.000Z
|
2015-02-11T02:40:29.000Z
|
docs/conf.py
|
nicchub/PythonGithub
|
3af974c552f6b0e8a782a1499aba2a16d997b5d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Python Github documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 3 23:23:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Github'
copyright = u'2015, Nicolas Mendoza'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonGithubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PythonGithub.tex', u'Python Github Documentation',
u'Nicolas Mendoza', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythongithub', u'Python Github Documentation',
[u'Nicolas Mendoza'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PythonGithub', u'Python Github Documentation',
u'Nicolas Mendoza', 'PythonGithub', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 31.524164
| 79
| 0.71816
|
import sys
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Python Github'
copyright = u'2015, Nicolas Mendoza'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonGithubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PythonGithub.tex', u'Python Github Documentation',
u'Nicolas Mendoza', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythongithub', u'Python Github Documentation',
[u'Nicolas Mendoza'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PythonGithub', u'Python Github Documentation',
u'Nicolas Mendoza', 'PythonGithub', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
intersphinx_mapping = {'http://docs.python.org/': None}
| true
| true
|
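The conf.py above is consumed by sphinx-build; a minimal programmatic build
sketch for Sphinx 1.7+ (the source and output directories are assumptions):

from sphinx.cmd.build import build_main

# equivalent to: sphinx-build -b html docs docs/_build/html
exit_code = build_main(["-b", "html", "docs", "docs/_build/html"])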
7904e51c1e1cdca821d5ea07bad4b06ebca9cd0a
| 965
|
py
|
Python
|
utils/logger.py
|
pwentrys/SubstanceHelpers
|
8fb56158ee149792219e9cdb9479aaaed09a46bc
|
[
"MIT"
] | 2
|
2018-09-12T23:35:33.000Z
|
2019-10-09T06:56:17.000Z
|
utils/logger.py
|
pwentrys/SubstanceHelpers
|
8fb56158ee149792219e9cdb9479aaaed09a46bc
|
[
"MIT"
] | null | null | null |
utils/logger.py
|
pwentrys/SubstanceHelpers
|
8fb56158ee149792219e9cdb9479aaaed09a46bc
|
[
"MIT"
] | null | null | null |
import os
import platform
from datetime import datetime
# TODO test counter
# def test_count():
# return 0
# f"Count: {test_count()}\n"\
def serve_info():
return f"Stats\n" \
f"UTC: {datetime.utcnow().isoformat()}\n" \
f"\nMachine\n" \
f"Architecture: {platform.machine()}\n" \
f"Name: {platform.node()}\n" \
f"Platform: {platform.platform()}\n" \
f"CPU Model: {platform.processor()}\n" \
f"CPU Count: {os.cpu_count()}\n" \
f"Release: {platform.release()}\n" \
f"System: {platform.system()}\n" \
f"Version: {platform.version()}\n" \
f"\nPython\n" \
f"Branch: {platform.python_branch()}\n" \
f"Build: {platform.python_build()}\n" \
f"Compiler: {platform.python_compiler()}\n" \
f"Implementation: {platform.python_implementation()}\n" \
f"Revision: {platform.python_revision()}\n"
| 33.275862
| 68
| 0.550259
|
import os
import platform
from datetime import datetime
def serve_info():
return f"Stats\n" \
f"UTC: {datetime.utcnow().isoformat()}\n" \
f"\nMachine\n" \
f"Architecture: {platform.machine()}\n" \
f"Name: {platform.node()}\n" \
f"Platform: {platform.platform()}\n" \
f"CPU Model: {platform.processor()}\n" \
f"CPU Count: {os.cpu_count()}\n" \
f"Release: {platform.release()}\n" \
f"System: {platform.system()}\n" \
f"Version: {platform.version()}\n" \
f"\nPython\n" \
f"Branch: {platform.python_branch()}\n" \
f"Build: {platform.python_build()}\n" \
f"Compiler: {platform.python_compiler()}\n" \
f"Implementation: {platform.python_implementation()}\n" \
f"Revision: {platform.python_revision()}\n"
| true
| true
|
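serve_info() above just formats platform facts into one string; a trivial
sketch (the import path follows the record's file path, the output filename is
an assumption):

from utils.logger import serve_info

print(serve_info())                      # dump the report to stdout
with open("host_info.txt", "w") as fh:   # or persist it
    fh.write(serve_info())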
7904e56215887b382a340fb0acf47bf362658014
| 1,570
|
py
|
Python
|
spacy/lang/en/syntax_iterators.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 22,040
|
2016-10-03T11:58:15.000Z
|
2022-03-31T21:08:19.000Z
|
spacy/lang/en/syntax_iterators.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 6,927
|
2016-10-03T13:11:11.000Z
|
2022-03-31T17:01:25.000Z
|
spacy/lang/en/syntax_iterators.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 4,403
|
2016-10-04T03:36:33.000Z
|
2022-03-31T14:12:34.000Z
|
from typing import Union, Iterator, Tuple
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
labels = [
"oprd",
"nsubj",
"dobj",
"nsubjpass",
"pcomp",
"pobj",
"dative",
"appos",
"attr",
"ROOT",
]
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.i
yield word.left_edge.i, word.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.i
yield word.left_edge.i, word.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| 30.784314
| 81
| 0.56879
|
from typing import Union, Iterator, Tuple
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
labels = [
"oprd",
"nsubj",
"dobj",
"nsubjpass",
"pcomp",
"pobj",
"dative",
"appos",
"attr",
"ROOT",
]
doc = doclike.doc
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.i
yield word.left_edge.i, word.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
if head.dep in np_deps:
prev_end = word.i
yield word.left_edge.i, word.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| true
| true
|
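The iterator above backs Doc.noun_chunks for English pipelines; a small
sketch, assuming en_core_web_sm is installed:

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")
for chunk in doc.noun_chunks:            # served by noun_chunks() above
    print(chunk.text, chunk.root.dep_)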
7904e75d9efdd02b3aed6bd0b9bbf8ef5b82d42b
| 1,883
|
py
|
Python
|
tests/test_pcap_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:23.000Z
|
2019-10-10T06:11:23.000Z
|
tests/test_pcap_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pcap_eager.py
|
rjpower/tensorflow-io
|
39aa0b46cfaa403121fdddbd491a03d2f3190a87
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:11:24.000Z
|
2019-10-10T06:11:24.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""
Test PcapDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_io.pcap as pcap_io # pylint: disable=wrong-import-position
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
def test_pcap_input():
"""test_pcap_input
"""
print("Testing PcapDataset")
pcap_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_pcap", "http.pcap")
file_url = "file://" + pcap_filename
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v in dataset:
(packet_timestamp, packet_data) = v
if packets_total == 0:
assert packet_timestamp.numpy()[0] == 1084443427.311224 # we know this is the correct value in the test pcap file
assert len(packet_data.numpy()[0]) == 62 # we know this is the correct packet data buffer length in the test pcap file
packets_total += 1
assert packets_total == 43 # we know this is the correct number of packets in the test pcap file
if __name__ == "__main__":
  tf.test.main()
| 36.921569
| 124
| 0.709506
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_io.pcap as pcap_io
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
def test_pcap_input():
print("Testing PcapDataset")
pcap_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_pcap", "http.pcap")
file_url = "file://" + pcap_filename
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v in dataset:
(packet_timestamp, packet_data) = v
if packets_total == 0:
assert packet_timestamp.numpy()[0] == 1084443427.311224
assert len(packet_data.numpy()[0]) == 62
packets_total += 1
assert packets_total == 43
if __name__ == "__main__":
  tf.test.main()
| true
| true
|
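Outside the test, PcapDataset streams (timestamp, raw_bytes) pairs like any
tf.data pipeline; a sketch against an arbitrary capture (the path is an
assumption):

import tensorflow_io.pcap as pcap_io

dataset = pcap_io.PcapDataset(["file:///tmp/http.pcap"], batch=1)
for packet_timestamp, packet_data in dataset.take(3):
    print(packet_timestamp.numpy()[0], len(packet_data.numpy()[0]))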
7904e76f74a629539b1df4a55ac97db0a4cc7729
| 74,664
|
py
|
Python
|
vrchatapi/api/worlds_api.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 8
|
2021-08-25T02:35:30.000Z
|
2022-03-28T18:11:58.000Z
|
vrchatapi/api/worlds_api.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 1
|
2022-03-18T20:29:30.000Z
|
2022-03-18T20:35:05.000Z
|
vrchatapi/api/worlds_api.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 1
|
2022-01-11T10:49:12.000Z
|
2022-01-11T10:49:12.000Z
|
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.api_client import ApiClient, Endpoint as _Endpoint
from vrchatapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from vrchatapi.model.create_world_request import CreateWorldRequest
from vrchatapi.model.error import Error
from vrchatapi.model.instance import Instance
from vrchatapi.model.limited_world import LimitedWorld
from vrchatapi.model.update_world_request import UpdateWorldRequest
from vrchatapi.model.world import World
from vrchatapi.model.world_metadata import WorldMetadata
from vrchatapi.model.world_publish_status import WorldPublishStatus
class WorldsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [],
'endpoint_path': '/worlds',
'operation_id': 'create_world',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_world_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_world_request':
(CreateWorldRequest,),
},
'attribute_map': {
},
'location_map': {
'create_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'delete_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_active_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/active',
'operation_id': 'get_active_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_favorited_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/favorites',
'operation_id': 'get_favorited_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_recent_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/recent',
'operation_id': 'get_recent_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'get_world',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_instance_endpoint = _Endpoint(
settings={
'response_type': (Instance,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/{instanceId}',
'operation_id': 'get_world_instance',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
'instance_id',
],
'required': [
'world_id',
'instance_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'instance_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
'instance_id': 'instanceId',
},
'location_map': {
'world_id': 'path',
'instance_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_metadata_endpoint = _Endpoint(
settings={
'response_type': (WorldMetadata,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/metadata',
'operation_id': 'get_world_metadata',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_publish_status_endpoint = _Endpoint(
settings={
'response_type': (WorldPublishStatus,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'get_world_publish_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.publish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'publish_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.search_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds',
'operation_id': 'search_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'user',
'user_id',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'user',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('user',): {
"ME": "me"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'user':
(str,),
'user_id':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'user': 'user',
'user_id': 'userId',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'user': 'query',
'user_id': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.unpublish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'unpublish_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'update_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
'update_world_request',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'update_world_request':
(UpdateWorldRequest,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
'update_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_world(
self,
**kwargs
):
"""Create World # noqa: E501
Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_world(async_req=True)
>>> result = thread.get()
Keyword Args:
create_world_request (CreateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.create_world_endpoint.call_with_http_info(**kwargs)
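    # A hedged usage sketch for the endpoints wired up above (credentials and
    # the world id are hypothetical; Configuration/ApiClient follow the
    # generated client's conventions):
    #
    #   import vrchatapi
    #   configuration = vrchatapi.Configuration(username="user", password="pass")
    #   with vrchatapi.ApiClient(configuration) as api_client:
    #       worlds = WorldsApi(api_client)
    #       world = worlds.get_world("wrld_00000000-0000-0000-0000-000000000000")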
def delete_world(
self,
world_id,
**kwargs
):
"""Delete World # noqa: E501
Delete a world. Notice a world is never fully \"deleted\", only its ReleaseStatus is set to \"hidden\" and the linked Files are deleted. The WorldID is permanently reserved. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.delete_world_endpoint.call_with_http_info(**kwargs)
def get_active_worlds(
self,
**kwargs
):
"""List Active Worlds # noqa: E501
Search and list currently Active worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
def get_favorited_worlds(
self,
**kwargs
):
"""List Favorited Worlds # noqa: E501
Search and list favorited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_favorited_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
def get_recent_worlds(
self,
**kwargs
):
"""List Recent Worlds # noqa: E501
Search and list recently visited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_recent_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
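    # Usage sketch (hypothetical values; assumes an already-authenticated
    # api_client). Pagination is done via the n/offset query parameters:
    #
    #   api = WorldsApi(api_client)
    #   first_page = api.get_recent_worlds(n=60, offset=0)
    #   second_page = api.get_recent_worlds(n=60, offset=60)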
def get_world(
self,
world_id,
**kwargs
):
"""Get World by ID # noqa: E501
Get information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_endpoint.call_with_http_info(**kwargs)
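    # Usage sketch (hypothetical world ID). As documented above, the same call
    # can be made synchronously or via the async_req thread machinery:
    #
    #   world = api.get_world("wrld_12345")                   # blocking call
    #   thread = api.get_world("wrld_12345", async_req=True)  # background call
    #   world = thread.get()                                  # join and fetch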
def get_world_instance(
self,
world_id,
instance_id,
**kwargs
):
"""Get World Instance # noqa: E501
        Returns a world's instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_instance(world_id, instance_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
instance_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Instance
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
kwargs['instance_id'] = \
instance_id
return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
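    # Usage sketch (both IDs are hypothetical; the instance ID format is not
    # validated by this client):
    #
    #   instance = api.get_world_instance("wrld_12345", "12345")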
def get_world_metadata(
self,
world_id,
**kwargs
):
"""Get World Metadata # noqa: E501
        Returns a world's custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_metadata(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldMetadata
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
def get_world_publish_status(
self,
world_id,
**kwargs
):
"""Get World Publish Status # noqa: E501
        Returns a world's publish status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_publish_status(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldPublishStatus
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
def publish_world(
self,
world_id,
**kwargs
):
"""Publish World # noqa: E501
Publish a world. You can only publish one world per week. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.publish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.publish_world_endpoint.call_with_http_info(**kwargs)
def search_worlds(
self,
**kwargs
):
"""Search All Worlds # noqa: E501
Search and list any worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
            featured (str): Filters on featured results. [optional]
            sort (str): [optional] if omitted the server will use the default value of "popularity"
            user (str): Set to `me` for searching own worlds. [optional] if omitted the server will use the default value of "me"
            user_id (str): Filter by UserID. [optional]
            n (int): The number of objects to return. [optional] if omitted the server will use the default value of 60
            order (str): [optional] if omitted the server will use the default value of "descending"
            offset (int): A zero-based offset from the default object sorting from where search results start. [optional]
            search (str): Filters by world name. [optional]
            tag (str): Tags to include (comma-separated). Any of the tags needs to be present. [optional]
            notag (str): Tags to exclude (comma-separated). [optional]
            release_status (str): Filter by ReleaseStatus. [optional] if omitted the server will use the default value of "public"
            max_unity_version (str): The maximum Unity version supported by the asset. [optional]
            min_unity_version (str): The minimum Unity version supported by the asset. [optional]
            platform (str): The platform the asset supports. [optional]
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.search_worlds_endpoint.call_with_http_info(**kwargs)
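    # Usage sketch (hypothetical filter values): list up to 10 public worlds
    # tagged "game", newest publication first:
    #
    #   worlds = api.search_worlds(tag="game", release_status="public",
    #                              sort="publicationDate", order="descending",
    #                              n=10)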
def unpublish_world(
self,
world_id,
**kwargs
):
"""Unpublish World # noqa: E501
Unpublish a world. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unpublish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
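    # Usage sketch (hypothetical ID): publishing is rate-limited to one world
    # per week and can be reversed with unpublish_world:
    #
    #   api.publish_world("wrld_12345")
    #   api.unpublish_world("wrld_12345")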
def update_world(
self,
world_id,
**kwargs
):
"""Update World # noqa: E501
Update information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
update_world_request (UpdateWorldRequest): [optional]
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number is provided, it is the total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.update_world_endpoint.call_with_http_info(**kwargs)
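    # Usage sketch (hypothetical ID and field; the exact UpdateWorldRequest
    # attributes are assumptions, see the generated model for the real ones):
    #
    #   from vrchatapi.model.update_world_request import UpdateWorldRequest
    #   world = api.update_world(
    #       "wrld_12345",
    #       update_world_request=UpdateWorldRequest(description="New blurb"))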
| 36.510513
| 201
| 0.450378
|
import re
import sys
from vrchatapi.api_client import ApiClient, Endpoint as _Endpoint
from vrchatapi.model_utils import (
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from vrchatapi.model.create_world_request import CreateWorldRequest
from vrchatapi.model.error import Error
from vrchatapi.model.instance import Instance
from vrchatapi.model.limited_world import LimitedWorld
from vrchatapi.model.update_world_request import UpdateWorldRequest
from vrchatapi.model.world import World
from vrchatapi.model.world_metadata import WorldMetadata
from vrchatapi.model.world_publish_status import WorldPublishStatus
class WorldsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [],
'endpoint_path': '/worlds',
'operation_id': 'create_world',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_world_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_world_request':
(CreateWorldRequest,),
},
'attribute_map': {
},
'location_map': {
'create_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'delete_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_active_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/active',
'operation_id': 'get_active_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_favorited_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/favorites',
'operation_id': 'get_favorited_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_recent_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/recent',
'operation_id': 'get_recent_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'get_world',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_instance_endpoint = _Endpoint(
settings={
'response_type': (Instance,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/{instanceId}',
'operation_id': 'get_world_instance',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
'instance_id',
],
'required': [
'world_id',
'instance_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'instance_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
'instance_id': 'instanceId',
},
'location_map': {
'world_id': 'path',
'instance_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_metadata_endpoint = _Endpoint(
settings={
'response_type': (WorldMetadata,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/metadata',
'operation_id': 'get_world_metadata',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_publish_status_endpoint = _Endpoint(
settings={
'response_type': (WorldPublishStatus,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'get_world_publish_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.publish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'publish_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.search_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds',
'operation_id': 'search_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'user',
'user_id',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'user',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('user',): {
"ME": "me"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'user':
(str,),
'user_id':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'user': 'user',
'user_id': 'userId',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'user': 'query',
'user_id': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.unpublish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'unpublish_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'update_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
'update_world_request',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'update_world_request':
(UpdateWorldRequest,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
'update_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_world(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.create_world_endpoint.call_with_http_info(**kwargs)
def delete_world(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.delete_world_endpoint.call_with_http_info(**kwargs)
def get_active_worlds(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
def get_favorited_worlds(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
def get_recent_worlds(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
def get_world(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_endpoint.call_with_http_info(**kwargs)
def get_world_instance(
self,
world_id,
instance_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
kwargs['instance_id'] = \
instance_id
return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
def get_world_metadata(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
def get_world_publish_status(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
def publish_world(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.publish_world_endpoint.call_with_http_info(**kwargs)
def search_worlds(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.search_worlds_endpoint.call_with_http_info(**kwargs)
def unpublish_world(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
def update_world(
self,
world_id,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.update_world_endpoint.call_with_http_info(**kwargs)
| true
| true
|
7904e7d6114dc8ebfdd93c3af25693fdbe68632c
| 1,441
|
py
|
Python
|
pedrec/visualizers/skeleton_3d_visualizer.py
|
noboevbo/PedRec
|
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
|
[
"MIT"
] | 1
|
2022-03-09T01:24:10.000Z
|
2022-03-09T01:24:10.000Z
|
pedrec/visualizers/skeleton_3d_visualizer.py
|
noboevbo/PedRec
|
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
|
[
"MIT"
] | null | null | null |
pedrec/visualizers/skeleton_3d_visualizer.py
|
noboevbo/PedRec
|
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC, SKELETON_PEDREC_JOINT_COLORS, SKELETON_PEDREC_LIMB_COLORS
from pedrec.visualizers.visualization_helper_3d import draw_origin_3d, draw_grid_3d
def add_skeleton_3d_to_axes(ax: Axes3D, skeleton_3d: np.ndarray, size: float = 2, min_score: float = 0.3):
# Joints
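    # Columns are assumed to be (x, y, z, score); y and z are swapped below so
    # the data's vertical axis maps onto matplotlib's Z axis (a plotting
    # convention inferred from the code, not stated in the source).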
xs = skeleton_3d[:, 0]
ys = skeleton_3d[:, 2]
zs = skeleton_3d[:, 1]
colors = []
for idx, joint in enumerate(skeleton_3d):
if joint[3] < min_score: # score
colors.append([0, 0, 0, 0])
else:
colors.append(SKELETON_PEDREC_JOINT_COLORS[idx].rgba_float_list)
ax.scatter(xs, ys, zs, c=colors, s=size)
# Limbs
for idx, pair in enumerate(SKELETON_PEDREC):
if (skeleton_3d[pair[0:2], 3] >= min_score).all():
ax.plot(skeleton_3d[pair[0:2], 0], skeleton_3d[pair[0:2], 2], skeleton_3d[pair[0:2], 1], linewidth=size, c=SKELETON_PEDREC_LIMB_COLORS[idx].rgba_float_list)
def get_skeleton_3d_figure(skeleton_3d: np.ndarray):
# Preparation
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
draw_grid_3d(ax)
draw_origin_3d(ax)
add_skeleton_3d_to_axes(ax, skeleton_3d)
return fig, ax
def plot_skeleton_3d(skeleton_3d: np.ndarray):
fig, ax = get_skeleton_3d_figure(skeleton_3d)
plt.show()
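# Usage sketch (synthetic data): the joint count is taken from the per-joint
# color table, and the fourth column is the confidence score compared against
# min_score:
#
#   num_joints = len(SKELETON_PEDREC_JOINT_COLORS)
#   skeleton = np.concatenate(
#       [np.random.rand(num_joints, 3), np.ones((num_joints, 1))], axis=1)
#   plot_skeleton_3d(skeleton)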
| 35.146341
| 168
| 0.704372
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC, SKELETON_PEDREC_JOINT_COLORS, SKELETON_PEDREC_LIMB_COLORS
from pedrec.visualizers.visualization_helper_3d import draw_origin_3d, draw_grid_3d
def add_skeleton_3d_to_axes(ax: Axes3D, skeleton_3d: np.ndarray, size: float = 2, min_score: float = 0.3):
xs = skeleton_3d[:, 0]
ys = skeleton_3d[:, 2]
zs = skeleton_3d[:, 1]
colors = []
for idx, joint in enumerate(skeleton_3d):
if joint[3] < min_score:
colors.append([0, 0, 0, 0])
else:
colors.append(SKELETON_PEDREC_JOINT_COLORS[idx].rgba_float_list)
ax.scatter(xs, ys, zs, c=colors, s=size)
for idx, pair in enumerate(SKELETON_PEDREC):
if (skeleton_3d[pair[0:2], 3] >= min_score).all():
ax.plot(skeleton_3d[pair[0:2], 0], skeleton_3d[pair[0:2], 2], skeleton_3d[pair[0:2], 1], linewidth=size, c=SKELETON_PEDREC_LIMB_COLORS[idx].rgba_float_list)
def get_skeleton_3d_figure(skeleton_3d: np.ndarray):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
draw_grid_3d(ax)
draw_origin_3d(ax)
add_skeleton_3d_to_axes(ax, skeleton_3d)
return fig, ax
def plot_skeleton_3d(skeleton_3d: np.ndarray):
fig, ax = get_skeleton_3d_figure(skeleton_3d)
plt.show()
| true
| true
|
7904e8f22b074283994c1053d21e55d29e060443
| 17,613
|
py
|
Python
|
lib/surface/compute/instances/create_with_container.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/instances/create_with_container.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/instances/create_with_container.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating VM instances running Docker images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import containers_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
from six.moves import zip
def _Args(parser, deprecate_maintenance_policy=False,
container_mount_enabled=False):
"""Add flags shared by all release tracks."""
parser.display_info.AddFormat(instances_flags.DEFAULT_LIST_FORMAT)
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(
parser, True, container_mount_enabled=container_mount_enabled)
instances_flags.AddCreateDiskArgs(
parser, container_mount_enabled=container_mount_enabled)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(parser, instances=True)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(
parser, deprecate=deprecate_maintenance_policy)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddServiceAccountAndScopeArgs(parser, False)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddKonletArgs(parser)
instances_flags.AddPublicDnsArgs(parser, instance=True)
instances_flags.AddPublicPtrArgs(parser, instance=True)
instances_flags.AddImageArgs(parser)
labels_util.AddCreateLabelsFlags(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
instances_flags.INSTANCES_ARG.AddArgument(parser, operation_type='create')
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE = (
instances_flags.MakeSourceInstanceTemplateArg())
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)
parser.display_info.AddCacheUpdater(completers.InstancesCompleter)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class CreateWithContainer(base.CreateCommand):
"""Command for creating VM instances running container images."""
@staticmethod
def Args(parser):
"""Register parser args."""
_Args(parser)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)
def _ValidateArgs(self, args):
instances_flags.ValidateNicFlags(args)
instances_flags.ValidateNetworkTierArgs(args)
instances_flags.ValidateKonletArgs(args)
instances_flags.ValidateDiskCommonFlags(args)
instances_flags.ValidateServiceAccountAndScopeArgs(args)
if instance_utils.UseExistingBootDisk(args.disk or []):
raise exceptions.InvalidArgumentException(
'--disk',
'Boot disk specified for containerized VM.')
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                  'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfaces(args, client, holder,
instance_refs, skip_defaults)
def GetNetworkInterfaces(
self, args, resources, client, holder, instance_refs, skip_defaults):
if args.network_interface:
return instance_utils.CreateNetworkInterfaceMessages(
resources=resources,
compute_client=client,
network_interface_arg=args.network_interface,
instance_refs=instance_refs)
return self._GetNetworkInterfaces(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
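# Example invocation of the GA command (values are illustrative; they mirror
# the EXAMPLES section in detailed_help at the bottom of this module):
#
#   $ gcloud compute instances create-with-container instance-1 \
#       --zone=us-central1-a \
#       --container-image=gcr.io/google-containers/busybox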
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateWithContainerBeta(CreateWithContainer):
"""Command for creating VM instances running container images."""
@staticmethod
def Args(parser):
"""Register parser args."""
_Args(parser, container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
def _ValidateArgs(self, args):
instances_flags.ValidateLocalSsdFlags(args)
super(CreateWithContainerBeta, self)._ValidateArgs(args)
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                  'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateWithContainerAlpha(CreateWithContainerBeta):
"""Alpha version of compute instances create-with-container command."""
@staticmethod
def Args(parser):
_Args(parser, deprecate_maintenance_policy=True,
container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddLocalNvdimmArgs(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.ALPHA)
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfacesAlpha(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
instances_flags.ValidatePublicDnsFlags(args)
instances_flags.ValidatePublicPtrFlags(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
CreateWithContainer.detailed_help = {
'brief':
"""\
        Creates Google Compute Engine virtual machine instances running
container images.
""",
'DESCRIPTION':
"""\
*{command}* creates Google Compute Engine virtual
        machines that run a Docker image. For example:
$ {command} instance-1 --zone us-central1-a \
--container-image=gcr.io/google-containers/busybox
creates an instance called instance-1, in the us-central1-a zone,
running the 'busybox' image.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES':
"""\
To run the gcr.io/google-containers/busybox image on an instance named
'instance-1' that executes 'echo "Hello world"' as a run command, run:
$ {command} instance-1 \
--container-image=gcr.io/google-containers/busybox \
--container-command='echo "Hello world"'
To run the gcr.io/google-containers/busybox image in privileged mode,
run:
$ {command} instance-1 \
            --container-image=gcr.io/google-containers/busybox \
--container-privileged
"""
}
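# Illustrative invocation exercising the container-mount-disk path that the
# Beta/Alpha tracks wire up above. The flag key syntax (name=, size=,
# mount-path=) is an assumption based on the AddCreateDiskArgs /
# AddContainerMountDiskFlag registrations, not taken from this file:
#   $ gcloud beta compute instances create-with-container instance-1 \
#       --zone us-central1-a \
#       --container-image=gcr.io/google-containers/busybox \
#       --create-disk name=data-disk,size=10GB \
#       --container-mount-disk name=data-disk,mount-path="/data"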
| 43.813433
| 81
| 0.732925
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import containers_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
from six.moves import zip
def _Args(parser, deprecate_maintenance_policy=False,
container_mount_enabled=False):
parser.display_info.AddFormat(instances_flags.DEFAULT_LIST_FORMAT)
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(
parser, True, container_mount_enabled=container_mount_enabled)
instances_flags.AddCreateDiskArgs(
parser, container_mount_enabled=container_mount_enabled)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(parser, instances=True)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(
parser, deprecate=deprecate_maintenance_policy)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddServiceAccountAndScopeArgs(parser, False)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddKonletArgs(parser)
instances_flags.AddPublicDnsArgs(parser, instance=True)
instances_flags.AddPublicPtrArgs(parser, instance=True)
instances_flags.AddImageArgs(parser)
labels_util.AddCreateLabelsFlags(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
instances_flags.INSTANCES_ARG.AddArgument(parser, operation_type='create')
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE = (
instances_flags.MakeSourceInstanceTemplateArg())
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)
parser.display_info.AddCacheUpdater(completers.InstancesCompleter)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class CreateWithContainer(base.CreateCommand):
@staticmethod
def Args(parser):
_Args(parser)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)
def _ValidateArgs(self, args):
instances_flags.ValidateNicFlags(args)
instances_flags.ValidateNetworkTierArgs(args)
instances_flags.ValidateKonletArgs(args)
instances_flags.ValidateDiskCommonFlags(args)
instances_flags.ValidateServiceAccountAndScopeArgs(args)
if instance_utils.UseExistingBootDisk(args.disk or []):
raise exceptions.InvalidArgumentException(
'--disk',
'Boot disk specified for containerized VM.')
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                  'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfaces(args, client, holder,
instance_refs, skip_defaults)
def GetNetworkInterfaces(
self, args, resources, client, holder, instance_refs, skip_defaults):
if args.network_interface:
return instance_utils.CreateNetworkInterfaceMessages(
resources=resources,
compute_client=client,
network_interface_arg=args.network_interface,
instance_refs=instance_refs)
return self._GetNetworkInterfaces(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateWithContainerBeta(CreateWithContainer):
@staticmethod
def Args(parser):
_Args(parser, container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
def _ValidateArgs(self, args):
instances_flags.ValidateLocalSsdFlags(args)
super(CreateWithContainerBeta, self)._ValidateArgs(args)
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                  'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateWithContainerAlpha(CreateWithContainerBeta):
@staticmethod
def Args(parser):
_Args(parser, deprecate_maintenance_policy=True,
container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddLocalNvdimmArgs(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.ALPHA)
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfacesAlpha(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
instances_flags.ValidatePublicDnsFlags(args)
instances_flags.ValidatePublicPtrFlags(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
CreateWithContainer.detailed_help = {
'brief':
"""\
        Creates Google Compute Engine virtual machine instances running
container images.
""",
'DESCRIPTION':
"""\
*{command}* creates Google Compute Engine virtual
        machines that run a Docker image. For example:
$ {command} instance-1 --zone us-central1-a \
--container-image=gcr.io/google-containers/busybox
creates an instance called instance-1, in the us-central1-a zone,
running the 'busybox' image.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES':
"""\
To run the gcr.io/google-containers/busybox image on an instance named
'instance-1' that executes 'echo "Hello world"' as a run command, run:
$ {command} instance-1 \
--container-image=gcr.io/google-containers/busybox \
--container-command='echo "Hello world"'
To run the gcr.io/google-containers/busybox image in privileged mode,
run:
$ {command} instance-1 \
            --container-image=gcr.io/google-containers/busybox \
--container-privileged
"""
}
| true
| true
|
7904e95953759c21dc469a70163f0ce4ac5f2d14
| 590
|
py
|
Python
|
passage/theano_utils.py
|
vishalbelsare/Passage
|
af6e100804dfe332c88bd2cd192e93a807377887
|
[
"MIT"
] | 597
|
2015-01-15T19:23:32.000Z
|
2021-08-29T17:53:22.000Z
|
passage/theano_utils.py
|
v-mk-s/Passage
|
af6e100804dfe332c88bd2cd192e93a807377887
|
[
"MIT"
] | 34
|
2015-01-22T13:50:21.000Z
|
2018-06-13T14:58:45.000Z
|
passage/theano_utils.py
|
v-mk-s/Passage
|
af6e100804dfe332c88bd2cd192e93a807377887
|
[
"MIT"
] | 152
|
2015-01-17T02:19:22.000Z
|
2022-02-05T15:10:04.000Z
|
import numpy as np
import theano
def intX(X):
return np.asarray(X, dtype=np.int32)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def sharedX(X, dtype=theano.config.floatX, name=None):
return theano.shared(np.asarray(X, dtype=dtype), name=name)
def shared0s(shape, dtype=theano.config.floatX, name=None):
return sharedX(np.zeros(shape), dtype=dtype, name=name)
def sharedNs(shape, n, dtype=theano.config.floatX, name=None):
return sharedX(np.ones(shape)*n, dtype=dtype, name=name)
def downcast_float(X):
return np.asarray(X, dtype=np.float32)
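# Minimal usage sketch of the helpers above (shapes and values are
# illustrative only, not part of the original module):
#   W = sharedX(np.random.randn(4, 4), name='W')  # shared float matrix
#   b = shared0s((4,), name='b')                  # zero-initialised vector
#   twos = sharedNs((4,), 2.)                     # vector filled with 2.0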
| 28.095238
| 63
| 0.727119
|
import numpy as np
import theano
def intX(X):
return np.asarray(X, dtype=np.int32)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def sharedX(X, dtype=theano.config.floatX, name=None):
return theano.shared(np.asarray(X, dtype=dtype), name=name)
def shared0s(shape, dtype=theano.config.floatX, name=None):
return sharedX(np.zeros(shape), dtype=dtype, name=name)
def sharedNs(shape, n, dtype=theano.config.floatX, name=None):
return sharedX(np.ones(shape)*n, dtype=dtype, name=name)
def downcast_float(X):
return np.asarray(X, dtype=np.float32)
| true
| true
|
7904eb091f788198c0e58d13a5d21193df23f8bc
| 2,982
|
py
|
Python
|
saliency/guided_backprop.py
|
leomauro/history-of-interpretation
|
6235f4b875505ac7a6efb10f3c4e5a6d3c7b25ec
|
[
"Apache-2.0"
] | 30
|
2020-11-27T04:06:50.000Z
|
2021-12-09T02:42:15.000Z
|
saliency/guided_backprop.py
|
leomauro/history-of-interpretation
|
6235f4b875505ac7a6efb10f3c4e5a6d3c7b25ec
|
[
"Apache-2.0"
] | null | null | null |
saliency/guided_backprop.py
|
leomauro/history-of-interpretation
|
6235f4b875505ac7a6efb10f3c4e5a6d3c7b25ec
|
[
"Apache-2.0"
] | 8
|
2020-11-27T12:33:15.000Z
|
2021-02-15T05:46:13.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilites to computed GuidedBackprop SaliencyMasks"""
from .base import SaliencyMask
import tensorflow.compat.v1 as tf
class GuidedBackprop(SaliencyMask):
"""A SaliencyMask class that computes saliency masks with GuidedBackProp.
This implementation copies the TensorFlow graph to a new graph with the ReLU
gradient overwritten as in the paper:
https://arxiv.org/abs/1412.6806
Thanks to Chris Olah for generously sharing his implementation of the ReLU
backprop.
"""
GuidedReluRegistered = False
def __init__(self,
graph,
session,
y,
x,
tmp_ckpt_path='/tmp/guided_backprop_ckpt'):
"""Constructs a GuidedBackprop SaliencyMask."""
super(GuidedBackprop, self).__init__(graph, session, y, x)
self.x = x
if GuidedBackprop.GuidedReluRegistered is False:
#### Acknowledgement to Chris Olah ####
@tf.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
gate_g = tf.cast(grad > 0, "float32")
gate_y = tf.cast(op.outputs[0] > 0, "float32")
return gate_y * gate_g * grad
GuidedBackprop.GuidedReluRegistered = True
with graph.as_default():
saver = tf.train.Saver()
saver.save(session, tmp_ckpt_path)
graph_def = graph.as_graph_def()
self.guided_graph = tf.Graph()
with self.guided_graph.as_default():
self.guided_sess = tf.Session(graph = self.guided_graph)
with self.guided_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
# Import the graph def, and all the variables.
tf.import_graph_def(graph_def, name='')
saver.restore(self.guided_sess, tmp_ckpt_path)
imported_y = self.guided_graph.get_tensor_by_name(y.name)
imported_x = self.guided_graph.get_tensor_by_name(x.name)
self.guided_grads_node = tf.gradients(imported_y, imported_x)[0]
def GetMask(self, x_value, feed_dict = {}):
"""Returns a GuidedBackprop mask."""
with self.guided_graph.as_default():
# Move all the feed dict tensor keys to refer to the same tensor on the
# new graph.
guided_feed_dict = {}
for tensor in feed_dict:
guided_feed_dict[tensor.name] = feed_dict[tensor]
guided_feed_dict[self.x.name] = [x_value]
return self.guided_sess.run(
self.guided_grads_node, feed_dict = guided_feed_dict)[0]
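# Hypothetical usage with an existing TF1 model. `graph`, `sess`, `y_tensor`,
# `x_tensor` and `img` are placeholders for objects defined elsewhere; they
# are not part of this module:
#   gb = GuidedBackprop(graph, sess, y=y_tensor, x=x_tensor)
#   mask = gb.GetMask(img)  # guided gradient of y w.r.t. one input image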
| 35.5
| 78
| 0.697183
|
from .base import SaliencyMask
import tensorflow.compat.v1 as tf
class GuidedBackprop(SaliencyMask):
GuidedReluRegistered = False
def __init__(self,
graph,
session,
y,
x,
tmp_ckpt_path='/tmp/guided_backprop_ckpt'):
super(GuidedBackprop, self).__init__(graph, session, y, x)
self.x = x
if GuidedBackprop.GuidedReluRegistered is False:
2")
gate_y = tf.cast(op.outputs[0] > 0, "float32")
return gate_y * gate_g * grad
GuidedBackprop.GuidedReluRegistered = True
with graph.as_default():
saver = tf.train.Saver()
saver.save(session, tmp_ckpt_path)
graph_def = graph.as_graph_def()
self.guided_graph = tf.Graph()
with self.guided_graph.as_default():
self.guided_sess = tf.Session(graph = self.guided_graph)
with self.guided_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
tf.import_graph_def(graph_def, name='')
saver.restore(self.guided_sess, tmp_ckpt_path)
imported_y = self.guided_graph.get_tensor_by_name(y.name)
imported_x = self.guided_graph.get_tensor_by_name(x.name)
self.guided_grads_node = tf.gradients(imported_y, imported_x)[0]
def GetMask(self, x_value, feed_dict = {}):
with self.guided_graph.as_default():
guided_feed_dict = {}
for tensor in feed_dict:
guided_feed_dict[tensor.name] = feed_dict[tensor]
guided_feed_dict[self.x.name] = [x_value]
return self.guided_sess.run(
self.guided_grads_node, feed_dict = guided_feed_dict)[0]
| true
| true
|
7904ec3929a26d2ddf1cc15de1546c69e3b9ac29
| 12,683
|
py
|
Python
|
utils/calc_fall_flush.py
|
NoellePatterson/func-flow-plot
|
196d58ac87c137b42063ac718ea296faaf148307
|
[
"MIT"
] | null | null | null |
utils/calc_fall_flush.py
|
NoellePatterson/func-flow-plot
|
196d58ac87c137b42063ac718ea296faaf148307
|
[
"MIT"
] | null | null | null |
utils/calc_fall_flush.py
|
NoellePatterson/func-flow-plot
|
196d58ac87c137b42063ac718ea296faaf148307
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as ip
from scipy.ndimage import gaussian_filter1d
from utils.helpers import find_index, peakdet, replace_nan
from params import fall_params
def calc_fall_flush_timings_durations(flow_matrix, summer_timings):
max_zero_allowed_per_year = fall_params['max_zero_allowed_per_year']
max_nan_allowed_per_year = fall_params['max_nan_allowed_per_year']
min_flow_rate = fall_params['min_flow_rate']
sigma = fall_params['sigma'] # Smaller filter to find fall flush peak
wet_sigma = fall_params['wet_sigma'] # Larger filter to find wet season peak
peak_sensitivity = fall_params['peak_sensitivity'] # smaller is more peak
max_flush_duration = fall_params['max_flush_duration'] # Maximum duration from start to end, for fall flush peak
wet_threshold_perc = fall_params['wet_threshold_perc'] # Return to wet season flow must be certain percentage of that year's max flow
flush_threshold_perc = fall_params['flush_threshold_perc'] # Size of flush peak, from rising limb to top of peak, has great enough change
min_flush_threshold = fall_params['min_flush_threshold']
date_cutoff = fall_params['date_cutoff'] # Latest accepted date for fall flush, in Julian Date counting from Oct 1st = 0. (i.e. Dec 15th = 75)
start_dates = []
wet_dates = []
durations = []
mags = []
for column_number, column_flow in enumerate(flow_matrix[0]):
start_dates.append(None)
wet_dates.append(None)
durations.append(None)
mags.append(None)
"""Check to see if water year has more than allowed nan or zeros"""
if np.isnan(flow_matrix[:, column_number]).sum() > max_nan_allowed_per_year or np.count_nonzero(flow_matrix[:, column_number]==0) > max_zero_allowed_per_year or max(flow_matrix[:, column_number]) < min_flow_rate:
            continue
"""Get flow data"""
flow_data = flow_matrix[:, column_number]
x_axis = list(range(len(flow_data)))
"""Interpolate between None values"""
flow_data = replace_nan(flow_data)
"""Return to Wet Season"""
wet_filter_data = gaussian_filter1d(flow_data, wet_sigma)
        return_date = return_to_wet_date(wet_filter_data, wet_threshold_perc)
        if return_date is None:
            # no wet-season return found for this year; leave entries as None
            continue
        wet_dates[-1] = return_date + 10
"""Filter noise data with small sigma to find fall flush hump"""
filter_data = gaussian_filter1d(flow_data, sigma)
"""Fit spline"""
x_axis = list(range(len(filter_data)))
spl = ip.UnivariateSpline(x_axis, filter_data, k=3, s=3)
"""Find the peaks and valleys of the filtered data"""
mean_flow = np.nanmean(filter_data)
maxarray, minarray = peakdet(spl(x_axis), mean_flow * peak_sensitivity)
"""Find max and min of filtered flow data"""
max_flow = max(filter_data[20:])
max_flow_index = find_index(filter_data[20:], max_flow) + 20
min_flow = min(wet_filter_data[:max_flow_index])
"""If could not find any max and find"""
if not list(maxarray) or not list(minarray) or minarray[0][0] > max_flow_index:
            continue
"""Get flow magnitude threshold from previous summer's baseflow"""
baseflows = []
if column_number == 0:
wet_date = wet_dates[0]
baseflow = list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
else:
summer_date = summer_timings[column_number -1]
if wet_dates[column_number] > 20:
wet_date = wet_dates[column_number] - 20
baseflow = list(flow_matrix[summer_date:,column_number -1]) + list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
"""Get fall flush peak"""
counter = 0
half_duration = int(max_flush_duration/2) # Only test duration for first half of fall flush peak
if bs_med > 25:
min_flush_magnitude = bs_med * 1.5 # if median baseflow is large (>25), magnitude threshold is 50% above median baseflow of previous summer
else:
min_flush_magnitude = bs_med * 2 # otherwise magnitude threshold is 100% above median baseflow of previous summer
if min_flush_magnitude < min_flush_threshold:
min_flush_magnitude = min_flush_threshold
for flow_index in maxarray:
if counter == 0:
if flow_index[0] < half_duration and flow_index[0] != 0 and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
"""if index found is before the half duration allowed"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif bool((flow_index[1] - spl(maxarray[counter][0] - half_duration)) / flow_index[1] > flush_threshold_perc or minarray[counter][0] - maxarray[counter][0] < half_duration) and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
"""If peak and valley is separted by half duration, or half duration to the left is less than 30% of its value"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif counter == len(minarray):
start_dates[-1]=None
mags[-1]=None
                break
elif bool(minarray[counter][0] - maxarray[counter][0] < half_duration or maxarray[counter][0] - minarray[counter-1][0] < half_duration) and bool(flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff):
"""valley and peak are distanced by less than half dur from either side"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif (spl(flow_index[0] - half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and (spl(flow_index[0] + half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff:
"""both side of flow value at the peak + half duration index fall below flush_threshold_perc"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
counter = counter + 1
"""Check to see if last start_date falls behind the max_allowed_date"""
if bool(start_dates[-1] is None or start_dates[-1] > wet_dates[-1]) and wet_dates[-1]:
start_dates[-1] = None
mags[-1] = None
"""Get duration of each fall flush"""
current_duration, left, right = calc_fall_flush_durations_2(filter_data, start_dates[-1])
durations[-1] = current_duration
_plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude)
return start_dates, mags, wet_dates, durations
def calc_fall_flush_durations(flow_data, wet_filter_data, date):
duration_left = None
duration_right = None
duration = None
if date:
date = int(date)
for index_left, flow_left in enumerate(reversed(flow_data[:date])):
if flow_left < wet_filter_data[date - index_left]:
duration_left = index_left
break
for index_right, flow_right in enumerate(flow_data[date:]):
if flow_right < wet_filter_data[date + index_right]:
duration_right = index_right
break
if duration_left and duration_right:
duration = duration_left + duration_right
else:
duration = None
return duration
def calc_fall_flush_durations_2(filter_data, date):
"""Left side sharp"""
der_percent_threshold_left = 50 # Slope of rising limb (i.e. derivative) must be "sharp"
flow_percent_threshold_left = 80
"""Right side mellow"""
der_percent_threshold_right = 30 # Slope of falling limb (i.e. derivative) has lower requirement to be part of flush duration
flow_percent_threshold_right = 80
duration = None
left = 0
right = 0
if date or date == 0:
date = int(date)
left_maxarray, left_minarray = peakdet(filter_data[:date], 0.01)
right_maxarray, right_minarray = peakdet(filter_data[date:], 0.01)
if not list(left_minarray):
left = 0
else:
left = int(left_minarray[-1][0])
if not list(right_minarray):
right = 0
else:
right = int(date - 2 + right_minarray[0][0])
if date - left > 10:
"""create spline, and find derivative"""
x_axis_left = list(range(len(filter_data[left:date])))
spl_left = ip.UnivariateSpline(x_axis_left, filter_data[left:date], k=3, s=3)
spl_first_left = spl_left.derivative(1)
"""check if derivative value falls below certain threshold"""
spl_first_left_median = np.nanpercentile(spl_first_left(x_axis_left), der_percent_threshold_left)
"""check if actual value falls below threshold, avoiding the rounded peak"""
median_left = np.nanpercentile(list(set(filter_data[left:date])), flow_percent_threshold_left)
for index_left, der in enumerate(reversed(spl_first_left(x_axis_left))):
# print(der < spl_first_left_median, filter_data[date - index_left] < median_left)
if der < spl_first_left_median and filter_data[date - index_left] < median_left:
left = date - index_left
break
if right - date > 10:
x_axis_right = list(range(len(filter_data[date:right])))
spl_right = ip.UnivariateSpline(x_axis_right, filter_data[date:right], k=3, s=3)
spl_first_right = spl_right.derivative(1)
spl_first_right_median = abs(np.nanpercentile(spl_first_right(x_axis_right), der_percent_threshold_right))
median_right = np.nanpercentile(list(set(filter_data[date:right])), flow_percent_threshold_right)
for index_right, der in enumerate(spl_first_right(x_axis_right)):
# print(date+index_right, der < spl_first_right_median, filter_data[date + index_right] < median_right)
if abs(der) < spl_first_right_median and filter_data[date + index_right] < median_right:
right = date + index_right
break
if left:
duration = int(date - left)
elif not left and right:
duration = int(right - date)
else:
duration = 0
return duration, left, right
def return_to_wet_date(wet_filter_data, wet_threshold_perc):
max_wet_peak_mag = max(wet_filter_data[20:])
max_wet_peak_index = find_index(wet_filter_data, max_wet_peak_mag)
min_wet_peak_mag = min(wet_filter_data[:max_wet_peak_index])
"""Loop backwards from max flow index to beginning, to search for wet season"""
for index, value in enumerate(reversed(wet_filter_data[:max_wet_peak_index])):
        if index == len(wet_filter_data[:max_wet_peak_index]) - 1:
return None
elif (value - min_wet_peak_mag) / (max_wet_peak_mag - min_wet_peak_mag) < wet_threshold_perc:
"""If value percentage falls below wet_threshold_perc"""
return_date = max_wet_peak_index - index
return return_date
def _plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude):
plt.figure()
#plt.plot(x_axis, flow_data, '-')
plt.plot(x_axis, filter_data, '-', color='#5993E5') #greyish blue
#plt.plot(x_axis, wet_filter_data)
# for data in maxarray:
# plt.plot(data[0], data[1], '^')
# for data in minarray:
# plt.plot(data[0], data[1], 'v')
if start_dates[-1] is not None:
plt.axvline(start_dates[-1], color='blue', ls=':')
plt.axvline(wet_dates[-1], color="green", ls=':')
#plt.axvline(left, ls=":")
#plt.axvline(right, ls=":")
if min_flush_magnitude is not None:
plt.axhline(min_flush_magnitude, ls=':', color = 'red')
#plt.yscale('log')
plt.savefig('post_processedFiles/Boxplots/{}.png'.format(column_number))
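# Sketch of the expected call pattern (an assumption about the caller:
# flow_matrix is a days-by-water-years array and summer_timings comes from the
# companion summer-baseflow metric):
#   starts, mags, wets, durs = calc_fall_flush_timings_durations(
#       flow_matrix, summer_timings)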
| 48.59387
| 350
| 0.653552
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as ip
from scipy.ndimage import gaussian_filter1d
from utils.helpers import find_index, peakdet, replace_nan
from params import fall_params
def calc_fall_flush_timings_durations(flow_matrix, summer_timings):
max_zero_allowed_per_year = fall_params['max_zero_allowed_per_year']
max_nan_allowed_per_year = fall_params['max_nan_allowed_per_year']
min_flow_rate = fall_params['min_flow_rate']
sigma = fall_params['sigma']
wet_sigma = fall_params['wet_sigma']
peak_sensitivity = fall_params['peak_sensitivity']
max_flush_duration = fall_params['max_flush_duration']
wet_threshold_perc = fall_params['wet_threshold_perc']
flush_threshold_perc = fall_params['flush_threshold_perc'] # Size of flush peak, from rising limb to top of peak, has great enough change
min_flush_threshold = fall_params['min_flush_threshold']
date_cutoff = fall_params['date_cutoff'] # Latest accepted date for fall flush, in Julian Date counting from Oct 1st = 0. (i.e. Dec 15th = 75)
start_dates = []
wet_dates = []
durations = []
mags = []
for column_number, column_flow in enumerate(flow_matrix[0]):
start_dates.append(None)
wet_dates.append(None)
durations.append(None)
mags.append(None)
if np.isnan(flow_matrix[:, column_number]).sum() > max_nan_allowed_per_year or np.count_nonzero(flow_matrix[:, column_number]==0) > max_zero_allowed_per_year or max(flow_matrix[:, column_number]) < min_flow_rate:
            continue
flow_data = flow_matrix[:, column_number]
x_axis = list(range(len(flow_data)))
flow_data = replace_nan(flow_data)
wet_filter_data = gaussian_filter1d(flow_data, wet_sigma)
        return_date = return_to_wet_date(wet_filter_data, wet_threshold_perc)
        if return_date is None:
            continue
        wet_dates[-1] = return_date + 10
filter_data = gaussian_filter1d(flow_data, sigma)
x_axis = list(range(len(filter_data)))
spl = ip.UnivariateSpline(x_axis, filter_data, k=3, s=3)
mean_flow = np.nanmean(filter_data)
maxarray, minarray = peakdet(spl(x_axis), mean_flow * peak_sensitivity)
max_flow = max(filter_data[20:])
max_flow_index = find_index(filter_data[20:], max_flow) + 20
min_flow = min(wet_filter_data[:max_flow_index])
if not list(maxarray) or not list(minarray) or minarray[0][0] > max_flow_index:
            continue
baseflows = []
if column_number == 0:
wet_date = wet_dates[0]
baseflow = list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
else:
summer_date = summer_timings[column_number -1]
if wet_dates[column_number] > 20:
wet_date = wet_dates[column_number] - 20
baseflow = list(flow_matrix[summer_date:,column_number -1]) + list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
counter = 0
half_duration = int(max_flush_duration/2) # Only test duration for first half of fall flush peak
if bs_med > 25:
min_flush_magnitude = bs_med * 1.5 # if median baseflow is large (>25), magnitude threshold is 50% above median baseflow of previous summer
else:
min_flush_magnitude = bs_med * 2 # otherwise magnitude threshold is 100% above median baseflow of previous summer
if min_flush_magnitude < min_flush_threshold:
min_flush_magnitude = min_flush_threshold
for flow_index in maxarray:
if counter == 0:
if flow_index[0] < half_duration and flow_index[0] != 0 and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif bool((flow_index[1] - spl(maxarray[counter][0] - half_duration)) / flow_index[1] > flush_threshold_perc or minarray[counter][0] - maxarray[counter][0] < half_duration) and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
"""If peak and valley is separted by half duration, or half duration to the left is less than 30% of its value"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif counter == len(minarray):
start_dates[-1]=None
mags[-1]=None
                break
elif bool(minarray[counter][0] - maxarray[counter][0] < half_duration or maxarray[counter][0] - minarray[counter-1][0] < half_duration) and bool(flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff):
"""valley and peak are distanced by less than half dur from either side"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif (spl(flow_index[0] - half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and (spl(flow_index[0] + half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff:
"""both side of flow value at the peak + half duration index fall below flush_threshold_perc"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
counter = counter + 1
if bool(start_dates[-1] is None or start_dates[-1] > wet_dates[-1]) and wet_dates[-1]:
start_dates[-1] = None
mags[-1] = None
current_duration, left, right = calc_fall_flush_durations_2(filter_data, start_dates[-1])
durations[-1] = current_duration
_plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude)
return start_dates, mags, wet_dates, durations
def calc_fall_flush_durations(flow_data, wet_filter_data, date):
duration_left = None
duration_right = None
duration = None
if date:
date = int(date)
for index_left, flow_left in enumerate(reversed(flow_data[:date])):
if flow_left < wet_filter_data[date - index_left]:
duration_left = index_left
break
for index_right, flow_right in enumerate(flow_data[date:]):
if flow_right < wet_filter_data[date + index_right]:
duration_right = index_right
break
if duration_left and duration_right:
duration = duration_left + duration_right
else:
duration = None
return duration
def calc_fall_flush_durations_2(filter_data, date):
der_percent_threshold_left = 50 # Slope of rising limb (i.e. derivative) must be "sharp"
flow_percent_threshold_left = 80
der_percent_threshold_right = 30 # Slope of falling limb (i.e. derivative) has lower requirement to be part of flush duration
flow_percent_threshold_right = 80
duration = None
left = 0
right = 0
if date or date == 0:
date = int(date)
left_maxarray, left_minarray = peakdet(filter_data[:date], 0.01)
right_maxarray, right_minarray = peakdet(filter_data[date:], 0.01)
if not list(left_minarray):
left = 0
else:
left = int(left_minarray[-1][0])
if not list(right_minarray):
right = 0
else:
right = int(date - 2 + right_minarray[0][0])
if date - left > 10:
x_axis_left = list(range(len(filter_data[left:date])))
spl_left = ip.UnivariateSpline(x_axis_left, filter_data[left:date], k=3, s=3)
spl_first_left = spl_left.derivative(1)
spl_first_left_median = np.nanpercentile(spl_first_left(x_axis_left), der_percent_threshold_left)
median_left = np.nanpercentile(list(set(filter_data[left:date])), flow_percent_threshold_left)
for index_left, der in enumerate(reversed(spl_first_left(x_axis_left))):
# print(der < spl_first_left_median, filter_data[date - index_left] < median_left)
if der < spl_first_left_median and filter_data[date - index_left] < median_left:
left = date - index_left
break
if right - date > 10:
x_axis_right = list(range(len(filter_data[date:right])))
spl_right = ip.UnivariateSpline(x_axis_right, filter_data[date:right], k=3, s=3)
spl_first_right = spl_right.derivative(1)
spl_first_right_median = abs(np.nanpercentile(spl_first_right(x_axis_right), der_percent_threshold_right))
median_right = np.nanpercentile(list(set(filter_data[date:right])), flow_percent_threshold_right)
for index_right, der in enumerate(spl_first_right(x_axis_right)):
# print(date+index_right, der < spl_first_right_median, filter_data[date + index_right] < median_right)
if abs(der) < spl_first_right_median and filter_data[date + index_right] < median_right:
right = date + index_right
break
if left:
duration = int(date - left)
elif not left and right:
duration = int(right - date)
else:
duration = 0
return duration, left, right
def return_to_wet_date(wet_filter_data, wet_threshold_perc):
max_wet_peak_mag = max(wet_filter_data[20:])
max_wet_peak_index = find_index(wet_filter_data, max_wet_peak_mag)
min_wet_peak_mag = min(wet_filter_data[:max_wet_peak_index])
for index, value in enumerate(reversed(wet_filter_data[:max_wet_peak_index])):
        if index == len(wet_filter_data[:max_wet_peak_index]) - 1:
return None
elif (value - min_wet_peak_mag) / (max_wet_peak_mag - min_wet_peak_mag) < wet_threshold_perc:
"""If value percentage falls below wet_threshold_perc"""
return_date = max_wet_peak_index - index
return return_date
def _plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude):
plt.figure()
#plt.plot(x_axis, flow_data, '-')
    plt.plot(x_axis, filter_data, '-', color='#5993E5')
#plt.plot(x_axis, wet_filter_data)
# for data in maxarray:
# plt.plot(data[0], data[1], '^')
# for data in minarray:
# plt.plot(data[0], data[1], 'v')
if start_dates[-1] is not None:
plt.axvline(start_dates[-1], color='blue', ls=':')
plt.axvline(wet_dates[-1], color="green", ls=':')
#plt.axvline(left, ls=":")
#plt.axvline(right, ls=":")
if min_flush_magnitude is not None:
plt.axhline(min_flush_magnitude, ls=':', color = 'red')
#plt.yscale('log')
plt.savefig('post_processedFiles/Boxplots/{}.png'.format(column_number))
| true
| true
|
7904ec9d53c04cbceca5352a1e7d44a8717bfb60
| 1,289
|
py
|
Python
|
python/215_Kth_Largest_Element_in_an_Array.py
|
dvlpsh/leetcode-1
|
f965328af72113ac8a5a9d6624868c1502be937b
|
[
"MIT"
] | 4,416
|
2016-03-30T15:02:26.000Z
|
2022-03-31T16:31:03.000Z
|
python/215_Kth_Largest_Element_in_an_Array.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 20
|
2018-11-17T13:46:25.000Z
|
2022-03-13T05:37:06.000Z
|
python/215_Kth_Largest_Element_in_an_Array.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 1,374
|
2017-05-26T15:44:30.000Z
|
2022-03-30T19:21:02.000Z
|
import random


class Solution(object):
# def findKthLargest(self, nums, k):
# """
# :type nums: List[int]
# :type k: int
# :rtype: int
# """
# return sorted(nums, reverse=True)[k - 1]
# def findKthLargest(self, nums, k):
# # build min heap
# heapq.heapify(nums)
# # remove n - k smallest number
# while len(nums) > k:
# heapq.heappop(nums)
# return nums[0]
# #return heapq.nlargest(k, nums)[-1]
def findKthLargest(self, nums, k):
# shuffle nums to avoid n*n
random.shuffle(nums)
return self.quickSelection(nums, 0, len(nums) - 1, len(nums) - k)
def quickSelection(self, nums, start, end, k):
if start > end:
return float('inf')
pivot = nums[end]
left = start
for i in range(start, end):
if nums[i] <= pivot:
                # swap nums[left] and nums[i]
nums[left], nums[i] = nums[i], nums[left]
left += 1
nums[left], nums[end] = nums[end], nums[left]
if left == k:
return nums[left]
elif left < k:
return self.quickSelection(nums, left + 1, end, k)
else:
return self.quickSelection(nums, start, left - 1, k)
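# Illustrative check (not part of the original solution): sorted descending,
# [3, 2, 1, 5, 6, 4] is [6, 5, 4, 3, 2, 1], so the 2nd largest element is 5:
#   Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2)  # -> 5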
| 31.439024
| 73
| 0.501939
|
import random


class Solution(object):
    def findKthLargest(self, nums, k):
        random.shuffle(nums)
return self.quickSelection(nums, 0, len(nums) - 1, len(nums) - k)
def quickSelection(self, nums, start, end, k):
if start > end:
return float('inf')
pivot = nums[end]
left = start
for i in range(start, end):
if nums[i] <= pivot:
nums[left], nums[i] = nums[i], nums[left]
left += 1
nums[left], nums[end] = nums[end], nums[left]
if left == k:
return nums[left]
elif left < k:
return self.quickSelection(nums, left + 1, end, k)
else:
return self.quickSelection(nums, start, left - 1, k)
| true
| true
|
7904ecbabc8dfd071bb8ccd600b6cc1369f4ac28
| 1,481
|
py
|
Python
|
SandBox/Practicals_05_Cut.py
|
MichalKyjovsky/NPRG065_Programing_in_Python
|
14436fbf8f0e547ab084083135a84c8ae49e083c
|
[
"MIT"
] | null | null | null |
SandBox/Practicals_05_Cut.py
|
MichalKyjovsky/NPRG065_Programing_in_Python
|
14436fbf8f0e547ab084083135a84c8ae49e083c
|
[
"MIT"
] | null | null | null |
SandBox/Practicals_05_Cut.py
|
MichalKyjovsky/NPRG065_Programing_in_Python
|
14436fbf8f0e547ab084083135a84c8ae49e083c
|
[
"MIT"
] | null | null | null |
from sys import argv, stdin
def cut(input_file, *args):
options = process_options(*args)
delimiter = d_option(options["-d"])
lines = input_file.readlines()
columns = [item.split(delimiter) for item in lines]
scope = f_option(options["-f"], len(columns[0]))
out_scope = []
for x in scope:
out_scope.append([column[x] for column in columns])
pr = []
for line in range(len(out_scope[0])):
for rec in out_scope:
pr.append(rec[line].strip())
print(delimiter.join(pr), end='')
pr.clear()
print()
def process_options(options):
out_opt = dict()
last_key = ""
for option in options:
if option.startswith('-'):
out_opt[option] = ""
last_key = option
else:
out_opt[last_key] = option
return out_opt
def f_option(params: str, file_size: int):
if not params:
return None
inp = params.split('-') if '-' in params else params
if '-' not in params and ',' not in params:
        return [int(params) - 1]
elif params.startswith('-'):
return [x for x in range(0, int(inp[1]))]
elif params.endswith('-'):
return [x - 1 for x in range(int(inp[0]), file_size + 1)]
elif ',' in params:
        return [int(x) - 1 for x in params.split(',')]
else:
return [x - 1 for x in range(int(inp[0]), int(inp[1]) + 1)]
def d_option(params):
return params if params else ' '
cut(stdin, argv[1:])
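# Illustrative shell usage (POSIX-cut-like options; the single-line input and
# expected output shown here are an assumption for demonstration):
#   $ echo "a b c" | python Practicals_05_Cut.py -d " " -f 1-2
#   a b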
| 26.927273
| 67
| 0.576637
|
from sys import argv, stdin
def cut(input_file, *args):
options = process_options(*args)
delimiter = d_option(options["-d"])
lines = input_file.readlines()
columns = [item.split(delimiter) for item in lines]
scope = f_option(options["-f"], len(columns[0]))
out_scope = []
for x in scope:
out_scope.append([column[x] for column in columns])
pr = []
for line in range(len(out_scope[0])):
for rec in out_scope:
pr.append(rec[line].strip())
print(delimiter.join(pr), end='')
pr.clear()
print()
def process_options(options):
out_opt = dict()
last_key = ""
for option in options:
if option.startswith('-'):
out_opt[option] = ""
last_key = option
else:
out_opt[last_key] = option
return out_opt
def f_option(params: str, file_size: int):
if not params:
return None
inp = params.split('-') if '-' in params else params
if '-' not in params and ',' not in params:
        return [int(params) - 1]
elif params.startswith('-'):
return [x for x in range(0, int(inp[1]))]
elif params.endswith('-'):
return [x - 1 for x in range(int(inp[0]), file_size + 1)]
elif ',' in params:
        return [int(x) - 1 for x in params.split(',')]
else:
return [x - 1 for x in range(int(inp[0]), int(inp[1]) + 1)]
def d_option(params):
return params if params else ' '
cut(stdin, argv[1:])
| true
| true
|
7904edc9d31d07036fe7c9f5faa53e7ddac376dd
| 2,079
|
py
|
Python
|
virtool/shutdown.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | 39
|
2016-10-31T23:28:59.000Z
|
2022-01-15T00:00:42.000Z
|
virtool/shutdown.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | 1,690
|
2017-02-07T23:39:48.000Z
|
2022-03-31T22:30:44.000Z
|
virtool/shutdown.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | 25
|
2017-02-08T18:25:31.000Z
|
2021-09-20T22:55:25.000Z
|
import logging
from aiohttp.web import Application
from virtool.pg.base import Base
from virtool.startup import get_scheduler_from_app
logger = logging.getLogger(__name__)
async def shutdown_client(app: Application):
"""
Attempt to close the async HTTP client session.
:param app: The application object
"""
logger.info("Stopping HTTP client")
try:
await app["client"].close()
except KeyError:
pass
async def shutdown_dispatcher(app: Application):
"""
Attempt to close the app's `Dispatcher` object.
:param app: The application object
"""
logger.info("Stopping dispatcher")
try:
await app["dispatcher"].close()
except KeyError:
pass
async def shutdown_executors(app: Application):
"""
Attempt to close the `ThreadPoolExecutor` and `ProcessPoolExecutor`.
:param app: the application object
"""
try:
app["executor"].shutdown(wait=True)
except KeyError:
pass
try:
app["process_executor"].shutdown(wait=True)
except KeyError:
pass
async def shutdown_scheduler(app: Application):
"""
    Attempt to close the app's `aiojobs` scheduler.
:param app: The application object
"""
scheduler = get_scheduler_from_app(app)
await scheduler.close()
async def shutdown_redis(app: Application):
"""
Attempt to close the app's `redis` instance.
:param app: The application object
"""
logger.info("Closing Redis connection")
try:
app["redis"].close()
await app["redis"].wait_closed()
except KeyError:
pass
async def drop_fake_postgres(app: Application):
"""
Drop a fake PostgreSQL database if the instance was run with the ``--fake`` option.
:param app: the application object
"""
if app["config"].fake and "fake_" in app["config"].postgres_connection_string:
async with app["pg"].begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
logger.debug("Dropped fake PostgreSQL database.")
| 22.597826
| 87
| 0.658009
|
import logging
from aiohttp.web import Application
from virtool.pg.base import Base
from virtool.startup import get_scheduler_from_app
logger = logging.getLogger(__name__)
async def shutdown_client(app: Application):
logger.info("Stopping HTTP client")
try:
await app["client"].close()
except KeyError:
pass
async def shutdown_dispatcher(app: Application):
logger.info("Stopping dispatcher")
try:
await app["dispatcher"].close()
except KeyError:
pass
async def shutdown_executors(app: Application):
try:
app["executor"].shutdown(wait=True)
except KeyError:
pass
try:
app["process_executor"].shutdown(wait=True)
except KeyError:
pass
async def shutdown_scheduler(app: Application):
scheduler = get_scheduler_from_app(app)
await scheduler.close()
async def shutdown_redis(app: Application):
logger.info("Closing Redis connection")
try:
app["redis"].close()
await app["redis"].wait_closed()
except KeyError:
pass
async def drop_fake_postgres(app: Application):
if app["config"].fake and "fake_" in app["config"].postgres_connection_string:
async with app["pg"].begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
logger.debug("Dropped fake PostgreSQL database.")
| true
| true
|
7904ee365b4e60e710ed318fcb8a8c56cd69e12b
| 953
|
py
|
Python
|
var/spack/repos/builtin/packages/r-checkmate/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/r-checkmate/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/r-checkmate/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCheckmate(RPackage):
"""Tests and assertions to perform frequent argument checks.
A substantial part of the package was written in C to
minimize any worries about execution time overhead."""
homepage = "https://cloud.r-project.org/package=checkmate"
url = "https://cloud.r-project.org/src/contrib/checkmate_1.8.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/checkmate"
version('1.9.4', sha256='faa25754b757fe483b876f5d07b73f76f69a1baa971420892fadec4af4bbad21')
version('1.8.4', sha256='6f948883e5a885a1c409d997f0c782e754a549227ec3c8eb18318deceb38f8f6')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r-backports@1.1.0:', type=('build', 'run'))
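    # With this recipe on a Spack repo path, the package installs via the
    # standard Spack CLI (illustrative):
    #   $ spack install r-checkmate@1.9.4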
| 41.434783
| 95
| 0.735572
|
from spack import *
class RCheckmate(RPackage):
homepage = "https://cloud.r-project.org/package=checkmate"
url = "https://cloud.r-project.org/src/contrib/checkmate_1.8.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/checkmate"
version('1.9.4', sha256='faa25754b757fe483b876f5d07b73f76f69a1baa971420892fadec4af4bbad21')
version('1.8.4', sha256='6f948883e5a885a1c409d997f0c782e754a549227ec3c8eb18318deceb38f8f6')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r-backports@1.1.0:', type=('build', 'run'))
| true
| true
|
7904ef53ba17a51d72d4bc042c9098d510df5c9f
| 1,367
|
py
|
Python
|
tests/storage/dav/test_main.py
|
edvfb9/vdirsyncer
|
9e6bd83a3245123f7f68e880016989abe8f34a65
|
[
"BSD-3-Clause"
] | null | null | null |
tests/storage/dav/test_main.py
|
edvfb9/vdirsyncer
|
9e6bd83a3245123f7f68e880016989abe8f34a65
|
[
"BSD-3-Clause"
] | null | null | null |
tests/storage/dav/test_main.py
|
edvfb9/vdirsyncer
|
9e6bd83a3245123f7f68e880016989abe8f34a65
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from vdirsyncer.storage.dav import _BAD_XML_CHARS
from vdirsyncer.storage.dav import _merge_xml
from vdirsyncer.storage.dav import _parse_xml
def test_xml_utilities():
x = _parse_xml(
b"""<?xml version="1.0" encoding="UTF-8" ?>
<multistatus xmlns="DAV:">
<response>
<propstat>
<status>HTTP/1.1 404 Not Found</status>
<prop>
<getcontenttype/>
</prop>
</propstat>
<propstat>
<prop>
<resourcetype>
<collection/>
</resourcetype>
</prop>
</propstat>
</response>
</multistatus>
"""
)
response = x.find("{DAV:}response")
props = _merge_xml(response.findall("{DAV:}propstat/{DAV:}prop"))
assert props.find("{DAV:}resourcetype/{DAV:}collection") is not None
assert props.find("{DAV:}getcontenttype") is not None
@pytest.mark.parametrize("char", range(32))
def test_xml_specialchars(char):
x = _parse_xml(
'<?xml version="1.0" encoding="UTF-8" ?>'
"<foo>ye{}s\r\n"
"hello</foo>".format(chr(char)).encode("ascii")
)
if char in _BAD_XML_CHARS:
assert x.text == "yes\nhello"
| 29.085106
| 72
| 0.516459
|
import pytest
from vdirsyncer.storage.dav import _BAD_XML_CHARS
from vdirsyncer.storage.dav import _merge_xml
from vdirsyncer.storage.dav import _parse_xml
def test_xml_utilities():
x = _parse_xml(
b"""<?xml version="1.0" encoding="UTF-8" ?>
<multistatus xmlns="DAV:">
<response>
<propstat>
<status>HTTP/1.1 404 Not Found</status>
<prop>
<getcontenttype/>
</prop>
</propstat>
<propstat>
<prop>
<resourcetype>
<collection/>
</resourcetype>
</prop>
</propstat>
</response>
</multistatus>
"""
)
response = x.find("{DAV:}response")
props = _merge_xml(response.findall("{DAV:}propstat/{DAV:}prop"))
assert props.find("{DAV:}resourcetype/{DAV:}collection") is not None
assert props.find("{DAV:}getcontenttype") is not None
@pytest.mark.parametrize("char", range(32))
def test_xml_specialchars(char):
x = _parse_xml(
'<?xml version="1.0" encoding="UTF-8" ?>'
"<foo>ye{}s\r\n"
"hello</foo>".format(chr(char)).encode("ascii")
)
if char in _BAD_XML_CHARS:
assert x.text == "yes\nhello"
| true
| true
|
7904f00b96e9379af6255d0d617c791450c4f778
| 4,382
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
777-project/777
|
1a907e655984232660d812308e046a62fb45bbba
|
[
"MIT"
] | 7
|
2020-11-11T23:15:58.000Z
|
2021-05-03T16:26:14.000Z
|
contrib/seeds/generate-seeds.py
|
777-project/777
|
1a907e655984232660d812308e046a62fb45bbba
|
[
"MIT"
] | 3
|
2020-11-14T13:18:47.000Z
|
2021-02-06T16:24:40.000Z
|
contrib/seeds/generate-seeds.py
|
777-project/777_v2_1
|
1a907e655984232660d812308e046a62fb45bbba
|
[
"MIT"
] | 5
|
2020-10-18T16:47:23.000Z
|
2021-03-01T19:06:08.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate a list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
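# Worked examples for name_to_ipv6 (sample addresses, for illustration):
#   name_to_ipv6('1.2.3.4')    == pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        == bytearray(15 * [0] + [1])
#   name_to_ipv6('0x0100007f') == pchIPv4 + bytearray([0x7f, 0, 0, 1]),
#       i.e. the little-endian pnSeeds form of 127.0.0.1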
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
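# Worked examples for parse_spec (sample specs, defaultport=17771):
#   parse_spec('1.2.3.4', 17771)    == (name_to_ipv6('1.2.3.4'), 17771)
#   parse_spec('[::1]:8333', 17771) == (name_to_ipv6('::1'), 8333)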
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 17771)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 27771)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.52518
| 99
| 0.583752
|
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 17771)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 27771)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
7904f0a6cce4ad968ed402397dd5db06cd7e2da6
| 12,733
|
py
|
Python
|
homeassistant/components/smappee.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 2
|
2020-08-29T07:24:56.000Z
|
2020-10-27T21:47:35.000Z
|
homeassistant/components/smappee.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:25:50.000Z
|
2022-03-11T23:27:53.000Z
|
homeassistant/components/smappee.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 3
|
2018-09-14T07:34:09.000Z
|
2018-09-29T12:57:10.000Z
|
"""
Support for Smappee energy monitor.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/smappee/
"""
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['smappy==0.2.16']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
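# A minimal sketch of the matching configuration.yaml entry (hypothetical
# credentials; the server block and the local host settings are each
# optional, but at least one must be present for setup() to succeed):
#
#   smappee:
#     client_id: YOUR_CLIENT_ID
#     client_secret: YOUR_CLIENT_SECRET
#     username: YOUR_USERNAME
#     password: YOUR_PASSWORD
#     host: 192.168.1.2
#     host_password: admin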
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the Smapee component."""
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username,
password, host, host_password)
if not smappee.is_local_active and not smappee.is_remote_active:
_LOGGER.error("Neither Smappee server or local component enabled.")
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
return True
class Smappee:
"""Stores data retrieved from Smappee sensor."""
def __init__(self, client_id, client_secret, username,
password, host, host_password):
"""Initialize the data."""
import smappy
self._remote_active = False
self._local_active = False
if client_id is not None:
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception(
"Smappee server authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee server component init skipped.")
if host is not None:
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception(
"Local Smappee device authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee local component init skipped.")
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if self._remote_active or self._local_active:
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update data from Smappee API."""
if self.is_remote_active:
service_locations = self._smappy.get_service_locations() \
.get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if location_id is not None:
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy \
.get_service_location_info(location_id)
_LOGGER.debug("Remote info %s %s",
self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id]\
.update({sensor_id: self.get_sensor_consumption(
location_id, sensor_id,
aggregation=3, delta=1440)})
_LOGGER.debug("Remote sensors %s %s",
self.locations,
self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(
location_id, aggregation=3, delta=1440)
_LOGGER.debug("Remote consumption %s %s",
self.locations,
self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug("Local switches %s", self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug("Local values %s", self.instantaneous)
@property
def is_remote_active(self):
"""Return true if Smappe server is configured and working."""
return self._remote_active
@property
def is_local_active(self):
"""Return true if Smappe local device is configured and working."""
return self._local_active
def get_switches(self):
"""Get switches from local Smappee."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error(
"Error getting switches from local Smappee. (%s)",
error)
def get_consumption(self, location_id, aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_consumption(location_id,
start,
end,
aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
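    # For example (hypothetical location id), daily values covering the
    # last 24 hours:
    #   self.get_consumption(location_id=123, aggregation=3, delta=1440)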
def get_sensor_consumption(self, location_id, sensor_id,
aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_sensor_consumption(location_id,
sensor_id,
start,
end, aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def actuator_on(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn on actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def actuator_off(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn off actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def active_power(self):
"""Get sum of all instantaneous active power values from local hub."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def active_cosfi(self):
"""Get the average of all instantaneous cosfi values."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def instantaneous_values(self):
"""ReportInstantaneousValues."""
if not self.is_local_active:
return
report_instantaneous_values = \
self._localsmappy.report_instantaneous_values()
report_result = \
report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = \
match.group('value')
_LOGGER.debug(properties)
return properties
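    # Worked example of the _SENSOR_REGEX mapping (sample report line):
    #   'voltage=231.5,current=2.75' -> {'voltage': '231.5',
    #                                    'current': '2.75'}
    # Values stay strings here; active_current()/active_voltage() below
    # cast them with float().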
def active_current(self):
"""Get current active Amps."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['current'])
def active_voltage(self):
"""Get current active Voltage."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['voltage'])
def load_instantaneous(self):
"""LoadInstantaneous."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_instantaneous()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
| 36.276353
| 78
| 0.581089
|
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['smappy==0.2.16']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username,
password, host, host_password)
if not smappee.is_local_active and not smappee.is_remote_active:
_LOGGER.error("Neither Smappee server or local component enabled.")
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
return True
class Smappee:
def __init__(self, client_id, client_secret, username,
password, host, host_password):
import smappy
self._remote_active = False
self._local_active = False
if client_id is not None:
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception(
"Smappee server authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee server component init skipped.")
if host is not None:
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception(
"Local Smappee device authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee local component init skipped.")
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if self._remote_active or self._local_active:
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
if self.is_remote_active:
service_locations = self._smappy.get_service_locations() \
.get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if location_id is not None:
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy \
.get_service_location_info(location_id)
_LOGGER.debug("Remote info %s %s",
self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id]\
.update({sensor_id: self.get_sensor_consumption(
location_id, sensor_id,
aggregation=3, delta=1440)})
_LOGGER.debug("Remote sensors %s %s",
self.locations,
self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(
location_id, aggregation=3, delta=1440)
_LOGGER.debug("Remote consumption %s %s",
self.locations,
self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug("Local switches %s", self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug("Local values %s", self.instantaneous)
@property
def is_remote_active(self):
return self._remote_active
@property
def is_local_active(self):
return self._local_active
def get_switches(self):
if not self.is_local_active:
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error(
"Error getting switches from local Smappee. (%s)",
error)
def get_consumption(self, location_id, aggregation, delta):
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_consumption(location_id,
start,
end,
aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def get_sensor_consumption(self, location_id, sensor_id,
aggregation, delta):
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_sensor_consumption(location_id,
sensor_id,
start,
end, aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def actuator_on(self, location_id, actuator_id,
is_remote_switch, duration=None):
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def actuator_off(self, location_id, actuator_id,
is_remote_switch, duration=None):
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def active_power(self):
if not self.is_local_active:
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def active_cosfi(self):
if not self.is_local_active:
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def instantaneous_values(self):
if not self.is_local_active:
return
report_instantaneous_values = \
self._localsmappy.report_instantaneous_values()
report_result = \
report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = \
match.group('value')
_LOGGER.debug(properties)
return properties
def active_current(self):
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['current'])
def active_voltage(self):
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['voltage'])
def load_instantaneous(self):
if not self.is_local_active:
return
try:
return self._localsmappy.load_instantaneous()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
| true
| true
|
7904f0ec29c4ddeab8942766dba78f7cded677e7
| 394
|
py
|
Python
|
products/migrations/0009_product_is_deleted.py
|
BarisX/ecommerce_api
|
69191d1086f1befe49175e93dc716f1e4037e21f
|
[
"MIT"
] | 95
|
2020-04-13T09:02:30.000Z
|
2022-03-25T14:11:34.000Z
|
products/migrations/0009_product_is_deleted.py
|
Bilal815/ecommerce_api
|
a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8
|
[
"MIT"
] | 87
|
2020-02-21T17:58:56.000Z
|
2022-03-21T21:37:05.000Z
|
products/migrations/0009_product_is_deleted.py
|
Bilal815/ecommerce_api
|
a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8
|
[
"MIT"
] | 33
|
2021-01-18T09:30:29.000Z
|
2022-03-30T01:31:57.000Z
|
# Generated by Django 2.1.11 on 2020-06-24 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_auto_20190919_1521'),
]
operations = [
migrations.AddField(
model_name='product',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
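# A minimal usage sketch (standard Django management command, run from
# the project root):
#   python manage.py migrate products 0009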
| 20.736842
| 53
| 0.606599
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_auto_20190919_1521'),
]
operations = [
migrations.AddField(
model_name='product',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
| true
| true
|
7904f0f49fb0c48a84b0978fefae0fef672d5d1f
| 14,589
|
py
|
Python
|
tools/convet_voc2coco/voc2coco.py
|
yhpengtu/CenterIMask
|
7e046964db11df78c93cb88f50b9c4b6ddf0c9bc
|
[
"Apache-2.0"
] | null | null | null |
tools/convet_voc2coco/voc2coco.py
|
yhpengtu/CenterIMask
|
7e046964db11df78c93cb88f50b9c4b6ddf0c9bc
|
[
"Apache-2.0"
] | null | null | null |
tools/convet_voc2coco/voc2coco.py
|
yhpengtu/CenterIMask
|
7e046964db11df78c93cb88f50b9c4b6ddf0c9bc
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from bs4 import BeautifulSoup as bs
import cv2
import imgaug
from utils import *
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Inference result directory
RESULTS_DIR = os.path.abspath("./inference/")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from configs import Config
# from mrcnn import model as modellib, utils
# from mrcnn import visualize
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = '2012'
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# VOC DATASET MASK MAP FUNCTION
# Following codes are mapping each mask color(SegmentationClass) to ground truth index.
# - reference: https://d2l.ai/chapter_computer-vision/semantic-segmentation-and-dataset.html
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def build_colormap2label():
"""Build a RGB color to label mapping for segmentation."""
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[(colormap[0]*256 + colormap[1])*256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
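# Worked example: the VOC color [128, 0, 0] ('aeroplane') hashes to
# (128 * 256 + 0) * 256 + 0 == 8388608, so after build_colormap2label()
# colormap2label[8388608] == 1 == VOC_CLASSES.index('aeroplane').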
# VOC DATASET MASK MAP FUNCTION
class VocConfig(Config):
NAME = "voc"
IMAGE_PER_GPU = 2
    NUM_CLASSES = 1 + 20  # VOC 2012 has 20 classes. "1" is for background.
class InferenceConfig(VocConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
class VocDataset(Dataset):
def load_voc(self, dataset_dir, trainval, year='2012'):
"""Load a voc_year of the VOC dataset.
dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'
trainval: 'train' or 'val' for Training or Validation
year: '2007' or '2012' for VOC dataset
"""
voc_year = 'VOC' + year
Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation')
JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages')
Annotations = os.path.join(dataset_dir, voc_year, 'Annotations')
SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass')
SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject')
# load classes of VOC, BG is initialed in parent class.
for idx, class_name in enumerate(VOC_CLASSES[1:]):
self.add_class("voc", idx + 1, class_name)
assert trainval in ['train', 'val']
# read segmentation annotation file
annotation_file = os.path.join(Segmentation, trainval + '.txt')
image_ids = []
with open(annotation_file) as f:
image_id_list = [line.strip() for line in f]
image_ids += image_id_list
for image_id in image_ids:
image_file_name = '{}.jpg'.format(image_id)
mask_file_name = '{}.png'.format(image_id)
xml_file_name = '{}.xml'.format(image_id)
image_path = os.path.join(JPEGImages, image_file_name)
# Parse Annotations XML File
with open(os.path.join(Annotations, xml_file_name)) as f:
soup = bs(f, 'lxml')
objects = soup.find_all('object')
image_contains_class_flag = False
for obj in objects:
class_name = obj.find('name').text
if class_name in VOC_CLASSES:
image_contains_class_flag = True
continue
if image_contains_class_flag:
class_mask_path = os.path.join(SegmentationClass, mask_file_name)
object_mask_path = os.path.join(SegmentationObject, mask_file_name)
self.add_image("voc",
image_id=image_file_name,
path=image_path,
class_mask_path=class_mask_path,
object_mask_path=object_mask_path)
def load_raw_mask(self, image_id, class_or_object):
        '''Load one of the two kinds of VOC dataset masks.
        image_id: id of mask
        class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject
        Returns:
            image: numpy array of the mask image.
'''
assert class_or_object in ['class_mask', 'object_mask']
image = skimage.io.imread(self.image_info[image_id][class_or_object+'_path'])
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_class_label(self, image_id):
        '''Map SegmentationClass image colors to ground-truth indices.
        image_id: id of mask
        Return:
            class_label: [height, width] matrix containing values from 0 to 20
'''
raw_mask = self.load_raw_mask(image_id, 'class_mask')
class_label = voc_label_indices(raw_mask, build_colormap2label())
return class_label
def load_mask(self, image_id):
        '''Map annotation images to real instance masks (as Mask R-CNN needs).
image_id: id of mask
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
'''
class_label = self.load_class_label(image_id)
instance_mask = self.load_raw_mask(image_id, 'object_mask')
max_indice = int(np.max(class_label))
instance_label = []
instance_class = []
for i in range(1, max_indice+1):
if not np.any(class_label==i):
continue
gt_indice = i
object_filter = class_label == i
object_filter = object_filter.astype(np.uint8)
object_filter = np.dstack((object_filter,object_filter,object_filter))
filtered = np.multiply(object_filter, instance_mask)
gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY)
max_gray = np.max(gray)
for sub_index in range(1, max_gray+1):
if not np.any(gray==sub_index):
continue
instance_filter = gray == sub_index
instance_label += [instance_filter]
instance_class += [gt_indice]
masks = np.asarray(instance_label).transpose((1,2,0))
classes_ids = np.asarray(instance_class)
return masks, classes_ids
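    # Illustrative only: for an image containing one person and two
    # sheep, load_mask would return masks of shape [height, width, 3]
    # (one boolean layer per instance) and
    # classes_ids == np.array([15, 17, 17]), since 'person' is index 15
    # and 'sheep' index 17 in VOC_CLASSES.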
############################################################
# Inference
############################################################
def inference(model, dataset, limit):
"""Run detection on images in the given directory."""
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
time_dir = "{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
time_dir = os.path.join(RESULTS_DIR, time_dir)
os.makedirs(time_dir)
# Load over images
for image_id in dataset.image_ids[:limit]:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
# Save image with masks
if len(r['class_ids']) > 0:
print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids'])))
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}".format(time_dir, dataset.image_info[image_id]["id"]))
plt.close()
else:
plt.imshow(image)
plt.savefig("{}/noinstance_{}".format(time_dir, dataset.image_info[image_id]["id"]))
            print('[*] {}th image has no instance.'.format(image_id))
plt.close()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on PASCAL VOC.')
parser.add_argument("--command",
metavar="<command>",
default='train',
help="'train' or 'inference' on PASCAL VOC")
parser.add_argument('--dataset',
default="/data/lktime-seg-tp/dataset/PASCALVOC/VOCdevkit/",
help='Directory of the PASCAL VOC dataset')
parser.add_argument('--year',
default='2012',
help='Year of the PASCAL VOC dataset (2007 or 2012) (default=2012)')
parser.add_argument('--model',
default="/path/to/weights.h5",
help="Path to weights .h5 file or 'voc'")
parser.add_argument('--logs',
default='./logs',
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=10,
metavar="<image count>",
help='Images to use for evaluation (default=10)')
# TODO
'''
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip PASCAL VOC files (default=False)',
type=bool)
'''
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
#print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = VocConfig()
else:
config = InferenceConfig()
config.display()
# Create model
# if args.command == "train":
# model = modellib.MaskRCNN(mode="training", config=config,
# model_dir=args.logs)
# else:
# model = modellib.MaskRCNN(mode="inference", config=config,
# model_dir=args.logs)
# Select weights file to load
# if args.model.lower() == "coco":
# model_path = COCO_WEIGHTS_PATH
# elif args.model.lower() == "last":
# # Find last trained weights
# model_path = model.find_last()
# elif args.model.lower() == "imagenet":
# # Start from ImageNet trained weights
# model_path = model.get_imagenet_weights()
# else:
# model_path = args.model
# Load weights
# if args.model.lower() == "coco":
# # Exclude the last layers because they require a matching
# # number of classes
# model.load_weights(model_path, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# else:
# print("Loading weights ", model_path)
# model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask RCNN paper.
dataset_train = VocDataset()
dataset_train.load_voc(args.dataset, "train", year=args.year)
dataset_train.prepare()
# Validation dataset
dataset_val = VocDataset()
dataset_val.load_voc(args.dataset, "val", year=args.year)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# # Training - Stage 1
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=40,
# layers='heads',
# augmentation=augmentation)
# # Training - Stage 2
# # Finetune layers from ResNet stage 4 and up
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=120,
# layers='4+',
# augmentation=augmentation)
# # Training - Stage 3
# # Fine tune all layers
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE / 10,
# epochs=160,
# layers='all',
# augmentation=augmentation)
# elif args.command == "inference":
# #print("evaluate have not been implemented")
# # Validation dataset
# dataset_val = VocDataset()
# voc = dataset_val.load_voc(args.dataset, "val", year=args.year)
# dataset_val.prepare()
# print("Running voc inference on {} images.".format(args.limit))
# inference(model, dataset_val, int(args.limit))
# else:
# print("'{}' is not recognized. "
# "Use 'train' or 'inference'".format(args.command))
| 39.112601
| 98
| 0.582699
|
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from bs4 import BeautifulSoup as bs
import cv2
import imgaug
from utils import *
ROOT_DIR = os.path.abspath("../../")
RESULTS_DIR = os.path.abspath("./inference/")
sys.path.append(ROOT_DIR)
from configs import Config
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = '2012'
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def build_colormap2label():
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[(colormap[0]*256 + colormap[1])*256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
class VocConfig(Config):
NAME = "voc"
IMAGE_PER_GPU = 2
NUM_CLASSES = 1 + 20
class InferenceConfig(VocConfig):
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
class VocDataset(Dataset):
def load_voc(self, dataset_dir, trainval, year='2012'):
voc_year = 'VOC' + year
Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation')
JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages')
Annotations = os.path.join(dataset_dir, voc_year, 'Annotations')
SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass')
SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject')
# load classes of VOC, BG is initialed in parent class.
for idx, class_name in enumerate(VOC_CLASSES[1:]):
self.add_class("voc", idx + 1, class_name)
assert trainval in ['train', 'val']
# read segmentation annotation file
annotation_file = os.path.join(Segmentation, trainval + '.txt')
image_ids = []
with open(annotation_file) as f:
image_id_list = [line.strip() for line in f]
image_ids += image_id_list
for image_id in image_ids:
image_file_name = '{}.jpg'.format(image_id)
mask_file_name = '{}.png'.format(image_id)
xml_file_name = '{}.xml'.format(image_id)
image_path = os.path.join(JPEGImages, image_file_name)
# Parse Annotations XML File
with open(os.path.join(Annotations, xml_file_name)) as f:
soup = bs(f, 'lxml')
objects = soup.find_all('object')
image_contains_class_flag = False
for obj in objects:
class_name = obj.find('name').text
if class_name in VOC_CLASSES:
image_contains_class_flag = True
continue
if image_contains_class_flag:
class_mask_path = os.path.join(SegmentationClass, mask_file_name)
object_mask_path = os.path.join(SegmentationObject, mask_file_name)
self.add_image("voc",
image_id=image_file_name,
path=image_path,
class_mask_path=class_mask_path,
object_mask_path=object_mask_path)
def load_raw_mask(self, image_id, class_or_object):
assert class_or_object in ['class_mask', 'object_mask']
image = skimage.io.imread(self.image_info[image_id][class_or_object+'_path'])
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_class_label(self, image_id):
raw_mask = self.load_raw_mask(image_id, 'class_mask')
class_label = voc_label_indices(raw_mask, build_colormap2label())
return class_label
def load_mask(self, image_id):
class_label = self.load_class_label(image_id)
instance_mask = self.load_raw_mask(image_id, 'object_mask')
max_indice = int(np.max(class_label))
instance_label = []
instance_class = []
for i in range(1, max_indice+1):
if not np.any(class_label==i):
continue
gt_indice = i
object_filter = class_label == i
object_filter = object_filter.astype(np.uint8)
object_filter = np.dstack((object_filter,object_filter,object_filter))
filtered = np.multiply(object_filter, instance_mask)
gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY)
max_gray = np.max(gray)
for sub_index in range(1, max_gray+1):
if not np.any(gray==sub_index):
continue
instance_filter = gray == sub_index
instance_label += [instance_filter]
instance_class += [gt_indice]
masks = np.asarray(instance_label).transpose((1,2,0))
classes_ids = np.asarray(instance_class)
return masks, classes_ids
############################################################
# Inference
############################################################
def inference(model, dataset, limit):
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
time_dir = "{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
time_dir = os.path.join(RESULTS_DIR, time_dir)
os.makedirs(time_dir)
# Load over images
for image_id in dataset.image_ids[:limit]:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
# Save image with masks
if len(r['class_ids']) > 0:
print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids'])))
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}".format(time_dir, dataset.image_info[image_id]["id"]))
plt.close()
else:
plt.imshow(image)
plt.savefig("{}/noinstance_{}".format(time_dir, dataset.image_info[image_id]["id"]))
            print('[*] {}th image has no instance.'.format(image_id))
plt.close()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on PASCAL VOC.')
parser.add_argument("--command",
metavar="<command>",
default='train',
help="'train' or 'inference' on PASCAL VOC")
parser.add_argument('--dataset',
default="/data/lktime-seg-tp/dataset/PASCALVOC/VOCdevkit/",
help='Directory of the PASCAL VOC dataset')
parser.add_argument('--year',
default='2012',
help='Year of the PASCAL VOC dataset (2007 or 2012) (default=2012)')
parser.add_argument('--model',
default="/path/to/weights.h5",
help="Path to weights .h5 file or 'voc'")
parser.add_argument('--logs',
default='./logs',
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=10,
metavar="<image count>",
help='Images to use for evaluation (default=10)')
# TODO
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
#print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = VocConfig()
else:
config = InferenceConfig()
config.display()
# Create model
# if args.command == "train":
# model = modellib.MaskRCNN(mode="training", config=config,
# model_dir=args.logs)
# else:
# model = modellib.MaskRCNN(mode="inference", config=config,
# model_dir=args.logs)
# Select weights file to load
# if args.model.lower() == "coco":
# model_path = COCO_WEIGHTS_PATH
# elif args.model.lower() == "last":
# # Find last trained weights
# model_path = model.find_last()
# elif args.model.lower() == "imagenet":
# # Start from ImageNet trained weights
# model_path = model.get_imagenet_weights()
# else:
# model_path = args.model
# Load weights
# if args.model.lower() == "coco":
# # Exclude the last layers because they require a matching
# # number of classes
# model.load_weights(model_path, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# else:
# print("Loading weights ", model_path)
# model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask RCNN paper.
dataset_train = VocDataset()
dataset_train.load_voc(args.dataset, "train", year=args.year)
dataset_train.prepare()
# Validation dataset
dataset_val = VocDataset()
dataset_val.load_voc(args.dataset, "val", year=args.year)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# # Training - Stage 1
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=40,
# layers='heads',
# augmentation=augmentation)
# # Training - Stage 2
# # Finetune layers from ResNet stage 4 and up
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=120,
# layers='4+',
# augmentation=augmentation)
# # Training - Stage 3
# # Fine tune all layers
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE / 10,
# epochs=160,
# layers='all',
# augmentation=augmentation)
# elif args.command == "inference":
# #print("evaluate have not been implemented")
# # Validation dataset
# dataset_val = VocDataset()
# voc = dataset_val.load_voc(args.dataset, "val", year=args.year)
# dataset_val.prepare()
# print("Running voc inference on {} images.".format(args.limit))
# inference(model, dataset_val, int(args.limit))
# else:
# print("'{}' is not recognized. "
# "Use 'train' or 'inference'".format(args.command))
| true
| true
|
7904f2d31f5fac20237998f7a957bc81a5e511fe
| 1,353
|
py
|
Python
|
examples/uploaders.py
|
MooFreak/vkbottle
|
e4ee6d31537b65022ed519b64be3b9fa3c9b6267
|
[
"MIT"
] | null | null | null |
examples/uploaders.py
|
MooFreak/vkbottle
|
e4ee6d31537b65022ed519b64be3b9fa3c9b6267
|
[
"MIT"
] | null | null | null |
examples/uploaders.py
|
MooFreak/vkbottle
|
e4ee6d31537b65022ed519b64be3b9fa3c9b6267
|
[
"MIT"
] | null | null | null |
from io import BytesIO
from gtts import gTTS
from PIL import Image
from vkbottle import AudioUploader, Bot, DocUploader, Message, PhotoUploader
bot = Bot("token")
photo_uploader = PhotoUploader(bot.api, generate_attachment_strings=True)
doc_uploader = DocUploader(bot.api, generate_attachment_strings=True)
audio_uploader = AudioUploader(bot.api, generate_attachment_strings=True)
@bot.on.message_handler(text="photo_from_bytes", lower=True)
async def photo_from_bytes(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
fp = BytesIO()
image.save(fp, "RGB")
setattr(fp, "name", "image.png")
photo = await photo_uploader.upload_message_photo(fp)
await ans(attachment=photo)
@bot.on.message_handler(text="doc_from_file", lower=True)
async def doc_from_file(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
image.save("image.png", "RGB")
photo = await doc_uploader.upload_doc_to_message("image.png", ans.peer_id)
await ans(attachment=photo)
@bot.on.message_handler(text="audio_message")
async def audio(ans: Message):
tts = gTTS(text="бокале монада", lang="ru")
fp = BytesIO()
tts.write_to_fp(fp)
audio_message = await audio_uploader.upload_audio_message(fp, ans.peer_id)
await ans(attachment=audio_message)
if __name__ == "__main__":
bot.run_polling()
| 31.465116
| 78
| 0.731707
|
from io import BytesIO
from gtts import gTTS
from PIL import Image
from vkbottle import AudioUploader, Bot, DocUploader, Message, PhotoUploader
bot = Bot("token")
photo_uploader = PhotoUploader(bot.api, generate_attachment_strings=True)
doc_uploader = DocUploader(bot.api, generate_attachment_strings=True)
audio_uploader = AudioUploader(bot.api, generate_attachment_strings=True)
@bot.on.message_handler(text="photo_from_bytes", lower=True)
async def photo_from_bytes(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
fp = BytesIO()
image.save(fp, "RGB")
setattr(fp, "name", "image.png")
photo = await photo_uploader.upload_message_photo(fp)
await ans(attachment=photo)
@bot.on.message_handler(text="doc_from_file", lower=True)
async def doc_from_file(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
image.save("image.png", "RGB")
photo = await doc_uploader.upload_doc_to_message("image.png", ans.peer_id)
await ans(attachment=photo)
@bot.on.message_handler(text="audio_message")
async def audio(ans: Message):
tts = gTTS(text="бокале монада", lang="ru")
fp = BytesIO()
tts.write_to_fp(fp)
audio_message = await audio_uploader.upload_audio_message(fp, ans.peer_id)
await ans(attachment=audio_message)
if __name__ == "__main__":
bot.run_polling()
| true
| true
|
7904f2d70ef6781d9d24239fa046eef91f73a28c
| 2,720
|
py
|
Python
|
6/server.py
|
mesilliac/multitude
|
eccab96f496217971d19d2a4592fe48ee837fb3e
|
[
"CC0-1.0"
] | 2
|
2017-08-22T19:11:58.000Z
|
2017-10-10T22:14:33.000Z
|
6/server.py
|
mesilliac/multitude
|
eccab96f496217971d19d2a4592fe48ee837fb3e
|
[
"CC0-1.0"
] | null | null | null |
6/server.py
|
mesilliac/multitude
|
eccab96f496217971d19d2a4592fe48ee837fb3e
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
# coding: utf-8
"""A simple webserver."""
# python 2.7 compatibility
from __future__ import print_function, unicode_literals
# based on tornado
import tornado.ioloop
import tornado.web
import tornado.websocket
import sys
import json
def make_app():
"""Create and return the main Tornado web application.
It will listen on the port assigned via `app.listen(port)`,
and will run on Tornado's main ioloop,
which can be started with `tornado.ioloop.IOLoop.current().start()`.
"""
return tornado.web.Application([
(r"/connect", ClientSocket),
(r"/(.*)", tornado.web.StaticFileHandler, {
"path": "client",
"default_filename": "index.html"
}),
], debug=True)
class ClientSocket(tornado.websocket.WebSocketHandler):
"""ClientSocket represents an active websocket connection to a client.
"""
def open(self):
"""Called when a websocket connection is initiated."""
# print some info about the opened connection
print("WebSocket opened",
"from user at {}".format(self.request.remote_ip))
def on_message(self, message):
"""Called when a websocket client sends a message."""
# print the message to the console
print("client sent: {!r}".format(message))
# try to parse the message
try:
parsed_message = json.loads(message)
except ValueError:
print("Failed to parse message: {!r}".format(message))
return
# if there's a "message" in the message, echo it
if "message" in parsed_message:
response = {
"client" : str(self.request.remote_ip),
"message" : parsed_message["message"]
}
# respond to the message
m = json.dumps(response)
self.write_message(m)
else:
print("message unhandled.")
def on_close(self):
"""Called when a client connection is closed for any reason."""
# print some info about the closed connection
print("WebSocket closed",
"by user at {}".format(self.request.remote_ip))
print("close code: {}".format(self.close_code))
print("close reason: {!r}".format(self.close_reason))
if __name__ == "__main__":
# print some basic info about the system
print("Running Tornado Web Server {}".format(tornado.version))
print("Using Python {}".format(sys.version))
# start the webapp on port 8888
app = make_app()
app.listen(8888)
print("webapp started on port 8888")
tornado.ioloop.IOLoop.current().start()
| 31.627907
| 74
| 0.606985
|
from __future__ import print_function, unicode_literals
import tornado.ioloop
import tornado.web
import tornado.websocket
import sys
import json
def make_app():
return tornado.web.Application([
(r"/connect", ClientSocket),
(r"/(.*)", tornado.web.StaticFileHandler, {
"path": "client",
"default_filename": "index.html"
}),
], debug=True)
class ClientSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened",
"from user at {}".format(self.request.remote_ip))
def on_message(self, message):
print("client sent: {!r}".format(message))
try:
parsed_message = json.loads(message)
except ValueError:
print("Failed to parse message: {!r}".format(message))
return
if "message" in parsed_message:
response = {
"client" : str(self.request.remote_ip),
"message" : parsed_message["message"]
}
# respond to the message
m = json.dumps(response)
self.write_message(m)
else:
print("message unhandled.")
def on_close(self):
# print some info about the closed connection
print("WebSocket closed",
"by user at {}".format(self.request.remote_ip))
print("close code: {}".format(self.close_code))
print("close reason: {!r}".format(self.close_reason))
if __name__ == "__main__":
# print some basic info about the system
print("Running Tornado Web Server {}".format(tornado.version))
print("Using Python {}".format(sys.version))
# start the webapp on port 8888
app = make_app()
app.listen(8888)
print("webapp started on port 8888")
tornado.ioloop.IOLoop.current().start()
| true
| true
|
7904f2e32a45ed22e345d2ac38fd04f26b9e4adb
| 8,346
|
py
|
Python
|
rac_aspace/data_helpers.py
|
RockefellerArchiveCenter/rac_aspace
|
02546e5d618a6b9c2e2edba35383a457cba9321b
|
[
"MIT"
] | null | null | null |
rac_aspace/data_helpers.py
|
RockefellerArchiveCenter/rac_aspace
|
02546e5d618a6b9c2e2edba35383a457cba9321b
|
[
"MIT"
] | 74
|
2020-01-14T14:55:51.000Z
|
2021-02-18T21:13:29.000Z
|
rac_aspace/data_helpers.py
|
RockefellerArchiveCenter/rac_aspace
|
02546e5d618a6b9c2e2edba35383a457cba9321b
|
[
"MIT"
] | 2
|
2020-03-28T21:19:21.000Z
|
2022-02-11T20:05:33.000Z
|
"""Data Helpers
Data helpers leverage the abstraction layer of ArchivesSnake to provide
additional functionality for retrieving, inferring and concatenating data
elements. They can also extend (or invert) relationships between different
objects.
"""
from datetime import datetime
import re
from rapidfuzz import fuzz
from asnake.jsonmodel import JSONModelObject
from string import Formatter
from .decorators import check_type
@check_type(dict)
def get_note_text(note):
"""Parses note content from different note types.
:param dict note: an ArchivesSpace note.
:returns: a list containing note content.
:rtype: list
"""
def parse_subnote(subnote):
"""Parses note content from subnotes.
:param dict subnote: an ArchivesSpace subnote.
:returns: a list containing subnote content.
:rtype: list
"""
if subnote["jsonmodel_type"] in [
"note_orderedlist", "note_index"]:
content = subnote["items"]
elif subnote["jsonmodel_type"] in ["note_chronology", "note_definedlist"]:
content = []
for k in subnote["items"]:
for i in k:
content += k.get(i) if isinstance(k.get(i),
list) else [k.get(i)]
else:
content = subnote["content"] if isinstance(
subnote["content"], list) else [subnote["content"]]
return content
if note["jsonmodel_type"] == "note_singlepart":
content = note["content"]
elif note["jsonmodel_type"] == "note_bibliography":
data = []
data += note["content"]
data += note["items"]
content = data
elif note["jsonmodel_type"] == "note_index":
data = []
for item in note["items"]:
data.append(item["value"])
content = data
else:
subnote_content_list = list(parse_subnote(sn)
for sn in note["subnotes"])
content = [
c for subnote_content in subnote_content_list for c in subnote_content]
return content
@check_type(dict)
def text_in_note(note, query_string):
"""Performs fuzzy searching against note text.
:param dict note: an ArchivesSpace note.
:param str query_string: a string to match against.
:returns: True if a match is found for `query_string`, False if no match is
found.
:rtype: bool
"""
CONFIDENCE_RATIO = 97
"""int: Minimum confidence ratio to match against."""
note_content = get_note_text(note)
ratio = fuzz.token_sort_ratio(
" ".join([n.lower() for n in note_content]),
query_string.lower(),
score_cutoff=CONFIDENCE_RATIO)
return bool(ratio)
@check_type(JSONModelObject)
def object_locations(archival_object):
"""Finds locations associated with an archival object.
:param JSONModelObject archival_object: an ArchivesSpace archival_object.
:returns: Locations objects associated with the archival object.
:rtype: list
"""
locations = []
for instance in archival_object.instances:
top_container = instance.sub_container.top_container.reify()
locations += top_container.container_locations
return locations
@check_type(JSONModelObject)
def format_from_obj(obj, format_string):
"""Generates a human-readable string from an object.
:param JSONModelObject obj: an ArchivesSpace object.
    :param str format_string: a format string with field names in braces.
:returns: a string in the chosen format.
:rtype: str
"""
if not format_string:
raise Exception("No format string provided.")
else:
try:
d = {}
matches = [i[1] for i in Formatter().parse(format_string) if i[1]]
for m in matches:
d.update({m: getattr(obj, m, "")})
return format_string.format(**d)
except KeyError as e:
raise KeyError(
"The field {} was not found in this object".format(
str(e)))
@check_type(dict)
def format_resource_id(resource, separator=":"):
"""Concatenates the four-part ID for a resource record.
:param dict resource: an ArchivesSpace resource.
:param str separator: a separator to insert between the id parts. Defaults
to `:`.
:returns: a concatenated four-part ID for the resource record.
:rtype: str
"""
resource_id = []
for x in range(4):
try:
resource_id.append(resource["id_{0}".format(x)])
except KeyError:
break
return separator.join(resource_id)
@check_type(JSONModelObject)
def closest_value(archival_object, key):
"""Finds the closest value matching a key.
Starts with an archival object, and iterates up through its ancestors
until it finds a match for a key that is not empty or null.
:param JSONModelObject archival_object: an ArchivesSpace archival_object.
:param str key: the key to match against.
:returns: The value of the key, which could be a str, list, or dict.
:rtype: str, list, or dict
"""
if getattr(archival_object, key) not in ["", [], {}, None]:
return getattr(archival_object, key)
else:
for ancestor in archival_object.ancestors:
return closest_value(ancestor, key)
def get_orphans(object_list, null_attribute):
"""Finds objects in a list which do not have a value in a specified field.
:param list object_list: a list of ArchivesSpace objects.
:param str null_attribute: an attribute which must be empty or null.
:yields: a list of ArchivesSpace objects.
:yield type: dict
"""
for obj in object_list:
if getattr(obj, null_attribute) in ["", [], {}, None]:
yield obj
@check_type(dict)
def get_expression(date):
"""Returns a date expression for a date object.
Concatenates start and end dates if no date expression exists.
:param dict date: an ArchivesSpace date.
:returns: date expression for the date object.
:rtype: str
"""
try:
expression = date["expression"]
except KeyError:
if date.get("end"):
expression = "{0}-{1}".format(date["begin"], date["end"])
else:
expression = date["begin"]
return expression
@check_type(dict)
def indicates_restriction(rights_statement, restriction_acts):
"""Parses a rights statement to determine if it indicates a restriction.
:param dict rights_statement: an ArchivesSpace rights statement.
    :param list restriction_acts: a list of restriction act types to match against.
:returns: True if rights statement indicates a restriction, False if not.
:rtype: bool
"""
def is_expired(date):
today = datetime.now()
        date = date if date else today.strftime("%Y-%m-%d")
        return datetime.strptime(date, "%Y-%m-%d") < today
if is_expired(rights_statement.get("end_date")):
return False
for act in rights_statement.get("acts"):
if (act.get("restriction")
in restriction_acts and not is_expired(act.get("end_date"))):
return True
return False
@check_type(dict)
def is_restricted(archival_object, query_string, restriction_acts):
"""Parses an archival object to determine if it is restricted.
Iterates through notes, looking for a conditions governing access note
which contains a particular set of strings.
Also looks for associated rights statements which indicate object may be
restricted.
:param dict archival_object: an ArchivesSpace archival_object.
    :param str query_string: a string to match against access notes.
    :param list restriction_acts: a list of strings to match restriction acts against.
:returns: True if archival object is restricted, False if not.
:rtype: bool
"""
for note in archival_object["notes"]:
if note["type"] == "accessrestrict":
if text_in_note(note, query_string.lower()):
return True
for rights_statement in archival_object["rights_statements"]:
if indicates_restriction(rights_statement, restriction_acts):
return True
return False
@check_type(str)
def strip_html_tags(string):
"""Strips HTML tags from a string.
:param str string: An input string from which to remove HTML tags.
"""
tag_match = re.compile("<.*?>")
cleantext = re.sub(tag_match, "", string)
return cleantext
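A short usage sketch for a few of the helpers above; the sample resource and date dicts below are invented for illustration and only mimic the shape of ArchivesSpace records.

from rac_aspace.data_helpers import (
    format_resource_id, get_expression, strip_html_tags)

# hypothetical records shaped like ArchivesSpace data
resource = {"id_0": "FA", "id_1": "001", "id_2": "02"}
date = {"begin": "1950", "end": "1960"}

print(format_resource_id(resource))       # -> "FA:001:02"
print(format_resource_id(resource, "."))  # -> "FA.001.02"
print(get_expression(date))               # -> "1950-1960"
print(strip_html_tags("<p>Hello</p>"))    # -> "Hello"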
| 31.73384
| 85
| 0.648454
|
from datetime import datetime
import re
from rapidfuzz import fuzz
from asnake.jsonmodel import JSONModelObject
from string import Formatter
from .decorators import check_type
@check_type(dict)
def get_note_text(note):
def parse_subnote(subnote):
if subnote["jsonmodel_type"] in [
"note_orderedlist", "note_index"]:
content = subnote["items"]
elif subnote["jsonmodel_type"] in ["note_chronology", "note_definedlist"]:
content = []
for k in subnote["items"]:
for i in k:
content += k.get(i) if isinstance(k.get(i),
list) else [k.get(i)]
else:
content = subnote["content"] if isinstance(
subnote["content"], list) else [subnote["content"]]
return content
if note["jsonmodel_type"] == "note_singlepart":
content = note["content"]
elif note["jsonmodel_type"] == "note_bibliography":
data = []
data += note["content"]
data += note["items"]
content = data
elif note["jsonmodel_type"] == "note_index":
data = []
for item in note["items"]:
data.append(item["value"])
content = data
else:
subnote_content_list = list(parse_subnote(sn)
for sn in note["subnotes"])
content = [
c for subnote_content in subnote_content_list for c in subnote_content]
return content
@check_type(dict)
def text_in_note(note, query_string):
CONFIDENCE_RATIO = 97
note_content = get_note_text(note)
ratio = fuzz.token_sort_ratio(
" ".join([n.lower() for n in note_content]),
query_string.lower(),
score_cutoff=CONFIDENCE_RATIO)
return bool(ratio)
@check_type(JSONModelObject)
def object_locations(archival_object):
locations = []
for instance in archival_object.instances:
top_container = instance.sub_container.top_container.reify()
locations += top_container.container_locations
return locations
@check_type(JSONModelObject)
def format_from_obj(obj, format_string):
if not format_string:
raise Exception("No format string provided.")
else:
try:
d = {}
matches = [i[1] for i in Formatter().parse(format_string) if i[1]]
for m in matches:
d.update({m: getattr(obj, m, "")})
return format_string.format(**d)
except KeyError as e:
raise KeyError(
"The field {} was not found in this object".format(
str(e)))
@check_type(dict)
def format_resource_id(resource, separator=":"):
resource_id = []
for x in range(4):
try:
resource_id.append(resource["id_{0}".format(x)])
except KeyError:
break
return separator.join(resource_id)
@check_type(JSONModelObject)
def closest_value(archival_object, key):
if getattr(archival_object, key) not in ["", [], {}, None]:
return getattr(archival_object, key)
else:
for ancestor in archival_object.ancestors:
return closest_value(ancestor, key)
def get_orphans(object_list, null_attribute):
for obj in object_list:
if getattr(obj, null_attribute) in ["", [], {}, None]:
yield obj
@check_type(dict)
def get_expression(date):
try:
expression = date["expression"]
except KeyError:
if date.get("end"):
expression = "{0}-{1}".format(date["begin"], date["end"])
else:
expression = date["begin"]
return expression
@check_type(dict)
def indicates_restriction(rights_statement, restriction_acts):
def is_expired(date):
today = datetime.now()
        date = date if date else today.strftime("%Y-%m-%d")
        return datetime.strptime(date, "%Y-%m-%d") < today
if is_expired(rights_statement.get("end_date")):
return False
for act in rights_statement.get("acts"):
if (act.get("restriction")
in restriction_acts and not is_expired(act.get("end_date"))):
return True
return False
@check_type(dict)
def is_restricted(archival_object, query_string, restriction_acts):
for note in archival_object["notes"]:
if note["type"] == "accessrestrict":
if text_in_note(note, query_string.lower()):
return True
for rights_statement in archival_object["rights_statements"]:
if indicates_restriction(rights_statement, restriction_acts):
return True
return False
@check_type(str)
def strip_html_tags(string):
tag_match = re.compile("<.*?>")
cleantext = re.sub(tag_match, "", string)
return cleantext
| true
| true
|
7904f36c47eee73c9b469b11fcdbc1634739035b
| 749
|
py
|
Python
|
hkm/migrations/0026_hkm_museum_printer_credentials.py
|
andersinno/kuvaselaamo
|
aed553a0ba85e82055e0de025ba2d3e3e4f2c9e6
|
[
"MIT"
] | 1
|
2017-05-07T10:46:24.000Z
|
2017-05-07T10:46:24.000Z
|
hkm/migrations/0026_hkm_museum_printer_credentials.py
|
City-of-Helsinki/kuvaselaamo
|
3fa9b69e3f5496620852d8b138129d0069339fcd
|
[
"MIT"
] | 60
|
2016-10-18T11:18:48.000Z
|
2022-02-13T20:04:18.000Z
|
hkm/migrations/0026_hkm_museum_printer_credentials.py
|
andersinno/kuvaselaamo
|
aed553a0ba85e82055e0de025ba2d3e3e4f2c9e6
|
[
"MIT"
] | 9
|
2017-04-18T13:26:26.000Z
|
2020-02-13T20:05:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-02 09:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0025_userprofile_printer_presets'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='printer_password',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Printer password'),
),
migrations.AddField(
model_name='userprofile',
name='printer_username',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Printer username'),
),
]
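For completeness, a hedged sketch of applying this migration from a script rather than the command line; call_command is standard Django, but the settings module name below is an assumption about this project's layout.

import os

import django
from django.core.management import call_command

# settings path is assumed; adjust to the project's actual module
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kuvaselaamo.settings")
django.setup()
call_command("migrate", "hkm")  # equivalent to `python manage.py migrate hkm`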
| 28.807692
| 107
| 0.636849
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0025_userprofile_printer_presets'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='printer_password',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Printer password'),
),
migrations.AddField(
model_name='userprofile',
name='printer_username',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Printer username'),
),
]
| true
| true
|
7904f3a4c0856151dea457452ff9246e9d6d4140
| 84
|
py
|
Python
|
Curso/Mundo 1/02.py
|
ZaikoXander/Python
|
7e7243edb02dd33991c5f63f02c983ad060fc3ca
|
[
"Unlicense"
] | null | null | null |
Curso/Mundo 1/02.py
|
ZaikoXander/Python
|
7e7243edb02dd33991c5f63f02c983ad060fc3ca
|
[
"Unlicense"
] | null | null | null |
Curso/Mundo 1/02.py
|
ZaikoXander/Python
|
7e7243edb02dd33991c5f63f02c983ad060fc3ca
|
[
"Unlicense"
] | null | null | null |
nome = input('What is your name? ')
print('Hello', nome + '! Nice to meet you!')
| 28
| 47
| 0.619048
|
nome = input('What is your name? ')
print('Hello', nome + '! Nice to meet you!')
| true
| true
|
7904f3df1a5d83c104afd1438c819061f7fabfd1
| 16,078
|
py
|
Python
|
log_mito/model_112.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito/model_112.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito/model_112.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 28000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
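A minimal simulation sketch for the exported model, assuming it is importable as model_112 (the module name is inferred from the file path) and that PySB's standard ScipyOdeSimulator is available; the time span is illustrative.

import numpy as np
from pysb.simulator import ScipyOdeSimulator

from model_112 import model  # module name assumed from the file path

tspan = np.linspace(0, 20000, 100)  # illustrative time points
result = ScipyOdeSimulator(model, tspan=tspan).run()
# inspect cleaved PARP at the final time point
print(result.observables["ParpC_obs"][-1])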
| 87.857923
| 710
| 0.803458
|
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 28000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| true
| true
|
7904f3ebe502340b7f29eedfa3cccd177099531d
| 1,114
|
py
|
Python
|
mlcomp/board/views/api.py
|
korepwx/mlcomp
|
b39f64d700531792da72175c8daaa10be5c73ad1
|
[
"MIT"
] | null | null | null |
mlcomp/board/views/api.py
|
korepwx/mlcomp
|
b39f64d700531792da72175c8daaa10be5c73ad1
|
[
"MIT"
] | null | null | null |
mlcomp/board/views/api.py
|
korepwx/mlcomp
|
b39f64d700531792da72175c8daaa10be5c73ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import six
from flask import Blueprint, jsonify, current_app
from ..utils import MountTree
from .utils import is_testing
api_bp = Blueprint('api', __name__.rsplit('.')[1])
if is_testing():
@api_bp.route('/_hello/')
def api_hello():
return jsonify('api hello')
@api_bp.route('/all')
def all_storage():
"""Get all storage in JSON."""
trees = current_app.trees
mounts = MountTree()
for prefix, tree in six.iteritems(trees):
for path, storage in tree.iter_storage():
mounts.mount(prefix + '/' + path, storage)
# get a compressed representation of the tree
def dfs(node):
children = node.children
if children:
ret = []
for name in sorted(six.iterkeys(children)):
child = children[name]
child_ret = dfs(child)
if child_ret:
ret.append((name, child_ret))
if ret:
return ret
data = node.data
if data:
return data.to_dict()
return jsonify(dfs(mounts.root) or [])
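A hedged sketch of exercising the /all endpoint with Flask's test client; the throwaway app below is invented for illustration, since the blueprint expects current_app.trees to be populated by the real application.

from flask import Flask

from mlcomp.board.views.api import api_bp

app = Flask(__name__)
app.register_blueprint(api_bp, url_prefix="/api")
app.trees = {}  # the real app fills this with MountTree-backed storage trees

with app.test_client() as client:
    resp = client.get("/api/all")
    print(resp.get_json())  # -> [] when no storage is mounted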
| 25.906977
| 55
| 0.572711
|
import six
from flask import Blueprint, jsonify, current_app
from ..utils import MountTree
from .utils import is_testing
api_bp = Blueprint('api', __name__.rsplit('.')[1])
if is_testing():
@api_bp.route('/_hello/')
def api_hello():
return jsonify('api hello')
@api_bp.route('/all')
def all_storage():
trees = current_app.trees
mounts = MountTree()
for prefix, tree in six.iteritems(trees):
for path, storage in tree.iter_storage():
mounts.mount(prefix + '/' + path, storage)
def dfs(node):
children = node.children
if children:
ret = []
for name in sorted(six.iterkeys(children)):
child = children[name]
child_ret = dfs(child)
if child_ret:
ret.append((name, child_ret))
if ret:
return ret
data = node.data
if data:
return data.to_dict()
return jsonify(dfs(mounts.root) or [])
| true
| true
|
7904f57af1123e1fffb4246b91ab5ebd724c3887
| 405
|
py
|
Python
|
animeDjangoApp/asgi.py
|
peteryouu/animeDjango
|
a0b34005ea453c0c5ace5da0b65c13d5c225b033
|
[
"MIT"
] | 1
|
2021-09-08T18:51:58.000Z
|
2021-09-08T18:51:58.000Z
|
animeDjangoApp/asgi.py
|
peteryouu/animeDjango
|
a0b34005ea453c0c5ace5da0b65c13d5c225b033
|
[
"MIT"
] | null | null | null |
animeDjangoApp/asgi.py
|
peteryouu/animeDjango
|
a0b34005ea453c0c5ace5da0b65c13d5c225b033
|
[
"MIT"
] | null | null | null |
"""
ASGI config for animeDjangoApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animeDjangoApp.settings')
application = get_asgi_application()
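A hedged sketch of serving this application from Python, equivalent to running `uvicorn animeDjangoApp.asgi:application`; Uvicorn is an assumption here, and any ASGI server would do.

import uvicorn  # assumed ASGI server; not part of the original project

if __name__ == "__main__":
    uvicorn.run("animeDjangoApp.asgi:application", host="127.0.0.1", port=8000)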
| 23.823529
| 78
| 0.792593
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animeDjangoApp.settings')
application = get_asgi_application()
| true
| true
|
7904f5c7d855b0c81a36598404c795e8f972faba
| 12,654
|
py
|
Python
|
network.py
|
HongyangGao/PixelDeconv
|
71964b6b2594d1a888435d3fb42572ffb4096165
|
[
"MIT"
] | 71
|
2017-05-24T01:06:26.000Z
|
2018-12-10T05:46:55.000Z
|
network.py
|
HongyangGao/PixelDeconv
|
71964b6b2594d1a888435d3fb42572ffb4096165
|
[
"MIT"
] | 10
|
2017-07-03T02:48:41.000Z
|
2018-01-03T08:20:28.000Z
|
network.py
|
HongyangGao/PixelDeconv
|
71964b6b2594d1a888435d3fb42572ffb4096165
|
[
"MIT"
] | 37
|
2017-05-25T05:14:55.000Z
|
2018-11-29T19:53:21.000Z
|
import os
import numpy as np
import tensorflow as tf
from utils.data_reader import H5DataLoader, H53DDataLoader
from utils.img_utils import imsave
from utils import ops
"""
This module builds a standard U-NET for semantic segmentation.
If want VAE using pixelDCL, please visit this code:
https://github.com/HongyangGao/UVAE
"""
class PixelDCN(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
self.def_params()
if not os.path.exists(conf.modeldir):
os.makedirs(conf.modeldir)
if not os.path.exists(conf.logdir):
os.makedirs(conf.logdir)
if not os.path.exists(conf.sampledir):
os.makedirs(conf.sampledir)
self.configure_networks()
self.train_summary = self.config_summary('train')
self.valid_summary = self.config_summary('valid')
def def_params(self):
self.data_format = 'NHWC'
if self.conf.data_type == '3D':
self.conv_size = (3, 3, 3)
self.pool_size = (2, 2, 2)
self.axis, self.channel_axis = (1, 2, 3), 4
self.input_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width, self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width]
else:
self.conv_size = (3, 3)
self.pool_size = (2, 2)
self.axis, self.channel_axis = (1, 2), 3
self.input_shape = [
self.conf.batch, self.conf.height, self.conf.width,
self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.height, self.conf.width]
def configure_networks(self):
self.build_network()
optimizer = tf.train.AdamOptimizer(self.conf.learning_rate)
self.train_op = optimizer.minimize(self.loss_op, name='train_op')
tf.set_random_seed(self.conf.random_seed)
self.sess.run(tf.global_variables_initializer())
trainable_vars = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=0)
self.writer = tf.summary.FileWriter(self.conf.logdir, self.sess.graph)
def build_network(self):
self.inputs = tf.placeholder(
tf.float32, self.input_shape, name='inputs')
self.labels = tf.placeholder(
tf.int64, self.output_shape, name='labels')
self.predictions = self.inference(self.inputs)
self.cal_loss()
def cal_loss(self):
one_hot_labels = tf.one_hot(
self.labels, depth=self.conf.class_num,
axis=self.channel_axis, name='labels/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_labels, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_preds = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.labels, self.decoded_preds,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
# weights = tf.cast(
# tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
# tf.int32, name='m_iou/weights')
weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/less'),
tf.int64, name='m_iou/weights')
labels = tf.multiply(self.labels, weights, name='m_iou/mul')
self.m_iou, self.miou_op = tf.metrics.mean_iou(
self.labels, self.decoded_preds, self.conf.class_num,
weights, name='m_iou/m_ious')
def config_summary(self, name):
summarys = []
summarys.append(tf.summary.scalar(name+'/loss', self.loss_op))
summarys.append(tf.summary.scalar(name+'/accuracy', self.accuracy_op))
if name == 'valid' and self.conf.data_type == '2D':
summarys.append(
tf.summary.image(name+'/input', self.inputs, max_outputs=100))
summarys.append(
tf.summary.image(
name+'/annotation',
tf.cast(tf.expand_dims(self.labels, -1),
tf.float32), max_outputs=100))
summarys.append(
tf.summary.image(
name+'/prediction',
tf.cast(tf.expand_dims(self.decoded_preds, -1),
tf.float32), max_outputs=100))
summary = tf.summary.merge(summarys)
return summary
def inference(self, inputs):
outputs = inputs
down_outputs = []
for layer_index in range(self.conf.network_depth-1):
            is_first = layer_index == 0
name = 'down%s' % layer_index
outputs = self.build_down_block(
outputs, name, down_outputs, is_first)
outputs = self.build_bottom_block(outputs, 'bottom')
for layer_index in range(self.conf.network_depth-2, -1, -1):
            is_final = layer_index == 0
name = 'up%s' % layer_index
down_inputs = down_outputs[layer_index]
outputs = self.build_up_block(
outputs, down_inputs, name, is_final)
return outputs
def build_down_block(self, inputs, name, down_outputs, first=False):
out_num = self.conf.start_channel_num if first else 2 * \
inputs.shape[self.channel_axis].value
conv1 = ops.conv(inputs, out_num, self.conv_size,
name+'/conv1', self.conf.data_type)
conv2 = ops.conv(conv1, out_num, self.conv_size,
name+'/conv2', self.conf.data_type)
down_outputs.append(conv2)
pool = ops.pool(conv2, self.pool_size, name +
'/pool', self.conf.data_type)
return pool
def build_bottom_block(self, inputs, name):
out_num = inputs.shape[self.channel_axis].value
conv1 = ops.conv(
inputs, 2*out_num, self.conv_size, name+'/conv1',
self.conf.data_type)
conv2 = ops.conv(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
return conv2
def build_up_block(self, inputs, down_inputs, name, final=False):
out_num = inputs.shape[self.channel_axis].value
conv1 = self.deconv_func()(
inputs, out_num, self.conv_size, name+'/conv1',
self.conf.data_type, action=self.conf.action)
conv1 = tf.concat(
[conv1, down_inputs], self.channel_axis, name=name+'/concat')
conv2 = self.conv_func()(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
        out_num = self.conf.class_num if final else out_num // 2
conv3 = ops.conv(
conv2, out_num, self.conv_size, name+'/conv3', self.conf.data_type,
not final)
return conv3
def deconv_func(self):
return getattr(ops, self.conf.deconv_name)
def conv_func(self):
return getattr(ops, self.conf.conv_name)
def save_summary(self, summary, step):
print('---->summarizing', step)
self.writer.add_summary(summary, step)
def train(self):
if self.conf.reload_step > 0:
self.reload(self.conf.reload_step)
if self.conf.data_type == '2D':
train_reader = H5DataLoader(
self.conf.data_dir+self.conf.train_data)
valid_reader = H5DataLoader(
self.conf.data_dir+self.conf.valid_data)
else:
train_reader = H53DDataLoader(
self.conf.data_dir+self.conf.train_data, self.input_shape)
valid_reader = H53DDataLoader(
self.conf.data_dir+self.conf.valid_data, self.input_shape)
for epoch_num in range(self.conf.max_step+1):
if epoch_num and epoch_num % self.conf.test_interval == 0:
inputs, labels = valid_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, summary = self.sess.run(
[self.loss_op, self.valid_summary], feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
print('----testing loss', loss)
if epoch_num and epoch_num % self.conf.summary_interval == 0:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _, summary = self.sess.run(
[self.loss_op, self.train_op, self.train_summary],
feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
else:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _ = self.sess.run(
[self.loss_op, self.train_op], feed_dict=feed_dict)
print('----training loss', loss)
if epoch_num and epoch_num % self.conf.save_interval == 0:
self.save(epoch_num+self.conf.reload_step)
def test(self):
print('---->testing ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
self.sess.run(tf.local_variables_initializer())
count = 0
losses = []
accuracies = []
m_ious = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
loss, accuracy, m_iou, _ = self.sess.run(
[self.loss_op, self.accuracy_op, self.m_iou, self.miou_op],
feed_dict=feed_dict)
print('values----->', loss, accuracy, m_iou)
count += 1
losses.append(loss)
accuracies.append(accuracy)
m_ious.append(m_iou)
print('Loss: ', np.mean(losses))
print('Accuracy: ', np.mean(accuracies))
print('M_iou: ', m_ious[-1])
def predict(self):
print('---->predicting ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
predictions = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
predictions.append(self.sess.run(
self.decoded_preds, feed_dict=feed_dict))
print('----->saving predictions')
for index, prediction in enumerate(predictions):
for i in range(prediction.shape[0]):
imsave(prediction[i], self.conf.sampledir +
str(index*prediction.shape[0]+i)+'.png')
def save(self, step):
print('---->saving', step)
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
self.saver.save(self.sess, checkpoint_path, global_step=step)
def reload(self, step):
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
model_path = checkpoint_path+'-'+str(step)
if not os.path.exists(model_path+'.meta'):
print('------- no such checkpoint', model_path)
return
self.saver.restore(self.sess, model_path)
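# Minimal driver sketch (hedged: `conf` is a hypothetical config object carrying the
# fields referenced above -- data_type, batch, modeldir, logdir, sampledir, ...):
# sess = tf.Session()
# model = PixelDCN(sess, conf)
# model.train()  # or model.test() / model.predict() once a checkpoint exists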
| 42.606061
| 79
| 0.585744
|
import os
import numpy as np
import tensorflow as tf
from utils.data_reader import H5DataLoader, H53DDataLoader
from utils.img_utils import imsave
from utils import ops
class PixelDCN(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
self.def_params()
if not os.path.exists(conf.modeldir):
os.makedirs(conf.modeldir)
if not os.path.exists(conf.logdir):
os.makedirs(conf.logdir)
if not os.path.exists(conf.sampledir):
os.makedirs(conf.sampledir)
self.configure_networks()
self.train_summary = self.config_summary('train')
self.valid_summary = self.config_summary('valid')
def def_params(self):
self.data_format = 'NHWC'
if self.conf.data_type == '3D':
self.conv_size = (3, 3, 3)
self.pool_size = (2, 2, 2)
self.axis, self.channel_axis = (1, 2, 3), 4
self.input_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width, self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width]
else:
self.conv_size = (3, 3)
self.pool_size = (2, 2)
self.axis, self.channel_axis = (1, 2), 3
self.input_shape = [
self.conf.batch, self.conf.height, self.conf.width,
self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.height, self.conf.width]
def configure_networks(self):
self.build_network()
optimizer = tf.train.AdamOptimizer(self.conf.learning_rate)
self.train_op = optimizer.minimize(self.loss_op, name='train_op')
tf.set_random_seed(self.conf.random_seed)
self.sess.run(tf.global_variables_initializer())
trainable_vars = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=0)
self.writer = tf.summary.FileWriter(self.conf.logdir, self.sess.graph)
def build_network(self):
self.inputs = tf.placeholder(
tf.float32, self.input_shape, name='inputs')
self.labels = tf.placeholder(
tf.int64, self.output_shape, name='labels')
self.predictions = self.inference(self.inputs)
self.cal_loss()
def cal_loss(self):
one_hot_labels = tf.one_hot(
self.labels, depth=self.conf.class_num,
axis=self.channel_axis, name='labels/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_labels, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_preds = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.labels, self.decoded_preds,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
weights = tf.cast(
tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
tf.int64, name='m_iou/weights')
labels = tf.multiply(self.labels, weights, name='m_iou/mul')
self.m_iou, self.miou_op = tf.metrics.mean_iou(
labels, self.decoded_preds, self.conf.class_num,
weights, name='m_iou/m_ious')
def config_summary(self, name):
summarys = []
summarys.append(tf.summary.scalar(name+'/loss', self.loss_op))
summarys.append(tf.summary.scalar(name+'/accuracy', self.accuracy_op))
if name == 'valid' and self.conf.data_type == '2D':
summarys.append(
tf.summary.image(name+'/input', self.inputs, max_outputs=100))
summarys.append(
tf.summary.image(
name+'/annotation',
tf.cast(tf.expand_dims(self.labels, -1),
tf.float32), max_outputs=100))
summarys.append(
tf.summary.image(
name+'/prediction',
tf.cast(tf.expand_dims(self.decoded_preds, -1),
tf.float32), max_outputs=100))
summary = tf.summary.merge(summarys)
return summary
def inference(self, inputs):
outputs = inputs
down_outputs = []
for layer_index in range(self.conf.network_depth-1):
is_first = True if not layer_index else False
name = 'down%s' % layer_index
outputs = self.build_down_block(
outputs, name, down_outputs, is_first)
outputs = self.build_bottom_block(outputs, 'bottom')
for layer_index in range(self.conf.network_depth-2, -1, -1):
is_final = True if layer_index == 0 else False
name = 'up%s' % layer_index
down_inputs = down_outputs[layer_index]
outputs = self.build_up_block(
outputs, down_inputs, name, is_final)
return outputs
def build_down_block(self, inputs, name, down_outputs, first=False):
out_num = self.conf.start_channel_num if first else 2 * \
inputs.shape[self.channel_axis].value
conv1 = ops.conv(inputs, out_num, self.conv_size,
name+'/conv1', self.conf.data_type)
conv2 = ops.conv(conv1, out_num, self.conv_size,
name+'/conv2', self.conf.data_type)
down_outputs.append(conv2)
pool = ops.pool(conv2, self.pool_size, name +
'/pool', self.conf.data_type)
return pool
def build_bottom_block(self, inputs, name):
out_num = inputs.shape[self.channel_axis].value
conv1 = ops.conv(
inputs, 2*out_num, self.conv_size, name+'/conv1',
self.conf.data_type)
conv2 = ops.conv(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
return conv2
def build_up_block(self, inputs, down_inputs, name, final=False):
out_num = inputs.shape[self.channel_axis].value
conv1 = self.deconv_func()(
inputs, out_num, self.conv_size, name+'/conv1',
self.conf.data_type, action=self.conf.action)
conv1 = tf.concat(
[conv1, down_inputs], self.channel_axis, name=name+'/concat')
conv2 = self.conv_func()(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
out_num = self.conf.class_num if final else out_num // 2
conv3 = ops.conv(
conv2, out_num, self.conv_size, name+'/conv3', self.conf.data_type,
not final)
return conv3
def deconv_func(self):
return getattr(ops, self.conf.deconv_name)
def conv_func(self):
return getattr(ops, self.conf.conv_name)
def save_summary(self, summary, step):
print('---->summarizing', step)
self.writer.add_summary(summary, step)
def train(self):
if self.conf.reload_step > 0:
self.reload(self.conf.reload_step)
if self.conf.data_type == '2D':
train_reader = H5DataLoader(
self.conf.data_dir+self.conf.train_data)
valid_reader = H5DataLoader(
self.conf.data_dir+self.conf.valid_data)
else:
train_reader = H53DDataLoader(
self.conf.data_dir+self.conf.train_data, self.input_shape)
valid_reader = H53DDataLoader(
self.conf.data_dir+self.conf.valid_data, self.input_shape)
for epoch_num in range(self.conf.max_step+1):
if epoch_num and epoch_num % self.conf.test_interval == 0:
inputs, labels = valid_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, summary = self.sess.run(
[self.loss_op, self.valid_summary], feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
print('----testing loss', loss)
if epoch_num and epoch_num % self.conf.summary_interval == 0:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _, summary = self.sess.run(
[self.loss_op, self.train_op, self.train_summary],
feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
else:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _ = self.sess.run(
[self.loss_op, self.train_op], feed_dict=feed_dict)
print('----training loss', loss)
if epoch_num and epoch_num % self.conf.save_interval == 0:
self.save(epoch_num+self.conf.reload_step)
def test(self):
print('---->testing ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
self.sess.run(tf.local_variables_initializer())
count = 0
losses = []
accuracies = []
m_ious = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
loss, accuracy, m_iou, _ = self.sess.run(
[self.loss_op, self.accuracy_op, self.m_iou, self.miou_op],
feed_dict=feed_dict)
print('values----->', loss, accuracy, m_iou)
count += 1
losses.append(loss)
accuracies.append(accuracy)
m_ious.append(m_iou)
print('Loss: ', np.mean(losses))
print('Accuracy: ', np.mean(accuracies))
print('M_iou: ', m_ious[-1])
def predict(self):
print('---->predicting ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
predictions = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
predictions.append(self.sess.run(
self.decoded_preds, feed_dict=feed_dict))
print('----->saving predictions')
for index, prediction in enumerate(predictions):
for i in range(prediction.shape[0]):
imsave(prediction[i], self.conf.sampledir +
str(index*prediction.shape[0]+i)+'.png')
def save(self, step):
print('---->saving', step)
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
self.saver.save(self.sess, checkpoint_path, global_step=step)
def reload(self, step):
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
model_path = checkpoint_path+'-'+str(step)
if not os.path.exists(model_path+'.meta'):
print('------- no such checkpoint', model_path)
return
self.saver.restore(self.sess, model_path)
| true
| true
|
7904f60d169ad9f2bbca1ee173dbd8ff9dc32226
| 1,079
|
py
|
Python
|
search_to_follow.py
|
hallowf/MotivationalBinary
|
16d85929bd689f227e5021291ec477262e6477d8
|
[
"MIT"
] | null | null | null |
search_to_follow.py
|
hallowf/MotivationalBinary
|
16d85929bd689f227e5021291ec477262e6477d8
|
[
"MIT"
] | null | null | null |
search_to_follow.py
|
hallowf/MotivationalBinary
|
16d85929bd689f227e5021291ec477262e6477d8
|
[
"MIT"
] | null | null | null |
import json
import pickle
from TwitterAPI import TwitterAPI
with open("api_key.json") as json_data:
all_keys = json.load(json_data)
consumer_key = all_keys["consumer_key"]
consumer_secret = all_keys["consumer_secret"]
access_token_key = all_keys["access_token_key"]
access_token_secret = all_keys["access_token_secret"]
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
master_ID = "116568685"
count = 25
def who_follows(ID):
page_cursor = get_pickle()
r = api.request("followers/ids", {"user_id":ID, "cursor":page_cursor, "count":count})
print(r.status_code)
parse_response = r.json()
users_inf = parse_response["ids"]
IDS = []
for x in users_inf:
IDS.append(x)
page_cursor = parse_response["next_cursor"]  # follow the cursor the API returns; decrementing by hand does not paginate
print(page_cursor)
make_pickle(page_cursor)
print(IDS)
return IDS
def make_pickle(obj):
with open("objs.pkl", "wb") as f:
pickle.dump(obj, f)
def get_pickle():
with open("objs.pkl", "rb") as f:
obj = pickle.load(f)
print(obj)
return obj
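# First-run sketch (assumption: objs.pkl does not exist yet, so get_pickle() would fail):
# make_pickle(-1)              # -1 asks the Twitter API for the first page of results
# ids = who_follows(master_ID)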
| 25.093023
| 89
| 0.677479
|
import json
import pickle
from TwitterAPI import TwitterAPI
with open("api_key.json") as json_data:
all_keys = json.load(json_data)
consumer_key = all_keys["consumer_key"]
consumer_secret = all_keys["consumer_secret"]
access_token_key = all_keys["access_token_key"]
access_token_secret = all_keys["access_token_secret"]
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
master_ID = "116568685"
count = 25
def who_follows(ID):
page_cursor = get_pickle()
r = api.request("followers/ids", {"user_id":ID, "cursor":page_cursor, "count":count})
print(r.status_code)
parse_response = r.json()
users_inf = parse_response["ids"]
IDS = []
for x in users_inf:
IDS.append(x)
page_cursor = parse_response["next_cursor"]
print(page_cursor)
make_pickle(page_cursor)
print(IDS)
return IDS
def make_pickle(obj):
with open("objs.pkl", "wb") as f:
pickle.dump(obj, f)
def get_pickle():
with open("objs.pkl", "rb") as f:
obj = pickle.load(f)
print(obj)
return obj
| true
| true
|
7904f658275c4c10056aaee0ba7203c11c1eb377
| 3,747
|
py
|
Python
|
plugins/News/test.py
|
joulez/Limnoria
|
aa89a2dd72ec6f593df4c5c281d915af456d5614
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T21:53:47.000Z
|
2020-04-01T21:53:47.000Z
|
plugins/News/test.py
|
joulez/Limnoria
|
aa89a2dd72ec6f593df4c5c281d915af456d5614
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/News/test.py
|
joulez/Limnoria
|
aa89a2dd72ec6f593df4c5c281d915af456d5614
|
[
"BSD-3-Clause"
] | null | null | null |
###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import time
from supybot.test import *
class NewsTestCase(ChannelPluginTestCase):
plugins = ('News','User')
def setUp(self):
ChannelPluginTestCase.setUp(self)
# Create a valid user to use
self.prefix = 'news!bar@baz'
self.irc.feedMsg(ircmsgs.privmsg(self.nick, 'register tester moo',
prefix=self.prefix))
m = self.irc.takeMsg() # Response to register.
def testAddnews(self):
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', 'subject')
self.assertNotError('add 0 subject2: foo2')
self.assertRegexp('news', 'subject.*subject2')
self.assertNotError('add 5 subject3: foo3')
self.assertRegexp('news', 'subject3')
print()
print('Sleeping to expire the news item (testAddnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotRegexp('news', 'subject3')
def testNews(self):
# These should both fail first, as they will have nothing in the DB
self.assertRegexp('news', 'no news')
self.assertRegexp('news #channel', 'no news')
# Now we'll add news and make sure listnews doesn't fail
self.assertNotError('add #channel 0 subject: foo')
self.assertNotError('news #channel')
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', '#1')
self.assertNotError('news 1')
def testChangenews(self):
self.assertNotError('add 0 Foo: bar')
self.assertNotError('change 1 s/bar/baz/')
self.assertNotRegexp('news 1', 'bar')
self.assertRegexp('news 1', 'baz')
def testOldnews(self):
self.assertRegexp('old', 'No old news')
self.assertNotError('add 0 a: b')
self.assertRegexp('old', 'No old news')
self.assertNotError('add 5 foo: bar')
self.assertRegexp('old', 'No old news')
print()
print('Sleeping to expire the news item (testOldnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotError('old')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 42.579545
| 79
| 0.681345
|
import time
from supybot.test import *
class NewsTestCase(ChannelPluginTestCase):
plugins = ('News','User')
def setUp(self):
ChannelPluginTestCase.setUp(self)
self.prefix = 'news!bar@baz'
self.irc.feedMsg(ircmsgs.privmsg(self.nick, 'register tester moo',
prefix=self.prefix))
m = self.irc.takeMsg()
def testAddnews(self):
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', 'subject')
self.assertNotError('add 0 subject2: foo2')
self.assertRegexp('news', 'subject.*subject2')
self.assertNotError('add 5 subject3: foo3')
self.assertRegexp('news', 'subject3')
print()
print('Sleeping to expire the news item (testAddnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotRegexp('news', 'subject3')
def testNews(self):
self.assertRegexp('news', 'no news')
self.assertRegexp('news #channel', 'no news')
self.assertNotError('add #channel 0 subject: foo')
self.assertNotError('news #channel')
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', '#1')
self.assertNotError('news 1')
def testChangenews(self):
self.assertNotError('add 0 Foo: bar')
self.assertNotError('change 1 s/bar/baz/')
self.assertNotRegexp('news 1', 'bar')
self.assertRegexp('news 1', 'baz')
def testOldnews(self):
self.assertRegexp('old', 'No old news')
self.assertNotError('add 0 a: b')
self.assertRegexp('old', 'No old news')
self.assertNotError('add 5 foo: bar')
self.assertRegexp('old', 'No old news')
print()
print('Sleeping to expire the news item (testOldnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotError('old')
| true
| true
|
7904f68638bcd0ad360d246513846cbd1b8f1b3e
| 24,872
|
py
|
Python
|
bin/kookaburra.py
|
simonsobs/lyrebird
|
027ca633876860c492270983c3880a7d4b87f14b
|
[
"BSD-2-Clause"
] | null | null | null |
bin/kookaburra.py
|
simonsobs/lyrebird
|
027ca633876860c492270983c3880a7d4b87f14b
|
[
"BSD-2-Clause"
] | 1
|
2021-02-04T03:16:43.000Z
|
2021-02-04T16:43:09.000Z
|
bin/kookaburra.py
|
simonsobs/lyrebird
|
027ca633876860c492270983c3880a7d4b87f14b
|
[
"BSD-2-Clause"
] | 1
|
2019-03-19T01:27:11.000Z
|
2019-03-19T01:27:11.000Z
|
#!/usr/bin/env python
import numpy as np
import socket, curses, json, traceback, math, argparse, sys, os, stat
from operator import itemgetter, attrgetter
from configutils.dfmux_config_constructor import get_physical_id, sq_phys_id_to_info
from configutils.dfmux_config_constructor import uniquifyList, generate_dfmux_lyrebird_config
#from spt3g.util import genericutils as GU # not in the public S4 repo
from spt3g import core, dfmux, calibration
from functools import cmp_to_key, reduce  # reduce is needed by str_cmp_with_numbers_sorted under Python 3
import signal
import warnings
warnings.filterwarnings("ignore")
def split_on_numbers(s):
'''
Splits the string into a list where the numbers and the characters between numbers are each element
Copied from spt3g_software to fix dependencies (sorry)
'''
prevDig = False
outList = []
for char in s:
if char.isdigit():
if prevDig:
outList[-1] += char
else:
prevDig = True
outList.append(char)
else:
if not prevDig and len(outList)>0:
outList[-1] += char
else:
prevDig = False
outList.append(char)
return outList
def str_cmp_with_numbers_sorted(str1, str2):
'''
Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function
Copied from spt3g_software to fix dependencies (sorry)
'''
if str1==str2:
return 0
split1 = split_on_numbers(str1)
split2 = split_on_numbers(str2)
largestStr = 0
for l in [split1, split2]:
for s in l:
if s[0].isdigit():
largestStr = len(s) if len(s) > largestStr else largestStr
for l in [split1, split2]:
for i in range(len(l)):
if l[i][0].isdigit():
l[i] = '0'*(largestStr-len(l[i])) +l[i]
p1 = reduce(lambda x,y: x+y, split1)
p2 = reduce(lambda x,y: x+y, split2)
return -1 if p1<p2 else 1
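# Example: sorted(['Sq12', 'Sq8'], key=cmp_to_key(str_cmp_with_numbers_sorted))
# yields ['Sq8', 'Sq12'], whereas plain string sorting would put 'Sq12' first.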
@core.cache_frame_data(type = core.G3FrameType.Housekeeping, wiring_map = 'WiringMap',
tf = 'DfMuxTransferFunction', system = 'ReadoutSystem')
def AddVbiasAndCurrentConv(frame, wiring_map, tf, system):
hk_map = frame['DfMuxHousekeeping']
v_bias = core.G3MapDouble()
i_conv = core.G3MapDouble()
for k in wiring_map.keys():
vb = dfmux.unittransforms.bolo_bias_voltage_rms(wiring_map, hk_map,
bolo = k, tf = tf, system = system) / core.G3Units.V
ic = dfmux.unittransforms.counts_to_rms_amps(wiring_map, hk_map,
bolo = k, tf = tf, system = system) / core.G3Units.amp
v_bias[k] = vb
i_conv[k] = ic
frame['VoltageBias'] = v_bias
frame['CurrentConv'] = i_conv
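# Note: both maps hold plain floats -- the bias voltage in volts and the
# counts -> RMS-amps conversion factor in amps -- since each quantity is
# divided by the corresponding core.G3Units scale before being stored.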
def make_square_block(n_things):
sq = n_things**0.5
if n_things == int(math.floor(sq))**2:
return (int(sq), int(sq))  # keep grid dimensions integral
else:
sq = int(math.floor(sq))
return (sq, sq+1)
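# Example: make_square_block(9) -> (3, 3); make_square_block(10) -> (3, 4).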
def write_get_hk_script(fn, hostname, port):
script = '''#!/bin/bash
nc -w 1 %s %d
''' % (hostname, port)
f = open(fn, 'w')
f.write(script)
f.close()
st = os.stat(fn)
os.chmod(fn, st.st_mode | stat.S_IXUSR)
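# The emitted helper is a two-line shell script ("#!/bin/bash" then "nc -w 1 <host> <port>"),
# made user-executable so operators can poke the housekeeping port by hand.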
class BoloPropertiesFaker(object):
def __init__(self):
self.wiring_map = None
self.bolo_props = None
self.sent_off = False
self.default_tf = 'spt3g_filtering_2017_full'
return
def __call__(self, frame):
if 'DfMuxTransferFunction' in frame:
self.default_tf = frame['DfMuxTransferFunction']
if frame.type == core.G3FrameType.Wiring:
self.wiring_map = frame['WiringMap']
return self.send_off(frame)
elif frame.type == core.G3FrameType.Calibration:
if 'BolometerProperties' in frame:
self.bolo_props = frame['BolometerProperties']
elif 'NominalBolometerProperties' in frame:
self.bolo_props = frame['NominalBolometerProperties']
def send_off(self, frame):
if not self.wiring_map is None and self.bolo_props is None:
#faking the frame data
self.bolo_props = calibration.BolometerPropertiesMap()
n_chans = 0
squids = {}
for k in self.wiring_map.keys():
wm = self.wiring_map[k]
c = wm.channel + 1
if c > n_chans:
n_chans = c
sq = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1)
squids[sq] = 1
n_squids = len(squids.keys())
sq_layout = make_square_block(n_squids)
ch_layout = make_square_block(n_chans)
sq_x_sep = ch_layout[0] + 1
sq_y_sep = ch_layout[1] + 1
ch_x_sep = 1
ch_y_sep = 1
for i, sq in enumerate( sorted(squids.keys()) ):
x = i % sq_layout[0]
y = i // sq_layout[0]
squids[sq] = (1.2 * x * ch_layout[0], 1.2* y * ch_layout[1])
#need nsquids
#need nbolos per squid
for k in self.wiring_map.keys():
wm = self.wiring_map[k]
sq_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1)
w_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot)
sql = squids[sq_id]
x = sql[0] + ((wm.channel) % ch_layout[0]) * ch_x_sep
y = sql[1] + ((wm.channel) // ch_layout[0]) * ch_y_sep
bp = calibration.BolometerProperties()
bp.physical_name = k
bp.band = 0
bp.pol_angle = 0
bp.pol_efficiency = 0
bp.wafer_id = w_id
bp.squid_id = sq_id
bp.x_offset = float(x)
bp.y_offset = float(y)
self.bolo_props[k] = bp
out_frame = core.G3Frame(core.G3FrameType.Calibration)
out_frame['BolometerProperties'] = self.bolo_props
out_frame['DfMuxTransferFunction'] = self.default_tf
return [out_frame, frame]
else:
return frame
class BirdConfigGenerator(object):
def __init__(self,
lyrebird_output_file = '',
get_hk_script_name= '',
hostname = '', hk_hostname = '',
port = 3, hk_port = 3, get_hk_port = 3,
dv_buffer_size = 0, min_max_update_interval = 0,
rendering_sub_sampling = 1, max_framerate = 0,
mean_decay_factor = 0.01
):
self.l_fn = lyrebird_output_file
self.get_hk_script_name = get_hk_script_name
self.is_written = False
self.bolo_props = None
self.wiring_map = None
self.hostname = hostname
self.hk_hostname = hk_hostname
self.port = port
self.hk_port = hk_port
self.get_hk_port = get_hk_port
self.dv_buffer_size = dv_buffer_size
self.min_max_update_interval = min_max_update_interval
self.rendering_sub_sampling = rendering_sub_sampling
self.max_framerate = max_framerate
self.mean_decay_factor = mean_decay_factor
def __call__(self, frame):
if frame.type == core.G3FrameType.Calibration:
if 'BolometerProperties' in frame:
bp_id = 'BolometerProperties'
elif 'NominalBolometerProperties' in frame:
bp_id = 'NominalBolometerProperties'
else:
raise RuntimeError("BolometerProperties fucked")
self.bolo_props = frame[bp_id]
self.write_config()
elif frame.type == core.G3FrameType.Wiring:
self.wiring_map = frame['WiringMap']
self.write_config()
def write_config(self):
if self.wiring_map is None or self.bolo_props is None:
return
config_dic = generate_dfmux_lyrebird_config(
self.l_fn,
self.wiring_map, self.bolo_props,
hostname = self.hostname,
hk_hostname = self.hk_hostname,
port = self.port,
hk_port = self.hk_port,
control_host = self.hostname,
gcp_get_hk_port = self.get_hk_port,
dv_buffer_size = self.dv_buffer_size,
min_max_update_interval = self.min_max_update_interval,
sub_sampling = self.rendering_sub_sampling,
max_framerate = self.max_framerate,
mean_decay_factor = self.mean_decay_factor
)
write_get_hk_script(self.get_hk_script_name,
self.hostname, self.get_hk_port)
print("Done writing config file")
class IdSerialMapper(object):
def __init__(self, wiring_map):
self.mp = {}
self.mp_inv = {}
for k in wiring_map.keys():
wm = wiring_map[k]
board_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot)
self.mp[ wm.board_serial ] = board_id
self.mp_inv[board_id] = wm.board_serial
def get_id(self, serial):
return self.mp[serial]
def get_serial(self, id):
return self.mp_inv[id]
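# The two dicts form a bidirectional lookup, so
# mapper.get_serial(mapper.get_id(serial)) == serial for every wired board.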
###########################
## Squid display portion ##
###########################
def add_timestamp_info(screen, y, x, ts, col_index):
s = ts.Description()
screen.addstr(y, x, s[:s.rfind('.')], curses.color_pair(col_index))
#need screen geometry and squid list and squid mapping
def add_squid_info(screen, y, x,
sq_label, sq_label_size,
carrier_good, nuller_good, demod_good,
temperature_good,
voltage_good,
max_size,
bolometer_good,
fir_stage,
#routing_good,
feedback_on,
bolo_label = '',
neutral_c = 3, good_c = 2, bad_c = 1):
col_map = {True: curses.color_pair(good_c),
False: curses.color_pair(bad_c) }
current_index = x
screen.addstr(y, current_index, sq_label, curses.color_pair(neutral_c))
current_index += sq_label_size
screen.addstr(y, current_index, 'C', col_map[carrier_good])
current_index += 1
screen.addstr(y, current_index, 'N', col_map[nuller_good])
current_index += 1
screen.addstr(y, current_index, 'D', col_map[demod_good])
current_index += 1
screen.addstr(y, current_index, 'T', col_map[temperature_good])
current_index += 1
screen.addstr(y, current_index, 'V', col_map[voltage_good])
current_index += 1
screen.addstr(y, current_index, '%d'%fir_stage, col_map[fir_stage == 6])
current_index += 1
#screen.addstr(y, current_index, 'R', col_map[routing_good])
#current_index += 1
screen.addstr(y, current_index, 'F', col_map[feedback_on])
current_index += 1
if (not bolometer_good):
screen.addstr(y,
current_index, ' '+bolo_label[:(max_size - 7 - sq_label_size )],
col_map[False])
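# The rendered status line reads roughly "<label> CNDTV6F": carrier/nuller/demod
# rail state, temperature, voltage, FIR stage digit, and feedback, each colored
# good (green) or bad (red), optionally followed by a bolometer problem summary.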
def load_squid_info_from_hk( screen, y, x,
hk_map,
sq_dev_id, sq_label, sq_label_size,
max_size, serial_mapper):
carrier_good = False
nuller_good = False
demod_good = False
temp_good = False
volt_good = False
bolometer_good = False
full_label = 'NoData'
fir_stage = 0
routing_good = False
feedback_on = False
board_id, mezz_num, module_num = sq_phys_id_to_info(sq_dev_id)
board_serial = serial_mapper.get_serial(board_id)
#code for loading hk info for display
if (not hk_map is None) and board_serial in hk_map:
board_info = hk_map[board_serial]
mezz_info = hk_map[board_serial].mezz[mezz_num]
module_info = hk_map[board_serial].mezz[mezz_num].modules[module_num]
fir_stage = int(board_info.fir_stage)
routing_good = module_info.routing_type.lower() == 'routing_nul'
feedback_on = module_info.squid_feedback.lower() == 'squid_lowpass'
carrier_good = not module_info.carrier_railed
nuller_good = not module_info.nuller_railed
demod_good = not module_info.demod_railed
def dic_range_check(dr, dv):
for k in dv.keys():
if (not k in dr):
continue
rng = dr[k]
v = dv[k]
if v < rng[0] or v > rng[1]:
return False
return True
voltage_range = {'MOTHERBOARD_RAIL_VCC5V5': (5,6),
'MOTHERBOARD_RAIL_VADJ': (2,3),
'MOTHERBOARD_RAIL_VCC3V3': (3,3.6),
'MOTHERBOARD_RAIL_VCC1V0': (0.8, 1.2),
'MOTHERBOARD_RAIL_VCC1V2': (1, 1.5),
'MOTHERBOARD_RAIL_VCC12V0': (11, 13),
'MOTHERBOARD_RAIL_VCC1V8': (1.6, 2),
'MOTHERBOARD_RAIL_VCC1V5': (1.3, 1.7),
'MOTHERBOARD_RAIL_VCC1V0_GTX': (0.7, 1.3)}
temp_range = {'MOTHERBOARD_TEMPERATURE_FPGA': (0,80),
'MOTHERBOARD_TEMPERATURE_POWER': (0,80),
'MOTHERBOARD_TEMPERATURE_ARM': (0,80),
'MOTHERBOARD_TEMPERATURE_PHY': (0,80)}
#mezz voltages
mezz_voltage_range = {'MEZZANINE_RAIL_VCC12V0': (11,13),
'MEZZANINE_RAIL_VADJ': (2,3),
'MEZZANINE_RAIL_VCC3V3': (3,4) }
temp_good = dic_range_check( temp_range, board_info.temperatures)
volt_good = ( dic_range_check( voltage_range, board_info.voltages) or
dic_range_check( mezz_voltage_range, mezz_info.voltages)
)
bolometer_good = True
bolo_label = ''
n_railed = 0
n_diff_freq = 0
n_dan_off = 0
for b in module_info.channels.keys():
chinfo = module_info.channels[b]
if (chinfo.dan_railed):
n_railed += 1
elif (chinfo.carrier_frequency != chinfo.demod_frequency):
n_diff_freq += 1
elif ( (not (chinfo.dan_accumulator_enable and
chinfo.dan_feedback_enable and
chinfo.dan_streaming_enable ) )
and (chinfo.carrier_frequency > 0 and chinfo.carrier_amplitude > 0) ):
n_dan_off += 1
bolometer_good = not (n_railed or n_diff_freq or n_dan_off)
if not bolometer_good:
if n_railed:
full_label = "DanRail:%s"%(n_railed)
elif n_diff_freq:
full_label = "CDDiffFreq:%s"%(n_diff_freq)
elif n_dan_off:
full_label = "DanOff:%s"%(n_dan_off)
else:
full_label = ''
add_squid_info(screen, y, x,
sq_label, sq_label_size,
carrier_good, nuller_good, demod_good,
temp_good, volt_good,
max_size,
bolometer_good,
fir_stage,
#routing_good,
feedback_on,
bolo_label = full_label,
)
def GetHousekeepingMessenger(frame, hostname, port):
if frame.type == core.G3FrameType.Wiring:
os.system( "nc %s %d" % (hostname, port) )
class SquidDisplay(object):
def __init__(self,
squids_per_col = 32,
squid_col_width = 30):
self.squids_list = None
self.squids_per_col = squids_per_col
self.squid_col_width = squid_col_width
self.serial_mapper = None
self.str_id_lst = [" Carrier",
" Nuller",
" Demod",
" Temp",
" Voltage",
" fir#",
" squid Feedback"
]
self.highlight_index = [7 for s in self.str_id_lst]
def init_squids(self, squids_list) :
self.n_squids = len(squids_list) + len(self.str_id_lst) + 1
self.squids_list = squids_list
self.sq_label_size = max(map(len, squids_list)) + 3
ncols = int(math.ceil(float(self.n_squids)/self.squids_per_col))
self.screen_size_x = ncols * self.squid_col_width
self.screen_size_y = self.squids_per_col + 2
self.pos_map = {}
#assign an x, y location to each squid
for j, sq in enumerate(sorted(squids_list, key=cmp_to_key(str_cmp_with_numbers_sorted))):
i = j + len(self.str_id_lst) + 1
y = i % self.squids_per_col + 1
x = 1 + self.squid_col_width * ( i // self.squids_per_col)
self.pos_map[sq] = (x,y)
self.stdscr = curses.initscr()
curses.start_color()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_WHITE)
self.stdscr.clear()
signal.signal(signal.SIGWINCH, signal.SIG_IGN)
def __call__(self, frame):
if frame.type == core.G3FrameType.Wiring:
wiring_map = frame['WiringMap']
squid_ids = []
for k in wiring_map.keys():
wm = wiring_map[k]
squid_ids.append( get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1) )
squid_ids = uniquifyList(squid_ids)
self.init_squids(squid_ids)
self.serial_mapper = IdSerialMapper(frame['WiringMap'])
elif frame.type == core.G3FrameType.Housekeeping:
if self.squids_list is None:
return
#do update
if not frame is None:
hk_data = frame['DfMuxHousekeeping']
else:
hk_data = None
self.stdscr.clear()
y, x = self.stdscr.getmaxyx()
if y < self.screen_size_y or x < self.screen_size_x:
screen = self.stdscr.subwin(0, x, 0, 0)
screen.addstr(0,0, 'Terminal is too small %d %d'%(y,x), curses.color_pair(1))
screen.refresh()
return
screen = self.stdscr.subwin(0, self.screen_size_x, 0, 0)
screen.clear()
#screen.box()
#CNDTV6F
if not hk_data is None:
add_timestamp_info(screen, 0, 2, hk_data[list(hk_data.keys())[0]].timestamp, 5)
for i, s in enumerate(self.str_id_lst):
offset = 4
screen.addstr(i+1, offset, s, curses.color_pair(2))
screen.addstr(i+1, offset + self.highlight_index[i],
s[self.highlight_index[i]], curses.color_pair(3))
screen.hline(len(self.str_id_lst) + 1, 0,
'-', self.squid_col_width)
screen.vline(0, self.squid_col_width-1,
'|', len(self.str_id_lst)+1)
for i, s in enumerate(self.squids_list):
p = self.pos_map[s]
load_squid_info_from_hk( screen, p[1], p[0],
hk_data,
s, s, self.sq_label_size,
self.squid_col_width, self.serial_mapper)
screen.refresh()
elif frame.type == core.G3FrameType.EndProcessing:
if not self.squids_list is None:
self.stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('hostname')
parser.add_argument('--port',type=int, default=8675)
parser.add_argument('--local_ts_port',type=int, default=8676)
parser.add_argument('--local_hk_port',type=int, default=8677)
parser.add_argument('--gcp_signalled_hk_port', type=int, default=50011)
parser.add_argument('--lyrebird_output_file', default = 'lyrebird_config_file.json')
parser.add_argument('--get_hk_script', default = 'get_hk.sh')
parser.add_argument('--timestream_buffer_size',type=int, default=1024)
parser.add_argument('--min_max_update_interval', type=int, default = 300)
parser.add_argument('--rendering_sub_sampling', type=int, default = 2)
parser.add_argument('--max_framerate', type=int, default = 60)
parser.add_argument("--mean_decay_factor", type = float, default = 0.01,
help = "The mean filtered power has an exponential convolution form to the filter. It has a value in (0,1) exclusive. Increasing the value decreases the size of the exponential to it pushes the frequency of the HPF lower. Numbers close to one filter things very rapidly, close to 0 very slowly.")
parser.add_argument('--debug_mode', action='store_true', help = "prevents the spawning on the curses display")
parser.add_argument('--debug_logs', action='store_true', help = "store logs of stderr/out")
parser.add_argument('--ignore_nominal_bias_props', action='store_true', help = "will align the bolometers into a grid")
args = parser.parse_args()
#core.set_log_level(core.G3LogLevel.LOG_DEBUG)
script_path = os.path.dirname(os.path.realpath(__file__))
script_path = script_path + '/../bin/'
lyrebird_output_file = script_path + args.lyrebird_output_file
get_hk_script = script_path + args.get_hk_script
pipe = core.G3Pipeline()
pipe.Add(core.G3NetworkReceiver,
hostname = args.hostname, port = args.port)
if args.ignore_nominal_bias_props:
pipe.Add(lambda fr: fr.type != core.G3FrameType.Calibration)
pipe.Add(BoloPropertiesFaker)
pipe.Add(AddVbiasAndCurrentConv)
pipe.Add(BirdConfigGenerator,
lyrebird_output_file = lyrebird_output_file,
hostname = args.hostname,
get_hk_script_name = get_hk_script,
hk_hostname = '127.0.0.1',
port = args.local_ts_port,
hk_port = args.local_hk_port,
get_hk_port = args.gcp_signalled_hk_port,
dv_buffer_size = args.timestream_buffer_size,
min_max_update_interval = args.min_max_update_interval,
rendering_sub_sampling = args.rendering_sub_sampling,
max_framerate = args.max_framerate,
mean_decay_factor = args.mean_decay_factor
)
pipe.Add(GetHousekeepingMessenger, hostname = args.hostname,
port = args.gcp_signalled_hk_port)
pipe.Add(core.G3ThrottledNetworkSender,
hostname = '*',
port = args.local_hk_port,
frame_decimation = {core.G3FrameType.Timepoint: 10}
)
pipe.Add(core.G3ThrottledNetworkSender,
hostname = '*',
port = args.local_ts_port,
frame_decimation = {core.G3FrameType.Housekeeping: 0}
)
if args.debug_logs:
import sys
sys.stderr = open('kookaburra_stderr.txt', 'w')
sys.stdout = open('kookaburra_stdout.txt', 'w')
if args.debug_mode:
pipe.Add(core.Dump)
pipe.Run()
else:
pipe.Add(SquidDisplay)
try:
pipe.Run()
finally:
traceback.print_exc() # Print the exception
curses.curs_set(1)
curses.echo()
curses.nocbreak()
curses.endwin()
| 37.401504
| 323
| 0.56051
|
import numpy as np
import socket, curses, json, traceback, math, argparse, sys, os, stat
from operator import itemgetter, attrgetter
from configutils.dfmux_config_constructor import get_physical_id, sq_phys_id_to_info
from configutils.dfmux_config_constructor import uniquifyList, generate_dfmux_lyrebird_config
from spt3g import core, dfmux, calibration
from functools import cmp_to_key, reduce
import signal
import warnings
warnings.filterwarnings("ignore")
def split_on_numbers(s):
prevDig = False
outList = []
for char in s:
if char.isdigit():
if prevDig:
outList[-1] += char
else:
prevDig = True
outList.append(char)
else:
if not prevDig and len(outList)>0:
outList[-1] += char
else:
prevDig = False
outList.append(char)
return outList
def str_cmp_with_numbers_sorted(str1, str2):
if str1==str2:
return 0
split1 = split_on_numbers(str1)
split2 = split_on_numbers(str2)
largestStr = 0
for l in [split1, split2]:
for s in l:
if s[0].isdigit():
largestStr = len(s) if len(s) > largestStr else largestStr
for l in [split1, split2]:
for i in range(len(l)):
if l[i][0].isdigit():
l[i] = '0'*(largestStr-len(l[i])) +l[i]
p1 = reduce(lambda x,y: x+y, split1)
p2 = reduce(lambda x,y: x+y, split2)
return -1 if p1<p2 else 1
@core.cache_frame_data(type = core.G3FrameType.Housekeeping, wiring_map = 'WiringMap',
tf = 'DfMuxTransferFunction', system = 'ReadoutSystem')
def AddVbiasAndCurrentConv(frame, wiring_map, tf, system):
hk_map = frame['DfMuxHousekeeping']
v_bias = core.G3MapDouble()
i_conv = core.G3MapDouble()
for k in wiring_map.keys():
vb = dfmux.unittransforms.bolo_bias_voltage_rms(wiring_map, hk_map,
bolo = k, tf = tf, system = system) / core.G3Units.V
ic = dfmux.unittransforms.counts_to_rms_amps(wiring_map, hk_map,
bolo = k, tf = tf, system = system) / core.G3Units.amp
v_bias[k] = vb
i_conv[k] = ic
frame['VoltageBias'] = v_bias
frame['CurrentConv'] = i_conv
def make_square_block(n_things):
sq = n_things**0.5
if n_things == int(math.floor(sq))**2:
return (int(sq), int(sq))
else:
sq = int(math.floor(sq))
return (sq, sq+1)
def write_get_hk_script(fn, hostname, port):
script = '''#!/bin/bash
nc -w 1 %s %d
''' % (hostname, port)
f = open(fn, 'w')
f.write(script)
f.close()
st = os.stat(fn)
os.chmod(fn, st.st_mode | stat.S_IXUSR)
class BoloPropertiesFaker(object):
def __init__(self):
self.wiring_map = None
self.bolo_props = None
self.sent_off = False
self.default_tf = 'spt3g_filtering_2017_full'
return
def __call__(self, frame):
if 'DfMuxTransferFunction' in frame:
self.default_tf = frame['DfMuxTransferFunction']
if frame.type == core.G3FrameType.Wiring:
self.wiring_map = frame['WiringMap']
return self.send_off(frame)
elif frame.type == core.G3FrameType.Calibration:
if 'BolometerProperties' in frame:
self.bolo_props = frame['BolometerProperties']
elif 'NominalBolometerProperties' in frame:
self.bolo_props = frame['NominalBolometerProperties']
def send_off(self, frame):
if not self.wiring_map is None and self.bolo_props is None:
self.bolo_props = calibration.BolometerPropertiesMap()
n_chans = 0
squids = {}
for k in self.wiring_map.keys():
wm = self.wiring_map[k]
c = wm.channel + 1
if c > n_chans:
n_chans = c
sq = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1)
squids[sq] = 1
n_squids = len(squids.keys())
sq_layout = make_square_block(n_squids)
ch_layout = make_square_block(n_chans)
sq_x_sep = ch_layout[0] + 1
sq_y_sep = ch_layout[1] + 1
ch_x_sep = 1
ch_y_sep = 1
for i, sq in enumerate( sorted(squids.keys()) ):
x = i % sq_layout[0]
y = i // sq_layout[0]
squids[sq] = (1.2 * x * ch_layout[0], 1.2* y * ch_layout[1])
for k in self.wiring_map.keys():
wm = self.wiring_map[k]
sq_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1)
w_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot)
sql = squids[sq_id]
x = sql[0] + ((wm.channel) % ch_layout[0]) * ch_x_sep
y = sql[1] + ((wm.channel) // ch_layout[0]) * ch_y_sep
bp = calibration.BolometerProperties()
bp.physical_name = k
bp.band = 0
bp.pol_angle = 0
bp.pol_efficiency = 0
bp.wafer_id = w_id
bp.squid_id = sq_id
bp.x_offset = float(x)
bp.y_offset = float(y)
self.bolo_props[k] = bp
out_frame = core.G3Frame(core.G3FrameType.Calibration)
out_frame['BolometerProperties'] = self.bolo_props
out_frame['DfMuxTransferFunction'] = self.default_tf
return [out_frame, frame]
else:
return frame
class BirdConfigGenerator(object):
def __init__(self,
lyrebird_output_file = '',
get_hk_script_name= '',
hostname = '', hk_hostname = '',
port = 3, hk_port = 3, get_hk_port = 3,
dv_buffer_size = 0, min_max_update_interval = 0,
rendering_sub_sampling = 1, max_framerate = 0,
mean_decay_factor = 0.01
):
self.l_fn = lyrebird_output_file
self.get_hk_script_name = get_hk_script_name
self.is_written = False
self.bolo_props = None
self.wiring_map = None
self.hostname = hostname
self.hk_hostname = hk_hostname
self.port = port
self.hk_port = hk_port
self.get_hk_port = get_hk_port
self.dv_buffer_size = dv_buffer_size
self.min_max_update_interval = min_max_update_interval
self.rendering_sub_sampling = rendering_sub_sampling
self.max_framerate = max_framerate
self.mean_decay_factor = mean_decay_factor
def __call__(self, frame):
if frame.type == core.G3FrameType.Calibration:
if 'BolometerProperties' in frame:
bp_id = 'BolometerProperties'
elif 'NominalBolometerProperties' in frame:
bp_id = 'NominalBolometerProperties'
else:
raise RuntimeError("BolometerProperties fucked")
self.bolo_props = frame[bp_id]
self.write_config()
elif frame.type == core.G3FrameType.Wiring:
self.wiring_map = frame['WiringMap']
self.write_config()
def write_config(self):
if self.wiring_map is None or self.bolo_props is None:
return
config_dic = generate_dfmux_lyrebird_config(
self.l_fn,
self.wiring_map, self.bolo_props,
hostname = self.hostname,
hk_hostname = self.hk_hostname,
port = self.port,
hk_port = self.hk_port,
control_host = self.hostname,
gcp_get_hk_port = self.get_hk_port,
dv_buffer_size = self.dv_buffer_size,
min_max_update_interval = self.min_max_update_interval,
sub_sampling = self.rendering_sub_sampling,
max_framerate = self.max_framerate,
mean_decay_factor = self.mean_decay_factor
)
write_get_hk_script(self.get_hk_script_name,
self.hostname, self.get_hk_port)
print("Done writing config file")
class IdSerialMapper(object):
def __init__(self, wiring_map):
self.mp = {}
self.mp_inv = {}
for k in wiring_map.keys():
wm = wiring_map[k]
board_id = get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot)
self.mp[ wm.board_serial ] = board_id
self.mp_inv[board_id] = wm.board_serial
def get_id(self, serial):
return self.mp[serial]
def get_serial(self, id):
return self.mp_inv[id]
def add_timestamp_info(screen, y, x, ts, col_index):
s = ts.Description()
screen.addstr(y, x, s[:s.rfind('.')], curses.color_pair(col_index))
def add_squid_info(screen, y, x,
sq_label, sq_label_size,
carrier_good, nuller_good, demod_good,
temperature_good,
voltage_good,
max_size,
bolometer_good,
fir_stage,
feedback_on,
bolo_label = '',
neutral_c = 3, good_c = 2, bad_c = 1):
col_map = {True: curses.color_pair(good_c),
False: curses.color_pair(bad_c) }
current_index = x
screen.addstr(y, current_index, sq_label, curses.color_pair(neutral_c))
current_index += sq_label_size
screen.addstr(y, current_index, 'C', col_map[carrier_good])
current_index += 1
screen.addstr(y, current_index, 'N', col_map[nuller_good])
current_index += 1
screen.addstr(y, current_index, 'D', col_map[demod_good])
current_index += 1
screen.addstr(y, current_index, 'T', col_map[temperature_good])
current_index += 1
screen.addstr(y, current_index, 'V', col_map[voltage_good])
current_index += 1
screen.addstr(y, current_index, '%d'%fir_stage, col_map[fir_stage == 6])
current_index += 1
screen.addstr(y, current_index, 'F', col_map[feedback_on])
current_index += 1
if (not bolometer_good):
screen.addstr(y,
current_index, ' '+bolo_label[:(max_size - 7 - sq_label_size )],
col_map[False])
def load_squid_info_from_hk( screen, y, x,
hk_map,
sq_dev_id, sq_label, sq_label_size,
max_size, serial_mapper):
carrier_good = False
nuller_good = False
demod_good = False
temp_good = False
volt_good = False
bolometer_good = False
full_label = 'NoData'
fir_stage = 0
routing_good = False
feedback_on = False
board_id, mezz_num, module_num = sq_phys_id_to_info(sq_dev_id)
board_serial = serial_mapper.get_serial(board_id)
if (not hk_map is None) and board_serial in hk_map:
board_info = hk_map[board_serial]
mezz_info = hk_map[board_serial].mezz[mezz_num]
module_info = hk_map[board_serial].mezz[mezz_num].modules[module_num]
fir_stage = int(board_info.fir_stage)
routing_good = module_info.routing_type.lower() == 'routing_nul'
feedback_on = module_info.squid_feedback.lower() == 'squid_lowpass'
carrier_good = not module_info.carrier_railed
nuller_good = not module_info.nuller_railed
demod_good = not module_info.demod_railed
def dic_range_check(dr, dv):
for k in dv.keys():
if (not k in dr):
continue
rng = dr[k]
v = dv[k]
if v < rng[0] or v > rng[1]:
return False
return True
voltage_range = {'MOTHERBOARD_RAIL_VCC5V5': (5,6),
'MOTHERBOARD_RAIL_VADJ': (2,3),
'MOTHERBOARD_RAIL_VCC3V3': (3,3.6),
'MOTHERBOARD_RAIL_VCC1V0': (0.8, 1.2),
'MOTHERBOARD_RAIL_VCC1V2': (1, 1.5),
'MOTHERBOARD_RAIL_VCC12V0': (11, 13),
'MOTHERBOARD_RAIL_VCC1V8': (1.6, 2),
'MOTHERBOARD_RAIL_VCC1V5': (1.3, 1.7),
'MOTHERBOARD_RAIL_VCC1V0_GTX': (0.7, 1.3)}
temp_range = {'MOTHERBOARD_TEMPERATURE_FPGA': (0,80),
'MOTHERBOARD_TEMPERATURE_POWER': (0,80),
'MOTHERBOARD_TEMPERATURE_ARM': (0,80),
'MOTHERBOARD_TEMPERATURE_PHY': (0,80)}
mezz_voltage_range = {'MEZZANINE_RAIL_VCC12V0': (11,13),
'MEZZANINE_RAIL_VADJ': (2,3),
'MEZZANINE_RAIL_VCC3V3': (3,4) }
temp_good = dic_range_check( temp_range, board_info.temperatures)
volt_good = ( dic_range_check( voltage_range, board_info.voltages) or
dic_range_check( mezz_voltage_range, mezz_info.voltages)
)
bolometer_good = True
bolo_label = ''
n_railed = 0
n_diff_freq = 0
n_dan_off = 0
for b in module_info.channels.keys():
chinfo = module_info.channels[b]
if (chinfo.dan_railed):
n_railed += 1
elif (chinfo.carrier_frequency != chinfo.demod_frequency):
n_diff_freq += 1
elif ( (not (chinfo.dan_accumulator_enable and
chinfo.dan_feedback_enable and
chinfo.dan_streaming_enable ) )
and (chinfo.carrier_frequency > 0 and chinfo.carrier_amplitude > 0) ):
n_dan_off += 1
bolometer_good = not (n_railed or n_diff_freq or n_dan_off)
if not bolometer_good:
if n_railed:
full_label = "DanRail:%s"%(n_railed)
elif n_diff_freq:
full_label = "CDDiffFreq:%s"%(n_diff_freq)
elif n_dan_off:
full_label = "DanOff:%s"%(n_dan_off)
else:
full_label = ''
add_squid_info(screen, y, x,
sq_label, sq_label_size,
carrier_good, nuller_good, demod_good,
temp_good, volt_good,
max_size,
bolometer_good,
fir_stage,
feedback_on,
bolo_label = full_label,
)
def GetHousekeepingMessenger(frame, hostname, port):
if frame.type == core.G3FrameType.Wiring:
os.system( "nc %s %d" % (hostname, port) )
class SquidDisplay(object):
def __init__(self,
squids_per_col = 32,
squid_col_width = 30):
self.squids_list = None
self.squids_per_col = squids_per_col
self.squid_col_width = squid_col_width
self.serial_mapper = None
self.str_id_lst = [" Carrier",
" Nuller",
" Demod",
" Temp",
" Voltage",
" fir#",
" squid Feedback"
]
self.highlight_index = [7 for s in self.str_id_lst]
def init_squids(self, squids_list) :
self.n_squids = len(squids_list) + len(self.str_id_lst) + 1
self.squids_list = squids_list
self.sq_label_size = max(map(len, squids_list)) + 3
ncols = int(math.ceil(float(self.n_squids)/self.squids_per_col))
self.screen_size_x = ncols * self.squid_col_width
self.screen_size_y = self.squids_per_col + 2
self.pos_map = {}
for j, sq in enumerate(sorted(squids_list, key=cmp_to_key(str_cmp_with_numbers_sorted))):
i = j + len(self.str_id_lst) + 1
y = i % self.squids_per_col + 1
x = 1 + self.squid_col_width * ( i // self.squids_per_col)
self.pos_map[sq] = (x,y)
self.stdscr = curses.initscr()
curses.start_color()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_WHITE)
self.stdscr.clear()
signal.signal(signal.SIGWINCH, signal.SIG_IGN)
def __call__(self, frame):
if frame.type == core.G3FrameType.Wiring:
wiring_map = frame['WiringMap']
squid_ids = []
for k in wiring_map.keys():
wm = wiring_map[k]
squid_ids.append( get_physical_id(wm.board_serial,
wm.crate_serial,
wm.board_slot,
wm.module + 1) )
squid_ids = uniquifyList(squid_ids)
self.init_squids(squid_ids)
self.serial_mapper = IdSerialMapper(frame['WiringMap'])
elif frame.type == core.G3FrameType.Housekeeping:
if self.squids_list is None:
return
if not frame is None:
hk_data = frame['DfMuxHousekeeping']
else:
hk_data = None
self.stdscr.clear()
y, x = self.stdscr.getmaxyx()
if y < self.screen_size_y or x < self.screen_size_x:
screen = self.stdscr.subwin(0, x, 0, 0)
screen.addstr(0,0, 'Terminal is too small %d %d'%(y,x), curses.color_pair(1))
screen.refresh()
return
screen = self.stdscr.subwin(0, self.screen_size_x, 0, 0)
screen.clear()
if not hk_data is None:
add_timestamp_info(screen, 0, 2, hk_data[list(hk_data.keys())[0]].timestamp, 5)
for i, s in enumerate(self.str_id_lst):
offset = 4
screen.addstr(i+1, offset, s, curses.color_pair(2))
screen.addstr(i+1, offset + self.highlight_index[i],
s[self.highlight_index[i]], curses.color_pair(3))
screen.hline(len(self.str_id_lst) + 1, 0,
'-', self.squid_col_width)
screen.vline(0, self.squid_col_width-1,
'|', len(self.str_id_lst)+1)
for i, s in enumerate(self.squids_list):
p = self.pos_map[s]
load_squid_info_from_hk( screen, p[1], p[0],
hk_data,
s, s, self.sq_label_size,
self.squid_col_width, self.serial_mapper)
screen.refresh()
elif frame.type == core.G3FrameType.EndProcessing:
if not self.squids_list is None:
self.stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('hostname')
parser.add_argument('--port',type=int, default=8675)
parser.add_argument('--local_ts_port',type=int, default=8676)
parser.add_argument('--local_hk_port',type=int, default=8677)
parser.add_argument('--gcp_signalled_hk_port', type=int, default=50011)
parser.add_argument('--lyrebird_output_file', default = 'lyrebird_config_file.json')
parser.add_argument('--get_hk_script', default = 'get_hk.sh')
parser.add_argument('--timestream_buffer_size',type=int, default=1024)
parser.add_argument('--min_max_update_interval', type=int, default = 300)
parser.add_argument('--rendering_sub_sampling', type=int, default = 2)
parser.add_argument('--max_framerate', type=int, default = 60)
parser.add_argument("--mean_decay_factor", type = float, default = 0.01,
help = "The mean filtered power has an exponential convolution form to the filter. It has a value in (0,1) exclusive. Increasing the value decreases the size of the exponential to it pushes the frequency of the HPF lower. Numbers close to one filter things very rapidly, close to 0 very slowly.")
parser.add_argument('--debug_mode', action='store_true', help = "prevents the spawning on the curses display")
parser.add_argument('--debug_logs', action='store_true', help = "store logs of stderr/out")
parser.add_argument('--ignore_nominal_bias_props', action='store_true', help = "will align the bolometers into a grid")
args = parser.parse_args()
script_path = os.path.dirname(os.path.realpath(__file__))
script_path = script_path + '/../bin/'
lyrebird_output_file = script_path + args.lyrebird_output_file
get_hk_script = script_path + args.get_hk_script
pipe = core.G3Pipeline()
pipe.Add(core.G3NetworkReceiver,
hostname = args.hostname, port = args.port)
if args.ignore_nominal_bias_props:
pipe.Add(lambda fr: fr.type != core.G3FrameType.Calibration)
pipe.Add(BoloPropertiesFaker)
pipe.Add(AddVbiasAndCurrentConv)
pipe.Add(BirdConfigGenerator,
lyrebird_output_file = lyrebird_output_file,
hostname = args.hostname,
get_hk_script_name = get_hk_script,
hk_hostname = '127.0.0.1',
port = args.local_ts_port,
hk_port = args.local_hk_port,
get_hk_port = args.gcp_signalled_hk_port,
dv_buffer_size = args.timestream_buffer_size,
min_max_update_interval = args.min_max_update_interval,
rendering_sub_sampling = args.rendering_sub_sampling,
max_framerate = args.max_framerate,
mean_decay_factor = args.mean_decay_factor
)
pipe.Add(GetHousekeepingMessenger, hostname = args.hostname,
port = args.gcp_signalled_hk_port)
pipe.Add(core.G3ThrottledNetworkSender,
hostname = '*',
port = args.local_hk_port,
frame_decimation = {core.G3FrameType.Timepoint: 10}
)
pipe.Add(core.G3ThrottledNetworkSender,
hostname = '*',
port = args.local_ts_port,
frame_decimation = {core.G3FrameType.Housekeeping: 0}
)
if args.debug_logs:
import sys
sys.stderr = open('kookaburra_stderr.txt', 'w')
sys.stdout = open('kookaburra_stdout.txt', 'w')
if args.debug_mode:
pipe.Add(core.Dump)
pipe.Run()
else:
pipe.Add(SquidDisplay)
try:
pipe.Run()
finally:
traceback.print_exc()
curses.curs_set(1)
curses.echo()
curses.nocbreak()
curses.endwin()
| true
| true
|
7904f6fc827f130bd6b460661e5f206262104f46
| 5,833
|
py
|
Python
|
framework/TSA/PolynomialRegression.py
|
archmagethanos/raven
|
d727cc3da3dff5254b418fb3691a2e45deb20136
|
[
"Apache-2.0"
] | 1
|
2021-07-12T19:41:52.000Z
|
2021-07-12T19:41:52.000Z
|
framework/TSA/PolynomialRegression.py
|
archmagethanos/raven
|
d727cc3da3dff5254b418fb3691a2e45deb20136
|
[
"Apache-2.0"
] | null | null | null |
framework/TSA/PolynomialRegression.py
|
archmagethanos/raven
|
d727cc3da3dff5254b418fb3691a2e45deb20136
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
    specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree polynomial to fit the data with."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
      A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
      @ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
      Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
    for t, target in enumerate(targets):
      # fit each target against its own column of the signal
      results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
      for i in range(1, settings['degree'] + 1):
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
      @ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
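# A minimal, self-contained sketch (not raven code) of the fit performed by
# characterize()/generate() above, using only sklearn and statsmodels:
#
#   import numpy as np
#   from sklearn.preprocessing import PolynomialFeatures
#   import statsmodels.api as sm
#
#   pivot = np.linspace(0.0, 1.0, 50)
#   signal = 2.0 + 3.0 * pivot - 1.5 * pivot**2          # quadratic "truth"
#   xp = PolynomialFeatures(degree=2).fit_transform(pivot.reshape(-1, 1))
#   results = sm.OLS(signal, xp).fit()
#   print(results.params)      # ~ [2.0, 3.0, -1.5]: intercept, coef1, coef2
#   synthetic = results.predict(xp)                      # the generated trend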
| 36.917722
| 112
| 0.677353
|
| true
| true
|
7904f73a55e8a1cb1e5822e74cf6fff7c1ddfd71
| 1,133
|
py
|
Python
|
pythonmod/doc/examples/example0-1.py
|
luisdallos/unbound
|
4034c009bb8fc78299996b0a23154653ede7c30a
|
[
"BSD-3-Clause"
] | 1,751
|
2016-11-03T18:25:34.000Z
|
2022-03-30T17:43:26.000Z
|
pythonmod/doc/examples/example0-1.py
|
luisdallos/unbound
|
4034c009bb8fc78299996b0a23154653ede7c30a
|
[
"BSD-3-Clause"
] | 603
|
2017-03-03T19:51:58.000Z
|
2022-03-31T12:56:58.000Z
|
pythonmod/doc/examples/example0-1.py
|
luisdallos/unbound
|
4034c009bb8fc78299996b0a23154653ede7c30a
|
[
"BSD-3-Clause"
] | 296
|
2016-11-14T07:00:11.000Z
|
2022-03-29T00:56:58.000Z
|
def init(id, cfg):
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, cfg.port, cfg.python_script))
return True
def init_standard(id, env):
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, env.cfg.port, env.cfg.python_script))
return True
def deinit(id):
log_info("pythonmod: deinit called, module id is %d" % id)
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def operate(id, event, qstate, qdata):
log_info("pythonmod: operate called, id: %d, event:%s" % (id, strmodulevent(event)))
if event == MODULE_EVENT_NEW:
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event == MODULE_EVENT_MODDONE:
log_info("pythonmod: module we are waiting for is done")
qstate.ext_state[id] = MODULE_FINISHED
return True
if event == MODULE_EVENT_PASS:
log_info("pythonmod: event_pass")
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
log_err("pythonmod: BAD event")
qstate.ext_state[id] = MODULE_ERROR
return True
log_info("pythonmod: script loaded.")
| 29.815789
| 118
| 0.68579
|
| true
| true
|
7904f7d66d7d996e8defae5ec52fbd6d0ff0fcca
| 585
|
py
|
Python
|
tuling.py
|
ali-geng/wechatrobot
|
6e0701447ff9bdfb09a3d872a5bcc2ed3d8ff345
|
[
"MIT"
] | 2
|
2018-11-14T07:44:19.000Z
|
2018-11-14T07:44:30.000Z
|
tuling.py
|
91MrGeng/wechatrobot
|
6e0701447ff9bdfb09a3d872a5bcc2ed3d8ff345
|
[
"MIT"
] | 1
|
2021-08-10T08:23:34.000Z
|
2021-08-10T08:23:34.000Z
|
tuling.py
|
ali-geng/wechatrobot
|
6e0701447ff9bdfb09a3d872a5bcc2ed3d8ff345
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import requests
import json
def robot(content,userid):
api = r'http://openapi.tuling123.com/openapi/api/v2'
data = {
"perception": {
"inputText": {
"text": content
}
},
"userInfo": {
"apiKey": "fece0dcdbe4845559492c26d5de40119",
"userId": userid
}
}
response = requests.post(api, data=json.dumps(data))
robot_res = json.loads(response.content)
return robot_res["results"][0]['values']['text']
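# A minimal usage sketch (not part of the original file):
#
#   reply = robot("hello", "user-001")   # userid keeps conversations separate
#   print(reply)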
| 26.590909
| 65
| 0.499145
|
| true
| true
|
7904f810458fe24ba253700653f2686d9292e963
| 3,854
|
py
|
Python
|
cave/com.raytheon.viz.gfe/python/testFormatters/FirePeriodTable.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/testFormatters/FirePeriodTable.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/testFormatters/FirePeriodTable.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
########################################################################
# FirePeriodTable
#
# Type: table
# Edit Areas: solicited from user
# Weather Elements: You must have these Weather elements defined in
# your server: Sky, LAL, RelHum, MaxT, MinT, FreeWind,
# Haines, TransWind, MixHgt(ft AGL)
# To Run:
# Set GFE Time Range
# Products-->Generate Products
# Choose Edit Areas
# Select OK
#
########################################################################
## EXAMPLE OUTPUT (Scarce Data)
## Fire Period Table for Feb 29 00 17:00:00 GMT - Mar 01 00 11:00:00 GMT.
## Edit Area Sky (%) LAL RelHum (%) MaxT MinT FreeWind(mph) Haines TransWind(mph) MixHgt(ft AGL)
## COAdams 36-23 46 26
## COArapahoe 34-24 46 26
## COBoulder 31-52 34 18
## COClearCreek 16-57 26 12
## CODenver 37-40 43 25
## CODouglas 24-47 40 21
## COElbert 31-22 46 25
########################################################################
Definition = {
"type": "table",
"displayName": "TEST_Fire Period Table", # for Product Generation Menu
# Output file for product results
"outputFile": "./FirePeriodTable.txt", # default output file
"constantVariable": "TimePeriod",
"rowVariable": "EditArea",
"columnVariable": "WeatherElement",
"beginningText": "Fire Period Table for %TimePeriod. \n\n",
"endingText": "",
# Edit Areas
"defaultEditAreas" : [("area1","Area 1"),("area2","Area 2")],
"runTimeEditAreas": "yes",
"areaType" : "Edit Area", # E.g. City, County, Basin, etc.
# Time Ranges
"defaultRanges": ["Today"],
"runTimeRanges" : "no", # if yes, ask user at run time
"elementList": [
("Sky", "Sky (%)",
"minMax",
"range2Value",
"Scalar", 1, None),
("LAL","LAL",
"minMax",
"range2Value",
"Scalar",1,None),
("MaxT","MaxT",
"avg",
"singleValue",
"Scalar", 1, None),
("MinT","MinT",
"avg",
"singleValue",
"Scalar", 1, None),
("FreeWind","FreeWind(mph)",
"vectorRange",
"range2Value",
"Vector", 1, "ktToMph"),
("Haines","Haines",
"minMax",
"range2Value",
"Scalar",1,None),
("TransWind","TransWind(mph)",
"vectorRange",
"range2Value",
"Vector", 1, "ktToMph"),
("MixHgt", "MixHgt(ft AGL)",
"minMax",
"range2Value",
"Scalar",10,None),
],
}
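# Note (field meanings inferred, not documented here): each elementList entry
# appears to follow the pattern (element name, column label, analysis method,
# report-as method, data type, rounding increment, unit conversion). For
# example, FreeWind is analyzed as a vector range, reported as a two-value
# range, and converted from knots to mph via "ktToMph".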
| 34.720721
| 112
| 0.463415
| true
| true
|
|
7904f86a9867804b94ab97025933ff0fd4d8caa9
| 640
|
py
|
Python
|
trebol/interface.py
|
ilkerkesen/trebol
|
4adda97a7662d2412cf6a92a768cb1033d74db6c
|
[
"MIT"
] | null | null | null |
trebol/interface.py
|
ilkerkesen/trebol
|
4adda97a7662d2412cf6a92a768cb1033d74db6c
|
[
"MIT"
] | 2
|
2015-01-18T00:47:52.000Z
|
2015-02-06T15:24:55.000Z
|
trebol/interface.py
|
ilkerkesen/trebol
|
4adda97a7662d2412cf6a92a768cb1033d74db6c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.gen
import bcrypt
__all__ = ["create_new_user"]
@tornado.gen.coroutine
def get_next_id(db, collection):
counter = yield db.counters.find_and_modify(
{"_id": "{}id".format(collection)},
{"$inc": {"seq": 1}},
new=True,
)
raise tornado.gen.Return(counter["seq"])
@tornado.gen.coroutine
def create_new_user(db, email, password, group):
password = bcrypt.hashpw(password.encode(), bcrypt.gensalt(8))
id = yield get_next_id(db, "user")
yield db.users.insert({
"_id": id, "email": email, "hash": password, "group": group})
| 23.703704
| 69
| 0.635938
|
| true
| true
|
7904f89e359c6197225aebdabbbcb2e74fe423a5
| 43,227
|
py
|
Python
|
featuretools/entityset/entityset.py
|
esyyes/featuretools
|
7d96bd221bad71c70b5d79ce7f7a8885c298f6df
|
[
"BSD-3-Clause"
] | 1
|
2020-06-10T02:39:27.000Z
|
2020-06-10T02:39:27.000Z
|
featuretools/entityset/entityset.py
|
esyyes/featuretools
|
7d96bd221bad71c70b5d79ce7f7a8885c298f6df
|
[
"BSD-3-Clause"
] | null | null | null |
featuretools/entityset/entityset.py
|
esyyes/featuretools
|
7d96bd221bad71c70b5d79ce7f7a8885c298f6df
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
    Stores all actual data for an entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
        for r in self.relationships:
            if r not in other.relationships:
                return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. None if entity doesn't
exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
return [self.add_relationship(r) for r in relationships][-1]
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
        # this is a new pair of entities; make sure the link variables have the
        # right types (Id on the child, Index on the parent)
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
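    # A minimal usage sketch, assuming entities "sessions" and "transactions"
    # already exist in `es`:
    #
    #   es.add_relationship(Relationship(es["sessions"]["id"],
    #                                    es["transactions"]["session_id"]))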
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
            goal_entity_id (str) : id of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find forward entities.
        Yields a tuple of (ancestor_id, path from entity_id to ancestor).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
            entity_id (str): Id of entity to search from.
            deep (bool): if True, recursively find backward entities.
        Yields a tuple of (descendant_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
                Keys are variable ids and values are variable types or type_strings. Used to
                initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be in :class:`variables.DateTime` or be
able to be cast to datetime (e.g. str, float, or numeric.)
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use a second time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
            make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary
                time index from the dictionary key; the values are the variables to
                associate with the secondary time index. Only one secondary time index
                is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in [v for v in additional_variables]:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] +\
[v for v in additional_variables] +\
[v for v in copy_variables]
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
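    # A minimal usage sketch: splitting a "sessions" entity out of a
    # "transactions" entity on its session id column:
    #
    #   es.normalize_entity(base_entity_id="transactions",
    #                       new_entity_id="sessions",
    #                       index="session_id",
    #                       make_time_index=True)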
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
                columns = [col for col in combined_df.columns if
                           col != entity.index and col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
# Now there is a possibility that a child entity
# was not explicitly provided in updated_entities,
# and never made it onto the queue. If updated_entities
# is None then we just load all entities onto the queue
# so we didn't need this logic
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
# updated last time from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
                        lti_df = pd.Series(dtype='datetime64[ns]')
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
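    # An illustration (pseudocode, not upstream code) of the invariant computed
    # above: an instance's last_time_index is the max of its own time index
    # value and the last_time_index of every child instance referencing it:
    #
    #   lti[parent] = max(time_index[parent],
    #                     max(lti[child] for child referencing parent))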
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
            variables_string = '\\l'.join([var.id + ' : ' + var.type_string
                                           for var in entity.variables])
            nrows = entity.shape[0]
            label = '{%s (%d row%s)|%s\\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string)
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
# Display the key only once if is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph
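# A minimal usage sketch, assuming a graphviz backend is installed:
#
#   graph = es.plot(to_file="entityset.pdf")
#   graph   # renders inline in a Jupyter notebook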
| 44.426516
| 174
| 0.573924
|
import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
def __init__(self, id=None, entities=None, relationships=None):
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
for r in other.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
nd index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
            if base_time_index not in additional_variables:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
        selected_variables = [index] + additional_variables + copy_variables
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
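For context, a minimal sketch of how entity_from_dataframe and normalize_entity are typically used together, assuming the pre-1.0 featuretools API that the code above implements; the dataframe, entity ids, and column names are illustrative:

import featuretools as ft
import pandas as pd

df = pd.DataFrame({
    "transaction_id": [1, 2, 3, 4],
    "customer_id": ["a", "a", "b", "b"],
    "amount": [10.0, 25.5, 7.0, 3.2],
    "transaction_time": pd.date_range("2020-01-01", periods=4, freq="D"),
})

es = ft.EntitySet(id="transactions_es")
es = es.entity_from_dataframe(entity_id="transactions", dataframe=df,
                              index="transaction_id",
                              time_index="transaction_time")
# Split customers into their own entity; normalize_entity also adds the
# relationship between the new and the base entity, as done at the end of
# the method above.
es = es.normalize_entity(base_entity_id="transactions",
                         new_entity_id="customers",
                         index="customer_id",
                         make_time_index=True)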
| true
| true
|
7904f98216b2696d85f120262b088e598960052b
| 6,497
|
py
|
Python
|
homeassistant/components/agent_dvr/camera.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 1
|
2021-03-23T07:20:03.000Z
|
2021-03-23T07:20:03.000Z
|
homeassistant/components/agent_dvr/camera.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 51
|
2020-08-03T07:30:44.000Z
|
2022-03-22T06:02:42.000Z
|
homeassistant/components/agent_dvr/camera.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 2
|
2021-03-22T21:42:48.000Z
|
2021-04-12T12:26:39.000Z
|
"""Support for Agent camera streaming."""
from datetime import timedelta
import logging
from agent import AgentError
from homeassistant.components.camera import SUPPORT_ON_OFF
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers import entity_platform
from .const import (
ATTRIBUTION,
CAMERA_SCAN_INTERVAL_SECS,
CONNECTION,
DOMAIN as AGENT_DOMAIN,
)
SCAN_INTERVAL = timedelta(seconds=CAMERA_SCAN_INTERVAL_SECS)
_LOGGER = logging.getLogger(__name__)
_DEV_EN_ALT = "enable_alerts"
_DEV_DS_ALT = "disable_alerts"
_DEV_EN_REC = "start_recording"
_DEV_DS_REC = "stop_recording"
_DEV_SNAP = "snapshot"
CAMERA_SERVICES = {
_DEV_EN_ALT: "async_enable_alerts",
_DEV_DS_ALT: "async_disable_alerts",
_DEV_EN_REC: "async_start_recording",
_DEV_DS_REC: "async_stop_recording",
_DEV_SNAP: "async_snapshot",
}
async def async_setup_entry(
hass, config_entry, async_add_entities, discovery_info=None
):
"""Set up the Agent cameras."""
filter_urllib3_logging()
cameras = []
server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
if not server.devices:
_LOGGER.warning("Could not fetch cameras from Agent server")
return
for device in server.devices:
if device.typeID == 2:
camera = AgentCamera(device)
cameras.append(camera)
async_add_entities(cameras)
platform = entity_platform.current_platform.get()
for service, method in CAMERA_SERVICES.items():
platform.async_register_entity_service(service, {}, method)
class AgentCamera(MjpegCamera):
"""Representation of an Agent Device Stream."""
def __init__(self, device):
"""Initialize as a subclass of MjpegCamera."""
self._servername = device.client.name
self.server_url = device.client._server_url
device_info = {
CONF_NAME: device.name,
CONF_MJPEG_URL: f"{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
CONF_STILL_IMAGE_URL: f"{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
}
self.device = device
self._removed = False
self._name = f"{self._servername} {device.name}"
self._unique_id = f"{device._client.unique}_{device.typeID}_{device.id}"
super().__init__(device_info)
@property
def device_info(self):
"""Return the device info for adding the entity to the agent object."""
return {
"identifiers": {(AGENT_DOMAIN, self._unique_id)},
"name": self._name,
"manufacturer": "Agent",
"model": "Camera",
"sw_version": self.device.client.version,
}
async def async_update(self):
"""Update our state from the Agent API."""
try:
await self.device.update()
if self._removed:
_LOGGER.debug("%s reacquired", self._name)
self._removed = False
except AgentError:
if self.device.client.is_available: # server still available - camera error
if not self._removed:
_LOGGER.error("%s lost", self._name)
self._removed = True
@property
def extra_state_attributes(self):
"""Return the Agent DVR camera state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"editable": False,
"enabled": self.is_on,
"connected": self.connected,
"detected": self.is_detected,
"alerted": self.is_alerted,
"has_ptz": self.device.has_ptz,
"alerts_enabled": self.device.alerts_active,
}
@property
def should_poll(self) -> bool:
"""Update the state periodically."""
return True
@property
def is_recording(self) -> bool:
"""Return whether the monitor is recording."""
return self.device.recording
@property
def is_alerted(self) -> bool:
"""Return whether the monitor has alerted."""
return self.device.alerted
@property
def is_detected(self) -> bool:
"""Return whether the monitor has alerted."""
return self.device.detected
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.device.client.is_available
@property
def connected(self) -> bool:
"""Return True if entity is connected."""
return self.device.connected
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_ON_OFF
@property
def is_on(self) -> bool:
"""Return true if on."""
return self.device.online
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
if self.is_on:
return "mdi:camcorder"
return "mdi:camcorder-off"
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self.device.detector_active
@property
def unique_id(self) -> str:
"""Return a unique identifier for this agent object."""
return self._unique_id
async def async_enable_alerts(self):
"""Enable alerts."""
await self.device.alerts_on()
async def async_disable_alerts(self):
"""Disable alerts."""
await self.device.alerts_off()
async def async_enable_motion_detection(self):
"""Enable motion detection."""
await self.device.detector_on()
async def async_disable_motion_detection(self):
"""Disable motion detection."""
await self.device.detector_off()
async def async_start_recording(self):
"""Start recording."""
await self.device.record()
async def async_stop_recording(self):
"""Stop recording."""
await self.device.record_stop()
async def async_turn_on(self):
"""Enable the camera."""
await self.device.enable()
async def async_snapshot(self):
"""Take a snapshot."""
await self.device.snapshot()
async def async_turn_off(self):
"""Disable the camera."""
await self.device.disable()
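The CAMERA_SERVICES table above maps each service name to the entity method that implements it, and async_setup_entry registers every pair so Home Assistant can dispatch service calls by method name. A minimal standalone sketch of that name-based dispatch, using a hypothetical FakeCamera stand-in rather than the real entity:

import asyncio

class FakeCamera:
    """Hypothetical stand-in for AgentCamera with one service method."""
    async def async_snapshot(self):
        print("snapshot taken")

async def dispatch(entity, service):
    # Resolve the method name registered for this service and await it,
    # mirroring what async_register_entity_service arranges above.
    await getattr(entity, CAMERA_SERVICES[service])()

asyncio.run(dispatch(FakeCamera(), _DEV_SNAP))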
| 30.078704
| 137
| 0.64322
|
from datetime import timedelta
import logging
from agent import AgentError
from homeassistant.components.camera import SUPPORT_ON_OFF
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers import entity_platform
from .const import (
ATTRIBUTION,
CAMERA_SCAN_INTERVAL_SECS,
CONNECTION,
DOMAIN as AGENT_DOMAIN,
)
SCAN_INTERVAL = timedelta(seconds=CAMERA_SCAN_INTERVAL_SECS)
_LOGGER = logging.getLogger(__name__)
_DEV_EN_ALT = "enable_alerts"
_DEV_DS_ALT = "disable_alerts"
_DEV_EN_REC = "start_recording"
_DEV_DS_REC = "stop_recording"
_DEV_SNAP = "snapshot"
CAMERA_SERVICES = {
_DEV_EN_ALT: "async_enable_alerts",
_DEV_DS_ALT: "async_disable_alerts",
_DEV_EN_REC: "async_start_recording",
_DEV_DS_REC: "async_stop_recording",
_DEV_SNAP: "async_snapshot",
}
async def async_setup_entry(
hass, config_entry, async_add_entities, discovery_info=None
):
filter_urllib3_logging()
cameras = []
server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
if not server.devices:
_LOGGER.warning("Could not fetch cameras from Agent server")
return
for device in server.devices:
if device.typeID == 2:
camera = AgentCamera(device)
cameras.append(camera)
async_add_entities(cameras)
platform = entity_platform.current_platform.get()
for service, method in CAMERA_SERVICES.items():
platform.async_register_entity_service(service, {}, method)
class AgentCamera(MjpegCamera):
def __init__(self, device):
self._servername = device.client.name
self.server_url = device.client._server_url
device_info = {
CONF_NAME: device.name,
CONF_MJPEG_URL: f"{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
CONF_STILL_IMAGE_URL: f"{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
}
self.device = device
self._removed = False
self._name = f"{self._servername} {device.name}"
self._unique_id = f"{device._client.unique}_{device.typeID}_{device.id}"
super().__init__(device_info)
@property
def device_info(self):
return {
"identifiers": {(AGENT_DOMAIN, self._unique_id)},
"name": self._name,
"manufacturer": "Agent",
"model": "Camera",
"sw_version": self.device.client.version,
}
async def async_update(self):
try:
await self.device.update()
if self._removed:
_LOGGER.debug("%s reacquired", self._name)
self._removed = False
except AgentError:
if self.device.client.is_available:
if not self._removed:
_LOGGER.error("%s lost", self._name)
self._removed = True
@property
def extra_state_attributes(self):
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"editable": False,
"enabled": self.is_on,
"connected": self.connected,
"detected": self.is_detected,
"alerted": self.is_alerted,
"has_ptz": self.device.has_ptz,
"alerts_enabled": self.device.alerts_active,
}
@property
def should_poll(self) -> bool:
return True
@property
def is_recording(self) -> bool:
return self.device.recording
@property
def is_alerted(self) -> bool:
return self.device.alerted
@property
def is_detected(self) -> bool:
return self.device.detected
@property
def available(self) -> bool:
return self.device.client.is_available
@property
def connected(self) -> bool:
return self.device.connected
@property
def supported_features(self) -> int:
return SUPPORT_ON_OFF
@property
def is_on(self) -> bool:
return self.device.online
@property
def icon(self):
if self.is_on:
return "mdi:camcorder"
return "mdi:camcorder-off"
@property
def motion_detection_enabled(self):
return self.device.detector_active
@property
def unique_id(self) -> str:
return self._unique_id
async def async_enable_alerts(self):
await self.device.alerts_on()
async def async_disable_alerts(self):
await self.device.alerts_off()
async def async_enable_motion_detection(self):
await self.device.detector_on()
async def async_disable_motion_detection(self):
await self.device.detector_off()
async def async_start_recording(self):
await self.device.record()
async def async_stop_recording(self):
await self.device.record_stop()
async def async_turn_on(self):
await self.device.enable()
async def async_snapshot(self):
await self.device.snapshot()
async def async_turn_off(self):
await self.device.disable()
| true
| true
|
7904f9ab55622686bc4e6310b36e16189b2e04aa
| 336
|
py
|
Python
|
obot.py
|
MrTsRex/Reddit_bot
|
f384b3736f8a6849653ee27dcfb2390d5eab7d37
|
[
"MIT"
] | null | null | null |
obot.py
|
MrTsRex/Reddit_bot
|
f384b3736f8a6849653ee27dcfb2390d5eab7d37
|
[
"MIT"
] | null | null | null |
obot.py
|
MrTsRex/Reddit_bot
|
f384b3736f8a6849653ee27dcfb2390d5eab7d37
|
[
"MIT"
] | 1
|
2020-05-09T06:58:47.000Z
|
2020-05-09T06:58:47.000Z
|
import praw
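# Hardcoded Reddit API credentials consumed by login() below.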
c_id='34kxuaxc4yWiKw'
c_secret='8bJqHqNHFdB6NKV9sHzFbo4_Dl4'
ua='my user agent'
un='the_ugly_bot'
pwd='whatever930'
def login():
r = praw.Reddit(client_id=c_id,
client_secret=c_secret,
user_agent=ua,
username=un,
password=pwd)
return r
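A short usage sketch for the login() helper above; the subreddit name and limit are illustrative:

reddit = login()
# Print a few hot submissions to confirm the credentials work.
for submission in reddit.subreddit("learnpython").hot(limit=5):
    print(submission.title)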
| 22.4
| 44
| 0.58631
|
import praw
c_id='34kxuaxc4yWiKw'
c_secret='8bJqHqNHFdB6NKV9sHzFbo4_Dl4'
ua='my user agent'
un='the_ugly_bot'
pwd='whatever930'
def login():
r = praw.Reddit(client_id=c_id,
client_secret=c_secret,
user_agent=ua,
username=un,
password=pwd)
return r
| true
| true
|
7904facea9fd890b5c6a46fa9a4e7fc4c16dd44e
| 153
|
py
|
Python
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingMedian_NoCycle_LSTM.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingMedian_NoCycle_LSTM.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingMedian_NoCycle_LSTM.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] );
| 38.25
| 80
| 0.745098
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] );
| true
| true
|
7904fb41de68edf8a0661c861365993ff2e1f5b2
| 2,072
|
py
|
Python
|
flange/source.py
|
flashashen/flange
|
c8e6e790fe68679fe74aec007abdc47810137b0d
|
[
"MIT"
] | 1
|
2020-09-09T02:51:38.000Z
|
2020-09-09T02:51:38.000Z
|
flange/source.py
|
flashashen/flange
|
c8e6e790fe68679fe74aec007abdc47810137b0d
|
[
"MIT"
] | 6
|
2018-03-06T17:47:44.000Z
|
2019-03-01T17:13:39.000Z
|
flange/source.py
|
flashashen/flange
|
c8e6e790fe68679fe74aec007abdc47810137b0d
|
[
"MIT"
] | null | null | null |
import os
import anyconfig
PARSABLES = {
'pickle':['p','pickle'],
'toml':['toml'],
'xml':['xml'],
'yaml':['yml','yaml'],
'json':['json'],
'ini':['ini'],
'properties':['props','properties'],
'shellvars':['env']}
class Source(object):
    def __init__(self, uri, root_path=None, contents=None, parser=None, error=None):
        self.uri = uri
        self.root_path = root_path
        self.error = error
        self.parser = parser
        # Use a per-instance dict instead of a shared mutable default argument.
        self.contents = contents if contents is not None else {}
def __repr__(self):
return self.__str__()
def __str__(self):
return "<Source uri={} root_path={} parser={} error={}>".format(self.uri, self.root_path, self.parser, self.error)
def load(self):
pass
@staticmethod
def from_file(full_file_path, root_path):
s = SourceFile(full_file_path, root_path)
s.load()
return s
class SourceFile(Source):
def _parse(self, parser=None):
contents = anyconfig.load(self.uri, ac_parser=parser, ac_ordered=True)
parser = parser if parser else os.path.splitext(self.uri)[1].strip('.')
return (contents, parser)
def load(self):
try:
self.contents, self.parser = self._parse()
except Exception as e:
            # If the file has a known extension but fails to parse, record the
            # error rather than trying other parsers. The danger is that it would
            # be parsed incorrectly as a properties file, which seems to match
            # everything.
ext = os.path.splitext(self.uri)[1][1:]
if [lext for lext in PARSABLES.values() if ext in lext]:
self.error = e
# print type(e) # 'exception parsing {}\t{}'.format(ext, e)
else:
                for p in PARSABLES:
try:
self.contents, self.parser = self._parse(p)
self.error = None
break
except Exception as e:
# print type(e) #'exception parsing as ', p, ' ', e
pass
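A minimal usage sketch for the classes above; the path is illustrative and assumes a parsable YAML file exists there:

src = Source.from_file("/tmp/settings.yml", "/tmp")
if src.error is None:
    print(src.parser, dict(src.contents))
else:
    print("could not parse {}: {}".format(src.uri, src.error))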
| 25.9
| 122
| 0.543436
|
import os
import anyconfig
PARSABLES = {
'pickle':['p','pickle'],
'toml':['toml'],
'xml':['xml'],
'yaml':['yml','yaml'],
'json':['json'],
'ini':['ini'],
'properties':['props','properties'],
'shellvars':['env']}
class Source(object):
    def __init__(self, uri, root_path=None, contents=None, parser=None, error=None):
        self.uri = uri
        self.root_path = root_path
        self.error = error
        self.parser = parser
        self.contents = contents if contents is not None else {}
def __repr__(self):
return self.__str__()
def __str__(self):
return "<Source uri={} root_path={} parser={} error={}>".format(self.uri, self.root_path, self.parser, self.error)
def load(self):
pass
@staticmethod
def from_file(full_file_path, root_path):
s = SourceFile(full_file_path, root_path)
s.load()
return s
class SourceFile(Source):
def _parse(self, parser=None):
contents = anyconfig.load(self.uri, ac_parser=parser, ac_ordered=True)
parser = parser if parser else os.path.splitext(self.uri)[1].strip('.')
return (contents, parser)
def load(self):
try:
self.contents, self.parser = self._parse()
except Exception as e:
            # The danger is that it would be parsed incorrectly as a properties
            # file, which seems to match everything.
ext = os.path.splitext(self.uri)[1][1:]
if [lext for lext in PARSABLES.values() if ext in lext]:
self.error = e
# print type(e) # 'exception parsing {}\t{}'.format(ext, e)
else:
                for p in PARSABLES:
try:
self.contents, self.parser = self._parse(p)
self.error = None
break
except Exception as e:
# print type(e) #'exception parsing as ', p, ' ', e
pass
| true
| true
|
Subsets and Splits