code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import csv
import requests
class Mapper:
def __init__(self,agent):
self.AI_agent = agent
self.csv2mr_map = {}
@staticmethod
def rn_step1(AI_agent):
return raw_input('[%s]: What is the name of the CSV file you want to import? \n[YOU]: '%AI_agent)
def rn_step2(self,AI_agent):
return raw_input('[%s]: What is the URL of the Ring? \n[YOU]: '%AI_agent)
def mmf_step1(self,AI_agent):
return raw_input('[%s]: Do you want to enter a new language?[yes/no] \n[YOU]: '%AI_agent)
def mmf_step2(self,langs,AI_agent):
return raw_input('[%s]: What language you want to map?[%s] \n[YOU]: '%(AI_agent,'/'.join(langs)))
def mmf_step3(self,f,l,columns,AI_agent):
return raw_input('[%s]: What CSV column matches with Ring field "%s.%s" ?[1-%s] \n[YOU]: '%(
AI_agent,
f['FieldLabel'],
l,
len(columns)))
def mrf_step1(self,f,columns,AI_agent):
return raw_input('[%s]: What CSV column matches with ring field "%s" ?[1-%s] \n[YOU]: '% (
AI_agent,
f['FieldLabel'],
len(columns)))
def run(self,
xrn_step1=rn_step1,
xrn_step2=rn_step2,
xmmf_step1=mmf_step1,
xmmf_step2=mmf_step2,
xmmf_step3=mmf_step3,
xmrf_step1=mrf_step1):
print('[%s]: So, you want to import data from a CSV file to a Ring right? '%self.AI_agent)
self.csv_filename = xrn_step1(self.AI_agent)
columns = self.get_csv_header(self.csv_filename)
self.ring_url = xrn_step2(self.AI_agent)
ring_url_parts = self.ring_url.split('?')
self.ring,self.fields = self.get_ring_schema(ring_url_parts[0]+'?schema=1')
print('[%s]: Given the following CSV Columns:'%self.AI_agent)
print(self.columns_menu_string(columns))
for f in self.fields:
if f['FieldMultilingual']:
self.map_multilingual_field(f,
columns,
xmmf_step1=xmmf_step1,
xmmf_step2=xmmf_step2,
xmmf_step3=xmmf_step3)
else:
self.map_regular_field(f,columns,xmrf_step1=xmrf_step1)
print('[%s]: This is your map:'%self.AI_agent)
print(self.csv2mr_map)
return self.csv2mr_map
def map_regular_field(self,f,columns,xmrf_step1=mrf_step1):
self.csv2mr_map[f['FieldId']] = xmrf_step1(f,columns,self.AI_agent)
def map_multilingual_field(self,f,columns,xmmf_step1=None,xmmf_step2=None,xmmf_step3=None):
print('[%s]: "%s" is a Multilingual field '%(self.AI_agent,f['FieldLabel']))
langs = ['eng','spa','ita','fra']
lang_dict = {}
next_lang = True
while next_lang:
r = xmmf_step1(self.AI_agent)
if str(r[0]).lower() == 'y':
l = xmmf_step2(langs,self.AI_agent)
if l in langs:
c = xmmf_step3(f,l,columns,self.AI_agent)
lang_dict[l] = c
else:
print("[%s]: I don't recognize that language"%self.AI_agent)
elif str(r[0]).lower() == 'n':
next_lang = False
self.csv2mr_map[f['FieldId']] = lang_dict
def columns_menu_string(self,columns):
i = 0
out = ''
for c in columns:
i += 1
out += ' %s)%s'%(i,c)
return out
def retry_get_csv_header(self,raw_input):
csv_filename = raw_input('[%s]: What is the name of the CSV file you want to import? \n[YOU]: '%self.AI_agent)
return self.get_csv_header(csv_filename)
def get_csv_header(self,csv_filename,retry_raw_input=raw_input):
try:
with open(csv_filename,'rb') as f:
self.reader = csv.reader(f)
for row in self.reader:
#print(row)
csv_header=[]
count = 0
for r in row:
#print(type(r))
if r != '':
csv_header.append(r)
count += 1
if count != 0:
break
print('[%s]: %s columns detected: '%(self.AI_agent,len(csv_header)))
print(', '.join(csv_header))
return csv_header
except(IOError):
print('[%s]: There was a problem opening that file. '%self.AI_agent)
print('[%s]: It should live in the same folder as this script. '%self.AI_agent)
return self.retry_get_csv_header(retry_raw_input)
def retry_get_ring_schema(self,raw_input):
ring_url = raw_input('[%s]: What is the URL of the Ring? \n[YOU]: '%self.AI_agent)
return self.get_ring_schema(ring_url)
def get_ring_schema(self,ring_url,retry_raw_input=raw_input):
try:
r = requests.get(ring_url)
if r.status_code == requests.codes.ok:
#print(r.json())
fields = r.json()['fields']
rings = r.json()['rings']
print('[%s]: Got it!'%self.AI_agent)
print('[%s]: %s fields detected: '%(self.AI_agent,len(fields)))
print(', '.join([field['FieldLabel'] for field in fields]))
return rings,fields
else:
raise
except(ValueError):
print('[%s]: There was a problem with that URL. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input)
except(requests.exceptions.MissingSchema):
print('[%s]: That is not a valid URL. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input)
except(requests.exceptions.ConnectionError):
print('[%s]: Connection refused. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input) | Mapper.py | import csv
import requests
class Mapper:
def __init__(self,agent):
self.AI_agent = agent
self.csv2mr_map = {}
@staticmethod
def rn_step1(AI_agent):
return raw_input('[%s]: What is the name of the CSV file you want to import? \n[YOU]: '%AI_agent)
def rn_step2(self,AI_agent):
return raw_input('[%s]: What is the URL of the Ring? \n[YOU]: '%AI_agent)
def mmf_step1(self,AI_agent):
return raw_input('[%s]: Do you want to enter a new language?[yes/no] \n[YOU]: '%AI_agent)
def mmf_step2(self,langs,AI_agent):
return raw_input('[%s]: What language you want to map?[%s] \n[YOU]: '%(AI_agent,'/'.join(langs)))
def mmf_step3(self,f,l,columns,AI_agent):
return raw_input('[%s]: What CSV column matches with Ring field "%s.%s" ?[1-%s] \n[YOU]: '%(
AI_agent,
f['FieldLabel'],
l,
len(columns)))
def mrf_step1(self,f,columns,AI_agent):
return raw_input('[%s]: What CSV column matches with ring field "%s" ?[1-%s] \n[YOU]: '% (
AI_agent,
f['FieldLabel'],
len(columns)))
def run(self,
xrn_step1=rn_step1,
xrn_step2=rn_step2,
xmmf_step1=mmf_step1,
xmmf_step2=mmf_step2,
xmmf_step3=mmf_step3,
xmrf_step1=mrf_step1):
print('[%s]: So, you want to import data from a CSV file to a Ring right? '%self.AI_agent)
self.csv_filename = xrn_step1(self.AI_agent)
columns = self.get_csv_header(self.csv_filename)
self.ring_url = xrn_step2(self.AI_agent)
ring_url_parts = self.ring_url.split('?')
self.ring,self.fields = self.get_ring_schema(ring_url_parts[0]+'?schema=1')
print('[%s]: Given the following CSV Columns:'%self.AI_agent)
print(self.columns_menu_string(columns))
for f in self.fields:
if f['FieldMultilingual']:
self.map_multilingual_field(f,
columns,
xmmf_step1=xmmf_step1,
xmmf_step2=xmmf_step2,
xmmf_step3=xmmf_step3)
else:
self.map_regular_field(f,columns,xmrf_step1=xmrf_step1)
print('[%s]: This is your map:'%self.AI_agent)
print(self.csv2mr_map)
return self.csv2mr_map
def map_regular_field(self,f,columns,xmrf_step1=mrf_step1):
self.csv2mr_map[f['FieldId']] = xmrf_step1(f,columns,self.AI_agent)
def map_multilingual_field(self,f,columns,xmmf_step1=None,xmmf_step2=None,xmmf_step3=None):
print('[%s]: "%s" is a Multilingual field '%(self.AI_agent,f['FieldLabel']))
langs = ['eng','spa','ita','fra']
lang_dict = {}
next_lang = True
while next_lang:
r = xmmf_step1(self.AI_agent)
if str(r[0]).lower() == 'y':
l = xmmf_step2(langs,self.AI_agent)
if l in langs:
c = xmmf_step3(f,l,columns,self.AI_agent)
lang_dict[l] = c
else:
print("[%s]: I don't recognize that language"%self.AI_agent)
elif str(r[0]).lower() == 'n':
next_lang = False
self.csv2mr_map[f['FieldId']] = lang_dict
def columns_menu_string(self,columns):
i = 0
out = ''
for c in columns:
i += 1
out += ' %s)%s'%(i,c)
return out
def retry_get_csv_header(self,raw_input):
csv_filename = raw_input('[%s]: What is the name of the CSV file you want to import? \n[YOU]: '%self.AI_agent)
return self.get_csv_header(csv_filename)
def get_csv_header(self,csv_filename,retry_raw_input=raw_input):
try:
with open(csv_filename,'rb') as f:
self.reader = csv.reader(f)
for row in self.reader:
#print(row)
csv_header=[]
count = 0
for r in row:
#print(type(r))
if r != '':
csv_header.append(r)
count += 1
if count != 0:
break
print('[%s]: %s columns detected: '%(self.AI_agent,len(csv_header)))
print(', '.join(csv_header))
return csv_header
except(IOError):
print('[%s]: There was a problem opening that file. '%self.AI_agent)
print('[%s]: It should live in the same folder as this script. '%self.AI_agent)
return self.retry_get_csv_header(retry_raw_input)
def retry_get_ring_schema(self,raw_input):
ring_url = raw_input('[%s]: What is the URL of the Ring? \n[YOU]: '%self.AI_agent)
return self.get_ring_schema(ring_url)
def get_ring_schema(self,ring_url,retry_raw_input=raw_input):
try:
r = requests.get(ring_url)
if r.status_code == requests.codes.ok:
#print(r.json())
fields = r.json()['fields']
rings = r.json()['rings']
print('[%s]: Got it!'%self.AI_agent)
print('[%s]: %s fields detected: '%(self.AI_agent,len(fields)))
print(', '.join([field['FieldLabel'] for field in fields]))
return rings,fields
else:
raise
except(ValueError):
print('[%s]: There was a problem with that URL. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input)
except(requests.exceptions.MissingSchema):
print('[%s]: That is not a valid URL. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input)
except(requests.exceptions.ConnectionError):
print('[%s]: Connection refused. Do you want to try another URL?'%self.AI_agent)
return self.retry_get_ring_schema(retry_raw_input) | 0.248443 | 0.226131 |
from layers import ProcessingLayer
from lexical_shortcuts.shortcuts_attention_modules import \
MultiHeadAttentionShortcuts, \
MultiHeadAttentionShortcutsFeatureFusion, \
MultiHeadAttentionShortcutsFeatureFusionNonLexical
class ShortcutsAttentionBlock(object):
""" Defines a single attention block (referred to as 'sub-layer' in the paper) comprising of a single multi-head
attention layer preceded by a pre-processing layer and followed by a post-processing layer. """
def __init__(self,
config,
float_dtype,
self_attention,
training,
shortcut_type):
# Set attributes
self.config = config
self.self_attention = self_attention
self.shortcut_type = shortcut_type
# Track gate values
self.key_gate = 0.
self.value_gate = 0.
if self_attention:
attn_name = 'self_attn'
else:
attn_name = 'cross_attn'
memory_size = config.hidden_size
assert shortcut_type in ['lexical', 'lexical_plus_feature_fusion', 'non-lexical'], \
'Shortcut type {:s} is not supported.'.format(shortcut_type)
# Build layers
self.pre_sub_layer = ProcessingLayer(config.hidden_size,
use_layer_norm=True,
dropout_rate=0.0,
training=training,
name='pre_{:s}_sublayer'.format(attn_name))
self.post_sub_layer = ProcessingLayer(config.hidden_size,
use_layer_norm=False,
dropout_rate=config.dropout_residual,
training=training,
name='post_{:s}_sublayer'.format(attn_name))
if shortcut_type == 'lexical_plus_feature_fusion':
self.attn = MultiHeadAttentionShortcutsFeatureFusion(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
elif shortcut_type == 'non_lexical':
self.attn = MultiHeadAttentionShortcutsFeatureFusionNonLexical(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
else:
self.attn = MultiHeadAttentionShortcuts(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
def forward(self, inputs, memory_context, attn_mask, layer_memories=None):
""" Propagates input data through the block. """
assert (memory_context is not None), 'State cache has to be provided for the application of shortcuts.'
# Pre-process inputs
inputs = self.pre_sub_layer.forward(inputs)
outputs, layer_memories = self.attn.forward(inputs, memory_context, attn_mask, layer_memories)
# Post-process outputs
block_out = self.post_sub_layer.forward(outputs, residual_inputs=inputs)
# Optionally track gate values
if self.config.track_gate_values:
self.key_gate = self.attn.key_gate
self.value_gate = self.attn.value_gate
return block_out, layer_memories | codebase/lexical_shortcuts/lexical_shortcuts_blocks.py | from layers import ProcessingLayer
from lexical_shortcuts.shortcuts_attention_modules import \
MultiHeadAttentionShortcuts, \
MultiHeadAttentionShortcutsFeatureFusion, \
MultiHeadAttentionShortcutsFeatureFusionNonLexical
class ShortcutsAttentionBlock(object):
""" Defines a single attention block (referred to as 'sub-layer' in the paper) comprising of a single multi-head
attention layer preceded by a pre-processing layer and followed by a post-processing layer. """
def __init__(self,
config,
float_dtype,
self_attention,
training,
shortcut_type):
# Set attributes
self.config = config
self.self_attention = self_attention
self.shortcut_type = shortcut_type
# Track gate values
self.key_gate = 0.
self.value_gate = 0.
if self_attention:
attn_name = 'self_attn'
else:
attn_name = 'cross_attn'
memory_size = config.hidden_size
assert shortcut_type in ['lexical', 'lexical_plus_feature_fusion', 'non-lexical'], \
'Shortcut type {:s} is not supported.'.format(shortcut_type)
# Build layers
self.pre_sub_layer = ProcessingLayer(config.hidden_size,
use_layer_norm=True,
dropout_rate=0.0,
training=training,
name='pre_{:s}_sublayer'.format(attn_name))
self.post_sub_layer = ProcessingLayer(config.hidden_size,
use_layer_norm=False,
dropout_rate=config.dropout_residual,
training=training,
name='post_{:s}_sublayer'.format(attn_name))
if shortcut_type == 'lexical_plus_feature_fusion':
self.attn = MultiHeadAttentionShortcutsFeatureFusion(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
elif shortcut_type == 'non_lexical':
self.attn = MultiHeadAttentionShortcutsFeatureFusionNonLexical(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
else:
self.attn = MultiHeadAttentionShortcuts(memory_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.hidden_size,
config.num_heads,
float_dtype,
dropout_attn=config.dropout_attn,
training=training,
name='{:s}_sublayer'.format(attn_name))
def forward(self, inputs, memory_context, attn_mask, layer_memories=None):
""" Propagates input data through the block. """
assert (memory_context is not None), 'State cache has to be provided for the application of shortcuts.'
# Pre-process inputs
inputs = self.pre_sub_layer.forward(inputs)
outputs, layer_memories = self.attn.forward(inputs, memory_context, attn_mask, layer_memories)
# Post-process outputs
block_out = self.post_sub_layer.forward(outputs, residual_inputs=inputs)
# Optionally track gate values
if self.config.track_gate_values:
self.key_gate = self.attn.key_gate
self.value_gate = self.attn.value_gate
return block_out, layer_memories | 0.889745 | 0.367951 |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class Record(pulumi.CustomResource):
created_on: pulumi.Output[str]
"""
The RFC3339 timestamp of when the record was created
"""
data: pulumi.Output[dict]
"""
Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
* `algorithm` (`float`)
* `altitude` (`float`)
* `certificate` (`str`)
* `content` (`str`)
* `digest` (`str`)
* `digest_type` (`float`)
* `fingerprint` (`str`)
* `flags` (`str`)
* `key_tag` (`float`)
* `lat_degrees` (`float`)
* `lat_direction` (`str`)
* `lat_minutes` (`float`)
* `lat_seconds` (`float`)
* `long_degrees` (`float`)
* `long_direction` (`str`)
* `long_minutes` (`float`)
* `long_seconds` (`float`)
* `matching_type` (`float`)
* `name` (`str`) - The name of the record
* `order` (`float`)
* `port` (`float`)
* `precision_horz` (`float`)
* `precision_vert` (`float`)
* `preference` (`float`)
* `priority` (`float`) - The priority of the record
* `proto` (`str`)
* `protocol` (`float`)
* `public_key` (`str`)
* `regex` (`str`)
* `replacement` (`str`)
* `selector` (`float`)
* `service` (`str`)
* `size` (`float`)
* `target` (`str`)
* `type` (`float`) - The type of the record
* `usage` (`float`)
* `weight` (`float`)
"""
hostname: pulumi.Output[str]
"""
The FQDN of the record
"""
metadata: pulumi.Output[dict]
"""
A key-value map of string metadata Cloudflare associates with the record
"""
modified_on: pulumi.Output[str]
"""
The RFC3339 timestamp of when the record was last modified
"""
name: pulumi.Output[str]
"""
The name of the record
"""
priority: pulumi.Output[float]
"""
The priority of the record
"""
proxiable: pulumi.Output[bool]
"""
Shows whether this record can be proxied, must be true if setting `proxied=true`
"""
proxied: pulumi.Output[bool]
"""
Whether the record gets Cloudflare's origin protection; defaults to `false`.
"""
ttl: pulumi.Output[float]
"""
The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
"""
type: pulumi.Output[str]
"""
The type of the record
"""
value: pulumi.Output[str]
"""
The (string) value of the record. Either this or `data` must be specified
"""
zone_id: pulumi.Output[str]
"""
The DNS zone ID to add the record to
"""
def __init__(__self__, resource_name, opts=None, data=None, name=None, priority=None, proxied=None, ttl=None, type=None, value=None, zone_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Cloudflare record resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-cloudflare/blob/master/website/docs/r/record.html.markdown.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] data: Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
:param pulumi.Input[str] name: The name of the record
:param pulumi.Input[float] priority: The priority of the record
:param pulumi.Input[bool] proxied: Whether the record gets Cloudflare's origin protection; defaults to `false`.
:param pulumi.Input[float] ttl: The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
:param pulumi.Input[str] type: The type of the record
:param pulumi.Input[str] value: The (string) value of the record. Either this or `data` must be specified
:param pulumi.Input[str] zone_id: The DNS zone ID to add the record to
The **data** object supports the following:
* `algorithm` (`pulumi.Input[float]`)
* `altitude` (`pulumi.Input[float]`)
* `certificate` (`pulumi.Input[str]`)
* `content` (`pulumi.Input[str]`)
* `digest` (`pulumi.Input[str]`)
* `digest_type` (`pulumi.Input[float]`)
* `fingerprint` (`pulumi.Input[str]`)
* `flags` (`pulumi.Input[str]`)
* `key_tag` (`pulumi.Input[float]`)
* `lat_degrees` (`pulumi.Input[float]`)
* `lat_direction` (`pulumi.Input[str]`)
* `lat_minutes` (`pulumi.Input[float]`)
* `lat_seconds` (`pulumi.Input[float]`)
* `long_degrees` (`pulumi.Input[float]`)
* `long_direction` (`pulumi.Input[str]`)
* `long_minutes` (`pulumi.Input[float]`)
* `long_seconds` (`pulumi.Input[float]`)
* `matching_type` (`pulumi.Input[float]`)
* `name` (`pulumi.Input[str]`) - The name of the record
* `order` (`pulumi.Input[float]`)
* `port` (`pulumi.Input[float]`)
* `precision_horz` (`pulumi.Input[float]`)
* `precision_vert` (`pulumi.Input[float]`)
* `preference` (`pulumi.Input[float]`)
* `priority` (`pulumi.Input[float]`) - The priority of the record
* `proto` (`pulumi.Input[str]`)
* `protocol` (`pulumi.Input[float]`)
* `public_key` (`pulumi.Input[str]`)
* `regex` (`pulumi.Input[str]`)
* `replacement` (`pulumi.Input[str]`)
* `selector` (`pulumi.Input[float]`)
* `service` (`pulumi.Input[str]`)
* `size` (`pulumi.Input[float]`)
* `target` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[float]`) - The type of the record
* `usage` (`pulumi.Input[float]`)
* `weight` (`pulumi.Input[float]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data'] = data
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['priority'] = priority
__props__['proxied'] = proxied
__props__['ttl'] = ttl
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['value'] = value
if zone_id is None:
raise TypeError("Missing required property 'zone_id'")
__props__['zone_id'] = zone_id
__props__['created_on'] = None
__props__['hostname'] = None
__props__['metadata'] = None
__props__['modified_on'] = None
__props__['proxiable'] = None
super(Record, __self__).__init__(
'cloudflare:index/record:Record',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, created_on=None, data=None, hostname=None, metadata=None, modified_on=None, name=None, priority=None, proxiable=None, proxied=None, ttl=None, type=None, value=None, zone_id=None):
"""
Get an existing Record resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_on: The RFC3339 timestamp of when the record was created
:param pulumi.Input[dict] data: Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
:param pulumi.Input[str] hostname: The FQDN of the record
:param pulumi.Input[dict] metadata: A key-value map of string metadata Cloudflare associates with the record
:param pulumi.Input[str] modified_on: The RFC3339 timestamp of when the record was last modified
:param pulumi.Input[str] name: The name of the record
:param pulumi.Input[float] priority: The priority of the record
:param pulumi.Input[bool] proxiable: Shows whether this record can be proxied, must be true if setting `proxied=true`
:param pulumi.Input[bool] proxied: Whether the record gets Cloudflare's origin protection; defaults to `false`.
:param pulumi.Input[float] ttl: The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
:param pulumi.Input[str] type: The type of the record
:param pulumi.Input[str] value: The (string) value of the record. Either this or `data` must be specified
:param pulumi.Input[str] zone_id: The DNS zone ID to add the record to
The **data** object supports the following:
* `algorithm` (`pulumi.Input[float]`)
* `altitude` (`pulumi.Input[float]`)
* `certificate` (`pulumi.Input[str]`)
* `content` (`pulumi.Input[str]`)
* `digest` (`pulumi.Input[str]`)
* `digest_type` (`pulumi.Input[float]`)
* `fingerprint` (`pulumi.Input[str]`)
* `flags` (`pulumi.Input[str]`)
* `key_tag` (`pulumi.Input[float]`)
* `lat_degrees` (`pulumi.Input[float]`)
* `lat_direction` (`pulumi.Input[str]`)
* `lat_minutes` (`pulumi.Input[float]`)
* `lat_seconds` (`pulumi.Input[float]`)
* `long_degrees` (`pulumi.Input[float]`)
* `long_direction` (`pulumi.Input[str]`)
* `long_minutes` (`pulumi.Input[float]`)
* `long_seconds` (`pulumi.Input[float]`)
* `matching_type` (`pulumi.Input[float]`)
* `name` (`pulumi.Input[str]`) - The name of the record
* `order` (`pulumi.Input[float]`)
* `port` (`pulumi.Input[float]`)
* `precision_horz` (`pulumi.Input[float]`)
* `precision_vert` (`pulumi.Input[float]`)
* `preference` (`pulumi.Input[float]`)
* `priority` (`pulumi.Input[float]`) - The priority of the record
* `proto` (`pulumi.Input[str]`)
* `protocol` (`pulumi.Input[float]`)
* `public_key` (`pulumi.Input[str]`)
* `regex` (`pulumi.Input[str]`)
* `replacement` (`pulumi.Input[str]`)
* `selector` (`pulumi.Input[float]`)
* `service` (`pulumi.Input[str]`)
* `size` (`pulumi.Input[float]`)
* `target` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[float]`) - The type of the record
* `usage` (`pulumi.Input[float]`)
* `weight` (`pulumi.Input[float]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["created_on"] = created_on
__props__["data"] = data
__props__["hostname"] = hostname
__props__["metadata"] = metadata
__props__["modified_on"] = modified_on
__props__["name"] = name
__props__["priority"] = priority
__props__["proxiable"] = proxiable
__props__["proxied"] = proxied
__props__["ttl"] = ttl
__props__["type"] = type
__props__["value"] = value
__props__["zone_id"] = zone_id
return Record(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | sdk/python/pulumi_cloudflare/record.py |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class Record(pulumi.CustomResource):
created_on: pulumi.Output[str]
"""
The RFC3339 timestamp of when the record was created
"""
data: pulumi.Output[dict]
"""
Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
* `algorithm` (`float`)
* `altitude` (`float`)
* `certificate` (`str`)
* `content` (`str`)
* `digest` (`str`)
* `digest_type` (`float`)
* `fingerprint` (`str`)
* `flags` (`str`)
* `key_tag` (`float`)
* `lat_degrees` (`float`)
* `lat_direction` (`str`)
* `lat_minutes` (`float`)
* `lat_seconds` (`float`)
* `long_degrees` (`float`)
* `long_direction` (`str`)
* `long_minutes` (`float`)
* `long_seconds` (`float`)
* `matching_type` (`float`)
* `name` (`str`) - The name of the record
* `order` (`float`)
* `port` (`float`)
* `precision_horz` (`float`)
* `precision_vert` (`float`)
* `preference` (`float`)
* `priority` (`float`) - The priority of the record
* `proto` (`str`)
* `protocol` (`float`)
* `public_key` (`str`)
* `regex` (`str`)
* `replacement` (`str`)
* `selector` (`float`)
* `service` (`str`)
* `size` (`float`)
* `target` (`str`)
* `type` (`float`) - The type of the record
* `usage` (`float`)
* `weight` (`float`)
"""
hostname: pulumi.Output[str]
"""
The FQDN of the record
"""
metadata: pulumi.Output[dict]
"""
A key-value map of string metadata Cloudflare associates with the record
"""
modified_on: pulumi.Output[str]
"""
The RFC3339 timestamp of when the record was last modified
"""
name: pulumi.Output[str]
"""
The name of the record
"""
priority: pulumi.Output[float]
"""
The priority of the record
"""
proxiable: pulumi.Output[bool]
"""
Shows whether this record can be proxied, must be true if setting `proxied=true`
"""
proxied: pulumi.Output[bool]
"""
Whether the record gets Cloudflare's origin protection; defaults to `false`.
"""
ttl: pulumi.Output[float]
"""
The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
"""
type: pulumi.Output[str]
"""
The type of the record
"""
value: pulumi.Output[str]
"""
The (string) value of the record. Either this or `data` must be specified
"""
zone_id: pulumi.Output[str]
"""
The DNS zone ID to add the record to
"""
def __init__(__self__, resource_name, opts=None, data=None, name=None, priority=None, proxied=None, ttl=None, type=None, value=None, zone_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Cloudflare record resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-cloudflare/blob/master/website/docs/r/record.html.markdown.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] data: Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
:param pulumi.Input[str] name: The name of the record
:param pulumi.Input[float] priority: The priority of the record
:param pulumi.Input[bool] proxied: Whether the record gets Cloudflare's origin protection; defaults to `false`.
:param pulumi.Input[float] ttl: The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
:param pulumi.Input[str] type: The type of the record
:param pulumi.Input[str] value: The (string) value of the record. Either this or `data` must be specified
:param pulumi.Input[str] zone_id: The DNS zone ID to add the record to
The **data** object supports the following:
* `algorithm` (`pulumi.Input[float]`)
* `altitude` (`pulumi.Input[float]`)
* `certificate` (`pulumi.Input[str]`)
* `content` (`pulumi.Input[str]`)
* `digest` (`pulumi.Input[str]`)
* `digest_type` (`pulumi.Input[float]`)
* `fingerprint` (`pulumi.Input[str]`)
* `flags` (`pulumi.Input[str]`)
* `key_tag` (`pulumi.Input[float]`)
* `lat_degrees` (`pulumi.Input[float]`)
* `lat_direction` (`pulumi.Input[str]`)
* `lat_minutes` (`pulumi.Input[float]`)
* `lat_seconds` (`pulumi.Input[float]`)
* `long_degrees` (`pulumi.Input[float]`)
* `long_direction` (`pulumi.Input[str]`)
* `long_minutes` (`pulumi.Input[float]`)
* `long_seconds` (`pulumi.Input[float]`)
* `matching_type` (`pulumi.Input[float]`)
* `name` (`pulumi.Input[str]`) - The name of the record
* `order` (`pulumi.Input[float]`)
* `port` (`pulumi.Input[float]`)
* `precision_horz` (`pulumi.Input[float]`)
* `precision_vert` (`pulumi.Input[float]`)
* `preference` (`pulumi.Input[float]`)
* `priority` (`pulumi.Input[float]`) - The priority of the record
* `proto` (`pulumi.Input[str]`)
* `protocol` (`pulumi.Input[float]`)
* `public_key` (`pulumi.Input[str]`)
* `regex` (`pulumi.Input[str]`)
* `replacement` (`pulumi.Input[str]`)
* `selector` (`pulumi.Input[float]`)
* `service` (`pulumi.Input[str]`)
* `size` (`pulumi.Input[float]`)
* `target` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[float]`) - The type of the record
* `usage` (`pulumi.Input[float]`)
* `weight` (`pulumi.Input[float]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data'] = data
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['priority'] = priority
__props__['proxied'] = proxied
__props__['ttl'] = ttl
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['value'] = value
if zone_id is None:
raise TypeError("Missing required property 'zone_id'")
__props__['zone_id'] = zone_id
__props__['created_on'] = None
__props__['hostname'] = None
__props__['metadata'] = None
__props__['modified_on'] = None
__props__['proxiable'] = None
super(Record, __self__).__init__(
'cloudflare:index/record:Record',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, created_on=None, data=None, hostname=None, metadata=None, modified_on=None, name=None, priority=None, proxiable=None, proxied=None, ttl=None, type=None, value=None, zone_id=None):
"""
Get an existing Record resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_on: The RFC3339 timestamp of when the record was created
:param pulumi.Input[dict] data: Map of attributes that constitute the record value. Primarily used for LOC and SRV record types. Either this or `value` must be specified
:param pulumi.Input[str] hostname: The FQDN of the record
:param pulumi.Input[dict] metadata: A key-value map of string metadata Cloudflare associates with the record
:param pulumi.Input[str] modified_on: The RFC3339 timestamp of when the record was last modified
:param pulumi.Input[str] name: The name of the record
:param pulumi.Input[float] priority: The priority of the record
:param pulumi.Input[bool] proxiable: Shows whether this record can be proxied, must be true if setting `proxied=true`
:param pulumi.Input[bool] proxied: Whether the record gets Cloudflare's origin protection; defaults to `false`.
:param pulumi.Input[float] ttl: The TTL of the record ([automatic: '1'](https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record))
:param pulumi.Input[str] type: The type of the record
:param pulumi.Input[str] value: The (string) value of the record. Either this or `data` must be specified
:param pulumi.Input[str] zone_id: The DNS zone ID to add the record to
The **data** object supports the following:
* `algorithm` (`pulumi.Input[float]`)
* `altitude` (`pulumi.Input[float]`)
* `certificate` (`pulumi.Input[str]`)
* `content` (`pulumi.Input[str]`)
* `digest` (`pulumi.Input[str]`)
* `digest_type` (`pulumi.Input[float]`)
* `fingerprint` (`pulumi.Input[str]`)
* `flags` (`pulumi.Input[str]`)
* `key_tag` (`pulumi.Input[float]`)
* `lat_degrees` (`pulumi.Input[float]`)
* `lat_direction` (`pulumi.Input[str]`)
* `lat_minutes` (`pulumi.Input[float]`)
* `lat_seconds` (`pulumi.Input[float]`)
* `long_degrees` (`pulumi.Input[float]`)
* `long_direction` (`pulumi.Input[str]`)
* `long_minutes` (`pulumi.Input[float]`)
* `long_seconds` (`pulumi.Input[float]`)
* `matching_type` (`pulumi.Input[float]`)
* `name` (`pulumi.Input[str]`) - The name of the record
* `order` (`pulumi.Input[float]`)
* `port` (`pulumi.Input[float]`)
* `precision_horz` (`pulumi.Input[float]`)
* `precision_vert` (`pulumi.Input[float]`)
* `preference` (`pulumi.Input[float]`)
* `priority` (`pulumi.Input[float]`) - The priority of the record
* `proto` (`pulumi.Input[str]`)
* `protocol` (`pulumi.Input[float]`)
* `public_key` (`pulumi.Input[str]`)
* `regex` (`pulumi.Input[str]`)
* `replacement` (`pulumi.Input[str]`)
* `selector` (`pulumi.Input[float]`)
* `service` (`pulumi.Input[str]`)
* `size` (`pulumi.Input[float]`)
* `target` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[float]`) - The type of the record
* `usage` (`pulumi.Input[float]`)
* `weight` (`pulumi.Input[float]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["created_on"] = created_on
__props__["data"] = data
__props__["hostname"] = hostname
__props__["metadata"] = metadata
__props__["modified_on"] = modified_on
__props__["name"] = name
__props__["priority"] = priority
__props__["proxiable"] = proxiable
__props__["proxied"] = proxied
__props__["ttl"] = ttl
__props__["type"] = type
__props__["value"] = value
__props__["zone_id"] = zone_id
return Record(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.822581 | 0.347067 |
from __future__ import division
import sys
import pickle
import numpy as np
from scipy.stats import johnsonsu
import pandas as pd
import ete3
def name_internal_nodes(T):
# Name internal nodes of a tree
i = 1
for node in T.traverse():
if not node.is_leaf():
node.name = str(i) + "_"
i += 1
return None
def flatten(L):
""" Convert list of lists into flat list"""
return [item for sublist in L for item in sublist]
def unpack(X):
""" Unpack a comma separated list of values into a flat list """
return flatten([x.split(",") for x in list(X)])
def get_muts_counts_subtree(df_seqs, lineage_uid, T, subtree_parent, seq_string_uid_to_uid):
""" Get unique somatic mutations and occurrence of each for a subtree of a lineage """
leaf_names = [node.name for node in T.search_nodes(name=subtree_parent)[0].get_leaves()]
leaf_names = [x for x in leaf_names if x != "germline"] # remove germline
leaf_seq_string_uids = [int(x.split("~")[0]) for x in leaf_names] # get sequence string uids
leaf_uids = [seq_string_uid_to_uid[x] for x in leaf_seq_string_uids] # convert to sequence uids
df = df_seqs.loc[leaf_uids] # subset to only leaves in the subtree
N = df.shape[0] # number of sequences in subtree
df = df[df["mut_germline_positions"] != ""] # filter out sequences without germline mutations
df = df.loc[-pd.isnull(df["mut_germline_positions"])] # filter out sequences without germline mutations
# Count occurrences of each mutation
muts = zip(unpack(df.mut_germline_positions), unpack(df.mut_germline_before), unpack(df.mut_germline_after))
counts = {}
for mut in muts:
try:
counts[mut] += 1
except:
counts[mut] = 1
return counts, N
def calc_H(mut_counts, n):
# Calculate Fay and Wu's H based on counts of mutations
counts = pd.Series(mut_counts).value_counts()
theta_H = sum(2 * np.array(counts.index)**2 * counts) / (n * (n-1))
theta_pi = sum(2 * counts * np.array(counts.index) * (n - np.array(counts.index))) / (n * (n-1))
H = theta_pi - theta_H
return H
def find_nearest(L,value):
array = np.array(L)
idx = (np.abs(array-value)).argmin()
return array[idx]
def calc_pvalue_matchedSimulations(H_focal, N_focal, params, model=johnsonsu):
N = find_nearest(params.keys(), N_focal) # Find nearest N in ensemble
myParams = params[N]
p_low = model.cdf(H_focal, myParams[0], myParams[1], myParams[2], myParams[3]) # Calculate p of H under nearest N
p = p_low
return p
def calc_FayAndWusH_subtree(T, node_name, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid):
# Calculate Fay and Wu's H and P values for H under neutral models for a subtree
# Get counts of derived mutations in subtree
mut_counts, N = get_muts_counts_subtree(df_seqs, lineage_uid, T, node_name, seq_string_uid_to_uid)
# Remove mutations at fixation within subtree
for mut, count in mut_counts.items():
if count == N:
mut_counts.pop(mut) # mutation is fixed in subtree, drop it
# Calculate Fay and Wu's H based on mutations within subtree
H = calc_H(mut_counts.values(), N)
# Calculate P values based on comparison with simulations of neutrality and strong selection
pvalue_kingman = calc_pvalue_matchedSimulations(H, N, fit_params_kingman, model=johnsonsu)
pvalue_BSC = calc_pvalue_matchedSimulations(H, N, fit_params_BSC, model=johnsonsu)
return H, pvalue_kingman, pvalue_BSC
def annotate_FayAndWusH(T, lineage_uid, df_seqs, fit_params_kingman, fit_params_BSC, seq_string_uid_to_uid):
""" Traverse tree, calculate Fay and Wu's H for each node, and calculate significance """
annotations = []
# Condition for stopping traversal
def stop(node):
if node.name == "germline" or node.name == "1_":
return False
if len(node) < 100:
# print "Stopping branch (too small)", node.name
return True
else:
return False
# Traverse tree and calculate Fay and Wu's H at each node
for node in T.traverse(is_leaf_fn=stop):
if node.is_leaf() or node.name == "1_": continue
H, pvalue_kingman, pvalue_BSC = calc_FayAndWusH_subtree(T, node.name, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid)
myAnnotation = [node.name, len(node), node.dist, H, pvalue_kingman, pvalue_BSC]
annotations.append(myAnnotation)
return annotations
if __name__ == "__main__":
lineage_uid = int(sys.argv[1])
print "Lineage uid", lineage_uid
# Set paths to data
infile_basename = "/local10G/rfhorns/Bcell/flu_highres/figures/v5/data"
infile_df_seqs = infile_basename+"/FindSubclonesSelected.df_seqs_raw.csv"
infile_seq_string_uid_to_uid = infile_basename+"/FindSubclonesSelected.seq_string_uid_to_uid.pickle"
infile_fit_params_kingman = infile_basename+"/fit_params_kingman.pickle"
infile_fit_params_BSC = infile_basename+"/fit_params_BSC.pickle"
# Load sequence data
df_seqs = pd.read_csv(infile_df_seqs, index_col=0, header=0)
seq_string_uid_to_uid = pickle.load(open(infile_seq_string_uid_to_uid))
# Load parameters of fits to null distributions
fit_params_kingman = pickle.load(open(infile_fit_params_kingman))
fit_params_BSC = pickle.load(open(infile_fit_params_BSC))
# Set name of tree file
infile_tree = "/local10G/rfhorns/Bcell/flu_highres/trees/" + str(lineage_uid) + "/fasttree.rep.nwk"
print "infile_tree", infile_tree
# Set name of output file
outfile = "/local10G/rfhorns/Bcell/flu_highres/trees/" + str(lineage_uid) + "/annotation_FayAndWusH.csv"
print "outfile", outfile
# Load tree
T = ete3.Tree(infile_tree, format=1)
print "Leaves", len(T)
# Set names of internal nodes (e.g., 1_)
name_internal_nodes(T)
# Annotate nodes with Fay and Wu's H and P value
annotations = annotate_FayAndWusH(T, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid)
# Convert output to dataframe
cols = ["name", "N_leaves", "dist", "H", "pvalue_kingman", "pvalue_BSC"]
df_annotations = pd.DataFrame(columns=cols)
for i in range(len(annotations)):
df_annotations.loc[i] = annotations[i]
df_annotations.set_index("name", inplace=True)
# Write output to file
df_annotations.to_csv(outfile)
print "Done!!" | scripts/subclones/annotate_nodes_FayAndWusH.py | from __future__ import division
import sys
import pickle
import numpy as np
from scipy.stats import johnsonsu
import pandas as pd
import ete3
def name_internal_nodes(T):
# Name internal nodes of a tree
i = 1
for node in T.traverse():
if not node.is_leaf():
node.name = str(i) + "_"
i += 1
return None
def flatten(L):
""" Convert list of lists into flat list"""
return [item for sublist in L for item in sublist]
def unpack(X):
""" Unpack a comma separated list of values into a flat list """
return flatten([x.split(",") for x in list(X)])
def get_muts_counts_subtree(df_seqs, lineage_uid, T, subtree_parent, seq_string_uid_to_uid):
""" Get unique somatic mutations and occurrence of each for a subtree of a lineage """
leaf_names = [node.name for node in T.search_nodes(name=subtree_parent)[0].get_leaves()]
leaf_names = [x for x in leaf_names if x != "germline"] # remove germline
leaf_seq_string_uids = [int(x.split("~")[0]) for x in leaf_names] # get sequence string uids
leaf_uids = [seq_string_uid_to_uid[x] for x in leaf_seq_string_uids] # convert to sequence uids
df = df_seqs.loc[leaf_uids] # subset to only leaves in the subtree
N = df.shape[0] # number of sequences in subtree
df = df[df["mut_germline_positions"] != ""] # filter out sequences without germline mutations
df = df.loc[-pd.isnull(df["mut_germline_positions"])] # filter out sequences without germline mutations
# Count occurrences of each mutation
muts = zip(unpack(df.mut_germline_positions), unpack(df.mut_germline_before), unpack(df.mut_germline_after))
counts = {}
for mut in muts:
try:
counts[mut] += 1
except:
counts[mut] = 1
return counts, N
def calc_H(mut_counts, n):
# Calculate Fay and Wu's H based on counts of mutations
counts = pd.Series(mut_counts).value_counts()
theta_H = sum(2 * np.array(counts.index)**2 * counts) / (n * (n-1))
theta_pi = sum(2 * counts * np.array(counts.index) * (n - np.array(counts.index))) / (n * (n-1))
H = theta_pi - theta_H
return H
def find_nearest(L,value):
array = np.array(L)
idx = (np.abs(array-value)).argmin()
return array[idx]
def calc_pvalue_matchedSimulations(H_focal, N_focal, params, model=johnsonsu):
N = find_nearest(params.keys(), N_focal) # Find nearest N in ensemble
myParams = params[N]
p_low = model.cdf(H_focal, myParams[0], myParams[1], myParams[2], myParams[3]) # Calculate p of H under nearest N
p = p_low
return p
def calc_FayAndWusH_subtree(T, node_name, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid):
# Calculate Fay and Wu's H and P values for H under neutral models for a subtree
# Get counts of derived mutations in subtree
mut_counts, N = get_muts_counts_subtree(df_seqs, lineage_uid, T, node_name, seq_string_uid_to_uid)
# Remove mutations at fixation within subtree
for mut, count in mut_counts.items():
if count == N:
mut_counts.pop(mut) # mutation is fixed in subtree, drop it
# Calculate Fay and Wu's H based on mutations within subtree
H = calc_H(mut_counts.values(), N)
# Calculate P values based on comparison with simulations of neutrality and strong selection
pvalue_kingman = calc_pvalue_matchedSimulations(H, N, fit_params_kingman, model=johnsonsu)
pvalue_BSC = calc_pvalue_matchedSimulations(H, N, fit_params_BSC, model=johnsonsu)
return H, pvalue_kingman, pvalue_BSC
def annotate_FayAndWusH(T, lineage_uid, df_seqs, fit_params_kingman, fit_params_BSC, seq_string_uid_to_uid):
""" Traverse tree, calculate Fay and Wu's H for each node, and calculate significance """
annotations = []
# Condition for stopping traversal
def stop(node):
if node.name == "germline" or node.name == "1_":
return False
if len(node) < 100:
# print "Stopping branch (too small)", node.name
return True
else:
return False
# Traverse tree and calculate Fay and Wu's H at each node
for node in T.traverse(is_leaf_fn=stop):
if node.is_leaf() or node.name == "1_": continue
H, pvalue_kingman, pvalue_BSC = calc_FayAndWusH_subtree(T, node.name, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid)
myAnnotation = [node.name, len(node), node.dist, H, pvalue_kingman, pvalue_BSC]
annotations.append(myAnnotation)
return annotations
if __name__ == "__main__":
lineage_uid = int(sys.argv[1])
print "Lineage uid", lineage_uid
# Set paths to data
infile_basename = "/local10G/rfhorns/Bcell/flu_highres/figures/v5/data"
infile_df_seqs = infile_basename+"/FindSubclonesSelected.df_seqs_raw.csv"
infile_seq_string_uid_to_uid = infile_basename+"/FindSubclonesSelected.seq_string_uid_to_uid.pickle"
infile_fit_params_kingman = infile_basename+"/fit_params_kingman.pickle"
infile_fit_params_BSC = infile_basename+"/fit_params_BSC.pickle"
# Load sequence data
df_seqs = pd.read_csv(infile_df_seqs, index_col=0, header=0)
seq_string_uid_to_uid = pickle.load(open(infile_seq_string_uid_to_uid))
# Load parameters of fits to null distributions
fit_params_kingman = pickle.load(open(infile_fit_params_kingman))
fit_params_BSC = pickle.load(open(infile_fit_params_BSC))
# Set name of tree file
infile_tree = "/local10G/rfhorns/Bcell/flu_highres/trees/" + str(lineage_uid) + "/fasttree.rep.nwk"
print "infile_tree", infile_tree
# Set name of output file
outfile = "/local10G/rfhorns/Bcell/flu_highres/trees/" + str(lineage_uid) + "/annotation_FayAndWusH.csv"
print "outfile", outfile
# Load tree
T = ete3.Tree(infile_tree, format=1)
print "Leaves", len(T)
# Set names of internal nodes (e.g., 1_)
name_internal_nodes(T)
# Annotate nodes with Fay and Wu's H and P value
annotations = annotate_FayAndWusH(T, lineage_uid, df_seqs,
fit_params_kingman, fit_params_BSC,
seq_string_uid_to_uid)
# Convert output to dataframe
cols = ["name", "N_leaves", "dist", "H", "pvalue_kingman", "pvalue_BSC"]
df_annotations = pd.DataFrame(columns=cols)
for i in range(len(annotations)):
df_annotations.loc[i] = annotations[i]
df_annotations.set_index("name", inplace=True)
# Write output to file
df_annotations.to_csv(outfile)
print "Done!!" | 0.455683 | 0.421492 |
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import (updateStoragePool,
resizeVolume,
listCapacity,
addCluster)
from marvin.sshClient import SshClient
from marvin.lib.common import (get_zone,
get_template,
get_domain,
list_volumes,
get_pod,
is_config_suitable)
from marvin.lib.base import (Domain,
Account,
Template,
VirtualMachine,
Volume,
DiskOffering,
StoragePool,
ServiceOffering,
Configurations)
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr
import time
class Test42xBugsMgmtSvr(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        # Build the class-level fixtures shared by every test in this class:
        # root API clients, zone/pod/template lookups, a tiny service
        # offering and two accounts.  On any failure the partially-created
        # fixtures are torn down and the error re-raised so the whole class
        # is reported as broken rather than each test failing obscurely.
        try:
            cls._cleanup = []
            cls.testClient = super(Test42xBugsMgmtSvr, cls).getClsTestClient()
            # Two aliases for the same root API client; both names are used
            # interchangeably throughout this class.
            cls.apiClient = cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client,
                                cls.testClient.getZoneForTests())
            cls.pod = get_pod(cls.apiClient, zone_id=cls.zone.id)
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            cls.services['mode'] = cls.zone.networktype
            cls.services["hypervisor"] = cls.testClient.getHypervisorInfo()
            # Creating Disk offering, Service Offering and Account
            cls.service_offering = ServiceOffering.create(
                cls.apiClient,
                cls.services["service_offerings"]["tiny"]
            )
            cls.account = Account.create(
                cls.api_client,
                cls.services["account"],
                domainid=cls.domain.id
            )
            # Create a second account (used by tests needing two tenants)
            cls.account_2 = Account.create(
                cls.api_client,
                cls.services["account2"],
                domainid=cls.domain.id
            )
            # Getting authentication for user in newly created Account
            cls.user = cls.account.user[0]
            cls.userapiclient = cls.testClient.getUserApiClient(
                cls.user.username,
                cls.domain.name
            )
            # Register everything created above for removal in tearDownClass
            cls._cleanup = [cls.account,
                            cls.account_2,
                            cls.service_offering]
        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
# Clean up, terminate the created resources
cleanup_resources(self.apiClient, self.cleanup)
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "tested"])
@attr(required_hardware="false")
@attr(configuration='apply.allocation.algorithm.to.pods')
def test_es_1223_apply_algo_to_pods(self):
"""
@Desc: Test VM creation while "apply.allocation.algorithm.to.pods" is
set to true
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
@Steps:
Step1: Set global configuration "apply.allocation.algorithm.to.pods"
to true
Step2: Restart management server
Step3: Verifying that VM creation is successful
"""
# Step1: set global configuration
# "apply.allocation.algorithm.to.pods" to true
# Configurations.update(self.apiClient,
# "apply.allocation.algorithm.to.pods", "true")
# TODO: restart management server
if not is_config_suitable(apiclient=self.apiClient,
name='apply.allocation.algorithm.to.pods',
value='true'):
self.skipTest('apply.allocation.algorithm.to.pods '
'should be true. skipping')
# TODO:Step2: Restart management server
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine2"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
# cleanup: set global configuration
# "apply.allocation.algorithm.to.pods" back to false
Configurations.update(
self.apiClient,
name="apply.allocation.algorithm.to.pods",
value="false"
)
# TODO:cleanup: Restart management server
return
    @attr(tags=["advanced", "basic", "tested"])
    @attr(required_hardware="false")
    def test_local_storage_data_disk_tag(self):
        """
        @Desc: Test whether tags are honoured while creating
        data disks on local storage
        @Steps:
        This test needs multiple local storages
        Step1: create a tag 'loc' on the local storage
        Step2: create a disk offering with this storage tag 'loc'
        Step3: create a VM and create disk by selecting the disk offering
        created in step2
        step4: check whether the data disk created in step3 is created on
        local storage with tag 'loc'
        """
        if not self.zone.localstorageenabled:
            self.skipTest('Local storage is not enable for this '
                          'zone. skipping')
        # HOST scope restricts the listing to host-local storage pools.
        local_storages = StoragePool.list(self.apiClient,
                                          zoneid=self.zone.id,
                                          scope='HOST')
        self.assertEqual(
            isinstance(local_storages, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            local_storages,
            None,
            "Check if local storage pools exists in ListStoragePools"
        )
        # Step1: tag the first local pool with 'loc'.
        cmd = updateStoragePool.updateStoragePoolCmd()
        cmd.zoneid = self.zone.id
        cmd.tags = 'loc'
        cmd.id = local_storages[0].id
        self.apiClient.updateStoragePool(cmd)
        # Step2: disk offering that targets local storage carrying that tag.
        self.services["disk_offering"]["storagetype"] = 'local'
        self.services["disk_offering"]["tags"] = 'loc'
        disk_offering = DiskOffering.create(
            self.apiClient,
            self.services["disk_offering"]
        )
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        # Step3: Verifying that VM creation is successful
        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            mode=self.services["mode"]
        )
        self.cleanup.append(virtual_machine)
        self.cleanup.append(disk_offering)
        # Verify VM state
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check VM state is Running or not"
        )
        # Create a data disk from the tagged offering and attach it to the VM.
        self.volume = Volume.create(
            self.apiClient,
            self.services["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=disk_offering.id
        )
        virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        volume = list_volume_response[0]
        self.assertNotEqual(
            volume.virtualmachineid,
            None,
            "Check if volume state (attached) is reflected"
        )
        # step4: the data disk must live on a local pool tagged 'loc'.
        storage_pool = StoragePool.list(self.apiClient, id=volume.storageid)
        self.assertEqual(
            volume.storagetype,
            'local',
            "Check list storage pool response has local as storage type"
        )
        self.assertEqual(
            storage_pool[0].tags,
            'loc',
            "Check list storage pool response has tag"
        )
        return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
def test_es_1236_cloudstack_sccs(self):
"""
@Desc: Test whether cloudstack-sccs is available on management server
@Steps:
Step1: run cloudstack-sccs on management server
Step2: It should return a commit hash
"""
# Step1: run cloudstack-sccs on management server
mgmt_ssh = SshClient(
self.apiClient.connection.mgtSvr,
22,
self.apiClient.connection.user,
self.apiClient.connection.passwd
)
mgmt_ssh.execute("cloudstack-sccs")
# Step2: It should return a commit hash
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
@attr(storage="s3")
def test_es_1863_register_template_s3_domain_admin_user(self):
"""
@Desc: Test whether cloudstack allows Domain admin or user
to register a template using S3/Swift object store.
@Steps:
Step1: create a Domain and users in it.
Step2: Register a template as Domain admin.
Step3: Register a template as Domain user.
Step4: Template should be registered successfully.
"""
# Step1: create a Domain and users in it.
self.newdomain = Domain.create(self.apiClient,
self.services["domain"])
# create account in the domain
self.account_domain = Account.create(
self.apiClient,
self.services["account"],
domainid=self.newdomain.id
)
self.cleanup.append(self.account_domain)
self.cleanup.append(self.newdomain)
# Getting authentication for user in newly created Account in domain
self.domain_user = self.account_domain.user[0]
self.domain_userapiclient = self.testClient.getUserApiClient(
self.domain_user.username, self.newdomain.name
)
# Step2: Register a template as Domain admin.
self.services["templateregister"]["ostype"] = self.services["ostype"]
self.domain_template = Template.register(
self.apiClient,
self.services["templateregister"],
zoneid=self.zone.id,
account=self.account_domain.name,
domainid=self.newdomain.id,
hypervisor=self.hypervisor
)
# Wait for template to download
self.domain_template.download(self.api_client)
# Wait for template status to be changed across
time.sleep(60)
# Step3: Register a template as Domain user.
self.domain_user_template = Template.register(
self.domain_userapiclient,
self.services["templateregister"],
zoneid=self.zone.id,
account=self.account_domain.name,
domainid=self.newdomain.id,
hypervisor=self.hypervisor
)
# Wait for template to download
self.domain_user_template.download(self.api_client)
# Wait for template status to be changed across
time.sleep(60)
# TODO: Step4: Template should be registered successfully.
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
@attr(hypervisor="KVM")
def test_CLOUDSTACK_6181_stoppedvm_root_resize(self):
"""
@Desc: Test root volume resize of stopped VM
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-6181
@Steps:
Step1: Deploy VM in stopped state (startvm=false),
resize via 'resizeVolume', start VM. Root is new size.
"""
# Check whether usage server is running or not
if self.hypervisor.lower() != 'kvm':
self.skipTest("Test can be run only on KVM hypervisor")
# deploy virtual machine in stopped state
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Stopped or not"
)
volumes = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
self.assertIsNotNone(volumes, "root volume is not returned properly")
newrootsize = (self.template.size >> 30) + 2
cmd = resizeVolume.resizeVolumeCmd()
cmd.id = volumes[0].id
cmd.size = newrootsize
self.apiClient.resizeVolume(cmd)
virtual_machine.start(self.apiClient)
volumes_after_resize = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
rootvolume = volumes_after_resize[0]
success = False
if rootvolume is not None and rootvolume.size == (newrootsize << 30):
success = True
self.assertEqual(
success,
True,
"Check if the root volume resized appropriately"
)
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
@attr(hypervisor="KVM")
def test_CLOUDSTACK_6181_vm_root_resize(self):
"""
@Desc: Test root volume resize of running VM
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-6181
@Steps:
Step1: Deploy VM, resize root volume via 'resizeVolume'.
"""
# Check whether usage server is running or not
if self.hypervisor.lower() != 'kvm':
self.skipTest("Test can be run only on KVM hypervisor")
# deploy virtual machine in stopped state
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
volumes = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
self.assertIsNotNone(volumes, "root volume is not returned properly")
newrootsize = (self.template.size >> 30) + 2
cmd = resizeVolume.resizeVolumeCmd()
cmd.id = volumes[0].id
cmd.size = newrootsize
self.apiClient.resizeVolume(cmd)
volumes_after_resize = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
rootvolume = volumes_after_resize[0]
success = False
if rootvolume is not None and rootvolume.size == (newrootsize << 30):
success = True
self.assertEqual(
success,
True,
"Check if the root volume resized appropriately"
)
return
    @unittest.skip('In progress')
    @attr(tags=["advanced", "basic"])
    @attr(required_hardware="false")
    def test_CLOUDSTACK_5023(self):
        """
        @Desc: Test whether we are able to delete PF rule while
        rabbit mq is collecting usage events.
        @Steps:
        step1. Run Usage server
        step2. Delete a PF rule and check whether it is
        successful and usage event is generated
        Configure RabbitMQ for usage event generation
        """
        # TODO: implement; kept skipped via @unittest.skip until a RabbitMQ
        # usage-event environment is available.
        return
    @attr(tags=["advanced", "basic"])
    @attr(required_hardware="false")
    @attr(configuration='apply.allocation.algorithm.to.pods')
    def test_es_47_list_os_types_win_2012(self):
        """
        @Desc: Register a Windows Server 2012 template and verify that a VM
        deployed from it reaches the Running state.
        (Previous docstring was copy-pasted from
        test_es_1223_apply_algo_to_pods and described the wrong test.)
        @Steps:
        Step1: register windows 2012 VM template as windows 8 template
        Step2: deploy a VM with windows2012 template and Verify
        that VM creation is successful
        """
        # NOTE(review): this gate looks copy-pasted from
        # test_es_1223_apply_algo_to_pods -- confirm the windows-2012
        # scenario really requires apply.allocation.algorithm.to.pods=true.
        if not is_config_suitable(apiclient=self.apiClient,
                                  name='apply.allocation.algorithm.to.pods',
                                  value='true'):
            self.skipTest('apply.allocation.algorithm.to.pods '
                          'should be true. skipping')
        # register windows 2012 VM template as windows 8 template
        self.hypervisor = self.testClient.getHypervisorInfo()
        if self.hypervisor.lower() in ['lxc']:
            self.skipTest(
                "windows VM is not supported on %s" %
                self.hypervisor.lower())
        self.win2012_template = Template.register(
            self.apiClient,
            self.services["win2012template"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.domain.id,
            hypervisor=self.hypervisor
        )
        # Wait for template to download
        self.win2012_template.download(self.apiClient)
        self.cleanup.append(self.win2012_template)
        # Wait for template status to be changed across
        time.sleep(60)
        # Deploy a VM from the newly registered template
        self.debug("Deploying win 2012 VM in account: %s" % self.account.name)
        self.services["virtual_machine"]["displayname"] = "win2012"
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.win2012_template.id
        vm1 = VirtualMachine.create(
            self.apiClient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.cleanup.append(vm1)
        # Verify VM state
        self.assertEqual(
            vm1.state,
            'Running',
            "Check VM state is Running or not"
        )
        return
@attr(tags=["advanced", "basic", "test"])
@attr(required_hardware="true")
def test_secondary_storage_stats(self):
"""
@Desc: Dashboard is not showing correct secondary
storage statistics
@Steps:
Step1: listCapacity api should show correct secondary
storage statistics
"""
cmd = listCapacity.listCapacityCmd()
cmd.type = 6
cmd.zoneid = self.zone.id
response = self.apiClient.listCapacity(cmd)
self.assertEqual(
isinstance(response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
response,
None,
"Check if listCapacity has returned properly"
)
self.assertNotEqual(
response[0].capacitytotal,
0,
"check the total capacity of secondary storage returned"
)
return
    @attr(tags=["advanced", "basic"])
    @attr(required_hardware="false")
    def test_multiple_mgmt_srvr_session_timeout(self):
        """
        @Desc: Check whether mgmt server session times out with in 30s
        @Steps:
        Step1: run 'telnet localhost 8250' on the management server
        and see that it times out with in 30seconds
        """
        # Step1: SSH to the management server and open a telnet session to
        # its cluster port (8250); 'time' reports how long the session lives.
        # (Previous comments mentioning cloudstack-sccs / a commit hash were
        # copy-pasted from test_es_1236_cloudstack_sccs.)
        mgmt_ssh = SshClient(
            self.apiClient.connection.mgtSvr,
            22,
            self.apiClient.connection.user,
            self.apiClient.connection.passwd
        )
        mgmt_ssh.execute("time telnet localhost 8250")
        # NOTE(review): the command output is not inspected, so the 30s
        # timeout is not actually asserted here -- confirm intent.
        return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
def test_add_cluster_datacenter_spaces(self):
"""
@Desc: Add VmWare cluster to the CS with the data center
name contains space in between
@Steps:
Step1: Add VmWare cluster to the CS with the data center
name contains space in between.
"""
if self.hypervisor.lower() != 'vmware':
self.skipTest('Can be run only on vmware zone. skipping')
cmd = addCluster.addClusterCmd()
cmd.zoneid = self.zone.id
cmd.hypervisor = self.hypervisor
cmd.clustertype = self.services["configurableData"][
"vmware_cluster"]["clustertype"]
cmd.podid = self.pod.id
cmd.username = self.services["configurableData"][
"vmware_cluster"]["username"]
cmd.password = self.services["configurableData"][
"vmware_cluster"]["password"]
cmd.publicswitchtype = 'vmwaredvs'
cmd.guestswitchtype = 'vmwaredvs'
cmd.url = self.services["configurableData"]["vmware_cluster"]["url"]
cmd.clustername = self.services[
"configurableData"]["vmware_cluster"]["url"]
self.apiClient.addCluster(cmd)
return | test/integration/component/maint/test_bugs.py |
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import (updateStoragePool,
resizeVolume,
listCapacity,
addCluster)
from marvin.sshClient import SshClient
from marvin.lib.common import (get_zone,
get_template,
get_domain,
list_volumes,
get_pod,
is_config_suitable)
from marvin.lib.base import (Domain,
Account,
Template,
VirtualMachine,
Volume,
DiskOffering,
StoragePool,
ServiceOffering,
Configurations)
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr
import time
class Test42xBugsMgmtSvr(cloudstackTestCase):
@classmethod
def setUpClass(cls):
try:
cls._cleanup = []
cls.testClient = super(Test42xBugsMgmtSvr, cls).getClsTestClient()
cls.apiClient = cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
# Get Domain, Zone, Template
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client,
cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.apiClient, zone_id=cls.zone.id)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services['mode'] = cls.zone.networktype
cls.services["hypervisor"] = cls.testClient.getHypervisorInfo()
# Creating Disk offering, Service Offering and Account
cls.service_offering = ServiceOffering.create(
cls.apiClient,
cls.services["service_offerings"]["tiny"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
# Create account
cls.account_2 = Account.create(
cls.api_client,
cls.services["account2"],
domainid=cls.domain.id
)
# Getting authentication for user in newly created Account
cls.user = cls.account.user[0]
cls.userapiclient = cls.testClient.getUserApiClient(
cls.user.username,
cls.domain.name
)
# add objects created in setUpCls to the _cleanup list
cls._cleanup = [cls.account,
cls.account_2,
cls.service_offering]
except Exception as e:
cls.tearDownClass()
raise Exception("Warning: Exception in setup : %s" % e)
return
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
# Clean up, terminate the created resources
cleanup_resources(self.apiClient, self.cleanup)
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "tested"])
@attr(required_hardware="false")
@attr(configuration='apply.allocation.algorithm.to.pods')
def test_es_1223_apply_algo_to_pods(self):
"""
@Desc: Test VM creation while "apply.allocation.algorithm.to.pods" is
set to true
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
@Steps:
Step1: Set global configuration "apply.allocation.algorithm.to.pods"
to true
Step2: Restart management server
Step3: Verifying that VM creation is successful
"""
# Step1: set global configuration
# "apply.allocation.algorithm.to.pods" to true
# Configurations.update(self.apiClient,
# "apply.allocation.algorithm.to.pods", "true")
# TODO: restart management server
if not is_config_suitable(apiclient=self.apiClient,
name='apply.allocation.algorithm.to.pods',
value='true'):
self.skipTest('apply.allocation.algorithm.to.pods '
'should be true. skipping')
# TODO:Step2: Restart management server
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine2"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
# cleanup: set global configuration
# "apply.allocation.algorithm.to.pods" back to false
Configurations.update(
self.apiClient,
name="apply.allocation.algorithm.to.pods",
value="false"
)
# TODO:cleanup: Restart management server
return
@attr(tags=["advanced", "basic", "tested"])
@attr(required_hardware="false")
def test_local_storage_data_disk_tag(self):
"""
@Desc: Test whether tags are honoured while creating
data disks on local storage
@Steps:
This test needs multiple local storages
Step1: create a tag 'loc' on the local storage
Step2: create a disk offering with this storage tag 'loc'
Step3: create a VM and create disk by selecting the disk offering
created in step2
step4: check whether the data disk created in step3 is created on
local storage with tag 'loc'
"""
if not self.zone.localstorageenabled:
self.skipTest('Local storage is not enable for this '
'zone. skipping')
local_storages = StoragePool.list(self.apiClient,
zoneid=self.zone.id,
scope='HOST')
self.assertEqual(
isinstance(local_storages, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
local_storages,
None,
"Check if local storage pools exists in ListStoragePools"
)
cmd = updateStoragePool.updateStoragePoolCmd()
cmd.zoneid = self.zone.id
cmd.tags = 'loc'
cmd.id = local_storages[0].id
self.apiClient.updateStoragePool(cmd)
self.services["disk_offering"]["storagetype"] = 'local'
self.services["disk_offering"]["tags"] = 'loc'
disk_offering = DiskOffering.create(
self.apiClient,
self.services["disk_offering"]
)
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
self.cleanup.append(virtual_machine)
self.cleanup.append(disk_offering)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
self.volume = Volume.create(
self.apiClient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=disk_offering.id
)
virtual_machine.attach_volume(self.apiClient, self.volume)
self.attached = True
list_volume_response = Volume.list(
self.apiClient,
id=self.volume.id
)
self.assertEqual(
isinstance(list_volume_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_volume_response,
None,
"Check if volume exists in ListVolumes"
)
volume = list_volume_response[0]
self.assertNotEqual(
volume.virtualmachineid,
None,
"Check if volume state (attached) is reflected"
)
storage_pool = StoragePool.list(self.apiClient, id=volume.storageid)
self.assertEqual(
volume.storagetype,
'local',
"Check list storage pool response has local as storage type"
)
self.assertEqual(
storage_pool[0].tags,
'loc',
"Check list storage pool response has tag"
)
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
def test_es_1236_cloudstack_sccs(self):
"""
@Desc: Test whether cloudstack-sccs is available on management server
@Steps:
Step1: run cloudstack-sccs on management server
Step2: It should return a commit hash
"""
# Step1: run cloudstack-sccs on management server
mgmt_ssh = SshClient(
self.apiClient.connection.mgtSvr,
22,
self.apiClient.connection.user,
self.apiClient.connection.passwd
)
mgmt_ssh.execute("cloudstack-sccs")
# Step2: It should return a commit hash
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
@attr(storage="s3")
def test_es_1863_register_template_s3_domain_admin_user(self):
"""
@Desc: Test whether cloudstack allows Domain admin or user
to register a template using S3/Swift object store.
@Steps:
Step1: create a Domain and users in it.
Step2: Register a template as Domain admin.
Step3: Register a template as Domain user.
Step4: Template should be registered successfully.
"""
# Step1: create a Domain and users in it.
self.newdomain = Domain.create(self.apiClient,
self.services["domain"])
# create account in the domain
self.account_domain = Account.create(
self.apiClient,
self.services["account"],
domainid=self.newdomain.id
)
self.cleanup.append(self.account_domain)
self.cleanup.append(self.newdomain)
# Getting authentication for user in newly created Account in domain
self.domain_user = self.account_domain.user[0]
self.domain_userapiclient = self.testClient.getUserApiClient(
self.domain_user.username, self.newdomain.name
)
# Step2: Register a template as Domain admin.
self.services["templateregister"]["ostype"] = self.services["ostype"]
self.domain_template = Template.register(
self.apiClient,
self.services["templateregister"],
zoneid=self.zone.id,
account=self.account_domain.name,
domainid=self.newdomain.id,
hypervisor=self.hypervisor
)
# Wait for template to download
self.domain_template.download(self.api_client)
# Wait for template status to be changed across
time.sleep(60)
# Step3: Register a template as Domain user.
self.domain_user_template = Template.register(
self.domain_userapiclient,
self.services["templateregister"],
zoneid=self.zone.id,
account=self.account_domain.name,
domainid=self.newdomain.id,
hypervisor=self.hypervisor
)
# Wait for template to download
self.domain_user_template.download(self.api_client)
# Wait for template status to be changed across
time.sleep(60)
# TODO: Step4: Template should be registered successfully.
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
@attr(hypervisor="KVM")
def test_CLOUDSTACK_6181_stoppedvm_root_resize(self):
"""
@Desc: Test root volume resize of stopped VM
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-6181
@Steps:
Step1: Deploy VM in stopped state (startvm=false),
resize via 'resizeVolume', start VM. Root is new size.
"""
# Check whether usage server is running or not
if self.hypervisor.lower() != 'kvm':
self.skipTest("Test can be run only on KVM hypervisor")
# deploy virtual machine in stopped state
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Stopped or not"
)
volumes = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
self.assertIsNotNone(volumes, "root volume is not returned properly")
newrootsize = (self.template.size >> 30) + 2
cmd = resizeVolume.resizeVolumeCmd()
cmd.id = volumes[0].id
cmd.size = newrootsize
self.apiClient.resizeVolume(cmd)
virtual_machine.start(self.apiClient)
volumes_after_resize = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
rootvolume = volumes_after_resize[0]
success = False
if rootvolume is not None and rootvolume.size == (newrootsize << 30):
success = True
self.assertEqual(
success,
True,
"Check if the root volume resized appropriately"
)
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
@attr(hypervisor="KVM")
def test_CLOUDSTACK_6181_vm_root_resize(self):
"""
@Desc: Test root volume resize of running VM
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-6181
@Steps:
Step1: Deploy VM, resize root volume via 'resizeVolume'.
"""
# Check whether usage server is running or not
if self.hypervisor.lower() != 'kvm':
self.skipTest("Test can be run only on KVM hypervisor")
# deploy virtual machine in stopped state
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
# Step3: Verifying that VM creation is successful
virtual_machine = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup.append(virtual_machine)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Running',
"Check VM state is Running or not"
)
volumes = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
self.assertIsNotNone(volumes, "root volume is not returned properly")
newrootsize = (self.template.size >> 30) + 2
cmd = resizeVolume.resizeVolumeCmd()
cmd.id = volumes[0].id
cmd.size = newrootsize
self.apiClient.resizeVolume(cmd)
volumes_after_resize = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
rootvolume = volumes_after_resize[0]
success = False
if rootvolume is not None and rootvolume.size == (newrootsize << 30):
success = True
self.assertEqual(
success,
True,
"Check if the root volume resized appropriately"
)
return
@unittest.skip('In progress')
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
def test_CLOUDSTACK_5023(self):
"""
@Desc: Test whether we are able to delete PF rule while
rabbit mq is collecting usage events.
@Steps:
step1. Run Usage server
step2. Delete a PF rule and check whether it is
successful and usage event is generated
Configure RabbitMQ for usage event generation
"""
# TBA
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
@attr(configuration='apply.allocation.algorithm.to.pods')
def test_es_47_list_os_types_win_2012(self):
"""
@Desc: Test VM creation while "apply.allocation.algorithm.to.pods"
is set to true
@Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
@Steps:
Step1: register windows 2012 VM template as windows 8 template
Step2: deploy a VM with windows2012 template and Verify
that VM creation is successful
"""
if not is_config_suitable(apiclient=self.apiClient,
name='apply.allocation.algorithm.to.pods',
value='true'):
self.skipTest('apply.allocation.algorithm.to.pods '
'should be true. skipping')
# register windows 2012 VM template as windows 8 template
self.hypervisor = self.testClient.getHypervisorInfo()
if self.hypervisor.lower() in ['lxc']:
self.skipTest(
"windows VM is not supported on %s" %
self.hypervisor.lower())
self.win2012_template = Template.register(
self.apiClient,
self.services["win2012template"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.domain.id,
hypervisor=self.hypervisor
)
# Wait for template to download
self.win2012_template.download(self.apiClient)
self.cleanup.append(self.win2012_template)
# Wait for template status to be changed across
time.sleep(60)
# Deploy
self.debug("Deploying win 2012 VM in account: %s" % self.account.name)
self.services["virtual_machine"]["displayname"] = "win2012"
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.win2012_template.id
vm1 = VirtualMachine.create(
self.apiClient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(vm1)
# Verify VM state
self.assertEqual(
vm1.state,
'Running',
"Check VM state is Running or not"
)
return
@attr(tags=["advanced", "basic", "test"])
@attr(required_hardware="true")
def test_secondary_storage_stats(self):
"""
@Desc: Dashboard is not showing correct secondary
storage statistics
@Steps:
Step1: listCapacity api should show correct secondary
storage statistics
"""
cmd = listCapacity.listCapacityCmd()
cmd.type = 6
cmd.zoneid = self.zone.id
response = self.apiClient.listCapacity(cmd)
self.assertEqual(
isinstance(response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
response,
None,
"Check if listCapacity has returned properly"
)
self.assertNotEqual(
response[0].capacitytotal,
0,
"check the total capacity of secondary storage returned"
)
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="false")
def test_multiple_mgmt_srvr_session_timeout(self):
"""
@Desc: Check whether mgmt server session times out with in 30s
@Steps:
Step1: run 'telnet localhot 8250' on the management server
and see that it times out with in 30seconds
"""
# Step1: run cloudstack-sccs on management server
mgmt_ssh = SshClient(
self.apiClient.connection.mgtSvr,
22,
self.apiClient.connection.user,
self.apiClient.connection.passwd
)
mgmt_ssh.execute("time telnet localhost 8250")
# Step2: It should return a commit hash
return
@attr(tags=["advanced", "basic"])
@attr(required_hardware="true")
def test_add_cluster_datacenter_spaces(self):
"""
@Desc: Add VmWare cluster to the CS with the data center
name contains space in between
@Steps:
Step1: Add VmWare cluster to the CS with the data center
name contains space in between.
"""
if self.hypervisor.lower() != 'vmware':
self.skipTest('Can be run only on vmware zone. skipping')
cmd = addCluster.addClusterCmd()
cmd.zoneid = self.zone.id
cmd.hypervisor = self.hypervisor
cmd.clustertype = self.services["configurableData"][
"vmware_cluster"]["clustertype"]
cmd.podid = self.pod.id
cmd.username = self.services["configurableData"][
"vmware_cluster"]["username"]
cmd.password = self.services["configurableData"][
"vmware_cluster"]["password"]
cmd.publicswitchtype = 'vmwaredvs'
cmd.guestswitchtype = 'vmwaredvs'
cmd.url = self.services["configurableData"]["vmware_cluster"]["url"]
cmd.clustername = self.services[
"configurableData"]["vmware_cluster"]["url"]
self.apiClient.addCluster(cmd)
return | 0.403332 | 0.116211 |
import os, time, json
import urllib.parse
import nabto
PARENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
NABTO_HOME_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'share', 'nabto')
NABTO_QUERIES = os.path.join(PARENT_DIRECTORY, 'unabto_queries.xml')
class NabtoDevice:
deviceID: str
session: nabto.Session = None
def __init__(self, id: str, session: nabto.Session):
self.deviceID = id
self.session = session
def rpcInvoke(self, f: str, args: dict):
if args:
params = urllib.parse.urlencode(args)
return self.session.rpcInvoke(f"nabto://{self.deviceID}/{f}?{params}")
return self.session.rpcInvoke(f"nabto://{self.deviceID}/{f}")
def addUser(self, user, fingerprint):
resp = self.rpcInvoke("add_user.json", {"name": user, "fingerprint": fingerprint})
return json.loads(resp)["response"]
def getUsers(self) -> list:
resp = self.rpcInvoke("get_users.json", {"start": 0, "count": 10})
return json.loads(resp)["response"]["users"]
def pairWithDevice(self, name: str):
resp = self.rpcInvoke("pair_with_device.json", {"name": name})
return json.loads(resp)["response"]
USER = "alex"
PASSWORD = "<PASSWORD>"
LOCAL_PORT = 18090
NABTO_HOST = "jkkecnxk.rxxbkt.trial.nabto.net"
REMOTE_HOST = "localhost"
REMOTE_PORT = 8090
def main():
nabto.nabtoStartup(NABTO_HOME_DIRECTORY)
print(nabto.nabtoVersionString())
nabto.nabtoCreateSelfSignedProfile("alex", "<PASSWORD>")
session = nabto.Session()
session.open(USER, PASSWORD)
with open(NABTO_QUERIES) as file:
session.rpcSetDefaultInterface(file.read())
dev = NabtoDevice(NABTO_HOST, session)
print(dev.getUsers())
tunnel = nabto.Tunnel()
port = tunnel.openTcp(session, LOCAL_PORT, NABTO_HOST, REMOTE_HOST, REMOTE_PORT)
print(f"Opened tunnel on port {port}")
time.sleep(30)
tunnel.close()
session.close()
nabto.nabtoShutdown()
if __name__ == "__main__":
main() | example/ex2.py | import os, time, json
import urllib.parse
import nabto
PARENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
NABTO_HOME_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'share', 'nabto')
NABTO_QUERIES = os.path.join(PARENT_DIRECTORY, 'unabto_queries.xml')
class NabtoDevice:
deviceID: str
session: nabto.Session = None
def __init__(self, id: str, session: nabto.Session):
self.deviceID = id
self.session = session
def rpcInvoke(self, f: str, args: dict):
if args:
params = urllib.parse.urlencode(args)
return self.session.rpcInvoke(f"nabto://{self.deviceID}/{f}?{params}")
return self.session.rpcInvoke(f"nabto://{self.deviceID}/{f}")
def addUser(self, user, fingerprint):
resp = self.rpcInvoke("add_user.json", {"name": user, "fingerprint": fingerprint})
return json.loads(resp)["response"]
def getUsers(self) -> list:
resp = self.rpcInvoke("get_users.json", {"start": 0, "count": 10})
return json.loads(resp)["response"]["users"]
def pairWithDevice(self, name: str):
resp = self.rpcInvoke("pair_with_device.json", {"name": name})
return json.loads(resp)["response"]
USER = "alex"
PASSWORD = "<PASSWORD>"
LOCAL_PORT = 18090
NABTO_HOST = "jkkecnxk.rxxbkt.trial.nabto.net"
REMOTE_HOST = "localhost"
REMOTE_PORT = 8090
def main():
nabto.nabtoStartup(NABTO_HOME_DIRECTORY)
print(nabto.nabtoVersionString())
nabto.nabtoCreateSelfSignedProfile("alex", "<PASSWORD>")
session = nabto.Session()
session.open(USER, PASSWORD)
with open(NABTO_QUERIES) as file:
session.rpcSetDefaultInterface(file.read())
dev = NabtoDevice(NABTO_HOST, session)
print(dev.getUsers())
tunnel = nabto.Tunnel()
port = tunnel.openTcp(session, LOCAL_PORT, NABTO_HOST, REMOTE_HOST, REMOTE_PORT)
print(f"Opened tunnel on port {port}")
time.sleep(30)
tunnel.close()
session.close()
nabto.nabtoShutdown()
if __name__ == "__main__":
main() | 0.304042 | 0.111895 |
import os
import urllib.request
import numpy as np
import pandas as pd
from astropy.io.votable import parse_single_table
from species.analysis import photometry
from species.util import data_util, query_util
def add_spex(input_path, database):
"""
Function for adding the SpeX Prism Spectral Library to the database.
Parameters
----------
input_path : str
Path of the data folder.
database : h5py._hl.files.File
The HDF5 database.
Returns
-------
NoneType
None
"""
distance_url = 'https://people.phys.ethz.ch/~stolkert/species/distance.dat'
distance_file = os.path.join(input_path, 'distance.dat')
if not os.path.isfile(distance_file):
urllib.request.urlretrieve(distance_url, distance_file)
distance_data = pd.pandas.read_csv(distance_file,
usecols=[0, 3, 4],
names=['object', 'distance', 'distance_error'],
delimiter=',',
dtype={'object': str,
'distance': float,
'distance_error': float})
database.create_group('spectra/spex')
data_path = os.path.join(input_path, 'spex')
if not os.path.exists(data_path):
os.makedirs(data_path)
url_all = 'http://svo2.cab.inta-csic.es/vocats/v2/spex/' \
'cs.php?RA=180.000000&DEC=0.000000&SR=180.000000&VERB=2'
xml_file_spex = os.path.join(data_path, 'spex.xml')
if not os.path.isfile(xml_file_spex):
urllib.request.urlretrieve(url_all, xml_file_spex)
table = parse_single_table(xml_file_spex)
# name = table.array['name']
twomass = table.array['name2m']
url = table.array['access_url']
unique_id = []
for i, item in enumerate(url):
if twomass[i] not in unique_id:
if isinstance(twomass[i], str):
xml_file_1 = os.path.join(data_path, twomass[i]+'.xml')
else:
# Use decode for backward compatibility
xml_file_1 = os.path.join(data_path, twomass[i].decode('utf-8')+'.xml')
if not os.path.isfile(xml_file_1):
if isinstance(item, str):
urllib.request.urlretrieve(item, xml_file_1)
else:
urllib.request.urlretrieve(item.decode('utf-8'), xml_file_1)
table = parse_single_table(xml_file_1)
name = table.array['ID']
url = table.array['access_url']
if isinstance(name[0], str):
name = name[0]
else:
name = name[0].decode('utf-8')
print_message = f'Downloading SpeX Prism Spectral Library... {name}'
print(f'\r{print_message:<72}', end='')
xml_file_2 = os.path.join(data_path, f'spex_{name}.xml')
if not os.path.isfile(xml_file_2):
if isinstance(url[0], str):
urllib.request.urlretrieve(url[0], xml_file_2)
else:
urllib.request.urlretrieve(url[0].decode('utf-8'), xml_file_2)
unique_id.append(twomass[i])
print_message = 'Downloading SpeX Prism Spectral Library... [DONE]'
print(f'\r{print_message:<72}')
h_twomass = photometry.SyntheticPhotometry('2MASS/2MASS.H')
# 2MASS H band zero point for 0 mag (Cogen et al. 2003)
h_zp = 1.133e-9 # (W m-2 um-1)
for votable in os.listdir(data_path):
if votable.startswith('spex_') and votable.endswith('.xml'):
xml_file = os.path.join(data_path, votable)
table = parse_single_table(xml_file)
wavelength = table.array['wavelength'] # (Angstrom)
flux = table.array['flux'] # Normalized units
wavelength = np.array(wavelength*1e-4) # (um)
flux = np.array(flux) # (a.u.)
error = np.full(flux.size, np.nan)
# 2MASS magnitudes
j_mag = table.get_field_by_id('jmag').value
h_mag = table.get_field_by_id('hmag').value
ks_mag = table.get_field_by_id('ksmag').value
if not isinstance(j_mag, str):
j_mag = j_mag.decode('utf-8')
if not isinstance(h_mag, str):
h_mag = h_mag.decode('utf-8')
if not isinstance(ks_mag, str):
ks_mag = ks_mag.decode('utf-8')
if j_mag == '':
j_mag = np.nan
else:
j_mag = float(j_mag)
if h_mag == '':
h_mag = np.nan
else:
h_mag = float(h_mag)
if ks_mag == '':
ks_mag = np.nan
else:
ks_mag = float(ks_mag)
name = table.get_field_by_id('name').value
if not isinstance(name, str):
name = name.decode('utf-8')
twomass_id = table.get_field_by_id('name2m').value
if not isinstance(twomass_id, str):
twomass_id = twomass_id.decode('utf-8')
try:
sptype = table.get_field_by_id('nirspty').value
if not isinstance(sptype, str):
sptype = sptype.decode('utf-8')
except KeyError:
try:
sptype = table.get_field_by_id('optspty').value
if not isinstance(sptype, str):
sptype = sptype.decode('utf-8')
except KeyError:
sptype = 'None'
sptype = data_util.update_sptype(np.array([sptype]))[0].strip()
h_flux, _ = h_twomass.magnitude_to_flux(h_mag, error=None, zp_flux=h_zp)
phot = h_twomass.spectrum_to_flux(wavelength, flux) # Normalized units
flux *= h_flux/phot[0] # (W m-2 um-1)
spdata = np.vstack([wavelength, flux, error])
# simbad_id, distance = query_util.get_distance(f'2MASS {twomass_id}')
simbad_id = query_util.get_simbad(f'2MASS {twomass_id}')
if simbad_id is not None:
if not isinstance(simbad_id, str):
simbad_id = simbad_id.decode('utf-8')
dist_select = distance_data.loc[distance_data['object'] == simbad_id]
if not dist_select.empty:
distance = (dist_select['distance'], dist_select['distance_error'])
else:
distance = (np.nan, np.nan)
else:
distance = (np.nan, np.nan)
if sptype[0] in ['M', 'L', 'T'] and len(sptype) == 2:
print_message = f'Adding SpeX Prism Spectral Library... {name}'
print(f'\r{print_message:<72}', end='')
dset = database.create_dataset(f'spectra/spex/{name}', data=spdata)
dset.attrs['name'] = str(name).encode()
dset.attrs['sptype'] = str(sptype).encode()
dset.attrs['simbad'] = str(simbad_id).encode()
dset.attrs['2MASS/2MASS.J'] = j_mag
dset.attrs['2MASS/2MASS.H'] = h_mag
dset.attrs['2MASS/2MASS.Ks'] = ks_mag
dset.attrs['distance'] = distance[0] # (pc)
dset.attrs['distance_error'] = distance[1] # (pc)
print_message = 'Adding SpeX Prism Spectral Library... [DONE]'
print(f'\r{print_message:<72}')
database.close() | species/data/spex.py | import os
import urllib.request
import numpy as np
import pandas as pd
from astropy.io.votable import parse_single_table
from species.analysis import photometry
from species.util import data_util, query_util
def add_spex(input_path, database):
"""
Function for adding the SpeX Prism Spectral Library to the database.
Parameters
----------
input_path : str
Path of the data folder.
database : h5py._hl.files.File
The HDF5 database.
Returns
-------
NoneType
None
"""
distance_url = 'https://people.phys.ethz.ch/~stolkert/species/distance.dat'
distance_file = os.path.join(input_path, 'distance.dat')
if not os.path.isfile(distance_file):
urllib.request.urlretrieve(distance_url, distance_file)
distance_data = pd.pandas.read_csv(distance_file,
usecols=[0, 3, 4],
names=['object', 'distance', 'distance_error'],
delimiter=',',
dtype={'object': str,
'distance': float,
'distance_error': float})
database.create_group('spectra/spex')
data_path = os.path.join(input_path, 'spex')
if not os.path.exists(data_path):
os.makedirs(data_path)
url_all = 'http://svo2.cab.inta-csic.es/vocats/v2/spex/' \
'cs.php?RA=180.000000&DEC=0.000000&SR=180.000000&VERB=2'
xml_file_spex = os.path.join(data_path, 'spex.xml')
if not os.path.isfile(xml_file_spex):
urllib.request.urlretrieve(url_all, xml_file_spex)
table = parse_single_table(xml_file_spex)
# name = table.array['name']
twomass = table.array['name2m']
url = table.array['access_url']
unique_id = []
for i, item in enumerate(url):
if twomass[i] not in unique_id:
if isinstance(twomass[i], str):
xml_file_1 = os.path.join(data_path, twomass[i]+'.xml')
else:
# Use decode for backward compatibility
xml_file_1 = os.path.join(data_path, twomass[i].decode('utf-8')+'.xml')
if not os.path.isfile(xml_file_1):
if isinstance(item, str):
urllib.request.urlretrieve(item, xml_file_1)
else:
urllib.request.urlretrieve(item.decode('utf-8'), xml_file_1)
table = parse_single_table(xml_file_1)
name = table.array['ID']
url = table.array['access_url']
if isinstance(name[0], str):
name = name[0]
else:
name = name[0].decode('utf-8')
print_message = f'Downloading SpeX Prism Spectral Library... {name}'
print(f'\r{print_message:<72}', end='')
xml_file_2 = os.path.join(data_path, f'spex_{name}.xml')
if not os.path.isfile(xml_file_2):
if isinstance(url[0], str):
urllib.request.urlretrieve(url[0], xml_file_2)
else:
urllib.request.urlretrieve(url[0].decode('utf-8'), xml_file_2)
unique_id.append(twomass[i])
print_message = 'Downloading SpeX Prism Spectral Library... [DONE]'
print(f'\r{print_message:<72}')
h_twomass = photometry.SyntheticPhotometry('2MASS/2MASS.H')
# 2MASS H band zero point for 0 mag (Cogen et al. 2003)
h_zp = 1.133e-9 # (W m-2 um-1)
for votable in os.listdir(data_path):
if votable.startswith('spex_') and votable.endswith('.xml'):
xml_file = os.path.join(data_path, votable)
table = parse_single_table(xml_file)
wavelength = table.array['wavelength'] # (Angstrom)
flux = table.array['flux'] # Normalized units
wavelength = np.array(wavelength*1e-4) # (um)
flux = np.array(flux) # (a.u.)
error = np.full(flux.size, np.nan)
# 2MASS magnitudes
j_mag = table.get_field_by_id('jmag').value
h_mag = table.get_field_by_id('hmag').value
ks_mag = table.get_field_by_id('ksmag').value
if not isinstance(j_mag, str):
j_mag = j_mag.decode('utf-8')
if not isinstance(h_mag, str):
h_mag = h_mag.decode('utf-8')
if not isinstance(ks_mag, str):
ks_mag = ks_mag.decode('utf-8')
if j_mag == '':
j_mag = np.nan
else:
j_mag = float(j_mag)
if h_mag == '':
h_mag = np.nan
else:
h_mag = float(h_mag)
if ks_mag == '':
ks_mag = np.nan
else:
ks_mag = float(ks_mag)
name = table.get_field_by_id('name').value
if not isinstance(name, str):
name = name.decode('utf-8')
twomass_id = table.get_field_by_id('name2m').value
if not isinstance(twomass_id, str):
twomass_id = twomass_id.decode('utf-8')
try:
sptype = table.get_field_by_id('nirspty').value
if not isinstance(sptype, str):
sptype = sptype.decode('utf-8')
except KeyError:
try:
sptype = table.get_field_by_id('optspty').value
if not isinstance(sptype, str):
sptype = sptype.decode('utf-8')
except KeyError:
sptype = 'None'
sptype = data_util.update_sptype(np.array([sptype]))[0].strip()
h_flux, _ = h_twomass.magnitude_to_flux(h_mag, error=None, zp_flux=h_zp)
phot = h_twomass.spectrum_to_flux(wavelength, flux) # Normalized units
flux *= h_flux/phot[0] # (W m-2 um-1)
spdata = np.vstack([wavelength, flux, error])
# simbad_id, distance = query_util.get_distance(f'2MASS {twomass_id}')
simbad_id = query_util.get_simbad(f'2MASS {twomass_id}')
if simbad_id is not None:
if not isinstance(simbad_id, str):
simbad_id = simbad_id.decode('utf-8')
dist_select = distance_data.loc[distance_data['object'] == simbad_id]
if not dist_select.empty:
distance = (dist_select['distance'], dist_select['distance_error'])
else:
distance = (np.nan, np.nan)
else:
distance = (np.nan, np.nan)
if sptype[0] in ['M', 'L', 'T'] and len(sptype) == 2:
print_message = f'Adding SpeX Prism Spectral Library... {name}'
print(f'\r{print_message:<72}', end='')
dset = database.create_dataset(f'spectra/spex/{name}', data=spdata)
dset.attrs['name'] = str(name).encode()
dset.attrs['sptype'] = str(sptype).encode()
dset.attrs['simbad'] = str(simbad_id).encode()
dset.attrs['2MASS/2MASS.J'] = j_mag
dset.attrs['2MASS/2MASS.H'] = h_mag
dset.attrs['2MASS/2MASS.Ks'] = ks_mag
dset.attrs['distance'] = distance[0] # (pc)
dset.attrs['distance_error'] = distance[1] # (pc)
print_message = 'Adding SpeX Prism Spectral Library... [DONE]'
print(f'\r{print_message:<72}')
database.close() | 0.650689 | 0.236489 |
import os
import glob
import sys
import argparse
import re
from collections import defaultdict
from celescope.__init__ import __CONDA__
from celescope.tools.utils import merge_report
from celescope.tools.utils import parse_map_col4, link_data
from celescope.tools.__init__ import __PATTERN_DICT__
class Multi():
    """Multi-sample pipeline driver for a CeleScope assay.

    Parses command-line arguments, then builds the shell command for every
    pipeline step of every sample listed in the mapfile.  Commands are
    emitted either as an sjm job description (``--mod sjm``) or as one
    shell script per sample (``--mod shell``).
    """

    def __init__(self, __ASSAY__, __STEPS__, __CONDA__):
        self.__ASSAY__ = __ASSAY__  # assay name passed to every sub-command
        self.__STEPS__ = __STEPS__  # ordered list of pipeline step names
        self.__CONDA__ = __CONDA__  # conda env activated before each sjm cmd
        self.__APP__ = 'celescope'
        self.col4_default = 'auto'  # default for the mapfile's optional 4th column
        self.last_step = ''         # previous step name, used to chain sjm job order

    def multi_opts(self):
        """Build the ArgumentParser with the options common to all assays."""
        readme = f'{self.__ASSAY__} multi-samples'
        parser = argparse.ArgumentParser(readme)
        parser.add_argument('--mod', help='mod, sjm or shell', choices=['sjm', 'shell'], default='sjm')
        parser.add_argument(
            '--mapfile',
            help='''
tsv file, 4 columns:
1st col: LibName;
2nd col: DataDir;
3rd col: SampleName;
4th col: Cell number or match_dir, optional;
''',
            required=True)
        parser.add_argument('--outdir', help='output dir', default="./")
        parser.add_argument(
            '--adapt',
            action='append',
            help='adapter sequence',
            default=[
                'polyT=A{15}',
                'p5=AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC'])
        parser.add_argument(
            '--minimum_length',
            dest='minimum_length',
            help='minimum_length',
            default=20)
        parser.add_argument(
            '--nextseq-trim',
            dest='nextseq_trim',
            help='nextseq_trim',
            default=20)
        parser.add_argument('--overlap', help='minimum overlap length', default=10)
        parser.add_argument('--insert', help="read2 insert length", default=150)
        parser.add_argument('--rm_files', action='store_true', help='remove redundant fq.gz and bam after running')
        parser.add_argument('--steps_run', help='steps to run', default='all')
        parser.add_argument('--debug', help='debug or not', action='store_true')
        self.parser = parser
        return parser

    def barcode_args(self):
        """Add the barcode-step arguments to the parser."""
        parser = self.parser
        parser.add_argument('--chemistry', choices=__PATTERN_DICT__.keys(), help='chemistry version', default='auto')
        parser.add_argument('--pattern', help='')
        parser.add_argument('--whitelist', help='')
        parser.add_argument('--linker', help='')
        parser.add_argument('--lowQual', type=int, help='max phred of base as lowQual, default=0', default=0)
        parser.add_argument('--lowNum', type=int, help='max number with lowQual allowed, default=2', default=2)
        parser.add_argument('--nopolyT', action='store_true', help='output nopolyT fq')
        parser.add_argument('--noLinker', action='store_true', help='output noLinker fq')
        parser.add_argument('--probe_file', help="probe fasta file")
        parser.add_argument('--allowNoPolyT', help="allow reads without polyT", action='store_true')
        parser.add_argument('--allowNoLinker', help="allow reads without correct linker", action='store_true')
        self.parser = parser

    def read_barcode_args(self):
        """Copy the parsed barcode-step arguments onto the instance."""
        self.chemistry = self.args.chemistry
        self.pattern = self.args.pattern
        self.whitelist = self.args.whitelist
        self.linker = self.args.linker
        self.lowQual = self.args.lowQual
        self.lowNum = self.args.lowNum
        self.nopolyT_str = Multi.arg_str(self.args.nopolyT, 'nopolyT')
        self.noLinker_str = Multi.arg_str(self.args.noLinker, 'noLinker')
        self.probe_file = self.args.probe_file
        self.allowNoPolyT_str = Multi.arg_str(self.args.allowNoPolyT, 'allowNoPolyT')
        self.allowNoLinker_str = Multi.arg_str(self.args.allowNoLinker, 'allowNoLinker')

    def STAR_args(self):
        """Add the STAR-alignment arguments to the parser."""
        self.parser.add_argument('--starMem', help='starMem', default=30)
        self.parser.add_argument('--genomeDir', help='genome index dir', required=True)
        self.parser.add_argument(
            '--gtf_type',
            help='Specify attribute type in GTF annotation, default=exon',
            default='exon')
        self.parser.add_argument('--thread', help='thread', default=6)
        self.parser.add_argument('--out_unmapped', help='out_unmapped', action='store_true')
        self.parser.add_argument('--outFilterMatchNmin', help='STAR outFilterMatchNmin', default=0)

    def count_args(self):
        """Add the count-step arguments to the parser."""
        self.parser.add_argument('--rescue', help='rescue low UMI cells', action='store_true')

    def analysis_args(self):
        """Add the analysis-step arguments to the parser."""
        self.parser.add_argument('--save_rds', action='store_true', help='write rds to disk')
        self.parser.add_argument('--type_marker_tsv', help='cell type marker tsv')

    def custome_args(self):
        """Add assay-specific arguments; subclasses may override."""
        self.STAR_args()
        self.count_args()
        self.analysis_args()

    def parse_args(self):
        """Register all options, parse argv, and copy values onto self."""
        self.multi_opts()
        self.barcode_args()
        self.custome_args()
        self.args = self.parser.parse_args()
        # read args
        self.outdir = self.args.outdir
        self.overlap = self.args.overlap
        self.minimum_length = self.args.minimum_length
        self.insert = self.args.insert
        self.mod = self.args.mod
        self.rm_files = self.args.rm_files
        self.steps_run = self.args.steps_run
        # the RD environment always runs in debug mode
        if self.__CONDA__ == 'celescope_RD':
            self.debug_str = '--debug'
        else:
            self.debug_str = Multi.arg_str(self.args.debug, 'debug')
        self.read_barcode_args()
        self.read_custome_args()

    @staticmethod
    def arg_str(arg, arg_name):
        '''
        return action store_true arguments as string
        '''
        # truthy flag -> '--name', falsy -> '' (flag omitted from the cmd line)
        return '--' + arg_name if arg else ''

    def read_STAR_args(self):
        """Copy the parsed STAR arguments onto the instance."""
        self.thread = self.args.thread
        self.genomeDir = self.args.genomeDir
        self.starMem = self.args.starMem
        self.gtf_type = self.args.gtf_type
        self.out_unmapped = Multi.arg_str(self.args.out_unmapped, 'out_unmapped')
        self.outFilterMatchNmin = self.args.outFilterMatchNmin

    def read_count_args(self):
        """Copy the parsed count arguments onto the instance."""
        self.rescue_str = Multi.arg_str(self.args.rescue, 'rescue')

    def read_analysis_args(self):
        """Copy the parsed analysis arguments onto the instance."""
        self.save_rds = self.args.save_rds
        self.save_rds_str = Multi.arg_str(self.save_rds, 'save_rds')
        self.type_marker_tsv = self.args.type_marker_tsv

    def read_custome_args(self):
        """Read assay-specific arguments; subclasses may override."""
        self.read_STAR_args()
        self.read_count_args()
        self.read_analysis_args()

    def prepare(self):
        """Parse the mapfile, link raw data, and initialise output state."""
        # parse_mapfile
        self.fq_dict, self.col4_dict = parse_map_col4(self.args.mapfile, self.col4_default)
        # link
        link_data(self.outdir, self.fq_dict)
        # mk log dir; makedirs avoids shelling out to `mkdir -p`
        self.logdir = self.outdir + '/log'
        os.makedirs(self.logdir, exist_ok=True)
        # script init
        self.sjm_cmd = 'log_dir %s\n' % (self.logdir)
        self.sjm_order = ''
        self.shell_dict = defaultdict(str)
        # outdir dict: sample -> step -> '<outdir>/<sample>/NN.<step>'
        self.outdir_dic = {}
        for sample in self.fq_dict:
            self.outdir_dic[sample] = {
                step: f"{self.outdir}/{sample}/{index:02d}.{step}"
                for index, step in enumerate(self.__STEPS__)
            }

    def generate_cmd(self, cmd, step, sample, m, x):
        """Append an sjm job entry for *cmd* (m = memory in GB, x = cores)."""
        self.sjm_cmd += f'''
job_begin
name {step}_{sample}
sched_options -w n -cwd -V -l vf={m}g,p={x}
cmd source activate {self.__CONDA__}; {cmd}
job_end
'''

    def process_cmd(self, cmd, step, sample, m=1, x=1):
        """Record *cmd* for sjm and shell output and chain it after the previous step."""
        self.generate_cmd(cmd, step, sample, m=m, x=x)
        self.shell_dict[sample] += cmd + '\n'
        if self.last_step:
            self.sjm_order += f'order {step}_{sample} after {self.last_step}_{sample}\n'
        self.last_step = step

    def generate_first(self, cmd, step, sample, m=1, x=1):
        """Record the first command of a sample (no ordering dependency)."""
        # bug fix: forward the caller's m/x instead of hard-coding m=1, x=1
        self.generate_cmd(cmd, step, sample, m=m, x=x)
        self.shell_dict[sample] += cmd + '\n'
        self.last_step = step

    def generate_other(self, cmd, step, sample, m=1, x=1):
        """Record a follow-up command ordered after the previous step."""
        # bug fix: forward the caller's m/x instead of hard-coding m=1, x=1
        self.generate_cmd(cmd, step, sample, m=m, x=x)
        self.shell_dict[sample] += cmd + '\n'
        self.sjm_order += f'order {step}_{sample} after {self.last_step}_{sample}\n'
        self.last_step = step

    def sample(self, sample):
        """Build the 'sample' step command."""
        step = "sample"
        arr = self.fq_dict[sample]
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--chemistry {self.chemistry} '
            f'--fq1 {arr[0]}'
        )
        self.process_cmd(cmd, step, sample, m=1, x=1)

    def barcode(self, sample):
        """Build the 'barcode' step command."""
        arr = self.fq_dict[sample]
        step = "barcode"
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--chemistry {self.chemistry} '
            f'--fq1 {arr[0]} --fq2 {arr[1]} '
            f'--pattern {self.pattern} --whitelist {self.whitelist} --linker {self.linker} '
            f'--lowQual {self.lowQual} --thread {self.thread} '
            f'--lowNum {self.lowNum} '
            f'{self.allowNoPolyT_str} '
            f'{self.allowNoLinker_str} '
            f'{self.noLinker_str} '
            f'{self.nopolyT_str} '
            f'--probe_file {self.probe_file} '
        )
        self.process_cmd(cmd, step, sample, m=5, x=1)

    def cutadapt(self, sample):
        """Build the 'cutadapt' adapter-trimming step command."""
        step = "cutadapt"
        fq = f'{self.outdir_dic[sample]["barcode"]}/{sample}_2.fq.gz'
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--fq {fq} '
            f'--overlap {self.overlap} '
            f'--minimum_length {self.minimum_length} '
            f'--insert {self.insert} '
        )
        self.process_cmd(cmd, step, sample, m=5, x=1)

    def STAR(self, sample):
        """Build the 'STAR' alignment step command."""
        step = 'STAR'
        fq = f'{self.outdir_dic[sample]["cutadapt"]}/{sample}_clean_2.fq.gz'
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--fq {fq} '
            f'--genomeDir {self.genomeDir} '
            f'--thread {self.thread} '
            f'{self.debug_str} '
            f'--outFilterMatchNmin {self.outFilterMatchNmin} '
            f'{self.out_unmapped} '
        )
        self.process_cmd(cmd, step, sample, m=self.starMem, x=self.thread)

    def featureCounts(self, sample):
        """Build the 'featureCounts' gene-assignment step command."""
        step = 'featureCounts'
        input = f'{self.outdir_dic[sample]["STAR"]}/{sample}_Aligned.sortedByCoord.out.bam'
        # bug fix: --gtf_type was passed twice in the original command
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--input {input} --gtf_type {self.gtf_type} '
            f'--genomeDir {self.genomeDir} '
            f'--thread {self.thread} '
        )
        self.process_cmd(cmd, step, sample, m=5, x=self.thread)

    def count(self, sample):
        """Build the 'count' UMI-counting step command."""
        step = 'count'
        bam = f'{self.outdir_dic[sample]["featureCounts"]}/{sample}_name_sorted.bam'
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--bam {bam} '
            f'--cells {self.col4_dict[sample]} '
            f'--genomeDir {self.genomeDir} '
            f'{self.rescue_str} '
        )
        self.process_cmd(cmd, step, sample, m=10, x=1)

    def analysis(self, sample):
        """Build the 'analysis' clustering/marker step command."""
        step = 'analysis'
        matrix_file = f'{self.outdir_dic[sample]["count"]}/{sample}_matrix.tsv.gz'
        cmd = (
            f'{self.__APP__} '
            f'{self.__ASSAY__} '
            f'{step} '
            f'--outdir {self.outdir_dic[sample][step]} '
            f'--sample {sample} '
            f'--assay {self.__ASSAY__} '
            f'--matrix_file {matrix_file} '
            f'{self.save_rds_str} '
            f'--type_marker_tsv {self.type_marker_tsv} '
        )
        self.process_cmd(cmd, step, sample, m=10, x=1)

    def run_steps(self):
        """Dispatch the selected step methods for every sample."""
        if self.steps_run == 'all':
            self.steps_run = self.__STEPS__
        elif self.steps_run:
            self.steps_run = self.steps_run.strip().split(',')
        for sample in self.fq_dict:
            self.last_step = ''
            for step in self.steps_run:
                # getattr dispatch instead of eval() on a user-supplied string
                getattr(self, step)(sample)

    def end(self):
        """Write the collected commands out in the selected mode."""
        if self.mod == 'sjm':
            step = 'merge_report'
            merge_report(
                self.fq_dict,
                self.__STEPS__,
                self.last_step,
                self.sjm_cmd,
                self.sjm_order,
                self.logdir,
                self.__CONDA__,
                self.outdir,
                self.rm_files,
            )
        if self.mod == 'shell':
            os.makedirs('./shell/', exist_ok=True)
            for sample in self.shell_dict:
                with open(f'./shell/{sample}.sh', 'w') as f:
                    f.write(self.shell_dict[sample])

    def run(self):
        """Full driver: parse args, prepare, build all commands, write output."""
        self.parse_args()
        self.prepare()
        self.run_steps()
        self.end()
import glob
import sys
import argparse
import re
from collections import defaultdict
from celescope.__init__ import __CONDA__
from celescope.tools.utils import merge_report
from celescope.tools.utils import parse_map_col4, link_data
from celescope.tools.__init__ import __PATTERN_DICT__
class Multi():
def __init__(self, __ASSAY__, __STEPS__, __CONDA__):
self.__ASSAY__ = __ASSAY__
self.__STEPS__ = __STEPS__
self.__CONDA__ = __CONDA__
self.__APP__ = 'celescope'
self.col4_default = 'auto'
self.last_step = ''
def multi_opts(self):
readme = f'{self.__ASSAY__} multi-samples'
parser = argparse.ArgumentParser(readme)
parser.add_argument('--mod', help='mod, sjm or shell', choices=['sjm', 'shell'], default='sjm')
parser.add_argument(
'--mapfile',
help='''
tsv file, 4 columns:
1st col: LibName;
2nd col: DataDir;
3rd col: SampleName;
4th col: Cell number or match_dir, optional;
''',
required=True)
parser.add_argument('--outdir', help='output dir', default="./")
parser.add_argument(
'--adapt',
action='append',
help='adapter sequence',
default=[
'polyT=A{15}',
'p5=AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC'])
parser.add_argument(
'--minimum_length',
dest='minimum_length',
help='minimum_length',
default=20)
parser.add_argument(
'--nextseq-trim',
dest='nextseq_trim',
help='nextseq_trim',
default=20)
parser.add_argument('--overlap', help='minimum overlap length', default=10)
parser.add_argument('--insert', help="read2 insert length", default=150)
parser.add_argument('--rm_files', action='store_true', help='remove redundant fq.gz and bam after running')
parser.add_argument('--steps_run', help='steps to run', default='all')
parser.add_argument('--debug', help='debug or not', action='store_true')
self.parser = parser
return parser
def barcode_args(self):
parser = self.parser
parser.add_argument('--chemistry', choices=__PATTERN_DICT__.keys(), help='chemistry version', default='auto')
parser.add_argument('--pattern', help='')
parser.add_argument('--whitelist', help='')
parser.add_argument('--linker', help='')
parser.add_argument('--lowQual', type=int, help='max phred of base as lowQual, default=0', default=0)
parser.add_argument('--lowNum', type=int, help='max number with lowQual allowed, default=2', default=2)
parser.add_argument('--nopolyT', action='store_true', help='output nopolyT fq')
parser.add_argument('--noLinker', action='store_true', help='output noLinker fq')
parser.add_argument('--probe_file', help="probe fasta file")
parser.add_argument('--allowNoPolyT', help="allow reads without polyT", action='store_true')
parser.add_argument('--allowNoLinker', help="allow reads without correct linker", action='store_true')
self.parser = parser
def read_barcode_args(self):
self.chemistry = self.args.chemistry
self.pattern = self.args.pattern
self.whitelist = self.args.whitelist
self.linker = self.args.linker
self.lowQual = self.args.lowQual
self.lowNum = self.args.lowNum
self.nopolyT_str = Multi.arg_str(self.args.nopolyT, 'nopolyT')
self.noLinker_str = Multi.arg_str(self.args.noLinker, 'noLinker')
self.probe_file = self.args.probe_file
self.allowNoPolyT_str = Multi.arg_str(self.args.allowNoPolyT, 'allowNoPolyT')
self.allowNoLinker_str = Multi.arg_str(self.args.allowNoLinker, 'allowNoLinker')
def STAR_args(self):
self.parser.add_argument('--starMem', help='starMem', default=30)
self.parser.add_argument('--genomeDir', help='genome index dir', required=True)
self.parser.add_argument(
'--gtf_type',
help='Specify attribute type in GTF annotation, default=exon',
default='exon')
self.parser.add_argument('--thread', help='thread', default=6)
self.parser.add_argument('--out_unmapped', help='out_unmapped', action='store_true')
self.parser.add_argument('--outFilterMatchNmin', help='STAR outFilterMatchNmin', default=0)
def count_args(self):
self.parser.add_argument('--rescue', help='rescue low UMI cells', action='store_true')
def analysis_args(self):
self.parser.add_argument('--save_rds', action='store_true', help='write rds to disk')
self.parser.add_argument('--type_marker_tsv', help='cell type marker tsv')
def custome_args(self):
self.STAR_args()
self.count_args()
self.analysis_args()
def parse_args(self):
self.multi_opts()
self.barcode_args()
self.custome_args()
self.args = self.parser.parse_args()
# read args
self.outdir = self.args.outdir
self.overlap = self.args.overlap
self.minimum_length = self.args.minimum_length
self.insert = self.args.insert
self.mod = self.args.mod
self.rm_files = self.args.rm_files
self.steps_run = self.args.steps_run
if self.__CONDA__ == 'celescope_RD':
self.debug_str = '--debug'
else:
self.debug_str = Multi.arg_str(self.args.debug, 'debug')
self.read_barcode_args()
self.read_custome_args()
@staticmethod
def arg_str(arg, arg_name):
'''
return action store_true arguments as string
'''
if arg:
return '--' + arg_name
return ''
def read_STAR_args(self):
self.thread = self.args.thread
self.genomeDir = self.args.genomeDir
self.starMem = self.args.starMem
self.gtf_type = self.args.gtf_type
self.out_unmapped = Multi.arg_str(self.args.out_unmapped, 'out_unmapped')
self.outFilterMatchNmin = self.args.outFilterMatchNmin
def read_count_args(self):
self.rescue_str = Multi.arg_str(self.args.rescue, 'rescue')
def read_analysis_args(self):
self.save_rds = self.args.save_rds
self.save_rds_str = Multi.arg_str(self.save_rds, 'save_rds')
self.type_marker_tsv = self.args.type_marker_tsv
def read_custome_args(self):
self.read_STAR_args()
self.read_count_args()
self.read_analysis_args()
def prepare(self):
# parse_mapfile
self.fq_dict, self.col4_dict = parse_map_col4(self.args.mapfile, self.col4_default)
# link
link_data(self.outdir, self.fq_dict)
# mk log dir
self.logdir = self.outdir + '/log'
os.system('mkdir -p %s' % (self.logdir))
# script init
self.sjm_cmd = 'log_dir %s\n' % (self.logdir)
self.sjm_order = ''
self.shell_dict = defaultdict(str)
# outdir dict
self.outdir_dic = {}
for sample in self.fq_dict:
self.outdir_dic[sample] = {}
index = 0
for step in self.__STEPS__:
step_outdir = f"{self.outdir}/{sample}/{index:02d}.{step}"
self.outdir_dic[sample].update({step: step_outdir})
index += 1
def generate_cmd(self, cmd, step, sample, m, x):
self.sjm_cmd += f'''
job_begin
name {step}_{sample}
sched_options -w n -cwd -V -l vf={m}g,p={x}
cmd source activate {self.__CONDA__}; {cmd}
job_end
'''
def process_cmd(self, cmd, step, sample, m=1, x=1):
self.generate_cmd(cmd, step, sample, m=m, x=x)
self.shell_dict[sample] += cmd + '\n'
if self.last_step:
self.sjm_order += f'order {step}_{sample} after {self.last_step}_{sample}\n'
self.last_step = step
def generate_first(self, cmd, step, sample, m=1, x=1):
self.generate_cmd(cmd, step, sample, m=1, x=1)
self.shell_dict[sample] += cmd + '\n'
self.last_step = step
def generate_other(self, cmd, step, sample, m=1, x=1):
self.generate_cmd(cmd, step, sample, m=1, x=1)
self.shell_dict[sample] += cmd + '\n'
self.sjm_order += f'order {step}_{sample} after {self.last_step}_{sample}\n'
self.last_step = step
def sample(self, sample):
step = "sample"
arr = self.fq_dict[sample]
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--chemistry {self.chemistry} '
f'--fq1 {arr[0]}'
)
self.process_cmd(cmd, step, sample, m=1, x=1)
def barcode(self, sample):
# barcode
arr = self.fq_dict[sample]
step = "barcode"
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--chemistry {self.chemistry} '
f'--fq1 {arr[0]} --fq2 {arr[1]} '
f'--pattern {self.pattern} --whitelist {self.whitelist} --linker {self.linker} '
f'--lowQual {self.lowQual} --thread {self.thread} '
f'--lowNum {self.lowNum} '
f'{self.allowNoPolyT_str} '
f'{self.allowNoLinker_str} '
f'{self.noLinker_str} '
f'{self.nopolyT_str} '
f'--probe_file {self.probe_file} '
)
self.process_cmd(cmd, step, sample, m=5, x=1)
def cutadapt(self, sample):
# adapt
step = "cutadapt"
fq = f'{self.outdir_dic[sample]["barcode"]}/{sample}_2.fq.gz'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step } '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--fq {fq} '
f'--overlap {self.overlap} '
f'--minimum_length {self.minimum_length} '
f'--insert {self.insert} '
)
self.process_cmd(cmd, step, sample, m=5, x=1)
def STAR(self, sample):
step = 'STAR'
fq = f'{self.outdir_dic[sample]["cutadapt"]}/{sample}_clean_2.fq.gz'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--fq {fq} '
f'--genomeDir {self.genomeDir} '
f'--thread {self.thread} '
f'{self.debug_str} '
f'--outFilterMatchNmin {self.outFilterMatchNmin} '
f'{self.out_unmapped} '
)
self.process_cmd(cmd, step, sample, m=self.starMem, x=self.thread)
def featureCounts(self, sample):
step = 'featureCounts'
input = f'{self.outdir_dic[sample]["STAR"]}/{sample}_Aligned.sortedByCoord.out.bam'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--input {input} --gtf_type {self.gtf_type} '
f'--genomeDir {self.genomeDir} '
f'--thread {self.thread} '
f'--gtf_type {self.gtf_type} '
)
self.process_cmd(cmd, step, sample, m=5, x=self.thread)
def count(self, sample):
step = 'count'
bam = f'{self.outdir_dic[sample]["featureCounts"]}/{sample}_name_sorted.bam'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--bam {bam} '
f'--cells {self.col4_dict[sample]} '
f'--genomeDir {self.genomeDir} '
f'{self.rescue_str} '
)
self.process_cmd(cmd, step, sample, m=10, x=1)
def analysis(self, sample):
step = 'analysis'
matrix_file = f'{self.outdir_dic[sample]["count"]}/{sample}_matrix.tsv.gz'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step } '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--matrix_file {matrix_file} '
f'{self.save_rds_str} '
f'--type_marker_tsv {self.type_marker_tsv} '
)
self.process_cmd(cmd, step, sample, m=10, x=1)
def run_steps(self):
if self.steps_run == 'all':
self.steps_run = self.__STEPS__
elif self.steps_run:
self.steps_run = self.steps_run.strip().split(',')
for sample in self.fq_dict:
self.last_step = ''
for step in self.steps_run:
eval(f'self.{step}(sample)')
def end(self):
if self.mod == 'sjm':
step = 'merge_report'
merge_report(
self.fq_dict,
self.__STEPS__,
self.last_step,
self.sjm_cmd,
self.sjm_order,
self.logdir,
self.__CONDA__,
self.outdir,
self.rm_files,
)
if self.mod == 'shell':
os.system('mkdir -p ./shell/')
for sample in self.shell_dict:
with open(f'./shell/{sample}.sh', 'w') as f:
f.write(self.shell_dict[sample])
def run(self):
self.parse_args()
self.prepare()
self.run_steps()
self.end() | 0.332202 | 0.057229 |
__author__ = "<NAME> <<EMAIL>>"
# External Includes
from copy import deepcopy
import numpy as np
import os
import torch
from unittest import TestCase
# Internal Includes
from rfml.data import Dataset, DatasetBuilder, Encoder
class TestDataset(TestCase):
    """Unit tests for rfml.data.Dataset behaviour (equality, addition,
    splitting, balance detection, and numpy/torch export)."""

    # 5 mods x 5 snrs x 1k examples = 25k entries in the dataset
    MODS = ["BPSK", "QPSK", "8PSK", "QAM16", "QAM64"]
    SNRS = [0.0, 5.0, 10.0, 15.0, 20.0]
    NEXAMPLES = 1000
    NSAMPLES = 1024

    @classmethod
    def _create_dataset(cls, extras=None):
        """Build a balanced dataset of zero IQ examples.

        *extras* maps additional metadata column names to the constant
        value stored for every example.  A ``None`` sentinel is used
        instead of a ``{}`` default to avoid the shared-mutable-default
        pitfall.
        """
        if extras is None:
            extras = {}
        keys = ["SNR", "Modulation"] + list(extras.keys())
        db = DatasetBuilder(n=cls.NSAMPLES, keys=keys)
        iq = np.zeros((2, cls.NSAMPLES))
        for mod in cls.MODS:
            for snr in cls.SNRS:
                for _ in range(cls.NEXAMPLES):
                    db.add(iq, SNR=snr, Modulation=mod, **extras)
        return db.build()

    @classmethod
    def setUpClass(cls):
        """Create a basic dataset for use in the underlying tests.
        """
        cls._data = cls._create_dataset()

    def test_equality(self):
        """Verify that the dataset.__eq__ method works
        """
        db = DatasetBuilder(n=TestDataset.NSAMPLES, keys=["SNR", "Modulation"])
        iq = np.zeros((2, TestDataset.NSAMPLES))
        # Deliberately imbalanced: i * NEXAMPLES examples per modulation
        for i, mod in enumerate(TestDataset.MODS):
            for snr in TestDataset.SNRS:
                for _ in range(i * TestDataset.NEXAMPLES):
                    db.add(iq, SNR=snr, Modulation=mod)
        baddata = db.build()
        copieddata = deepcopy(TestDataset._data)
        # By definition, a dataset must be equivalent to itself
        self.assertEqual(TestDataset._data, TestDataset._data)
        # Verify that equality works in both directions
        self.assertEqual(TestDataset._data, copieddata)
        self.assertEqual(copieddata, TestDataset._data)
        # Verify that inequality works in both directions
        self.assertNotEqual(TestDataset._data, baddata)
        self.assertNotEqual(baddata, TestDataset._data)

    def test_addition(self):
        """Verify that the dataset.__add__ method works
        """
        # Verify a basic example where everything matches
        d1 = TestDataset._create_dataset()
        d2 = TestDataset._create_dataset()
        d3 = TestDataset._create_dataset()
        combined = d1 + d2 + d3
        self.assertEqual(len(combined), len(d1) + len(d2) + len(d3))
        # Verify a harder example, where columns must be dropped
        k1 = {"Extra1": 1, "Extra2": 2}
        d1 = TestDataset._create_dataset(k1)
        k2 = {"Extra2": 2, "Extra3": 3}
        d2 = TestDataset._create_dataset(k2)
        combined = d1 + d2
        # Only the shared column survives the addition
        self.assertIn("Extra2", combined.df.columns)
        self.assertNotIn("Extra1", combined.df.columns)
        self.assertNotIn("Extra3", combined.df.columns)

    def test_isbalanced(self):
        """Verify that the Dataset can correctly detect imbalance.
        """
        db = DatasetBuilder(n=TestDataset.NSAMPLES, keys=["SNR", "Modulation"])
        iq = np.zeros((2, TestDataset.NSAMPLES))
        # Imbalanced in Modulation (i * NEXAMPLES per class), balanced in SNR
        for i, mod in enumerate(TestDataset.MODS):
            for snr in TestDataset.SNRS:
                for _ in range(i * TestDataset.NEXAMPLES):
                    db.add(iq, SNR=snr, Modulation=mod)
        baddata = db.build()
        self.assertTrue(TestDataset._data.is_balanced(label="Modulation"))
        self.assertTrue(TestDataset._data.is_balanced(label="SNR"))
        self.assertFalse(baddata.is_balanced(label="Modulation"))
        self.assertTrue(baddata.is_balanced(label="SNR"))

    def test_naivesplit(self):
        """Verify that the Dataset is naively split into two.
        """
        margin = 10e-3  # As percent error
        d1, d2 = TestDataset._data.split(frac=0.3)
        diff1 = len(d1) - 0.7 * len(TestDataset._data)
        diff2 = len(d2) - 0.3 * len(TestDataset._data)
        original = float(len(TestDataset._data))
        self.assertLessEqual(np.abs(diff1 / original), margin)
        self.assertLessEqual(np.abs(diff2 / original), margin)

    def test_intelligentsplit(self):
        """Verify that an intelligent split actually balances classes
        .. note::
            test_isbalanced also ensures that the classes are balanced a priori
            otherwise the test below would fail as well, but, not at the fault
            of the split method
        """
        margin = 10e-3
        d1, d2 = TestDataset._data.split(frac=0.3, on=["SNR", "Modulation"])
        diff1 = len(d1) - 0.7 * len(TestDataset._data)
        diff2 = len(d2) - 0.3 * len(TestDataset._data)
        original = float(len(TestDataset._data))
        self.assertLessEqual(np.abs(diff1 / original), margin)
        self.assertLessEqual(np.abs(diff2 / original), margin)
        self.assertTrue(d1.is_balanced(label="Modulation"))
        self.assertTrue(d1.is_balanced(label="SNR"))
        self.assertTrue(d2.is_balanced(label="Modulation"))
        self.assertTrue(d2.is_balanced(label="SNR"))

    def test_splitmaintainscount(self):
        """Verify a simple split does not change the total number of examples
        This protects against an off-by-one error in the split
        """
        d1, d2 = TestDataset._data.split(frac=0.3, on=["Modulation"])
        current = len(d1) + len(d2)
        original = len(TestDataset._data)
        self.assertEqual(current, original)

    def test_examplesperclass(self):
        """Verify that the examples per class are correctly computed.
        """
        # Verify the modulation examples are computed correctly
        epc = TestDataset._data.get_examples_per_class(label="Modulation")
        self.assertEqual(set(epc.keys()), set(TestDataset.MODS))
        expectation = TestDataset.NEXAMPLES * len(TestDataset.SNRS)
        for actual in epc.values():
            self.assertEqual(actual, expectation)
        # Verify the SNR examples are computed correctly
        epc = TestDataset._data.get_examples_per_class(label="SNR")
        self.assertEqual(set(epc.keys()), set(TestDataset.SNRS))
        expectation = TestDataset.NEXAMPLES * len(TestDataset.MODS)
        for actual in epc.values():
            self.assertEqual(actual, expectation)

    def test_asnumpy(self):
        """Verify that the asnumpy method returns the expected shapes.
        """
        le = Encoder(TestDataset.MODS, label_name="Modulation")
        x, y = TestDataset._data.as_numpy(le=le)
        self.assertEqual(x.shape, (len(TestDataset._data), 1, 2, TestDataset.NSAMPLES))
        self.assertEqual(y.shape, (len(TestDataset._data), 1))

    def test_astorch(self):
        """Verify that the astorch method returns the expected shapes.
        """
        le = Encoder(TestDataset.MODS, label_name="Modulation")
        dataset = TestDataset._data.as_torch(le=le)
        self.assertEqual(len(dataset), len(TestDataset._data))
        x, y = dataset[0]
        self.assertEqual(x.shape, (1, 2, TestDataset.NSAMPLES))
        self.assertEqual(y.dtype, torch.long)
# External Includes
from copy import deepcopy
import numpy as np
import os
import torch
from unittest import TestCase
# Internal Includes
from rfml.data import Dataset, DatasetBuilder, Encoder
class TestDataset(TestCase):
# 5 mods x 5 snrs x 1k examples = 25k entries in the dataset
MODS = ["BPSK", "QPSK", "8PSK", "QAM16", "QAM64"]
SNRS = [0.0, 5.0, 10.0, 15.0, 20.0]
NEXAMPLES = 1000
NSAMPLES = 1024
@classmethod
def _create_dataset(cls, extras: dict = {}):
keys = ["SNR", "Modulation"] + list(extras.keys())
db = DatasetBuilder(n=cls.NSAMPLES, keys=keys)
iq = np.zeros((2, cls.NSAMPLES))
for mod in cls.MODS:
for snr in cls.SNRS:
for _ in range(cls.NEXAMPLES):
db.add(iq, SNR=snr, Modulation=mod, **extras)
return db.build()
@classmethod
def setUpClass(cls):
"""Create a basic dataset for use in the underlying tests.
"""
cls._data = cls._create_dataset()
def test_equality(self):
"""Verify that the dataset.__eq__ method works
"""
db = DatasetBuilder(n=TestDataset.NSAMPLES, keys=["SNR", "Modulation"])
iq = np.zeros((2, TestDataset.NSAMPLES))
for i, mod in enumerate(TestDataset.MODS):
for snr in TestDataset.SNRS:
for _ in range(i * TestDataset.NEXAMPLES):
db.add(iq, SNR=snr, Modulation=mod)
baddata = db.build()
copieddata = deepcopy(TestDataset._data)
# By definition, a dataset must be equivalent to itself
self.assertEqual(TestDataset._data, TestDataset._data)
# Verify that equality works in both directions
self.assertEqual(TestDataset._data, copieddata)
self.assertEqual(copieddata, TestDataset._data)
# Verify that inequality works in both directions
self.assertNotEqual(TestDataset._data, baddata)
self.assertNotEqual(baddata, TestDataset._data)
def test_addition(self):
"""Verify that the dataset.__add__ method works
"""
# Verify a basic example where everything matches
d1 = TestDataset._create_dataset()
d2 = TestDataset._create_dataset()
d3 = TestDataset._create_dataset()
combined = d1 + d2 + d3
self.assertEqual(len(combined), len(d1) + len(d2) + len(d3))
# Verify a harder example, where columns must be dropped
k1 = {"Extra1": 1, "Extra2": 2}
d1 = TestDataset._create_dataset(k1)
k2 = {"Extra2": 2, "Extra3": 3}
d2 = TestDataset._create_dataset(k2)
combined = d1 + d2
self.assertIn("Extra2", combined.df.columns)
self.assertNotIn("Extra1", combined.df.columns)
self.assertNotIn("Extra3", combined.df.columns)
def test_isbalanced(self):
"""Verify that the Dataset can correctly detect imbalance.
"""
db = DatasetBuilder(n=TestDataset.NSAMPLES, keys=["SNR", "Modulation"])
iq = np.zeros((2, TestDataset.NSAMPLES))
for i, mod in enumerate(TestDataset.MODS):
for snr in TestDataset.SNRS:
for _ in range(i * TestDataset.NEXAMPLES):
db.add(iq, SNR=snr, Modulation=mod)
baddata = db.build()
self.assertTrue(TestDataset._data.is_balanced(label="Modulation"))
self.assertTrue(TestDataset._data.is_balanced(label="SNR"))
self.assertFalse(baddata.is_balanced(label="Modulation"))
self.assertTrue(baddata.is_balanced(label="SNR"))
def test_naivesplit(self):
"""Verify that the Dataset is naively split into two.
"""
margin = 10e-3 # As percent error
d1, d2 = TestDataset._data.split(frac=0.3)
diff1 = len(d1) - 0.7 * len(TestDataset._data)
diff2 = len(d2) - 0.3 * len(TestDataset._data)
original = float(len(TestDataset._data))
self.assertLessEqual(np.abs(diff1 / original), margin)
self.assertLessEqual(np.abs(diff2 / original), margin)
def test_intelligentsplit(self):
"""Verify that an intelligent split actually balances classes
.. note::
test_isbalanced also ensures that the classes are balanced a priori
otherwise the test below would fail as well, but, not at the fault
of the split method
"""
margin = 10e-3
d1, d2 = TestDataset._data.split(frac=0.3, on=["SNR", "Modulation"])
diff1 = len(d1) - 0.7 * len(TestDataset._data)
diff2 = len(d2) - 0.3 * len(TestDataset._data)
original = float(len(TestDataset._data))
self.assertLessEqual(np.abs(diff1 / original), margin)
self.assertLessEqual(np.abs(diff2 / original), margin)
self.assertTrue(d1.is_balanced(label="Modulation"))
self.assertTrue(d1.is_balanced(label="SNR"))
self.assertTrue(d2.is_balanced(label="Modulation"))
self.assertTrue(d2.is_balanced(label="SNR"))
def test_splitmaintainscount(self):
    """Verify a simple split does not change the total number of examples.

    This protects against an off-by-one error in the split.
    """
    part_a, part_b = TestDataset._data.split(frac=0.3, on=["Modulation"])
    # No example may be duplicated or dropped by the partition.
    self.assertEqual(len(part_a) + len(part_b), len(TestDataset._data))
def test_examplesperclass(self):
    """Verify that the examples per class are correctly computed.

    For each label the fixture holds NEXAMPLES examples per (SNR, Modulation)
    pair, so each class count equals NEXAMPLES times the size of the other
    label's class set.
    """
    cases = (
        ("Modulation", TestDataset.MODS, len(TestDataset.SNRS)),
        ("SNR", TestDataset.SNRS, len(TestDataset.MODS)),
    )
    for label, classes, multiplier in cases:
        counts = TestDataset._data.get_examples_per_class(label=label)
        # Every class must be present, each with the same expected count.
        self.assertEqual(set(counts.keys()), set(classes))
        expected = TestDataset.NEXAMPLES * multiplier
        for count in counts.values():
            self.assertEqual(count, expected)
def test_asnumpy(self):
    """Verify that the as_numpy method returns the expected shapes.

    x has shape (n_examples, 1, 2, NSAMPLES) -- presumably
    (example, channel, I/Q, sample); confirm against Dataset.as_numpy.
    y holds one encoded label per example.
    """
    le = Encoder(TestDataset.MODS, label_name="Modulation")
    x, y = TestDataset._data.as_numpy(le=le)
    self.assertEqual(x.shape, (len(TestDataset._data), 1, 2, TestDataset.NSAMPLES))
    self.assertEqual(y.shape, (len(TestDataset._data), 1))
def test_astorch(self):
    """Verify that the as_torch method returns the expected shapes.

    The torch dataset must contain one (x, y) item per example, where x has
    shape (1, 2, NSAMPLES) and y is a long (int64) class index.
    """
    # Fix: removed dataset-dump junk ("| 0.798187 | 0.470311") that had been
    # fused onto the final assertion line, breaking the syntax.
    le = Encoder(TestDataset.MODS, label_name="Modulation")
    dataset = TestDataset._data.as_torch(le=le)
    self.assertEqual(len(dataset), len(TestDataset._data))
    x, y = dataset[0]
    self.assertEqual(x.shape, (1, 2, TestDataset.NSAMPLES))
    self.assertEqual(y.dtype, torch.long)
import smart_imports
smart_imports.all()
from the_tale.statistics.metrics import registrations
from the_tale.statistics.metrics import lifetime
from the_tale.statistics.metrics import monetization
from the_tale.statistics.metrics import actual
from the_tale.statistics.metrics import forum
from the_tale.statistics.metrics import bills
from the_tale.statistics.metrics import folclor
# Ordered list of every statistics metric recalculated by this command.
# NOTE(review): names like statistics_metrics_registrations appear to be
# injected by smart_imports.all(); the explicit from-imports above look
# redundant -- confirm against the smart_imports configuration.
# Class-name typos (InfinitPremiums, IncomeFromGoodsPeferences,
# FULL_CLEAR_RECUIRED) are defined upstream and must be kept as-is here.
METRICS = [
    # --- Registrations ---
    statistics_metrics_registrations.RegistrationsCompleted,
    statistics_metrics_registrations.RegistrationsTries,
    statistics_metrics_registrations.RegistrationsCompletedPercents,
    statistics_metrics_registrations.RegistrationsCompletedInMonth,
    statistics_metrics_registrations.RegistrationsTriesInMonth,
    statistics_metrics_registrations.RegistrationsCompletedPercentsInMonth,
    statistics_metrics_registrations.AccountsTotal,
    statistics_metrics_registrations.Referrals,
    statistics_metrics_registrations.ReferralsTotal,
    statistics_metrics_registrations.ReferralsPercents,
    statistics_metrics_registrations.ReferralsInMonth,

    # --- Currently active audience ---
    statistics_metrics_actual.Premiums,
    statistics_metrics_actual.InfinitPremiums,
    statistics_metrics_actual.PremiumPercents,
    statistics_metrics_actual.Active,
    statistics_metrics_actual.DAU,
    statistics_metrics_actual.MAU,
    statistics_metrics_actual.ActiveOlderDay,
    statistics_metrics_actual.ActiveOlderWeek,
    statistics_metrics_actual.ActiveOlderMonth,
    statistics_metrics_actual.ActiveOlder3Month,
    statistics_metrics_actual.ActiveOlder6Month,
    statistics_metrics_actual.ActiveOlderYear,

    # --- Retention / lifetime ---
    statistics_metrics_lifetime.AliveAfterDay,
    statistics_metrics_lifetime.AliveAfterWeek,
    statistics_metrics_lifetime.AliveAfterMonth,
    statistics_metrics_lifetime.AliveAfter3Month,
    statistics_metrics_lifetime.AliveAfter6Month,
    statistics_metrics_lifetime.AliveAfterYear,
    statistics_metrics_lifetime.AliveAfter0,
    statistics_metrics_lifetime.Lifetime,
    statistics_metrics_lifetime.LifetimePercent,

    # --- Monetization ---
    statistics_metrics_monetization.Payers,
    statistics_metrics_monetization.Income,
    statistics_metrics_monetization.PayersInMonth,
    statistics_metrics_monetization.IncomeInMonth,
    statistics_metrics_monetization.ARPPU,
    statistics_metrics_monetization.ARPU,
    statistics_metrics_monetization.ARPPUInMonth,
    statistics_metrics_monetization.ARPUInMonth,
    statistics_metrics_monetization.PU,
    statistics_metrics_monetization.PUPercents,
    statistics_metrics_monetization.IncomeTotal,
    statistics_metrics_monetization.DaysBeforePayment,
    statistics_metrics_monetization.ARPNUWeek,
    statistics_metrics_monetization.ARPNUMonth,
    statistics_metrics_monetization.ARPNU3Month,
    statistics_metrics_monetization.LTV,
    statistics_metrics_monetization.Revenue,
    statistics_metrics_monetization.IncomeFromForum,
    statistics_metrics_monetization.IncomeFromSilent,
    statistics_metrics_monetization.IncomeFromGuildMembers,
    statistics_metrics_monetization.IncomeFromSingles,
    statistics_metrics_monetization.IncomeFromForumPercents,
    statistics_metrics_monetization.IncomeFromSilentPercents,
    statistics_metrics_monetization.IncomeFromGuildMembersPercents,
    statistics_metrics_monetization.IncomeFromSinglesPercents,
    statistics_metrics_monetization.IncomeFromGoodsPremium,
    statistics_metrics_monetization.IncomeFromGoodsEnergy,
    statistics_metrics_monetization.IncomeFromGoodsChest,
    statistics_metrics_monetization.IncomeFromGoodsPeferences,
    statistics_metrics_monetization.IncomeFromGoodsPreferencesReset,
    statistics_metrics_monetization.IncomeFromGoodsHabits,
    statistics_metrics_monetization.IncomeFromGoodsAbilities,
    statistics_metrics_monetization.IncomeFromGoodsClans,
    statistics_metrics_monetization.IncomeFromGoodsMarketCommission,
    statistics_metrics_monetization.IncomeFromTransferMoneyCommission,
    statistics_metrics_monetization.IncomeFromGoodsPremiumPercents,
    statistics_metrics_monetization.IncomeFromGoodsEnergyPercents,
    statistics_metrics_monetization.IncomeFromGoodsChestPercents,
    statistics_metrics_monetization.IncomeFromGoodsPeferencesPercents,
    statistics_metrics_monetization.IncomeFromGoodsPreferencesResetPercents,
    statistics_metrics_monetization.IncomeFromGoodsHabitsPercents,
    statistics_metrics_monetization.IncomeFromGoodsAbilitiesPercents,
    statistics_metrics_monetization.IncomeFromGoodsClansPercents,
    statistics_metrics_monetization.IncomeFromGoodsMarketCommissionPercents,
    statistics_metrics_monetization.IncomeFromTransferMoneyCommissionPercents,
    statistics_metrics_monetization.IncomeGroup0_500,
    statistics_metrics_monetization.IncomeGroup500_1000,
    statistics_metrics_monetization.IncomeGroup1000_2500,
    statistics_metrics_monetization.IncomeGroup2500_10000,
    statistics_metrics_monetization.IncomeGroup10000,
    statistics_metrics_monetization.IncomeGroup0_500Percents,
    statistics_metrics_monetization.IncomeGroup500_1000Percents,
    statistics_metrics_monetization.IncomeGroup1000_2500Percents,
    statistics_metrics_monetization.IncomeGroup2500_10000Percents,
    statistics_metrics_monetization.IncomeGroup10000Percents,
    statistics_metrics_monetization.IncomeGroupIncome0_500,
    statistics_metrics_monetization.IncomeGroupIncome500_1000,
    statistics_metrics_monetization.IncomeGroupIncome1000_2500,
    statistics_metrics_monetization.IncomeGroupIncome2500_10000,
    statistics_metrics_monetization.IncomeGroupIncome10000,
    statistics_metrics_monetization.IncomeGroupIncome0_500Percents,
    statistics_metrics_monetization.IncomeGroupIncome500_1000Percents,
    statistics_metrics_monetization.IncomeGroupIncome1000_2500Percents,
    statistics_metrics_monetization.IncomeGroupIncome2500_10000Percents,
    statistics_metrics_monetization.IncomeGroupIncome10000Percents,

    # --- Forum activity ---
    statistics_metrics_forum.Posts,
    statistics_metrics_forum.PostsInMonth,
    statistics_metrics_forum.PostsTotal,
    statistics_metrics_forum.Threads,
    statistics_metrics_forum.ThreadsInMonth,
    statistics_metrics_forum.ThreadsTotal,
    statistics_metrics_forum.PostsPerThreadInMonth,

    # --- Bills (in-game politics) ---
    statistics_metrics_bills.Bills,
    statistics_metrics_bills.BillsInMonth,
    statistics_metrics_bills.BillsTotal,
    statistics_metrics_bills.Votes,
    statistics_metrics_bills.VotesInMonth,
    statistics_metrics_bills.VotesTotal,
    statistics_metrics_bills.VotesPerBillInMonth,

    # --- Folclor (player-written stories) ---
    statistics_metrics_folclor.Posts,
    statistics_metrics_folclor.PostsInMonth,
    statistics_metrics_folclor.PostsTotal,
    statistics_metrics_folclor.Votes,
    statistics_metrics_folclor.VotesInMonth,
    statistics_metrics_folclor.VotesTotal,
    statistics_metrics_folclor.VotesPerPostInMonth
]
class Command(django_management.BaseCommand):
    """Recalculate every statistics metric and refresh the cached aggregate.

    Fixes applied: restored the indentation lost in this dump, and removed
    dataset-dump junk fused onto the final line.
    """

    help = 'complete statistics'

    def add_arguments(self, parser):
        """Register the command-line flags on the argparse parser."""
        super(Command, self).add_arguments(parser)
        parser.add_argument('-f', '--force-clear', action='store_true', dest='force-clear',
                            help='force clear all metrics')
        parser.add_argument('-l', '--log', action='store_true', dest='verbose',
                            help='print log')
        parser.add_argument('-r', '--recalculate-last', action='store_true', dest='recalculate-last',
                            help='recalculate last day')

    def handle(self, *args, **options):
        """Entry point: optionally clear, then recompute all metrics in order."""
        force_clear = options.get('force-clear')
        verbose = options.get('verbose')
        recalculate = options.get('recalculate-last')

        # Drop the most recent day's records first so the loop below recomputes them.
        if recalculate:
            for MetricClass in METRICS:
                prototypes.RecordPrototype._db_filter(date=MetricClass._last_datetime().date(),
                                                      type=MetricClass.TYPE).delete()

        # Clear metrics that require a full rebuild (or all of them on --force-clear).
        # NOTE(review): FULL_CLEAR_RECUIRED is a misspelled attribute defined on the
        # metric classes; renaming it would have to start there.
        for MetricClass in METRICS:
            if force_clear or MetricClass.FULL_CLEAR_RECUIRED:
                if verbose:
                    print('clear %s' % MetricClass.TYPE)
                MetricClass.clear()

        # Recompute every metric, in list order.
        for i, MetricClass in enumerate(METRICS):
            metric = MetricClass()
            if verbose:
                print('[%3d] calculate %s' % (i, metric.TYPE))
            metric.initialize()
            metric.complete_values()

        # Replace the cached aggregate consumed by the statistics pages.
        models.FullStatistics.objects.all().delete()
        models.FullStatistics.objects.create(data=prototypes.RecordPrototype.get_js_data())
smart_imports.all()
from the_tale.statistics.metrics import registrations
from the_tale.statistics.metrics import lifetime
from the_tale.statistics.metrics import monetization
from the_tale.statistics.metrics import actual
from the_tale.statistics.metrics import forum
from the_tale.statistics.metrics import bills
from the_tale.statistics.metrics import folclor
# Ordered list of every statistics metric recalculated by this command.
# NOTE(review): names like statistics_metrics_registrations appear to be
# injected by smart_imports.all(); the explicit from-imports above look
# redundant -- confirm against the smart_imports configuration.
# Class-name typos (InfinitPremiums, IncomeFromGoodsPeferences,
# FULL_CLEAR_RECUIRED) are defined upstream and must be kept as-is here.
METRICS = [
    # --- Registrations ---
    statistics_metrics_registrations.RegistrationsCompleted,
    statistics_metrics_registrations.RegistrationsTries,
    statistics_metrics_registrations.RegistrationsCompletedPercents,
    statistics_metrics_registrations.RegistrationsCompletedInMonth,
    statistics_metrics_registrations.RegistrationsTriesInMonth,
    statistics_metrics_registrations.RegistrationsCompletedPercentsInMonth,
    statistics_metrics_registrations.AccountsTotal,
    statistics_metrics_registrations.Referrals,
    statistics_metrics_registrations.ReferralsTotal,
    statistics_metrics_registrations.ReferralsPercents,
    statistics_metrics_registrations.ReferralsInMonth,

    # --- Currently active audience ---
    statistics_metrics_actual.Premiums,
    statistics_metrics_actual.InfinitPremiums,
    statistics_metrics_actual.PremiumPercents,
    statistics_metrics_actual.Active,
    statistics_metrics_actual.DAU,
    statistics_metrics_actual.MAU,
    statistics_metrics_actual.ActiveOlderDay,
    statistics_metrics_actual.ActiveOlderWeek,
    statistics_metrics_actual.ActiveOlderMonth,
    statistics_metrics_actual.ActiveOlder3Month,
    statistics_metrics_actual.ActiveOlder6Month,
    statistics_metrics_actual.ActiveOlderYear,

    # --- Retention / lifetime ---
    statistics_metrics_lifetime.AliveAfterDay,
    statistics_metrics_lifetime.AliveAfterWeek,
    statistics_metrics_lifetime.AliveAfterMonth,
    statistics_metrics_lifetime.AliveAfter3Month,
    statistics_metrics_lifetime.AliveAfter6Month,
    statistics_metrics_lifetime.AliveAfterYear,
    statistics_metrics_lifetime.AliveAfter0,
    statistics_metrics_lifetime.Lifetime,
    statistics_metrics_lifetime.LifetimePercent,

    # --- Monetization ---
    statistics_metrics_monetization.Payers,
    statistics_metrics_monetization.Income,
    statistics_metrics_monetization.PayersInMonth,
    statistics_metrics_monetization.IncomeInMonth,
    statistics_metrics_monetization.ARPPU,
    statistics_metrics_monetization.ARPU,
    statistics_metrics_monetization.ARPPUInMonth,
    statistics_metrics_monetization.ARPUInMonth,
    statistics_metrics_monetization.PU,
    statistics_metrics_monetization.PUPercents,
    statistics_metrics_monetization.IncomeTotal,
    statistics_metrics_monetization.DaysBeforePayment,
    statistics_metrics_monetization.ARPNUWeek,
    statistics_metrics_monetization.ARPNUMonth,
    statistics_metrics_monetization.ARPNU3Month,
    statistics_metrics_monetization.LTV,
    statistics_metrics_monetization.Revenue,
    statistics_metrics_monetization.IncomeFromForum,
    statistics_metrics_monetization.IncomeFromSilent,
    statistics_metrics_monetization.IncomeFromGuildMembers,
    statistics_metrics_monetization.IncomeFromSingles,
    statistics_metrics_monetization.IncomeFromForumPercents,
    statistics_metrics_monetization.IncomeFromSilentPercents,
    statistics_metrics_monetization.IncomeFromGuildMembersPercents,
    statistics_metrics_monetization.IncomeFromSinglesPercents,
    statistics_metrics_monetization.IncomeFromGoodsPremium,
    statistics_metrics_monetization.IncomeFromGoodsEnergy,
    statistics_metrics_monetization.IncomeFromGoodsChest,
    statistics_metrics_monetization.IncomeFromGoodsPeferences,
    statistics_metrics_monetization.IncomeFromGoodsPreferencesReset,
    statistics_metrics_monetization.IncomeFromGoodsHabits,
    statistics_metrics_monetization.IncomeFromGoodsAbilities,
    statistics_metrics_monetization.IncomeFromGoodsClans,
    statistics_metrics_monetization.IncomeFromGoodsMarketCommission,
    statistics_metrics_monetization.IncomeFromTransferMoneyCommission,
    statistics_metrics_monetization.IncomeFromGoodsPremiumPercents,
    statistics_metrics_monetization.IncomeFromGoodsEnergyPercents,
    statistics_metrics_monetization.IncomeFromGoodsChestPercents,
    statistics_metrics_monetization.IncomeFromGoodsPeferencesPercents,
    statistics_metrics_monetization.IncomeFromGoodsPreferencesResetPercents,
    statistics_metrics_monetization.IncomeFromGoodsHabitsPercents,
    statistics_metrics_monetization.IncomeFromGoodsAbilitiesPercents,
    statistics_metrics_monetization.IncomeFromGoodsClansPercents,
    statistics_metrics_monetization.IncomeFromGoodsMarketCommissionPercents,
    statistics_metrics_monetization.IncomeFromTransferMoneyCommissionPercents,
    statistics_metrics_monetization.IncomeGroup0_500,
    statistics_metrics_monetization.IncomeGroup500_1000,
    statistics_metrics_monetization.IncomeGroup1000_2500,
    statistics_metrics_monetization.IncomeGroup2500_10000,
    statistics_metrics_monetization.IncomeGroup10000,
    statistics_metrics_monetization.IncomeGroup0_500Percents,
    statistics_metrics_monetization.IncomeGroup500_1000Percents,
    statistics_metrics_monetization.IncomeGroup1000_2500Percents,
    statistics_metrics_monetization.IncomeGroup2500_10000Percents,
    statistics_metrics_monetization.IncomeGroup10000Percents,
    statistics_metrics_monetization.IncomeGroupIncome0_500,
    statistics_metrics_monetization.IncomeGroupIncome500_1000,
    statistics_metrics_monetization.IncomeGroupIncome1000_2500,
    statistics_metrics_monetization.IncomeGroupIncome2500_10000,
    statistics_metrics_monetization.IncomeGroupIncome10000,
    statistics_metrics_monetization.IncomeGroupIncome0_500Percents,
    statistics_metrics_monetization.IncomeGroupIncome500_1000Percents,
    statistics_metrics_monetization.IncomeGroupIncome1000_2500Percents,
    statistics_metrics_monetization.IncomeGroupIncome2500_10000Percents,
    statistics_metrics_monetization.IncomeGroupIncome10000Percents,

    # --- Forum activity ---
    statistics_metrics_forum.Posts,
    statistics_metrics_forum.PostsInMonth,
    statistics_metrics_forum.PostsTotal,
    statistics_metrics_forum.Threads,
    statistics_metrics_forum.ThreadsInMonth,
    statistics_metrics_forum.ThreadsTotal,
    statistics_metrics_forum.PostsPerThreadInMonth,

    # --- Bills (in-game politics) ---
    statistics_metrics_bills.Bills,
    statistics_metrics_bills.BillsInMonth,
    statistics_metrics_bills.BillsTotal,
    statistics_metrics_bills.Votes,
    statistics_metrics_bills.VotesInMonth,
    statistics_metrics_bills.VotesTotal,
    statistics_metrics_bills.VotesPerBillInMonth,

    # --- Folclor (player-written stories) ---
    statistics_metrics_folclor.Posts,
    statistics_metrics_folclor.PostsInMonth,
    statistics_metrics_folclor.PostsTotal,
    statistics_metrics_folclor.Votes,
    statistics_metrics_folclor.VotesInMonth,
    statistics_metrics_folclor.VotesTotal,
    statistics_metrics_folclor.VotesPerPostInMonth
]
class Command(django_management.BaseCommand):
    """Recalculate every statistics metric and refresh the cached aggregate.

    Fixes applied: restored the indentation lost in this dump, and removed
    dataset-dump junk ("| 0.313525 | 0.499451") fused onto the final line.
    """

    help = 'complete statistics'

    def add_arguments(self, parser):
        """Register the command-line flags on the argparse parser."""
        super(Command, self).add_arguments(parser)
        parser.add_argument('-f', '--force-clear', action='store_true', dest='force-clear',
                            help='force clear all metrics')
        parser.add_argument('-l', '--log', action='store_true', dest='verbose',
                            help='print log')
        parser.add_argument('-r', '--recalculate-last', action='store_true', dest='recalculate-last',
                            help='recalculate last day')

    def handle(self, *args, **options):
        """Entry point: optionally clear, then recompute all metrics in order."""
        force_clear = options.get('force-clear')
        verbose = options.get('verbose')
        recalculate = options.get('recalculate-last')

        # Drop the most recent day's records first so the loop below recomputes them.
        if recalculate:
            for MetricClass in METRICS:
                prototypes.RecordPrototype._db_filter(date=MetricClass._last_datetime().date(),
                                                      type=MetricClass.TYPE).delete()

        # Clear metrics that require a full rebuild (or all of them on --force-clear).
        # NOTE(review): FULL_CLEAR_RECUIRED is a misspelled attribute defined on the
        # metric classes; renaming it would have to start there.
        for MetricClass in METRICS:
            if force_clear or MetricClass.FULL_CLEAR_RECUIRED:
                if verbose:
                    print('clear %s' % MetricClass.TYPE)
                MetricClass.clear()

        # Recompute every metric, in list order.
        for i, MetricClass in enumerate(METRICS):
            metric = MetricClass()
            if verbose:
                print('[%3d] calculate %s' % (i, metric.TYPE))
            metric.initialize()
            metric.complete_values()

        # Replace the cached aggregate consumed by the statistics pages.
        models.FullStatistics.objects.all().delete()
        models.FullStatistics.objects.create(data=prototypes.RecordPrototype.get_js_data())
"""
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8

Class that defines a parallel coordinates chart display as a post-processing.
"""
import plotly.graph_objects as go
from sos_trades_core.tools.post_processing.post_processing_tools import escape_str_with_comma
from sos_trades_core.tools.post_processing.post_processing_plotly_tooling import AbstractPostProcessingPlotlyTooling
class ParallelCoordinatesTrace:
    """ One trace (vertical axis) of a parallel coordinates chart
    """

    # Supported trace kinds: categorical text labels vs numeric values
    TRACE_TEXT = 'text'
    TRACE_NUMBER = 'number'

    def __init__(self, trace_name='', trace_values=None, trace_type=TRACE_NUMBER):
        """ Init of the class

        @param trace_name: name of the trace
        @type str

        @param trace_values: values of each vertical axis (defaults to an
        empty list)
        @type list

        @param trace_type: type of the trace (TRACE_TEXT or TRACE_NUMBER)
        @type str

        @raises TypeError: if trace_values is not a list or trace_type is unknown
        """
        self.trace_name = trace_name

        # Fix: the previous signature used a mutable default argument ([]),
        # so all instances created without explicit values shared one list.
        if trace_values is None:
            trace_values = []

        if not isinstance(trace_values, list):
            message = f'"trace_values" argument is intended to be a list not {type(trace_values)}'
            raise TypeError(message)

        if trace_type not in (self.TRACE_NUMBER, self.TRACE_TEXT):
            message = '"trace_type" argument is intended to be "number" or "text"'
            raise TypeError(message)

        self.trace_values = trace_values
        self.trace_type = trace_type
class InstantiatedParallelCoordinatesChart(AbstractPostProcessingPlotlyTooling):
    """ Parallel coordinates chart displayed as a post-processing result

    Fixes applied: restored the indentation lost in this dump and removed
    dataset-dump junk fused onto the final line.
    """

    def __init__(self, chart_name=''):
        """ Init of the class

        @param chart_name: name of the chart
        @type str
        """
        super().__init__()

        # List of ParallelCoordinatesTrace instances (one per vertical axis)
        self.__traces = []

        # Chart name
        self.chart_name = chart_name

    def add_trace(self, trace):
        """ Add a trace to the current parallel coordinates chart

        @param trace: trace instance to add
        @type ParallelCoordinatesTrace

        @raises TypeError: if trace has the wrong type, or a second text
        trace is added
        """
        if not isinstance(trace, ParallelCoordinatesTrace):
            message = f'"trace" argument is intended to be a ParallelCoordinatesTrace not {type(trace)}'
            raise TypeError(message)

        # Only one text trace is allowed: it drives the line colouring in
        # to_plotly.
        if trace.trace_type == ParallelCoordinatesTrace.TRACE_TEXT:
            if len(list(filter(lambda tr: tr.trace_type == ParallelCoordinatesTrace.TRACE_TEXT, self.__traces))) > 0:
                message = 'You have already set a trace with trace_type text, only one is allowed for the chart'
                raise TypeError(message)

        self.__traces.append(trace)

    def to_plotly(self, logger=None):
        """ Convert current instance into a plotly object

        @param logger: logging object to log message
        @type Logging.logger

        @return plotly.graph_objects.Figure instance
        """
        pc_dimensions = []

        # First add number traces, padding each axis range by 10%.
        for trace in self.__traces:
            if trace.trace_type == ParallelCoordinatesTrace.TRACE_NUMBER:
                # NOTE(review): abs(max) - abs(min) yields zero or negative
                # padding for symmetric or negative data; max - min (peak to
                # peak) looks intended -- confirm before changing.
                range_values = abs(max(trace.trace_values)) - \
                    abs(min(trace.trace_values))
                pc_dimensions.append(dict(label=trace.trace_name,
                                          values=trace.trace_values,
                                          range=[min(trace.trace_values) - 0.10 * range_values,
                                                 max(trace.trace_values) + 0.10 * range_values]))

        # Second add the (single) text trace: categories are mapped to
        # integer tick ids, which also drive the line colouring.
        line_config = dict(autocolorscale=True,
                           showscale=False)
        for trace in self.__traces:
            if trace.trace_type == ParallelCoordinatesTrace.TRACE_TEXT:
                id_values = []
                tick_texts = []
                for index, tick_text in enumerate(trace.trace_values, start=1):
                    id_values.append(index)
                    tick_texts.append(tick_text)
                pc_dimensions.append(dict(label=trace.trace_name,
                                          values=id_values,
                                          tickvals=id_values,
                                          ticktext=tick_texts))
                line_config['color'] = id_values

        fig = go.Figure(data=go.Parcoords(
            line=line_config,
            dimensions=list(pc_dimensions)
        )
        )

        layout = {}
        layout.update(
            {'title': self.get_default_title_layout(self.chart_name)})
        layout.update({'width': 600})
        layout.update({'height': 450})
        layout.update({'autosize': False})
        layout.update({'font': self.get_default_font_layout()})
        fig.update_layout(layout)

        return fig

    def __to_csv(self):
        """ Serialize the traces to CSV lines (one column per named trace)
        and store the result through set_csv_data
        """
        global_list = []
        header = []
        max_len = 0
        for trace in self.__traces:
            if trace.trace_name is not None and len(trace.trace_name):
                header.append(escape_str_with_comma(f'{trace.trace_name}'))
            global_list.append(trace.trace_values)
            if len(trace.trace_values) > max_len:
                max_len = len(trace.trace_values)

        csv_list = [','.join(header)]
        # Rows are padded with empty cells when traces have unequal lengths.
        for i in range(max_len):
            csv_line = []
            for gl in global_list:
                if i < len(gl):
                    csv_line.append(escape_str_with_comma(f'{gl[i]}'))
                else:
                    csv_line.append('')
            csv_list.append(','.join(csv_line))
        self.set_csv_data(csv_list)

    def to_plotly_dict(self, logger=None):
        """ Convert current instance to a plotly object and then to a dictionary

        @param logger: logger instance
        @type Logging.logger
        """
        json = self.to_plotly(logger).to_dict()

        # Lazily attach the CSV export alongside the plotly payload.
        if self._plot_csv_data is None:
            self.__to_csv()
        json[self.CSV_DATA] = self._plot_csv_data

        json[self.LOGO_NOTOFFICIAL] = self.logo_notofficial
        json[self.LOGO_OFFICIAL] = self.logo_official
        json[self.LOGO_WORK_IN_PROGRESS] = self.logo_work_in_progress

        return json
"""
Class that defines a parallel coordinates chart display as a post-processing.
"""
import plotly.graph_objects as go
from sos_trades_core.tools.post_processing.post_processing_tools import escape_str_with_comma
from sos_trades_core.tools.post_processing.post_processing_plotly_tooling import AbstractPostProcessingPlotlyTooling
class ParallelCoordinatesTrace:
    """ One trace (vertical axis) of a parallel coordinates chart
    """

    # Supported trace kinds: categorical text labels vs numeric values
    TRACE_TEXT = 'text'
    TRACE_NUMBER = 'number'

    def __init__(self, trace_name='', trace_values=None, trace_type=TRACE_NUMBER):
        """ Init of the class

        @param trace_name: name of the trace
        @type str

        @param trace_values: values of each vertical axis (defaults to an
        empty list)
        @type list

        @param trace_type: type of the trace (TRACE_TEXT or TRACE_NUMBER)
        @type str

        @raises TypeError: if trace_values is not a list or trace_type is unknown
        """
        self.trace_name = trace_name

        # Fix: the previous signature used a mutable default argument ([]),
        # so all instances created without explicit values shared one list.
        if trace_values is None:
            trace_values = []

        if not isinstance(trace_values, list):
            message = f'"trace_values" argument is intended to be a list not {type(trace_values)}'
            raise TypeError(message)

        if trace_type not in (self.TRACE_NUMBER, self.TRACE_TEXT):
            message = '"trace_type" argument is intended to be "number" or "text"'
            raise TypeError(message)

        self.trace_values = trace_values
        self.trace_type = trace_type
class InstantiatedParallelCoordinatesChart(AbstractPostProcessingPlotlyTooling):
    """ Parallel coordinates chart displayed as a post-processing result

    Fixes applied: restored the indentation lost in this dump and removed
    dataset-dump junk ("| 0.798737 | 0.307774") fused onto the final line.
    """

    def __init__(self, chart_name=''):
        """ Init of the class

        @param chart_name: name of the chart
        @type str
        """
        super().__init__()

        # List of ParallelCoordinatesTrace instances (one per vertical axis)
        self.__traces = []

        # Chart name
        self.chart_name = chart_name

    def add_trace(self, trace):
        """ Add a trace to the current parallel coordinates chart

        @param trace: trace instance to add
        @type ParallelCoordinatesTrace

        @raises TypeError: if trace has the wrong type, or a second text
        trace is added
        """
        if not isinstance(trace, ParallelCoordinatesTrace):
            message = f'"trace" argument is intended to be a ParallelCoordinatesTrace not {type(trace)}'
            raise TypeError(message)

        # Only one text trace is allowed: it drives the line colouring in
        # to_plotly.
        if trace.trace_type == ParallelCoordinatesTrace.TRACE_TEXT:
            if len(list(filter(lambda tr: tr.trace_type == ParallelCoordinatesTrace.TRACE_TEXT, self.__traces))) > 0:
                message = 'You have already set a trace with trace_type text, only one is allowed for the chart'
                raise TypeError(message)

        self.__traces.append(trace)

    def to_plotly(self, logger=None):
        """ Convert current instance into a plotly object

        @param logger: logging object to log message
        @type Logging.logger

        @return plotly.graph_objects.Figure instance
        """
        pc_dimensions = []

        # First add number traces, padding each axis range by 10%.
        for trace in self.__traces:
            if trace.trace_type == ParallelCoordinatesTrace.TRACE_NUMBER:
                # NOTE(review): abs(max) - abs(min) yields zero or negative
                # padding for symmetric or negative data; max - min (peak to
                # peak) looks intended -- confirm before changing.
                range_values = abs(max(trace.trace_values)) - \
                    abs(min(trace.trace_values))
                pc_dimensions.append(dict(label=trace.trace_name,
                                          values=trace.trace_values,
                                          range=[min(trace.trace_values) - 0.10 * range_values,
                                                 max(trace.trace_values) + 0.10 * range_values]))

        # Second add the (single) text trace: categories are mapped to
        # integer tick ids, which also drive the line colouring.
        line_config = dict(autocolorscale=True,
                           showscale=False)
        for trace in self.__traces:
            if trace.trace_type == ParallelCoordinatesTrace.TRACE_TEXT:
                id_values = []
                tick_texts = []
                for index, tick_text in enumerate(trace.trace_values, start=1):
                    id_values.append(index)
                    tick_texts.append(tick_text)
                pc_dimensions.append(dict(label=trace.trace_name,
                                          values=id_values,
                                          tickvals=id_values,
                                          ticktext=tick_texts))
                line_config['color'] = id_values

        fig = go.Figure(data=go.Parcoords(
            line=line_config,
            dimensions=list(pc_dimensions)
        )
        )

        layout = {}
        layout.update(
            {'title': self.get_default_title_layout(self.chart_name)})
        layout.update({'width': 600})
        layout.update({'height': 450})
        layout.update({'autosize': False})
        layout.update({'font': self.get_default_font_layout()})
        fig.update_layout(layout)

        return fig

    def __to_csv(self):
        """ Serialize the traces to CSV lines (one column per named trace)
        and store the result through set_csv_data
        """
        global_list = []
        header = []
        max_len = 0
        for trace in self.__traces:
            if trace.trace_name is not None and len(trace.trace_name):
                header.append(escape_str_with_comma(f'{trace.trace_name}'))
            global_list.append(trace.trace_values)
            if len(trace.trace_values) > max_len:
                max_len = len(trace.trace_values)

        csv_list = [','.join(header)]
        # Rows are padded with empty cells when traces have unequal lengths.
        for i in range(max_len):
            csv_line = []
            for gl in global_list:
                if i < len(gl):
                    csv_line.append(escape_str_with_comma(f'{gl[i]}'))
                else:
                    csv_line.append('')
            csv_list.append(','.join(csv_line))
        self.set_csv_data(csv_list)

    def to_plotly_dict(self, logger=None):
        """ Convert current instance to a plotly object and then to a dictionary

        @param logger: logger instance
        @type Logging.logger
        """
        json = self.to_plotly(logger).to_dict()

        # Lazily attach the CSV export alongside the plotly payload.
        if self._plot_csv_data is None:
            self.__to_csv()
        json[self.CSV_DATA] = self._plot_csv_data

        json[self.LOGO_NOTOFFICIAL] = self.logo_notofficial
        json[self.LOGO_OFFICIAL] = self.logo_official
        json[self.LOGO_WORK_IN_PROGRESS] = self.logo_work_in_progress

        return json
from rest_framework import generics, status
from rest_framework.response import Response
from . import serializers
from .authentication import AUTH_HEADER_TYPES
from .exceptions import InvalidToken, TokenError
class TokenViewBase(generics.GenericAPIView):
    """Shared machinery for the token endpoints.

    These endpoints are deliberately unauthenticated (credentials or tokens
    arrive in the request body); each subclass only supplies the serializer
    that performs the actual validation.
    """

    permission_classes = ()
    authentication_classes = ()

    serializer_class = None

    www_authenticate_realm = 'api'

    def get_authenticate_header(self, request):
        # Value advertised in the WWW-Authenticate header of 401 responses.
        auth_scheme = AUTH_HEADER_TYPES[0]
        return '{0} realm="{1}"'.format(auth_scheme, self.www_authenticate_realm)

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        except TokenError as token_error:
            # Surface token failures as a DRF-friendly authentication error.
            raise InvalidToken(token_error.args[0])
        return Response(serializer.validated_data, status=status.HTTP_200_OK)
# Concrete token endpoints: each pairs a TokenViewBase subclass with its
# serializer, plus the module-level view callable used by URL configs.
# Fix: removed dataset-dump junk fused onto the final line.


class TokenObtainPairView(TokenViewBase):
    """
    Takes a set of user credentials and returns an access and refresh JSON web
    token pair to prove the authentication of those credentials.
    """
    serializer_class = serializers.TokenObtainPairSerializer


token_obtain_pair = TokenObtainPairView.as_view()


class TokenRefreshView(TokenViewBase):
    """
    Takes a refresh type JSON web token and returns an access type JSON web
    token if the refresh token is valid.
    """
    serializer_class = serializers.TokenRefreshSerializer


token_refresh = TokenRefreshView.as_view()


class TokenObtainSlidingView(TokenViewBase):
    """
    Takes a set of user credentials and returns a sliding JSON web token to
    prove the authentication of those credentials.
    """
    serializer_class = serializers.TokenObtainSlidingSerializer


token_obtain_sliding = TokenObtainSlidingView.as_view()


class TokenRefreshSlidingView(TokenViewBase):
    """
    Takes a sliding JSON web token and returns a new, refreshed version if the
    token's refresh period has not expired.
    """
    serializer_class = serializers.TokenRefreshSlidingSerializer


token_refresh_sliding = TokenRefreshSlidingView.as_view()


class TokenVerifyView(TokenViewBase):
    """
    Takes a token and indicates if it is valid.  This view provides no
    information about a token's fitness for a particular use.
    """
    serializer_class = serializers.TokenVerifySerializer


token_verify = TokenVerifyView.as_view()


class TokenBlacklistView(TokenViewBase):
    """
    Takes a token and blacklists it.  Must be used with the
    `rest_framework_simplejwt.token_blacklist` app installed.
    """
    serializer_class = serializers.TokenBlacklistSerializer


token_blacklist = TokenBlacklistView.as_view()
from rest_framework.response import Response
from . import serializers
from .authentication import AUTH_HEADER_TYPES
from .exceptions import InvalidToken, TokenError
class TokenViewBase(generics.GenericAPIView):
permission_classes = ()
authentication_classes = ()
serializer_class = None
www_authenticate_realm = 'api'
def get_authenticate_header(self, request):
return '{0} realm="{1}"'.format(
AUTH_HEADER_TYPES[0],
self.www_authenticate_realm,
)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except TokenError as e:
raise InvalidToken(e.args[0])
return Response(serializer.validated_data, status=status.HTTP_200_OK)
# Concrete token endpoints: each pairs a TokenViewBase subclass with its
# serializer, plus the module-level view callable used by URL configs.
# Fix: removed dataset-dump junk ("| 0.727975 | 0.174375") fused onto the
# final line.


class TokenObtainPairView(TokenViewBase):
    """
    Takes a set of user credentials and returns an access and refresh JSON web
    token pair to prove the authentication of those credentials.
    """
    serializer_class = serializers.TokenObtainPairSerializer


token_obtain_pair = TokenObtainPairView.as_view()


class TokenRefreshView(TokenViewBase):
    """
    Takes a refresh type JSON web token and returns an access type JSON web
    token if the refresh token is valid.
    """
    serializer_class = serializers.TokenRefreshSerializer


token_refresh = TokenRefreshView.as_view()


class TokenObtainSlidingView(TokenViewBase):
    """
    Takes a set of user credentials and returns a sliding JSON web token to
    prove the authentication of those credentials.
    """
    serializer_class = serializers.TokenObtainSlidingSerializer


token_obtain_sliding = TokenObtainSlidingView.as_view()


class TokenRefreshSlidingView(TokenViewBase):
    """
    Takes a sliding JSON web token and returns a new, refreshed version if the
    token's refresh period has not expired.
    """
    serializer_class = serializers.TokenRefreshSlidingSerializer


token_refresh_sliding = TokenRefreshSlidingView.as_view()


class TokenVerifyView(TokenViewBase):
    """
    Takes a token and indicates if it is valid.  This view provides no
    information about a token's fitness for a particular use.
    """
    serializer_class = serializers.TokenVerifySerializer


token_verify = TokenVerifyView.as_view()


class TokenBlacklistView(TokenViewBase):
    """
    Takes a token and blacklists it.  Must be used with the
    `rest_framework_simplejwt.token_blacklist` app installed.
    """
    serializer_class = serializers.TokenBlacklistSerializer


token_blacklist = TokenBlacklistView.as_view()
import random
from geomstats.geometry.full_rank_correlation_matrices import (
CorrelationMatricesBundle,
FullRankCorrelationMatrices,
)
from geomstats.geometry.symmetric_matrices import SymmetricMatrices
from tests.data_generation import TestData, _LevelSetTestData
class RankFullRankCorrelationMatricesTestData(_LevelSetTestData):
    # Randomized problem sizes, drawn once at import time: matrix dimension n
    # and the batch sizes used for points and tangent vectors.
    n_list = random.sample(range(2, 4), 2)
    space_args_list = [(n,) for n in n_list]
    shape_list = [(n, n) for n in n_list]
    n_points_list = random.sample(range(2, 5), 2)
    n_vecs_list = random.sample(range(2, 5), 2)

    def random_point_belongs_test_data(self):
        """Test data: random_point outputs belong to the space.

        Combines fixed smoke cases (n=2, n=3) with the randomized sizes.
        """
        smoke_space_args_list = [(2,), (3,)]
        smoke_n_points_list = [1, 2]
        return self._random_point_belongs_test_data(
            smoke_space_args_list,
            smoke_n_points_list,
            self.space_args_list,
            self.n_points_list,
        )

    def projection_belongs_test_data(self):
        """Test data: projecting an ambient point lands in the space."""
        return self._projection_belongs_test_data(
            self.space_args_list, self.shape_list, self.n_points_list
        )

    def to_tangent_is_tangent_test_data(self):
        """Test data: to_tangent outputs are tangent vectors."""
        return self._to_tangent_is_tangent_test_data(
            FullRankCorrelationMatrices,
            self.space_args_list,
            self.shape_list,
            self.n_vecs_list,
        )

    def random_tangent_vec_is_tangent_test_data(self):
        """Test data: random_tangent_vec outputs are tangent vectors."""
        return self._random_tangent_vec_is_tangent_test_data(
            FullRankCorrelationMatrices, self.space_args_list, self.n_vecs_list
        )
class CorrelationMatricesBundleTestData(TestData):
    """Randomized test data for the correlation-matrices fiber bundle.

    Each method builds a list of keyword dicts (one per configuration) and
    hands it to TestData.generate_tests with an empty smoke-data list.
    """
    # NOTE(review): both samples have length 1 (range(2, 3) only contains 2),
    # so every generator below yields exactly one random configuration.
    n_list = random.sample(range(2, 3), 1)
    n_samples_list = random.sample(range(1, 3), 1)
    def riemannian_submersion_belongs_to_base_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            point = bundle.base.random_point(n_samples)
            random_data.append(dict(n=n, point=point))
        return self.generate_tests([], random_data)
    def lift_riemannian_submersion_composition_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            point = bundle.base.random_point(n_samples)
            random_data.append(dict(n=n, point=point))
        return self.generate_tests([], random_data)
    def tangent_riemannian_submersion_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            # Project a total-space point down to the base before testing.
            mat = bundle.random_point()
            point = bundle.riemannian_submersion(mat)
            vec = bundle.random_point(n_samples)
            random_data.append(dict(n=n, vec=vec, point=point))
        return self.generate_tests([], random_data)
    def vertical_projection_tangent_submersion_test_data(self):
        random_data = []
        for n in self.n_list:
            bundle = CorrelationMatricesBundle(n)
            mat = bundle.random_point(2)
            vec = SymmetricMatrices(n).random_point(2)
            random_data.append(dict(n=n, vec=vec, mat=mat))
        return self.generate_tests([], random_data)
    def horizontal_projection_test_data(self):
        random_data = []
        for n in self.n_list:
            bundle = CorrelationMatricesBundle(n)
            mat = bundle.random_point()
            vec = bundle.random_point()
            random_data.append(dict(n=n, vec=vec, mat=mat))
        return self.generate_tests([], random_data)
    def horizontal_lift_is_horizontal_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            mat = bundle.base.random_point()
            vec = bundle.base.random_point(n_samples)
            tangent_vec = bundle.base.to_tangent(vec, mat)
            random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
        return self.generate_tests([], random_data)
    def vertical_projection_is_vertical_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            mat = bundle.random_point()
            vec = bundle.random_point(n_samples)
            tangent_vec = bundle.base.to_tangent(vec, mat)
            random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
        return self.generate_tests([], random_data)
    def horizontal_lift_and_tangent_riemannian_submersion_test_data(self):
        random_data = []
        for n, n_samples in zip(self.n_list, self.n_samples_list):
            bundle = CorrelationMatricesBundle(n)
            mat = bundle.base.random_point()
            vec = bundle.base.random_point(n_samples)
            tangent_vec = bundle.base.to_tangent(vec, mat)
            random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
        return self.generate_tests([], random_data)
    def log_after_align_is_horizontal_test_data(self):
        # Uses a local, deterministic size list instead of the class-level one.
        n_list = [2, 3]
        random_data = []
        for n in n_list:
            bundle = CorrelationMatricesBundle(n)
            point = bundle.random_point(2)
            random_data.append(dict(n=n, point_a=point[0], point_b=point[1]))
        return self.generate_tests([], random_data)
class FullRankcorrelationAffineQuotientMetricTestData(TestData):
def exp_log_composition_test_data(self):
bundle = CorrelationMatricesBundle(3)
point = bundle.riemannian_submersion(bundle.random_point(2))
random_data = [dict(dim=3, point=point)]
return self.generate_tests([], random_data)
def exp_belongs_test_data(self):
bundle = CorrelationMatricesBundle(3)
base_point = bundle.base.random_point()
tangent_vec = bundle.base.to_tangent(bundle.random_point(), base_point)
smoke_data = [dict(dim=3, tangent_vec=tangent_vec, base_point=base_point)]
return self.generate_tests(smoke_data) | tests/data/full_rank_correlation_matrices_data.py | import random
from geomstats.geometry.full_rank_correlation_matrices import (
CorrelationMatricesBundle,
FullRankCorrelationMatrices,
)
from geomstats.geometry.symmetric_matrices import SymmetricMatrices
from tests.data_generation import TestData, _LevelSetTestData
class RankFullRankCorrelationMatricesTestData(_LevelSetTestData):
n_list = random.sample(range(2, 4), 2)
space_args_list = [(n,) for n in n_list]
shape_list = [(n, n) for n in n_list]
n_points_list = random.sample(range(2, 5), 2)
n_vecs_list = random.sample(range(2, 5), 2)
def random_point_belongs_test_data(self):
smoke_space_args_list = [(2,), (3,)]
smoke_n_points_list = [1, 2]
return self._random_point_belongs_test_data(
smoke_space_args_list,
smoke_n_points_list,
self.space_args_list,
self.n_points_list,
)
def projection_belongs_test_data(self):
return self._projection_belongs_test_data(
self.space_args_list, self.shape_list, self.n_points_list
)
def to_tangent_is_tangent_test_data(self):
return self._to_tangent_is_tangent_test_data(
FullRankCorrelationMatrices,
self.space_args_list,
self.shape_list,
self.n_vecs_list,
)
def random_tangent_vec_is_tangent_test_data(self):
return self._random_tangent_vec_is_tangent_test_data(
FullRankCorrelationMatrices, self.space_args_list, self.n_vecs_list
)
class CorrelationMatricesBundleTestData(TestData):
n_list = random.sample(range(2, 3), 1)
n_samples_list = random.sample(range(1, 3), 1)
def riemannian_submersion_belongs_to_base_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
point = bundle.base.random_point(n_samples)
random_data.append(dict(n=n, point=point))
return self.generate_tests([], random_data)
def lift_riemannian_submersion_composition_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
point = bundle.base.random_point(n_samples)
random_data.append(dict(n=n, point=point))
return self.generate_tests([], random_data)
def tangent_riemannian_submersion_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
point = bundle.riemannian_submersion(mat)
vec = bundle.random_point(n_samples)
random_data.append(dict(n=n, vec=vec, point=point))
return self.generate_tests([], random_data)
def vertical_projection_tangent_submersion_test_data(self):
random_data = []
for n in self.n_list:
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point(2)
vec = SymmetricMatrices(n).random_point(2)
random_data.append(dict(n=n, vec=vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_projection_test_data(self):
random_data = []
for n in self.n_list:
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
vec = bundle.random_point()
random_data.append(dict(n=n, vec=vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_lift_is_horizontal_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.base.random_point()
vec = bundle.base.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def vertical_projection_is_vertical_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
vec = bundle.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_lift_and_tangent_riemannian_submersion_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.base.random_point()
vec = bundle.base.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def log_after_align_is_horizontal_test_data(self):
n_list = [2, 3]
random_data = []
for n in n_list:
bundle = CorrelationMatricesBundle(n)
point = bundle.random_point(2)
random_data.append(dict(n=n, point_a=point[0], point_b=point[1]))
return self.generate_tests([], random_data)
class FullRankcorrelationAffineQuotientMetricTestData(TestData):
def exp_log_composition_test_data(self):
bundle = CorrelationMatricesBundle(3)
point = bundle.riemannian_submersion(bundle.random_point(2))
random_data = [dict(dim=3, point=point)]
return self.generate_tests([], random_data)
def exp_belongs_test_data(self):
bundle = CorrelationMatricesBundle(3)
base_point = bundle.base.random_point()
tangent_vec = bundle.base.to_tangent(bundle.random_point(), base_point)
smoke_data = [dict(dim=3, tangent_vec=tangent_vec, base_point=base_point)]
return self.generate_tests(smoke_data) | 0.549399 | 0.434221 |
from __future__ import annotations
import argparse
import logging
import os
import tempfile
from enum import Enum
import attr
from .fanout_test_driver import (
Binaries,
run_scenario_saved_state_init,
run_scenario_incremental_no_old_decls,
run_scenario_incremental_with_old_decls,
)
from .fanout_test_parser import FanoutTest
# Default to WARNING; the --debug flag lowers the root logger to DEBUG.
logging.basicConfig(
    format="[%(asctime)s] [%(levelname)s] %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S %Z",
    level=logging.WARNING,
)
class Mode(Enum):
    """Fanout test scenario, selected on the command line via --mode."""
    SAVED_STATE_INIT = "saved-state-init"
    INCREMENTAL_OLD_DECLS_ENABLED = "incremental-old-decls-enabled"
    INCREMENTAL_OLD_DECLS_DISABLED = "incremental-old-decls-disabled"
    def __str__(self) -> str:
        # Render as the CLI spelling so argparse choices/help read naturally.
        return self.value
@attr.s(auto_attribs=True)
class Opts(object):
    """Parsed command-line options; field names mirror the CLI flags."""
    hh_client: str
    hh_server: str
    hh_single_type_check: str
    legacy_hh_fanout: str
    debug: bool
    mode: Mode
    input_file: str
    def to_bins(self) -> Binaries:
        # Repackage just the binary paths in the shape the test driver wants.
        return Binaries(
            hh_client=self.hh_client,
            hh_server=self.hh_server,
            legacy_hh_fanout=self.legacy_hh_fanout,
            hh_single_type_check=self.hh_single_type_check,
        )
def get_temporary_dir(prefix: str) -> tempfile.TemporaryDirectory[str]:
    """Create a temporary directory whose name starts with *prefix*.

    The directory is rooted at $TEMP when set (sandcastle points TEMP at a
    directory that is cleaned up when the job ends); otherwise the platform
    default temp location is used.
    """
    parent = os.getenv("TEMP")
    return tempfile.TemporaryDirectory(prefix=prefix, dir=parent)
def go(opts: Opts) -> None:
    """Run the fanout scenario selected by *opts* against its test file."""
    logging.debug("hh_client: %s", opts.hh_client)
    logging.debug("hh_server: %s", opts.hh_server)
    logging.debug("hh_single_type_check: %s", opts.hh_single_type_check)
    logging.debug("legacy_hh_fanout: %s", opts.legacy_hh_fanout)
    logging.debug("mode: %s", opts.mode)
    logging.debug("input_file: %s", opts.input_file)
    test = FanoutTest.from_file(opts.input_file)
    # Dispatch table keyed by mode; all runners share the same signature.
    runners = {
        Mode.SAVED_STATE_INIT: run_scenario_saved_state_init,
        Mode.INCREMENTAL_OLD_DECLS_ENABLED: run_scenario_incremental_with_old_decls,
        Mode.INCREMENTAL_OLD_DECLS_DISABLED: run_scenario_incremental_no_old_decls,
    }
    runner = runners.get(opts.mode)
    if runner is None:
        raise AssertionError()
    runner(opts.to_bins(), test)
def main() -> None:
    """Parse the command line, set up logging, and run the scenario."""
    arg_parser = argparse.ArgumentParser()
    # Binary paths are normalized to absolute so later chdirs cannot break them.
    arg_parser.add_argument("--hh-client", type=os.path.abspath)
    arg_parser.add_argument("--hh-server", type=os.path.abspath)
    arg_parser.add_argument("--hh-single-type-check", type=os.path.abspath)
    arg_parser.add_argument("--legacy-hh-fanout", type=os.path.abspath)
    arg_parser.add_argument("--debug", action="store_true")
    arg_parser.add_argument(
        "--mode", type=Mode, choices=list(Mode), default=Mode.SAVED_STATE_INIT
    )
    arg_parser.add_argument("input_file")
    cli_args = arg_parser.parse_args()
    # Field names of Opts match the argparse destinations one-to-one.
    opts = Opts(**vars(cli_args))
    if opts.debug:
        logging.getLogger().setLevel(level=logging.DEBUG)
    go(opts)
if __name__ == "__main__":
main() | hphp/hack/test/fanout/tools/fanout.py | from __future__ import annotations
import argparse
import logging
import os
import tempfile
from enum import Enum
import attr
from .fanout_test_driver import (
Binaries,
run_scenario_saved_state_init,
run_scenario_incremental_no_old_decls,
run_scenario_incremental_with_old_decls,
)
from .fanout_test_parser import FanoutTest
logging.basicConfig(
format="[%(asctime)s] [%(levelname)s] %(message)s",
datefmt="%m/%d/%Y %H:%M:%S %Z",
level=logging.WARNING,
)
class Mode(Enum):
SAVED_STATE_INIT = "saved-state-init"
INCREMENTAL_OLD_DECLS_ENABLED = "incremental-old-decls-enabled"
INCREMENTAL_OLD_DECLS_DISABLED = "incremental-old-decls-disabled"
def __str__(self) -> str:
return self.value
@attr.s(auto_attribs=True)
class Opts(object):
hh_client: str
hh_server: str
hh_single_type_check: str
legacy_hh_fanout: str
debug: bool
mode: Mode
input_file: str
def to_bins(self) -> Binaries:
return Binaries(
hh_client=self.hh_client,
hh_server=self.hh_server,
legacy_hh_fanout=self.legacy_hh_fanout,
hh_single_type_check=self.hh_single_type_check,
)
def get_temporary_dir(prefix: str) -> tempfile.TemporaryDirectory[str]:
# sandcastle sets TEMP as a directory that's cleaned up when the job ends
return tempfile.TemporaryDirectory(prefix=prefix, dir=os.getenv("TEMP"))
def go(opts: Opts) -> None:
logging.debug("hh_client: %s", opts.hh_client)
logging.debug("hh_server: %s", opts.hh_server)
logging.debug("hh_single_type_check: %s", opts.hh_single_type_check)
logging.debug("legacy_hh_fanout: %s", opts.legacy_hh_fanout)
logging.debug("mode: %s", opts.mode)
logging.debug("input_file: %s", opts.input_file)
test = FanoutTest.from_file(opts.input_file)
if opts.mode is Mode.SAVED_STATE_INIT:
run_scenario_saved_state_init(opts.to_bins(), test)
elif opts.mode is Mode.INCREMENTAL_OLD_DECLS_ENABLED:
run_scenario_incremental_with_old_decls(opts.to_bins(), test)
elif opts.mode is Mode.INCREMENTAL_OLD_DECLS_DISABLED:
run_scenario_incremental_no_old_decls(opts.to_bins(), test)
else:
raise AssertionError()
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--hh-client", type=os.path.abspath)
parser.add_argument("--hh-server", type=os.path.abspath)
parser.add_argument("--hh-single-type-check", type=os.path.abspath)
parser.add_argument("--legacy-hh-fanout", type=os.path.abspath)
parser.add_argument("--debug", action="store_true")
parser.add_argument(
"--mode", type=Mode, choices=list(Mode), default=Mode.SAVED_STATE_INIT
)
parser.add_argument("input_file")
args = parser.parse_args()
opts = Opts(**vars(args))
if opts.debug:
logging.getLogger().setLevel(level=logging.DEBUG)
go(opts)
if __name__ == "__main__":
main() | 0.551574 | 0.069384 |
import os
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from ..hooks.hdfs_hook import HDFSHook
from ..hooks.pyhive_hook import PyHiveHook
from ..utils import (create_concat_filename, concat_avro_files,
sample_avro_file, get_dataframe_from_avro)
# Hive SerDe/format classes for Avro-backed tables, used in the DDL below.
INPUT_FMT = 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
OUTPUT_FMT = 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
ROW_FORMAT = 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
class IngestAvroIntoHiveOperator(BaseOperator):
    """Moves Avro file into HDFS and inserts into a Hive table.
    :param str target_table: Hive table name to insert into.
    :param str avro_schema_path: Avro schema path in HDFS.
    :param src_filepath: Local Avro file path or callable that produces one,
        supports a single path or list of paths (templated).
    :param str hdfs_processing_dir: Directory to use for processing.
    :param bool concat_src: Concatenate Avro source files before loading.
    :param bool remove_src: Remove Avro source file.
    """
    # Only the string/list form of src_filepath is rendered by Airflow's
    # templating (see __init__ for how the two forms are stored).
    template_fields = ['src_filepath_str']
    ui_color = '#ffefeb'
    @apply_defaults
    def __init__(self, target_table: str, avro_schema_path: str,
                 src_filepath, hdfs_processing_dir: str,
                 concat_src: bool = True, remove_src: bool = True,
                 *args, **kwargs):
        """See the class docstring for parameter semantics."""
        super(IngestAvroIntoHiveOperator, self).__init__(*args, **kwargs)
        self.target_table = target_table
        self.avro_schema_path = avro_schema_path
        # src_filepath is either a (templatable) string/list or a callable
        # invoked with the task context at execute() time. Exactly one of the
        # two attributes below is set; the other is None.
        if isinstance(src_filepath, (str, list)):
            self.src_filepath_str = src_filepath
            self.src_filepath_callable = None
        elif callable(src_filepath):
            self.src_filepath_str = None
            self.src_filepath_callable = src_filepath
        else:
            raise AirflowException(
                f'Incompatible src_filepath {src_filepath!r}.')
        # Strip trailing slash so path joins against this dir are unambiguous.
        self.hdfs_processing_dir = hdfs_processing_dir.rstrip('/')
        self.concat_src = concat_src
        self.remove_src = remove_src
    def _concat_src_files(self, src_filepaths: list) -> list:
        """Concatenate the local Avro sources into one file.

        Returns a one-element list holding the concatenated file's path. If a
        source is missing but a concatenated file from a previous attempt
        exists, that file is reused; otherwise an AssertionError is raised.
        """
        src_concat_path = create_concat_filename(*src_filepaths)
        for src_path in src_filepaths:
            if not os.path.exists(src_path):
                if os.path.exists(src_concat_path):
                    # Give up and rely on concatenated source from last run.
                    return [src_concat_path]
                else:
                    raise AssertionError(f"Don't know what to do with "
                                         f"non-existent source file {src_path}")
        concat_avro_files(src_filepaths, src_concat_path)
        # The originals are redundant once the concatenated file exists.
        if self.remove_src:
            for src_path in src_filepaths:
                self.log.info(f'Removing {src_path}.')
                os.remove(src_path)
        return [src_concat_path]
    def _get_target_partitions(self, src_filepaths: list) -> "tuple[str, str, str]":
        """Sample the source data and decide which Hive partitions to target.

        Returns (part_ds, part_h, part_en); an element is None when the
        corresponding value is not constant across the sample (or the input
        is empty), in which case dynamic partitioning is used downstream.
        """
        part_ds, part_h, part_en = None, None, None
        avro_sample_path = f'{create_concat_filename(*src_filepaths)}_max10k'
        sample_avro_file(src_filepaths, avro_sample_path, limit=10000,
                         sample_rate=0.1)
        df = get_dataframe_from_avro(avro_sample_path)
        os.remove(avro_sample_path)
        if df.empty:
            self.log.info('Empty input, no partitions to target')
            return part_ds, part_h, part_en
        # First 13 chars of server_date look like 'YYYY-MM-DD HH'.
        # NOTE(review): a sample only *suggests* uniformity -- unsampled rows
        # could still belong to other hours/events; confirm acceptable.
        ds_h = df.server_date.iloc[0][:13]
        if df.server_date.str.startswith(ds_h).all():
            part_ds, part_h = ds_h.split()
            self.log.info(f'Partition ds={part_ds} and h={part_h} in '
                          f'all {df.server_date.count()} samples')
        en = df.event_name.iloc[0]
        # noinspection PyUnresolvedReferences
        if (df.event_name == en).all():
            part_en = en
            self.log.info(f'Partition en={part_en} in all '
                          f'{df.event_name.count()} samples')
        return part_ds, part_h, part_en
    def _move_src_files_to_hdfs(self, src_filepaths: list) -> list:
        """Upload each local Avro file into the HDFS processing directory.

        Returns the HDFS paths. Files already present in HDFS are not
        re-uploaded, which makes retries of a partially-completed run cheap.
        """
        temp_avro_paths = []
        hdfs = HDFSHook().get_conn()
        for src_filepath in src_filepaths:
            temp_avro_path = os.path.join(self.hdfs_processing_dir,
                                          os.path.basename(src_filepath))
            if not hdfs.exists(temp_avro_path):
                self.log.info(f'Moving local file {src_filepath} to '
                              f'HDFS {temp_avro_path}')
                try:
                    hdfs.put(src_filepath, temp_avro_path, replication=1)
                except FileNotFoundError as err:
                    # NOTE(review): on failure the path is still appended
                    # below, so a missing source only logs -- confirm this
                    # best-effort behavior is intentional.
                    self.log.error(f'Upload failed: {err}')
            temp_avro_paths.append(temp_avro_path)
        return temp_avro_paths
    @staticmethod
    def _load_avro_into_temp_tables(temp_avro_paths: list,
                                    full_schema_path: str) -> list:
        """Create one Avro-backed temp Hive table per staged file and LOAD
        the file into it.

        Returns the temp table names. The LOAD is skipped when the table
        already holds a row, so re-running after a partial failure does not
        duplicate data.
        """
        temp_table_names = []
        conn = PyHiveHook().get_conn()
        cursor = conn.cursor()
        for temp_avro_path in temp_avro_paths:
            # Derive a valid Hive identifier from the staged file's name.
            temp_table_name = os.path.basename(temp_avro_path).replace(
                '.', '_').replace('-', '_')
            create_temp_table_stmt = f"""
            CREATE TABLE IF NOT EXISTS {temp_table_name}
            ROW FORMAT SERDE '{ROW_FORMAT}'
            STORED AS INPUTFORMAT '{INPUT_FMT}'
            OUTPUTFORMAT '{OUTPUT_FMT}'
            TBLPROPERTIES ('avro.schema.url'='{full_schema_path}')
            """
            print('--- create_temp_table_stmt ---')
            cursor.execute(create_temp_table_stmt)
            # Probe for an existing row: non-empty means a previous LOAD
            # already succeeded for this file.
            select_temp_row_stmt = f"""
            SELECT * FROM {temp_table_name} LIMIT 1
            """
            print('--- select_temp_row_stmt ---')
            cursor.execute(select_temp_row_stmt)
            if cursor.fetchone() is None:
                load_data_stmt = f"""
                LOAD DATA INPATH '{temp_avro_path}'
                INTO TABLE {temp_table_name}
                """
                print('--- load_data_stmt ---')
                cursor.execute(load_data_stmt)
            temp_table_names.append(temp_table_name)
        return temp_table_names
    def _insert_temp_data_into_target_table(self, temp_table_names: list,
                                            full_schema_path: str,
                                            part_ds: str = None,
                                            part_h: str = None,
                                            part_en: str = None) -> None:
        """Copy each temp table into the partitioned target table, then drop
        the temp table.

        When all three partition values are known they are written as static
        partitions (and a single temp table uses INSERT OVERWRITE so reruns
        replace rather than append); otherwise Hive dynamic partitioning
        fills in the unknown columns from the SELECT.
        """
        conn = PyHiveHook(configuration={
            'hive.exec.dynamic.partition.mode': 'nonstrict',
            'hive.exec.compress.output': 'true',
            'avro.output.codec': 'deflate'
        }).get_conn()
        cursor = conn.cursor()
        create_target_table_stmt = f"""
        CREATE EXTERNAL TABLE IF NOT EXISTS {self.target_table}
        PARTITIONED BY (ds STRING, h STRING, en STRING)
        ROW FORMAT SERDE '{ROW_FORMAT}'
        STORED AS INPUTFORMAT '{INPUT_FMT}'
        OUTPUTFORMAT '{OUTPUT_FMT}'
        TBLPROPERTIES ('avro.schema.url'='{full_schema_path}')
        """
        print('--- create_target_table_stmt ---')
        cursor.execute(create_target_table_stmt)
        insert_clause = 'INTO'
        if part_ds and part_h and part_en:
            partitions = f"ds='{part_ds}', h='{part_h}', en='{part_en}'"
            if len(temp_table_names) == 1:
                insert_clause = 'OVERWRITE TABLE'
        elif part_ds and part_h:
            partitions = f"ds='{part_ds}', h='{part_h}', en"
        else:
            partitions = "ds, h, en"
        for temp_table_name in temp_table_names:
            # Trick: for each statically-known partition value the matching
            # SELECT column is commented out ('-- '); for dynamic partitions
            # it is emitted (', ') so Hive derives the value per row.
            insert_data_stmt = f"""
            INSERT {insert_clause} {self.target_table}
            PARTITION ({partitions})
            SELECT
                *
                {'-- ' if part_ds else ', '}datestamp AS ds
                {'-- ' if part_h else ', '}substr(server_date, 12, 2) AS h
                {'-- ' if part_en else ', '}event_name AS en
            FROM
                {temp_table_name}
            """
            print('--- insert_data_stmt ---')
            cursor.execute(insert_data_stmt)
            drop_temp_table_stmt = f"""
            DROP TABLE {temp_table_name}
            """
            print('--- drop_temp_table_stmt ---')
            cursor.execute(drop_temp_table_stmt)
    def execute(self, context) -> None:
        """Airflow entry point: resolve source files, stage them in HDFS and
        load them into the target Hive table (via temp tables)."""
        avro_schema_path = self.avro_schema_path
        if self.src_filepath_callable:
            src_filepath = self.src_filepath_callable(**context)
        else:
            try:
                # HACK: the templated string may hold a Python list literal,
                # which eval() parses. SECURITY/robustness NOTE(review):
                # eval executes arbitrary template output, and a bare name
                # like 'data' raises an uncaught NameError (only SyntaxError
                # is handled) -- ast.literal_eval would be safer.
                src_filepath = eval(self.src_filepath_str)
            except SyntaxError:
                src_filepath = self.src_filepath_str
        if not src_filepath:
            self.log.info('No filepath(s) received, skipping.')
            raise AirflowSkipException()
        # Normalize to a list of paths from here on.
        if isinstance(src_filepath, list):
            src_filepaths = src_filepath
        else:
            src_filepaths = [src_filepath]
        if self.concat_src and len(src_filepaths) > 1:
            src_filepaths = self._concat_src_files(src_filepaths)
        part_ds, part_h, part_en = self._get_target_partitions(src_filepaths)
        hdfs = HDFSHook().get_conn()
        if not hdfs.exists(avro_schema_path):
            raise AirflowException(f'Avro schema {avro_schema_path} not found.')
        full_schema_path = f'hdfs://{hdfs.host}:{hdfs.port}{avro_schema_path}'
        temp_avro_paths = self._move_src_files_to_hdfs(src_filepaths)
        temp_table_names = self._load_avro_into_temp_tables(temp_avro_paths,
                                                            full_schema_path)
        self._insert_temp_data_into_target_table(
            temp_table_names, full_schema_path, part_ds=part_ds, part_h=part_h,
            part_en=part_en)
        # Sources were fully ingested; clean up the local copies if asked.
        if self.remove_src:
            for src_filepath in src_filepaths:
                self.log.info(f'Removing {src_filepath}.')
os.remove(src_filepath) | o3/operators/ingest_avro_into_hive_operator.py | import os
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from ..hooks.hdfs_hook import HDFSHook
from ..hooks.pyhive_hook import PyHiveHook
from ..utils import (create_concat_filename, concat_avro_files,
sample_avro_file, get_dataframe_from_avro)
INPUT_FMT = 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
OUTPUT_FMT = 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
ROW_FORMAT = 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
class IngestAvroIntoHiveOperator(BaseOperator):
"""Moves Avro file into HDFS and inserts into a Hive table.
:param str target_table: Hive table name to insert into.
:param str avro_schema_path: Avro schema path in HDFS.
:param src_filepath: Local Avro file path or callable that produces one,
supports a single path or list of paths (templated).
:param str hdfs_processing_dir: Directory to use for processing.
:param bool concat_src: Concatenate Avro source files before loading.
:param bool remove_src: Remove Avro source file.
"""
template_fields = ['src_filepath_str']
ui_color = '#ffefeb'
@apply_defaults
def __init__(self, target_table: str, avro_schema_path: str,
src_filepath, hdfs_processing_dir: str,
concat_src: bool = True, remove_src: bool = True,
*args, **kwargs):
super(IngestAvroIntoHiveOperator, self).__init__(*args, **kwargs)
self.target_table = target_table
self.avro_schema_path = avro_schema_path
if isinstance(src_filepath, (str, list)):
self.src_filepath_str = src_filepath
self.src_filepath_callable = None
elif callable(src_filepath):
self.src_filepath_str = None
self.src_filepath_callable = src_filepath
else:
raise AirflowException(
f'Incompatible src_filepath {src_filepath!r}.')
self.hdfs_processing_dir = hdfs_processing_dir.rstrip('/')
self.concat_src = concat_src
self.remove_src = remove_src
def _concat_src_files(self, src_filepaths: list) -> list:
src_concat_path = create_concat_filename(*src_filepaths)
for src_path in src_filepaths:
if not os.path.exists(src_path):
if os.path.exists(src_concat_path):
# Give up and rely on concatenated source from last run.
return [src_concat_path]
else:
raise AssertionError(f"Don't know what to do with "
f"non-existent source file {src_path}")
concat_avro_files(src_filepaths, src_concat_path)
if self.remove_src:
for src_path in src_filepaths:
self.log.info(f'Removing {src_path}.')
os.remove(src_path)
return [src_concat_path]
def _get_target_partitions(self, src_filepaths: list) -> (str, str, str):
part_ds, part_h, part_en = None, None, None
avro_sample_path = f'{create_concat_filename(*src_filepaths)}_max10k'
sample_avro_file(src_filepaths, avro_sample_path, limit=10000,
sample_rate=0.1)
df = get_dataframe_from_avro(avro_sample_path)
os.remove(avro_sample_path)
if df.empty:
self.log.info('Empty input, no partitions to target')
return part_ds, part_h, part_en
ds_h = df.server_date.iloc[0][:13]
if df.server_date.str.startswith(ds_h).all():
part_ds, part_h = ds_h.split()
self.log.info(f'Partition ds={part_ds} and h={part_h} in '
f'all {df.server_date.count()} samples')
en = df.event_name.iloc[0]
# noinspection PyUnresolvedReferences
if (df.event_name == en).all():
part_en = en
self.log.info(f'Partition en={part_en} in all '
f'{df.event_name.count()} samples')
return part_ds, part_h, part_en
def _move_src_files_to_hdfs(self, src_filepaths: list) -> list:
temp_avro_paths = []
hdfs = HDFSHook().get_conn()
for src_filepath in src_filepaths:
temp_avro_path = os.path.join(self.hdfs_processing_dir,
os.path.basename(src_filepath))
if not hdfs.exists(temp_avro_path):
self.log.info(f'Moving local file {src_filepath} to '
f'HDFS {temp_avro_path}')
try:
hdfs.put(src_filepath, temp_avro_path, replication=1)
except FileNotFoundError as err:
self.log.error(f'Upload failed: {err}')
temp_avro_paths.append(temp_avro_path)
return temp_avro_paths
@staticmethod
def _load_avro_into_temp_tables(temp_avro_paths: list,
full_schema_path: str) -> list:
temp_table_names = []
conn = PyHiveHook().get_conn()
cursor = conn.cursor()
for temp_avro_path in temp_avro_paths:
temp_table_name = os.path.basename(temp_avro_path).replace(
'.', '_').replace('-', '_')
create_temp_table_stmt = f"""
CREATE TABLE IF NOT EXISTS {temp_table_name}
ROW FORMAT SERDE '{ROW_FORMAT}'
STORED AS INPUTFORMAT '{INPUT_FMT}'
OUTPUTFORMAT '{OUTPUT_FMT}'
TBLPROPERTIES ('avro.schema.url'='{full_schema_path}')
"""
print('--- create_temp_table_stmt ---')
cursor.execute(create_temp_table_stmt)
select_temp_row_stmt = f"""
SELECT * FROM {temp_table_name} LIMIT 1
"""
print('--- select_temp_row_stmt ---')
cursor.execute(select_temp_row_stmt)
if cursor.fetchone() is None:
load_data_stmt = f"""
LOAD DATA INPATH '{temp_avro_path}'
INTO TABLE {temp_table_name}
"""
print('--- load_data_stmt ---')
cursor.execute(load_data_stmt)
temp_table_names.append(temp_table_name)
return temp_table_names
def _insert_temp_data_into_target_table(self, temp_table_names: list,
full_schema_path: str,
part_ds: str = None,
part_h: str = None,
part_en: str = None) -> None:
conn = PyHiveHook(configuration={
'hive.exec.dynamic.partition.mode': 'nonstrict',
'hive.exec.compress.output': 'true',
'avro.output.codec': 'deflate'
}).get_conn()
cursor = conn.cursor()
create_target_table_stmt = f"""
CREATE EXTERNAL TABLE IF NOT EXISTS {self.target_table}
PARTITIONED BY (ds STRING, h STRING, en STRING)
ROW FORMAT SERDE '{ROW_FORMAT}'
STORED AS INPUTFORMAT '{INPUT_FMT}'
OUTPUTFORMAT '{OUTPUT_FMT}'
TBLPROPERTIES ('avro.schema.url'='{full_schema_path}')
"""
print('--- create_target_table_stmt ---')
cursor.execute(create_target_table_stmt)
insert_clause = 'INTO'
if part_ds and part_h and part_en:
partitions = f"ds='{part_ds}', h='{part_h}', en='{part_en}'"
if len(temp_table_names) == 1:
insert_clause = 'OVERWRITE TABLE'
elif part_ds and part_h:
partitions = f"ds='{part_ds}', h='{part_h}', en"
else:
partitions = "ds, h, en"
for temp_table_name in temp_table_names:
insert_data_stmt = f"""
INSERT {insert_clause} {self.target_table}
PARTITION ({partitions})
SELECT
*
{'-- ' if part_ds else ', '}datestamp AS ds
{'-- ' if part_h else ', '}substr(server_date, 12, 2) AS h
{'-- ' if part_en else ', '}event_name AS en
FROM
{temp_table_name}
"""
print('--- insert_data_stmt ---')
cursor.execute(insert_data_stmt)
drop_temp_table_stmt = f"""
DROP TABLE {temp_table_name}
"""
print('--- drop_temp_table_stmt ---')
cursor.execute(drop_temp_table_stmt)
def execute(self, context) -> None:
avro_schema_path = self.avro_schema_path
if self.src_filepath_callable:
src_filepath = self.src_filepath_callable(**context)
else:
try:
src_filepath = eval(self.src_filepath_str)
except SyntaxError:
src_filepath = self.src_filepath_str
if not src_filepath:
self.log.info('No filepath(s) received, skipping.')
raise AirflowSkipException()
if isinstance(src_filepath, list):
src_filepaths = src_filepath
else:
src_filepaths = [src_filepath]
if self.concat_src and len(src_filepaths) > 1:
src_filepaths = self._concat_src_files(src_filepaths)
part_ds, part_h, part_en = self._get_target_partitions(src_filepaths)
hdfs = HDFSHook().get_conn()
if not hdfs.exists(avro_schema_path):
raise AirflowException(f'Avro schema {avro_schema_path} not found.')
full_schema_path = f'hdfs://{hdfs.host}:{hdfs.port}{avro_schema_path}'
temp_avro_paths = self._move_src_files_to_hdfs(src_filepaths)
temp_table_names = self._load_avro_into_temp_tables(temp_avro_paths,
full_schema_path)
self._insert_temp_data_into_target_table(
temp_table_names, full_schema_path, part_ds=part_ds, part_h=part_h,
part_en=part_en)
if self.remove_src:
for src_filepath in src_filepaths:
self.log.info(f'Removing {src_filepath}.')
os.remove(src_filepath) | 0.542742 | 0.222975 |
import os
import re
import logging
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
    """
    Tool info for DC-SSAT: A Divide-and-Conquer Approach to Solving Stochastic Satisfiability Problems Efficiently
    URL: https://www.aaai.org/Papers/AAAI/2005/AAAI05-066.pdf
    """
    def executable(self, tool_locator):
        # The solver binary is expected under the tool's bin/ subdirectory.
        return tool_locator.find_executable("dcssat", subdir="bin")
    def name(self):
        """Human-readable tool name shown in benchmark result tables."""
        return "DC-SSAT"
def cmdline(self, executable, options, task, rlimits):
if task.single_input_file.endswith(".pec"):
new_file_name = (
os.path.dirname(task.single_input_file)
+ "/re-"
+ os.path.splitext(os.path.basename(task.single_input_file))[0]
+ ".sdimacs"
)
return [executable] + options + [new_file_name]
elif task.single_input_file.endswith(".mpec"):
new_file_name = (
os.path.dirname(task.single_input_file)
+ "/ere-"
+ os.path.splitext(os.path.basename(task.single_input_file))[0]
+ ".sdimacs"
)
return [executable] + options + [new_file_name]
else:
return [executable] + options + [task.single_input_file]
    def get_value_from_output(self, output, identifier):
        # search for the identifier in the output and return the number after it
        # the number can be an integer, a decimal, or a scientific notation
        # warn if there are repeated matches (multiple statistics from sequential analysis?)
        regex_integer = r"(\d+)"
        regex_decimal = r"(\d+\.\d*|\d*\.\d+)"
        regex_scinote = r"(\d\.?\d*[Ee][+\-]?\d+)"
        # Full pattern: identifier, optional ':' or '=', then an optionally
        # signed number followed by whitespace or end-of-line. group(1)
        # captures the number including its sign.
        regex_pattern = (
            re.escape(identifier)
            + r"\s*[:=]?\s*(-?("
            + regex_integer
            + r"|"
            + regex_decimal
            + r"|"
            + regex_scinote
            + r"))(\s|$)"
        )
        regex = re.compile(regex_pattern)
        match = None
        for line in output:
            result = regex.search(line)
            if result:
                if match is None:
                    # First match wins; later ones are reported and ignored.
                    match = result.group(1)
                else:
                    logging.warning(
                        "skipping repeated matches for identifier '%s': '%s'",
                        identifier,
                        line,
                    )
return match | reproduction-artifact/tool-info/dcssat.py |
import os
import re
import logging
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
    """
    Tool info for DC-SSAT: A Divide-and-Conquer Approach to Solving Stochastic Satisfiability Problems Efficiently
    URL: https://www.aaai.org/Papers/AAAI/2005/AAAI05-066.pdf
    """

    def executable(self, tool_locator):
        # The dcssat binary ships in the bin/ subdirectory of the tool package.
        return tool_locator.find_executable("dcssat", subdir="bin")

    def name(self):
        return "DC-SSAT"

    def cmdline(self, executable, options, task, rlimits):
        """
        Build the command line for a single run.

        ".pec" and ".mpec" tasks are answered with the pre-converted
        companion file ("re-<stem>.sdimacs" resp. "ere-<stem>.sdimacs")
        located next to the original input; any other file is used directly.
        """
        source = task.single_input_file
        directory = os.path.dirname(source)
        stem = os.path.splitext(os.path.basename(source))[0]
        if source.endswith(".pec"):
            target = directory + "/re-" + stem + ".sdimacs"
        elif source.endswith(".mpec"):
            target = directory + "/ere-" + stem + ".sdimacs"
        else:
            target = source
        return [executable] + options + [target]

    def get_value_from_output(self, output, identifier):
        """
        Return the number following the first occurrence of *identifier* in
        *output* (integer, decimal or scientific notation, optionally signed).
        Log a warning for every later matching line (possibly repeated
        statistics from a sequential analysis); return None when nothing
        matches.
        """
        number_pattern = r"((\d+)|(\d+\.\d*|\d*\.\d+)|(\d\.?\d*[Ee][+\-]?\d+))"
        regex = re.compile(
            re.escape(identifier) + r"\s*[:=]?\s*(-?" + number_pattern + r")(\s|$)"
        )
        first_value = None
        for line in output:
            found = regex.search(line)
            if found is None:
                continue
            if first_value is None:
                first_value = found.group(1)
            else:
                logging.warning(
                    "skipping repeated matches for identifier '%s': '%s'",
                    identifier,
                    line,
                )
        return first_value
def assertIn(v, c):
    """Fail unless value *v* is contained in *c*."""
    if v not in c:
        raise AssertionError('{} not in {}'.format(v, c))
def assertNin(v, c):
    """Fail if value *v* is contained in *c*."""
    if v in c:
        raise AssertionError('{} in {}'.format(v, c))
def assertEq(l, r):
    """Fail unless l == r."""
    if l != r:
        raise AssertionError('{} != {}'.format(l, r))
def assertNeq(l, r):
    """Fail if l == r."""
    if l == r:
        raise AssertionError('{} == {}'.format(l, r))
def assertGt(l, r):
    """Fail unless l > r."""
    if l <= r:
        raise AssertionError('{} <= {}'.format(l, r))
def assertGe(l, r):
    """Fail unless l >= r."""
    if l < r:
        raise AssertionError('{} < {}'.format(l, r))
def assertLt(l, r):
    """Fail unless l < r."""
    if l >= r:
        raise AssertionError('{} >= {}'.format(l, r))
def assertLe(l, r):
    """Fail unless l <= r."""
    if l > r:
        raise AssertionError('{} > {}'.format(l, r))
def assertIsInstance(v, t):
    """Fail unless *v* is an instance of type *t*."""
    if not isinstance(v, t):
        raise AssertionError('{} is not instance of {}'.format(v, t))
def assertNisInstance(v, t):
    """Fail if *v* is an instance of type *t*."""
    if isinstance(v, t):
        raise AssertionError('{} is instance of {}'.format(v, t))
def assertThrow(f, E):
    """Fail unless calling f() raises an exception of type E."""
    try:
        f()
    except E:
        return
    except BaseException:
        # f() raised, but not the expected exception type.
        raise AssertionError("{} didn't raise {}".format(f, E))
    raise AssertionError("{} didn't raise".format(f))
def assertNoThrow(f):
    """Fail if calling f() raises any exception."""
    try:
        f()
    except BaseException:
        raise AssertionError('{} raised'.format(f))
# Tests.
# Self-test suite: runs only when this module is executed directly.
# Every helper is exercised once with a passing input and once (via
# check()) with an input that must raise AssertionError.
if __name__ == "__main__":
    def check(f, *args):
        # Expect f(*args) to raise AssertionError; if it does not, print a
        # stack trace and abort the self-test with exit code 1.
        try:
            f(*args)
            import traceback
            traceback.print_stack()
            exit(1)
        except AssertionError:
            pass
    # In.
    assertIn(3, [3])
    check(assertIn, 3, [])
    # Nin.
    assertNin(3, [])
    check(assertNin, 3, [3])
    # Eq.
    assertEq(3, 3)
    check(assertEq, 3, 4)
    # Neq.
    assertNeq(3, 4)
    check(assertNeq, 3, 3)
    # Gt.
    assertGt(4, 3)
    check(assertGt, 3, 3)
    check(assertGt, 3, 4)
    # Ge.
    assertGe(4, 4)
    assertGe(4, 3)
    check(assertGe, 3, 4)
    # Lt.
    assertLt(3, 4)
    check(assertLt, 3, 3)
    check(assertLt, 4, 3)
    # Le.
    assertLe(3, 3)
    assertLe(3, 4)
    check(assertLe, 4, 3)
    # IsInstance
    assertIsInstance(3, int)
    check(assertIsInstance, 3, str)
    # NisInstance
    assertNisInstance(3, str)
    check(assertNisInstance, 3, int)
    # Throw.
    def do_raise():
        raise Exception('devil')
    assertThrow(do_raise, Exception)
    check(assertThrow, lambda: 3, Exception)
    # NoThrow
    assertNoThrow(lambda: 3)
check(assertNoThrow, do_raise) | __init__.py | def assertIn(v, c):
if not v in c:
raise AssertionError('{} not in {}'.format(v, c))
def _fail(message, *values):
    # Shared failure path for the assert helpers below.
    raise AssertionError(message.format(*values))
def assertNin(v, c):
    """Fail when *v* is found inside container *c*."""
    if v in c:
        _fail('{} in {}', v, c)
def assertEq(l, r):
    """Fail when l and r compare unequal."""
    if l != r:
        _fail('{} != {}', l, r)
def assertNeq(l, r):
    """Fail when l and r compare equal."""
    if l == r:
        _fail('{} == {}', l, r)
def assertGt(l, r):
    """Fail unless l is strictly greater than r."""
    if l <= r:
        _fail('{} <= {}', l, r)
def assertGe(l, r):
    """Fail unless l is greater than or equal to r."""
    if l < r:
        _fail('{} < {}', l, r)
def assertLt(l, r):
    """Fail unless l is strictly less than r."""
    if l >= r:
        _fail('{} >= {}', l, r)
def assertLe(l, r):
    """Fail unless l is less than or equal to r."""
    if l > r:
        _fail('{} > {}', l, r)
def assertIsInstance(v, t):
    """Fail unless *v* is an instance of type *t*."""
    if not isinstance(v, t):
        _fail('{} is not instance of {}', v, t)
def assertNisInstance(v, t):
    """Fail when *v* is an instance of type *t*."""
    if isinstance(v, t):
        _fail('{} is instance of {}', v, t)
def assertThrow(f, E):
    """Fail unless calling f() raises an exception of type E."""
    try:
        f()
    except E:
        return
    except BaseException:
        _fail('{} didn\'t raised {}', f, E)
    _fail('{} didn\'t raised', f)
def assertNoThrow(f):
    """Fail when calling f() raises any exception."""
    try:
        f()
    except BaseException:
        _fail('{} raised', f)
# Tests.
# Self-test suite: runs only when this module is executed directly.
# Every helper is exercised once with a passing input and once (via
# check()) with an input that must raise AssertionError.
if __name__ == "__main__":
    def check(f, *args):
        # Expect f(*args) to raise AssertionError; if it does not, print a
        # stack trace and abort the self-test with exit code 1.
        try:
            f(*args)
            import traceback
            traceback.print_stack()
            exit(1)
        except AssertionError:
            pass
    # In.
    assertIn(3, [3])
    check(assertIn, 3, [])
    # Nin.
    assertNin(3, [])
    check(assertNin, 3, [3])
    # Eq.
    assertEq(3, 3)
    check(assertEq, 3, 4)
    # Neq.
    assertNeq(3, 4)
    check(assertNeq, 3, 3)
    # Gt.
    assertGt(4, 3)
    check(assertGt, 3, 3)
    check(assertGt, 3, 4)
    # Ge.
    assertGe(4, 4)
    assertGe(4, 3)
    check(assertGe, 3, 4)
    # Lt.
    assertLt(3, 4)
    check(assertLt, 3, 3)
    check(assertLt, 4, 3)
    # Le.
    assertLe(3, 3)
    assertLe(3, 4)
    check(assertLe, 4, 3)
    # IsInstance
    assertIsInstance(3, int)
    check(assertIsInstance, 3, str)
    # NisInstance
    assertNisInstance(3, str)
    check(assertNisInstance, 3, int)
    # Throw.
    def do_raise():
        raise Exception('devil')
    assertThrow(do_raise, Exception)
    check(assertThrow, lambda: 3, Exception)
    # NoThrow
    assertNoThrow(lambda: 3)
check(assertNoThrow, do_raise) | 0.460289 | 0.423339 |
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup
# COMMAND ----------
# Read in the dataset for the lab, along with all functions
# NOTE(review): this looks like a Databricks notebook source export —
# "# COMMAND ----------" separates cells and "# MAGIC" lines carry non-Python
# cell content; `spark`, `display` and `sales_path` are presumably provided
# by the Databricks runtime / the Classroom-Setup include above — confirm
# before running outside Databricks.
from pyspark.sql.functions import *
df = spark.read.format("delta").load(sales_path)
display(df)
# COMMAND ----------
# MAGIC %md ### 1. Extract item details from purchases
# MAGIC
# MAGIC - Explode the **`items`** field in **`df`** with the results replacing the existing **`items`** field
# MAGIC - Select the **`email`** and **`item.item_name`** fields
# MAGIC - Split the words in **`item_name`** into an array and alias the column to "details"
# MAGIC
# MAGIC Assign the resulting DataFrame to **`details_df`**.
# COMMAND ----------
# ANSWER
from pyspark.sql.functions import *
details_df = (df
    .withColumn("items", explode("items"))
    .select("email", "items.item_name")
    .withColumn("details", split(col("item_name"), " "))
)
display(details_df)
# COMMAND ----------
# MAGIC %md So you can see that our **`details`** column is now an array containing the quality, size, and object type.
# COMMAND ----------
# MAGIC %md ### 2. Extract size and quality options from mattress purchases
# MAGIC
# MAGIC - Filter **`details_df`** for records where **`details`** contains "Mattress"
# MAGIC - Add a **`size`** column by extracting the element at position 2
# MAGIC - Add a **`quality`** column by extracting the element at position 1
# MAGIC
# MAGIC Save the result as **`mattress_df`**.
# COMMAND ----------
# ANSWER
mattress_df = (details_df
    .filter(array_contains(col("details"), "Mattress"))
    .withColumn("size", element_at(col("details"), 2))
    .withColumn("quality", element_at(col("details"), 1))
)
display(mattress_df)
# COMMAND ----------
# MAGIC %md Next we're going to do the same thing for pillow purchases.
# COMMAND ----------
# MAGIC %md ### 3. Extract size and quality options from pillow purchases
# MAGIC - Filter **`details_df`** for records where **`details`** contains "Pillow"
# MAGIC - Add a **`size`** column by extracting the element at position 1
# MAGIC - Add a **`quality`** column by extracting the element at position 2
# MAGIC
# MAGIC Note the positions of **`size`** and **`quality`** are switched for mattresses and pillows.
# MAGIC
# MAGIC Save result as **`pillow_df`**.
# COMMAND ----------
# ANSWER
pillow_df = (details_df
    .filter(array_contains(col("details"), "Pillow"))
    .withColumn("size", element_at(col("details"), 1))
    .withColumn("quality", element_at(col("details"), 2))
)
display(pillow_df)
# COMMAND ----------
# MAGIC %md ### 4. Combine data for mattress and pillows
# MAGIC
# MAGIC - Perform a union on **`mattress_df`** and **`pillow_df`** by column names
# MAGIC - Drop the **`details`** column
# MAGIC
# MAGIC Save the result as **`union_df`**.
# COMMAND ----------
# ANSWER
union_df = mattress_df.unionByName(pillow_df).drop("details")
display(union_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 5. List all size and quality options bought by each user
# MAGIC
# MAGIC - Group rows in **`union_df`** by **`email`**
# MAGIC - Collect the set of all items in **`size`** for each user and alias the column to "size options"
# MAGIC - Collect the set of all items in **`quality`** for each user and alias the column to "quality options"
# MAGIC
# MAGIC Save the result as **`options_df`**.
# COMMAND ----------
# ANSWER
options_df = (union_df
    .groupBy("email")
    .agg(collect_set("size").alias("size options"),
         collect_set("quality").alias("quality options"))
)
display(options_df)
# COMMAND ----------
# MAGIC %md ### Clean up classroom
# MAGIC
# MAGIC And lastly, we'll clean up the classroom.
# COMMAND ----------
classroom_cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a> | Apache-Spark-Programming-with-Databricks/Solutions/ASP 3 - Functions/ASP 3.3L - Users.py |
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup
# COMMAND ----------
# Read in the dataset for the lab, along with all functions
# NOTE(review): this looks like a Databricks notebook source export —
# "# COMMAND ----------" separates cells and "# MAGIC" lines carry non-Python
# cell content; `spark`, `display` and `sales_path` are presumably provided
# by the Databricks runtime / the Classroom-Setup include above — confirm
# before running outside Databricks.
from pyspark.sql.functions import *
df = spark.read.format("delta").load(sales_path)
display(df)
# COMMAND ----------
# MAGIC %md ### 1. Extract item details from purchases
# MAGIC
# MAGIC - Explode the **`items`** field in **`df`** with the results replacing the existing **`items`** field
# MAGIC - Select the **`email`** and **`item.item_name`** fields
# MAGIC - Split the words in **`item_name`** into an array and alias the column to "details"
# MAGIC
# MAGIC Assign the resulting DataFrame to **`details_df`**.
# COMMAND ----------
# ANSWER
from pyspark.sql.functions import *
details_df = (df
    .withColumn("items", explode("items"))
    .select("email", "items.item_name")
    .withColumn("details", split(col("item_name"), " "))
)
display(details_df)
# COMMAND ----------
# MAGIC %md So you can see that our **`details`** column is now an array containing the quality, size, and object type.
# COMMAND ----------
# MAGIC %md ### 2. Extract size and quality options from mattress purchases
# MAGIC
# MAGIC - Filter **`details_df`** for records where **`details`** contains "Mattress"
# MAGIC - Add a **`size`** column by extracting the element at position 2
# MAGIC - Add a **`quality`** column by extracting the element at position 1
# MAGIC
# MAGIC Save the result as **`mattress_df`**.
# COMMAND ----------
# ANSWER
mattress_df = (details_df
    .filter(array_contains(col("details"), "Mattress"))
    .withColumn("size", element_at(col("details"), 2))
    .withColumn("quality", element_at(col("details"), 1))
)
display(mattress_df)
# COMMAND ----------
# MAGIC %md Next we're going to do the same thing for pillow purchases.
# COMMAND ----------
# MAGIC %md ### 3. Extract size and quality options from pillow purchases
# MAGIC - Filter **`details_df`** for records where **`details`** contains "Pillow"
# MAGIC - Add a **`size`** column by extracting the element at position 1
# MAGIC - Add a **`quality`** column by extracting the element at position 2
# MAGIC
# MAGIC Note the positions of **`size`** and **`quality`** are switched for mattresses and pillows.
# MAGIC
# MAGIC Save result as **`pillow_df`**.
# COMMAND ----------
# ANSWER
pillow_df = (details_df
    .filter(array_contains(col("details"), "Pillow"))
    .withColumn("size", element_at(col("details"), 1))
    .withColumn("quality", element_at(col("details"), 2))
)
display(pillow_df)
# COMMAND ----------
# MAGIC %md ### 4. Combine data for mattress and pillows
# MAGIC
# MAGIC - Perform a union on **`mattress_df`** and **`pillow_df`** by column names
# MAGIC - Drop the **`details`** column
# MAGIC
# MAGIC Save the result as **`union_df`**.
# COMMAND ----------
# ANSWER
union_df = mattress_df.unionByName(pillow_df).drop("details")
display(union_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 5. List all size and quality options bought by each user
# MAGIC
# MAGIC - Group rows in **`union_df`** by **`email`**
# MAGIC - Collect the set of all items in **`size`** for each user and alias the column to "size options"
# MAGIC - Collect the set of all items in **`quality`** for each user and alias the column to "quality options"
# MAGIC
# MAGIC Save the result as **`options_df`**.
# COMMAND ----------
# ANSWER
options_df = (union_df
    .groupBy("email")
    .agg(collect_set("size").alias("size options"),
         collect_set("quality").alias("quality options"))
)
display(options_df)
# COMMAND ----------
# MAGIC %md ### Clean up classroom
# MAGIC
# MAGIC And lastly, we'll clean up the classroom.
# COMMAND ----------
classroom_cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a> | 0.72952 | 0.685354 |
# Unidirectional Sequence LSTM Test:
# 1 Time Step, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
# NOTE(review): Model(), Input(), Output(), Int32Scalar(), Float32Scalar(),
# BoolScalar() and Example() are not defined or imported in this file; they
# are presumably injected by the NNAPI test-spec generator that executes
# these .mod.py specs — confirm against the generator harness.
import copy
import dynamic_tensor
model = Model()
max_time = 1
n_batch = 2
n_input = 5
# n_cell and n_output have the same size when there is no projection.
n_cell = 4
n_output = 3
# Time-major input tensor shape: [max_time, n_batch, n_input] = [1, 2, 5].
input_shape = [max_time, n_batch, n_input]
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, input_shape, "TENSOR_FLOAT32")
input = dynamic_layer.getTestNodeInput()
# Gate input weights, each of shape {n_cell, n_input}.
input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
                               "{%d, %d}" % (n_cell, n_input))
input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
                                "{%d, %d}" % (n_cell, n_input))
input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
                              "{%d, %d}" % (n_cell, n_input))
input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
                                "{%d, %d}" % (n_cell, n_input))
# Recurrent weights, each of shape {n_cell, n_output}.
# NOTE(review): the operand name string "recurrent_to_intput_weights" is
# misspelled in the original spec; kept as-is because it is runtime data.
recurrent_to_input_weights = Input("recurrent_to_intput_weights",
                                   "TENSOR_FLOAT32",
                                   "{%d, %d}" % (n_cell, n_output))
recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
                                    "TENSOR_FLOAT32",
                                    "{%d, %d}" % (n_cell, n_output))
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
                                  "{%d, %d}" % (n_cell, n_output))
recurrent_to_output_weights = Input("recurrent_to_output_weights",
                                    "TENSOR_FLOAT32",
                                    "{%d, %d}" % (n_cell, n_output))
# Peephole weights and gate biases, each a {n_cell} vector.
cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
                              "{%d}" % (n_cell))
cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
                               "{%d}" % (n_cell))
cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
                               "{%d}" % (n_cell))
input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
                         "{%d}" % (n_cell))
cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
                         "{%d}" % (n_cell))
# Projection from n_cell down to n_output; projection bias is empty ({0}).
projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
                           "{%d,%d}" % (n_output, n_cell))
projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
                        "{%d, %d}" % (n_batch, n_output))
cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
                      "{%d, %d}" % (n_batch, n_cell))
activation_param = Int32Scalar("activation_param", 4)  # Tanh
cell_clip_param = Float32Scalar("cell_clip_param", 0.)
proj_clip_param = Float32Scalar("proj_clip_param", 0.)
time_major_param = BoolScalar("time_major_param", True)
# Per-gate layer-normalization weights, each a {n_cell} vector.
input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
                                 "{%d}" % n_cell)
forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
                                  "{%d}" % n_cell)
cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
                                "{%d}" % n_cell)
output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
                                  "{%d}" % n_cell)
output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_output))
# Operand order follows the UNIDIRECTIONAL_SEQUENCE_LSTM signature.
model = model.Operation(
    "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
    input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
    recurrent_to_forget_weights, recurrent_to_cell_weights,
    recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
    cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
    output_gate_bias, projection_weights, projection_bias, output_state_in,
    cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
    input_layer_norm_weights, forget_layer_norm_weights,
    cell_layer_norm_weights, output_layer_norm_weights).To([output])
# Example 1. Input in operand 0,
test_input = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1]
# Concrete values for every model input of the single test example.
input0 = {
    dynamic_layer.getModelInput() : test_input,
    dynamic_layer.getShapeInput() : input_shape,
    input_to_input_weights: [
        0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
        0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
    ],
    input_to_forget_weights: [
        -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
        -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
    ],
    input_to_cell_weights: [
        -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
        -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
    ],
    input_to_output_weights: [
        -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
        0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
    ],
    input_gate_bias: [0.03, 0.15, 0.22, 0.38],
    forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
    cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
    output_gate_bias: [0.05, -0.01, 0.2, 0.1],
    recurrent_to_input_weights: [
        -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
    ],
    recurrent_to_cell_weights: [
        -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
    ],
    recurrent_to_forget_weights: [
        -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
    ],
    recurrent_to_output_weights: [
        0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
    ],
    cell_to_input_weights: [0.05, 0.1, 0.25, 0.15],
    cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
    cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
    projection_weights: [
        -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
    ],
    projection_bias: [],
    input_layer_norm_weights: [0.1, 0.2, 0.3, 0.5],
    forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
    cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
    output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
}
# Pre-computed reference output the generated test compares against.
golden_output = [
    0.024407668039203, 0.128027379512787, -0.001709178090096,
    -0.006924282759428, 0.084874063730240, 0.063444979488850
]
output0 = {
    output: golden_output,
}
# Zero-initialize the recurrent output and cell state for the first step.
input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
Example((input0, output0)) | tests/nnapi/specs/V1_2/unidirectional_sequence_lstm_dynamic_nnfw.mod.py |
# Unidirectional Sequence LSTM Test:
# 1 Time Step, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
# NOTE(review): Model(), Input(), Output(), Int32Scalar(), Float32Scalar(),
# BoolScalar() and Example() are not defined or imported in this file; they
# are presumably injected by the NNAPI test-spec generator that executes
# these .mod.py specs — confirm against the generator harness.
import copy
import dynamic_tensor
model = Model()
max_time = 1
n_batch = 2
n_input = 5
# n_cell and n_output have the same size when there is no projection.
n_cell = 4
n_output = 3
# Time-major input tensor shape: [max_time, n_batch, n_input] = [1, 2, 5].
input_shape = [max_time, n_batch, n_input]
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, input_shape, "TENSOR_FLOAT32")
input = dynamic_layer.getTestNodeInput()
# Gate input weights, each of shape {n_cell, n_input}.
input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
                               "{%d, %d}" % (n_cell, n_input))
input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
                                "{%d, %d}" % (n_cell, n_input))
input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
                              "{%d, %d}" % (n_cell, n_input))
input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
                                "{%d, %d}" % (n_cell, n_input))
# Recurrent weights, each of shape {n_cell, n_output}.
# NOTE(review): the operand name string "recurrent_to_intput_weights" is
# misspelled in the original spec; kept as-is because it is runtime data.
recurrent_to_input_weights = Input("recurrent_to_intput_weights",
                                   "TENSOR_FLOAT32",
                                   "{%d, %d}" % (n_cell, n_output))
recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
                                    "TENSOR_FLOAT32",
                                    "{%d, %d}" % (n_cell, n_output))
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
                                  "{%d, %d}" % (n_cell, n_output))
recurrent_to_output_weights = Input("recurrent_to_output_weights",
                                    "TENSOR_FLOAT32",
                                    "{%d, %d}" % (n_cell, n_output))
# Peephole weights and gate biases, each a {n_cell} vector.
cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
                              "{%d}" % (n_cell))
cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
                               "{%d}" % (n_cell))
cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
                               "{%d}" % (n_cell))
input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
                         "{%d}" % (n_cell))
cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
                         "{%d}" % (n_cell))
# Projection from n_cell down to n_output; projection bias is empty ({0}).
projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
                           "{%d,%d}" % (n_output, n_cell))
projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
                        "{%d, %d}" % (n_batch, n_output))
cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
                      "{%d, %d}" % (n_batch, n_cell))
activation_param = Int32Scalar("activation_param", 4)  # Tanh
cell_clip_param = Float32Scalar("cell_clip_param", 0.)
proj_clip_param = Float32Scalar("proj_clip_param", 0.)
time_major_param = BoolScalar("time_major_param", True)
# Per-gate layer-normalization weights, each a {n_cell} vector.
input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
                                 "{%d}" % n_cell)
forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
                                  "{%d}" % n_cell)
cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
                                "{%d}" % n_cell)
output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
                                  "{%d}" % n_cell)
output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_output))
# Operand order follows the UNIDIRECTIONAL_SEQUENCE_LSTM signature.
model = model.Operation(
    "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
    input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
    recurrent_to_forget_weights, recurrent_to_cell_weights,
    recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
    cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
    output_gate_bias, projection_weights, projection_bias, output_state_in,
    cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
    input_layer_norm_weights, forget_layer_norm_weights,
    cell_layer_norm_weights, output_layer_norm_weights).To([output])
# Example 1. Input in operand 0,
test_input = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1]
# Concrete values for every model input of the single test example.
input0 = {
    dynamic_layer.getModelInput() : test_input,
    dynamic_layer.getShapeInput() : input_shape,
    input_to_input_weights: [
        0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
        0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
    ],
    input_to_forget_weights: [
        -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
        -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
    ],
    input_to_cell_weights: [
        -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
        -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
    ],
    input_to_output_weights: [
        -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
        0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
    ],
    input_gate_bias: [0.03, 0.15, 0.22, 0.38],
    forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
    cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
    output_gate_bias: [0.05, -0.01, 0.2, 0.1],
    recurrent_to_input_weights: [
        -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
    ],
    recurrent_to_cell_weights: [
        -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
    ],
    recurrent_to_forget_weights: [
        -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
    ],
    recurrent_to_output_weights: [
        0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
    ],
    cell_to_input_weights: [0.05, 0.1, 0.25, 0.15],
    cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
    cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
    projection_weights: [
        -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
    ],
    projection_bias: [],
    input_layer_norm_weights: [0.1, 0.2, 0.3, 0.5],
    forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
    cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
    output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
}
# Pre-computed reference output the generated test compares against.
golden_output = [
    0.024407668039203, 0.128027379512787, -0.001709178090096,
    -0.006924282759428, 0.084874063730240, 0.063444979488850
]
output0 = {
    output: golden_output,
}
# Zero-initialize the recurrent output and cell state for the first step.
input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
Example((input0, output0)) | 0.7641 | 0.442516 |
import logging
logger = logging.getLogger(__name__)
from .pyfda_lib import qstr
from .compat import QFrame, QMessageBox, Qt
#------------------------------------------------------------------------------
def qget_cmb_box(cmb_box, data=True):
    """
    Return the current entry of *cmb_box* as a plain string.

    When ``data`` is True the itemData of the current index is returned,
    otherwise the visible text. ``qstr()`` unwraps the QVariant / QString
    containers that Qt may hand back (relevant under Python 2) so that the
    result is always a normal string.

    Returns
    -------
    str
        The current text or data of the combo box.
    """
    if data:
        # itemData may arrive wrapped in QVariant / QString -> unwrap first
        value = qstr(cmb_box.itemData(cmb_box.currentIndex()))
    else:
        value = cmb_box.currentText()
    return str(value)
#------------------------------------------------------------------------------
def qset_cmb_box(cmb_box, string, data=False, fireSignals=False, caseSensitive=False):
    """
    Set combobox to the index corresponding to `string` in a text field
    (`data = False`) or in a data field (`data=True`). When `string` is not
    found in the combobox entries, select the first entry. Signals are
    blocked during the update of the combobox unless `fireSignals` is set
    `True`. By default, the search is case insensitive; this can be changed
    by passing `caseSensitive=True`.

    Parameters
    ----------
    cmb_box : QComboBox
        The combo box to update.
    string : str
        The label in the text or data field to be selected. When the string
        is not found, select the first entry of the combo box.
    data : bool (default: False)
        Whether the string refers to the data or text fields of the combo box.
    fireSignals : bool (default: False)
        When True, do NOT block signals, so a signal is fired when the index
        changes (useful for GUI testing).
    caseSensitive : bool (default: False)
        When True, perform a case sensitive search.

    Returns
    -------
    int
        The index of the string. When the string was not found in the combo
        box, return index -1 (the first entry is selected in that case).
    """
    if caseSensitive:
        flag = Qt.MatchFixedString | Qt.MatchCaseSensitive
    else:
        flag = Qt.MatchFixedString  # string based matching (case insensitive)
    # Other more or less self explanatory flags:
    # MatchExactly (default), MatchContains, MatchStartsWith, MatchEndsWith,
    # MatchRegExp, MatchWildcard, MatchRecursive
    if data:
        idx = cmb_box.findData(str(string), flags=flag)  # find index for data = string
    else:
        idx = cmb_box.findText(str(string), flags=flag)  # find index for text = string
    ret = idx
    if idx == -1:  # string does not exist, use first entry instead
        idx = 0
    cmb_box.blockSignals(not fireSignals)
    cmb_box.setCurrentIndex(idx)  # set index
    cmb_box.blockSignals(False)
    return ret
#------------------------------------------------------------------------------
def qstyle_widget(widget, state):
    """
    Apply the dynamic "state" property (styled in pyfda_rc.py) to *widget*
    and force a re-polish so the stylesheet takes effect, e.g. for coloring
    the >> DESIGN FILTER << button:

    - "normal": default, no color styling
    - "ok": green, filter has been designed, everything ok
    - "changed": yellow, filter specs have been changed
    - "error": red, an error has occurred during filter design
    - "failed": orange, filter fails to meet target specs
    - "u" or "unused": grey text color
    - "d" or "disabled": background color darkgrey
    - "a" or "active": no special style defined
    """
    # Expand the single-letter shortcuts to their full state names;
    # any other value is passed through unchanged.
    aliases = {'u': "unused", 'a': "active", 'd': "disabled"}
    state = str(state)
    state = aliases.get(state, state)
    widget.setProperty("state", state)
    # Un-/re-polish so the style sheet is re-evaluated with the new property.
    widget.style().unpolish(widget)
    widget.style().polish(widget)
    widget.update()
#------------------------------------------------------------------------------
def qhline(widget):
    """
    Return a horizontal line (a sunken QFrame) as a child of *widget*.

    Recipe from
    http://stackoverflow.com/questions/5671354/how-to-programmatically-make-a-horizontal-line-in-qt

    Parameters
    ----------
    widget : QWidget
        The widget that will own the created QFrame.
    """
    hline = QFrame(widget)
    hline.setFrameShape(QFrame.HLine)
    hline.setFrameShadow(QFrame.Sunken)
    return hline
#------------------------------------------------------------------------------
def qget_selected(table, select_all=False, reverse=True):
    """
    Collect the selection state of *table* and return it as a dictionary
    with the following keys:

    'idx': indices of selected cells as an unsorted list of [column, row] pairs
    'sel': list of selected rows per column (columns 0 and 1), each sorted
    'cur': current cell selection as a (column, row) tuple

    Parameters
    ----------
    table : QTableWidget
        The table whose selection is read.
    select_all : bool
        When True, select all table items first (and clear the selection
        again before returning).
    reverse : bool
        When True, return the per-column row lists sorted upside down.
    """
    if select_all:
        table.selectAll()
    idx = [[item.column(), item.row()] for item in table.selectedItems()]
    # Rows selected in column 0 and in column 1, each sorted separately.
    sel = [sorted([row for col, row in idx if col == 0], reverse=reverse),
           sorted([row for col, row in idx if col == 1], reverse=reverse)]
    if select_all:
        table.clearSelection()
    cur = (table.currentColumn(), table.currentRow())
    return {'idx': idx, 'sel': sel, 'cur': cur}
#------------------------------------------------------------------------------
def qfilter_warning(self, N, fil_class):
    """
    Pop up a warning box for a very large filter order *N* of filter class
    *fil_class* and return True when the user chooses to continue,
    False otherwise.
    """
    message = ("<span><i><b>N = {0}</b></i> is a rather high order for<br />"
               "an {1} filter and may cause large <br />"
               "numerical errors and compute times.<br />"
               "Continue?</span>".format(N, fil_class))
    reply = QMessageBox.warning(self, 'Warning', message,
                                QMessageBox.Yes, QMessageBox.No)
    return reply == QMessageBox.Yes
#==============================================================================
if __name__=='__main__':
pass | pyfda/pyfda_qt_lib.py | import logging
logger = logging.getLogger(__name__)
from .pyfda_lib import qstr
from .compat import QFrame, QMessageBox, Qt
#------------------------------------------------------------------------------
def qget_cmb_box(cmb_box, data=True):
    """
    Return the current itemData (``data=True``) or text of ``cmb_box`` as a
    plain string.

    In Python 3, Qt objects stored as "data" in a QComboBox are converted to
    QVariant and back automatically; in Python 2 a QVariant is returned when
    itemData is retrieved.  ``qstr`` normalizes QVariant / QString / str to a
    plain (non-unicode) string before the final ``str()`` conversion.
    """
    if data:
        raw = cmb_box.itemData(cmb_box.currentIndex())
        # convert QVariant, QString, string to plain string
        return str(qstr(raw))
    return str(cmb_box.currentText())
#------------------------------------------------------------------------------
def qset_cmb_box(cmb_box, string, data=False, fireSignals=False, caseSensitive=False):
    """
    Set combobox to the index corresponding to `string` in a text field (`data = False`)
    or in a data field (`data=True`). When `string` is not found in the combobox entries,
    select the first entry. Signals are blocked during the update of the combobox unless
    `fireSignals` is set `True`. By default, the search is case insensitive; this
    can be changed by passing `caseSensitive=True`.

    Parameters
    ----------
    string: str
        The label in the text or data field to be selected. When the string is
        not found, select the first entry of the combo box.

    data: bool (default: False)
        Whether the string refers to the data or text fields of the combo box

    fireSignals: bool (default: False)
        When True, fire a signal if the index is changed (useful for GUI testing)

    caseSensitive: bool (default: False)
        When True, perform a case sensitive search.

    Returns
    -------
    The index of the string. When the string was not found in the combo box,
    return index -1.
    """
    if caseSensitive:
        flag = Qt.MatchFixedString | Qt.MatchCaseSensitive
    else:
        flag = Qt.MatchFixedString  # string based matching (case insensitive)
    # Other more or less self explanatory flags:
    # MatchExactly (default), MatchContains, MatchStartsWith, MatchEndsWith,
    # MatchRegExp, MatchWildcard, MatchRecursive
    if data:
        idx = cmb_box.findData(str(string), flags=flag)  # find index for data = string
    else:
        idx = cmb_box.findText(str(string), flags=flag)  # find index for text = string

    ret = idx

    if idx == -1:  # string not found, fall back to the first entry
        idx = 0
    # block signals while updating unless the caller asked for them
    cmb_box.blockSignals(not fireSignals)
    cmb_box.setCurrentIndex(idx)  # set index
    cmb_box.blockSignals(False)

    return ret
#------------------------------------------------------------------------------
def qstyle_widget(widget, state):
    """
    Apply the "state" defined in pyfda_rc.py to the widget, e.g.:
    Color the >> DESIGN FILTER << button according to the filter design state.

    - "normal": default, no color styling
    - "ok":  green, filter has been designed, everything ok
    - "changed": yellow, filter specs have been changed
    - "error" : red, an error has occurred during filter design
    - "failed" : orange, filter fails to meet target specs
    - "u" or "unused": grey text color
    - "d" or "disabled": background color darkgrey
    - "a" or "active": no special style defined
    """
    # expand single-letter shorthands; every other state passes through as-is
    aliases = {'u': 'unused', 'a': 'active', 'd': 'disabled'}
    state = aliases.get(str(state), str(state))
    widget.setProperty("state", state)
    # force Qt to re-evaluate the stylesheet for the new property value
    widget.style().unpolish(widget)
    widget.style().polish(widget)
    widget.update()
#------------------------------------------------------------------------------
def qhline(widget):
    """
    Return a QFrame styled as a sunken horizontal line.

    Parameters
    ----------
    widget: parent widget for the created QFrame

    (see http://stackoverflow.com/questions/5671354/how-to-programmatically-make-a-horizontal-line-in-qt)
    """
    hline = QFrame(widget)
    hline.setFrameShape(QFrame.HLine)
    hline.setFrameShadow(QFrame.Sunken)
    return hline
#------------------------------------------------------------------------------
def qget_selected(table, select_all=False, reverse=True):
    """
    Return a dict describing the current cell selection of ``table``.

    Keys of the returned dict:
    'idx': positions of selected cells as a list of [column, row] pairs
    'sel': per-column lists of selected row numbers (columns 0 and 1),
           sorted in descending order by default
    'cur': the current cell as a (column, row) tuple

    Parameters
    ----------
    select_all : bool
        when True, select all table items first and clear the selection
        again before returning
    reverse : bool
        when True, sort the per-column row lists in descending order
    """
    if select_all:
        table.selectAll()

    idx = [[item.column(), item.row()] for item in table.selectedItems()]
    sel = [
        sorted((row for col, row in idx if col == 0), reverse=reverse),
        sorted((row for col, row in idx if col == 1), reverse=reverse),
    ]

    if select_all:
        table.clearSelection()

    cur = (table.currentColumn(), table.currentRow())
    return {'idx': idx, 'sel': sel, 'cur': cur}
#------------------------------------------------------------------------------
def qfilter_warning(self, N, fil_class):
    """
    Ask the user to confirm a very high filter order via a warning pop-up.

    Returns True when the user confirms with "Yes", False otherwise.
    """
    msg = ("<span><i><b>N = {0}</b></i> is a rather high order for<br />"
           "an {1} filter and may cause large <br />"
           "numerical errors and compute times.<br />"
           "Continue?</span>".format(N, fil_class))
    answer = QMessageBox.warning(self, 'Warning', msg,
                                 QMessageBox.Yes, QMessageBox.No)
    return answer == QMessageBox.Yes
#==============================================================================
if __name__=='__main__':
pass | 0.735357 | 0.459864 |
from . import static as static
from . import gquery as gquery
from . import pagination as pageUtils
from . import swagger as swagger
from .prov import grlcPROV
from .fileLoaders import GithubLoader, LocalLoader, URLLoader
from .queryTypes import qType
from . import __version__ as grlc_version
import re
import requests
import json
from rdflib import Graph
import SPARQLTransformer
from . import glogging as glogging
glogger = glogging.getGrlcLogger(__name__)
def getLoader(user, repo, subdir=None, spec_url=None, sha=None, prov=None):
    """Build a fileLoader (LocalLoader, GithubLoader, URLLoader) for the given parameters."""
    # no repo coordinates and no URL -> load from the local file system
    if user is None and repo is None and not spec_url:
        return LocalLoader()
    # an explicit spec URL takes precedence over github coordinates
    if spec_url:
        return URLLoader(spec_url)
    return GithubLoader(user, repo, subdir, sha, prov)
def build_spec(user, repo, subdir=None, sha=None, prov=None, extraMetadata=[]):
    """Build grlc specification for the given github user / repo.

    Deprecated: forwards to grlc.swagger.build_spec and returns its items."""
    glogger.warning(
        "grlc.utils.build_spec is deprecated and will "
        "be removed in the future. Use grlc.swagger.build_spec instead.")
    spec_items = swagger.build_spec(user, repo, subdir, sha, prov, extraMetadata)[0]
    return spec_items
def build_swagger_spec(user, repo, subdir, spec_url, sha, serverName):
    """Build grlc specification for the given github user / repo in swagger format.

    ``serverName`` is written into the spec's 'host' field.  When the loader
    cannot be built (e.g. the repo does not exist), the error is reported
    inside the returned spec instead of raising.
    """
    if user and repo:
        # Init provenance recording
        prov_g = grlcPROV(user, repo)
    else:
        prov_g = None
    swag = swagger.get_blank_spec()
    swag['host'] = serverName
    try:
        loader = getLoader(user, repo, subdir, spec_url, sha, prov_g)
    except Exception as e:
        # If the repo does not exist: surface the error inside the spec
        swag['info'] = {
            'title': 'ERROR!',
            'description': str(e)
        }
        swag['paths'] = {}
        return swag
    prev_commit, next_commit, info, basePath = \
        swagger.get_repo_info(loader, sha, prov_g)
    swag['prev_commit'] = prev_commit
    swag['next_commit'] = next_commit
    swag['info'] = info
    swag['basePath'] = basePath
    # TODO: can we pass loader to build_spec ? --> Ideally yes!
    spec, warnings = swagger.build_spec(user, repo, subdir, spec_url, sha, prov_g)
    # Use items to build API paths
    for item in spec:
        swag['paths'][item['call_name']] = swagger.get_path_for_item(item)
    # TODO: Add bootstrap style to top level HTML
    # Without a better place to display warnings, we can make them part of the description.
    if 'description' not in swag['info']:
        swag['info']['description'] = ''
    for warn in warnings:
        swag['info']['description'] += swagger.get_warning_div(warn)
    if prov_g:
        # close the provenance graph and attach it in turtle format
        prov_g.end_prov_graph()
        swag['prov'] = prov_g.serialize(format='turtle')
    return swag
def dispatch_query(user, repo, query_name, subdir=None, spec_url=None, sha=None,
                   content=None, requestArgs={}, acceptHeader='application/json',
                   requestUrl='http://', formData={}):
    """Executes the specified SPARQL or TPF query.

    Looks the query up by name via a fileLoader and forwards it to the
    matching executor.  Returns a (response, http-status, headers) triple.
    """
    loader = getLoader(user, repo, subdir, spec_url, sha=sha, prov=None)
    query, q_type = loader.getTextForName(query_name)

    # Call name implemented with a SPARQL (or JSON / sparql-transformer) query
    if q_type in (qType['SPARQL'], qType['JSON']):
        # TODO: transform JSON result if suitable (acceptHeader == 'application/json')
        return dispatchSPARQLQuery(query, loader, requestArgs, acceptHeader,
                                   content, formData, requestUrl)
    # Call name implemented with a TPF query
    if q_type == qType['TPF']:
        return dispatchTPFQuery(query, loader, acceptHeader, content)
    return "Couldn't find a SPARQL, RDF dump, or TPF query with the requested name", 404, {}
def dispatchSPARQLQuery(raw_sparql_query, loader, requestArgs, acceptHeader, content,
                        formData, requestUrl):
    """Executes the specified SPARQL query.

    The raw query is rewritten with the request parameters (and pagination),
    then dispatched along one of three paths: a remote RDF dump queried
    locally, an INSERT sent via HTTP POST, or a plain SPARQL endpoint queried
    via HTTP GET.  Returns a (response, status, headers) triple.
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_sparql_query, loader)
    if endpoint == '':
        # NOTE(review): 407 (Proxy Auth Required) is an odd status for a
        # missing endpoint; kept for backwards compatibility.
        return 'No SPARQL endpoint indicated', 407, {}

    glogger.debug("=====================================================")
    glogger.debug("Sending query to SPARQL endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")

    try:
        query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
    except Exception as e:
        # extracting metadata failed (malformed query or decorators)
        return { 'error': str(e) }, 400, {}

    # sparql-transformer queries arrive as dicts and always answer JSON
    acceptHeader = 'application/json' if isinstance(raw_sparql_query, dict) else acceptHeader
    pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""

    rewritten_query = query_metadata['query']

    # Rewrite query using parameter values
    if query_metadata['type'] == 'SelectQuery' or query_metadata['type'] == 'ConstructQuery':
        rewritten_query = gquery.rewrite_query(query_metadata['original_query'], query_metadata['parameters'], requestArgs)

    # Rewrite query using pagination
    if query_metadata['type'] == 'SelectQuery' and 'pagination' in query_metadata:
        rewritten_query = gquery.paginate_query(rewritten_query, query_metadata['pagination'], requestArgs)

    resp = None
    headers = {}

    # If we have a mime field, we load the remote dump and query it locally
    if 'mime' in query_metadata and query_metadata['mime']:
        glogger.debug(
            "Detected {} MIME type, proceeding with locally loading remote dump".format(query_metadata['mime']))
        g = Graph()
        try:
            # NOTE(review): metadata was already extracted above; this second
            # call looks redundant — confirm before removing.
            query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
            g.parse(endpoint, format=query_metadata['mime'])
            glogger.debug("Local RDF graph loaded successfully with {} triples".format(len(g)))
        except Exception as e:
            glogger.error(e)
        results = g.query(rewritten_query, result='sparql')
        # Prepare return format as requested
        resp_string = ""
        if 'application/json' in acceptHeader or (content and 'application/json' in static.mimetypes[content]):
            resp_string = results.serialize(format='json')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        elif 'text/csv' in acceptHeader or (content and 'text/csv' in static.mimetypes[content]):
            resp_string = results.serialize(format='csv')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        else:
            return 'Unacceptable requested format', 415, {}
        # BUGFIX: the serialized results were previously discarded and the
        # function fell through to return None as the body.
        resp = resp_string
        glogger.debug("Finished processing query against RDF dump, end of use case")
        del g  # free the potentially large in-memory graph

    # Check for INSERT/POST
    elif query_metadata['type'] == 'InsertData':
        glogger.debug("Processing INSERT query")
        # Rewrite INSERT
        rewritten_query = rewritten_query.replace("?_g_iri", "{}".format(formData.get('g')))
        rewritten_query = rewritten_query.replace("<s> <p> <o>", formData.get('data'))
        glogger.debug("INSERT query rewritten as {}".format(rewritten_query))

        # Prepare HTTP POST request
        reqHeaders = {'Accept': acceptHeader, 'Content-Type': 'application/sparql-update'}
        response = requests.post(endpoint, data=rewritten_query, headers=reqHeaders, auth=auth)
        glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])

        # Response headers
        resp = response.text
        headers['Content-Type'] = response.headers['Content-Type']

    # If there's no mime type, the endpoint is an actual SPARQL endpoint
    else:
        reqHeaders = {'Accept': acceptHeader}
        if content:
            reqHeaders = {'Accept': static.mimetypes[content]}
        data = {'query': rewritten_query}

        glogger.debug('Sending HTTP request to SPARQL endpoint with params: {}'.format(data))
        glogger.debug('Sending HTTP request to SPARQL endpoint with headers: {}'.format(reqHeaders))
        glogger.debug('Sending HTTP request to SPARQL endpoint with auth: {}'.format(auth))
        try:
            response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
        except Exception as e:
            # Error contacting SPARQL endpoint
            glogger.debug('Exception encountered while connecting to SPARQL endpoint')
            return { 'error': str(e) }, 400, headers
        try:
            glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
            # Response headers
            resp = response.text
            headers['Content-Type'] = response.headers['Content-Type']
        except Exception as e:
            # e.g. the endpoint answered without a Content-Type header
            glogger.debug('Exception encountered while connecting to SPARQL endpoint')
            return { 'error': str(e) }, 400, headers

    # If the query is paginated, set link HTTP headers
    if pagination:
        # Get number of total results
        count = gquery.count_query_results(rewritten_query, endpoint)
        pageArg = requestArgs.get('page', None)
        headerLink = pageUtils.buildPaginationHeader(count, pagination, pageArg, requestUrl)
        headers['Link'] = headerLink

    if 'proto' in query_metadata:  # sparql transformer
        resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['proto'], query_metadata['opt'])
    if 'transform' in query_metadata and acceptHeader == 'application/json':  # sparql transformer
        rq = { 'proto': query_metadata['transform'] }
        _, _, opt = SPARQLTransformer.pre_process(rq)
        resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['transform'], opt)

    headers['Server'] = 'grlc/' + grlc_version
    return resp, 200, headers
def dispatchTPFQuery(raw_tpf_query, loader, acceptHeader, content):
    """Executes the specified TPF query.

    Parses subject / predicate / object out of the raw "key=value"-per-line
    query text and forwards them as GET parameters to the Triple Pattern
    Fragments endpoint.  Returns (response-text, 200, headers).
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_tpf_query, loader)
    glogger.debug("=====================================================")
    glogger.debug("Sending query to TPF endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")
    # TODO: pagination for TPF
    # Prepare HTTP request
    reqHeaders = {'Accept': acceptHeader, 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
    if content:
        reqHeaders = {'Accept': static.mimetypes[content], 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
    # split the "key=value" lines into a flat token list
    tpf_list = re.split('\n|=', raw_tpf_query)
    subject = tpf_list[tpf_list.index('subject') + 1]
    predicate = tpf_list[tpf_list.index('predicate') + 1]
    # NOTE(review): 'object' shadows the builtin; rename when code changes are safe
    object = tpf_list[tpf_list.index('object') + 1]
    data = {'subject': subject, 'predicate': predicate, 'object': object}
    response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
    glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
    # Response headers
    resp = response.text
    headers = {}
    headers['Content-Type'] = response.headers['Content-Type']
    headers['Server'] = 'grlc/' + grlc_version
    return resp, 200, headers | src/utils.py | from . import static as static
from . import gquery as gquery
from . import pagination as pageUtils
from . import swagger as swagger
from .prov import grlcPROV
from .fileLoaders import GithubLoader, LocalLoader, URLLoader
from .queryTypes import qType
from . import __version__ as grlc_version
import re
import requests
import json
from rdflib import Graph
import SPARQLTransformer
from . import glogging as glogging
glogger = glogging.getGrlcLogger(__name__)
def getLoader(user, repo, subdir=None, spec_url=None, sha=None, prov=None):
    """Build a fileLoader (LocalLoader, GithubLoader, URLLoader) for the given parameters."""
    # no repo coordinates and no URL -> load from the local file system
    if user is None and repo is None and not spec_url:
        return LocalLoader()
    # an explicit spec URL takes precedence over github coordinates
    if spec_url:
        return URLLoader(spec_url)
    return GithubLoader(user, repo, subdir, sha, prov)
def build_spec(user, repo, subdir=None, sha=None, prov=None, extraMetadata=[]):
    """Build grlc specification for the given github user / repo.

    Deprecated: forwards to grlc.swagger.build_spec and returns its items."""
    glogger.warning(
        "grlc.utils.build_spec is deprecated and will "
        "be removed in the future. Use grlc.swagger.build_spec instead.")
    spec_items = swagger.build_spec(user, repo, subdir, sha, prov, extraMetadata)[0]
    return spec_items
def build_swagger_spec(user, repo, subdir, spec_url, sha, serverName):
    """Build grlc specification for the given github user / repo in swagger format.

    ``serverName`` is written into the spec's 'host' field.  When the loader
    cannot be built (e.g. the repo does not exist), the error is reported
    inside the returned spec instead of raising.
    """
    if user and repo:
        # Init provenance recording
        prov_g = grlcPROV(user, repo)
    else:
        prov_g = None
    swag = swagger.get_blank_spec()
    swag['host'] = serverName
    try:
        loader = getLoader(user, repo, subdir, spec_url, sha, prov_g)
    except Exception as e:
        # If the repo does not exist: surface the error inside the spec
        swag['info'] = {
            'title': 'ERROR!',
            'description': str(e)
        }
        swag['paths'] = {}
        return swag
    prev_commit, next_commit, info, basePath = \
        swagger.get_repo_info(loader, sha, prov_g)
    swag['prev_commit'] = prev_commit
    swag['next_commit'] = next_commit
    swag['info'] = info
    swag['basePath'] = basePath
    # TODO: can we pass loader to build_spec ? --> Ideally yes!
    spec, warnings = swagger.build_spec(user, repo, subdir, spec_url, sha, prov_g)
    # Use items to build API paths
    for item in spec:
        swag['paths'][item['call_name']] = swagger.get_path_for_item(item)
    # TODO: Add bootstrap style to top level HTML
    # Without a better place to display warnings, we can make them part of the description.
    if 'description' not in swag['info']:
        swag['info']['description'] = ''
    for warn in warnings:
        swag['info']['description'] += swagger.get_warning_div(warn)
    if prov_g:
        # close the provenance graph and attach it in turtle format
        prov_g.end_prov_graph()
        swag['prov'] = prov_g.serialize(format='turtle')
    return swag
def dispatch_query(user, repo, query_name, subdir=None, spec_url=None, sha=None,
                   content=None, requestArgs={}, acceptHeader='application/json',
                   requestUrl='http://', formData={}):
    """Executes the specified SPARQL or TPF query.

    Looks the query up by name via a fileLoader and forwards it to the
    matching executor.  Returns a (response, http-status, headers) triple.
    """
    loader = getLoader(user, repo, subdir, spec_url, sha=sha, prov=None)
    query, q_type = loader.getTextForName(query_name)

    # Call name implemented with a SPARQL (or JSON / sparql-transformer) query
    if q_type in (qType['SPARQL'], qType['JSON']):
        # TODO: transform JSON result if suitable (acceptHeader == 'application/json')
        return dispatchSPARQLQuery(query, loader, requestArgs, acceptHeader,
                                   content, formData, requestUrl)
    # Call name implemented with a TPF query
    if q_type == qType['TPF']:
        return dispatchTPFQuery(query, loader, acceptHeader, content)
    return "Couldn't find a SPARQL, RDF dump, or TPF query with the requested name", 404, {}
def dispatchSPARQLQuery(raw_sparql_query, loader, requestArgs, acceptHeader, content,
                        formData, requestUrl):
    """Executes the specified SPARQL query.

    The raw query is rewritten with the request parameters (and pagination),
    then dispatched along one of three paths: a remote RDF dump queried
    locally, an INSERT sent via HTTP POST, or a plain SPARQL endpoint queried
    via HTTP GET.  Returns a (response, status, headers) triple.
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_sparql_query, loader)
    if endpoint == '':
        # NOTE(review): 407 (Proxy Auth Required) is an odd status for a
        # missing endpoint; kept for backwards compatibility.
        return 'No SPARQL endpoint indicated', 407, {}

    glogger.debug("=====================================================")
    glogger.debug("Sending query to SPARQL endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")

    try:
        query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
    except Exception as e:
        # extracting metadata failed (malformed query or decorators)
        return { 'error': str(e) }, 400, {}

    # sparql-transformer queries arrive as dicts and always answer JSON
    acceptHeader = 'application/json' if isinstance(raw_sparql_query, dict) else acceptHeader
    pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""

    rewritten_query = query_metadata['query']

    # Rewrite query using parameter values
    if query_metadata['type'] == 'SelectQuery' or query_metadata['type'] == 'ConstructQuery':
        rewritten_query = gquery.rewrite_query(query_metadata['original_query'], query_metadata['parameters'], requestArgs)

    # Rewrite query using pagination
    if query_metadata['type'] == 'SelectQuery' and 'pagination' in query_metadata:
        rewritten_query = gquery.paginate_query(rewritten_query, query_metadata['pagination'], requestArgs)

    resp = None
    headers = {}

    # If we have a mime field, we load the remote dump and query it locally
    if 'mime' in query_metadata and query_metadata['mime']:
        glogger.debug(
            "Detected {} MIME type, proceeding with locally loading remote dump".format(query_metadata['mime']))
        g = Graph()
        try:
            # NOTE(review): metadata was already extracted above; this second
            # call looks redundant — confirm before removing.
            query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
            g.parse(endpoint, format=query_metadata['mime'])
            glogger.debug("Local RDF graph loaded successfully with {} triples".format(len(g)))
        except Exception as e:
            glogger.error(e)
        results = g.query(rewritten_query, result='sparql')
        # Prepare return format as requested
        resp_string = ""
        if 'application/json' in acceptHeader or (content and 'application/json' in static.mimetypes[content]):
            resp_string = results.serialize(format='json')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        elif 'text/csv' in acceptHeader or (content and 'text/csv' in static.mimetypes[content]):
            resp_string = results.serialize(format='csv')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        else:
            return 'Unacceptable requested format', 415, {}
        # BUGFIX: the serialized results were previously discarded and the
        # function fell through to return None as the body.
        resp = resp_string
        glogger.debug("Finished processing query against RDF dump, end of use case")
        del g  # free the potentially large in-memory graph

    # Check for INSERT/POST
    elif query_metadata['type'] == 'InsertData':
        glogger.debug("Processing INSERT query")
        # Rewrite INSERT
        rewritten_query = rewritten_query.replace("?_g_iri", "{}".format(formData.get('g')))
        rewritten_query = rewritten_query.replace("<s> <p> <o>", formData.get('data'))
        glogger.debug("INSERT query rewritten as {}".format(rewritten_query))

        # Prepare HTTP POST request
        reqHeaders = {'Accept': acceptHeader, 'Content-Type': 'application/sparql-update'}
        response = requests.post(endpoint, data=rewritten_query, headers=reqHeaders, auth=auth)
        glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])

        # Response headers
        resp = response.text
        headers['Content-Type'] = response.headers['Content-Type']

    # If there's no mime type, the endpoint is an actual SPARQL endpoint
    else:
        reqHeaders = {'Accept': acceptHeader}
        if content:
            reqHeaders = {'Accept': static.mimetypes[content]}
        data = {'query': rewritten_query}

        glogger.debug('Sending HTTP request to SPARQL endpoint with params: {}'.format(data))
        glogger.debug('Sending HTTP request to SPARQL endpoint with headers: {}'.format(reqHeaders))
        glogger.debug('Sending HTTP request to SPARQL endpoint with auth: {}'.format(auth))
        try:
            response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
        except Exception as e:
            # Error contacting SPARQL endpoint
            glogger.debug('Exception encountered while connecting to SPARQL endpoint')
            return { 'error': str(e) }, 400, headers
        try:
            glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
            # Response headers
            resp = response.text
            headers['Content-Type'] = response.headers['Content-Type']
        except Exception as e:
            # e.g. the endpoint answered without a Content-Type header
            glogger.debug('Exception encountered while connecting to SPARQL endpoint')
            return { 'error': str(e) }, 400, headers

    # If the query is paginated, set link HTTP headers
    if pagination:
        # Get number of total results
        count = gquery.count_query_results(rewritten_query, endpoint)
        pageArg = requestArgs.get('page', None)
        headerLink = pageUtils.buildPaginationHeader(count, pagination, pageArg, requestUrl)
        headers['Link'] = headerLink

    if 'proto' in query_metadata:  # sparql transformer
        resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['proto'], query_metadata['opt'])
    if 'transform' in query_metadata and acceptHeader == 'application/json':  # sparql transformer
        rq = { 'proto': query_metadata['transform'] }
        _, _, opt = SPARQLTransformer.pre_process(rq)
        resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['transform'], opt)

    headers['Server'] = 'grlc/' + grlc_version
    return resp, 200, headers
def dispatchTPFQuery(raw_tpf_query, loader, acceptHeader, content):
    """Executes the specified TPF query.

    Parses subject / predicate / object out of the raw "key=value"-per-line
    query text and forwards them as GET parameters to the Triple Pattern
    Fragments endpoint.  Returns (response-text, 200, headers).
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_tpf_query, loader)
    glogger.debug("=====================================================")
    glogger.debug("Sending query to TPF endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")
    # TODO: pagination for TPF
    # Prepare HTTP request
    reqHeaders = {'Accept': acceptHeader, 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
    if content:
        reqHeaders = {'Accept': static.mimetypes[content], 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
    # split the "key=value" lines into a flat token list
    tpf_list = re.split('\n|=', raw_tpf_query)
    subject = tpf_list[tpf_list.index('subject') + 1]
    predicate = tpf_list[tpf_list.index('predicate') + 1]
    # NOTE(review): 'object' shadows the builtin; rename when code changes are safe
    object = tpf_list[tpf_list.index('object') + 1]
    data = {'subject': subject, 'predicate': predicate, 'object': object}
    response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
    glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
    # Response headers
    resp = response.text
    headers = {}
    headers['Content-Type'] = response.headers['Content-Type']
    headers['Server'] = 'grlc/' + grlc_version
    return resp, 200, headers | 0.427516 | 0.097305 |
from __future__ import annotations
import json
import tempfile
from os import environ, fsync, makedirs, path, rename
from pathlib import Path
from typing import Dict, List
from ghaudit import auth, config, schema
from ghaudit.config import Config
from ghaudit.query.branch_protection_push_allowances import (
BranchProtectionPushAllowances,
)
from ghaudit.query.compound_query import CompoundQuery
from ghaudit.query.org_members import OrgMembersQuery
from ghaudit.query.org_repositories import OrgRepoQuery
from ghaudit.query.org_teams import OrgTeamsQuery
from ghaudit.query.repo_branch_protection import RepoBranchProtectionQuery
from ghaudit.query.repo_collaborators import RepoCollaboratorQuery
from ghaudit.query.team_children import TeamChildrenQuery
from ghaudit.query.team_permission import TeamRepoQuery
from ghaudit.query.user import UserQuery
from ghaudit.query.user_role import TeamMemberQuery
from ghaudit.ui import ProgressCB
def file_path() -> Path:
    """Return the cache file location, honouring XDG_DATA_HOME.

    Falls back to ~/.local/share when XDG_DATA_HOME is unset, and finally to
    the filesystem root when HOME is unset as well.
    """
    def parent_dir() -> Path:
        base = environ.get("XDG_DATA_HOME")
        if base:
            return Path(base)
        home_dir = environ.get("HOME")
        return Path(home_dir) / ".local" / "share" if home_dir else Path("/")

    return parent_dir() / "ghaudit" / "compliance" / "cache.json"
def load() -> schema.Rstate:
    """Load remote state from the cache file and validate it."""
    with open(file_path(), encoding="UTF-8") as cache_file:
        remote_state = json.load(cache_file)
    schema.validate(remote_state)
    return remote_state
def store(data: schema.Rstate) -> None:
    """Store remote state to file atomically.

    The state is written to a temporary file in the target directory,
    flushed and fsync'ed, and only then renamed over the final path, so a
    crash can never expose a partially-written cache file.
    """
    ofilepath = file_path()
    if not path.exists(ofilepath.parent):
        makedirs(ofilepath.parent)
    temp_path = None
    with tempfile.NamedTemporaryFile(
        mode="w+t", dir=ofilepath.parent, delete=False
    ) as output:
        json.dump(data, output)
        # BUGFIX: make the temp file durable *before* it becomes visible
        # under the final name.  Previously fsync was called on a read-only
        # handle opened after the rename, leaving a window where a crash
        # could expose an incompletely-persisted cache file.
        output.flush()
        fsync(output.fileno())
        temp_path = output.name
    rename(temp_path, ofilepath)
def refresh(
    config_: Config, auth_driver: auth.AuthDriver, progress: ProgressCB
) -> None:
    """Refresh the remote state from github to a local file."""
    synced = _sync(config_, auth_driver, progress)
    print("validating cache")
    if not schema.validate(synced):
        return
    print("persisting cache")
    store(synced)
FRAG_PAGEINFO_FIELDS = """
fragment pageInfoFields on PageInfo {
endCursor
hasNextPage
}
"""
MAX_PARALLEL_QUERIES = 40
ORG_TEAMS_MAX = 90
ORG_MEMBERS_MAX = 90
ORG_REPOSITORIES_MAX = 90
def _sync_progress(data, query, found, progress: ProgressCB):
    """Report synchronisation statistics through the progress callback."""
    stats = query.stats()
    entries = [
        ("total HTTP roundtrips", stats["iterations"]),
        ("graphQL queries", stats["done"], stats["queries"]),
        ("teams", len(schema.org_teams(data)), len(found["teams"])),
        (
            "repositories",
            len(schema.org_repositories(data)),
            len(found["repositories"]),
        ),
        ("org members", len(schema.org_members(data))),
        ("users", len(schema.users(data))),
        (
            "branch protection rules",
            len(schema.all_bp_rules(data)),
            len(found["bprules"]),
        ),
    ]
    progress(entries)
def _sync(config_: Config, auth_driver, progress: ProgressCB):
    """Pull the complete remote state of the organisation from GitHub.

    Seeds a compound query with the org-level team / member / repository
    queries, then repeatedly runs it, appending follow-up queries for every
    newly discovered team, repository, collaborator and branch protection
    rule until nothing is left to fetch.  Returns the accumulated state.
    """
    data = schema.empty()
    # entities for which follow-up queries have already been scheduled
    found = {
        "teams": [],
        "repositories": [],
        "collaborators": [],
        "bprules": [],
    }  # type: Dict[str, List[str]]
    # running counters used to build unique aliases for the sub-queries
    workaround2 = {"team": 0, "repo": 0, "user": 0, "bprules": 0}
    query = CompoundQuery(MAX_PARALLEL_QUERIES)
    demo_params = {
        "organisation": config.get_org_name(config_),
        "teamsMax": ORG_TEAMS_MAX,
        "membersWithRoleMax": ORG_MEMBERS_MAX,
        "repositoriesMax": ORG_REPOSITORIES_MAX,
    }  # type: Dict[str, str | int]
    query.add_frag(FRAG_PAGEINFO_FIELDS)
    query.append(OrgTeamsQuery())
    query.append(OrgMembersQuery())
    query.append(OrgRepoQuery())
    while not query.finished():
        result = query.run(auth_driver, demo_params)
        # merge every top-level response key into the accumulated state
        for key, value in result["data"].items():
            data = schema.merge(data, key, {"data": {"organization": value}})
        # entities seen in the state that have no follow-up query yet
        new_teams = [
            x
            for x in schema.org_teams(data)
            if schema.team_name(x) not in found["teams"]
        ]
        new_repos = [
            x
            for x in schema.org_repositories(data)
            if schema.repo_name(x) not in found["repositories"]
        ]
        new_collaborators = [
            y
            for x in schema.org_repositories(data)
            for y in schema.missing_collaborators(data, x)
            if y not in found["collaborators"]
        ]
        new_bp_rules = [
            x for x in schema.all_bp_rules(data) if x not in found["bprules"]
        ]
        for team in new_teams:
            name = schema.team_name(team)
            query.append(TeamRepoQuery(name, workaround2["team"], 40))
            workaround2["team"] += 1
            query.append(
                TeamMemberQuery(team["node"]["slug"], workaround2["team"], 40)
            )
            workaround2["team"] += 1
            found["teams"].append(name)
            query.append(TeamChildrenQuery(name, workaround2["team"], 40))
            workaround2["team"] += 1
            # NOTE(review): 'name' is appended to found["teams"] twice per
            # team (here and three lines above), which double-counts teams in
            # the progress display — confirm and deduplicate.
            found["teams"].append(name)
        for repo in new_repos:
            name = schema.repo_name(repo)
            query.append(RepoCollaboratorQuery(name, workaround2["repo"], 40))
            workaround2["repo"] += 1
            query.append(
                RepoBranchProtectionQuery(name, workaround2["repo"], 40)
            )
            workaround2["repo"] += 1
            found["repositories"].append(name)
        for login in new_collaborators:
            query.append(UserQuery(login, workaround2["user"]))
            workaround2["user"] += 1
            found["collaborators"].append(login)
        for rule_id in new_bp_rules:
            query.append(
                BranchProtectionPushAllowances(
                    str(rule_id), workaround2["bprules"], 10
                )
            )
            workaround2["bprules"] += 1
            found["bprules"].append(str(rule_id))
        _sync_progress(data, query, found, progress)
    return data | src/ghaudit/cache.py |
from __future__ import annotations
import json
import tempfile
from os import environ, fsync, makedirs, path, rename
from pathlib import Path
from typing import Dict, List
from ghaudit import auth, config, schema
from ghaudit.config import Config
from ghaudit.query.branch_protection_push_allowances import (
BranchProtectionPushAllowances,
)
from ghaudit.query.compound_query import CompoundQuery
from ghaudit.query.org_members import OrgMembersQuery
from ghaudit.query.org_repositories import OrgRepoQuery
from ghaudit.query.org_teams import OrgTeamsQuery
from ghaudit.query.repo_branch_protection import RepoBranchProtectionQuery
from ghaudit.query.repo_collaborators import RepoCollaboratorQuery
from ghaudit.query.team_children import TeamChildrenQuery
from ghaudit.query.team_permission import TeamRepoQuery
from ghaudit.query.user import UserQuery
from ghaudit.query.user_role import TeamMemberQuery
from ghaudit.ui import ProgressCB
def file_path() -> Path:
    """Return the cache file location, honouring XDG_DATA_HOME.

    Falls back to ~/.local/share when XDG_DATA_HOME is unset, and finally to
    the filesystem root when HOME is unset as well.
    """
    def parent_dir() -> Path:
        base = environ.get("XDG_DATA_HOME")
        if base:
            return Path(base)
        home_dir = environ.get("HOME")
        return Path(home_dir) / ".local" / "share" if home_dir else Path("/")

    return parent_dir() / "ghaudit" / "compliance" / "cache.json"
def load() -> schema.Rstate:
    """Load remote state from cache file."""
    with open(file_path(), encoding="UTF-8") as fh:
        rstate = json.load(fh)
    # Raises (or fails) before the caller ever sees an invalid state.
    schema.validate(rstate)
    return rstate
def store(data: schema.Rstate, dest: Path | None = None) -> None:
    """Atomically store remote state to *dest* (default: the cache file).

    The payload is written to a temporary file in the destination
    directory, flushed and fsync'ed, and then renamed over the
    destination, so readers never observe a partially written cache.

    :param data: remote state to persist (must be JSON-serialisable)
    :param dest: optional explicit destination path; defaults to
        :func:`file_path`
    """
    ofilepath = dest if dest is not None else file_path()
    # exist_ok avoids a race with a concurrent writer creating the directory
    # between an exists() check and makedirs().
    makedirs(ofilepath.parent, exist_ok=True)
    with tempfile.NamedTemporaryFile(
        mode="w+t", dir=ofilepath.parent, delete=False
    ) as output:
        json.dump(data, output)
        # Bug fix: flush and fsync the temporary file *before* the rename.
        # The original fsync'ed a freshly re-opened read handle *after*
        # renaming, leaving a window where the renamed file could be
        # incomplete on disk after a crash.
        output.flush()
        fsync(output.fileno())
        temp_path = output.name
    # Atomic on POSIX when source and destination share a filesystem, which
    # they do because the temp file is created in the destination directory.
    rename(temp_path, ofilepath)
def refresh(
    config_: Config, auth_driver: auth.AuthDriver, progress: ProgressCB
) -> None:
    """Refresh the remote state from github to a local file."""
    fetched = _sync(config_, auth_driver, progress)
    print("validating cache")
    if not schema.validate(fetched):
        return
    print("persisting cache")
    store(fetched)
# GraphQL fragment shared by the paginated queries below: exposes the page
# cursor and whether more pages remain.
FRAG_PAGEINFO_FIELDS = """
fragment pageInfoFields on PageInfo {
endCursor
hasNextPage
}
"""
# Maximum number of sub-queries batched into one compound GraphQL request.
MAX_PARALLEL_QUERIES = 40
# Page sizes for the initial organisation-wide queries.
ORG_TEAMS_MAX = 90
ORG_MEMBERS_MAX = 90
ORG_REPOSITORIES_MAX = 90
def _sync_progress(data, query, found, progress: ProgressCB):
    """Push a snapshot of synchronisation statistics to the UI callback."""
    stats = query.stats()
    report = [("total HTTP roundtrips", stats["iterations"])]
    report.append(("graphQL queries", stats["done"], stats["queries"]))
    report.append(("teams", len(schema.org_teams(data)), len(found["teams"])))
    report.append(
        (
            "repositories",
            len(schema.org_repositories(data)),
            len(found["repositories"]),
        )
    )
    report.append(("org members", len(schema.org_members(data))))
    report.append(("users", len(schema.users(data))))
    report.append(
        (
            "branch protection rules",
            len(schema.all_bp_rules(data)),
            len(found["bprules"]),
        )
    )
    progress(report)
def _sync(config_: Config, auth_driver, progress: ProgressCB):
    """Fetch the complete remote state of the organisation from github.

    Starts with one organisation-wide query (teams, members, repositories),
    then keeps running the compound query, fanning out follow-up queries
    for every newly discovered team, repository, collaborator and branch
    protection rule until no query has pages left.  ``found`` records what
    has already been scheduled so each entity is queried exactly once.

    :param config_: ghaudit configuration (used for the organisation name)
    :param auth_driver: authentication driver handed to the query runner
    :param progress: callback receiving progress statistics per roundtrip
    :return: the merged remote state
    """
    data = schema.empty()
    # Entities for which follow-up queries have already been scheduled.
    found = {
        "teams": [],
        "repositories": [],
        "collaborators": [],
        "bprules": [],
    }  # type: Dict[str, List[str]]
    # Monotonic per-kind counters used to give every sub-query a unique alias.
    workaround2 = {"team": 0, "repo": 0, "user": 0, "bprules": 0}
    query = CompoundQuery(MAX_PARALLEL_QUERIES)
    demo_params = {
        "organisation": config.get_org_name(config_),
        "teamsMax": ORG_TEAMS_MAX,
        "membersWithRoleMax": ORG_MEMBERS_MAX,
        "repositoriesMax": ORG_REPOSITORIES_MAX,
    }  # type: Dict[str, str | int]
    query.add_frag(FRAG_PAGEINFO_FIELDS)
    query.append(OrgTeamsQuery())
    query.append(OrgMembersQuery())
    query.append(OrgRepoQuery())
    while not query.finished():
        result = query.run(auth_driver, demo_params)
        for key, value in result["data"].items():
            data = schema.merge(data, key, {"data": {"organization": value}})
        new_teams = [
            x
            for x in schema.org_teams(data)
            if schema.team_name(x) not in found["teams"]
        ]
        new_repos = [
            x
            for x in schema.org_repositories(data)
            if schema.repo_name(x) not in found["repositories"]
        ]
        new_collaborators = [
            y
            for x in schema.org_repositories(data)
            for y in schema.missing_collaborators(data, x)
            if y not in found["collaborators"]
        ]
        new_bp_rules = [
            x for x in schema.all_bp_rules(data) if x not in found["bprules"]
        ]
        for team in new_teams:
            name = schema.team_name(team)
            query.append(TeamRepoQuery(name, workaround2["team"], 40))
            workaround2["team"] += 1
            query.append(
                TeamMemberQuery(team["node"]["slug"], workaround2["team"], 40)
            )
            workaround2["team"] += 1
            query.append(TeamChildrenQuery(name, workaround2["team"], 40))
            workaround2["team"] += 1
            # Bug fix: the name was previously appended both here and after
            # TeamMemberQuery, so each team was counted twice in the
            # "teams" progress statistic. Record it exactly once.
            found["teams"].append(name)
        for repo in new_repos:
            name = schema.repo_name(repo)
            query.append(RepoCollaboratorQuery(name, workaround2["repo"], 40))
            workaround2["repo"] += 1
            query.append(
                RepoBranchProtectionQuery(name, workaround2["repo"], 40)
            )
            workaround2["repo"] += 1
            found["repositories"].append(name)
        for login in new_collaborators:
            query.append(UserQuery(login, workaround2["user"]))
            workaround2["user"] += 1
            found["collaborators"].append(login)
        for rule_id in new_bp_rules:
            query.append(
                BranchProtectionPushAllowances(
                    str(rule_id), workaround2["bprules"], 10
                )
            )
            workaround2["bprules"] += 1
            found["bprules"].append(str(rule_id))
        _sync_progress(data, query, found, progress)
    return data
import os
from unittest import TestCase
from unittest.mock import patch, MagicMock, call
# boto3 must be stubbed *before* the handler module is imported, because the
# module creates its DynamoDB resource and Lambda client at import time.
with patch("boto3.resource") as boto_resource_mock:
    with patch("boto3.client") as boto_client_mock:
        from functions.usergamedata.DeleteAll import index

# Fake resource names injected into the handler via environment variables.
BUNDLES_TABLE_NAME = 'test_bundles_table'
ITEMS_TABLE_NAME = 'test_bundleitems_table'
BATCH_DELETE_HELPER_LAMBDA_NAME = "test_lambda_arn"
# Patch Lambda environment variables:
@patch.dict(os.environ, {
    'BUNDLES_TABLE_NAME': BUNDLES_TABLE_NAME,
    'BUNDLE_ITEMS_TABLE_NAME': ITEMS_TABLE_NAME,
    'BATCH_DELETE_HELPER_LAMBDA_NAME': BATCH_DELETE_HELPER_LAMBDA_NAME
})
class TestDeleteAll(TestCase):
    """Unit tests for the usergamedata DeleteAll Lambda handler."""

    def setUp(self):
        # Fresh mocks per test so programmed side effects do not leak
        # between test cases.
        index.ddb_resource = MagicMock()
        index.lambda_client = MagicMock()

    def test_delete_all_invalid_player_returns_401_error(self):
        """A request whose authorizer carries no claims is rejected with 401."""
        # Arrange
        event = self.get_lambda_event()
        event['requestContext'] = {'authorizer': {'claims': {}}}
        # Act
        result = index.lambda_handler(event, None)
        # Assert
        self.assertEqual(401, result['statusCode'])
        index.ddb_resource.batch_write_item.assert_not_called()

    def test_delete_all_player_has_no_bundles_returns_204_error(self):
        """A player with no stored bundles still gets 204 (nothing to delete)."""
        # Arrange
        event = self.get_lambda_event()
        index.ddb_resource.Table().query.side_effect = [{'Items': []}]
        # Act
        result = index.lambda_handler(event, None)
        # Assert
        self.assertEqual(204, result['statusCode'])

    def test_delete_all_player_has_data_success(self):
        """Bundle items and bundles are handed to the batch-delete helper."""
        # Arrange
        event = self.get_lambda_event()
        # Programmed query pages: one bundles page, one bundle-items page,
        # then empty pages to terminate pagination.
        index.ddb_resource.Table().query.side_effect = [
            {'Items': [{'player_id': '12345', 'bundle_name': 'TestBundle'}]},
            {'Items': [{'player_id_bundle': '12345_TestBundle', 'bundle_item_key': 'Key'}]}, {'Items': []},
            {'Items': []}]
        # Act
        result = index.lambda_handler(event, None)
        # Assert
        self.assertEqual(204, result['statusCode'])
        calls = [call(FunctionName=BATCH_DELETE_HELPER_LAMBDA_NAME, InvocationType='Event',
                      Payload='{"TableName": "test_bundleitems_table", "DeleteRequest": [{"DeleteRequest": {"Key": {'
                              '"player_id_bundle": "12345_TestBundle", "bundle_item_key": "Key"}}}]}'),
                 call(FunctionName=BATCH_DELETE_HELPER_LAMBDA_NAME, InvocationType='Event',
                      Payload='{"TableName": "test_bundles_table", "DeleteRequest": [{'
                              '"DeleteRequest": {"Key": {"player_id": "12345", "bundle_name": "TestBundle"}}}]}')]
        # any_order=False: items must be deleted before their parent bundles.
        index.lambda_client.invoke.assert_has_calls(calls, any_order=False)

    @staticmethod
    def get_lambda_event():
        """Return a representative API Gateway DELETE event with valid claims."""
        return {
            'resource': '/usergamedata',
            'path': '/usergamedata/',
            'httpMethod': 'DELETE',
            'headers': {
                'Accept': '*/*',
                'Accept-Encoding': 'gzip, deflate, br',
                'Content-Type': 'application/json',
                'Host': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
                'User-Agent': 'TestAgent',
                'X-Amzn-Trace-Id': 'Root=1-61003a02-7e1356b05a1e1569614c0c46',
                'X-Forwarded-For': '127.0.0.1',
                'X-Forwarded-Port': '443',
                'X-Forwarded-Proto': 'https'
            },
            'multiValueHeaders': {
                'Accept': ['*/*'],
                'Accept-Encoding': ['gzip, deflate, br'],
                'Content-Type': ['application/json'],
                'Host': ['abcdefghij.execute-api.us-west-2.amazonaws.com'],
                'User-Agent': ['TestAgent'],
                'X-Amzn-Trace-Id': ['Root=1-61003a02-7e1356b05a1e1569614c0c46'],
                'X-Forwarded-For': ['127.0.0.1'],
                'X-Forwarded-Port': ['443'],
                'X-Forwarded-Proto': ['https']
            },
            'queryStringParameters': None,
            'multiValueQueryStringParameters': None,
            'pathParameters': None,
            'stageVariables': None,
            'requestContext': {
                'resourceId': 'abcdef',
                'authorizer': {
                    'claims': {
                        'sub': '12345678-1234-1234-1234-123456789012',
                        'iss': 'https://cognito-idp.us-west-2.amazonaws.com/us-west-2_123456789',
                        'cognito:username': 'jakschic',
                        'origin_jti': '12345678-1234-1234-1234-123456789012',
                        'aud': '7s24tlabcn8n0defbfoghijsgn',
                        'event_id': '6234d920-b637-4cdf-bd44-3a5e53f51569',
                        'token_use': 'id',
                        'auth_time': '1627438909',
                        'custom:gk_user_id': '12345678-1234-1234-1234-123456789012',
                        'exp': 'Wed Jul 28 03:21:49 UTC 2021',
                        'iat': 'Wed Jul 28 02:21:49 UTC 2021',
                        'jti': '7s24tlabcn8n0defbfoghijsgn',
                        'email': '<EMAIL>'
                    }
                },
                'domainName': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
                'apiId': 'abcdefghij'
            },
            'body': None,
            'isBase64Encoded': False
        }
import os
from unittest import TestCase
from unittest.mock import patch, MagicMock, call
with patch("boto3.resource") as boto_resource_mock:
with patch("boto3.client") as boto_client_mock:
from functions.usergamedata.DeleteAll import index
BUNDLES_TABLE_NAME = 'test_bundles_table'
ITEMS_TABLE_NAME = 'test_bundleitems_table'
BATCH_DELETE_HELPER_LAMBDA_NAME = "test_lambda_arn"
# Patch Lambda environment variables:
@patch.dict(os.environ, {
'BUNDLES_TABLE_NAME': BUNDLES_TABLE_NAME,
'BUNDLE_ITEMS_TABLE_NAME': ITEMS_TABLE_NAME,
'BATCH_DELETE_HELPER_LAMBDA_NAME': BATCH_DELETE_HELPER_LAMBDA_NAME
})
class TestDeleteAll(TestCase):
def setUp(self):
index.ddb_resource = MagicMock()
index.lambda_client = MagicMock()
def test_delete_all_invalid_player_returns_401_error(self):
# Arrange
event = self.get_lambda_event()
event['requestContext'] = {'authorizer': {'claims': {}}}
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(401, result['statusCode'])
index.ddb_resource.batch_write_item.assert_not_called()
def test_delete_all_player_has_no_bundles_returns_204_error(self):
# Arrange
event = self.get_lambda_event()
index.ddb_resource.Table().query.side_effect = [{'Items': []}]
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(204, result['statusCode'])
def test_delete_all_player_has_data_success(self):
# Arrange
event = self.get_lambda_event()
index.ddb_resource.Table().query.side_effect = [
{'Items': [{'player_id': '12345', 'bundle_name': 'TestBundle'}]},
{'Items': [{'player_id_bundle': '12345_TestBundle', 'bundle_item_key': 'Key'}]}, {'Items': []},
{'Items': []}]
# Act
result = index.lambda_handler(event, None)
# Assert
self.assertEqual(204, result['statusCode'])
calls = [call(FunctionName=BATCH_DELETE_HELPER_LAMBDA_NAME, InvocationType='Event',
Payload='{"TableName": "test_bundleitems_table", "DeleteRequest": [{"DeleteRequest": {"Key": {'
'"player_id_bundle": "12345_TestBundle", "bundle_item_key": "Key"}}}]}'),
call(FunctionName=BATCH_DELETE_HELPER_LAMBDA_NAME, InvocationType='Event',
Payload='{"TableName": "test_bundles_table", "DeleteRequest": [{'
'"DeleteRequest": {"Key": {"player_id": "12345", "bundle_name": "TestBundle"}}}]}')]
index.lambda_client.invoke.assert_has_calls(calls, any_order=False)
@staticmethod
def get_lambda_event():
return {
'resource': '/usergamedata',
'path': '/usergamedata/',
'httpMethod': 'DELETE',
'headers': {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/json',
'Host': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
'User-Agent': 'TestAgent',
'X-Amzn-Trace-Id': 'Root=1-61003a02-7e1356b05a1e1569614c0c46',
'X-Forwarded-For': '127.0.0.1',
'X-Forwarded-Port': '443',
'X-Forwarded-Proto': 'https'
},
'multiValueHeaders': {
'Accept': ['*/*'],
'Accept-Encoding': ['gzip, deflate, br'],
'Content-Type': ['application/json'],
'Host': ['abcdefghij.execute-api.us-west-2.amazonaws.com'],
'User-Agent': ['TestAgent'],
'X-Amzn-Trace-Id': ['Root=1-61003a02-7e1356b05a1e1569614c0c46'],
'X-Forwarded-For': ['127.0.0.1'],
'X-Forwarded-Port': ['443'],
'X-Forwarded-Proto': ['https']
},
'queryStringParameters': None,
'multiValueQueryStringParameters': None,
'pathParameters': None,
'stageVariables': None,
'requestContext': {
'resourceId': 'abcdef',
'authorizer': {
'claims': {
'sub': '12345678-1234-1234-1234-123456789012',
'iss': 'https://cognito-idp.us-west-2.amazonaws.com/us-west-2_123456789',
'cognito:username': 'jakschic',
'origin_jti': '12345678-1234-1234-1234-123456789012',
'aud': '7s24tlabcn8n0defbfoghijsgn',
'event_id': '6234d920-b637-4cdf-bd44-3a5e53f51569',
'token_use': 'id',
'auth_time': '1627438909',
'custom:gk_user_id': '12345678-1234-1234-1234-123456789012',
'exp': 'Wed Jul 28 03:21:49 UTC 2021',
'iat': 'Wed Jul 28 02:21:49 UTC 2021',
'jti': '7s24tlabcn8n0defbfoghijsgn',
'email': '<EMAIL>'
}
},
'domainName': 'abcdefghij.execute-api.us-west-2.amazonaws.com',
'apiId': 'abcdefghij'
},
'body': None,
'isBase64Encoded': False
} | 0.47317 | 0.275671 |
import re
import dns.exception
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
TLSA = 52
HIP = 55
CDS = 59
CDNSKEY = 60
CSYNC = 62
SPF = 99
UNSPEC = 103
EUI48 = 108
EUI64 = 109
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
URI = 256
CAA = 257
AVC = 258
TA = 32768
DLV = 32769
_by_text = {
'NONE': NONE,
'A': A,
'NS': NS,
'MD': MD,
'MF': MF,
'CNAME': CNAME,
'SOA': SOA,
'MB': MB,
'MG': MG,
'MR': MR,
'NULL': NULL,
'WKS': WKS,
'PTR': PTR,
'HINFO': HINFO,
'MINFO': MINFO,
'MX': MX,
'TXT': TXT,
'RP': RP,
'AFSDB': AFSDB,
'X25': X25,
'ISDN': ISDN,
'RT': RT,
'NSAP': NSAP,
'NSAP-PTR': NSAP_PTR,
'SIG': SIG,
'KEY': KEY,
'PX': PX,
'GPOS': GPOS,
'AAAA': AAAA,
'LOC': LOC,
'NXT': NXT,
'SRV': SRV,
'NAPTR': NAPTR,
'KX': KX,
'CERT': CERT,
'A6': A6,
'DNAME': DNAME,
'OPT': OPT,
'APL': APL,
'DS': DS,
'SSHFP': SSHFP,
'IPSECKEY': IPSECKEY,
'RRSIG': RRSIG,
'NSEC': NSEC,
'DNSKEY': DNSKEY,
'DHCID': DHCID,
'NSEC3': NSEC3,
'NSEC3PARAM': NSEC3PARAM,
'TLSA': TLSA,
'HIP': HIP,
'CDS': CDS,
'CDNSKEY': CDNSKEY,
'CSYNC': CSYNC,
'SPF': SPF,
'UNSPEC': UNSPEC,
'EUI48': EUI48,
'EUI64': EUI64,
'TKEY': TKEY,
'TSIG': TSIG,
'IXFR': IXFR,
'AXFR': AXFR,
'MAILB': MAILB,
'MAILA': MAILA,
'ANY': ANY,
'URI': URI,
'CAA': CAA,
'AVC': AVC,
'TA': TA,
'DLV': DLV,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.
_by_value = dict((y, x) for x, y in _by_text.items())

# Types handled specially by is_metatype() beyond the TKEY..ANY range.
_metatypes = {
    OPT: True
}

# Types of which at most one rrset may exist at a given name.
_singletons = {
    SOA: True,
    NXT: True,
    DNAME: True,
    NSEC: True,
    # CNAME is technically a singleton, but we allow multiple CNAMEs.
}

# Matches the RFC 3597 generic form for unknown types, e.g. "TYPE1234".
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
class UnknownRdatatype(dns.exception.DNSException):
    """DNS resource record type is unknown.

    Raised by from_text() when the mnemonic is not recognised and does not
    use the generic ``TYPEnnn`` form.
    """
def from_text(text):
    """Convert text into a DNS rdata type value.
    @param text: the text
    @type text: string
    @raises dns.rdatatype.UnknownRdatatype: the type is unknown
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: int"""
    value = _by_text.get(text.upper())
    if value is not None:
        return value
    # Not a known mnemonic; accept the generic "TYPEnnn" form.
    match = _unknown_type_pattern.match(text)
    if match is None:
        raise UnknownRdatatype
    value = int(match.group(1))
    if not 0 <= value <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    return value
def to_text(value):
    """Convert a DNS rdata type to text.
    @param value: the rdata type value
    @type value: int
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: string"""
    if not 0 <= value <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    # Fall back to the generic "TYPEnnn" form for unknown values.
    return _by_value.get(value, 'TYPE' + repr(value))
def is_metatype(rdtype):
    """True if the type is a metatype.
    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""
    return (TKEY <= rdtype <= ANY) or rdtype in _metatypes
def is_singleton(rdtype):
    """True if the type is a singleton.
    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""
    return rdtype in _singletons
import dns.exception
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
TLSA = 52
HIP = 55
CDS = 59
CDNSKEY = 60
CSYNC = 62
SPF = 99
UNSPEC = 103
EUI48 = 108
EUI64 = 109
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
URI = 256
CAA = 257
AVC = 258
TA = 32768
DLV = 32769
_by_text = {
'NONE': NONE,
'A': A,
'NS': NS,
'MD': MD,
'MF': MF,
'CNAME': CNAME,
'SOA': SOA,
'MB': MB,
'MG': MG,
'MR': MR,
'NULL': NULL,
'WKS': WKS,
'PTR': PTR,
'HINFO': HINFO,
'MINFO': MINFO,
'MX': MX,
'TXT': TXT,
'RP': RP,
'AFSDB': AFSDB,
'X25': X25,
'ISDN': ISDN,
'RT': RT,
'NSAP': NSAP,
'NSAP-PTR': NSAP_PTR,
'SIG': SIG,
'KEY': KEY,
'PX': PX,
'GPOS': GPOS,
'AAAA': AAAA,
'LOC': LOC,
'NXT': NXT,
'SRV': SRV,
'NAPTR': NAPTR,
'KX': KX,
'CERT': CERT,
'A6': A6,
'DNAME': DNAME,
'OPT': OPT,
'APL': APL,
'DS': DS,
'SSHFP': SSHFP,
'IPSECKEY': IPSECKEY,
'RRSIG': RRSIG,
'NSEC': NSEC,
'DNSKEY': DNSKEY,
'DHCID': DHCID,
'NSEC3': NSEC3,
'NSEC3PARAM': NSEC3PARAM,
'TLSA': TLSA,
'HIP': HIP,
'CDS': CDS,
'CDNSKEY': CDNSKEY,
'CSYNC': CSYNC,
'SPF': SPF,
'UNSPEC': UNSPEC,
'EUI48': EUI48,
'EUI64': EUI64,
'TKEY': TKEY,
'TSIG': TSIG,
'IXFR': IXFR,
'AXFR': AXFR,
'MAILB': MAILB,
'MAILA': MAILA,
'ANY': ANY,
'URI': URI,
'CAA': CAA,
'AVC': AVC,
'TA': TA,
'DLV': DLV,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.
_by_value = dict((y, x) for x, y in _by_text.items())
_metatypes = {
OPT: True
}
_singletons = {
SOA: True,
NXT: True,
DNAME: True,
NSEC: True,
# CNAME is technically a singleton, but we allow multiple CNAMEs.
}
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
class UnknownRdatatype(dns.exception.DNSException):
"""DNS resource record type is unknown."""
def from_text(text):
"""Convert text into a DNS rdata type value.
@param text: the text
@type text: string
@raises dns.rdatatype.UnknownRdatatype: the type is unknown
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: int"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_type_pattern.match(text)
if match is None:
raise UnknownRdatatype
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
return value
def to_text(value):
"""Convert a DNS rdata type to text.
@param value: the rdata type value
@type value: int
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: string"""
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
text = _by_value.get(value)
if text is None:
text = 'TYPE' + repr(value)
return text
def is_metatype(rdtype):
"""True if the type is a metatype.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if rdtype >= TKEY and rdtype <= ANY or rdtype in _metatypes:
return True
return False
def is_singleton(rdtype):
"""True if the type is a singleton.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if rdtype in _singletons:
return True
return False | 0.388386 | 0.078325 |
from functools import lru_cache
# Features that must be present in every inference request.
REQUIRED_FEATURES = [
    "is_question",
    "action_verb_full",
    "language_question",
    "question_mark_full",
    "norm_text_len",
]


def find_absent_features(data):
    """Return the required feature names missing from *data*, in order."""
    return [feature for feature in REQUIRED_FEATURES if feature not in data]
def check_feature_types(data):
    """Return (value, expected_type) pairs for each mistyped feature.

    :param data: mapping of feature name to value; must contain every key
        checked below (see REQUIRED_FEATURES)
    :return: list of (offending value, expected type) tuples, empty when
        all features have the expected type
    """
    types = {
        "is_question": bool,
        "action_verb_full": bool,
        "language_question": bool,
        "question_mark_full": bool,
        "norm_text_len": float,
    }
    mistypes = []
    # Bug fix: iterating the dict directly yields only the keys, so
    # unpacking each key string into (field, data_type) raised ValueError.
    # Iterate over .items() to get (name, expected type) pairs.
    for field, data_type in types.items():
        if not isinstance(data[field], data_type):
            mistypes.append((data[field], data_type))
    return mistypes
def run_heuristic(question_len):
    # Stub: placeholder for a fast, question-length-based fallback estimate
    # used when the full feature payload is invalid.
    pass


@lru_cache(maxsize=128)
def run_model(question_data):
    """
    This is a stub function. We actually use the lru_cache with a purpose
    in app.py
    :param question_data:
    """
    # Insert any slow model inference below
    pass
def validate_and_handle_request(question_data):
    """Validate the request features and dispatch to model or heuristic.

    Raises ValueError when required features are missing, or when types are
    wrong and no usable ``text_len`` fallback is available.
    """
    missing = find_absent_features(question_data)
    if missing:
        raise ValueError("Missing feature(s) %s" % missing)
    wrong_types = check_feature_types(question_data)
    if wrong_types:
        # Degraded path: with at least a float question length we can still
        # answer via the heuristic instead of failing outright.
        text_len = question_data.get("text_len")
        if isinstance(text_len, float):
            return run_heuristic(text_len)
        raise ValueError("Incorrect type(s) %s" % wrong_types)
    return run_model(question_data)
def verify_output_type_and_range(output):
    """Validate that *output* is a float strictly between 0 and 1.

    :param output: model output to validate
    :raises ValueError: if output is not a float or is outside (0, 1)
    """
    if not isinstance(output, float):
        raise ValueError("Wrong output type %s, %s" % (output, type(output)))
    if not 0 < output < 1:
        # Bug fix: the original used '"... %s, %s" % output', formatting two
        # placeholders with a single value, which raised TypeError instead
        # of the intended ValueError.
        raise ValueError(
            "Output out of range %s, %s" % (output, type(output))
        )
def validate_and_correct_output(question_data, model_output):
    """Return *model_output* if valid, otherwise fall back to the heuristic.

    :param question_data: feature mapping; must contain "text_len" for the
        heuristic fallback
    :param model_output: candidate model result to validate
    """
    try:
        # Raises ValueError if model output is incorrect
        verify_output_type_and_range(model_output)
    except ValueError:
        # Bug fix: the heuristic result was computed but discarded, so the
        # invalid model output was returned anyway. Return the fallback.
        return run_heuristic(question_data["text_len"])
    # If we did not raise an error, we return our model result
    return model_output
REQUIRED_FEATURES = [
"is_question",
"action_verb_full",
"language_question",
"question_mark_full",
"norm_text_len",
]
def find_absent_features(data):
missing = []
for feat in REQUIRED_FEATURES:
if feat not in data.keys():
missing.append(feat)
return missing
def check_feature_types(data):
types = {
"is_question": bool,
"action_verb_full": bool,
"language_question": bool,
"question_mark_full": bool,
"norm_text_len": float,
}
mistypes = []
for field, data_type in types:
if not isinstance(data[field], data_type):
mistypes.append((data[field], data_type))
return mistypes
def run_heuristic(question_len):
pass
@lru_cache(maxsize=128)
def run_model(question_data):
"""
This is a stub function. We actually use the lru_cache with a purpose
in app.py
:param question_data:
"""
# Insert any slow model inference below
pass
def validate_and_handle_request(question_data):
missing = find_absent_features(question_data)
if len(missing) > 0:
raise ValueError("Missing feature(s) %s" % missing)
wrong_types = check_feature_types(question_data)
if len(wrong_types) > 0:
# If data is wrong but we have the length of the question, run heuristic
if "text_len" in question_data.keys():
if isinstance(question_data["text_len"], float):
return run_heuristic(question_data["text_len"])
raise ValueError("Incorrect type(s) %s" % wrong_types)
return run_model(question_data)
def verify_output_type_and_range(output):
if not isinstance(output, float):
raise ValueError("Wrong output type %s, %s" % (output, type(output)))
if not 0 < output < 1:
raise ValueError("Output out of range %s, %s" % output)
def validate_and_correct_output(question_data, model_output):
# Verify type and range and raise errors accordingly
try:
# Raises value error if model output is incorrect
verify_output_type_and_range(model_output)
except ValueError:
# We run a heuristic, but could run a different model here
run_heuristic(question_data["text_len"])
# If we did not raise an error, we return our model result
return model_output | 0.612426 | 0.472379 |
from os import path
import json
import pandas as pd
import numpy as np
import multiprocessing as mp
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import cosine_similarity
from percy.clusters import process, per_similarity, per_timeline
# Number of closest regions to keep per similarity metric.
TOP_K = 100


def get_top_k_target(df, attr):
    """Return the TOP_K-th smallest value of ``df[attr]``.

    When the frame has fewer than TOP_K rows, the largest value is
    returned instead (so every row passes a ``<=`` filter against it).
    """
    ordered = df[attr].sort_values(ascending=True)
    cutoff = min(TOP_K, len(df)) - 1
    return ordered.iloc[cutoff]
def similarity_worker(args):
    """Multiprocessing entry point: unpack a (metadata, df, offset) task
    tuple and compute timeline similarities for that chunk."""
    return per_timeline(*args)
if __name__ == "__main__":
    print('Loading metadata...')
    metadata = {}
    with open(path.join('inf-covid19-data', 'data', 'metadata.json')) as f:
        metadata = json.load(f)
    # Flatten region metadata into one attribute dataframe.
    print('Processing attributes...')
    df_attributes = process(metadata)
    print(f' Found {len(df_attributes)} regions.')
    # Cluster regions by their static attributes and persist the labels.
    print('Clustering by attributes...')
    clusters = per_similarity(df_attributes)
    df_attributes['cluster'] = clusters.labels_
    df_attributes = df_attributes.sort_values(by=['cluster', 'key'])
    df_attributes.to_csv(path.join('inf-covid19-similarity-data', 'regions.csv'), index=False)
    clusters = df_attributes['cluster'].unique()
    print(f' Found {len(clusters)} clusters.')
    # Pairwise similarity by epidemic timeline, computed in parallel over
    # chunks of 500 regions.
    print('Calculating similarity by timeline...')
    df_similarities = None
    with mp.Pool(processes=3) as pool:
        dfs = pool.map(similarity_worker, ((metadata, df_attributes, offset) for offset in range(0, len(df_attributes), 500)))
        df_similarities = pd.concat(dfs, ignore_index=True)
    # Emit one CSV per region containing its closest matches.
    print('Saving output file for each region...')
    for region in df_attributes['key']:
        # Pairs are stored once; this region may be on either side.
        is_a = df_similarities['region_a'] == region
        is_b = df_similarities['region_b'] == region
        region_df = df_similarities[is_a | is_b].copy()
        if len(region_df) == 0:
            continue
        # Resolve the "other" region of each pair.
        region_df['region'] = region_df.apply(lambda r: r['region_b'] if r['region_a'] == region else r['region_a'], axis=1)
        region_df = region_df[['region', 'cases_distance', 'deaths_distance', 'cases_per_100k_distance', 'deaths_per_100k_distance', 'is_same_cluster']]
        # Keep a pair if it is within the top-K closest by any distance
        # metric, or if both regions share an attribute cluster.
        within_cases_top_k = region_df['cases_distance'] <= get_top_k_target(region_df, 'cases_distance')
        within_deaths_top_k = region_df['deaths_distance'] <= get_top_k_target(region_df, 'deaths_distance')
        within_cases_per_100k_top_k = region_df['cases_per_100k_distance'] <= get_top_k_target(region_df, 'cases_per_100k_distance')
        within_deaths_per_100k_top_k = region_df['deaths_per_100k_distance'] <= get_top_k_target(region_df, 'deaths_per_100k_distance')
        within_top_k = within_cases_top_k | within_deaths_top_k | within_cases_per_100k_top_k | within_deaths_per_100k_top_k
        within_same_cluster = region_df['is_same_cluster'] == True
        region_df[within_top_k | within_same_cluster].to_csv(path.join('inf-covid19-similarity-data', 'by_key', f'{region}.csv'), index=False)
import json
import pandas as pd
import numpy as np
import multiprocessing as mp
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import cosine_similarity
from percy.clusters import process, per_similarity, per_timeline
TOP_K = 100
def get_top_k_target(df, attr):
return df[attr].sort_values(ascending=True).iloc[min(TOP_K, len(df))-1]
def similarity_worker(args):
metadata, df, offset = args
return per_timeline(metadata, df, offset)
if __name__ == "__main__":
print('Loading metadata...')
metadata = {}
with open(path.join('inf-covid19-data', 'data', 'metadata.json')) as f:
metadata = json.load(f)
# process atributes
print('Processing attributes...')
df_attributes = process(metadata)
print(f' Found {len(df_attributes)} regions.')
# clustering by attributes
print('Clustering by attributes...')
clusters = per_similarity(df_attributes)
df_attributes['cluster'] = clusters.labels_
df_attributes = df_attributes.sort_values(by=['cluster', 'key'])
df_attributes.to_csv(path.join('inf-covid19-similarity-data', 'regions.csv'), index=False)
clusters = df_attributes['cluster'].unique()
print(f' Found {len(clusters)} clusters.')
# similarity by timeline
print('Calculating similarity by timeline...')
df_similarities = None
with mp.Pool(processes=3) as pool:
dfs = pool.map(similarity_worker, ((metadata, df_attributes, offset) for offset in range(0, len(df_attributes), 500)))
df_similarities = pd.concat(dfs, ignore_index=True)
# save each region
print('Saving output file for each region...')
for region in df_attributes['key']:
is_a = df_similarities['region_a'] == region
is_b = df_similarities['region_b'] == region
region_df = df_similarities[is_a | is_b].copy()
if len(region_df) == 0:
continue
region_df['region'] = region_df.apply(lambda r: r['region_b'] if r['region_a'] == region else r['region_a'], axis=1)
region_df = region_df[['region', 'cases_distance', 'deaths_distance', 'cases_per_100k_distance', 'deaths_per_100k_distance', 'is_same_cluster']]
within_cases_top_k = region_df['cases_distance'] <= get_top_k_target(region_df, 'cases_distance')
within_deaths_top_k = region_df['deaths_distance'] <= get_top_k_target(region_df, 'deaths_distance')
within_cases_per_100k_top_k = region_df['cases_per_100k_distance'] <= get_top_k_target(region_df, 'cases_per_100k_distance')
within_deaths_per_100k_top_k = region_df['deaths_per_100k_distance'] <= get_top_k_target(region_df, 'deaths_per_100k_distance')
within_top_k = within_cases_top_k | within_deaths_top_k | within_cases_per_100k_top_k | within_deaths_per_100k_top_k
within_same_cluster = region_df['is_same_cluster'] == True
region_df[within_top_k | within_same_cluster].to_csv(path.join('inf-covid19-similarity-data', 'by_key', f'{region}.csv'), index=False) | 0.461502 | 0.176707 |
import argparse
from ...helper import add_arg_group, _SHOW_ALL_ARGS, KVAppendAction
from .... import __default_host__
from .... import helper
from ....enums import OnErrorStrategy, SocketType
def mixin_zed_runtime_parser(parser):
    """Mixing in arguments required by :class:`ZEDRuntime` into the given parser.
    :param parser: the parser instance to which we add arguments
    """
    gp = add_arg_group(parser, title='ZEDRuntime')
    # Imported here rather than at module level to avoid a circular import.
    from jina import __default_executor__
    # --- executor configuration -------------------------------------------
    gp.add_argument(
        '--uses',
        type=str,
        default=__default_executor__,
        help='''
The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
''',
    )
    gp.add_argument(
        '--uses-with',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
    )
    gp.add_argument(
        '--uses-metas',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
    )
    gp.add_argument(
        '--uses-requests',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
    )
    gp.add_argument(
        '--py-modules',
        type=str,
        nargs='*',
        metavar='PATH',
        help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
''',
    )
    # --- networking -------------------------------------------------------
    # NOTE: random_port() is evaluated once, when the parser is built, so the
    # default is fixed for the lifetime of this parser instance.
    gp.add_argument(
        '--port-in',
        type=int,
        default=helper.random_port(),
        help='The port for input data, default a random port between [49152, 65535]',
    )
    gp.add_argument(
        '--port-out',
        type=int,
        default=helper.random_port(),
        help='The port for output data, default a random port between [49152, 65535]',
    )
    gp.add_argument(
        '--hosts-in-connect',
        type=str,
        nargs='*',
        help=f'The host address for input, by default it is {__default_host__}',
    )
    gp.add_argument(
        '--host-in',
        type=str,
        default=__default_host__,
        help=f'The host address for input, by default it is {__default_host__}',
    )
    gp.add_argument(
        '--host-out',
        type=str,
        default=__default_host__,
        help=f'The host address for output, by default it is {__default_host__}',
    )
    gp.add_argument(
        '--socket-in',
        type=SocketType.from_string,
        choices=list(SocketType),
        default=SocketType.PULL_BIND,
        help='The socket type for input port',
    )
    gp.add_argument(
        '--socket-out',
        type=SocketType.from_string,
        choices=list(SocketType),
        default=SocketType.PUSH_BIND,
        help='The socket type for output port',
    )
    # --- runtime behaviour ------------------------------------------------
    gp.add_argument(
        '--memory-hwm',
        type=int,
        default=-1,
        help='The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. '
        '-1 means no restriction',
    )
    gp.add_argument(
        '--on-error-strategy',
        type=OnErrorStrategy.from_string,
        choices=list(OnErrorStrategy),
        default=OnErrorStrategy.IGNORE,
        help='''
The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the success execution in the sequel flow. If something
is wrong in the upstream, it is hard to carry this exception and moving forward without any side-effect.
''',
    )
    gp.add_argument(
        '--native',
        action='store_true',
        default=False,
        help='If set, only native Executors is allowed, and the Executor is always run inside ZEDRuntime.',
    )
    # --- advanced/hidden flags (shown only when _SHOW_ALL_ARGS is set) ----
    gp.add_argument(
        '--num-part',
        type=int,
        default=0,
        help='the number of messages expected from upstream, 0 and 1 means single part'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--dynamic-routing-out',
        action='store_true',
        default=False,
        help='Tells if ZEDRuntime should respect routing graph for outgoing traffic.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--dynamic-routing-in',
        action='store_true',
        default=False,
        help='Tells if ZEDRuntime should handle incoming traffic as dynamic routing.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--grpc-data-requests',
        action='store_true',
        default=False,
        help='Tells if a Pea should use gRPC for data requests. Works only with dynamic routing out.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    gp.add_argument(
        '--runs-in-docker',
        action='store_true',
        default=False,
        help='Informs a Pea that runs in a container. Important to properly set networking information',
    )
    gp.add_argument(
        '--dump-path',
        type=str,
        default='',
        help='Dump path to be passed to the executor'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
from ...helper import add_arg_group, _SHOW_ALL_ARGS, KVAppendAction
from .... import __default_host__
from .... import helper
from ....enums import OnErrorStrategy, SocketType
def mixin_zed_runtime_parser(parser):
"""Mixing in arguments required by :class:`ZEDRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='ZEDRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
help='The port for input data, default a random port between [49152, 65535]',
)
gp.add_argument(
'--port-out',
type=int,
default=helper.random_port(),
help='The port for output data, default a random port between [49152, 65535]',
)
gp.add_argument(
'--hosts-in-connect',
type=str,
nargs='*',
help=f'The host address for input, by default it is {__default_host__}',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for input, by default it is {__default_host__}',
)
gp.add_argument(
'--host-out',
type=str,
default=__default_host__,
help=f'The host address for output, by default it is {__default_host__}',
)
gp.add_argument(
'--socket-in',
type=SocketType.from_string,
choices=list(SocketType),
default=SocketType.PULL_BIND,
help='The socket type for input port',
)
gp.add_argument(
'--socket-out',
type=SocketType.from_string,
choices=list(SocketType),
default=SocketType.PUSH_BIND,
help='The socket type for output port',
)
gp.add_argument(
'--memory-hwm',
type=int,
default=-1,
help='The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. '
'-1 means no restriction',
)
gp.add_argument(
'--on-error-strategy',
type=OnErrorStrategy.from_string,
choices=list(OnErrorStrategy),
default=OnErrorStrategy.IGNORE,
help='''
The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee the success execution in the sequel flow. If something
is wrong in the upstream, it is hard to carry this exception and moving forward without any side-effect.
''',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
help='If set, only native Executors is allowed, and the Executor is always run inside ZEDRuntime.',
)
gp.add_argument(
'--num-part',
type=int,
default=0,
help='the number of messages expected from upstream, 0 and 1 means single part'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--dynamic-routing-out',
action='store_true',
default=False,
help='Tells if ZEDRuntime should respect routing graph for outgoing traffic.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--dynamic-routing-in',
action='store_true',
default=False,
help='Tells if ZEDRuntime should handle incoming traffic as dynamic routing.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--grpc-data-requests',
action='store_true',
default=False,
help='Tells if a Pea should use gRPC for data requests. Works only with dynamic routing out.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--runs-in-docker',
action='store_true',
default=False,
help='Informs a Pea that runs in a container. Important to properly set networking information',
)
gp.add_argument(
'--dump-path',
type=str,
default='',
help='Dump path to be passed to the executor'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
) | 0.747708 | 0.252511 |
# Enthought library imports.
from traits.api import Interface
class IExtensionRegistry(Interface):
    """The interface for extension registries.

    An extension registry maps extension point Ids to the extensions
    contributed to them, and notifies registered listeners when those
    contributions change.
    """

    def add_extension_point_listener(self, listener, extension_point_id=None):
        """Add a listener for extensions being added or removed.

        A listener is any Python callable with the following signature::

            def listener(extension_registry, extension_point_changed_event):
                ...

        If an extension point is specified then the listener will only be
        called when extensions are added to or removed from that extension
        point (the extension point may or may not have been added to the
        registry at the time of this call).

        If *no* extension point is specified then the listener will be called
        when extensions are added to or removed from *any* extension point.

        When extensions are added or removed all specific listeners are called
        first (in arbitrary order), followed by all non-specific listeners
        (again, in arbitrary order).
        """

    def add_extension_point(self, extension_point):
        """Add an extension point.

        If an extension point already exists with this Id then it is simply
        replaced (no error is raised).
        """

    def get_extensions(self, extension_point_id):
        """Return the extensions contributed to an extension point.

        Return an empty list if the extension point does not exist.
        """

    def get_extension_point(self, extension_point_id):
        """Return the extension point with the specified Id.

        Return None if no such extension point exists.
        """

    def get_extension_points(self):
        """Return all extension points that have been added to the registry.
        """

    def remove_extension_point_listener(
        self, listener, extension_point_id=None
    ):
        """Remove a listener for extensions being added or removed.

        Raise a 'ValueError' if the listener does not exist.
        """

    def remove_extension_point(self, extension_point_id):
        """Remove an extension point.

        Raise an 'UnknownExtensionPoint' exception if no extension point
        exists with the specified Id.
        """

    def set_extensions(self, extension_point_id, extensions):
        """Set (i.e. replace) the extensions contributed to an extension point.
        """
from traits.api import Interface
class IExtensionRegistry(Interface):
""" The interface for extension registries. """
def add_extension_point_listener(self, listener, extension_point_id=None):
""" Add a listener for extensions being added or removed.
A listener is any Python callable with the following signature::
def listener(extension_registry, extension_point_changed_event):
...
If an extension point is specified then the listener will only be
called when extensions are added to or removed from that extension
point (the extension point may or may not have been added to the
registry at the time of this call).
If *no* extension point is specified then the listener will be called
when extensions are added to or removed from *any* extension point.
When extensions are added or removed all specific listeners are called
first (in arbitrary order), followed by all non-specific listeners
(again, in arbitrary order).
"""
def add_extension_point(self, extension_point):
""" Add an extension point.
If an extension point already exists with this Id then it is simply
replaced.
"""
def get_extensions(self, extension_point_id):
""" Return the extensions contributed to an extension point.
Return an empty list if the extension point does not exist.
"""
def get_extension_point(self, extension_point_id):
""" Return the extension point with the specified Id.
Return None if no such extension point exists.
"""
def get_extension_points(self):
""" Return all extension points that have been added to the registry.
"""
def remove_extension_point_listener(
self, listener, extension_point_id=None
):
""" Remove a listener for extensions being added or removed.
Raise a 'ValueError' if the listener does not exist.
"""
def remove_extension_point(self, extension_point_id):
""" Remove an extension point.
Raise an 'UnknownExtensionPoint' exception if no extension point exists
with the specified Id.
"""
def set_extensions(self, extension_point_id, extensions):
""" Set the extensions contributed to an extension point.
""" | 0.793346 | 0.312816 |
from gridded_noise_nofg import core_eor, core_instr, likelihood, model_name
from py21cmmc.mcmc.mcmc import build_computation_chain
import pickle
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
nrealisations = 400
nthreads = 4
def _produce_mock(i):
    """Simulate one noise-only mock realisation (no signal).

    Runs the foreground and instrument cores into a fresh chain context,
    grids and frequency-transforms the resulting visibilities, and returns
    the 2D power spectrum (restricted to the retained eta modes), the 3D
    power, and the gridded visibilities themselves.
    """
    # A fresh, empty context for this realisation.
    context = likelihood.LikelihoodComputationChain.createChainContext()

    # Run every foreground core (deliberately not the signal core) ...
    for fg_core in likelihood.foreground_cores:
        fg_core.simulate_data(context)
    # ... then turn the result into visibilities.
    likelihood._instr_core.simulate_data(context)

    # Equivalent to likelihood's "compute_power", but keeping intermediates.
    gridded = likelihood.grid_visibilities(context.get("visibilities"))
    gridded = likelihood.frequency_fft(
        gridded, likelihood.frequencies, taper=likelihood.frequency_taper
    )

    p2d = likelihood.get_2d_power(gridded)
    # Keep only the eta modes the likelihood actually uses (above eta_min).
    p2d = p2d[:, -len(likelihood.eta):]

    p3d = np.abs(gridded) ** 2
    return p2d, p3d, gridded
def numerical_variance():
    """Estimate noise statistics by Monte-Carlo over mock realisations.

    Runs ``_produce_mock`` for the module-level ``nrealisations`` draws
    (in parallel over ``nthreads`` worker processes) and reduces the
    results to summary statistics.

    Returns
    -------
    mean_p2d : (nperp, npar)-array
        Mean 2D power spectrum over realisations.
    var_p2d : (nperp, npar)-array
        Per-bin variance of the 2D power spectrum.
    var_p3d : array
        Per-cell variance of the 3D power (|V|^2).
    var_V3d : array
        Per-cell variance of the gridded visibilities.

    Raises
    ------
    ValueError
        If ``nrealisations`` is less than 2 (a variance needs >= 2 samples).
    """
    if nrealisations < 2:
        raise ValueError("nrealisations must be more than one")

    # BUG FIX: the pool was previously never closed/joined, leaking worker
    # processes; the context manager terminates them deterministically.
    with multiprocessing.Pool(nthreads) as pool:
        res = pool.map(_produce_mock, np.arange(nrealisations))

    power2d = np.array([r[0] for r in res])
    power3d = np.array([r[1] for r in res])
    visgrid = np.array([r[2] for r in res])

    mean_p2d = np.mean(power2d, axis=0)
    var_p2d = np.var(power2d, axis=0)
    var_p3d = np.var(power3d, axis=0)
    # Note: np.var of complex data returns the (real) total variance.
    var_V3d = np.var(visgrid, axis=0)

    return mean_p2d, var_p2d, var_p3d, var_V3d
def make_the_plot(num_mean_p2d, num_var_p2d, num_var_V, num_var_p3d, anl_mean_p2d, anl_var_p2d, anl_var_V, anl_var_p3d):
    """Plot numerical (Monte-Carlo) vs analytic thermal-noise statistics.

    Produces a 4x3 grid of log-log panels: rows are (mean 2D power,
    2D power variance, visibility variance, 3D power variance); columns
    are (numerical, analytic, numerical/analytic ratio). Two analytic
    panels are intentionally left blank (commented out below).

    Returns
    -------
    (fig, ax) : the matplotlib figure and its axes array.
    """
    fig, ax = plt.subplots(
        4, 3,
        sharex=True, sharey=True,
        subplot_kw={"xscale": 'log', 'yscale': 'log'},
        figsize=(12, 12)
    )
    fig.suptitle("Thermal Noise Power")
    # Shared axis extent: u (perpendicular) vs eta (line-of-sight).
    extent = (likelihood.u.min(), likelihood.u.max(), likelihood.eta.min(), likelihood.eta.max())

    # Row 0: mean 2D power spectrum.
    im = ax[0, 0].imshow(num_mean_p2d.T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[0, 0])
    ax[0, 0].set_title("Numerical mean")

    im = ax[0, 1].imshow(anl_mean_p2d.T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[0, 1])
    ax[0, 1].set_title("Analytic Mean")

    im = ax[0, 2].imshow((num_mean_p2d / anl_mean_p2d).T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[0, 2])
    ax[0, 2].set_title("Num/Anl Mean")

    # Row 1: variance of the 2D power spectrum.
    im = ax[1, 0].imshow(num_var_p2d.T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[1, 0])
    ax[1, 0].set_title("Numerical Var.")

    im = ax[1, 1].imshow(anl_var_p2d.T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[1, 1])
    ax[1, 1].set_title("Analytic var.")

    im = ax[1, 2].imshow((num_var_p2d / anl_var_p2d).T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[1, 2])
    ax[1, 2].set_title("Num./Anl. Var")

    # Row 2: visibility variance (first frequency slice only).
    im = ax[2, 0].imshow(num_var_V[0].T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[2, 0])
    ax[2, 0].set_title("Numerical V Var.")

    # im = ax[2, 1].imshow(anl_var_V.T, origin='lower', extent=extent)
    # plt.colorbar(im, ax=ax[2, 1])
    # ax[2, 1].set_title("Analytic V var.")

    im = ax[2, 2].imshow((num_var_V[0].T / anl_var_V[0]), origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[2, 2])
    ax[2, 2].set_title("Num./Anl. V Var")

    # Row 3: 3D power variance (first frequency slice only).
    im = ax[3, 0].imshow(num_var_p3d[0].T, origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[3, 0])
    ax[3, 0].set_title("Numerical P Var.")

    # im = ax[3, 1].imshow(anl_var_p3d.T, origin='lower', extent=extent)
    # plt.colorbar(im, ax=ax[3, 1])
    # ax[3, 1].set_title("Analytic P var.")

    im = ax[3, 2].imshow((num_var_p3d[0].T / anl_var_p3d[0]), origin='lower', extent=extent)
    plt.colorbar(im, ax=ax[3, 2])
    ax[3, 2].set_title("Num./Anl. P Var")

    # ADD SUPER AXIS LABELS: invisible full-figure axes carry shared labels.
    fig.add_subplot(111, frameon=False)
    # hide tick and tick label of the big axes
    plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    plt.xlabel("Perpendicular Scale, $u$", labelpad=15, fontsize=15)
    # BUG FIX: raw string — "\e" is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning/error in newer Pythons).
    plt.ylabel(r"Line-of-Sight Scale, $\eta$", labelpad=15, fontsize=15)

    return fig, ax
if __name__=="__main__":
    # Build the chain and run setup() on all cores + the likelihood.
    build_computation_chain([core_eor, core_instr], likelihood)
    # Get numerical (Monte-Carlo) values.
    num_mean_p2d, num_var_p2d, num_var_p3d, num_var_V = numerical_variance()
    # Get analytic values for the 2D power spectrum.
    anl_mean_p2d, anl_var_p2d = likelihood.noise['mean'], likelihood.noise['covariance']
    # Keep only the diagonal of each per-u covariance block, to compare
    # against the per-bin numerical variance.
    anl_var_p2d = np.array([np.diag(c) for c in anl_var_p2d])
    # Analytic visibility / power variance from the instrument's thermal
    # variance per baseline, diluted by the number of baselines per uv cell.
    anl_var_V = core_instr.thermal_variance_baseline / likelihood.nbl_uv
    anl_var_p3d = core_instr.thermal_variance_baseline**2 / likelihood.nbl_uv**2
    # Dump data in case plotting doesn't work.
    with open("thermal_noise_data.pkl", 'wb') as f:
        pickle.dump(
            {"num_mean_p2d":num_mean_p2d, "num_var":num_var_p2d, "anl_mean":anl_mean_p2d, "anl_cov":anl_var_p2d,
             "num_var_p3d":num_var_p3d, "num_var_V":num_var_V, "anl_var_V":anl_var_V, "anl_var_p3d":anl_var_p3d}, f
        )
    # Make the plot and save it to disk.
    fig, ax = make_the_plot(
        num_mean_p2d, num_var_p2d, num_var_V, num_var_p3d,
        anl_mean_p2d, anl_var_p2d, anl_var_V, anl_var_p3d
    )
    plt.savefig("thermal_noise_test.png")
from py21cmmc.mcmc.mcmc import build_computation_chain
import pickle
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
nrealisations = 400
nthreads = 4
def _produce_mock(i):
"""Produces a mock power spectrum for purposes of getting numerical_covariances"""
# Create an empty context with the given parameters.
ctx = likelihood.LikelihoodComputationChain.createChainContext()
# For each realisation, run every foreground core (not the signal!)
for core in likelihood.foreground_cores:
core.simulate_data(ctx)
# And turn them into visibilities
likelihood._instr_core.simulate_data(ctx)
# The following is basically "compute_power", but saves the steps.
visgrid = likelihood.grid_visibilities(ctx.get("visibilities"))
# Transform frequency axis
visgrid = likelihood.frequency_fft(visgrid, likelihood.frequencies, taper=likelihood.frequency_taper)
# Get 2D power from gridded vis.
power2d = likelihood.get_2d_power(visgrid)
# Restrict power to eta modes above eta_min
power2d = power2d[:, -len(likelihood.eta):]
power3d = np.abs(visgrid)**2
return power2d, power3d, visgrid
def numerical_variance():
"""
Calculate the covariance of the foregrounds.
Parameters
----------
params: dict
The parameters of this iteration. If empty, default parameters are used.
nrealisations: int, optional
Number of realisations to find the covariance.
Output
------
mean: (nperp, npar)-array
The mean 2D power spectrum of the foregrounds.
cov:
The sparse block diagonal matrix of the covariance if nrealisation is not 1
Else it is 0
"""
if nrealisations < 2:
raise ValueError("nrealisations must be more than one")
pool = multiprocessing.Pool(nthreads)
res = pool.map(_produce_mock, np.arange(nrealisations))
power2d = np.array([r[0] for r in res])
power3d = np.array([r[1] for r in res])
visgrid = np.array([r[2] for r in res])
mean_p2d = np.mean(power2d, axis=0)
var_p2d = np.var(power2d, axis=0)
var_p3d = np.var(power3d, axis=0)
var_V3d = np.var(visgrid, axis=0)
return mean_p2d, var_p2d, var_p3d, var_V3d
def make_the_plot(num_mean_p2d, num_var_p2d, num_var_V, num_var_p3d, anl_mean_p2d, anl_var_p2d, anl_var_V, anl_var_p3d):
# Make a plot
fig, ax = plt.subplots(
4, 3,
sharex=True, sharey=True,
subplot_kw={"xscale": 'log', 'yscale': 'log'},
figsize=(12, 12)
)
fig.suptitle("Thermal Noise Power")
extent = (likelihood.u.min(), likelihood.u.max(), likelihood.eta.min(), likelihood.eta.max())
im = ax[0, 0].imshow(num_mean_p2d.T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[0, 0])
ax[0, 0].set_title("Numerical mean")
im = ax[0, 1].imshow(anl_mean_p2d.T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[0, 1])
ax[0, 1].set_title("Analytic Mean")
im = ax[0, 2].imshow((num_mean_p2d / anl_mean_p2d).T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[0, 2])
ax[0, 2].set_title("Num/Anl Mean")
im = ax[1, 0].imshow(num_var_p2d.T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[1, 0])
ax[1, 0].set_title("Numerical Var.")
im = ax[1, 1].imshow(anl_var_p2d.T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[1, 1])
ax[1, 1].set_title("Analytic var.")
im = ax[1, 2].imshow((num_var_p2d / anl_var_p2d).T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[1, 2])
ax[1, 2].set_title("Num./Anl. Var")
im = ax[2, 0].imshow(num_var_V[0].T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[2, 0])
ax[2, 0].set_title("Numerical V Var.")
# im = ax[2, 1].imshow(anl_var_V.T, origin='lower', extent=extent)
# plt.colorbar(im, ax=ax[2, 1])
# ax[2, 1].set_title("Analytic V var.")
im = ax[2, 2].imshow((num_var_V[0].T / anl_var_V[0]), origin='lower', extent=extent)
plt.colorbar(im, ax=ax[2, 2])
ax[2, 2].set_title("Num./Anl. V Var")
im = ax[3, 0].imshow(num_var_p3d[0].T, origin='lower', extent=extent)
plt.colorbar(im, ax=ax[3, 0])
ax[3, 0].set_title("Numerical P Var.")
# im = ax[3, 1].imshow(anl_var_p3d.T, origin='lower', extent=extent)
# plt.colorbar(im, ax=ax[3, 1])
# ax[3, 1].set_title("Analytic P var.")
im = ax[3, 2].imshow((num_var_p3d[0].T / anl_var_p3d[0]), origin='lower', extent=extent)
plt.colorbar(im, ax=ax[3, 2])
ax[3, 2].set_title("Num./Anl. P Var")
# ADD SUPER AXIS LABELS
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("Perpendicular Scale, $u$", labelpad=15, fontsize=15)
plt.ylabel("Line-of-Sight Scale, $\eta$", labelpad=15, fontsize=15)
return fig, ax
if __name__=="__main__":
# Build the chain and run setup()
build_computation_chain([core_eor, core_instr], likelihood)
# Get numerical values
num_mean_p2d, num_var_p2d, num_var_p3d, num_var_V = numerical_variance()
# Get analytic values for p2d
anl_mean_p2d, anl_var_p2d = likelihood.noise['mean'], likelihood.noise['covariance']
anl_var_p2d = np.array([np.diag(c) for c in anl_var_p2d])
anl_var_V = core_instr.thermal_variance_baseline / likelihood.nbl_uv
anl_var_p3d = core_instr.thermal_variance_baseline**2 / likelihood.nbl_uv**2
# Dump data in case plotting doesn't work
with open("thermal_noise_data.pkl", 'wb') as f:
pickle.dump(
{"num_mean_p2d":num_mean_p2d, "num_var":num_var_p2d, "anl_mean":anl_mean_p2d, "anl_cov":anl_var_p2d,
"num_var_p3d":num_var_p3d, "num_var_V":num_var_V, "anl_var_V":anl_var_V, "anl_var_p3d":anl_var_p3d}, f
)
# Make the plot
fig, ax = make_the_plot(
num_mean_p2d, num_var_p2d, num_var_V, num_var_p3d,
anl_mean_p2d, anl_var_p2d, anl_var_V, anl_var_p3d
)
plt.savefig("thermal_noise_test.png") | 0.835986 | 0.73369 |
"""Export traced agent events from Elasticsearch into agent-events.csv.

For each event the script also records the time elapsed since the previous
event for the same handler ("delta_agent") and for the same thread
("delta_thread"), and counts the number of distinct threads seen.
"""
import csv

from elasticsearch_dsl import connections
from elasticsearch_dsl import Search

# Fields we pull from each document, in output-column order.
FIELDS = [
    "str_time",
    "timestamp",
    "handler",
    "ellapsed_milli",
    "thread_id",
    "msg_id",
    "outcome",
    "traced_type",
]

connections.create_connection(hosts=["localhost"], timeout=20)

# Only return the selected fields, server-side sorted by timestamp.
search = Search(index="fluentd-*").source(FIELDS).sort("timestamp")

# scan() streams every matching document past the normal result-size cap.
events = [{name: getattr(hit, name) for name in FIELDS} for hit in search.scan()]

# Sort client-side as well, since scan() does not guarantee global order.
sorted_events = sorted(events, key=lambda ev: ev["timestamp"])

last_seen_by_agent = {}
last_seen_by_thread = {}
thread_count = 0

with open("agent-events.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["idx"] + FIELDS + ["delta_agent", "delta_thread"])
    for prev_idx, ev in enumerate(sorted_events):
        # Time since the previous event from the same handler (0 if first).
        handler = ev["handler"]
        if handler in last_seen_by_agent:
            delta_agent = ev["timestamp"] - last_seen_by_agent[handler]
            if delta_agent < 0:
                print(prev_idx, delta_agent)
        else:
            delta_agent = 0
        last_seen_by_agent[handler] = ev["timestamp"]

        # Time since the previous event on the same thread (0 if first).
        thread = ev["thread_id"]
        if thread in last_seen_by_thread:
            delta_thread = ev["timestamp"] - last_seen_by_thread[thread]
            if delta_thread < 0:
                print(prev_idx, delta_thread)
        else:
            delta_thread = 0
            thread_count = thread_count + 1
        last_seen_by_thread[thread] = ev["timestamp"]

        writer.writerow(
            [prev_idx + 1]
            + [ev[name] for name in FIELDS]
            + [delta_agent, delta_thread]
        )

print("Total threads=", thread_count)
from elasticsearch_dsl import connections
from elasticsearch_dsl import Search
connections.create_connection(hosts=["localhost"], timeout=20)
s = Search(index="fluentd-*")
# only return the selected fields
s = s.source(
[
"str_time",
"timestamp",
"handler",
"ellapsed_milli",
"thread_id",
"msg_id",
"outcome",
"traced_type",
]
)
s = s.sort("timestamp")
events = []
for x in s.scan():
events.append(
{
"str_time": x.str_time,
"timestamp": x.timestamp,
"handler": x.handler,
"ellapsed_milli": x.ellapsed_milli,
"thread_id": x.thread_id,
"msg_id": x.msg_id,
"outcome": x.outcome,
"traced_type": x.traced_type,
}
)
sorted_events = sorted(events, key=lambda i: i["timestamp"])
threads = {}
thread_count = 0
agents = {}
with open("agent-events.csv", "w", newline="") as csvfile:
spamwriter = csv.writer(csvfile)
i = 0
spamwriter.writerow(
[
"idx",
"str_time",
"timestamp",
"handler",
"ellapsed_milli",
"thread_id",
"msg_id",
"outcome",
"traced_type",
"delta_agent",
"delta_thread",
]
)
for x in sorted_events:
if x["handler"] in agents:
delta_agent = x["timestamp"] - agents[x["handler"]]
if delta_agent < 0:
print(i, delta_agent)
else:
delta_agent = 0
agents[x["handler"]] = x["timestamp"]
if x["thread_id"] in threads:
delta_thread = x["timestamp"] - threads[x["thread_id"]]
if delta_thread < 0:
print(i, delta_thread)
else:
delta_thread = 0
thread_count = thread_count + 1
threads[x["thread_id"]] = x["timestamp"]
i = i + 1
spamwriter.writerow(
[
i,
x["str_time"],
x["timestamp"],
x["handler"],
x["ellapsed_milli"],
x["thread_id"],
x["msg_id"],
x["outcome"],
x["traced_type"],
delta_agent,
delta_thread,
]
)
print("Total threads=", thread_count) | 0.213295 | 0.200206 |
from micropython import mem_info
from microbit import *
from utime import sleep,sleep_ms
def CW01AT(t, raw=False, wait=None, useUART=1):
    """Send an AT command to the CW01 (ESP8266) over UART and report the result.

    :param t: the AT command string, or the raw payload when ``raw`` is True.
    :param raw: when True, write ``t`` verbatim (no CRLF appended) and return
        immediately without waiting for any response.
    :param wait: optional response line (without CRLF) that counts as success
        in addition to the standard ``OK`` reply.
    :param useUART: controls UART reconfiguration around the command, based on
        the conditions below: 1 or 2 redirect the UART to the CW01 pins before
        sending; 1 or 3 re-init the UART afterwards (presumably restoring the
        USB console -- NOTE(review): confirm on target hardware); 0 leaves the
        UART untouched at both ends.
    """
    if useUART == 1 or useUART == 2:
        # Point the micro:bit UART at the CW01 module's pins.
        uart.init(baudrate=115200, bits=8, parity=None, stop=1, tx=pin1, rx=pin0)
        sleep_ms(10)
    if raw:
        # NOTE(review): raw writes return here, so the UART is NOT restored
        # even for useUART in (1, 3) -- callers pass useUART=0 for raw sends.
        uart.write(bytearray(t))
        return
    else:
        uart.write((t+"\r\n"))
        sleep_ms(10)
    # Poll up to ~100 ms for the first response line.
    j = 0
    while not uart.any():
        j = j+1
        if j > 10:
            break
        sleep_ms(10)
    data = uart.readline()
    i = 0
    rc = -1   # -1 = no verdict yet, 0 = ERROR seen, 1 = OK/`wait` seen
    tall = []  # non-echo response lines collected for display
    while(i < 100):  # 10 seconds max
        i = i+1
        sleep_ms(50)
        if wait and data == bytearray(wait+"\r\n"):
            rc = 1
        if data == b'OK\r\n':
            rc = 1
        if data == b'ERROR\r\n':
            rc = 0
        if rc > -1:
            break
        # Keep payload lines; skip the module's "AT..." command echo.
        if data and len(data) > 2 and str(data, "utf-8")[:2] != "AT":
            data = str(data[:-2], "utf-8")  # strip trailing CRLF
            tall.append(data)
        # Poll again (up to ~100 ms) for the next line.
        j = 0
        while not uart.any():
            j = j+1
            if j > 10:
                break
            sleep_ms(10)
        data = uart.readline()
    if useUART == 1 or useUART == 3:
        # Re-init UART at the default pins (back to the USB console).
        uart.init(115200)
    if rc == 1:
        pass
    elif rc == 0:
        print(t + ": ERROR")
    else:
        # Timed out without a verdict.
        print(t + ": ??????")
    if tall:
        print(', '.join(tall))
# Probe the module once at import time (expected to print "AT+TEST: ERROR"
# unless the firmware supports it -- it exercises the UART path).
CW01AT("AT+TEST")
sleep(0.1)

# Fill in below
_WIFI_SSID = "<ssid>"
_WIFI_PASSWORD = "<password>"
_SERVER = "<proxy>"
_PORT = 80
# BUG FIX: `_TIME_ZONE = <timezone>` was a bare angle-bracket placeholder,
# i.e. a syntax error that prevented the whole script from loading. Keep it
# a quoted placeholder like the other settings above.
_TIME_ZONE = "<timezone>"

mem_info()
def main():
    """Entry point: verify the CW01 link, then start the send loop."""
    CW01AT("AT")  # Clear the channel
    sleep(5)
    # EspConnect loops forever posting temperature readings; the lines
    # below it are effectively unreachable unless it returns on error.
    EspConnect(_SERVER, _PORT)
    sleep(2)
    print(i2c.scan())
def EspConnect(url, port):
    """Join WiFi, then loop forever POSTing the temperature to ``url:port``.

    :param url: the proxy/server host to POST readings to.
    :param port: the TCP port on that host.

    Each iteration opens a TCP connection via the CW01, sends one HTTP POST
    with a JSON body like {"temperature": <celsius>}, closes the connection,
    and sleeps 10 seconds.
    """
    # RED LED on CW01 and SQUARE on Micro:Bit
    display.show(Image.SQUARE)
    # Connect to WiFi (station+AP mode, then join the configured network).
    CW01AT("AT+CWMODE=3")
    sleep(0.1)
    CW01AT("AT+CWJAP=\""+_WIFI_SSID+"\",\""+_WIFI_PASSWORD+"\"")
    sleep(0.1)
    # CW01AT("AT+CIFSR","+CIFSR")
    display.show(Image.CHESSBOARD)
    btn = 0  # NOTE(review): set from button B each loop but never sent -- leftover?
    while True:
        # Connect to the server over TCP.
        CW01AT("AT+CIPSTART=\"TCP\",\""+url+"\","+str(port))
        sleep(0.2)
        temp = str(temperature())
        if(button_b.is_pressed()):
            btn=1
        else:
            btn=0
        payload="{\"temperature\":"+temp+"}"
        # Hand-built HTTP/1.1 request; Content-Length must match the payload.
        req =("POST <url> HTTP/1.1\r\n"+
              "Host:<proxy>\r\n"+
              "Content-Length:%d\r\n"%len(payload)+
              "Content-Type:application/json\r\n\r\n"+
              payload+"\r\n")
        # Announce the byte count, then stream the raw request (useUART=0/2
        # keep the UART pointed at the CW01 across the multi-step exchange).
        CW01AT("AT+CIPSEND=%d"%len(req),useUART=2)
        sleep(0.5)
        CW01AT(req,True,useUART=0)
        sleep(1)
        CW01AT("AT+CIPCLOSE",useUART=0)
        sleep(10)
        # Final "AT" with useUART=3 restores the UART to the console.
        CW01AT("AT",useUART=3)
        display.show(Image.DIAMOND)
# Script entry point (main() loops forever inside EspConnect).
if __name__ == '__main__':
    main()
    #print("Finished!")
from microbit import *
from utime import sleep,sleep_ms
def CW01AT(t,raw=False,wait=None,useUART=1):
if useUART == 1 or useUART == 2:
uart.init(baudrate=115200, bits=8, parity=None, stop=1, tx=pin1, rx=pin0)
sleep_ms(10)
if raw:
uart.write(bytearray(t))
return
else:
uart.write((t+"\r\n"))
sleep_ms(10)
j = 0
while not uart.any():
j= j+1
if j>10:
break
sleep_ms(10)
data = uart.readline()
i = 0
rc = -1
tall = []
while(i<100): # 10 seconds max
i=i+1
sleep_ms(50)
if wait and data==bytearray(wait+"\r\n"):
rc = 1
if data == b'OK\r\n':
rc = 1
if data == b'ERROR\r\n':
rc = 0
if rc>-1:
break
if data and len(data)>2 and str(data, "utf-8")[:2] != "AT":
data = str(data[:-2], "utf-8")
tall.append(data)
j = 0
while not uart.any():
j= j+1
if j>10:
break
sleep_ms(10)
data = uart.readline()
if useUART == 1 or useUART == 3:
uart.init(115200)
if rc==1:
pass
elif rc==0:
print(t + ": ERROR")
else:
print(t +": ??????")
if tall:
print(', '.join(tall))
CW01AT("AT+TEST")
sleep(0.1)
# Fill in below
_WIFI_SSID = "<ssid>"
_WIFI_PASSWORD = "<password>"
_SERVER = "<proxy>"
_PORT = 80
_TIME_ZONE = <timezone>
mem_info()
def main():
CW01AT("AT") # Clear the channel
sleep(5)
EspConnect(_SERVER,_PORT)
sleep(2)
print(i2c.scan())
def EspConnect(url, port):
# RED LED on CW01 and SQUARE on Micro:Bit
display.show(Image.SQUARE)
# Connect to WiFI
CW01AT("AT+CWMODE=3")
sleep(0.1)
CW01AT("AT+CWJAP=\""+_WIFI_SSID+"\",\""+_WIFI_PASSWORD+"\"")
sleep(0.1)
# CW01AT("AT+CIFSR","+CIFSR")
display.show(Image.CHESSBOARD)
btn = 0
while True:
#Connect to Server
CW01AT("AT+CIPSTART=\"TCP\",\""+url+"\","+str(port))
sleep(0.2)
temp = str(temperature())
if(button_b.is_pressed()):
btn=1
else:
btn=0
payload="{\"temperature\":"+temp+"}"
req =("POST <url> HTTP/1.1\r\n"+
"Host:<proxy>\r\n"+
"Content-Length:%d\r\n"%len(payload)+
"Content-Type:application/json\r\n\r\n"+
payload+"\r\n")
CW01AT("AT+CIPSEND=%d"%len(req),useUART=2)
sleep(0.5)
CW01AT(req,True,useUART=0)
sleep(1)
CW01AT("AT+CIPCLOSE",useUART=0)
sleep(10)
CW01AT("AT",useUART=3)
display.show(Image.DIAMOND)
if __name__ == '__main__':
main()
#print("Finished!") | 0.10939 | 0.153676 |
import datetime
from urllib.parse import urljoin # Python 3
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify
from werkzeug import secure_filename
from app import app, db, utils
from app.models import Uploads, User, Comment, Media, Country, Genre, MediaType, StoryType
mod_media = Blueprint('media', __name__, url_prefix='/media')
@mod_media.route('/all')
def all():
    """Paginated listing of visible media, filtered by genre and type.

    Query-string parameters ``media_genre`` and ``media_type`` select the
    filters; both default (skateboarding photos) when absent or empty.
    """
    # `or 1` covers both a missing and an empty query parameter.
    selected_media_genre = request.args.get('media_genre') or 1
    selected_media_type = request.args.get('media_type') or 1

    total = utils.get_count_by_genre_and_type(selected_media_genre, selected_media_type)
    page, per_page, offset = utils.get_page_args(page_parameter='page', per_page_parameter='per_page')

    media = (
        db.session.query(Media)
        .join(Country)
        .add_columns(
            Media.media_id,
            Country.country_code.label("country_code"),
            Media.media_topic,
            Media.create_time,
            Media.owner,
        )
        .filter(
            Media.media_type == selected_media_type,
            Media.media_genre == selected_media_genre,
            Media.hidden == 0,
        )
        .order_by(Media.create_time.desc())
        .offset(offset)
        .limit(per_page)
    )

    pagination = utils.get_pagination(page=page, per_page=per_page, total=total, record_name=' media', format_total=True, format_number=True,)
    return render_template('media/media.html', media=media, pagination=pagination, selected_media_genre=selected_media_genre, selected_media_type=selected_media_type)
@mod_media.route('/photo/<media_id>')
def photo(media_id):
    """Render a single photo page together with its 100 newest comments."""
    photo = (
        db.session.query(Media)
        .join(Country)
        .add_columns(
            Media.media_id,
            Country.country_code.label("country_code"),
            Media.media_topic,
            Media.media_text,
            Media.create_time,
            Media.owner,
        )
        .filter(Media.media_id == media_id)
        .first()
    )
    comments = (
        Comment.query.filter_by(media_id=media_id)
        .filter(Comment.user_id == User.user_id)
        .order_by(Comment.id.desc())
        .limit(100)
    )
    return render_template('media/photo.html', photo=photo, comments=comments)
@mod_media.route('/video/<string:media_id>')
def video(media_id):
    """Render the page for one video record."""
    record = Media.query.filter_by(media_id=media_id).first()
    return render_template('media/video.html', video=record)
@mod_media.route("/latest")
def latest():
    """Admin-only view: the ten most recently created media records."""
    # BUG FIX: session.get() instead of session[...] -- a session that exists
    # but lacks these keys previously raised KeyError instead of redirecting.
    if session and session.get('logged_in') and session.get('user_level') == 1:
        latest = (
            db.session.query(Media)
            .join(Genre)
            .join(MediaType)
            .join(StoryType)
            .join(Country)
            .add_columns(
                Media.media_id,
                Genre.type_name.label("genre"),
                MediaType.type_name.label("mediatype_name"),
                StoryType.type_name.label("storytype_name"),
                Country.country_code.label("country_code"),
                Media.media_topic,
                Media.media_desc,
                Media.create_time,
                Media.owner,
                Media.hidden,
            )
            .order_by(Media.create_time.desc())
            .limit(10)
        )
        return render_template("media/latest_media.html", latest=latest)
    flash("Please login first")
    return redirect(url_for("home"))
@mod_media.route('/delete', methods = ['POST'])
def delete():
    """Admin-only: delete a media record by the POSTed ``media_id``."""
    # BUG FIX: session.get() instead of session[...] -- a session that exists
    # but lacks these keys previously raised KeyError instead of redirecting.
    if session and session.get('logged_in') and session.get('user_level') == 1:
        # The route only accepts POST, so request.form is always available.
        media_id = request.form.get('media_id')
        Media.query.filter_by(media_id=media_id).delete()
        db.session.commit()
        flash("Record " + media_id + " was deleted succesfully by " + session['username'] + ".")
    else:
        flash("Please login first")
    # BUG FIX: the success path previously fell through without returning a
    # response, making Flask raise; always redirect home.
    return redirect(url_for("home"))
@mod_media.route('/<path:filename>', methods=['GET'])
def filename(filename):
    """Serve a media file from Azure blob storage if configured, else locally."""
    blob_base = app.config.get('AZURE_BLOB_URI')
    if not blob_base:
        return app.send_static_file(filename)
    return redirect(urljoin(blob_base, filename))
@mod_media.route("/update/<media_id>", methods = ['POST', 'GET'])
def update(media_id):
    """Admin-only: edit a media record (GET shows the form, POST saves it)."""
    # Consolidated login guard (previously duplicated in both branches).
    # BUG FIX: session.get() instead of session[...] -- a session that exists
    # but lacks these keys previously raised KeyError instead of redirecting.
    if not (session and session.get('logged_in') and session.get('user_level') == 1):
        flash("Please login first")
        return redirect(url_for("home"))

    if request.method == 'POST':
        media = {
            'media_id': request.form.get('media_id'),
            'media_genre': request.form.get('media_genre'),
            'media_type': request.form.get('media_type'),
            'story_type': request.form.get('story_type'),
            'media_topic': request.form.get('media_topic'),
            'media_text': request.form.get('media_text'),
            'media_desc': request.form.get('media_desc'),
            'country_id': request.form.get('country_id'),
            'hidden': request.form.get('hidden'),
        }
        Media.query.filter_by(media_id=media_id).update(media)
        db.session.commit()
        flash("Record " + media_id + " was updated by user " + session['username'])
        return redirect(url_for("home"))

    result = Media.query.filter_by(media_id=media_id).first()
    return render_template("views/user/update_media.html", result=result)
@app.route("/newupload", methods=['POST','GET'])
def new_upload():
if request.method == 'POST':
# Crea a blob container with the users name
blob_service = utils.get_azure_blob_service()
container = ''
file_to_upload = request.files['file']
filename = secure_filename(file_to_upload.filename)
if(session and session['logged_in']):
container = session['username']
if not blob_service.exists(container):
blob_service.create_container(container)
blob_service.set_container_acl(container, public_access=PublicAccess.Blob)
else:
flash("Please login first")
return redirect(url_for("home"))
# Create Blob from stream
try:
blob_service.create_blob_from_stream(container, filename, file_to_upload)
flash("File " + filename + " was uploaded successfully")
except:
print("Something went wrong while uploading the files %s"%filename)
flash("Something went wrong while uploading the files %s"%filename)
pass
blob = app.config.get('AZURE_BLOB_URI')
path = blob + '/' + container + '/' + filename
# Create a record in database
upload = Uploads(
user_id=session['user_id'],
create_time=datetime.datetime.now(),
path=path)
db.session.add(upload)
db.session.commit()
#print("Upload was inserted to database by user " + session['username'])
return redirect(url_for("my_uploads"))
return render_template("views/user/new_upload.html") | app/mod_media/controllers.py | import datetime
from urllib.parse import urljoin # Python 3
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify
from werkzeug import secure_filename
from app import app, db, utils
from app.models import Uploads, User, Comment, Media, Country, Genre, MediaType, StoryType
mod_media = Blueprint('media', __name__, url_prefix='/media')
@mod_media.route('/all')
def all():
    """Paginated gallery of visible media, filterable by genre and type."""
    # Fall back to genre 1 (skateboarding) / type 1 (photos) when the query
    # string omits or blanks a filter.
    selected_media_genre = request.args.get('media_genre') or 1
    selected_media_type = request.args.get('media_type') or 1
    total = utils.get_count_by_genre_and_type(selected_media_genre, selected_media_type)
    page, per_page, offset = utils.get_page_args(page_parameter='page', per_page_parameter='per_page')
    media = (
        db.session.query(Media)
        .join(Country)
        .add_columns(
            Media.media_id,
            Country.country_code.label("country_code"),
            Media.media_topic,
            Media.create_time,
            Media.owner,
        )
        .filter(Media.media_type == selected_media_type)
        .filter(Media.media_genre == selected_media_genre)
        .filter(Media.hidden == 0)
        .order_by(Media.create_time.desc())
        .offset(offset)
        .limit(per_page)
    )
    pagination = utils.get_pagination(page=page, per_page=per_page, total=total, record_name=' media', format_total=True, format_number=True,)
    return render_template('media/media.html', media=media, pagination=pagination, selected_media_genre=selected_media_genre, selected_media_type=selected_media_type)
@mod_media.route('/photo/<media_id>')
def photo(media_id):
    """Photo detail page together with up to the 100 newest comments."""
    record = (
        db.session.query(Media)
        .join(Country)
        .add_columns(
            Media.media_id,
            Country.country_code.label("country_code"),
            Media.media_topic,
            Media.media_text,
            Media.create_time,
            Media.owner,
        )
        .filter(Media.media_id == media_id)
        .first()
    )
    comments = (
        Comment.query.filter_by(media_id=media_id)
        .filter(Comment.user_id == User.user_id)
        .order_by(Comment.id.desc())
        .limit(100)
    )
    return render_template('media/photo.html', photo=record, comments=comments)
@mod_media.route('/video/<string:media_id>')
def video(media_id):
    """Look the video up by its media id and render its detail page."""
    found = Media.query.filter_by(media_id=media_id).first()
    return render_template('media/video.html', video=found)
@mod_media.route("/latest")
def latest():
if(session and session['logged_in'] and session['user_level'] == 1):
latest = db.session.query(
Media
).join(Genre
).join(MediaType
).join(StoryType
).join(Country
).add_columns(
Media.media_id,
(Genre.type_name).label("genre"),
(MediaType.type_name).label("mediatype_name"),
(StoryType.type_name).label("storytype_name"),
(Country.country_code).label("country_code"),
Media.media_topic,
Media.media_desc,
Media.create_time,
Media.owner,
Media.hidden
).order_by(
Media.create_time.desc()
).limit(10)
return render_template("media/latest_media.html", latest=latest)
else:
flash("Please login first")
return redirect(url_for("home"))
@mod_media.route('/delete', methods=['POST'])
def delete():
    """Delete a media record by id (admin only), then redirect home.

    Fix: ``session.get`` replaces direct indexing so an empty/anonymous
    session redirects instead of raising ``KeyError``.
    """
    if session.get('logged_in') and session.get('user_level') == 1:
        if request.method == 'POST':
            media_id = request.form.get('media_id')
            Media.query.filter_by(media_id=media_id).delete()
            db.session.commit()
            flash("Record " + media_id + " was deleted succesfully by " + session['username'] + ".")
    else:
        flash("Please login first")
    # Always redirect home; the flash message is rendered there.
    return redirect(url_for("home"))
@mod_media.route('/<path:filename>', methods=['GET'])
def filename(filename):
    """Serve a media file from the Azure blob CDN when one is configured."""
    cdn_base = app.config.get('AZURE_BLOB_URI')
    if cdn_base:
        return redirect(urljoin(cdn_base, filename))
    # No CDN configured: serve straight from the app's static folder.
    return app.send_static_file(filename)
@mod_media.route("/update/<media_id>", methods = ['POST', 'GET'])
def update(media_id):
if request.method == 'POST':
media = { 'media_id': request.form.get('media_id'),
'media_genre': request.form.get('media_genre'),
'media_type': request.form.get('media_type'),
'story_type': request.form.get('story_type'),
'media_topic': request.form.get('media_topic'),
'media_text': request.form.get('media_text'),
'media_desc': request.form.get('media_desc'),
'country_id': request.form.get('country_id'),
'hidden': request.form.get('hidden') }
if(session and session['logged_in'] and session['user_level'] == 1):
Media.query.filter_by(media_id=media_id).update(media)
db.session.commit()
flash("Record " + media_id + " was updated by user " + session['username'])
return redirect(url_for("home"))
else:
flash("Please login first")
return redirect(url_for("home"))
else:
if(session and session['logged_in'] and session['user_level'] == 1):
result = Media.query.filter_by(media_id=media_id).first()
return render_template("views/user/update_media.html", result=result)
else:
flash("Please login first")
return redirect(url_for("home"))
@app.route("/newupload", methods=['POST','GET'])
def new_upload():
if request.method == 'POST':
# Crea a blob container with the users name
blob_service = utils.get_azure_blob_service()
container = ''
file_to_upload = request.files['file']
filename = secure_filename(file_to_upload.filename)
if(session and session['logged_in']):
container = session['username']
if not blob_service.exists(container):
blob_service.create_container(container)
blob_service.set_container_acl(container, public_access=PublicAccess.Blob)
else:
flash("Please login first")
return redirect(url_for("home"))
# Create Blob from stream
try:
blob_service.create_blob_from_stream(container, filename, file_to_upload)
flash("File " + filename + " was uploaded successfully")
except:
print("Something went wrong while uploading the files %s"%filename)
flash("Something went wrong while uploading the files %s"%filename)
pass
blob = app.config.get('AZURE_BLOB_URI')
path = blob + '/' + container + '/' + filename
# Create a record in database
upload = Uploads(
user_id=session['user_id'],
create_time=datetime.datetime.now(),
path=path)
db.session.add(upload)
db.session.commit()
#print("Upload was inserted to database by user " + session['username'])
return redirect(url_for("my_uploads"))
return render_template("views/user/new_upload.html") | 0.220426 | 0.055362 |
import numpy as np
from glue.config import data_translator
from glue.core import Data, Subset
from astropy.wcs import WCS
from astropy import units as u
from astropy.wcs import WCSSUB_SPECTRAL
from astropy.nddata import StdDevUncertainty, InverseVariance, VarianceUncertainty
from glue_astronomy.spectral_coordinates import SpectralCoordinates
from specutils import Spectrum1D
UNCERT_REF = {'std': StdDevUncertainty,
'var': VarianceUncertainty,
'ivar': InverseVariance}
@data_translator(Spectrum1D)
class Specutils1DHandler:
def to_data(self, obj):
    """Translate a specutils Spectrum1D into a glue Data object.

    Carries over flux (with units), optional uncertainty (recording its
    type in the metadata so it can be reconstructed later), optional
    mask, and the spectrum's metadata.
    """
    new_data = Data(coords=SpectralCoordinates(obj.spectral_axis))
    new_data['flux'] = obj.flux
    new_data.get_component('flux').units = str(obj.unit)
    if obj.uncertainty is not None:
        new_data['uncertainty'] = obj.uncertainty.quantity
        new_data.get_component('uncertainty').units = str(obj.unit)
        # Remember the uncertainty class so to_object can re-wrap it.
        new_data.meta.update({'uncertainty_type': obj.uncertainty.uncertainty_type})
    if obj.mask is not None:
        new_data['mask'] = obj.mask
    new_data.meta.update(obj.meta)
    return new_data
def to_object(self, data_or_subset, attribute=None, statistic='mean'):
"""
Convert a glue Data object to a Spectrum1D object.
Parameters
----------
data_or_subset : `glue.core.data.Data` or `glue.core.subset.Subset`
The data to convert to a Spectrum1D object
attribute : `glue.core.component_id.ComponentID`
The attribute to use for the Spectrum1D data
statistic : {'minimum', 'maximum', 'mean', 'median', 'sum', 'percentile'}
The statistic to use to collapse the dataset
"""
# A Subset carries both its parent Data and the selection state.
if isinstance(data_or_subset, Subset):
data = data_or_subset.data
subset_state = data_or_subset.subset_state
else:
data = data_or_subset
subset_state = None
if isinstance(data.coords, WCS):
# Find spectral axis
# WCS axes are FITS-ordered (reverse of numpy array order), hence the
# naxis - 1 - ... flip to get the numpy axis index.
spec_axis = data.coords.naxis - 1 - data.coords.wcs.spec
# Find non-spectral axes
axes = tuple(i for i in range(data.ndim) if i != spec_axis)
kwargs = {'wcs': data.coords.sub([WCSSUB_SPECTRAL])}
elif isinstance(data.coords, SpectralCoordinates):
kwargs = {'spectral_axis': data.coords.spectral_axis}
else:
raise TypeError('data.coords should be an instance of WCS '
'or SpectralCoordinates')
if isinstance(attribute, str):
attribute = data.id[attribute]
elif len(data.main_components) == 0:
raise ValueError('Data object has no attributes.')
elif attribute is None:
if len(data.main_components) == 1:
attribute = data.main_components[0]
# If no specific attribute is defined, attempt to retrieve
# both the flux and uncertainties
elif any([x.label in ('flux', 'uncertainty') for x in data.components]):
attribute = [data.find_component_id('flux'),
data.find_component_id('uncertainty')]
else:
raise ValueError("Data object has more than one attribute, so "
"you will need to specify which one to use as "
"the flux for the spectrum using the "
"attribute= keyword argument.")
# Build the Spectrum1D constructor kwargs for each requested component.
def parse_attributes(attributes):
data_kwargs = {}
for attribute in attributes:
component = data.get_component(attribute)
# Get mask if there is one defined, or if this is a subset
if subset_state is None:
mask = None
else:
mask = data.get_mask(subset_state=subset_state)
# NOTE(review): mask is inverted — presumably glue subset masks mark
# points *inside* the selection while specutils masks mark *invalid*
# points; confirm against the glue/specutils mask conventions.
mask = ~mask
# Collapse values and mask to profile
if data.ndim > 1:
# Get units and attach to value
values = data.compute_statistic(statistic, attribute, axis=axes,
subset_state=subset_state)
if mask is not None:
# NOTE(review): the mask is collapsed over axes 1..ndim-1, which
# assumes the spectral axis is axis 0, whereas `values` collapses
# over `axes` (all but spec_axis) — confirm agreement when
# spec_axis != 0.
collapse_axes = tuple([x for x in range(1, data.ndim)])
mask = np.all(mask, collapse_axes)
else:
values = data.get_data(attribute)
# Any component other than 'flux'/'uncertainty' is treated as the flux.
attribute_label = attribute.label
if attribute_label not in ('flux', 'uncertainty'):
attribute_label = 'flux'
values = values * u.Unit(component.units)
# If the attribute is uncertainty, we must coerce it to a
# specific uncertainty type. If no value exists in the glue
# object meta dictionary, use standard deviation.
if attribute_label == 'uncertainty':
values = UNCERT_REF[
data.meta.get('uncertainty_type', 'std')](values)
data_kwargs.update({attribute_label: values,
'mask': mask})
return data_kwargs
data_kwargs = parse_attributes(
[attribute] if not hasattr(attribute, '__len__') else attribute)
return Spectrum1D(**data_kwargs, **kwargs) | glue_astronomy/translators/spectrum1d.py | import numpy as np
from glue.config import data_translator
from glue.core import Data, Subset
from astropy.wcs import WCS
from astropy import units as u
from astropy.wcs import WCSSUB_SPECTRAL
from astropy.nddata import StdDevUncertainty, InverseVariance, VarianceUncertainty
from glue_astronomy.spectral_coordinates import SpectralCoordinates
from specutils import Spectrum1D
UNCERT_REF = {'std': StdDevUncertainty,
'var': VarianceUncertainty,
'ivar': InverseVariance}
@data_translator(Spectrum1D)
class Specutils1DHandler:
def to_data(self, obj):
    """Build a glue Data object mirroring the given specutils Spectrum1D.

    Copies flux (with units), the optional uncertainty (remembering its
    type in the metadata), the optional mask, and the spectrum metadata.
    """
    result = Data(coords=SpectralCoordinates(obj.spectral_axis))
    result['flux'] = obj.flux
    result.get_component('flux').units = str(obj.unit)
    if obj.uncertainty is not None:
        result['uncertainty'] = obj.uncertainty.quantity
        result.get_component('uncertainty').units = str(obj.unit)
        # Stored so to_object can re-wrap values in the right class.
        result.meta.update({'uncertainty_type': obj.uncertainty.uncertainty_type})
    if obj.mask is not None:
        result['mask'] = obj.mask
    result.meta.update(obj.meta)
    return result
def to_object(self, data_or_subset, attribute=None, statistic='mean'):
"""
Convert a glue Data object to a Spectrum1D object.
Parameters
----------
data_or_subset : `glue.core.data.Data` or `glue.core.subset.Subset`
The data to convert to a Spectrum1D object
attribute : `glue.core.component_id.ComponentID`
The attribute to use for the Spectrum1D data
statistic : {'minimum', 'maximum', 'mean', 'median', 'sum', 'percentile'}
The statistic to use to collapse the dataset
"""
if isinstance(data_or_subset, Subset):
data = data_or_subset.data
subset_state = data_or_subset.subset_state
else:
data = data_or_subset
subset_state = None
if isinstance(data.coords, WCS):
# Find spectral axis
spec_axis = data.coords.naxis - 1 - data.coords.wcs.spec
# Find non-spectral axes
axes = tuple(i for i in range(data.ndim) if i != spec_axis)
kwargs = {'wcs': data.coords.sub([WCSSUB_SPECTRAL])}
elif isinstance(data.coords, SpectralCoordinates):
kwargs = {'spectral_axis': data.coords.spectral_axis}
else:
raise TypeError('data.coords should be an instance of WCS '
'or SpectralCoordinates')
if isinstance(attribute, str):
attribute = data.id[attribute]
elif len(data.main_components) == 0:
raise ValueError('Data object has no attributes.')
elif attribute is None:
if len(data.main_components) == 1:
attribute = data.main_components[0]
# If no specific attribute is defined, attempt to retrieve
# both the flux and uncertainties
elif any([x.label in ('flux', 'uncertainty') for x in data.components]):
attribute = [data.find_component_id('flux'),
data.find_component_id('uncertainty')]
else:
raise ValueError("Data object has more than one attribute, so "
"you will need to specify which one to use as "
"the flux for the spectrum using the "
"attribute= keyword argument.")
def parse_attributes(attributes):
data_kwargs = {}
for attribute in attributes:
component = data.get_component(attribute)
# Get mask if there is one defined, or if this is a subset
if subset_state is None:
mask = None
else:
mask = data.get_mask(subset_state=subset_state)
mask = ~mask
# Collapse values and mask to profile
if data.ndim > 1:
# Get units and attach to value
values = data.compute_statistic(statistic, attribute, axis=axes,
subset_state=subset_state)
if mask is not None:
collapse_axes = tuple([x for x in range(1, data.ndim)])
mask = np.all(mask, collapse_axes)
else:
values = data.get_data(attribute)
attribute_label = attribute.label
if attribute_label not in ('flux', 'uncertainty'):
attribute_label = 'flux'
values = values * u.Unit(component.units)
# If the attribute is uncertainty, we must coerce it to a
# specific uncertainty type. If no value exists in the glue
# object meta dictionary, use standard deviation.
if attribute_label == 'uncertainty':
values = UNCERT_REF[
data.meta.get('uncertainty_type', 'std')](values)
data_kwargs.update({attribute_label: values,
'mask': mask})
return data_kwargs
data_kwargs = parse_attributes(
[attribute] if not hasattr(attribute, '__len__') else attribute)
return Spectrum1D(**data_kwargs, **kwargs) | 0.865665 | 0.438605 |
from openstack import resource
from openstack import utils
class Trunk(resource.Resource, resource.TagMixin):
resource_key = 'trunk'
resources_key = 'trunks'
base_path = '/trunks'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'name', 'description', 'port_id', 'status', 'sub_ports',
project_id='tenant_id',
is_admin_state_up='admin_state_up',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Trunk name.
name = resource.Body('name')
#: The ID of the project who owns the trunk. Only administrative
#: users can specify a project ID other than their own.
project_id = resource.Body('tenant_id')
#: The trunk description.
description = resource.Body('description')
#: The administrative state of the port, which is up ``True`` or
#: down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The ID of the trunk's parent port
port_id = resource.Body('port_id')
#: The status for the trunk. Possible values are ACTIVE, DOWN, BUILD,
#: DEGRADED, and ERROR.
status = resource.Body('status')
#: A list of ports associated with the trunk.
sub_ports = resource.Body('sub_ports', type=list)
def add_subports(self, session, subports):
    """PUT the given subport list to this trunk's add_subports action."""
    endpoint = utils.urljoin('/trunks', self.id, 'add_subports')
    session.put(endpoint, json={'sub_ports': subports})
    # Mirror the change locally so the resource reflects the request.
    self._body.attributes.update({'sub_ports': subports})
    return self
def delete_subports(self, session, subports):
    """PUT the given subport list to this trunk's remove_subports action."""
    endpoint = utils.urljoin('/trunks', self.id, 'remove_subports')
    session.put(endpoint, json={'sub_ports': subports})
    # Mirror the change locally so the resource reflects the request.
    self._body.attributes.update({'sub_ports': subports})
    return self
def get_subports(self, session):
url = utils.urljoin('/trunks', self.id, 'get_subports')
resp = session.get(url)
self._body.attributes.update(resp.json())
return resp.json() | openstack/network/v2/trunk.py |
from openstack import resource
from openstack import utils
class Trunk(resource.Resource, resource.TagMixin):
resource_key = 'trunk'
resources_key = 'trunks'
base_path = '/trunks'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'name', 'description', 'port_id', 'status', 'sub_ports',
project_id='tenant_id',
is_admin_state_up='admin_state_up',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Trunk name.
name = resource.Body('name')
#: The ID of the project who owns the trunk. Only administrative
#: users can specify a project ID other than their own.
project_id = resource.Body('tenant_id')
#: The trunk description.
description = resource.Body('description')
#: The administrative state of the port, which is up ``True`` or
#: down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The ID of the trunk's parent port
port_id = resource.Body('port_id')
#: The status for the trunk. Possible values are ACTIVE, DOWN, BUILD,
#: DEGRADED, and ERROR.
status = resource.Body('status')
#: A list of ports associated with the trunk.
sub_ports = resource.Body('sub_ports', type=list)
def add_subports(self, session, subports):
    """Attach *subports* to the trunk via the add_subports server action."""
    action_url = utils.urljoin('/trunks', self.id, 'add_subports')
    session.put(action_url, json={'sub_ports': subports})
    self._body.attributes.update({'sub_ports': subports})
    return self
def delete_subports(self, session, subports):
    """Detach *subports* from the trunk via the remove_subports server action."""
    action_url = utils.urljoin('/trunks', self.id, 'remove_subports')
    session.put(action_url, json={'sub_ports': subports})
    self._body.attributes.update({'sub_ports': subports})
    return self
def get_subports(self, session):
url = utils.urljoin('/trunks', self.id, 'get_subports')
resp = session.get(url)
self._body.attributes.update(resp.json())
return resp.json() | 0.51879 | 0.108637 |
from pycontract import *
import unittest
import test.utest
from datetime import datetime
"""
Test example for SAC-SVT 2022.
"""
# Monitor for the FSW command lifecycle: each issued command must be either
# cancelled or dispatched; once dispatched it must succeed exactly once and
# then be closed. HotState subclasses flag an error if the trace ends while
# the state is still active.
class M4(Monitor):
def transition(self, event):
match event:
# A new flight-software command starts tracking under (cmd, nr).
case {'name': 'command', 'cmd': c, 'nr': n, 'kind': "FSW"}:
return self.Dispatch(c, n)
# Waiting for the command to be cancelled or dispatched.
@data
class Dispatch(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'cancel', 'cmd': self.cmd, 'nr': self.nr}:
return ok
case {'name': 'dispatch', 'cmd': self.cmd, 'nr': self.nr}:
return self.Succeed(self.cmd, self.nr)
# Dispatched: must succeed; re-issue or failure is an error.
@data
class Succeed(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'succeed', 'cmd': self.cmd, 'nr': self.nr}:
return self.Close(self.cmd, self.nr)
case {'name': 'command', 'cmd': self.cmd, 'nr': _, 'kind': "FSW"}:
return error(f' command {self.cmd} re-issued')
case {'name': 'fail', 'cmd': self.cmd, 'nr': self.nr}:
return error(f'failure of cmd={self.cmd}, n={self.nr}')
# Succeeded once: a second success is an error; close finishes the cycle.
@data
class Close(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'succeed', 'cmd': self.cmd, 'nr': self.nr}:
return error(f'cmd={self.cmd}, n={self.nr} succeeds more than once')
case {'name': 'close', 'cmd': self.cmd, 'nr': self.nr}:
return ok
def converter(line: List[str]) -> Dict[str, str]:
    """Turn one CSV row into a PyContract event dictionary.

    'command' rows carry a fourth 'kind' column; every other event type
    has only the event name, command name and number.
    """
    name, cmd, nr = line[0], line[1], line[2]
    if name == "command":
        return {'name': name, 'cmd': cmd, 'nr': nr, 'kind': line[3]}
    return {'name': name, 'cmd': cmd, 'nr': nr}
# Renders the monitor's state-machine visualization for this source file.
class Test1(test.utest.Test):
def test1(self):
visualize(__file__, True)
"""
log-1-12500.csv
log-50-250.csv
log-1-50000.csv
log-5-10000.csv
log-10-5000.csv
log-20-2500.csv
log-1-125000.csv
log-5-25000.csv
"""
file = 'log-5-25000.csv'
# Runs monitor M4 over the CSV log configured in `file`, reporting progress
# every 1000 events and printing the total wall-clock time at the end.
class Test2(test.utest.Test):
def test1(self):
m = M4()
set_debug(False)
set_debug_progress(1000)
csv_reader = CSVReader(file, converter)
begin_time = datetime.now()
for event in csv_reader:
# The reader may yield None for unconvertible rows; skip those.
if event is not None:
m.eval(event)
# end() checks that no HotState is still pending at end of trace.
m.end()
csv_reader.close()
print(f'\nExecution time: {datetime.now() - begin_time}\n')
if __name__ == '__main__':
unittest.main() | test/test12-vpt-2022/test12.py | from pycontract import *
import unittest
import test.utest
from datetime import datetime
"""
Test example for SAC-SVT 2022.
"""
class M4(Monitor):
def transition(self, event):
match event:
case {'name': 'command', 'cmd': c, 'nr': n, 'kind': "FSW"}:
return self.Dispatch(c, n)
@data
class Dispatch(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'cancel', 'cmd': self.cmd, 'nr': self.nr}:
return ok
case {'name': 'dispatch', 'cmd': self.cmd, 'nr': self.nr}:
return self.Succeed(self.cmd, self.nr)
@data
class Succeed(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'succeed', 'cmd': self.cmd, 'nr': self.nr}:
return self.Close(self.cmd, self.nr)
case {'name': 'command', 'cmd': self.cmd, 'nr': _, 'kind': "FSW"}:
return error(f' command {self.cmd} re-issued')
case {'name': 'fail', 'cmd': self.cmd, 'nr': self.nr}:
return error(f'failure of cmd={self.cmd}, n={self.nr}')
@data
class Close(HotState):
cmd: str
nr: str
def transition(self, event):
match event:
case {'name': 'succeed', 'cmd': self.cmd, 'nr': self.nr}:
return error(f'cmd={self.cmd}, n={self.nr} succeeds more than once')
case {'name': 'close', 'cmd': self.cmd, 'nr': self.nr}:
return ok
def converter(line: List[str]) -> Dict[str, str]:
    """Map one CSV record onto the event dict consumed by the monitor.

    Only 'command' records have the extra 'kind' field (fourth column).
    """
    event = {'name': line[0], 'cmd': line[1], 'nr': line[2]}
    if event['name'] == "command":
        event['kind'] = line[3]
    return event
class Test1(test.utest.Test):
def test1(self):
visualize(__file__, True)
"""
log-1-12500.csv
log-50-250.csv
log-1-50000.csv
log-5-10000.csv
log-10-5000.csv
log-20-2500.csv
log-1-125000.csv
log-5-25000.csv
"""
file = 'log-5-25000.csv'
class Test2(test.utest.Test):
def test1(self):
m = M4()
set_debug(False)
set_debug_progress(1000)
csv_reader = CSVReader(file, converter)
begin_time = datetime.now()
for event in csv_reader:
if event is not None:
m.eval(event)
m.end()
csv_reader.close()
print(f'\nExecution time: {datetime.now() - begin_time}\n')
if __name__ == '__main__':
unittest.main() | 0.5 | 0.255209 |
import numpy as np
class Preprocessor:
def __init__(
self,
users_dataframe=None,
items_dataframe=None,
interactions_dataframe=None,
item_id_column=None,
items_feature_columns=None,
user_id_column=None,
user_features_columns=None,
interaction_column=None,
):
"""
This class preprocesses the dataframes so they are ready to be fed into the model.
:param users_dataframe: a dataframe containing users
:param items_dataframe: a dataframe containing items
:param interactions_dataframe: a dataframe containing user-item ratings
:param item_id_column: name of the item id column
:param items_feature_columns: item feature column names
:param user_id_column: name of the user id column
:param user_features_columns: user feature column names
:param interaction_column: name of the rating/weight column
"""
# Dataframes start unset; the add_* helpers (also used here) populate them.
self.items_dataframe = None
self.users_dataframe = None
self.interactions_dataframe = None
if users_dataframe is not None:
self.add_users_dataframe(users_dataframe)
self.user_id_column = user_id_column
self.user_features_columns = user_features_columns
if items_dataframe is not None:
self.add_items_dataframe(items_dataframe)
self.item_id_column = item_id_column
self.items_feature_columns = items_feature_columns
if interactions_dataframe is not None:
self.add_interactions_dataframe(interactions_dataframe)
self.interaction_column = interaction_column
def get_data_status(self):
    """Report, per dataframe, whether it has been set and is non-empty."""
    status = self.get_dataframe_status
    return {
        "items_dataframe": status(self.items_dataframe),
        "users_dataframe": status(self.users_dataframe),
        "interactions_dataframe": status(self.interactions_dataframe),
    }
@staticmethod
def get_dataframe_status(data):
    """Return True when *data* is a non-empty dataframe, else False.

    Fix: the original bare ``except:`` also swallowed system-exiting
    exceptions such as KeyboardInterrupt; only the expected failure —
    *data* lacking an ``empty`` attribute (e.g. it is still None) —
    is caught now.
    """
    try:
        return not data.empty
    except AttributeError:
        return False
# Setter: attach/replace the items dataframe.
def add_items_dataframe(self, items_dataframe):
self.items_dataframe = items_dataframe
# Setter: attach/replace the users dataframe.
def add_users_dataframe(self, users_dataframe):
self.users_dataframe = users_dataframe
# Setter: attach/replace the interactions (ratings) dataframe.
def add_interactions_dataframe(self, interactions_dataframe):
self.interactions_dataframe = interactions_dataframe
# Unique user ids from the users dataframe.
def get_unique_users(self):
return self.get_uniques_from(self.users_dataframe, self.user_id_column)
# Unique item ids from the items dataframe.
def get_unique_items(self):
return self.get_uniques_from(self.items_dataframe, self.item_id_column)
# Unique item ids that actually appear in the interactions dataframe.
def get_unique_items_from_ratings(self):
return self.get_uniques_from(self.interactions_dataframe, self.item_id_column)
# Unique user ids that actually appear in the interactions dataframe.
def get_unique_users_from_ratings(self):
return self.get_uniques_from(self.interactions_dataframe, self.user_id_column)
# Shared helper: distinct values of one dataframe column.
@staticmethod
def get_uniques_from(dataframe, column):
return dataframe[column].unique()
def clean_unknown_interactions_func(self):
    """Drop interactions referencing an item or user that is absent from
    the items/users dataframes, keeping only fully resolvable ratings."""
    known_items = self.items_dataframe[self.item_id_column]
    known_users = self.users_dataframe[self.user_id_column]
    ratings = self.interactions_dataframe
    ratings = ratings[ratings[self.item_id_column].isin(known_items)]
    ratings = ratings[ratings[self.user_id_column].isin(known_users)]
    self.interactions_dataframe = ratings
# All distinct item feature values across the configured feature columns.
def get_unique_items_features(self):
return self.get_uniques_by_columns(
self.items_dataframe, self.items_feature_columns
)
# All distinct user feature values across the configured feature columns.
def get_unique_users_features(self):
return self.get_uniques_by_columns(
self.users_dataframe, self.user_features_columns
)
# Stringify the frame, then collect distinct values column by column.
# NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1 in favour
# of DataFrame.map — confirm the pandas version this project pins.
def get_uniques_by_columns(self, dataframe, columns):
dataframe = dataframe.applymap(str)
uniques = list()
for col in columns:
uniques.extend(dataframe[col].unique())
return uniques
def get_interactions_format(self):
    """Return the interactions as a list of (user_id, item_id, weight)
    tuples, ready to be fed to LightFM's dataset builder.

    Fix: ``np.float`` (a deprecated alias for the builtin ``float``)
    was removed in NumPy 1.24; the builtin is the documented drop-in
    replacement.

    Todo : it was a generator but LightFM needs the len (if len(datum) == 3),
    so it was changed to a list.
    """
    return [
        (
            row[self.user_id_column],
            row[self.item_id_column],
            float(row[self.interaction_column]),
        )
        for idx, row in self.interactions_dataframe.iterrows()
    ]
@staticmethod
def prepare_features_format(data, id, feature_columns):
for row in data.iterrows():
yield (row[1][id], [str(row[1][feature]) for feature in feature_columns]) | lightfm_dataset_helper/Preprocessor.py | import numpy as np
class Preprocessor:
def __init__(
self,
users_dataframe=None,
items_dataframe=None,
interactions_dataframe=None,
item_id_column=None,
items_feature_columns=None,
user_id_column=None,
user_features_columns=None,
interaction_column=None,
):
"""
this class dedicated to preprocess the Dataframes , ready to be fed into the model
:param users_dataframe: a dataframe contain users
:param items_dataframe: a dataframe contain items
:param interactions_dataframe: a dataframe contain ratings of items - users
:param item_id_column: name of items column
:param items_feature_columns: items_feature_columns
:param user_id_column: name of users column
:param user_features_columns: user_features_columns
"""
self.items_dataframe = None
self.users_dataframe = None
self.interactions_dataframe = None
if users_dataframe is not None:
self.add_users_dataframe(users_dataframe)
self.user_id_column = user_id_column
self.user_features_columns = user_features_columns
if items_dataframe is not None:
self.add_items_dataframe(items_dataframe)
self.item_id_column = item_id_column
self.items_feature_columns = items_feature_columns
if interactions_dataframe is not None:
self.add_interactions_dataframe(interactions_dataframe)
self.interaction_column = interaction_column
def get_data_status(self):
return {
"items_dataframe": self.get_dataframe_status(self.items_dataframe),
"users_dataframe": self.get_dataframe_status(self.users_dataframe),
"interactions_dataframe": self.get_dataframe_status(
self.interactions_dataframe
),
}
@staticmethod
def get_dataframe_status(data):
    """Return True when *data* is a non-empty dataframe, else False.

    Fix: replaces the original bare ``except:`` (which swallowed even
    KeyboardInterrupt/SystemExit) with the one expected failure mode —
    *data* having no ``empty`` attribute, e.g. when it is still None.
    """
    try:
        return not data.empty
    except AttributeError:
        return False
def add_items_dataframe(self, items_dataframe):
self.items_dataframe = items_dataframe
def add_users_dataframe(self, users_dataframe):
self.users_dataframe = users_dataframe
def add_interactions_dataframe(self, interactions_dataframe):
self.interactions_dataframe = interactions_dataframe
def get_unique_users(self):
return self.get_uniques_from(self.users_dataframe, self.user_id_column)
def get_unique_items(self):
return self.get_uniques_from(self.items_dataframe, self.item_id_column)
def get_unique_items_from_ratings(self):
return self.get_uniques_from(self.interactions_dataframe, self.item_id_column)
def get_unique_users_from_ratings(self):
return self.get_uniques_from(self.interactions_dataframe, self.user_id_column)
@staticmethod
def get_uniques_from(dataframe, column):
return dataframe[column].unique()
def clean_unknown_interactions_func(self):
"""
this function to remove all the existing ratings with unknown items and users
:return:
"""
self.interactions_dataframe = self.interactions_dataframe[
self.interactions_dataframe[self.item_id_column].isin(
self.items_dataframe[self.item_id_column]
)
]
self.interactions_dataframe = self.interactions_dataframe[
self.interactions_dataframe[self.user_id_column].isin(
self.users_dataframe[self.user_id_column]
)
]
def get_unique_items_features(self):
return self.get_uniques_by_columns(
self.items_dataframe, self.items_feature_columns
)
def get_unique_users_features(self):
return self.get_uniques_by_columns(
self.users_dataframe, self.user_features_columns
)
def get_uniques_by_columns(self, dataframe, columns):
dataframe = dataframe.applymap(str)
uniques = list()
for col in columns:
uniques.extend(dataframe[col].unique())
return uniques
def get_interactions_format(self):
    """Return the interactions as a list of ``(user_id, item_id, weight)``.

    Todo: it was a generator but LightFM needs the len (if len(datum) == 3)
    so it was changed to a list.

    :return: list of (user_id, item_id, weight) tuples.  The user and item
        ids will be translated to internal model indices using the mappings
        constructed during the fit call.

    Fixes: ``np.float`` was removed in NumPy 1.24, so the weight is cast
    with the builtin ``float``.  Iterating the three columns with ``zip``
    also avoids ``DataFrame.iterrows``, which is slow and upcasts row
    values to a common dtype.
    """
    frame = self.interactions_dataframe
    return [
        (user, item, float(weight))
        for user, item, weight in zip(
            frame[self.user_id_column],
            frame[self.item_id_column],
            frame[self.interaction_column],
        )
    ]
@staticmethod
def prepare_features_format(data, id, feature_columns):
for row in data.iterrows():
yield (row[1][id], [str(row[1][feature]) for feature in feature_columns]) | 0.662578 | 0.393385 |
def _github_wiki_impl(ctx):
    """Wraps each src file in a markdown code fence, then clones the wiki
    repository, appends the generated pages and pushes the result."""
    clonedir = ctx.outputs.clonedir
    output = []
    for f in ctx.files.srcs:
        # One .md page per source file; the path is flattened into the name.
        file = ctx.actions.declare_file(ctx.attr.dest + f.path.replace("/", "-").replace(".log", ".md"))
        ctx.actions.run_shell(
            inputs = [f],
            outputs = [file],
            progress_message = "Generating files of %s" % file.short_path,
            command = "echo '```' > {} && ".format(file.path) +
                      "cat {} >> {} && ".format(f.path, file.path) +
                      "echo '```' >> {} ".format(file.path),
            # git needs real credentials/network, so the action must stay
            # local, unsandboxed and uncached.
            execution_requirements = {
                "no-sandbox": "1",
                "no-cache": "1",
                "no-remote": "1",
                "local": "1",
            },
        )
        output.append(file)
    if len(output) > 0:
        ctx.actions.run_shell(
            inputs = output,
            outputs = [clonedir],
            progress_message = "Commiting the changes on %s" % clonedir.short_path,
            command = "export HOME=\"$PWD\" && git config --global user.email \"{}\" && git config --global user.name \"{}\" &&".format(ctx.attr.git_email, ctx.attr.git_name) +
                      "git clone {} {} && ".format(ctx.attr.clone_url, clonedir.path) +
                      "for file in {}/*; do cat $file >> {}/$(basename $file); done && ".format(output[0].dirname, clonedir.path) +
                      "cd {} && ".format(clonedir.path) +
                      # BUG FIX: the previous quoting '\''Commit msg'\'' split
                      # the message into two shell words ("''Commit" "msg''"),
                      # making git commit fail, and the failure was silently
                      # hidden by "|| true".  Plain single quotes inside this
                      # double-quoted Starlark string are what the shell needs.
                      "git add * && git commit -a -m 'Commit msg' && git push || true",
        )
    return [DefaultInfo(files = depset([clonedir]))]
# Publishes the given log files to a GitHub wiki git repository as
# fenced-markdown pages.  (The closing parenthesis of this declaration was
# corrupted by an extraction artifact and is restored here.)
github_wiki = rule(
    implementation = _github_wiki_impl,
    attrs = {
        "srcs": attr.label_list(
            allow_files = True,
            mandatory = True,
            doc = "The file whose are to be published",
        ),
        "clone_url": attr.string(
            mandatory = True,
            doc = "Git url to clone",
        ),
        "dest": attr.string(
            default = "wiki/",
            doc = "Destination dir to ship",
        ),
        "deps": attr.label_list(
            default = [],
        ),
        "git_name": attr.string(
            default = "Wiki",
        ),
        "git_email": attr.string(
            default = "root@localhost",
        ),
    },
    outputs = {"clonedir": "%{name}-wiki"},
)
def _github_wiki_impl(ctx):
    # Wraps each src file in a markdown code fence, then clones the wiki
    # repository, appends the generated pages and pushes the result.
    clonedir = ctx.outputs.clonedir
    output = []
    for f in ctx.files.srcs:
        # One .md page per source file; the path is flattened into the name.
        file = ctx.actions.declare_file(ctx.attr.dest + f.path.replace("/", "-").replace(".log", ".md"))
        ctx.actions.run_shell(
            inputs = [f],
            outputs = [file],
            progress_message = "Generating files of %s" % file.short_path,
            command = "echo '```' > {} && ".format(file.path) +
                      "cat {} >> {} && ".format(f.path, file.path) +
                      "echo '```' >> {} ".format(file.path),
            # git needs real credentials/network, so the action must stay
            # local, unsandboxed and uncached.
            execution_requirements = {
                "no-sandbox": "1",
                "no-cache": "1",
                "no-remote": "1",
                "local": "1",
            },
        )
        output.append(file)
    if len(output) > 0:
        ctx.actions.run_shell(
            inputs = output,
            outputs = [clonedir],
            progress_message = "Commiting the changes on %s" % clonedir.short_path,
            command = "export HOME=\"$PWD\" && git config --global user.email \"{}\" && git config --global user.name \"{}\" &&".format(ctx.attr.git_email, ctx.attr.git_name) +
                      "git clone {} {} && ".format(ctx.attr.clone_url, clonedir.path) +
                      "for file in {}/*; do cat $file >> {}/$(basename $file); done && ".format(output[0].dirname, clonedir.path) +
                      "cd {} && ".format(clonedir.path) +
                      # NOTE(review): the '\''Commit msg'\'' quoting appears to
                      # split the commit message into two shell words, so the
                      # commit likely fails and is masked by "|| true" — verify.
                      "git add * && git commit -a -m '\''Commit msg'\'' && git push || true",
        )
    return [DefaultInfo(files = depset([clonedir]))]
# Publishes the given log files to a GitHub wiki git repository as
# fenced-markdown pages.
github_wiki = rule(
    implementation = _github_wiki_impl,
    attrs = {
        "srcs": attr.label_list(
            allow_files = True,
            mandatory = True,
            doc = "The file whose are to be published",
        ),
        "clone_url": attr.string(
            mandatory = True,
            doc = "Git url to clone",
        ),
        "dest": attr.string(
            default = "wiki/",
            doc = "Destination dir to ship",
        ),
        "deps": attr.label_list(
            default = [],
        ),
        "git_name": attr.string(
            default = "Wiki",
        ),
        "git_email": attr.string(
            default = "root@localhost",
        ),
    },
    # Output directory named after the target: the local clone of the wiki.
    outputs = {"clonedir": "%{name}-wiki"},
)
import os
from nanome.util import Logs, config
from nanome.util.enums import StreamDirection, PluginListButtonType
from nanome._internal import _PluginInstance
from nanome._internal._process import _Bonding, _Dssp
from nanome._internal._network._commands._callbacks import _Messages
from nanome.api.integration import Integration
from nanome.api.ui import Menu
from nanome.api.streams import Stream
from nanome.api import Room, Files
class PluginInstance(_PluginInstance):
    """
    | Base class of any plugin.
    | Constructor should never be called by the user as it is network-instantiated when a session connects.
    | Start, update, and all methods starting by "on" can be overridden by user, in order to get requests results
    """
    # Most request methods follow one pattern: compute expects_response
    # (True when a callback was supplied or the plugin is async), send the
    # message over the session network, and register the callback under the
    # returned request id.

    _instance = None
    is_async = False

    def __init__(self):
        # important: do not delete and leave empty to prevent double init.
        pass

    def __pseudo_init__(self):
        # Real initialization; called from __new__ so subclasses cannot
        # accidentally skip it by overriding __init__.
        self.__menu = Menu()  # deprecated
        self.room = Room()
        self.integration = Integration()
        self.files = Files(self)
        self.__set_first = False
        self.PluginListButtonType = PluginListButtonType
        PluginInstance._instance = self

    def __new__(cls):
        n = super(PluginInstance, cls).__new__(cls)
        n.__pseudo_init__()
        return n

    def start(self):
        """
        | Called when user "Activates" the plugin
        """
        pass

    def update(self):
        """
        | Called when instance updates (multiple times per second)
        """
        pass

    def on_run(self):
        """
        | Called when user presses "Run"
        """
        Logs.warning('Callback on_run not defined. Ignoring')

    def on_stop(self):
        """
        | Called when user disconnects or plugin crashes
        """
        pass

    def on_advanced_settings(self):
        """
        | Called when user presses "Advanced Settings"
        """
        Logs.warning('Callback on_advanced_settings not defined. Ignoring')

    def on_complex_added(self):
        """
        | Called whenever a complex is added to the workspace.
        """
        pass

    def on_complex_removed(self):
        """
        | Called whenever a complex is removed from the workspace.
        """
        pass

    def on_presenter_change(self):
        """
        | Called when room's presenter changes.
        """
        pass

    def request_workspace(self, callback=None):
        """
        | Request the entire workspace, in deep mode

        :kwarg callback: Callable[[Workspace], None]
        """
        # Only ask the session for a reply when someone will consume it.
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.workspace_request, None, expects_response)
        return self._save_callback(id, callback)

    def request_complex_list(self, callback=None):
        """
        | Request the list of all complexes in the workspace, in shallow mode

        :kwarg callback: Callable[[List[Complex]], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.complex_list_request, None, expects_response)
        return self._save_callback(id, callback)

    def request_complexes(self, id_list, callback=None):
        """
        | Requests a list of complexes by their indices
        | Complexes returned contains the full structure (atom/bond/residue/chain/molecule)

        :param id_list: List of indices
        :type id_list: list of :class:`int`
        :kwarg callback: Callable[[List[Complex]], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.complexes_request, id_list, expects_response)
        return self._save_callback(id, callback)

    def update_workspace(self, workspace):
        """
        | Replace the current workspace in the scene by the workspace in parameter

        :param workspace: New workspace
        :type workspace: :class:`~nanome.structure.Workspace`
        """
        self._network._send(_Messages.workspace_update, workspace, False)

    def send_notification(self, type, message):
        """
        | Send a notification to the user

        :param type: Type of notification to send.
        :type type: :class:`~nanome.util.enums.NotificationTypes`
        :param message: Text to display to the user.
        :type message: str
        """
        # avoids unnecessary dependencies.
        # needs to match the command serializer.
        args = (type, message)
        self._network._send(_Messages.notification_send, args, False)

    def update_structures_deep(self, structures, callback=None):
        """
        | Update the specific molecular structures in the scene to match the structures in parameter.
        | Will also update descendent structures and can be used to remove descendent structures.

        :param structures: List of molecular structures to update.
        :type structures: list of :class:`~nanome.structure.Base`
        :kwarg callback: Callable[[], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.structures_deep_update, structures, expects_response)
        return self._save_callback(id, callback)

    def update_structures_shallow(self, structures):
        """
        | Update the specific molecular structures in the scene to match the structures in parameter
        | Only updates the structure's data, will not update children or other descendents.

        :param structures: List of molecular structures to update.
        :type structures: list of :class:`~nanome.structure.Base`
        """
        self._network._send(_Messages.structures_shallow_update, structures, False)

    def zoom_on_structures(self, structures, callback=None):
        """
        | Repositions and resizes the workspace such that the provided structure(s) will be in the
        | center of the users view.

        :param structures: Molecular structure(s) to update.
        :type structures: list of :class:`~nanome.structure.Base`
        :kwarg callback: Callable[[], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.structures_zoom, structures, expects_response)
        return self._save_callback(id, callback)

    def center_on_structures(self, structures, callback=None):
        """
        | Repositions the workspace such that the provided structure(s) will be in the
        | center of the world.

        :param structures: Molecular structure(s) to update.
        :type structures: list of :class:`~nanome.structure.Base`
        :kwarg callback: Callable[[], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.structures_center, structures, expects_response)
        return self._save_callback(id, callback)

    def add_to_workspace(self, complex_list, callback=None):
        """
        | Add a list of complexes to the current workspace

        :param complex_list: List of Complexes to add
        :type complex_list: list of :class:`~nanome.structure.Complex`
        :kwarg callback: Callable[[], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.add_to_workspace, complex_list, expects_response)
        return self._save_callback(id, callback)

    def update_menu(self, menu):
        """
        | Update the menu in Nanome

        :param menu: Menu to update
        :type menu: :class:`~nanome.ui.Menu`
        """
        # Keep a local registry of menus by index before sending the update.
        self._menus[menu.index] = menu
        self._network._send(_Messages.menu_update, menu, False)

    def update_content(self, *content):
        """
        | Update specific UI elements (button, slider, list...)

        :param content: UI elements to update
        :type content: :class:`~nanome.ui.UIBase`
            or multiple :class:`~nanome.ui.UIBase`
            or a list of :class:`~nanome.ui.UIBase`
        """
        # Accept either varargs or a single list argument.
        if len(content) == 1 and isinstance(content[0], list):
            content = content[0]
        self._network._send(_Messages.content_update, content, False)

    def update_node(self, *nodes):
        """
        | Updates layout nodes and their children

        :param nodes: Layout nodes to update
        :type nodes: :class:`~nanome.ui.LayoutNode`
            or multiple :class:`~nanome.ui.LayoutNode`
            or a list of :class:`~nanome.ui.LayoutNode`
        """
        # Accept either varargs or a single list argument.
        if len(nodes) == 1 and isinstance(nodes[0], list):
            nodes = nodes[0]
        self._network._send(_Messages.node_update, nodes, False)

    def set_menu_transform(self, index, position, rotation, scale):
        """
        | Update the position, scale, and rotation of the menu

        :param index: Index of the menu you wish to update
        :type index: int
        :param position: New position of the menu
        :type position: :class:`~nanome.util.vector3`
        :param rotation: New rotation of the menu
        :type rotation: :class:`~nanome.util.quaternion`
        :param scale: New scale of the menu
        :type scale: :class:`~nanome.util.vector3`
        """
        self._network._send(_Messages.menu_transform_set,
                            (index, position, rotation, scale), False)

    def request_menu_transform(self, index, callback=None):
        """
        | Requests spatial information of the plugin menu (position, rotation, scale)

        :param index: Index of the menu you wish to read
        :type index: int
        :kwarg callback: Callable[[Vector3, Quaternion, Vector3], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.menu_transform_request, index, expects_response)
        return self._save_callback(id, callback)

    def save_files(self, file_list, callback=None):
        """
        | Save files on the machine running Nanome, and returns result

        :param file_list: List of files to save with their content
        :type file_list: list of :class:`~nanome.util.file.FileSaveData`
        :kwarg callback: Callable[[List[FileSaveData]], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.file_save, file_list, expects_response)
        return self._save_callback(id, callback)

    def create_writing_stream(self, indices_list, stream_type, callback=None):
        """
        | Create a stream allowing the plugin to continuously update properties of many objects

        :param indices_list: List of indices of all objects that should be in the stream
        :type indices_list: list of :class:`int`
        :param stream_type: Type of stream to create
        :type stream_type: :class:`~nanome.streams.Stream.Type`
        :kwarg callback: Callable[[Stream, StreamCreationError], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.stream_create, (stream_type, indices_list, StreamDirection.writing), expects_response)
        return self._save_callback(id, callback)

    def create_reading_stream(self, indices_list, stream_type, callback=None):
        """
        | Create a stream allowing the plugin to continuously receive properties of many objects

        :param indices_list: List of indices of all objects that should be in the stream
        :type indices_list: list of :class:`int`
        :param stream_type: Type of stream to create
        :type stream_type: :class:`~nanome.streams.Stream.Type`
        :kwarg callback: Callable[[Stream, StreamCreationError], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.stream_create, (stream_type, indices_list, StreamDirection.reading), expects_response)
        return self._save_callback(id, callback)

    def add_bonds(self, complex_list, callback=None, fast_mode=None):
        """
        | Calculate bonds
        | Requires openbabel to be installed

        :param complex_list: List of complexes to add bonds to
        :type complex_list: list of :class:`~nanome.structure.Complex`
        :kwarg callback: Callable[[List[Complex]], None]
        """
        bonding = _Bonding(self, complex_list, callback, fast_mode)
        return bonding._start()

    def add_dssp(self, complex_list, callback=None):
        """
        | Use DSSP to calculate secondary structures

        :param complex_list: List of complexes to add ribbons to
        :type complex_list: list of :class:`~nanome.structure.Complex`
        :kwarg callback: Callable[[List[Complex]], None]
        """
        dssp = _Dssp(self, complex_list, callback)
        return dssp._start()

    def add_volume(self, complex, volume, properties, complex_to_align_index=-1, callback=None):
        # Sends a volume (e.g. electron density) to be displayed, optionally
        # aligned to the complex at complex_to_align_index (-1 for none).
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.add_volume, (complex, complex_to_align_index, volume, properties), expects_response)
        return self._save_callback(id, callback)

    def open_url(self, url, desktop_browser=False):
        """
        | Opens a URL alongside the Nanome session in the default web browser.

        :param url: url to open
        :type url: str
        """
        url = url.strip()
        # Default to plain http when no scheme was provided.
        if '://' not in url:
            url = 'http://' + url
        self._network._send(_Messages.open_url, (url, desktop_browser), False)

    def request_presenter_info(self, callback=None):
        """
        | Requests presenter account info (unique ID, name, email)

        :kwarg callback: Callable[[PresenterInfo], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.presenter_info_request, None, expects_response)
        return self._save_callback(id, callback)

    def request_controller_transforms(self, callback=None):
        """
        | Requests presenter controller info (head position, head rotation, left controller position, left controller rotation, right controller position, right controller rotation)

        :kwarg callback: Callable[[Vector3, Quaternion, Vector3, Quaternion, Vector3, Quaternion], None]
        """
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.controller_transforms_request, None, expects_response)
        return self._save_callback(id, callback)

    def set_plugin_list_button(self, button, text=None, usable=None):
        """
        | Set text and/or usable state of the buttons on the plugin connection menu in Nanome

        :param button: Button to set
        :type button: :class:`~ButtonType`
        :param text: Text displayed on the button. If None, doesn't set text
        :type text: str
        :param usable: Set button to be usable or not. If None, doesn't set usable text
        :type usable: bool
        """
        # NOTE(review): current_text/current_usable wrap the stored values in
        # throwaway one-element lists; writing current_text[0] below mutates
        # the temporary list, not self._run_text / self._advanced_settings_*.
        # It looks like those writes were intended to persist — verify.
        if button == PluginListButtonType.run:
            current_text = [self._run_text]
            current_usable = [self._run_usable]
        else:
            current_text = [self._advanced_settings_text]
            current_usable = [self._advanced_settings_usable]
        if text is None:
            text = current_text[0]
        else:
            current_text[0] = text
        if usable is None:
            usable = current_usable[0]
        else:
            current_usable[0] = usable
        self._network._send(_Messages.plugin_list_button_set, (button, text, usable), False)

    def send_files_to_load(self, files_list, callback=None):
        """
        | Send file(s) to Nanome to load directly using Nanome's importers.
        | Can send just a list of paths, or a list of tuples containing (path, name)

        :param files_list: List of files to load
        :type files_list: list of or unique object of type :class:`str` or (:class:`str`, :class:`str`)
        """
        files = []
        if not isinstance(files_list, list):
            files_list = [files_list]
        for file in files_list:
            if isinstance(file, tuple):
                # (path, display name): keep the original file extension.
                full_path, file_name = file
                file_name += '.' + full_path.split('.')[-1]
            else:
                full_path = file.replace('\\', '/')
                file_name = full_path.split('/')[-1]
            with open(full_path, 'rb') as content_file:
                data = content_file.read()
            files.append((file_name, data))
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.load_file, (files, True, True), expects_response)
        return self._save_callback(id, callback)

    def request_export(self, format, callback=None, entities=None):
        """
        Request a file export using Nanome exporters

        Can request either molecule or workspace export, for entities in Nanome workspace
        or directly sent by the plugin (without being uploaded to workspace)

        :param format: File format to export
        :type format: :class:`~nanome.util.enums.ExportFormats`
        :param entities: Entities to export (complexes to send, or indices if referencing complexes in workspace, or a workspace, or nothing if exporting Nanome workspace)
        :type entities: list of or unique object of type :class:`~nanome.structure.Workspace` or :class:`~nanome.structure.Complex`, or None, or list of or unique :class:`int`
        :kwarg callback: Callable[[Union[str, bytes]], None]
        """
        if entities is not None and not isinstance(entities, list):
            entities = [entities]
        id = self._network._send(_Messages.export_files, (format, entities), True)
        return self._save_callback(id, callback)

    def apply_color_scheme(self, color_scheme, target, only_carbons):
        """
        Applies a color scheme to selected atoms.

        :param color_scheme: the color scheme to use on atoms
        :type color_scheme: :class:`~nanome.util.enums.ColorScheme`
        :param target: whether you want to color the atom, the surface, or the ribbon
        :type target: :class:`~nanome.util.enums.ColorSchemeTarget`
        :param only_carbons: whether you want to only color carbons, or all atoms.
        :type only_carbons: bool
        """
        self._network._send(_Messages.apply_color_scheme, (color_scheme, target, only_carbons), False)

    @property
    def plugin_files_path(self):
        # Directory for this plugin's files (from config), created on demand.
        path = os.path.expanduser(config.fetch('plugin_files_path'))
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    @property
    def custom_data(self):
        """
        Get custom data set with Plugin.set_custom_data

        :type: tuple of objects or None if no data has been set
        """
        return self._custom_data

    @property
    def menu(self):
        # Deprecated default menu accessor; warns once per instance.
        if not self.__set_first:
            self.__set_first = True
            Logs.warning("The default menu (self.menu) is now deprecated and will be removed in a future version. Please use the ui.Menu() constructor to create the menu.")
        return self.__menu

    @menu.setter
    def menu(self, value):
        self.__set_first = True
        self.__menu = value

    @Logs.deprecated("create_writing_stream")
    def create_stream(self, atom_indices_list, callback):
        # Deprecated: position-only writing stream kept for compatibility.
        id = self._network._send(_Messages.stream_create, (Stream.Type.position, atom_indices_list, StreamDirection.writing), callback is not None)
        self._save_callback(id, callback)

    @Logs.deprecated("create_writing_stream")
    def create_atom_stream(self, atom_indices_list, stream_type, callback):
        # Deprecated alias for create_writing_stream.
        self.create_writing_stream(atom_indices_list, stream_type, callback)
class AsyncPluginInstance(PluginInstance):
    """
    | Base class of any asynchronous plugin.
    | Constructor should never be called by the user as it is network-instantiated when a session connects.
    | All methods available to PluginInstance are available to AsyncPluginInstance.
    | Decorating these methods with @async_callback will allow them to use the async keyword in their definition
    """

    # Makes every request expect a response even without an explicit callback.
    is_async = True
class _DefaultPlugin(PluginInstance):
    """Minimal no-op plugin used when no user plugin class is supplied.

    The final line of this class was corrupted by an extraction artifact
    ("pass | nanome/api/plugin_instance.py | import os"); it is restored
    to the plain ``pass`` body.
    """

    def __init__(self):
        pass
from nanome.util import Logs, config
from nanome.util.enums import StreamDirection, PluginListButtonType
from nanome._internal import _PluginInstance
from nanome._internal._process import _Bonding, _Dssp
from nanome._internal._network._commands._callbacks import _Messages
from nanome.api.integration import Integration
from nanome.api.ui import Menu
from nanome.api.streams import Stream
from nanome.api import Room, Files
class PluginInstance(_PluginInstance):
"""
| Base class of any plugin.
| Constructor should never be called by the user as it is network-instantiated when a session connects.
| Start, update, and all methods starting by "on" can be overridden by user, in order to get requests results
"""
_instance = None
is_async = False
def __init__(self):
# important: do not delete and leave empty to prevent double init.
pass
def __pseudo_init__(self):
self.__menu = Menu() # deprecated
self.room = Room()
self.integration = Integration()
self.files = Files(self)
self.__set_first = False
self.PluginListButtonType = PluginListButtonType
PluginInstance._instance = self
def __new__(cls):
n = super(PluginInstance, cls).__new__(cls)
n.__pseudo_init__()
return n
def start(self):
"""
| Called when user "Activates" the plugin
"""
pass
def update(self):
"""
| Called when instance updates (multiple times per second)
"""
pass
def on_run(self):
"""
| Called when user presses "Run"
"""
Logs.warning('Callback on_run not defined. Ignoring')
def on_stop(self):
"""
| Called when user disconnects or plugin crashes
"""
pass
def on_advanced_settings(self):
"""
| Called when user presses "Advanced Settings"
"""
Logs.warning('Callback on_advanced_settings not defined. Ignoring')
def on_complex_added(self):
"""
| Called whenever a complex is added to the workspace.
"""
pass
def on_complex_removed(self):
"""
| Called whenever a complex is removed from the workspace.
"""
pass
def on_presenter_change(self):
"""
| Called when room's presenter changes.
"""
pass
def request_workspace(self, callback=None):
"""
| Request the entire workspace, in deep mode
callback: Callable[[Workspace], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.workspace_request, None, expects_response)
return self._save_callback(id, callback)
def request_complex_list(self, callback=None):
"""
| Request the list of all complexes in the workspace, in shallow mode
kwarg callback: Callable[[List[Complex]], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.complex_list_request, None, expects_response)
return self._save_callback(id, callback)
def request_complexes(self, id_list, callback=None):
"""
| Requests a list of complexes by their indices
| Complexes returned contains the full structure (atom/bond/residue/chain/molecule)
:param id_list: List of indices
:type id_list: list of :class:`int`
:callback: Callable[[List[Complex]], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.complexes_request, id_list, expects_response)
return self._save_callback(id, callback)
def update_workspace(self, workspace):
"""
| Replace the current workspace in the scene by the workspace in parameter
:param workspace: New workspace
:type workspace: :class:`~nanome.structure.Workspace`
"""
self._network._send(_Messages.workspace_update, workspace, False)
def send_notification(self, type, message):
"""
| Send a notification to the user
:param type: Type of notification to send.
:type workspace: :class:`~nanome.util.enums.NotificationTypes`
:param message: Text to display to the user.
:type message: str
"""
# avoids unnecessary dependencies.
# needs to match the command serializer.
args = (type, message)
self._network._send(_Messages.notification_send, args, False)
def update_structures_deep(self, structures, callback=None):
"""
| Update the specific molecular structures in the scene to match the structures in parameter.
| Will also update descendent structures and can be used to remove descendent structures.
:param structures: List of molecular structures to update.
:type structures: list of :class:`~nanome.structure.Base`
callback: Callable[[], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.structures_deep_update, structures, expects_response)
return self._save_callback(id, callback)
def update_structures_shallow(self, structures):
"""
| Update the specific molecular structures in the scene to match the structures in parameter
| Only updates the structure's data, will not update children or other descendents.
:param structures: List of molecular structures to update.
:type structures: list of :class:`~nanome.structure.Base`
"""
self._network._send(_Messages.structures_shallow_update, structures, False)
def zoom_on_structures(self, structures, callback=None):
"""
| Repositions and resizes the workspace such that the provided structure(s) will be in the
| center of the users view.
:param structures: Molecular structure(s) to update.
:type structures: list of :class:`~nanome.structure.Base`
:kwarg callback: Callable[[], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.structures_zoom, structures, expects_response)
return self._save_callback(id, callback)
def center_on_structures(self, structures, callback=None):
"""
| Repositions the workspace such that the provided structure(s) will be in the
| center of the world.
:param structures: Molecular structure(s) to update.
:type structures: list of :class:`~nanome.structure.Base`
:kwarg callback: Callable[[], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.structures_center, structures, expects_response)
return self._save_callback(id, callback)
def add_to_workspace(self, complex_list, callback=None):
"""
| Add a list of complexes to the current workspace
:param complex_list: List of Complexes to add
:type complex_list: list of :class:`~nanome.structure.Complex`
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.add_to_workspace, complex_list, expects_response)
return self._save_callback(id, callback)
def update_menu(self, menu):
"""
| Update the menu in Nanome
:param menu: Menu to update
:type menu: :class:`~nanome.ui.Menu`
"""
self._menus[menu.index] = menu
self._network._send(_Messages.menu_update, menu, False)
def update_content(self, *content):
"""
| Update specific UI elements (button, slider, list...)
:param content: UI elements to update
:type content: :class:`~nanome.ui.UIBase`
or multiple :class:`~nanome.ui.UIBase`
or a list of :class:`~nanome.ui.UIBase`
"""
if len(content) == 1 and isinstance(content[0], list):
content = content[0]
self._network._send(_Messages.content_update, content, False)
def update_node(self, *nodes):
"""
| Updates layout nodes and their children
:param nodes: Layout nodes to update
:type nodes: :class:`~nanome.ui.LayoutNode`
or multiple :class:`~nanome.ui.LayoutNode`
or a list of :class:`~nanome.ui.LayoutNode`
"""
if len(nodes) == 1 and isinstance(nodes[0], list):
nodes = nodes[0]
self._network._send(_Messages.node_update, nodes, False)
def set_menu_transform(self, index, position, rotation, scale):
"""
| Update the position, scale, and rotation of the menu
:param index: Index of the menu you wish to update
:type index: int
:param position: New position of the menu
:type position: :class:`~nanome.util.vector3`
:param rotation: New rotation of the menu
:type rotation: :class:`~nanome.util.quaternion`
:param scale: New scale of the menu
:type scale: :class:`~nanome.util.vector3`
"""
self._network._send(_Messages.menu_transform_set,
(index, position, rotation, scale), False)
def request_menu_transform(self, index, callback=None):
"""
| Requests spatial information of the plugin menu (position, rotation, scale)
:param index: Index of the menu you wish to read
:type index: int
callback: Callable[[Vector3, Quaternion, Vector3], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.menu_transform_request, index, expects_response)
return self._save_callback(id, callback)
def save_files(self, file_list, callback=None):
"""
| Save files on the machine running Nanome, and returns result
:param file_list: List of files to save with their content
:type file_list: list of :class:`~nanome.util.file.FileSaveData`
:kwarg callable: Callable[[List[FileSaveData]], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.file_save, file_list, expects_response)
return self._save_callback(id, callback)
def create_writing_stream(self, indices_list, stream_type, callback=None):
"""
| Create a stream allowing the plugin to continuously update properties of many objects
:param indices_list: List of indices of all objects that should be in the stream
:type indices_list: list of :class:`int`
:param stream_type: Type of stream to create
:type stream_type: list of :class:`~nanome.streams.Stream.Type`
:param callback: Callable[[Stream, StreamCreationError], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.stream_create, (stream_type, indices_list, StreamDirection.writing), expects_response)
return self._save_callback(id, callback)
def create_reading_stream(self, indices_list, stream_type, callback=None):
"""
| Create a stream allowing the plugin to continuously receive properties of many objects
:param indices_list: List of indices of all objects that should be in the stream
:type indices_list: list of :class:`int`
:param stream_type: Type of stream to create
:type stream_type: list of :class:`~nanome.streams.Stream.Type`
:param callable: Callable[[Stream, StreamCreationError], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.stream_create, (stream_type, indices_list, StreamDirection.reading), expects_response)
return self._save_callback(id, callback)
def add_bonds(self, complex_list, callback=None, fast_mode=None):
"""
| Calculate bonds
| Requires openbabel to be installed
:param complex_list: List of complexes to add bonds to
:type complex_list: list of :class:`~nanome.structure.Complex`
:param callback: Callable[[List[Complex]], None]
"""
bonding = _Bonding(self, complex_list, callback, fast_mode)
return bonding._start()
def add_dssp(self, complex_list, callback=None):
"""
| Use DSSP to calculate secondary structures
:param complex_list: List of complexes to add ribbons to
:type complex_list: list of :class:`~nanome.structure.Complex`
:param callback: Callable[[List[Complex]], None]
"""
dssp = _Dssp(self, complex_list, callback)
return dssp._start()
    def add_volume(self, complex, volume, properties, complex_to_align_index=-1, callback=None):
        # Send a volume with its properties to the session, attached to `complex`.
        # NOTE(review): undocumented upstream — the semantics of `volume`,
        # `properties` and `complex_to_align_index` (-1 appears to mean "no
        # alignment target") are inferred from names; confirm against the Nanome API.
        expects_response = callback is not None or self.is_async
        id = self._network._send(_Messages.add_volume, (complex, complex_to_align_index, volume, properties), expects_response)
        return self._save_callback(id, callback)
def open_url(self, url, desktop_browser=False):
"""
| Opens a URL alongside the Nanome session in the default web browser.
:param url: url to open
:type url: str
"""
url = url.strip()
if '://' not in url:
url = 'http://' + url
self._network._send(_Messages.open_url, (url, desktop_browser), False)
def request_presenter_info(self, callback=None):
"""
| Requests presenter account info (unique ID, name, email)
callback: Callable[[PresenterInfo], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.presenter_info_request, None, expects_response)
return self._save_callback(id, callback)
def request_controller_transforms(self, callback=None):
"""
| Requests presenter controller info (head position, head rotation, left controller position, left controller rotation, right controller position, right controller rotation)
param callback: Callable[[Vector3, Quaternion, Vector3, Quaternion, Vector3, Quaternion], None]
"""
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.controller_transforms_request, None, expects_response)
return self._save_callback(id, callback)
def set_plugin_list_button(self, button, text=None, usable=None):
"""
| Set text and/or usable state of the buttons on the plugin connection menu in Nanome
:param button: Button to set
:type button: :class:`~ButtonType`
:param text: Text displayed on the button. If None, doesn't set text
:type text: str
:param usable: Set button to be usable or not. If None, doesn't set usable text
:type usable: bool
"""
if button == PluginListButtonType.run:
current_text = [self._run_text]
current_usable = [self._run_usable]
else:
current_text = [self._advanced_settings_text]
current_usable = [self._advanced_settings_usable]
if text is None:
text = current_text[0]
else:
current_text[0] = text
if usable is None:
usable = current_usable[0]
else:
current_usable[0] = usable
self._network._send(_Messages.plugin_list_button_set, (button, text, usable), False)
def send_files_to_load(self, files_list, callback=None):
"""
| Send file(s) to Nanome to load directly using Nanome's importers.
| Can send just a list of paths, or a list of tuples containing (path, name)
:param files_list: List of files to load
:type files_list: list of or unique object of type :class:`str` or (:class:`str`, :class:`str`)
"""
files = []
if not isinstance(files_list, list):
files_list = [files_list]
for file in files_list:
if isinstance(file, tuple):
full_path, file_name = file
file_name += '.' + full_path.split('.')[-1]
else:
full_path = file.replace('\\', '/')
file_name = full_path.split('/')[-1]
with open(full_path, 'rb') as content_file:
data = content_file.read()
files.append((file_name, data))
expects_response = callback is not None or self.is_async
id = self._network._send(_Messages.load_file, (files, True, True), expects_response)
return self._save_callback(id, callback)
def request_export(self, format, callback=None, entities=None):
"""
Request a file export using Nanome exporters
Can request either molecule or workspace export, for entities in Nanome workspace
or directly sent by the plugin (without begin uploaded to workspace)
:param format: File format to export
:type format: :class:`~nanome.util.enums.ExportFormats`
:param entities: Entities to export (complexes to send, or indices if referencing complexes in workspace, or a workspace, or nothing if exporting Nanome workspace)
:type entities: list of or unique object of type :class:`~nanome.structure.Workspace` or :class:`~nanome.structure.Complex`, or None, or list of or unique :class:`int`
:kwarg callback: Callable[[Union[str, bytes]], None]
"""
if entities is not None and not isinstance(entities, list):
entities = [entities]
id = self._network._send(_Messages.export_files, (format, entities), True)
return self._save_callback(id, callback)
def apply_color_scheme(self, color_scheme, target, only_carbons):
"""
Applies a color scheme to selected atoms.
:param color_scheme: the color scheme to use on atoms
:type color_scheme: :class:`~nanome.util.enums.ColorScheme`
:param target: whether you want to color the atom, the surface, or the ribbon
:type target: :class:`~nanome.util.enums.ColorSchemeTarget`
:param only_carbons: whether you want to only color carbons, or all atoms.
:type only_carbons: bool
"""
self._network._send(_Messages.apply_color_scheme, (color_scheme, target, only_carbons), False)
@property
def plugin_files_path(self):
path = os.path.expanduser(config.fetch('plugin_files_path'))
if not os.path.exists(path):
os.makedirs(path)
return path
    @property
    def custom_data(self):
        """
        Get custom data set with Plugin.set_custom_data

        :type: tuple of objects or None if no data has been set
        """
        # Read-only view; the tuple itself is set elsewhere (Plugin.set_custom_data).
        return self._custom_data
    @property
    def menu(self):
        # Deprecated default-menu accessor. Warns once per instance: __set_first
        # (name-mangled) flags that the menu has been touched, by read or write.
        if not self.__set_first:
            self.__set_first = True
            Logs.warning("The default menu (self.menu) is now deprecated and will be removed in a future version. Please use the ui.Menu() constructor to create the menu.")
        return self.__menu
    @menu.setter
    def menu(self, value):
        # Explicitly assigning a menu counts as "touched", which suppresses the
        # one-time deprecation warning emitted by the getter.
        self.__set_first = True
        self.__menu = value
@Logs.deprecated("create_writing_stream")
def create_stream(self, atom_indices_list, callback):
id = self._network._send(_Messages.stream_create, (Stream.Type.position, atom_indices_list, StreamDirection.writing), callback is not None)
self._save_callback(id, callback)
@Logs.deprecated("create_writing_stream")
def create_atom_stream(self, atom_indices_list, stream_type, callback):
self.create_writing_stream(atom_indices_list, stream_type, callback)
class AsyncPluginInstance(PluginInstance):
    """
    | Base class of any asynchronous plugin.
    | Constructor should never be called by the user as it is network-instantiated when a session connects.
    | All methods available to PluginInstance are available to AsyncPluginInstance.
    | Decorating these methods with @async_callback will allow them to use the async keyword in their definition
    """

    # Makes every request expect a response even when no explicit callback is
    # given (see the `expects_response = callback is not None or self.is_async`
    # computations in PluginInstance), so calls can be awaited.
    is_async = True
class _DefaultPlugin(PluginInstance):
    # Minimal placeholder plugin instance.
    # NOTE(review): intentionally does NOT call super().__init__() — presumably
    # PluginInstance setup happens through network instantiation instead; confirm.
    def __init__(self):
        pass
from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
if TYPE_CHECKING:
from ...tl.types import TypeBool, TypeChannelBannedRights, TypeInputUser, TypeInputChannel, TypeChannelAdminRights, TypeInputChatPhoto, TypeChannelParticipantsFilter, TypeChannelAdminLogEventsFilter, TypeInputStickerSet
pass
class CheckUsernameRequest(TLObject):
    """Auto-generated MTProto request: check if *username* is available for *channel*."""
    CONSTRUCTOR_ID = 0x10e6bd2c
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel, username):
        """
        :param InputChannel channel:
        :param str username:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.username = username # type: str
    def resolve(self, client, utils):
        # Normalize whatever the caller passed into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'CheckUsernameRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'username': self.username
        }
    def __bytes__(self):
        return b''.join((
            b',\xbd\xe6\x10',  # CONSTRUCTOR_ID 0x10e6bd2c, little-endian
            bytes(self.channel),
            TLObject.serialize_bytes(self.username),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _username = reader.tgread_string()
        return CheckUsernameRequest(channel=_channel, username=_username)
class CreateChannelRequest(TLObject):
    """Auto-generated MTProto request: create a channel (broadcast) or megagroup."""
    CONSTRUCTOR_ID = 0xf4893d7f
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, title, about, broadcast=None, megagroup=None):
        """
        :param bool | None broadcast:
        :param bool | None megagroup:
        :param str title:
        :param str about:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.broadcast = broadcast # type: Optional[bool]
        self.megagroup = megagroup # type: Optional[bool]
        self.title = title # type: str
        self.about = about # type: str
    def to_dict(self):
        return {
            '_': 'CreateChannelRequest',
            'broadcast': self.broadcast,
            'megagroup': self.megagroup,
            'title': self.title,
            'about': self.about
        }
    def __bytes__(self):
        return b''.join((
            b'\x7f=\x89\xf4',  # CONSTRUCTOR_ID 0xf4893d7f, little-endian
            # Flags word: bit 0 = broadcast, bit 1 = megagroup.
            struct.pack('<I', (0 if self.broadcast is None or self.broadcast is False else 1) | (0 if self.megagroup is None or self.megagroup is False else 2)),
            TLObject.serialize_bytes(self.title),
            TLObject.serialize_bytes(self.about),
        ))
    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        _broadcast = bool(flags & 1)
        _megagroup = bool(flags & 2)
        _title = reader.tgread_string()
        _about = reader.tgread_string()
        return CreateChannelRequest(title=_title, about=_about, broadcast=_broadcast, megagroup=_megagroup)
class DeleteChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0xc0111fe3
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel
        return {
            '_': 'DeleteChannelRequest',
            'channel': channel.to_dict() if channel is not None else None
        }

    def __bytes__(self):
        # Little-endian constructor ID followed by the serialized channel.
        return struct.pack('<I', self.CONSTRUCTOR_ID) + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return DeleteChannelRequest(channel=reader.tgread_object())
class DeleteHistoryRequest(TLObject):
    """Auto-generated MTProto request: delete channel history up to *max_id*."""
    CONSTRUCTOR_ID = 0xaf369d42
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel, max_id):
        """
        :param InputChannel channel:
        :param int max_id:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.max_id = max_id # type: int
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'DeleteHistoryRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'max_id': self.max_id
        }
    def __bytes__(self):
        return b''.join((
            b'B\x9d6\xaf',  # CONSTRUCTOR_ID 0xaf369d42, little-endian
            bytes(self.channel),
            struct.pack('<i', self.max_id),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _max_id = reader.read_int()
        return DeleteHistoryRequest(channel=_channel, max_id=_max_id)
class DeleteMessagesRequest(TLObject):
    """Auto-generated MTProto request: delete the given message IDs from a channel."""
    CONSTRUCTOR_ID = 0x84c1fd4e
    SUBCLASS_OF_ID = 0xced3c06e
    def __init__(self, channel, id):
        """
        :param InputChannel channel:
        :param list[int] id:
        :returns messages.AffectedMessages: Instance of AffectedMessages.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.id = id # type: List[int]
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'DeleteMessagesRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': [] if self.id is None else self.id[:]
        }
    def __bytes__(self):
        return b''.join((
            b'N\xfd\xc1\x84',  # CONSTRUCTOR_ID 0x84c1fd4e, little-endian
            bytes(self.channel),
            # Vector header 0x1cb5c415, then count, then the int elements.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        reader.read_int()  # skip the vector constructor ID (0x1cb5c415)
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return DeleteMessagesRequest(channel=_channel, id=_id)
class DeleteUserHistoryRequest(TLObject):
    """Auto-generated MTProto request: delete all of a user's messages in a channel."""
    CONSTRUCTOR_ID = 0xd10dd71b
    SUBCLASS_OF_ID = 0x2c49c116
    def __init__(self, channel, user_id):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :returns messages.AffectedHistory: Instance of AffectedHistory.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.user_id = user_id # type: TypeInputUser
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))
    def to_dict(self):
        return {
            '_': 'DeleteUserHistoryRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict()
        }
    def __bytes__(self):
        return b''.join((
            b'\x1b\xd7\r\xd1',  # CONSTRUCTOR_ID 0xd10dd71b, little-endian
            bytes(self.channel),
            bytes(self.user_id),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        return DeleteUserHistoryRequest(channel=_channel, user_id=_user_id)
class EditAboutRequest(TLObject):
    """Auto-generated MTProto request: edit a channel's about/description text."""
    CONSTRUCTOR_ID = 0x13e27f1e
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel, about):
        """
        :param InputChannel channel:
        :param str about:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.about = about # type: str
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'EditAboutRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'about': self.about
        }
    def __bytes__(self):
        return b''.join((
            b'\x1e\x7f\xe2\x13',  # CONSTRUCTOR_ID 0x13e27f1e, little-endian
            bytes(self.channel),
            TLObject.serialize_bytes(self.about),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _about = reader.tgread_string()
        return EditAboutRequest(channel=_channel, about=_about)
class EditAdminRequest(TLObject):
    """Auto-generated MTProto request: set a user's admin rights in a channel."""
    CONSTRUCTOR_ID = 0x20b88214
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel, user_id, admin_rights):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :param ChannelAdminRights admin_rights:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.user_id = user_id # type: TypeInputUser
        self.admin_rights = admin_rights # type: TypeChannelAdminRights
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))
    def to_dict(self):
        return {
            '_': 'EditAdminRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict(),
            'admin_rights': None if self.admin_rights is None else self.admin_rights.to_dict()
        }
    def __bytes__(self):
        return b''.join((
            b'\x14\x82\xb8 ',  # CONSTRUCTOR_ID 0x20b88214, little-endian
            bytes(self.channel),
            bytes(self.user_id),
            bytes(self.admin_rights),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        _admin_rights = reader.tgread_object()
        return EditAdminRequest(channel=_channel, user_id=_user_id, admin_rights=_admin_rights)
class EditBannedRequest(TLObject):
    """Auto-generated MTProto request: set a user's banned rights in a channel."""
    CONSTRUCTOR_ID = 0xbfd915cd
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel, user_id, banned_rights):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :param ChannelBannedRights banned_rights:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.user_id = user_id # type: TypeInputUser
        self.banned_rights = banned_rights # type: TypeChannelBannedRights
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))
    def to_dict(self):
        return {
            '_': 'EditBannedRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict(),
            'banned_rights': None if self.banned_rights is None else self.banned_rights.to_dict()
        }
    def __bytes__(self):
        return b''.join((
            b'\xcd\x15\xd9\xbf',  # CONSTRUCTOR_ID 0xbfd915cd, little-endian
            bytes(self.channel),
            bytes(self.user_id),
            bytes(self.banned_rights),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        _banned_rights = reader.tgread_object()
        return EditBannedRequest(channel=_channel, user_id=_user_id, banned_rights=_banned_rights)
class EditPhotoRequest(TLObject):
    """Auto-generated MTProto request: change a channel's photo."""
    CONSTRUCTOR_ID = 0xf12e57c9
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel, photo):
        """
        :param InputChannel channel:
        :param InputChatPhoto photo:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.photo = photo # type: TypeInputChatPhoto
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'EditPhotoRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'photo': None if self.photo is None else self.photo.to_dict()
        }
    def __bytes__(self):
        return b''.join((
            b'\xc9W.\xf1',  # CONSTRUCTOR_ID 0xf12e57c9, little-endian
            bytes(self.channel),
            bytes(self.photo),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _photo = reader.tgread_object()
        return EditPhotoRequest(channel=_channel, photo=_photo)
class EditTitleRequest(TLObject):
    """Auto-generated MTProto request: change a channel's title."""
    CONSTRUCTOR_ID = 0x566decd0
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel, title):
        """
        :param InputChannel channel:
        :param str title:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.title = title # type: str
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'EditTitleRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'title': self.title
        }
    def __bytes__(self):
        return b''.join((
            b'\xd0\xecmV',  # CONSTRUCTOR_ID 0x566decd0, little-endian
            bytes(self.channel),
            TLObject.serialize_bytes(self.title),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _title = reader.tgread_string()
        return EditTitleRequest(channel=_channel, title=_title)
class ExportInviteRequest(TLObject):
    CONSTRUCTOR_ID = 0xc7560885
    SUBCLASS_OF_ID = 0xb4748a58

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns ExportedChatInvite: Instance of either ChatInviteEmpty, ChatInviteExported.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel
        return {
            '_': 'ExportInviteRequest',
            'channel': channel.to_dict() if channel is not None else None
        }

    def __bytes__(self):
        # Little-endian constructor ID followed by the serialized channel.
        return struct.pack('<I', self.CONSTRUCTOR_ID) + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return ExportInviteRequest(channel=reader.tgread_object())
class ExportMessageLinkRequest(TLObject):
    """Auto-generated MTProto request: get a public t.me link for a channel message."""
    CONSTRUCTOR_ID = 0xceb77163
    SUBCLASS_OF_ID = 0xdee644cc
    def __init__(self, channel, id, grouped):
        """
        :param InputChannel channel:
        :param int id:
        :param Bool grouped:
        :returns ExportedMessageLink: Instance of ExportedMessageLink.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.id = id # type: int
        self.grouped = grouped # type: TypeBool
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'ExportMessageLinkRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': self.id,
            'grouped': self.grouped
        }
    def __bytes__(self):
        return b''.join((
            b'cq\xb7\xce',  # CONSTRUCTOR_ID 0xceb77163, little-endian
            bytes(self.channel),
            struct.pack('<i', self.id),
            # TL Bool: boolTrue / boolFalse constructor IDs, little-endian.
            b'\xb5ur\x99' if self.grouped else b'7\x97y\xbc',
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _id = reader.read_int()
        _grouped = reader.tgread_bool()
        return ExportMessageLinkRequest(channel=_channel, id=_id, grouped=_grouped)
class GetAdminLogRequest(TLObject):
    """Auto-generated MTProto request: query a channel's admin event log."""
    CONSTRUCTOR_ID = 0x33ddf480
    SUBCLASS_OF_ID = 0x51f076bc
    def __init__(self, channel, q, max_id, min_id, limit, events_filter=None, admins=None):
        """
        :param InputChannel channel:
        :param str q:
        :param ChannelAdminLogEventsFilter | None events_filter:
        :param list[InputUser] | None admins:
        :param int max_id:
        :param int min_id:
        :param int limit:
        :returns channels.AdminLogResults: Instance of AdminLogResults.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.q = q # type: str
        self.events_filter = events_filter # type: Optional[TypeChannelAdminLogEventsFilter]
        self.admins = admins # type: Optional[List[TypeInputUser]]
        self.max_id = max_id # type: int
        self.min_id = min_id # type: int
        self.limit = limit # type: int
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.admins = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.admins] if self.admins else None
    def to_dict(self):
        return {
            '_': 'GetAdminLogRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'q': self.q,
            'events_filter': None if self.events_filter is None else self.events_filter.to_dict(),
            'admins': [] if self.admins is None else [None if x is None else x.to_dict() for x in self.admins],
            'max_id': self.max_id,
            'min_id': self.min_id,
            'limit': self.limit
        }
    def __bytes__(self):
        return b''.join((
            b'\x80\xf4\xdd3',  # CONSTRUCTOR_ID 0x33ddf480, little-endian
            # Flags word: bit 0 = events_filter present, bit 1 = admins present.
            struct.pack('<I', (0 if self.events_filter is None or self.events_filter is False else 1) | (0 if self.admins is None or self.admins is False else 2)),
            bytes(self.channel),
            TLObject.serialize_bytes(self.q),
            b'' if self.events_filter is None or self.events_filter is False else (bytes(self.events_filter)),
            # Optional vector of admins: header 0x1cb5c415, count, elements.
            b'' if self.admins is None or self.admins is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.admins)),b''.join(bytes(x) for x in self.admins))),
            struct.pack('<q', self.max_id),
            struct.pack('<q', self.min_id),
            struct.pack('<i', self.limit),
        ))
    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        _channel = reader.tgread_object()
        _q = reader.tgread_string()
        if flags & 1:
            _events_filter = reader.tgread_object()
        else:
            _events_filter = None
        if flags & 2:
            reader.read_int()  # skip the vector constructor ID (0x1cb5c415)
            _admins = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_object()
                _admins.append(_x)
        else:
            _admins = None
        _max_id = reader.read_long()
        _min_id = reader.read_long()
        _limit = reader.read_int()
        return GetAdminLogRequest(channel=_channel, q=_q, max_id=_max_id, min_id=_min_id, limit=_limit, events_filter=_events_filter, admins=_admins)
class GetAdminedPublicChannelsRequest(TLObject):
    CONSTRUCTOR_ID = 0x8d8d82d7
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self):
        """Parameterless request; answers with messages.Chats."""
        super().__init__()
        self.result = None
        self.content_related = True

    def to_dict(self):
        return {'_': 'GetAdminedPublicChannelsRequest'}

    def __bytes__(self):
        # The serialized form is just the little-endian constructor ID.
        return struct.pack('<I', self.CONSTRUCTOR_ID)

    @staticmethod
    def from_reader(reader):
        return GetAdminedPublicChannelsRequest()
class GetChannelsRequest(TLObject):
    """Auto-generated MTProto request: fetch full Chat objects for the given channels."""
    CONSTRUCTOR_ID = 0xa7f6bbb
    SUBCLASS_OF_ID = 0x99d5cb14
    def __init__(self, id):
        """
        :param list[InputChannel] id:
        :returns messages.Chats: Instance of either Chats, ChatsSlice.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.id = id # type: List[TypeInputChannel]
    def resolve(self, client, utils):
        self.id = [utils.get_input_channel(client.get_input_entity(_x)) for _x in self.id]
    def to_dict(self):
        return {
            '_': 'GetChannelsRequest',
            'id': [] if self.id is None else [None if x is None else x.to_dict() for x in self.id]
        }
    def __bytes__(self):
        return b''.join((
            b'\xbbk\x7f\n',  # CONSTRUCTOR_ID 0x0a7f6bbb, little-endian
            # Vector header 0x1cb5c415, then count, then serialized channels.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(bytes(x) for x in self.id),
        ))
    @staticmethod
    def from_reader(reader):
        reader.read_int()  # skip the vector constructor ID (0x1cb5c415)
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _id.append(_x)
        return GetChannelsRequest(id=_id)
class GetFullChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0x8736a09
    SUBCLASS_OF_ID = 0x225a5109

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns messages.ChatFull: Instance of ChatFull.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel
        return {
            '_': 'GetFullChannelRequest',
            'channel': channel.to_dict() if channel is not None else None
        }

    def __bytes__(self):
        # Little-endian constructor ID followed by the serialized channel.
        return struct.pack('<I', self.CONSTRUCTOR_ID) + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return GetFullChannelRequest(channel=reader.tgread_object())
class GetMessagesRequest(TLObject):
    """Auto-generated MTProto request: fetch channel messages by ID."""
    CONSTRUCTOR_ID = 0x93d7b347
    SUBCLASS_OF_ID = 0xd4b40b5e
    def __init__(self, channel, id):
        """
        :param InputChannel channel:
        :param list[int] id:
        :returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.id = id # type: List[int]
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'GetMessagesRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': [] if self.id is None else self.id[:]
        }
    def __bytes__(self):
        return b''.join((
            b'G\xb3\xd7\x93',  # CONSTRUCTOR_ID 0x93d7b347, little-endian
            bytes(self.channel),
            # Vector header 0x1cb5c415, then count, then the int elements.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        reader.read_int()  # skip the vector constructor ID (0x1cb5c415)
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return GetMessagesRequest(channel=_channel, id=_id)
class GetParticipantRequest(TLObject):
    """Auto-generated MTProto request: get a single channel participant's info."""
    CONSTRUCTOR_ID = 0x546dd7a6
    SUBCLASS_OF_ID = 0x6658151a
    def __init__(self, channel, user_id):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :returns channels.ChannelParticipant: Instance of ChannelParticipant.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.user_id = user_id # type: TypeInputUser
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))
    def to_dict(self):
        return {
            '_': 'GetParticipantRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict()
        }
    def __bytes__(self):
        return b''.join((
            b'\xa6\xd7mT',  # CONSTRUCTOR_ID 0x546dd7a6, little-endian
            bytes(self.channel),
            bytes(self.user_id),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        return GetParticipantRequest(channel=_channel, user_id=_user_id)
class GetParticipantsRequest(TLObject):
    """Auto-generated MTProto request: page through a channel's participant list."""
    CONSTRUCTOR_ID = 0x123e05e9
    SUBCLASS_OF_ID = 0xe60a6e64
    def __init__(self, channel, filter, offset, limit, hash):
        """
        :param InputChannel channel:
        :param ChannelParticipantsFilter filter:
        :param int offset:
        :param int limit:
        :param int hash:
        :returns channels.ChannelParticipants: Instance of either ChannelParticipants, ChannelParticipantsNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.filter = filter # type: TypeChannelParticipantsFilter
        self.offset = offset # type: int
        self.limit = limit # type: int
        self.hash = hash # type: int
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
    def to_dict(self):
        return {
            '_': 'GetParticipantsRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'filter': None if self.filter is None else self.filter.to_dict(),
            'offset': self.offset,
            'limit': self.limit,
            'hash': self.hash
        }
    def __bytes__(self):
        return b''.join((
            b'\xe9\x05>\x12',  # CONSTRUCTOR_ID 0x123e05e9, little-endian
            bytes(self.channel),
            bytes(self.filter),
            struct.pack('<i', self.offset),
            struct.pack('<i', self.limit),
            struct.pack('<i', self.hash),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _filter = reader.tgread_object()
        _offset = reader.read_int()
        _limit = reader.read_int()
        _hash = reader.read_int()
        return GetParticipantsRequest(channel=_channel, filter=_filter, offset=_offset, limit=_limit, hash=_hash)
class InviteToChannelRequest(TLObject):
    """Auto-generated MTProto request: invite the given users into a channel."""
    CONSTRUCTOR_ID = 0x199f3a6c
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel, users):
        """
        :param InputChannel channel:
        :param list[InputUser] users:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel
        self.users = users # type: List[TypeInputUser]
    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.users = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.users]
    def to_dict(self):
        return {
            '_': 'InviteToChannelRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
        }
    def __bytes__(self):
        return b''.join((
            b'l:\x9f\x19',  # CONSTRUCTOR_ID 0x199f3a6c, little-endian
            bytes(self.channel),
            # Vector header 0x1cb5c415, then count, then serialized users.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
        ))
    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        reader.read_int()  # skip the vector constructor ID (0x1cb5c415)
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return InviteToChannelRequest(channel=_channel, users=_users)
class JoinChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0x24b524c5
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel
        return {
            '_': 'JoinChannelRequest',
            'channel': channel.to_dict() if channel is not None else None
        }

    def __bytes__(self):
        # Little-endian constructor ID followed by the serialized channel.
        return struct.pack('<I', self.CONSTRUCTOR_ID) + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return JoinChannelRequest(channel=reader.tgread_object())
class LeaveChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0xf836aa95
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel
        return {
            '_': 'LeaveChannelRequest',
            'channel': channel.to_dict() if channel is not None else None
        }

    def __bytes__(self):
        # Little-endian constructor ID followed by the serialized channel.
        return struct.pack('<I', self.CONSTRUCTOR_ID) + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return LeaveChannelRequest(channel=reader.tgread_object())
class ReadHistoryRequest(TLObject):
    CONSTRUCTOR_ID = 0xcc104937
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, max_id):
        """
        :param InputChannel channel: channel to act upon.
        :param int max_id: read the history up to this message ID.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.max_id = max_id  # type: int

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ReadHistoryRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'max_id': self.max_id
        }

    def __bytes__(self):
        data = [b'7I\x10\xcc']
        data.append(bytes(self.channel))
        data.append(struct.pack('<i', self.max_id))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        max_id = reader.read_int()
        return ReadHistoryRequest(channel=channel, max_id=max_id)
class ReadMessageContentsRequest(TLObject):
    CONSTRUCTOR_ID = 0xeab5dc38
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, id):
        """
        :param InputChannel channel: channel to act upon.
        :param list[int] id: message IDs to operate on.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ReadMessageContentsRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'id': list(self.id) if self.id is not None else []
        }

    def __bytes__(self):
        data = [b'8\xdc\xb5\xea']
        data.append(bytes(self.channel))
        # Vector<int>: vector constructor ID, element count, then the elements.
        data.append(b'\x15\xc4\xb5\x1c')
        data.append(struct.pack('<i', len(self.id)))
        for item in self.id:
            data.append(struct.pack('<i', item))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        reader.read_int()  # vector constructor ID, ignored
        ids = [reader.read_int() for _ in range(reader.read_int())]
        return ReadMessageContentsRequest(channel=channel, id=ids)
class ReportSpamRequest(TLObject):
    CONSTRUCTOR_ID = 0xfe087810
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, user_id, id):
        """
        :param InputChannel channel: channel to act upon.
        :param InputUser user_id: user the report refers to.
        :param list[int] id: message IDs to operate on.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        # Normalise both entities into their proper input types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'ReportSpamRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'user_id': self.user_id.to_dict() if self.user_id is not None else None,
            'id': list(self.id) if self.id is not None else []
        }

    def __bytes__(self):
        data = [b'\x10x\x08\xfe']
        data.append(bytes(self.channel))
        data.append(bytes(self.user_id))
        # Vector<int>: vector constructor ID, element count, then the elements.
        data.append(b'\x15\xc4\xb5\x1c')
        data.append(struct.pack('<i', len(self.id)))
        for item in self.id:
            data.append(struct.pack('<i', item))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        user_id = reader.tgread_object()
        reader.read_int()  # vector constructor ID, ignored
        ids = [reader.read_int() for _ in range(reader.read_int())]
        return ReportSpamRequest(channel=channel, user_id=user_id, id=ids)
class SetStickersRequest(TLObject):
    CONSTRUCTOR_ID = 0xea8ca4f9
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, stickerset):
        """
        :param InputChannel channel: channel to act upon.
        :param InputStickerSet stickerset: sticker set to assign.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.stickerset = stickerset  # type: TypeInputStickerSet

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'SetStickersRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'stickerset': self.stickerset.to_dict() if self.stickerset is not None else None
        }

    def __bytes__(self):
        data = [b'\xf9\xa4\x8c\xea']
        data.append(bytes(self.channel))
        data.append(bytes(self.stickerset))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        stickerset = reader.tgread_object()
        return SetStickersRequest(channel=channel, stickerset=stickerset)
class ToggleInvitesRequest(TLObject):
    CONSTRUCTOR_ID = 0x49609307
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel: channel to act upon.
        :param Bool enabled: new state of the toggle.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ToggleInvitesRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'enabled': self.enabled
        }

    def __bytes__(self):
        data = [b'\x07\x93`I']
        data.append(bytes(self.channel))
        # boolTrue / boolFalse constructor IDs.
        data.append(b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc')
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        enabled = reader.tgread_bool()
        return ToggleInvitesRequest(channel=channel, enabled=enabled)
class TogglePreHistoryHiddenRequest(TLObject):
    CONSTRUCTOR_ID = 0xeabbb94c
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel: channel to act upon.
        :param Bool enabled: new state of the toggle.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'TogglePreHistoryHiddenRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'enabled': self.enabled
        }

    def __bytes__(self):
        data = [b'L\xb9\xbb\xea']
        data.append(bytes(self.channel))
        # boolTrue / boolFalse constructor IDs.
        data.append(b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc')
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        enabled = reader.tgread_bool()
        return TogglePreHistoryHiddenRequest(channel=channel, enabled=enabled)
class ToggleSignaturesRequest(TLObject):
    CONSTRUCTOR_ID = 0x1f69b606
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel: channel to act upon.
        :param Bool enabled: new state of the toggle.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ToggleSignaturesRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'enabled': self.enabled
        }

    def __bytes__(self):
        data = [b'\x06\xb6i\x1f']
        data.append(bytes(self.channel))
        # boolTrue / boolFalse constructor IDs.
        data.append(b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc')
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        enabled = reader.tgread_bool()
        return ToggleSignaturesRequest(channel=channel, enabled=enabled)
class UpdatePinnedMessageRequest(TLObject):
    CONSTRUCTOR_ID = 0xa72ded52
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, id, silent=None):
        """
        :param bool | None silent: optional flag (bit 0).
        :param InputChannel channel: channel to act upon.
        :param int id: message ID to pin.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.silent = silent  # type: Optional[bool]
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: int

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'UpdatePinnedMessageRequest',
            'silent': self.silent,
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'id': self.id
        }

    def __bytes__(self):
        # Bit 0 of the flags word marks the "silent" option; the exact
        # None-or-False test is kept as generated (not plain truthiness).
        flags = 0 if self.silent is None or self.silent is False else 1
        data = [b'R\xed-\xa7', struct.pack('<I', flags)]
        data.append(bytes(self.channel))
        data.append(struct.pack('<i', self.id))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        silent = bool(flags & 1)
        channel = reader.tgread_object()
        id = reader.read_int()
        return UpdatePinnedMessageRequest(channel=channel, id=id, silent=silent)
class UpdateUsernameRequest(TLObject):
    CONSTRUCTOR_ID = 0x3514b3de
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, username):
        """
        :param InputChannel channel: channel to act upon.
        :param str username: new public username.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.username = username  # type: str

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'UpdateUsernameRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'username': self.username
        }

    def __bytes__(self):
        data = [b'\xde\xb3\x145']
        data.append(bytes(self.channel))
        data.append(TLObject.serialize_bytes(self.username))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        username = reader.tgread_string()
        return UpdateUsernameRequest(channel=channel, username=username)
import os
import struct

from typing import Optional, List, Union, TYPE_CHECKING

from ...tl.tlobject import TLObject

if TYPE_CHECKING:
    from ...tl.types import TypeBool, TypeChannelBannedRights, TypeInputUser, TypeInputChannel, TypeChannelAdminRights, TypeInputChatPhoto, TypeChannelParticipantsFilter, TypeChannelAdminLogEventsFilter, TypeInputStickerSet
    pass
class CheckUsernameRequest(TLObject):
    CONSTRUCTOR_ID = 0x10e6bd2c
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, username):
        """
        :param InputChannel channel: channel to act upon.
        :param str username: username to check.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.username = username  # type: str

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'CheckUsernameRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'username': self.username
        }

    def __bytes__(self):
        data = [b',\xbd\xe6\x10']
        data.append(bytes(self.channel))
        data.append(TLObject.serialize_bytes(self.username))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        username = reader.tgread_string()
        return CheckUsernameRequest(channel=channel, username=username)
class CreateChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0xf4893d7f
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, title, about, broadcast=None, megagroup=None):
        """
        :param bool | None broadcast: optional flag (bit 0).
        :param bool | None megagroup: optional flag (bit 1).
        :param str title: channel title.
        :param str about: channel description.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.broadcast = broadcast  # type: Optional[bool]
        self.megagroup = megagroup  # type: Optional[bool]
        self.title = title  # type: str
        self.about = about  # type: str

    def to_dict(self):
        return {
            '_': 'CreateChannelRequest',
            'broadcast': self.broadcast,
            'megagroup': self.megagroup,
            'title': self.title,
            'about': self.about
        }

    def __bytes__(self):
        # The exact None-or-False tests are kept as generated
        # (not plain truthiness).
        flags = (0 if self.broadcast is None or self.broadcast is False else 1)
        flags |= (0 if self.megagroup is None or self.megagroup is False else 2)
        data = [b'\x7f=\x89\xf4', struct.pack('<I', flags)]
        data.append(TLObject.serialize_bytes(self.title))
        data.append(TLObject.serialize_bytes(self.about))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        broadcast = bool(flags & 1)
        megagroup = bool(flags & 2)
        title = reader.tgread_string()
        about = reader.tgread_string()
        return CreateChannelRequest(title=title, about=about, broadcast=broadcast, megagroup=megagroup)
class DeleteChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0xc0111fe3
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """
        :param InputChannel channel: channel to act upon.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'DeleteChannelRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None
        }

    def __bytes__(self):
        data = [b'\xe3\x1f\x11\xc0']
        data.append(bytes(self.channel))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        return DeleteChannelRequest(channel=channel)
class DeleteHistoryRequest(TLObject):
    CONSTRUCTOR_ID = 0xaf369d42
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, max_id):
        """
        :param InputChannel channel: channel to act upon.
        :param int max_id: delete the history up to this message ID.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.max_id = max_id  # type: int

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'DeleteHistoryRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'max_id': self.max_id
        }

    def __bytes__(self):
        data = [b'B\x9d6\xaf']
        data.append(bytes(self.channel))
        data.append(struct.pack('<i', self.max_id))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        max_id = reader.read_int()
        return DeleteHistoryRequest(channel=channel, max_id=max_id)
class DeleteMessagesRequest(TLObject):
    CONSTRUCTOR_ID = 0x84c1fd4e
    SUBCLASS_OF_ID = 0xced3c06e

    def __init__(self, channel, id):
        """
        :param InputChannel channel: channel to act upon.
        :param list[int] id: message IDs to operate on.
        :returns messages.AffectedMessages: instance of AffectedMessages.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'DeleteMessagesRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'id': list(self.id) if self.id is not None else []
        }

    def __bytes__(self):
        data = [b'N\xfd\xc1\x84']
        data.append(bytes(self.channel))
        # Vector<int>: vector constructor ID, element count, then the elements.
        data.append(b'\x15\xc4\xb5\x1c')
        data.append(struct.pack('<i', len(self.id)))
        for item in self.id:
            data.append(struct.pack('<i', item))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        reader.read_int()  # vector constructor ID, ignored
        ids = [reader.read_int() for _ in range(reader.read_int())]
        return DeleteMessagesRequest(channel=channel, id=ids)
class DeleteUserHistoryRequest(TLObject):
    CONSTRUCTOR_ID = 0xd10dd71b
    SUBCLASS_OF_ID = 0x2c49c116

    def __init__(self, channel, user_id):
        """
        :param InputChannel channel: channel to act upon.
        :param InputUser user_id: user whose history is targeted.
        :returns messages.AffectedHistory: instance of AffectedHistory.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser

    def resolve(self, client, utils):
        # Normalise both entities into their proper input types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'DeleteUserHistoryRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'user_id': self.user_id.to_dict() if self.user_id is not None else None
        }

    def __bytes__(self):
        data = [b'\x1b\xd7\r\xd1']
        data.append(bytes(self.channel))
        data.append(bytes(self.user_id))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        user_id = reader.tgread_object()
        return DeleteUserHistoryRequest(channel=channel, user_id=user_id)
class EditAboutRequest(TLObject):
    CONSTRUCTOR_ID = 0x13e27f1e
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel, about):
        """
        :param InputChannel channel: channel to act upon.
        :param str about: new description text.
        :returns Bool: this type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.about = about  # type: str

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'EditAboutRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'about': self.about
        }

    def __bytes__(self):
        data = [b'\x1e\x7f\xe2\x13']
        data.append(bytes(self.channel))
        data.append(TLObject.serialize_bytes(self.about))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        about = reader.tgread_string()
        return EditAboutRequest(channel=channel, about=about)
class EditAdminRequest(TLObject):
    CONSTRUCTOR_ID = 0x20b88214
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, user_id, admin_rights):
        """
        :param InputChannel channel: channel to act upon.
        :param InputUser user_id: user whose rights change.
        :param ChannelAdminRights admin_rights: rights to assign.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser
        self.admin_rights = admin_rights  # type: TypeChannelAdminRights

    def resolve(self, client, utils):
        # Normalise both entities into their proper input types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'EditAdminRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'user_id': self.user_id.to_dict() if self.user_id is not None else None,
            'admin_rights': self.admin_rights.to_dict() if self.admin_rights is not None else None
        }

    def __bytes__(self):
        # NOTE: the trailing space in the constructor-ID literal is byte 0x20.
        data = [b'\x14\x82\xb8 ']
        data.append(bytes(self.channel))
        data.append(bytes(self.user_id))
        data.append(bytes(self.admin_rights))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        user_id = reader.tgread_object()
        admin_rights = reader.tgread_object()
        return EditAdminRequest(channel=channel, user_id=user_id, admin_rights=admin_rights)
class EditBannedRequest(TLObject):
    CONSTRUCTOR_ID = 0xbfd915cd
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, user_id, banned_rights):
        """
        :param InputChannel channel: channel to act upon.
        :param InputUser user_id: user whose rights change.
        :param ChannelBannedRights banned_rights: restrictions to assign.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser
        self.banned_rights = banned_rights  # type: TypeChannelBannedRights

    def resolve(self, client, utils):
        # Normalise both entities into their proper input types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'EditBannedRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'user_id': self.user_id.to_dict() if self.user_id is not None else None,
            'banned_rights': self.banned_rights.to_dict() if self.banned_rights is not None else None
        }

    def __bytes__(self):
        data = [b'\xcd\x15\xd9\xbf']
        data.append(bytes(self.channel))
        data.append(bytes(self.user_id))
        data.append(bytes(self.banned_rights))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        user_id = reader.tgread_object()
        banned_rights = reader.tgread_object()
        return EditBannedRequest(channel=channel, user_id=user_id, banned_rights=banned_rights)
class EditPhotoRequest(TLObject):
    CONSTRUCTOR_ID = 0xf12e57c9
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, photo):
        """
        :param InputChannel channel: channel to act upon.
        :param InputChatPhoto photo: photo to set.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.photo = photo  # type: TypeInputChatPhoto

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'EditPhotoRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'photo': self.photo.to_dict() if self.photo is not None else None
        }

    def __bytes__(self):
        data = [b'\xc9W.\xf1']
        data.append(bytes(self.channel))
        data.append(bytes(self.photo))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        photo = reader.tgread_object()
        return EditPhotoRequest(channel=channel, photo=photo)
class EditTitleRequest(TLObject):
    CONSTRUCTOR_ID = 0x566decd0
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, title):
        """
        :param InputChannel channel: channel to act upon.
        :param str title: new title text.
        :returns Updates: one of the Updates constructors (UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage).
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.title = title  # type: str

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'EditTitleRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'title': self.title
        }

    def __bytes__(self):
        data = [b'\xd0\xecmV']
        data.append(bytes(self.channel))
        data.append(TLObject.serialize_bytes(self.title))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        title = reader.tgread_string()
        return EditTitleRequest(channel=channel, title=title)
class ExportInviteRequest(TLObject):
    CONSTRUCTOR_ID = 0xc7560885
    SUBCLASS_OF_ID = 0xb4748a58

    def __init__(self, channel):
        """
        :param InputChannel channel: channel to act upon.
        :returns ExportedChatInvite: instance of either ChatInviteEmpty, ChatInviteExported.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ExportInviteRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None
        }

    def __bytes__(self):
        data = [b'\x85\x08V\xc7']
        data.append(bytes(self.channel))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        return ExportInviteRequest(channel=channel)
class ExportMessageLinkRequest(TLObject):
    CONSTRUCTOR_ID = 0xceb77163
    SUBCLASS_OF_ID = 0xdee644cc

    def __init__(self, channel, id, grouped):
        """
        :param InputChannel channel: channel to act upon.
        :param int id: message ID to link to.
        :param Bool grouped: whether to link to the whole album.
        :returns ExportedMessageLink: instance of ExportedMessageLink.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: int
        self.grouped = grouped  # type: TypeBool

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'ExportMessageLinkRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'id': self.id,
            'grouped': self.grouped
        }

    def __bytes__(self):
        data = [b'cq\xb7\xce']
        data.append(bytes(self.channel))
        data.append(struct.pack('<i', self.id))
        # boolTrue / boolFalse constructor IDs.
        data.append(b'\xb5ur\x99' if self.grouped else b'7\x97y\xbc')
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        id = reader.read_int()
        grouped = reader.tgread_bool()
        return ExportMessageLinkRequest(channel=channel, id=id, grouped=grouped)
class GetAdminLogRequest(TLObject):
    CONSTRUCTOR_ID = 0x33ddf480
    SUBCLASS_OF_ID = 0x51f076bc

    def __init__(self, channel, q, max_id, min_id, limit, events_filter=None, admins=None):
        """
        :param InputChannel channel: channel to act upon.
        :param str q: search query.
        :param ChannelAdminLogEventsFilter | None events_filter: optional event filter (flag bit 0).
        :param list[InputUser] | None admins: optional list of admins to filter by (flag bit 1).
        :param int max_id: maximum event ID.
        :param int min_id: minimum event ID.
        :param int limit: maximum number of results.
        :returns channels.AdminLogResults: instance of AdminLogResults.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.q = q  # type: str
        self.events_filter = events_filter  # type: Optional[TypeChannelAdminLogEventsFilter]
        self.admins = admins  # type: Optional[List[TypeInputUser]]
        self.max_id = max_id  # type: int
        self.min_id = min_id  # type: int
        self.limit = limit  # type: int

    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        # An empty/None admins list is normalised to None, as generated.
        if self.admins:
            self.admins = [utils.get_input_user(client.get_input_entity(a)) for a in self.admins]
        else:
            self.admins = None

    def to_dict(self):
        return {
            '_': 'GetAdminLogRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'q': self.q,
            'events_filter': self.events_filter.to_dict() if self.events_filter is not None else None,
            'admins': [x.to_dict() if x is not None else None for x in self.admins] if self.admins is not None else [],
            'max_id': self.max_id,
            'min_id': self.min_id,
            'limit': self.limit
        }

    def __bytes__(self):
        # The exact None-or-False tests are kept as generated.
        flags = (0 if self.events_filter is None or self.events_filter is False else 1)
        flags |= (0 if self.admins is None or self.admins is False else 2)
        data = [b'\x80\xf4\xdd3', struct.pack('<I', flags)]
        data.append(bytes(self.channel))
        data.append(TLObject.serialize_bytes(self.q))
        if not (self.events_filter is None or self.events_filter is False):
            data.append(bytes(self.events_filter))
        if not (self.admins is None or self.admins is False):
            # Vector<InputUser>: vector constructor ID, count, elements.
            data.append(b'\x15\xc4\xb5\x1c')
            data.append(struct.pack('<i', len(self.admins)))
            for admin in self.admins:
                data.append(bytes(admin))
        data.append(struct.pack('<q', self.max_id))
        data.append(struct.pack('<q', self.min_id))
        data.append(struct.pack('<i', self.limit))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        channel = reader.tgread_object()
        q = reader.tgread_string()
        events_filter = reader.tgread_object() if flags & 1 else None
        if flags & 2:
            reader.read_int()  # vector constructor ID, ignored
            admins = [reader.tgread_object() for _ in range(reader.read_int())]
        else:
            admins = None
        max_id = reader.read_long()
        min_id = reader.read_long()
        limit = reader.read_int()
        return GetAdminLogRequest(channel=channel, q=q, max_id=max_id, min_id=min_id, limit=limit, events_filter=events_filter, admins=admins)
class GetAdminedPublicChannelsRequest(TLObject):
    CONSTRUCTOR_ID = 0x8d8d82d7
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self):
        """Parameterless request; serialises to its constructor ID only."""
        super().__init__()
        self.result = None
        self.content_related = True

    def to_dict(self):
        return {'_': 'GetAdminedPublicChannelsRequest'}

    def __bytes__(self):
        return b'\xd7\x82\x8d\x8d'

    @staticmethod
    def from_reader(reader):
        return GetAdminedPublicChannelsRequest()
class GetChannelsRequest(TLObject):
    CONSTRUCTOR_ID = 0xa7f6bbb
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self, id):
        """
        :param list[InputChannel] id: channels to fetch.
        :returns messages.Chats: instance of either Chats, ChatsSlice.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.id = id  # type: List[TypeInputChannel]

    def resolve(self, client, utils):
        resolved = []
        for item in self.id:
            resolved.append(utils.get_input_channel(client.get_input_entity(item)))
        self.id = resolved

    def to_dict(self):
        return {
            '_': 'GetChannelsRequest',
            'id': [x.to_dict() if x is not None else None for x in self.id] if self.id is not None else []
        }

    def __bytes__(self):
        data = [b'\xbbk\x7f\n']
        # Vector<InputChannel>: vector constructor ID, count, elements.
        data.append(b'\x15\xc4\xb5\x1c')
        data.append(struct.pack('<i', len(self.id)))
        for item in self.id:
            data.append(bytes(item))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        reader.read_int()  # vector constructor ID, ignored
        ids = [reader.tgread_object() for _ in range(reader.read_int())]
        return GetChannelsRequest(id=ids)
class GetFullChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0x8736a09
    SUBCLASS_OF_ID = 0x225a5109

    def __init__(self, channel):
        """
        :param InputChannel channel: channel to act upon.
        :returns messages.ChatFull: instance of ChatFull.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'GetFullChannelRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None
        }

    def __bytes__(self):
        data = [b'\tjs\x08']
        data.append(bytes(self.channel))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        return GetFullChannelRequest(channel=channel)
class GetMessagesRequest(TLObject):
    CONSTRUCTOR_ID = 0x93d7b347
    SUBCLASS_OF_ID = 0xd4b40b5e

    def __init__(self, channel, id):
        """
        :param InputChannel channel: channel to act upon.
        :param list[int] id: message IDs to fetch.
        :returns messages.Messages: instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'GetMessagesRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'id': list(self.id) if self.id is not None else []
        }

    def __bytes__(self):
        data = [b'G\xb3\xd7\x93']
        data.append(bytes(self.channel))
        # Vector<int>: vector constructor ID, element count, then the elements.
        data.append(b'\x15\xc4\xb5\x1c')
        data.append(struct.pack('<i', len(self.id)))
        for item in self.id:
            data.append(struct.pack('<i', item))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        reader.read_int()  # vector constructor ID, ignored
        ids = [reader.read_int() for _ in range(reader.read_int())]
        return GetMessagesRequest(channel=channel, id=ids)
class GetParticipantRequest(TLObject):
    CONSTRUCTOR_ID = 0x546dd7a6
    SUBCLASS_OF_ID = 0x6658151a

    def __init__(self, channel, user_id):
        """
        :param InputChannel channel: channel to act upon.
        :param InputUser user_id: participant to fetch.
        :returns channels.ChannelParticipant: instance of ChannelParticipant.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser

    def resolve(self, client, utils):
        # Normalise both entities into their proper input types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'GetParticipantRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'user_id': self.user_id.to_dict() if self.user_id is not None else None
        }

    def __bytes__(self):
        data = [b'\xa6\xd7mT']
        data.append(bytes(self.channel))
        data.append(bytes(self.user_id))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        user_id = reader.tgread_object()
        return GetParticipantRequest(channel=channel, user_id=user_id)
class GetParticipantsRequest(TLObject):
    CONSTRUCTOR_ID = 0x123e05e9
    SUBCLASS_OF_ID = 0xe60a6e64

    def __init__(self, channel, filter, offset, limit, hash):
        """
        :param InputChannel channel: channel to act upon.
        :param ChannelParticipantsFilter filter: which participants to return.
        :param int offset: pagination offset.
        :param int limit: maximum number of results.
        :param int hash: result hash for caching.
        :returns channels.ChannelParticipants: instance of either ChannelParticipants, ChannelParticipantsNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.filter = filter  # type: TypeChannelParticipantsFilter
        self.offset = offset  # type: int
        self.limit = limit  # type: int
        self.hash = hash  # type: int

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        return {
            '_': 'GetParticipantsRequest',
            'channel': self.channel.to_dict() if self.channel is not None else None,
            'filter': self.filter.to_dict() if self.filter is not None else None,
            'offset': self.offset,
            'limit': self.limit,
            'hash': self.hash
        }

    def __bytes__(self):
        data = [b'\xe9\x05>\x12']
        data.append(bytes(self.channel))
        data.append(bytes(self.filter))
        for value in (self.offset, self.limit, self.hash):
            data.append(struct.pack('<i', value))
        return b''.join(data)

    @staticmethod
    def from_reader(reader):
        channel = reader.tgread_object()
        filter = reader.tgread_object()
        offset = reader.read_int()
        limit = reader.read_int()
        hash = reader.read_int()
        return GetParticipantsRequest(channel=channel, filter=filter, offset=offset, limit=limit, hash=hash)
class InviteToChannelRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0x199f3a6c  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel, users):
        """
        :param InputChannel channel:
        :param list[InputUser] users:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.users = users  # type: List[TypeInputUser]

    def resolve(self, client, utils):
        # Resolve user-friendly entities (usernames, ids, ...) into Input* types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.users = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.users]

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'InviteToChannelRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'l:\x9f\x19',  # 0x199f3a6c, little-endian
            bytes(self.channel),
            # Vector<InputUser>: vector constructor id 0x1cb5c415, count, then the items.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor id
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return InviteToChannelRequest(channel=_channel, users=_users)
class JoinChannelRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0x24b524c5  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'JoinChannelRequest',
            'channel': None if self.channel is None else self.channel.to_dict()
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then the single argument.
        return b''.join((
            b'\xc5$\xb5$',  # 0x24b524c5, little-endian
            bytes(self.channel),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        return JoinChannelRequest(channel=_channel)
class LeaveChannelRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xf836aa95  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'LeaveChannelRequest',
            'channel': None if self.channel is None else self.channel.to_dict()
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then the single argument.
        return b''.join((
            b'\x95\xaa6\xf8',  # 0xf836aa95, little-endian
            bytes(self.channel),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        return LeaveChannelRequest(channel=_channel)
class ReadHistoryRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xcc104937  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0xf5b399ac  # TL type id of the result type

    def __init__(self, channel, max_id):
        """
        :param InputChannel channel:
        :param int max_id:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.max_id = max_id  # type: int

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'ReadHistoryRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'max_id': self.max_id
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'7I\x10\xcc',  # 0xcc104937, little-endian
            bytes(self.channel),
            struct.pack('<i', self.max_id),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _max_id = reader.read_int()
        return ReadHistoryRequest(channel=_channel, max_id=_max_id)
class ReadMessageContentsRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xeab5dc38  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0xf5b399ac  # TL type id of the result type

    def __init__(self, channel, id):
        """
        :param InputChannel channel:
        :param list[int] id:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'ReadMessageContentsRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': [] if self.id is None else self.id[:]
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'8\xdc\xb5\xea',  # 0xeab5dc38, little-endian
            bytes(self.channel),
            # Vector<int>: vector constructor id 0x1cb5c415, count, then the ids.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor id
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return ReadMessageContentsRequest(channel=_channel, id=_id)
class ReportSpamRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xfe087810  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0xf5b399ac  # TL type id of the result type

    def __init__(self, channel, user_id, id):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :param list[int] id:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        # Resolve user-friendly entities (usernames, ids, ...) into Input* types.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'ReportSpamRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict(),
            'id': [] if self.id is None else self.id[:]
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'\x10x\x08\xfe',  # 0xfe087810, little-endian
            bytes(self.channel),
            bytes(self.user_id),
            # Vector<int>: vector constructor id 0x1cb5c415, count, then the ids.
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor id
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return ReportSpamRequest(channel=_channel, user_id=_user_id, id=_id)
class SetStickersRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xea8ca4f9  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0xf5b399ac  # TL type id of the result type

    def __init__(self, channel, stickerset):
        """
        :param InputChannel channel:
        :param InputStickerSet stickerset:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.stickerset = stickerset  # type: TypeInputStickerSet

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'SetStickersRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'stickerset': None if self.stickerset is None else self.stickerset.to_dict()
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'\xf9\xa4\x8c\xea',  # 0xea8ca4f9, little-endian
            bytes(self.channel),
            bytes(self.stickerset),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _stickerset = reader.tgread_object()
        return SetStickersRequest(channel=_channel, stickerset=_stickerset)
class ToggleInvitesRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0x49609307  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel:
        :param Bool enabled:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'ToggleInvitesRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'enabled': self.enabled
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'\x07\x93`I',  # 0x49609307, little-endian
            bytes(self.channel),
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',  # TL boolTrue / boolFalse
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return ToggleInvitesRequest(channel=_channel, enabled=_enabled)
class TogglePreHistoryHiddenRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xeabbb94c  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel:
        :param Bool enabled:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'TogglePreHistoryHiddenRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'enabled': self.enabled
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'L\xb9\xbb\xea',  # 0xeabbb94c, little-endian
            bytes(self.channel),
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',  # TL boolTrue / boolFalse
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return TogglePreHistoryHiddenRequest(channel=_channel, enabled=_enabled)
class ToggleSignaturesRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0x1f69b606  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel, enabled):
        """
        :param InputChannel channel:
        :param Bool enabled:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.enabled = enabled  # type: TypeBool

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'ToggleSignaturesRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'enabled': self.enabled
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'\x06\xb6i\x1f',  # 0x1f69b606, little-endian
            bytes(self.channel),
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',  # TL boolTrue / boolFalse
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return ToggleSignaturesRequest(channel=_channel, enabled=_enabled)
class UpdatePinnedMessageRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0xa72ded52  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0x8af52aac  # TL type id of the result type

    def __init__(self, channel, id, silent=None):
        """
        :param bool | None silent:
        :param InputChannel channel:
        :param int id:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.silent = silent  # type: Optional[bool]
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: int

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'UpdatePinnedMessageRequest',
            'silent': self.silent,
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': self.id
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, a flags word
        # (bit 0 = silent), then each argument in order.
        return b''.join((
            b'R\xed-\xa7',  # 0xa72ded52, little-endian
            struct.pack('<I', (0 if self.silent is None or self.silent is False else 1)),
            bytes(self.channel),
            struct.pack('<i', self.id),
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        flags = reader.read_int()
        _silent = bool(flags & 1)  # flag bit 0 encodes `silent`
        _channel = reader.tgread_object()
        _id = reader.read_int()
        return UpdatePinnedMessageRequest(channel=_channel, id=_id, silent=_silent)
class UpdateUsernameRequest(TLObject):
    # Auto-generated MTProto request wrapper; byte constants below are the wire protocol.
    CONSTRUCTOR_ID = 0x3514b3de  # TL constructor id (serialized little-endian in __bytes__)
    SUBCLASS_OF_ID = 0xf5b399ac  # TL type id of the result type

    def __init__(self, channel, username):
        """
        :param InputChannel channel:
        :param str username:
        :returns Bool: This type has no constructors.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.username = username  # type: str

    def resolve(self, client, utils):
        # Resolve the user-supplied channel into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        """Return a JSON-friendly dict representation of this request."""
        return {
            '_': 'UpdateUsernameRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'username': self.username
        }

    def __bytes__(self):
        # TL wire format: little-endian constructor id, then each argument in order.
        return b''.join((
            b'\xde\xb3\x145',  # 0x3514b3de, little-endian
            bytes(self.channel),
            TLObject.serialize_bytes(self.username),  # TL length-prefixed string
        ))

    @staticmethod
    def from_reader(reader):
        """Deserialize from *reader* (own constructor id already consumed)."""
        _channel = reader.tgread_object()
        _username = reader.tgread_string()
        return UpdateUsernameRequest(channel=_channel, username=_username)
from __future__ import print_function
import os
import sys
import traceback
from bloom.logging import debug
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
from bloom.generators.debian.generator import generate_substitutions_from_package
from bloom.generators.debian.generator import place_template_files
from bloom.generators.debian.generator import process_template_files
from bloom.util import get_distro_list_prompt
try:
from rosdep2 import create_default_installer_context
except ImportError:
debug(traceback.format_exc())
error("rosdep was not detected, please install it.", exit=True)
try:
from catkin_pkg.packages import find_packages
except ImportError:
debug(traceback.format_exc())
error("catkin_pkg was not detected, please install it.", exit=True)
def prepare_arguments(parser):
    """Register the debian generator's command-line options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` (or compatible) instance
    :returns: the same parser, for chaining
    """
    parser.add_argument('package_path', nargs='?',
                        help="path to or containing the package.xml of a package")
    # --place-template-files and --process-template-files are mutually exclusive.
    exclusive = parser.add_mutually_exclusive_group(required=False)
    exclusive.add_argument('--place-template-files', action='store_true',
                           help="places debian/* template files only")
    exclusive.add_argument('--process-template-files', action='store_true',
                           help="processes templates in debian/* only")
    parser.add_argument('--os-name', help='OS name, e.g. ubuntu, debian')
    parser.add_argument('--os-version', help='OS version or codename, e.g. precise, wheezy')
    parser.add_argument('--ros-distro',
                        help="ROS distro, e.g. %s (used for rosdep)" % get_distro_list_prompt())
    parser.add_argument('--native', action='store_true', help="generate native package")
    return parser
def get_subs(pkg, os_name, os_version, ros_distro, native=False):
    """Compute the template substitutions for *pkg*.

    Thin wrapper around ``generate_substitutions_from_package`` so callers
    (and tests) can swap in an alternative implementation via ``get_subs_fn``.
    """
    return generate_substitutions_from_package(
        pkg, os_name, os_version, ros_distro, native=native)
def main(args=None, get_subs_fn=None):
    """Generate debian packaging files for the (single) package at the given path.

    :param args: parsed argparse namespace from :func:`prepare_arguments`,
        or ``None`` to run with defaults (cwd, place **and** process templates).
    :param get_subs_fn: optional override for :func:`get_subs` (used by tests).

    Fix: the original dereferenced ``args.os_name`` / ``args.os_version`` /
    ``args.ros_distro`` / ``args.native`` even when ``args`` was ``None``,
    crashing with AttributeError despite the documented ``args=None`` default.
    All ``args`` access is now guarded.
    """
    get_subs_fn = get_subs_fn or get_subs
    _place_template_files = True
    _process_template_files = True
    package_path = os.getcwd()
    native = False
    if args is not None:
        package_path = args.package_path or os.getcwd()
        _place_template_files = args.place_template_files
        _process_template_files = args.process_template_files
        native = args.native
    pkgs_dict = find_packages(package_path)
    if len(pkgs_dict) == 0:
        sys.exit("No packages found in path: '{0}'".format(package_path))
    if len(pkgs_dict) > 1:
        sys.exit("Multiple packages found, "
                 "this tool only supports one package at a time.")
    # Detect the host OS, then let explicit command-line options override it.
    os_data = create_default_installer_context().get_os_name_and_version()
    os_name, os_version = os_data
    ros_distro = os.environ.get('ROS_DISTRO', 'indigo')
    if args is not None:
        # Allow args overrides
        os_name = args.os_name or os_name
        os_version = args.os_version or os_version
        ros_distro = args.ros_distro or ros_distro
    # Summarize
    info(fmt("@!@{gf}==> @|") +
         fmt("Generating debs for @{cf}%s:%s@| for package(s) %s" %
             (os_name, os_version, [p.name for p in pkgs_dict.values()])))
    for path, pkg in pkgs_dict.items():
        template_files = None
        try:
            subs = get_subs_fn(pkg, os_name, os_version, ros_distro, native)
            if _place_template_files:
                # Place template files
                place_template_files(path)
            if _process_template_files:
                # Just process existing template files
                template_files = process_template_files(path, subs)
            if not _place_template_files and not _process_template_files:
                # If neither, do both
                place_template_files(path)
                template_files = process_template_files(path, subs)
            if template_files is not None:
                # Processed templates replace the *.em sources; remove the sources.
                for template_file in template_files:
                    os.remove(os.path.normpath(template_file))
        except Exception as exc:
            debug(traceback.format_exc())
            error(type(exc).__name__ + ": " + str(exc), exit=True)
        except (KeyboardInterrupt, EOFError):
            sys.exit(1)
# This describes this command to the loader
# (presumably bloom's plugin loader discovers this module-level dict — confirm).
description = dict(
    title='debian',
    description="Generates debian packaging files for a catkin package",
    main=main,
    prepare_arguments=prepare_arguments
)
from __future__ import print_function
import os
import sys
import traceback
from bloom.logging import debug
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
from bloom.generators.debian.generator import generate_substitutions_from_package
from bloom.generators.debian.generator import place_template_files
from bloom.generators.debian.generator import process_template_files
from bloom.util import get_distro_list_prompt
try:
from rosdep2 import create_default_installer_context
except ImportError:
debug(traceback.format_exc())
error("rosdep was not detected, please install it.", exit=True)
try:
from catkin_pkg.packages import find_packages
except ImportError:
debug(traceback.format_exc())
error("catkin_pkg was not detected, please install it.", exit=True)
def prepare_arguments(parser):
    """Register the debian generator's command-line options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` (or compatible) instance
    :returns: the same parser, for chaining
    """
    parser.add_argument('package_path', nargs='?',
                        help="path to or containing the package.xml of a package")
    # --place-template-files and --process-template-files are mutually exclusive.
    exclusive = parser.add_mutually_exclusive_group(required=False)
    exclusive.add_argument('--place-template-files', action='store_true',
                           help="places debian/* template files only")
    exclusive.add_argument('--process-template-files', action='store_true',
                           help="processes templates in debian/* only")
    parser.add_argument('--os-name', help='OS name, e.g. ubuntu, debian')
    parser.add_argument('--os-version', help='OS version or codename, e.g. precise, wheezy')
    parser.add_argument('--ros-distro',
                        help="ROS distro, e.g. %s (used for rosdep)" % get_distro_list_prompt())
    parser.add_argument('--native', action='store_true', help="generate native package")
    return parser
def get_subs(pkg, os_name, os_version, ros_distro, native=False):
    """Compute the template substitutions for *pkg*.

    Thin wrapper around ``generate_substitutions_from_package`` so callers
    (and tests) can swap in an alternative implementation via ``get_subs_fn``.
    """
    return generate_substitutions_from_package(
        pkg, os_name, os_version, ros_distro, native=native)
def main(args=None, get_subs_fn=None):
    """Generate debian packaging files for the (single) package at the given path.

    :param args: parsed argparse namespace from :func:`prepare_arguments`,
        or ``None`` to run with defaults (cwd, place **and** process templates).
    :param get_subs_fn: optional override for :func:`get_subs` (used by tests).

    Fix: the original dereferenced ``args.os_name`` / ``args.os_version`` /
    ``args.ros_distro`` / ``args.native`` even when ``args`` was ``None``,
    crashing with AttributeError despite the documented ``args=None`` default.
    All ``args`` access is now guarded.
    """
    get_subs_fn = get_subs_fn or get_subs
    _place_template_files = True
    _process_template_files = True
    package_path = os.getcwd()
    native = False
    if args is not None:
        package_path = args.package_path or os.getcwd()
        _place_template_files = args.place_template_files
        _process_template_files = args.process_template_files
        native = args.native
    pkgs_dict = find_packages(package_path)
    if len(pkgs_dict) == 0:
        sys.exit("No packages found in path: '{0}'".format(package_path))
    if len(pkgs_dict) > 1:
        sys.exit("Multiple packages found, "
                 "this tool only supports one package at a time.")
    # Detect the host OS, then let explicit command-line options override it.
    os_data = create_default_installer_context().get_os_name_and_version()
    os_name, os_version = os_data
    ros_distro = os.environ.get('ROS_DISTRO', 'indigo')
    if args is not None:
        # Allow args overrides
        os_name = args.os_name or os_name
        os_version = args.os_version or os_version
        ros_distro = args.ros_distro or ros_distro
    # Summarize
    info(fmt("@!@{gf}==> @|") +
         fmt("Generating debs for @{cf}%s:%s@| for package(s) %s" %
             (os_name, os_version, [p.name for p in pkgs_dict.values()])))
    for path, pkg in pkgs_dict.items():
        template_files = None
        try:
            subs = get_subs_fn(pkg, os_name, os_version, ros_distro, native)
            if _place_template_files:
                # Place template files
                place_template_files(path)
            if _process_template_files:
                # Just process existing template files
                template_files = process_template_files(path, subs)
            if not _place_template_files and not _process_template_files:
                # If neither, do both
                place_template_files(path)
                template_files = process_template_files(path, subs)
            if template_files is not None:
                # Processed templates replace the *.em sources; remove the sources.
                for template_file in template_files:
                    os.remove(os.path.normpath(template_file))
        except Exception as exc:
            debug(traceback.format_exc())
            error(type(exc).__name__ + ": " + str(exc), exit=True)
        except (KeyboardInterrupt, EOFError):
            sys.exit(1)
# This describes this command to the loader
# (presumably bloom's plugin loader discovers this module-level dict — confirm).
description = dict(
    title='debian',
    description="Generates debian packaging files for a catkin package",
    main=main,
    prepare_arguments=prepare_arguments
)
import math
from collections import Counter
import numpy as np
__all__ = ["LambdaLR", "MultiplicativeLR", "StepLR", "MultiStepLR", "ExponentialLR",
"CosineAnnealingLR", "CyclicLR", "CosineAnnealingWarmRestarts", "OneCycleLR"]
class _WarmUp():
def __init__(self, warmup_init_lr):
self.warmup_init_lr = warmup_init_lr
def get_lr(self):
# Get learning rate during warmup
raise NotImplementedError
class _LinearWarmUp(_WarmUp):
    """Linearly ramp the learning rate from ``warmup_init_lr`` up to ``lr``."""

    def __init__(self, lr, warmup_epochs, steps_per_epoch, warmup_init_lr=0):
        self.base_lr = lr
        self.warmup_init_lr = warmup_init_lr
        # Warmup length measured in optimizer steps, not epochs.
        self.warmup_steps = int(warmup_epochs * steps_per_epoch)
        super(_LinearWarmUp, self).__init__(warmup_init_lr)

    def get_warmup_steps(self):
        # Number of steps the warmup phase covers.
        return self.warmup_steps

    def get_lr(self, current_step):
        # Interpolate: start at warmup_init_lr, reach base_lr at warmup_steps.
        start = float(self.warmup_init_lr)
        per_step = (float(self.base_lr) - start) / float(self.warmup_steps)
        return start + per_step * current_step
class _ConstWarmUp(_WarmUp):
    """Warmup that holds the learning rate constant at ``warmup_init_lr``."""
    def get_lr(self):
        return self.warmup_init_lr
class _LRScheduler():
def __init__(self, lr, max_epoch, steps_per_epoch):
self.base_lr = lr
self.steps_per_epoch = steps_per_epoch
self.total_steps = int(max_epoch * steps_per_epoch)
def get_lr(self):
# Compute learning rate using chainable form of the scheduler
raise NotImplementedError
class LambdaLR(_LRScheduler):
    """Scale the initial lr by a user-supplied per-epoch factor function.

    Args:
        lr (float): Initial learning rate.
        lr_lambda (callable): Maps an epoch index to a multiplicative factor.
        steps_per_epoch (int): Optimizer steps per epoch; together with
            ``max_epoch`` this fixes the schedule length.
        max_epoch (int): Total number of training epochs.
        warmup_epochs (int): Epochs of linear warmup before the lambda
            schedule takes over. Default: 0.

    Example:
        >>> lambda1 = lambda epoch: epoch // 30
        >>> scheduler = LambdaLR(lr=0.1, lr_lambda=lambda1, steps_per_epoch=5000,
        >>>                      max_epoch=90, warmup_epochs=0)
        >>> lr = scheduler.get_lr()
    """

    def __init__(self, lr, lr_lambda, steps_per_epoch, max_epoch, warmup_epochs=0):
        self.lr_lambda = lr_lambda
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(LambdaLR, self).__init__(lr, max_epoch, steps_per_epoch)

    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        schedule = []
        for step in range(self.total_steps):
            if step < warmup_steps:
                # Linear warmup phase; steps are 1-based for the ramp.
                schedule.append(self.warmup.get_lr(step + 1))
            else:
                epoch = step // self.steps_per_epoch
                schedule.append(self.base_lr * self.lr_lambda(epoch))
        return np.array(schedule).astype(np.float32)
class MultiplicativeLR(_LRScheduler):
    """Multiply the learning rate by the factor given
    in the specified function.
    Args:
        lr (float): Initial learning rate.
        lr_lambda (function or list): A function which computes a multiplicative
            factor given an integer parameter epoch.
        steps_per_epoch (int): The number of steps per epoch to train for.
        max_epoch (int): The number of epochs to train for.
        warmup_epochs (int): The number of epochs to Warmup. Default: 0
    Example:
        >>> lmbda = lambda epoch: 0.95
        >>> scheduler = MultiplicativeLR(lr=0.1, lr_lambda=lmbda, steps_per_epoch=5000,
        >>>                              max_epoch=90, warmup_epochs=0)
        >>> lr = scheduler.get_lr()
    """
    def __init__(self, lr, lr_lambda, steps_per_epoch, max_epoch, warmup_epochs=0):
        self.lr_lambda = lr_lambda
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(MultiplicativeLR, self).__init__(lr, max_epoch, steps_per_epoch)
    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        lr_each_step = []
        current_lr = self.base_lr
        for i in range(self.total_steps):
            if i < warmup_steps:
                lr = self.warmup.get_lr(i+1)
            else:
                cur_ep = i // self.steps_per_epoch
                # Apply the factor once at each epoch boundary after epoch 0.
                if i % self.steps_per_epoch == 0 and cur_ep > 0:
                    current_lr = current_lr * self.lr_lambda(cur_ep)
                lr = current_lr
            lr_each_step.append(lr)
        return np.array(lr_each_step).astype(np.float32)
class StepLR(_LRScheduler):
    """Decay the learning rate by ``gamma`` every ``epoch_size`` epochs.

    Args:
        lr (float): Initial learning rate.
        epoch_size (int): Period, in epochs, of each decay step.
        gamma (float): Multiplicative decay factor. Default: 0.1.
        steps_per_epoch (int): Optimizer steps per epoch.
        max_epoch (int): Total number of training epochs.
        warmup_epochs (int): Epochs of linear warmup. Default: 0.

    Example:
        >>> # lr = 0.05 for epoch < 30, 0.005 for 30 <= epoch < 60, ...
        >>> scheduler = StepLR(lr=0.1, epoch_size=30, gamma=0.1, steps_per_epoch=5000,
        >>>                    max_epoch=90, warmup_epochs=0)
        >>> lr = scheduler.get_lr()
    """

    def __init__(self, lr, epoch_size, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
        self.epoch_size = epoch_size
        self.gamma = gamma
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(StepLR, self).__init__(lr, max_epoch, steps_per_epoch)

    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        schedule = []
        for step in range(self.total_steps):
            if step < warmup_steps:
                schedule.append(self.warmup.get_lr(step + 1))
            else:
                # Number of completed decay periods so far.
                decays = (step // self.steps_per_epoch) // self.epoch_size
                schedule.append(self.base_lr * self.gamma ** decays)
        return np.array(schedule).astype(np.float32)
class MultiStepLR(_LRScheduler):
    """Decays the learning rate by gamma once the number of epoch reaches one
    of the milestones.
    Args:
        lr (float): Initial learning rate.
        milestones (list): List of epoch indices. Must be increasing.
        gamma (float): Multiplicative factor of learning rate decay.
            Default: 0.1.
        steps_per_epoch (int): The number of steps per epoch to train for.
        max_epoch (int): The number of epochs to train for.
        warmup_epochs (int): The number of epochs to Warmup. Default: 0
    Example:
        >>> # Assuming optimizer uses lr = 0.05 for all groups
        >>> # lr = 0.05 if epoch < 30
        >>> # lr = 0.005 if 30 <= epoch < 80
        >>> # lr = 0.0005 if epoch >= 80
        >>> scheduler = MultiStepLR(lr=0.1, milestones=[30,80], gamma=0.1, steps_per_epoch=5000,
        >>>                         max_epoch=90, warmup_epochs=0)
        >>> lr = scheduler.get_lr()
    """
    def __init__(self, lr, milestones, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
        # Counter used only as a set here; see NOTE in get_lr about duplicates.
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(MultiStepLR, self).__init__(lr, max_epoch, steps_per_epoch)
    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        lr_each_step = []
        current_lr = self.base_lr
        for i in range(self.total_steps):
            if i < warmup_steps:
                lr = self.warmup.get_lr(i+1)
            else:
                cur_ep = i // self.steps_per_epoch
                # Apply gamma once when an epoch boundary lands on a milestone.
                # NOTE(review): duplicate milestones do NOT compound gamma here
                # (unlike torch.optim.lr_scheduler.MultiStepLR) — confirm intended.
                if i % self.steps_per_epoch == 0 and cur_ep in self.milestones:
                    current_lr = current_lr * self.gamma
                lr = current_lr
            lr_each_step.append(lr)
        return np.array(lr_each_step).astype(np.float32)
class ExponentialLR(_LRScheduler):
    """Decay the learning rate by ``gamma`` once per epoch.

    Args:
        lr (float): Initial learning rate.
        gamma (float): Multiplicative decay factor applied each epoch.
        steps_per_epoch (int): Optimizer steps per epoch.
        max_epoch (int): Total number of training epochs.
        warmup_epochs (int): Epochs of linear warmup. Default: 0.
    """

    def __init__(self, lr, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
        self.gamma = gamma
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(ExponentialLR, self).__init__(lr, max_epoch, steps_per_epoch)

    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        schedule = []
        decayed_lr = self.base_lr
        for step in range(self.total_steps):
            if step < warmup_steps:
                schedule.append(self.warmup.get_lr(step + 1))
                continue
            # Multiply by gamma at each epoch boundary past the warmup
            # (epoch 0 keeps the base lr).
            if step % self.steps_per_epoch == 0 and step > 0:
                decayed_lr = decayed_lr * self.gamma
            schedule.append(decayed_lr)
        return np.array(schedule).astype(np.float32)
class CosineAnnealingLR(_LRScheduler):
    r"""Set the learning rate using a cosine annealing schedule, where
    :math:`\eta_{max}` is set to the initial lr and :math:`T_{cur}` is the
    number of epochs since the last restart in SGDR:
    .. math::
        \begin{aligned}
            \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
            + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
            & T_{cur} \neq (2k+1)T_{max}; \\
            \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
            \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
            & T_{cur} = (2k+1)T_{max}.
        \end{aligned}
    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
    implements the cosine annealing part of SGDR, and not the restarts.
    Args:
        lr (float): Initial learning rate which is the
            lower boundary in the cycle.
        T_max (int): Maximum number of iterations.
        eta_min (float): Minimum learning rate. Default: 0.
        steps_per_epoch (int): The number of steps per epoch to train for.
        max_epoch (int): The number of epochs to train for.
        warmup_epochs (int): The number of epochs to Warmup. Default: 0
    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """
    def __init__(self, lr, T_max, steps_per_epoch, max_epoch, warmup_epochs=0, eta_min=0):
        self.T_max = T_max
        self.eta_min = eta_min
        self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
        super(CosineAnnealingLR, self).__init__(lr, max_epoch, steps_per_epoch)
    def get_lr(self):
        """Return the full per-step schedule as a float32 numpy array."""
        warmup_steps = self.warmup.get_warmup_steps()
        lr_each_step = []
        current_lr = self.base_lr
        for i in range(self.total_steps):
            if i < warmup_steps:
                lr = self.warmup.get_lr(i+1)
            else:
                cur_ep = i // self.steps_per_epoch
                # The cosine value is recomputed only at epoch boundaries, so the
                # schedule is piecewise-constant per epoch; `i > 0` keeps epoch 0
                # at base_lr (and steps right after a mid-epoch warmup keep the
                # previous value until the next boundary).
                if i % self.steps_per_epoch == 0 and i > 0:
                    current_lr = self.eta_min + \
                        (self.base_lr - self.eta_min) * (1. + math.cos(math.pi*cur_ep / self.T_max)) / 2
                lr = current_lr
            lr_each_step.append(lr)
        return np.array(lr_each_step).astype(np.float32)
class CyclicLR(_LRScheduler):
r"""Sets the learning rate according to cyclical learning rate policy (CLR).
The policy cycles the learning rate between two boundaries with a constant
frequency, as detailed in the paper `Cyclical Learning Rates for Training
Neural Networks`_. The distance between the two boundaries can be scaled on
a per-iteration or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
This class has three built-in policies, as put forth in the paper:
* "triangular": A basic triangular cycle without amplitude scaling.
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
at each cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
max_lr (float): Upper learning rate boundaries in the cycle.
Functionally, it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr and some scaling
of the amplitude; therefore max_lr may not actually be reached
depending on scaling function.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
def __init__(self,
lr,
max_lr,
steps_per_epoch,
max_epoch,
step_size_up=2000,
step_size_down=None,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle',
warmup_epochs=0):
self.max_lr = max_lr
step_size_up = float(step_size_up)
step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ['triangular', 'triangular2', 'exp_range'] \
and scale_fn is None:
raise ValueError('mode is invalid and scale_fn is None')
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == 'triangular':
self.scale_fn = self._triangular_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = self._triangular2_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = self._exp_range_scale_fn
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(CyclicLR, self).__init__(lr, max_epoch, steps_per_epoch)
def _triangular_scale_fn(self, x):
return 1.
def _triangular2_scale_fn(self, x):
return 1 / (2. ** (x - 1))
def _exp_range_scale_fn(self, x):
return self.gamma**(x)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
# Calculates the learning rate at batch index.
cycle = math.floor(1 + i / self.total_size)
x = 1. + i / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
base_height = (self.max_lr - self.base_lr) * scale_factor
if self.scale_mode == 'cycle':
lr = self.base_lr + base_height * self.scale_fn(cycle)
else:
lr = self.base_lr + base_height * self.scale_fn(i)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class CosineAnnealingWarmRestarts(_LRScheduler):
r"""Set the learning rate using a cosine annealing schedule, where
:math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` is the
number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
lr (float): Initial learning rate.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, lr, steps_per_epoch, max_epoch, T_0, T_mult=1, eta_min=0, warmup_epochs=0):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
self.T_cur = 0
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(CosineAnnealingWarmRestarts, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
if i % self.steps_per_epoch == 0 and i > 0:
self.T_cur += 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
lr = self.eta_min + (self.base_lr - self.eta_min) * \
(1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class OneCycleLR(_LRScheduler):
r"""Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
This scheduler is not chainable.
Args:
lr (float): Initial learning rate.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
pct_start (float): The percentage of the cycle (in number of steps) spent
increasing the learning rate.
Default: 0.3
anneal_strategy (str): {'cos', 'linear'}
Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
linear annealing.
Default: 'cos'
div_factor (float): Determines the max learning rate via
max_lr = lr * div_factor
Default: 25
final_div_factor (float): Determines the minimum learning rate via
min_lr = lr / final_div_factor
Default: 1e4
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
def __init__(self,
lr,
steps_per_epoch,
max_epoch,
pct_start=0.3,
anneal_strategy='cos',
div_factor=25.,
final_div_factor=1e4,
warmup_epochs=0):
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(OneCycleLR, self).__init__(lr, max_epoch, steps_per_epoch)
self.step_size_up = float(pct_start * self.total_steps) - 1
self.step_size_down = float(self.total_steps - self.step_size_up) - 1
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start))
# Validate anneal_strategy
if anneal_strategy not in ['cos', 'linear']:
raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy))
if anneal_strategy == 'cos':
self.anneal_func = self._annealing_cos
elif anneal_strategy == 'linear':
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
self.max_lr = lr * div_factor
self.min_lr = lr / final_div_factor
def _annealing_cos(self, start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
def _annealing_linear(self, start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return (end - start) * pct + start
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
if i <= self.step_size_up:
lr = self.anneal_func(self.base_lr, self.max_lr, i / self.step_size_up)
else:
down_step_num = i - self.step_size_up
lr = self.anneal_func(self.max_lr, self.min_lr, down_step_num / self.step_size_down)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32) | model_zoo/official/cv/densenet121/src/lr_scheduler/lr_scheduler.py | import math
from collections import Counter
import numpy as np
__all__ = ["LambdaLR", "MultiplicativeLR", "StepLR", "MultiStepLR", "ExponentialLR",
"CosineAnnealingLR", "CyclicLR", "CosineAnnealingWarmRestarts", "OneCycleLR"]
class _WarmUp():
def __init__(self, warmup_init_lr):
self.warmup_init_lr = warmup_init_lr
def get_lr(self):
# Get learning rate during warmup
raise NotImplementedError
class _LinearWarmUp(_WarmUp):
"""
linear warmup function
"""
def __init__(self, lr, warmup_epochs, steps_per_epoch, warmup_init_lr=0):
self.base_lr = lr
self.warmup_init_lr = warmup_init_lr
self.warmup_steps = int(warmup_epochs * steps_per_epoch)
super(_LinearWarmUp, self).__init__(warmup_init_lr)
def get_warmup_steps(self):
return self.warmup_steps
def get_lr(self, current_step):
lr_inc = (float(self.base_lr) - float(self.warmup_init_lr)) / float(self.warmup_steps)
lr = float(self.warmup_init_lr) + lr_inc * current_step
return lr
class _ConstWarmUp(_WarmUp):
def get_lr(self):
return self.warmup_init_lr
class _LRScheduler():
def __init__(self, lr, max_epoch, steps_per_epoch):
self.base_lr = lr
self.steps_per_epoch = steps_per_epoch
self.total_steps = int(max_epoch * steps_per_epoch)
def get_lr(self):
# Compute learning rate using chainable form of the scheduler
raise NotImplementedError
class LambdaLR(_LRScheduler):
"""Sets the learning rate to the initial lr times a given function.
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
lr_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
Example:
>>> # Assuming optimizer has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> scheduler = LambdaLR(lr=0.1, lr_lambda=lambda1, steps_per_epoch=5000,
>>> max_epoch=90, warmup_epochs=0)
>>> lr = scheduler.get_lr()
"""
def __init__(self, lr, lr_lambda, steps_per_epoch, max_epoch, warmup_epochs=0):
self.lr_lambda = lr_lambda
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(LambdaLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
cur_ep = i // self.steps_per_epoch
lr = self.base_lr * self.lr_lambda(cur_ep)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class MultiplicativeLR(_LRScheduler):
"""Multiply the learning rate by the factor given
in the specified function.
Args:
lr_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch,.
Example:
>>> lmbda = lambda epoch: 0.95
>>> scheduler = MultiplicativeLR(lr=0.1, lr_lambda=lambda1, steps_per_epoch=5000,
>>> max_epoch=90, warmup_epochs=0)
>>> lr = scheduler.get_lr()
"""
def __init__(self, lr, lr_lambda, steps_per_epoch, max_epoch, warmup_epochs=0):
self.lr_lambda = lr_lambda
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(MultiplicativeLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
current_lr = self.base_lr
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
cur_ep = i // self.steps_per_epoch
if i % self.steps_per_epoch == 0 and cur_ep > 0:
current_lr = current_lr * self.lr_lambda(cur_ep)
lr = current_lr
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class StepLR(_LRScheduler):
"""Decays the learning rate by gamma every epoch_size epochs.
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
epoch_size (int): Period of learning rate decay.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 60
>>> # lr = 0.0005 if 60 <= epoch < 90
>>> # ...
>>> scheduler = StepLR(lr=0.1, epoch_size=30, gamma=0.1, steps_per_epoch=5000,
>>> max_epoch=90, warmup_epochs=0)
>>> lr = scheduler.get_lr()
"""
def __init__(self, lr, epoch_size, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
self.epoch_size = epoch_size
self.gamma = gamma
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(StepLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
cur_ep = i // self.steps_per_epoch
lr = self.base_lr * self.gamma**(cur_ep // self.epoch_size)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class MultiStepLR(_LRScheduler):
"""Decays the learning rate by gamma once the number of epoch reaches one
of the milestones.
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
milestones (list): List of epoch indices. Must be increasing.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 80
>>> # lr = 0.0005 if epoch >= 80
>>> scheduler = MultiStepLR(lr=0.1, milestones=[30,80], gamma=0.1, steps_per_epoch=5000,
>>> max_epoch=90, warmup_epochs=0)
>>> lr = scheduler.get_lr()
"""
def __init__(self, lr, milestones, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
self.milestones = Counter(milestones)
self.gamma = gamma
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(MultiStepLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
current_lr = self.base_lr
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
cur_ep = i // self.steps_per_epoch
if i % self.steps_per_epoch == 0 and cur_ep in self.milestones:
current_lr = current_lr * self.gamma
lr = current_lr
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class ExponentialLR(_LRScheduler):
"""Decays the learning rate of each parameter group by gamma every epoch.
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
gamma (float): Multiplicative factor of learning rate decay.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
"""
def __init__(self, lr, gamma, steps_per_epoch, max_epoch, warmup_epochs=0):
self.gamma = gamma
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(ExponentialLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
current_lr = self.base_lr
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
if i % self.steps_per_epoch == 0 and i > 0:
current_lr = current_lr * self.gamma
lr = current_lr
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class CosineAnnealingLR(_LRScheduler):
r"""Set the learning rate using a cosine annealing schedule, where
:math:`\eta_{max}` is set to the initial lr and :math:`T_{cur}` is the
number of epochs since the last restart in SGDR:
.. math::
\begin{aligned}
\eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
+ \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
& T_{cur} \neq (2k+1)T_{max}; \\
\eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
\left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
& T_{cur} = (2k+1)T_{max}.
\end{aligned}
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, lr, T_max, steps_per_epoch, max_epoch, warmup_epochs=0, eta_min=0):
self.T_max = T_max
self.eta_min = eta_min
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(CosineAnnealingLR, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
current_lr = self.base_lr
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
cur_ep = i // self.steps_per_epoch
if i % self.steps_per_epoch == 0 and i > 0:
current_lr = self.eta_min + \
(self.base_lr - self.eta_min) * (1. + math.cos(math.pi*cur_ep / self.T_max)) / 2
lr = current_lr
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class CyclicLR(_LRScheduler):
r"""Sets the learning rate according to cyclical learning rate policy (CLR).
The policy cycles the learning rate between two boundaries with a constant
frequency, as detailed in the paper `Cyclical Learning Rates for Training
Neural Networks`_. The distance between the two boundaries can be scaled on
a per-iteration or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
This class has three built-in policies, as put forth in the paper:
* "triangular": A basic triangular cycle without amplitude scaling.
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
at each cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
lr (float): Initial learning rate which is the
lower boundary in the cycle.
max_lr (float): Upper learning rate boundaries in the cycle.
Functionally, it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr and some scaling
of the amplitude; therefore max_lr may not actually be reached
depending on scaling function.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
def __init__(self,
lr,
max_lr,
steps_per_epoch,
max_epoch,
step_size_up=2000,
step_size_down=None,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle',
warmup_epochs=0):
self.max_lr = max_lr
step_size_up = float(step_size_up)
step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ['triangular', 'triangular2', 'exp_range'] \
and scale_fn is None:
raise ValueError('mode is invalid and scale_fn is None')
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == 'triangular':
self.scale_fn = self._triangular_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = self._triangular2_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = self._exp_range_scale_fn
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(CyclicLR, self).__init__(lr, max_epoch, steps_per_epoch)
def _triangular_scale_fn(self, x):
return 1.
def _triangular2_scale_fn(self, x):
return 1 / (2. ** (x - 1))
def _exp_range_scale_fn(self, x):
return self.gamma**(x)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
# Calculates the learning rate at batch index.
cycle = math.floor(1 + i / self.total_size)
x = 1. + i / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
base_height = (self.max_lr - self.base_lr) * scale_factor
if self.scale_mode == 'cycle':
lr = self.base_lr + base_height * self.scale_fn(cycle)
else:
lr = self.base_lr + base_height * self.scale_fn(i)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class CosineAnnealingWarmRestarts(_LRScheduler):
r"""Set the learning rate using a cosine annealing schedule, where
:math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` is the
number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
lr (float): Initial learning rate.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, lr, steps_per_epoch, max_epoch, T_0, T_mult=1, eta_min=0, warmup_epochs=0):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
self.T_cur = 0
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(CosineAnnealingWarmRestarts, self).__init__(lr, max_epoch, steps_per_epoch)
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
if i % self.steps_per_epoch == 0 and i > 0:
self.T_cur += 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
lr = self.eta_min + (self.base_lr - self.eta_min) * \
(1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32)
class OneCycleLR(_LRScheduler):
r"""Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
This scheduler is not chainable.
Args:
lr (float): Initial learning rate.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
max_epoch (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle.
pct_start (float): The percentage of the cycle (in number of steps) spent
increasing the learning rate.
Default: 0.3
anneal_strategy (str): {'cos', 'linear'}
Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
linear annealing.
Default: 'cos'
div_factor (float): Determines the max learning rate via
max_lr = lr * div_factor
Default: 25
final_div_factor (float): Determines the minimum learning rate via
min_lr = lr / final_div_factor
Default: 1e4
warmup_epochs (int): The number of epochs to Warmup.
Default: 0
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
def __init__(self,
lr,
steps_per_epoch,
max_epoch,
pct_start=0.3,
anneal_strategy='cos',
div_factor=25.,
final_div_factor=1e4,
warmup_epochs=0):
self.warmup = _LinearWarmUp(lr, warmup_epochs, steps_per_epoch)
super(OneCycleLR, self).__init__(lr, max_epoch, steps_per_epoch)
self.step_size_up = float(pct_start * self.total_steps) - 1
self.step_size_down = float(self.total_steps - self.step_size_up) - 1
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start))
# Validate anneal_strategy
if anneal_strategy not in ['cos', 'linear']:
raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy))
if anneal_strategy == 'cos':
self.anneal_func = self._annealing_cos
elif anneal_strategy == 'linear':
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
self.max_lr = lr * div_factor
self.min_lr = lr / final_div_factor
def _annealing_cos(self, start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
def _annealing_linear(self, start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return (end - start) * pct + start
def get_lr(self):
warmup_steps = self.warmup.get_warmup_steps()
lr_each_step = []
for i in range(self.total_steps):
if i < warmup_steps:
lr = self.warmup.get_lr(i+1)
else:
if i <= self.step_size_up:
lr = self.anneal_func(self.base_lr, self.max_lr, i / self.step_size_up)
else:
down_step_num = i - self.step_size_up
lr = self.anneal_func(self.max_lr, self.min_lr, down_step_num / self.step_size_down)
lr_each_step.append(lr)
return np.array(lr_each_step).astype(np.float32) | 0.878887 | 0.288118 |
import logging
import seamm
import seamm_widgets as sw
import packmol_step
import Pmw
import tkinter as tk
import tkinter.ttk as ttk
logger = logging.getLogger(__name__)
class TkPackmol(seamm.TkNode):
"""Graphical interface for using Packmol for fluid boxes"""
def __init__(
self, tk_flowchart=None, node=None, canvas=None, x=None, y=None, w=200, h=50
):
"""Initialize a node
Keyword arguments:
"""
self.dialog = None
self._molecule_data = []
self._add_molecule = None
super().__init__(
tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h
)
def create_dialog(self):
"""Create the dialog!"""
self.dialog = Pmw.Dialog(
self.toplevel,
buttons=("OK", "Help", "Cancel"),
master=self.toplevel,
title="Edit Packmol step",
command=self.handle_dialog,
)
self.dialog.withdraw()
frame = ttk.Frame(self.dialog.interior())
frame.pack(expand=tk.YES, fill=tk.BOTH)
self["frame"] = frame
# Create all the widgets
P = self.node.parameters
for key in P:
if key not in ("molecules",):
self[key] = P[key].widget(frame)
self["molecule source"].combobox.config(state="readonly")
# Frame for specifying molecules
self["molecules"] = sw.ScrolledFrame(frame, height=500)
w = self["molecules"].interior()
self["smiles"] = ttk.Label(w, text="SMILES")
self["stoichiometry"] = ttk.Label(w, text="stoichiometry")
# And the molecule data
for molecule in P["molecules"].value:
_type = molecule["type"]
value = molecule["molecule"]
count = molecule["count"]
self._molecule_data.append(
{"type": _type, "molecule": value, "count": count}
)
for key in ("molecule source", "method", "submethod"):
self[key].combobox.bind("<<ComboboxSelected>>", self.reset_dialog)
self[key].combobox.bind("<Return>", self.reset_dialog)
self[key].combobox.bind("<FocusOut>", self.reset_dialog)
self.reset_dialog()
def reset_dialog(self, widget=None):
methods = packmol_step.PackmolParameters.methods
molecule_source = self["molecule source"].get()
method = self["method"].get()
submethod = self["submethod"].get()
logger.debug("reset_dialog: {} {}".format(method, submethod))
frame = self["frame"]
frame.grid_rowconfigure(1, weight=0, minsize=0)
frame.grid_columnconfigure(2, weight=0)
for slave in frame.grid_slaves():
slave.grid_forget()
row = 0
self["molecule source"].grid(row=row, column=0, columnspan=3, sticky=tk.EW)
row += 1
if molecule_source == "SMILES":
self["molecules"].grid(row=row, column=0, columnspan=3, sticky=tk.NSEW)
frame.grid_rowconfigure(row, weight=1, minsize=200)
frame.grid_columnconfigure(2, weight=1)
row += 1
self.reset_molecules()
self["method"].grid(row=row, column=0, sticky=tk.E)
if method[0] != "$":
self[method].grid(row=row, column=1, sticky=tk.W)
self[method].show("combobox", "entry", "units")
row += 1
if "pressure" in method:
key = "ideal gas temperature"
self[key].grid(row=row, column=0, sticky=tk.W)
self[key].show("all")
row += 1
sw.align_labels([self["method"], self[key]])
if method[0] == "$":
self["submethod"].combobox.config(values=[*methods])
self["submethod"].set(submethod)
if submethod[0] == "$":
# Both are variables, so any combination is possible
self["submethod"].grid(row=row, column=0, sticky=tk.E)
row += 1
widgets = []
for key in (
*methods,
"ideal gas temperature",
):
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
else:
# Submethod is given, so it controls the choices for the method
widgets = []
for key in methods[submethod]:
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
if "pressure" in methods[submethod]:
key = "ideal gas temperature"
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
self["submethod"].grid(row=row, column=0, sticky=tk.E)
self[submethod].grid(row=row, column=1, sticky=tk.W)
self[submethod].show("combobox", "entry", "units")
else:
if submethod[0] == "$":
self["submethod"].grid(row=row, column=0, sticky=tk.E)
row += 1
widgets = []
for key in methods[method]:
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
if "pressure" in methods[submethod]:
for key in (
"ideal gas pressure",
"ideal gas temperature",
):
widgets.append(self[key])
self[key].grid(row=row, column=0, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
else:
self["submethod"].combobox.config(values=methods[method])
if submethod in methods[method]:
self["submethod"].set(submethod)
else:
self["submethod"].combobox.current(0)
submethod = self["submethod"].get()
self["submethod"].grid(row=row, column=0, sticky=tk.E)
self[submethod].grid(row=row, column=1, sticky=tk.W)
self[submethod].show("combobox", "entry", "units")
row += 1
if "pressure" in submethod:
key = "ideal gas temperature"
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("combobox", "entry", "units")
row += 1
def reset_molecules(self):
"""Layout the table of molecules to use."""
frame = self["molecules"].interior()
# Unpack any widgets
for slave in frame.grid_slaves():
slave.grid_forget()
# Put in the column headers.
row = 0
self["smiles"].grid(row=row, column=1, sticky=tk.EW)
self["stoichiometry"].grid(row=row, column=2, sticky=tk.EW)
row += 1
for data in self._molecule_data:
molecule = data["molecule"]
self.logger.debug(molecule)
if "widgets" not in data:
widgets = data["widgets"] = {}
else:
widgets = data["widgets"]
if "remove" not in widgets:
# The button to remove a row...
widgets["remove"] = ttk.Button(
frame,
text="-",
width=2,
command=lambda row=row: self.remove_molecule(row),
takefocus=True,
)
if "molecule" not in widgets:
# the molecule (SMILES at the moment)
widgets["molecule"] = ttk.Entry(frame, width=50, takefocus=True)
widgets["molecule"].insert("end", molecule)
if "count" not in widgets:
# The count for the stoichiometry
widgets["count"] = ttk.Entry(frame, width=5, takefocus=True)
widgets["count"].insert("end", data["count"])
self.logger.debug(" widgets: " + str(widgets))
widgets["remove"].grid(row=row, column=0, sticky=tk.W)
widgets["molecule"].grid(row=row, column=1, stick=tk.EW)
widgets["count"].grid(row=row, column=2, stick=tk.EW)
row += 1
# The button to add a row...
if self._add_molecule is None:
self._add_molecule = ttk.Button(
frame,
text="+",
width=5,
command=self.add_molecule,
takefocus=True,
)
self._add_molecule.focus_set()
self._add_molecule.lift()
self._add_molecule.grid(row=row, column=0, columnspan=3, sticky=tk.W)
frame.grid_columnconfigure(1, weight=1)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def handle_dialog(self, result):
if result is None or result == "Cancel":
self.dialog.deactivate(result)
# Reset the molecules
for data in self._molecule_data:
if "widgets" in data:
widgets = data["widgets"]
for w in widgets.values():
w.destroy()
self._molecule_data = []
P = self.node.parameters
for molecule in P["molecules"].value:
_type = molecule["type"]
value = molecule["molecule"]
count = molecule["count"]
self._molecule_data.append(
{"type": _type, "molecule": value, "count": count}
)
super().handle_dialog(result)
return
if result == "Help":
# display help!!!
return
if result != "OK":
self.dialog.deactivate(result)
raise RuntimeError("Don't recognize dialog result '{}'".format(result))
self.dialog.deactivate(result)
# Shortcut for parameters
P = self.node.parameters
for key in P:
if key not in ("molecules",):
P[key].set_from_widget()
# And handle the molecules
molecules = []
for data in self._molecule_data:
widgets = data["widgets"]
molecules.append(
{
"type": data["type"],
"molecule": widgets["molecule"].get(),
"count": widgets["count"].get(),
}
)
P["molecules"].value = molecules
def add_molecule(self):
"""Add a new row to the molecule table."""
self._molecule_data.append({"type": "smiles", "molecule": "", "count": "1"})
self.reset_molecules()
def remove_molecule(self, row):
"""Remove a molecule entry from the table.
Parameters
----------
row : int
The row in the table to remove. Note the first molecule is at row 1.
"""
index = row - 1
data = self._molecule_data[index]
if "widgets" in data:
for w in data["widgets"].values():
w.destroy()
del self._molecule_data[index]
self.reset_molecules() | packmol_step/tk_packmol.py | import logging
import seamm
import seamm_widgets as sw
import packmol_step
import Pmw
import tkinter as tk
import tkinter.ttk as ttk
logger = logging.getLogger(__name__)
class TkPackmol(seamm.TkNode):
"""Graphical interface for using Packmol for fluid boxes"""
def __init__(
self, tk_flowchart=None, node=None, canvas=None, x=None, y=None, w=200, h=50
):
"""Initialize a node
Keyword arguments:
"""
self.dialog = None
self._molecule_data = []
self._add_molecule = None
super().__init__(
tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h
)
def create_dialog(self):
"""Create the dialog!"""
self.dialog = Pmw.Dialog(
self.toplevel,
buttons=("OK", "Help", "Cancel"),
master=self.toplevel,
title="Edit Packmol step",
command=self.handle_dialog,
)
self.dialog.withdraw()
frame = ttk.Frame(self.dialog.interior())
frame.pack(expand=tk.YES, fill=tk.BOTH)
self["frame"] = frame
# Create all the widgets
P = self.node.parameters
for key in P:
if key not in ("molecules",):
self[key] = P[key].widget(frame)
self["molecule source"].combobox.config(state="readonly")
# Frame for specifying molecules
self["molecules"] = sw.ScrolledFrame(frame, height=500)
w = self["molecules"].interior()
self["smiles"] = ttk.Label(w, text="SMILES")
self["stoichiometry"] = ttk.Label(w, text="stoichiometry")
# And the molecule data
for molecule in P["molecules"].value:
_type = molecule["type"]
value = molecule["molecule"]
count = molecule["count"]
self._molecule_data.append(
{"type": _type, "molecule": value, "count": count}
)
for key in ("molecule source", "method", "submethod"):
self[key].combobox.bind("<<ComboboxSelected>>", self.reset_dialog)
self[key].combobox.bind("<Return>", self.reset_dialog)
self[key].combobox.bind("<FocusOut>", self.reset_dialog)
self.reset_dialog()
def reset_dialog(self, widget=None):
methods = packmol_step.PackmolParameters.methods
molecule_source = self["molecule source"].get()
method = self["method"].get()
submethod = self["submethod"].get()
logger.debug("reset_dialog: {} {}".format(method, submethod))
frame = self["frame"]
frame.grid_rowconfigure(1, weight=0, minsize=0)
frame.grid_columnconfigure(2, weight=0)
for slave in frame.grid_slaves():
slave.grid_forget()
row = 0
self["molecule source"].grid(row=row, column=0, columnspan=3, sticky=tk.EW)
row += 1
if molecule_source == "SMILES":
self["molecules"].grid(row=row, column=0, columnspan=3, sticky=tk.NSEW)
frame.grid_rowconfigure(row, weight=1, minsize=200)
frame.grid_columnconfigure(2, weight=1)
row += 1
self.reset_molecules()
self["method"].grid(row=row, column=0, sticky=tk.E)
if method[0] != "$":
self[method].grid(row=row, column=1, sticky=tk.W)
self[method].show("combobox", "entry", "units")
row += 1
if "pressure" in method:
key = "ideal gas temperature"
self[key].grid(row=row, column=0, sticky=tk.W)
self[key].show("all")
row += 1
sw.align_labels([self["method"], self[key]])
if method[0] == "$":
self["submethod"].combobox.config(values=[*methods])
self["submethod"].set(submethod)
if submethod[0] == "$":
# Both are variables, so any combination is possible
self["submethod"].grid(row=row, column=0, sticky=tk.E)
row += 1
widgets = []
for key in (
*methods,
"ideal gas temperature",
):
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
else:
# Submethod is given, so it controls the choices for the method
widgets = []
for key in methods[submethod]:
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
if "pressure" in methods[submethod]:
key = "ideal gas temperature"
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
self["submethod"].grid(row=row, column=0, sticky=tk.E)
self[submethod].grid(row=row, column=1, sticky=tk.W)
self[submethod].show("combobox", "entry", "units")
else:
if submethod[0] == "$":
self["submethod"].grid(row=row, column=0, sticky=tk.E)
row += 1
widgets = []
for key in methods[method]:
widgets.append(self[key])
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("all")
row += 1
if "pressure" in methods[submethod]:
for key in (
"ideal gas pressure",
"ideal gas temperature",
):
widgets.append(self[key])
self[key].grid(row=row, column=0, sticky=tk.EW)
self[key].show("all")
row += 1
sw.align_labels(widgets)
else:
self["submethod"].combobox.config(values=methods[method])
if submethod in methods[method]:
self["submethod"].set(submethod)
else:
self["submethod"].combobox.current(0)
submethod = self["submethod"].get()
self["submethod"].grid(row=row, column=0, sticky=tk.E)
self[submethod].grid(row=row, column=1, sticky=tk.W)
self[submethod].show("combobox", "entry", "units")
row += 1
if "pressure" in submethod:
key = "ideal gas temperature"
self[key].grid(row=row, column=1, sticky=tk.EW)
self[key].show("combobox", "entry", "units")
row += 1
def reset_molecules(self):
"""Layout the table of molecules to use."""
frame = self["molecules"].interior()
# Unpack any widgets
for slave in frame.grid_slaves():
slave.grid_forget()
# Put in the column headers.
row = 0
self["smiles"].grid(row=row, column=1, sticky=tk.EW)
self["stoichiometry"].grid(row=row, column=2, sticky=tk.EW)
row += 1
for data in self._molecule_data:
molecule = data["molecule"]
self.logger.debug(molecule)
if "widgets" not in data:
widgets = data["widgets"] = {}
else:
widgets = data["widgets"]
if "remove" not in widgets:
# The button to remove a row...
widgets["remove"] = ttk.Button(
frame,
text="-",
width=2,
command=lambda row=row: self.remove_molecule(row),
takefocus=True,
)
if "molecule" not in widgets:
# the molecule (SMILES at the moment)
widgets["molecule"] = ttk.Entry(frame, width=50, takefocus=True)
widgets["molecule"].insert("end", molecule)
if "count" not in widgets:
# The count for the stoichiometry
widgets["count"] = ttk.Entry(frame, width=5, takefocus=True)
widgets["count"].insert("end", data["count"])
self.logger.debug(" widgets: " + str(widgets))
widgets["remove"].grid(row=row, column=0, sticky=tk.W)
widgets["molecule"].grid(row=row, column=1, stick=tk.EW)
widgets["count"].grid(row=row, column=2, stick=tk.EW)
row += 1
# The button to add a row...
if self._add_molecule is None:
self._add_molecule = ttk.Button(
frame,
text="+",
width=5,
command=self.add_molecule,
takefocus=True,
)
self._add_molecule.focus_set()
self._add_molecule.lift()
self._add_molecule.grid(row=row, column=0, columnspan=3, sticky=tk.W)
frame.grid_columnconfigure(1, weight=1)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def handle_dialog(self, result):
if result is None or result == "Cancel":
self.dialog.deactivate(result)
# Reset the molecules
for data in self._molecule_data:
if "widgets" in data:
widgets = data["widgets"]
for w in widgets.values():
w.destroy()
self._molecule_data = []
P = self.node.parameters
for molecule in P["molecules"].value:
_type = molecule["type"]
value = molecule["molecule"]
count = molecule["count"]
self._molecule_data.append(
{"type": _type, "molecule": value, "count": count}
)
super().handle_dialog(result)
return
if result == "Help":
# display help!!!
return
if result != "OK":
self.dialog.deactivate(result)
raise RuntimeError("Don't recognize dialog result '{}'".format(result))
self.dialog.deactivate(result)
# Shortcut for parameters
P = self.node.parameters
for key in P:
if key not in ("molecules",):
P[key].set_from_widget()
# And handle the molecules
molecules = []
for data in self._molecule_data:
widgets = data["widgets"]
molecules.append(
{
"type": data["type"],
"molecule": widgets["molecule"].get(),
"count": widgets["count"].get(),
}
)
P["molecules"].value = molecules
def add_molecule(self):
"""Add a new row to the molecule table."""
self._molecule_data.append({"type": "smiles", "molecule": "", "count": "1"})
self.reset_molecules()
def remove_molecule(self, row):
"""Remove a molecule entry from the table.
Parameters
----------
row : int
The row in the table to remove. Note the first molecule is at row 1.
"""
index = row - 1
data = self._molecule_data[index]
if "widgets" in data:
for w in data["widgets"].values():
w.destroy()
del self._molecule_data[index]
self.reset_molecules() | 0.601242 | 0.149252 |
import pytest
from stp_core.loop.eventually import eventually
from plenum.test.pool_transactions.conftest import looper
from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
from plenum.test.test_node import get_master_primary_node
def test_view_not_changed_when_short_disconnection(txnPoolNodeSet, looper,
sdk_pool_handle, sdk_wallet_client, tconf):
"""
When primary is disconnected but not long enough to trigger the timeout,
view change should not happen
"""
pr_node = get_master_primary_node(txnPoolNodeSet)
view_no = checkViewNoForNodes(txnPoolNodeSet)
lost_pr_calls = {node.name: node.spylog.count(
node.lost_master_primary.__name__) for node in txnPoolNodeSet
if node != pr_node}
prp_inst_chg_calls = {node.name: node.spylog.count(
node.propose_view_change.__name__) for node in txnPoolNodeSet
if node != pr_node}
recv_inst_chg_calls = {node.name: node.spylog.count(
node.view_changer.process_instance_change_msg.__name__) for node in txnPoolNodeSet
if node != pr_node}
def chk1():
# Check that non-primary nodes detects losing connection with
# primary
for node in txnPoolNodeSet:
if node != pr_node:
assert node.spylog.count(node.lost_master_primary.__name__) \
> lost_pr_calls[node.name]
def chk2():
# Schedule an instance change but do not send it
# since primary joins again
for node in txnPoolNodeSet:
if node != pr_node:
assert node.spylog.count(node.propose_view_change.__name__) \
> prp_inst_chg_calls[node.name]
assert node.view_changer.spylog.count(node.view_changer.process_instance_change_msg.__name__) \
== recv_inst_chg_calls[node.name]
# Disconnect master's primary
for node in txnPoolNodeSet:
if node != pr_node:
node.nodestack.getRemote(pr_node.nodestack.name).disconnect()
timeout = min(tconf.ToleratePrimaryDisconnection - 1, 1)
looper.run(eventually(chk1, retryWait=.2, timeout=timeout))
# Reconnect master's primary
for node in txnPoolNodeSet:
if node != pr_node:
node.nodestack.retryDisconnected()
looper.run(eventually(chk2, retryWait=.2, timeout=timeout + 1))
def chk3():
# Check the view does not change
with pytest.raises(AssertionError):
assert checkViewNoForNodes(txnPoolNodeSet) == view_no + 1
looper.run(eventually(chk3, retryWait=1, timeout=10))
# Send some requests and make sure the request execute
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) | plenum/test/view_change/test_view_not_changed_when_short_disconnection.py | import pytest
from stp_core.loop.eventually import eventually
from plenum.test.pool_transactions.conftest import looper
from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
from plenum.test.test_node import get_master_primary_node
def test_view_not_changed_when_short_disconnection(txnPoolNodeSet, looper,
sdk_pool_handle, sdk_wallet_client, tconf):
"""
When primary is disconnected but not long enough to trigger the timeout,
view change should not happen
"""
pr_node = get_master_primary_node(txnPoolNodeSet)
view_no = checkViewNoForNodes(txnPoolNodeSet)
lost_pr_calls = {node.name: node.spylog.count(
node.lost_master_primary.__name__) for node in txnPoolNodeSet
if node != pr_node}
prp_inst_chg_calls = {node.name: node.spylog.count(
node.propose_view_change.__name__) for node in txnPoolNodeSet
if node != pr_node}
recv_inst_chg_calls = {node.name: node.spylog.count(
node.view_changer.process_instance_change_msg.__name__) for node in txnPoolNodeSet
if node != pr_node}
def chk1():
# Check that non-primary nodes detects losing connection with
# primary
for node in txnPoolNodeSet:
if node != pr_node:
assert node.spylog.count(node.lost_master_primary.__name__) \
> lost_pr_calls[node.name]
def chk2():
# Schedule an instance change but do not send it
# since primary joins again
for node in txnPoolNodeSet:
if node != pr_node:
assert node.spylog.count(node.propose_view_change.__name__) \
> prp_inst_chg_calls[node.name]
assert node.view_changer.spylog.count(node.view_changer.process_instance_change_msg.__name__) \
== recv_inst_chg_calls[node.name]
# Disconnect master's primary
for node in txnPoolNodeSet:
if node != pr_node:
node.nodestack.getRemote(pr_node.nodestack.name).disconnect()
timeout = min(tconf.ToleratePrimaryDisconnection - 1, 1)
looper.run(eventually(chk1, retryWait=.2, timeout=timeout))
# Reconnect master's primary
for node in txnPoolNodeSet:
if node != pr_node:
node.nodestack.retryDisconnected()
looper.run(eventually(chk2, retryWait=.2, timeout=timeout + 1))
def chk3():
# Check the view does not change
with pytest.raises(AssertionError):
assert checkViewNoForNodes(txnPoolNodeSet) == view_no + 1
looper.run(eventually(chk3, retryWait=1, timeout=10))
# Send some requests and make sure the request execute
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) | 0.460532 | 0.298702 |
import json
import os
from logging import debug
from .cmdline.interpreter import SpaceCmdInterpreter
from . import model
class SpaceEngine(model.ModelQueryMixin):
def __init__(self, save_file, opts=None):
self.save_file = save_file
self.opts = opts
self.user = None
self.galaxy = None
model.update.delayed_event_trigger.CALLABLE = (
self.execute_delayed_events)
def __repr__(self):
return "{}(save file: {}, user: {}, galaxy: {})".format(
self.__class__.__name__, self.save_file, repr(self.user),
repr(self.galaxy))
def __getstate__(self):
user_state = None if self.user is None else self.user.__getstate__()
gxy_state = None if self.galaxy is None else self.galaxy.__getstate__()
return (self.save_file, user_state, gxy_state)
def __setstate__(self, state):
(self.save_file, user_state, galaxy_state) = state
self.user = None
self.galaxy = None
if user_state is not None:
self.user = model.User(name='')
self.user.__setstate__(user_state)
if galaxy_state is not None:
self.galaxy = model.Galaxy()
self.galaxy.__setstate__(galaxy_state)
def load(self):
'''Load game state directly. Useful when used on the interpreter'''
debug('Loading saved game')
if not os.path.exists(self.save_file):
debug('No save file to load.')
raise FileNotFoundError('No save file to load.')
with open(self.save_file, 'r') as sf:
state = json.load(sf)
self.__setstate__(state[1])
return state[0]
def save(self, current_object=None):
debug('Saving game')
with open(self.save_file, 'w') as fd:
current_obj_state = None
if current_object:
current_obj_state = current_object[1].name
state = (current_obj_state, self.__getstate__())
json.dump(state, fd)
def _system_callback(self, coords):
return self.galaxy.system(coords)
def new_game(self, new_game_info_cb):
"""Set up a new game.
:param new_game_info_cb: UI callable that will retrieve info from the
user and return a tuple of (username, home_planet_coords,
home_planet)
"""
try:
self.galaxy = model.Galaxy()
self.user = model.User(*new_game_info_cb(self._system_callback))
system = self.galaxy.system(self.user.planets[0])
planet = system.planets[int(self.user.planets[0].planet)]
planet.resources.ore = 25
planet.resources.metal = 60
planet.emperor = self.user.name
finally:
self.save()
def mock_new_game_info_cb(self, system_callback):
"""Mock callback for creating test gamestates"""
coord, name = model.Coord(), "<NAME>"
return (name, coord)
def run(self):
SpaceCmdInterpreter(self, self.opts.debug).start()
def execute_delayed_events(self):
debug('delayed actions happening') | lib/engine.py |
import json
import os
from logging import debug
from .cmdline.interpreter import SpaceCmdInterpreter
from . import model
class SpaceEngine(model.ModelQueryMixin):
def __init__(self, save_file, opts=None):
self.save_file = save_file
self.opts = opts
self.user = None
self.galaxy = None
model.update.delayed_event_trigger.CALLABLE = (
self.execute_delayed_events)
def __repr__(self):
return "{}(save file: {}, user: {}, galaxy: {})".format(
self.__class__.__name__, self.save_file, repr(self.user),
repr(self.galaxy))
def __getstate__(self):
user_state = None if self.user is None else self.user.__getstate__()
gxy_state = None if self.galaxy is None else self.galaxy.__getstate__()
return (self.save_file, user_state, gxy_state)
def __setstate__(self, state):
(self.save_file, user_state, galaxy_state) = state
self.user = None
self.galaxy = None
if user_state is not None:
self.user = model.User(name='')
self.user.__setstate__(user_state)
if galaxy_state is not None:
self.galaxy = model.Galaxy()
self.galaxy.__setstate__(galaxy_state)
def load(self):
'''Load game state directly. Useful when used on the interpreter'''
debug('Loading saved game')
if not os.path.exists(self.save_file):
debug('No save file to load.')
raise FileNotFoundError('No save file to load.')
with open(self.save_file, 'r') as sf:
state = json.load(sf)
self.__setstate__(state[1])
return state[0]
def save(self, current_object=None):
debug('Saving game')
with open(self.save_file, 'w') as fd:
current_obj_state = None
if current_object:
current_obj_state = current_object[1].name
state = (current_obj_state, self.__getstate__())
json.dump(state, fd)
def _system_callback(self, coords):
return self.galaxy.system(coords)
def new_game(self, new_game_info_cb):
"""Set up a new game.
:param new_game_info_cb: UI callable that will retrieve info from the
user and return a tuple of (username, home_planet_coords,
home_planet)
"""
try:
self.galaxy = model.Galaxy()
self.user = model.User(*new_game_info_cb(self._system_callback))
system = self.galaxy.system(self.user.planets[0])
planet = system.planets[int(self.user.planets[0].planet)]
planet.resources.ore = 25
planet.resources.metal = 60
planet.emperor = self.user.name
finally:
self.save()
def mock_new_game_info_cb(self, system_callback):
"""Mock callback for creating test gamestates"""
coord, name = model.Coord(), "<NAME>"
return (name, coord)
def run(self):
SpaceCmdInterpreter(self, self.opts.debug).start()
def execute_delayed_events(self):
debug('delayed actions happening') | 0.500732 | 0.080069 |
from enum import Enum
class Type:
""" Dummy base class to implement strong typed references
"""
def __init__(self):
pass
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
return obj
class IntegerType (Type):
""" integer values
"""
def __init__(self):
super(Type, self).__init__()
#: integer values
self.format = None
#: integer values
self.default = None
#: integer values
self.minimum = None
#: integer values
self.exclusiveMinimum = None
#: integer values
self.maximum = None
#: integer values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.format = IntegerTypeFormatEnum.valueForString(dict.get('format', None))
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class IntegerTypeFormatEnum(Enum):
INT32 = 'int32'
INT64 = 'int64'
@classmethod
def valueForString(cls, stringValue):
lowerStringValue = stringValue.lower() if stringValue is not None else None
if lowerStringValue is None:
return None
elif lowerStringValue == 'int32':
return IntegerTypeFormatEnum.INT32
elif lowerStringValue == 'int64':
return IntegerTypeFormatEnum.INT64
else:
return None
@classmethod
def valueAsString(cls, enumValue):
if enumValue is None:
return ''
elif enumValue == IntegerTypeFormatEnum.INT32:
return 'int32'
elif enumValue == IntegerTypeFormatEnum.INT64:
return 'int64'
else:
return ''
class NumberType (Type):
""" floating point values
"""
def __init__(self):
super(Type, self).__init__()
#: floating point values
self.format = None
#: floating point values
self.default = None
#: floating point values
self.minimum = None
#: floating point values
self.exclusiveMinimum = None
#: floating point values
self.maximum = None
#: floating point values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.format = NumberTypeFormatEnum.valueForString(dict.get('format', None))
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class NumberTypeFormatEnum(Enum):
FLOAT = 'float'
DOUBLE = 'double'
@classmethod
def valueForString(cls, stringValue):
lowerStringValue = stringValue.lower() if stringValue is not None else None
if lowerStringValue is None:
return None
elif lowerStringValue == 'float':
return NumberTypeFormatEnum.FLOAT
elif lowerStringValue == 'double':
return NumberTypeFormatEnum.DOUBLE
else:
return None
@classmethod
def valueAsString(cls, enumValue):
if enumValue is None:
return ''
elif enumValue == NumberTypeFormatEnum.FLOAT:
return 'float'
elif enumValue == NumberTypeFormatEnum.DOUBLE:
return 'double'
else:
return ''
class BooleanType (Type):
""" boolean values
"""
def __init__(self):
super(Type, self).__init__()
#: boolean values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class StringType (Type):
""" integer values
"""
def __init__(self):
super(Type, self).__init__()
#: integer values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class UuidType (Type):
""" UUID values
"""
def __init__(self):
super(Type, self).__init__()
#: UUID values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class EnumType (Type):
""" type for enum values - fixed value types
"""
def __init__(self):
super(Type, self).__init__()
#: type for enum values - fixed value types
self.version = None
#: type for enum values - fixed value types
self.name = None
#: type for enum values - fixed value types
self.domain = None
#: type for enum values - fixed value types
self.source = None
#: type for enum values - fixed value types
self.description = None
#: type for enum values - fixed value types
self.values = []
#: type for enum values - fixed value types
self.default = None
#: type for enum values - fixed value types
self.tags = []
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.version = dict.get('version', None)
obj.name = dict.get('name', None)
obj.domain = dict.get('domain', None)
obj.source = dict.get('source', None)
obj.description = dict.get('description', None)
arrayValues = dict.get('values', [])
for elemValues in arrayValues:
obj.values.append(elemValues)
obj.default = dict.get('default', None)
arrayTags = dict.get('tags', [])
for elemTags in arrayTags:
obj.tags.append(
Tag.dictToObject(elemTags))
return obj
class Tag:
""" a tag type
"""
def __init__(self):
#: a tag type
self.name = None
#: a tag type
self.value = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.name = dict.get('name', None)
obj.value = dict.get('value', None)
return obj
class DateType (Type):
    """ type for date values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a DateType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class DateTimeType (Type):
    """ type for timestamp values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a DateTimeType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class BytesType (Type):
    """ type for byte values, it will usually be rendered to a byte array
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value used when none is supplied
        self.default = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a BytesType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        return obj
class ComplexType (Type):
    """ complex type description
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: schema version of the type definition
        self.version = None
        #: type name
        self.name = None
        #: human-readable description
        self.description = None
        #: domain the type belongs to
        self.domain = None
        #: origin of the definition
        self.source = None
        #: base type, if any (ComplexType)
        self.extendsType = None
        #: types that extend this one
        self.extendedBy = []
        #: types that reference this one
        self.referencedBy = []
        #: Property instances belonging to the type
        self.properties = []
        #: attached Tag instances
        self.tags = []

    @classmethod
    def dictToObject(cls, dict):
        """Build a ComplexType from a plain dict (recursive, None-safe).

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.version = dict.get('version', None)
        obj.name = dict.get('name', None)
        obj.description = dict.get('description', None)
        obj.domain = dict.get('domain', None)
        obj.source = dict.get('source', None)
        obj.extendsType = ComplexType.dictToObject(dict.get('extendsType', None))
        for elem in dict.get('extendedBy', []):
            obj.extendedBy.append(ComplexType.dictToObject(elem))
        for elem in dict.get('referencedBy', []):
            obj.referencedBy.append(ComplexType.dictToObject(elem))
        for elem in dict.get('properties', []):
            obj.properties.append(Property.dictToObject(elem))
        for elem in dict.get('tags', []):
            obj.tags.append(Tag.dictToObject(elem))
        return obj
class Property:
    """ a property of a type
    """
    def __init__(self):
        #: property name
        self.name = None
        #: True if the property holds a list of values
        self.isArray = False
        #: minimum number of array items (None = unconstrained)
        self.arrayMinItems = None
        #: maximum number of array items (None = unconstrained)
        self.arrayMaxItems = None
        #: whether array items must be unique
        self.arrayUniqueItems = None
        #: Type instance describing the property's value
        self.type = None
        #: attached Tag instances
        self.tags = []
        #: human-readable description
        self.description = None
        #: whether the property is mandatory
        self.required = False
        #: ordering hint for the property
        self.ordinal = None
        #: whether the property is (part of) the key
        self.isKey = False
        #: whether the property is a visual key
        self.isVisualKey = False
        #: related Type for a foreign-key reference
        self.foreignKey = None
        #: format hint for the value
        self.format = None
    @classmethod
    def dictToObject(cls, dict):
        """Build a Property from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.name = dict.get('name', None)
        obj.isArray = dict.get('isArray', False)
        obj.arrayMinItems = dict.get('arrayMinItems', None)
        obj.arrayMaxItems = dict.get('arrayMaxItems', None)
        obj.arrayUniqueItems = dict.get('arrayUniqueItems', None)
        obj.type = Type.dictToObject(dict.get('type', None))
        arrayTags = dict.get('tags', [])
        for elemTags in arrayTags:
            obj.tags.append(
                Tag.dictToObject(elemTags))
        obj.description = dict.get('description', None)
        obj.required = dict.get('required', False)
        obj.ordinal = dict.get('ordinal', None)
        obj.isKey = dict.get('isKey', False)
        obj.isVisualKey = dict.get('isVisualKey', False)
        obj.foreignKey = Type.dictToObject(dict.get('foreignKey', None))
        obj.format = dict.get('format', None)
return obj | yacg/model/model.py |
from enum import Enum
class Type:
    """ Dummy base class to implement strong typed references
    """

    def __init__(self):
        # No state of its own; subclasses add their fields.
        pass

    @classmethod
    def dictToObject(cls, dict):
        """Return a fresh instance, or None when the input dict is None."""
        return None if dict is None else cls()
class IntegerType (Type):
    """ integer values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: int32/int64 format hint (IntegerTypeFormatEnum)
        self.format = None
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build an IntegerType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.format = IntegerTypeFormatEnum.valueForString(dict.get('format', None))
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class IntegerTypeFormatEnum(Enum):
    INT32 = 'int32'
    INT64 = 'int64'

    @classmethod
    def valueForString(cls, stringValue):
        """Map a case-insensitive string to an enum member (None if unknown)."""
        if stringValue is None:
            return None
        normalized = stringValue.lower()
        for member in cls:
            if member.value == normalized:
                return member
        return None

    @classmethod
    def valueAsString(cls, enumValue):
        """Inverse of valueForString; None/unknown input maps to ''."""
        return enumValue.value if isinstance(enumValue, cls) else ''
class NumberType (Type):
    """ floating point values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: float/double format hint (NumberTypeFormatEnum)
        self.format = None
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a NumberType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.format = NumberTypeFormatEnum.valueForString(dict.get('format', None))
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class NumberTypeFormatEnum(Enum):
    FLOAT = 'float'
    DOUBLE = 'double'

    @classmethod
    def valueForString(cls, stringValue):
        """Map a case-insensitive string to an enum member (None if unknown)."""
        if stringValue is None:
            return None
        normalized = stringValue.lower()
        for member in cls:
            if member.value == normalized:
                return member
        return None

    @classmethod
    def valueAsString(cls, enumValue):
        """Inverse of valueForString; None/unknown input maps to ''."""
        return enumValue.value if isinstance(enumValue, cls) else ''
class BooleanType (Type):
    """ boolean values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value used when none is supplied
        self.default = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a BooleanType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        return obj
class StringType (Type):
    """ string values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()``, which skips Type in
        # the MRO and resolves to object; call the direct base initializer.
        # Also fixed the docstring, which wrongly said "integer values".
        super().__init__()
        #: default value used when none is supplied
        self.default = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a StringType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name;
        kept for backward compatibility with keyword callers.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        return obj
class UuidType (Type):
    """ UUID values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value used when none is supplied
        self.default = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a UuidType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        return obj
class EnumType (Type):
    """ type for enum values - fixed value types
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: schema version of the enum definition
        self.version = None
        #: enum name
        self.name = None
        #: domain the enum belongs to
        self.domain = None
        #: origin of the definition
        self.source = None
        #: human-readable description
        self.description = None
        #: allowed literal values
        self.values = []
        #: default value
        self.default = None
        #: attached Tag instances
        self.tags = []

    @classmethod
    def dictToObject(cls, dict):
        """Build an EnumType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.version = dict.get('version', None)
        obj.name = dict.get('name', None)
        obj.domain = dict.get('domain', None)
        obj.source = dict.get('source', None)
        obj.description = dict.get('description', None)
        # Values are plain literals; tags are nested objects.
        obj.values = list(dict.get('values', []))
        obj.default = dict.get('default', None)
        for elemTags in dict.get('tags', []):
            obj.tags.append(Tag.dictToObject(elemTags))
        return obj
class Tag:
    """ a tag type
    """

    def __init__(self):
        #: tag name
        self.name = None
        #: tag value
        self.value = None

    @classmethod
    def dictToObject(cls, dict):
        """Create a Tag from a plain dict; returns None for None input."""
        if dict is None:
            return None
        tag = cls()
        tag.name = dict.get('name')
        tag.value = dict.get('value')
        return tag
class DateType (Type):
    """ type for date values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a DateType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class DateTimeType (Type):
    """ type for timestamp values
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value
        self.default = None
        #: lower bound (inclusive unless exclusiveMinimum)
        self.minimum = None
        #: whether ``minimum`` is exclusive
        self.exclusiveMinimum = None
        #: upper bound (inclusive unless exclusiveMaximum)
        self.maximum = None
        #: whether ``maximum`` is exclusive
        self.exclusiveMaximum = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a DateTimeType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        obj.minimum = dict.get('minimum', None)
        obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
        obj.maximum = dict.get('maximum', None)
        obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
        return obj
class BytesType (Type):
    """ type for byte values, it will usually be rendered to a byte array
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: default value used when none is supplied
        self.default = None

    @classmethod
    def dictToObject(cls, dict):
        """Build a BytesType from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.default = dict.get('default', None)
        return obj
class ComplexType (Type):
    """ complex type description
    """

    def __init__(self):
        # BUG FIX: was ``super(Type, self).__init__()`` (skips Type in the MRO).
        super().__init__()
        #: schema version of the type definition
        self.version = None
        #: type name
        self.name = None
        #: human-readable description
        self.description = None
        #: domain the type belongs to
        self.domain = None
        #: origin of the definition
        self.source = None
        #: base type, if any (ComplexType)
        self.extendsType = None
        #: types that extend this one
        self.extendedBy = []
        #: types that reference this one
        self.referencedBy = []
        #: Property instances belonging to the type
        self.properties = []
        #: attached Tag instances
        self.tags = []

    @classmethod
    def dictToObject(cls, dict):
        """Build a ComplexType from a plain dict (recursive, None-safe).

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.version = dict.get('version', None)
        obj.name = dict.get('name', None)
        obj.description = dict.get('description', None)
        obj.domain = dict.get('domain', None)
        obj.source = dict.get('source', None)
        obj.extendsType = ComplexType.dictToObject(dict.get('extendsType', None))
        for elem in dict.get('extendedBy', []):
            obj.extendedBy.append(ComplexType.dictToObject(elem))
        for elem in dict.get('referencedBy', []):
            obj.referencedBy.append(ComplexType.dictToObject(elem))
        for elem in dict.get('properties', []):
            obj.properties.append(Property.dictToObject(elem))
        for elem in dict.get('tags', []):
            obj.tags.append(Tag.dictToObject(elem))
        return obj
class Property:
    """ a property of a type
    """
    def __init__(self):
        #: property name
        self.name = None
        #: True if the property holds a list of values
        self.isArray = False
        #: minimum number of array items (None = unconstrained)
        self.arrayMinItems = None
        #: maximum number of array items (None = unconstrained)
        self.arrayMaxItems = None
        #: whether array items must be unique
        self.arrayUniqueItems = None
        #: Type instance describing the property's value
        self.type = None
        #: attached Tag instances
        self.tags = []
        #: human-readable description
        self.description = None
        #: whether the property is mandatory
        self.required = False
        #: ordering hint for the property
        self.ordinal = None
        #: whether the property is (part of) the key
        self.isKey = False
        #: whether the property is a visual key
        self.isVisualKey = False
        #: related Type for a foreign-key reference
        self.foreignKey = None
        #: format hint for the value
        self.format = None
    @classmethod
    def dictToObject(cls, dict):
        """Build a Property from a plain dict; returns None for None input.

        NOTE: the ``dict`` parameter shadows the builtin of the same name.
        """
        if dict is None:
            return None
        obj = cls()
        obj.name = dict.get('name', None)
        obj.isArray = dict.get('isArray', False)
        obj.arrayMinItems = dict.get('arrayMinItems', None)
        obj.arrayMaxItems = dict.get('arrayMaxItems', None)
        obj.arrayUniqueItems = dict.get('arrayUniqueItems', None)
        obj.type = Type.dictToObject(dict.get('type', None))
        arrayTags = dict.get('tags', [])
        for elemTags in arrayTags:
            obj.tags.append(
                Tag.dictToObject(elemTags))
        obj.description = dict.get('description', None)
        obj.required = dict.get('required', False)
        obj.ordinal = dict.get('ordinal', None)
        obj.isKey = dict.get('isKey', False)
        obj.isVisualKey = dict.get('isVisualKey', False)
        obj.foreignKey = Type.dictToObject(dict.get('foreignKey', None))
        obj.format = dict.get('format', None)
return obj | 0.858807 | 0.361418 |
import sys
import argparse
from mdt.database import (
load_rxnorm,
load_meps,
load_fda,
check_table,
)
from mdt.yamlmanager import (
create_mdt_settings,
create_module_settings,
get_settings,
)
from mdt.utils import (
get_rxcui_ingredient_df,
get_rxcui_product_df,
get_rxcui_ndc_df,
get_meps_rxcui_ndc_df,
generate_module_csv,
generate_module_json,
)
def init_db(args):
    """Load the RxNorm, MEPS and FDA reference tables if absent, then write settings."""
    loaders = (
        ('rxcui_ndc', load_rxnorm),
        ('meps_demographics', load_meps),
        ('package', load_fda),
    )
    # Each loader runs only when its marker table is missing.
    for table_name, loader in loaders:
        if check_table(table_name) is False:
            loader()
    print('All Tables are loaded')
    create_mdt_settings()
def module_create(args):
    """Create the template settings directory for the requested module."""
    create_module_settings(vars(args)['module_name'])
def module_build(args):
    """Build a Synthea module: RxNorm lookups -> MEPS join -> CSV + JSON output."""
    module_name = vars(args)['module_name']
    settings = get_settings(module_name)
    # Medications containing one of the configured ingredient RXCUIs
    # (duplicate NDCs possible, MINs may be absent).
    ingredient_df = get_rxcui_ingredient_df(settings)
    # Medications containing one of the product RXCUIs found above
    # (INs and MINs, still duplicate NDCs).
    product_df = get_rxcui_product_df(ingredient_df, settings)
    # Deduplicate per NDC with a window function, preferring MIN over IN;
    # also applies the dose-form and ingredient-term-type filters when set.
    # https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_sql.html#top-n-rows-per-group
    ndc_df = get_rxcui_ndc_df(product_df, module_name, settings)
    # Join MEPS demographics onto the deduplicated NDC table.
    meps_df = get_meps_rxcui_ndc_df(ndc_df, module_name, settings)
    # Emit the distribution CSVs, then the Synthea module JSON.
    ingred_totals_df, prod_totals_df = generate_module_csv(meps_df, module_name, settings)
    generate_module_json(meps_df, ingred_totals_df, prod_totals_df, module_name, settings)
def main():
    """CLI entry point: build the argparse tree and dispatch to a subcommand."""
    # Main command and child command setup
    parser = argparse.ArgumentParser(
        description='Medication Diversification Tool for Synthea'
    )
    subparsers = parser.add_subparsers(
        title='Commands',
        metavar='',
    )
    # Init command parsers
    init_parser = subparsers.add_parser(
        'init',
        description='Download MEPS, RxNorm data and set up the database',
        help='Initialize MDT DB'
    )
    init_parser.set_defaults(func=init_db)
    # Module command parsers
    module_parser = subparsers.add_parser(
        'module',
        description='Module-specific commands',
        help='Module-specific commands'
    )
    module_parser.add_argument(
        '--module-name',
        '-n',
        help='Specific name of module',
    )
    module_subparser = module_parser.add_subparsers(
        title='Commands',
        metavar='',
        dest='module_commands'
    )
    create_parser = module_subparser.add_parser(
        'create',
        description='Create template module directory',
        help='Create template module directory'
    )
    create_parser.set_defaults(func=module_create)
    build_parser = module_subparser.add_parser(
        'build',
        description='Build Synthea module',
        help='Build Synthea module'
    )
    build_parser.set_defaults(func=module_build)
    # No arguments at all: show the top-level help instead of an error.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    args = parser.parse_args()
    try:
        args.func(args)
    except AttributeError:
        # ``args.func`` is missing when 'module' was given without a
        # subcommand; in that case show the module-level help.
        for key, _ in vars(args).items():
            if key == 'module_commands':
module_parser.print_help() | src/mdt/cli.py | import sys
import argparse
from mdt.database import (
load_rxnorm,
load_meps,
load_fda,
check_table,
)
from mdt.yamlmanager import (
create_mdt_settings,
create_module_settings,
get_settings,
)
from mdt.utils import (
get_rxcui_ingredient_df,
get_rxcui_product_df,
get_rxcui_ndc_df,
get_meps_rxcui_ndc_df,
generate_module_csv,
generate_module_json,
)
def init_db(args):
    """Load the RxNorm, MEPS and FDA reference tables if absent, then write settings."""
    loaders = (
        ('rxcui_ndc', load_rxnorm),
        ('meps_demographics', load_meps),
        ('package', load_fda),
    )
    # Each loader runs only when its marker table is missing.
    for table_name, loader in loaders:
        if check_table(table_name) is False:
            loader()
    print('All Tables are loaded')
    create_mdt_settings()
def module_create(args):
    """Create the template settings directory for the requested module."""
    create_module_settings(vars(args)['module_name'])
def module_build(args):
    """Build a Synthea module: RxNorm lookups -> MEPS join -> CSV + JSON output."""
    module_name = vars(args)['module_name']
    settings = get_settings(module_name)
    # Medications containing one of the configured ingredient RXCUIs
    # (duplicate NDCs possible, MINs may be absent).
    ingredient_df = get_rxcui_ingredient_df(settings)
    # Medications containing one of the product RXCUIs found above
    # (INs and MINs, still duplicate NDCs).
    product_df = get_rxcui_product_df(ingredient_df, settings)
    # Deduplicate per NDC with a window function, preferring MIN over IN;
    # also applies the dose-form and ingredient-term-type filters when set.
    # https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_sql.html#top-n-rows-per-group
    ndc_df = get_rxcui_ndc_df(product_df, module_name, settings)
    # Join MEPS demographics onto the deduplicated NDC table.
    meps_df = get_meps_rxcui_ndc_df(ndc_df, module_name, settings)
    # Emit the distribution CSVs, then the Synthea module JSON.
    ingred_totals_df, prod_totals_df = generate_module_csv(meps_df, module_name, settings)
    generate_module_json(meps_df, ingred_totals_df, prod_totals_df, module_name, settings)
def main():
    """CLI entry point: build the argparse tree and dispatch to a subcommand."""
    # Main command and child command setup
    parser = argparse.ArgumentParser(
        description='Medication Diversification Tool for Synthea'
    )
    subparsers = parser.add_subparsers(
        title='Commands',
        metavar='',
    )
    # Init command parsers
    init_parser = subparsers.add_parser(
        'init',
        description='Download MEPS, RxNorm data and set up the database',
        help='Initialize MDT DB'
    )
    init_parser.set_defaults(func=init_db)
    # Module command parsers
    module_parser = subparsers.add_parser(
        'module',
        description='Module-specific commands',
        help='Module-specific commands'
    )
    module_parser.add_argument(
        '--module-name',
        '-n',
        help='Specific name of module',
    )
    module_subparser = module_parser.add_subparsers(
        title='Commands',
        metavar='',
        dest='module_commands'
    )
    create_parser = module_subparser.add_parser(
        'create',
        description='Create template module directory',
        help='Create template module directory'
    )
    create_parser.set_defaults(func=module_create)
    build_parser = module_subparser.add_parser(
        'build',
        description='Build Synthea module',
        help='Build Synthea module'
    )
    build_parser.set_defaults(func=module_build)
    # No arguments at all: show the top-level help instead of an error.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    args = parser.parse_args()
    try:
        args.func(args)
    except AttributeError:
        # ``args.func`` is missing when 'module' was given without a
        # subcommand; in that case show the module-level help.
        for key, _ in vars(args).items():
            if key == 'module_commands':
module_parser.print_help() | 0.26971 | 0.110375 |
import pyperclip
from password import User
import random
import string
class Credential:
'''
Class to create account credentials, generate new passwords and save user information
'''
list_of_credentials =[]
user_credentials_list = []
def __init__(self,username,credential_account,account_name,password):
'''
Method to define the properties for each user object.
'''
self.username = username
self.credential_account = credential_account = credential_account
self.account_name = account_name
self.password = password
@classmethod
def check_user(cls,first_name,password):
'''
Method that checks if the name and password entered exist in the users_list
'''
current_user = ''
for user in User.users_list:
if (user.first_name == first_name and user.password == password):
current_user = user.first_name
return current_user
def save_credentials(self):
'''
Function to save a newly created user credentials
'''
Credential.list_of_credentials.append(self)
@classmethod
def delete_credentials(self):
'''
Function to delete credentials
'''
Credential.list_of_credentials.remove(self)
def generate_password(size=8, char=string.ascii_lowercase+string.ascii_lowercase+string.digits):
'''
Function to generate a secure 8 character password for a user.
'''
password_gen=''.join(random.choice(char) for _ in range(size))
return password_gen
@classmethod
def display_credentials(cls,username):
'''
Method to display the list of credentials saved.
'''
user_credentials_list = []
for credential in cls.list_of_credentials:
if credential.username == username:
user_credentials_list.append(credential)
return user_credentials_list
@classmethod
def find_by_credential_name(cls, credential_account):
'''
Method that takes in a credential_account and returns a credential that matches that credential_account.
'''
for credential in cls.list_of_credentials:
if credential.credential_account == credential_account:
return credential
return False
@classmethod
def copy_credential(cls,credential_account):
'''
Method that copies a credential to the clipboard.
'''
try:
find_credential = Credential.find_by_credential_name(credential_account)
print(f'Your Password for {credential_account} has been copied. You can paste it anywhere now.')
return pyperclip.copy(find_credential.password)
except AttributeError:
return "Invalid credential name" | credential.py | import pyperclip
from password import User
import random
import string
class Credential:
'''
Class to create account credentials, generate new passwords and save user information
'''
list_of_credentials =[]
user_credentials_list = []
def __init__(self,username,credential_account,account_name,password):
'''
Method to define the properties for each user object.
'''
self.username = username
self.credential_account = credential_account = credential_account
self.account_name = account_name
self.password = password
@classmethod
def check_user(cls,first_name,password):
'''
Method that checks if the name and password entered exist in the users_list
'''
current_user = ''
for user in User.users_list:
if (user.first_name == first_name and user.password == password):
current_user = user.first_name
return current_user
def save_credentials(self):
'''
Function to save a newly created user credentials
'''
Credential.list_of_credentials.append(self)
@classmethod
def delete_credentials(self):
'''
Function to delete credentials
'''
Credential.list_of_credentials.remove(self)
def generate_password(size=8, char=string.ascii_lowercase+string.ascii_lowercase+string.digits):
'''
Function to generate a secure 8 character password for a user.
'''
password_gen=''.join(random.choice(char) for _ in range(size))
return password_gen
@classmethod
def display_credentials(cls,username):
'''
Method to display the list of credentials saved.
'''
user_credentials_list = []
for credential in cls.list_of_credentials:
if credential.username == username:
user_credentials_list.append(credential)
return user_credentials_list
@classmethod
def find_by_credential_name(cls, credential_account):
'''
Method that takes in a credential_account and returns a credential that matches that credential_account.
'''
for credential in cls.list_of_credentials:
if credential.credential_account == credential_account:
return credential
return False
@classmethod
def copy_credential(cls,credential_account):
'''
Method that copies a credential to the clipboard.
'''
try:
find_credential = Credential.find_by_credential_name(credential_account)
print(f'Your Password for {credential_account} has been copied. You can paste it anywhere now.')
return pyperclip.copy(find_credential.password)
except AttributeError:
return "Invalid credential name" | 0.241847 | 0.127245 |
from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils.timezone import now
from django.views.decorators.http import require_http_methods
from api.utils import api_login_required, handle_api_errors
from core.admin_menus import AdminMenuItem
from core.csv_export import CSV_EXPORT_FORMATS, csv_response
from core.models import Organization
from core.sort_and_filter import Filter
from core.tabs import Tab
from core.utils import initialize_form, url
from event_log.utils import emit
from tickets.utils import format_price
from ..forms import MemberForm, MembershipForm
from ..helpers import membership_admin_required
from ..models import STATE_CHOICES, Membership, MembershipFeePayment
# Export formats offered in the members view UI (value, Finnish label).
EXPORT_FORMATS = [
    ('html', 'Tulostettava versio'),
    ('xlsx', 'Excel'),
    ('csv', 'CSV'),
]
# Finnish page titles for each membership-state filter ("export type").
EXPORT_TYPE_VERBOSE = dict(
    approval='Hyväksyntää odottavat hakemukset',
    discharged='Erotetut jäsenet',
    declined='Hylätyt jäsenhakemukset',
    in_effect='Jäsenluettelo',
    all='Jäsenluettelo',
)
# Pug templates: on-screen rendering vs. the printable HTML export.
HTML_TEMPLATES = dict(
    screen='membership_admin_members_view.pug',
    html='membership_admin_export_html_view.pug',
)
@membership_admin_required
@require_http_methods(['GET', 'HEAD', 'POST'])
def membership_admin_members_view(request, vars, organization, format='screen'):
    """Membership admin list view: filtering, bulk approval and file export.

    ``format`` selects the output: keys of HTML_TEMPLATES render templates,
    CSV_EXPORT_FORMATS values go through the CSV export machinery.
    """
    memberships = organization.memberships.all().select_related('person')
    num_all_members = memberships.count()
    # Filter by membership state (?state=...).
    state_filters = Filter(request, 'state').add_choices('state', STATE_CHOICES)
    memberships = state_filters.filter_queryset(memberships)
    memberships = memberships.order_by('person__surname', 'person__official_first_names')
    all_filters = [state_filters]
    current_term = organization.membership_organization_meta.get_current_term()
    if current_term:
        # Paid/unpaid filter for the current term (?paid=1/0).
        payment_filters = Filter(request, 'paid')
        paymentinator = lambda is_paid: lambda member: member.get_payment_for_term().is_paid == is_paid
        payment_filters.add('1', 'Maksettu', paymentinator(True))
        payment_filters.add('0', 'Ei maksettu', paymentinator(False))
        memberships = payment_filters.filter_queryset(memberships)
        all_filters.append(payment_filters)
    else:
        # Current term missing: warn the admin and skip payment filtering.
        messages.warning(request, 'Nykyisen toimikauden tiedot puuttuvat. Syötä tiedot Toimikauden tiedot -näkymässä.')
        payment_filters = None
    filter_active = any(f.selected_slug != f.default for f in all_filters)
    # POST while viewing pending applications = the "approve all" button.
    if request.method == 'POST' and state_filters.selected_slug == 'approval':
        # PLEASE DON'T: locally cached objects do not get updated and apply_state does not do the needful
        # memberships.update(state='in_effect')
        # TODO encap in Membership
        for membership in memberships:
            membership.state = 'in_effect'
            membership.save()
            membership.apply_state()
        messages.success(request, 'Hyväksyntää odottavat jäsenhakemukset hyväksyttiin.')
        return redirect('membership_admin_members_view', organization.slug)
    export_type = state_filters.selected_slug or 'all'
    export_type_verbose = EXPORT_TYPE_VERBOSE[export_type]
    title = '{organization.name} – {export_type_verbose}'.format(
        organization=organization,
        export_type_verbose=export_type_verbose,
    )
    vars.update(
        show_approve_all_button=state_filters.selected_slug == 'approval',
        memberships=memberships,
        num_members=len(memberships),
        num_all_members=num_all_members,
        state_filters=state_filters,
        payment_filters=payment_filters,
        filter_active=filter_active,
        css_to_show_filter_panel='in' if filter_active else '',
        export_formats=EXPORT_FORMATS,
        now=now(),
        title=title,
        current_term=current_term,
    )
    if format in HTML_TEMPLATES:
        return render(request, HTML_TEMPLATES[format], vars)
    elif format in CSV_EXPORT_FORMATS:
        filename = "{organization.slug}_members_{timestamp}.{format}".format(
            organization=organization,
            timestamp=now().strftime('%Y%m%d%H%M%S'),
            format=format,
        )
        # Person data leaves the system: record that in the event log.
        emit('core.person.exported', request=request, organization=organization)
        return csv_response(organization, Membership, memberships,
            dialect=format,
            filename=filename,
            m2m_mode='separate_columns',
        )
    else:
raise NotImplementedError(format) | membership/views/membership_admin_members_view.py | from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils.timezone import now
from django.views.decorators.http import require_http_methods
from api.utils import api_login_required, handle_api_errors
from core.admin_menus import AdminMenuItem
from core.csv_export import CSV_EXPORT_FORMATS, csv_response
from core.models import Organization
from core.sort_and_filter import Filter
from core.tabs import Tab
from core.utils import initialize_form, url
from event_log.utils import emit
from tickets.utils import format_price
from ..forms import MemberForm, MembershipForm
from ..helpers import membership_admin_required
from ..models import STATE_CHOICES, Membership, MembershipFeePayment
# Export formats offered in the members view UI (value, Finnish label).
EXPORT_FORMATS = [
    ('html', 'Tulostettava versio'),
    ('xlsx', 'Excel'),
    ('csv', 'CSV'),
]
# Finnish page titles for each membership-state filter ("export type").
EXPORT_TYPE_VERBOSE = dict(
    approval='Hyväksyntää odottavat hakemukset',
    discharged='Erotetut jäsenet',
    declined='Hylätyt jäsenhakemukset',
    in_effect='Jäsenluettelo',
    all='Jäsenluettelo',
)
# Pug templates: on-screen rendering vs. the printable HTML export.
HTML_TEMPLATES = dict(
    screen='membership_admin_members_view.pug',
    html='membership_admin_export_html_view.pug',
)
@membership_admin_required
@require_http_methods(['GET', 'HEAD', 'POST'])
def membership_admin_members_view(request, vars, organization, format='screen'):
    """
    List, filter, mass-approve and export an organization's members.

    ``format`` selects the output: keys of HTML_TEMPLATES render a template,
    keys of CSV_EXPORT_FORMATS produce a spreadsheet download, anything else
    raises NotImplementedError. A POST while the 'approval' state filter is
    selected approves all currently listed membership applications.
    """
    memberships = organization.memberships.all().select_related('person')
    num_all_members = memberships.count()
    # Filter by membership state (from the request query string), then order
    # alphabetically for display.
    state_filters = Filter(request, 'state').add_choices('state', STATE_CHOICES)
    memberships = state_filters.filter_queryset(memberships)
    memberships = memberships.order_by('person__surname', 'person__official_first_names')
    all_filters = [state_filters]
    current_term = organization.membership_organization_meta.get_current_term()
    if current_term:
        # Paid/unpaid filter for the current term. The factory lambda binds
        # the expected payment state into a per-member predicate.
        payment_filters = Filter(request, 'paid')
        paymentinator = lambda is_paid: lambda member: member.get_payment_for_term().is_paid == is_paid
        payment_filters.add('1', 'Maksettu', paymentinator(True))
        payment_filters.add('0', 'Ei maksettu', paymentinator(False))
        memberships = payment_filters.filter_queryset(memberships)
        all_filters.append(payment_filters)
    else:
        # No current term configured: warn the admin and skip payment filtering.
        messages.warning(request, 'Nykyisen toimikauden tiedot puuttuvat. Syötä tiedot Toimikauden tiedot -näkymässä.')
        payment_filters = None
    filter_active = any(f.selected_slug != f.default for f in all_filters)
    if request.method == 'POST' and state_filters.selected_slug == 'approval':
        # PLEASE DON'T: locally cached objects do not get updated and apply_state does not do the needful
        # memberships.update(state='in_effect')
        # TODO encap in Membership
        for membership in memberships:
            membership.state = 'in_effect'
            membership.save()
            membership.apply_state()
        messages.success(request, 'Hyväksyntää odottavat jäsenhakemukset hyväksyttiin.')
        return redirect('membership_admin_members_view', organization.slug)
    # Title reflects the active state filter ('all' when no filter selected).
    export_type = state_filters.selected_slug or 'all'
    export_type_verbose = EXPORT_TYPE_VERBOSE[export_type]
    title = '{organization.name} – {export_type_verbose}'.format(
        organization=organization,
        export_type_verbose=export_type_verbose,
    )
    vars.update(
        show_approve_all_button=state_filters.selected_slug == 'approval',
        memberships=memberships,
        num_members=len(memberships),
        num_all_members=num_all_members,
        state_filters=state_filters,
        payment_filters=payment_filters,
        filter_active=filter_active,
        css_to_show_filter_panel='in' if filter_active else '',
        export_formats=EXPORT_FORMATS,
        now=now(),
        title=title,
        current_term=current_term,
    )
    if format in HTML_TEMPLATES:
        return render(request, HTML_TEMPLATES[format], vars)
    elif format in CSV_EXPORT_FORMATS:
        filename = "{organization.slug}_members_{timestamp}.{format}".format(
            organization=organization,
            timestamp=now().strftime('%Y%m%d%H%M%S'),
            format=format,
        )
        # Audit-log the export of personal data before serving the file.
        emit('core.person.exported', request=request, organization=organization)
        return csv_response(organization, Membership, memberships,
            dialect=format,
            filename=filename,
            m2m_mode='separate_columns',
        )
    else:
        raise NotImplementedError(format)
from pathlib import Path
from os.path import exists, dirname
from os import remove, mkdir
import logging
import shutil
from policy_sentry.shared.constants import (
HOME,
CONFIG_DIRECTORY,
DATABASE_FILE_NAME,
HTML_DIRECTORY_PATH,
HTML_DATA_DIRECTORY_SUBFOLDER,
)
from policy_sentry.util.file import create_directory_if_it_doesnt_exist
logger = logging.getLogger(__name__)
def create_policy_sentry_config_directory():
    """
    Ensure the $HOME/.policy_sentry/ config directory exists and make room
    for a fresh database file, deleting any previous one.

    :return: the path of the database file
    """
    print("Creating the database...")
    config_directory_path = HOME + CONFIG_DIRECTORY
    database_file_path = config_directory_path + DATABASE_FILE_NAME
    logger.debug("We will store the new database here: %s", database_file_path)
    if exists(database_file_path):
        # A stale database is present; remove it so a fresh one can be built.
        remove(database_file_path)
    elif not exists(config_directory_path):
        # Neither the database nor its parent config directory exists yet.
        mkdir(config_directory_path)
    return database_file_path
def create_html_docs_directory():
    """
    Copy the bundled AWS HTML documentation into the config directory.

    Copies the HTML files shipped with the pip package into their own folder
    under CONFIG_DIRECTORY, plus the links.yml file (a mapping of services to
    relevant HTML links in the AWS docs). Essentially:
        mkdir -p ~/.policy_sentry/data/docs
        cp -r $MODULE_DIR/policy_sentry/shared/data/docs ~/.policy_sentry/data/docs
    :return:
    """
    create_directory_if_it_doesnt_exist(HTML_DIRECTORY_PATH)
    # Resolve the package's ./shared directory relative to this module.
    package_shared_dir = str(Path(dirname(__file__)).parent) + "/shared"
    source_html_docs = package_shared_dir + HTML_DATA_DIRECTORY_SUBFOLDER
    logger.debug(source_html_docs)
    # Replace any previous copy wholesale so stale docs do not linger.
    if exists(HTML_DIRECTORY_PATH):
        shutil.rmtree(HTML_DIRECTORY_PATH)
    shutil.copytree(source_html_docs, HTML_DIRECTORY_PATH)
    # Mirror the service -> AWS-docs link mapping into the config directory.
    source_links_file = package_shared_dir + "/data/" + "links.yml"
    destination_links_file = HOME + CONFIG_DIRECTORY + "links.yml"
    shutil.copy(source_links_file, destination_links_file)
from os.path import exists, dirname
from os import remove, mkdir
import logging
import shutil
from policy_sentry.shared.constants import (
HOME,
CONFIG_DIRECTORY,
DATABASE_FILE_NAME,
HTML_DIRECTORY_PATH,
HTML_DATA_DIRECTORY_SUBFOLDER,
)
from policy_sentry.util.file import create_directory_if_it_doesnt_exist
logger = logging.getLogger(__name__)
def create_policy_sentry_config_directory():
"""
Creates a config directory at $HOME/.policy_sentry/
:return: the path of the database file
"""
print("Creating the database...")
database_file_path = HOME + CONFIG_DIRECTORY + DATABASE_FILE_NAME
logger.debug("We will store the new database here: %s", database_file_path)
# If the database file already exists
if exists(database_file_path):
remove(database_file_path)
elif exists(HOME + CONFIG_DIRECTORY):
pass
# If the config directory does not exist
else:
mkdir(HOME + CONFIG_DIRECTORY)
return database_file_path
def create_html_docs_directory():
"""
Copies the HTML files from the pip package over to its own folder in the CONFIG_DIRECTORY.
Also copies over the links.yml file, which is a mapping of services and relevant HTML links in the AWS docs.
Essentially:
mkdir -p ~/.policy_sentry/data/docs
cp -r $MODULE_DIR/policy_sentry/shared/data/docs ~/.policy_sentry/data/docs
:return:
"""
create_directory_if_it_doesnt_exist(HTML_DIRECTORY_PATH)
# Copy from the existing html docs folder - the path ./policy_sentry/shared/data/docs within this repository
# existing_html_docs_folder = abspath(
# dirname(__file__)) + HTML_DATA_DIRECTORY_SUBFOLDER
existing_html_docs_folder = (
str(Path(dirname(__file__)).parent) + "/shared" + HTML_DATA_DIRECTORY_SUBFOLDER
)
logger.debug(existing_html_docs_folder)
if exists(HTML_DIRECTORY_PATH):
shutil.rmtree(HTML_DIRECTORY_PATH)
shutil.copytree(existing_html_docs_folder, HTML_DIRECTORY_PATH)
# Copy the links.yml file from here to the config directory
existing_links_file = (
str(Path(dirname(__file__)).parent) + "/shared/data/" + "links.yml"
)
target_links_file = HOME + CONFIG_DIRECTORY + "links.yml"
shutil.copy(existing_links_file, target_links_file) | 0.290276 | 0.099908 |
import numpy as np
import pandas
import warnings
from modin.core.storage_formats.pandas.parsers import (
_split_result_for_readers,
PandasCSVGlobParser,
PandasPickleExperimentalParser,
CustomTextExperimentalParser,
)
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.core.execution.ray.implementations.pandas_on_ray.io import PandasOnRayIO
from modin.core.io import (
CSVGlobDispatcher,
PickleExperimentalDispatcher,
CustomTextExperimentalDispatcher,
)
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe import (
PandasOnRayDataframe,
)
from modin.core.execution.ray.implementations.pandas_on_ray.partitioning import (
PandasOnRayDataframePartition,
)
from modin.core.execution.ray.common import RayTask
from modin.config import NPartitions
import ray
class ExperimentalPandasOnRayIO(PandasOnRayIO):
    """
    Class for handling experimental IO functionality with pandas storage format and Ray engine.
    ``ExperimentalPandasOnRayIO`` inherits some util functions and unmodified IO functions
    from ``PandasOnRayIO`` class.
    """
    # Class attributes injected into the dynamically assembled reader classes below.
    build_args = dict(
        frame_partition_cls=PandasOnRayDataframePartition,
        query_compiler_cls=PandasQueryCompiler,
        frame_cls=PandasOnRayDataframe,
    )
    # Each reader is built at class-definition time by mixing the Ray task
    # engine, a parser and a dispatcher into an anonymous class, then taking
    # that class's ``_read`` entry point.
    read_csv_glob = type(
        "", (RayTask, PandasCSVGlobParser, CSVGlobDispatcher), build_args
    )._read
    read_pickle_distributed = type(
        "",
        (RayTask, PandasPickleExperimentalParser, PickleExperimentalDispatcher),
        build_args,
    )._read
    read_custom_text = type(
        "",
        (RayTask, CustomTextExperimentalParser, CustomTextExperimentalDispatcher),
        build_args,
    )._read
    @classmethod
    def read_sql(
        cls,
        sql,
        con,
        index_col=None,
        coerce_float=True,
        params=None,
        parse_dates=None,
        columns=None,
        chunksize=None,
        partition_column=None,
        lower_bound=None,
        upper_bound=None,
        max_sessions=None,
    ):
        """
        Read SQL query or database table into a DataFrame.
        The function extended with `Spark-like parameters <https://spark.apache.org/docs/2.0.0/api/R/read.jdbc.html>`_
        such as ``partition_column``, ``lower_bound`` and ``upper_bound``. With these
        parameters, the user will be able to specify how to partition the imported data.
        Parameters
        ----------
        sql : str or SQLAlchemy Selectable (select or text object)
            SQL query to be executed or a table name.
        con : SQLAlchemy connectable or str
            Connection to database (sqlite3 connections are not supported).
        index_col : str or list of str, optional
            Column(s) to set as index(MultiIndex).
        coerce_float : bool, default: True
            Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional
            List of parameters to pass to ``execute`` method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
        parse_dates : list or dict, optional
            The behavior is as follows:
            - List of column names to parse as dates.
            - Dict of `{column_name: format string}` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of `{column_name: arg dict}`, where the arg dict corresponds
              to the keyword arguments of ``pandas.to_datetime``.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns : list, optional
            List of column names to select from SQL table (only used when reading a
            table).
        chunksize : int, optional
            If specified, return an iterator where `chunksize` is the number of rows
            to include in each chunk.
        partition_column : str, optional
            Column name used for data partitioning between the workers
            (MUST be an INTEGER column).
        lower_bound : int, optional
            The minimum value to be requested from the `partition_column`.
        upper_bound : int, optional
            The maximum value to be requested from the `partition_column`.
        max_sessions : int, optional
            The maximum number of simultaneous connections allowed to use.
        Returns
        -------
        BaseQueryCompiler
            A new query compiler with imported data for further processing.
        """
        from .sql import is_distributed, get_query_info
        # Without valid partitioning bounds, fall back to the sequential
        # default implementation.
        if not is_distributed(partition_column, lower_bound, upper_bound):
            warnings.warn("Defaulting to Modin core implementation")
            return PandasOnRayIO.read_sql(
                sql,
                con,
                index_col,
                coerce_float=coerce_float,
                params=params,
                parse_dates=parse_dates,
                columns=columns,
                chunksize=chunksize,
            )
        # starts the distributed alternative
        cols_names, query = get_query_info(sql, con, partition_column)
        # NOTE(review): when ``max_sessions`` is None/0 this forces a single
        # partition instead of using ``NPartitions`` — confirm intended.
        num_parts = min(NPartitions.get(), max_sessions if max_sessions else 1)
        num_splits = min(len(cols_names), num_parts)
        # Split the inclusive [lower_bound, upper_bound] value range into
        # num_parts contiguous chunks; the first ``rest`` chunks get one
        # extra row so the sizes differ by at most one.
        diff = (upper_bound - lower_bound) + 1
        min_size = diff // num_parts
        rest = diff % num_parts
        partition_ids = []
        index_ids = []
        end = lower_bound - 1
        for part in range(num_parts):
            if rest:
                size = min_size + 1
                rest -= 1
            else:
                size = min_size
            start = end + 1
            end = start + size - 1
            # Each remote task yields num_splits column splits plus the row
            # count of its chunk (hence num_returns=num_splits + 1).
            partition_id = _read_sql_with_offset_pandas_on_ray.options(
                num_returns=num_splits + 1
            ).remote(
                partition_column,
                start,
                end,
                num_splits,
                query,
                con,
                index_col,
                coerce_float,
                params,
                parse_dates,
                columns,
                chunksize,
            )
            partition_ids.append(
                [PandasOnRayDataframePartition(obj) for obj in partition_id[:-1]]
            )
            index_ids.append(partition_id[-1])
        # Blocks until every chunk's length is known, then builds the global index.
        new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
        new_query_compiler = cls.query_compiler_cls(
            cls.frame_cls(np.array(partition_ids), new_index, cols_names)
        )
        new_query_compiler._modin_frame.synchronize_labels(axis=0)
        return new_query_compiler
    @classmethod
    def to_pickle_distributed(cls, qc, **kwargs):
        """
        When `*` in the filename all partitions are written to their own separate file.
        The filenames is determined as follows:
        - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, …
        - if `*` is not the filename, then will be used default implementation.
        Examples #1: 4 partitions and input filename="partition*.pkl.gz", then filenames will be:
        `partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`.
        Parameters
        ----------
        qc : BaseQueryCompiler
            The query compiler of the Modin dataframe that we want
            to run ``to_pickle_distributed`` on.
        **kwargs : dict
            Parameters for ``pandas.to_pickle(**kwargs)``.
        """
        # No '*' wildcard (or a non-pandas query compiler): use the
        # single-file default implementation instead.
        if not (
            isinstance(kwargs["filepath_or_buffer"], str)
            and "*" in kwargs["filepath_or_buffer"]
        ) or not isinstance(qc, PandasQueryCompiler):
            warnings.warn("Defaulting to Modin core implementation")
            return PandasOnRayIO.to_pickle(qc, **kwargs)
        def func(df, **kw):
            # Runs once per partition; 'partition_idx' replaces the '*'
            # wildcard so every partition writes its own file.
            idx = str(kw["partition_idx"])
            kwargs["path"] = kwargs.pop("filepath_or_buffer").replace("*", idx)
            df.to_pickle(**kwargs)
            return pandas.DataFrame()
        result = qc._modin_frame.broadcast_apply_full_axis(
            1, func, other=None, new_index=[], new_columns=[], enumerate_partitions=True
        )
        # Force materialization so the distributed writes actually execute.
        result.to_pandas()
# Ray functions are not detected by codecov (thus pragma: no cover)
@ray.remote
def _read_sql_with_offset_pandas_on_ray(
    partition_column,
    start,
    end,
    num_splits,
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
):  # pragma: no cover
    """
    Read a chunk of SQL query or table into a pandas DataFrame using Ray task.
    Parameters
    ----------
    partition_column : str
        Column name used for data partitioning between the workers.
    start : int
        Lowest value to request from the `partition_column`.
    end : int
        Highest value to request from the `partition_column`.
    num_splits : int
        The number of partitions to split the column into.
    sql : str or SQLAlchemy Selectable (select or text object)
        SQL query to be executed or a table name.
    con : SQLAlchemy connectable or str
        Connection to database (sqlite3 connections are not supported).
    index_col : str or list of str, optional
        Column(s) to set as index(MultiIndex).
    coerce_float : bool, default: True
        Attempts to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point, useful for SQL result sets.
    params : list, tuple or dict, optional
        List of parameters to pass to ``execute`` method. The syntax used
        to pass parameters is database driver dependent. Check your
        database driver documentation for which of the five syntax styles,
        described in PEP 249's paramstyle, is supported.
    parse_dates : list or dict, optional
        The behavior is as follows:
        - List of column names to parse as dates.
        - Dict of `{column_name: format string}` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of `{column_name: arg dict}`, where the arg dict corresponds
          to the keyword arguments of ``pandas.to_datetime``
          Especially useful with databases without native Datetime support,
          such as SQLite.
    columns : list, optional
        List of column names to select from SQL table (only used when reading a
        table).
    chunksize : int, optional
        If specified, return an iterator where `chunksize` is the number of rows
        to include in each chunk.
    Returns
    -------
    list
        List with splitted read results and it's metadata (index, dtypes, etc.).
    """
    from .sql import query_put_bounders
    # Constrain the query to partition_column values in [start, end].
    query_with_bounders = query_put_bounders(sql, partition_column, start, end)
    pandas_df = pandas.read_sql(
        query_with_bounders,
        con,
        index_col=index_col,
        coerce_float=coerce_float,
        params=params,
        parse_dates=parse_dates,
        columns=columns,
        chunksize=chunksize,
    )
    # Return num_splits column-wise splits plus this chunk's row count, so
    # the caller can assemble the global index without pulling the data.
    index = len(pandas_df)
    return _split_result_for_readers(1, num_splits, pandas_df) + [index]
import pandas
import warnings
from modin.core.storage_formats.pandas.parsers import (
_split_result_for_readers,
PandasCSVGlobParser,
PandasPickleExperimentalParser,
CustomTextExperimentalParser,
)
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.core.execution.ray.implementations.pandas_on_ray.io import PandasOnRayIO
from modin.core.io import (
CSVGlobDispatcher,
PickleExperimentalDispatcher,
CustomTextExperimentalDispatcher,
)
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe import (
PandasOnRayDataframe,
)
from modin.core.execution.ray.implementations.pandas_on_ray.partitioning import (
PandasOnRayDataframePartition,
)
from modin.core.execution.ray.common import RayTask
from modin.config import NPartitions
import ray
class ExperimentalPandasOnRayIO(PandasOnRayIO):
"""
Class for handling experimental IO functionality with pandas storage format and Ray engine.
``ExperimentalPandasOnRayIO`` inherits some util functions and unmodified IO functions
from ``PandasOnRayIO`` class.
"""
build_args = dict(
frame_partition_cls=PandasOnRayDataframePartition,
query_compiler_cls=PandasQueryCompiler,
frame_cls=PandasOnRayDataframe,
)
read_csv_glob = type(
"", (RayTask, PandasCSVGlobParser, CSVGlobDispatcher), build_args
)._read
read_pickle_distributed = type(
"",
(RayTask, PandasPickleExperimentalParser, PickleExperimentalDispatcher),
build_args,
)._read
read_custom_text = type(
"",
(RayTask, CustomTextExperimentalParser, CustomTextExperimentalDispatcher),
build_args,
)._read
@classmethod
def read_sql(
cls,
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column=None,
lower_bound=None,
upper_bound=None,
max_sessions=None,
):
"""
Read SQL query or database table into a DataFrame.
The function extended with `Spark-like parameters <https://spark.apache.org/docs/2.0.0/api/R/read.jdbc.html>`_
such as ``partition_column``, ``lower_bound`` and ``upper_bound``. With these
parameters, the user will be able to specify how to partition the imported data.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable or str
Connection to database (sqlite3 connections are not supported).
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to ``execute`` method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
parse_dates : list or dict, optional
The behavior is as follows:
- List of column names to parse as dates.
- Dict of `{column_name: format string}` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of `{column_name: arg dict}`, where the arg dict corresponds
to the keyword arguments of ``pandas.to_datetime``.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading a
table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the number of rows
to include in each chunk.
partition_column : str, optional
Column name used for data partitioning between the workers
(MUST be an INTEGER column).
lower_bound : int, optional
The minimum value to be requested from the `partition_column`.
upper_bound : int, optional
The maximum value to be requested from the `partition_column`.
max_sessions : int, optional
The maximum number of simultaneous connections allowed to use.
Returns
-------
BaseQueryCompiler
A new query compiler with imported data for further processing.
"""
from .sql import is_distributed, get_query_info
if not is_distributed(partition_column, lower_bound, upper_bound):
warnings.warn("Defaulting to Modin core implementation")
return PandasOnRayIO.read_sql(
sql,
con,
index_col,
coerce_float=coerce_float,
params=params,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
# starts the distributed alternative
cols_names, query = get_query_info(sql, con, partition_column)
num_parts = min(NPartitions.get(), max_sessions if max_sessions else 1)
num_splits = min(len(cols_names), num_parts)
diff = (upper_bound - lower_bound) + 1
min_size = diff // num_parts
rest = diff % num_parts
partition_ids = []
index_ids = []
end = lower_bound - 1
for part in range(num_parts):
if rest:
size = min_size + 1
rest -= 1
else:
size = min_size
start = end + 1
end = start + size - 1
partition_id = _read_sql_with_offset_pandas_on_ray.options(
num_returns=num_splits + 1
).remote(
partition_column,
start,
end,
num_splits,
query,
con,
index_col,
coerce_float,
params,
parse_dates,
columns,
chunksize,
)
partition_ids.append(
[PandasOnRayDataframePartition(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(np.array(partition_ids), new_index, cols_names)
)
new_query_compiler._modin_frame.synchronize_labels(axis=0)
return new_query_compiler
@classmethod
def to_pickle_distributed(cls, qc, **kwargs):
"""
When `*` in the filename all partitions are written to their own separate file.
The filenames is determined as follows:
- if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, …
- if `*` is not the filename, then will be used default implementation.
Examples #1: 4 partitions and input filename="partition*.pkl.gz", then filenames will be:
`partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`.
Parameters
----------
qc : BaseQueryCompiler
The query compiler of the Modin dataframe that we want
to run ``to_pickle_distributed`` on.
**kwargs : dict
Parameters for ``pandas.to_pickle(**kwargs)``.
"""
if not (
isinstance(kwargs["filepath_or_buffer"], str)
and "*" in kwargs["filepath_or_buffer"]
) or not isinstance(qc, PandasQueryCompiler):
warnings.warn("Defaulting to Modin core implementation")
return PandasOnRayIO.to_pickle(qc, **kwargs)
def func(df, **kw):
idx = str(kw["partition_idx"])
kwargs["path"] = kwargs.pop("filepath_or_buffer").replace("*", idx)
df.to_pickle(**kwargs)
return pandas.DataFrame()
result = qc._modin_frame.broadcast_apply_full_axis(
1, func, other=None, new_index=[], new_columns=[], enumerate_partitions=True
)
result.to_pandas()
# Ray functions are not detected by codecov (thus pragma: no cover)
@ray.remote
def _read_sql_with_offset_pandas_on_ray(
partition_column,
start,
end,
num_splits,
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
): # pragma: no cover
"""
Read a chunk of SQL query or table into a pandas DataFrame using Ray task.
Parameters
----------
partition_column : str
Column name used for data partitioning between the workers.
start : int
Lowest value to request from the `partition_column`.
end : int
Highest value to request from the `partition_column`.
num_splits : int
The number of partitions to split the column into.
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable or str
Connection to database (sqlite3 connections are not supported).
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to ``execute`` method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
parse_dates : list or dict, optional
The behavior is as follows:
- List of column names to parse as dates.
- Dict of `{column_name: format string}` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of `{column_name: arg dict}`, where the arg dict corresponds
to the keyword arguments of ``pandas.to_datetime``
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading a
table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the number of rows
to include in each chunk.
Returns
-------
list
List with splitted read results and it's metadata (index, dtypes, etc.).
"""
from .sql import query_put_bounders
query_with_bounders = query_put_bounders(sql, partition_column, start, end)
pandas_df = pandas.read_sql(
query_with_bounders,
con,
index_col=index_col,
coerce_float=coerce_float,
params=params,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
index = len(pandas_df)
return _split_result_for_readers(1, num_splits, pandas_df) + [index] | 0.797517 | 0.392133 |
import subprocess
from ..files import LocationDoesNotExist, NotADirectory
from ..resources import console, default_transient_progress
def make(dir_path, args, app_name=""):
    """Run ``make`` in ``dir_path`` and report the result.

    Args:
        dir_path (Path): directory in which to invoke make.
        args (list): extra string arguments appended to the make command.
        app_name (str): optional application name shown in progress output.

    Returns:
        int: the return code of make (non-zero means failure).
        str: the standard output of make.
        str: the standard error output of make.

    Raises:
        LocationDoesNotExist: if ``dir_path`` does not exist.
        NotADirectory: if ``dir_path`` is not a directory.
    """
    # Prefix the app name with a space so it reads naturally in messages.
    if app_name != "" and not app_name.startswith(" "):
        app_name = " {}".format(app_name)
    label = "Making{}...".format(app_name)
    with default_transient_progress() as progress:
        progress.add_task(label, start=False)
        if not dir_path.exists():
            raise LocationDoesNotExist("{} does not exist".format(dir_path))
        if not dir_path.is_dir():
            raise NotADirectory("{} is not a directory".format(dir_path))
        command = ["make"] + args
        result = subprocess.run(command, cwd=dir_path, capture_output=True, text=True)
    if result.returncode == 0:
        console.print(label + "[bold green]Done![/]")
    return result.returncode, result.stdout, result.stderr
def make_install(dir_path, args, app_name=""):
    """Run ``make install`` in ``dir_path`` and report the result.

    Args:
        dir_path (Path): directory in which to invoke make install.
        args (list): extra string arguments appended to the command.
        app_name (str): optional application name shown in progress output.

    Returns:
        int: the return code of make install (non-zero means failure).
        str: the standard output of make install.
        str: the standard error output of make install.

    Raises:
        LocationDoesNotExist: if ``dir_path`` does not exist.
        NotADirectory: if ``dir_path`` is not a directory.
    """
    # Prefix the app name with a space so it reads naturally in messages.
    if app_name != "" and not app_name.startswith(" "):
        app_name = " {}".format(app_name)
    label = "Installing{}...".format(app_name)
    with default_transient_progress() as progress:
        progress.add_task(label, start=False)
        if not dir_path.exists():
            raise LocationDoesNotExist("{} does not exist".format(dir_path))
        if not dir_path.is_dir():
            raise NotADirectory("{} is not a directory".format(dir_path))
        command = ["make", "install"] + args
        result = subprocess.run(command, cwd=dir_path, capture_output=True, text=True)
    if result.returncode == 0:
        console.print(label + "[bold green]Done![/]")
    return result.returncode, result.stdout, result.stderr
from ..files import LocationDoesNotExist, NotADirectory
from ..resources import console, default_transient_progress
def make(dir_path, args, app_name=""):
"""Invoke make in the given directory.
Args:
dir_path (Path): the directory in which invoke make.
args (list): the arguments to pass to make. It must be a list of string
containing all arguments that must be passed to make.
Returns:
int: the return code of make. If it is different than zero then something
went wrong.
str: the standard output of make
std: the standard error output of make
Raises:
LocationDoesNotExist: if the given directory does not exist.
NotADirectory: if the given location is not a directory.
"""
if app_name != "" and not app_name.startswith(" "):
app_name = " {}".format(app_name)
with default_transient_progress() as progress:
progress.add_task("Making{}...".format(app_name), start=False)
if not dir_path.exists():
raise LocationDoesNotExist("{} does not exist".format(dir_path))
if not dir_path.is_dir():
raise NotADirectory("{} is not a directory".format(dir_path))
args = ["make"] + args
result = subprocess.run(args, cwd=dir_path, capture_output=True, text=True)
if result.returncode == 0:
console.print("Making{}...[bold green]Done![/]".format(app_name))
return result.returncode, result.stdout, result.stderr
def make_install(dir_path, args, app_name=""):
"""Invoke make install in the given directory.
Args:
dir_path (Path): the directory in which invoke make install.
args (list): the arguments to pass to make. It must be a list of string
containing all arguments that must be passed to make install.
Returns:
int: the return code of make install. If it is different than zero then something
went wrong.
str: the standard output of make install
std: the standard error output of make install
Raises:
LocationDoesNotExist: if the given directory does not exist.
NotADirectory: if the given location is not a directory.
"""
if app_name != "" and not app_name.startswith(" "):
app_name = " {}".format(app_name)
with default_transient_progress() as progress:
progress.add_task("Installing{}...".format(app_name), start=False)
if not dir_path.exists():
raise LocationDoesNotExist("{} does not exist".format(dir_path))
if not dir_path.is_dir():
raise NotADirectory("{} is not a directory".format(dir_path))
args = ["make", "install"] + args
result = subprocess.run(args, cwd=dir_path, capture_output=True, text=True)
if result.returncode == 0:
console.print("Installing{}...[bold green]Done![/]".format(app_name))
return result.returncode, result.stdout, result.stderr | 0.644673 | 0.250337 |
from os import path
from time import sleep
import sys
class FFEA_springs:
    """
    In-memory representation of an FFEA springs file (.spring / .springs).

    Holds a list of FFEA_spring objects in ``self.spring`` and keeps
    ``self.num_springs`` in sync with it via add_spring()/reset().
    """
    def __init__(self, fname=""):
        """Initialise empty; optionally load springs from 'fname'."""
        self.reset()
        if fname == "":
            return
        self.load(fname)
    def load(self, fname):
        """Load a springs file, dispatching on the filename extension.

        Raises FFEAIOError for an unrecognised extension and FFEAFormatError
        (from load_springs) for malformed contents.
        """
        sys.stdout.write("Loading FFEA springs file...")
        # File format?
        base, ext = path.splitext(fname)
        if ext == ".spring" or ext == ".springs":
            self.load_springs(fname)
        else:
            raise FFEAIOError(fname=fname, fext=[".spring", ".springs"])
        sys.stdout.write("done!\n")
    def load_springs(self, fname):
        """Parse an 'ffea springs file' (3 header lines, then one spring per line)."""
        fin = open(fname, "r")
        try:
            # Test format
            line = fin.readline().strip()
            if line != "ffea springs file" and line != "walrus springs file":
                raise FFEAFormatError(lin=1, lstr="ffea springs file")
            try:
                # BUGFIX: read the expected count into a local instead of
                # self.num_springs — add_spring() increments the counter
                # itself, so pre-setting it doubled the final count.
                num_springs_expected = int(fin.readline().split()[1])
            except IndexError:
                raise FFEAFormatError(lin="2", lstr="num_springs %d")
            if fin.readline().strip() != "springs:":
                # BUGFIX: the 'springs:' header is line 3 (see write_to_file),
                # not line 5 as previously reported.
                raise FFEAFormatError(lin="3", lstr="springs:")
            # Read springs now
            i = 0
            try:
                for i in range(num_springs_expected):
                    sline = fin.readline().split()
                    # Get a spring
                    spring = FFEA_spring()
                    spring.set_properties(sline[0], sline[1], sline[2:4], sline[4:6], sline[6:8])
                    self.add_spring(spring)
            except (IndexError, ValueError):
                # Spring data starts on line 4, right after the 3 header lines.
                # (BUGFIX: the original referenced an undefined name 'j' here.)
                raise FFEAFormatError(lin=i + 4, lstr="%f %f %d %d %d %d %d %d")
        finally:
            # BUGFIX: close the handle even when a format error is raised.
            fin.close()
    def add_spring(self, spring):
        """Append 'spring' and update the spring count."""
        self.spring.append(spring)
        self.num_springs += 1
    def get_num_springs(self):
        """Return the number of stored springs."""
        return self.num_springs
    def print_details(self):
        """Pretty-print every spring's parameters to stdout."""
        print("num_springs = %d" % (self.num_springs))
        sleep(1)
        print("\n\t k\t\tl\t\tblob\tconf\tnode")
        # enumerate avoids the O(n^2) (and duplicate-unsafe) list.index() lookup.
        for index, s in enumerate(self.spring):
            outline = "Spring " + str(index) + " "
            outline += "%e\t%e\t%d %d\t%d %d\t%d %d" % (s.k, s.l, s.blob_index[0], s.blob_index[1], s.conformation_index[0], s.conformation_index[1], s.node_index[0], s.node_index[1])
            print(outline)
    def write_to_file(self, fname):
        """Write all springs back out in the 'ffea springs file' format."""
        with open(fname, "w") as fout:
            fout.write("ffea springs file\nnum_springs %d\nsprings:\n" % (self.num_springs))
            for s in self.spring:
                fout.write("%e %e %d %d %d %d %d %d\n" % (s.k, s.l, s.blob_index[0], s.blob_index[1], s.conformation_index[0], s.conformation_index[1], s.node_index[0], s.node_index[1]))
    def reset(self):
        """Clear all springs and reset the count to zero."""
        self.spring = []
        self.num_springs = 0
class FFEA_spring:
    """A single FFEA spring: stiffness, rest length and endpoint indices."""

    def __init__(self):
        self.reset()

    def set_properties(self, k, l, bin, cin, nin):
        """Set spring properties, converting from string/numeric inputs.

        Args:
            k: spring constant (converted to float).
            l: equilibrium length (converted to float).
            bin: pair of blob indices (each converted to int).
            cin: pair of conformation indices (each converted to int).
            nin: pair of node indices (each converted to int).

        Raises:
            ValueError/TypeError: if any value cannot be converted.
        """
        # The original wrapped this in a no-op "try/except: raise"; letting
        # the conversion errors propagate naturally is equivalent.
        self.k = float(k)
        self.l = float(l)
        self.blob_index = [int(i) for i in bin]
        self.conformation_index = [int(i) for i in cin]
        self.node_index = [int(i) for i in nin]

    def print_details(self):
        """Print this spring's parameters in tabular form."""
        print("\n\t k\t\tl\t\tblob\tconf\tnode")
        print("%e\t%e\t%d %d\t%d %d\t%d %d" % (
            self.k, self.l,
            self.blob_index[0], self.blob_index[1],
            self.conformation_index[0], self.conformation_index[1],
            self.node_index[0], self.node_index[1]))

    def reset(self):
        """Zero the spring and clear all endpoint indices."""
        self.k = 0
        self.l = 0
        self.blob_index = []
        self.conformation_index = []
        self.node_index = []
from os import path
from time import sleep
import sys
class FFEA_springs:
    """Container for an FFEA springs file (.spring / .springs).

    Holds a list of FFEA_spring objects and knows how to load them from,
    and write them back to, the FFEA springs file format.
    """

    def __init__(self, fname=""):
        # Start empty; optionally load from a file straight away.
        self.reset()
        if fname == "":
            return
        self.load(fname)

    def load(self, fname):
        """Load springs from 'fname', dispatching on the file extension.

        Raises:
            FFEAIOError: if the extension is not .spring or .springs.
        """
        sys.stdout.write("Loading FFEA springs file...")
        base, ext = path.splitext(fname)
        if ext == ".spring" or ext == ".springs":
            self.load_springs(fname)
        else:
            raise FFEAIOError(fname=fname, fext=[".spring", ".springs"])
        sys.stdout.write("done!\n")

    def load_springs(self, fname):
        """Parse an FFEA/walrus springs file and populate self.spring.

        Raises:
            FFEAFormatError: if any header or spring line is malformed.
        """
        # 'with' guarantees the file is closed even when a parse error is
        # raised (the original leaked the handle on the error paths).
        with open(fname, "r") as fin:
            # First line identifies the file format.
            line = fin.readline().strip()
            if line != "ffea springs file" and line != "walrus springs file":
                raise FFEAFormatError(lin=1, lstr="ffea springs file")
            try:
                # e.g. "num_springs 12" — keep the expected count local.
                # add_spring() maintains self.num_springs, so pre-setting it
                # here (as the original did) double-counted every spring.
                num_springs = int(fin.readline().split()[1])
            except (IndexError, ValueError):
                raise FFEAFormatError(lin="2", lstr="num_springs %d")
            if fin.readline().strip() != "springs:":
                raise FFEAFormatError(lin="5", lstr="springs:")
            # One spring per line: k, l, two blob indices, two conformation
            # indices, two node indices.
            for i in range(num_springs):
                sline = fin.readline().split()
                try:
                    spring = FFEA_spring()
                    spring.set_properties(sline[0], sline[1], sline[2:4],
                                          sline[4:6], sline[6:8])
                except (IndexError, ValueError):
                    # The original used an undefined name 'j' here, which
                    # raised NameError instead of the intended format error.
                    raise FFEAFormatError(lin=i + 6,
                                          lstr="%f %f %d %d %d %d %d %d")
                self.add_spring(spring)

    def add_spring(self, spring):
        """Append a spring and keep the count in sync."""
        self.spring.append(spring)
        self.num_springs += 1

    def get_num_springs(self):
        """Return the number of springs currently stored."""
        return self.num_springs

    def print_details(self):
        """Print a human-readable table of all springs."""
        print("num_springs = %d" % (self.num_springs))
        sleep(1)
        print("\n\t k\t\tl\t\tblob\tconf\tnode")
        # enumerate avoids the O(n^2) list.index lookup per row.
        for index, s in enumerate(self.spring):
            outline = "Spring " + str(index) + " "
            outline += "%e\t%e\t%d %d\t%d %d\t%d %d" % (
                s.k, s.l,
                s.blob_index[0], s.blob_index[1],
                s.conformation_index[0], s.conformation_index[1],
                s.node_index[0], s.node_index[1])
            print(outline)

    def write_to_file(self, fname):
        """Write all springs back out in the FFEA springs file format."""
        with open(fname, "w") as fout:
            fout.write("ffea springs file\nnum_springs %d\nsprings:\n"
                       % (self.num_springs))
            for s in self.spring:
                fout.write("%e %e %d %d %d %d %d %d\n" % (
                    s.k, s.l,
                    s.blob_index[0], s.blob_index[1],
                    s.conformation_index[0], s.conformation_index[1],
                    s.node_index[0], s.node_index[1]))

    def reset(self):
        """Clear all springs."""
        self.spring = []
        self.num_springs = 0
class FFEA_spring:
    """A single FFEA spring: stiffness, rest length and endpoint indices."""

    def __init__(self):
        self.reset()

    def set_properties(self, k, l, bin, cin, nin):
        """Set spring properties, converting from string/numeric inputs.

        Args:
            k: spring constant (converted to float).
            l: equilibrium length (converted to float).
            bin: pair of blob indices (each converted to int).
            cin: pair of conformation indices (each converted to int).
            nin: pair of node indices (each converted to int).

        Raises:
            ValueError/TypeError: if any value cannot be converted.
        """
        # The original wrapped this in a no-op "try/except: raise"; letting
        # the conversion errors propagate naturally is equivalent.
        self.k = float(k)
        self.l = float(l)
        self.blob_index = [int(i) for i in bin]
        self.conformation_index = [int(i) for i in cin]
        self.node_index = [int(i) for i in nin]

    def print_details(self):
        """Print this spring's parameters in tabular form."""
        print("\n\t k\t\tl\t\tblob\tconf\tnode")
        print("%e\t%e\t%d %d\t%d %d\t%d %d" % (
            self.k, self.l,
            self.blob_index[0], self.blob_index[1],
            self.conformation_index[0], self.conformation_index[1],
            self.node_index[0], self.node_index[1]))

    def reset(self):
        """Zero the spring and clear all endpoint indices."""
        self.k = 0
        self.l = 0
        self.blob_index = []
        self.conformation_index = []
        self.node_index = []
from pathlib import Path, PurePath
from typing import Tuple, Collection, Mapping
from subprocess import check_output
from .fs.pathtree import PathTree, PathNode
class GitRepository:
    """Represents a Git repository on the file system."""

    def __init__(self, path: str, init=False):
        """Create an abstraction for a Git repository at the given path.

        Args:
            path (str): path to the Git repository root directory
            init (bool, optional): if True, initialize Git at the given path
                if not already initialized. Defaults to False.

        Raises:
            ValueError: if the given path does not exist
        """
        path = Path(path)
        if not path.is_dir():
            # BUG FIX: the original string lacked the f-prefix, so the
            # message showed the literal "{path}" instead of the real path.
            raise ValueError(f"Directory does not exist: {path}")
        self._root = path
        self._git_initd = (path / ".git").is_dir()
        if not self._git_initd and init:
            self.init()

    @property
    def root(self) -> Path:
        """Get the path to the root of this Git repository.

        Returns:
            Path: path to the root of this Git repository
        """
        return self._root

    def init(self):
        """Initialize this Git repository if not already initialized."""
        if not self._git_initd:
            self._run_git("init")

    @property
    def tracked(self) -> Collection[Path]:
        """Get all tracked files.

        The paths of the tracked files are relative to the project root.

        Returns:
            Collection[Path]: paths to the tracked files
        """
        return self.tracked_at(".")

    def tracked_at(self, path: str) -> Collection[Path]:
        """Get tracked files filtered by the given path.

        The paths of the tracked files are relative to the project root.

        Args:
            path (str): path relative to the project root to filter the
                tracked files

        Returns:
            Collection[Path]: paths to the tracked files
        """
        tracked = set()
        # List all files in HEAD recursively (-r); --full-tree means paths
        # are interpreted as relative to the repository root.
        cmd = ("ls-tree", "--name-only", "--full-tree", "-r", "HEAD")
        if (self.root / path).is_dir():
            cmd += (path,)
        cmd_output = self._run_git(
            *cmd, universal_newlines=True, encoding="utf8"
        ).strip()
        if len(cmd_output) > 0:
            # Text mode in check_output normalizes line endings to \n.
            for line in cmd_output.split("\n"):
                # Cast to concrete paths native to the OS.
                tracked.add(Path(line))
        return tracked

    def staged_at(self, path: str) -> Mapping[str, Collection[Tuple[Path]]]:
        """Get staged changes filtered by the given path.

        Staged changes are returned as a mapping of change type to the paths
        affected by the change. Change types include 'A' for added paths,
        'D' for deleted paths and 'R' for renamed paths. Each change type is
        mapped to a collection of path tuples. For change types 'A' and 'D'
        the tuples consist of only one Path. For change type 'R' the tuples
        consist of two Paths, the first being the path renamed from and the
        second the path renamed to.

        Args:
            path (str): path relative to the project root to filter the
                staged changes

        Returns:
            Mapping[str, Set[Tuple[Path]]]: a mapping of change type to
                affected path(s).
        """
        changes = {}
        # Compare the index (diff --cached) with HEAD, detect renames
        # (--find-renames) and list name plus change type (--name-status).
        # The output uses NULs as field delimiters (-z).
        cmd = (
            "diff",
            "--cached",
            "--name-status",
            "--find-renames",
            "-z",
        )
        if (self.root / path).is_dir():
            cmd += ("--", path)
        # Command output tends to have trailing NULs so trim those.
        cmd_output = (
            self._run_git(*cmd, universal_newlines=True, encoding="utf8")
            .strip()
            .strip("\0")
        )
        if len(cmd_output) > 0:
            words = cmd_output.split("\0")
            i = 0
            while i < len(words):
                change = words[i]
                if change == "A" and i + 1 < len(words):
                    # Addition: one path follows the change type.
                    changes.setdefault(change, set()).add((Path(words[i + 1]),))
                    i += 2
                elif change == "D" and i + 1 < len(words):
                    # Deletion: one path follows the change type.
                    changes.setdefault(change, set()).add((Path(words[i + 1]),))
                    i += 2
                elif change.startswith("R") and i + 2 < len(words):
                    # Rename: two paths follow (from, to). Paths are
                    # immutable and hashable, and tuples of hashable
                    # elements are hashable, so the set accepts them.
                    path_from, path_to = Path(words[i + 1]), Path(words[i + 2])
                    changes.setdefault("R", set()).add((path_from, path_to))
                    i += 3
                else:
                    # Skip unhandled change types one word at a time —
                    # essential to prevent an infinite loop on C (copy),
                    # M (modification), T (type change), U (unmerged),
                    # X (unknown), etc.
                    i += 1
        return changes

    @property
    def staged(self):
        """Get all staged changes.

        See staged_at() for the shape of the returned mapping.

        Returns:
            Mapping[str, Set[Tuple[Path]]]: a mapping of change type to
                affected path(s).
        """
        return self.staged_at(".")

    def rename(self, *from_to: str):
        """Rename files and stage the changes.

        Note that each path to rename is specified as a (from, to) pair of
        paths. The paths are interpreted as relative to the repository root.
        All pairs are validated before any rename is performed.
        """
        if len(from_to) > 0:
            if all([len(p) == 2 and self.is_file(p[0]) for p in from_to]):
                for old, new in from_to:
                    self._run_git("mv", old, new)

    def add(self, *paths: str):
        """Add the given paths and stage the changes.

        The paths are interpreted as relative to the repository root.

        Args:
            paths (str): paths to the files to be added
        """
        if len(paths) > 0:
            if all(self.is_file(p) for p in paths):
                self._run_git("add", *paths)

    def remove(self, *paths: str):
        """Remove given paths and stage the changes.

        The paths are interpreted as relative to the repository root.

        Args:
            paths (str): paths to the files to be deleted
        """
        if len(paths) > 0:
            if all(self.is_file(p) for p in paths):
                self._run_git("rm", *paths)

    def commit(self, message: str):
        """Commit the staged files with the given commit message.

        Args:
            message (str): message for the commit
        """
        self._run_git("commit", "-m", message)

    def is_dir(self, path: str) -> bool:
        """Check if a directory exists at the given path.

        Args:
            path (str): path of the directory, relative to the project root

        Returns:
            bool: True if a directory exists at the path, False otherwise
        """
        return self._abs_path(path).is_dir()

    def is_file(self, path: str) -> bool:
        """Check if a file exists at the given path.

        Args:
            path (str): path of the file, relative to the project root

        Returns:
            bool: True if a file exists at the given path, False otherwise
        """
        return self._abs_path(path).is_file()

    def path_exists(self, path: str) -> bool:
        """Check if the given path exists on the file system.

        Args:
            path (str): path relative to the project root

        Returns:
            bool: True if the path exists, False otherwise
        """
        return self._abs_path(path).exists()

    def _run_git(self, *args: str, **kwargs):
        """Invoke Git at the project root with the given arguments.

        The keyword arguments are forwarded to subprocess.check_output.

        Raises:
            CalledProcessError: if the exit code of the Git command was
                non-zero

        Returns:
            bytes: the output of the Git command
        """
        return check_output(("git", "-C", self._root) + args, **kwargs)

    def _abs_path(self, path: str) -> Path:
        """Get the absolute path given a path relative to the project root.

        Args:
            path (str): a path relative to the project root

        Returns:
            Path: an absolute path
        """
        return self._root / path
class UnityGitRepository(GitRepository):
    """Represents a Git repository for a Unity project.

    A UnityGitRepository has a few convenience methods for determining
    assets, meta files, etc.
    """

    def __init__(self, path: str, init=False):
        """Create an abstraction for a Unity Git project at the given path.

        For a project to be a Unity project, it is assumed to have an
        Assets/ directory at the project root.

        Args:
            path (str): path to the Git repository root directory
            init (bool, optional): if True, initialize Git at the given path
                if not already initialized. Defaults to False.

        Raises:
            ValueError: if the given path does not exist
            ValueError: if the given path is not a Unity project
        """
        super().__init__(path, init)
        if not self.is_dir("Assets"):
            # 'Assets/' does not exist, so this is not a Unity project.
            raise ValueError("Not a Unity git repository")

    @staticmethod
    def is_asset(path: Path) -> bool:
        """Check if the given path is an asset.

        Unity considers anything under the 'Assets/' directory to be an
        asset, be it a file or a directory. Each asset has a unique .meta
        file. Importantly, meta files are not assets. Dot-prefixed
        directories, and anything in them, are excluded from being an
        asset. Note that the path need not exist on the file system for
        this to return True.

        Args:
            path (Path): path to a file or directory relative to the
                project root

        Returns:
            bool: True if the given path is an asset, False otherwise
        """
        parts = path.parts
        # An asset is whatever Unity gives a .meta file.
        if len(parts) > 1 and parts[0] == "Assets":
            # Path must begin with 'Assets/' and have at least one more
            # component; no component may start with a period; and a meta
            # file is not an asset.
            return not UnityGitRepository.is_hidden(path) and path.suffix != ".meta"
        else:
            # Anything not under 'Assets/' is not an asset.
            return False

    @staticmethod
    def is_hidden(path: PurePath) -> bool:
        """Check if the given path will be considered hidden by Unity.

        Unity considers anything that begins with a dot (.) to be hidden.
        If it is a directory, then anything contained by the directory is
        also hidden. Note that the path need not exist on the file system
        for this to return True.

        Args:
            path (PurePath): path to check, relative to the project root

        Returns:
            bool: True if the given path is hidden, False otherwise
        """
        return any(p.startswith(".") for p in path.parts)

    @staticmethod
    def is_meta_file(path: PurePath) -> bool:
        """Check if the given path is a Unity .meta file.

        Note that the check is superficially based on the filename only. If
        the .meta suffix is removed from the given path, and the resulting
        path would point to a valid asset, then it is a valid .meta file.

        Args:
            path (PurePath): path to the meta file relative to the project
                root

        Returns:
            bool: True if the path points to a .meta file, False otherwise
        """
        parts = path.parts
        if len(parts) > 1 and parts[0] == "Assets":
            # Path must begin with 'Assets/', have at least one more
            # component, not be hidden, and carry the .meta suffix.
            return not UnityGitRepository.is_hidden(path) and path.suffix == ".meta"
        else:
            # Anything not under 'Assets/' is not a meta file.
            return False

    @staticmethod
    def meta_file_for_asset(asset_path: PurePath) -> PurePath:
        """Append the .meta suffix to an asset path to get a meta file path.

        Args:
            asset_path (Path): asset path to get the .meta path for

        Raises:
            ValueError: if the asset path is a .meta file

        Returns:
            Path: a path with the .meta suffix appended
        """
        # Meta files are not assets and have no meta files of their own.
        if asset_path.suffix == ".meta":
            raise ValueError(f"{asset_path} is not an asset")
        else:
            return asset_path.with_suffix(asset_path.suffix + ".meta")

    @staticmethod
    def asset_for_meta_file(meta_file_path: Path):
        """Remove the .meta suffix from a meta file path to get an asset
        path.

        Args:
            meta_file_path (Path): meta file path to get the asset path for

        Raises:
            ValueError: if the meta file path is not a meta file

        Returns:
            Path: a path with the .meta suffix removed
        """
        if meta_file_path.suffix != ".meta":
            raise ValueError(f"{meta_file_path} is not a meta file")
        return meta_file_path.with_suffix("")

    def proposed(self):
        """Build the asset tree as it would look after the staged changes.

        Starts from the tracked assets under Assets/ and applies staged
        deletions, additions and renames on top.

        Returns:
            PathTree: the proposed asset tree
        """
        assets = filter(self.is_asset, self.tracked_at("Assets/"))
        tree = PathTree.from_iterable(assets, self.root / "Assets")
        for change, paths in self.staged_at("Assets/").items():
            if change == "D":
                for (path,) in paths:
                    tree.prune(path, PurePath("Assets"))
            elif change == "A":
                for (path,) in paths:
                    tree.add(path)
            elif change == "R":
                for old, new in paths:
                    # BUG FIX: the original pruned 'path' — a stale variable
                    # from an earlier branch (or undefined) — instead of the
                    # rename source 'old'.
                    tree.prune(old, PurePath("Assets"))
                    tree.add(new)
        return tree
from typing import Tuple, Collection, Mapping
from subprocess import check_output
from .fs.pathtree import PathTree, PathNode
class GitRepository:
    """Represents a Git repository on the file system."""

    def __init__(self, path: str, init=False):
        """Create an abstraction for a Git repository at the given path.

        Args:
            path (str): path to the Git repository root directory
            init (bool, optional): if True, initialize Git at the given path
                if not already initialized. Defaults to False.

        Raises:
            ValueError: if the given path does not exist
        """
        path = Path(path)
        if not path.is_dir():
            # BUG FIX: the original string lacked the f-prefix, so the
            # message showed the literal "{path}" instead of the real path.
            raise ValueError(f"Directory does not exist: {path}")
        self._root = path
        self._git_initd = (path / ".git").is_dir()
        if not self._git_initd and init:
            self.init()

    @property
    def root(self) -> Path:
        """Get the path to the root of this Git repository.

        Returns:
            Path: path to the root of this Git repository
        """
        return self._root

    def init(self):
        """Initialize this Git repository if not already initialized."""
        if not self._git_initd:
            self._run_git("init")

    @property
    def tracked(self) -> Collection[Path]:
        """Get all tracked files.

        The paths of the tracked files are relative to the project root.

        Returns:
            Collection[Path]: paths to the tracked files
        """
        return self.tracked_at(".")

    def tracked_at(self, path: str) -> Collection[Path]:
        """Get tracked files filtered by the given path.

        The paths of the tracked files are relative to the project root.

        Args:
            path (str): path relative to the project root to filter the
                tracked files

        Returns:
            Collection[Path]: paths to the tracked files
        """
        tracked = set()
        # List all files in HEAD recursively (-r); --full-tree means paths
        # are interpreted as relative to the repository root.
        cmd = ("ls-tree", "--name-only", "--full-tree", "-r", "HEAD")
        if (self.root / path).is_dir():
            cmd += (path,)
        cmd_output = self._run_git(
            *cmd, universal_newlines=True, encoding="utf8"
        ).strip()
        if len(cmd_output) > 0:
            # Text mode in check_output normalizes line endings to \n.
            for line in cmd_output.split("\n"):
                # Cast to concrete paths native to the OS.
                tracked.add(Path(line))
        return tracked

    def staged_at(self, path: str) -> Mapping[str, Collection[Tuple[Path]]]:
        """Get staged changes filtered by the given path.

        Staged changes are returned as a mapping of change type to the paths
        affected by the change. Change types include 'A' for added paths,
        'D' for deleted paths and 'R' for renamed paths. Each change type is
        mapped to a collection of path tuples. For change types 'A' and 'D'
        the tuples consist of only one Path. For change type 'R' the tuples
        consist of two Paths, the first being the path renamed from and the
        second the path renamed to.

        Args:
            path (str): path relative to the project root to filter the
                staged changes

        Returns:
            Mapping[str, Set[Tuple[Path]]]: a mapping of change type to
                affected path(s).
        """
        changes = {}
        # Compare the index (diff --cached) with HEAD, detect renames
        # (--find-renames) and list name plus change type (--name-status).
        # The output uses NULs as field delimiters (-z).
        cmd = (
            "diff",
            "--cached",
            "--name-status",
            "--find-renames",
            "-z",
        )
        if (self.root / path).is_dir():
            cmd += ("--", path)
        # Command output tends to have trailing NULs so trim those.
        cmd_output = (
            self._run_git(*cmd, universal_newlines=True, encoding="utf8")
            .strip()
            .strip("\0")
        )
        if len(cmd_output) > 0:
            words = cmd_output.split("\0")
            i = 0
            while i < len(words):
                change = words[i]
                if change == "A" and i + 1 < len(words):
                    # Addition: one path follows the change type.
                    changes.setdefault(change, set()).add((Path(words[i + 1]),))
                    i += 2
                elif change == "D" and i + 1 < len(words):
                    # Deletion: one path follows the change type.
                    changes.setdefault(change, set()).add((Path(words[i + 1]),))
                    i += 2
                elif change.startswith("R") and i + 2 < len(words):
                    # Rename: two paths follow (from, to). Paths are
                    # immutable and hashable, and tuples of hashable
                    # elements are hashable, so the set accepts them.
                    path_from, path_to = Path(words[i + 1]), Path(words[i + 2])
                    changes.setdefault("R", set()).add((path_from, path_to))
                    i += 3
                else:
                    # Skip unhandled change types one word at a time —
                    # essential to prevent an infinite loop on C (copy),
                    # M (modification), T (type change), U (unmerged),
                    # X (unknown), etc.
                    i += 1
        return changes

    @property
    def staged(self):
        """Get all staged changes.

        See staged_at() for the shape of the returned mapping.

        Returns:
            Mapping[str, Set[Tuple[Path]]]: a mapping of change type to
                affected path(s).
        """
        return self.staged_at(".")

    def rename(self, *from_to: str):
        """Rename files and stage the changes.

        Note that each path to rename is specified as a (from, to) pair of
        paths. The paths are interpreted as relative to the repository root.
        All pairs are validated before any rename is performed.
        """
        if len(from_to) > 0:
            if all([len(p) == 2 and self.is_file(p[0]) for p in from_to]):
                for old, new in from_to:
                    self._run_git("mv", old, new)

    def add(self, *paths: str):
        """Add the given paths and stage the changes.

        The paths are interpreted as relative to the repository root.

        Args:
            paths (str): paths to the files to be added
        """
        if len(paths) > 0:
            if all(self.is_file(p) for p in paths):
                self._run_git("add", *paths)

    def remove(self, *paths: str):
        """Remove given paths and stage the changes.

        The paths are interpreted as relative to the repository root.

        Args:
            paths (str): paths to the files to be deleted
        """
        if len(paths) > 0:
            if all(self.is_file(p) for p in paths):
                self._run_git("rm", *paths)

    def commit(self, message: str):
        """Commit the staged files with the given commit message.

        Args:
            message (str): message for the commit
        """
        self._run_git("commit", "-m", message)

    def is_dir(self, path: str) -> bool:
        """Check if a directory exists at the given path.

        Args:
            path (str): path of the directory, relative to the project root

        Returns:
            bool: True if a directory exists at the path, False otherwise
        """
        return self._abs_path(path).is_dir()

    def is_file(self, path: str) -> bool:
        """Check if a file exists at the given path.

        Args:
            path (str): path of the file, relative to the project root

        Returns:
            bool: True if a file exists at the given path, False otherwise
        """
        return self._abs_path(path).is_file()

    def path_exists(self, path: str) -> bool:
        """Check if the given path exists on the file system.

        Args:
            path (str): path relative to the project root

        Returns:
            bool: True if the path exists, False otherwise
        """
        return self._abs_path(path).exists()

    def _run_git(self, *args: str, **kwargs):
        """Invoke Git at the project root with the given arguments.

        The keyword arguments are forwarded to subprocess.check_output.

        Raises:
            CalledProcessError: if the exit code of the Git command was
                non-zero

        Returns:
            bytes: the output of the Git command
        """
        return check_output(("git", "-C", self._root) + args, **kwargs)

    def _abs_path(self, path: str) -> Path:
        """Get the absolute path given a path relative to the project root.

        Args:
            path (str): a path relative to the project root

        Returns:
            Path: an absolute path
        """
        return self._root / path
class UnityGitRepository(GitRepository):
    """Represents a Git repository for a Unity project.

    A UnityGitRepository has a few convenience methods for determining
    assets, meta files, etc.
    """

    def __init__(self, path: str, init=False):
        """Create an abstraction for a Unity Git project at the given path.

        For a project to be a Unity project, it is assumed to have an
        Assets/ directory at the project root.

        Args:
            path (str): path to the Git repository root directory
            init (bool, optional): if True, initialize Git at the given path
                if not already initialized. Defaults to False.

        Raises:
            ValueError: if the given path does not exist
            ValueError: if the given path is not a Unity project
        """
        super().__init__(path, init)
        if not self.is_dir("Assets"):
            # 'Assets/' does not exist, so this is not a Unity project.
            raise ValueError("Not a Unity git repository")

    @staticmethod
    def is_asset(path: Path) -> bool:
        """Check if the given path is an asset.

        Unity considers anything under the 'Assets/' directory to be an
        asset, be it a file or a directory. Each asset has a unique .meta
        file. Importantly, meta files are not assets. Dot-prefixed
        directories, and anything in them, are excluded from being an
        asset. Note that the path need not exist on the file system for
        this to return True.

        Args:
            path (Path): path to a file or directory relative to the
                project root

        Returns:
            bool: True if the given path is an asset, False otherwise
        """
        parts = path.parts
        # An asset is whatever Unity gives a .meta file.
        if len(parts) > 1 and parts[0] == "Assets":
            # Path must begin with 'Assets/' and have at least one more
            # component; no component may start with a period; and a meta
            # file is not an asset.
            return not UnityGitRepository.is_hidden(path) and path.suffix != ".meta"
        else:
            # Anything not under 'Assets/' is not an asset.
            return False

    @staticmethod
    def is_hidden(path: PurePath) -> bool:
        """Check if the given path will be considered hidden by Unity.

        Unity considers anything that begins with a dot (.) to be hidden.
        If it is a directory, then anything contained by the directory is
        also hidden. Note that the path need not exist on the file system
        for this to return True.

        Args:
            path (PurePath): path to check, relative to the project root

        Returns:
            bool: True if the given path is hidden, False otherwise
        """
        return any(p.startswith(".") for p in path.parts)

    @staticmethod
    def is_meta_file(path: PurePath) -> bool:
        """Check if the given path is a Unity .meta file.

        Note that the check is superficially based on the filename only. If
        the .meta suffix is removed from the given path, and the resulting
        path would point to a valid asset, then it is a valid .meta file.

        Args:
            path (PurePath): path to the meta file relative to the project
                root

        Returns:
            bool: True if the path points to a .meta file, False otherwise
        """
        parts = path.parts
        if len(parts) > 1 and parts[0] == "Assets":
            # Path must begin with 'Assets/', have at least one more
            # component, not be hidden, and carry the .meta suffix.
            return not UnityGitRepository.is_hidden(path) and path.suffix == ".meta"
        else:
            # Anything not under 'Assets/' is not a meta file.
            return False

    @staticmethod
    def meta_file_for_asset(asset_path: PurePath) -> PurePath:
        """Append the .meta suffix to an asset path to get a meta file path.

        Args:
            asset_path (Path): asset path to get the .meta path for

        Raises:
            ValueError: if the asset path is a .meta file

        Returns:
            Path: a path with the .meta suffix appended
        """
        # Meta files are not assets and have no meta files of their own.
        if asset_path.suffix == ".meta":
            raise ValueError(f"{asset_path} is not an asset")
        else:
            return asset_path.with_suffix(asset_path.suffix + ".meta")

    @staticmethod
    def asset_for_meta_file(meta_file_path: Path):
        """Remove the .meta suffix from a meta file path to get an asset
        path.

        Args:
            meta_file_path (Path): meta file path to get the asset path for

        Raises:
            ValueError: if the meta file path is not a meta file

        Returns:
            Path: a path with the .meta suffix removed
        """
        if meta_file_path.suffix != ".meta":
            raise ValueError(f"{meta_file_path} is not a meta file")
        return meta_file_path.with_suffix("")

    def proposed(self):
        """Build the asset tree as it would look after the staged changes.

        Starts from the tracked assets under Assets/ and applies staged
        deletions, additions and renames on top.

        Returns:
            PathTree: the proposed asset tree
        """
        assets = filter(self.is_asset, self.tracked_at("Assets/"))
        tree = PathTree.from_iterable(assets, self.root / "Assets")
        for change, paths in self.staged_at("Assets/").items():
            if change == "D":
                for (path,) in paths:
                    tree.prune(path, PurePath("Assets"))
            elif change == "A":
                for (path,) in paths:
                    tree.add(path)
            elif change == "R":
                for old, new in paths:
                    # BUG FIX: the original pruned 'path' — a stale variable
                    # from an earlier branch (or undefined) — instead of the
                    # rename source 'old'.
                    tree.prune(old, PurePath("Assets"))
                    tree.add(new)
        return tree
import requests
import unittest
class TestStringMethods(unittest.TestCase):
'''def test_000_operacoes_ola1(self):
r = requests.get('http://localhost:5000/ola/marcio')
self.assertEqual(r.text,'ola marcio')
r = requests.get('http://localhost:5000/ola/mario')
self.assertEqual(r.text,'ola mario')
def test_001_operacoes_ola2(self):
r = requests.get('http://localhost:5000/ola_upgrade?pessoa1=marcio&pessoa2=alvaro')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=alvaro&pessoa1=marcio')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=robin&pessoa1=batman')
self.assertEqual(r.text,'ola batman e robin')
def test_002_operacoes_ola3(self):
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'batman','pessoa2':'robin'})
self.assertEqual(r.text,'ola batman e robin')
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'tonico','pessoa2':'tinoco'})
self.assertEqual(r.text,'ola tonico e tinoco')
def test_003_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney&pessoa2=fred')
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=ron&pessoa1=harry')
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_004_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
def test_005_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'barney','pessoa2':'fred'})
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry','pessoa2':'ron'})
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_006_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa2':'fred'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
'''
def test_100_arquivo_aquecimento(self):
import aquecimento_dicionarios #esse teste verifica se o arquivo aquecimento_dicionarios esta na mesma pasta que o runtests.py
def test_101_aquecimento_consulta(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('tt0076759','lucio')['comment'],'achei legal')
self.assertEqual(consulta('tt0076759','marcos')['comment'],'gostei')
self.assertEqual(consulta('tt0076759','maria'),'nao encontrado')
def test_102_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('1212','maria'),'nao encontrado')
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
def test_103_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
antes = len(reviews_aquecimento)
adiciona('1212','maria','mudei de ideia')
self.assertEqual(consulta('1212','maria')['comment'],'mudei de ideia')
adiciona('1212','maria','quer saber? bom mesmo')
self.assertEqual(consulta('1212','maria')['comment'],'quer saber? bom mesmo')
depois = len(reviews_aquecimento)
self.assertEqual(antes,depois)
def test_203_pega_review(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertTrue('gostei' in r.json()['comment'])
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'achei legal'})
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'estranho'})
def test_204_pega_review_com_erro(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/outro/gato')
self.assertEqual(r.json(),{'erro':'comentario nao encontrado'})
self.assertEqual(r.status_code,404)
def test_205_adiciona_review(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/tt1211837/marcos',
json={'comment':'esquisito mesmo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'esquisito mesmo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/marcos')
self.assertEqual(r.json(),{'user_id':'marcos','comment':'esquisito mesmo'})
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'curiosa mistura de fantasmas e empreendedorismo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
def test_206_muda_review(self):
antes = self.total_reviews()
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
depois = self.total_reviews()
self.assertEqual(antes,depois)
def test_207_all_films(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
self.assertTrue(len(lista_respostas) >= 2)
achei_dr_strange = False
for review in r.json():
if review['film_id'] == 'tt1211837':
achei_dr_strange = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o filme dr strange')
def test_208_estrelas(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/marcos')
self.assertEqual(int(r.json()['stars']),4)
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/lucio')
self.assertEqual(int(r.json()['stars']),5)
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/lucio')
self.assertEqual(int(r.json()['stars']),2)
self.assertEqual(r.status_code,200) #codigo normal, que ocorre
#se voce simplesmente nao fizer nada
def test_209_estrelas_review_nao_encontrada(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'review nao encontrada')
self.assertEqual(r.status_code,404)
def test_210_novas_estrelas(self):
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':3})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],3)
contagem = self.total_stars()
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],4)
cont_depois = self.total_stars()
self.assertEqual(contagem,cont_depois)
def test_211_average_stars(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':1})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(2.9 < r.json()['average_stars'] < 3.1)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
def test_301_filme_invalido(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/jamesbond/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def test_302_all_films_nome(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
achei_dr_strange = False
achei_star_wars = False
for review in r.json():
if 'film_name' not in review:
self.fail('achei um filme sem nome!')
if 'trange' in review['film_name']:
achei_dr_strange = True
if 'ars' in review['film_name']:
achei_star_wars = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o nome do dr strange')
if not achei_star_wars:
self.fail('a lista de reviews do marcos nao contem o nome do star wars')
def test_303_all_films_nao_deve_alterar_a_review(self):
r = requests.get('http://localhost:5001/socialfilm/all')
lista_reviews = r.json()['reviews']
for review in lista_reviews:
if 'film_name' in review:
self.fail('voce alterou as reviews do servidor, colocando nome')
def test_304_estrelas_filme_inexistente(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759nao/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
r = requests.get('http://localhost:5001/socialfilm/stars/tt00076759/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def total_reviews(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['reviews'])
def total_stars(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['notas'])
def carregar_arquivo_aquecimento(self):
'''
carrega o arquivo aquecimento_dicionarios, se
ele ainda nao foi carregado
'''
global consulta,adiciona,reviews_aquecimento
try:
consulta #se o modulo ainda nao foi carregado
#essa linha da pau e o except é executado
except:
from aquecimento_dicionarios import consulta, adiciona#entao carregue
from aquecimento_dicionarios import reviews_aquecimento
def runTests():
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestStringMethods)
unittest.TextTestRunner(verbosity=2,failfast=True).run(suite)
try:
from aquecimento_dicionarios_gabarito_NAO import consulta,adiciona
from aquecimento_dicionarios_gabarito_NAO import reviews_aquecimento
except:
pass
if __name__ == '__main__':
runTests() | Aula_04_04/runtests.py | import requests
import unittest
class TestStringMethods(unittest.TestCase):
'''def test_000_operacoes_ola1(self):
r = requests.get('http://localhost:5000/ola/marcio')
self.assertEqual(r.text,'ola marcio')
r = requests.get('http://localhost:5000/ola/mario')
self.assertEqual(r.text,'ola mario')
def test_001_operacoes_ola2(self):
r = requests.get('http://localhost:5000/ola_upgrade?pessoa1=marcio&pessoa2=alvaro')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=alvaro&pessoa1=marcio')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=robin&pessoa1=batman')
self.assertEqual(r.text,'ola batman e robin')
def test_002_operacoes_ola3(self):
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'batman','pessoa2':'robin'})
self.assertEqual(r.text,'ola batman e robin')
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'tonico','pessoa2':'tinoco'})
self.assertEqual(r.text,'ola tonico e tinoco')
def test_003_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney&pessoa2=fred')
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=ron&pessoa1=harry')
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_004_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
def test_005_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'barney','pessoa2':'fred'})
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry','pessoa2':'ron'})
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_006_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa2':'fred'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
'''
def test_100_arquivo_aquecimento(self):
import aquecimento_dicionarios #esse teste verifica se o arquivo aquecimento_dicionarios esta na mesma pasta que o runtests.py
def test_101_aquecimento_consulta(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('tt0076759','lucio')['comment'],'achei legal')
self.assertEqual(consulta('tt0076759','marcos')['comment'],'gostei')
self.assertEqual(consulta('tt0076759','maria'),'nao encontrado')
def test_102_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('1212','maria'),'nao encontrado')
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
def test_103_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
antes = len(reviews_aquecimento)
adiciona('1212','maria','mudei de ideia')
self.assertEqual(consulta('1212','maria')['comment'],'mudei de ideia')
adiciona('1212','maria','quer saber? bom mesmo')
self.assertEqual(consulta('1212','maria')['comment'],'quer saber? bom mesmo')
depois = len(reviews_aquecimento)
self.assertEqual(antes,depois)
def test_203_pega_review(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertTrue('gostei' in r.json()['comment'])
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'achei legal'})
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'estranho'})
def test_204_pega_review_com_erro(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/outro/gato')
self.assertEqual(r.json(),{'erro':'comentario nao encontrado'})
self.assertEqual(r.status_code,404)
def test_205_adiciona_review(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/tt1211837/marcos',
json={'comment':'esquisito mesmo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'esquisito mesmo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/marcos')
self.assertEqual(r.json(),{'user_id':'marcos','comment':'esquisito mesmo'})
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'curiosa mistura de fantasmas e empreendedorismo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
def test_206_muda_review(self):
antes = self.total_reviews()
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
depois = self.total_reviews()
self.assertEqual(antes,depois)
def test_207_all_films(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
self.assertTrue(len(lista_respostas) >= 2)
achei_dr_strange = False
for review in r.json():
if review['film_id'] == 'tt1211837':
achei_dr_strange = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o filme dr strange')
def test_208_estrelas(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/marcos')
self.assertEqual(int(r.json()['stars']),4)
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/lucio')
self.assertEqual(int(r.json()['stars']),5)
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/lucio')
self.assertEqual(int(r.json()['stars']),2)
self.assertEqual(r.status_code,200) #codigo normal, que ocorre
#se voce simplesmente nao fizer nada
def test_209_estrelas_review_nao_encontrada(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'review nao encontrada')
self.assertEqual(r.status_code,404)
def test_210_novas_estrelas(self):
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':3})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],3)
contagem = self.total_stars()
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],4)
cont_depois = self.total_stars()
self.assertEqual(contagem,cont_depois)
def test_211_average_stars(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':1})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(2.9 < r.json()['average_stars'] < 3.1)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
def test_301_filme_invalido(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/jamesbond/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def test_302_all_films_nome(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
achei_dr_strange = False
achei_star_wars = False
for review in r.json():
if 'film_name' not in review:
self.fail('achei um filme sem nome!')
if 'trange' in review['film_name']:
achei_dr_strange = True
if 'ars' in review['film_name']:
achei_star_wars = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o nome do dr strange')
if not achei_star_wars:
self.fail('a lista de reviews do marcos nao contem o nome do star wars')
def test_303_all_films_nao_deve_alterar_a_review(self):
r = requests.get('http://localhost:5001/socialfilm/all')
lista_reviews = r.json()['reviews']
for review in lista_reviews:
if 'film_name' in review:
self.fail('voce alterou as reviews do servidor, colocando nome')
def test_304_estrelas_filme_inexistente(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759nao/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
r = requests.get('http://localhost:5001/socialfilm/stars/tt00076759/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def total_reviews(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['reviews'])
def total_stars(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['notas'])
def carregar_arquivo_aquecimento(self):
'''
carrega o arquivo aquecimento_dicionarios, se
ele ainda nao foi carregado
'''
global consulta,adiciona,reviews_aquecimento
try:
consulta #se o modulo ainda nao foi carregado
#essa linha da pau e o except é executado
except:
from aquecimento_dicionarios import consulta, adiciona#entao carregue
from aquecimento_dicionarios import reviews_aquecimento
def runTests():
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestStringMethods)
unittest.TextTestRunner(verbosity=2,failfast=True).run(suite)
try:
from aquecimento_dicionarios_gabarito_NAO import consulta,adiciona
from aquecimento_dicionarios_gabarito_NAO import reviews_aquecimento
except:
pass
if __name__ == '__main__':
runTests() | 0.148047 | 0.204203 |
from decimal import Decimal
from analytics.models import Rating
from collector.models import Log
from recs.base_recommender import base_recommender
from django.db.models import Count
from django.db.models import Q
from django.db.models import Avg
class PopularityBasedRecs(base_recommender):
def predict_score(self, user_id, item_id):
avg_rating = Rating.objects.filter(~Q(user_id=user_id) &
Q(movie_id=item_id)).values('movie_id').aggregate(Avg('rating'))
return avg_rating['rating__avg']
@staticmethod
def recommend_items_from_log(num=6):
items = Log.objects.values('content_id')
items = items.filter(event='buy').annotate(Count('user_id'))
sorted_items = sorted(items, key=lambda item: -float(item['user_id__count']))
return sorted_items[:num]
def recommend_items(self, user_id, num=6):
pop_items = Rating.objects.filter(~Q(user_id=user_id)).values('movie_id').annotate(Count('user_id'),
Avg('rating'))
sorted_items = sorted(pop_items, key=lambda item: -float(item['user_id__count']))[:num]
return sorted_items
@staticmethod
def recommend_items_by_ratings(user_id, active_user_items, num=6):
item_ids = [i['id'] for i in active_user_items]
pop_items = Rating.objects.filter(~Q(movie_id__in=item_ids)).values('movie_id').annotate(Count('user_id'),
Avg('rating'))
recs = {i['movie_id']: {'prediction': i['rating__avg'], 'pop': i['user_id__count']} for i in pop_items}
sorted_items = sorted(recs.items(), key=lambda item: -float(item[1]['pop']))[:num]
return sorted_items
@staticmethod
def predict_score_by_ratings(item_id, movies):
item = Rating.objects.filter(movie_id=item_id).values('movie_id').annotate(Avg('rating')).first()
if not item:
return 0
return Decimal(item['rating__avg']) | recs/popularity_recommender.py | from decimal import Decimal
from analytics.models import Rating
from collector.models import Log
from recs.base_recommender import base_recommender
from django.db.models import Count
from django.db.models import Q
from django.db.models import Avg
class PopularityBasedRecs(base_recommender):
def predict_score(self, user_id, item_id):
avg_rating = Rating.objects.filter(~Q(user_id=user_id) &
Q(movie_id=item_id)).values('movie_id').aggregate(Avg('rating'))
return avg_rating['rating__avg']
@staticmethod
def recommend_items_from_log(num=6):
items = Log.objects.values('content_id')
items = items.filter(event='buy').annotate(Count('user_id'))
sorted_items = sorted(items, key=lambda item: -float(item['user_id__count']))
return sorted_items[:num]
def recommend_items(self, user_id, num=6):
pop_items = Rating.objects.filter(~Q(user_id=user_id)).values('movie_id').annotate(Count('user_id'),
Avg('rating'))
sorted_items = sorted(pop_items, key=lambda item: -float(item['user_id__count']))[:num]
return sorted_items
@staticmethod
def recommend_items_by_ratings(user_id, active_user_items, num=6):
item_ids = [i['id'] for i in active_user_items]
pop_items = Rating.objects.filter(~Q(movie_id__in=item_ids)).values('movie_id').annotate(Count('user_id'),
Avg('rating'))
recs = {i['movie_id']: {'prediction': i['rating__avg'], 'pop': i['user_id__count']} for i in pop_items}
sorted_items = sorted(recs.items(), key=lambda item: -float(item[1]['pop']))[:num]
return sorted_items
@staticmethod
def predict_score_by_ratings(item_id, movies):
item = Rating.objects.filter(movie_id=item_id).values('movie_id').annotate(Avg('rating')).first()
if not item:
return 0
return Decimal(item['rating__avg']) | 0.58261 | 0.202739 |
import os
import sys
import pandas as pd
import yaml
# sys.path.append("/home/weber/PycharmProjects/EXOTIC/src")
sys.path.append("/home/weber/PycharmProjects/EXOTIC/clean/src")
pd.options.mode.chained_assignment = None # default='warn'
tqdm.pandas()
# YAML FILES CONFIG
yaml = yaml.load(open(
"/home/weber/PycharmProjects/gene_isoforms/src/config/config_files.yaml"), Loader=yaml.FullLoader)
base_directory = yaml["base_directory"]
if base_directory == 'TO_CHANGE':
sys.exit(
'EXIT : Need to specify the base_directory in config file : "conf_files.yaml"')
[os.makedirs(base_directory + e, exist_ok=True)
for e in yaml if e != "base_directory"]
class ProcessRefSeq:
def __init__(self, path):
"""[Main function to launch steps]
Arguments:
path {[str]} -- [Output file path]
Returns:
[pd.DataFrame] -- [Final processed refseq dataframe]
"""
self.base_directory = yaml["base_directory"]
if os.path.isfile(path) is True:
print("# Files don't exist ☒")
# * 0 Load raw file
refseq_gff = yaml["External"]["raw_refseq"]
refseq_df = pd.DataFrame()
# refseq_df = self.load_refseq(refseq_gff)
utils.mkdir(os.path.dirname(self.base_directory +
yaml["TMP"]["tmp_refseq_chroms"]))
# * 1 Build tmp files by category
refseq_df_chroms = self.refseq_chroms_fct(
yaml["TMP"]["tmp_refseq_chroms"], refseq_df)
refseq_df_pc_genes = self.refseq_pc_genes_fct(
yaml["TMP"]["tmp_refseq_pc_genes"], refseq_df)
refseq_df_mrnas = self.refseq_mrnas_fct(
yaml["TMP"]["tmp_refseq_mrnas"], refseq_df)
refseq_df_exons = self.refseq_exons_fct(
yaml["TMP"]["tmp_refseq_exons"], refseq_df)
refseq_df_cds = self.refseq_cds_fct(
yaml["TMP"]["tmp_refseq_cds"], refseq_df)
else:
print("# Files already exist ✓")
@staticmethod
def load_refseq(path):
"""[Load RefSeq GFF file]
Arguments:
path {[str]} -- [Path to the GFF RefSeq file]
Returns:
[pd.DataFrame] -- [RefSeq GFF turned into pandas dataframe]
"""
print("### Load RefSeq / File = {}".format(path))
refseq_df = pd.read_csv(
path,
compression="gzip",
sep="\t",
skiprows=9,
# nrows=10000,
names=["NC", "RefSeq_validation", "Region_type", "Start",
"End", "Score", "Strand", "Phase", "Attributes"],
)
refseq_df = refseq_df.dropna(subset=["Start", "End"])
refseq_df["Start"] = refseq_df["Start"].astype(int)
refseq_df["End"] = refseq_df["End"].astype(int)
return refseq_df
# @staticmethod
def refseq_chroms_fct(self, path, refseq_df):
"""[Extract chromosomes from RefSeq GFF]
Arguments:
path {[str]} -- [Tmp output file]
refseq_df {[pd.DataFrame]} -- [RefSeq GFF turned into pandas dataframe]
Returns:
[pd.DataFrame] -- [RefSeq chromosomes into pandas dataframe]
"""
print("### Build temp file (chroms part) / File = {}".format(path))
if os.path.isfile(self.base_directory + path) is False:
print("# Files don't exist ☒")
refseq_df_chroms = refseq_df.loc[refseq_df["Region_type"] == "region"]
index_list = list(refseq_df_chroms.index)
chroms = [(i, index_list[j + 1] - 1)
for j, i in enumerate(index_list) if j < (len(index_list) - 1)]
refseq_df_chroms = refseq_df_chroms.loc[
(refseq_df_chroms["NC"].str.contains("NC")) & (
refseq_df_chroms["RefSeq_validation"] == "RefSeq")
]
refseq_df_chroms.to_parquet(self.base_directory + path)
else:
print("# Files exist ✓, Loading ... ")
refseq_df_chroms = pd.read_parquet(self.base_directory + path)
print(refseq_df_chroms)
print(refseq_df_chroms.shape)
return refseq_df_chroms
# @staticmethod
def refseq_pc_genes_fct(self, path, refseq_df):
"""[Extract protein coding genes from RefSeq GFF]
Arguments:
path {[str]} -- [Tmp output file]
refseq_df {[pd.DataFrame]} -- [RefSeq GFF turned into pandas dataframe]
Returns:
[pd.DataFrame] -- [RefSeq protein coding genes into pandas dataframe]
"""
print("### Build temp file (protein coding genes part) / File = {}".format(path))
if os.path.isfile(self.base_directory + path) is False:
print("# Files don't exist ☒")
refseq_df_pc_genes = refseq_df.loc[
(refseq_df["Attributes"].str.contains(
"gene_biotype=protein_coding"))
& (refseq_df["NC"].str.contains("NC_"))
# & (refseq_df["RefSeq_validation"].str.contains("BestRefSeq"))
]
refseq_df_pc_genes.to_parquet(self.base_directory + path)
else:
print("# Files exist ✓, Loading ... ")
refseq_df_pc_genes = pd.read_parquet(self.base_directory + path)
print(refseq_df_pc_genes)
print(refseq_df_pc_genes.shape)
return refseq_df_pc_genes
# @staticmethod
def refseq_mrnas_fct(self, path, refseq_df):
"""[Extract mRNAs from RefSeq GFF]
Arguments:
path {[str]} -- [Tmp output file]
refseq_df {[pd.DataFrame]} -- [RefSeq GFF turned into pandas dataframe]
Returns:
[pd.DataFrame] -- [RefSeq mRNAs into pandas dataframe]
"""
print("### Build temp file (mRNAs part) / File = {}".format(path))
if os.path.isfile(self.base_directory + path) is False:
print("# Files don't exist ☒")
refseq_df_mrna = refseq_df.loc[
(refseq_df["Attributes"].str.contains("NM_")) & (
refseq_df["Region_type"] == "mRNA") & (refseq_df["NC"].str.contains("NC_"))
]
refseq_df_mrna.to_parquet(self.base_directory + path)
else:
print("# Files exist ✓, Loading ... ")
refseq_df_mrna = pd.read_parquet(self.base_directory + path)
print(refseq_df_mrna)
print(refseq_df_mrna.shape)
return refseq_df_mrna
# @staticmethod
def refseq_exons_fct(self, path, refseq_df):
"""[Extract exons including UTRs from RefSeq GFF]
Arguments:
path {[str]} -- [Tmp output file]
refseq_df {[pd.DataFrame]} -- [RefSeq GFF turned into pandas dataframe]
Returns:
[pd.DataFrame] -- [RefSeq exons including UTRs into pandas dataframe]
"""
print("### Build temp file (Exons (with UTRs) part) / File = {}".format(path))
if os.path.isfile(self.base_directory + path) is False:
print("# Files don't exist ☒")
refseq_df_exons = refseq_df.loc[
(refseq_df["Attributes"].str.contains("exon-NM"))
& (refseq_df["Region_type"] == "exon")
& (refseq_df["NC"].str.contains("NC_"))
]
refseq_df_exons.to_parquet(self.base_directory + path)
else:
print("# Files exist ✓, Loading ... ")
refseq_df_exons = pd.read_parquet(self.base_directory + path)
print(refseq_df_exons)
print(refseq_df_exons.shape)
return refseq_df_exons
# @staticmethod
def refseq_cds_fct(self, path, refseq_df):
"""[Extract coding exons (TER) from RefSeq GFF]
Arguments:
path {[str]} -- [Tmp output file]
refseq_df {[pd.DataFrame]} -- [RefSeq GFF turned into pandas dataframe]
Returns:
[pd.DataFrame] -- [RefSeq coding exons (CDS) into pandas dataframe]
"""
print("### Build temp file (coding exons part) / File = {}".format(path))
if os.path.isfile(self.base_directory + path) is False:
print("# Files don't exist ☒")
refseq_df_cds = refseq_df.loc[
(refseq_df["Attributes"].str.contains("NP_")) & (
refseq_df["Region_type"] == "CDS") & (refseq_df["NC"].str.contains("NC_"))
]
refseq_df_cds.to_parquet(self.base_directory + path)
else:
print("# Files exist ✓, Loading ... ")
refseq_df_cds = pd.read_parquet(self.base_directory + path)
print(refseq_df_cds)
print(refseq_df_cds.shape)
return refseq_df_cds
if __name__ == "__main__":
c = ProcessRefSeq(
base_directory + yaml["TMP"]["tmp_refseq_cds"])
# print(c.groupby_mrnas) | src/prepare_refseq.py |
import os
import sys
import pandas as pd
import yaml
# sys.path.append("/home/weber/PycharmProjects/EXOTIC/src")
sys.path.append("/home/weber/PycharmProjects/EXOTIC/clean/src")
pd.options.mode.chained_assignment = None # default='warn'
tqdm.pandas()
# YAML FILES CONFIG
yaml = yaml.load(open(
"/home/weber/PycharmProjects/gene_isoforms/src/config/config_files.yaml"), Loader=yaml.FullLoader)
base_directory = yaml["base_directory"]
if base_directory == 'TO_CHANGE':
sys.exit(
'EXIT : Need to specify the base_directory in config file : "conf_files.yaml"')
[os.makedirs(base_directory + e, exist_ok=True)
for e in yaml if e != "base_directory"]
class ProcessRefSeq:
    """Split the raw RefSeq GFF annotation into per-category parquet caches.

    Each category (chromosomes, protein-coding genes, mRNAs, exons, CDS) is
    filtered out of the raw GFF dataframe and cached as a parquet file under
    ``base_directory`` so that subsequent runs can simply reload it.
    """

    def __init__(self, path):
        """Run every processing step unless the final output already exists.

        Arguments:
            path {str} -- Output file path checked to decide whether to work
        """
        self.base_directory = yaml["base_directory"]
        # BUG FIX: the original condition was ``os.path.isfile(path) is True``,
        # which rebuilt everything exactly when the output was already there
        # (and printed the opposite message).  All the sibling *_fct methods
        # use the "build when missing" polarity; match them here.
        if not os.path.isfile(path):
            print("# Files don't exist ☒")
            # * 0 Load raw file
            refseq_gff = yaml["External"]["raw_refseq"]
            refseq_df = pd.DataFrame()
            # refseq_df = self.load_refseq(refseq_gff)
            utils.mkdir(os.path.dirname(self.base_directory +
                                        yaml["TMP"]["tmp_refseq_chroms"]))
            # * 1 Build tmp files by category
            refseq_df_chroms = self.refseq_chroms_fct(
                yaml["TMP"]["tmp_refseq_chroms"], refseq_df)
            refseq_df_pc_genes = self.refseq_pc_genes_fct(
                yaml["TMP"]["tmp_refseq_pc_genes"], refseq_df)
            refseq_df_mrnas = self.refseq_mrnas_fct(
                yaml["TMP"]["tmp_refseq_mrnas"], refseq_df)
            refseq_df_exons = self.refseq_exons_fct(
                yaml["TMP"]["tmp_refseq_exons"], refseq_df)
            refseq_df_cds = self.refseq_cds_fct(
                yaml["TMP"]["tmp_refseq_cds"], refseq_df)
        else:
            print("# Files already exist ✓")

    @staticmethod
    def load_refseq(path):
        """Load the gzipped RefSeq GFF file into a dataframe.

        Arguments:
            path {str} -- Path to the GFF RefSeq file

        Returns:
            pd.DataFrame -- GFF rows with integer-typed Start/End columns
        """
        print("### Load RefSeq / File = {}".format(path))
        refseq_df = pd.read_csv(
            path,
            compression="gzip",
            sep="\t",
            skiprows=9,
            # nrows=10000,
            names=["NC", "RefSeq_validation", "Region_type", "Start",
                   "End", "Score", "Strand", "Phase", "Attributes"],
        )
        # Rows without coordinates cannot be used downstream.
        refseq_df = refseq_df.dropna(subset=["Start", "End"])
        refseq_df["Start"] = refseq_df["Start"].astype(int)
        refseq_df["End"] = refseq_df["End"].astype(int)
        return refseq_df

    def _build_or_load(self, path, build):
        """Return the dataframe cached at *path*, building it with *build()* when absent.

        Factors out the identical cache-or-build logic that was copy-pasted
        across the five category extractors.
        """
        full_path = self.base_directory + path
        if not os.path.isfile(full_path):
            print("# Files don't exist ☒")
            df = build()
            df.to_parquet(full_path)
        else:
            print("# Files exist ✓, Loading ... ")
            df = pd.read_parquet(full_path)
        print(df)
        print(df.shape)
        return df

    def refseq_chroms_fct(self, path, refseq_df):
        """Extract chromosome 'region' rows from the RefSeq GFF.

        Arguments:
            path {str} -- Tmp output file (relative to base_directory)
            refseq_df {pd.DataFrame} -- RefSeq GFF as a dataframe

        Returns:
            pd.DataFrame -- RefSeq chromosomes
        """
        print("### Build temp file (chroms part) / File = {}".format(path))

        def build():
            regions = refseq_df.loc[refseq_df["Region_type"] == "region"]
            # Keep assembled chromosomes (NC accessions) validated by RefSeq.
            return regions.loc[
                (regions["NC"].str.contains("NC")) & (
                    regions["RefSeq_validation"] == "RefSeq")
            ]

        return self._build_or_load(path, build)

    def refseq_pc_genes_fct(self, path, refseq_df):
        """Extract protein coding genes from the RefSeq GFF.

        Arguments:
            path {str} -- Tmp output file (relative to base_directory)
            refseq_df {pd.DataFrame} -- RefSeq GFF as a dataframe

        Returns:
            pd.DataFrame -- RefSeq protein coding genes
        """
        print("### Build temp file (protein coding genes part) / File = {}".format(path))

        def build():
            return refseq_df.loc[
                (refseq_df["Attributes"].str.contains(
                    "gene_biotype=protein_coding"))
                & (refseq_df["NC"].str.contains("NC_"))
                # & (refseq_df["RefSeq_validation"].str.contains("BestRefSeq"))
            ]

        return self._build_or_load(path, build)

    def refseq_mrnas_fct(self, path, refseq_df):
        """Extract mRNAs (NM_ accessions) from the RefSeq GFF.

        Arguments:
            path {str} -- Tmp output file (relative to base_directory)
            refseq_df {pd.DataFrame} -- RefSeq GFF as a dataframe

        Returns:
            pd.DataFrame -- RefSeq mRNAs
        """
        print("### Build temp file (mRNAs part) / File = {}".format(path))

        def build():
            return refseq_df.loc[
                (refseq_df["Attributes"].str.contains("NM_")) & (
                    refseq_df["Region_type"] == "mRNA") & (refseq_df["NC"].str.contains("NC_"))
            ]

        return self._build_or_load(path, build)

    def refseq_exons_fct(self, path, refseq_df):
        """Extract exons (including UTRs) from the RefSeq GFF.

        Arguments:
            path {str} -- Tmp output file (relative to base_directory)
            refseq_df {pd.DataFrame} -- RefSeq GFF as a dataframe

        Returns:
            pd.DataFrame -- RefSeq exons including UTRs
        """
        print("### Build temp file (Exons (with UTRs) part) / File = {}".format(path))

        def build():
            return refseq_df.loc[
                (refseq_df["Attributes"].str.contains("exon-NM"))
                & (refseq_df["Region_type"] == "exon")
                & (refseq_df["NC"].str.contains("NC_"))
            ]

        return self._build_or_load(path, build)

    def refseq_cds_fct(self, path, refseq_df):
        """Extract coding exons (CDS, NP_ accessions) from the RefSeq GFF.

        Arguments:
            path {str} -- Tmp output file (relative to base_directory)
            refseq_df {pd.DataFrame} -- RefSeq GFF as a dataframe

        Returns:
            pd.DataFrame -- RefSeq coding exons (CDS)
        """
        print("### Build temp file (coding exons part) / File = {}".format(path))

        def build():
            return refseq_df.loc[
                (refseq_df["Attributes"].str.contains("NP_")) & (
                    refseq_df["Region_type"] == "CDS") & (refseq_df["NC"].str.contains("NC_"))
            ]

        return self._build_or_load(path, build)
if __name__ == "__main__":
    # Instantiate the pipeline; the final CDS tmp file doubles as the
    # "already processed" marker checked inside ProcessRefSeq.__init__.
    c = ProcessRefSeq(
        base_directory + yaml["TMP"]["tmp_refseq_cds"])
    # print(c.groupby_mrnas)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import datasets
from sklearn.model_selection import train_test_split
import random
from qiskit import *
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer, execute, IBMQ, Aer
from qiskit.circuit import Gate
from qiskit.quantum_info.operators import Operator
from qiskit.aqua.components.optimizers import ADAM, CG, AQGD
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings("ignore")
def predict(probas):
    """Threshold probabilities at 0.5 into hard 0/1 class predictions."""
    hard = probas >= 0.5
    return hard * 1
def binary_crossentropy(labels, predictions):
    """Mean of -y*log(p) over paired labels/probabilities (p clipped at 1e-8).

    NOTE(review): this matches the original implementation, which omits the
    (1 - y) * log(1 - p) term of the full binary cross entropy.

    :param labels: true values for a binary target variable
    :param predictions: predicted probabilities for a binary target variable
    :return: the (partial) cross-entropy; lower is better
    '''"""
    total = sum(l * np.log(np.max([p, 1e-8])) for l, p in zip(labels, predictions))
    return -total / len(labels)
def square_loss(labels, predictions):
    """Mean squared error between binary labels and predicted probabilities.

    :param labels: true values for a binary target variable
    :param predictions: predicted probabilities for a binary target variable
    :return: the mean squared error; lower is better
    """
    squared_errors = [(l - p) ** 2 for l, p in zip(labels, predictions)]
    return sum(squared_errors) / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions matching labels (within 1e-5 tolerance).

    :param labels: true values for a binary target variable
    :param predictions: predicted values for a binary target variable
    :return: accuracy in [0, 1]; higher is better
    """
    hits = sum(1 for l, p in zip(labels, predictions) if abs(l - p) < 1e-5)
    return hits / len(labels)
def get_angles(x):
    """Compute state-preparation rotation angles for a 4-dimensional vector.

    :param x: positive 4 dimensional real vector
    :return: numpy array of five rotation angles
    """
    # NOTE(review): ``np.sqrt(x[i]) ** 2`` equals x[i] only for non-negative
    # entries (as the stated contract assumes); kept verbatim from the original.
    denom01 = np.sqrt(x[0] ** 2 + x[1] ** 2 + 1e-12)
    denom23 = np.sqrt(x[2] ** 2 + x[3] ** 2 + 1e-12)
    beta0 = 2 * np.arcsin(np.sqrt(x[1]) ** 2 / denom01)
    beta1 = 2 * np.arcsin(np.sqrt(x[3]) ** 2 / denom23)
    full_norm = np.sqrt(x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2)
    beta2 = 2 * np.arcsin(np.sqrt(x[2] ** 2 + x[3] ** 2) / full_norm)
    return np.array([beta2, -beta1 / 2, beta1 / 2, -beta0 / 2, beta0 / 2])
def plot_images(images, labels, num_row, num_col):
    """Render a num_row x num_col grid of grayscale images with their labels.

    :param images: indexable collection of 2-D arrays (>= num_row * num_col)
    :param labels: per-image labels shown as subplot titles
    :param num_row: number of grid rows
    :param num_col: number of grid columns
    """
    # squeeze=False keeps ``axes`` 2-D even for a single row or column, so
    # the [row, col] indexing below cannot fail with an IndexError.
    fig, axes = plt.subplots(num_row, num_col,
                             figsize=(1.5 * num_col, 2 * num_row),
                             squeeze=False)
    for i in range(num_row * num_col):
        ax = axes[i // num_col, i % num_col]
        ax.imshow(images[i], cmap='gray')
        ax.set_title('Label: {}'.format(labels[i]))
    plt.tight_layout()
    plt.show()
def scatterplot_matrix(data):
    """Show a pairwise scatterplot matrix of *data*, coloured by the 'Y' column."""
    # sns.set_theme(style="ticks")
    sns.pairplot(data, hue="Y")
    plt.show()
def scatterplot(X, Y, cols):
    """Scatter cols[0] against cols[1], coloured by the binary label Y.

    :param X: 2-D feature array; only the first two named columns are plotted
    :param Y: binary labels used for the hue
    :param cols: column names for the features in X
    """
    frame = pd.DataFrame(X, columns=cols)
    frame['Y'] = Y
    plt.figure(figsize=(10, 6))
    two_tone = sns.color_palette("hls", 2)
    sns.scatterplot(
        data=frame,
        x=cols[0], y=cols[1],
        hue="Y",
        palette=two_tone,
        alpha=0.3,
        legend="full")
def image_reshape(x, ncol, pca, lower, upper):
    """Project one image through a fitted PCA, rescale, pad and convert to angles.

    :param x: raw image pixels
    :param ncol: flattened feature count expected by *pca*
    :param pca: fitted PCA object with a ``transform`` method
    :param lower: per-component lower bounds for min-max scaling
    :param upper: per-component upper bounds for min-max scaling
    :return: list of rotation angles for the first (only) sample
    """
    reduced = pca.transform(x.reshape(-1, ncol))
    scaled = (reduced - lower) / (upper - lower)
    pad_col = 0.3 * np.ones((len(scaled), 1))
    padded = np.c_[np.c_[scaled, pad_col], np.zeros((len(scaled), 1))]
    norms = np.sqrt(np.sum(padded ** 2, -1))
    unit_rows = (padded.T / norms).T
    angles = np.nan_to_num(np.array([get_angles(v) for v in unit_rows]))
    return angles[0].tolist()
from qiskit import *
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer, execute, IBMQ, Aer
from qiskit.circuit import Gate
from qiskit.quantum_info.operators import Operator
from qiskit.aqua.components.optimizers import ADAM, CG, AQGD
def R_gate(beta, circuit=False):
    """Single-qubit RY(beta) rotation, as an Operator or a QuantumCircuit.

    :param beta: rotation angle in radians
    :param circuit: when True, return the 1-qubit circuit itself instead of
        the simulated unitary Operator
    :return: qiskit Operator (default) or QuantumCircuit
    """
    control = QuantumRegister(1)
    qc = QuantumCircuit(control)
    qc.ry(beta, control)
    if circuit:
        # No need to run the unitary simulator when the caller only wants
        # the circuit (the original simulated unconditionally).
        return qc
    backend = Aer.get_backend('unitary_simulator')
    job = execute(qc, backend)
    U = job.result().get_unitary(qc)
    return Operator(U)
def padding(X):
    """Pad 2-feature samples to 4 dims, normalise rows, and convert to angles.

    Appends a constant 0.3 column and a zero column to every row of X,
    renormalises each row to unit length, then maps each row through
    ``get_angles``.  (Also strips extraction junk that was fused onto the
    original return line.)

    :param X: 2-D array of samples
    :return: numpy array of per-sample rotation angles
    """
    pad_col = 0.3 * np.ones((len(X), 1))
    X_pad = np.c_[np.c_[X, pad_col], np.zeros((len(X), 1))]
    normalization = np.sqrt(np.sum(X_pad ** 2, -1))
    X_norm = (X_pad.T / normalization).T
    return np.nan_to_num(np.array([get_angles(x) for x in X_norm]))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import datasets
from sklearn.model_selection import train_test_split
import random
from qiskit import *
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer, execute, IBMQ, Aer
from qiskit.circuit import Gate
from qiskit.quantum_info.operators import Operator
from qiskit.aqua.components.optimizers import ADAM, CG, AQGD
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings("ignore")
def predict(probas):
    """Convert predicted probabilities to 0/1 labels using a 0.5 cutoff."""
    return 1 * (probas >= 0.5)
def binary_crossentropy(labels, predictions):
    """Mean of -y*log(p) over paired labels/probabilities (p clipped at 1e-8).

    NOTE(review): matches the original, which omits the (1-y)*log(1-p) term
    of the full binary cross entropy.

    :param labels: true values for a binary target variable
    :param predictions: predicted probabilities for a binary target variable
    :return: the (partial) cross-entropy; lower is better
    """
    total = 0.0
    for truth, prob in zip(labels, predictions):
        total += truth * np.log(np.max([prob, 1e-8]))
    return -total / len(labels)
def square_loss(labels, predictions):
    """Mean squared error between binary labels and predicted probabilities.

    :param labels: true values for a binary target variable
    :param predictions: predicted probabilities for a binary target variable
    :return: the mean squared error; lower is better
    """
    return sum((l - p) ** 2 for l, p in zip(labels, predictions)) / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions matching labels (within 1e-5 tolerance).

    :param labels: true values for a binary target variable
    :param predictions: predicted values for a binary target variable
    :return: accuracy in [0, 1]; higher is better
    """
    matches = [1 for l, p in zip(labels, predictions) if abs(l - p) < 1e-5]
    return len(matches) / len(labels)
def get_angles(x):
    """Compute state-preparation rotation angles for a 4-dimensional vector.

    :param x: positive 4 dimensional real vector
    :return: numpy array of five rotation angles
    """
    # NOTE(review): ``np.sqrt(x[i]) ** 2`` equals x[i] only for non-negative
    # entries (as the stated contract assumes); kept verbatim from the original.
    pair01 = np.sqrt(x[0] ** 2 + x[1] ** 2 + 1e-12)
    pair23 = np.sqrt(x[2] ** 2 + x[3] ** 2 + 1e-12)
    beta0 = 2 * np.arcsin(np.sqrt(x[1]) ** 2 / pair01)
    beta1 = 2 * np.arcsin(np.sqrt(x[3]) ** 2 / pair23)
    total = np.sqrt(x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2)
    beta2 = 2 * np.arcsin(np.sqrt(x[2] ** 2 + x[3] ** 2) / total)
    return np.array([beta2, -beta1 / 2, beta1 / 2, -beta0 / 2, beta0 / 2])
def plot_images(images, labels, num_row, num_col):
    """Render a num_row x num_col grid of grayscale images with their labels.

    :param images: indexable collection of 2-D arrays (>= num_row * num_col)
    :param labels: per-image labels shown as subplot titles
    :param num_row: number of grid rows
    :param num_col: number of grid columns
    """
    # squeeze=False keeps ``axes`` 2-D even for a single row or column, so
    # the [row, col] indexing below cannot fail with an IndexError.
    fig, axes = plt.subplots(num_row, num_col,
                             figsize=(1.5 * num_col, 2 * num_row),
                             squeeze=False)
    for i in range(num_row * num_col):
        ax = axes[i // num_col, i % num_col]
        ax.imshow(images[i], cmap='gray')
        ax.set_title('Label: {}'.format(labels[i]))
    plt.tight_layout()
    plt.show()
def scatterplot_matrix(data):
    """Show a pairwise scatterplot matrix of *data*, coloured by the 'Y' column."""
    # sns.set_theme(style="ticks")
    sns.pairplot(data, hue="Y")
    plt.show()
def scatterplot(X, Y, cols):
    """Scatter cols[0] against cols[1], coloured by the binary label Y.

    :param X: 2-D feature array; only the first two named columns are plotted
    :param Y: binary labels used for the hue
    :param cols: column names for the features in X
    """
    frame = pd.DataFrame(X, columns=cols)
    frame['Y'] = Y
    plt.figure(figsize=(10, 6))
    sns.scatterplot(
        data=frame,
        x=cols[0], y=cols[1],
        hue="Y",
        palette=sns.color_palette("hls", 2),
        alpha=0.3,
        legend="full")
def image_reshape(x, ncol, pca, lower, upper):
    """Project one image through a fitted PCA, rescale, pad and convert to angles.

    :param x: raw image pixels
    :param ncol: flattened feature count expected by *pca*
    :param pca: fitted PCA object with a ``transform`` method
    :param lower: per-component lower bounds for min-max scaling
    :param upper: per-component upper bounds for min-max scaling
    :return: list of rotation angles for the first (only) sample
    """
    reduced = pca.transform(x.reshape(-1, ncol))
    scaled = (reduced - lower) / (upper - lower)
    pad_col = 0.3 * np.ones((len(scaled), 1))
    padded = np.c_[np.c_[scaled, pad_col], np.zeros((len(scaled), 1))]
    norms = np.sqrt(np.sum(padded ** 2, -1))
    unit_rows = (padded.T / norms).T
    angles = np.nan_to_num(np.array([get_angles(v) for v in unit_rows]))
    return angles[0].tolist()
from qiskit import *
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer, execute, IBMQ, Aer
from qiskit.circuit import Gate
from qiskit.quantum_info.operators import Operator
from qiskit.aqua.components.optimizers import ADAM, CG, AQGD
def R_gate(beta, circuit=False):
    """Single-qubit RY(beta) rotation, as an Operator or a QuantumCircuit.

    :param beta: rotation angle in radians
    :param circuit: when True, return the 1-qubit circuit itself instead of
        the simulated unitary Operator
    :return: qiskit Operator (default) or QuantumCircuit
    """
    control = QuantumRegister(1)
    qc = QuantumCircuit(control)
    qc.ry(beta, control)
    if circuit:
        # No need to run the unitary simulator when the caller only wants
        # the circuit (the original simulated unconditionally).
        return qc
    backend = Aer.get_backend('unitary_simulator')
    job = execute(qc, backend)
    U = job.result().get_unitary(qc)
    return Operator(U)
def padding(X):
    """Pad 2-feature samples to 4 dims, normalise rows, and convert to angles.

    Appends a constant 0.3 column and a zero column to every row of X,
    renormalises each row to unit length, then maps each row through
    ``get_angles``.  (Also strips extraction junk that was fused onto the
    original return line.)

    :param X: 2-D array of samples
    :return: numpy array of per-sample rotation angles
    """
    pad_col = 0.3 * np.ones((len(X), 1))
    X_pad = np.c_[np.c_[X, pad_col], np.zeros((len(X), 1))]
    normalization = np.sqrt(np.sum(X_pad ** 2, -1))
    X_norm = (X_pad.T / normalization).T
    return np.nan_to_num(np.array([get_angles(x) for x in X_norm]))
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from collections import OrderedDict
from io import open
import numpy as np
from nltk import word_tokenize
logger = logging.getLogger(__name__)
class ExplaGraphInputExample(object):
    """A single ExplaGraphs sample with its graph supervision.

    Holds the tokenised belief/argument/external-concept words, the BIO
    node tags for each of them, the flattened edge-label matrix, and the
    stance classification target.
    """

    def __init__(self, id, belief, argument, external, node_label_internal_belief, node_label_internal_argument,
                 node_label_external, edge_label, stance_label):
        """Store every field verbatim on the instance."""
        # Identifier, stance target and raw (tokenised) text inputs.
        self.id = id
        self.stance_label = stance_label
        self.belief = belief
        self.argument = argument
        self.external = external
        # BIO node tags aligned with the word lists above.
        self.node_label_internal_belief = node_label_internal_belief
        self.node_label_internal_argument = node_label_internal_argument
        self.node_label_external = node_label_external
        # Flattened edge-label matrix over the ordered nodes.
        self.edge_label = edge_label
class ExplaGraphFeatures(object):
    """Model-ready, padded feature set for one ExplaGraphs example."""

    def __init__(self, id, input_ids, input_mask, segment_ids, node_start_index, node_end_index, node_label,
                 edge_label, stance_label):
        """Store every field verbatim on the instance."""
        # Identifier and transformer inputs (already padded to max length).
        self.id = id
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Node span boundaries (token indices) and their BIO label ids.
        self.node_start_index = node_start_index
        self.node_end_index = node_end_index
        self.node_label = node_label
        # Flattened edge-label matrix and stance target id.
        self.edge_label = edge_label
        self.stance_label = stance_label
class ExplaGraphProcessor(object):
    """Reads ExplaGraphs TSV data and builds node/edge/stance-labelled examples.

    Gold graphs are serialised as "(head; relation; tail)(head; relation; tail)...".
    Nodes appearing inside the belief/argument text are "internal"; the rest
    are "external" concepts.
    """

    def _read_tsv(self, input_file, quotechar=None):
        """Read a tab-separated file and return its rows as lists of cells (py2/py3 safe)."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines

    def get_train_examples(self, data_dir):
        """Create training examples (with gold graphs) from train.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")))

    def get_dev_examples(self, data_dir, is_edge_pred=True):
        """Create dev examples; node labels come from prediction files when is_edge_pred."""
        # If predicting nodes, then create labels using gold nodes, because don't care
        # But if predicting edges, create node labels using the predicting nodes
        if not is_edge_pred:
            return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), is_eval=True)
        else:
            return self._create_examples_with_predicted_nodes(self._read_tsv(os.path.join(data_dir, "dev.tsv")),
                                                              open(os.path.join(data_dir, "internal_nodes_dev.txt"),
                                                                   "r").read().splitlines(),
                                                              open(os.path.join(data_dir, "external_nodes_dev.txt"),
                                                                   "r").read().splitlines())

    def get_test_examples(self, data_dir):
        """Create examples from test.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")))

    def get_stance_labels(self):
        """Binary stance classification labels."""
        return ["support", "counter"]

    def get_node_labels(self):
        """BIO tags used for node (concept) span tagging."""
        return ["B-N", "I-N", "O"]

    def get_edge_labels(self):
        """Relation labels (each with its negation) plus the 'no relation' class."""
        return ["antonym of", "synonym of", "at location", "not at location", "capable of", "not capable of", "causes",
                "not causes", "created by", "not created by", "is a", "is not a", "desires", "not desires",
                "has subevent", "not has subevent", "part of", "not part of", "has context", "not has context",
                "has property", "not has property", "made of", "not made of", "receives action", "not receives action",
                "used for", "not used for", "no relation"]

    def _get_external_nodes_eval(self, belief, argument, external_nodes, internal_nodes_count):
        """Filter comma-separated predicted external nodes for evaluation.

        Keeps nodes that are not substrings of the belief/argument, are at
        most 3 words long, and fit within a total budget of 11 nodes.
        """
        filtered_external_nodes = []
        for external_node in list(set(external_nodes.split(", "))):
            # We'll consider a maximum of 11 nodes (9+2 shared between belief and argument)
            if internal_nodes_count + len(filtered_external_nodes) == 11:
                break
            if external_node in belief or external_node in argument:
                continue
            words = word_tokenize(external_node)
            if len(words) > 3:
                continue
            filtered_external_nodes.append(external_node)
        return filtered_external_nodes

    def _get_external_nodes_train(self, belief, argument, graph):
        """Collect (deduplicated) gold-graph nodes appearing in neither belief nor argument."""
        external_nodes = []
        for edge in graph[1:-1].split(")("):
            edge_parts = edge.split("; ")
            if edge_parts[0] not in belief and edge_parts[0] not in argument and edge_parts[0] not in external_nodes:
                external_nodes.append(edge_parts[0])
            if edge_parts[2] not in belief and edge_parts[2] not in argument and edge_parts[2] not in external_nodes:
                external_nodes.append(edge_parts[2])
        return external_nodes

    def _get_internal_nodes(self, belief, argument, graph):
        """Map word-count -> list of gold-graph nodes found inside the belief/argument."""
        internal_nodes = {}
        for edge in graph[1:-1].split(")("):
            edge_parts = edge.split("; ")
            if edge_parts[0] in belief or edge_parts[0] in argument:
                length = len(edge_parts[0].split(" "))
                if length not in internal_nodes:
                    internal_nodes[length] = [edge_parts[0]]
                elif edge_parts[0] not in internal_nodes[length]:
                    internal_nodes[length].append(edge_parts[0])
            if edge_parts[2] in belief or edge_parts[2] in argument:
                length = len(edge_parts[2].split(" "))
                if length not in internal_nodes:
                    internal_nodes[length] = [edge_parts[2]]
                elif edge_parts[2] not in internal_nodes[length]:
                    internal_nodes[length].append(edge_parts[2])
        return internal_nodes

    def _get_edge_label(self, node_label_internal_belief, belief, node_label_internal_argument, argument,
                        external_nodes, graph):
        """Build the flattened edge-label matrix over the ordered node list.

        Nodes are ordered: belief nodes, argument nodes, then external nodes.
        Cell (i, j) holds the gold relation id, the 'no relation' id, or -100
        on the diagonal (ignored by the loss).
        """
        edge_label_map = {label: i for i, label in enumerate(self.get_edge_labels())}
        gold_edges = {}
        for edge in graph[1:-1].split(")("):
            parts = edge.split("; ")
            gold_edges[parts[0], parts[2]] = parts[1]
        ordered_nodes = []
        # Reconstruct node phrases (up to 3 words) from the BIO tags.
        for i, (word, node_label) in enumerate(zip(belief, node_label_internal_belief)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(belief) and node_label_internal_belief[i + 1] == "I-N":
                    node += " " + belief[i + 1]
                if i + 2 < len(belief) and node_label_internal_belief[i + 2] == "I-N":
                    node += " " + belief[i + 2]
                ordered_nodes.append(node)
        for i, (word, node_label) in enumerate(zip(argument, node_label_internal_argument)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(argument) and node_label_internal_argument[i + 1] == "I-N":
                    node += " " + argument[i + 1]
                if i + 2 < len(argument) and node_label_internal_argument[i + 2] == "I-N":
                    node += " " + argument[i + 2]
                ordered_nodes.append(node)
        ordered_nodes.extend(external_nodes)
        edge_label = np.zeros((len(ordered_nodes), len(ordered_nodes)), dtype=int)
        for i in range(len(edge_label)):
            for j in range(len(edge_label)):
                if i == j:
                    edge_label[i][j] = -100
                elif (ordered_nodes[i], ordered_nodes[j]) in gold_edges:
                    edge_label[i][j] = edge_label_map[gold_edges[(ordered_nodes[i], ordered_nodes[j])]]
                else:
                    edge_label[i][j] = edge_label_map["no relation"]
        return list(edge_label.flatten())

    def _get_node_label_internal(self, internal_nodes, words):
        """BIO-tag *words* against node phrases, matching longer phrases first (3 -> 1)."""
        labels = ["O"] * len(words)
        for length in range(3, 0, -1):
            if length not in internal_nodes:
                continue
            nodes = internal_nodes[length]
            for node in nodes:
                node_words = node.split(" ")
                for (i, word) in enumerate(words):
                    if length == 3 and i < len(words) - 2 and words[i] == node_words[0] and words[i + 1] == node_words[
                        1] and words[i + 2] == node_words[2]:
                        if labels[i] == "O" and labels[i + 1] == "O" and labels[i + 2] == "O":
                            labels[i] = "B-N"
                            labels[i + 1] = "I-N"
                            labels[i + 2] = "I-N"
                    if length == 2 and i < len(words) - 1 and words[i] == node_words[0] and words[i + 1] == node_words[
                        1]:
                        if labels[i] == "O" and labels[i + 1] == "O":
                            labels[i] = "B-N"
                            labels[i + 1] = "I-N"
                    if length == 1 and words[i] == node_words[0]:
                        if labels[i] == "O":
                            labels[i] = "B-N"
        return labels

    def _get_node_label_external(self, external_nodes):
        """One B-N followed by I-N tags per tokenized external node."""
        labels = []
        for external_node in external_nodes:
            length = len(word_tokenize(external_node))
            labels.extend(["B-N"] + ["I-N"] * (length - 1))
        return labels

    def _create_examples(self, records, is_eval=False):
        """Turn TSV records (belief, argument, stance, graph) into tagged examples."""
        examples = []
        max_edge_length = 0
        for (i, record) in enumerate(records):
            belief = record[0].lower()
            argument = record[1].lower()
            stance_label = record[2]
            graph = record[3].lower()
            belief_words = word_tokenize(belief)
            argument_words = word_tokenize(argument)
            internal_nodes = self._get_internal_nodes(belief, argument, graph)
            node_label_internal_belief = self._get_node_label_internal(internal_nodes, belief_words)
            node_label_internal_argument = self._get_node_label_internal(internal_nodes, argument_words)
            # If evaluating, external nodes are not required for tagging because they will come from generation model
            external_nodes = self._get_external_nodes_train(belief, argument, graph) if not is_eval else []
            node_label_external = self._get_node_label_external(external_nodes)
            edge_label = self._get_edge_label(node_label_internal_belief, belief_words, node_label_internal_argument,
                                              argument_words,
                                              external_nodes, graph)
            max_edge_length = max(max_edge_length, len(edge_label))
            external = []
            for external_node in external_nodes:
                external.extend(word_tokenize(external_node))
            examples.append(
                ExplaGraphInputExample(id=i, belief=belief_words, argument=argument_words, external=external,
                                       node_label_internal_belief=node_label_internal_belief,
                                       node_label_internal_argument=node_label_internal_argument,
                                       node_label_external=node_label_external, edge_label=edge_label,
                                       stance_label=stance_label))
        return examples

    def _get_unique_node_count(self, belief, argument, node_label_internal_belief, node_label_internal_argument):
        """Count distinct internal node phrases reconstructed from the BIO tags."""
        nodes = []
        for i, (word, node_label) in enumerate(zip(belief, node_label_internal_belief)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(belief) and node_label_internal_belief[i + 1] == "I-N":
                    node += " " + belief[i + 1]
                if i + 2 < len(belief) and node_label_internal_belief[i + 2] == "I-N":
                    node += " " + belief[i + 2]
                nodes.append(node)
        for i, (word, node_label) in enumerate(zip(argument, node_label_internal_argument)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(argument) and node_label_internal_argument[i + 1] == "I-N":
                    node += " " + argument[i + 1]
                if i + 2 < len(argument) and node_label_internal_argument[i + 2] == "I-N":
                    node += " " + argument[i + 2]
                if node not in nodes:
                    nodes.append(node)
        return len(nodes)

    def _create_examples_with_predicted_nodes(self, records, internal_nodes, external_nodes):
        """Build dev examples whose node labels come from model prediction files.

        *internal_nodes* is a CoNLL-style word\\ttag dump with blank lines
        between examples; *external_nodes* has one comma-separated node list
        per example.  Edge labels are placeholders (prediction-time only).
        """
        assert len(records) == len(external_nodes)
        examples = []
        sample_breaks = [i for i, x in enumerate(internal_nodes) if x == ""]
        max_node_count = 0
        for (i, record) in enumerate(records):
            belief = record[0].lower()
            argument = record[1].lower()
            stance_label = record[2]
            belief_words = word_tokenize(belief)
            argument_words = word_tokenize(argument)
            start = 0 if i == 0 else sample_breaks[i - 1] + 1
            end = sample_breaks[i]
            belief_lines = internal_nodes[start:(start + len(belief_words))]
            argument_lines = internal_nodes[(start + len(belief_words)):end]
            node_label_internal_belief = [belief_line.split("\t")[1] for belief_line in belief_lines]
            node_label_internal_argument = [argument_line.split("\t")[1] for argument_line in argument_lines]
            node_count = self._get_unique_node_count(belief_words, argument_words, node_label_internal_belief,
                                                     node_label_internal_argument)
            external = []
            node_label_external = []
            for external_node in list(OrderedDict.fromkeys(external_nodes[i].split(", "))):
                # Allowing a maximum of 9 unique nodes, as per the task
                if node_count >= 8:
                    break
                if external_node in belief or external_node in argument:
                    continue
                words = word_tokenize(external_node)
                if len(words) > 3:
                    continue
                node_count += 1
                external.extend(words)
                node_label_external.extend(["B-N"] + ["I-N"] * (len(words) - 1))
            max_node_count = max(max_node_count, node_count)
            edge_label = np.zeros((node_count, node_count), dtype=int)
            for a in range(len(edge_label)):
                for b in range(len(edge_label)):
                    if a == b:
                        edge_label[a][b] = -100
                    else:
                        edge_label[a][b] = 0  # Don't care, some placeholder value
            edge_label = list(edge_label.flatten())
            examples.append(
                ExplaGraphInputExample(id=i, belief=belief_words, argument=argument_words, external=external,
                                       node_label_internal_belief=node_label_internal_belief,
                                       node_label_internal_argument=node_label_internal_argument,
                                       node_label_external=node_label_external, edge_label=edge_label,
                                       stance_label=stance_label))
        print(max_node_count)  # NOTE(review): debug output left in place
        return examples
def get_word_start_indices(examples, tokenizer, cls_token, sep_token):
    """Return, per example, the token index where each belief/argument word starts.

    Mirrors the token layout used in ``convert_examples_to_features``:
    [CLS] belief-subwords [SEP] [SEP] argument-subwords.

    :param examples: iterable of objects with ``belief``/``argument`` word lists
    :param tokenizer: subword tokenizer exposing ``tokenize(word)``
    :param cls_token: classification token prepended to every sequence
    :param sep_token: separator token (inserted twice, RoBERTa style)
    :return: list (per example) of lists of word start indices
    """
    all_word_start_indices = []
    for example in examples:
        # Removed the per-example debug ``print(ex_index)`` from the original.
        word_start_indices = []
        tokens = [cls_token]
        for word in example.belief:
            word_tokens = tokenizer.tokenize(word)
            if len(word_tokens) > 0:
                word_start_indices.append(len(tokens))
                tokens.extend(word_tokens)
        tokens = tokens + [sep_token] + [sep_token]
        for word in example.argument:
            word_tokens = tokenizer.tokenize(word)
            if len(word_tokens) > 0:
                word_start_indices.append(len(tokens))
                tokens.extend(word_tokens)
        all_word_start_indices.append(word_start_indices)
    return all_word_start_indices
def _encode_tagged_words(words, labels, tokenizer, node_label_map, tokens,
                         node_label_ids, node_start_index, node_end_index):
    """Tokenize *words*, appending subwords/labels and tracking node spans in place.

    Factors out the loop that was copy-pasted three times (belief, argument,
    external concepts) in the original.
    """
    for word, label in zip(words, labels):
        word_tokens = tokenizer.tokenize(word)
        if len(word_tokens) > 0:
            if label == "B-N":
                node_start_index.append(len(tokens))
            tokens.extend(word_tokens)
            if label == "B-N":
                node_end_index.append(len(tokens) - 1)
            elif label == "I-N":
                node_end_index[len(node_end_index) - 1] = len(tokens) - 1  # Update the end index
            # First subword keeps the B-N tag; continuation subwords become I-N.
            if label == "B-N":
                node_label_ids.extend(
                    [node_label_map[label]] + [node_label_map["I-N"]] * (len(word_tokens) - 1))
            else:
                node_label_ids.extend([node_label_map[label]] * len(word_tokens))


def convert_examples_to_features(examples,
                                 stance_label_list,
                                 node_label_list,
                                 max_seq_length,
                                 max_nodes,
                                 tokenizer,
                                 cls_token='[CLS]',
                                 sep_token='[SEP]'):
    """Convert ExplaGraph examples into padded ExplaGraphFeatures.

    Layout: [CLS] belief [SEP] [SEP] argument [SEP] [SEP] external-concepts.
    The encoding is based on RoBERTa (and hence segment ids don't matter).

    :param examples: list of ExplaGraphInputExample
    :param stance_label_list: ordered stance labels (index = label id)
    :param node_label_list: ordered BIO node labels (index = label id)
    :param max_seq_length: pad/validate token sequences to this length
    :param max_nodes: pad edge-label matrices to max_nodes * max_nodes
    :param tokenizer: subword tokenizer (tokenize / convert_tokens_to_ids)
    :param cls_token: classification token
    :param sep_token: separator token
    :return: list of ExplaGraphFeatures
    """
    node_label_map = {label: i for i, label in enumerate(node_label_list)}
    stance_label_map = {label: i for i, label in enumerate(stance_label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        # Removed the unconditional debug ``print(ex_index)`` from the original.
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        tokens = [cls_token]
        node_label_ids = [-100]  # -100 = ignored by the loss
        node_start_index, node_end_index = [], []
        # Encode the belief
        _encode_tagged_words(example.belief, example.node_label_internal_belief,
                             tokenizer, node_label_map, tokens, node_label_ids,
                             node_start_index, node_end_index)
        tokens = tokens + [sep_token] + [sep_token]
        node_label_ids = node_label_ids + [-100, -100]
        # Encode the argument
        _encode_tagged_words(example.argument, example.node_label_internal_argument,
                             tokenizer, node_label_map, tokens, node_label_ids,
                             node_start_index, node_end_index)
        tokens = tokens + [sep_token] + [sep_token]
        node_label_ids = node_label_ids + [-100, -100]
        # Encode the external concepts
        _encode_tagged_words(example.external, example.node_label_external,
                             tokenizer, node_label_map, tokens, node_label_ids,
                             node_start_index, node_end_index)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids = input_ids + ([0] * padding_length)
        input_mask = input_mask + ([0] * padding_length)
        node_label = node_label_ids + ([-100] * padding_length)
        segment_ids = [0] * len(input_ids)
        padding_length = max_seq_length - len(node_start_index)
        node_start_index = node_start_index + ([0] * padding_length)
        node_end_index = node_end_index + ([0] * padding_length)
        edge_label = example.edge_label + [-100] * (max_nodes * max_nodes - len(example.edge_label))
        stance_label = stance_label_map[example.stance_label]
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(node_start_index) == max_seq_length
        assert len(node_end_index) == max_seq_length
        assert len(edge_label) == max_nodes * max_nodes
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("id: %s" % (example.id))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("node_start_index: %s" % " ".join([str(x) for x in node_start_index]))
            logger.info("node_end_index: %s" % " ".join([str(x) for x in node_end_index]))
            logger.info("node_label: %s" % " ".join([str(x) for x in node_label]))
            logger.info("edge_label: %s" % " ".join([str(x) for x in edge_label]))
            logger.info("label: %s (id = %d)" % (example.stance_label, stance_label))
        features.append(
            # BUG FIX: the original passed ``id=id`` (the builtin function)
            # instead of the example's identifier.
            ExplaGraphFeatures(id=example.id,
                               input_ids=input_ids,
                               input_mask=input_mask,
                               segment_ids=segment_ids,
                               node_start_index=node_start_index,
                               node_end_index=node_end_index,
                               node_label=node_label,
                               edge_label=edge_label,
                               stance_label=stance_label))
    return features
def simple_accuracy(preds, labels):
    """Fraction of positions where *preds* equals *labels* (element-wise)."""
    return np.mean(preds == labels)
def compute_metrics(task_name, preds, labels):
    """Task-dispatched metric computation; only the 'eg' task is supported.

    :raises KeyError: for any task name other than 'eg'
    """
    assert len(preds) == len(labels)
    if task_name != "eg":
        raise KeyError(task_name)
    return {"acc": simple_accuracy(preds, labels)}
def write_node_predictions_to_file(writer, test_input_reader, preds_list):
    """Write CoNLL-style node predictions, aligning input lines with preds_list.

    :param writer: open file-like object receiving "word tag" output lines
    :param test_input_reader: iterable over the original CoNLL input lines
    :param preds_list: per-example lists of predicted tags (consumed via pop)
    """
    example_id = 0
    for line in test_input_reader:
        if line.startswith("-DOCSTART-") or line == "" or line == "\n":
            writer.write(line)
            # Blank line marks an example boundary; advance once the current
            # example's predictions have all been consumed.
            if not preds_list[example_id]:
                example_id += 1
        elif preds_list[example_id]:
            output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
            writer.write(output_line)
        else:
            logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
# Registry mapping task name -> data processor class.
processors = {
    "eg": ExplaGraphProcessor
}
# Registry mapping task name -> output head type.  (Stripped extraction junk
# and a stray mid-file ``from __future__`` import, which is a SyntaxError.)
output_modes = {
    "eg": "classification"
}
import csv
import logging
import os
import sys
from collections import OrderedDict
from io import open
import numpy as np
from nltk import word_tokenize
logger = logging.getLogger(__name__)
class ExplaGraphInputExample(object):
    """A single ExplaGraphs example: texts plus node/edge/stance supervision."""

    def __init__(self, id, belief, argument, external, node_label_internal_belief, node_label_internal_argument,
                 node_label_external, edge_label, stance_label):
        self.id = id  # example index within the split
        self.belief = belief  # belief words (tokenized)
        self.argument = argument  # argument words (tokenized)
        self.external = external  # words of external (non-text) graph nodes
        self.node_label_internal_belief = node_label_internal_belief  # BIO tags over belief words
        self.node_label_internal_argument = node_label_internal_argument  # BIO tags over argument words
        self.node_label_external = node_label_external  # BIO tags over external words
        self.edge_label = edge_label  # flattened node-pair edge-label matrix
        self.stance_label = stance_label  # "support" or "counter"
class ExplaGraphFeatures(object):
    """Padded, model-ready tensors for one ExplaGraphs example."""

    def __init__(self, id, input_ids, input_mask, segment_ids, node_start_index, node_end_index, node_label,
                 edge_label, stance_label):
        self.id = id  # example id
        self.input_ids = input_ids  # token ids, padded to max_seq_length
        self.input_mask = input_mask  # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # all zeros (RoBERTa-style encoding)
        self.node_start_index = node_start_index  # token index of each node start
        self.node_end_index = node_end_index  # token index of each node end
        self.node_label = node_label  # per-token BIO label ids (-100 = ignore)
        self.edge_label = edge_label  # flattened node-pair edge-label matrix
        self.stance_label = stance_label  # stance class id
class ExplaGraphProcessor(object):
    """Reads ExplaGraphs TSV data and builds InputExamples.

    Turns each (belief, argument, stance, graph) record into BIO node labels
    over belief/argument words, labels for external (generated) concept
    words, and a flattened node-pair edge-label matrix.
    """

    def _read_tsv(self, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    # Python 2 compatibility: decode each cell to unicode.
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines

    def get_train_examples(self, data_dir):
        """Build training examples from <data_dir>/train.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")))

    def get_dev_examples(self, data_dir, is_edge_pred=True):
        """Build dev examples, from gold nodes or from predicted-node files."""
        # If predicting nodes, then create labels using gold nodes, because don't care
        # But if predicting edges, create node labels using the predicting nodes
        if not is_edge_pred:
            return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), is_eval=True)
        else:
            # NOTE(review): these file handles are never closed explicitly.
            return self._create_examples_with_predicted_nodes(
                self._read_tsv(os.path.join(data_dir, "dev.tsv")),
                open(os.path.join(data_dir, "internal_nodes_dev.txt"), "r").read().splitlines(),
                open(os.path.join(data_dir, "external_nodes_dev.txt"), "r").read().splitlines())

    def get_test_examples(self, data_dir):
        """Build test examples from <data_dir>/test.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")))

    def get_stance_labels(self):
        """Stance classification label set."""
        return ["support", "counter"]

    def get_node_labels(self):
        """BIO tag set used for node-span tagging."""
        return ["B-N", "I-N", "O"]

    def get_edge_labels(self):
        """All edge relations (and their negations) plus "no relation"."""
        return ["antonym of", "synonym of", "at location", "not at location", "capable of", "not capable of", "causes",
                "not causes", "created by", "not created by", "is a", "is not a", "desires", "not desires",
                "has subevent", "not has subevent", "part of", "not part of", "has context", "not has context",
                "has property", "not has property", "made of", "not made of", "receives action", "not receives action",
                "used for", "not used for", "no relation"]

    def _get_external_nodes_eval(self, belief, argument, external_nodes, internal_nodes_count):
        """Filter generated external nodes for eval (dedupe, cap count and length)."""
        filtered_external_nodes = []
        for external_node in list(set(external_nodes.split(", "))):
            # We'll consider a maximum of 11 nodes (9+2 shared between belief and argument)
            if internal_nodes_count + len(filtered_external_nodes) == 11:
                break
            if external_node in belief or external_node in argument:
                continue
            words = word_tokenize(external_node)
            if len(words) > 3:
                continue
            filtered_external_nodes.append(external_node)
        return filtered_external_nodes

    def _get_external_nodes_train(self, belief, argument, graph):
        """Collect graph nodes that appear in neither the belief nor the argument."""
        external_nodes = []
        # graph is a string of "(head; relation; tail)" edges; strip the outer
        # parentheses and split on ")(" to iterate the edges.
        for edge in graph[1:-1].split(")("):
            edge_parts = edge.split("; ")
            if edge_parts[0] not in belief and edge_parts[0] not in argument and edge_parts[0] not in external_nodes:
                external_nodes.append(edge_parts[0])
            if edge_parts[2] not in belief and edge_parts[2] not in argument and edge_parts[2] not in external_nodes:
                external_nodes.append(edge_parts[2])
        return external_nodes

    def _get_internal_nodes(self, belief, argument, graph):
        """Group graph nodes found in the belief/argument text by word count."""
        internal_nodes = {}
        for edge in graph[1:-1].split(")("):
            edge_parts = edge.split("; ")
            if edge_parts[0] in belief or edge_parts[0] in argument:
                length = len(edge_parts[0].split(" "))
                if length not in internal_nodes:
                    internal_nodes[length] = [edge_parts[0]]
                elif edge_parts[0] not in internal_nodes[length]:
                    internal_nodes[length].append(edge_parts[0])
            if edge_parts[2] in belief or edge_parts[2] in argument:
                length = len(edge_parts[2].split(" "))
                if length not in internal_nodes:
                    internal_nodes[length] = [edge_parts[2]]
                elif edge_parts[2] not in internal_nodes[length]:
                    internal_nodes[length].append(edge_parts[2])
        return internal_nodes

    def _get_edge_label(self, node_label_internal_belief, belief, node_label_internal_argument, argument,
                        external_nodes, graph):
        """Build the flattened node-by-node edge-label matrix.

        Nodes are ordered: belief spans, argument spans, then external nodes.
        The diagonal is -100 (ignore index); absent pairs get "no relation".
        """
        edge_label_map = {label: i for i, label in enumerate(self.get_edge_labels())}
        gold_edges = {}
        for edge in graph[1:-1].split(")("):
            parts = edge.split("; ")
            gold_edges[parts[0], parts[2]] = parts[1]
        ordered_nodes = []
        # Reassemble node phrases (up to 3 words) from the BIO labels.
        for i, (word, node_label) in enumerate(zip(belief, node_label_internal_belief)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(belief) and node_label_internal_belief[i + 1] == "I-N":
                    node += " " + belief[i + 1]
                if i + 2 < len(belief) and node_label_internal_belief[i + 2] == "I-N":
                    node += " " + belief[i + 2]
                ordered_nodes.append(node)
        for i, (word, node_label) in enumerate(zip(argument, node_label_internal_argument)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(argument) and node_label_internal_argument[i + 1] == "I-N":
                    node += " " + argument[i + 1]
                if i + 2 < len(argument) and node_label_internal_argument[i + 2] == "I-N":
                    node += " " + argument[i + 2]
                ordered_nodes.append(node)
        ordered_nodes.extend(external_nodes)
        edge_label = np.zeros((len(ordered_nodes), len(ordered_nodes)), dtype=int)
        for i in range(len(edge_label)):
            for j in range(len(edge_label)):
                if i == j:
                    edge_label[i][j] = -100
                elif (ordered_nodes[i], ordered_nodes[j]) in gold_edges:
                    edge_label[i][j] = edge_label_map[gold_edges[(ordered_nodes[i], ordered_nodes[j])]]
                else:
                    edge_label[i][j] = edge_label_map["no relation"]
        return list(edge_label.flatten())

    def _get_node_label_internal(self, internal_nodes, words):
        """BIO-tag `words` with node spans, matching longest (3-word) nodes first."""
        labels = ["O"] * len(words)
        for length in range(3, 0, -1):
            if length not in internal_nodes:
                continue
            nodes = internal_nodes[length]
            for node in nodes:
                node_words = node.split(" ")
                for (i, word) in enumerate(words):
                    if length == 3 and i < len(words) - 2 and words[i] == node_words[0] and words[i + 1] == node_words[
                            1] and words[i + 2] == node_words[2]:
                        # Only tag spans that are still completely untagged.
                        if labels[i] == "O" and labels[i + 1] == "O" and labels[i + 2] == "O":
                            labels[i] = "B-N"
                            labels[i + 1] = "I-N"
                            labels[i + 2] = "I-N"
                    if length == 2 and i < len(words) - 1 and words[i] == node_words[0] and words[i + 1] == node_words[
                            1]:
                        if labels[i] == "O" and labels[i + 1] == "O":
                            labels[i] = "B-N"
                            labels[i + 1] = "I-N"
                    if length == 1 and words[i] == node_words[0]:
                        if labels[i] == "O":
                            labels[i] = "B-N"
        return labels

    def _get_node_label_external(self, external_nodes):
        """BIO labels for external nodes: B-N then I-N for each remaining word."""
        labels = []
        for external_node in external_nodes:
            length = len(word_tokenize(external_node))
            labels.extend(["B-N"] + ["I-N"] * (length - 1))
        return labels

    def _create_examples(self, records, is_eval=False):
        """Build InputExamples from TSV rows (belief, argument, stance, graph)."""
        examples = []
        max_edge_length = 0
        for (i, record) in enumerate(records):
            belief = record[0].lower()
            argument = record[1].lower()
            stance_label = record[2]
            graph = record[3].lower()
            belief_words = word_tokenize(belief)
            argument_words = word_tokenize(argument)
            internal_nodes = self._get_internal_nodes(belief, argument, graph)
            node_label_internal_belief = self._get_node_label_internal(internal_nodes, belief_words)
            node_label_internal_argument = self._get_node_label_internal(internal_nodes, argument_words)
            # If evaluating, external nodes are not required for tagging because they will come from generation model
            external_nodes = self._get_external_nodes_train(belief, argument, graph) if not is_eval else []
            node_label_external = self._get_node_label_external(external_nodes)
            edge_label = self._get_edge_label(node_label_internal_belief, belief_words, node_label_internal_argument,
                                             argument_words,
                                             external_nodes, graph)
            max_edge_length = max(max_edge_length, len(edge_label))
            external = []
            for external_node in external_nodes:
                external.extend(word_tokenize(external_node))
            examples.append(
                ExplaGraphInputExample(id=i, belief=belief_words, argument=argument_words, external=external,
                                       node_label_internal_belief=node_label_internal_belief,
                                       node_label_internal_argument=node_label_internal_argument,
                                       node_label_external=node_label_external, edge_label=edge_label,
                                       stance_label=stance_label))
        return examples

    def _get_unique_node_count(self, belief, argument, node_label_internal_belief, node_label_internal_argument):
        """Count distinct node phrases across the belief and argument tags."""
        nodes = []
        for i, (word, node_label) in enumerate(zip(belief, node_label_internal_belief)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(belief) and node_label_internal_belief[i + 1] == "I-N":
                    node += " " + belief[i + 1]
                if i + 2 < len(belief) and node_label_internal_belief[i + 2] == "I-N":
                    node += " " + belief[i + 2]
                # NOTE(review): belief nodes are appended without the dedupe
                # check used for argument nodes below — confirm intended.
                nodes.append(node)
        for i, (word, node_label) in enumerate(zip(argument, node_label_internal_argument)):
            if node_label == "B-N":
                node = word
                if i + 1 < len(argument) and node_label_internal_argument[i + 1] == "I-N":
                    node += " " + argument[i + 1]
                if i + 2 < len(argument) and node_label_internal_argument[i + 2] == "I-N":
                    node += " " + argument[i + 2]
                if node not in nodes:
                    nodes.append(node)
        return len(nodes)

    def _create_examples_with_predicted_nodes(self, records, internal_nodes, external_nodes):
        """Build dev examples whose node labels come from model predictions.

        `internal_nodes` holds "word<TAB>tag" lines with blank lines between
        samples; `external_nodes` has one ", "-separated node list per record.
        """
        assert len(records) == len(external_nodes)
        examples = []
        sample_breaks = [i for i, x in enumerate(internal_nodes) if x == ""]
        max_node_count = 0
        for (i, record) in enumerate(records):
            belief = record[0].lower()
            argument = record[1].lower()
            stance_label = record[2]
            belief_words = word_tokenize(belief)
            argument_words = word_tokenize(argument)
            start = 0 if i == 0 else sample_breaks[i - 1] + 1
            end = sample_breaks[i]
            belief_lines = internal_nodes[start:(start + len(belief_words))]
            argument_lines = internal_nodes[(start + len(belief_words)):end]
            node_label_internal_belief = [belief_line.split("\t")[1] for belief_line in belief_lines]
            node_label_internal_argument = [argument_line.split("\t")[1] for argument_line in argument_lines]
            node_count = self._get_unique_node_count(belief_words, argument_words, node_label_internal_belief,
                                                     node_label_internal_argument)
            external = []
            node_label_external = []
            for external_node in list(OrderedDict.fromkeys(external_nodes[i].split(", "))):
                # Allowing a maximum of 9 unique nodes, as per the task
                # NOTE(review): the guard stops at 8 while the comment above
                # says 9 — confirm which cap is intended.
                if node_count >= 8:
                    break
                if external_node in belief or external_node in argument:
                    continue
                words = word_tokenize(external_node)
                if len(words) > 3:
                    continue
                node_count += 1
                external.extend(words)
                node_label_external.extend(["B-N"] + ["I-N"] * (len(words) - 1))
            max_node_count = max(max_node_count, node_count)
            edge_label = np.zeros((node_count, node_count), dtype=int)
            for a in range(len(edge_label)):
                for b in range(len(edge_label)):
                    if a == b:
                        edge_label[a][b] = -100
                    else:
                        edge_label[a][b] = 0  # Don't care, some placeholder value
            edge_label = list(edge_label.flatten())
            examples.append(
                ExplaGraphInputExample(id=i, belief=belief_words, argument=argument_words, external=external,
                                       node_label_internal_belief=node_label_internal_belief,
                                       node_label_internal_argument=node_label_internal_argument,
                                       node_label_external=node_label_external, edge_label=edge_label,
                                       stance_label=stance_label))
        print(max_node_count)  # debug output, left in place
        return examples
def get_word_start_indices(examples, tokenizer, cls_token, sep_token):
    """Return, per example, the token index at which each word starts.

    The token layout mirrors convert_examples_to_features:
    [CLS] belief tokens [SEP] [SEP] argument tokens. Words the tokenizer
    collapses to zero sub-tokens are skipped.

    Args:
        examples: objects with `belief` and `argument` word lists.
        tokenizer: object with a `tokenize(word) -> list[str]` method.
        cls_token: classification token prepended to the sequence.
        sep_token: separator token (inserted twice between segments).

    Returns:
        A list (one entry per example) of lists of token start indices.
    """
    all_word_start_indices = []
    # Fixed: removed a stray `print(ex_index)` debug statement that spammed
    # stdout once per example.
    for example in examples:
        word_start_indices = []
        tokens = [cls_token]
        for word in example.belief:
            word_tokens = tokenizer.tokenize(word)
            if len(word_tokens) > 0:
                word_start_indices.append(len(tokens))
                tokens.extend(word_tokens)
        # RoBERTa-style double separator between belief and argument.
        tokens = tokens + [sep_token] + [sep_token]
        for word in example.argument:
            word_tokens = tokenizer.tokenize(word)
            if len(word_tokens) > 0:
                word_start_indices.append(len(tokens))
                tokens.extend(word_tokens)
        all_word_start_indices.append(word_start_indices)
    return all_word_start_indices
def _encode_words(words, labels, tokenizer, node_label_map,
                  tokens, node_label_ids, node_start_index, node_end_index):
    """Tokenize `words` and append sub-tokens and label ids in place.

    Records the token index of each node start ("B-N") and keeps the
    matching end index updated while "I-N" labels extend the current span.
    Words that tokenize to nothing are skipped.
    """
    for word, label in zip(words, labels):
        word_tokens = tokenizer.tokenize(word)
        if not word_tokens:
            continue
        if label == "B-N":
            node_start_index.append(len(tokens))
        tokens.extend(word_tokens)
        if label == "B-N":
            node_end_index.append(len(tokens) - 1)
        elif label == "I-N":
            node_end_index[-1] = len(tokens) - 1  # update the end index
        # First sub-token keeps the word's label; remaining sub-tokens
        # continue the span with "I-N".
        if label == "B-N":
            node_label_ids.extend(
                [node_label_map[label]] + [node_label_map["I-N"]] * (len(word_tokens) - 1))
        else:
            node_label_ids.extend([node_label_map[label]] * len(word_tokens))


def convert_examples_to_features(examples,
                                 stance_label_list,
                                 node_label_list,
                                 max_seq_length,
                                 max_nodes,
                                 tokenizer,
                                 cls_token='[CLS]',
                                 sep_token='[SEP]'):
    """Convert ExplaGraphInputExamples into padded ExplaGraphFeatures.

    Fixes:
      * the features were previously built with ``id=id`` — the *builtin*
        `id` function — instead of the example's id; now uses ``example.id``.
      * removed a stray per-example ``print(ex_index)`` debug statement.
      * the three near-identical encoding loops (belief / argument /
        external) are factored into `_encode_words`.
    """
    # The encoding is based on RoBERTa (and hence segment ids don't matter)
    node_label_map = {label: i for i, label in enumerate(node_label_list)}
    stance_label_map = {label: i for i, label in enumerate(stance_label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        tokens = [cls_token]
        node_label_ids = [-100]  # CLS never carries a node label
        node_start_index, node_end_index = [], []
        # Encode the belief
        _encode_words(example.belief, example.node_label_internal_belief, tokenizer,
                      node_label_map, tokens, node_label_ids, node_start_index, node_end_index)
        tokens.extend([sep_token, sep_token])
        node_label_ids.extend([-100, -100])
        # Encode the argument
        _encode_words(example.argument, example.node_label_internal_argument, tokenizer,
                      node_label_map, tokens, node_label_ids, node_start_index, node_end_index)
        tokens.extend([sep_token, sep_token])
        node_label_ids.extend([-100, -100])
        # Encode the external concepts
        _encode_words(example.external, example.node_label_external, tokenizer,
                      node_label_map, tokens, node_label_ids, node_start_index, node_end_index)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids = input_ids + ([0] * padding_length)
        input_mask = input_mask + ([0] * padding_length)
        node_label = node_label_ids + ([-100] * padding_length)
        segment_ids = [0] * len(input_ids)
        padding_length = max_seq_length - len(node_start_index)
        node_start_index = node_start_index + ([0] * padding_length)
        node_end_index = node_end_index + ([0] * padding_length)
        edge_label = example.edge_label + [-100] * (max_nodes * max_nodes - len(example.edge_label))
        stance_label = stance_label_map[example.stance_label]
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(node_start_index) == max_seq_length
        assert len(node_end_index) == max_seq_length
        assert len(edge_label) == max_nodes * max_nodes
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("id: %s" % (example.id))
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("node_start_index: %s" % " ".join([str(x) for x in node_start_index]))
            logger.info("node_end_index: %s" % " ".join([str(x) for x in node_end_index]))
            logger.info("node_label: %s" % " ".join([str(x) for x in node_label]))
            logger.info("edge_label: %s" % " ".join([str(x) for x in edge_label]))
            logger.info("label: %s (id = %d)" % (example.stance_label, stance_label))
        features.append(
            ExplaGraphFeatures(id=example.id,  # BUG FIX: was id=id (the builtin)
                               input_ids=input_ids,
                               input_mask=input_mask,
                               segment_ids=segment_ids,
                               node_start_index=node_start_index,
                               node_end_index=node_end_index,
                               node_label=node_label,
                               edge_label=edge_label,
                               stance_label=stance_label))
    return features
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def compute_metrics(task_name, preds, labels):
    """Compute evaluation metrics for `task_name` ("eg" -> accuracy).

    Raises KeyError for an unknown task name.
    """
    assert len(preds) == len(labels)
    if task_name != "eg":
        raise KeyError(task_name)
    return {"acc": simple_accuracy(preds, labels)}
def write_node_predictions_to_file(writer, test_input_reader, preds_list):
    """Write CoNLL-style node predictions next to each input token.

    `preds_list` holds one list of tag predictions per example; tags are
    consumed with pop(0), and `example_id` advances at a separator line once
    the current example's predictions are exhausted.
    """
    example_id = 0
    for line in test_input_reader:
        if line.startswith("-DOCSTART-") or line == "" or line == "\n":
            # Pass separators through unchanged.
            writer.write(line)
            if not preds_list[example_id]:
                example_id += 1
        elif preds_list[example_id]:
            output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
            writer.write(output_line)
        else:
            # Token has no prediction (input was truncated at max_seq_length).
            logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
# Task-name -> data-processor registry (transformers-style lookup table).
processors = {
    "eg": ExplaGraphProcessor
}
output_modes = {
"eg": "classification"
} | 0.365004 | 0.247214 |
import os
from tempfile import tempdir
from pulsar.manager_factory import build_managers
from pulsar.cache import Cache
from pulsar.tools import ToolBox
from pulsar.tools.authorization import get_authorizer
from pulsar import messaging
from galaxy.objectstore import build_object_store_from_config
from galaxy.tools.deps import DependencyManager
from galaxy.jobs.metrics import JobMetrics
from galaxy.util.bunch import Bunch
from logging import getLogger
log = getLogger(__name__)
DEFAULT_PRIVATE_TOKEN = None
DEFAULT_FILES_DIRECTORY = "files"
DEFAULT_STAGING_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "staging")
DEFAULT_PERSISTENCE_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "persisted_data")
# Fixed: the two sentence fragments were concatenated without a separating
# space ("white-list.Ensure ...").
NOT_WHITELIST_WARNING = "Starting the Pulsar without a toolbox to white-list. " + \
    "Ensure this application is protected by firewall or a configured private token."
MULTIPLE_MANAGERS_MESSAGE = "app.only_manager accessed with multiple managers configured"
class PulsarApp(object):
    """Pulsar application context: wires staging, managers, stores and MQ."""

    def __init__(self, **conf):
        # conf comes from keyword arguments, so it is a dict and never None;
        # the guard is kept for unusual callers.
        if conf is None:
            conf = {}
        self.__setup_staging_directory(conf.get("staging_directory", DEFAULT_STAGING_DIRECTORY))
        self.__setup_private_token(conf.get("private_token", DEFAULT_PRIVATE_TOKEN))
        self.__setup_persistence_directory(conf.get("persistence_directory", None))
        self.__setup_tool_config(conf)
        self.__setup_object_store(conf)
        self.__setup_dependency_manager(conf)
        self.__setup_job_metrics(conf)
        self.__setup_managers(conf)
        self.__setup_file_cache(conf)
        self.__setup_bind_to_message_queue(conf)
        self.__recover_jobs()
        self.ensure_cleanup = conf.get("ensure_cleanup", False)

    def shutdown(self, timeout=None):
        """Shut down all managers, then deactivate the message queue if bound."""
        for manager in self.managers.values():
            try:
                manager.shutdown(timeout)
            except Exception:
                # Best-effort: one failing manager must not block the others.
                pass
        if self.__queue_state:
            self.__queue_state.deactivate()
            # NOTE(review): join nested under the queue-state guard, matching
            # upstream pulsar; source indentation was not preserved here.
            if self.ensure_cleanup:
                self.__queue_state.join(timeout)

    def __setup_bind_to_message_queue(self, conf):
        """Bind this app to a message queue when a URL is configured."""
        message_queue_url = conf.get("message_queue_url", None)
        queue_state = None
        if message_queue_url:
            queue_state = messaging.bind_app(self, message_queue_url, conf)
        self.__queue_state = queue_state

    def __setup_tool_config(self, conf):
        """
        Setups toolbox object and authorization mechanism based
        on supplied toolbox_path.
        """
        tool_config_files = conf.get("tool_config_files", None)
        if not tool_config_files:
            # For compatibity with Galaxy, allow tool_config_file
            # option name.
            tool_config_files = conf.get("tool_config_file", None)
        toolbox = None
        if tool_config_files:
            toolbox = ToolBox(tool_config_files)
        else:
            log.info(NOT_WHITELIST_WARNING)
        self.toolbox = toolbox
        self.authorizer = get_authorizer(toolbox)

    def __setup_staging_directory(self, staging_directory):
        self.staging_directory = os.path.abspath(staging_directory)

    def __setup_managers(self, conf):
        self.managers = build_managers(self, conf)

    def __recover_jobs(self):
        # Ask every manager to resume jobs that were active before restart.
        for manager in self.managers.values():
            manager.recover_active_jobs()

    def __setup_private_token(self, private_token):
        self.private_token = private_token
        if private_token:
            log.info("Securing Pulsar web app with private key, please verify you are using HTTPS so key cannot be obtained by monitoring traffic.")

    def __setup_persistence_directory(self, persistence_directory):
        # "__none__" explicitly disables persistence.
        persistence_directory = persistence_directory or DEFAULT_PERSISTENCE_DIRECTORY
        if persistence_directory == "__none__":
            persistence_directory = None
        self.persistence_directory = persistence_directory

    def __setup_file_cache(self, conf):
        file_cache_dir = conf.get('file_cache_dir', None)
        self.file_cache = Cache(file_cache_dir) if file_cache_dir else None

    def __setup_object_store(self, conf):
        """Build a Galaxy object store when a config file is supplied."""
        if "object_store_config_file" not in conf:
            self.object_store = None
            return
        object_store_config = Bunch(
            object_store_config_file=conf['object_store_config_file'],
            file_path=conf.get("object_store_file_path", None),
            object_store_check_old_style=False,
            job_working_directory=conf.get("object_store_job_working_directory", None),
            new_file_path=conf.get("object_store_new_file_path", tempdir),
            umask=int(conf.get("object_store_umask", "0000")),
        )
        self.object_store = build_object_store_from_config(object_store_config)

    def __setup_dependency_manager(self, conf):
        dependencies_dir = conf.get("tool_dependency_dir", "dependencies")
        resolvers_config_file = conf.get("dependency_resolvers_config_file", "dependency_resolvers_conf.xml")
        # Forward all conda_* options to the dependency manager untouched.
        conda_config = {k: v for k, v in conf.items() if k.startswith("conda_")}
        self.dependency_manager = DependencyManager(dependencies_dir, resolvers_config_file, app_config=conda_config)

    def __setup_job_metrics(self, conf):
        job_metrics = conf.get("job_metrics", None)
        if job_metrics is None:
            job_metrics_config_file = conf.get("job_metrics_config_file", "job_metrics_conf.xml")
            job_metrics = JobMetrics(job_metrics_config_file)
        self.job_metrics = job_metrics
@property
def only_manager(self):
"""Convience accessor for tests and contexts with sole manager."""
assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE
return list(self.managers.values())[0] | pulsar/core.py | import os
from tempfile import tempdir
from pulsar.manager_factory import build_managers
from pulsar.cache import Cache
from pulsar.tools import ToolBox
from pulsar.tools.authorization import get_authorizer
from pulsar import messaging
from galaxy.objectstore import build_object_store_from_config
from galaxy.tools.deps import DependencyManager
from galaxy.jobs.metrics import JobMetrics
from galaxy.util.bunch import Bunch
from logging import getLogger
log = getLogger(__name__)
DEFAULT_PRIVATE_TOKEN = None
DEFAULT_FILES_DIRECTORY = "files"
DEFAULT_STAGING_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "staging")
DEFAULT_PERSISTENCE_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "persisted_data")
# Fixed: the two sentence fragments were concatenated without a separating
# space ("white-list.Ensure ...").
NOT_WHITELIST_WARNING = "Starting the Pulsar without a toolbox to white-list. " + \
    "Ensure this application is protected by firewall or a configured private token."
MULTIPLE_MANAGERS_MESSAGE = "app.only_manager accessed with multiple managers configured"
class PulsarApp(object):
    """Pulsar application context: wires staging, managers, stores and MQ."""

    def __init__(self, **conf):
        # conf comes from keyword arguments, so it is a dict and never None;
        # the guard is kept for unusual callers.
        if conf is None:
            conf = {}
        self.__setup_staging_directory(conf.get("staging_directory", DEFAULT_STAGING_DIRECTORY))
        self.__setup_private_token(conf.get("private_token", DEFAULT_PRIVATE_TOKEN))
        self.__setup_persistence_directory(conf.get("persistence_directory", None))
        self.__setup_tool_config(conf)
        self.__setup_object_store(conf)
        self.__setup_dependency_manager(conf)
        self.__setup_job_metrics(conf)
        self.__setup_managers(conf)
        self.__setup_file_cache(conf)
        self.__setup_bind_to_message_queue(conf)
        self.__recover_jobs()
        self.ensure_cleanup = conf.get("ensure_cleanup", False)

    def shutdown(self, timeout=None):
        """Shut down all managers, then deactivate the message queue if bound."""
        for manager in self.managers.values():
            try:
                manager.shutdown(timeout)
            except Exception:
                # Best-effort: one failing manager must not block the others.
                pass
        if self.__queue_state:
            self.__queue_state.deactivate()
            # NOTE(review): join nested under the queue-state guard, matching
            # upstream pulsar; source indentation was not preserved here.
            if self.ensure_cleanup:
                self.__queue_state.join(timeout)

    def __setup_bind_to_message_queue(self, conf):
        """Bind this app to a message queue when a URL is configured."""
        message_queue_url = conf.get("message_queue_url", None)
        queue_state = None
        if message_queue_url:
            queue_state = messaging.bind_app(self, message_queue_url, conf)
        self.__queue_state = queue_state

    def __setup_tool_config(self, conf):
        """
        Setups toolbox object and authorization mechanism based
        on supplied toolbox_path.
        """
        tool_config_files = conf.get("tool_config_files", None)
        if not tool_config_files:
            # For compatibity with Galaxy, allow tool_config_file
            # option name.
            tool_config_files = conf.get("tool_config_file", None)
        toolbox = None
        if tool_config_files:
            toolbox = ToolBox(tool_config_files)
        else:
            log.info(NOT_WHITELIST_WARNING)
        self.toolbox = toolbox
        self.authorizer = get_authorizer(toolbox)

    def __setup_staging_directory(self, staging_directory):
        self.staging_directory = os.path.abspath(staging_directory)

    def __setup_managers(self, conf):
        self.managers = build_managers(self, conf)

    def __recover_jobs(self):
        # Ask every manager to resume jobs that were active before restart.
        for manager in self.managers.values():
            manager.recover_active_jobs()

    def __setup_private_token(self, private_token):
        self.private_token = private_token
        if private_token:
            log.info("Securing Pulsar web app with private key, please verify you are using HTTPS so key cannot be obtained by monitoring traffic.")

    def __setup_persistence_directory(self, persistence_directory):
        # "__none__" explicitly disables persistence.
        persistence_directory = persistence_directory or DEFAULT_PERSISTENCE_DIRECTORY
        if persistence_directory == "__none__":
            persistence_directory = None
        self.persistence_directory = persistence_directory

    def __setup_file_cache(self, conf):
        file_cache_dir = conf.get('file_cache_dir', None)
        self.file_cache = Cache(file_cache_dir) if file_cache_dir else None

    def __setup_object_store(self, conf):
        """Build a Galaxy object store when a config file is supplied."""
        if "object_store_config_file" not in conf:
            self.object_store = None
            return
        object_store_config = Bunch(
            object_store_config_file=conf['object_store_config_file'],
            file_path=conf.get("object_store_file_path", None),
            object_store_check_old_style=False,
            job_working_directory=conf.get("object_store_job_working_directory", None),
            new_file_path=conf.get("object_store_new_file_path", tempdir),
            umask=int(conf.get("object_store_umask", "0000")),
        )
        self.object_store = build_object_store_from_config(object_store_config)

    def __setup_dependency_manager(self, conf):
        dependencies_dir = conf.get("tool_dependency_dir", "dependencies")
        resolvers_config_file = conf.get("dependency_resolvers_config_file", "dependency_resolvers_conf.xml")
        # Forward all conda_* options to the dependency manager untouched.
        conda_config = {k: v for k, v in conf.items() if k.startswith("conda_")}
        self.dependency_manager = DependencyManager(dependencies_dir, resolvers_config_file, app_config=conda_config)

    def __setup_job_metrics(self, conf):
        job_metrics = conf.get("job_metrics", None)
        if job_metrics is None:
            job_metrics_config_file = conf.get("job_metrics_config_file", "job_metrics_conf.xml")
            job_metrics = JobMetrics(job_metrics_config_file)
        self.job_metrics = job_metrics
@property
def only_manager(self):
"""Convience accessor for tests and contexts with sole manager."""
assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE
return list(self.managers.values())[0] | 0.585457 | 0.095729 |
import collections
from typing import Dict, Text
import prometheus_client
import six
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.stats import stats_collector
from grr_response_core.stats import stats_utils
class _Metric(object):
  """A Metric that wraps a prometheus_client metrics.

  Attributes:
    metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
    fields: A list of (field name, field type) tuples, defining the dimensions
      of this metric.
    metric: The underlying metric, an instance of prometheus_client.Counter,
      Gauge, or Histogram.
  """

  def __init__(self, metadata: rdf_stats.MetricMetadata,
               registry: prometheus_client.registry.CollectorRegistry):
    """Instantiates a new _Metric.

    Args:
      metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
      registry: A prometheus_client.Registry instance.

    Raises:
      ValueError: metadata contains an unknown metric_type.
    """
    self.metadata = metadata
    self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
        metadata.fields_defs)
    field_names = [name for name, _ in self.fields]
    if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      self.metric = prometheus_client.Counter(
          metadata.varname,
          metadata.docstring,
          labelnames=field_names,
          registry=registry)
    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      # Default histogram bucket boundaries, used when the metadata does not
      # define any bins.
      bins = metadata.bins or [
          0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,
          9, 10, 15, 20, 50, 100
      ]
      self.metric = prometheus_client.Histogram(
          metadata.varname,
          metadata.docstring,
          labelnames=field_names,
          buckets=bins,
          registry=registry)
    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
      self.metric = prometheus_client.Gauge(
          metadata.varname,
          metadata.docstring,
          labelnames=field_names,
          registry=registry)
    else:
      raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type))

  def Validate(self, fields):
    """Raises ValueError unless `fields` matches the declared field count."""
    if len(fields or ()) != len(self.fields):
      raise ValueError(
          "Statistic {} was created with {!r} fields, but a value with fields"
          " {!r} was trying to be saved.".format(self.metadata.varname,
                                                 self.fields, fields))

  def ForFields(self, fields) -> prometheus_client.metrics.MetricWrapperBase:
    """Returns the child metric bound to `fields`, or the bare metric."""
    self.Validate(fields)
    if fields:
      return self.metric.labels(*fields)
    else:
      return self.metric

  def __repr__(self):
    return "<{} varname={!r} fields={!r} metric={!r}>".format(
        compatibility.GetName(type(self)), self.metadata.varname, self.fields,
        self.metric)
def _DistributionFromHistogram(metric, values_by_suffix):
  """Instantiate a rdf_stats.Distribution from a Prometheus Histogram.

  Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
  bound. At instantiation, +Inf is implicitly appended to the upper bounds.
  The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
  Bucket "0.0" : -Inf <= values <= 0.0
  Bucket "0.1" : -Inf <= values <= 0.1
  Bucket "0.2" : -Inf <= values <= 0.2
  Bucket "+Inf": -Inf <= values <= +Inf

  Distribution uses exclusive bins greater or equal to a lower bound and
  strictly lower than the next lower bound. At instantiation, -Inf is
  implicitly prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the
  following bins:
  Bin "-Inf": -Inf <= values < 0.0
  Bin "0.0" : 0.0 <= values < 0.1
  Bin "0.1" : 0.1 <= values < 0.2
  Bin "0.2" : 0.2 <= values <= +Inf

  Thus, Histogram buckets can be transformed to Distribution bins, by reading
  in the same order and subtracting the value of the previous bin to remove
  the cumulative sum. There is a slight incompatibility for values equal to
  bin boundaries, because boundaries describe the upper bound for Prometheus
  and the lower bound for our internal implementation.

  Args:
    metric: prometheus_stats_collector.Metric
    values_by_suffix: dict of metric name suffixes and sample values lists

  Returns:
    rdf_stats.Distribution

  Raises:
    ValueError: The Histogram and metadata bin count do not match.
  """
  dist = rdf_stats.Distribution(bins=list(metric.metadata.bins))
  if metric.metadata.bins and len(dist.heights) != len(
      values_by_suffix["_bucket"]):
    # Fixed: the two message fragments previously concatenated without a
    # space, producing "underlyingHistogram".
    raise ValueError(
        "Trying to create Distribution with {} bins, but underlying "
        "Histogram has {} buckets".format(
            len(dist.heights), len(values_by_suffix["_bucket"])))
  dist.heights = values_by_suffix["_bucket"]
  # Remove cumulative sum by subtracting the value of the previous bin
  for i in reversed(range(1, len(dist.heights))):
    dist.heights[i] -= dist.heights[i - 1]
  dist.count = values_by_suffix["_count"][0]
  dist.sum = values_by_suffix["_sum"][0]
  return dist
class PrometheusStatsCollector(stats_collector.StatsCollector):
  """Prometheus-based StatsCollector.

  This StatsCollector maps native Counters and Gauges to their Prometheus
  counterparts. Native Events are mapped to Prometheus Histograms.

  Attributes:
    lock: threading.Lock required by the utils.Synchronized decorator.
  """

  def __init__(self, registry=None):
    """Instantiates a new PrometheusStatsCollector.

    Args:
      registry: An instance of prometheus_client.CollectorRegistry. If None, a
        new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
        for the global default registry.
    """
    self._metrics = {}  # type: Dict[Text, _Metric]
    if registry is None:
      self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
    else:
      self._registry = registry
    super().__init__()

  def _InitializeMetric(self, metadata: rdf_stats.MetricMetadata):
    """Registers a new _Metric for the given metadata with our registry."""
    self._metrics[metadata.varname] = _Metric(metadata, registry=self._registry)

  @utils.Synchronized
  def IncrementCounter(self, metric_name, delta=1, fields=None):
    """Increments the named Prometheus Counter by delta."""
    metric = self._metrics[metric_name]
    counter = metric.ForFields(fields)  # type: prometheus_client.Counter
    counter.inc(delta)

  @utils.Synchronized
  def RecordEvent(self, metric_name, value, fields=None):
    """Records a single observation in the named Prometheus Histogram."""
    # TODO(user): decouple validation from implementation.
    # Use validation wrapper approach in StatsCollector (similar to
    # how it's done in REL_DB).
    precondition.AssertType(value, six.integer_types + (float,))
    metric = self._metrics[metric_name]
    histogram = metric.ForFields(fields)  # type: prometheus_client.Histogram
    histogram.observe(value)

  @utils.Synchronized
  def SetGaugeValue(self, metric_name, value, fields=None):
    """Sets the named Prometheus Gauge to a fixed value."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set(value)

  @utils.Synchronized
  def SetGaugeCallback(self, metric_name, callback, fields=None):
    """Makes the named Prometheus Gauge report the callback's return value."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set_function(callback)

  @utils.Synchronized
  def GetMetricFields(self, metric_name):
    """Returns the list of distinct label-value tuples seen for a metric."""
    metric = self._metrics[metric_name]
    if not metric.fields:
      return []
    # Collect label values from every exported sample; a set deduplicates
    # samples that share the same label tuple (e.g. multiple Histogram rows).
    field_tuples = set()
    for prom_metric in metric.metric.collect():
      for sample in prom_metric.samples:
        labels = [sample.labels[field_name] for field_name, _ in metric.fields]
        field_tuples.add(tuple(labels))
    return list(field_tuples)

  @utils.Synchronized
  def GetMetricValue(self, metric_name, fields=None):
    """Reads back the current value of a metric for the given field values."""
    metric = self._metrics[metric_name]
    metric_type = metric.metadata.metric_type
    sub_metrics = metric.ForFields(fields).collect()
    samples = [sample for sm in sub_metrics for sample in sm.samples]
    # Group sample values by the Prometheus name suffix ("_total", "_bucket",
    # "_count", "_sum", ...) so each metric type can pick what it needs.
    values_by_suffix = collections.defaultdict(list)
    for sample in samples:
      suffix = sample.name.replace(metric_name, "")
      values_by_suffix[suffix].append(sample.value)
    if metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      return _DistributionFromHistogram(metric, values_by_suffix)
    elif metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      return values_by_suffix["_total"][0]
    else:
      # Gauges: the last exported sample carries the current value.
      # BUGFIX: stray non-Python tokens fused onto this return statement in
      # the original file have been removed.
      return samples[-1].value


# NOTE(review): module-level import restored here; it had been fused into the
# corrupted final line of the class above and is needed by code further down.
import collections
from typing import Dict, Text
import prometheus_client
import six
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.stats import stats_collector
from grr_response_core.stats import stats_utils
class _Metric(object):
  """A Metric that wraps a prometheus_client metrics.

  Attributes:
    metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
    fields: A list of (field name, field type) tuples, defining the dimensions
      of this metric.
    metric: The underlying metric, an instance of prometheus_client.Counter,
      Gauge, or Histogram.
  """

  def __init__(self, metadata: rdf_stats.MetricMetadata,
               registry: prometheus_client.registry.CollectorRegistry):
    """Instantiates a new _Metric.

    Args:
      metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
      registry: A prometheus_client.Registry instance.

    Raises:
      ValueError: metadata contains an unknown metric_type.
    """
    self.metadata = metadata
    self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
        metadata.fields_defs)
    label_names = [field_name for field_name, _ in self.fields]

    # Map the native metric type onto its Prometheus counterpart.
    metric_types = rdf_stats.MetricMetadata.MetricType
    if metadata.metric_type == metric_types.COUNTER:
      self.metric = prometheus_client.Counter(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          registry=registry)
    elif metadata.metric_type == metric_types.EVENT:
      # Fall back to a broad set of default buckets when the metadata does
      # not declare explicit bins.
      bucket_bounds = metadata.bins or [
          0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,
          9, 10, 15, 20, 50, 100
      ]
      self.metric = prometheus_client.Histogram(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          buckets=bucket_bounds,
          registry=registry)
    elif metadata.metric_type == metric_types.GAUGE:
      self.metric = prometheus_client.Gauge(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          registry=registry)
    else:
      raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type))

  def Validate(self, fields):
    """Raises ValueError if the field values don't match the declared fields."""
    provided = len(fields or ())
    if provided != len(self.fields):
      raise ValueError(
          "Statistic {} was created with {!r} fields, but a value with fields"
          " {!r} was trying to be saved.".format(self.metadata.varname,
                                                 self.fields, fields))

  def ForFields(self, fields) -> prometheus_client.metrics.MetricWrapperBase:
    """Returns the child metric for the given field values (or self.metric)."""
    self.Validate(fields)
    if not fields:
      return self.metric
    return self.metric.labels(*fields)

  def __repr__(self):
    type_name = compatibility.GetName(type(self))
    return "<{} varname={!r} fields={!r} metric={!r}>".format(
        type_name, self.metadata.varname, self.fields, self.metric)
def _DistributionFromHistogram(metric, values_by_suffix):
  """Instantiate a rdf_stats.Distribution from a Prometheus Histogram.

  Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
  bound. At instantiation, +Inf is implicitly appended to the upper bounds.
  The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
  Bucket "0.0" : -Inf <= values <= 0.0
  Bucket "0.1" : -Inf <= values <= 0.1
  Bucket "0.2" : -Inf <= values <= 0.2
  Bucket "+Inf": -Inf <= values <= +Inf

  Distribution uses exclusive bins greater or equal to a lower bound and
  strictly lower than the next lower bound. At instantiation, -Inf is implicitly
  prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the following bins:
  Bin "-Inf": -Inf <= values < 0.0
  Bin "0.0" : 0.0  <= values < 0.1
  Bin "0.1" : 0.1  <= values < 0.2
  Bin "0.2" : 0.2  <= values <= +Inf

  Thus, Histogram buckets can be transformed to Distribution bins, by reading
  in the same order and subtracting the value of the previous bin to remove the
  cumulative sum. There is a slight incompatibility for values equal to bin
  boundaries, because boundaries describe the upper bound for Prometheus and
  the lower bound for our internal implementation.

  Args:
    metric: prometheus_stats_collector.Metric
    values_by_suffix: dict of metric name suffixes and sample values lists

  Returns:
    rdf_stats.Distribution

  Raises:
    ValueError: The Histogram and metadata bin count do not match.
  """
  dist = rdf_stats.Distribution(bins=list(metric.metadata.bins))
  if metric.metadata.bins and len(dist.heights) != len(
      values_by_suffix["_bucket"]):
    # BUGFIX: the two string fragments previously concatenated to
    # "underlyingHistogram" (missing separating space).
    raise ValueError(
        "Trying to create Distribution with {} bins, but underlying "
        "Histogram has {} buckets".format(
            len(dist.heights), len(values_by_suffix["_bucket"])))
  dist.heights = values_by_suffix["_bucket"]
  # Remove the cumulative sum by subtracting the value of the previous bin,
  # iterating from the last bucket backwards so each subtraction uses the
  # still-cumulative predecessor.
  for i in reversed(range(1, len(dist.heights))):
    dist.heights[i] -= dist.heights[i - 1]
  dist.count = values_by_suffix["_count"][0]
  dist.sum = values_by_suffix["_sum"][0]
  return dist
class PrometheusStatsCollector(stats_collector.StatsCollector):
  """Prometheus-based StatsCollector.

  This StatsCollector maps native Counters and Gauges to their Prometheus
  counterparts. Native Events are mapped to Prometheus Histograms.

  Attributes:
    lock: threading.Lock required by the utils.Synchronized decorator.
  """

  def __init__(self, registry=None):
    """Instantiates a new PrometheusStatsCollector.

    Args:
      registry: An instance of prometheus_client.CollectorRegistry. If None, a
        new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
        for the global default registry.
    """
    self._metrics = {}  # type: Dict[Text, _Metric]
    if registry is None:
      self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
    else:
      self._registry = registry
    super().__init__()

  def _InitializeMetric(self, metadata: rdf_stats.MetricMetadata):
    """Registers a new _Metric for the given metadata with our registry."""
    self._metrics[metadata.varname] = _Metric(metadata, registry=self._registry)

  @utils.Synchronized
  def IncrementCounter(self, metric_name, delta=1, fields=None):
    """Increments the named Prometheus Counter by delta."""
    metric = self._metrics[metric_name]
    counter = metric.ForFields(fields)  # type: prometheus_client.Counter
    counter.inc(delta)

  @utils.Synchronized
  def RecordEvent(self, metric_name, value, fields=None):
    """Records a single observation in the named Prometheus Histogram."""
    # TODO(user): decouple validation from implementation.
    # Use validation wrapper approach in StatsCollector (similar to
    # how it's done in REL_DB).
    precondition.AssertType(value, six.integer_types + (float,))
    metric = self._metrics[metric_name]
    histogram = metric.ForFields(fields)  # type: prometheus_client.Histogram
    histogram.observe(value)

  @utils.Synchronized
  def SetGaugeValue(self, metric_name, value, fields=None):
    """Sets the named Prometheus Gauge to a fixed value."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set(value)

  @utils.Synchronized
  def SetGaugeCallback(self, metric_name, callback, fields=None):
    """Makes the named Prometheus Gauge report the callback's return value."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set_function(callback)

  @utils.Synchronized
  def GetMetricFields(self, metric_name):
    """Returns the list of distinct label-value tuples seen for a metric."""
    metric = self._metrics[metric_name]
    if not metric.fields:
      return []
    # Collect label values from every exported sample; a set deduplicates
    # samples that share the same label tuple (e.g. multiple Histogram rows).
    field_tuples = set()
    for prom_metric in metric.metric.collect():
      for sample in prom_metric.samples:
        labels = [sample.labels[field_name] for field_name, _ in metric.fields]
        field_tuples.add(tuple(labels))
    return list(field_tuples)

  @utils.Synchronized
  def GetMetricValue(self, metric_name, fields=None):
    """Reads back the current value of a metric for the given field values."""
    metric = self._metrics[metric_name]
    metric_type = metric.metadata.metric_type
    sub_metrics = metric.ForFields(fields).collect()
    samples = [sample for sm in sub_metrics for sample in sm.samples]
    # Group sample values by the Prometheus name suffix ("_total", "_bucket",
    # "_count", "_sum", ...) so each metric type can pick what it needs.
    values_by_suffix = collections.defaultdict(list)
    for sample in samples:
      suffix = sample.name.replace(metric_name, "")
      values_by_suffix[suffix].append(sample.value)
    if metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      return _DistributionFromHistogram(metric, values_by_suffix)
    elif metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      return values_by_suffix["_total"][0]
    else:
      # Gauges: the last exported sample carries the current value.
      # BUGFIX: stray non-Python tokens ("| 0.897622 | 0.287502") fused onto
      # this return statement in the original file have been removed.
      return samples[-1].value
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailmojo_sdk.api_client import ApiClient
class ListApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_segment(self, list_id, segment, **kwargs): # noqa: E501
"""Create a segment in the email list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_segment(list_id, segment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object list_id: ID of the email list to create a segment in. (required)
:param SegmentCreation segment: (required)
:return: Segment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_segment_with_http_info(list_id, segment, **kwargs) # noqa: E501
else:
(data) = self.create_segment_with_http_info(list_id, segment, **kwargs) # noqa: E501
return data
    def create_segment_with_http_info(self, list_id, segment, **kwargs):  # noqa: E501
        """Create a segment in the email list.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_segment_with_http_info(list_id, segment, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param object list_id: ID of the email list to create a segment in. (required)
        :param SegmentCreation segment: (required)
        :return: Segment
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['list_id', 'segment']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self, the named arguments and the raw kwargs dict. Introducing any
        # local variable above this line would leak it into `params` -- do not
        # reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_segment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling `create_segment`")  # noqa: E501
        # verify the required parameter 'segment' is set
        if ('segment' not in params or
                params['segment'] is None):
            raise ValueError("Missing the required parameter `segment` when calling `create_segment`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The segment payload is sent as the JSON request body.
        body_params = None
        if 'segment' in params:
            body_params = params['segment']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/{list_id}/segments/', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Segment',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_list_by_id(self, list_id, **kwargs): # noqa: E501
"""Retrieve an email list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_by_id(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list to retrieve. (required)
:return: ListDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_by_id_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_list_by_id_with_http_info(list_id, **kwargs) # noqa: E501
return data
    def get_list_by_id_with_http_info(self, list_id, **kwargs):  # noqa: E501
        """Retrieve an email list.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_list_by_id_with_http_info(list_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int list_id: ID of the email list to retrieve. (required)
        :return: ListDetail
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['list_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self, the named arguments and the raw kwargs dict. Introducing any
        # local variable above this line would leak it into `params` -- do not
        # reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_list_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling `get_list_by_id`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/{list_id}/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ListDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_lists(self, **kwargs): # noqa: E501
"""Retrieve all email lists. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lists(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[List]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_lists_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_lists_with_http_info(**kwargs) # noqa: E501
return data
    def get_lists_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieve all email lists.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_lists_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[List]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self and the raw kwargs dict. Introducing any local variable above
        # this line would leak it into `params` -- do not reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_lists" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[List]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_subscriber_on_list_by_email(self, list_id, email, **kwargs): # noqa: E501
"""Retrieve a subscriber. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_subscriber_on_list_by_email(list_id, email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list to retrieve the subscriber from. (required)
:param str email: Email address of the contact to retrieve. (required)
:return: Subscriber
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_subscriber_on_list_by_email_with_http_info(list_id, email, **kwargs) # noqa: E501
else:
(data) = self.get_subscriber_on_list_by_email_with_http_info(list_id, email, **kwargs) # noqa: E501
return data
    def get_subscriber_on_list_by_email_with_http_info(self, list_id, email, **kwargs):  # noqa: E501
        """Retrieve a subscriber.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_subscriber_on_list_by_email_with_http_info(list_id, email, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int list_id: ID of the email list to retrieve the subscriber from. (required)
        :param str email: Email address of the contact to retrieve. (required)
        :return: Subscriber
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['list_id', 'email']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self, the named arguments and the raw kwargs dict. Introducing any
        # local variable above this line would leak it into `params` -- do not
        # reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_subscriber_on_list_by_email" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling `get_subscriber_on_list_by_email`")  # noqa: E501
        # verify the required parameter 'email' is set
        if ('email' not in params or
                params['email'] is None):
            raise ValueError("Missing the required parameter `email` when calling `get_subscriber_on_list_by_email`")  # noqa: E501

        collection_formats = {}

        # Both list_id and email are substituted into the URL path.
        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501
        if 'email' in params:
            path_params['email'] = params['email']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/{list_id}/subscribers/{email}/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Subscriber',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_subscribers_on_list(self, list_id, **kwargs): # noqa: E501
"""Retrieve subscribers on a list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_subscribers_on_list(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list. (required)
:param int limit: Limits the result to given count.
:return: list[Subscriber]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_subscribers_on_list_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_subscribers_on_list_with_http_info(list_id, **kwargs) # noqa: E501
return data
    def get_subscribers_on_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
        """Retrieve subscribers on a list.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_subscribers_on_list_with_http_info(list_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int list_id: ID of the email list. (required)
        :param int limit: Limits the result to given count.
        :return: list[Subscriber]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['list_id', 'limit']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self, the named arguments and the raw kwargs dict. Introducing any
        # local variable above this line would leak it into `params` -- do not
        # reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_subscribers_on_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling `get_subscribers_on_list`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501

        # The optional `limit` only appears in the query string when supplied.
        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/{list_id}/subscribers/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Subscriber]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_unsubscribed_on_list(self, list_id, **kwargs): # noqa: E501
"""Retrieve unsubscribed contacts on a list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_unsubscribed_on_list(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list. (required)
:param int limit: Limits the result to given count.
:return: list[Contact]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_unsubscribed_on_list_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_unsubscribed_on_list_with_http_info(list_id, **kwargs) # noqa: E501
return data
    def get_unsubscribed_on_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
        """Retrieve unsubscribed contacts on a list.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_unsubscribed_on_list_with_http_info(list_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int list_id: ID of the email list. (required)
        :param int limit: Limits the result to given count.
        :return: list[Contact]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['list_id', 'limit']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted right here, so `params` holds exactly
        # self, the named arguments and the raw kwargs dict. Introducing any
        # local variable above this line would leak it into `params` -- do not
        # reorder.
        params = locals()
        # Reject unknown keyword arguments and fold the known ones into params.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_unsubscribed_on_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling `get_unsubscribed_on_list`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501

        # The optional `limit` only appears in the query string when supplied.
        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['mailmojo_auth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/lists/{list_id}/unsubscribed/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Contact]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def import_subscribers_to_list(self, list_id, contacts, **kwargs): # noqa: E501
"""Subscribe contacts to the email list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_subscribers_to_list(list_id, contacts, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list to subscribe to. (required)
:param list[Contacts] contacts: (required)
:return: ImportResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.import_subscribers_to_list_with_http_info(list_id, contacts, **kwargs) # noqa: E501
else:
(data) = self.import_subscribers_to_list_with_http_info(list_id, contacts, **kwargs) # noqa: E501
return data
def import_subscribers_to_list_with_http_info(self, list_id, contacts, **kwargs):  # noqa: E501
    """Subscribe contacts to the email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.import_subscribers_to_list_with_http_info(list_id, contacts, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param list[Contacts] contacts: (required)
    :return: ImportResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'contacts']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method import_subscribers_to_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `import_subscribers_to_list`")  # noqa: E501
    # verify the required parameter 'contacts' is set
    if ('contacts' not in params or
            params['contacts'] is None):
        raise ValueError("Missing the required parameter `contacts` when calling `import_subscribers_to_list`")  # noqa: E501
    collection_formats = {}
    # The target list is identified in the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The contacts travel as the request body.
    body_params = None
    if 'contacts' in params:
        body_params = params['contacts']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type` — this import endpoint declares a CSV
    # payload ('text/csv'), unlike the JSON used by sibling operations.
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['text/csv'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/import/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ImportResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def subscribe_contact_to_list(self, list_id, contact, **kwargs):  # noqa: E501
    """Subscribe a contact to the email list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.subscribe_contact_to_list(list_id, contact, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param Subscriber contact: (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.subscribe_contact_to_list_with_http_info(list_id, contact, **kwargs)  # noqa: E501
def subscribe_contact_to_list_with_http_info(self, list_id, contact, **kwargs):  # noqa: E501
    """Subscribe a contact to the email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.subscribe_contact_to_list_with_http_info(list_id, contact, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param Subscriber contact: (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'contact']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method subscribe_contact_to_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `subscribe_contact_to_list`")  # noqa: E501
    # verify the required parameter 'contact' is set
    if ('contact' not in params or
            params['contact'] is None):
        raise ValueError("Missing the required parameter `contact` when calling `subscribe_contact_to_list`")  # noqa: E501
    collection_formats = {}
    # The target list is identified in the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The contact travels as the JSON request body.
    body_params = None
    if 'contact' in params:
        body_params = params['contact']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Contact',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def unsubscribe_contact_on_list_by_email(self, list_id, email, **kwargs):  # noqa: E501
    """Unsubscribe a contact.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.unsubscribe_contact_on_list_by_email(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to unsubscribe from. (required)
    :param str email: Email address of the contact to unsubscribe. (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.unsubscribe_contact_on_list_by_email_with_http_info(list_id, email, **kwargs)  # noqa: E501
def unsubscribe_contact_on_list_by_email_with_http_info(self, list_id, email, **kwargs):  # noqa: E501
    """Unsubscribe a contact.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.unsubscribe_contact_on_list_by_email_with_http_info(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to unsubscribe from. (required)
    :param str email: Email address of the contact to unsubscribe. (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'email']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method unsubscribe_contact_on_list_by_email" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `unsubscribe_contact_on_list_by_email`")  # noqa: E501
    # verify the required parameter 'email' is set
    if ('email' not in params or
            params['email'] is None):
        raise ValueError("Missing the required parameter `email` when calling `unsubscribe_contact_on_list_by_email`")  # noqa: E501
    collection_formats = {}
    # Both the list and the contact are addressed in the URL path;
    # a DELETE with no request body performs the unsubscribe.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    if 'email' in params:
        path_params['email'] = params['email']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/{email}/', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Contact',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_list(self, list_id, **kwargs):  # noqa: E501
    """Update an email list partially.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.update_list(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :param ListDetail list:
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.update_list_with_http_info(list_id, **kwargs)  # noqa: E501
def update_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Update an email list partially.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_list_with_http_info(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :param ListDetail list:
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    # Note 'list' (the patch body) is optional and only ever arrives via
    # **kwargs, shadowing nothing since it is a dict key, not a local.
    all_params = ['list_id', 'list']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `update_list`")  # noqa: E501
    collection_formats = {}
    # The list being patched is identified in the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The partial update travels as the JSON request body (if supplied).
    body_params = None
    if 'list' in params:
        body_params = params['list']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ListDetail',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailmojo_sdk.api_client import ApiClient
class ListApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_segment(self, list_id, segment, **kwargs):  # noqa: E501
    """Create a segment in the email list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.create_segment(list_id, segment, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param object list_id: ID of the email list to create a segment in. (required)
    :param SegmentCreation segment: (required)
    :return: Segment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.create_segment_with_http_info(list_id, segment, **kwargs)  # noqa: E501
def create_segment_with_http_info(self, list_id, segment, **kwargs):  # noqa: E501
    """Create a segment in the email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_segment_with_http_info(list_id, segment, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param object list_id: ID of the email list to create a segment in. (required)
    :param SegmentCreation segment: (required)
    :return: Segment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'segment']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_segment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `create_segment`")  # noqa: E501
    # verify the required parameter 'segment' is set
    if ('segment' not in params or
            params['segment'] is None):
        raise ValueError("Missing the required parameter `segment` when calling `create_segment`")  # noqa: E501
    collection_formats = {}
    # The segment is created under the list identified in the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The segment definition travels as the JSON request body.
    body_params = None
    if 'segment' in params:
        body_params = params['segment']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/segments/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Segment',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_list_by_id(self, list_id, **kwargs):  # noqa: E501
    """Retrieve an email list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.get_list_by_id(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.get_list_by_id_with_http_info(list_id, **kwargs)  # noqa: E501
def get_list_by_id_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Retrieve an email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_list_by_id_with_http_info(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `get_list_by_id`")  # noqa: E501
    collection_formats = {}
    # The list is identified in the URL path; a body-less GET fetches it.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ListDetail',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_lists(self, **kwargs):  # noqa: E501
    """Retrieve all email lists.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.get_lists(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: list[List]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.get_lists_with_http_info(**kwargs)  # noqa: E501
def get_lists_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieve all email lists.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_lists_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: list[List]
             If the method is called asynchronously,
             returns the request thread.
    """
    # This operation has no parameters of its own, only the
    # transport-control options understood by ApiClient.call_api.
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the arguments by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_lists" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[List]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_subscriber_on_list_by_email(self, list_id, email, **kwargs):  # noqa: E501
    """Retrieve a subscriber.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.get_subscriber_on_list_by_email(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve the subscriber from. (required)
    :param str email: Email address of the contact to retrieve. (required)
    :return: Subscriber
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.get_subscriber_on_list_by_email_with_http_info(list_id, email, **kwargs)  # noqa: E501
def get_subscriber_on_list_by_email_with_http_info(self, list_id, email, **kwargs):  # noqa: E501
    """Retrieve a subscriber.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_subscriber_on_list_by_email_with_http_info(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve the subscriber from. (required)
    :param str email: Email address of the contact to retrieve. (required)
    :return: Subscriber
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'email']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_subscriber_on_list_by_email" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `get_subscriber_on_list_by_email`")  # noqa: E501
    # verify the required parameter 'email' is set
    if ('email' not in params or
            params['email'] is None):
        raise ValueError("Missing the required parameter `email` when calling `get_subscriber_on_list_by_email`")  # noqa: E501
    collection_formats = {}
    # Both the list and the subscriber are addressed in the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    if 'email' in params:
        path_params['email'] = params['email']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/{email}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Subscriber',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_subscribers_on_list(self, list_id, **kwargs):  # noqa: E501
    """Retrieve subscribers on a list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.get_subscribers_on_list(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list. (required)
    :param int limit: Limits the result to given count.
    :return: list[Subscriber]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.get_subscribers_on_list_with_http_info(list_id, **kwargs)  # noqa: E501
def get_subscribers_on_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Retrieve subscribers on a list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_subscribers_on_list_with_http_info(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list. (required)
    :param int limit: Limits the result to given count.
    :return: list[Subscriber]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'limit']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_subscribers_on_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `get_subscribers_on_list`")  # noqa: E501
    collection_formats = {}
    # The list is addressed in the URL path; the optional 'limit'
    # becomes a query-string parameter.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Subscriber]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_unsubscribed_on_list(self, list_id, **kwargs):  # noqa: E501
    """Retrieve unsubscribed contacts on a list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    >>> thread = api.get_unsubscribed_on_list(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list. (required)
    :param int limit: Limits the result to given count.
    :return: list[Contact]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrappers unwrap the (data, status, headers) tuple and hand
    # back only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info helper already returns the request thread for
    # async_req=True and the payload otherwise, so forward unconditionally.
    return self.get_unsubscribed_on_list_with_http_info(list_id, **kwargs)  # noqa: E501
def get_unsubscribed_on_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Retrieve unsubscribed contacts on a list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_unsubscribed_on_list_with_http_info(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list. (required)
    :param int limit: Limits the result to given count.
    :return: list[Contact]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this operation: its own parameters
    # plus the transport-control options understood by ApiClient.call_api.
    all_params = ['list_id', 'limit']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshots the parameters by *name*; the validation below
    # depends on these exact local variable names, so do not rename them.
    params = locals()
    # Fold **kwargs into params, rejecting anything not declared above.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_unsubscribed_on_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `get_unsubscribed_on_list`")  # noqa: E501
    collection_formats = {}
    # The list is addressed in the URL path; the optional 'limit'
    # becomes a query-string parameter.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    # Delegate the HTTP round trip (and async handling) to ApiClient.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/unsubscribed/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Contact]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def import_subscribers_to_list(self, list_id, contacts, **kwargs):  # noqa: E501
    """Subscribe contacts to the email list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the deserialized response.

    >>> thread = api.import_subscribers_to_list(list_id, contacts, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param list[Contacts] contacts: (required)
    :return: ImportResult
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper always want the body only,
    # never the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    delegate = self.import_subscribers_to_list_with_http_info
    if kwargs.get('async_req'):
        # Async mode: hand back the request thread immediately.
        return delegate(list_id, contacts, **kwargs)  # noqa: E501
    # Sync mode: block until the response body is available.
    return delegate(list_id, contacts, **kwargs)  # noqa: E501
def import_subscribers_to_list_with_http_info(self, list_id, contacts, **kwargs):  # noqa: E501
    """Subscribe contacts to the email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.import_subscribers_to_list_with_http_info(list_id, contacts, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param list[Contacts] contacts: (required)
    :return: ImportResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts; anything else is a caller bug.
    all_params = ['list_id', 'contacts']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then fold the accepted ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method import_subscribers_to_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `import_subscribers_to_list`")  # noqa: E501
    # verify the required parameter 'contacts' is set
    if ('contacts' not in params or
            params['contacts'] is None):
        raise ValueError("Missing the required parameter `contacts` when calling `import_subscribers_to_list`")  # noqa: E501

    collection_formats = {}

    # `list_id` is interpolated into the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The contacts payload is sent as the request body.
    body_params = None
    if 'contacts' in params:
        body_params = params['contacts']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    # NOTE(review): the spec declares text/csv here while the body is a
    # contacts list — looks inconsistent; confirm against the API spec.
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['text/csv'])  # noqa: E501

    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501

    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/import/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ImportResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def subscribe_contact_to_list(self, list_id, contact, **kwargs):  # noqa: E501
    """Subscribe a contact to the email list.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the deserialized ``Contact``.

    >>> thread = api.subscribe_contact_to_list(list_id, contact, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param Subscriber contact: (required)
    :return: Contact
        If the method is called asynchronously,
        returns the request thread.
    """
    # This wrapper always unwraps the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    delegate = self.subscribe_contact_to_list_with_http_info
    if kwargs.get('async_req'):
        return delegate(list_id, contact, **kwargs)  # noqa: E501
    return delegate(list_id, contact, **kwargs)  # noqa: E501
def subscribe_contact_to_list_with_http_info(self, list_id, contact, **kwargs):  # noqa: E501
    """Subscribe a contact to the email list.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.subscribe_contact_to_list_with_http_info(list_id, contact, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to subscribe to. (required)
    :param Subscriber contact: (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts; anything else is a caller bug.
    all_params = ['list_id', 'contact']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then fold the accepted ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method subscribe_contact_to_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `subscribe_contact_to_list`")  # noqa: E501
    # verify the required parameter 'contact' is set
    if ('contact' not in params or
            params['contact'] is None):
        raise ValueError("Missing the required parameter `contact` when calling `subscribe_contact_to_list`")  # noqa: E501

    collection_formats = {}

    # `list_id` is interpolated into the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The contact payload is sent as the request body.
    body_params = None
    if 'contact' in params:
        body_params = params['contact']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501

    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Contact',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def unsubscribe_contact_on_list_by_email(self, list_id, email, **kwargs):  # noqa: E501
    """Unsubscribe a contact.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the deserialized ``Contact``.

    >>> thread = api.unsubscribe_contact_on_list_by_email(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to unsubscribe from. (required)
    :param str email: Email address of the contact to unsubscribe. (required)
    :return: Contact
        If the method is called asynchronously,
        returns the request thread.
    """
    # This wrapper always unwraps the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    delegate = self.unsubscribe_contact_on_list_by_email_with_http_info
    if kwargs.get('async_req'):
        return delegate(list_id, email, **kwargs)  # noqa: E501
    return delegate(list_id, email, **kwargs)  # noqa: E501
def unsubscribe_contact_on_list_by_email_with_http_info(self, list_id, email, **kwargs):  # noqa: E501
    """Unsubscribe a contact.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.unsubscribe_contact_on_list_by_email_with_http_info(list_id, email, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to unsubscribe from. (required)
    :param str email: Email address of the contact to unsubscribe. (required)
    :return: Contact
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts; anything else is a caller bug.
    all_params = ['list_id', 'email']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then fold the accepted ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method unsubscribe_contact_on_list_by_email" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `unsubscribe_contact_on_list_by_email`")  # noqa: E501
    # verify the required parameter 'email' is set
    if ('email' not in params or
            params['email'] is None):
        raise ValueError("Missing the required parameter `email` when calling `unsubscribe_contact_on_list_by_email`")  # noqa: E501

    collection_formats = {}

    # Both identifiers are interpolated into the URL path; no body is sent.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    if 'email' in params:
        path_params['email'] = params['email']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501

    return self.api_client.call_api(
        '/v1/lists/{list_id}/subscribers/{email}/', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Contact',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_list(self, list_id, **kwargs):  # noqa: E501
    """Update an email list partially.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the deserialized ``ListDetail``.

    >>> thread = api.update_list(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :param ListDetail list:
    :return: ListDetail
        If the method is called asynchronously,
        returns the request thread.
    """
    # This wrapper always unwraps the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    delegate = self.update_list_with_http_info
    if kwargs.get('async_req'):
        return delegate(list_id, **kwargs)  # noqa: E501
    return delegate(list_id, **kwargs)  # noqa: E501
def update_list_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Update an email list partially.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_list_with_http_info(list_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :param ListDetail list:
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts; 'list' carries the
    # partial ListDetail request body.
    all_params = ['list_id', 'list']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then fold the accepted ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `update_list`")  # noqa: E501

    collection_formats = {}

    # `list_id` is interpolated into the URL path.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The partial ListDetail is sent as the request body.
    body_params = None
    if 'list' in params:
        body_params = params['list']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501

    # Fix: removed stray dataset columns ("| 0.665193 | 0.051487 |") that
    # were fused onto the final line and made it a syntax error.
    return self.api_client.call_api(
        '/v1/lists/{list_id}/', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ListDetail',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
import json
import logging
import os
try:
import queue
except ImportError:
import Queue as queue
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
from flask import Flask, render_template, Response, request, abort, jsonify
from flask_assets import Environment, Bundle
from flask.templating import TemplateNotFound
from jobs import load_jobs
from random import randint
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
# Jinja: drop the newline after block tags and strip their leading
# whitespace so templates can be indented freely.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
sched = BackgroundScheduler(logger=app.logger)
# Per-client SSE queues keyed by the client's remote port, and the most
# recent event per job (replayed to newly connected clients).
queues = {}
last_events = {}
@app.before_first_request
def _configure_bundles():
    """Collect the widgets' JS/CSS assets and register webassets bundles."""
    js = ["main.js"]
    css = ["main.css"]
    widgets_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "static", "widgets")
    )
    # Every widget directory may contribute its own .js/.css files.
    for widget in os.listdir(widgets_path):
        widget_path = os.path.join("widgets", widget)
        for asset_file in os.listdir(os.path.join(widgets_path, widget)):
            asset_path = os.path.join(widget_path, asset_file)
            if asset_file.endswith(".js"):
                js.append(asset_path)
            elif asset_file.endswith(".css"):
                css.append(asset_path)
    assets = Environment(app)
    if app.debug:
        # Unminified bundles for easier debugging.
        assets.register("js_all", Bundle(*js, output="gen/app.js"))
        assets.register("css_all", Bundle(*css, output="gen/styles.css"))
    else:
        assets.register(
            "js_min_all", Bundle(*js, filters="rjsmin", output="gen/app.min.js")
        )
        assets.register(
            "css_min_all", Bundle(*css, filters="cssmin", output="gen/styles.min.css")
        )
@app.route("/w/<job_id>")
@app.route("/widget/<job_id>")
def widget(job_id):
    """Render a page showing a single widget (404 if the job is disabled)."""
    if not _is_enabled(job_id):
        abort(404)
    # Widget size passed through to the template; defaults to 3.
    # NOTE(review): when supplied via the query string this is a str, not
    # an int — presumably the template copes with both; confirm.
    x = request.args.get("x", 3)
    widgets = _enabled_jobs()
    # Use the widget matching the job implementation, or an explicitly declared
    # widget
    job = _config()["JOBS"][job_id]
    widget = job.get("job_impl", job_id)
    widget = job.get("widget", widget)
    return render_template(
        "index.html",
        layout="layout_single.html",
        widget=widget,
        job=job_id,
        x=x,
        widgets=widgets,
    )
@app.route("/")
@app.route("/d/<layout>")
@app.route("/dashboard/<layout>")
def dashboard(layout=None):
    """Render the dashboard, optionally using a named layout template."""
    locale = request.args.get("locale")
    widgets = _enabled_jobs()
    # Fall back to the configured default layout when none is in the URL.
    layout = layout or _config().get("DEFAULT_LAYOUT")
    if layout is None:
        return render_template("index.html", locale=locale, widgets=widgets)
    try:
        return render_template(
            "index.html",
            layout="layouts/{0}.html".format(layout),
            locale=locale,
            widgets=widgets,
        )
    except TemplateNotFound:
        # Unknown layout name in the URL.
        abort(404)
@app.route("/widgets")
def widgets():
    """Return the IDs of all enabled jobs as a JSON array."""
    enabled = _enabled_jobs()
    return jsonify(enabled)
@app.route("/events")
def events():
    """Server-sent-events stream fanning job results out to this client."""
    # The remote port identifies this client connection; its queue is
    # removed again by _close_stream when the client disconnects.
    remote_port = request.environ["REMOTE_PORT"]
    current_queue = queue.Queue()
    queues[remote_port] = current_queue
    # Replay the most recent event of every job so a fresh client paints
    # immediately instead of waiting for the next scheduler tick.
    for event in last_events.values():
        current_queue.put(event)

    def consume():
        while True:
            data = current_queue.get()
            if data is None:
                # None is a sentinel: stop streaming.
                break
            yield "data: %s\n\n" % (data,)

    response = Response(consume(), mimetype="text/event-stream")
    # Tell nginx not to buffer the event stream.
    response.headers["X-Accel-Buffering"] = "no"
    return response
@app.route("/events/<job_id>", methods=["POST"])
def create_event(job_id):
    """Accept an externally pushed JSON event for *job_id*."""
    if not _is_enabled(job_id):
        abort(404)
    payload = request.get_json()
    if not payload:
        # Empty or non-JSON body.
        abort(400)
    _add_event(job_id, payload)
    return "", 201
def _config():
if app.testing: # tests set their own config
return app.config
app.config.from_envvar("JARVIS_SETTINGS")
return app.config
def _enabled_jobs():
config = _config()["JOBS"]
return [job_id for job_id in config.keys() if config[job_id].get("enabled")]
def _is_enabled(job_id):
return job_id in _enabled_jobs()
@app.context_processor
def _inject_template_methods():
    """Expose helper callables to all Jinja templates."""
    return dict(is_job_enabled=_is_enabled)
@app.after_request
def _set_security_headers(response):
    """Attach CSP and related hardening headers to every response."""
    csp = (
        "default-src 'none'; "
        "connect-src 'self'; "
        "img-src 'self' https://i.scdn.co; "
        "script-src 'self' https://cdnjs.cloudflare.com; "
        "style-src 'self' https://cdnjs.cloudflare.com https://fonts.googleapis.com; "
        "font-src https://fonts.gstatic.com"
    )
    hardening = {
        "Content-Security-Policy": csp,
        "X-Content-Type-Options": "nosniff",
        "X-Frame-Options": "DENY",
        "X-XSS-Protection": "1; mode=block",
    }
    for header, value in hardening.items():
        response.headers[header] = value
    return response
@app.before_first_request
def _schedule_jobs():
    """Instantiate every enabled job and register it with the scheduler."""
    offset = 0
    jobs = load_jobs()
    for job_id, job_config in _config()["JOBS"].items():
        # A job entry may point at a shared implementation via job_impl;
        # otherwise the job ID doubles as the implementation name.
        job_impl = job_config.get("job_impl", job_id)
        if not job_config.get("enabled"):
            app.logger.info("Skipping disabled job: %s", job_id)
            continue
        if job_impl not in jobs:
            app.logger.info(
                ("Skipping job with ID %s (no such " "implementation: %s)"),
                job_id,
                job_impl,
            )
            continue
        job = jobs[job_impl](job_config)
        if app.debug:
            start_date = datetime.now() + timedelta(seconds=1)
        else:
            # Stagger first runs a few seconds apart so jobs don't all
            # hit their upstream services at once.
            offset += randint(4, 10)
            start_date = datetime.now() + timedelta(seconds=offset)
        job.start_date = start_date
        app.logger.info(
            "Scheduling job with ID %s (implementation: %s): %s", job_id, job_impl, job
        )
        sched.add_job(
            _run_job,
            "interval",
            name=job_id,
            next_run_time=job.start_date,
            coalesce=True,
            seconds=job.interval,
            kwargs={"job_id": job_id, "job": job},
        )
    if not sched.running:
        sched.start()
def _add_event(job_id, data):
json_data = json.dumps(
{"body": data, "job": job_id}, separators=(",", ":"), sort_keys=True
)
last_events[job_id] = json_data
for q in queues.values():
q.put(json_data)
def _run_job(job_id, job):
try:
data = job.get()
_add_event(job_id, data)
except Exception as e:
app.logger.warning("Failed to execute job: " + job_id + ": " + str(e))
def _close_stream(*args, **kwargs):
remote_port = args[2][1]
if remote_port in queues:
del queues[remote_port]
socketserver.BaseServer.handle_error = _close_stream | jarvis/app.py |
import json
import logging
import os
try:
import queue
except ImportError:
import Queue as queue
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
from flask import Flask, render_template, Response, request, abort, jsonify
from flask_assets import Environment, Bundle
from flask.templating import TemplateNotFound
from jobs import load_jobs
from random import randint
# NOTE(review): everything from here to the end of this section is a
# byte-for-byte duplicate of the jarvis module defined above — almost
# certainly a dataset/extraction artifact rather than intentional code.
# If this really were one file, the definitions below would shadow the
# first copy at import time.
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
# Jinja: trim/strip block whitespace so templates can be indented freely.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
sched = BackgroundScheduler(logger=app.logger)
# Per-client SSE queues (keyed by remote port) and the last event per job.
queues = {}
last_events = {}


@app.before_first_request
def _configure_bundles():
    """Collect widget JS/CSS assets and register webassets bundles."""
    js = ["main.js"]
    css = ["main.css"]
    widgets_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "static", "widgets")
    )
    for widget in os.listdir(widgets_path):
        widget_path = os.path.join("widgets", widget)
        for asset_file in os.listdir(os.path.join(widgets_path, widget)):
            asset_path = os.path.join(widget_path, asset_file)
            if asset_file.endswith(".js"):
                js.append(asset_path)
            elif asset_file.endswith(".css"):
                css.append(asset_path)
    assets = Environment(app)
    if app.debug:
        assets.register("js_all", Bundle(*js, output="gen/app.js"))
        assets.register("css_all", Bundle(*css, output="gen/styles.css"))
    else:
        assets.register(
            "js_min_all", Bundle(*js, filters="rjsmin", output="gen/app.min.js")
        )
        assets.register(
            "css_min_all", Bundle(*css, filters="cssmin", output="gen/styles.min.css")
        )


@app.route("/w/<job_id>")
@app.route("/widget/<job_id>")
def widget(job_id):
    """Render a single widget page (404 when the job is not enabled)."""
    if not _is_enabled(job_id):
        abort(404)
    x = request.args.get("x", 3)
    widgets = _enabled_jobs()
    # Use the widget matching the job implementation, or an explicitly declared
    # widget
    job = _config()["JOBS"][job_id]
    widget = job.get("job_impl", job_id)
    widget = job.get("widget", widget)
    return render_template(
        "index.html",
        layout="layout_single.html",
        widget=widget,
        job=job_id,
        x=x,
        widgets=widgets,
    )


@app.route("/")
@app.route("/d/<layout>")
@app.route("/dashboard/<layout>")
def dashboard(layout=None):
    """Render the dashboard, optionally with a named layout template."""
    locale = request.args.get("locale")
    widgets = _enabled_jobs()
    layout = layout or _config().get("DEFAULT_LAYOUT")
    if layout is None:
        return render_template("index.html", locale=locale, widgets=widgets)
    try:
        return render_template(
            "index.html",
            layout="layouts/{0}.html".format(layout),
            locale=locale,
            widgets=widgets,
        )
    except TemplateNotFound:
        abort(404)


@app.route("/widgets")
def widgets():
    """List enabled job IDs as JSON."""
    return jsonify(_enabled_jobs())


@app.route("/events")
def events():
    """Server-sent-events stream delivering job results to the client."""
    remote_port = request.environ["REMOTE_PORT"]
    current_queue = queue.Queue()
    queues[remote_port] = current_queue
    # Replay the latest event per job so new clients paint immediately.
    for event in last_events.values():
        current_queue.put(event)

    def consume():
        while True:
            data = current_queue.get()
            if data is None:
                break
            yield "data: %s\n\n" % (data,)

    response = Response(consume(), mimetype="text/event-stream")
    response.headers["X-Accel-Buffering"] = "no"
    return response


@app.route("/events/<job_id>", methods=["POST"])
def create_event(job_id):
    """Accept an externally pushed event for *job_id*."""
    if not _is_enabled(job_id):
        abort(404)
    body = request.get_json()
    if not body:
        abort(400)
    _add_event(job_id, body)
    return "", 201


def _config():
    """Return the Flask config, loading JARVIS_SETTINGS unless testing."""
    if app.testing:  # tests set their own config
        return app.config
    app.config.from_envvar("JARVIS_SETTINGS")
    return app.config


def _enabled_jobs():
    """Return the IDs of jobs flagged enabled in the config."""
    config = _config()["JOBS"]
    return [job_id for job_id in config.keys() if config[job_id].get("enabled")]


def _is_enabled(job_id):
    """Return True when *job_id* is an enabled job."""
    return job_id in _enabled_jobs()


@app.context_processor
def _inject_template_methods():
    """Expose helpers to Jinja templates."""
    return dict(is_job_enabled=_is_enabled)


@app.after_request
def _set_security_headers(response):
    """Attach CSP and related hardening headers to every response."""
    csp = (
        "default-src 'none'; "
        "connect-src 'self'; "
        "img-src 'self' https://i.scdn.co; "
        "script-src 'self' https://cdnjs.cloudflare.com; "
        "style-src 'self' https://cdnjs.cloudflare.com https://fonts.googleapis.com; "
        "font-src https://fonts.gstatic.com"
    )
    response.headers["Content-Security-Policy"] = csp
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["X-XSS-Protection"] = "1; mode=block"
    return response


@app.before_first_request
def _schedule_jobs():
    """Instantiate every enabled job and register it with the scheduler."""
    offset = 0
    jobs = load_jobs()
    for job_id, job_config in _config()["JOBS"].items():
        job_impl = job_config.get("job_impl", job_id)
        if not job_config.get("enabled"):
            app.logger.info("Skipping disabled job: %s", job_id)
            continue
        if job_impl not in jobs:
            app.logger.info(
                ("Skipping job with ID %s (no such " "implementation: %s)"),
                job_id,
                job_impl,
            )
            continue
        job = jobs[job_impl](job_config)
        if app.debug:
            start_date = datetime.now() + timedelta(seconds=1)
        else:
            # Stagger first runs so jobs don't all fire at once.
            offset += randint(4, 10)
            start_date = datetime.now() + timedelta(seconds=offset)
        job.start_date = start_date
        app.logger.info(
            "Scheduling job with ID %s (implementation: %s): %s", job_id, job_impl, job
        )
        sched.add_job(
            _run_job,
            "interval",
            name=job_id,
            next_run_time=job.start_date,
            coalesce=True,
            seconds=job.interval,
            kwargs={"job_id": job_id, "job": job},
        )
    if not sched.running:
        sched.start()


def _add_event(job_id, data):
    """Serialize an event and fan it out to all connected SSE clients."""
    json_data = json.dumps(
        {"body": data, "job": job_id}, separators=(",", ":"), sort_keys=True
    )
    last_events[job_id] = json_data
    for q in queues.values():
        q.put(json_data)


def _run_job(job_id, job):
    """Run one job; log and swallow failures so the schedule survives."""
    try:
        data = job.get()
        _add_event(job_id, data)
    except Exception as e:
        app.logger.warning("Failed to execute job: " + job_id + ": " + str(e))


def _close_stream(*args, **kwargs):
    """Drop a disconnected SSE client's queue (installed as handle_error)."""
    remote_port = args[2][1]
    if remote_port in queues:
        del queues[remote_port]
# Route server errors (client disconnects) through the cleanup handler.
# Fix: removed stray dataset columns ("| 0.412412 | 0.06134 |") fused
# onto this line.
socketserver.BaseServer.handle_error = _close_stream
import asyncio
import json
from metarepo2json.metarepo2json.interfaces import KitsInterface
def __init__(hub):
    """Module initializer (appears to follow the pop ``hub`` convention).

    Copies the configuration values and utility callables this module
    needs from the hub into module-level globals so later code can use
    them without repeating the long hub paths.
    """
    global HUB
    global repo_web
    global kitinfo_subpath
    global kitsha1_subpath
    global version_subpath
    global fetcher
    global get_raw_file_uri
    global throw_on_corrupted_metarepo
    global get_kit
    global sort_kits
    HUB = hub
    repo_web = hub.OPT.metarepo2json.repo_web
    kitinfo_subpath = hub.OPT.metarepo2json.kitinfo_subpath
    kitsha1_subpath = hub.OPT.metarepo2json.kitsha1_subpath
    version_subpath = hub.OPT.metarepo2json.version_subpath
    fetcher = hub.metarepo2json.http_fetcher.fetch_html
    get_raw_file_uri = hub.metarepo2json.utils.get_raw_file_uri
    throw_on_corrupted_metarepo = (
        hub.metarepo2json.utils.throw_on_corrupted_metarepo
    )
    get_kit = hub.metarepo2json.utils.get_kit
    sort_kits = hub.metarepo2json.utils.sort_list_of_dicts_by_key_values
class KitsFromWeb(KitsInterface):
    """Fetch and assemble kit metadata from a meta-repo served over HTTP.

    Downloads two JSON files (kit-info and kit-sha1) from the raw-file
    view of the meta-repo, merges them into a list of kit dicts, and
    returns the list ordered by the repo's declared kit order.
    """

    def __init__(self, metarepo_location=None):
        self.hub = HUB
        self.fetch = fetcher
        # Fall back to the configured web location when none is given.
        self.metarepo_location = (
            metarepo_location
            if metarepo_location is not None
            else repo_web
        )
        self.kitinfo_location = None
        self.kitsha1_location = None
        self.kitinfo = None
        self.kitsha1 = None
        self.kits = None

    def _set_locations(self):
        # Resolve the raw-file URIs of the two metadata files.
        self.kitinfo_location = get_raw_file_uri(
            self.metarepo_location, kitinfo_subpath
        )
        self.kitsha1_location = get_raw_file_uri(
            self.metarepo_location, kitsha1_subpath
        )

    def _set_session(self, session):
        self.session = session

    def set_fetcher(self, fetcher):
        """Replace the fetch coroutine (e.g. with a stub in tests)."""
        self.fetch = fetcher

    async def _set_kitinfo(self, session):
        self.kitinfo = json.loads(await self.fetch(self.kitinfo_location, session))

    async def _set_kitsha1(self, session):
        self.kitsha1 = json.loads(await self.fetch(self.kitsha1_location, session))

    async def _load_data(self, session):
        # Fix: asyncio.gather instead of asyncio.wait — passing bare
        # coroutines to wait() is deprecated since Python 3.8 and raises
        # on 3.11+, and gather() propagates fetch/JSON errors instead of
        # leaving them in unretrieved task results.
        await asyncio.gather(self._set_kitinfo(session), self._set_kitsha1(session))

    async def load_data(self, location=None, **kwargs):
        """Download both metadata files and validate their consistency."""
        if location is not None:
            self.metarepo_location = location
        session = kwargs["session"] if "session" in kwargs else None
        self._set_locations()
        await self._load_data(session)
        throw_on_corrupted_metarepo(self.kitinfo, self.kitsha1)

    async def process_data(self):
        """Merge kit-info and kit-sha1 entries into ``self.kits``."""
        kits = []
        for kit_name, branches in self.kitinfo["release_defs"].items():
            kit_settings = self.kitinfo["kit_settings"][kit_name]
            kitsha1 = self.kitsha1[kit_name]
            kits.append(get_kit(kit_name, kit_settings, branches, kitsha1))
        self.kits = kits

    async def get_result(self) -> list:
        """Return the kits, loading and processing lazily on first call.

        Fix: removed stray dataset columns that were fused onto the
        final return line in the source.
        """
        if self.kitinfo_location is None or self.kitsha1_location is None:
            await self.load_data()
        if self.kits is None:
            await self.process_data()
        return sort_kits(self.kits, "name", self.kitinfo["kit_order"])
import asyncio
import json
from metarepo2json.metarepo2json.interfaces import KitsInterface
# NOTE(review): this function and the class below duplicate the
# metarepo2json module defined above — likely a dataset artifact.
def __init__(hub):
    """Module initializer (appears to follow the pop ``hub`` convention).

    Caches hub configuration values and helper callables in module
    globals for use by the rest of the module.
    """
    global HUB
    global repo_web
    global kitinfo_subpath
    global kitsha1_subpath
    global version_subpath
    global fetcher
    global get_raw_file_uri
    global throw_on_corrupted_metarepo
    global get_kit
    global sort_kits
    HUB = hub
    repo_web = hub.OPT.metarepo2json.repo_web
    kitinfo_subpath = hub.OPT.metarepo2json.kitinfo_subpath
    kitsha1_subpath = hub.OPT.metarepo2json.kitsha1_subpath
    version_subpath = hub.OPT.metarepo2json.version_subpath
    fetcher = hub.metarepo2json.http_fetcher.fetch_html
    get_raw_file_uri = hub.metarepo2json.utils.get_raw_file_uri
    throw_on_corrupted_metarepo = (
        hub.metarepo2json.utils.throw_on_corrupted_metarepo
    )
    get_kit = hub.metarepo2json.utils.get_kit
    sort_kits = hub.metarepo2json.utils.sort_list_of_dicts_by_key_values
class KitsFromWeb(KitsInterface):
    """Fetch and assemble kit metadata from a meta-repo served over HTTP.

    Downloads two JSON files (kit-info and kit-sha1) from the raw-file
    view of the meta-repo, merges them into a list of kit dicts, and
    returns the list ordered by the repo's declared kit order.
    """

    def __init__(self, metarepo_location=None):
        self.hub = HUB
        self.fetch = fetcher
        # Fall back to the configured web location when none is given.
        self.metarepo_location = (
            metarepo_location
            if metarepo_location is not None
            else repo_web
        )
        self.kitinfo_location = None
        self.kitsha1_location = None
        self.kitinfo = None
        self.kitsha1 = None
        self.kits = None

    def _set_locations(self):
        # Resolve the raw-file URIs of the two metadata files.
        self.kitinfo_location = get_raw_file_uri(
            self.metarepo_location, kitinfo_subpath
        )
        self.kitsha1_location = get_raw_file_uri(
            self.metarepo_location, kitsha1_subpath
        )

    def _set_session(self, session):
        self.session = session

    def set_fetcher(self, fetcher):
        """Replace the fetch coroutine (e.g. with a stub in tests)."""
        self.fetch = fetcher

    async def _set_kitinfo(self, session):
        self.kitinfo = json.loads(await self.fetch(self.kitinfo_location, session))

    async def _set_kitsha1(self, session):
        self.kitsha1 = json.loads(await self.fetch(self.kitsha1_location, session))

    async def _load_data(self, session):
        # Fix: asyncio.gather instead of asyncio.wait — passing bare
        # coroutines to wait() is deprecated since Python 3.8 and raises
        # on 3.11+, and gather() propagates fetch/JSON errors instead of
        # leaving them in unretrieved task results.
        await asyncio.gather(self._set_kitinfo(session), self._set_kitsha1(session))

    async def load_data(self, location=None, **kwargs):
        """Download both metadata files and validate their consistency."""
        if location is not None:
            self.metarepo_location = location
        session = kwargs["session"] if "session" in kwargs else None
        self._set_locations()
        await self._load_data(session)
        throw_on_corrupted_metarepo(self.kitinfo, self.kitsha1)

    async def process_data(self):
        """Merge kit-info and kit-sha1 entries into ``self.kits``."""
        kits = []
        for kit_name, branches in self.kitinfo["release_defs"].items():
            kit_settings = self.kitinfo["kit_settings"][kit_name]
            kitsha1 = self.kitsha1[kit_name]
            kits.append(get_kit(kit_name, kit_settings, branches, kitsha1))
        self.kits = kits

    async def get_result(self) -> list:
        """Return the kits, loading and processing lazily on first call.

        Fix: removed stray dataset columns ("| 0.429549 | 0.110904 |")
        that were fused onto the final return line in the source.
        """
        if self.kitinfo_location is None or self.kitsha1_location is None:
            await self.load_data()
        if self.kits is None:
            await self.process_data()
        return sort_kits(self.kits, "name", self.kitinfo["kit_order"])
import datetime
import factory
from accounts.models import Profile
from accounts.models.choices import Title, Role
from django.db.models.signals import post_save
from django.conf import settings
from django.utils import timezone
from random import randint
# Password assigned to every generated test user.
# NOTE(review): "<PASSWORD>" is a placeholder left by a dataset
# anonymizer — substitute a concrete value before using these factories.
TEST_PASSWORD = "<PASSWORD>"
# Valid choice names for the Profile title/position fields.
TITLES = [title.name for title in Title]
ROLES = [role.name for role in Role]
@factory.django.mute_signals(post_save)
class ProfileFactory(factory.django.DjangoModelFactory):
    """Build Profile instances without firing post_save signals."""

    class Meta:
        model = Profile
        django_get_or_create = ("user",)

    title = factory.Faker("random_element", elements=TITLES)
    date_of_birth = factory.Faker("date_this_century", before_today=True)
    institute = factory.Faker("company")
    # position = factory.Faker("random_element", elements=ROLES)
    bio = factory.Faker("text", max_nb_chars=500)
    # We pass in profile=None to prevent UserFactory from creating another
    # profile (this disables the RelatedFactory)
    user = factory.SubFactory("accounts.tests.factories.UserFactory", profile=None)
@factory.django.mute_signals(post_save)
class UserFactory(factory.django.DjangoModelFactory):
    """Build auth users (with a linked Profile) without post_save signals."""

    class Meta:
        model = settings.AUTH_USER_MODEL
        django_get_or_create = ("username",)
        strategy = factory.BUILD_STRATEGY

    first_name = factory.Faker("first_name")
    last_name = factory.Faker("last_name")
    username = factory.Sequence(lambda n: f"user{str(n).zfill(3)}")
    email = factory.Faker("email")
    # Fixed, known password so tests can authenticate the user.
    # Fix: restored TEST_PASSWORD — the source contained the anonymizer
    # artifact "<PASSWORD>_PASSWORD", which is not valid Python.
    password = factory.PostGenerationMethodCall("set_password", TEST_PASSWORD)

    @factory.lazy_attribute
    def date_joined(self):
        # Joined sometime 5-50 days ago.
        return timezone.now() - datetime.timedelta(days=randint(5, 50))

    last_login = factory.lazy_attribute(
        lambda o: o.date_joined + datetime.timedelta(days=4)
    )

    is_superuser = True
    is_staff = True
    is_active = True

    # We pass in 'user' to link the generated Profile to our just-generated
    # User. This will call ProfileFactory(user=our_new_user), thus skipping the
    # SubFactory.
    profile = factory.RelatedFactory(ProfileFactory, "user")


# Fix: the source's final line had dataset columns fused onto it,
# swallowing the first import of the (duplicated) module that follows;
# the import is restored here on its own line.
import datetime
import factory
from accounts.models import Profile
from accounts.models.choices import Title, Role
from django.db.models.signals import post_save
from django.conf import settings
from django.utils import timezone
from random import randint
TEST_PASSWORD = "<PASSWORD>"
TITLES = [title.name for title in Title]
ROLES = [role.name for role in Role]
@factory.django.mute_signals(post_save)
class ProfileFactory(factory.django.DjangoModelFactory):
class Meta:
model = Profile
django_get_or_create = ("user",)
title = factory.Faker("random_element", elements=TITLES)
date_of_birth = factory.Faker("date_this_century", before_today=True)
institute = factory.Faker("company")
# position = factory.Faker("random_element", elements=ROLES)
bio = factory.Faker("text", max_nb_chars=500)
# We pass in profile=None to prevent UserFactory from creating another
# profile (this disables the RelatedFactory)
user = factory.SubFactory("accounts.tests.factories.UserFactory", profile=None)
@factory.django.mute_signals(post_save)
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = settings.AUTH_USER_MODEL
django_get_or_create = ("username",)
strategy = factory.BUILD_STRATEGY
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
username = factory.Sequence(lambda n: f"user{str(n).zfill(3)}")
email = factory.Faker("email")
password = factory.PostGenerationMethodCall("set_password", <PASSWORD>_PASSWORD)
@factory.lazy_attribute
def date_joined(self):
return timezone.now() - datetime.timedelta(days=randint(5, 50))
last_login = factory.lazy_attribute(
lambda o: o.date_joined + datetime.timedelta(days=4)
)
is_superuser = True
is_staff = True
is_active = True
# We pass in 'user' to link the generated Profile to our just-generated
# User. This will call ProfileFactory(user=our_new_user), thus skipping the
# SubFactory.
profile = factory.RelatedFactory(ProfileFactory, "user") | 0.456894 | 0.114146 |
import inkex
import simplestyle, sys
from math import *
# The simplestyle module provides functions for style parsing.
from simplestyle import *
class Knob_Scale(inkex.Effect):
    """Inkscape extension drawing a circular knob scale: major/minor tick
    marks around a centre point, with an optional arc, centering circle and
    numeric value labels.

    Written against the legacy (pre-1.0) inkex API: optparse-style options
    on ``self.OptionParser``, ``self.unittouu`` for unit conversion and
    ``inkex.etree`` for SVG node creation.
    """

    def __init__(self):
        inkex.Effect.__init__(self)
        # General settings
        self.OptionParser.add_option("--x",
                        action="store", type="int",
                        dest="x", default=0.0,
                        help="Center X")
        self.OptionParser.add_option("--y",
                        action="store", type="int",
                        dest="y", default=0.0,
                        help="Center Y")
        self.OptionParser.add_option("--radius",
                        action="store", type="int",
                        dest="radius", default=100.0,
                        help="Knob radius")
        self.OptionParser.add_option("--linewidth",
                        action="store", type="int",
                        dest="linewidth", default=1,
                        help="")
        self.OptionParser.add_option("--angle",
                        action="store", type="float",
                        dest="angle", default=260.0,
                        help="Angle of the knob scale in degrees")
        self.OptionParser.add_option("--draw_arc",
                        action="store", type="inkbool",
                        dest="draw_arc", default='True',
                        help="")
        self.OptionParser.add_option("--draw_centering_circle",
                        action="store", type="inkbool",
                        dest="draw_centering_circle", default='False',
                        help="")
        self.OptionParser.add_option("-u", "--units",
                        action="store", type="string",
                        dest="units", default="px",
                        help="units to measure size of knob")
        # Tick settings
        self.OptionParser.add_option("--n_ticks",
                        action="store", type="int",
                        dest="n_ticks", default=5,
                        help="")
        self.OptionParser.add_option("--ticksize",
                        action="store", type="int",
                        dest="ticksize", default=10,
                        help="")
        self.OptionParser.add_option("--n_subticks",
                        action="store", type="int",
                        dest="n_subticks", default=10,
                        help="")
        self.OptionParser.add_option("--subticksize",
                        action="store", type="int",
                        dest="subticksize", default=5,
                        help="")
        self.OptionParser.add_option("--style",
                        action="store", type="string",
                        dest="style", default='marks_outwards',
                        help="Style of marks")
        # Label settings
        self.OptionParser.add_option("--labels_enabled",
                        action="store", type="inkbool",
                        dest="labels_enabled", default='False',
                        help="")
        self.OptionParser.add_option("--rounding_level",
                        action="store", type="int",
                        dest="rounding_level", default=0,
                        help="")
        self.OptionParser.add_option("--text_size",
                        action="store", type="int",
                        dest="text_size", default=1,
                        help="")
        self.OptionParser.add_option("--text_offset",
                        action="store", type="int",
                        dest="text_offset", default=20,
                        help="")
        self.OptionParser.add_option("--start_value",
                        action="store", type="float",
                        dest="start_value", default=0,
                        help="")
        self.OptionParser.add_option("--stop_value",
                        action="store", type="float",
                        dest="stop_value", default=10,
                        help="")
        # Dummy option backing the dialog's notebook tab; its value is unused.
        self.OptionParser.add_option("","--tab")

    def draw_text(self, textvalue, radius, angular_position, text_size, parent):
        """Place *textvalue* at polar position (radius, angular_position)
        measured from the scale centre."""
        # Create text element
        text = inkex.etree.Element(inkex.addNS('text','svg'))
        text.text = textvalue
        # Set text position to center of document.
        # NOTE(review): text_size/2 is integer division if unittouu returned
        # an int under Python 2 -- presumably it returns float; confirm.
        text.set('x', str(self.x_offset + radius*cos(angular_position)))
        text.set('y', str(self.y_offset + radius*sin(angular_position) + text_size/2))
        # Center text horizontally with CSS style.
        style = {
                'text-align' : 'center',
                'text-anchor': 'middle',
                'alignment-baseline' : 'center',
                'font-size' : str(text_size),
                'vertical-align' : 'middle'
                }
        text.set('style', formatStyle(style))
        parent.append(text)

    def draw_knob_arc(self, radius, parent, angle, transform='' ):
        """Draw the open scale arc spanning *angle* radians, with the gap
        centred at the bottom of the knob."""
        start_point_angle = (angle - pi)/2.0
        end_point_angle = pi - start_point_angle
        style = { 'stroke' : '#000000',
                  'stroke-width' : str(self.options.linewidth),
                  'fill' : 'none' }
        ell_attribs = {'style':simplestyle.formatStyle(style),
            inkex.addNS('cx','sodipodi') :str(self.x_offset),
            inkex.addNS('cy','sodipodi') :str(self.y_offset),
            inkex.addNS('rx','sodipodi') :str(radius),
            inkex.addNS('ry','sodipodi') :str(radius),
            inkex.addNS('start','sodipodi') :str(end_point_angle),
            inkex.addNS('end','sodipodi') :str(start_point_angle),
            inkex.addNS('open','sodipodi') :'true', #all ellipse sectors we will draw are open
            inkex.addNS('type','sodipodi') :'arc',
            'transform' :transform
            }
        ell = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), ell_attribs )

    def draw_centering_circle(self, radius, parent):
        """Draw a full helper circle used to centre the scale on the knob."""
        style = { 'stroke' : '#000000',
                  'stroke-width' : '1',
                  'fill' : 'none' }
        ell_attribs = {'style':simplestyle.formatStyle(style),
            inkex.addNS('cx','sodipodi') :str(self.x_offset),
            inkex.addNS('cy','sodipodi') :str(self.y_offset),
            inkex.addNS('rx','sodipodi') :str(radius),
            inkex.addNS('ry','sodipodi') :str(radius),
            inkex.addNS('type','sodipodi') :'arc'
            }
        ell = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), ell_attribs )

    def draw_circle_mark(self, x_offset, y_offset, radius, mark_angle, mark_length, parent):
        """Draw one tick as a filled dot of diameter *mark_length* centred on
        the tick position."""
        cx = x_offset + radius*cos(mark_angle)
        cy = y_offset + radius*sin(mark_angle)
        r = mark_length / 2.0
        style = {
            'stroke': '#000000',
            'stroke-width':'0',
            'fill': '#000000'
        }
        circ_attribs = {
            'style':simplestyle.formatStyle(style),
            'cx':str(cx),
            'cy':str(cy),
            'r':str(r)
        }
        circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )

    def draw_knob_line_mark(self, x_offset, y_offset, radius, mark_angle, mark_length, parent):
        """Draw one tick as a radial line from *radius* outward by
        *mark_length*."""
        x1 = x_offset + radius*cos(mark_angle)
        y1 = y_offset + radius*sin(mark_angle)
        x2 = x_offset + (radius + mark_length)*cos(mark_angle)
        y2 = y_offset + (radius + mark_length)*sin(mark_angle)
        line_style = { 'stroke': '#000000',
                       'stroke-width': str(self.options.linewidth),
                       'fill': 'none'
                     }
        line_attribs = {'style' : simplestyle.formatStyle(line_style),
                        inkex.addNS('label','inkscape') : "none",
                        'd' : 'M '+str(x1) +',' +
                        str(y1) +' L '+str(x2)
                        +','+str(y2) }
        line = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), line_attribs )

    def draw_tick(self, radius, mark_angle, mark_size, parent):
        """Render one tick, dispatching on the selected mark style."""
        if (self.options.style == 'marks_inwards') or (self.options.style == 'marks_outwards'):
            self.draw_knob_line_mark(self.x_offset, self.y_offset, radius, mark_angle, mark_size, parent)
        elif self.options.style == 'marks_circles':
            self.draw_circle_mark(self.x_offset, self.y_offset, radius, mark_angle, mark_size, parent)

    def effect(self):
        """Entry point: convert options to user units and emit the scale into
        the current layer."""
        parent = self.current_layer
        radius = self.unittouu(str(self.options.radius) + self.options.units)
        self.x_offset = self.unittouu(str(self.options.x) + self.options.units)
        self.y_offset = self.unittouu(str(self.options.y) + self.options.units)
        #print >>sys.stderr, "x_offset: %s\n" % x_offset
        #print >>sys.stderr, "y_offset: %s\n" % y_offset
        #radius = self.options.radius
        angle = self.options.angle*pi/180.0
        n_ticks = self.options.n_ticks
        n_subticks = self.options.n_subticks
        is_outer = True
        if self.options.style == 'marks_inwards':
            is_outer = False
        tick_length = self.unittouu(str(self.options.ticksize) + self.options.units)
        subtick_length = self.unittouu(str(self.options.subticksize) + self.options.units)
        arc_radius = radius
        # Labeling settings
        start_num = self.options.start_value
        end_num = self.options.stop_value
        text_spacing = self.unittouu(str(self.options.text_offset) + self.options.units)
        text_size = self.unittouu(str(self.options.text_size) + self.options.units)
        # For inward marks the arc sits outside the ticks; subticks are
        # shortened so their outer ends align with the major ticks.
        if not is_outer:
            subtick_radius = radius + tick_length - subtick_length
            arc_radius = radius + tick_length
        else:
            subtick_radius = radius
            arc_radius = radius
        if self.options.draw_arc:
            self.draw_knob_arc(arc_radius, parent, angle)
        if self.options.draw_centering_circle:
            self.draw_centering_circle(arc_radius + tick_length + text_size + text_spacing, parent)
        # NOTE(review): n_ticks == 1 raises ZeroDivisionError here -- the .inx
        # UI presumably enforces n_ticks >= 2; confirm.
        ticks_delta = angle / (n_ticks - 1)
        start_ticks_angle = 1.5*pi - 0.5*angle
        for tick in range(n_ticks):
            self.draw_tick(radius, start_ticks_angle + ticks_delta*tick,
                           tick_length, parent)
            if self.options.labels_enabled:
                # Label value interpolated linearly between start and stop.
                if self.options.rounding_level > 0:
                    tick_text = str(round(start_num +
                                    float(tick) * (end_num - start_num) / (n_ticks - 1),
                                    self.options.rounding_level))
                else:
                    tick_text = str(int(start_num + float(tick) * (end_num - start_num) / (n_ticks - 1)))
                self.draw_text(tick_text, radius + tick_length + text_spacing,
                               start_ticks_angle + ticks_delta*tick,
                               text_size,
                               parent)
            # No subticks after the last major tick.
            if tick == (n_ticks - 1):
                break
            subticks_delta = ticks_delta / (n_subticks + 1)
            subtick_start_angle = start_ticks_angle + ticks_delta*tick + subticks_delta
            for subtick in range(n_subticks):
                self.draw_tick(subtick_radius, subtick_start_angle + subticks_delta*subtick,
                               subtick_length, parent)
if __name__ == '__main__':
    # Run as an Inkscape extension: inkex parses the CLI options and
    # eventually calls Knob_Scale.effect().
    knob_scale = Knob_Scale()
    knob_scale.affect()
import simplestyle, sys
from math import *
# The simplestyle module provides functions for style parsing.
from simplestyle import *
class Knob_Scale(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
# General settings
self.OptionParser.add_option("--x",
action="store", type="int",
dest="x", default=0.0,
help="Center X")
self.OptionParser.add_option("--y",
action="store", type="int",
dest="y", default=0.0,
help="Center Y")
self.OptionParser.add_option("--radius",
action="store", type="int",
dest="radius", default=100.0,
help="Knob radius")
self.OptionParser.add_option("--linewidth",
action="store", type="int",
dest="linewidth", default=1,
help="")
self.OptionParser.add_option("--angle",
action="store", type="float",
dest="angle", default=260.0,
help="Angle of the knob scale in degrees")
self.OptionParser.add_option("--draw_arc",
action="store", type="inkbool",
dest="draw_arc", default='True',
help="")
self.OptionParser.add_option("--draw_centering_circle",
action="store", type="inkbool",
dest="draw_centering_circle", default='False',
help="")
self.OptionParser.add_option("-u", "--units",
action="store", type="string",
dest="units", default="px",
help="units to measure size of knob")
# Tick settings
self.OptionParser.add_option("--n_ticks",
action="store", type="int",
dest="n_ticks", default=5,
help="")
self.OptionParser.add_option("--ticksize",
action="store", type="int",
dest="ticksize", default=10,
help="")
self.OptionParser.add_option("--n_subticks",
action="store", type="int",
dest="n_subticks", default=10,
help="")
self.OptionParser.add_option("--subticksize",
action="store", type="int",
dest="subticksize", default=5,
help="")
self.OptionParser.add_option("--style",
action="store", type="string",
dest="style", default='marks_outwards',
help="Style of marks")
# Label settings
self.OptionParser.add_option("--labels_enabled",
action="store", type="inkbool",
dest="labels_enabled", default='False',
help="")
self.OptionParser.add_option("--rounding_level",
action="store", type="int",
dest="rounding_level", default=0,
help="")
self.OptionParser.add_option("--text_size",
action="store", type="int",
dest="text_size", default=1,
help="")
self.OptionParser.add_option("--text_offset",
action="store", type="int",
dest="text_offset", default=20,
help="")
self.OptionParser.add_option("--start_value",
action="store", type="float",
dest="start_value", default=0,
help="")
self.OptionParser.add_option("--stop_value",
action="store", type="float",
dest="stop_value", default=10,
help="")
# Dummy
self.OptionParser.add_option("","--tab")
def draw_text(self, textvalue, radius, angular_position, text_size, parent):
# Create text element
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.text = textvalue
# Set text position to center of document.
text.set('x', str(self.x_offset + radius*cos(angular_position)))
text.set('y', str(self.y_offset + radius*sin(angular_position) + text_size/2))
# Center text horizontally with CSS style.
style = {
'text-align' : 'center',
'text-anchor': 'middle',
'alignment-baseline' : 'center',
'font-size' : str(text_size),
'vertical-align' : 'middle'
}
text.set('style', formatStyle(style))
parent.append(text)
def draw_knob_arc(self, radius, parent, angle, transform='' ):
start_point_angle = (angle - pi)/2.0
end_point_angle = pi - start_point_angle
style = { 'stroke' : '#000000',
'stroke-width' : str(self.options.linewidth),
'fill' : 'none' }
ell_attribs = {'style':simplestyle.formatStyle(style),
inkex.addNS('cx','sodipodi') :str(self.x_offset),
inkex.addNS('cy','sodipodi') :str(self.y_offset),
inkex.addNS('rx','sodipodi') :str(radius),
inkex.addNS('ry','sodipodi') :str(radius),
inkex.addNS('start','sodipodi') :str(end_point_angle),
inkex.addNS('end','sodipodi') :str(start_point_angle),
inkex.addNS('open','sodipodi') :'true', #all ellipse sectors we will draw are open
inkex.addNS('type','sodipodi') :'arc',
'transform' :transform
}
ell = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), ell_attribs )
def draw_centering_circle(self, radius, parent):
style = { 'stroke' : '#000000',
'stroke-width' : '1',
'fill' : 'none' }
ell_attribs = {'style':simplestyle.formatStyle(style),
inkex.addNS('cx','sodipodi') :str(self.x_offset),
inkex.addNS('cy','sodipodi') :str(self.y_offset),
inkex.addNS('rx','sodipodi') :str(radius),
inkex.addNS('ry','sodipodi') :str(radius),
inkex.addNS('type','sodipodi') :'arc'
}
ell = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), ell_attribs )
def draw_circle_mark(self, x_offset, y_offset, radius, mark_angle, mark_length, parent):
cx = x_offset + radius*cos(mark_angle)
cy = y_offset + radius*sin(mark_angle)
r = mark_length / 2.0
style = {
'stroke': '#000000',
'stroke-width':'0',
'fill': '#000000'
}
circ_attribs = {
'style':simplestyle.formatStyle(style),
'cx':str(cx),
'cy':str(cy),
'r':str(r)
}
circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )
def draw_knob_line_mark(self, x_offset, y_offset, radius, mark_angle, mark_length, parent):
x1 = x_offset + radius*cos(mark_angle)
y1 = y_offset + radius*sin(mark_angle)
x2 = x_offset + (radius + mark_length)*cos(mark_angle)
y2 = y_offset + (radius + mark_length)*sin(mark_angle)
line_style = { 'stroke': '#000000',
'stroke-width': str(self.options.linewidth),
'fill': 'none'
}
line_attribs = {'style' : simplestyle.formatStyle(line_style),
inkex.addNS('label','inkscape') : "none",
'd' : 'M '+str(x1) +',' +
str(y1) +' L '+str(x2)
+','+str(y2) }
line = inkex.etree.SubElement(parent, inkex.addNS('path','svg'), line_attribs )
def draw_tick(self, radius, mark_angle, mark_size, parent):
if (self.options.style == 'marks_inwards') or (self.options.style == 'marks_outwards'):
self.draw_knob_line_mark(self.x_offset, self.y_offset, radius, mark_angle, mark_size, parent)
elif self.options.style == 'marks_circles':
self.draw_circle_mark(self.x_offset, self.y_offset, radius, mark_angle, mark_size, parent)
def effect(self):
parent = self.current_layer
radius = self.unittouu(str(self.options.radius) + self.options.units)
self.x_offset = self.unittouu(str(self.options.x) + self.options.units)
self.y_offset = self.unittouu(str(self.options.y) + self.options.units)
#print >>sys.stderr, "x_offset: %s\n" % x_offset
#print >>sys.stderr, "y_offset: %s\n" % y_offset
#radius = self.options.radius
angle = self.options.angle*pi/180.0
n_ticks = self.options.n_ticks
n_subticks = self.options.n_subticks
is_outer = True
if self.options.style == 'marks_inwards':
is_outer = False
tick_length = self.unittouu(str(self.options.ticksize) + self.options.units)
subtick_length = self.unittouu(str(self.options.subticksize) + self.options.units)
arc_radius = radius
# Labeling settings
start_num = self.options.start_value
end_num = self.options.stop_value
text_spacing = self.unittouu(str(self.options.text_offset) + self.options.units)
text_size = self.unittouu(str(self.options.text_size) + self.options.units)
if not is_outer:
subtick_radius = radius + tick_length - subtick_length
arc_radius = radius + tick_length
else:
subtick_radius = radius
arc_radius = radius
if self.options.draw_arc:
self.draw_knob_arc(arc_radius, parent, angle)
if self.options.draw_centering_circle:
self.draw_centering_circle(arc_radius + tick_length + text_size + text_spacing, parent)
ticks_delta = angle / (n_ticks - 1)
start_ticks_angle = 1.5*pi - 0.5*angle
for tick in range(n_ticks):
self.draw_tick(radius, start_ticks_angle + ticks_delta*tick,
tick_length, parent)
if self.options.labels_enabled:
if self.options.rounding_level > 0:
tick_text = str(round(start_num +
float(tick) * (end_num - start_num) / (n_ticks - 1),
self.options.rounding_level))
else:
tick_text = str(int(start_num + float(tick) * (end_num - start_num) / (n_ticks - 1)))
self.draw_text(tick_text, radius + tick_length + text_spacing,
start_ticks_angle + ticks_delta*tick,
text_size,
parent)
if tick == (n_ticks - 1):
break
subticks_delta = ticks_delta / (n_subticks + 1)
subtick_start_angle = start_ticks_angle + ticks_delta*tick + subticks_delta
for subtick in range(n_subticks):
self.draw_tick(subtick_radius, subtick_start_angle + subticks_delta*subtick,
subtick_length, parent)
if __name__ == '__main__':
e = Knob_Scale()
e.affect() | 0.231354 | 0.143008 |
import copy
import os
import shutil
import tempfile
from pathlib import Path
import numpy as np
from fccpy import read_pdb
from fccpy.contacts import get_intermolecular_contacts
from pdbtools import pdb_segxchain
from haddock import log
from haddock.libs.libontology import Format, ModuleIO
from haddock.modules import BaseHaddockModule
# Directory of this module, used to resolve the bundled default configuration.
RECIPE_PATH = Path(__file__).resolve().parent
DEFAULT_CONFIG = Path(RECIPE_PATH, "defaults.cfg")
def add_chain_from_segid(pdb_path):
    """Replace the chainID with the segID, rewriting *pdb_path* in place."""
    scratch = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
    with open(pdb_path) as source:
        # pdb_segxchain.run yields the edited PDB lines one by one.
        for edited_line in list(pdb_segxchain.run(source)):
            scratch.writelines(edited_line)
    scratch.close()
    # Overwrite the original file with the edited copy and return its path.
    return shutil.move(scratch.name, pdb_path)
def centroid(X):
    """Return the mean coordinate (centroid) of the point set *X*."""
    return np.mean(X, axis=0)
def kabsch(P, Q):
    """Return the optimal rotation matrix mapping P onto Q (Kabsch).

    Both arguments are (N, 3) coordinate arrays, assumed already centred;
    the result U is meant to be applied as ``np.dot(P, U)``.
    """
    covariance = np.dot(np.transpose(P), Q)
    V, S, W = np.linalg.svd(covariance)
    # A negative determinant product means the best orthogonal map is a
    # reflection; flip the last singular direction to force a proper rotation.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    return np.dot(V, W)
def calc_rmsd(V, W):
    """Return the RMSD between two equally-sized coordinate sets."""
    deltas = np.array(V) - np.array(W)
    # Sum of squared per-axis differences, averaged over the number of points.
    return np.sqrt((deltas ** 2).sum() / len(V))
def read_res(pdb_f):
    """Map chain -> residue number -> ordered atom names for ATOM records."""
    residues = {}
    with open(pdb_f, 'r') as handle:
        for line in handle:
            if not line.startswith('ATOM'):
                continue
            # Fixed-column PDB fields.
            chain_id = line[21]
            res_id = int(line[22:26])
            atom_name = line[12:16].strip()
            atom_list = residues.setdefault(chain_id, {}).setdefault(res_id, [])
            if atom_name not in atom_list:
                atom_list.append(atom_name)
    return residues
# Debug only
def write_coords(output_name, coor_list):
    """Write each coordinate in *coor_list* as a dummy ATOM record (debug aid)."""
    with open(output_name, 'w') as out:
        for serial, point in enumerate(coor_list):
            # Right-justify fields to keep the fixed-width PDB-like layout.
            atom_num = f'{serial}'.rjust(4, ' ')
            resnum = f'{serial}'.rjust(3, ' ')
            dum_x = f'{point[0]:.3f}'.rjust(7, ' ')
            dum_y = f'{point[1]:.3f}'.rjust(7, ' ')
            dum_z = f'{point[2]:.3f}'.rjust(7, ' ')
            record = (f'ATOM {atom_num} H DUM X {resnum} '
                      f' {dum_x} {dum_y} {dum_z} 1.00 1.00 '
                      ' H ' + os.linesep)
            out.write(record)
def load_contacts(pdb_f, cutoff):
    """Return the set of (chain_i, resid_i, chain_j, resid_j) contact tuples
    between molecules within *cutoff*."""
    structure = read_pdb(pdb_f)
    return {
        (atom_a.chain, atom_a.resid, atom_b.chain, atom_b.resid)
        for atom_a, atom_b in get_intermolecular_contacts(structure, cutoff)
        }
def load_coords(pdb_f, filter_resdic=None, atoms=None, ignore_missing=True):
    """Load ATOM coordinates from a PDB file.

    Returns ``(coords, chain_ranges)``: *coords* is an (N, 3) float array in
    file order; *chain_ranges* maps each chain ID to the (first, last)
    indices of that chain's atoms in *coords*.

    *filter_resdic* restricts loading to chain -> resnum (and, when
    *ignore_missing* is True, to the atom names listed per residue); *atoms*
    additionally restricts by atom name.
    """
    # ignore_missing = will use only atoms that are present in filter_resdic
    C = []
    chain_dic = {}
    idx = 0
    with open(pdb_f, 'r') as fh:
        for line in fh.readlines():
            if line.startswith('ATOM'):
                # Fixed-column PDB fields.
                x = float(line[30:38])
                y = float(line[38:46])
                z = float(line[46:54])
                resnum = int(line[22:26])
                chain = line[21]
                # NOTE(review): the chain entry is created before any filter
                # is applied; a chain whose atoms are all filtered out keeps
                # an empty list and min()/max() below would raise ValueError
                # -- confirm this cannot happen for the inputs used.
                if chain not in chain_dic:
                    chain_dic[chain] = []
                atom_name = line[12:16].strip()
                if atoms:
                    if atom_name not in atoms:
                        continue
                if filter_resdic and ignore_missing:
                    # Keep only atoms explicitly listed for this residue.
                    if chain in filter_resdic:
                        if resnum in filter_resdic[chain]:
                            if atom_name in filter_resdic[chain][resnum]:
                                C.append(np.asarray([x, y, z], dtype=float))
                                chain_dic[chain].append(idx)
                                idx += 1
                elif filter_resdic and not ignore_missing:
                    # Keep every atom of the listed residues.
                    if chain in filter_resdic:
                        if resnum in filter_resdic[chain]:
                            C.append(np.asarray([x, y, z], dtype=float))
                            chain_dic[chain].append(idx)
                            idx += 1
                else:
                    # No residue filter: keep everything (subject to *atoms*).
                    C.append(np.asarray([x, y, z], dtype=float))
                    chain_dic[chain].append(idx)
                    idx += 1
    chain_ranges = {}
    for chain in chain_dic:
        # NOTE(review): max_idx is the last index *inclusive*; callers that
        # slice with coords[min_idx:max_idx] drop the chain's final atom --
        # confirm whether max_idx + 1 was intended there.
        min_idx = min(chain_dic[chain])
        max_idx = max(chain_dic[chain])
        chain_ranges[chain] = (min_idx, max_idx)
    return np.asarray(C), chain_ranges
def identify_interface(pdb_f, cutoff):
    """Return the interface as chain -> resid -> list of atom names, built
    from all intermolecular contacts within *cutoff*."""
    structure = read_pdb(pdb_f)
    interface = {}
    for atom_a, atom_b in get_intermolecular_contacts(structure, cutoff):
        # Record both partners of every contact pair.
        for contact_atom in (atom_a, atom_b):
            atom_name = contact_atom.name.strip()
            residue_atoms = interface.setdefault(
                contact_atom.chain, {}).setdefault(contact_atom.resid, [])
            if atom_name not in residue_atoms:
                residue_atoms.append(atom_name)
    return interface
class CAPRI:
    """Compute CAPRI quality metrics (I-RMSD, L-RMSD, I-L-RMSD, FNAT) for a
    list of models against a reference structure.

    Results accumulate in the ``*_dic`` attributes keyed by model path and
    are written to a TSV file by :meth:`output`.
    """

    def __init__(self, reference, model_list, atoms, ignore_missing):
        # reference: path of the reference PDB structure.
        # model_list: structures exposing .path, .file_name and .score.
        # atoms: atom-name filter applied when loading coordinates.
        # ignore_missing: drop atoms absent from the reference selection.
        self.reference = reference
        self.model_list = []
        self.irmsd_dic = {}
        self.lrmsd_dic = {}
        self.ilrmsd_dic = {}
        self.fnat_dic = {}
        self.atoms = atoms
        self.ignore_missing = ignore_missing
        self.score_dic = {}
        for struct in model_list:
            pdb_f = Path(struct.path, struct.file_name)
            # Rewrite the model in place so chain IDs match the segIDs.
            pdb_w_chain = add_chain_from_segid(pdb_f)
            self.model_list.append(pdb_w_chain)
            # NOTE(review): keyed by the original Path while model_list holds
            # shutil.move's return value; output() assumes both compare equal
            # -- confirm shutil.move returns an equivalent path value here.
            self.score_dic[pdb_f] = struct.score

    def irmsd(self, cutoff=5.):
        """Calculate the I-RMSD (interface RMSD) of each model."""
        log.info(f'[caprieval] cutoff: {cutoff}A')
        # Identify interface
        ref_interface_resdic = identify_interface(self.reference, cutoff)
        # Load interface coordinates
        _Q, _ = load_coords(self.reference,
                            filter_resdic=ref_interface_resdic,
                            atoms=self.atoms,
                            ignore_missing=self.ignore_missing)
        # Move to centroids
        _Q -= centroid(_Q)
        for model in self.model_list:
            # This has no effect, but keep it here
            # for the next time we need to debug this function
            Q = copy.deepcopy(_Q)
            P, _ = load_coords(model,
                               filter_resdic=ref_interface_resdic,
                               atoms=self.atoms,
                               ignore_missing=self.ignore_missing)
            if P.shape != Q.shape:
                log.warning('[caprieval] Cannot align these models,'
                            ' the number of atoms is in the interface'
                            ' is different.')
                i_rmsd = float('nan')
            else:
                # Superimpose the model interface on the reference interface
                # (Kabsch) and measure the residual deviation.
                P = P - centroid(P)
                U = kabsch(P, Q)
                P = np.dot(P, U)
                i_rmsd = calc_rmsd(P, Q)
                # write_coords('ref.pdb', P)
                # write_coords('model.pdb', Q)
            self.irmsd_dic[model] = i_rmsd
        return self.irmsd_dic

    def lrmsd(self, receptor_chain, ligand_chain):
        """Calculate the L-RMSD: fit on the receptor, measure on the ligand."""
        log.info(f'[caprieval] Receptor chain: {receptor_chain}')
        log.info(f'[caprieval] Ligand chain: {ligand_chain}')
        ref_resdic = read_res(self.reference)
        # Get reference coordinates
        _Q, chain_ranges = load_coords(self.reference,
                                       filter_resdic=ref_resdic,
                                       atoms=self.atoms,
                                       ignore_missing=self.ignore_missing)
        receptor_start = chain_ranges[receptor_chain][0]
        receptor_end = chain_ranges[receptor_chain][1]
        # NOTE(review): chain_ranges stores the last index *inclusive*, so
        # this slice drops the chain's final atom -- confirm whether
        # receptor_end + 1 was intended (same pattern below and in ilrmsd).
        _Q_receptor = _Q[receptor_start:receptor_end]
        # loop goes here
        # NOTE(review): redundant assignment; the loop below rebinds `model`.
        model = self.model_list[0]
        for model in self.model_list:
            Q_all = copy.deepcopy(_Q)
            Q_receptor = copy.deepcopy(_Q_receptor)
            P_all, _ = load_coords(model,
                                   filter_resdic=ref_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            receptor_start = chain_ranges[receptor_chain][0]
            receptor_end = chain_ranges[receptor_chain][1]
            P_receptor = P_all[receptor_start:receptor_end]
            # write_coords('ref_ori.pdb', Q_all)
            # write_coords('model_ori.pdb', P_all)
            # Center receptors and get rotation matrix
            Q_receptor_centroid = centroid(Q_receptor)
            Q_receptor -= Q_receptor_centroid
            P_receptor_centroid = centroid(P_receptor)
            P_receptor -= P_receptor_centroid
            U_receptor = kabsch(P_receptor, Q_receptor)
            # Center complexes in the receptor centroids
            P_all -= P_receptor_centroid
            Q_all -= Q_receptor_centroid
            # Apply rotation to complex
            #  - complex are aligned on the receptor
            P_all = np.dot(P_all, U_receptor)
            # write_coords('ref.pdb', Q_all)
            # write_coords('model.pdb', P_all)
            # Identify the ligand coordinates
            ligand_start = chain_ranges[ligand_chain][0]
            ligand_end = chain_ranges[ligand_chain][1]
            Q_ligand = Q_all[ligand_start:ligand_end]
            P_ligand = P_all[ligand_start:ligand_end]
            # write_coords('ref_ligand.pdb', Q_ligand)
            # write_coords('model_ligand.pdb', P_ligand)
            # Calculate the RMSD of the ligands
            l_rmsd = calc_rmsd(P_ligand, Q_ligand)
            self.lrmsd_dic[model] = l_rmsd
        return self.lrmsd_dic

    def ilrmsd(self, ligand_chain, cutoff):
        """Calculate the Interface Ligand RMSD: fit on the interface,
        measure on the ligand."""
        log.info(f'[caprieval] cutoff: {cutoff}A')
        log.info(f'[caprieval] Ligand chain: {ligand_chain}')
        ref_resdic = read_res(self.reference)
        # Identify interface
        ref_interface_resdic = identify_interface(self.reference, cutoff)
        # Load interface coordinates
        _Q, chain_ranges = load_coords(self.reference,
                                       filter_resdic=ref_resdic,
                                       atoms=self.atoms,
                                       ignore_missing=self.ignore_missing)
        Q_int, _ = load_coords(self.reference,
                               filter_resdic=ref_interface_resdic,
                               atoms=self.atoms,
                               ignore_missing=self.ignore_missing)
        # Move to centroids
        Q_int_centroid = centroid(Q_int)
        Q_int = Q_int - Q_int_centroid
        for model in self.model_list:
            Q_all = copy.deepcopy(_Q)
            P_all, _ = load_coords(model,
                                   filter_resdic=ref_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            P_int, _ = load_coords(model,
                                   filter_resdic=ref_interface_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            P_int_centroid = centroid(P_int)
            P_int = P_int - P_int_centroid
            # find the rotation that minimizes the interface rmsd
            U_int = kabsch(P_int, Q_int)
            P_all -= P_int_centroid
            Q_all -= Q_int_centroid
            # apply this rotation to the model
            P_all = np.dot(P_all, U_int)
            # Calculate the rmsd of the ligand
            # NOTE(review): inclusive/exclusive slice mismatch -- see lrmsd.
            ligand_start = chain_ranges[ligand_chain][0]
            ligand_end = chain_ranges[ligand_chain][1]
            Q_ligand = Q_all[ligand_start:ligand_end]
            P_ligand = P_all[ligand_start:ligand_end]
            # write_coords('ref.pdb', P_ligand)
            # write_coords('model.pdb', Q_ligand)
            # this will be the interface-ligand-rmsd
            i_l_rmsd = calc_rmsd(P_ligand, Q_ligand)
            self.ilrmsd_dic[model] = i_l_rmsd
        return self.ilrmsd_dic

    def fnat(self, cutoff=5.0):
        """Calculate the frequency of native contacts."""
        log.info(f'[caprieval] cutoff: {cutoff}A')
        ref_contacts = load_contacts(self.reference, cutoff)
        for model in self.model_list:
            model_contacts = load_contacts(model, cutoff)
            intersection = ref_contacts & model_contacts
            # NOTE(review): raises ZeroDivisionError when the reference has no
            # contacts within the cutoff -- presumably impossible for a valid
            # complex; confirm.
            fnat = len(intersection) / float(len(ref_contacts))
            self.fnat_dic[model] = fnat
        return self.fnat_dic

    def output(self, output_f):
        """Output the CAPRI results to a .tsv file.

        Columns are included only for the metrics that were computed.
        """
        sep = '\t'
        with open(output_f, 'w') as fh:
            header = 'model' + sep
            header += 'score' + sep
            if self.fnat_dic:
                header += 'fnat' + sep
            if self.irmsd_dic:
                header += 'irmsd' + sep
            if self.lrmsd_dic:
                header += 'lrmsd' + sep
            if self.ilrmsd_dic:
                header += 'ilrmsd' + sep
            header += os.linesep
            fh.write(header)
            for model in self.model_list:
                # NOTE(review): assumes each model is Path-like (`.name`) and
                # a valid score_dic key -- see the note in __init__.
                row = f'{model.name}' + sep
                row += f'{self.score_dic[model]:.3f}' + sep
                if model in self.fnat_dic:
                    row += f'{self.fnat_dic[model]:.3f}' + sep
                if model in self.irmsd_dic:
                    row += f'{self.irmsd_dic[model]:.2f}' + sep
                if model in self.lrmsd_dic:
                    row += f'{self.lrmsd_dic[model]:.2f}' + sep
                if model in self.ilrmsd_dic:
                    row += f'{self.ilrmsd_dic[model]:.2f}' + sep
                row += os.linesep
                fh.write(row)
class HaddockModule(BaseHaddockModule):
    """HADDOCK3 module to calculate the CAPRI metrics."""

    name = RECIPE_PATH.name

    def __init__(
            self,
            order,
            path,
            *ignore,
            init_params=DEFAULT_CONFIG,
            **everything):
        super().__init__(order, path, init_params)

    @classmethod
    def confirm_installation(cls):
        """Confirm if contact executable is compiled."""
        return

    def _run(self):
        """Execute module: score previous-step models against a reference."""
        # Local import keeps this fix self-contained.
        from collections.abc import Iterator

        # Get the models generated in previous step.
        # BUG FIX: the original guard was `type(self.previous_io) == iter`,
        # which is always False (`iter` is a builtin function, not a type);
        # test for an iterator instance instead.
        if isinstance(self.previous_io, Iterator):
            self.finish_with_error('This module cannot come after one'
                                   ' that produced an iterable')

        models_to_calc = [
            p
            for p in self.previous_io.output
            if p.file_type == Format.PDB
            ]

        if not self.params['reference']:
            # No reference was given, use the best-ranked model instead.
            self.log('No reference was given, using best ranking'
                     ' structure from previous step')
            # models_to_calc should already be sorted by the module that
            # produced it.
            target_model = models_to_calc[0]
            reference = Path(target_model.path, target_model.file_name)
        else:
            reference = Path(self.params['reference'])
            self.log(f'Using {reference} as reference structure')

        capri = CAPRI(reference,
                      models_to_calc,
                      atoms=self.params['atoms'],
                      ignore_missing=self.params['ignore_missing'])

        # Each metric is optional and controlled by the module parameters.
        if self.params['fnat']:
            self.log('Calculating FNAT')
            capri.fnat(cutoff=self.params['fnat_cutoff'])
        if self.params['irmsd']:
            self.log('Calculating I-RMSD')
            capri.irmsd(cutoff=self.params['irmsd_cutoff'])
        if self.params['lrmsd']:
            self.log('Calculating L-RMSD')
            capri.lrmsd(receptor_chain=self.params['receptor_chain'],
                        ligand_chain=self.params['ligand_chain'])
        if self.params['ilrmsd']:
            self.log('Calculating I-L-RMSD')
            capri.ilrmsd(ligand_chain=self.params['ligand_chain'],
                         cutoff=self.params['irmsd_cutoff'])

        output_fname = Path(self.path, 'capri.tsv')
        self.log(f' Saving output to {output_fname}')
        capri.output(output_fname)

        # Pass all evaluated models through unchanged.
        selected_models = models_to_calc
        io = ModuleIO()
        io.add(selected_models, "o")
        io.save(self.path)
import os
import shutil
import tempfile
from pathlib import Path
import numpy as np
from fccpy import read_pdb
from fccpy.contacts import get_intermolecular_contacts
from pdbtools import pdb_segxchain
from haddock import log
from haddock.libs.libontology import Format, ModuleIO
from haddock.modules import BaseHaddockModule
RECIPE_PATH = Path(__file__).resolve().parent
DEFAULT_CONFIG = Path(RECIPE_PATH, "defaults.cfg")
def add_chain_from_segid(pdb_path):
"""Replace the chainID with the segID."""
temp_f = tempfile.NamedTemporaryFile(delete=False,
mode='w+t')
with open(pdb_path) as fh:
for line in list(pdb_segxchain.run(fh)):
temp_f.writelines(line)
temp_f.close()
# REPLACE!
new_pdb_path = shutil.move(temp_f.name, pdb_path)
return new_pdb_path
def centroid(X):
    """Return the geometric center (column-wise mean) of the coordinates *X*."""
    return np.mean(X, axis=0)
def kabsch(P, Q):
    """
    Return the optimal rotation matrix aligning *P* onto *Q* (Kabsch algorithm).

    Both coordinate sets are expected to be centered.  The covariance matrix
    is decomposed with SVD and, when the decomposition yields an improper
    rotation (a reflection), the last singular vector is sign-flipped so the
    result is a proper rotation matrix.
    """
    # Covariance between the two coordinate sets.
    covariance = np.dot(np.transpose(P), Q)
    V, S, W = np.linalg.svd(covariance)
    # A negative determinant product signals a reflection; correct it by
    # flipping the sign of the last singular value/vector pair.
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    # Compose the rotation matrix.
    return np.dot(V, W)
def calc_rmsd(V, W):
    """Return the root-mean-square deviation between coordinate sets *V* and *W*."""
    delta = np.asarray(V) - np.asarray(W)
    # Sum of squared differences over all components, averaged per point.
    return np.sqrt(np.square(delta).sum() / len(V))
def read_res(pdb_f):
    """
    Collect the residues present in a PDB file.

    Returns a mapping ``{chain: {resnum: [atom_name, ...]}}`` built from the
    ATOM records of *pdb_f*; atom names keep first-seen order, no duplicates.
    """
    res_dic = {}
    with open(pdb_f, 'r') as fh:
        for line in fh:
            if not line.startswith('ATOM'):
                continue
            # Fixed-column PDB fields: chain id, residue number, atom name.
            chain = line[21]
            resnum = int(line[22:26])
            atom = line[12:16].strip()
            atom_names = res_dic.setdefault(chain, {}).setdefault(resnum, [])
            if atom not in atom_names:
                atom_names.append(atom)
    return res_dic
# Debug only
def write_coords(output_name, coor_list):
    """Write each coordinate in *coor_list* as a dummy PDB ATOM record (debug helper)."""
    with open(output_name, 'w') as out_fh:
        for serial, coord in enumerate(coor_list):
            # Width specifiers reproduce the right-justified fixed columns.
            x_str = f'{coord[0]:7.3f}'
            y_str = f'{coord[1]:7.3f}'
            z_str = f'{coord[2]:7.3f}'
            record = (f'ATOM {serial:>4} H DUM X {serial:>3} '
                      f' {x_str} {y_str} {z_str} 1.00 1.00 '
                      ' H ' + os.linesep)
            out_fh.write(record)
def load_contacts(pdb_f, cutoff):
    """Return the set of residue-level intermolecular contacts in *pdb_f*."""
    structure = read_pdb(pdb_f)
    # One (chain_i, resid_i, chain_j, resid_j) tuple per atom-pair contact;
    # the set collapses duplicates from multiple atom pairs per residue pair.
    return {
        (atom_i.chain, atom_i.resid, atom_j.chain, atom_j.resid)
        for atom_i, atom_j in get_intermolecular_contacts(structure, cutoff)
        }
def load_coords(pdb_f, filter_resdic=None, atoms=None, ignore_missing=True):
    """
    Load ATOM coordinates from a PDB file.

    Parameters
    ----------
    pdb_f : str or Path
        PDB file to read.
    filter_resdic : dict, optional
        ``{chain: {resnum: [atom_name, ...]}}`` selection.  When falsy, every
        ATOM record is kept.
    atoms : container of str, optional
        When given, keep only atoms whose name is in this container.
    ignore_missing : bool
        When True (default) a selected atom's name must also be listed in
        *filter_resdic*; when False any atom of a selected residue is kept.

    Returns
    -------
    tuple
        ``(coords, chain_ranges)`` where *coords* is an (N, 3) float array and
        *chain_ranges* maps each chain with at least one selected atom to its
        inclusive ``(first_index, last_index)`` range inside *coords*.
        Chains whose atoms were all filtered out are omitted (the previous
        implementation crashed with ``min([])`` in that case).
    """
    C = []
    chain_dic = {}
    idx = 0
    with open(pdb_f, 'r') as fh:
        for line in fh:
            if not line.startswith('ATOM'):
                continue
            # Fixed-column PDB fields.
            x = float(line[30:38])
            y = float(line[38:46])
            z = float(line[46:54])
            resnum = int(line[22:26])
            chain = line[21]
            chain_dic.setdefault(chain, [])
            atom_name = line[12:16].strip()
            if atoms and atom_name not in atoms:
                continue
            if filter_resdic:
                # Residue must be part of the selection.
                if chain not in filter_resdic or resnum not in filter_resdic[chain]:
                    continue
                # With ignore_missing the atom name must be listed too.
                if ignore_missing and atom_name not in filter_resdic[chain][resnum]:
                    continue
            C.append(np.asarray([x, y, z], dtype=float))
            chain_dic[chain].append(idx)
            idx += 1
    chain_ranges = {}
    for chain, indices in chain_dic.items():
        # Guard: a chain seen in the file but fully excluded by the filters
        # has no indices; min()/max() of an empty list would raise ValueError.
        if indices:
            chain_ranges[chain] = (min(indices), max(indices))
    return np.asarray(C), chain_ranges
def identify_interface(pdb_f, cutoff):
    """
    Identify the interface of a complex.

    Returns ``{chain: {resid: [atom_name, ...]}}`` covering every atom that
    participates in an intermolecular contact within *cutoff* (as reported by
    ``fccpy.get_intermolecular_contacts``).
    """
    pdb = read_pdb(pdb_f)
    interface_resdic = {}
    for atom_i, atom_j in get_intermolecular_contacts(pdb, cutoff):
        # Register both endpoints of the contact: chain level first...
        if atom_i.chain not in interface_resdic:
            interface_resdic[atom_i.chain] = {}
        if atom_j.chain not in interface_resdic:
            interface_resdic[atom_j.chain] = {}
        # ...then residue level.
        if atom_i.resid not in interface_resdic[atom_i.chain]:
            interface_resdic[atom_i.chain][atom_i.resid] = []
        if atom_j.resid not in interface_resdic[atom_j.chain]:
            interface_resdic[atom_j.chain][atom_j.resid] = []
        atom_i_name = atom_i.name.strip()
        atom_j_name = atom_j.name.strip()
        # Atom names are deduplicated (one residue pair can contribute
        # several atom-pair contacts).
        if atom_i_name not in interface_resdic[atom_i.chain][atom_i.resid]:
            interface_resdic[atom_i.chain][atom_i.resid].append(atom_i_name)
        if atom_j_name not in interface_resdic[atom_j.chain][atom_j.resid]:
            interface_resdic[atom_j.chain][atom_j.resid].append(atom_j_name)
    return interface_resdic
class CAPRI:
    """
    Compute CAPRI assessment metrics for a set of models against a reference.

    Supported metrics: fnat, interface RMSD, ligand RMSD and
    interface-ligand RMSD.  Results are cached in per-metric dictionaries
    keyed by model path and dumped to a .tsv via :meth:`output`.
    """
    def __init__(self, reference, model_list, atoms, ignore_missing):
        # Path of the reference structure all models are compared against.
        self.reference = reference
        self.model_list = []
        # Per-metric result caches, keyed by model path.
        self.irmsd_dic = {}
        self.lrmsd_dic = {}
        self.ilrmsd_dic = {}
        self.fnat_dic = {}
        # Atom-name filter forwarded to load_coords.
        self.atoms = atoms
        self.ignore_missing = ignore_missing
        self.score_dic = {}
        for struct in model_list:
            pdb_f = Path(struct.path, struct.file_name)
            # Rewrite chainIDs from segIDs in place; the returned path is the
            # same location, so it matches the score_dic key below.
            pdb_w_chain = add_chain_from_segid(pdb_f)
            self.model_list.append(pdb_w_chain)
            self.score_dic[pdb_f] = struct.score
    def irmsd(self, cutoff=5.):
        """
        Calculate the interface RMSD of each model.

        Interface residues are taken from the reference at *cutoff* Angstrom;
        both coordinate sets are centered, aligned with Kabsch and compared.
        """
        log.info(f'[caprieval] cutoff: {cutoff}A')
        # Identify interface
        ref_interface_resdic = identify_interface(self.reference, cutoff)
        # Load interface coordinates
        _Q, _ = load_coords(self.reference,
                            filter_resdic=ref_interface_resdic,
                            atoms=self.atoms,
                            ignore_missing=self.ignore_missing)
        # Move to centroids
        _Q -= centroid(_Q)
        for model in self.model_list:
            # This has no effect, but keep it here
            # for the next time we need to debug this function
            Q = copy.deepcopy(_Q)
            P, _ = load_coords(model,
                               filter_resdic=ref_interface_resdic,
                               atoms=self.atoms,
                               ignore_missing=self.ignore_missing)
            if P.shape != Q.shape:
                # Atom counts differ: alignment is impossible, report NaN.
                log.warning('[caprieval] Cannot align these models,'
                            ' the number of atoms is in the interface'
                            ' is different.')
                i_rmsd = float('nan')
            else:
                P = P - centroid(P)
                U = kabsch(P, Q)
                P = np.dot(P, U)
                i_rmsd = calc_rmsd(P, Q)
                # write_coords('ref.pdb', P)
                # write_coords('model.pdb', Q)
            self.irmsd_dic[model] = i_rmsd
        return self.irmsd_dic
    def lrmsd(self, receptor_chain, ligand_chain):
        """
        Calculate the ligand RMSD of each model.

        The full complexes are superimposed on the receptor chain, then the
        RMSD is measured over the ligand chain only.
        """
        log.info(f'[caprieval] Receptor chain: {receptor_chain}')
        log.info(f'[caprieval] Ligand chain: {ligand_chain}')
        ref_resdic = read_res(self.reference)
        # Get reference coordinates
        _Q, chain_ranges = load_coords(self.reference,
                                       filter_resdic=ref_resdic,
                                       atoms=self.atoms,
                                       ignore_missing=self.ignore_missing)
        # NOTE(review): chain_ranges holds inclusive (min, max) indices, so
        # slicing with [start:end] drops the chain's last atom — confirm
        # whether `receptor_end + 1` was intended here and below.
        receptor_start = chain_ranges[receptor_chain][0]
        receptor_end = chain_ranges[receptor_chain][1]
        _Q_receptor = _Q[receptor_start:receptor_end]
        # loop goes here
        # NOTE(review): the next assignment is dead code — the for loop
        # immediately rebinds `model`.
        model = self.model_list[0]
        for model in self.model_list:
            Q_all = copy.deepcopy(_Q)
            Q_receptor = copy.deepcopy(_Q_receptor)
            P_all, _ = load_coords(model,
                                   filter_resdic=ref_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            receptor_start = chain_ranges[receptor_chain][0]
            receptor_end = chain_ranges[receptor_chain][1]
            P_receptor = P_all[receptor_start:receptor_end]
            # write_coords('ref_ori.pdb', Q_all)
            # write_coords('model_ori.pdb', P_all)
            # Center receptors and get rotation matrix
            Q_receptor_centroid = centroid(Q_receptor)
            Q_receptor -= Q_receptor_centroid
            P_receptor_centroid = centroid(P_receptor)
            P_receptor -= P_receptor_centroid
            U_receptor = kabsch(P_receptor, Q_receptor)
            # Center complexes in the receptor centroids
            P_all -= P_receptor_centroid
            Q_all -= Q_receptor_centroid
            # Apply rotation to complex
            # - complex are aligned on the receptor
            P_all = np.dot(P_all, U_receptor)
            # write_coords('ref.pdb', Q_all)
            # write_coords('model.pdb', P_all)
            # Identify the ligand coordinates
            ligand_start = chain_ranges[ligand_chain][0]
            ligand_end = chain_ranges[ligand_chain][1]
            Q_ligand = Q_all[ligand_start:ligand_end]
            P_ligand = P_all[ligand_start:ligand_end]
            # write_coords('ref_ligand.pdb', Q_ligand)
            # write_coords('model_ligand.pdb', P_ligand)
            # Calculate the RMSD of the ligands
            l_rmsd = calc_rmsd(P_ligand, Q_ligand)
            self.lrmsd_dic[model] = l_rmsd
        return self.lrmsd_dic
    def ilrmsd(self, ligand_chain, cutoff):
        """
        Calculate the interface-ligand RMSD of each model.

        Complexes are superimposed on the *interface* atoms (reference
        interface at *cutoff* Angstrom), then the RMSD is measured over the
        ligand chain.
        """
        log.info(f'[caprieval] cutoff: {cutoff}A')
        log.info(f'[caprieval] Ligand chain: {ligand_chain}')
        ref_resdic = read_res(self.reference)
        # Identify interface
        ref_interface_resdic = identify_interface(self.reference, cutoff)
        # Load interface coordinates
        _Q, chain_ranges = load_coords(self.reference,
                                       filter_resdic=ref_resdic,
                                       atoms=self.atoms,
                                       ignore_missing=self.ignore_missing)
        Q_int, _ = load_coords(self.reference,
                               filter_resdic=ref_interface_resdic,
                               atoms=self.atoms,
                               ignore_missing=self.ignore_missing)
        # Move to centroids
        Q_int_centroid = centroid(Q_int)
        Q_int = Q_int - Q_int_centroid
        for model in self.model_list:
            Q_all = copy.deepcopy(_Q)
            P_all, _ = load_coords(model,
                                   filter_resdic=ref_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            P_int, _ = load_coords(model,
                                   filter_resdic=ref_interface_resdic,
                                   atoms=self.atoms,
                                   ignore_missing=self.ignore_missing)
            P_int_centroid = centroid(P_int)
            P_int = P_int - P_int_centroid
            # find the rotation that minimizes the interface rmsd
            U_int = kabsch(P_int, Q_int)
            P_all -= P_int_centroid
            Q_all -= Q_int_centroid
            # apply this rotation to the model
            P_all = np.dot(P_all, U_int)
            # Calculate the rmsd of the ligand
            # NOTE(review): inclusive (min, max) range used as an exclusive
            # slice end — see the same pattern in lrmsd().
            ligand_start = chain_ranges[ligand_chain][0]
            ligand_end = chain_ranges[ligand_chain][1]
            Q_ligand = Q_all[ligand_start:ligand_end]
            P_ligand = P_all[ligand_start:ligand_end]
            # write_coords('ref.pdb', P_ligand)
            # write_coords('model.pdb', Q_ligand)
            # this will be the interface-ligand-rmsd
            i_l_rmsd = calc_rmsd(P_ligand, Q_ligand)
            self.ilrmsd_dic[model] = i_l_rmsd
        return self.ilrmsd_dic
    def fnat(self, cutoff=5.0):
        """
        Calculate the fraction of native contacts of each model.

        fnat = |model contacts ∩ reference contacts| / |reference contacts|.
        """
        log.info(f'[caprieval] cutoff: {cutoff}A')
        ref_contacts = load_contacts(self.reference, cutoff)
        for model in self.model_list:
            model_contacts = load_contacts(model, cutoff)
            intersection = ref_contacts & model_contacts
            fnat = len(intersection) / float(len(ref_contacts))
            self.fnat_dic[model] = fnat
        return self.fnat_dic
    def output(self, output_f):
        """
        Write the computed CAPRI metrics to a tab-separated file.

        Only columns for metrics that were actually computed are emitted.
        """
        sep = '\t'
        with open(output_f, 'w') as fh:
            # Header reflects which metric dictionaries were populated.
            header = 'model' + sep
            header += 'score' + sep
            if self.fnat_dic:
                header += 'fnat' + sep
            if self.irmsd_dic:
                header += 'irmsd' + sep
            if self.lrmsd_dic:
                header += 'lrmsd' + sep
            if self.ilrmsd_dic:
                header += 'ilrmsd' + sep
            header += os.linesep
            fh.write(header)
            for model in self.model_list:
                row = f'{model.name}' + sep
                row += f'{self.score_dic[model]:.3f}' + sep
                if model in self.fnat_dic:
                    row += f'{self.fnat_dic[model]:.3f}' + sep
                if model in self.irmsd_dic:
                    row += f'{self.irmsd_dic[model]:.2f}' + sep
                if model in self.lrmsd_dic:
                    row += f'{self.lrmsd_dic[model]:.2f}' + sep
                if model in self.ilrmsd_dic:
                    row += f'{self.ilrmsd_dic[model]:.2f}' + sep
                row += os.linesep
                fh.write(row)
class HaddockModule(BaseHaddockModule):
    """HADDOCK3 module to calculate the CAPRI metrics."""

    name = RECIPE_PATH.name

    def __init__(
            self,
            order,
            path,
            *ignore,
            init_params=DEFAULT_CONFIG,
            **everything):
        """Initialise the module; extra positional/keyword args are ignored."""
        super().__init__(order, path, init_params)

    @classmethod
    def confirm_installation(cls):
        """Confirm if contact executable is compiled."""
        return

    def _run(self):
        """Execute module."""
        from collections.abc import Iterator

        # Get the models generated in previous step.
        # BUGFIX: the previous check `type(self.previous_io) == iter` was
        # always False (`iter` is a builtin function, not a type), so this
        # guard could never fire.
        if isinstance(self.previous_io, Iterator):
            self.finish_with_error('This module cannot come after one'
                                   ' that produced an iterable')
        models_to_calc = [
            p
            for p in self.previous_io.output
            if p.file_type == Format.PDB
            ]
        if not self.params['reference']:
            # No reference was given, use the best-ranked model instead.
            self.log('No reference was given, using best ranking'
                     ' structure from previous step')
            # by default models_to_calc should have been sorted by the module
            # that produced it
            target_model = models_to_calc[0]
            reference = Path(target_model.path, target_model.file_name)
        else:
            reference = Path(self.params['reference'])
            self.log(f'Using {reference} as reference structure')
        capri = CAPRI(reference,
                      models_to_calc,
                      atoms=self.params['atoms'],
                      ignore_missing=self.params['ignore_missing'])
        # Each metric is optional and controlled by the module parameters.
        if self.params['fnat']:
            self.log('Calculating FNAT')
            capri.fnat(cutoff=self.params['fnat_cutoff'])
        if self.params['irmsd']:
            self.log('Calculating I-RMSD')
            capri.irmsd(cutoff=self.params['irmsd_cutoff'])
        if self.params['lrmsd']:
            self.log('Calculating L-RMSD')
            capri.lrmsd(receptor_chain=self.params['receptor_chain'],
                        ligand_chain=self.params['ligand_chain'])
        if self.params['ilrmsd']:
            self.log('Calculating I-L-RMSD')
            # NOTE(review): ilrmsd reuses irmsd_cutoff (same interface
            # definition) — confirm this is intentional.
            capri.ilrmsd(ligand_chain=self.params['ligand_chain'],
                         cutoff=self.params['irmsd_cutoff'])
        output_fname = Path(self.path, 'capri.tsv')
        self.log(f' Saving output to {output_fname}')
        capri.output(output_fname)
        # caprieval does not filter models; forward them unchanged.
        selected_models = models_to_calc
        io = ModuleIO()
        io.add(selected_models, "o")
        io.save(self.path)
import logging
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.topology import event_hosts
from ryu.lib import hub
from ryu.topology.api import get_all_switch, get_all_link
LOG = logging.getLogger(__name__)
class HostDiscovery(app_manager.RyuApp):
    """
    Discover hosts attached to OpenFlow switches.

    A (dpid, in_port) pair is recorded as hosting a device when a packet-in
    arrives on it whose source MAC is not a switch-port MAC and whose port is
    not an endpoint of any inter-switch link.  Entries expire after
    MAX_TIMEOUT seconds of silence.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _EVENTS = []
    # Seconds without traffic before a host entry is considered stale.
    MAX_TIMEOUT = 60
    # Period (seconds) of the stale-entry cleanup loop.
    CLEAN_PERIOD = 60
    # Period (seconds) of the topology snapshot refresh loop.
    UPDATE_TOPO_PERIOD = 3
    def __init__(self, *args, **kwargs):
        super(HostDiscovery, self).__init__(*args, **kwargs)
        self.name = 'HostDiscovery'
        self.all_dps_mac_addr = []
        # {(int$dpid,int$in_port):(str$mac_addr_src,float$host_lastest_conn)}
        self.hosts_loc = {}
        # MAC addresses of every switch port, refreshed by topology_loop.
        self.topo_dps_mac = []
        # Inter-switch links, refreshed by topology_loop.
        self.topo_links = []
        # TODO the hosts_loc is a shared variable.
        self.timeout_event = hub.Event()
        self.get_topology_event = hub.Event()
        # Background green threads: host expiry and topology refresh.
        self.threads.append(hub.spawn(self.host_conn_timeout_loop))
        self.threads.append(hub.spawn(self.topology_loop))
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install a table-miss flow sending every packet to the controller."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)
    @staticmethod
    def add_flow(datapath, priority, match, actions):
        """Install one flow entry (APPLY_ACTIONS) on *datapath*."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst)
        datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Record (dpid, in_port) -> (src MAC, timestamp) for host-originated packets."""
        msg = ev.msg
        datapath = msg.datapath
        dpid = datapath.id # int
        in_port_no = msg.match['in_port'] # int
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        mac_addr_src = eth.src # str
        mac_addr_dst = eth.dst # str
        LOG.debug("packet in dpid=%s in_port=%s FROM:%s TO:%s", dpid, in_port_no, mac_addr_src, mac_addr_dst)
        # TODO abstract the algorithm using strategy design pattern.
        # Topology not learned yet: the port cannot be classified, skip.
        if self.topo_dps_mac == [] or self.topo_links == []:
            return
        # A source MAC that is not a switch-port MAC is assumed to be a host.
        if mac_addr_src not in self.topo_dps_mac:
            LOG.debug(mac_addr_src)
            if self.is_direct_from_host(dpid, in_port_no):
                time_now = time.time()
                self.hosts_loc[(dpid, in_port_no)] = (mac_addr_src, time_now)
                LOG.debug(self.hosts_loc)
    def host_conn_timeout_loop(self):
        """Green thread: periodically drop hosts idle longer than MAX_TIMEOUT."""
        while True:
            host_expire = []
            # Collect expired keys first, then pop, so the dict is not
            # mutated while being iterated.
            for (key, value) in self.hosts_loc.items():
                time_latest_conn = value[1]
                now = time.time()
                # LOG.debug(str(key) + ':' + str(now - time_latest_conn))
                if now - time_latest_conn > self.MAX_TIMEOUT:
                    host_expire.append(key)
            for host in host_expire:
                self.hosts_loc.pop(host)
            self.timeout_event.wait(self.CLEAN_PERIOD)
    def topology_loop(self):
        """Green thread: refresh switch-port MACs and links every UPDATE_TOPO_PERIOD."""
        while True:
            switches = get_all_switch(self) # [switches.Switch obj,]
            # LOG.debug(switches)
            all_switches_mac_addr = []
            for switch in switches:
                ports = switch.ports # list
                for port in ports:
                    # LOG.debug(port.hw_addr) # switches.Port
                    all_switches_mac_addr.append(port.hw_addr)
            self.topo_dps_mac = all_switches_mac_addr
            # LOG.debug(self.topo_dps_mac)
            self.topo_links = get_all_link(self)
            # LOG.debug(self.topo_links)
            self.get_topology_event.wait(self.UPDATE_TOPO_PERIOD)
    def is_direct_from_host(self, dpid, in_port_no):
        """Return True when the port is not part of any switch-to-switch link."""
        links = self.topo_links # {switches.Link obj:timestamp}
        if self.is_in_links((dpid, in_port_no), links):
            return False
        else:
            return True
    @staticmethod
    def is_in_links(port, all_links):
        """Return True if *port* (dpid, port_no) is an endpoint of any link."""
        for link in all_links:
            # LOG.debug(type(link))
            link_src = link.src # switches.Port
            link_dst = link.dst # switches.Port
            src_port = (link_src.dpid, link_src.port_no) # (int, int)
            dst_port = (link_dst.dpid, link_dst.port_no)
            if port == src_port or port == dst_port:
                return True
        return False
    @set_ev_cls(event_hosts.EventHostsRequest)
    def hosts_request_handler(self, req):
        """Answer an EventHostsRequest with the currently known hosts."""
        # LOG.debug(req)
        hosts = []
        # {(int$dpid,int$in_port):(str$mac_addr_src,float$time_incoming)}
        # to
        # { "(" dpid ":" port ")" :str$nic_mac_addr}
        for (key, value) in self.hosts_loc.items():
            dpid = str(key[0])
            in_port_num = str(key[1])
            peer_mac_addr = str(value[0])
            hosts.append(dict(dpid=dpid, port_no=in_port_num, peer_mac=peer_mac_addr))
        rep = event_hosts.EventHostsReply(req.src, hosts)
        self.reply_to_request(req, rep)
def get_hosts(app):
    """Request and return the host list known by the HostDiscovery app."""
    reply = app.send_request(event_hosts.EventHostsRequest())
    return reply.hosts
import logging
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.topology import event_hosts
from ryu.lib import hub
from ryu.topology.api import get_all_switch, get_all_link
LOG = logging.getLogger(__name__)
class HostDiscovery(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = []
MAX_TIMEOUT = 60
CLEAN_PERIOD = 60
UPDATE_TOPO_PERIOD = 3
def __init__(self, *args, **kwargs):
super(HostDiscovery, self).__init__(*args, **kwargs)
self.name = 'HostDiscovery'
self.all_dps_mac_addr = []
# {(int$dpid,int$in_port):(str$mac_addr_src,float$host_lastest_conn)}
self.hosts_loc = {}
self.topo_dps_mac = []
self.topo_links = []
# TODO the hosts_loc is a shared variable.
self.timeout_event = hub.Event()
self.get_topology_event = hub.Event()
self.threads.append(hub.spawn(self.host_conn_timeout_loop))
self.threads.append(hub.spawn(self.topology_loop))
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
@staticmethod
def add_flow(datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
dpid = datapath.id # int
in_port_no = msg.match['in_port'] # int
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
mac_addr_src = eth.src # str
mac_addr_dst = eth.dst # str
LOG.debug("packet in dpid=%s in_port=%s FROM:%s TO:%s", dpid, in_port_no, mac_addr_src, mac_addr_dst)
# TODO abstract the algorithm using strategy design pattern.
if self.topo_dps_mac == [] or self.topo_links == []:
return
if mac_addr_src not in self.topo_dps_mac:
LOG.debug(mac_addr_src)
if self.is_direct_from_host(dpid, in_port_no):
time_now = time.time()
self.hosts_loc[(dpid, in_port_no)] = (mac_addr_src, time_now)
LOG.debug(self.hosts_loc)
def host_conn_timeout_loop(self):
while True:
host_expire = []
for (key, value) in self.hosts_loc.items():
time_latest_conn = value[1]
now = time.time()
# LOG.debug(str(key) + ':' + str(now - time_latest_conn))
if now - time_latest_conn > self.MAX_TIMEOUT:
host_expire.append(key)
for host in host_expire:
self.hosts_loc.pop(host)
self.timeout_event.wait(self.CLEAN_PERIOD)
def topology_loop(self):
while True:
switches = get_all_switch(self) # [switches.Switch obj,]
# LOG.debug(switches)
all_switches_mac_addr = []
for switch in switches:
ports = switch.ports # list
for port in ports:
# LOG.debug(port.hw_addr) # switches.Port
all_switches_mac_addr.append(port.hw_addr)
self.topo_dps_mac = all_switches_mac_addr
# LOG.debug(self.topo_dps_mac)
self.topo_links = get_all_link(self)
# LOG.debug(self.topo_links)
self.get_topology_event.wait(self.UPDATE_TOPO_PERIOD)
def is_direct_from_host(self, dpid, in_port_no):
links = self.topo_links # {switches.Link obj:timestamp}
if self.is_in_links((dpid, in_port_no), links):
return False
else:
return True
@staticmethod
def is_in_links(port, all_links):
for link in all_links:
# LOG.debug(type(link))
link_src = link.src # switches.Port
link_dst = link.dst # switches.Port
src_port = (link_src.dpid, link_src.port_no) # (int, int)
dst_port = (link_dst.dpid, link_dst.port_no)
if port == src_port or port == dst_port:
return True
return False
@set_ev_cls(event_hosts.EventHostsRequest)
def hosts_request_handler(self, req):
# LOG.debug(req)
hosts = []
# {(int$dpid,int$in_port):(str$mac_addr_src,float$time_incoming)}
# to
# { "(" dpid ":" port ")" :str$nic_mac_addr}
for (key, value) in self.hosts_loc.items():
dpid = str(key[0])
in_port_num = str(key[1])
peer_mac_addr = str(value[0])
hosts.append(dict(dpid=dpid, port_no=in_port_num, peer_mac=peer_mac_addr))
rep = event_hosts.EventHostsReply(req.src, hosts)
self.reply_to_request(req, rep)
def get_hosts(app):
request = event_hosts.EventHostsRequest()
# LOG.debug(request)
rep = app.send_request(request)
return rep.hosts | 0.203154 | 0.067424 |
import cv2
import depthai as dai
import contextlib
# This can be customized to pass multiple parameters
# This can be customized to pass multiple parameters
def getPipeline(device_type):
    """Build a color-camera pipeline sized for the given device type."""
    pipeline = dai.Pipeline()
    # Define a source - color camera
    cam_rgb = pipeline.createColorCamera()
    # For the demo, OAK-D devices simply get a wider RGB preview.
    preview_size = (600, 300) if device_type.startswith("OAK-D") else (300, 300)
    cam_rgb.setPreviewSize(*preview_size)
    cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam_rgb.setInterleaved(False)
    # Stream the preview out over XLink under the name "rgb".
    xout_rgb = pipeline.createXLinkOut()
    xout_rgb.setStreamName("rgb")
    cam_rgb.preview.link(xout_rgb.input)
    return pipeline
# One (output_queue, window_name) pair per connected device.
q_rgb_list = []
# https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
with contextlib.ExitStack() as stack:
    device_infos = dai.Device.getAllAvailableDevices()
    if len(device_infos) == 0:
        raise RuntimeError("No devices found!")
    else:
        print("Found", len(device_infos), "devices")
    for device_info in device_infos:
        # Note: the pipeline isn't set here, as we don't know yet what device it is.
        # The extra arguments passed are required by the existing overload variants
        openvino_version = dai.OpenVINO.Version.VERSION_2021_4
        usb2_mode = False
        # ExitStack keeps every device open until the streaming loop below ends.
        device = stack.enter_context(dai.Device(openvino_version, device_info, usb2_mode))
        # Note: currently on POE, DeviceInfo.getMxId() and Device.getMxId() are different!
        print("=== Connected to " + device_info.getMxId())
        mxid = device.getMxId()
        cameras = device.getConnectedCameras()
        usb_speed = device.getUsbSpeed()
        print(" >>> MXID:", mxid)
        print(" >>> Cameras:", *[c.name for c in cameras])
        print(" >>> USB speed:", usb_speed.name)
        # Heuristic: the camera count distinguishes OAK-1 (1) from OAK-D (3).
        device_type = "unknown"
        if len(cameras) == 1: device_type = "OAK-1"
        elif len(cameras) == 3: device_type = "OAK-D"
        # If USB speed is UNKNOWN, assume it's a POE device
        if usb_speed == dai.UsbSpeed.UNKNOWN: device_type += "-POE"
        # Get a customized pipeline based on identified device type
        pipeline = getPipeline(device_type)
        print(" >>> Loading pipeline for:", device_type)
        device.startPipeline(pipeline)
        # Output queue will be used to get the rgb frames from the output defined above
        q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
        stream_name = "rgb-" + mxid + "-" + device_type
        q_rgb_list.append((q_rgb, stream_name))
    # Round-robin poll each device's queue and display the newest frame.
    while True:
        for q_rgb, stream_name in q_rgb_list:
            in_rgb = q_rgb.tryGet()
            if in_rgb is not None:
                cv2.imshow(stream_name, in_rgb.getCvFrame())
        # Pressing 'q' exits the viewer (checked once per polling round).
        if cv2.waitKey(1) == ord('q'):
            break
import cv2
import depthai as dai
import contextlib
# This can be customized to pass multiple parameters
def getPipeline(device_type):
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
# For the demo, just set a larger RGB preview size for OAK-D
if device_type.startswith("OAK-D"):
cam_rgb.setPreviewSize(600, 300)
else:
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam_rgb.setInterleaved(False)
# Create output
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
return pipeline
q_rgb_list = []
# https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
with contextlib.ExitStack() as stack:
device_infos = dai.Device.getAllAvailableDevices()
if len(device_infos) == 0:
raise RuntimeError("No devices found!")
else:
print("Found", len(device_infos), "devices")
for device_info in device_infos:
# Note: the pipeline isn't set here, as we don't know yet what device it is.
# The extra arguments passed are required by the existing overload variants
openvino_version = dai.OpenVINO.Version.VERSION_2021_4
usb2_mode = False
device = stack.enter_context(dai.Device(openvino_version, device_info, usb2_mode))
# Note: currently on POE, DeviceInfo.getMxId() and Device.getMxId() are different!
print("=== Connected to " + device_info.getMxId())
mxid = device.getMxId()
cameras = device.getConnectedCameras()
usb_speed = device.getUsbSpeed()
print(" >>> MXID:", mxid)
print(" >>> Cameras:", *[c.name for c in cameras])
print(" >>> USB speed:", usb_speed.name)
device_type = "unknown"
if len(cameras) == 1: device_type = "OAK-1"
elif len(cameras) == 3: device_type = "OAK-D"
# If USB speed is UNKNOWN, assume it's a POE device
if usb_speed == dai.UsbSpeed.UNKNOWN: device_type += "-POE"
# Get a customized pipeline based on identified device type
pipeline = getPipeline(device_type)
print(" >>> Loading pipeline for:", device_type)
device.startPipeline(pipeline)
# Output queue will be used to get the rgb frames from the output defined above
q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
stream_name = "rgb-" + mxid + "-" + device_type
q_rgb_list.append((q_rgb, stream_name))
while True:
for q_rgb, stream_name in q_rgb_list:
in_rgb = q_rgb.tryGet()
if in_rgb is not None:
cv2.imshow(stream_name, in_rgb.getCvFrame())
if cv2.waitKey(1) == ord('q'):
break | 0.587588 | 0.181553 |
import torch
from tqdm import tqdm, trange
import torch.nn.functional as F
from torch.autograd import Variable
from denseGCNConv import DenseGCNConv
from torch.nn import Linear, BatchNorm1d
from layers import SecondaryCapsuleLayer, firstCapsuleLayer, ReconstructionNet
class CapsGNN(torch.nn.Module):
    """
    Capsule graph neural network with adjacency-reconstruction regularization.

    Pipeline: primary capsules over node features -> hidden (graph) capsules
    -> class capsules; a reconstruction net rebuilds the adjacency matrix,
    whose cross-entropy against the input adjacency acts as an auxiliary loss.
    NOTE: the loss/forward paths call ``.cuda()`` / ``.to('cuda')`` directly,
    so the model only runs on a CUDA device as written.
    """
    def __init__(self, args, number_of_features, number_of_targets, max_node_num):
        super(CapsGNN, self).__init__()
        # args carries capsule_dimensions, capsule_num, num_iterations,
        # disentangle_num and dropout (see the _setup_* methods below).
        self.args = args
        self.number_of_features = number_of_features
        self.number_of_targets = number_of_targets
        self.max_node_num = max_node_num
        self._setup_layers()
    def _setup_firstCapsuleLayer(self):
        """Build the primary capsule layer that embeds raw node features."""
        self.first_capsule = firstCapsuleLayer(number_of_features=self.number_of_features,
                                               max_node_num=self.max_node_num,
                                               capsule_dimensions=self.args.capsule_dimensions,
                                               disentangle_num=self.args.disentangle_num,
                                               dropout=self.args.dropout)
    def _setup_hidden_capsules(self):
        """Build the hidden capsule layer routing node capsules to graph capsules."""
        self.hidden_capsule = SecondaryCapsuleLayer(num_iterations=self.args.num_iterations,
                                                    num_routes=self.max_node_num,
                                                    num_capsules=self.args.capsule_num,
                                                    in_channels=self.args.capsule_dimensions,
                                                    out_channels=self.args.capsule_dimensions,
                                                    dropout=self.args.dropout)
    def _setup_class_capsule(self):
        """Build the class capsule layer (one capsule per target class)."""
        self.class_capsule = SecondaryCapsuleLayer(num_iterations=self.args.num_iterations,
                                                   num_routes=self.args.capsule_num,
                                                   num_capsules=self.number_of_targets,
                                                   in_channels=self.args.capsule_dimensions,
                                                   out_channels=self.args.capsule_dimensions,
                                                   dropout=self.args.dropout)
    def _setup_reconstructNet(self):
        """Build the network that reconstructs the adjacency matrix."""
        self.recons_net = ReconstructionNet(n_dim=self.args.capsule_dimensions,
                                            n_classes=self.number_of_targets,
                                            hidden=self.args.capsule_dimensions)
    def _setup_layers(self):
        """Instantiate all sub-modules."""
        self._setup_firstCapsuleLayer()
        self._setup_hidden_capsules()
        self._setup_class_capsule()
        self._setup_reconstructNet()
    def cal_recons_loss(self, pred_adj, adj, mask=None):
        """
        Masked binary cross-entropy between predicted and true adjacency.

        pred_adj/adj are batched square matrices; *mask* (if given) marks the
        valid nodes per graph and restricts both the loss entries and the
        normalization count to real node pairs.
        """
        eps = 1e-7
        # Each entry in pred_adj cannot larger than 1
        pred_adj = torch.min(pred_adj, torch.ones(1, dtype=pred_adj.dtype).cuda())
        # The diagonal entries in pred_adj should be 0
        pred_adj = pred_adj.masked_fill_(torch.eye(adj.size(1), adj.size(1)).bool().to('cuda'), 0)
        # Cross entropy loss
        link_loss = -adj * torch.log(pred_adj+eps) - (1-adj) * torch.log(1-pred_adj+eps)
        if mask is not None:
            # Valid entries per graph = (number of real nodes)^2.
            num_entries = torch.sum(torch.sum(mask, dim=1) ** 2)
            # Outer product of the node mask -> pairwise validity matrix.
            adj_mask = mask.unsqueeze(2).float() @ torch.transpose(mask.unsqueeze(2).float(), 1, 2)
            link_loss[(1-adj_mask).bool()] = 0.0
        else:
            num_entries = pred_adj.size(0) * pred_adj.size(1) * pred_adj.size(2)
        link_loss = torch.sum(link_loss) / float(num_entries)
        return link_loss
    def forward(self, x, adj_in, mask, batch, y):
        """
        Run the capsule pipeline and return (class_logits, reconstruction_loss).

        Returns class capsule lengths of shape (batch, number_of_targets).
        NOTE(review): assumes x is batched as (batch, nodes, features) given
        the size(0)/view calls below — confirm against the data loader.
        """
        batch_size = x.size(0)
        out = self.first_capsule(x, adj_in, mask, batch)
        # Kept for the reconstruction network input.
        residual = out
        out, c_ij, adj = self.hidden_capsule(out, adj_in, mask)
        out = out.view(batch_size, -1, self.args.capsule_dimensions)
        # Clamp the induced adjacency to [0, 1] and zero its diagonal before
        # the class-capsule routing (mirrors cal_recons_loss).
        adj = torch.min(adj, torch.ones(1, dtype=adj.dtype).cuda())
        adj = adj.masked_fill_(torch.eye(adj.size(1), adj.size(1)).bool().to('cuda'), 0)
        out, c_ij, adj = self.class_capsule(out, adj)
        out = out.squeeze(4).squeeze(1)
        recons_out = self.recons_net(residual, out, y)  # reconstructed adjacency matrix
        recon_loss = self.cal_recons_loss(recons_out, adj_in, mask)
        # Capsule length per class = class score.
        out = (torch.sqrt((out ** 2).sum(2))).view(batch_size, self.number_of_targets)
        return out, recon_loss
from tqdm import tqdm, trange
import torch.nn.functional as F
from torch.autograd import Variable
from denseGCNConv import DenseGCNConv
from torch.nn import Linear, BatchNorm1d
from layers import SecondaryCapsuleLayer, firstCapsuleLayer, ReconstructionNet
class CapsGNN(torch.nn.Module):
def __init__(self, args, number_of_features, number_of_targets, max_node_num):
super(CapsGNN, self).__init__()
self.args = args
self.number_of_features = number_of_features
self.number_of_targets = number_of_targets
self.max_node_num = max_node_num
self._setup_layers()
def _setup_firstCapsuleLayer(self):
self.first_capsule = firstCapsuleLayer(number_of_features=self.number_of_features,
max_node_num=self.max_node_num,
capsule_dimensions=self.args.capsule_dimensions,
disentangle_num=self.args.disentangle_num,
dropout=self.args.dropout)
def _setup_hidden_capsules(self):
self.hidden_capsule = SecondaryCapsuleLayer(num_iterations=self.args.num_iterations,
num_routes=self.max_node_num,
num_capsules=self.args.capsule_num,
in_channels=self.args.capsule_dimensions,
out_channels=self.args.capsule_dimensions,
dropout=self.args.dropout)
def _setup_class_capsule(self):
self.class_capsule = SecondaryCapsuleLayer(num_iterations=self.args.num_iterations,
num_routes=self.args.capsule_num,
num_capsules=self.number_of_targets,
in_channels=self.args.capsule_dimensions,
out_channels=self.args.capsule_dimensions,
dropout=self.args.dropout)
def _setup_reconstructNet(self):
self.recons_net = ReconstructionNet(n_dim=self.args.capsule_dimensions,
n_classes=self.number_of_targets,
hidden=self.args.capsule_dimensions)
def _setup_layers(self):
self._setup_firstCapsuleLayer()
self._setup_hidden_capsules()
self._setup_class_capsule()
self._setup_reconstructNet()
def cal_recons_loss(self, pred_adj, adj, mask=None):
eps = 1e-7
# Each entry in pred_adj cannot larger than 1
pred_adj = torch.min(pred_adj, torch.ones(1, dtype=pred_adj.dtype).cuda())
# The diagonal entries in pred_adj should be 0
pred_adj = pred_adj.masked_fill_(torch.eye(adj.size(1), adj.size(1)).bool().to('cuda'), 0)
# Cross entropy loss
link_loss = -adj * torch.log(pred_adj+eps) - (1-adj) * torch.log(1-pred_adj+eps)
if mask is not None:
num_entries = torch.sum(torch.sum(mask, dim=1) ** 2)
adj_mask = mask.unsqueeze(2).float() @ torch.transpose(mask.unsqueeze(2).float(), 1, 2)
link_loss[(1-adj_mask).bool()] = 0.0
else:
num_entries = pred_adj.size(0) * pred_adj.size(1) * pred_adj.size(2)
link_loss = torch.sum(link_loss) / float(num_entries)
return link_loss
def forward(self, x, adj_in, mask, batch, y):
    """Run the capsule pipeline and return class scores plus reconstruction loss.

    Args:
        x: node feature tensor; first dimension is the batch size.
        adj_in: input adjacency matrices.
        mask: node mask (1 = real node, 0 = padding).
        batch: batch assignment passed through to the first capsule layer.
        y: labels, consumed by the reconstruction network.

    Returns:
        Tuple of (class scores, shape (batch, number_of_targets);
        scalar reconstruction loss).
    """
    batch_size = x.size(0)
    out = self.first_capsule(x, adj_in, mask, batch)
    residual = out  # kept for the reconstruction branch below
    out, c_ij, adj = self.hidden_capsule(out, adj_in, mask)
    out = out.view(batch_size, -1, self.args.capsule_dimensions)
    # Clamp the learned adjacency to <= 1 and remove self-loops.
    # NOTE(review): device is hard-coded to CUDA here — fails on CPU; confirm intended.
    adj = torch.min(adj, torch.ones(1, dtype=adj.dtype).cuda())
    adj = adj.masked_fill_(torch.eye(adj.size(1), adj.size(1)).bool().to('cuda'), 0)
    out, c_ij, adj = self.class_capsule(out, adj)
    out = out.squeeze(4).squeeze(1)
    recons_out = self.recons_net(residual, out, y) # reconstructed adjacency matrix
    recon_loss = self.cal_recons_loss(recons_out, adj_in, mask)
    # Class score = L2 norm (length) of each class capsule.
    out = (torch.sqrt((out ** 2).sum(2))).view(batch_size, self.number_of_targets)
    return out, recon_loss | 0.903741 | 0.415729  # trailing "| ..." is dataset-row residue, not code
import argparse
import atexit
import pkg_resources # part of setuptools
from winnaker.models import *
from winnaker.notify import *
from winnaker.settings import *
def main():
    """Winnaker CLI entry point.

    Parses command-line flags, configures file + console logging, optionally
    starts a virtual display, then drives Spinnaker: login, pipeline lookup,
    stage screenshots and (optionally) a manual pipeline execution.
    """
    print("""
 ____ __ ____ __ .__ __. .__ __. ___ __ ___ _______ .______
\ \ / \ / / | | | \ | | | \ | | / \ | |/ / | ____|| _ \\
\ \/ \/ / | | | \| | | \| | / ^ \ | ' / | |__ | |_) |
\ / | | | . ` | | . ` | / /_\ \ | < | __| | /
\ /\ / | | | |\ | | |\ | / _____ \ | . \ | |____ | |\ \----.
\__/ \__/ |__| |__| \__| |__| \__| /__/ \__\ |__|\__\ |_______|| _| `._____|
""")
    # --- Command-line interface ---
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--start",
        help="starts manual execution of the pipeline",
        action="store_true")
    parser.add_argument(
        "-fb",
        "--forcebake",
        help="force bake, to be used wth --start ",
        action="store_true")
    parser.add_argument(
        "-a",
        "--app",
        type=str,
        help="the name of application to look for",
        default=cfg_app_name)
    parser.add_argument(
        "-p",
        "--pipeline",
        type=str,
        help="the name of pipline to test",
        # NOTE(review): raises KeyError at startup when the env var is unset —
        # confirm that this hard failure is intended.
        default=os.environ["WINNAKER_PIPELINE_NAME"])
    parser.add_argument(
        "-nl", "--nologin",
        help="will not attempt to login",
        action="store_true")
    parser.add_argument(
        "-oa", "--authorize",
        help="authorize the oauth application with the logged in user if required. " +
        "This argument and '--nologin' are mutually exclusive",
        action="store_true"
    )
    parser.add_argument(
        "-nlb",
        "--nolastbuild",
        help="will not attempt to check last build status or stages",
        action="store_true")
    parser.add_argument(
        "-hl",
        "--headless",
        help="will run in an xfvb display ",
        action="store_true")
    parser.add_argument(
        "-v",
        "--verbose",
        help="print more logs, DEBUG level",
        action="store_true")
    args = parser.parse_args()
    # Logging setup
    if args.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)s] %(message)s")
    rootLogger = logging.getLogger()
    rootLogger.setLevel(log_level)
    # Mirror every log record both to a file and to stdout.
    fileHandler = logging.FileHandler(
        join(cfg_output_files_path, "winnaker.log"))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    version = pkg_resources.require("winnaker")[0].version
    logging.info("Winnaker Version: {}".format(version))
    logging.info("Current Config: {}".format(args))
    if not os.path.exists(cfg_output_files_path):
        os.makedirs(cfg_output_files_path)
    # E-mail collected screenshots on process exit when SMTP is configured.
    if cfg_email_smtp and cfg_email_to and cfg_email_from:
        atexit.register(send_mail, cfg_email_from, cfg_email_to, "Winnaker Screenshots " +
                        str(datetime.utcnow()), "Here are the screenshots of the spinnaker's last run at " +
                        str(datetime.utcnow()) +
                        " UTC Time", server=cfg_email_smtp)
    if args.headless:
        logging.debug("Starting virtual display")
        from pyvirtualdisplay import Display
        display = Display(visible=0, size=(2560, 1440))
        display.start()
        logging.debug("Started virtual display")
    s = Spinnaker()
    if not args.nologin:
        logging.debug("Starting login")
        s.login()
    if args.authorize:
        s.authorize()
    s.get_pipeline(args.app, args.pipeline)
    if not args.nolastbuild:
        logging.info(
            "- Last build status: {}".format(s.get_last_build().status.encode('utf-8')))
        logging.info("- Screenshot Stages")
        logging.info("- Current working directory: {}".format(os.getcwd()))
        s.get_stages()
    if args.start:
        logging.debug("Going into start block")
        s.start_manual_execution(force_bake=args.forcebake)
    if args.headless:
        logging.debug("Stopping virtualdisplay")
        display.stop()
        logging.debug("virtualdisplay stopped")
if __name__ == "__main__":
main() | winnaker/main.py | import argparse
import atexit
import pkg_resources # part of setuptools
from winnaker.models import *
from winnaker.notify import *
from winnaker.settings import *
def main():
    """Winnaker CLI entry point.

    Parses command-line flags, configures file + console logging, optionally
    starts a virtual display, then drives Spinnaker: login, pipeline lookup,
    stage screenshots and (optionally) a manual pipeline execution.
    """
    print("""
 ____ __ ____ __ .__ __. .__ __. ___ __ ___ _______ .______
\ \ / \ / / | | | \ | | | \ | | / \ | |/ / | ____|| _ \\
\ \/ \/ / | | | \| | | \| | / ^ \ | ' / | |__ | |_) |
\ / | | | . ` | | . ` | / /_\ \ | < | __| | /
\ /\ / | | | |\ | | |\ | / _____ \ | . \ | |____ | |\ \----.
\__/ \__/ |__| |__| \__| |__| \__| /__/ \__\ |__|\__\ |_______|| _| `._____|
""")
    # --- Command-line interface ---
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--start",
        help="starts manual execution of the pipeline",
        action="store_true")
    parser.add_argument(
        "-fb",
        "--forcebake",
        help="force bake, to be used wth --start ",
        action="store_true")
    parser.add_argument(
        "-a",
        "--app",
        type=str,
        help="the name of application to look for",
        default=cfg_app_name)
    parser.add_argument(
        "-p",
        "--pipeline",
        type=str,
        help="the name of pipline to test",
        # NOTE(review): raises KeyError at startup when the env var is unset —
        # confirm that this hard failure is intended.
        default=os.environ["WINNAKER_PIPELINE_NAME"])
    parser.add_argument(
        "-nl", "--nologin",
        help="will not attempt to login",
        action="store_true")
    parser.add_argument(
        "-oa", "--authorize",
        help="authorize the oauth application with the logged in user if required. " +
        "This argument and '--nologin' are mutually exclusive",
        action="store_true"
    )
    parser.add_argument(
        "-nlb",
        "--nolastbuild",
        help="will not attempt to check last build status or stages",
        action="store_true")
    parser.add_argument(
        "-hl",
        "--headless",
        help="will run in an xfvb display ",
        action="store_true")
    parser.add_argument(
        "-v",
        "--verbose",
        help="print more logs, DEBUG level",
        action="store_true")
    args = parser.parse_args()
    # Logging setup
    if args.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)s] %(message)s")
    rootLogger = logging.getLogger()
    rootLogger.setLevel(log_level)
    # Mirror every log record both to a file and to stdout.
    fileHandler = logging.FileHandler(
        join(cfg_output_files_path, "winnaker.log"))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    version = pkg_resources.require("winnaker")[0].version
    logging.info("Winnaker Version: {}".format(version))
    logging.info("Current Config: {}".format(args))
    if not os.path.exists(cfg_output_files_path):
        os.makedirs(cfg_output_files_path)
    # E-mail collected screenshots on process exit when SMTP is configured.
    if cfg_email_smtp and cfg_email_to and cfg_email_from:
        atexit.register(send_mail, cfg_email_from, cfg_email_to, "Winnaker Screenshots " +
                        str(datetime.utcnow()), "Here are the screenshots of the spinnaker's last run at " +
                        str(datetime.utcnow()) +
                        " UTC Time", server=cfg_email_smtp)
    if args.headless:
        logging.debug("Starting virtual display")
        from pyvirtualdisplay import Display
        display = Display(visible=0, size=(2560, 1440))
        display.start()
        logging.debug("Started virtual display")
    s = Spinnaker()
    if not args.nologin:
        logging.debug("Starting login")
        s.login()
    if args.authorize:
        s.authorize()
    s.get_pipeline(args.app, args.pipeline)
    if not args.nolastbuild:
        logging.info(
            "- Last build status: {}".format(s.get_last_build().status.encode('utf-8')))
        logging.info("- Screenshot Stages")
        logging.info("- Current working directory: {}".format(os.getcwd()))
        s.get_stages()
    if args.start:
        logging.debug("Going into start block")
        s.start_manual_execution(force_bake=args.forcebake)
    if args.headless:
        logging.debug("Stopping virtualdisplay")
        display.stop()
        logging.debug("virtualdisplay stopped")
if __name__ == "__main__":
main() | 0.31279 | 0.091463 |
import os
import argparse
# Creates a new metricset with all the necessary file
# In case the module does not exist, also the module is created
def generate_metricset(base_path, metricbeat_path, module, metricset):
    """Create a new metricset skeleton (creating its module first, if missing).

    Renders the metricset templates shipped with metricbeat into
    <base_path>/module/<module>/<metricset>. Prints a notice and returns
    without touching anything when the metricset directory already exists.

    :param base_path: beat root directory
    :param metricbeat_path: path to the metricbeat tree holding the templates
    :param module: module name
    :param metricset: metricset name
    """
    generate_module(base_path, metricbeat_path, module, metricset)
    # os.path.join instead of manual "/" concatenation for portability.
    metricset_path = os.path.join(base_path, "module", module, metricset)
    meta_path = os.path.join(metricset_path, "_meta")
    if os.path.isdir(metricset_path):
        print("Metricset already exists. Skipping creating metricset {}"
              .format(metricset))
        return
    os.makedirs(meta_path)
    templates = os.path.join(metricbeat_path, "scripts", "module", "metricset")
    # (template file, destination) pairs, rendered in the original order.
    outputs = [
        ("metricset.go.tmpl", os.path.join(metricset_path, metricset + ".go")),
        ("fields.yml", os.path.join(meta_path, "fields.yml")),
        ("docs.asciidoc", os.path.join(meta_path, "docs.asciidoc")),
        ("data.json", os.path.join(meta_path, "data.json")),
    ]
    for template, dest in outputs:
        content = load_file(os.path.join(templates, template), module, metricset)
        with open(dest, "w") as f:
            f.write(content)
    print("Metricset {} created.".format(metricset))
def generate_module(base_path, metricbeat_path, module, metricset):
    """Create a new module skeleton under <base_path>/module/<module>.

    Prints a notice and returns without touching anything when the module
    directory already exists.

    :param base_path: beat root directory
    :param metricbeat_path: path to the metricbeat tree holding the templates
    :param module: module name
    :param metricset: metricset name (only substituted into config.yml)
    """
    # os.path.join instead of manual "/" concatenation for portability.
    module_path = os.path.join(base_path, "module", module)
    meta_path = os.path.join(module_path, "_meta")
    if os.path.isdir(module_path):
        print("Module already exists. Skipping creating module {}"
              .format(module))
        return
    os.makedirs(meta_path)
    templates = os.path.join(metricbeat_path, "scripts", "module")
    # (template file, destination, metricset substitution) in original order;
    # only config.yml receives the metricset name.
    outputs = [
        ("fields.yml", os.path.join(meta_path, "fields.yml"), ""),
        ("docs.asciidoc", os.path.join(meta_path, "docs.asciidoc"), ""),
        ("config.yml", os.path.join(meta_path, "config.yml"), metricset),
        ("doc.go.tmpl", os.path.join(module_path, "doc.go"), ""),
    ]
    for template, dest, ms in outputs:
        content = load_file(os.path.join(templates, template), module, ms)
        with open(dest, "w") as f:
            f.write(content)
    print("Module {} created.".format(module))
def load_file(file, module, metricset):
    """Read a template file and substitute the {module} / {metricset} markers.

    :param file: path to the template file
    :param module: replacement for the literal marker "{module}"
    :param metricset: replacement for the literal marker "{metricset}"
    :return: template contents with both markers replaced
    """
    # No need to pre-initialise content: the with-block either binds it or raises.
    with open(file) as f:
        content = f.read()
    return content.replace("{module}", module).replace("{metricset}",
                                                       metricset)
if __name__ == "__main__":
    # CLI wrapper: collect module/metricset names (flags or interactive
    # prompt) and generate the skeleton files.
    parser = argparse.ArgumentParser(description="Creates a metricset")
    parser.add_argument("--module", help="Module name")
    parser.add_argument("--metricset", help="Metricset name")
    parser.add_argument("--path", help="Beat path")
    parser.add_argument("--es_beats",
                        help="The path to the general beats folder")
    args = parser.parse_args()
    # Fall back to paths relative to the current directory.
    if args.path is None:
        args.path = './'
        print("Set default path for beat path: " + args.path)
    if args.es_beats is None:
        args.es_beats = '../'
        print("Set default path for es_beats path: " + args.es_beats)
    # NOTE(review): raw_input is Python 2 only — this script targets py2.
    if args.module is None or args.module == '':
        args.module = raw_input("Module name: ")
    if args.metricset is None or args.metricset == '':
        args.metricset = raw_input("Metricset name: ")
    path = os.path.abspath(args.path)
    metricbeat_path = os.path.abspath(args.es_beats + "/metricbeat")
    generate_metricset(path, metricbeat_path, args.module.lower(),
                       args.metricset.lower()) | vendor/github.com/elastic/beats/metricbeat/scripts/create_metricset.py | import os  # trailing "| ..." is dataset-row residue, not code
import argparse
# Creates a new metricset with all the necessary file
# In case the module does not exist, also the module is created
def generate_metricset(base_path, metricbeat_path, module, metricset):
    """Create a new metricset skeleton (creating its module first, if missing).

    Renders the metricset templates shipped with metricbeat into
    <base_path>/module/<module>/<metricset>. Prints a notice and returns
    without touching anything when the metricset directory already exists.

    :param base_path: beat root directory
    :param metricbeat_path: path to the metricbeat tree holding the templates
    :param module: module name
    :param metricset: metricset name
    """
    generate_module(base_path, metricbeat_path, module, metricset)
    # os.path.join instead of manual "/" concatenation for portability.
    metricset_path = os.path.join(base_path, "module", module, metricset)
    meta_path = os.path.join(metricset_path, "_meta")
    if os.path.isdir(metricset_path):
        print("Metricset already exists. Skipping creating metricset {}"
              .format(metricset))
        return
    os.makedirs(meta_path)
    templates = os.path.join(metricbeat_path, "scripts", "module", "metricset")
    # (template file, destination) pairs, rendered in the original order.
    outputs = [
        ("metricset.go.tmpl", os.path.join(metricset_path, metricset + ".go")),
        ("fields.yml", os.path.join(meta_path, "fields.yml")),
        ("docs.asciidoc", os.path.join(meta_path, "docs.asciidoc")),
        ("data.json", os.path.join(meta_path, "data.json")),
    ]
    for template, dest in outputs:
        content = load_file(os.path.join(templates, template), module, metricset)
        with open(dest, "w") as f:
            f.write(content)
    print("Metricset {} created.".format(metricset))
def generate_module(base_path, metricbeat_path, module, metricset):
    """Create a new module skeleton under <base_path>/module/<module>.

    Prints a notice and returns without touching anything when the module
    directory already exists.

    :param base_path: beat root directory
    :param metricbeat_path: path to the metricbeat tree holding the templates
    :param module: module name
    :param metricset: metricset name (only substituted into config.yml)
    """
    # os.path.join instead of manual "/" concatenation for portability.
    module_path = os.path.join(base_path, "module", module)
    meta_path = os.path.join(module_path, "_meta")
    if os.path.isdir(module_path):
        print("Module already exists. Skipping creating module {}"
              .format(module))
        return
    os.makedirs(meta_path)
    templates = os.path.join(metricbeat_path, "scripts", "module")
    # (template file, destination, metricset substitution) in original order;
    # only config.yml receives the metricset name.
    outputs = [
        ("fields.yml", os.path.join(meta_path, "fields.yml"), ""),
        ("docs.asciidoc", os.path.join(meta_path, "docs.asciidoc"), ""),
        ("config.yml", os.path.join(meta_path, "config.yml"), metricset),
        ("doc.go.tmpl", os.path.join(module_path, "doc.go"), ""),
    ]
    for template, dest, ms in outputs:
        content = load_file(os.path.join(templates, template), module, ms)
        with open(dest, "w") as f:
            f.write(content)
    print("Module {} created.".format(module))
def load_file(file, module, metricset):
    """Read a template file and substitute the {module} / {metricset} markers.

    :param file: path to the template file
    :param module: replacement for the literal marker "{module}"
    :param metricset: replacement for the literal marker "{metricset}"
    :return: template contents with both markers replaced
    """
    # No need to pre-initialise content: the with-block either binds it or raises.
    with open(file) as f:
        content = f.read()
    return content.replace("{module}", module).replace("{metricset}",
                                                       metricset)
if __name__ == "__main__":
    # CLI wrapper: collect module/metricset names (flags or interactive
    # prompt) and generate the skeleton files.
    parser = argparse.ArgumentParser(description="Creates a metricset")
    parser.add_argument("--module", help="Module name")
    parser.add_argument("--metricset", help="Metricset name")
    parser.add_argument("--path", help="Beat path")
    parser.add_argument("--es_beats",
                        help="The path to the general beats folder")
    args = parser.parse_args()
    # Fall back to paths relative to the current directory.
    if args.path is None:
        args.path = './'
        print("Set default path for beat path: " + args.path)
    if args.es_beats is None:
        args.es_beats = '../'
        print("Set default path for es_beats path: " + args.es_beats)
    # NOTE(review): raw_input is Python 2 only — this script targets py2.
    if args.module is None or args.module == '':
        args.module = raw_input("Module name: ")
    if args.metricset is None or args.metricset == '':
        args.metricset = raw_input("Metricset name: ")
    path = os.path.abspath(args.path)
    metricbeat_path = os.path.abspath(args.es_beats + "/metricbeat")
    generate_metricset(path, metricbeat_path, args.module.lower(),
                       args.metricset.lower()) | 0.547222 | 0.104889  # trailing "| ..." is dataset-row residue, not code
import numpy, theano, unittest
from theano.compile.pfunc import pfunc
from theano.compile.sharedvalue import shared
from theano import tensor
from theano.tensor.nnet import sigmoid
class NNet(object):
    """One-hidden-layer perceptron expressed as a Theano symbolic graph.

    Builds a sigmoid hidden layer and a linear output layer over zero-
    initialised weights, then compiles three functions:
      sgd_step(input, target) -> (output, cost), updating w1/w2 by SGD;
      compute_output(input) -> output;
      output_from_hidden(hidden) -> output.
    """
    def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        # NOTE(review): the symbolic default arguments are module-level
        # objects shared by every instance built without explicit variables.
        super(NNet, self).__init__(**kw)
        self.input = input
        self.target = target
        self.lr = shared(lr, 'learning_rate')
        # Weight matrices: (hidden x input) and (output x hidden), all zeros.
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type
        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        # Sum-of-squares error against the target vector.
        self.cost = tensor.sum((self.output - self.target)**2)
        # Plain gradient-descent update rules for both weight matrices.
        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}
        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)
        self.compute_output = pfunc([self.input], self.output)
        self.output_from_hidden = pfunc([self.hidden], self.output)
class TestNnet(unittest.TestCase):
    """Regression test: three SGD epochs on seeded random data must reproduce
    a known mean cost."""
    def test_nnet(self):
        rng = numpy.random.RandomState(1827)  # fixed seed -> deterministic data
        data = rng.rand(10, 4)
        nnet = NNet(n_input=3, n_hidden=10)
        for epoch in range(3):
            mean_cost = 0
            for x in data:
                input = x[0:3]   # first three columns: features
                target = x[3:]   # last column: regression target
                output, cost = nnet.sgd_step(input, target)
                mean_cost += cost
            mean_cost /= float(len(data))
            # print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
        self.assertTrue(abs(mean_cost - 0.20588975452) < 1e-6)
        # Just call functions to make sure they do not crash.
        out = nnet.compute_output(input)
        out = nnet.output_from_hidden(numpy.ones(10)) | theano/compile/tests/test_misc.py | import numpy, theano, unittest  # trailing "| ..." is dataset-row residue, not code
from theano.compile.pfunc import pfunc
from theano.compile.sharedvalue import shared
from theano import tensor
from theano.tensor.nnet import sigmoid
class NNet(object):
def __init__(self,
input=tensor.dvector('input'),
target=tensor.dvector('target'),
n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
super(NNet, self).__init__(**kw)
self.input = input
self.target = target
self.lr = shared(lr, 'learning_rate')
self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
# print self.lr.type
self.hidden = sigmoid(tensor.dot(self.w1, self.input))
self.output = tensor.dot(self.w2, self.hidden)
self.cost = tensor.sum((self.output - self.target)**2)
self.sgd_updates = {
self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}
self.sgd_step = pfunc(
params=[self.input, self.target],
outputs=[self.output, self.cost],
updates=self.sgd_updates)
self.compute_output = pfunc([self.input], self.output)
self.output_from_hidden = pfunc([self.hidden], self.output)
class TestNnet(unittest.TestCase):
def test_nnet(self):
rng = numpy.random.RandomState(1827)
data = rng.rand(10, 4)
nnet = NNet(n_input=3, n_hidden=10)
for epoch in range(3):
mean_cost = 0
for x in data:
input = x[0:3]
target = x[3:]
output, cost = nnet.sgd_step(input, target)
mean_cost += cost
mean_cost /= float(len(data))
# print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
self.assertTrue(abs(mean_cost - 0.20588975452) < 1e-6)
# Just call functions to make sure they do not crash.
out = nnet.compute_output(input)
out = nnet.output_from_hidden(numpy.ones(10)) | 0.497803 | 0.34679 |
from __future__ import division
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.resnet import ResBlock
from chainercv.links import PickableSequentialChain
from chainercv import utils
# RGB order
# This is channel wise mean of mean image distributed at
# https://github.com/KaimingHe/deep-residual-networks
_imagenet_mean = np.array(
[123.15163084, 115.90288257, 103.0626238],
dtype=np.float32)[:, np.newaxis, np.newaxis]
class SEResNeXt(PickableSequentialChain):
    """Base class for SE-ResNeXt architecture.
    ResNeXt is a ResNet-based architecture, where grouped convolution is
    adopted to the second convolution layer of each bottleneck block.
    In addition, a squeeze-and-excitation block is applied at the end of
    each non-identity branch of residual block. Please refer to `Aggregated
    Residual Transformations for Deep Neural Networks
    <https://arxiv.org/pdf/1611.05431.pdf>`_ and `Squeeze-and-Excitation
    Networks <https://arxiv.org/pdf/1709.01507.pdf>`_ for detailed
    description of network architecture.
    Similar to :class:`chainercv.links.model.resnet.ResNet`, ImageNet
    pretrained weights are downloaded when :obj:`pretrained_model` argument
    is :obj:`imagenet`, originally distributed at `the Github repository by
    one of the paper authors of SENet <https://github.com/hujie-frank/SENet>`_.
    .. seealso::
        :class:`chainercv.links.model.resnet.ResNet`
        :class:`chainercv.links.model.senet.SEResNet`
        :class:`chainercv.links.connection.SEBlock`
    Args:
        n_layer (int): The number of layers.
        n_class (int): The number of classes. If :obj:`None`,
            the default values are used.
            If a supported pretrained model is used,
            the number of classes used to train the pretrained model
            is used. Otherwise, the number of classes in ILSVRC 2012 dataset
            is used.
        pretrained_model (string): The destination of the pre-trained
            chainer model serialized as a :obj:`.npz` file.
            If this is one of the strings described
            above, it automatically loads weights stored under a directory
            :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
            where :obj:`$CHAINER_DATASET_ROOT` is set as
            :obj:`$HOME/.chainer/dataset` unless you specify another value
            by modifying the environment variable.
        mean (numpy.ndarray): A mean value. If :obj:`None`,
            the default values are used.
            If a supported pretrained model is used,
            the mean value used to train the pretrained model is used.
            Otherwise, the mean value calculated from ILSVRC 2012 dataset
            is used.
        initialW (callable): Initializer for the weights of
            convolution kernels.
        fc_kwargs (dict): Keyword arguments passed to initialize
            the :class:`chainer.links.Linear`. Defaults to an empty dict
            when :obj:`None`.
    """
    _blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    _models = {
        50: {
            'imagenet': {
                'param': {'n_class': 1000, 'mean': _imagenet_mean},
                'overwritable': {'mean'},
                'url': 'https://chainercv-models.preferred.jp/'
                'se_resnext50_imagenet_converted_2018_06_28.npz'
            },
        },
        101: {
            'imagenet': {
                'param': {'n_class': 1000, 'mean': _imagenet_mean},
                'overwritable': {'mean'},
                'url': 'https://chainercv-models.preferred.jp/'
                'se_resnext101_imagenet_converted_2018_06_28.npz'
            },
        },
    }
    def __init__(self, n_layer,
                 n_class=None,
                 pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs=None):
        # fc_kwargs defaults to None rather than {}: this method writes into
        # fc_kwargs below, so a mutable default dict would be shared — and
        # polluted — across every instantiation. The caller's dict is copied
        # for the same reason.
        fc_kwargs = {} if fc_kwargs is None else dict(fc_kwargs)
        blocks = self._blocks[n_layer]
        param, path = utils.prepare_pretrained_model(
            {'n_class': n_class, 'mean': mean},
            pretrained_model, self._models[n_layer],
            {'n_class': 1000, 'mean': _imagenet_mean})
        self.mean = param['mean']
        if initialW is None:
            initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
        if 'initialW' not in fc_kwargs:
            fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            initialW = initializers.constant.Zero()
            fc_kwargs['initialW'] = initializers.constant.Zero()
        # Shared settings for every residual stage: 32-way grouped conv with
        # an SE block on each non-identity branch.
        kwargs = {
            'groups': 32, 'initialW': initialW, 'stride_first': False,
            'add_seblock': True}
        super(SEResNeXt, self).__init__()
        with self.init_scope():
            self.conv1 = Conv2DBNActiv(None, 64, 7, 2, 3, nobias=True,
                                       initialW=initialW)
            self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)
            self.res2 = ResBlock(blocks[0], None, 128, 256, 1, **kwargs)
            self.res3 = ResBlock(blocks[1], None, 256, 512, 2, **kwargs)
            self.res4 = ResBlock(blocks[2], None, 512, 1024, 2, **kwargs)
            self.res5 = ResBlock(blocks[3], None, 1024, 2048, 2, **kwargs)
            self.pool5 = lambda x: F.average(x, axis=(2, 3))
            self.fc6 = L.Linear(None, param['n_class'], **fc_kwargs)
            self.prob = F.softmax
        if path:
            chainer.serializers.load_npz(path, self)
class SEResNeXt50(SEResNeXt):
    """SE-ResNeXt-50 Network
    Please consult the documentation for :class:`SEResNeXt`.
    .. seealso::
        :class:`chainercv.links.model.senet.SEResNeXt`
    """
    def __init__(self, n_class=None, pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs=None):
        # None (not {}) as default: the base __init__ mutates fc_kwargs, so a
        # shared mutable default would leak state between instantiations.
        super(SEResNeXt50, self).__init__(
            50, n_class, pretrained_model,
            mean, initialW, {} if fc_kwargs is None else fc_kwargs)
class SEResNeXt101(SEResNeXt):
    """SE-ResNeXt-101 Network
    Please consult the documentation for :class:`SEResNeXt`.
    .. seealso::
        :class:`chainercv.links.model.senet.SEResNeXt`
    """
    def __init__(self, n_class=None, pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs={}):
        # NOTE(review): mutable default fc_kwargs={} is shared across calls
        # and mutated by the base __init__ — should default to None; confirm.
        super(SEResNeXt101, self).__init__(
            101, n_class, pretrained_model,
            mean, initialW, fc_kwargs) | chainercv/links/model/senet/se_resnext.py | from __future__ import division  # trailing "| ..." is dataset-row residue, not code
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.resnet import ResBlock
from chainercv.links import PickableSequentialChain
from chainercv import utils
# RGB order
# This is channel wise mean of mean image distributed at
# https://github.com/KaimingHe/deep-residual-networks
_imagenet_mean = np.array(
[123.15163084, 115.90288257, 103.0626238],
dtype=np.float32)[:, np.newaxis, np.newaxis]
class SEResNeXt(PickableSequentialChain):
    """Base class for SE-ResNeXt architecture.
    ResNeXt is a ResNet-based architecture, where grouped convolution is
    adopted to the second convolution layer of each bottleneck block.
    In addition, a squeeze-and-excitation block is applied at the end of
    each non-identity branch of residual block. Please refer to `Aggregated
    Residual Transformations for Deep Neural Networks
    <https://arxiv.org/pdf/1611.05431.pdf>`_ and `Squeeze-and-Excitation
    Networks <https://arxiv.org/pdf/1709.01507.pdf>`_ for detailed
    description of network architecture.
    Similar to :class:`chainercv.links.model.resnet.ResNet`, ImageNet
    pretrained weights are downloaded when :obj:`pretrained_model` argument
    is :obj:`imagenet`, originally distributed at `the Github repository by
    one of the paper authors of SENet <https://github.com/hujie-frank/SENet>`_.
    .. seealso::
        :class:`chainercv.links.model.resnet.ResNet`
        :class:`chainercv.links.model.senet.SEResNet`
        :class:`chainercv.links.connection.SEBlock`
    Args:
        n_layer (int): The number of layers.
        n_class (int): The number of classes. If :obj:`None`,
            the default values are used.
            If a supported pretrained model is used,
            the number of classes used to train the pretrained model
            is used. Otherwise, the number of classes in ILSVRC 2012 dataset
            is used.
        pretrained_model (string): The destination of the pre-trained
            chainer model serialized as a :obj:`.npz` file.
            If this is one of the strings described
            above, it automatically loads weights stored under a directory
            :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
            where :obj:`$CHAINER_DATASET_ROOT` is set as
            :obj:`$HOME/.chainer/dataset` unless you specify another value
            by modifying the environment variable.
        mean (numpy.ndarray): A mean value. If :obj:`None`,
            the default values are used.
            If a supported pretrained model is used,
            the mean value used to train the pretrained model is used.
            Otherwise, the mean value calculated from ILSVRC 2012 dataset
            is used.
        initialW (callable): Initializer for the weights of
            convolution kernels.
        fc_kwargs (dict): Keyword arguments passed to initialize
            the :class:`chainer.links.Linear`. Defaults to an empty dict
            when :obj:`None`.
    """
    _blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    _models = {
        50: {
            'imagenet': {
                'param': {'n_class': 1000, 'mean': _imagenet_mean},
                'overwritable': {'mean'},
                'url': 'https://chainercv-models.preferred.jp/'
                'se_resnext50_imagenet_converted_2018_06_28.npz'
            },
        },
        101: {
            'imagenet': {
                'param': {'n_class': 1000, 'mean': _imagenet_mean},
                'overwritable': {'mean'},
                'url': 'https://chainercv-models.preferred.jp/'
                'se_resnext101_imagenet_converted_2018_06_28.npz'
            },
        },
    }
    def __init__(self, n_layer,
                 n_class=None,
                 pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs=None):
        # fc_kwargs defaults to None rather than {}: this method writes into
        # fc_kwargs below, so a mutable default dict would be shared — and
        # polluted — across every instantiation. The caller's dict is copied
        # for the same reason.
        fc_kwargs = {} if fc_kwargs is None else dict(fc_kwargs)
        blocks = self._blocks[n_layer]
        param, path = utils.prepare_pretrained_model(
            {'n_class': n_class, 'mean': mean},
            pretrained_model, self._models[n_layer],
            {'n_class': 1000, 'mean': _imagenet_mean})
        self.mean = param['mean']
        if initialW is None:
            initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
        if 'initialW' not in fc_kwargs:
            fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            initialW = initializers.constant.Zero()
            fc_kwargs['initialW'] = initializers.constant.Zero()
        # Shared settings for every residual stage: 32-way grouped conv with
        # an SE block on each non-identity branch.
        kwargs = {
            'groups': 32, 'initialW': initialW, 'stride_first': False,
            'add_seblock': True}
        super(SEResNeXt, self).__init__()
        with self.init_scope():
            self.conv1 = Conv2DBNActiv(None, 64, 7, 2, 3, nobias=True,
                                       initialW=initialW)
            self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)
            self.res2 = ResBlock(blocks[0], None, 128, 256, 1, **kwargs)
            self.res3 = ResBlock(blocks[1], None, 256, 512, 2, **kwargs)
            self.res4 = ResBlock(blocks[2], None, 512, 1024, 2, **kwargs)
            self.res5 = ResBlock(blocks[3], None, 1024, 2048, 2, **kwargs)
            self.pool5 = lambda x: F.average(x, axis=(2, 3))
            self.fc6 = L.Linear(None, param['n_class'], **fc_kwargs)
            self.prob = F.softmax
        if path:
            chainer.serializers.load_npz(path, self)
class SEResNeXt50(SEResNeXt):
    """SE-ResNeXt-50 Network
    Please consult the documentation for :class:`SEResNeXt`.
    .. seealso::
        :class:`chainercv.links.model.senet.SEResNeXt`
    """
    def __init__(self, n_class=None, pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs=None):
        # None (not {}) as default: the base __init__ mutates fc_kwargs, so a
        # shared mutable default would leak state between instantiations.
        super(SEResNeXt50, self).__init__(
            50, n_class, pretrained_model,
            mean, initialW, {} if fc_kwargs is None else fc_kwargs)
class SEResNeXt101(SEResNeXt):
    """SE-ResNeXt-101 Network
    Please consult the documentation for :class:`SEResNeXt`.
    .. seealso::
        :class:`chainercv.links.model.senet.SEResNeXt`
    """
    def __init__(self, n_class=None, pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs={}):
        # NOTE(review): mutable default fc_kwargs={} is shared across calls
        # and mutated by the base __init__ — should default to None; confirm.
        super(SEResNeXt101, self).__init__(
            101, n_class, pretrained_model,
            mean, initialW, fc_kwargs) | 0.886746 | 0.425546  # trailing "| ..." is dataset-row residue, not code
import unittest
from allergies import Allergies
# Python 2/3 compatibility
if not hasattr(unittest.TestCase, 'assertCountEqual'):
unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class AllergiesTest(unittest.TestCase):
    """Exercism canonical tests for the Allergies exercise.

    The allergy score is a bitmask: eggs=1, peanuts=2, shellfish=4,
    strawberries=8, tomatoes=16, chocolate=32, pollen=64, cats=128;
    score bits beyond 255 must be ignored.
    """
    def test_no_allergies_means_not_allergic(self):
        allergies = Allergies(0)
        self.assertIs(allergies.is_allergic_to('peanuts'), False)
        self.assertIs(allergies.is_allergic_to('cats'), False)
        self.assertIs(allergies.is_allergic_to('strawberries'), False)
    def test_is_allergic_to_eggs(self):
        self.assertIs(Allergies(1).is_allergic_to('eggs'), True)
    def test_allergic_to_eggs_in_addition_to_other_stuff(self):
        allergies = Allergies(5)  # 5 = eggs (1) + shellfish (4)
        self.assertIs(allergies.is_allergic_to('eggs'), True)
        self.assertIs(allergies.is_allergic_to('shellfish'), True)
        self.assertIs(allergies.is_allergic_to('strawberries'), False)
    def test_no_allergies_at_all(self):
        self.assertEqual(Allergies(0).lst, [])
    def test_allergic_to_just_eggs(self):
        self.assertEqual(Allergies(1).lst, ['eggs'])
    def test_allergic_to_just_peanuts(self):
        self.assertEqual(Allergies(2).lst, ['peanuts'])
    def test_allergic_to_just_strawberries(self):
        self.assertEqual(Allergies(8).lst, ['strawberries'])
    def test_allergic_to_eggs_and_peanuts(self):
        self.assertCountEqual(Allergies(3).lst, ['eggs', 'peanuts'])
    def test_allergic_to_more_than_eggs_but_not_peanuts(self):
        self.assertCountEqual(Allergies(5).lst, ['eggs', 'shellfish'])
    def test_allergic_to_lots_of_stuff(self):
        self.assertCountEqual(
            Allergies(248).lst,
            ['strawberries', 'tomatoes', 'chocolate', 'pollen', 'cats'])
    def test_allergic_to_everything(self):
        self.assertCountEqual(
            Allergies(255).lst, [
                'eggs', 'peanuts', 'shellfish', 'strawberries', 'tomatoes',
                'chocolate', 'pollen', 'cats'
            ])
    def test_ignore_non_allergen_score_parts_only_eggs(self):
        # 257 = 256 + 1: the 256 bit is not an allergen and must be ignored.
        self.assertEqual(Allergies(257).lst, ['eggs'])
    def test_ignore_non_allergen_score_parts(self):
        self.assertCountEqual(
            Allergies(509).lst, [
                'eggs', 'shellfish', 'strawberries', 'tomatoes', 'chocolate',
                'pollen', 'cats'
            ])
if __name__ == '__main__':
unittest.main() | exercises/allergies/allergies_test.py | import unittest
from allergies import Allergies
# Python 2/3 compatibility
if not hasattr(unittest.TestCase, 'assertCountEqual'):
unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class AllergiesTest(unittest.TestCase):
    """Exercism canonical tests for the Allergies exercise.

    The allergy score is a bitmask: eggs=1, peanuts=2, shellfish=4,
    strawberries=8, tomatoes=16, chocolate=32, pollen=64, cats=128;
    score bits beyond 255 must be ignored.
    """
    def test_no_allergies_means_not_allergic(self):
        allergies = Allergies(0)
        self.assertIs(allergies.is_allergic_to('peanuts'), False)
        self.assertIs(allergies.is_allergic_to('cats'), False)
        self.assertIs(allergies.is_allergic_to('strawberries'), False)
    def test_is_allergic_to_eggs(self):
        self.assertIs(Allergies(1).is_allergic_to('eggs'), True)
    def test_allergic_to_eggs_in_addition_to_other_stuff(self):
        allergies = Allergies(5)  # 5 = eggs (1) + shellfish (4)
        self.assertIs(allergies.is_allergic_to('eggs'), True)
        self.assertIs(allergies.is_allergic_to('shellfish'), True)
        self.assertIs(allergies.is_allergic_to('strawberries'), False)
    def test_no_allergies_at_all(self):
        self.assertEqual(Allergies(0).lst, [])
    def test_allergic_to_just_eggs(self):
        self.assertEqual(Allergies(1).lst, ['eggs'])
    def test_allergic_to_just_peanuts(self):
        self.assertEqual(Allergies(2).lst, ['peanuts'])
    def test_allergic_to_just_strawberries(self):
        self.assertEqual(Allergies(8).lst, ['strawberries'])
    def test_allergic_to_eggs_and_peanuts(self):
        self.assertCountEqual(Allergies(3).lst, ['eggs', 'peanuts'])
    def test_allergic_to_more_than_eggs_but_not_peanuts(self):
        self.assertCountEqual(Allergies(5).lst, ['eggs', 'shellfish'])
    def test_allergic_to_lots_of_stuff(self):
        self.assertCountEqual(
            Allergies(248).lst,
            ['strawberries', 'tomatoes', 'chocolate', 'pollen', 'cats'])
    def test_allergic_to_everything(self):
        self.assertCountEqual(
            Allergies(255).lst, [
                'eggs', 'peanuts', 'shellfish', 'strawberries', 'tomatoes',
                'chocolate', 'pollen', 'cats'
            ])
    def test_ignore_non_allergen_score_parts_only_eggs(self):
        # 257 = 256 + 1: the 256 bit is not an allergen and must be ignored.
        self.assertEqual(Allergies(257).lst, ['eggs'])
    def test_ignore_non_allergen_score_parts(self):
        self.assertCountEqual(
            Allergies(509).lst, [
                'eggs', 'shellfish', 'strawberries', 'tomatoes', 'chocolate',
                'pollen', 'cats'
            ])
if __name__ == '__main__':
unittest.main() | 0.633297 | 0.66503 |
import json
import typing
import os
def parseJson(filepath: str, output_folder: str) -> typing.List[dict]:
"""
Read and preprocess document input json, e.g., removal of certain characters.
@param filepath: path to json file
@param output_folder: The folder in which the result is stored
@return: Read and processed json file.
"""
with open(filepath, "r") as f:
data = json.load(f)
# sanity check: "text" field is absolutely necessary
for doc in data:
if "text" not in doc.keys():
raise KeyError("Key \"text\" not in input json file.")
# add a unique index to each document
data, output_data = _jsonAddIndex(data)
# save the indexed files in a separate json for later visualization in the web api
output_file = os.path.join(output_folder, "indexed_documents.json")
with open(output_file, "w") as f:
json.dump(output_data, f, indent=2, ensure_ascii=False)
data = _preprocessJson(data)
return data
def storeJson(data: typing.List[dict], filepath: str) -> None:
"""
Write a json file
@param data: json file that needs to be stored
@param filepath: output path
"""
with open(filepath, "w") as f:
json.dump(data, f)
def loadJson(filepath: str) -> typing.List[dict]:
"""
Load a json file
@param filepath: input path
@return: json file
"""
with open(filepath, "r") as f:
data = json.load(f)
return data
def _preprocessJson(data: typing.List[dict]) -> typing.List[dict]:
"""
Necessary preprocessing steps. At the moment, this removes certain characters that cannot be processed in later
steps, e.g., "<" and ">", since they conflict with the design of TIMEX3 tags.
@param data:
@return:
"""
char_replace = [("&", " "), ("<", " "), (">", " "), (r"\u0007", ""), (r"\b", ""), ("–", "-")]
for d in data:
for y0, y1 in char_replace:
d["text"] = d["text"].replace(y0, y1)
return data
def _jsonAddIndex(data: typing.List[dict]) -> (typing.List[dict], typing.Dict[int, dict]):
"""
Adds an index to each document in the json list.
@param data: Input document json
@return: Input data with additional field "id" which stores a unique ID for each document
"""
curr_index = 1
output_data = {}
for d in data:
output_data[curr_index] = d
d["id"] = curr_index
curr_index += 1
return data, output_data | parser/jsonparser.py | import json
import typing
import os
def parseJson(filepath: str, output_folder: str) -> typing.List[dict]:
"""
Read and preprocess document input json, e.g., removal of certain characters.
@param filepath: path to json file
@param output_folder: The folder in which the result is stored
@return: Read and processed json file.
"""
with open(filepath, "r") as f:
data = json.load(f)
# sanity check: "text" field is absolutely necessary
for doc in data:
if "text" not in doc.keys():
raise KeyError("Key \"text\" not in input json file.")
# add a unique index to each document
data, output_data = _jsonAddIndex(data)
# save the indexed files in a separate json for later visualization in the web api
output_file = os.path.join(output_folder, "indexed_documents.json")
with open(output_file, "w") as f:
json.dump(output_data, f, indent=2, ensure_ascii=False)
data = _preprocessJson(data)
return data
def storeJson(data: typing.List[dict], filepath: str) -> None:
"""
Write a json file
@param data: json file that needs to be stored
@param filepath: output path
"""
with open(filepath, "w") as f:
json.dump(data, f)
def loadJson(filepath: str) -> typing.List[dict]:
"""
Load a json file
@param filepath: input path
@return: json file
"""
with open(filepath, "r") as f:
data = json.load(f)
return data
def _preprocessJson(data: typing.List[dict]) -> typing.List[dict]:
"""
Necessary preprocessing steps. At the moment, this removes certain characters that cannot be processed in later
steps, e.g., "<" and ">", since they conflict with the design of TIMEX3 tags.
@param data:
@return:
"""
char_replace = [("&", " "), ("<", " "), (">", " "), (r"\u0007", ""), (r"\b", ""), ("–", "-")]
for d in data:
for y0, y1 in char_replace:
d["text"] = d["text"].replace(y0, y1)
return data
def _jsonAddIndex(data: typing.List[dict]) -> (typing.List[dict], typing.Dict[int, dict]):
"""
Adds an index to each document in the json list.
@param data: Input document json
@return: Input data with additional field "id" which stores a unique ID for each document
"""
curr_index = 1
output_data = {}
for d in data:
output_data[curr_index] = d
d["id"] = curr_index
curr_index += 1
return data, output_data | 0.461745 | 0.34621 |
"""Utilities for synchronizing and communication across multiple hosts."""
import functools
from typing import Optional
import zlib
import jax
import numpy as np
PyTreeDef = type(jax.tree_structure(None))
# NB: This needs to be top-level for the jax compilation cache.
@functools.partial(jax.pmap, axis_name='hosts')
def _host_allgather_psum(x: PyTreeDef) -> PyTreeDef:
"""Host psum for host_allgather."""
return jax.lax.psum(x, 'hosts')
def broadcast_one_to_all(in_tree: PyTreeDef,
is_source: Optional[bool] = None) -> PyTreeDef:
"""Broadcast data from a source host (host 0 by default) to all other hosts.
Args:
in_tree: pytree of arrays - each array *must* have the same shape across the
hosts.
is_source: optional bool denoting whether the caller is the source. Only
'source host' will contribute the data for the broadcast. If None, then
host 0 is used.
Returns:
A pytree matching in_tree where the leaves now all contain the data from the
first host.
"""
if is_source is None:
is_source = jax.process_index() == 0
def pre_pmap(x):
if is_source:
return np.concatenate([
x[None, ...],
np.repeat([np.zeros_like(x)],
jax.local_device_count() - 1, 0)
])
else:
return np.repeat([np.zeros_like(x)], jax.local_device_count(), 0)
def post_pmap(x):
return jax.device_get(x)[0]
in_tree = jax.tree_map(pre_pmap, in_tree)
in_tree = jax.device_get(_host_allgather_psum(in_tree))
return jax.tree_map(post_pmap, in_tree)
def sync_devices(name: str):
"""Creates a barrier across all hosts/devices."""
h = np.int32(zlib.crc32(name.encode()))
assert_same(h, f"sync_devices name mismatch ('{name}')")
def host_allgather(in_tree: PyTreeDef, num_replica_sets: int,
replica_set_id: int,
is_first_host_in_replica_set: bool) -> PyTreeDef:
"""Gather data from across hosts/replica sets.
Args:
in_tree: pytree of arrays - each array _must_ have the same shape across the
hosts.
num_replica_sets: int denoting the number of replica sets (least common
multiples of hosts and replicas) in the computation.
replica_set_id: int denoting which replica set the current host belongs to.
is_first_host_in_replica_set: bool denoting whether the current host is the
first one in its replica set. Only that first host will contribute the
data for the all-gather from its replica set.
Returns:
A pytree matching in_tree where each leaf array has a new leading
dimension of size num_replica_sets, carrying the data copied from all hosts.
"""
num_local_devices = jax.local_device_count()
# We collect data per-host by creating two new axes: a pmap outer axis, and
# an inner 'host' axis. The latter is filled based on process_index, and the
# outer only has this single nonzero entry. Thus after a psum, we collect the
# first member of the outer axis and have a new 'host' dimension such that
# the returned leaves contain the data gathered from other hosts.
def pre_pmap(x):
y = np.zeros((num_local_devices, num_replica_sets, *x.shape), dtype=x.dtype)
if is_first_host_in_replica_set:
y[0, replica_set_id] = x
return y
def post_pmap(x):
return jax.device_get(x)[0]
return jax.tree_map(post_pmap,
_host_allgather_psum(jax.tree_map(pre_pmap, in_tree)))
def assert_same(in_tree: PyTreeDef, fail_message: str = ''):
"""Verifies that all the hosts have the same tree of values`."""
expected = broadcast_one_to_all(in_tree)
if not jax.tree_util.tree_all(
jax.tree_map(lambda *x: np.all(np.equal(*x)), in_tree, expected)):
raise AssertionError(
f'{fail_message} Expected: {expected}; got: {in_tree}.') | t5x/multihost_utils.py |
"""Utilities for synchronizing and communication across multiple hosts."""
import functools
from typing import Optional
import zlib
import jax
import numpy as np
PyTreeDef = type(jax.tree_structure(None))
# NB: This needs to be top-level for the jax compilation cache.
@functools.partial(jax.pmap, axis_name='hosts')
def _host_allgather_psum(x: PyTreeDef) -> PyTreeDef:
"""Host psum for host_allgather."""
return jax.lax.psum(x, 'hosts')
def broadcast_one_to_all(in_tree: PyTreeDef,
is_source: Optional[bool] = None) -> PyTreeDef:
"""Broadcast data from a source host (host 0 by default) to all other hosts.
Args:
in_tree: pytree of arrays - each array *must* have the same shape across the
hosts.
is_source: optional bool denoting whether the caller is the source. Only
'source host' will contribute the data for the broadcast. If None, then
host 0 is used.
Returns:
A pytree matching in_tree where the leaves now all contain the data from the
first host.
"""
if is_source is None:
is_source = jax.process_index() == 0
def pre_pmap(x):
if is_source:
return np.concatenate([
x[None, ...],
np.repeat([np.zeros_like(x)],
jax.local_device_count() - 1, 0)
])
else:
return np.repeat([np.zeros_like(x)], jax.local_device_count(), 0)
def post_pmap(x):
return jax.device_get(x)[0]
in_tree = jax.tree_map(pre_pmap, in_tree)
in_tree = jax.device_get(_host_allgather_psum(in_tree))
return jax.tree_map(post_pmap, in_tree)
def sync_devices(name: str):
"""Creates a barrier across all hosts/devices."""
h = np.int32(zlib.crc32(name.encode()))
assert_same(h, f"sync_devices name mismatch ('{name}')")
def host_allgather(in_tree: PyTreeDef, num_replica_sets: int,
replica_set_id: int,
is_first_host_in_replica_set: bool) -> PyTreeDef:
"""Gather data from across hosts/replica sets.
Args:
in_tree: pytree of arrays - each array _must_ have the same shape across the
hosts.
num_replica_sets: int denoting the number of replica sets (least common
multiples of hosts and replicas) in the computation.
replica_set_id: int denoting which replica set the current host belongs to.
is_first_host_in_replica_set: bool denoting whether the current host is the
first one in its replica set. Only that first host will contribute the
data for the all-gather from its replica set.
Returns:
A pytree matching in_tree where each leaf array has a new leading
dimension of size num_replica_sets, carrying the data copied from all hosts.
"""
num_local_devices = jax.local_device_count()
# We collect data per-host by creating two new axes: a pmap outer axis, and
# an inner 'host' axis. The latter is filled based on process_index, and the
# outer only has this single nonzero entry. Thus after a psum, we collect the
# first member of the outer axis and have a new 'host' dimension such that
# the returned leaves contain the data gathered from other hosts.
def pre_pmap(x):
y = np.zeros((num_local_devices, num_replica_sets, *x.shape), dtype=x.dtype)
if is_first_host_in_replica_set:
y[0, replica_set_id] = x
return y
def post_pmap(x):
return jax.device_get(x)[0]
return jax.tree_map(post_pmap,
_host_allgather_psum(jax.tree_map(pre_pmap, in_tree)))
def assert_same(in_tree: PyTreeDef, fail_message: str = ''):
"""Verifies that all the hosts have the same tree of values`."""
expected = broadcast_one_to_all(in_tree)
if not jax.tree_util.tree_all(
jax.tree_map(lambda *x: np.all(np.equal(*x)), in_tree, expected)):
raise AssertionError(
f'{fail_message} Expected: {expected}; got: {in_tree}.') | 0.957715 | 0.464962 |
import numpy as np
import matplotlib.cm
import tensorflow as tf
from audiocodec.mdctransformer import MDCTransformer
from audiocodec.psychoacoustic import PsychoacousticModel
class AudioRepresentation:
def __init__(self, sample_rate, freq_n, compute_dtype):
# [batches_n, blocks_n, freqs_n, channels_n]
# sample_rate = blocks_n x freqs_n
self.sample_rate = tf.constant(sample_rate)
self.freq_n = tf.constant(freq_n)
# initialize MDCTransformer and PsychoacousticModel
self.mdctransformer = MDCTransformer(freq_n, window_type='vorbis',
compute_dtype=compute_dtype)
# Note: Number of bark determines level of masking threshold!!! For freq_n = 256, bark_bands_n=24 is appropriate
self.psychoacoustic = PsychoacousticModel(sample_rate, freq_n, bark_bands_n=24, alpha=0.6,
compute_dtype=compute_dtype)
@tf.function
def t_to_repr(self, wave):
"""Convert audio signal to representation for neural model
:param wave: audio signal [batches_n, samples_n, channels_n]
:return audio representation [batches_n, blocks_n+1, freqs_n, channels_n]
with samples_n = blocks_n * freq_n
"""
samples_n = tf.shape(wave)[1]
tf.assert_equal(tf.truncatemod(samples_n, self.freq_n), 0,
f'Number of samples ({samples_n}) needs to be a multiple of {self.freq_n}')
return self.mdctransformer.transform(wave)
@tf.function
def repr_to_t(self, mdct_norm):
"""Convert representation to audio signal
:param mdct_norm: audio representation [batches_n, blocks_n, freqs_n, channels_n]
:return audio signal [batches_n, samples_n, channels_n]
with samples_n = (blocks_n+1) * freq_n
"""
return self.mdctransformer.inverse_transform(mdct_norm)
@tf.function
def tonality(self, mdct_norm):
"""Computes the tonality of the audio signal defined by the representation
:param mdct_norm: audio representation [batches_n, blocks_n, freqs_n, channels_n]
:return: tonality per block [batches_n, blocks_n, 1, channels_n]
"""
return self.psychoacoustic.tonality(mdct_norm)
@tf.function
def psychoacoustic_masking_ampl(self, mdct_norm, drown=0.0):
"""Get hearing threshold for each pixel in the spectrogram
:param mdct_norm: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:param drown: factor 0..1 to drown out audible sounds (0: no drowning, 1: fully drowned)
:return: masking amplitude (positive) [batches_n, blocks_n, freqs_n, channels_n]
"""
tonality_per_block = self.psychoacoustic.tonality(mdct_norm)
total_threshold = self.psychoacoustic.global_masking_threshold(mdct_norm, tonality_per_block, drown)
return total_threshold
@tf.function
def add_noise(self, mdct_norm, masking_threshold):
"""
Adds inaudible noise to amplitudes, using the masking_threshold.
The noise added is calibrated at a 3-sigma deviation in both directions:
masking_threshold = 6*sigma
As such, there is a 0.2% probability that the noise added is bigger than the masking_threshold
:param mdct_norm: mdct amplitudes (spectrum) for each filter [batches_n, blocks_n, filter_bands_n, channels_n]
must be of compute_dtype
:param masking_threshold: masking threshold in amplitude. Masking threshold is never negative
output dtype is compute_dtype
[batches_n, blocks_n, filter_bands_n, channels_n]
:return: mdct amplitudes with inaudible noise added [batches_n, blocks_n, filter_bands_n, channels_n]
"""
return self.psychoacoustic.add_noise(mdct_norm, masking_threshold)
@tf.function
def psychoacoustic_filter(self, mdct_norm, masking_threshold, max_gradient=10):
"""Apply lRElu filter to tab-representation
:param mdct_norm: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n=1]
:param masking_threshold: masking threshold in amplitude. Masking threshold is never negative
output dtype is compute_dtype
[batches_n, blocks_n, filter_bands_n, channels_n]
:param drown: factor 0..1 to drown out audible sounds (0: no drowning, 1: fully drowned)
:param max_gradient: maximum gradient filter will introduce
:return: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n=1]
"""
# ReLU-filter
def f_attentuation(x):
# function with
# f(0) = 0.
# f(1) = 1.
# f'(0) = max_gradient / (2**(max_gradient+1) - 2) >~ 0
# f'(1) = max_gradient / (1. - 1./2**max_gradient) >~ max_gradient
return (1. / (2. - x)**max_gradient - 1./2**max_gradient) / (1. - 1./2**max_gradient)
x_abs = tf.abs(mdct_norm) / masking_threshold
x_abs_safe = tf.where(x_abs < 1., x_abs, 1.)
mdct_norm_filtered = tf.where(x_abs < 1., f_attentuation(x_abs_safe) * mdct_norm, mdct_norm)
return mdct_norm_filtered
def repr_to_spectrogram(self, mdct_norm, intensity=False, channel=0, cmap=None):
"""Make image of normalized mdct amplitudes
:param mdct_norm: mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:param intensity: shows amplitudes if False, intensities if True
:param channel: select (stereo)-channel which needs to be displayed
:param cmap: matplotlib colormap
:return: uint8 image with filter_band_n as height and #blocks as width
shape = [batches_n, blocks_n, freqs_n, color_channels]
where color_channels is 1 if cmap = None, otherwise it is 3 (RGB)
"""
x = tf.cast(mdct_norm[:, :, :, channel:channel+1], tf.float32)
def normalized_dB_scale(ampl, with_sign=True):
normalized_dB = self.psychoacoustic.amplitude_to_dB_norm(ampl)
if with_sign:
# range -1..1
return tf.sign(ampl) * normalized_dB
else:
# range 0..1
return normalized_dB
# convert to 0..1 range
if intensity:
image = normalized_dB_scale(x, with_sign=False)
else:
image = (normalized_dB_scale(x, with_sign=True) + 1.) / 2.
image = tf.map_fn(lambda im: tf.image.rot90(im), image)
# colorize with cmap
if cmap is not None:
# quantize
image = image[:, :, :, 0] # remove the dummy channel direction (will be replace with rgb info from color map)
image_index = tf.cast(tf.round(image * (cmap.N-1)), dtype=tf.int32) # indices in [0, cmap.N-1]
image_index = tf.clip_by_value(image_index, clip_value_min=0, clip_value_max=cmap.N-1)
# gather
color_map = matplotlib.cm.get_cmap(cmap)(np.arange(cmap.N)) # shape=[cmap.N, 3]
colors = tf.constant(color_map, dtype=tf.float32)
image = tf.gather(colors, image_index) # image[b, h, w, c] = color[image_index[b, h, w], c]
return image
def repr_to_audio(self, mdct_norm):
"""Make audio of mdct amplitudes
:param mdct_norm: mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:return: audio signal [batches_n, samples_n, channels_n]
with samples_n = (blocks_n+1) * freq_n
"""
mdct_norm_ft32 = tf.cast(mdct_norm, dtype=tf.float32)
wave = self.repr_to_t(mdct_norm_ft32)
wave = tf.clip_by_value(wave, clip_value_min=-1., clip_value_max=1.)
return wave | model/audiorepresentation.py | import numpy as np
import matplotlib.cm
import tensorflow as tf
from audiocodec.mdctransformer import MDCTransformer
from audiocodec.psychoacoustic import PsychoacousticModel
class AudioRepresentation:
def __init__(self, sample_rate, freq_n, compute_dtype):
# [batches_n, blocks_n, freqs_n, channels_n]
# sample_rate = blocks_n x freqs_n
self.sample_rate = tf.constant(sample_rate)
self.freq_n = tf.constant(freq_n)
# initialize MDCTransformer and PsychoacousticModel
self.mdctransformer = MDCTransformer(freq_n, window_type='vorbis',
compute_dtype=compute_dtype)
# Note: Number of bark determines level of masking threshold!!! For freq_n = 256, bark_bands_n=24 is appropriate
self.psychoacoustic = PsychoacousticModel(sample_rate, freq_n, bark_bands_n=24, alpha=0.6,
compute_dtype=compute_dtype)
@tf.function
def t_to_repr(self, wave):
"""Convert audio signal to representation for neural model
:param wave: audio signal [batches_n, samples_n, channels_n]
:return audio representation [batches_n, blocks_n+1, freqs_n, channels_n]
with samples_n = blocks_n * freq_n
"""
samples_n = tf.shape(wave)[1]
tf.assert_equal(tf.truncatemod(samples_n, self.freq_n), 0,
f'Number of samples ({samples_n}) needs to be a multiple of {self.freq_n}')
return self.mdctransformer.transform(wave)
@tf.function
def repr_to_t(self, mdct_norm):
"""Convert representation to audio signal
:param mdct_norm: audio representation [batches_n, blocks_n, freqs_n, channels_n]
:return audio signal [batches_n, samples_n, channels_n]
with samples_n = (blocks_n+1) * freq_n
"""
return self.mdctransformer.inverse_transform(mdct_norm)
@tf.function
def tonality(self, mdct_norm):
"""Computes the tonality of the audio signal defined by the representation
:param mdct_norm: audio representation [batches_n, blocks_n, freqs_n, channels_n]
:return: tonality per block [batches_n, blocks_n, 1, channels_n]
"""
return self.psychoacoustic.tonality(mdct_norm)
@tf.function
def psychoacoustic_masking_ampl(self, mdct_norm, drown=0.0):
"""Get hearing threshold for each pixel in the spectrogram
:param mdct_norm: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:param drown: factor 0..1 to drown out audible sounds (0: no drowning, 1: fully drowned)
:return: masking amplitude (positive) [batches_n, blocks_n, freqs_n, channels_n]
"""
tonality_per_block = self.psychoacoustic.tonality(mdct_norm)
total_threshold = self.psychoacoustic.global_masking_threshold(mdct_norm, tonality_per_block, drown)
return total_threshold
@tf.function
def add_noise(self, mdct_norm, masking_threshold):
"""
Adds inaudible noise to amplitudes, using the masking_threshold.
The noise added is calibrated at a 3-sigma deviation in both directions:
masking_threshold = 6*sigma
As such, there is a 0.2% probability that the noise added is bigger than the masking_threshold
:param mdct_norm: mdct amplitudes (spectrum) for each filter [batches_n, blocks_n, filter_bands_n, channels_n]
must be of compute_dtype
:param masking_threshold: masking threshold in amplitude. Masking threshold is never negative
output dtype is compute_dtype
[batches_n, blocks_n, filter_bands_n, channels_n]
:return: mdct amplitudes with inaudible noise added [batches_n, blocks_n, filter_bands_n, channels_n]
"""
return self.psychoacoustic.add_noise(mdct_norm, masking_threshold)
@tf.function
def psychoacoustic_filter(self, mdct_norm, masking_threshold, max_gradient=10):
"""Apply lRElu filter to tab-representation
:param mdct_norm: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n=1]
:param masking_threshold: masking threshold in amplitude. Masking threshold is never negative
output dtype is compute_dtype
[batches_n, blocks_n, filter_bands_n, channels_n]
:param drown: factor 0..1 to drown out audible sounds (0: no drowning, 1: fully drowned)
:param max_gradient: maximum gradient filter will introduce
:return: normalized mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n=1]
"""
# ReLU-filter
def f_attentuation(x):
# function with
# f(0) = 0.
# f(1) = 1.
# f'(0) = max_gradient / (2**(max_gradient+1) - 2) >~ 0
# f'(1) = max_gradient / (1. - 1./2**max_gradient) >~ max_gradient
return (1. / (2. - x)**max_gradient - 1./2**max_gradient) / (1. - 1./2**max_gradient)
x_abs = tf.abs(mdct_norm) / masking_threshold
x_abs_safe = tf.where(x_abs < 1., x_abs, 1.)
mdct_norm_filtered = tf.where(x_abs < 1., f_attentuation(x_abs_safe) * mdct_norm, mdct_norm)
return mdct_norm_filtered
def repr_to_spectrogram(self, mdct_norm, intensity=False, channel=0, cmap=None):
"""Make image of normalized mdct amplitudes
:param mdct_norm: mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:param intensity: shows amplitudes if False, intensities if True
:param channel: select (stereo)-channel which needs to be displayed
:param cmap: matplotlib colormap
:return: uint8 image with filter_band_n as height and #blocks as width
shape = [batches_n, blocks_n, freqs_n, color_channels]
where color_channels is 1 if cmap = None, otherwise it is 3 (RGB)
"""
x = tf.cast(mdct_norm[:, :, :, channel:channel+1], tf.float32)
def normalized_dB_scale(ampl, with_sign=True):
normalized_dB = self.psychoacoustic.amplitude_to_dB_norm(ampl)
if with_sign:
# range -1..1
return tf.sign(ampl) * normalized_dB
else:
# range 0..1
return normalized_dB
# convert to 0..1 range
if intensity:
image = normalized_dB_scale(x, with_sign=False)
else:
image = (normalized_dB_scale(x, with_sign=True) + 1.) / 2.
image = tf.map_fn(lambda im: tf.image.rot90(im), image)
# colorize with cmap
if cmap is not None:
# quantize
image = image[:, :, :, 0] # remove the dummy channel direction (will be replace with rgb info from color map)
image_index = tf.cast(tf.round(image * (cmap.N-1)), dtype=tf.int32) # indices in [0, cmap.N-1]
image_index = tf.clip_by_value(image_index, clip_value_min=0, clip_value_max=cmap.N-1)
# gather
color_map = matplotlib.cm.get_cmap(cmap)(np.arange(cmap.N)) # shape=[cmap.N, 3]
colors = tf.constant(color_map, dtype=tf.float32)
image = tf.gather(colors, image_index) # image[b, h, w, c] = color[image_index[b, h, w], c]
return image
def repr_to_audio(self, mdct_norm):
"""Make audio of mdct amplitudes
:param mdct_norm: mdct amplitudes [batches_n, blocks_n, freqs_n, channels_n]
:return: audio signal [batches_n, samples_n, channels_n]
with samples_n = (blocks_n+1) * freq_n
"""
mdct_norm_ft32 = tf.cast(mdct_norm, dtype=tf.float32)
wave = self.repr_to_t(mdct_norm_ft32)
wave = tf.clip_by_value(wave, clip_value_min=-1., clip_value_max=1.)
return wave | 0.90016 | 0.402627 |
import os
import sys
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
import multiprocessing as mp
import time
import argparse
import megengine as mge
import megengine.distributed as dist
from megengine.jit import trace
from megengine.data import RandomSampler, SequentialSampler, DataLoader
from edit.utils import Config, mkdir_or_exist, build_from_cfg, get_root_logger
from edit.models import build_model
from edit.datasets import build_dataset
from edit.core.runner import EpochBasedRunner
from edit.core.hook import HOOKS
from edit.core.hook.evaluation import EvalIterHook
def parse_args():
parser = argparse.ArgumentParser(description='Train and Eval an editor o(* ̄▽ ̄*)ブ')
parser.add_argument('config', help='train config file path')
parser.add_argument("-d", "--dynamic", default=True, action='store_true', help="enable dygraph mode")
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument("--gpuids", type=str, default="-1", help="spcefic gpus, -1 for cpu, >=0 for gpu, e.g.: 2,3")
parser.add_argument('--work_dir', type=str, default=None, help='the dir to save logs and models')
parser.add_argument('--resume_from', type=str, default=None, help='the checkpoint file to resume from')
args = parser.parse_args()
return args
def get_loader(dataset, cfg, mode='train'):
assert mode in ('train', 'eval')
if mode == 'train':
sampler = RandomSampler(dataset, batch_size=cfg.data.samples_per_gpu, drop_last=True, seed=0)
loader = DataLoader(dataset, sampler, num_workers=cfg.data.workers_per_gpu)
else:
samples_per_gpu = cfg.data.get('eval_samples_per_gpu', cfg.data.samples_per_gpu)
workers_per_gpu = cfg.data.get('eval_workers_per_gpu', cfg.data.workers_per_gpu)
if cfg.evaluation.multi_process is True:
sampler = SequentialSampler(dataset, batch_size=samples_per_gpu, drop_last=False)
else:
sampler = SequentialSampler(dataset, batch_size=samples_per_gpu, drop_last=False, world_size=1, rank=0)
loader = DataLoader(dataset, sampler, num_workers=workers_per_gpu)
return loader
def train(model, datasets, cfg, rank):
data_loaders = [ get_loader(ds, cfg, 'train') for ds in datasets]
runner = EpochBasedRunner(model=model, optimizers_cfg=cfg.optimizers, work_dir=cfg.work_dir)
runner.create_gradmanager_and_optimizers()
if cfg.resume_from is not None:
runner.resume(cfg.resume_from, cfg.get('resume_optim', True))
elif cfg.load_from is not None:
runner.load_checkpoint(cfg.load_from, load_optim=False)
else:
pass
runner.sync_model_params()
# register some useful hooks
runner.register_training_hooks(lr_config=cfg.lr_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
# register evaluation hook
if cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.eval)
save_path = os.path.join(cfg.work_dir, 'eval_visuals')
log_path = os.path.join(cfg.work_dir, 'eval.log')
runner.register_hook(EvalIterHook(get_loader(dataset, cfg, 'eval'), save_path=save_path, log_path=log_path, **cfg.evaluation))
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def worker(rank, world_size, cfg, gpu_id="0", port=23333):
if cfg.dynamic:
trace.enabled = False
if world_size > 1:
dist.init_process_group(
master_ip = "localhost",
port = port,
world_size = world_size,
rank = rank,
device = int(gpu_id)%10,
)
log_file = os.path.join(cfg.work_dir, 'rank{}_root.log'.format(rank))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
model = build_model(cfg.model, train_cfg=cfg.train_cfg, eval_cfg=cfg.eval_cfg) # 此时参数已经随机化完成
datasets = [build_dataset(cfg.data.train)]
train(model, datasets, cfg, rank)
def main():
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.dynamic = args.dynamic
if args.work_dir is not None:
cfg.work_dir = args.work_dir
else:
assert cfg.get('work_dir', None) is not None, 'if do not set work_dir in args, please set in config file'
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.work_dir = os.path.join(cfg.work_dir, timestamp)
mkdir_or_exist(os.path.abspath(cfg.work_dir))
log_file = os.path.join(cfg.work_dir, 'root.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
logger.info('Config:\n{}'.format(cfg.text))
gpu_list = [ item.strip() for item in args.gpuids.split(",")]
if gpu_list[0] == "-1":
world_size = 0 # use cpu
logger.info('training use only cpu')
else:
world_size = len(gpu_list)
logger.info('training gpus num: {}'.format(world_size))
if world_size == 0: # use cpu
mge.set_default_device(device='cpux')
elif world_size == 1:
mge.set_default_device(device='gpu' + gpu_list[0])
else:
pass
if world_size > 1:
# scale weight decay in "SUM" mode
port = dist.util.get_free_ports(1)[0]
server = dist.Server(port)
processes = []
for rank in range(world_size):
logger.info("init distributed process group {} / {}".format(rank, world_size))
p = mp.Process(target=worker, args=(rank, world_size, cfg, gpu_list[rank], port))
p.start()
processes.append(p)
for rank in range(world_size):
processes[rank].join()
code = processes[rank].exitcode
assert code == 0, "subprocess {} exit with code {}".format(rank, code)
else:
worker(0, 1, cfg)
if __name__ == "__main__":
main() | tools/train.py | import os
import sys
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
import multiprocessing as mp
import time
import argparse
import megengine as mge
import megengine.distributed as dist
from megengine.jit import trace
from megengine.data import RandomSampler, SequentialSampler, DataLoader
from edit.utils import Config, mkdir_or_exist, build_from_cfg, get_root_logger
from edit.models import build_model
from edit.datasets import build_dataset
from edit.core.runner import EpochBasedRunner
from edit.core.hook import HOOKS
from edit.core.hook.evaluation import EvalIterHook
def parse_args():
parser = argparse.ArgumentParser(description='Train and Eval an editor o(* ̄▽ ̄*)ブ')
parser.add_argument('config', help='train config file path')
parser.add_argument("-d", "--dynamic", default=True, action='store_true', help="enable dygraph mode")
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument("--gpuids", type=str, default="-1", help="spcefic gpus, -1 for cpu, >=0 for gpu, e.g.: 2,3")
parser.add_argument('--work_dir', type=str, default=None, help='the dir to save logs and models')
parser.add_argument('--resume_from', type=str, default=None, help='the checkpoint file to resume from')
args = parser.parse_args()
return args
def get_loader(dataset, cfg, mode='train'):
    """Build a DataLoader for *dataset* in either 'train' or 'eval' mode.

    Training uses a seeded RandomSampler that drops the last partial batch;
    evaluation uses a SequentialSampler, optionally restricted to a single
    rank when multi-process evaluation is disabled.
    """
    assert mode in ('train', 'eval')
    if mode == 'train':
        smp = RandomSampler(dataset, batch_size=cfg.data.samples_per_gpu, drop_last=True, seed=0)
        return DataLoader(dataset, smp, num_workers=cfg.data.workers_per_gpu)
    # Eval-specific batch/worker counts fall back to the training values.
    batch = cfg.data.get('eval_samples_per_gpu', cfg.data.samples_per_gpu)
    workers = cfg.data.get('eval_workers_per_gpu', cfg.data.workers_per_gpu)
    if cfg.evaluation.multi_process is True:
        smp = SequentialSampler(dataset, batch_size=batch, drop_last=False)
    else:
        # Single-process evaluation: pin the sampler to rank 0 of a world of 1.
        smp = SequentialSampler(dataset, batch_size=batch, drop_last=False, world_size=1, rank=0)
    return DataLoader(dataset, smp, num_workers=workers)
def train(model, datasets, cfg, rank):
    """Build loaders and hooks, then drive an epoch-based training run."""
    data_loaders = [ get_loader(ds, cfg, 'train') for ds in datasets]
    runner = EpochBasedRunner(model=model, optimizers_cfg=cfg.optimizers, work_dir=cfg.work_dir)
    runner.create_gradmanager_and_optimizers()
    # Checkpoint restore: resume (weights + optionally optimizer state) takes
    # priority over load_from (weights only).
    if cfg.resume_from is not None:
        runner.resume(cfg.resume_from, cfg.get('resume_optim', True))
    elif cfg.load_from is not None:
        runner.load_checkpoint(cfg.load_from, load_optim=False)
    else:
        pass
    # Synchronize parameters so every rank starts from identical weights.
    runner.sync_model_params()
    # register some useful hooks
    runner.register_training_hooks(lr_config=cfg.lr_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
    # register evaluation hook (only when an 'evaluation' section is configured)
    if cfg.get('evaluation', None) is not None:
        dataset = build_dataset(cfg.data.eval)
        save_path = os.path.join(cfg.work_dir, 'eval_visuals')
        log_path = os.path.join(cfg.work_dir, 'eval.log')
        runner.register_hook(EvalIterHook(get_loader(dataset, cfg, 'eval'), save_path=save_path, log_path=log_path, **cfg.evaluation))
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def worker(rank, world_size, cfg, gpu_id="0", port=23333):
    """Per-process entry point: set up the (optional) distributed context and
    per-rank logging, build model/datasets, then hand off to train()."""
    if cfg.dynamic:
        trace.enabled = False  # dygraph mode: disable graph tracing globally
    if world_size > 1:
        dist.init_process_group(
            master_ip = "localhost",
            port = port,
            world_size = world_size,
            rank = rank,
            # NOTE(review): assumes the local device index is the last digit
            # of the gpu id string — confirm for machines with >10 GPUs.
            device = int(gpu_id)%10,
        )
    # Each rank writes its own log file inside the shared work_dir.
    log_file = os.path.join(cfg.work_dir, 'rank{}_root.log'.format(rank))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, eval_cfg=cfg.eval_cfg)  # parameters are already randomly initialized at this point
    datasets = [build_dataset(cfg.data.train)]
    train(model, datasets, cfg, rank)
def main():
    """Entry point: parse CLI args, prepare config/work_dir/logging, then run
    training on CPU, one GPU, or one subprocess per GPU."""
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    args = parse_args()
    cfg = Config.fromfile(args.config)
    cfg.dynamic = args.dynamic
    # CLI options override (or are required to complement) the config file.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    else:
        assert cfg.get('work_dir', None) is not None, 'if do not set work_dir in args, please set in config file'
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # Each run gets its own timestamped sub-directory under work_dir.
    cfg.work_dir = os.path.join(cfg.work_dir, timestamp)
    mkdir_or_exist(os.path.abspath(cfg.work_dir))
    log_file = os.path.join(cfg.work_dir, 'root.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    logger.info('Config:\n{}'.format(cfg.text))
    # "-1" selects CPU; otherwise one worker per listed GPU id.
    gpu_list = [ item.strip() for item in args.gpuids.split(",")]
    if gpu_list[0] == "-1":
        world_size = 0 # use cpu
        logger.info('training use only cpu')
    else:
        world_size = len(gpu_list)
        logger.info('training gpus num: {}'.format(world_size))
    # Device selection; in the multi-GPU case each worker picks its own device.
    if world_size == 0: # use cpu
        mge.set_default_device(device='cpux')
    elif world_size == 1:
        mge.set_default_device(device='gpu' + gpu_list[0])
    else:
        pass
    if world_size > 1:
        # Launch one worker process per GPU; the Server coordinates the group.
        port = dist.util.get_free_ports(1)[0]
        server = dist.Server(port)  # kept referenced so it stays alive during training
        processes = []
        for rank in range(world_size):
            logger.info("init distributed process group {} / {}".format(rank, world_size))
            p = mp.Process(target=worker, args=(rank, world_size, cfg, gpu_list[rank], port))
            p.start()
            processes.append(p)
        # Join in rank order and fail loudly on any non-zero exit.
        for rank in range(world_size):
            processes[rank].join()
            code = processes[rank].exitcode
            assert code == 0, "subprocess {} exit with code {}".format(rank, code)
    else:
        worker(0, 1, cfg)
if __name__ == "__main__":
main() | 0.261237 | 0.125012 |
from __future__ import annotations
from typing import Union, Optional
import importlib
import os
import pathlib
from enum import Enum
from route import quick_invalid, write, Cause
from route import error as e
class Method(Enum):
    """Enumeration of HTTP verbs.

    Members stringify to their verb name; the ``|``, ``&`` and ``+`` operators
    join two methods (or a method and a string) into a pipe-separated spec
    understood by the routing decorator.
    """
    GET = "GET"
    HEAD = "HEAD"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"

    @staticmethod
    def values() -> list:
        """Return every verb name as a plain string."""
        names = []
        for member in Method:
            names.append(member.value)
        return names

    def __radd__(self, other):
        return "|".join((str(self), str(other)))

    def __and__(self, other):
        return "|".join((str(self), str(other)))

    def __or__(self, other):
        return "|".join((str(self), str(other)))

    def __str__(self):
        return self.value
class Document:
    """Human-readable / OpenAPI-style metadata for an endpoint, argument or
    response.  At least one of ``title`` or ``summary`` must be provided;
    a title wins when both are present."""

    def __init__(self,
                 title: str = None,  # title or summary required
                 summary: str = None,
                 types: Union[list, str] = "application/octet-stream",
                 example: Union[dict, str, int, float, bool, any] = None,
                 security: Optional[dict] = None,
                 responses: Optional[list] = None,
                 tags: Optional[list] = None,
                 format_type: Optional[str] = None,
                 **more_args):
        if title is None and summary is None:
            raise ValueError("Title or Summary must not be None.")
        self.title = title if title is not None else summary
        # A bare string content type is normalized to a one-element list.
        self.types = [types] if isinstance(types, str) else types
        self.example = example
        self.security = security
        self.responses = responses if responses is not None else []
        self.tags = tags if tags is not None else []
        self.format = format_type
        self.more = more_args
def http(method: str,
         require_auth: bool = True,
         args: Union[tuple, list, Argument] = (),
         docs: Optional[Document] = None):
    """Decorator that registers ``handler`` as an endpoint with the global
    ``loader``.

    ``method`` may be a single verb, a pipe-joined string ("GET|POST"), or
    "*" for every verb in ``Method``.  The route path is derived from the
    handler's defining file, relative to a known source root.
    """
    def _context(handler):
        path = None
        # The handler's module file determines the route path.
        file = handler.__globals__["__file__"]
        if "___" in os.path.normpath(file).split(os.path.sep):
            raise IsADirectoryError("Path-argument like directory found.")
        ep_dir = os.path.dirname(file)
        # A catch-all "___.py" endpoint must be the only file in its directory.
        if file.endswith("___.py") and \
                len([name for name in os.listdir(ep_dir) if os.path.isfile(ep_dir + "/" + name)]) >= 2:
            raise FileExistsError("Endpoint conflict")
        for base in loader.known_source:
            if os.path.abspath(file).startswith(os.path.abspath(base)):
                path = os.path.relpath(file, os.path.relpath(base))
        if path is None:
            raise FileNotFoundError("Base path not found.")
        path = path.replace(os.sep, "/")
        pp = 0
        # A bare Argument is accepted as shorthand for a one-element tuple.
        if isinstance(args, Argument):
            arg3 = (args,)
        else:
            arg3 = args
        for arg in arg3:
            if arg.arg_in == "path":
                # NOTE(review): `path in "___"` tests whether `path` is a
                # substring of the literal "___"; this looks like it was meant
                # to be `"___" in path` — confirm before relying on it.
                if (path != "___" and path in "___") or "__" not in path:
                    raise ValueError("Can't routing to this endpoint.")
            if arg.arg_in == "path":
                pp += 1  # number of declared path arguments (not used below)
        if isinstance(method, str):
            if method == "*":
                # Register for every known verb.
                # NOTE(review): `fig` is ["*"] here, so `met in fig` below can
                # never be true — the `continue` is effectively dead.
                fig = method.split("|")
                for met in Method.values():
                    if met in fig:
                        continue
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                # NOTE(review): returns None, so the decorated module-level
                # name is rebound to None (handlers live on in loader.signals).
                return
            if "|" in method:
                # Register once per pipe-separated verb.
                for met in method.split("|"):
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                return
        # Single-verb registration; only this branch preserves the handler name.
        loader.signals.append({
            "method": method,
            "func": handler,
            "path": path,
            "require_auth": require_auth,
            "args": arg3,
            "docs": docs
        })
        return handler
    return _context
class Documented:
    """Mixin that lets an object carry an attached :class:`Document`."""

    def __init__(self, document: "Optional[Document]" = None):
        self.docs = document


class Undefined:
    """Sentinel type marking 'no default supplied' (distinct from None)."""
    pass


class Argument(Documented):
    """Declarative description + validator for a single endpoint argument."""

    def __init__(self,
                 name: str,
                 arg_type: str,
                 arg_in: str,
                 required: bool = True,
                 auto_cast: bool = True,
                 minimum: int = -1,
                 maximum: int = -1,
                 must_be: "Union[tuple, list]" = (),
                 doc: "Optional[Document]" = None,
                 format_type: "Optional[str]" = None,
                 ignore_check_expect100: bool = False,
                 enum: "Union[tuple, list]" = (),
                 default: "Optional[any]" = Undefined):
        super().__init__(doc)
        if arg_type not in ["str", "string", "bool", "boolean", "number", "int", "long",
                            "double", "decimal", "float", "other"]:
            raise ValueError("Argument type is must be valid type.")
        if arg_in not in ["path", "query", "body"]:
            raise ValueError("Argument location is mut be valid type.")
        self.name = name
        self.type = arg_type
        self.arg_in = arg_in
        self.required = required
        self.auto_cast = auto_cast
        self.min = minimum
        self.max = maximum
        # BUG FIX: the original evaluated `must_be if enum is None else enum`;
        # since `enum` defaults to () (never None), `must_be` was always
        # discarded.  Prefer `enum` only when it is actually populated.
        self.must_be = enum if enum else must_be
        self.document = doc
        self.format = format_type
        self.ignore_check_expect100 = ignore_check_expect100
        self.default = default

    def norm_type(self, val: "Optional[any]" = None) -> "Optional[any]":
        """With no argument, return this argument's schema type name; with an
        argument, cast that value to this argument's Python type."""
        if "str" in self.type:
            return "string" if val is None else str(val)
        elif "bool" in self.type:
            if val is None:
                return "bool"
            # BUG FIX: bool("false") is True — interpret the token instead.
            return val if isinstance(val, bool) else str(val) != "false"
        elif self.type == "number" or "int" in self.type:
            return "integer" if val is None else int(val)
        elif self.type == "long":
            return "integer" if val is None else int(val)
        else:
            return "number" if val is None else float(val)

    def validate(self, param_dict: dict) -> int:
        """Validate (and, with auto_cast, convert in place) this argument.

        Return codes: -1 missing required value, 0 OK, 1 not in allowed set,
        2 type error, 3 below minimum, 4 above maximum.
        """
        name = self.name
        typ = self.type
        cast = self.auto_cast
        must_be = self.must_be
        min_val = self.min
        max_val = self.max
        if name not in param_dict:
            if self.default is not Undefined:
                # Fill in the declared default (cast if requested).
                param_dict[name] = self.norm_type(self.default) if cast else self.default
                return 0
            if self.required:
                # Expect:100-continue probes may legitimately omit the value.
                return 0 if self.ignore_check_expect100 else -1
            return 0
        value = param_dict[name]
        if "str" in typ:
            if len(must_be) != 0 and value not in must_be:
                return 1
            # For strings, min/max bound the length.
            if min_val != -1 and len(value) < min_val:
                return 3
            if max_val != -1 and len(value) > max_val:
                return 4
            if cast:
                param_dict[name] = str(value)
        elif "bool" in typ:
            if value not in ("true", "false") + tuple(must_be):
                return 1
            if cast:
                # BUG FIX: the original used bool(value), which is True for
                # any non-empty string — including the token "false".
                param_dict[name] = value != "false"
        elif typ == "other":
            return 0
        else:
            try:
                if "int" in self.type or self.type == "number":
                    val = int(value)
                else:
                    val = float(value)
            except ValueError:
                return 2
            if len(must_be) != 0 and val not in must_be:
                return 1
            if min_val != -1 and val < min_val:
                return 3
            if max_val != -1 and val > max_val:
                return 4
            if cast:
                param_dict[name] = val
        return 0
class EndPoint(Documented):
    """A routed handler plus the metadata needed to validate its arguments."""

    def __init__(self,
                 method: str,
                 route_path: str,
                 rel_path: str,
                 handler,
                 auth_required: bool = True,
                 args: "Optional[list]" = None,
                 path_arg: bool = False,
                 doc: "Optional[Document]" = None):
        super().__init__(doc)
        self.method = method
        self.route_path = route_path
        self.rel_path = rel_path
        self.handler = handler
        self.auth_required = auth_required
        self.args = () if args is None else args
        self.path_arg = path_arg

    def handle(self, handler, params: dict, queries: dict, path_param: dict) -> "Union[Response, any]":
        """Authenticate, validate, then invoke the wrapped handler."""
        if self.auth_required and handler.do_auth():
            return  # do_auth() has already produced the failure response
        if not self.validate_arg(handler, params, queries, path_param):
            return  # validate_arg has already produced the failure response
        return self.handler(handler, params)

    def validate_arg(self, handler, params: dict, queries: dict, path_param: dict) -> bool:
        """Validate declared arguments against the query/body/path dicts.

        On success, casts query/path values into ``params`` and returns True.
        On failure, writes an error response (quick_invalid / write) and
        returns False.
        """
        missing = []
        for arg in self.args:
            arg: Argument
            if arg.arg_in == "query":
                code = arg.validate(queries)
                if code != -1 and arg.name not in queries:
                    continue  # optional and absent — nothing to cast
            elif arg.arg_in == "body":
                code = arg.validate(params)
                if code != -1 and arg.name not in params:
                    continue
            elif arg.arg_in == "path":
                code = arg.validate(path_param)
                if code != -1 and arg.name not in path_param:
                    continue
            else:
                raise ValueError(f"Validate failed: N:{arg.name} - T:{arg.type} - I:{arg.arg_in}")
            if code == -1:
                missing.append(arg.name)
                continue
            elif code == 1:
                if "bool" in arg.type:
                    quick_invalid(handler, arg.name, "[" + ", ".join(("true", "false") + arg.must_be) + "]")
                    return False
                else:
                    quick_invalid(handler, arg.name, "[" + ", ".join(arg.must_be) + "]")
                    return False
            elif code == 2:
                quick_invalid(handler, arg.name, arg.norm_type())
                return False
            elif code == 3:
                # BUG FIX: the min/max branches tested `"str" in arg.name`
                # (the argument's *name*) instead of its declared type.
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"at least {arg.min} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"at least {arg.min}")
                    return False
            elif code == 4:
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"less than {arg.max} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"less than {arg.max}")
                    return False
            # Mirror validated query/path values into the handler params.
            if arg.arg_in == "query":
                val = arg.norm_type(queries[arg.name]) if arg.auto_cast else queries[arg.name]
                params[arg.name] = val
            elif arg.arg_in == "path":
                val = arg.norm_type(path_param[arg.name]) if arg.auto_cast else path_param[arg.name]
                params[arg.name] = val
        if len(missing) != 0:
            write(handler, 400, e(Cause.MISSING_FIELD, Cause.MISSING_FIELD[2]
                                  .replace("%0", str(len(missing)))
                                  .replace("%1", ", ".join(missing))))
            return False
        return True
class Response(Documented):
    """An HTTP response under construction, with a fluent builder API."""

    def __init__(self,
                 code: int = 0,
                 body: Optional[any] = None,
                 raw_body: bool = False,
                 content_type: Union[str, list] = None,
                 headers: Optional[dict] = None,
                 doc: Optional[Document] = None):
        super().__init__(doc)
        self.code = code
        self.docs = doc  # kept explicitly, mirroring Documented.__init__
        self.body_data = body
        self.raw = raw_body
        self.cont_type = content_type
        self.headers = headers if headers is not None else {}

    def header(self, name: str, value: str) -> Response:
        """Set one response header and return self for chaining."""
        self.headers[name] = value
        return self

    def body(self, value: any, raw: bool = False) -> Response:
        """Attach a body payload; ``raw`` marks it as pre-serialized."""
        self.body_data = value
        self.raw = raw
        return self

    def content_type(self, value: str) -> Response:
        """Record the content type and mirror it into the headers."""
        self.cont_type = value
        return self.header("Content-Type", value)

    def get_code(self) -> int:
        """The HTTP status code chosen for this response."""
        return self.code
class SuccessResponse(Response):
    # Marker subclass: a Response representing a successful outcome.
    pass
class ErrorResponse(Response):
    """A Response representing a failure, optionally built from a ``Cause``
    record (an indexable of at least ``(code, ..., message)``)."""

    def __init__(self,
                 cause: "Optional[Cause]" = None,
                 code: int = 0,
                 headers: "Optional[dict]" = None,
                 body: "Optional[any]" = None,
                 content_type: "Optional[Union[str, list]]" = None,
                 doc: "Optional[Document]" = None):
        # BUG FIX: the original passed positional arguments that did not line
        # up with Response.__init__(code, body, raw_body, content_type,
        # headers, doc) — `headers` landed in `body` and the message in
        # `raw_body`.  Keyword arguments make the mapping explicit.
        if cause is not None:
            super().__init__(code=cause[0], body=cause[2],
                             content_type=content_type, headers=headers, doc=doc)
        else:
            super().__init__(code=code, body=body,
                             content_type=content_type, headers=headers, doc=doc)
        self.cause = cause
def success(code) -> SuccessResponse:
    # Convenience factory: wrap an HTTP status code in a SuccessResponse.
    return SuccessResponse(code)
def error(cause: Optional[Cause] = None, code: int = 0, message: Optional[str] = None) -> ErrorResponse:
    """Build an ErrorResponse, preferring a Cause record when one is given."""
    if cause is None:
        return ErrorResponse(code=code, body=message)
    return ErrorResponse(cause)
global loader
class EPManager:
    """Loads endpoint modules, indexes the endpoints they register, and
    resolves (method, path) lookups.  Installs itself as the module-global
    ``loader`` used by the @http decorator."""

    def __init__(self):
        global loader
        self.signals = []       # raw registrations appended by the @http decorator
        self.index_tree = {}    # nested dict: path part -> ... -> method -> EndPoint
        self.known_source = []  # roots that endpoint file paths are resolved against
        self.count = 0          # number of endpoints indexed so far
        loader = self

    def load(self, root: str) -> None:
        """Import every .py under ``root`` and rebuild the endpoint index."""
        if root in self.known_source:
            raise ValueError("Endpoint base already loaded.")
        else:
            self.known_source.append(root)
        for file in pathlib.Path(root).glob("**/*.py"):
            self.load_single(str(file), False)
        self.make_cache()

    def load_single(self, path: str, build_cache: bool = True) -> bool:
        """Import a single endpoint file; returns False if it cannot be imported."""
        try:
            # Convert the file path into a dotted module name.
            # NOTE(review): the [4:-3] slice assumes a fixed 4-character
            # prefix (e.g. "src" + separator) and strips a trailing ".py" —
            # confirm against the project layout before reusing elsewhere.
            m = ".".join(pathlib.Path(path).parts)[4:-3]
            importlib.import_module(m)
        except (ModuleNotFoundError, TypeError):
            return False
        if build_cache:
            root = os.path.dirname(path)
            if root not in self.known_source:
                self.known_source.append(root)
            self.make_cache()
        return True

    def enumerate(self, dic: Optional[dict] = None) -> list:
        """Flatten the index tree into a list of EndPoint objects."""
        result = []
        if dic is None:
            dic = self.index_tree
        for item in dic.items():
            i = item[1]
            if isinstance(i, dict):
                result += self.enumerate(i)
            else:
                result.append(i)
        return result

    def make_cache(self) -> None:
        """Build ``index_tree`` from the registrations collected in ``signals``."""
        for s in self.signals:
            method = s["method"]
            function = s["func"]
            path = s["path"]
            auth = s["require_auth"]
            args = s["args"]
            docs = s["docs"]
            rt = path
            if rt.endswith(".py"):
                rt = rt[:-3]
            cursor = self.index_tree
            slt = rt.split("/")
            qt_paths = 0  # wildcard segments: "__" = one part, "___" = rest of path
            for i, part in enumerate(slt, 1):
                if part in cursor:
                    # Existing node: a duplicate leaf with the same method is an error.
                    if i == len(slt):
                        if method in cursor[part]:
                            raise ValueError("Duplicate endpoint found:" + rt)
                else:
                    cursor[part] = {}
                cursor = cursor[part]
                if part == "__" or part == "___":
                    qt_paths += 1
            paths = 0
            for arg in args:
                if arg.arg_in == "path":
                    paths += 1
                elif arg.arg_in == "body" and method.upper() in ["GET", "HEAD", "TRACE", "OPTIONS"]:
                    raise TypeError("This method does not get a request body.")
            # Declared path arguments must match wildcard segments in the route.
            if paths != qt_paths:
                raise ValueError("Path argument count mismatch.")
            cursor[method] = EndPoint(method, rt, path, function, auth, args, bool(paths), docs)
            self.count += 1

    def get_endpoint(self, method: str, path: str, params: Optional[dict] = None) -> Optional[EndPoint]:
        """Resolve method+path to an EndPoint, filling ``params`` with any
        path-argument values collected along the way."""
        cursor = self.index_tree
        if path == "/":
            path = "_"  # "_" is the index node for a directory root
        if path.startswith("/"):
            path = path[1:]
        slt = path.split("/")
        args = []  # positional path-argument values collected during descent
        for i, part in enumerate(slt):
            if part in cursor:
                cursor = cursor[part]
                continue
            if "_" in cursor and part == "":
                break  # trailing slash resolves to the directory index
            if "___" in cursor:
                # "___" swallows the remainder of the path as one argument.
                args.append("/".join(slt[i:]))
                cursor = cursor["___"]
                break
            if "__" in cursor:
                # "__" matches exactly one path segment.
                args.append(part)
                cursor = cursor["__"]
                continue
            return None
        if "_" in cursor:
            cursor = cursor["_"]
        if method in cursor:
            result = cursor[method]
            if result.path_arg:
                # Map collected segments onto declared path arguments in order.
                count = 0
                for arg in result.args:
                    if arg.arg_in == "path":
                        params[arg.name] = args[count]
                        count += 1
            return result
        return None

    def reload(self):
        """Forget all registrations and re-import every known source root."""
        self.signals.clear()
        self.index_tree.clear()
        for source in list(self.known_source):
            self.known_source.remove(source)
            self.load(source)
__all__ = [
"http", "Argument", "EndPoint", "Document", "Method", "success", "error",
"SuccessResponse", "ErrorResponse", "Response"
] | src/endpoint.py | from __future__ import annotations
from typing import Union, Optional
import importlib
import os
import pathlib
from enum import Enum
from route import quick_invalid, write, Cause
from route import error as e
class Method(Enum):
    """Enumeration of HTTP verbs.

    Members stringify to their verb name; the ``|``, ``&`` and ``+`` operators
    join two methods (or a method and a string) into a pipe-separated spec
    understood by the routing decorator.
    """
    GET = "GET"
    HEAD = "HEAD"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"

    @staticmethod
    def values() -> list:
        """Return every verb name as a plain string."""
        names = []
        for member in Method:
            names.append(member.value)
        return names

    def __radd__(self, other):
        return "|".join((str(self), str(other)))

    def __and__(self, other):
        return "|".join((str(self), str(other)))

    def __or__(self, other):
        return "|".join((str(self), str(other)))

    def __str__(self):
        return self.value
class Document:
    """Human-readable / OpenAPI-style metadata for an endpoint, argument or
    response.  At least one of ``title`` or ``summary`` must be provided;
    a title wins when both are present."""

    def __init__(self,
                 title: str = None,  # title or summary required
                 summary: str = None,
                 types: Union[list, str] = "application/octet-stream",
                 example: Union[dict, str, int, float, bool, any] = None,
                 security: Optional[dict] = None,
                 responses: Optional[list] = None,
                 tags: Optional[list] = None,
                 format_type: Optional[str] = None,
                 **more_args):
        if title is None and summary is None:
            raise ValueError("Title or Summary must not be None.")
        self.title = title if title is not None else summary
        # A bare string content type is normalized to a one-element list.
        self.types = [types] if isinstance(types, str) else types
        self.example = example
        self.security = security
        self.responses = responses if responses is not None else []
        self.tags = tags if tags is not None else []
        self.format = format_type
        self.more = more_args
def http(method: str,
         require_auth: bool = True,
         args: Union[tuple, list, Argument] = (),
         docs: Optional[Document] = None):
    """Decorator that registers ``handler`` as an endpoint with the global
    ``loader``.

    ``method`` may be a single verb, a pipe-joined string ("GET|POST"), or
    "*" for every verb in ``Method``.  The route path is derived from the
    handler's defining file, relative to a known source root.
    """
    def _context(handler):
        path = None
        # The handler's module file determines the route path.
        file = handler.__globals__["__file__"]
        if "___" in os.path.normpath(file).split(os.path.sep):
            raise IsADirectoryError("Path-argument like directory found.")
        ep_dir = os.path.dirname(file)
        # A catch-all "___.py" endpoint must be the only file in its directory.
        if file.endswith("___.py") and \
                len([name for name in os.listdir(ep_dir) if os.path.isfile(ep_dir + "/" + name)]) >= 2:
            raise FileExistsError("Endpoint conflict")
        for base in loader.known_source:
            if os.path.abspath(file).startswith(os.path.abspath(base)):
                path = os.path.relpath(file, os.path.relpath(base))
        if path is None:
            raise FileNotFoundError("Base path not found.")
        path = path.replace(os.sep, "/")
        pp = 0
        # A bare Argument is accepted as shorthand for a one-element tuple.
        if isinstance(args, Argument):
            arg3 = (args,)
        else:
            arg3 = args
        for arg in arg3:
            if arg.arg_in == "path":
                # NOTE(review): `path in "___"` tests whether `path` is a
                # substring of the literal "___"; this looks like it was meant
                # to be `"___" in path` — confirm before relying on it.
                if (path != "___" and path in "___") or "__" not in path:
                    raise ValueError("Can't routing to this endpoint.")
            if arg.arg_in == "path":
                pp += 1  # number of declared path arguments (not used below)
        if isinstance(method, str):
            if method == "*":
                # Register for every known verb.
                # NOTE(review): `fig` is ["*"] here, so `met in fig` below can
                # never be true — the `continue` is effectively dead.
                fig = method.split("|")
                for met in Method.values():
                    if met in fig:
                        continue
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                # NOTE(review): returns None, so the decorated module-level
                # name is rebound to None (handlers live on in loader.signals).
                return
            if "|" in method:
                # Register once per pipe-separated verb.
                for met in method.split("|"):
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                return
        # Single-verb registration; only this branch preserves the handler name.
        loader.signals.append({
            "method": method,
            "func": handler,
            "path": path,
            "require_auth": require_auth,
            "args": arg3,
            "docs": docs
        })
        return handler
    return _context
class Documented:
    """Mixin that lets an object carry an attached :class:`Document`."""

    def __init__(self, document: "Optional[Document]" = None):
        self.docs = document


class Undefined:
    """Sentinel type marking 'no default supplied' (distinct from None)."""
    pass


class Argument(Documented):
    """Declarative description + validator for a single endpoint argument."""

    def __init__(self,
                 name: str,
                 arg_type: str,
                 arg_in: str,
                 required: bool = True,
                 auto_cast: bool = True,
                 minimum: int = -1,
                 maximum: int = -1,
                 must_be: "Union[tuple, list]" = (),
                 doc: "Optional[Document]" = None,
                 format_type: "Optional[str]" = None,
                 ignore_check_expect100: bool = False,
                 enum: "Union[tuple, list]" = (),
                 default: "Optional[any]" = Undefined):
        super().__init__(doc)
        if arg_type not in ["str", "string", "bool", "boolean", "number", "int", "long",
                            "double", "decimal", "float", "other"]:
            raise ValueError("Argument type is must be valid type.")
        if arg_in not in ["path", "query", "body"]:
            raise ValueError("Argument location is mut be valid type.")
        self.name = name
        self.type = arg_type
        self.arg_in = arg_in
        self.required = required
        self.auto_cast = auto_cast
        self.min = minimum
        self.max = maximum
        # BUG FIX: the original evaluated `must_be if enum is None else enum`;
        # since `enum` defaults to () (never None), `must_be` was always
        # discarded.  Prefer `enum` only when it is actually populated.
        self.must_be = enum if enum else must_be
        self.document = doc
        self.format = format_type
        self.ignore_check_expect100 = ignore_check_expect100
        self.default = default

    def norm_type(self, val: "Optional[any]" = None) -> "Optional[any]":
        """With no argument, return this argument's schema type name; with an
        argument, cast that value to this argument's Python type."""
        if "str" in self.type:
            return "string" if val is None else str(val)
        elif "bool" in self.type:
            if val is None:
                return "bool"
            # BUG FIX: bool("false") is True — interpret the token instead.
            return val if isinstance(val, bool) else str(val) != "false"
        elif self.type == "number" or "int" in self.type:
            return "integer" if val is None else int(val)
        elif self.type == "long":
            return "integer" if val is None else int(val)
        else:
            return "number" if val is None else float(val)

    def validate(self, param_dict: dict) -> int:
        """Validate (and, with auto_cast, convert in place) this argument.

        Return codes: -1 missing required value, 0 OK, 1 not in allowed set,
        2 type error, 3 below minimum, 4 above maximum.
        """
        name = self.name
        typ = self.type
        cast = self.auto_cast
        must_be = self.must_be
        min_val = self.min
        max_val = self.max
        if name not in param_dict:
            if self.default is not Undefined:
                # Fill in the declared default (cast if requested).
                param_dict[name] = self.norm_type(self.default) if cast else self.default
                return 0
            if self.required:
                # Expect:100-continue probes may legitimately omit the value.
                return 0 if self.ignore_check_expect100 else -1
            return 0
        value = param_dict[name]
        if "str" in typ:
            if len(must_be) != 0 and value not in must_be:
                return 1
            # For strings, min/max bound the length.
            if min_val != -1 and len(value) < min_val:
                return 3
            if max_val != -1 and len(value) > max_val:
                return 4
            if cast:
                param_dict[name] = str(value)
        elif "bool" in typ:
            if value not in ("true", "false") + tuple(must_be):
                return 1
            if cast:
                # BUG FIX: the original used bool(value), which is True for
                # any non-empty string — including the token "false".
                param_dict[name] = value != "false"
        elif typ == "other":
            return 0
        else:
            try:
                if "int" in self.type or self.type == "number":
                    val = int(value)
                else:
                    val = float(value)
            except ValueError:
                return 2
            if len(must_be) != 0 and val not in must_be:
                return 1
            if min_val != -1 and val < min_val:
                return 3
            if max_val != -1 and val > max_val:
                return 4
            if cast:
                param_dict[name] = val
        return 0
class EndPoint(Documented):
    """A routed handler plus the metadata needed to validate its arguments."""

    def __init__(self,
                 method: str,
                 route_path: str,
                 rel_path: str,
                 handler,
                 auth_required: bool = True,
                 args: "Optional[list]" = None,
                 path_arg: bool = False,
                 doc: "Optional[Document]" = None):
        super().__init__(doc)
        self.method = method
        self.route_path = route_path
        self.rel_path = rel_path
        self.handler = handler
        self.auth_required = auth_required
        self.args = () if args is None else args
        self.path_arg = path_arg

    def handle(self, handler, params: dict, queries: dict, path_param: dict) -> "Union[Response, any]":
        """Authenticate, validate, then invoke the wrapped handler."""
        if self.auth_required and handler.do_auth():
            return  # do_auth() has already produced the failure response
        if not self.validate_arg(handler, params, queries, path_param):
            return  # validate_arg has already produced the failure response
        return self.handler(handler, params)

    def validate_arg(self, handler, params: dict, queries: dict, path_param: dict) -> bool:
        """Validate declared arguments against the query/body/path dicts.

        On success, casts query/path values into ``params`` and returns True.
        On failure, writes an error response (quick_invalid / write) and
        returns False.
        """
        missing = []
        for arg in self.args:
            arg: Argument
            if arg.arg_in == "query":
                code = arg.validate(queries)
                if code != -1 and arg.name not in queries:
                    continue  # optional and absent — nothing to cast
            elif arg.arg_in == "body":
                code = arg.validate(params)
                if code != -1 and arg.name not in params:
                    continue
            elif arg.arg_in == "path":
                code = arg.validate(path_param)
                if code != -1 and arg.name not in path_param:
                    continue
            else:
                raise ValueError(f"Validate failed: N:{arg.name} - T:{arg.type} - I:{arg.arg_in}")
            if code == -1:
                missing.append(arg.name)
                continue
            elif code == 1:
                if "bool" in arg.type:
                    quick_invalid(handler, arg.name, "[" + ", ".join(("true", "false") + arg.must_be) + "]")
                    return False
                else:
                    quick_invalid(handler, arg.name, "[" + ", ".join(arg.must_be) + "]")
                    return False
            elif code == 2:
                quick_invalid(handler, arg.name, arg.norm_type())
                return False
            elif code == 3:
                # BUG FIX: the min/max branches tested `"str" in arg.name`
                # (the argument's *name*) instead of its declared type.
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"at least {arg.min} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"at least {arg.min}")
                    return False
            elif code == 4:
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"less than {arg.max} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"less than {arg.max}")
                    return False
            # Mirror validated query/path values into the handler params.
            if arg.arg_in == "query":
                val = arg.norm_type(queries[arg.name]) if arg.auto_cast else queries[arg.name]
                params[arg.name] = val
            elif arg.arg_in == "path":
                val = arg.norm_type(path_param[arg.name]) if arg.auto_cast else path_param[arg.name]
                params[arg.name] = val
        if len(missing) != 0:
            write(handler, 400, e(Cause.MISSING_FIELD, Cause.MISSING_FIELD[2]
                                  .replace("%0", str(len(missing)))
                                  .replace("%1", ", ".join(missing))))
            return False
        return True
class Response(Documented):
    """An HTTP response under construction, with a fluent builder API."""

    def __init__(self,
                 code: int = 0,
                 body: Optional[any] = None,
                 raw_body: bool = False,
                 content_type: Union[str, list] = None,
                 headers: Optional[dict] = None,
                 doc: Optional[Document] = None):
        super().__init__(doc)
        self.code = code
        self.docs = doc  # kept explicitly, mirroring Documented.__init__
        self.body_data = body
        self.raw = raw_body
        self.cont_type = content_type
        self.headers = headers if headers is not None else {}

    def header(self, name: str, value: str) -> Response:
        """Set one response header and return self for chaining."""
        self.headers[name] = value
        return self

    def body(self, value: any, raw: bool = False) -> Response:
        """Attach a body payload; ``raw`` marks it as pre-serialized."""
        self.body_data = value
        self.raw = raw
        return self

    def content_type(self, value: str) -> Response:
        """Record the content type and mirror it into the headers."""
        self.cont_type = value
        return self.header("Content-Type", value)

    def get_code(self) -> int:
        """The HTTP status code chosen for this response."""
        return self.code
class SuccessResponse(Response):
    # Marker subclass: a Response representing a successful outcome.
    pass
class ErrorResponse(Response):
    """A Response representing a failure, optionally built from a ``Cause``
    record (an indexable of at least ``(code, ..., message)``)."""

    def __init__(self,
                 cause: "Optional[Cause]" = None,
                 code: int = 0,
                 headers: "Optional[dict]" = None,
                 body: "Optional[any]" = None,
                 content_type: "Optional[Union[str, list]]" = None,
                 doc: "Optional[Document]" = None):
        # BUG FIX: the original passed positional arguments that did not line
        # up with Response.__init__(code, body, raw_body, content_type,
        # headers, doc) — `headers` landed in `body` and the message in
        # `raw_body`.  Keyword arguments make the mapping explicit.
        if cause is not None:
            super().__init__(code=cause[0], body=cause[2],
                             content_type=content_type, headers=headers, doc=doc)
        else:
            super().__init__(code=code, body=body,
                             content_type=content_type, headers=headers, doc=doc)
        self.cause = cause
def success(code) -> SuccessResponse:
    # Convenience factory: wrap an HTTP status code in a SuccessResponse.
    return SuccessResponse(code)
def error(cause: Optional[Cause] = None, code: int = 0, message: Optional[str] = None) -> ErrorResponse:
    """Build an ErrorResponse, preferring a Cause record when one is given."""
    if cause is None:
        return ErrorResponse(code=code, body=message)
    return ErrorResponse(cause)
global loader
class EPManager:
    """Loads endpoint modules, indexes the endpoints they register, and
    resolves (method, path) lookups.  Installs itself as the module-global
    ``loader`` used by the @http decorator."""

    def __init__(self):
        global loader
        self.signals = []       # raw registrations appended by the @http decorator
        self.index_tree = {}    # nested dict: path part -> ... -> method -> EndPoint
        self.known_source = []  # roots that endpoint file paths are resolved against
        self.count = 0          # number of endpoints indexed so far
        loader = self

    def load(self, root: str) -> None:
        """Import every .py under ``root`` and rebuild the endpoint index."""
        if root in self.known_source:
            raise ValueError("Endpoint base already loaded.")
        else:
            self.known_source.append(root)
        for file in pathlib.Path(root).glob("**/*.py"):
            self.load_single(str(file), False)
        self.make_cache()

    def load_single(self, path: str, build_cache: bool = True) -> bool:
        """Import a single endpoint file; returns False if it cannot be imported."""
        try:
            # Convert the file path into a dotted module name.
            # NOTE(review): the [4:-3] slice assumes a fixed 4-character
            # prefix (e.g. "src" + separator) and strips a trailing ".py" —
            # confirm against the project layout before reusing elsewhere.
            m = ".".join(pathlib.Path(path).parts)[4:-3]
            importlib.import_module(m)
        except (ModuleNotFoundError, TypeError):
            return False
        if build_cache:
            root = os.path.dirname(path)
            if root not in self.known_source:
                self.known_source.append(root)
            self.make_cache()
        return True

    def enumerate(self, dic: Optional[dict] = None) -> list:
        """Flatten the index tree into a list of EndPoint objects."""
        result = []
        if dic is None:
            dic = self.index_tree
        for item in dic.items():
            i = item[1]
            if isinstance(i, dict):
                result += self.enumerate(i)
            else:
                result.append(i)
        return result

    def make_cache(self) -> None:
        """Build ``index_tree`` from the registrations collected in ``signals``."""
        for s in self.signals:
            method = s["method"]
            function = s["func"]
            path = s["path"]
            auth = s["require_auth"]
            args = s["args"]
            docs = s["docs"]
            rt = path
            if rt.endswith(".py"):
                rt = rt[:-3]
            cursor = self.index_tree
            slt = rt.split("/")
            qt_paths = 0  # wildcard segments: "__" = one part, "___" = rest of path
            for i, part in enumerate(slt, 1):
                if part in cursor:
                    # Existing node: a duplicate leaf with the same method is an error.
                    if i == len(slt):
                        if method in cursor[part]:
                            raise ValueError("Duplicate endpoint found:" + rt)
                else:
                    cursor[part] = {}
                cursor = cursor[part]
                if part == "__" or part == "___":
                    qt_paths += 1
            paths = 0
            for arg in args:
                if arg.arg_in == "path":
                    paths += 1
                elif arg.arg_in == "body" and method.upper() in ["GET", "HEAD", "TRACE", "OPTIONS"]:
                    raise TypeError("This method does not get a request body.")
            # Declared path arguments must match wildcard segments in the route.
            if paths != qt_paths:
                raise ValueError("Path argument count mismatch.")
            cursor[method] = EndPoint(method, rt, path, function, auth, args, bool(paths), docs)
            self.count += 1

    def get_endpoint(self, method: str, path: str, params: Optional[dict] = None) -> Optional[EndPoint]:
        """Resolve method+path to an EndPoint, filling ``params`` with any
        path-argument values collected along the way."""
        cursor = self.index_tree
        if path == "/":
            path = "_"  # "_" is the index node for a directory root
        if path.startswith("/"):
            path = path[1:]
        slt = path.split("/")
        args = []  # positional path-argument values collected during descent
        for i, part in enumerate(slt):
            if part in cursor:
                cursor = cursor[part]
                continue
            if "_" in cursor and part == "":
                break  # trailing slash resolves to the directory index
            if "___" in cursor:
                # "___" swallows the remainder of the path as one argument.
                args.append("/".join(slt[i:]))
                cursor = cursor["___"]
                break
            if "__" in cursor:
                # "__" matches exactly one path segment.
                args.append(part)
                cursor = cursor["__"]
                continue
            return None
        if "_" in cursor:
            cursor = cursor["_"]
        if method in cursor:
            result = cursor[method]
            if result.path_arg:
                # Map collected segments onto declared path arguments in order.
                count = 0
                for arg in result.args:
                    if arg.arg_in == "path":
                        params[arg.name] = args[count]
                        count += 1
            return result
        return None

    def reload(self):
        """Forget all registrations and re-import every known source root."""
        self.signals.clear()
        self.index_tree.clear()
        for source in list(self.known_source):
            self.known_source.remove(source)
            self.load(source)
__all__ = [
"http", "Argument", "EndPoint", "Document", "Method", "success", "error",
"SuccessResponse", "ErrorResponse", "Response"
] | 0.834744 | 0.149159 |
import pprint
import re # noqa: F401
import six
from ncloud_server.model.common_code import CommonCode # noqa: F401,E501
from ncloud_server.model.port_forwarding_rule import PortForwardingRule # noqa: F401,E501
from ncloud_server.model.zone import Zone # noqa: F401,E501
class DeletePortForwardingRulesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'port_forwarding_configuration_no': 'str',
'port_forwarding_public_ip': 'str',
'zone': 'Zone',
'internet_line_type': 'CommonCode',
'total_rows': 'int',
'port_forwarding_rule_list': 'list[PortForwardingRule]'
}
attribute_map = {
'port_forwarding_configuration_no': 'portForwardingConfigurationNo',
'port_forwarding_public_ip': 'portForwardingPublicIp',
'zone': 'zone',
'internet_line_type': 'internetLineType',
'total_rows': 'totalRows',
'port_forwarding_rule_list': 'portForwardingRuleList'
}
    def __init__(self, port_forwarding_configuration_no=None, port_forwarding_public_ip=None, zone=None, internet_line_type=None, total_rows=None, port_forwarding_rule_list=None):  # noqa: E501
        """DeletePortForwardingRulesResponse - a model defined in Swagger"""  # noqa: E501
        self._port_forwarding_configuration_no = None
        self._port_forwarding_public_ip = None
        self._zone = None
        self._internet_line_type = None
        self._total_rows = None
        self._port_forwarding_rule_list = None
        self.discriminator = None
        # Only non-None constructor arguments are routed through the property
        # setters; an explicit None is indistinguishable from "not supplied".
        if port_forwarding_configuration_no is not None:
            self.port_forwarding_configuration_no = port_forwarding_configuration_no
        if port_forwarding_public_ip is not None:
            self.port_forwarding_public_ip = port_forwarding_public_ip
        if zone is not None:
            self.zone = zone
        if internet_line_type is not None:
            self.internet_line_type = internet_line_type
        if total_rows is not None:
            self.total_rows = total_rows
        if port_forwarding_rule_list is not None:
            self.port_forwarding_rule_list = port_forwarding_rule_list
    @property
    def port_forwarding_configuration_no(self):
        """Gets the port_forwarding_configuration_no of this DeletePortForwardingRulesResponse.  # noqa: E501

        Port forwarding configuration number.  # noqa: E501

        :return: The port_forwarding_configuration_no of this DeletePortForwardingRulesResponse.  # noqa: E501
        :rtype: str
        """
        return self._port_forwarding_configuration_no

    @port_forwarding_configuration_no.setter
    def port_forwarding_configuration_no(self, port_forwarding_configuration_no):
        """Sets the port_forwarding_configuration_no of this DeletePortForwardingRulesResponse.

        Port forwarding configuration number.  # noqa: E501

        :param port_forwarding_configuration_no: The port_forwarding_configuration_no of this DeletePortForwardingRulesResponse.  # noqa: E501
        :type: str
        """
        # No validation: stores the value as-is (swagger-generated accessor).
        self._port_forwarding_configuration_no = port_forwarding_configuration_no
    @property
    def port_forwarding_public_ip(self):
        """Gets the port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        Port forwarding public IP # noqa: E501
        :return: The port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: str
        """
        return self._port_forwarding_public_ip
    @port_forwarding_public_ip.setter
    def port_forwarding_public_ip(self, port_forwarding_public_ip):
        """Sets the port_forwarding_public_ip of this DeletePortForwardingRulesResponse.
        Port forwarding public IP # noqa: E501
        :param port_forwarding_public_ip: The port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: str
        """
        self._port_forwarding_public_ip = port_forwarding_public_ip
    @property
    def zone(self):
        """Gets the zone of this DeletePortForwardingRulesResponse. # noqa: E501
        Availability zone of the configuration # noqa: E501
        :return: The zone of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: Zone
        """
        return self._zone
    @zone.setter
    def zone(self, zone):
        """Sets the zone of this DeletePortForwardingRulesResponse.
        Availability zone of the configuration # noqa: E501
        :param zone: The zone of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: Zone
        """
        self._zone = zone
    @property
    def internet_line_type(self):
        """Gets the internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        Internet line type # noqa: E501
        :return: The internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: CommonCode
        """
        return self._internet_line_type
    @internet_line_type.setter
    def internet_line_type(self, internet_line_type):
        """Sets the internet_line_type of this DeletePortForwardingRulesResponse.
        Internet line type # noqa: E501
        :param internet_line_type: The internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: CommonCode
        """
        self._internet_line_type = internet_line_type
    @property
    def total_rows(self):
        """Gets the total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :return: The total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: int
        """
        # NOTE(review): presumably the total row count of the query result;
        # the generated spec carries no description for this field.
        return self._total_rows
    @total_rows.setter
    def total_rows(self, total_rows):
        """Sets the total_rows of this DeletePortForwardingRulesResponse.
        :param total_rows: The total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: int
        """
        # No validation is generated for this field; any value is accepted.
        self._total_rows = total_rows
    @property
    def port_forwarding_rule_list(self):
        """Gets the port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :return: The port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: list[PortForwardingRule]
        """
        # List of PortForwardingRule model objects (see swagger_types).
        return self._port_forwarding_rule_list
    @port_forwarding_rule_list.setter
    def port_forwarding_rule_list(self, port_forwarding_rule_list):
        """Sets the port_forwarding_rule_list of this DeletePortForwardingRulesResponse.
        :param port_forwarding_rule_list: The port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: list[PortForwardingRule]
        """
        self._port_forwarding_rule_list = port_forwarding_rule_list
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Plain value (str/int/None) is stored as-is.
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Pretty-print the recursive dict form produced by to_dict().
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeletePortForwardingRulesResponse):
            return False
        # Value equality: compare every backing field.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit __ne__ kept for Python 2 compatibility (file uses six).
        return not self == other
import re # noqa: F401
import six
from ncloud_server.model.common_code import CommonCode # noqa: F401,E501
from ncloud_server.model.port_forwarding_rule import PortForwardingRule # noqa: F401,E501
from ncloud_server.model.zone import Zone # noqa: F401,E501
class DeletePortForwardingRulesResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type of each model field.
    swagger_types = {
        'port_forwarding_configuration_no': 'str',
        'port_forwarding_public_ip': 'str',
        'zone': 'Zone',
        'internet_line_type': 'CommonCode',
        'total_rows': 'int',
        'port_forwarding_rule_list': 'list[PortForwardingRule]'
    }
    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'port_forwarding_configuration_no': 'portForwardingConfigurationNo',
        'port_forwarding_public_ip': 'portForwardingPublicIp',
        'zone': 'zone',
        'internet_line_type': 'internetLineType',
        'total_rows': 'totalRows',
        'port_forwarding_rule_list': 'portForwardingRuleList'
    }
    def __init__(self, port_forwarding_configuration_no=None, port_forwarding_public_ip=None, zone=None, internet_line_type=None, total_rows=None, port_forwarding_rule_list=None):  # noqa: E501
        """DeletePortForwardingRulesResponse - a model defined in Swagger"""  # noqa: E501
        # Private backing fields for the properties declared below.
        self._port_forwarding_configuration_no = None
        self._port_forwarding_public_ip = None
        self._zone = None
        self._internet_line_type = None
        self._total_rows = None
        self._port_forwarding_rule_list = None
        self.discriminator = None
        # Assign only the fields actually supplied; omitted JSON keys stay
        # None (and are still emitted by to_dict()).
        if port_forwarding_configuration_no is not None:
            self.port_forwarding_configuration_no = port_forwarding_configuration_no
        if port_forwarding_public_ip is not None:
            self.port_forwarding_public_ip = port_forwarding_public_ip
        if zone is not None:
            self.zone = zone
        if internet_line_type is not None:
            self.internet_line_type = internet_line_type
        if total_rows is not None:
            self.total_rows = total_rows
        if port_forwarding_rule_list is not None:
            self.port_forwarding_rule_list = port_forwarding_rule_list
    @property
    def port_forwarding_configuration_no(self):
        """Gets the port_forwarding_configuration_no of this DeletePortForwardingRulesResponse. # noqa: E501
        Port forwarding configuration number # noqa: E501
        :return: The port_forwarding_configuration_no of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: str
        """
        return self._port_forwarding_configuration_no
    @port_forwarding_configuration_no.setter
    def port_forwarding_configuration_no(self, port_forwarding_configuration_no):
        """Sets the port_forwarding_configuration_no of this DeletePortForwardingRulesResponse.
        Port forwarding configuration number # noqa: E501
        :param port_forwarding_configuration_no: The port_forwarding_configuration_no of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: str
        """
        self._port_forwarding_configuration_no = port_forwarding_configuration_no
    @property
    def port_forwarding_public_ip(self):
        """Gets the port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        Port forwarding public IP # noqa: E501
        :return: The port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: str
        """
        return self._port_forwarding_public_ip
    @port_forwarding_public_ip.setter
    def port_forwarding_public_ip(self, port_forwarding_public_ip):
        """Sets the port_forwarding_public_ip of this DeletePortForwardingRulesResponse.
        Port forwarding public IP # noqa: E501
        :param port_forwarding_public_ip: The port_forwarding_public_ip of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: str
        """
        self._port_forwarding_public_ip = port_forwarding_public_ip
    @property
    def zone(self):
        """Gets the zone of this DeletePortForwardingRulesResponse. # noqa: E501
        Availability zone of the configuration # noqa: E501
        :return: The zone of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: Zone
        """
        return self._zone
    @zone.setter
    def zone(self, zone):
        """Sets the zone of this DeletePortForwardingRulesResponse.
        Availability zone of the configuration # noqa: E501
        :param zone: The zone of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: Zone
        """
        self._zone = zone
    @property
    def internet_line_type(self):
        """Gets the internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        Internet line type # noqa: E501
        :return: The internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: CommonCode
        """
        return self._internet_line_type
    @internet_line_type.setter
    def internet_line_type(self, internet_line_type):
        """Sets the internet_line_type of this DeletePortForwardingRulesResponse.
        Internet line type # noqa: E501
        :param internet_line_type: The internet_line_type of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: CommonCode
        """
        self._internet_line_type = internet_line_type
    @property
    def total_rows(self):
        """Gets the total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :return: The total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: int
        """
        # NOTE(review): presumably the total row count of the query result;
        # the generated spec carries no description for this field.
        return self._total_rows
    @total_rows.setter
    def total_rows(self, total_rows):
        """Sets the total_rows of this DeletePortForwardingRulesResponse.
        :param total_rows: The total_rows of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: int
        """
        self._total_rows = total_rows
    @property
    def port_forwarding_rule_list(self):
        """Gets the port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :return: The port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :rtype: list[PortForwardingRule]
        """
        # List of PortForwardingRule model objects (see swagger_types).
        return self._port_forwarding_rule_list
    @port_forwarding_rule_list.setter
    def port_forwarding_rule_list(self, port_forwarding_rule_list):
        """Sets the port_forwarding_rule_list of this DeletePortForwardingRulesResponse.
        :param port_forwarding_rule_list: The port_forwarding_rule_list of this DeletePortForwardingRulesResponse. # noqa: E501
        :type: list[PortForwardingRule]
        """
        self._port_forwarding_rule_list = port_forwarding_rule_list
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Plain value (str/int/None) is stored as-is.
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeletePortForwardingRulesResponse):
            return False
        # Value equality: compare every backing field.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit __ne__ kept for Python 2 compatibility (file uses six).
        return not self == other
__author__ = 'healplease'
import os
import json
import time
import requests
# File-type codes passed to the AppGallery upload API (used as the
# ``parseType`` value in Upload.upload_file).
# NOTE(review): the grouping below (app assets 0-5, compliance documents
# 6-9, VR imagery 10-16) is inferred from the names — confirm against the
# Huawei AppGallery Connect API reference.
FT_APP_ICON = 0
FT_APP_VIDEO_AND_POSTER = 1
FT_APP_SCREENSHOT = 2
FT_RECOMMENDATION_VIDEO = 3
FT_RECOMMENDATION_IMAGE = 4
FT_APK_OR_RPK = 5
FT_PROXY_CERTIFICATE_OR_COPYRIGHT_IMAGE = 6
FT_CERTIFICATE_PDF = 7
FT_CULTURE_OPERATION_SCREENSHOT = 8
FT_CULTURE_OPERATION_IMAGE_OR_PDF = 9
FT_VR_COVER_IMAGE = 10
FT_VR_APP_SCREENSHOT = 11
FT_VR_APP_RECOMMENDATION_IMAGE = 12
FT_VR_COVER_LAYERING_IMAGE = 13
FT_VR_IMAGE_4_TO_3_RATIO = 14
FT_VR_IMAGE_1_TO_1_RATIO = 15
FT_VR_IMAGE_PANORAMA = 16
class Credentials():
    '''API credentials for the AppGallery Connect OAuth endpoint.

    Values may come from a JSON file pointed to by the
    ``HUAWEI_CREDENTIALS_PATH`` environment variable and/or from keyword
    arguments.  Keyword parameters have higher priority than the JSON file.
    Missing values resolve to None.
    '''
    def __init__(self, client_id: str=None, client_secret: str=None, grant_type: str=None):
        # Pre-initialise the attributes: the original code raised
        # AttributeError when HUAWEI_CREDENTIALS_PATH was unset and a
        # keyword argument was omitted (the fallback read self.client_id
        # before it ever existed).
        self.client_id = None
        self.client_secret = None
        self.grant_type = None
        if os.environ.get('HUAWEI_CREDENTIALS_PATH'):
            with open(os.environ['HUAWEI_CREDENTIALS_PATH'], 'r', encoding='utf-8') as credentials_json:
                credentials_parsed = json.load(credentials_json)
            self.client_id = credentials_parsed.get('client_id')
            self.client_secret = credentials_parsed.get('client_secret')
            self.grant_type = credentials_parsed.get('grant_type')
        # Keyword arguments override whatever the JSON file provided.
        self.client_id = client_id if client_id else self.client_id
        self.client_secret = client_secret if client_secret else self.client_secret
        self.grant_type = grant_type if grant_type else self.grant_type
class AccessToken():
    """OAuth access token returned by the AppGallery auth endpoint.

    ``expires_in`` is the token lifetime in seconds (standard OAuth 2.0
    semantics for this field), so the absolute expiry instant is recorded
    at construction time.
    """
    def __init__(self, parsed: dict):
        self.token = parsed.get('access_token')
        self.expires_in = int(parsed.get('expires_in'))
        # BUG FIX: the original compared time.time() against the *lifetime*
        # itself, which made every token look expired immediately.  Record
        # the absolute deadline instead (expires_in attribute kept for
        # backward compatibility).
        self.expires_at = time.time() + self.expires_in
    def __repr__(self):
        return self.token
    def auth(self):
        """Value for the HTTP ``Authorization`` header."""
        return 'Bearer {}'.format(self.token)
    def is_expired(self):
        """True once the token lifetime has elapsed."""
        return time.time() >= self.expires_at
class Upload():
    """Upload endpoint descriptor obtained from the AppGallery API."""
    def __init__(self, parsed: dict):
        self.URL = parsed.get('uploadUrl')
        self.chunk_URL = parsed.get('chunkUploadUrl')
        self.verification_code = parsed.get('authCode')
    def upload_file(self, filepath: str, count: int=1, parse_type: int=0, name: str=None):
        """Upload *filepath* to the service and return the server-side FileInfo.

        :param filepath: local path of the file to send
        :param count: value for the ``fileCount`` form field
        :param parse_type: optional FT_* file-type code (omitted when 0)
        :param name: optional server-side file name
        :raises requests.RequestException: on a non-200 response
        """
        data = {
            'authCode': self.verification_code,
            'fileCount': count
        }
        if name:
            data.update({ 'name': name })
        if parse_type:
            data.update({ 'parseType': parse_type })
        # BUG FIX: close the file handle deterministically (the original
        # opened it inline and leaked it).
        with open(filepath, 'rb') as upload_stream:
            self.last_response = requests.post(self.URL, data, files={'file': upload_stream})
        # Check the status before parsing: an error body may not be JSON.
        if self.last_response.status_code != 200:
            raise requests.RequestException(f'Unsuccessful request. Error code: {self.last_response.status_code}')
        # BUG FIX: json.loads() lost its `encoding` parameter in Python 3.9;
        # requests already decodes .text for us.
        response = json.loads(self.last_response.text)
        info = response.get('result').get('UploadFileRsp').get('fileInfoList')
        return FileInfo(info[0])
class Message():
    """Status message parsed from an API response body.

    Falls back to a neutral code / empty text when the server omits the
    ``code`` or ``msg`` fields.
    """
    def __init__(self, parsed: dict):
        self.code, self.description = parsed.get('code', 0), parsed.get('msg', '')
class HuaweiException(Exception):
    """Error reported by the AppGallery API.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers catch it (PEP 8: user exceptions should
    derive from Exception).  When a parsed ``response`` dict is supplied,
    its ``code``/``msg`` fields become attributes and are embedded in the
    exception message.
    """
    def __init__(self, *args, **kwargs):
        ret = kwargs.get('response', None)
        if ret:
            self.code = ret['code']
            self.msg = ret['msg']
            super(HuaweiException, self).__init__(f'\nError {self.code}: {self.msg}')
        else:
            # BUG FIX: the original never called super().__init__ on this
            # path; forward positional args so str(exc) stays meaningful.
            super(HuaweiException, self).__init__(*args)
class AppInfo():
    """App listing metadata parsed from an AppGallery API payload.

    Each attribute mirrors one payload key; keys absent from the payload
    come through as None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'releaseState', 'defaultLang', 'parentType', 'childType',
        'grandChildType', 'privacyPolicy', 'appNetType', 'isFree', 'price',
        'publishCountry', 'contentRate', 'isAppForcedUpdate',
        'sensitivePermissionDesc', 'hispaceAutoDown', 'appTariffType',
        'publicationNumber', 'cultureRecordNumber', 'developerAddr',
        'developerEmail', 'developerPhone', 'developerWebsite',
        'developerNameCn', 'developerNameEn', 'elecCertificateUrl',
        'certificateURLs', 'publicationURLs', 'cultureRecordURLs',
        'updateTime', 'versionNumber', 'familyShareTag',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class LangInfo():
    """Per-language listing assets (name, description, media) for an app.

    Each attribute mirrors one payload key; missing keys come through as
    None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'lang', 'appName', 'appDesc', 'briefInfo', 'newFeatures', 'icon',
        'showType', 'videoShowType', 'introPic', 'introVideo', 'rcmdPic',
        'rcmdVideo',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class AuditInfo():
    """Review/audit verdicts for an app submission.

    Each attribute mirrors one payload key; missing keys come through as
    None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'auditOpinion', 'copyRightAuditResult', 'copyRightAuditOpinion',
        'copyRightCodeAuditResult', 'copyRightCodeAuditOpinion',
        'recordAuditResult', 'recordAuditOpinion',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class FileInfo():
    """Metadata of one uploaded file as reported by the server.

    NOTE: 'fileDestUlr' and 'imageResolutionSingature' are misspelled on
    the wire by the API itself — the keys must stay exactly as-is.
    """
    def __init__(self, parsed: dict):
        self.destination_URL = parsed.get('fileDestUlr')
        # BUG FIX: guard the split — a payload without 'fileDestUlr' used
        # to raise AttributeError on None.split().
        self.name = self.destination_URL.split('/')[-1] if self.destination_URL else None
        self.size = parsed.get('size')
        self.image_resolution = parsed.get('imageResolution')
        self.image_resolution_signature = parsed.get('imageResolutionSingature')
    def JSON(self):
        """Serialize back to the wire format (misspelled keys included)."""
        return json.dumps({
            'fileDestUlr': self.destination_URL,
            'name': self.name,
            'size': self.size,
            'imageResolution': self.image_resolution,
            'imageResolutionSingature': self.image_resolution_signature
        })
__author__ = 'healplease'
import os
import json
import time
import requests
# File-type codes passed to the AppGallery upload API (used as the
# ``parseType`` value in Upload.upload_file).
# NOTE(review): the grouping below (app assets 0-5, compliance documents
# 6-9, VR imagery 10-16) is inferred from the names — confirm against the
# Huawei AppGallery Connect API reference.
FT_APP_ICON = 0
FT_APP_VIDEO_AND_POSTER = 1
FT_APP_SCREENSHOT = 2
FT_RECOMMENDATION_VIDEO = 3
FT_RECOMMENDATION_IMAGE = 4
FT_APK_OR_RPK = 5
FT_PROXY_CERTIFICATE_OR_COPYRIGHT_IMAGE = 6
FT_CERTIFICATE_PDF = 7
FT_CULTURE_OPERATION_SCREENSHOT = 8
FT_CULTURE_OPERATION_IMAGE_OR_PDF = 9
FT_VR_COVER_IMAGE = 10
FT_VR_APP_SCREENSHOT = 11
FT_VR_APP_RECOMMENDATION_IMAGE = 12
FT_VR_COVER_LAYERING_IMAGE = 13
FT_VR_IMAGE_4_TO_3_RATIO = 14
FT_VR_IMAGE_1_TO_1_RATIO = 15
FT_VR_IMAGE_PANORAMA = 16
class Credentials():
    '''API credentials for the AppGallery Connect OAuth endpoint.

    Values may come from a JSON file pointed to by the
    ``HUAWEI_CREDENTIALS_PATH`` environment variable and/or from keyword
    arguments.  Keyword parameters have higher priority than the JSON file.
    Missing values resolve to None.
    '''
    def __init__(self, client_id: str=None, client_secret: str=None, grant_type: str=None):
        # Pre-initialise the attributes: the original code raised
        # AttributeError when HUAWEI_CREDENTIALS_PATH was unset and a
        # keyword argument was omitted (the fallback read self.client_id
        # before it ever existed).
        self.client_id = None
        self.client_secret = None
        self.grant_type = None
        if os.environ.get('HUAWEI_CREDENTIALS_PATH'):
            with open(os.environ['HUAWEI_CREDENTIALS_PATH'], 'r', encoding='utf-8') as credentials_json:
                credentials_parsed = json.load(credentials_json)
            self.client_id = credentials_parsed.get('client_id')
            self.client_secret = credentials_parsed.get('client_secret')
            self.grant_type = credentials_parsed.get('grant_type')
        # Keyword arguments override whatever the JSON file provided.
        self.client_id = client_id if client_id else self.client_id
        self.client_secret = client_secret if client_secret else self.client_secret
        self.grant_type = grant_type if grant_type else self.grant_type
class AccessToken():
    """OAuth access token returned by the AppGallery auth endpoint.

    ``expires_in`` is the token lifetime in seconds (standard OAuth 2.0
    semantics for this field), so the absolute expiry instant is recorded
    at construction time.
    """
    def __init__(self, parsed: dict):
        self.token = parsed.get('access_token')
        self.expires_in = int(parsed.get('expires_in'))
        # BUG FIX: the original compared time.time() against the *lifetime*
        # itself, which made every token look expired immediately.  Record
        # the absolute deadline instead (expires_in attribute kept for
        # backward compatibility).
        self.expires_at = time.time() + self.expires_in
    def __repr__(self):
        return self.token
    def auth(self):
        """Value for the HTTP ``Authorization`` header."""
        return 'Bearer {}'.format(self.token)
    def is_expired(self):
        """True once the token lifetime has elapsed."""
        return time.time() >= self.expires_at
class Upload():
    """Upload endpoint descriptor obtained from the AppGallery API."""
    def __init__(self, parsed: dict):
        self.URL = parsed.get('uploadUrl')
        self.chunk_URL = parsed.get('chunkUploadUrl')
        self.verification_code = parsed.get('authCode')
    def upload_file(self, filepath: str, count: int=1, parse_type: int=0, name: str=None):
        """Upload *filepath* to the service and return the server-side FileInfo.

        :param filepath: local path of the file to send
        :param count: value for the ``fileCount`` form field
        :param parse_type: optional FT_* file-type code (omitted when 0)
        :param name: optional server-side file name
        :raises requests.RequestException: on a non-200 response
        """
        data = {
            'authCode': self.verification_code,
            'fileCount': count
        }
        if name:
            data.update({ 'name': name })
        if parse_type:
            data.update({ 'parseType': parse_type })
        # BUG FIX: close the file handle deterministically (the original
        # opened it inline and leaked it).
        with open(filepath, 'rb') as upload_stream:
            self.last_response = requests.post(self.URL, data, files={'file': upload_stream})
        # Check the status before parsing: an error body may not be JSON.
        if self.last_response.status_code != 200:
            raise requests.RequestException(f'Unsuccessful request. Error code: {self.last_response.status_code}')
        # BUG FIX: json.loads() lost its `encoding` parameter in Python 3.9;
        # requests already decodes .text for us.
        response = json.loads(self.last_response.text)
        info = response.get('result').get('UploadFileRsp').get('fileInfoList')
        return FileInfo(info[0])
class Message():
    """Status message parsed from an API response body.

    Falls back to a neutral code / empty text when the server omits the
    ``code`` or ``msg`` fields.
    """
    def __init__(self, parsed: dict):
        self.code, self.description = parsed.get('code', 0), parsed.get('msg', '')
class HuaweiException(Exception):
    """Error reported by the AppGallery API.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers catch it (PEP 8: user exceptions should
    derive from Exception).  When a parsed ``response`` dict is supplied,
    its ``code``/``msg`` fields become attributes and are embedded in the
    exception message.
    """
    def __init__(self, *args, **kwargs):
        ret = kwargs.get('response', None)
        if ret:
            self.code = ret['code']
            self.msg = ret['msg']
            super(HuaweiException, self).__init__(f'\nError {self.code}: {self.msg}')
        else:
            # BUG FIX: the original never called super().__init__ on this
            # path; forward positional args so str(exc) stays meaningful.
            super(HuaweiException, self).__init__(*args)
class AppInfo():
    """App listing metadata parsed from an AppGallery API payload.

    Each attribute mirrors one payload key; keys absent from the payload
    come through as None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'releaseState', 'defaultLang', 'parentType', 'childType',
        'grandChildType', 'privacyPolicy', 'appNetType', 'isFree', 'price',
        'publishCountry', 'contentRate', 'isAppForcedUpdate',
        'sensitivePermissionDesc', 'hispaceAutoDown', 'appTariffType',
        'publicationNumber', 'cultureRecordNumber', 'developerAddr',
        'developerEmail', 'developerPhone', 'developerWebsite',
        'developerNameCn', 'developerNameEn', 'elecCertificateUrl',
        'certificateURLs', 'publicationURLs', 'cultureRecordURLs',
        'updateTime', 'versionNumber', 'familyShareTag',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class LangInfo():
    """Per-language listing assets (name, description, media) for an app.

    Each attribute mirrors one payload key; missing keys come through as
    None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'lang', 'appName', 'appDesc', 'briefInfo', 'newFeatures', 'icon',
        'showType', 'videoShowType', 'introPic', 'introVideo', 'rcmdPic',
        'rcmdVideo',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class AuditInfo():
    """Review/audit verdicts for an app submission.

    Each attribute mirrors one payload key; missing keys come through as
    None.
    """
    # Payload keys, in the exact order the attributes are created.
    _KEYS = (
        'auditOpinion', 'copyRightAuditResult', 'copyRightAuditOpinion',
        'copyRightCodeAuditResult', 'copyRightCodeAuditOpinion',
        'recordAuditResult', 'recordAuditOpinion',
    )
    def __init__(self, parsed: dict):
        for key in self._KEYS:
            setattr(self, key, parsed.get(key))
    def JSON(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self, default=lambda x: x.__dict__)
class FileInfo():
    """Metadata of one uploaded file as reported by the server.

    NOTE: 'fileDestUlr' and 'imageResolutionSingature' are misspelled on
    the wire by the API itself — the keys must stay exactly as-is.
    """
    def __init__(self, parsed: dict):
        self.destination_URL = parsed.get('fileDestUlr')
        # BUG FIX: guard the split — a payload without 'fileDestUlr' used
        # to raise AttributeError on None.split().
        self.name = self.destination_URL.split('/')[-1] if self.destination_URL else None
        self.size = parsed.get('size')
        self.image_resolution = parsed.get('imageResolution')
        self.image_resolution_signature = parsed.get('imageResolutionSingature')
    def JSON(self):
        """Serialize back to the wire format (misspelled keys included)."""
        return json.dumps({
            'fileDestUlr': self.destination_URL,
            'name': self.name,
            'size': self.size,
            'imageResolution': self.image_resolution,
            'imageResolutionSingature': self.image_resolution_signature
        })
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers import SavitzkyGolayFilter
from tests.utils import skip_if_no_cuda
# Each test case is [filter kwargs, input tensor, expected output, atol].
# Zero-padding trivial tests
TEST_CASE_SINGLE_VALUE = [
    {"window_length": 3, "order": 1},
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Input data: Single value
    torch.Tensor([1 / 3]).unsqueeze(0).unsqueeze(0),  # Expected output: With a window length of 3 and polyorder 1
    # output should be equal to mean of 0, 1 and 0 = 1/3 (because input will be zero-padded and a linear fit performed)
    1e-15,  # absolute tolerance
]
TEST_CASE_1D = [
    {"window_length": 3, "order": 1},
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Input data
    torch.Tensor([2 / 3, 1.0, 2 / 3])
    .unsqueeze(0)
    .unsqueeze(0),  # Expected output: zero padded, so linear interpolation
    # over length-3 windows will result in output of [2/3, 1, 2/3].
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_2 = [
    {"window_length": 3, "order": 1},  # along default axis (2, first spatial dim)
    torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[2 / 3, 2 / 3], [1.0, 1.0], [2 / 3, 2 / 3]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_3 = [
    {"window_length": 3, "order": 1, "axis": 3},  # along axis 3 (second spatial dim)
    torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[2 / 3, 1.0, 2 / 3], [2 / 3, 1.0, 2 / 3]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
# Replicated-padding trivial tests
TEST_CASE_SINGLE_VALUE_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Input data: Single value
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Expected output: With a window length of 3 and polyorder 1
    # output will be equal to mean of [1, 1, 1] = 1 (input will be nearest-neighbour-padded and a linear fit performed)
    1e-15,  # absolute tolerance
]
TEST_CASE_1D_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Input data
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Expected output: replicate-padded, so a
    # constant input stays constant: [1, 1, 1] (unlike the zero-padded case above).
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_2_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},  # along default axis (2, first spatial dim)
    torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_3_REP = [
    {"window_length": 3, "order": 1, "axis": 3, "mode": "replicate"},  # along axis 3 (second spatial dim)
    torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
# Sine smoothing
TEST_CASE_SINE_SMOOTH = [
    {"window_length": 3, "order": 1},
    # Sine wave with period equal to savgol window length (windowed to reduce edge effects).
    torch.as_tensor(np.sin(2 * np.pi * 1 / 3 * np.arange(100)) * np.hanning(100)).unsqueeze(0).unsqueeze(0),
    # Should be smoothed out to zeros
    torch.zeros(100).unsqueeze(0).unsqueeze(0),
    # tolerance chosen by examining output of SciPy.signal.savgol_filter when provided the above input
    2e-2,  # absolute tolerance
]
class TestSavitzkyGolayCPU(unittest.TestCase):
    """Zero-padding test cases evaluated on CPU tensors."""
    @parameterized.expand(
        [
            TEST_CASE_SINGLE_VALUE,
            TEST_CASE_1D,
            TEST_CASE_2D_AXIS_2,
            TEST_CASE_2D_AXIS_3,
            TEST_CASE_SINE_SMOOTH,
        ]
    )
    def test_value(self, arguments, image, expected_data, atol):
        """Build the filter from the case kwargs and compare elementwise."""
        result = SavitzkyGolayFilter(**arguments)(image)
        np.testing.assert_allclose(result, expected_data, atol=atol)
class TestSavitzkyGolayCPUREP(unittest.TestCase):
    """Replicate-padding test cases evaluated on CPU tensors."""
    @parameterized.expand(
        [TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP]
    )
    def test_value(self, arguments, image, expected_data, atol):
        """Build the filter from the case kwargs and compare elementwise."""
        result = SavitzkyGolayFilter(**arguments)(image)
        np.testing.assert_allclose(result, expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPU(unittest.TestCase):
    """Zero-padding test cases evaluated on CUDA tensors (skipped without a GPU)."""
    @parameterized.expand(
        [
            TEST_CASE_SINGLE_VALUE,
            TEST_CASE_1D,
            TEST_CASE_2D_AXIS_2,
            TEST_CASE_2D_AXIS_3,
            TEST_CASE_SINE_SMOOTH,
        ]
    )
    def test_value(self, arguments, image, expected_data, atol):
        """Run the filter on the GPU, then compare back on the CPU."""
        result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
        np.testing.assert_allclose(result.cpu(), expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPUREP(unittest.TestCase):
    """Replicate-padding test cases evaluated on CUDA tensors (skipped without a GPU)."""
    @parameterized.expand(
        [
            TEST_CASE_SINGLE_VALUE_REP,
            TEST_CASE_1D_REP,
            TEST_CASE_2D_AXIS_2_REP,
            TEST_CASE_2D_AXIS_3_REP,
        ]
    )
    def test_value(self, arguments, image, expected_data, atol):
        """Run the filter on the GPU, then compare back on the CPU."""
        result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
        np.testing.assert_allclose(result.cpu(), expected_data, atol=atol)
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers import SavitzkyGolayFilter
from tests.utils import skip_if_no_cuda
# Each test case is [filter kwargs, input tensor, expected output, atol].
# Zero-padding trivial tests
TEST_CASE_SINGLE_VALUE = [
    {"window_length": 3, "order": 1},
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Input data: Single value
    torch.Tensor([1 / 3]).unsqueeze(0).unsqueeze(0),  # Expected output: With a window length of 3 and polyorder 1
    # output should be equal to mean of 0, 1 and 0 = 1/3 (because input will be zero-padded and a linear fit performed)
    1e-15,  # absolute tolerance
]
TEST_CASE_1D = [
    {"window_length": 3, "order": 1},
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Input data
    torch.Tensor([2 / 3, 1.0, 2 / 3])
    .unsqueeze(0)
    .unsqueeze(0),  # Expected output: zero padded, so linear interpolation
    # over length-3 windows will result in output of [2/3, 1, 2/3].
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_2 = [
    {"window_length": 3, "order": 1},  # along default axis (2, first spatial dim)
    torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[2 / 3, 2 / 3], [1.0, 1.0], [2 / 3, 2 / 3]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_3 = [
    {"window_length": 3, "order": 1, "axis": 3},  # along axis 3 (second spatial dim)
    torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[2 / 3, 1.0, 2 / 3], [2 / 3, 1.0, 2 / 3]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
# Replicated-padding trivial tests
TEST_CASE_SINGLE_VALUE_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Input data: Single value
    torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0),  # Expected output: With a window length of 3 and polyorder 1
    # output will be equal to mean of [1, 1, 1] = 1 (input will be nearest-neighbour-padded and a linear fit performed)
    1e-15,  # absolute tolerance
]
TEST_CASE_1D_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Input data
    torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0),  # Expected output: replicate-padded, so a
    # constant input stays constant: [1, 1, 1] (unlike the zero-padded case above).
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_2_REP = [
    {"window_length": 3, "order": 1, "mode": "replicate"},  # along default axis (2, first spatial dim)
    torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
TEST_CASE_2D_AXIS_3_REP = [
    {"window_length": 3, "order": 1, "axis": 3, "mode": "replicate"},  # along axis 3 (second spatial dim)
    torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
    torch.Tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
    1e-15,  # absolute tolerance
]
# Sine smoothing
TEST_CASE_SINE_SMOOTH = [
    {"window_length": 3, "order": 1},
    # Sine wave with period equal to savgol window length (windowed to reduce edge effects).
    torch.as_tensor(np.sin(2 * np.pi * 1 / 3 * np.arange(100)) * np.hanning(100)).unsqueeze(0).unsqueeze(0),
    # Should be smoothed out to zeros
    torch.zeros(100).unsqueeze(0).unsqueeze(0),
    # tolerance chosen by examining output of SciPy.signal.savgol_filter when provided the above input
    2e-2,  # absolute tolerance
]
class TestSavitzkyGolayCPU(unittest.TestCase):
    """Zero-padding test cases evaluated on CPU tensors."""
    @parameterized.expand(
        [
            TEST_CASE_SINGLE_VALUE,
            TEST_CASE_1D,
            TEST_CASE_2D_AXIS_2,
            TEST_CASE_2D_AXIS_3,
            TEST_CASE_SINE_SMOOTH,
        ]
    )
    def test_value(self, arguments, image, expected_data, atol):
        """Build the filter from the case kwargs and compare elementwise."""
        result = SavitzkyGolayFilter(**arguments)(image)
        np.testing.assert_allclose(result, expected_data, atol=atol)
class TestSavitzkyGolayCPUREP(unittest.TestCase):
@parameterized.expand(
[TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image)
np.testing.assert_allclose(result, expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPU(unittest.TestCase):
@parameterized.expand(
[
TEST_CASE_SINGLE_VALUE,
TEST_CASE_1D,
TEST_CASE_2D_AXIS_2,
TEST_CASE_2D_AXIS_3,
TEST_CASE_SINE_SMOOTH,
]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
np.testing.assert_allclose(result.cpu(), expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPUREP(unittest.TestCase):
@parameterized.expand(
[
TEST_CASE_SINGLE_VALUE_REP,
TEST_CASE_1D_REP,
TEST_CASE_2D_AXIS_2_REP,
TEST_CASE_2D_AXIS_3_REP,
]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
np.testing.assert_allclose(result.cpu(), expected_data, atol=atol) | 0.781372 | 0.728 |
import argparse
import guitarpro
TOMS = set([38, 40, 37, 41, 43, 45, 47, 48, 50])
CYMBALS = set([49, 57, 52, 55, 51, 53, 54, 56, 59, 42, 44, 46])
HAND_NOTES = TOMS.union(CYMBALS)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_file', dest='input_file', help='Input file name',
type=str, required=True
)
parser.add_argument(
'--output_file', dest='output_file', help='Output file name',
type=str, required=True
)
return parser.parse_args()
def _simplify_note(note):
"""Unify references to bass drum, snare drum or ride cymbal."""
if note.value == 35: # Bass Drum 1 -> Bass Drum 2
note.value = 36
elif note.value == 40: # Snare Drum 2 -> Snare Drum 1
note.value = 38
elif note.value == 59: # Ride Cymbal 2 -> Ride CYmbal 1
note.value = 51
return note
def _place_note(note, note_values):
"""Place note on a string depending on other notes' values."""
if note.value == 36: # Bass drum on string 5
note.string = 5
elif note.value == 38: # Snare drum on string 3
note.string = 3
elif note.value == 44: # Pedal Hi-hat on string 6
note.string = 6
elif note.value == 49: # Crash Cymbal 1 on string 1
note.string = 1
elif note.value in CYMBALS:
if any( # Another cymbal occupies string 1
value in CYMBALS and (value < note.value or value == 49)
for value in note_values
):
note.string = 2
else: # Main cymbal on string 3
note.string = 1
elif note.value in TOMS:
if any(
value in TOMS and (value < note.value or value == 38)
for value in note_values
):
note.string = 4 # Another tom occupies string 3
else:
note.string = 3 # First tom found for this beat on string 3
return note
def _trim_note_values(notes):
"""Trim notes so as maximum 2 are played with the hands."""
note_values = set(note.value for note in notes)
hand_values = note_values.intersection(HAND_NOTES)
if len(hand_values) > 2:
return_values = set()
if 38 in note_values: # Snare Drum is prioritized
return_values.add(38)
if 49 in note_values: # Crash Cymbal 1
return_values.add(49)
if len(return_values) < 2 and 57 in note_values: # Crash Cymbal 1
return_values.add(49)
while len(return_values) < 2:
if any( # Try to append a cymbal
value in CYMBALS and value not in return_values
for value in note_values
):
return_values.add(next(
value
for value in note_values
if value in CYMBALS and value not in return_values
))
if len(return_values) < 2 and any( # Try to append a tom
value in TOMS and value not in return_values
for value in note_values
):
return_values.add(next(
value
for value in note_values
if value in TOMS and value not in return_values
))
return_values = set(return_values)
else:
return_values = hand_values
return_values.update(note_values.difference(HAND_NOTES))
return [note for note in notes if note.value in return_values]
def standardize_track(track):
"""Standardize tab format and clean unplayable parts."""
for measure in track.measures:
for voice in measure.voices:
for beat in voice.beats:
if not any(beat.notes):
continue
beat.notes = list({
note.value: note
for note in [_simplify_note(note) for note in beat.notes]
}.values())
beat.notes = _trim_note_values(beat.notes)
note_values = set(note.value for note in beat.notes)
beat.notes = [
_place_note(note, note_values) for note in beat.notes
]
return track
def standardize(input_filename, output_filename):
"""Read a gp file 'input_filename', write to 'output_filename'."""
song = guitarpro.parse(input_filename)
song.tracks = [
standardize_track(track) if track.isPercussionTrack
else track
for track in song.tracks
]
guitarpro.write(song, output_filename)
def main():
"""Score standardization pipeline."""
args = parse_args()
standardize(args.input_file, args.output_file)
if __name__ == "__main__":
main() | standardize_percussion_tracks.py | import argparse
import guitarpro
TOMS = set([38, 40, 37, 41, 43, 45, 47, 48, 50])
CYMBALS = set([49, 57, 52, 55, 51, 53, 54, 56, 59, 42, 44, 46])
HAND_NOTES = TOMS.union(CYMBALS)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_file', dest='input_file', help='Input file name',
type=str, required=True
)
parser.add_argument(
'--output_file', dest='output_file', help='Output file name',
type=str, required=True
)
return parser.parse_args()
def _simplify_note(note):
"""Unify references to bass drum, snare drum or ride cymbal."""
if note.value == 35: # Bass Drum 1 -> Bass Drum 2
note.value = 36
elif note.value == 40: # Snare Drum 2 -> Snare Drum 1
note.value = 38
elif note.value == 59: # Ride Cymbal 2 -> Ride CYmbal 1
note.value = 51
return note
def _place_note(note, note_values):
"""Place note on a string depending on other notes' values."""
if note.value == 36: # Bass drum on string 5
note.string = 5
elif note.value == 38: # Snare drum on string 3
note.string = 3
elif note.value == 44: # Pedal Hi-hat on string 6
note.string = 6
elif note.value == 49: # Crash Cymbal 1 on string 1
note.string = 1
elif note.value in CYMBALS:
if any( # Another cymbal occupies string 1
value in CYMBALS and (value < note.value or value == 49)
for value in note_values
):
note.string = 2
else: # Main cymbal on string 3
note.string = 1
elif note.value in TOMS:
if any(
value in TOMS and (value < note.value or value == 38)
for value in note_values
):
note.string = 4 # Another tom occupies string 3
else:
note.string = 3 # First tom found for this beat on string 3
return note
def _trim_note_values(notes):
"""Trim notes so as maximum 2 are played with the hands."""
note_values = set(note.value for note in notes)
hand_values = note_values.intersection(HAND_NOTES)
if len(hand_values) > 2:
return_values = set()
if 38 in note_values: # Snare Drum is prioritized
return_values.add(38)
if 49 in note_values: # Crash Cymbal 1
return_values.add(49)
if len(return_values) < 2 and 57 in note_values: # Crash Cymbal 1
return_values.add(49)
while len(return_values) < 2:
if any( # Try to append a cymbal
value in CYMBALS and value not in return_values
for value in note_values
):
return_values.add(next(
value
for value in note_values
if value in CYMBALS and value not in return_values
))
if len(return_values) < 2 and any( # Try to append a tom
value in TOMS and value not in return_values
for value in note_values
):
return_values.add(next(
value
for value in note_values
if value in TOMS and value not in return_values
))
return_values = set(return_values)
else:
return_values = hand_values
return_values.update(note_values.difference(HAND_NOTES))
return [note for note in notes if note.value in return_values]
def standardize_track(track):
"""Standardize tab format and clean unplayable parts."""
for measure in track.measures:
for voice in measure.voices:
for beat in voice.beats:
if not any(beat.notes):
continue
beat.notes = list({
note.value: note
for note in [_simplify_note(note) for note in beat.notes]
}.values())
beat.notes = _trim_note_values(beat.notes)
note_values = set(note.value for note in beat.notes)
beat.notes = [
_place_note(note, note_values) for note in beat.notes
]
return track
def standardize(input_filename, output_filename):
"""Read a gp file 'input_filename', write to 'output_filename'."""
song = guitarpro.parse(input_filename)
song.tracks = [
standardize_track(track) if track.isPercussionTrack
else track
for track in song.tracks
]
guitarpro.write(song, output_filename)
def main():
"""Score standardization pipeline."""
args = parse_args()
standardize(args.input_file, args.output_file)
if __name__ == "__main__":
main() | 0.586168 | 0.495789 |
from __future__ import print_function, unicode_literals
import io
import subprocess
from argparse import ArgumentParser
try:
prompt = raw_input
except NameError:
prompt = input
parser = ArgumentParser()
parser.add_argument(
'-r', '--requirements', type=str, default='requirements.txt',
help='Specify the location of the requirements.txt file')
def get_installed_requirement(entry):
installed_name, installed_version = None, None
name = entry.split('[', 1)[0]
info = (subprocess.check_output(['pip', 'show', name.strip()])
.decode('utf-8', 'replace'))
for line in info.split('\n'):
line = line.strip()
if 'Name: ' in line:
installed_name = line[len('Name: '):]
if 'Version: ' in line:
installed_version = line[len('Version: '):]
if not installed_name or not installed_version:
raise ValueError('Could not info for {!r}'.format(entry))
return entry.replace(name, installed_name, 1), installed_version
def main(args=None):
args = parser.parse_args(args)
# Read pinned requirements
try:
with io.open(args.requirements) as f:
print('Reading {}...'.format(args.requirements))
requirements = [r.split('#', 1)[0].strip() for r in f.readlines()]
except:
print('Error: No requirements.txt found')
return
# Get names of requirements to run 'pip install --upgrade' on
upgrades = []
for requirement in requirements:
if not requirement:
continue
# TODO: Handle other version instructions
if '==' not in requirement:
print('Error: Can only work with pinned requirements for now.')
name, version = requirement.split('==')
upgrades.append(name)
# Edge case
if len(upgrades) == 0:
print('No requirements to upgrade')
return
# Confirm
answer = prompt('Upgrade {} requirements (y/N)? '.format(len(upgrades)))
if answer != 'y':
return
print()
# Run 'pip install --upgrade' on all requirements
for name in upgrades:
print('$ pip install --upgrade', name)
exit_code = subprocess.call(['pip', 'install', '--upgrade', name])
if exit_code != 0:
return
print()
# Show message
print('Collecting installed versions...')
# Generate resulting requirements.txt content
result = ''
for name in upgrades:
installed_name, installed_version = get_installed_requirement(name)
result = '{}{}=={}\n'.format(result, installed_name, installed_version)
# Save upgraded requirements
with io.open(args.requirements, 'w') as f:
f.write(result)
print('Wrote {}'.format(args.requirements))
if __name__ == '__main__':
main() | venv/lib/python3.8/site-packages/upgrade_requirements.py | from __future__ import print_function, unicode_literals
import io
import subprocess
from argparse import ArgumentParser
try:
prompt = raw_input
except NameError:
prompt = input
parser = ArgumentParser()
parser.add_argument(
'-r', '--requirements', type=str, default='requirements.txt',
help='Specify the location of the requirements.txt file')
def get_installed_requirement(entry):
installed_name, installed_version = None, None
name = entry.split('[', 1)[0]
info = (subprocess.check_output(['pip', 'show', name.strip()])
.decode('utf-8', 'replace'))
for line in info.split('\n'):
line = line.strip()
if 'Name: ' in line:
installed_name = line[len('Name: '):]
if 'Version: ' in line:
installed_version = line[len('Version: '):]
if not installed_name or not installed_version:
raise ValueError('Could not info for {!r}'.format(entry))
return entry.replace(name, installed_name, 1), installed_version
def main(args=None):
args = parser.parse_args(args)
# Read pinned requirements
try:
with io.open(args.requirements) as f:
print('Reading {}...'.format(args.requirements))
requirements = [r.split('#', 1)[0].strip() for r in f.readlines()]
except:
print('Error: No requirements.txt found')
return
# Get names of requirements to run 'pip install --upgrade' on
upgrades = []
for requirement in requirements:
if not requirement:
continue
# TODO: Handle other version instructions
if '==' not in requirement:
print('Error: Can only work with pinned requirements for now.')
name, version = requirement.split('==')
upgrades.append(name)
# Edge case
if len(upgrades) == 0:
print('No requirements to upgrade')
return
# Confirm
answer = prompt('Upgrade {} requirements (y/N)? '.format(len(upgrades)))
if answer != 'y':
return
print()
# Run 'pip install --upgrade' on all requirements
for name in upgrades:
print('$ pip install --upgrade', name)
exit_code = subprocess.call(['pip', 'install', '--upgrade', name])
if exit_code != 0:
return
print()
# Show message
print('Collecting installed versions...')
# Generate resulting requirements.txt content
result = ''
for name in upgrades:
installed_name, installed_version = get_installed_requirement(name)
result = '{}{}=={}\n'.format(result, installed_name, installed_version)
# Save upgraded requirements
with io.open(args.requirements, 'w') as f:
f.write(result)
print('Wrote {}'.format(args.requirements))
if __name__ == '__main__':
main() | 0.237222 | 0.04982 |
import os
import sys
import logging
# This is the main prefix used for logging
LOGGER_BASENAME = '''_CI._initialize_template'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
def add_ci_directory_to_path():
current_file_path = os.path.dirname(os.path.abspath(__file__))
ci_path = os.path.abspath(os.path.join(current_file_path, '..'))
if ci_path not in sys.path:
sys.path.append(ci_path)
def initialize_template_environment():
from configuration import (LOGGING_LEVEL,
ENVIRONMENT_VARIABLES,
PREREQUISITES)
from library import (setup_logging,
validate_binary_prerequisites,
validate_environment_variable_prerequisites,
is_venv_created,
execute_command,
load_environment_variables,
load_dot_env_file,
activate_virtual_environment)
load_environment_variables(ENVIRONMENT_VARIABLES)
load_dot_env_file()
if not validate_binary_prerequisites(PREREQUISITES.get('executables', [])):
LOGGER.error('Prerequisite binary missing, cannot continue.')
raise SystemExit(1)
if not validate_environment_variable_prerequisites(PREREQUISITES.get('environment_variables', [])):
LOGGER.error('Prerequisite environment variable missing, cannot continue.')
raise SystemExit(1)
if not is_venv_created():
LOGGER.debug('Trying to create virtual environment.')
success = execute_command('pipenv install --dev --ignore-pipfile')
if success:
activate_virtual_environment()
from emoji import emojize
LOGGER.info('%s Successfully created virtual environment and loaded it! %s',
emojize(':white_heavy_check_mark:'),
emojize(':thumbs_up:'))
else:
LOGGER.error('Creation of virtual environment failed, cannot continue, '
'please clean up .venv directory and try again...')
raise SystemExit(1)
setup_logging(os.environ.get('LOGGING_LEVEL') or LOGGING_LEVEL)
def bootstrap_template():
add_ci_directory_to_path()
from library import activate_template
activate_template()
initialize_template_environment()
bootstrap_template() | _CI/scripts/_initialize_template.py | import os
import sys
import logging
# This is the main prefix used for logging
LOGGER_BASENAME = '''_CI._initialize_template'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
def add_ci_directory_to_path():
current_file_path = os.path.dirname(os.path.abspath(__file__))
ci_path = os.path.abspath(os.path.join(current_file_path, '..'))
if ci_path not in sys.path:
sys.path.append(ci_path)
def initialize_template_environment():
from configuration import (LOGGING_LEVEL,
ENVIRONMENT_VARIABLES,
PREREQUISITES)
from library import (setup_logging,
validate_binary_prerequisites,
validate_environment_variable_prerequisites,
is_venv_created,
execute_command,
load_environment_variables,
load_dot_env_file,
activate_virtual_environment)
load_environment_variables(ENVIRONMENT_VARIABLES)
load_dot_env_file()
if not validate_binary_prerequisites(PREREQUISITES.get('executables', [])):
LOGGER.error('Prerequisite binary missing, cannot continue.')
raise SystemExit(1)
if not validate_environment_variable_prerequisites(PREREQUISITES.get('environment_variables', [])):
LOGGER.error('Prerequisite environment variable missing, cannot continue.')
raise SystemExit(1)
if not is_venv_created():
LOGGER.debug('Trying to create virtual environment.')
success = execute_command('pipenv install --dev --ignore-pipfile')
if success:
activate_virtual_environment()
from emoji import emojize
LOGGER.info('%s Successfully created virtual environment and loaded it! %s',
emojize(':white_heavy_check_mark:'),
emojize(':thumbs_up:'))
else:
LOGGER.error('Creation of virtual environment failed, cannot continue, '
'please clean up .venv directory and try again...')
raise SystemExit(1)
setup_logging(os.environ.get('LOGGING_LEVEL') or LOGGING_LEVEL)
def bootstrap_template():
add_ci_directory_to_path()
from library import activate_template
activate_template()
initialize_template_environment()
bootstrap_template() | 0.155367 | 0.093347 |
import discord
from necrobot.race.racerstatus import RacerStatus
from necrobot.user import userlib
from necrobot.user.necrouser import NecroUser
from necrobot.util import racetime
from necrobot.util.necrodancer import level
FIELD_UNKNOWN = int(-1)
class Racer(object):
def __init__(self, member: discord.Member):
self._user = None
self._discord_id = int(member.id)
self._state = RacerStatus.unready # see RacerState notes above
self.time = FIELD_UNKNOWN # hundredths of a second
self.igt = FIELD_UNKNOWN # hundredths of a second
self.level = level.LEVEL_NOS # level of death (or LEVEL_FINISHED or LEVEL_UNKNOWN_DEATH)
self.comment = '' # a comment added with .comment
async def initialize(self):
self._user = await userlib.get_user(discord_id=self._discord_id, register=True)
@property
def user(self) -> NecroUser:
return self._user
@property
def member(self) -> discord.Member:
return self.user.member
@property
def name(self) -> str:
return self.member.display_name
@property
def user_id(self) -> int:
return self.user.user_id
@property
def status_str(self) -> str:
return self._status_str(False)
@property
def short_status_str(self) -> str:
return self._status_str(True)
def _status_str(self, short: bool) -> str:
status = ''
if self._state == RacerStatus.finished:
status += racetime.to_str(self.time)
if not self.igt == FIELD_UNKNOWN and not short:
status += ' (igt {})'.format(racetime.to_str(self.igt))
else:
status += str(self._state)
if self._state == RacerStatus.forfeit and not short:
status += ' (rta {}'.format(racetime.to_str(self.time))
if 0 < self.level < 22:
status += ', ' + level.to_str(self.level)
if not self.igt == FIELD_UNKNOWN:
status += ', igt {}'.format(racetime.to_str(self.igt))
status += ')'
if not self.comment == '' and not short:
status += ': ' + self.comment
return status
@property
def time_str(self) -> str:
return racetime.to_str(self.time)
@property
def is_ready(self) -> bool:
return self._state == RacerStatus.ready
@property
def has_begun(self) -> bool:
return self._state > RacerStatus.ready
@property
def is_racing(self) -> bool:
return self._state == RacerStatus.racing
@property
def is_forfeit(self) -> bool:
return self._state == RacerStatus.forfeit
@property
def is_finished(self) -> bool:
return self._state == RacerStatus.finished
@property
def is_done_racing(self) -> bool:
return self._state > RacerStatus.racing
def ready(self) -> bool:
if self._state == RacerStatus.unready:
self._state = RacerStatus.ready
return True
return False
def unready(self) -> bool:
if self._state == RacerStatus.ready:
self._state = RacerStatus.unready
return True
return False
def begin_race(self) -> bool:
if self._state == RacerStatus.ready:
self._state = RacerStatus.racing
return True
return False
def forfeit(self, time) -> bool:
if self._state == RacerStatus.racing or self._state == RacerStatus.finished:
self._state = RacerStatus.forfeit
self.time = time
self.level = level.LEVEL_UNKNOWN_DEATH
self.igt = FIELD_UNKNOWN
return True
return False
def unforfeit(self) -> bool:
if self._state == RacerStatus.forfeit:
self._state = RacerStatus.racing
self.time = FIELD_UNKNOWN
self.igt = FIELD_UNKNOWN
self.level = level.LEVEL_NOS
return True
return False
def finish(self, time) -> bool:
if self._state == RacerStatus.racing or self._state == RacerStatus.forfeit:
self._state = RacerStatus.finished
self.time = time
self.level = level.LEVEL_FINISHED
return True
return False
def unfinish(self) -> bool:
if self._state == RacerStatus.finished:
self._state = RacerStatus.racing
self.time = FIELD_UNKNOWN
self.igt = FIELD_UNKNOWN
self.level = level.LEVEL_NOS
return True
return False
def add_comment(self, comment: str):
self.comment = comment | necrobot/race/racer.py | import discord
from necrobot.race.racerstatus import RacerStatus
from necrobot.user import userlib
from necrobot.user.necrouser import NecroUser
from necrobot.util import racetime
from necrobot.util.necrodancer import level
FIELD_UNKNOWN = int(-1)
class Racer(object):
def __init__(self, member: discord.Member):
self._user = None
self._discord_id = int(member.id)
self._state = RacerStatus.unready # see RacerState notes above
self.time = FIELD_UNKNOWN # hundredths of a second
self.igt = FIELD_UNKNOWN # hundredths of a second
self.level = level.LEVEL_NOS # level of death (or LEVEL_FINISHED or LEVEL_UNKNOWN_DEATH)
self.comment = '' # a comment added with .comment
async def initialize(self):
self._user = await userlib.get_user(discord_id=self._discord_id, register=True)
@property
def user(self) -> NecroUser:
return self._user
@property
def member(self) -> discord.Member:
return self.user.member
@property
def name(self) -> str:
return self.member.display_name
@property
def user_id(self) -> int:
return self.user.user_id
@property
def status_str(self) -> str:
return self._status_str(False)
@property
def short_status_str(self) -> str:
return self._status_str(True)
def _status_str(self, short: bool) -> str:
status = ''
if self._state == RacerStatus.finished:
status += racetime.to_str(self.time)
if not self.igt == FIELD_UNKNOWN and not short:
status += ' (igt {})'.format(racetime.to_str(self.igt))
else:
status += str(self._state)
if self._state == RacerStatus.forfeit and not short:
status += ' (rta {}'.format(racetime.to_str(self.time))
if 0 < self.level < 22:
status += ', ' + level.to_str(self.level)
if not self.igt == FIELD_UNKNOWN:
status += ', igt {}'.format(racetime.to_str(self.igt))
status += ')'
if not self.comment == '' and not short:
status += ': ' + self.comment
return status
@property
def time_str(self) -> str:
return racetime.to_str(self.time)
@property
def is_ready(self) -> bool:
return self._state == RacerStatus.ready
@property
def has_begun(self) -> bool:
return self._state > RacerStatus.ready
@property
def is_racing(self) -> bool:
return self._state == RacerStatus.racing
@property
def is_forfeit(self) -> bool:
return self._state == RacerStatus.forfeit
@property
def is_finished(self) -> bool:
return self._state == RacerStatus.finished
@property
def is_done_racing(self) -> bool:
return self._state > RacerStatus.racing
def ready(self) -> bool:
if self._state == RacerStatus.unready:
self._state = RacerStatus.ready
return True
return False
def unready(self) -> bool:
if self._state == RacerStatus.ready:
self._state = RacerStatus.unready
return True
return False
def begin_race(self) -> bool:
if self._state == RacerStatus.ready:
self._state = RacerStatus.racing
return True
return False
def forfeit(self, time) -> bool:
if self._state == RacerStatus.racing or self._state == RacerStatus.finished:
self._state = RacerStatus.forfeit
self.time = time
self.level = level.LEVEL_UNKNOWN_DEATH
self.igt = FIELD_UNKNOWN
return True
return False
def unforfeit(self) -> bool:
if self._state == RacerStatus.forfeit:
self._state = RacerStatus.racing
self.time = FIELD_UNKNOWN
self.igt = FIELD_UNKNOWN
self.level = level.LEVEL_NOS
return True
return False
def finish(self, time) -> bool:
if self._state == RacerStatus.racing or self._state == RacerStatus.forfeit:
self._state = RacerStatus.finished
self.time = time
self.level = level.LEVEL_FINISHED
return True
return False
def unfinish(self) -> bool:
if self._state == RacerStatus.finished:
self._state = RacerStatus.racing
self.time = FIELD_UNKNOWN
self.igt = FIELD_UNKNOWN
self.level = level.LEVEL_NOS
return True
return False
def add_comment(self, comment: str):
self.comment = comment | 0.507324 | 0.250065 |
import time
import sys
import signal
import threading
from picam_lib import PicamImpl
#### Setup
print("Starting camera, connecting robot")
camera = PicamImpl(PicamImpl.RAW_BAYER)
subdir = "swc_" % int(time.time())
camera.set_save_directory(subdir)
print("Startup complete")
#### Functions
exit_all_loops = False
exit_code = 1
def _sigint_handler(sig, frame):
global exit_all_loops
exit_all_loops = True
def _sigterm_handler(sig, frame):
global exit_all_loops, exit_code
exit_all_loops = True
exit_code = 0
signal.signal(signal.SIGINT, _sigint_handler)
signal.signal(signal.SIGTERM, _sigterm_handler)
CONTINUOUS = 1
BURST = 2
capture = CONTINUOUS
burst_duration = 2
def user_input_monitor():
global capture, exit_all_loops
while not exit_all_loops:
try:
c = input()
if c == str(CONTINUOUS):
print("Capture continuous")
capture = int(c)
elif c == str(BURST):
print("Capture burst")
capture = int(c)
elif c == "0":
print("Exiting")
exit_all_loops = True
else:
print("Got keyboard input %s" % c)
except IOError: pass
#### Start capture
user_keyboard_input_thread = threading.Thread(target=user_input_monitor)
user_keyboard_input_thread.daemon = True
user_keyboard_input_thread.start()
print("Start mission capture")
try:
while True:
if exit_all_loops:
break
try:
if capture == CONTINUOUS:
camera.capture_single()
elif capture == BURST:
print("Start burst capture")
camera.capture_burst(burst_duration)
capture = CONTINUOUS
print("End burst capture")
except Exception as e:
print(e)
except Exception as e:
print(e)
finally:
print("Closing script...")
exit_all_loops = True
signal.setitimer(signal.ITIMER_REAL, 5)
user_keyboard_input_thread.join()
sys.exit(exit_code) | picamera/scripts/switch_capture.py |
import time
import sys
import signal
import threading
from picam_lib import PicamImpl
#### Setup
print("Starting camera, connecting robot")
camera = PicamImpl(PicamImpl.RAW_BAYER)
subdir = "swc_" % int(time.time())
camera.set_save_directory(subdir)
print("Startup complete")
#### Functions
exit_all_loops = False
exit_code = 1
def _sigint_handler(sig, frame):
global exit_all_loops
exit_all_loops = True
def _sigterm_handler(sig, frame):
global exit_all_loops, exit_code
exit_all_loops = True
exit_code = 0
signal.signal(signal.SIGINT, _sigint_handler)
signal.signal(signal.SIGTERM, _sigterm_handler)
CONTINUOUS = 1
BURST = 2
capture = CONTINUOUS
burst_duration = 2
def user_input_monitor():
global capture, exit_all_loops
while not exit_all_loops:
try:
c = input()
if c == str(CONTINUOUS):
print("Capture continuous")
capture = int(c)
elif c == str(BURST):
print("Capture burst")
capture = int(c)
elif c == "0":
print("Exiting")
exit_all_loops = True
else:
print("Got keyboard input %s" % c)
except IOError: pass
#### Start capture
user_keyboard_input_thread = threading.Thread(target=user_input_monitor)
user_keyboard_input_thread.daemon = True
user_keyboard_input_thread.start()
print("Start mission capture")
try:
while True:
if exit_all_loops:
break
try:
if capture == CONTINUOUS:
camera.capture_single()
elif capture == BURST:
print("Start burst capture")
camera.capture_burst(burst_duration)
capture = CONTINUOUS
print("End burst capture")
except Exception as e:
print(e)
except Exception as e:
print(e)
finally:
print("Closing script...")
exit_all_loops = True
signal.setitimer(signal.ITIMER_REAL, 5)
user_keyboard_input_thread.join()
sys.exit(exit_code) | 0.167832 | 0.094052 |
from __future__ import annotations
from .ExpireMessage import ExpireMessage, highest_message_number, message_number_range, valid_message_number_range
from .interfaces import IExpireReceiveListener, IExpireReceiveService
from ..receive import IReceiveListener, IReceiveService
from ..util.Atomic import Atomic
from ..util.ConnectionDetails import ConnectionDetails
from ..util.Listeners import Listeners
class ExpireReceiveService(IExpireReceiveService, IReceiveListener, IReceiveService):
def __init__(self, receive_service: IReceiveService) -> None:
self.last_message_number = Atomic(0)
self.expire_listeners: Listeners[IExpireReceiveListener] = Listeners()
self.receive_listeners: Listeners[IReceiveListener] = Listeners()
receive_service.add_receive_listener(self)
def on_receive(self, message: bytes, details: ConnectionDetails) -> None:
expire_message = ExpireMessage.decode(message)
with self.last_message_number as (last_message_number, set_last_message_number):
end_of_valid_range = (last_message_number + valid_message_number_range) % message_number_range
if expire_message.message_number == highest_message_number:
is_valid = True
elif end_of_valid_range < last_message_number:
is_valid = not end_of_valid_range <= expire_message.message_number <= last_message_number
else:
is_valid = last_message_number < expire_message.message_number < end_of_valid_range
if is_valid:
set_last_message_number(expire_message.message_number)
if is_valid:
self.receive_listeners.for_each(lambda listener: listener.on_receive(expire_message.payload, details))
else:
self.expire_listeners.for_each(lambda listener: listener.on_expire_receive(expire_message.payload, details))
def add_receive_listener(self, listener: IReceiveListener) -> ExpireReceiveService:
self.receive_listeners.add_listener(listener)
return self
def add_expire_listener(self, listener: IExpireReceiveListener) -> ExpireReceiveService:
self.expire_listeners.add_listener(listener)
return self
def reset(self) -> None:
with self.last_message_number as (_, set_last_message_number):
set_last_message_number(0) | src/rcpicar/expire/ExpireReceiveService.py | from __future__ import annotations
from .ExpireMessage import ExpireMessage, highest_message_number, message_number_range, valid_message_number_range
from .interfaces import IExpireReceiveListener, IExpireReceiveService
from ..receive import IReceiveListener, IReceiveService
from ..util.Atomic import Atomic
from ..util.ConnectionDetails import ConnectionDetails
from ..util.Listeners import Listeners
class ExpireReceiveService(IExpireReceiveService, IReceiveListener, IReceiveService):
def __init__(self, receive_service: IReceiveService) -> None:
self.last_message_number = Atomic(0)
self.expire_listeners: Listeners[IExpireReceiveListener] = Listeners()
self.receive_listeners: Listeners[IReceiveListener] = Listeners()
receive_service.add_receive_listener(self)
def on_receive(self, message: bytes, details: ConnectionDetails) -> None:
expire_message = ExpireMessage.decode(message)
with self.last_message_number as (last_message_number, set_last_message_number):
end_of_valid_range = (last_message_number + valid_message_number_range) % message_number_range
if expire_message.message_number == highest_message_number:
is_valid = True
elif end_of_valid_range < last_message_number:
is_valid = not end_of_valid_range <= expire_message.message_number <= last_message_number
else:
is_valid = last_message_number < expire_message.message_number < end_of_valid_range
if is_valid:
set_last_message_number(expire_message.message_number)
if is_valid:
self.receive_listeners.for_each(lambda listener: listener.on_receive(expire_message.payload, details))
else:
self.expire_listeners.for_each(lambda listener: listener.on_expire_receive(expire_message.payload, details))
def add_receive_listener(self, listener: IReceiveListener) -> ExpireReceiveService:
self.receive_listeners.add_listener(listener)
return self
def add_expire_listener(self, listener: IExpireReceiveListener) -> ExpireReceiveService:
self.expire_listeners.add_listener(listener)
return self
def reset(self) -> None:
with self.last_message_number as (_, set_last_message_number):
set_last_message_number(0) | 0.588889 | 0.059074 |
from django.forms import widgets
from django import forms
from django.utils.text import slugify
__all__ = [
'CheckboxSelectInlineLabelMultiple',
'CheckboxWithInlineLabel',
'ChoiceWidget',
'PrettyIDsMixin',
'RadioSelect',
'SelectMultipleAutocomplete',
'TextInputWithSubmitButton',
]
class PrettyIDsMixin:
    """Mixin for Django choice widgets that can emit human-friendly option ids.

    With ``use_nice_ids=True`` the numeric id suffix Django normally appends
    is replaced by the slugified option label, joined with ``-`` instead of
    ``_``.
    """

    def __init__(self, use_nice_ids=False, *args, **kwargs):
        self.use_nice_ids = use_nice_ids
        if use_nice_ids:
            # Suppress Django's automatic numeric index suffix; the slugified
            # label is appended in create_option instead.
            self.add_id_index = False
            self.id_separator = '-'
        else:
            self.id_separator = '_'
        super().__init__(*args, **kwargs)

    def create_option(
            self, name, value, label, selected, index,
            subindex=None, attrs=None):
        """Build the option template context, patching the id when enabled."""
        option_index = str(index) if subindex is None else (
            f"{index}{self.id_separator}{subindex}")
        extra_attrs = attrs or {}
        if self.option_inherits_attrs:
            option_attrs = self.build_attrs(self.attrs, extra_attrs)
        else:
            option_attrs = {}
        if selected:
            option_attrs.update(self.checked_attribute)
        if 'id' in option_attrs:
            if self.use_nice_ids:
                # e.g. "id_field-my-label" instead of "id_field_0"
                option_attrs['id'] = (
                    f"{option_attrs['id']}{self.id_separator}"
                    f"{slugify(label.lower())}")
            else:
                option_attrs['id'] = self.id_for_label(
                    option_attrs['id'], option_index)
        return {
            'name': name,
            'value': value,
            'label': label,
            'selected': selected,
            'index': option_index,
            'attrs': option_attrs,
            'type': self.input_type,
            'template_name': self.option_template_name,
            'wrap_label': True,
        }
class ChoiceWidget(PrettyIDsMixin, widgets.ChoiceWidget):
    """Django ``ChoiceWidget`` extended with PrettyIDsMixin's nicer option ids."""
    pass
class RadioSelect(ChoiceWidget):
    """Radio button group rendered via the shared multiple-input template."""
    template_name = 'great_components/form_widgets/multiple_input.html'
    option_template_name = 'great_components/form_widgets/radio_option.html'
    css_class_name = 'g-select-multiple'
    input_type = 'radio'
class CheckboxWithInlineLabel(forms.widgets.CheckboxInput):
    """Checkbox rendered with its label (and optional help text) inline."""

    template_name = 'great_components/form_widgets/checkbox_inline.html'
    container_css_classes = 'form-group'

    def __init__(self, label='', help_text=None, *args, **kwargs):
        self.label = label
        self.help_text = help_text
        super().__init__(*args, **kwargs)

    def get_context(self, *args, **kwargs):
        """Expose ``label`` and ``help_text`` to the widget template."""
        context = super().get_context(*args, **kwargs)
        context.update(label=self.label, help_text=self.help_text)
        return context
class CheckboxSelectInlineLabelMultiple(PrettyIDsMixin, widgets.CheckboxSelectMultiple):
    """Multiple-checkbox widget with inline labels and a default css class."""

    template_name = 'great_components/form_widgets/multiple_input.html'
    option_template_name = 'great_components/form_widgets/checkbox_inline_multiple.html'
    css_class_name = 'g-select-multiple'
    input_type = 'checkbox'

    def __init__(self, attrs=None, use_nice_ids=False):
        super().__init__(attrs=attrs, use_nice_ids=use_nice_ids)
        # Fall back to the widget's default css class when none was supplied.
        self.attrs.setdefault('class', self.css_class_name)
class SelectMultipleAutocomplete(widgets.SelectMultiple):
    """Multi-select enhanced client-side by the accessible-autocomplete assets."""
    container_css_classes = 'g-multi-select-autocomplete'

    class Media:
        # Static assets injected via Django's form-media machinery.
        css = {
            'all': ('great_components/js/vendor/accessible-autocomplete.min.css',)
        }
        js = (
            'great_components/js/vendor/accessible-autocomplete.min.js',
            'great_components/js/dit.components.multiselect-autocomplete.js',
        )
class RadioNestedWidget(RadioSelect):
    """Radio select where an option can reveal a nested sub-form."""
    option_template_name = 'great_components/form_widgets/nested-radio.html'
    container_css_classes = 'form-group g-radio-nested-container'

    def create_option(self, *args, **kwargs):
        # NOTE(review): nested_form_choice is never assigned in this module;
        # presumably the owning form sets it (like bind_nested_form sets
        # nested_form) before rendering — confirm against callers.
        return {
            **super().create_option(*args, **kwargs),
            'nested_form': self.nested_form,
            'nested_form_choice': self.nested_form_choice,
        }

    def bind_nested_form(self, form):
        # Called by the owning form to attach the nested form instance.
        self.nested_form = form
class TextInputWithSubmitButton(forms.TextInput):
    """Text input rendered together with an inline submit button."""
    container_css_classes = 'text-input-with-submit-button-container'
template_name = 'great_components/form_widgets/text-input-with-submit-button.html' | great_components/forms/widgets.py | from django.forms import widgets
from django import forms
from django.utils.text import slugify
__all__ = [
'CheckboxSelectInlineLabelMultiple',
'CheckboxWithInlineLabel',
'ChoiceWidget',
'PrettyIDsMixin',
'RadioSelect',
'SelectMultipleAutocomplete',
'TextInputWithSubmitButton',
]
class PrettyIDsMixin:
def __init__(self, use_nice_ids=False, *args, **kwargs):
self.use_nice_ids = use_nice_ids
self.id_separator = '_'
if use_nice_ids:
self.add_id_index = False
self.id_separator = '-'
super().__init__(*args, **kwargs)
def create_option(
self, name, value, label, selected, index,
subindex=None, attrs=None):
"""Patch to use nicer ids."""
index = str(index) if subindex is None else "%s%s%s" % (
index, self.id_separator, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(
self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
if self.use_nice_ids:
option_attrs['id'] = "%s%s%s" % (
option_attrs['id'],
self.id_separator,
slugify(label.lower())
)
else:
option_attrs['id'] = self.id_for_label(
option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
class ChoiceWidget(PrettyIDsMixin, widgets.ChoiceWidget):
pass
class RadioSelect(ChoiceWidget):
template_name = 'great_components/form_widgets/multiple_input.html'
option_template_name = 'great_components/form_widgets/radio_option.html'
css_class_name = 'g-select-multiple'
input_type = 'radio'
class CheckboxWithInlineLabel(forms.widgets.CheckboxInput):
template_name = 'great_components/form_widgets/checkbox_inline.html'
container_css_classes = 'form-group'
def __init__(self, label='', help_text=None, *args, **kwargs):
self.label = label
self.help_text = help_text
super().__init__(*args, **kwargs)
def get_context(self, *args, **kwargs):
context = super().get_context(*args, **kwargs)
context['label'] = self.label
context['help_text'] = self.help_text
return context
class CheckboxSelectInlineLabelMultiple(PrettyIDsMixin, widgets.CheckboxSelectMultiple):
template_name = 'great_components/form_widgets/multiple_input.html'
option_template_name = 'great_components/form_widgets/checkbox_inline_multiple.html'
css_class_name = 'g-select-multiple'
input_type = 'checkbox'
def __init__(self, attrs=None, use_nice_ids=False):
super().__init__(attrs=attrs, use_nice_ids=use_nice_ids)
self.attrs['class'] = self.attrs.get('class', self.css_class_name)
class SelectMultipleAutocomplete(widgets.SelectMultiple):
container_css_classes = 'g-multi-select-autocomplete'
class Media:
css = {
'all': ('great_components/js/vendor/accessible-autocomplete.min.css',)
}
js = (
'great_components/js/vendor/accessible-autocomplete.min.js',
'great_components/js/dit.components.multiselect-autocomplete.js',
)
class RadioNestedWidget(RadioSelect):
option_template_name = 'great_components/form_widgets/nested-radio.html'
container_css_classes = 'form-group g-radio-nested-container'
def create_option(self, *args, **kwargs):
return {
**super().create_option(*args, **kwargs),
'nested_form': self.nested_form,
'nested_form_choice': self.nested_form_choice,
}
def bind_nested_form(self, form):
self.nested_form = form
class TextInputWithSubmitButton(forms.TextInput):
container_css_classes = 'text-input-with-submit-button-container'
template_name = 'great_components/form_widgets/text-input-with-submit-button.html' | 0.532668 | 0.056757 |
from abc import ABC
import logging
import tensorflow as tf
import numpy as np
from submarine.ml.model.abstract_model import AbstractModel
from submarine.ml.registries import input_fn_registry
from submarine.ml.parameters import default_parameters
from submarine.utils.env import get_from_registry, get_from_dicts, get_from_json
from submarine.utils.tf_utils import get_tf_config
logger = logging.getLogger(__name__)
# pylint: disable=W0221
class BaseTFModel(AbstractModel, ABC):
def __init__(self, model_params=None, json_path=None):
super().__init__()
self.model_params = get_from_dicts(model_params, default_parameters)
self.model_params = get_from_json(json_path, self.model_params)
self._sanity_checks()
logging.info("Model parameters : %s", self.model_params)
self.input_type = self.model_params['input']['type']
self.model_dir = self.model_params['output']['save_model_dir']
self.config = get_tf_config(self.model_params)
self.model = tf.estimator.Estimator(
model_fn=self.model_fn, model_dir=self.model_dir,
params=self.model_params, config=self.config)
def train(self, train_input_fn=None, eval_input_fn=None, **kwargs):
"""
Trains a pre-defined tensorflow estimator model with given training data
:param train_input_fn: A function that provides input data for training.
:param eval_input_fn: A function that provides input data for evaluating.
:return: None
"""
if train_input_fn is None:
train_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['train_data'],
**self.model_params['training'])
if eval_input_fn is None:
eval_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['valid_data'],
**self.model_params['training'])
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(self.model, train_spec, eval_spec, **kwargs)
def evaluate(self, eval_input_fn=None, **kwargs):
"""
Evaluates a pre-defined Tensorflow estimator model with given evaluate data
:param eval_input_fn: A function that provides input data for evaluating.
:return: A dict containing the evaluation metrics specified in `eval_input_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed
"""
if eval_input_fn is None:
eval_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['valid_data'],
**self.model_params['training'])
return self.model.evaluate(input_fn=eval_input_fn, **kwargs)
def predict(self, predict_input_fn=None, **kwargs):
"""
Yields predictions with given features.
:param predict_input_fn: A function that constructs the features.
Prediction continues until input_fn raises an end-of-input exception
:return: Evaluated values of predictions tensors.
"""
if predict_input_fn is None:
predict_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['test_data'],
**self.model_params['training'])
return self.model.predict(input_fn=predict_input_fn, **kwargs)
def _sanity_checks(self):
assert 'input' in self.model_params, (
'Does not define any input parameters'
)
assert 'type' in self.model_params['input'], (
'Does not define any input type'
)
assert 'output' in self.model_params, (
'Does not define any output parameters'
)
def model_fn(self, features, labels, mode, params):
seed = params["training"]["seed"]
np.random.seed(seed)
tf.set_random_seed(seed) | submarine-sdk/pysubmarine/submarine/ml/model/base_tf_model.py |
from abc import ABC
import logging
import tensorflow as tf
import numpy as np
from submarine.ml.model.abstract_model import AbstractModel
from submarine.ml.registries import input_fn_registry
from submarine.ml.parameters import default_parameters
from submarine.utils.env import get_from_registry, get_from_dicts, get_from_json
from submarine.utils.tf_utils import get_tf_config
logger = logging.getLogger(__name__)
# pylint: disable=W0221
class BaseTFModel(AbstractModel, ABC):
def __init__(self, model_params=None, json_path=None):
super().__init__()
self.model_params = get_from_dicts(model_params, default_parameters)
self.model_params = get_from_json(json_path, self.model_params)
self._sanity_checks()
logging.info("Model parameters : %s", self.model_params)
self.input_type = self.model_params['input']['type']
self.model_dir = self.model_params['output']['save_model_dir']
self.config = get_tf_config(self.model_params)
self.model = tf.estimator.Estimator(
model_fn=self.model_fn, model_dir=self.model_dir,
params=self.model_params, config=self.config)
def train(self, train_input_fn=None, eval_input_fn=None, **kwargs):
"""
Trains a pre-defined tensorflow estimator model with given training data
:param train_input_fn: A function that provides input data for training.
:param eval_input_fn: A function that provides input data for evaluating.
:return: None
"""
if train_input_fn is None:
train_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['train_data'],
**self.model_params['training'])
if eval_input_fn is None:
eval_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['valid_data'],
**self.model_params['training'])
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(self.model, train_spec, eval_spec, **kwargs)
def evaluate(self, eval_input_fn=None, **kwargs):
"""
Evaluates a pre-defined Tensorflow estimator model with given evaluate data
:param eval_input_fn: A function that provides input data for evaluating.
:return: A dict containing the evaluation metrics specified in `eval_input_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed
"""
if eval_input_fn is None:
eval_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['valid_data'],
**self.model_params['training'])
return self.model.evaluate(input_fn=eval_input_fn, **kwargs)
def predict(self, predict_input_fn=None, **kwargs):
"""
Yields predictions with given features.
:param predict_input_fn: A function that constructs the features.
Prediction continues until input_fn raises an end-of-input exception
:return: Evaluated values of predictions tensors.
"""
if predict_input_fn is None:
predict_input_fn = get_from_registry(
self.input_type, input_fn_registry)(
filepath=self.model_params['input']['test_data'],
**self.model_params['training'])
return self.model.predict(input_fn=predict_input_fn, **kwargs)
def _sanity_checks(self):
assert 'input' in self.model_params, (
'Does not define any input parameters'
)
assert 'type' in self.model_params['input'], (
'Does not define any input type'
)
assert 'output' in self.model_params, (
'Does not define any output parameters'
)
def model_fn(self, features, labels, mode, params):
seed = params["training"]["seed"]
np.random.seed(seed)
tf.set_random_seed(seed) | 0.884233 | 0.219735 |
from .util import number_to_string
from .ellipticcurve import INFINITY
from .keys import SigningKey, VerifyingKey
__all__ = [
"ECDH",
"NoKeyError",
"NoCurveError",
"InvalidCurveError",
"InvalidSharedSecretError",
]
class NoKeyError(Exception):
    """Raised by ECDH when an operation needs a key that was not set."""
class NoCurveError(Exception):
    """Raised by ECDH when an operation needs a curve that was not set."""
class InvalidCurveError(Exception):
    """Raised by ECDH when the public and private keys use different curves."""
class InvalidSharedSecretError(Exception):
    """Raised by ECDH when the computed shared secret is the point at infinity."""
class ECDH(object):
"""
Elliptic-curve Diffie-Hellman (ECDH). A key agreement protocol.
Allows two parties, each having an elliptic-curve public-private key
pair, to establish a shared secret over an insecure channel
"""
def __init__(self, curve=None, private_key=None, public_key=None):
"""
ECDH init.
Call can be initialised without parameters, then the first operation
(loading either key) will set the used curve.
All parameters must be ultimately set before shared secret
calculation will be allowed.
:param curve: curve for operations
:type curve: Curve
:param private_key: `my` private key for ECDH
:type private_key: SigningKey
:param public_key: `their` public key for ECDH
:type public_key: VerifyingKey
"""
self.curve = curve
self.private_key = None
self.public_key = None
if private_key:
self.load_private_key(private_key)
if public_key:
self.load_received_public_key(public_key)
def _get_shared_secret(self, remote_public_key):
if not self.private_key:
raise NoKeyError(
"Private key needs to be set to create shared secret"
)
if not self.public_key:
raise NoKeyError(
"Public key needs to be set to create shared secret"
)
if not (
self.private_key.curve == self.curve == remote_public_key.curve
):
raise InvalidCurveError(
"Curves for public key and private key is not equal."
)
# shared secret = PUBKEYtheirs * PRIVATEKEYours
result = (
remote_public_key.pubkey.point
* self.private_key.privkey.secret_multiplier
)
if result == INFINITY:
raise InvalidSharedSecretError("Invalid shared secret (INFINITY).")
return result.x()
def set_curve(self, key_curve):
"""
Set the working curve for ecdh operations.
:param key_curve: curve from `curves` module
:type key_curve: Curve
"""
self.curve = key_curve
def generate_private_key(self):
"""
Generate local private key for ecdh operation with curve that was set.
:raises NoCurveError: Curve must be set before key generation.
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
raise NoCurveError("Curve must be set prior to key generation.")
return self.load_private_key(SigningKey.generate(curve=self.curve))
def load_private_key(self, private_key):
"""
Load private key from SigningKey (keys.py) object.
Needs to have the same curve as was set with set_curve method.
If curve is not set - it sets from this SigningKey
:param private_key: Initialised SigningKey class
:type private_key: SigningKey
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
self.curve = private_key.curve
if self.curve != private_key.curve:
raise InvalidCurveError("Curve mismatch.")
self.private_key = private_key
return self.private_key.get_verifying_key()
def load_private_key_bytes(self, private_key):
"""
Load private key from byte string.
Uses current curve and checks if the provided key matches
the curve of ECDH key agreement.
Key loads via from_string method of SigningKey class
:param private_key: private key in bytes string format
:type private_key: :term:`bytes-like object`
:raises NoCurveError: Curve must be set before loading.
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
raise NoCurveError("Curve must be set prior to key load.")
return self.load_private_key(
SigningKey.from_string(private_key, curve=self.curve)
)
def load_private_key_der(self, private_key_der):
"""
Load private key from DER byte string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only DER format supported is the RFC5915
Look at keys.py:SigningKey.from_der()
:param private_key_der: string with the DER encoding of private ECDSA
key
:type private_key_der: string
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
return self.load_private_key(SigningKey.from_der(private_key_der))
def load_private_key_pem(self, private_key_pem):
"""
Load private key from PEM string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only PEM format supported is the RFC5915
Look at keys.py:SigningKey.from_pem()
it needs to have `EC PRIVATE KEY` section
:param private_key_pem: string with PEM-encoded private ECDSA key
:type private_key_pem: string
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
return self.load_private_key(SigningKey.from_pem(private_key_pem))
def get_public_key(self):
"""
Provides a public key that matches the local private key.
Needs to be sent to the remote party.
:return: public (verifying) key from local private key.
:rtype: VerifyingKey object
"""
return self.private_key.get_verifying_key()
def load_received_public_key(self, public_key):
"""
Load public key from VerifyingKey (keys.py) object.
Needs to have the same curve as set as current for ecdh operation.
If curve is not set - it sets it from VerifyingKey.
:param public_key: Initialised VerifyingKey class
:type public_key: VerifyingKey
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
if not self.curve:
self.curve = public_key.curve
if self.curve != public_key.curve:
raise InvalidCurveError("Curve mismatch.")
self.public_key = public_key
def load_received_public_key_bytes(self, public_key_str):
"""
Load public key from byte string.
Uses current curve and checks if key length corresponds to
the current curve.
Key loads via from_string method of VerifyingKey class
:param public_key_str: public key in bytes string format
:type public_key_str: :term:`bytes-like object`
"""
return self.load_received_public_key(
VerifyingKey.from_string(public_key_str, self.curve)
)
def load_received_public_key_der(self, public_key_der):
"""
Load public key from DER byte string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only DER format supported is the RFC5912
Look at keys.py:VerifyingKey.from_der()
:param public_key_der: string with the DER encoding of public ECDSA key
:type public_key_der: string
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
return self.load_received_public_key(
VerifyingKey.from_der(public_key_der)
)
def load_received_public_key_pem(self, public_key_pem):
"""
Load public key from PEM string.
Compares the curve of the PEM-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only PEM format supported is the RFC5912
Look at keys.py:VerifyingKey.from_pem()
:param public_key_pem: string with PEM-encoded public ECDSA key
:type public_key_pem: string
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
return self.load_received_public_key(
VerifyingKey.from_pem(public_key_pem)
)
def generate_sharedsecret_bytes(self):
"""
Generate shared secret from local private key and remote public key.
The objects needs to have both private key and received public key
before generation is allowed.
:raises InvalidCurveError: public_key curve not the same as self.curve
:raises NoKeyError: public_key or private_key is not set
:return: shared secret
:rtype: byte string
"""
return number_to_string(
self.generate_sharedsecret(), self.private_key.curve.order
)
def generate_sharedsecret(self):
"""
Generate shared secret from local private key and remote public key.
The objects needs to have both private key and received public key
before generation is allowed.
It's the same for local and remote party.
shared secret(local private key, remote public key ) ==
shared secret (local public key, remote private key)
:raises InvalidCurveError: public_key curve not the same as self.curve
:raises NoKeyError: public_key or private_key is not set
:return: shared secret
:rtype: int
"""
return self._get_shared_secret(self.public_key) | src/ecdsa/ecdh.py | from .util import number_to_string
from .ellipticcurve import INFINITY
from .keys import SigningKey, VerifyingKey
__all__ = [
"ECDH",
"NoKeyError",
"NoCurveError",
"InvalidCurveError",
"InvalidSharedSecretError",
]
class NoKeyError(Exception):
"""ECDH. Key not found but it is needed for operation."""
pass
class NoCurveError(Exception):
"""ECDH. Curve not set but it is needed for operation."""
pass
class InvalidCurveError(Exception):
"""
ECDH. Raised in case the public and private keys use different curves.
"""
pass
class InvalidSharedSecretError(Exception):
"""ECDH. Raised in case the shared secret we obtained is an INFINITY."""
pass
class ECDH(object):
"""
Elliptic-curve Diffie-Hellman (ECDH). A key agreement protocol.
Allows two parties, each having an elliptic-curve public-private key
pair, to establish a shared secret over an insecure channel
"""
def __init__(self, curve=None, private_key=None, public_key=None):
"""
ECDH init.
Call can be initialised without parameters, then the first operation
(loading either key) will set the used curve.
All parameters must be ultimately set before shared secret
calculation will be allowed.
:param curve: curve for operations
:type curve: Curve
:param private_key: `my` private key for ECDH
:type private_key: SigningKey
:param public_key: `their` public key for ECDH
:type public_key: VerifyingKey
"""
self.curve = curve
self.private_key = None
self.public_key = None
if private_key:
self.load_private_key(private_key)
if public_key:
self.load_received_public_key(public_key)
def _get_shared_secret(self, remote_public_key):
if not self.private_key:
raise NoKeyError(
"Private key needs to be set to create shared secret"
)
if not self.public_key:
raise NoKeyError(
"Public key needs to be set to create shared secret"
)
if not (
self.private_key.curve == self.curve == remote_public_key.curve
):
raise InvalidCurveError(
"Curves for public key and private key is not equal."
)
# shared secret = PUBKEYtheirs * PRIVATEKEYours
result = (
remote_public_key.pubkey.point
* self.private_key.privkey.secret_multiplier
)
if result == INFINITY:
raise InvalidSharedSecretError("Invalid shared secret (INFINITY).")
return result.x()
def set_curve(self, key_curve):
"""
Set the working curve for ecdh operations.
:param key_curve: curve from `curves` module
:type key_curve: Curve
"""
self.curve = key_curve
def generate_private_key(self):
"""
Generate local private key for ecdh operation with curve that was set.
:raises NoCurveError: Curve must be set before key generation.
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
raise NoCurveError("Curve must be set prior to key generation.")
return self.load_private_key(SigningKey.generate(curve=self.curve))
def load_private_key(self, private_key):
"""
Load private key from SigningKey (keys.py) object.
Needs to have the same curve as was set with set_curve method.
If curve is not set - it sets from this SigningKey
:param private_key: Initialised SigningKey class
:type private_key: SigningKey
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
self.curve = private_key.curve
if self.curve != private_key.curve:
raise InvalidCurveError("Curve mismatch.")
self.private_key = private_key
return self.private_key.get_verifying_key()
def load_private_key_bytes(self, private_key):
"""
Load private key from byte string.
Uses current curve and checks if the provided key matches
the curve of ECDH key agreement.
Key loads via from_string method of SigningKey class
:param private_key: private key in bytes string format
:type private_key: :term:`bytes-like object`
:raises NoCurveError: Curve must be set before loading.
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
if not self.curve:
raise NoCurveError("Curve must be set prior to key load.")
return self.load_private_key(
SigningKey.from_string(private_key, curve=self.curve)
)
def load_private_key_der(self, private_key_der):
"""
Load private key from DER byte string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only DER format supported is the RFC5915
Look at keys.py:SigningKey.from_der()
:param private_key_der: string with the DER encoding of private ECDSA
key
:type private_key_der: string
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
return self.load_private_key(SigningKey.from_der(private_key_der))
def load_private_key_pem(self, private_key_pem):
"""
Load private key from PEM string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only PEM format supported is the RFC5915
Look at keys.py:SigningKey.from_pem()
it needs to have `EC PRIVATE KEY` section
:param private_key_pem: string with PEM-encoded private ECDSA key
:type private_key_pem: string
:raises InvalidCurveError: private_key curve not the same as self.curve
:return: public (verifying) key from this private key.
:rtype: VerifyingKey object
"""
return self.load_private_key(SigningKey.from_pem(private_key_pem))
def get_public_key(self):
"""
Provides a public key that matches the local private key.
Needs to be sent to the remote party.
:return: public (verifying) key from local private key.
:rtype: VerifyingKey object
"""
return self.private_key.get_verifying_key()
def load_received_public_key(self, public_key):
"""
Load public key from VerifyingKey (keys.py) object.
Needs to have the same curve as set as current for ecdh operation.
If curve is not set - it sets it from VerifyingKey.
:param public_key: Initialised VerifyingKey class
:type public_key: VerifyingKey
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
if not self.curve:
self.curve = public_key.curve
if self.curve != public_key.curve:
raise InvalidCurveError("Curve mismatch.")
self.public_key = public_key
def load_received_public_key_bytes(self, public_key_str):
"""
Load public key from byte string.
Uses current curve and checks if key length corresponds to
the current curve.
Key loads via from_string method of VerifyingKey class
:param public_key_str: public key in bytes string format
:type public_key_str: :term:`bytes-like object`
"""
return self.load_received_public_key(
VerifyingKey.from_string(public_key_str, self.curve)
)
def load_received_public_key_der(self, public_key_der):
"""
Load public key from DER byte string.
Compares the curve of the DER-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only DER format supported is the RFC5912
Look at keys.py:VerifyingKey.from_der()
:param public_key_der: string with the DER encoding of public ECDSA key
:type public_key_der: string
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
return self.load_received_public_key(
VerifyingKey.from_der(public_key_der)
)
def load_received_public_key_pem(self, public_key_pem):
"""
Load public key from PEM string.
Compares the curve of the PEM-encoded key with the ECDH set curve,
uses the former if unset.
Note, the only PEM format supported is the RFC5912
Look at keys.py:VerifyingKey.from_pem()
:param public_key_pem: string with PEM-encoded public ECDSA key
:type public_key_pem: string
:raises InvalidCurveError: public_key curve not the same as self.curve
"""
return self.load_received_public_key(
VerifyingKey.from_pem(public_key_pem)
)
def generate_sharedsecret_bytes(self):
"""
Generate shared secret from local private key and remote public key.
The objects needs to have both private key and received public key
before generation is allowed.
:raises InvalidCurveError: public_key curve not the same as self.curve
:raises NoKeyError: public_key or private_key is not set
:return: shared secret
:rtype: byte string
"""
return number_to_string(
self.generate_sharedsecret(), self.private_key.curve.order
)
def generate_sharedsecret(self):
"""
Generate shared secret from local private key and remote public key.
The objects needs to have both private key and received public key
before generation is allowed.
It's the same for local and remote party.
shared secret(local private key, remote public key ) ==
shared secret (local public key, remote private key)
:raises InvalidCurveError: public_key curve not the same as self.curve
:raises NoKeyError: public_key or private_key is not set
:return: shared secret
:rtype: int
"""
return self._get_shared_secret(self.public_key) | 0.940223 | 0.292027 |
import copy
import os.path as osp
from loguru import logger
import torch
import torch.multiprocessing as mp
from videoanalyst.evaluation.got_benchmark.experiments import ExperimentGOT10k
from ..tester_base import TRACK_TESTERS, TesterBase
from .utils.got_benchmark_helper import PipelineTracker
@TRACK_TESTERS.register
class GOT10kTester(TesterBase):
r"""GOT-10k tester
Hyper-parameters
----------------
device_num: int
number of gpus. If set to non-positive number, then use cpu
data_root: str
path to got-10k root
subsets: List[str]
list of subsets name (val|test)
"""
extra_hyper_params = dict(
device_num=1,
data_root="/data/img_120_split",
subsets=["val"], # (val|test)
)
def __init__(self, *args, **kwargs):
super(GOT10kTester, self).__init__(*args, **kwargs)
# self._experiment = None
def update_params(self):
# set device state
num_gpu = self._hyper_params["device_num"]
if num_gpu > 0:
all_devs = [torch.device("cuda:%d" % i) for i in range(num_gpu)]
# all_devs = [torch.device("cuda:1")] #gaidevice
else:
all_devs = [torch.device("cpu")]
self._state["all_devs"] = all_devs
def test(self, ):
tracker_name = self._hyper_params["exp_name"]
all_devs = self._state["all_devs"]
nr_devs = len(all_devs)
for subset in self._hyper_params["subsets"]:
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"],
dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
# single worker
if nr_devs == 1:
dev = all_devs[0]
self._pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, self._pipeline)
experiment.run(pipeline_tracker)
# multi-worker
else:
procs = []
slicing_step = 1.0 / nr_devs
for dev_id, dev in enumerate(all_devs):
slicing_quantile = (slicing_step * dev_id,
slicing_step * (dev_id + 1))
proc = mp.Process(target=self.worker,
args=(dev_id, dev, subset,
slicing_quantile))
proc.start()
procs.append(proc)
for p in procs:
p.join()
# evalutate
performance = experiment.report([tracker_name], plot_curves=False)
test_result_dict = dict()
if performance is not None:
test_result_dict["main_performance"] = performance[tracker_name][
"overall"]["ao"]
else:
test_result_dict["main_performance"] = -1
return test_result_dict
def worker(self, dev_id, dev, subset, slicing_quantile):
self.set_random_seed()
logger.debug("Worker starts: slice {} at {}".format(
slicing_quantile, dev))
tracker_name = self._hyper_params["exp_name"]
pipeline = self._pipeline
pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, pipeline)
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"], dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
experiment.run(pipeline_tracker, slicing_quantile=slicing_quantile)
logger.debug("Worker ends: slice {} at {}".format(
slicing_quantile, dev))
GOT10kTester.default_hyper_params = copy.deepcopy(
GOT10kTester.default_hyper_params)
GOT10kTester.default_hyper_params.update(GOT10kTester.extra_hyper_params) | videoanalyst/engine/tester/tester_impl/got10k.py | import copy
import os.path as osp
from loguru import logger
import torch
import torch.multiprocessing as mp
from videoanalyst.evaluation.got_benchmark.experiments import ExperimentGOT10k
from ..tester_base import TRACK_TESTERS, TesterBase
from .utils.got_benchmark_helper import PipelineTracker
@TRACK_TESTERS.register
class GOT10kTester(TesterBase):
r"""GOT-10k tester
Hyper-parameters
----------------
device_num: int
number of gpus. If set to non-positive number, then use cpu
data_root: str
path to got-10k root
subsets: List[str]
list of subsets name (val|test)
"""
extra_hyper_params = dict(
device_num=1,
data_root="/data/img_120_split",
subsets=["val"], # (val|test)
)
def __init__(self, *args, **kwargs):
super(GOT10kTester, self).__init__(*args, **kwargs)
# self._experiment = None
def update_params(self):
# set device state
num_gpu = self._hyper_params["device_num"]
if num_gpu > 0:
all_devs = [torch.device("cuda:%d" % i) for i in range(num_gpu)]
# all_devs = [torch.device("cuda:1")] #gaidevice
else:
all_devs = [torch.device("cpu")]
self._state["all_devs"] = all_devs
def test(self, ):
tracker_name = self._hyper_params["exp_name"]
all_devs = self._state["all_devs"]
nr_devs = len(all_devs)
for subset in self._hyper_params["subsets"]:
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"],
dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
# single worker
if nr_devs == 1:
dev = all_devs[0]
self._pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, self._pipeline)
experiment.run(pipeline_tracker)
# multi-worker
else:
procs = []
slicing_step = 1.0 / nr_devs
for dev_id, dev in enumerate(all_devs):
slicing_quantile = (slicing_step * dev_id,
slicing_step * (dev_id + 1))
proc = mp.Process(target=self.worker,
args=(dev_id, dev, subset,
slicing_quantile))
proc.start()
procs.append(proc)
for p in procs:
p.join()
# evalutate
performance = experiment.report([tracker_name], plot_curves=False)
test_result_dict = dict()
if performance is not None:
test_result_dict["main_performance"] = performance[tracker_name][
"overall"]["ao"]
else:
test_result_dict["main_performance"] = -1
return test_result_dict
def worker(self, dev_id, dev, subset, slicing_quantile):
self.set_random_seed()
logger.debug("Worker starts: slice {} at {}".format(
slicing_quantile, dev))
tracker_name = self._hyper_params["exp_name"]
pipeline = self._pipeline
pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, pipeline)
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"], dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
experiment.run(pipeline_tracker, slicing_quantile=slicing_quantile)
logger.debug("Worker ends: slice {} at {}".format(
slicing_quantile, dev))
GOT10kTester.default_hyper_params = copy.deepcopy(
GOT10kTester.default_hyper_params)
GOT10kTester.default_hyper_params.update(GOT10kTester.extra_hyper_params) | 0.466603 | 0.148109 |
import numpy as np
import os,sys
from plugins.core.logger import logger
from plugins.core.base_plugin_opt import BasePluginOptimizer
"""
Define user customized plugin optimizer for hyper parameter tuning
The class name "PluginOptimizer" should not be changed
user need to implement the search method at minimal
"""
class PluginOptimizer(BasePluginOptimizer):
"""
create an Optimizer with parameters
param:
- name, string, plugin optimizer name
- hyper_parameters, list, hyper parameters that need to be tuned
- kwargs, dict, algorithm parameters passed by hpo task submission
rest body, the parameter value type is string
"""
def __init__(self, name, hyper_parameters, **kwargs):
super(PluginOptimizer, self).__init__(name, hyper_parameters, **kwargs)
# get all hyper parameters that need to be tuned
logger.info("all tuning hyper parameters: \n{}".format(hyper_parameters))
self._hyper_parameters = hyper_parameters
self._exp_history = []
# get all optimizer search parameters that user passed
logger.info("all optimizer search parameters: \n{}".format(kwargs))
# get optimizer parameters, the parameters value is string
if kwargs.get('random_seed'):
self._random_seed = int(kwargs.get('random_seed'))
np.random.seed(self._random_seed)
#self.rnd = np.random.RandomState(1234)
"""
search new set of candidate hyper-parameters
param:
- number_samples, int, number of hyper parameter candidates requested
- last_exp_results, list, the execution results of last suggested hyper-
parameter sets
return: hyper_params, list, suggested hyper-parameter sets to run
"""
def search(self, number_samples, last_exp_results):
logger.info("last exps results:\n{}".format(last_exp_results))
if not last_exp_results is None and len(last_exp_results) > 0:
self._exp_history.extend(last_exp_results)
# start random search of the hyper-parameters
exp_list = []
for i in range(number_samples):
hypers = {}
for hp in self._hyper_parameters:
type = hp.get('type')
if type == "Range":
val = self._getRandomValueFromRange(hp)
elif type == "Discrete":
val = self._getRandomValueFromDiscrete(hp)
else:
raise Exception("un-supported type {} for random search.".format(type))
hypers[hp.get('name')] = val
exp_list.append(hypers)
logger.info("suggest next exps list:\n{}".format(exp_list))
return exp_list
def get_state(self):
return {'rng_state': np.random.get_state()}
def set_state(self, state_dict):
np.random.set_state(state_dict.get('rng_state'))
def _getRandomValueFromRange(self, hp):
data_type = hp.get('dataType')
if data_type == "DOUBLE":
val = hp.get('minDbVal') + np.random.rand() * (hp.get('maxDbVal') - hp.get('minDbVal'))
elif data_type == "INT":
val = np.random.randint(hp.get('minIntVal'), hp.get('maxIntVal'))
else:
raise Exception("un-supported data type {} for random range search.".format(data_type))
logger.debug("next {} val: {}".format(hp.get('name'), val))
return val
def _getRandomValueFromDiscrete(self, hp):
data_type = hp.get('dataType')
if data_type == "DOUBLE":
vals = hp.get('discreteDbVal')
elif data_type == "INT":
vals = hp.get('discreteIntVal')
else:
vals = hp.get('discreateStrVal')
val = vals[np.random.randint(len(vals))]
logger.debug("next {} val: {}".format(hp.get('name'), val))
return val | HPO-demonstration/random/optimizer.py | import numpy as np
import os,sys
from plugins.core.logger import logger
from plugins.core.base_plugin_opt import BasePluginOptimizer
"""
Define user customized plugin optimizer for hyper parameter tuning
The class name "PluginOptimizer" should not be changed
user need to implement the search method at minimal
"""
class PluginOptimizer(BasePluginOptimizer):
"""
create an Optimizer with parameters
param:
- name, string, plugin optimizer name
- hyper_parameters, list, hyper parameters that need to be tuned
- kwargs, dict, algorithm parameters passed by hpo task submission
rest body, the parameter value type is string
"""
def __init__(self, name, hyper_parameters, **kwargs):
super(PluginOptimizer, self).__init__(name, hyper_parameters, **kwargs)
# get all hyper parameters that need to be tuned
logger.info("all tuning hyper parameters: \n{}".format(hyper_parameters))
self._hyper_parameters = hyper_parameters
self._exp_history = []
# get all optimizer search parameters that user passed
logger.info("all optimizer search parameters: \n{}".format(kwargs))
# get optimizer parameters, the parameters value is string
if kwargs.get('random_seed'):
self._random_seed = int(kwargs.get('random_seed'))
np.random.seed(self._random_seed)
#self.rnd = np.random.RandomState(1234)
"""
search new set of candidate hyper-parameters
param:
- number_samples, int, number of hyper parameter candidates requested
- last_exp_results, list, the execution results of last suggested hyper-
parameter sets
return: hyper_params, list, suggested hyper-parameter sets to run
"""
def search(self, number_samples, last_exp_results):
logger.info("last exps results:\n{}".format(last_exp_results))
if not last_exp_results is None and len(last_exp_results) > 0:
self._exp_history.extend(last_exp_results)
# start random search of the hyper-parameters
exp_list = []
for i in range(number_samples):
hypers = {}
for hp in self._hyper_parameters:
type = hp.get('type')
if type == "Range":
val = self._getRandomValueFromRange(hp)
elif type == "Discrete":
val = self._getRandomValueFromDiscrete(hp)
else:
raise Exception("un-supported type {} for random search.".format(type))
hypers[hp.get('name')] = val
exp_list.append(hypers)
logger.info("suggest next exps list:\n{}".format(exp_list))
return exp_list
def get_state(self):
return {'rng_state': np.random.get_state()}
def set_state(self, state_dict):
np.random.set_state(state_dict.get('rng_state'))
def _getRandomValueFromRange(self, hp):
data_type = hp.get('dataType')
if data_type == "DOUBLE":
val = hp.get('minDbVal') + np.random.rand() * (hp.get('maxDbVal') - hp.get('minDbVal'))
elif data_type == "INT":
val = np.random.randint(hp.get('minIntVal'), hp.get('maxIntVal'))
else:
raise Exception("un-supported data type {} for random range search.".format(data_type))
logger.debug("next {} val: {}".format(hp.get('name'), val))
return val
def _getRandomValueFromDiscrete(self, hp):
data_type = hp.get('dataType')
if data_type == "DOUBLE":
vals = hp.get('discreteDbVal')
elif data_type == "INT":
vals = hp.get('discreteIntVal')
else:
vals = hp.get('discreateStrVal')
val = vals[np.random.randint(len(vals))]
logger.debug("next {} val: {}".format(hp.get('name'), val))
return val | 0.413714 | 0.149656 |
from datetime import datetime
from django.utils.translation.trans_real import translation
from .base import MessageBase
from ..errors import NoRouterError
from ..conf import settings
class OutgoingMessage(MessageBase):
"""
"""
def __init__(self, connection=None, template=None, **kwargs):
self._parts = []
if template is not None:
self.append(template, **kwargs)
self._connection = connection
self.sent_at = None
@property
def language(self):
"""
Return the language which this message will be sent in. If
possible, this is fetched from the recipient Contact model.
Otherwise, it defaults to the ``LANGUAGE_CODE`` setting.
"""
if self._connection.contact is not None:
if self._connection.contact.language:
return self._connection.contact.language
return settings.LANGUAGE_CODE
def append(self, template, **kwargs):
self._parts.append((template, kwargs))
def __repr__(self):
return "<OutgoingMessage (%s): %s>" %\
(self.language, self.text)
def _render_part(self, template, **kwargs):
t = translation(self.language)
tmpl = t.gettext(template)
return tmpl % kwargs
@property
def text(self):
return unicode(" ".join([
self._render_part(template, **kwargs)
for template, kwargs in self._parts
]))
@property
def date(self):
return self.sent_at
def send(self):
"""
Send this message via the router, triggering the _outgoing_
phase (giving any app the opportunity to modify or cancel it).
Return True if the message was sent successfully.
If the router is not running (as is usually the case outside of
the ``runrouter`` process), NoRouterError is raised.
Warning: This method blocks the current thread until the backend
accepts or rejects the message, which takes as long as it takes.
There is currently no way to send messages asynchronously.
"""
from rapidsms.router import router
if not router.running:
raise NoRouterError()
return router.outgoing(self)
def send_now(self):
"""
Send this message immediately via the physical backend. This
should probably only be called by the Router.
"""
from ..router import router
backend_name = self.connection.backend.name
self.sent = router.backends[backend_name].send(self)
if self.sent: self.sent_at = datetime.now()
return self.sent | lib/rapidsms/messages/outgoing.py |
from datetime import datetime
from django.utils.translation.trans_real import translation
from .base import MessageBase
from ..errors import NoRouterError
from ..conf import settings
class OutgoingMessage(MessageBase):
"""
"""
def __init__(self, connection=None, template=None, **kwargs):
self._parts = []
if template is not None:
self.append(template, **kwargs)
self._connection = connection
self.sent_at = None
@property
def language(self):
"""
Return the language which this message will be sent in. If
possible, this is fetched from the recipient Contact model.
Otherwise, it defaults to the ``LANGUAGE_CODE`` setting.
"""
if self._connection.contact is not None:
if self._connection.contact.language:
return self._connection.contact.language
return settings.LANGUAGE_CODE
def append(self, template, **kwargs):
self._parts.append((template, kwargs))
def __repr__(self):
return "<OutgoingMessage (%s): %s>" %\
(self.language, self.text)
def _render_part(self, template, **kwargs):
t = translation(self.language)
tmpl = t.gettext(template)
return tmpl % kwargs
@property
def text(self):
return unicode(" ".join([
self._render_part(template, **kwargs)
for template, kwargs in self._parts
]))
@property
def date(self):
return self.sent_at
def send(self):
"""
Send this message via the router, triggering the _outgoing_
phase (giving any app the opportunity to modify or cancel it).
Return True if the message was sent successfully.
If the router is not running (as is usually the case outside of
the ``runrouter`` process), NoRouterError is raised.
Warning: This method blocks the current thread until the backend
accepts or rejects the message, which takes as long as it takes.
There is currently no way to send messages asynchronously.
"""
from rapidsms.router import router
if not router.running:
raise NoRouterError()
return router.outgoing(self)
def send_now(self):
"""
Send this message immediately via the physical backend. This
should probably only be called by the Router.
"""
from ..router import router
backend_name = self.connection.backend.name
self.sent = router.backends[backend_name].send(self)
if self.sent: self.sent_at = datetime.now()
return self.sent | 0.608012 | 0.085289 |
from preprocess.application import application
from preprocess.bureau_and_balance import bureau_and_balance
from preprocess.previous_application import previous_application
from preprocess.pos_cash import pos_cash
from preprocess.installments_payments import installments_payments
from preprocess.credit_card_balance import credit_card_balance
from preprocess.utils import timer, get_execution_date, get_temp_bucket_prefix
TMP_BUCKET = get_temp_bucket_prefix()
def generate_features(execution_date):
"""Generate features."""
print("\nProcess application")
df = application(execution_date)
print(" Application df shape:", df.shape)
print("\nProcess bureau and bureau_balance")
with timer("processing bureau and bureau_balance"):
bureau = bureau_and_balance()
print(" Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
print("\nProcess previous application")
with timer("processing previous application"):
prev = previous_application()
print(" Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
print("\nProcess POS-CASH balance")
with timer("processing POS-CASH balance"):
pos = pos_cash()
print(" Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
print("\nProcess installments payments")
with timer("processing installments payments"):
ins = installments_payments()
print(" Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
print("\nProcess credit card balance")
with timer("processing credit card balance"):
cc = credit_card_balance()
print(" Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
# [LightGBM] [Fatal] Do not support special JSON characters in feature name.
new_cols = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in df.columns]
df.columns = new_cols
print("\nSave train data")
print(" Train data shape:", df.shape)
df.to_csv(TMP_BUCKET + "credit_train/train.csv", index=False)
def main():
execution_date = get_execution_date()
print(execution_date.strftime("\nExecution date is %Y-%m-%d"))
generate_features(execution_date)
if __name__ == "__main__":
main() | credit_risk/task_features_trainer.py | from preprocess.application import application
from preprocess.bureau_and_balance import bureau_and_balance
from preprocess.previous_application import previous_application
from preprocess.pos_cash import pos_cash
from preprocess.installments_payments import installments_payments
from preprocess.credit_card_balance import credit_card_balance
from preprocess.utils import timer, get_execution_date, get_temp_bucket_prefix
TMP_BUCKET = get_temp_bucket_prefix()
def generate_features(execution_date):
"""Generate features."""
print("\nProcess application")
df = application(execution_date)
print(" Application df shape:", df.shape)
print("\nProcess bureau and bureau_balance")
with timer("processing bureau and bureau_balance"):
bureau = bureau_and_balance()
print(" Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
print("\nProcess previous application")
with timer("processing previous application"):
prev = previous_application()
print(" Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
print("\nProcess POS-CASH balance")
with timer("processing POS-CASH balance"):
pos = pos_cash()
print(" Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
print("\nProcess installments payments")
with timer("processing installments payments"):
ins = installments_payments()
print(" Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
print("\nProcess credit card balance")
with timer("processing credit card balance"):
cc = credit_card_balance()
print(" Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
# [LightGBM] [Fatal] Do not support special JSON characters in feature name.
new_cols = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in df.columns]
df.columns = new_cols
print("\nSave train data")
print(" Train data shape:", df.shape)
df.to_csv(TMP_BUCKET + "credit_train/train.csv", index=False)
def main():
execution_date = get_execution_date()
print(execution_date.strftime("\nExecution date is %Y-%m-%d"))
generate_features(execution_date)
if __name__ == "__main__":
main() | 0.409693 | 0.290402 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserRule']
class UserRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
configuration_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityConfigurationRuleDirection']]] = None,
display_name: Optional[pulumi.Input[str]] = None,
network_manager_name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityConfigurationRuleProtocol']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Network security admin rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] configuration_name: The name of the network manager security Configuration.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]] destination: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[Union[str, 'SecurityConfigurationRuleDirection']] direction: Indicates if the traffic matched against the rule in inbound or outbound.
:param pulumi.Input[str] display_name: A friendly name for the rule.
:param pulumi.Input[str] network_manager_name: The name of the network manager.
:param pulumi.Input[Union[str, 'SecurityConfigurationRuleProtocol']] protocol: Network protocol this rule applies to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] rule_name: The name of the rule.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]] source: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if configuration_name is None and not opts.urn:
raise TypeError("Missing required property 'configuration_name'")
__props__['configuration_name'] = configuration_name
__props__['description'] = description
__props__['destination'] = destination
__props__['destination_port_ranges'] = destination_port_ranges
if direction is None and not opts.urn:
raise TypeError("Missing required property 'direction'")
__props__['direction'] = direction
__props__['display_name'] = display_name
if network_manager_name is None and not opts.urn:
raise TypeError("Missing required property 'network_manager_name'")
__props__['network_manager_name'] = network_manager_name
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['rule_name'] = rule_name
__props__['source'] = source
__props__['source_port_ranges'] = source_port_ranges
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201preview:UserRule"), pulumi.Alias(type_="azure-native:network:UserRule"), pulumi.Alias(type_="azure-nextgen:network:UserRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(UserRule, __self__).__init__(
'azure-native:network/v20210201preview:UserRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'UserRule':
"""
Get an existing UserRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = None
__props__["destination"] = None
__props__["destination_port_ranges"] = None
__props__["direction"] = None
__props__["display_name"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["protocol"] = None
__props__["provisioning_state"] = None
__props__["source"] = None
__props__["source_port_ranges"] = None
__props__["system_data"] = None
__props__["type"] = None
return UserRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destination(self) -> pulumi.Output[Optional[Sequence['outputs.AddressPrefixItemResponse']]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
Indicates if the traffic matched against the rule in inbound or outbound.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
A friendly name for the rule.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the security Configuration resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[Sequence['outputs.AddressPrefixItemResponse']]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata related to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | sdk/python/pulumi_azure_native/network/v20210201preview/user_rule.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserRule']
class UserRule(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 configuration_name: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]]] = None,
                 destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 direction: Optional[pulumi.Input[Union[str, 'SecurityConfigurationRuleDirection']]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 network_manager_name: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[Union[str, 'SecurityConfigurationRuleProtocol']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]]] = None,
                 source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Network security admin rule.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] configuration_name: The name of the network manager security Configuration.
        :param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]] destination: The destination address prefixes. CIDR or destination IP ranges.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
        :param pulumi.Input[Union[str, 'SecurityConfigurationRuleDirection']] direction: Indicates if the traffic matched against the rule in inbound or outbound.
        :param pulumi.Input[str] display_name: A friendly name for the rule.
        :param pulumi.Input[str] network_manager_name: The name of the network manager.
        :param pulumi.Input[Union[str, 'SecurityConfigurationRuleProtocol']] protocol: Network protocol this rule applies to.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] rule_name: The name of the rule.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AddressPrefixItemArgs']]]] source: The CIDR or source IP ranges.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
        """
        # Legacy shims: fold deprecated __name__/__opts__ into the modern
        # resource_name/opts arguments, warning the caller.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id means we are creating a new resource: collect and validate
        # the inputs. (__props__ is reserved for the .get() adoption path.)
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            # Required inputs may only be omitted when rehydrating from a URN.
            if configuration_name is None and not opts.urn:
                raise TypeError("Missing required property 'configuration_name'")
            __props__['configuration_name'] = configuration_name
            __props__['description'] = description
            __props__['destination'] = destination
            __props__['destination_port_ranges'] = destination_port_ranges
            if direction is None and not opts.urn:
                raise TypeError("Missing required property 'direction'")
            __props__['direction'] = direction
            __props__['display_name'] = display_name
            if network_manager_name is None and not opts.urn:
                raise TypeError("Missing required property 'network_manager_name'")
            __props__['network_manager_name'] = network_manager_name
            if protocol is None and not opts.urn:
                raise TypeError("Missing required property 'protocol'")
            __props__['protocol'] = protocol
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['rule_name'] = rule_name
            __props__['source'] = source
            __props__['source_port_ranges'] = source_port_ranges
            # Output-only properties start as None; the engine resolves them.
            __props__['etag'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['system_data'] = None
            __props__['type'] = None
        # Aliases keep state recorded under older type tokens resolvable.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201preview:UserRule"), pulumi.Alias(type_="azure-native:network:UserRule"), pulumi.Alias(type_="azure-nextgen:network:UserRule")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(UserRule, __self__).__init__(
            'azure-native:network/v20210201preview:UserRule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'UserRule':
        """
        Get an existing UserRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Setting opts.id switches the constructor into "adopt existing
        # resource" mode (see __init__).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        # Every property starts as None; the engine fills them in from the
        # provider's saved state for the resource identified by `id`.
        __props__["description"] = None
        __props__["destination"] = None
        __props__["destination_port_ranges"] = None
        __props__["direction"] = None
        __props__["display_name"] = None
        __props__["etag"] = None
        __props__["name"] = None
        __props__["protocol"] = None
        __props__["provisioning_state"] = None
        __props__["source"] = None
        __props__["source_port_ranges"] = None
        __props__["system_data"] = None
        __props__["type"] = None
        return UserRule(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A description for this rule. Restricted to 140 chars.
        """
        # Optional input echoed back as an output; may resolve to None.
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Output[Optional[Sequence['outputs.AddressPrefixItemResponse']]]:
        """
        The destination address prefixes. CIDR or destination IP ranges.
        """
        # Optional list output; may resolve to None when unset.
        return pulumi.get(self, "destination")
    @property
    @pulumi.getter(name="destinationPortRanges")
    def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The destination port ranges.
        """
        # `name=` maps this snake_case property to the camelCase wire name.
        return pulumi.get(self, "destination_port_ranges")
    @property
    @pulumi.getter
    def direction(self) -> pulumi.Output[str]:
        """
        Indicates if the traffic matched against the rule in inbound or outbound.
        """
        # Surfaced as a plain string output, even if set from an enum input.
        return pulumi.get(self, "direction")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[Optional[str]]:
        """
        A friendly name for the rule.
        """
        # `name=` maps this snake_case property to the camelCase wire name.
        return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the security Configuration resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[Sequence['outputs.AddressPrefixItemResponse']]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata related to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.825871 | 0.073132 |
import numpy as np
TWO_PI = 2 * np.pi
WORD_CHAR = list(r"Ff+-|[]#!@{}><&()")
def _force_0_to_2pi(x: float) -> float:
# make sure angle_min in (0, 2 pi)
k = np.floor((TWO_PI - x) / TWO_PI)
return x + k * TWO_PI
def _iter_expand(axiom, idict, depth=1, maxdepth=4):
olist = []
for c in axiom:
if c in idict:
if depth >= maxdepth:
v = idict[c]
else:
v = _iter_expand(idict[c], idict, depth+1, maxdepth)
olist.append(v)
else:
olist.append(c)
return "".join(olist)
def angle_360_2pi(x: float) -> float:
    """Convert an angle given in degrees to radians."""
    fraction_of_turn = x / 360.0
    return fraction_of_turn * TWO_PI
def expand_Lcmd(cmd: str, maxdepth=4) -> str:
    """Parse and expand an L-system command string.

    ``cmd`` is a ';'-separated list whose first entry is the axiom and whose
    remaining entries are production rules of the form ``X=replacement``::

        B;
        B=A+A--A+A;
        A=F+F--F+F;

    The rules are applied ``maxdepth`` times (at least once) and the fully
    expanded command string is returned.

    Fixes over the previous version:
    - rule bodies may themselves contain '=' (split with maxsplit=1 instead
      of silently discarding everything after a second '='),
    - expansion is performed iteratively, so deep expansions cannot hit the
      Python recursion limit.
    """
    lines = cmd.split(';')
    axiom = lines[0].strip()
    idict = {}
    for line in lines[1:]:
        line = line.strip()
        if len(line) == 0:
            continue
        # Split only on the first '=' so the replacement may contain '='.
        item = line.split('=', 1)
        c = item[0].strip()
        assert len(c) == 1 and c.isalpha(
        ), "only one [a-zA-Z] char is supported: %s" % c
        idict[c] = item[1].strip()
    # Apply every production rule simultaneously, maxdepth times (min. 1);
    # this matches the depth-limited recursive expansion it replaces.
    expanded = axiom
    for _ in range(max(maxdepth, 1)):
        expanded = "".join(idict.get(ch, ch) for ch in expanded)
    return expanded
class LSys:
    """Turtle-graphics interpreter for an L-system command string.

    The raw command in ``cmd`` is expanded with ``expand_Lcmd`` and then
    ``step()`` walks the expanded string, emitting line segments and dots
    that a caller can plot.
    """
    def __init__(
        self,
        cmd: str,  # L cmd
        max_expand: int = 4,  # max iteration for expand cmd
        angle: float = np.pi / 3.0,  # (pi / 3) 60 degree
        linelen: float = 0.01,  # basic line length
        loc_start: tuple = (0.5, 0.0),  # start location
        angle_delta: float = np.pi/6.0,  # pi / 6
        linescale: float = 0.6,  # line length scale factor for '<' and '>'
        lineinc: float = 0.01,  # line size increment factor
        linewidth: float = 1.0,  # basic line width
    ) -> None:
        """
        Store the drawing parameters; the command is not expanded here —
        expansion is deferred until step() (or an explicit expand_cmd()).
        """
        self.cmd = cmd
        self.expand_max_iter = max_expand
        self.expanded_cmd = ""
        # Both angles are normalized into a single 2*pi turn up front.
        self.angle = _force_0_to_2pi(angle)
        self.angle_delta = _force_0_to_2pi(angle_delta)
        self.loc_start = loc_start
        self.linescale = linescale
        self.lineinc = lineinc
        self.linelen = linelen
        self.linewidth = linewidth
    def expand_cmd(self):
        # Cache the fully expanded command string for step() to interpret.
        self.expanded_cmd = expand_Lcmd(self.cmd, self.expand_max_iter)
    def step(self,
             loc: tuple = None,
             angle: float = None,
             linelen: float = None,
             linewidth: float = None):
        """
        Interpret the expanded L-system command with a turtle.

        - loc: start point (defaults to self.loc_start)
        - angle: initial heading in radians (defaults to self.angle)
        - linelen: segment length (defaults to self.linelen)
        - linewidth: segment width (defaults to self.linewidth)

        Returns five numpy arrays: segment start points, segment end points,
        segment widths, dot centers and dot radii.
        """
        if loc is None:
            x, y = self.loc_start
        else:
            x, y = loc
        if angle is None:
            angle = self.angle
        if linelen is None:
            linelen = self.linelen
        if linewidth is None:
            linewidth = self.linewidth
        # Lazily expand the command on first use.
        if self.expanded_cmd == "":
            self.expand_cmd()
        Lstart = []
        Lend = []
        Lwidth = []
        Lcirc = []
        Lradius = []
        cacheStack = []  # saved turtle states for '[' / ']'
        # NOTE(review): the turn step resets to self.angle, not to the
        # `angle` argument — confirm that is intended.
        turning_angle = self.angle
        angle_sign = 1.0  # flipped by '&' to swap '+'/'-'
        pstart = (0, 0)
        for c in self.expanded_cmd:
            if c == 'F':
                # move forward, drawing a line
                Lstart.append((x, y))
                x = x + linelen * np.cos(angle)
                y = y + linelen * np.sin(angle)
                Lend.append((x, y))
                Lwidth.append(linewidth)
            elif c == 'f':
                # move forward by line length without drawing a line
                x = x + linelen * np.cos(angle)
                y = y + linelen * np.sin(angle)
            elif c == '+':
                # turn left
                angle -= angle_sign * turning_angle
            elif c == '-':
                # turn right
                angle += angle_sign * turning_angle
            elif c == '|':
                # reverse direction
                angle += np.pi
            elif c == '[':
                # push current drawing state onto stack
                cacheStack.append((x, y, angle, linelen, linewidth))
            elif c == ']':
                # pop current drawing state from stack
                x, y, angle, linelen, linewidth = cacheStack.pop(-1)
            elif c == '#':
                # increment the line width by line width increment
                linewidth += self.lineinc
            elif c == '!':
                # decrease the line width by line width increment
                linewidth -= self.lineinc
            elif c == '@':
                # draw a dot
                # NOTE(review): the radius recorded is linelen, not
                # linewidth as the original comment claimed — confirm.
                Lcirc.append((x, y))
                Lradius.append(linelen)
            elif c == '{':
                # open a polygon: remember its first vertex
                pstart = (x, y)
            elif c == '}':
                # close the polygon with a segment back to its start
                # NOTE(review): records linelen (not linewidth) as the
                # segment width — confirm intended.
                Lstart.append(pstart)
                Lend.append((x, y))
                Lwidth.append(linelen)
            elif c == '>':
                # multiply the line length by scalar
                linelen *= self.linescale
            elif c == '<':
                # divide the line length by scalar
                linelen /= self.linescale
            elif c == '&':
                # swap the meaning of '+' and '-'
                angle_sign *= -1.0
            elif c == '(':
                # decrease turning angle
                turning_angle -= self.angle_delta
            elif c == ')':
                # increase turning angle
                turning_angle += self.angle_delta
            else:
                # non-drawing symbols (e.g. rule names) are ignored
                continue
        return np.asarray(Lstart), np.asarray(Lend), np.asarray(Lwidth), np.asarray(Lcirc), np.asarray(Lradius) | LSystem.py | import numpy as np
TWO_PI = 2 * np.pi
WORD_CHAR = list(r"Ff+-|[]#!@{}><&()")
def _force_0_to_2pi(x: float) -> float:
# make sure angle_min in (0, 2 pi)
k = np.floor((TWO_PI - x) / TWO_PI)
return x + k * TWO_PI
def _iter_expand(axiom, idict, depth=1, maxdepth=4):
olist = []
for c in axiom:
if c in idict:
if depth >= maxdepth:
v = idict[c]
else:
v = _iter_expand(idict[c], idict, depth+1, maxdepth)
olist.append(v)
else:
olist.append(c)
return "".join(olist)
def angle_360_2pi(x: float) -> float:
    """Convert an angle given in degrees to radians."""
    fraction_of_turn = x / 360.0
    return fraction_of_turn * TWO_PI
def expand_Lcmd(cmd: str, maxdepth=4) -> str:
    """Parse and expand an L-system command string.

    ``cmd`` is a ';'-separated list whose first entry is the axiom and whose
    remaining entries are production rules of the form ``X=replacement``::

        B;
        B=A+A--A+A;
        A=F+F--F+F;

    The rules are applied ``maxdepth`` times (at least once) and the fully
    expanded command string is returned.

    Fixes over the previous version:
    - rule bodies may themselves contain '=' (split with maxsplit=1 instead
      of silently discarding everything after a second '='),
    - expansion is performed iteratively, so deep expansions cannot hit the
      Python recursion limit.
    """
    lines = cmd.split(';')
    axiom = lines[0].strip()
    idict = {}
    for line in lines[1:]:
        line = line.strip()
        if len(line) == 0:
            continue
        # Split only on the first '=' so the replacement may contain '='.
        item = line.split('=', 1)
        c = item[0].strip()
        assert len(c) == 1 and c.isalpha(
        ), "only one [a-zA-Z] char is supported: %s" % c
        idict[c] = item[1].strip()
    # Apply every production rule simultaneously, maxdepth times (min. 1);
    # this matches the depth-limited recursive expansion it replaces.
    expanded = axiom
    for _ in range(max(maxdepth, 1)):
        expanded = "".join(idict.get(ch, ch) for ch in expanded)
    return expanded
class LSys:
def __init__(
self,
cmd: str, # L cmd
max_expand: int = 4, # max iteration for expand cmd
angle: float = np.pi / 3.0, # (pi / 3) 60 degree
linelen: float = 0.01, # basic line length
loc_start: tuple = (0.5, 0.0), # start location
angle_delta: float = np.pi/6.0, # pi / 6
linescale: float = 0.6, # line length scale factor for '<' and '>'
lineinc: float = 0.01, # line size increment factor
linewidth: float = 1.0, # basic line width
) -> None:
"""
Lsystem:
"""
self.cmd = cmd
self.expand_max_iter = max_expand
self.expanded_cmd = ""
self.angle = _force_0_to_2pi(angle)
self.angle_delta = _force_0_to_2pi(angle_delta)
self.loc_start = loc_start
self.linescale = linescale
self.lineinc = lineinc
self.linelen = linelen
self.linewidth = linewidth
def expand_cmd(self):
self.expanded_cmd = expand_Lcmd(self.cmd, self.expand_max_iter)
def step(self,
loc: tuple = None,
angle: float = None,
linelen: float = None,
linewidth: float = None):
"""
process the L-system
- loc: start point
"""
if loc is None:
x, y = self.loc_start
else:
x, y = loc
if angle is None:
angle = self.angle
if linelen is None:
linelen = self.linelen
if linewidth is None:
linewidth = self.linewidth
if self.expanded_cmd == "":
self.expand_cmd()
Lstart = []
Lend = []
Lwidth = []
Lcirc = []
Lradius = []
cacheStack = []
turning_angle = self.angle
angle_sign = 1.0
pstart = (0, 0)
for c in self.expanded_cmd:
if c == 'F':
# move forward forward
Lstart.append((x, y))
x = x + linelen * np.cos(angle)
y = y + linelen * np.sin(angle)
Lend.append((x, y))
Lwidth.append(linewidth)
elif c == 'f':
# move forward by line length without drawing a line
x = x + linelen * np.cos(angle)
y = y + linelen * np.sin(angle)
elif c == '+':
# turn left
angle -= angle_sign * turning_angle
elif c == '-':
# turn right
angle += angle_sign * turning_angle
elif c == '|':
# reverse direction
angle += np.pi
elif c == '[':
# push current drawing state onto stack
cacheStack.append((x, y, angle, linelen, linewidth))
elif c == ']':
# pop current drawing state from stack
x, y, angle, linelen, linewidth = cacheStack.pop(-1)
elif c == '#':
# increment the line with by line width increment
linewidth += self.lineinc
elif c == '!':
# decrease the line width by line width increment
linewidth -= self.lineinc
elif c == '@':
# draw a dot with line width radius
Lcirc.append((x, y))
Lradius.append(linelen)
elif c == '{':
# open a polygon
pstart = (x, y)
elif c == '}':
# close a polygon and fill it with fill color
Lstart.append(pstart)
Lend.append((x, y))
Lwidth.append(linelen)
elif c == '>':
# multiply the line length by scalar
linelen *= self.linescale
elif c == '<':
# divide the line length by scalar
linelen /= self.linescale
elif c == '&':
# swap the meaning of '+' and '-'
angle_sign *= -1.0
elif c == '(':
# decrease turring angle
turning_angle -= self.angle_delta
elif c == ')':
# increase turning angle
turning_angle += self.angle_delta
else:
continue
# print(f"character {c} is not supported")
return np.asarray(Lstart), np.asarray(Lend), np.asarray(Lwidth), np.asarray(Lcirc), np.asarray(Lradius) | 0.576184 | 0.660344 |
import torch
import torch.nn as nn
from torch.nn import functional as F
from collections import OrderedDict
from ..builder import BACKBONES
class PyramidFeatures(nn.Module):
    '''
    FPN pyramid layer: builds P3-P7 feature maps (all `feature_size` channels)
    from backbone stages C3-C5 using 1x1 lateral convs, nearest-neighbor
    top-down upsampling and 3x3 smoothing convs; P6/P7 are derived directly
    from C5 as quoted from the paper below.
    '''
    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super(PyramidFeatures, self).__init__()
        # upsample C5 to get P5 from the FPN paper
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        # add P5 elementwise to C4
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        # add P4 elementwise to C3
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        # "P6 is obtained via a 3x3 stride-2 conv on C5"
        self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
        # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
        self.P7_1 = nn.ReLU()
        self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
    def forward(self, inputs):
        # inputs: (C3, C4, C5) backbone features, coarsest last.
        C3, C4, C5 = inputs
        P5 = self.P5_1(C5)
        # The lateral (pre-smoothing) map feeds the top-down sum;
        # the 3x3 conv below only refines the returned P5.
        P5_up = self.P5_upsampled(P5)
        P5 = self.P5_2(P5)
        P4 = self.P4_1(C4)
        P4 = P4 + P5_up
        P4_up = self.P4_upsampled(P4)
        P4 = self.P4_2(P4)
        P3 = self.P3_1(C3)
        P3 = P3 + P4_up
        P3 = self.P3_2(P3)
        # Extra coarse levels computed straight from C5.
        P6 = self.P6(C5)
        P7 = self.P7_1(P6)
        P7 = self.P7_2(P7)
        return [P3, P4, P5, P6, P7]
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 conv+BN layers plus a shortcut.

    The shortcut is the identity (empty nn.Sequential) unless the stride or
    channel count changes, in which case a 1x1 conv + BN projection is used.
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # NOTE(review): self.relu is never used — forward() calls F.relu.
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, \
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes))
        self.stride = stride
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection (projected if shapes differ).
        out += self.downsample(x)
        out = F.relu(out)
        return out
@BACKBONES.register_module()
class FPN18(nn.Module):
def __init__(self):
super(FPN18, self).__init__()
num_blocks = [2,2,2,2]
bb_block = BasicBlock
self.f_in_planes_det = 64
# For RGB Feature Network
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer_det(bb_block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer_det(bb_block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer_det(bb_block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer_det(bb_block, 512, num_blocks[3], stride=2)
fpn_sizes = [
self.layer2[1].conv2.out_channels,
self.layer3[1].conv2.out_channels,
self.layer4[1].conv2.out_channels]
self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])
def _make_layer_det(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.f_in_planes_det, planes, stride))
self.f_in_planes_det = planes * block.expansion
return nn.Sequential(*layers)
def init_weights(self, pretrained=None):
pth_path = 'pretrained/FPN18_retinanet_968.pth'
pre_weights = torch.load(pth_path)
new_res_state_dict = OrderedDict()
model_dict = self.state_dict()
for k,v in pre_weights['state_dict'].items():
if ('regressionModel' not in k) and ('classificationModel' not in k):
# name = k.replace('module', 'rpn')
name = '.'.join(k.split('.')[1:])
new_res_state_dict[name] = v
model_dict.update(new_res_state_dict)
self.load_state_dict(model_dict)
def forward(self, x):
"""Forward function."""
f1 = self.maxpool(F.relu(self.bn1(self.conv1(x))))
f2 = self.layer1(f1)
f3 = self.layer2(f2)
f4 = self.layer3(f3)
f5 = self.layer4(f4)
x = self.fpn([f3, f4, f5])
return x | mmdet3d/models/backbones/fpn18.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from collections import OrderedDict
from ..builder import BACKBONES
class PyramidFeatures(nn.Module):
'''
FPN pyramid layer
'''
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(PyramidFeatures, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
self.P7_1 = nn.ReLU()
self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
P5 = self.P5_1(C5)
P5_up = self.P5_upsampled(P5)
P5 = self.P5_2(P5)
P4 = self.P4_1(C4)
P4 = P4 + P5_up
P4_up = self.P4_upsampled(P4)
P4 = self.P4_2(P4)
P3 = self.P3_1(C3)
P3 = P3 + P4_up
P3 = self.P3_2(P3)
P6 = self.P6(C5)
P7 = self.P7_1(P6)
P7 = self.P7_2(P7)
return [P3, P4, P5, P6, P7]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, \
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes))
self.stride = stride
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.downsample(x)
out = F.relu(out)
return out
@BACKBONES.register_module()
class FPN18(nn.Module):
def __init__(self):
super(FPN18, self).__init__()
num_blocks = [2,2,2,2]
bb_block = BasicBlock
self.f_in_planes_det = 64
# For RGB Feature Network
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer_det(bb_block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer_det(bb_block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer_det(bb_block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer_det(bb_block, 512, num_blocks[3], stride=2)
fpn_sizes = [
self.layer2[1].conv2.out_channels,
self.layer3[1].conv2.out_channels,
self.layer4[1].conv2.out_channels]
self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])
def _make_layer_det(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.f_in_planes_det, planes, stride))
self.f_in_planes_det = planes * block.expansion
return nn.Sequential(*layers)
def init_weights(self, pretrained=None):
pth_path = 'pretrained/FPN18_retinanet_968.pth'
pre_weights = torch.load(pth_path)
new_res_state_dict = OrderedDict()
model_dict = self.state_dict()
for k,v in pre_weights['state_dict'].items():
if ('regressionModel' not in k) and ('classificationModel' not in k):
# name = k.replace('module', 'rpn')
name = '.'.join(k.split('.')[1:])
new_res_state_dict[name] = v
model_dict.update(new_res_state_dict)
self.load_state_dict(model_dict)
def forward(self, x):
"""Forward function."""
f1 = self.maxpool(F.relu(self.bn1(self.conv1(x))))
f2 = self.layer1(f1)
f3 = self.layer2(f2)
f4 = self.layer3(f3)
f5 = self.layer4(f4)
x = self.fpn([f3, f4, f5])
return x | 0.93759 | 0.457985 |
import proto # type: ignore
from google.cloud.osconfig_v1beta.types import patch_jobs
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import datetime_pb2 # type: ignore
from google.type import dayofweek_pb2 # type: ignore
from google.type import timeofday_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.osconfig.v1beta',
manifest={
'PatchDeployment',
'OneTimeSchedule',
'RecurringSchedule',
'WeeklySchedule',
'MonthlySchedule',
'WeekDayOfMonth',
'CreatePatchDeploymentRequest',
'GetPatchDeploymentRequest',
'ListPatchDeploymentsRequest',
'ListPatchDeploymentsResponse',
'DeletePatchDeploymentRequest',
},
)
class PatchDeployment(proto.Message):
r"""Patch deployments are configurations that individual patch jobs use
to complete a patch. These configurations include instance filter,
package repository settings, and a schedule. For more information
about creating and managing patch deployments, see `Scheduling patch
jobs </compute/docs/os-patch-management/schedule-patch-jobs>`__.
Attributes:
name (str):
Unique name for the patch deployment resource in a project.
The patch deployment name is in the form:
``projects/{project_id}/patchDeployments/{patch_deployment_id}``.
This field is ignored when you create a new patch
deployment.
description (str):
Optional. Description of the patch
deployment. Length of the description is limited
to 1024 characters.
instance_filter (google.cloud.osconfig_v1beta.types.PatchInstanceFilter):
Required. VM instances to patch.
patch_config (google.cloud.osconfig_v1beta.types.PatchConfig):
Optional. Patch configuration that is
applied.
duration (google.protobuf.duration_pb2.Duration):
Optional. Duration of the patch. After the
duration ends, the patch times out.
one_time_schedule (google.cloud.osconfig_v1beta.types.OneTimeSchedule):
Required. Schedule a one-time execution.
recurring_schedule (google.cloud.osconfig_v1beta.types.RecurringSchedule):
Required. Schedule recurring executions.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time the patch deployment was created.
Timestamp is in RFC3339 text format.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time the patch deployment was last updated.
Timestamp is in RFC3339 text format.
last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The last time a patch job was started by this
deployment. Timestamp is in RFC3339 text format.
"""
name = proto.Field(
proto.STRING,
number=1,
)
description = proto.Field(
proto.STRING,
number=2,
)
instance_filter = proto.Field(
proto.MESSAGE,
number=3,
message=patch_jobs.PatchInstanceFilter,
)
patch_config = proto.Field(
proto.MESSAGE,
number=4,
message=patch_jobs.PatchConfig,
)
duration = proto.Field(
proto.MESSAGE,
number=5,
message=duration_pb2.Duration,
)
one_time_schedule = proto.Field(
proto.MESSAGE,
number=6,
oneof='schedule',
message='OneTimeSchedule',
)
recurring_schedule = proto.Field(
proto.MESSAGE,
number=7,
oneof='schedule',
message='RecurringSchedule',
)
create_time = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
last_execute_time = proto.Field(
proto.MESSAGE,
number=10,
message=timestamp_pb2.Timestamp,
)
class OneTimeSchedule(proto.Message):
r"""Sets the time for a one time patch deployment. Timestamp is in
RFC3339 text format.
Attributes:
execute_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The desired patch job execution
time.
"""
execute_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
class RecurringSchedule(proto.Message):
r"""Sets the time for recurring patch deployments.
Attributes:
time_zone (google.type.datetime_pb2.TimeZone):
Required. Defines the time zone that ``time_of_day`` is
relative to. The rules for daylight saving time are
determined by the chosen time zone.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The time that the recurring schedule becomes
effective. Defaults to ``create_time`` of the patch
deployment.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The end time at which a recurring
patch deployment schedule is no longer active.
time_of_day (google.type.timeofday_pb2.TimeOfDay):
Required. Time of the day to run a recurring
deployment.
frequency (google.cloud.osconfig_v1beta.types.RecurringSchedule.Frequency):
Required. The frequency unit of this
recurring schedule.
weekly (google.cloud.osconfig_v1beta.types.WeeklySchedule):
Required. Schedule with weekly executions.
monthly (google.cloud.osconfig_v1beta.types.MonthlySchedule):
Required. Schedule with monthly executions.
last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the last patch job ran
successfully.
next_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the next patch job is
scheduled to run.
"""
class Frequency(proto.Enum):
r"""Specifies the frequency of the recurring patch deployments."""
FREQUENCY_UNSPECIFIED = 0
WEEKLY = 1
MONTHLY = 2
DAILY = 3
time_zone = proto.Field(
proto.MESSAGE,
number=1,
message=datetime_pb2.TimeZone,
)
start_time = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
time_of_day = proto.Field(
proto.MESSAGE,
number=4,
message=timeofday_pb2.TimeOfDay,
)
frequency = proto.Field(
proto.ENUM,
number=5,
enum=Frequency,
)
weekly = proto.Field(
proto.MESSAGE,
number=6,
oneof='schedule_config',
message='WeeklySchedule',
)
monthly = proto.Field(
proto.MESSAGE,
number=7,
oneof='schedule_config',
message='MonthlySchedule',
)
last_execute_time = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
next_execute_time = proto.Field(
proto.MESSAGE,
number=10,
message=timestamp_pb2.Timestamp,
)
class WeeklySchedule(proto.Message):
r"""Represents a weekly schedule.
Attributes:
day_of_week (google.type.dayofweek_pb2.DayOfWeek):
Required. Day of the week.
"""
day_of_week = proto.Field(
proto.ENUM,
number=1,
enum=dayofweek_pb2.DayOfWeek,
)
class MonthlySchedule(proto.Message):
r"""Represents a monthly schedule. An example of a valid monthly
schedule is "on the third Tuesday of the month" or "on the 15th
of the month".
Attributes:
week_day_of_month (google.cloud.osconfig_v1beta.types.WeekDayOfMonth):
Required. Week day in a month.
month_day (int):
Required. One day of the month. 1-31
indicates the 1st to the 31st day. -1 indicates
the last day of the month. Months without the
target day will be skipped. For example, a
schedule to run "every month on the 31st" will
not run in February, April, June, etc.
"""
week_day_of_month = proto.Field(
proto.MESSAGE,
number=1,
oneof='day_of_month',
message='WeekDayOfMonth',
)
month_day = proto.Field(
proto.INT32,
number=2,
oneof='day_of_month',
)
class WeekDayOfMonth(proto.Message):
    r"""One week day in a month, e.g. "the 4th Sunday".

    Attributes:
        week_ordinal (int):
            Required. Week number in a month. 1-4 indicates the 1st to
            4th week of the month. -1 indicates the last week of the
            month.
        day_of_week (google.type.dayofweek_pb2.DayOfWeek):
            Required. A day of the week.
    """

    week_ordinal = proto.Field(proto.INT32, number=1)
    day_of_week = proto.Field(proto.ENUM, number=2, enum=dayofweek_pb2.DayOfWeek)
class CreatePatchDeploymentRequest(proto.Message):
    r"""Request message for creating a patch deployment.

    Attributes:
        parent (str):
            Required. The project to apply this patch deployment to in
            the form ``projects/*``.
        patch_deployment_id (str):
            Required. A name for the patch deployment in the project.
            When creating a name the following rules apply:

            - Must contain only lowercase letters, numbers, and
              hyphens.
            - Must start with a letter.
            - Must be between 1-63 characters.
            - Must end with a number or a letter.
            - Must be unique within the project.
        patch_deployment (google.cloud.osconfig_v1beta.types.PatchDeployment):
            Required. The patch deployment to create.
    """

    parent = proto.Field(proto.STRING, number=1)
    patch_deployment_id = proto.Field(proto.STRING, number=2)
    patch_deployment = proto.Field(proto.MESSAGE, number=3, message='PatchDeployment')
class GetPatchDeploymentRequest(proto.Message):
    r"""Request message for retrieving a single patch deployment.

    Attributes:
        name (str):
            Required. The resource name of the patch deployment in the
            form ``projects/*/patchDeployments/*``.
    """

    name = proto.Field(proto.STRING, number=1)
class ListPatchDeploymentsRequest(proto.Message):
    r"""Request message for listing patch deployments.

    Attributes:
        parent (str):
            Required. The resource name of the parent in the form
            ``projects/*``.
        page_size (int):
            Optional. The maximum number of patch deployments to
            return. Default is 100.
        page_token (str):
            Optional. A pagination token returned from a previous call
            to ListPatchDeployments that indicates where this listing
            should continue from.
    """

    parent = proto.Field(proto.STRING, number=1)
    page_size = proto.Field(proto.INT32, number=2)
    page_token = proto.Field(proto.STRING, number=3)
class ListPatchDeploymentsResponse(proto.Message):
    r"""Response message for listing patch deployments.

    Attributes:
        patch_deployments (Sequence[google.cloud.osconfig_v1beta.types.PatchDeployment]):
            The list of patch deployments.
        next_page_token (str):
            A pagination token that can be used to get the next page of
            patch deployments.
    """

    @property
    def raw_page(self):
        # Pagination protocol hook: this message is itself the raw page.
        return self

    patch_deployments = proto.RepeatedField(
        proto.MESSAGE, number=1, message='PatchDeployment')
    next_page_token = proto.Field(proto.STRING, number=2)
class DeletePatchDeploymentRequest(proto.Message):
    r"""Request message for deleting a patch deployment.

    Attributes:
        name (str):
            Required. The resource name of the patch deployment in the
            form ``projects/*/patchDeployments/*``.
    """

    name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
import proto  # type: ignore
from google.cloud.osconfig_v1beta.types import patch_jobs
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import datetime_pb2 # type: ignore
from google.type import dayofweek_pb2 # type: ignore
from google.type import timeofday_pb2 # type: ignore
# Proto-plus module descriptor: binds this module to the
# ``google.cloud.osconfig.v1beta`` proto package and declares the set of
# message names it defines (also exported via the module's ``__all__``).
__protobuf__ = proto.module(
    package='google.cloud.osconfig.v1beta',
    manifest={
        'PatchDeployment',
        'OneTimeSchedule',
        'RecurringSchedule',
        'WeeklySchedule',
        'MonthlySchedule',
        'WeekDayOfMonth',
        'CreatePatchDeploymentRequest',
        'GetPatchDeploymentRequest',
        'ListPatchDeploymentsRequest',
        'ListPatchDeploymentsResponse',
        'DeletePatchDeploymentRequest',
    },
)
class PatchDeployment(proto.Message):
    r"""Patch deployments are configurations that individual patch jobs use
    to complete a patch. These configurations include instance filter,
    package repository settings, and a schedule. For more information
    about creating and managing patch deployments, see `Scheduling patch
    jobs </compute/docs/os-patch-management/schedule-patch-jobs>`__.

    Attributes:
        name (str):
            Unique name for the patch deployment resource in a project.
            The patch deployment name is in the form:
            ``projects/{project_id}/patchDeployments/{patch_deployment_id}``.
            This field is ignored when you create a new patch
            deployment.
        description (str):
            Optional. Description of the patch
            deployment. Length of the description is limited
            to 1024 characters.
        instance_filter (google.cloud.osconfig_v1beta.types.PatchInstanceFilter):
            Required. VM instances to patch.
        patch_config (google.cloud.osconfig_v1beta.types.PatchConfig):
            Optional. Patch configuration that is
            applied.
        duration (google.protobuf.duration_pb2.Duration):
            Optional. Duration of the patch. After the
            duration ends, the patch times out.
        one_time_schedule (google.cloud.osconfig_v1beta.types.OneTimeSchedule):
            Required. Schedule a one-time execution.
        recurring_schedule (google.cloud.osconfig_v1beta.types.RecurringSchedule):
            Required. Schedule recurring executions.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time the patch deployment was created.
            Timestamp is in RFC3339 text format.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time the patch deployment was last updated.
            Timestamp is in RFC3339 text format.
        last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The last time a patch job was started by this
            deployment. Timestamp is in RFC3339 text format.
    """

    name = proto.Field(
        proto.STRING,
        number=1,
    )
    description = proto.Field(
        proto.STRING,
        number=2,
    )
    instance_filter = proto.Field(
        proto.MESSAGE,
        number=3,
        message=patch_jobs.PatchInstanceFilter,
    )
    patch_config = proto.Field(
        proto.MESSAGE,
        number=4,
        message=patch_jobs.PatchConfig,
    )
    duration = proto.Field(
        proto.MESSAGE,
        number=5,
        message=duration_pb2.Duration,
    )
    # ``one_time_schedule`` and ``recurring_schedule`` are mutually
    # exclusive members of the ``schedule`` oneof: exactly one is set.
    one_time_schedule = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof='schedule',
        message='OneTimeSchedule',
    )
    recurring_schedule = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof='schedule',
        message='RecurringSchedule',
    )
    # Output-only server-populated timestamps.
    create_time = proto.Field(
        proto.MESSAGE,
        number=8,
        message=timestamp_pb2.Timestamp,
    )
    update_time = proto.Field(
        proto.MESSAGE,
        number=9,
        message=timestamp_pb2.Timestamp,
    )
    last_execute_time = proto.Field(
        proto.MESSAGE,
        number=10,
        message=timestamp_pb2.Timestamp,
    )
class OneTimeSchedule(proto.Message):
    r"""Schedule for a one-time patch deployment. The timestamp is in
    RFC3339 text format.

    Attributes:
        execute_time (google.protobuf.timestamp_pb2.Timestamp):
            Required. The desired patch job execution time.
    """

    execute_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp)
class RecurringSchedule(proto.Message):
    r"""Sets the time for recurring patch deployments.

    Attributes:
        time_zone (google.type.datetime_pb2.TimeZone):
            Required. Defines the time zone that ``time_of_day`` is
            relative to. The rules for daylight saving time are
            determined by the chosen time zone.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Optional. The time that the recurring schedule becomes
            effective. Defaults to ``create_time`` of the patch
            deployment.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Optional. The end time at which a recurring
            patch deployment schedule is no longer active.
        time_of_day (google.type.timeofday_pb2.TimeOfDay):
            Required. Time of the day to run a recurring
            deployment.
        frequency (google.cloud.osconfig_v1beta.types.RecurringSchedule.Frequency):
            Required. The frequency unit of this
            recurring schedule.
        weekly (google.cloud.osconfig_v1beta.types.WeeklySchedule):
            Required. Schedule with weekly executions.
        monthly (google.cloud.osconfig_v1beta.types.MonthlySchedule):
            Required. Schedule with monthly executions.
        last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the last patch job ran
            successfully.
        next_execute_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the next patch job is
            scheduled to run.
    """

    class Frequency(proto.Enum):
        r"""Specifies the frequency of the recurring patch deployments."""
        FREQUENCY_UNSPECIFIED = 0
        WEEKLY = 1
        MONTHLY = 2
        DAILY = 3

    time_zone = proto.Field(
        proto.MESSAGE,
        number=1,
        message=datetime_pb2.TimeZone,
    )
    start_time = proto.Field(
        proto.MESSAGE,
        number=2,
        message=timestamp_pb2.Timestamp,
    )
    end_time = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    time_of_day = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timeofday_pb2.TimeOfDay,
    )
    frequency = proto.Field(
        proto.ENUM,
        number=5,
        enum=Frequency,
    )
    # ``weekly`` and ``monthly`` are mutually exclusive members of the
    # ``schedule_config`` oneof; which one applies is indicated by
    # ``frequency``.
    weekly = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof='schedule_config',
        message='WeeklySchedule',
    )
    monthly = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof='schedule_config',
        message='MonthlySchedule',
    )
    # NOTE(review): field number 8 is skipped (numbers jump from 7 to 9) —
    # presumably retired/reserved upstream; confirm against the .proto.
    last_execute_time = proto.Field(
        proto.MESSAGE,
        number=9,
        message=timestamp_pb2.Timestamp,
    )
    next_execute_time = proto.Field(
        proto.MESSAGE,
        number=10,
        message=timestamp_pb2.Timestamp,
    )
class WeeklySchedule(proto.Message):
    r"""Represents a weekly schedule.

    Attributes:
        day_of_week (google.type.dayofweek_pb2.DayOfWeek):
            Required. Day of the week.
    """

    # Wire field 1: the weekday the patch job runs on.
    day_of_week = proto.Field(
        proto.ENUM,
        number=1,
        enum=dayofweek_pb2.DayOfWeek,
    )
class MonthlySchedule(proto.Message):
    r"""Represents a monthly schedule. An example of a valid monthly
    schedule is "on the third Tuesday of the month" or "on the 15th
    of the month".

    Attributes:
        week_day_of_month (google.cloud.osconfig_v1beta.types.WeekDayOfMonth):
            Required. Week day in a month.
        month_day (int):
            Required. One day of the month. 1-31
            indicates the 1st to the 31st day. -1 indicates
            the last day of the month. Months without the
            target day will be skipped. For example, a
            schedule to run "every month on the 31st" will
            not run in February, April, June, etc.
    """

    # The two fields below are mutually exclusive members of the
    # ``day_of_month`` oneof: exactly one is set.
    week_day_of_month = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof='day_of_month',
        message='WeekDayOfMonth',
    )
    month_day = proto.Field(
        proto.INT32,
        number=2,
        oneof='day_of_month',
    )
class WeekDayOfMonth(proto.Message):
    r"""Represents one week day in a month. An example is "the 4th
    Sunday".

    Attributes:
        week_ordinal (int):
            Required. Week number in a month. 1-4
            indicates the 1st to 4th week of the month. -1
            indicates the last week of the month.
        day_of_week (google.type.dayofweek_pb2.DayOfWeek):
            Required. A day of the week.
    """

    # Week number within the month (1-4, or -1 for the last week).
    week_ordinal = proto.Field(
        proto.INT32,
        number=1,
    )
    # Day of the week within the selected week.
    day_of_week = proto.Field(
        proto.ENUM,
        number=2,
        enum=dayofweek_pb2.DayOfWeek,
    )
class CreatePatchDeploymentRequest(proto.Message):
    r"""A request message for creating a patch deployment.

    Attributes:
        parent (str):
            Required. The project to apply this patch deployment to in
            the form ``projects/*``.
        patch_deployment_id (str):
            Required. A name for the patch deployment in the project.
            When creating a name the following rules apply:

            - Must contain only lowercase letters, numbers, and
              hyphens.
            - Must start with a letter.
            - Must be between 1-63 characters.
            - Must end with a number or a letter.
            - Must be unique within the project.
        patch_deployment (google.cloud.osconfig_v1beta.types.PatchDeployment):
            Required. The patch deployment to create.
    """

    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    # Client-chosen resource id; naming rules are documented above.
    patch_deployment_id = proto.Field(
        proto.STRING,
        number=2,
    )
    patch_deployment = proto.Field(
        proto.MESSAGE,
        number=3,
        message='PatchDeployment',
    )
class GetPatchDeploymentRequest(proto.Message):
    r"""A request message for retrieving a patch deployment.

    Attributes:
        name (str):
            Required. The resource name of the patch deployment in the
            form ``projects/*/patchDeployments/*``.
    """

    # Full resource name of the deployment to fetch.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
class ListPatchDeploymentsRequest(proto.Message):
    r"""A request message for listing patch deployments.

    Attributes:
        parent (str):
            Required. The resource name of the parent in the form
            ``projects/*``.
        page_size (int):
            Optional. The maximum number of patch
            deployments to return. Default is 100.
        page_token (str):
            Optional. A pagination token returned from a
            previous call to ListPatchDeployments that
            indicates where this listing should continue
            from.
    """

    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size = proto.Field(
        proto.INT32,
        number=2,
    )
    # Opaque continuation token from a previous response's
    # ``next_page_token``.
    page_token = proto.Field(
        proto.STRING,
        number=3,
    )
class ListPatchDeploymentsResponse(proto.Message):
    r"""A response message for listing patch deployments.

    Attributes:
        patch_deployments (Sequence[google.cloud.osconfig_v1beta.types.PatchDeployment]):
            The list of patch deployments.
        next_page_token (str):
            A pagination token that can be used to get
            the next page of patch deployments.
    """

    @property
    def raw_page(self):
        # Pagination protocol hook: the message itself acts as the raw page.
        return self

    patch_deployments = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='PatchDeployment',
    )
    # Empty when there are no further pages.
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
class DeletePatchDeploymentRequest(proto.Message):
    r"""A request message for deleting a patch deployment.

    Attributes:
        name (str):
            Required. The resource name of the patch deployment in the
            form ``projects/*/patchDeployments/*``.
    """

    # Full resource name of the deployment to delete.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
from django.test import TestCase
from django.contrib.sites.models import Site
from .factories import GalleryFactory, PhotoFactory
class SitesTest(TestCase):
    # Test-only URLconf providing the /ptests/... routes used below.
    urls = 'photologue.tests.test_urls'

    def setUp(self):
        """
        Create two example sites that we can use to test what gets displayed
        where.
        """
        super(SitesTest, self).setUp()
        self.site1, created1 = Site.objects.get_or_create(
            domain="example.com", name="example.com")
        self.site2, created2 = Site.objects.get_or_create(
            domain="example.org", name="example.org")
        with self.settings(PHOTOLOGUE_MULTISITE=True):
            # Be explicit about linking Galleries/Photos to Sites.
            self.gallery1 = GalleryFactory(slug='test-gallery', sites=[self.site1])
            self.gallery2 = GalleryFactory(slug='not-on-site-gallery')
            self.photo1 = PhotoFactory(slug='test-photo', sites=[self.site1])
            self.photo2 = PhotoFactory(slug='not-on-site-photo')
        self.gallery1.photos.add(self.photo1, self.photo2)
        # I'd like to use factory_boy's mute_signal decorator but that
        # will only be available once factory_boy 2.4 is released. Until
        # then we have to remove the site association manually.
        self.photo2.sites.clear()

    def tearDown(self):
        """Remove the objects created in setUp."""
        super(SitesTest, self).tearDown()
        self.gallery1.delete()
        self.gallery2.delete()
        self.photo1.delete()
        self.photo2.delete()

    def test_basics(self):
        """ See if objects were added automatically (by the factory) to the current site. """
        self.assertEqual(list(self.gallery1.sites.all()), [self.site1])
        self.assertEqual(list(self.photo1.sites.all()), [self.site1])

    def test_auto_add_sites(self):
        """
        Objects are added to the current site automatically when
        ``PHOTOLOGUE_MULTISITE`` is ``False``, and should not be
        automatically associated with a particular site when it is ``True``.
        """
        with self.settings(PHOTOLOGUE_MULTISITE=False):
            gallery = GalleryFactory()
            photo = PhotoFactory()
            self.assertEqual(list(gallery.sites.all()), [self.site1])
            self.assertEqual(list(photo.sites.all()), [self.site1])
            photo.delete()
        with self.settings(PHOTOLOGUE_MULTISITE=True):
            gallery = GalleryFactory()
            photo = PhotoFactory()
            self.assertEqual(list(gallery.sites.all()), [])
            self.assertEqual(list(photo.sites.all()), [])
            photo.delete()

    def test_gallery_list(self):
        """Only galleries on the current site are listed."""
        response = self.client.get('/ptests/gallerylist/')
        self.assertEqual(list(response.context['object_list']), [self.gallery1])

    def test_gallery_detail(self):
        """A gallery not on the current site returns a 404."""
        response = self.client.get('/ptests/gallery/test-gallery/')
        self.assertEqual(response.context['object'], self.gallery1)
        response = self.client.get('/ptests/gallery/not-on-site-gallery/')
        self.assertEqual(response.status_code, 404)

    def test_photo_list(self):
        """Only photos on the current site are listed."""
        response = self.client.get('/ptests/photolist/')
        self.assertEqual(list(response.context['object_list']), [self.photo1])

    def test_photo_detail(self):
        """A photo not on the current site returns a 404."""
        response = self.client.get('/ptests/photo/test-photo/')
        self.assertEqual(response.context['object'], self.photo1)
        response = self.client.get('/ptests/photo/not-on-site-photo/')
        self.assertEqual(response.status_code, 404)

    def test_photo_archive(self):
        """The photo archive only shows photos on the current site."""
        response = self.client.get('/ptests/photo/')
        self.assertEqual(list(response.context['object_list']), [self.photo1])

    def test_photos_in_gallery(self):
        """
        Only those photos are supposed to be shown in a gallery that are
        also associated with the current site.
        """
        response = self.client.get('/ptests/gallery/test-gallery/')
        self.assertEqual(list(response.context['object'].public()), [self.photo1])

    def test_orphaned_photos(self):
        """Photos not visible on any of their galleries' sites are orphaned."""
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
        self.gallery2.photos.add(self.photo2)
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
        self.gallery1.sites.clear()
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
        self.photo1.sites.clear()
        self.photo2.sites.clear()
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
from django.test import TestCase
from django.contrib.sites.models import Site
from .factories import GalleryFactory, PhotoFactory
class SitesTest(TestCase):
    # Test-only URLconf providing the /ptests/... routes used below.
    urls = 'photologue.tests.test_urls'

    def setUp(self):
        """
        Create two example sites that we can use to test what gets displayed
        where.
        """
        super(SitesTest, self).setUp()
        self.site1, created1 = Site.objects.get_or_create(
            domain="example.com", name="example.com")
        self.site2, created2 = Site.objects.get_or_create(
            domain="example.org", name="example.org")
        with self.settings(PHOTOLOGUE_MULTISITE=True):
            # Be explicit about linking Galleries/Photos to Sites.
            self.gallery1 = GalleryFactory(slug='test-gallery', sites=[self.site1])
            self.gallery2 = GalleryFactory(slug='not-on-site-gallery')
            self.photo1 = PhotoFactory(slug='test-photo', sites=[self.site1])
            self.photo2 = PhotoFactory(slug='not-on-site-photo')
        self.gallery1.photos.add(self.photo1, self.photo2)
        # I'd like to use factory_boy's mute_signal decorator but that
        # will only be available once factory_boy 2.4 is released. Until
        # then we have to remove the site association manually.
        self.photo2.sites.clear()

    def tearDown(self):
        """Remove the objects created in setUp."""
        super(SitesTest, self).tearDown()
        self.gallery1.delete()
        self.gallery2.delete()
        self.photo1.delete()
        self.photo2.delete()

    def test_basics(self):
        """ See if objects were added automatically (by the factory) to the current site. """
        self.assertEqual(list(self.gallery1.sites.all()), [self.site1])
        self.assertEqual(list(self.photo1.sites.all()), [self.site1])

    def test_auto_add_sites(self):
        """
        Objects are added to the current site automatically when
        ``PHOTOLOGUE_MULTISITE`` is ``False``, and should not be
        automatically associated with a particular site when it is ``True``.
        """
        with self.settings(PHOTOLOGUE_MULTISITE=False):
            gallery = GalleryFactory()
            photo = PhotoFactory()
            self.assertEqual(list(gallery.sites.all()), [self.site1])
            self.assertEqual(list(photo.sites.all()), [self.site1])
            photo.delete()
        with self.settings(PHOTOLOGUE_MULTISITE=True):
            gallery = GalleryFactory()
            photo = PhotoFactory()
            self.assertEqual(list(gallery.sites.all()), [])
            self.assertEqual(list(photo.sites.all()), [])
            photo.delete()

    def test_gallery_list(self):
        """Only galleries on the current site are listed."""
        response = self.client.get('/ptests/gallerylist/')
        self.assertEqual(list(response.context['object_list']), [self.gallery1])

    def test_gallery_detail(self):
        """A gallery not on the current site returns a 404."""
        response = self.client.get('/ptests/gallery/test-gallery/')
        self.assertEqual(response.context['object'], self.gallery1)
        response = self.client.get('/ptests/gallery/not-on-site-gallery/')
        self.assertEqual(response.status_code, 404)

    def test_photo_list(self):
        """Only photos on the current site are listed."""
        response = self.client.get('/ptests/photolist/')
        self.assertEqual(list(response.context['object_list']), [self.photo1])

    def test_photo_detail(self):
        """A photo not on the current site returns a 404."""
        response = self.client.get('/ptests/photo/test-photo/')
        self.assertEqual(response.context['object'], self.photo1)
        response = self.client.get('/ptests/photo/not-on-site-photo/')
        self.assertEqual(response.status_code, 404)

    def test_photo_archive(self):
        """The photo archive only shows photos on the current site."""
        response = self.client.get('/ptests/photo/')
        self.assertEqual(list(response.context['object_list']), [self.photo1])

    def test_photos_in_gallery(self):
        """
        Only those photos are supposed to be shown in a gallery that are
        also associated with the current site.
        """
        response = self.client.get('/ptests/gallery/test-gallery/')
        self.assertEqual(list(response.context['object'].public()), [self.photo1])

    def test_orphaned_photos(self):
        """Photos not visible on any of their galleries' sites are orphaned."""
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
        self.gallery2.photos.add(self.photo2)
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
        self.gallery1.sites.clear()
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
        self.photo1.sites.clear()
        self.photo2.sites.clear()
        self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])