text string | size int64 | token_count int64 |
|---|---|---|
some_list = [1, 2, 3]
some_dict = {
    "key_1": 1,
    "key_2": 2,
    "key_3": 3
}
# Pitfall demo: list.append() and dict.update() mutate in place and return
# None, so rebinding the names to their return values discards both containers.
some_list = some_list.append(4)
some_dict = some_dict.update({"key_4": 4})
# NOTE(review): prints some_list twice — presumably the second argument was
# meant to be some_dict; either way both names are None here, so the
# documented output "None None" holds in both readings.
print(some_list,some_list)
"""
output:
None None
""" | 208 | 108 |
import sqlite3  # imports the sqlite3 library
desc = ["Código", "Nome", "Telefone"]  # column headers used by the listing
lista = [None, None]  # scratch list holding [name, phone] for the next insert
menu = [' 1 - Cadastrar:', ' 2 - Consultar:', ' 3 - Excluir/Criar Tabela:', ' 9 - Sair e Salvar']
conector = sqlite3.connect('teste.db')  # connects to the database file
cursor = conector.cursor()  # opens the cursor
# Interactive CRUD loop over the 'cadastro' table; runs until option 9.
while True:
    print(f"*CRUD* Teste\nBanco de Dados 'teste.db'\n{menu[0]}\n{menu[1]}\n{menu[2]}\n{menu[3]}")
    op = int(input('Digite uma opção: '))  # NOTE(review): non-numeric input raises ValueError here — confirm acceptable
    if op == 1:
        # Register: read name and phone, then insert one row.
        lista[0] = str(input('Digite o nome: '))
        lista[1] = str(input('Digite o telefone: '))
        print(lista)
        sql = "insert into cadastro (nome, tel) values (?, ?)"  # parameterized insert; each ? is bound from lista
        cursor.execute(sql, lista)  # the cursor runs the statement, filling values from lista
        print("...dados inseridos com sucesso!")
    elif op == 2:
        # Query: fetch and pretty-print every row of the table.
        sql = "select * from cadastro"  # selects all rows from the table
        cursor.execute(sql)  # the cursor runs the query
        dados = cursor.fetchall()  # all rows returned by the query
        print("Dados da tabela 'cadastro'")
        print(f"{len(dados)} registros Encontrados")
        print("-" * 37)
        print(f"{desc[0]:^7} {desc[1]:^20} {desc[2]:^8}")
        print("- " * 19)
        for d in dados:
            print(f"{d[0]:^7} {d[1]:20} {d[2]:^8}")
        print("-" * 37)
    elif op == 3:
        # (Re)create: drop the table if present, then create it empty.
        sql = "drop table if exists cadastro"  # drops the table if it exists
        cursor.execute(sql)  # the cursor runs the statement
        sql = "create table if not exists cadastro (id integer primary key autoincrement, nome varchar(30), tel varchar(10))"  # recreates the table
        cursor.execute(sql)  # the cursor runs the statement
        print('Tabela cadastro excluida e recriada')
    elif op == 9:
        # NOTE(review): changes are committed only here — exiting any other way
        # (crash, Ctrl-C, bad input) silently loses all pending inserts.
        conector.commit()  # writes the pending changes to the database file
        break
    else:
        print('Opção inválida!')
cursor.close()  # closes the cursor
conector.close()  # disconnects from the database
print("\nFim do programa")
| 2,254 | 806 |
from codino.data import CodonDesign
cd = CodonDesign()
def test_default_freq():
    """A fresh CodonDesign starts with zeroed base frequencies at all three positions."""
    zeroed = {"A": 0, "T": 0, "C": 0, "G": 0}
    for position in (cd.first, cd.second, cd.third):
        assert position.freq == zeroed
def test_set_codon_design():
    """set_codon_design accepts partial frequency dicts; unspecified bases stay 0."""
    cd.set_codon_design(first={"A": 0.5, "T": 0, "C": 0.5, "G": 0},
                        second={"T": 1},
                        third={"G": 1})
    expected = [
        {"A": 0.5, "T": 0, "C": 0.5, "G": 0},
        {"A": 0, "T": 1, "C": 0, "G": 0},
        {"A": 0, "T": 0, "C": 0, "G": 1},
    ]
    assert [cd.first.freq, cd.second.freq, cd.third.freq] == expected
| 638 | 319 |
from daraja_api.clients.abstract_api_client import AbstractApiClient
class AbstractB2CApiClient(AbstractApiClient):
    """Marker base class for Daraja B2C (Business-to-Customer) API clients.

    Adds no behaviour beyond AbstractApiClient yet; exists so concrete B2C
    clients share a common ancestor.
    """
    pass | 125 | 36 |
class Solution(object):
    """LeetCode 748 - Shortest Completing Word."""

    def shortestCompletingWord(self, licensePlate, words):
        """Return the shortest word in *words* that contains every letter of
        *licensePlate* (case-insensitive, multiplicity respected).

        Ties on length keep the earliest word (stable sort). Returns "" when
        no word completes the plate. Note: sorts *words* in place, as before.
        """
        import collections  # fix: this snippet never imported collections (NameError at runtime)
        words.sort(key=len)  # stable sort: equal-length words keep original order
        needed = collections.Counter(
            ch for ch in licensePlate.lower() if ch.isalpha())
        for word in words:
            # An empty multiset difference means `word` covers every needed letter.
            if not needed - collections.Counter(word.lower()):
                return word
        return ""
| 386 | 107 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ActressList(scrapy.Item):
    """Summary item for one actress row scraped from an index/listing page."""
    actress_id = scrapy.Field()
    actress_link = scrapy.Field()
    actress_name = scrapy.Field()
    actress_photo = scrapy.Field()
class ActressDetail(scrapy.Item):
    """Detail-page item: profile attributes and filmography for one actress."""
    actress_id = scrapy.Field()
    actress_name = scrapy.Field()
    actress_photo = scrapy.Field()
    actress_height = scrapy.Field()
    actress_category = scrapy.Field()
    actress_filmography = scrapy.Field()
class ActressBio(scrapy.Item):
    """Biography item scraped from an actress bio page."""
    actress_id = scrapy.Field()
    actress_bio = scrapy.Field()
    actress_birth = scrapy.Field()
    actress_height = scrapy.Field()
    actress_personal_detail = scrapy.Field()
class ActressPhoto(scrapy.Item):
    """Media item linking an actress id to its scraped photo data."""
    actress_id = scrapy.Field()
    actress_media = scrapy.Field()
class FilmList(scrapy.Item):
    """Summary item for one film row scraped from an index/listing page."""
    film_id = scrapy.Field()
    film_year = scrapy.Field()
    film_title = scrapy.Field()
    film_photo = scrapy.Field()
    film_length = scrapy.Field()
    film_rating = scrapy.Field()
    film_genre = scrapy.Field()
    film_description_short = scrapy.Field()
class FilmDetail(scrapy.Item):
    """Detail-page item: full metadata for one film."""
    film_id = scrapy.Field()
    film_title = scrapy.Field()
    film_description_short = scrapy.Field()
    film_director = scrapy.Field()
    film_writer = scrapy.Field()
    film_stars = scrapy.Field()
    film_photo = scrapy.Field()
    film_creator = scrapy.Field()
    film_year = scrapy.Field()
    film_type = scrapy.Field()
    film_length = scrapy.Field()
    film_rating = scrapy.Field()
    film_genre = scrapy.Field()
    film_date_release = scrapy.Field()
    film_content_rating = scrapy.Field()
    film_storyline = scrapy.Field()
class FilmSynopsis(scrapy.Item):
    """Long-form synopsis text for one film."""
    film_id = scrapy.Field()
    film_synopsis = scrapy.Field()
class FilmPhoto(scrapy.Item):
    """Media item linking a film id to its scraped photo data."""
    film_id = scrapy.Field()
    film_media = scrapy.Field()
class FilmCrew(scrapy.Item):
    """Crew and cast listings for one film."""
    film_id = scrapy.Field()
    film_crew = scrapy.Field()
    film_cast = scrapy.Field() | 2,085 | 726 |
import os
import sys
import unittest
from datetime import datetime
import requests
sys.path.append(os.path.abspath('..'))
from src.earthquake_query import EarthquakeQuery
from src.timeframe import TimeFrame
from src.location import Rectangle, Circle, RadiusUnit, GeoRectangle
from src.enum.contributor import Contributor
class TestEarthquakeQuery(unittest.TestCase):
    """Tests for EarthquakeQuery: constructor, kwargs validation, setters,
    query-parameter building, and a live USGS event lookup."""

    def test_constructor_time_location(self):
        # Test the EarthquakeQuery constructor to see if it can successfully set time and location
        location = [Rectangle(), Circle(latitude=1, longitude=1, radius_unit=RadiusUnit.KM, radius=100)]
        time = [TimeFrame(datetime(2010, 1, 1), datetime(2011, 1, 1))]
        query = EarthquakeQuery(time=time, location=location)
        self.assertEqual(location, query.get_location())
        self.assertEqual(time, query.get_time())

    def test_constructor_kwargs_no_key(self):
        # Test the kwargs in the constructor to see if a ValueError
        # is raised if the client is trying to set a non-existing parameter
        self.assertRaises(ValueError, EarthquakeQuery, nokey="test")

    def test_constructor_kwargs_set(self):
        # Test if the client can set the other and extension parameters using kwargs in the constructor
        minmagnitude = 5
        contributor = Contributor.CONTRIBUTOR_AK
        query = EarthquakeQuery(minmagnitude=minmagnitude, contributor=contributor)
        self.assertEqual(minmagnitude, query.get_min_magnitude())
        self.assertEqual(contributor, query.get_contributor())

    def test_search_by_event_id(self):
        # Test search by event id
        # NOTE(review): hits the live USGS API, so this test needs network access.
        event_id = "usc000lvb5"
        url = "https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&eventid={}".format(event_id)
        response = requests.get(url)
        json1 = response.json()
        detail = EarthquakeQuery.search_by_event_id(event_id=event_id)
        self.assertEqual(json1, detail.get_raw_json())

    def test_set_methods(self):
        # Test set methods
        query = EarthquakeQuery()
        # set time
        timeframe = TimeFrame(start_time=datetime(2010, 1, 1), end_time=datetime(2011, 1, 1))
        time = [timeframe]
        query.set_time(time)
        self.assertEqual(query.get_time(), time)
        # set location — GeoRectangle geocodes a place name; requires ../key.txt on disk
        EarthquakeQuery.set_geocode_key_path("../key.txt")
        location = [GeoRectangle("Los Angeles")]
        query.set_location(location)
        self.assertEqual(query.get_location(), location)
        # set min magnitude
        min_magnitude = 5.0
        query.set_min_magnitude(min_magnitude)
        self.assertEqual(query.get_min_magnitude(), min_magnitude)
        # set contributor
        query.set_contributor(Contributor.CONTRIBUTOR_AK)
        self.assertEqual(query.get_contributor(), Contributor.CONTRIBUTOR_AK)

    def test_get_parameters(self):
        # Test get query parameters method
        start_time = datetime(2014, 1, 1)
        end_time = datetime(2014, 1, 2)
        query = EarthquakeQuery(time=[TimeFrame(start_time, end_time)])
        # Expected: ISO timestamps without fractional seconds, default result limit 20000.
        parameter = {"time": [
            {"starttime": start_time.isoformat().split(".")[0], "endtime": end_time.isoformat().split(".")[0]}],
            "limit": 20000}
        self.assertEqual(parameter, query.get_query_parameters())

if __name__ == '__main__':
    unittest.main()
| 3,355 | 1,059 |
"""
requests-respectful
A simple but powerful addon to the beloved requests library.
Seamlessly respect service rate limits. Be a good Netizen.
Keeps track of any amount of realms simultaneously. Split-second precision.
:copyright: (c) 2016 by Nicholas Brochu.
:license: Apache 2, see LICENSE for more details.
"""
__author__ = "Nicholas Brochu"
__version__ = "0.1.2"
from .respectful_requester import RespectfulRequester
from .exceptions import *
| 476 | 153 |
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SdwanRouterNode(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'ancestors': 'list[MoBaseMoRef]',
'parent': 'MoBaseMoRef',
'permission_resources': 'list[MoBaseMoRef]',
'device_template': 'str',
'name': 'str',
'network_configuration': 'list[SdwanNetworkConfigurationType]',
'template_inputs': 'list[SdwanTemplateInputsType]',
'uuid': 'str',
'organization': 'OrganizationOrganizationRef',
'profile': 'SdwanProfileRef',
'server_node': 'AssetDeviceRegistrationRef'
}
attribute_map = {
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'device_template': 'DeviceTemplate',
'name': 'Name',
'network_configuration': 'NetworkConfiguration',
'template_inputs': 'TemplateInputs',
'uuid': 'Uuid',
'organization': 'Organization',
'profile': 'Profile',
'server_node': 'ServerNode'
}
def __init__(self, account_moid=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, device_template=None, name=None, network_configuration=None, template_inputs=None, uuid=None, organization=None, profile=None, server_node=None):
"""
SdwanRouterNode - a model defined in Swagger
"""
self._account_moid = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._ancestors = None
self._parent = None
self._permission_resources = None
self._device_template = None
self._name = None
self._network_configuration = None
self._template_inputs = None
self._uuid = None
self._organization = None
self._profile = None
self._server_node = None
if account_moid is not None:
self.account_moid = account_moid
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if ancestors is not None:
self.ancestors = ancestors
if parent is not None:
self.parent = parent
if permission_resources is not None:
self.permission_resources = permission_resources
if device_template is not None:
self.device_template = device_template
if name is not None:
self.name = name
if network_configuration is not None:
self.network_configuration = network_configuration
if template_inputs is not None:
self.template_inputs = template_inputs
if uuid is not None:
self.uuid = uuid
if organization is not None:
self.organization = organization
if profile is not None:
self.profile = profile
if server_node is not None:
self.server_node = server_node
@property
def account_moid(self):
"""
Gets the account_moid of this SdwanRouterNode.
The Account ID for this managed object.
:return: The account_moid of this SdwanRouterNode.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this SdwanRouterNode.
The Account ID for this managed object.
:param account_moid: The account_moid of this SdwanRouterNode.
:type: str
"""
self._account_moid = account_moid
@property
def create_time(self):
"""
Gets the create_time of this SdwanRouterNode.
The time when this managed object was created.
:return: The create_time of this SdwanRouterNode.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this SdwanRouterNode.
The time when this managed object was created.
:param create_time: The create_time of this SdwanRouterNode.
:type: datetime
"""
self._create_time = create_time
@property
def domain_group_moid(self):
"""
Gets the domain_group_moid of this SdwanRouterNode.
The DomainGroup ID for this managed object.
:return: The domain_group_moid of this SdwanRouterNode.
:rtype: str
"""
return self._domain_group_moid
@domain_group_moid.setter
def domain_group_moid(self, domain_group_moid):
"""
Sets the domain_group_moid of this SdwanRouterNode.
The DomainGroup ID for this managed object.
:param domain_group_moid: The domain_group_moid of this SdwanRouterNode.
:type: str
"""
self._domain_group_moid = domain_group_moid
@property
def mod_time(self):
"""
Gets the mod_time of this SdwanRouterNode.
The time when this managed object was last modified.
:return: The mod_time of this SdwanRouterNode.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this SdwanRouterNode.
The time when this managed object was last modified.
:param mod_time: The mod_time of this SdwanRouterNode.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this SdwanRouterNode.
The unique identifier of this Managed Object instance.
:return: The moid of this SdwanRouterNode.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this SdwanRouterNode.
The unique identifier of this Managed Object instance.
:param moid: The moid of this SdwanRouterNode.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this SdwanRouterNode.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:return: The object_type of this SdwanRouterNode.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this SdwanRouterNode.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:param object_type: The object_type of this SdwanRouterNode.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this SdwanRouterNode.
The array of owners which represent effective ownership of this object.
:return: The owners of this SdwanRouterNode.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this SdwanRouterNode.
The array of owners which represent effective ownership of this object.
:param owners: The owners of this SdwanRouterNode.
:type: list[str]
"""
self._owners = owners
@property
def shared_scope(self):
"""
Gets the shared_scope of this SdwanRouterNode.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:return: The shared_scope of this SdwanRouterNode.
:rtype: str
"""
return self._shared_scope
@shared_scope.setter
def shared_scope(self, shared_scope):
"""
Sets the shared_scope of this SdwanRouterNode.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:param shared_scope: The shared_scope of this SdwanRouterNode.
:type: str
"""
self._shared_scope = shared_scope
@property
def tags(self):
"""
Gets the tags of this SdwanRouterNode.
The array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this SdwanRouterNode.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this SdwanRouterNode.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this SdwanRouterNode.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this SdwanRouterNode.
The versioning info for this managed object.
:return: The version_context of this SdwanRouterNode.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this SdwanRouterNode.
The versioning info for this managed object.
:param version_context: The version_context of this SdwanRouterNode.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def ancestors(self):
"""
Gets the ancestors of this SdwanRouterNode.
The array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this SdwanRouterNode.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this SdwanRouterNode.
The array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this SdwanRouterNode.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def parent(self):
"""
Gets the parent of this SdwanRouterNode.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this SdwanRouterNode.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this SdwanRouterNode.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this SdwanRouterNode.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def permission_resources(self):
"""
Gets the permission_resources of this SdwanRouterNode.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:return: The permission_resources of this SdwanRouterNode.
:rtype: list[MoBaseMoRef]
"""
return self._permission_resources
@permission_resources.setter
def permission_resources(self, permission_resources):
"""
Sets the permission_resources of this SdwanRouterNode.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:param permission_resources: The permission_resources of this SdwanRouterNode.
:type: list[MoBaseMoRef]
"""
self._permission_resources = permission_resources
@property
def device_template(self):
"""
Gets the device_template of this SdwanRouterNode.
Name of the Cisco vManage device template that the current device should be attached to. A device template consists of many feature templates that contain SD-WAN vEdge router configuration.
:return: The device_template of this SdwanRouterNode.
:rtype: str
"""
return self._device_template
@device_template.setter
def device_template(self, device_template):
"""
Sets the device_template of this SdwanRouterNode.
Name of the Cisco vManage device template that the current device should be attached to. A device template consists of many feature templates that contain SD-WAN vEdge router configuration.
:param device_template: The device_template of this SdwanRouterNode.
:type: str
"""
self._device_template = device_template
@property
def name(self):
"""
Gets the name of this SdwanRouterNode.
Name of the router node object.
:return: The name of this SdwanRouterNode.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this SdwanRouterNode.
Name of the router node object.
:param name: The name of this SdwanRouterNode.
:type: str
"""
self._name = name
@property
def network_configuration(self):
"""
Gets the network_configuration of this SdwanRouterNode.
The configuration required on the hypervisor for setting up SD-WAN networking.
:return: The network_configuration of this SdwanRouterNode.
:rtype: list[SdwanNetworkConfigurationType]
"""
return self._network_configuration
@network_configuration.setter
def network_configuration(self, network_configuration):
"""
Sets the network_configuration of this SdwanRouterNode.
The configuration required on the hypervisor for setting up SD-WAN networking.
:param network_configuration: The network_configuration of this SdwanRouterNode.
:type: list[SdwanNetworkConfigurationType]
"""
self._network_configuration = network_configuration
@property
def template_inputs(self):
"""
Gets the template_inputs of this SdwanRouterNode.
Dynamic inputs that are expected based on the template inputs specified in the feature templates attached to the device template.
:return: The template_inputs of this SdwanRouterNode.
:rtype: list[SdwanTemplateInputsType]
"""
return self._template_inputs
@template_inputs.setter
def template_inputs(self, template_inputs):
"""
Sets the template_inputs of this SdwanRouterNode.
Dynamic inputs that are expected based on the template inputs specified in the feature templates attached to the device template.
:param template_inputs: The template_inputs of this SdwanRouterNode.
:type: list[SdwanTemplateInputsType]
"""
self._template_inputs = template_inputs
@property
def uuid(self):
"""
Gets the uuid of this SdwanRouterNode.
Uniquely identifies the router by its chassis number.
:return: The uuid of this SdwanRouterNode.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""
Sets the uuid of this SdwanRouterNode.
Uniquely identifies the router by its chassis number.
:param uuid: The uuid of this SdwanRouterNode.
:type: str
"""
self._uuid = uuid
@property
def organization(self):
"""
Gets the organization of this SdwanRouterNode.
Relationship to the Organization that owns the Managed Object.
:return: The organization of this SdwanRouterNode.
:rtype: OrganizationOrganizationRef
"""
return self._organization
@organization.setter
def organization(self, organization):
"""
Sets the organization of this SdwanRouterNode.
Relationship to the Organization that owns the Managed Object.
:param organization: The organization of this SdwanRouterNode.
:type: OrganizationOrganizationRef
"""
self._organization = organization
@property
def profile(self):
"""
Gets the profile of this SdwanRouterNode.
Relationship to the SD-WAN profile object.
:return: The profile of this SdwanRouterNode.
:rtype: SdwanProfileRef
"""
return self._profile
@profile.setter
def profile(self, profile):
"""
Sets the profile of this SdwanRouterNode.
Relationship to the SD-WAN profile object.
:param profile: The profile of this SdwanRouterNode.
:type: SdwanProfileRef
"""
self._profile = profile
@property
def server_node(self):
"""
Gets the server_node of this SdwanRouterNode.
Relationship to the server node on which vEdge router is to be provisioned.
:return: The server_node of this SdwanRouterNode.
:rtype: AssetDeviceRegistrationRef
"""
return self._server_node
@server_node.setter
def server_node(self, server_node):
"""
Sets the server_node of this SdwanRouterNode.
Relationship to the server node on which vEdge router is to be provisioned.
:param server_node: The server_node of this SdwanRouterNode.
:type: AssetDeviceRegistrationRef
"""
self._server_node = server_node
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
    """
    Returns the string representation of the model
    :rtype: str
    """
    # pformat gives a readable, pretty-printed dump of the dict form.
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`
    """
    # repr, str and pprint all share the same to_str() dump.
    return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SdwanRouterNode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal
    """
    # Delegates to == (and thus __eq__) so the two operators stay consistent.
    return not self == other
| 23,170 | 6,242 |
from pathlib import Path
from typing import Iterable, List, Tuple
Sample_Input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
"""
def parse_input(input: str) -> List[str]:
    """Split the raw puzzle text into a list of lines.

    Annotation fixed: str.split returns a list, not a tuple as the
    original `-> tuple` claimed.
    """
    return input.strip().split("\n")
def find_errors(lines: Iterable[str]) -> int:
    """Part 1: sum the syntax-error score of every corrupted line."""
    points = {")": 3, "]": 57, "}": 1197, ">": 25137}
    _, _, corrupted = parse_lines(lines)
    return sum(points[char] for char in corrupted)
def complete_incomplete(lines: Iterable[str]) -> int:
    """Part 2: median autocomplete score over all incomplete lines.

    Each unmatched opener, innermost first, contributes its closer's
    points to a base-5 running total.
    """
    pair = {"(": ")", "[": "]", "{": "}", "<": ">"}
    points = {")": 1, "]": 2, "}": 3, ">": 4}
    _, incomplete, _ = parse_lines(lines)
    totals = []
    for stack in incomplete:
        total = 0
        for opener in reversed(stack):
            total = total * 5 + points[pair[opener]]
        totals.append(total)
    return sorted(totals)[len(totals) // 2]
def parse_lines(lines: Iterable[str]) -> Tuple[List[str], List[Iterable[str]], List[str]]:
    """Split *lines* into (complete, incomplete, errors).

    complete:   the lines whose chunks all match
    incomplete: per line, the stack of unmatched openers (innermost last)
    errors:     per corrupted line, its first illegal closing character

    Annotations fixed: the original declared List[int] for all three,
    but every element is a string or a stack of opener characters.
    """
    errors = []
    incomplete = []
    complete = []
    for line in lines:
        status, value = checker(line)
        if status == "complete":
            complete.append(line)
        elif status == "open":
            incomplete.append(value)
        else:
            errors.append(value)
    return complete, incomplete, errors
def checker(line: str) -> Tuple[str, Iterable[str]]:
    """Classify one line of chunk syntax.

    Returns a (status, payload) pair:
      ("complete", "")    every opener was matched
      ("error", char)     the first illegal closing character
      ("open", stack)     list of unmatched openers, innermost last

    Annotation widened: the original `Tuple[str, str]` was wrong for the
    "open" case, whose payload is a list of characters.
    """
    open_chars = {")": "(", "]": "[", "}": "{", ">": "<"}
    stack = []
    for ch in line:
        if ch in "([{<":
            stack.append(ch)
        else:
            # A closer with nothing open, or a mismatched opener on top,
            # makes the line corrupt.
            if len(stack) == 0:
                return ("error", ch)
            last_char = stack.pop()
            if open_chars[ch] != last_char:
                return ("error", ch)
    if len(stack) == 0:
        return ("complete", "")
    return ("open", stack)
if __name__ == "__main__":
input_data = (Path.cwd() / "2021" / "data" / f"{Path(__file__).stem}_input.txt").read_text()
lines = parse_input(input_data)
print(f"Error Score is: {find_errors(lines)}")
print(f"Incomplete Score is: {complete_incomplete(lines)}")
| 2,256 | 854 |
#!/usr/bin/env python
# coding=utf-8
# Author : Simon
# Notes : Utilise un message specifique
# Input : Reçois les commandes des autres noeuds pour les moteurs sous la forme d'une pin et d'une commande en pwm
import rospy
from std_msgs.msg import Float32
from maestro.maestro import Controller
# from maestro_sim.maestro import Controller # En cas de problème de driver, celui-ci est plus compréhensible et se debug bien
from ros_maestro.msg import PwmCmd
# 1000 : marche avant
# 1500 : statique
# 2000 : marche arrière
# Channel : 0:L 1:R
class PWMBoard(Controller):
    """Pololu Maestro PWM board driver (Python 2 / ROS).

    Maps named devices to board pins, applies PWM commands received as
    PwmCmd messages, and publishes sensor (input-pin) readings.
    """

    def __init__(self, port, actuators, sensors, data_types):
        Controller.__init__(self, ttyStr=port)
        # Data layout: which pin of the board is associated with which device.
        self.devices_by_pins = self.gen_dic_by_pin_keys(actuators)
        self.devices_by_name = actuators
        self.types = data_types
        self.sensors = sensors
        print 'devices_by_pins : ', self.devices_by_pins
        # Apply the configured acceleration of each actuator's data type to its pin.
        for device in self.devices_by_name:
            pin = self.devices_by_name[device]['pin']
            data_type = self.devices_by_name[device]['data_type']
            self.setAccel(pin, self.types[data_type]['accel'])

    def gen_dic_by_pin_keys(self, devices):
        """
        Invert the device table: turn the hash table keyed by device name
        (value: config including the pin number) into one keyed by pin
        number (value: device name).
        :param devices: dict of device configs keyed by device name
        :return pin_dic: dict mapping int pin number -> device name
        """
        pin_dic = dict()
        for device in devices:
            print 'device :', device
            pin = int(devices[device]['pin'])
            pin_dic[pin] = device
        return pin_dic

    def cb_pwm(self, msg):
        """ROS callback: convert a normalized command into a Maestro target."""
        print 'pin :', msg.pin  # pin as int
        print 'cmd :', msg.command  # command as float
        # Resolve the command's device and its data type / value range.
        device_name = self.devices_by_pins[msg.pin]
        print 'device_name', device_name
        type = self.devices_by_name[device_name]['data_type']
        print 'type', type
        range = self.types[type]['range']
        range_min = range[0]
        range_max = range[1]
        range_tot = range_max - range_min
        range_zero = range_min + range_tot / 2.0
        print 'range', range
        # Compute the PWM command: map the device range onto 1000-2000 us, centered at 1500.
        cmd = (msg.command - range_zero) * 1000 / range_tot + 1500
        print 'pwm sent to board :', int(cmd)
        # Send the command, translated to Pololu units (0-2000 us = 0-8192).
        cmd = int(cmd*4.000)
        print 'cmd sent to board :', int(cmd), 'on pin', msg.pin
        self.setTarget(int(msg.pin), int(cmd))

    def publish(self, sensors):
        """Read each input pin and publish its value on the device's topic."""
        for device in sensors:
            pub = sensors[device]['publisher']
            pin = int(sensors[device]['pin'])
            # rospy.loginfo("getting positions")
            val = self.getPosition(pin)
            # rospy.loginfo("Sensors values")
            pub.publish(val)
if __name__ == '__main__':
    # NOTE(review): 'rospy' and 'Float32' are used below but never imported in
    # this file - presumably 'import rospy' and
    # 'from std_msgs.msg import Float32' are expected; confirm before running.
    rospy.init_node('driver_maestro')
    rospy.loginfo("driver_maestro Node Initialised")
    # Board configuration comes from the ROS parameter server
    port = rospy.get_param('~port', "/dev/ttyACM0")
    devices = rospy.get_param('maestro/device')
    data_types = rospy.get_param('maestro/data_types')
    # Split devices into sensors (inputs, published) and actuators (outputs)
    actuators = {}
    sensors = {}
    for device in devices:
        print data_types[devices[device]['data_type']]['type']
        if data_types[devices[device]['data_type']]['type']=='input':
            sensors[device] = devices[device]
            sensors[device]['publisher'] = rospy.Publisher(device, Float32, queue_size=1)
        if data_types[devices[device]['data_type']]['type']=='output':
            actuators[device] = devices[device]
    maestro = PWMBoard(port, actuators, sensors, data_types)
    rospy.Subscriber('cmd_pwm', PwmCmd, maestro.cb_pwm)
    # Main loop: poll and publish sensors at ~10 Hz until shutdown
    while not rospy.is_shutdown():
        try:
            rospy.rostime.wallsleep(0.1)
            maestro.publish(sensors)
        except rospy.ROSInterruptException:
            maestro.close()
| 4,028 | 1,351 |
import os, time, shutil, random, re
from xml.etree import cElementTree as ET
def run_test(cooja, dir, test, seed):
    """Launch one headless Cooja simulation of dir+test with the given random seed."""
    os.system(f"java -jar {cooja} -nogui={dir}{test} -random-seed={seed}")
def remove_command_in_test(dir, test):
    """Strip every <commands> element from the simulation file dir+test.

    Iterates over a snapshot of each element's children: calling
    Element.remove() while iterating the element itself skips the sibling
    that follows the removed one (classic ElementTree pitfall), which left
    adjacent <commands> elements behind.
    """
    file = f"{dir}{test}"
    tree = ET.parse(file)
    for element in tree.iter():
        for subelement in list(element):  # snapshot -> safe removal
            if subelement.tag == "commands":
                element.remove(subelement)
    tree.write(file)
def update_firmware_in_test(dir, test, firmware_network, firmware_joining):
    """Rewrite every <firmware> entry of each <motetype> in dir+test.

    The joining-node mote type gets firmware_joining; every other mote
    type gets firmware_network.
    """
    path = f"{dir}{test}"
    tree = ET.parse(path)
    for motetype_el in tree.iter():
        if motetype_el.tag != "motetype":
            continue
        # The (last) <identifier> child names the mote type
        identifier = ""
        for child in motetype_el:
            if child.tag == "identifier":
                identifier = child.text
        chosen = firmware_joining if identifier == "z1-joining-node" else firmware_network
        for child in motetype_el:
            if child.tag == "firmware":
                child.text = f"[CONTIKI_DIR]/examples/6tisch/simple-node/{chosen}"
    tree.write(path)
def add_scriptrunner_in_test(dir, test):
    """Embed a ScriptRunner plugin into the simulation file dir+test.

    The JavaScript control script is loaded from the module-level
    scripts_dir based on the test-name prefix (join-/create-/rejoin-);
    any other prefix gets an empty script.
    """
    path = f"{dir}{test}"

    # Pick the control script for this kind of test
    scriptrunner = ""
    prefix_to_script = {"join-": "join.js", "create-": "create.js", "rejoin-": "rejoin.js"}
    for prefix, script_name in prefix_to_script.items():
        if test.startswith(prefix):
            # Context manager closes the handle (open(...).read() leaked it)
            with open(f"{scripts_dir}{script_name}") as script_file:
                scriptrunner = script_file.read()
            break

    tree = ET.parse(path)
    for element in tree.iter():
        if element.tag == "simconf":
            plugin = ET.SubElement(element, "plugin")
            plugin.text = "org.contikios.cooja.plugins.ScriptRunner"
            conf = ET.SubElement(plugin, "plugin_config")
            script = ET.SubElement(conf, "script")
            script.text = scriptrunner
            active = ET.SubElement(conf, "active")
            active.text = "true"
    tree.write(path)
def add_mobility_in_test(dir, test):
    """Attach the Mobility plugin to dir+test, pointing it at the
    positions .dat file that matches the test's name."""
    path = f"{dir}{test}"
    tree = ET.parse(path)
    for element in tree.iter():
        if element.tag == "simconf":
            plugin = ET.SubElement(element, "plugin")
            plugin.text = "Mobility"
            conf = ET.SubElement(plugin, "plugin_config")
            positions = ET.SubElement(conf, "positions")
            positions.set("EXPORT", "copy")
            positions.text = f"[CONTIKI_DIR]/examples/6tisch/simple-node/simulations/positions/{test.replace('.csc', '.dat')}"
    tree.write(path)
def add_powertracker_in_test(dir, test):
    """Attach the PowerTracker plugin to the simulation file dir+test."""
    path = f"{dir}{test}"
    tree = ET.parse(path)
    for element in tree.iter():
        if element.tag == "simconf":
            ET.SubElement(element, "plugin").text = "PowerTracker"
    tree.write(path)
def check_if_test_successful(test_output_file_path, test):
    """Decide whether a finished Cooja run passed.

    A run passes when its testlog contains "TEST OK", does not contain
    "TEST FAILED", and - for join tests - reports that the network was
    created. Returns True on success, otherwise prints a diagnostic and
    returns False.
    """
    # Context manager: the handle was previously opened and never closed.
    with open(test_output_file_path, "r") as log_file:
        test_output = log_file.read()
    if("TEST FAILED" in test_output):
        print("##### Test failed: 'TEST FAILED' found #####")
        return False
    if("TEST OK" not in test_output):
        print("##### Test failed: 'TEST OK' not found #####")
        return False
    if(test.startswith("join") and "Network created with " not in test_output):
        print("##### Test failed: network not established #####")
        return False
    return True
def add_testlog_parameters_csv(seed, test, firmware, test_output_file_path):
    """Prefix every CSV line of the testlog with the run parameters.

    Parameters are parsed from the test name ('<kind>-<topology>-<nodes>')
    and the firmware name ('node-<tsch>-<channels>c-<timeout>s-...').
    Non-CSV lines (no comma) are left untouched.
    """
    base = test.split(".")[0].split("-")
    fw = firmware.split(".")[0].split("-")
    # Order: seed, nodes, channels, topology, tsch_version, assoc_timeout
    prefix = ",".join(str(v) for v in [seed,
                                       base[2],
                                       re.sub("[^0-9]", "", fw[2]),
                                       base[1],
                                       fw[1],
                                       re.sub("[^0-9]", "", fw[3])])
    with open(test_output_file_path, "r") as log_file:
        lines = log_file.readlines()
    rewritten = "".join(f"{prefix},{line}" if "," in line else line for line in lines)
    with open(test_output_file_path, "w") as log_file:
        log_file.write(rewritten)
def add_testlog_firmwares(firmware_network, firmware_joining, test_output_file_path):
    """Prepend the firmware name used for each mote type to the testlog."""
    with open(test_output_file_path, "r") as log_file:
        original = log_file.read()
    header = (f"z1-network-node firmware: {firmware_network}\n"
              f"z1-joining-node firmware: {firmware_joining}\n")
    with open(test_output_file_path, "w") as log_file:
        log_file.write(header + original)
# Machine-specific paths into the contiki-ng checkout.
if os.path.isdir("/home/user/"):
    cooja_jar = "/home/user/contiki-ng/tools/cooja/dist/cooja.jar"
    run_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/"
    sim_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/"
    tests_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/tests/"
    scripts_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/scriptrunners/"
    pos_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/positions/"
    log_dir = "/home/user/contiki-ng/examples/6tisch/simple-node/simulations/logs/"

# Make sure the working folders exist.
# NOTE(review): the directory variables are only bound inside the guard above,
# so on any other machine the lines below raise NameError - confirm intent.
if not os.path.isdir(tests_dir):
    os.mkdir(tests_dir)
if not os.path.isdir(scripts_dir):
    os.mkdir(scripts_dir)
if not os.path.isdir(pos_dir):
    os.mkdir(pos_dir)
if not os.path.isdir(log_dir):
    os.mkdir(log_dir)

# seeds = [15557,65890,237601,268521,537634,571714,881378,928542,963159,978437]
seeds = random.sample(range(0,999999), 15) # 15 random seeds
seeds.sort()

# Firmware pairs to exercise: one image for the joining node, one for the rest.
firmwares = [
    {"joining": "node-custom-16c-16s-2eb.z1", "network": "node-network-16c.z1"}
    # {"joining": "node-classic-16c-180s-2eb.z1", "network": "node-network-16c.z1"}
]

tests = [f for f in os.listdir(tests_dir) if os.path.isfile(f"{tests_dir}{f}")]
tests.sort()

print("")
print(f"Running tests {tests}.")
print(f"Running tests on firmwares {firmwares}.")
print(f"Running tests on seeds {seeds}.")

os.chdir(run_dir) # change working directory to run_dir

# Full cross product: every test x every seed x every firmware pair.
for test in tests: # run each test from tests_dir
    for seed in seeds: # run test with each seed
        for firmware in firmwares: # run test with every firmware
            shutil.copy(f"{tests_dir}{test}", f"{run_dir}{test}") # copy test to run_dir
            remove_command_in_test(run_dir, test) # remove commands in simulation test file
            update_firmware_in_test(run_dir, test, firmware["network"], firmware["joining"]) # change firmware in file
            add_mobility_in_test(run_dir, test) # add mobility plugin + file to test
            add_powertracker_in_test(run_dir, test) # add powertracker plugin to test
            add_scriptrunner_in_test(run_dir, test) # add scriptrunner to test for extraction of data and controlling test
            print(f"\n\n ########### Now running test '{test}' with firmware '{firmware}' and seed '{seed}' ##############\n")
            run_test(cooja_jar, run_dir, test, seed) # run simulation with seed
            # Retry with fresh random seeds until the run passes.
            local_seed = seed
            while not check_if_test_successful(f"{run_dir}COOJA.testlog", test): # evaluate if test is OK
                local_seed = random.randint(0,999999)
                run_test(cooja_jar, run_dir, test, local_seed)
            add_testlog_parameters_csv(local_seed, test, firmware["joining"], f"{run_dir}COOJA.testlog") # add test parameters to csv line in file
            add_testlog_firmwares(firmware["network"], firmware["joining"], f"{run_dir}COOJA.testlog") # add node firmwares used to top of file
            os.rename(f"{run_dir}COOJA.testlog", f"{log_dir}{test.split('.')[0]}_{firmware['joining'].split('.')[0]}_{local_seed}.testlog") # move simulation result log
            os.remove(f"{run_dir}{test}") # delete test from run_dir
            time.sleep(1)
| 8,733 | 2,987 |
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from recipe import views

# The DefaultRouter auto-generates the URL patterns for each registered
# viewset (list/detail routes plus any extra @action routes).
# One viewset may therefore map to multiple urls.
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)

# URL namespace, so reverse() can resolve e.g. 'recipe:recipe-list'.
app_name = 'recipe'

urlpatterns = [
    # include() takes the iterable of generated patterns, NOT a str.
    path('', include(router.urls)),
]
| 520 | 151 |
#from gluon.debug import dbg
def show_page():
    """
    Show the requested page.

    request.args(0) may be a numeric page id or a url slug. Returns the
    dict of page components consumed by the view.
    """
    from gluon.tools import prettydate
    manager_toolbar = ManagerToolbar('page')
    # Resolve the page by id or by url slug
    if request.args(0) and request.args(0).isdigit():
        page = db.page(request.args(0))
    else:
        page = db(db.page.url==request.args(0)).select().first()
    # If the page has no content, show its first child instead
    # (len < 8 avoids treating a page containing just "<br />" as content)
    if page and len(page.content) < 8:
        child = db(db.page.parent==page).select(orderby=db.page.rank|db.page.title).first()
        if child:
            page=child
    # Fallbacks: the special 'images' page, then the site index page
    if not page:
        if request.args(0) and request.args(0).lower() == 'images':
            redirect(URL('images'))
        else:
            page = db(db.page.is_index==True).select().first()
            # NOTE(review): if no index page exists, 'page' stays None and the
            # attribute accesses below raise - confirm an index page always exists.
    # Disqus comments are enabled per page AND per site configuration
    disqus_shortname = None
    if page.allow_disqus and WEBSITE_PARAMETERS.disqus_shortname:
        disqus_shortname = WEBSITE_PARAMETERS.disqus_shortname
    pretty_date = prettydate(page.modified_on, T)
    # Fetch each configurable component record of the page layout
    header_component = db.page_component(page.header_component)
    left_sidebar_component = db.page_component(page.left_sidebar_component)
    right_sidebar_component = db.page_component(page.right_sidebar_component)
    left_footer_component = db.page_component(page.left_footer_component)
    middle_footer_component = db.page_component(page.middle_footer_component)
    right_footer_component = db.page_component(page.right_footer_component)
    central_component = db.page_component(page.central_component)
    return dict(page=page,
                header_component=header_component,
                left_sidebar_enabled=page.left_sidebar_enabled,
                right_sidebar_enabled=page.right_sidebar_enabled,
                left_sidebar_component=left_sidebar_component,
                right_sidebar_component=right_sidebar_component,
                left_footer_component=left_footer_component,
                middle_footer_component=middle_footer_component,
                right_footer_component=right_footer_component,
                central_component=central_component,
                manager_toolbar=manager_toolbar,
                pretty_date=pretty_date,
                disqus_shortname=disqus_shortname)
@auth.requires_membership('manager')
def delete_page():
    """
    Ask for confirmation, then delete the page given by request.args(0)
    (numeric id or url slug) together with its uploaded images.
    """
    if request.args(0) and request.args(0).isdigit():
        page = db.page(request.args(0))
    else:
        page = db(db.page.url==request.args(0)).select().first()
    # Keep the return statement valid when no page was resolved (previously
    # 'form' could be unbound and raised NameError).
    form = None
    if len(request.args) and page:
        form = FORM.confirm(T('Yes, I really want to delete this page'),{T('Back'):URL('show_page', args=page.id)})
        if form.accepted:
            # Remove images linked to the page. The upload folder is named
            # after the page id; FORM.confirm does not populate form.vars.id,
            # so the previous str(form.vars.id) never matched a folder.
            pathname = path.join(request.folder,'static','images', 'pages_content', str(page.id))
            if path.exists(pathname):
                shutil.rmtree(pathname)
            # Remove the page record itself
            db(db.page.id==page.id).delete()
            session.flash = T('Page deleted')
            redirect(URL('default', 'index'))
    return dict(page=page, form=form)
@auth.requires_membership('manager')
def edit_page():
    """
    Create or update a page through web2py's CRUD helpers.

    request.args(0) may hold a numeric page id or a url slug. For an
    existing page an update form (with a WYSIWYG/HTML toggle) is returned;
    otherwise a creation form, with the content field hidden until the
    page record exists.
    """
    # DOM row ids the view collapses under an "advanced fields" section
    advanced_fields = ["{}_{}__row".format(db.page, field) for field in
                       [db.page.rank.name,
                        db.page.url.name,
                        db.page.is_index.name,
                        db.page.is_enabled.name,
                        db.page.header_component.name,
                        db.page.left_sidebar_enabled.name,
                        db.page.right_sidebar_enabled.name,
                        db.page.left_footer_component.name,
                        db.page.middle_footer_component.name,
                        db.page.right_footer_component.name,
                        db.page.central_component.name,
                        db.page.allow_disqus.name,
                        db.page.max_content_height.name]
                       ]
    page_id = request.args(0)
    if page_id:
        if page_id.isdigit():
            page = db.page(page_id)
        else:
            page = db(db.page.url==page_id).select().first()
    # When page_id is falsy the short-circuit below avoids touching 'page'
    if len(request.args) and page:
        crud.settings.update_deletable = False
        form = crud.update(db.page,page,next=URL('show_page', args=page.id))
        # Toggle injected under the form to switch between the WYSIWYG
        # editor and the raw HTML view (wired up client-side)
        my_extra_element = XML("""
            <div id="wysiwyg_management">
                <ul class="nav nav-pills">
                    <li id="activate_wysiwyg" class="active">
                        <a href="#">%s</a>
                    </li>
                    <li id="remove_wysiwyg" >
                        <a href="#">%s</a>
                    </li>
                </ul>
            </div>
            """ %(T('WYSIWYG view'),T('HTML view')))
        form[0][4].append( my_extra_element)
    else:
        # Hide the "content" of the page : the page has no title
        # and this is impossible to initialise the upload field with page.url
        db.page.content.readable = db.page.content.writable = False
        form = crud.create(db.page,next='edit_page/[id]')
    return dict(form=form, advanced_fields=advanced_fields)
| 5,217 | 1,547 |
"""
Tests for units.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from astropy.units import Quantity, UnitsError, UnitConversionError
import numpy as np
import pytest
from hsr4hci.units import (
flux_ratio_to_magnitudes,
InstrumentUnitsContext,
magnitude_to_flux_ratio,
)
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__instrument_units_context() -> None:
    """
    Test `hsr4hci.units.InstrumentUnitsContext`.

    Covers constructor unit validation, pixel/arcsec/lambda_over_d
    conversions inside the context, re-use and locality of the context,
    and co-existence of differently configured contexts.
    """

    # Case 1 (illegal constructor argument: pixscale)
    with pytest.raises(UnitsError) as units_error:
        InstrumentUnitsContext(
            pixscale=Quantity(0.0271, 'arcsec'),
            lambda_over_d=Quantity(0.096, 'arcsec'),
        )
    assert "Argument 'pixscale' to function" in str(units_error)

    # Case 2 (illegal constructor argument: lambda_over_d)
    with pytest.raises(UnitsError) as units_error:
        InstrumentUnitsContext(
            pixscale=Quantity(0.0271, 'arcsec / pixel'),
            lambda_over_d=Quantity(0.096, 'gram'),
        )
    assert "Argument 'lambda_over_d' to function" in str(units_error)

    instrument_units_context = InstrumentUnitsContext(
        pixscale=Quantity(0.0271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.096, 'arcsec'),
    )

    # Case 3 (conversion from pixel to arcsec / lambda_over_d)
    # Expected values follow from the pixscale above: 1 px = 0.0271 arcsec,
    # and 0.0271 / 0.096 = 0.28229... lambda_over_d
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.0271
        assert quantity.to('lambda_over_d').value == 0.28229166666666666

    # Case 4 (context is re-usable)
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.0271
        assert quantity.to('lambda_over_d').value == 0.28229166666666666

    # Case 5 (context is local; conversions do not work outside the context)
    with pytest.raises(UnitConversionError) as unit_conversion_error:
        _ = quantity.to('arcsec').value
    assert "'pix' and 'arcsec' (angle) are not" in str(unit_conversion_error)

    # Case 6 (conversion from arcsec to pixel / lambda_over_d)
    with instrument_units_context:
        quantity = Quantity(1.0, 'arcsec')
        assert quantity.to('pixel').value == 36.90036900369004
        assert quantity.to('lambda_over_d').value == 10.416666666666666

    # Case 7 (conversion from lambda_over_d to arcsec / pixel)
    with instrument_units_context:
        quantity = Quantity(1.0, 'lambda_over_d')
        assert quantity.to('arcsec').value == 0.096
        assert quantity.to('pixel').value == 3.5424354243542435

    # Case 8 (contexts can be overwritten / re-defined)
    instrument_units_context = InstrumentUnitsContext(
        pixscale=Quantity(0.271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.96, 'arcsec'),
    )
    with instrument_units_context:
        quantity = Quantity(1.0, 'pixel')
        assert quantity.to('arcsec').value == 0.271
        assert quantity.to('lambda_over_d').value == 0.2822916666666667

    # Case 9 (different contexts can co-exist)
    context_a = InstrumentUnitsContext(
        pixscale=Quantity(0.0271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.096, 'arcsec'),
    )
    context_b = InstrumentUnitsContext(
        pixscale=Quantity(0.271, 'arcsec / pixel'),
        lambda_over_d=Quantity(0.96, 'arcsec'),
    )
    quantity = Quantity(1.0, 'pixel')
    with context_a:
        assert quantity.to('arcsec').value == 0.0271
    with context_b:
        assert quantity.to('arcsec').value == 0.271
def test__flux_ratio_to_magnitudes() -> None:
    """
    Test `hsr4hci.units.flux_ratio_to_magnitudes`.
    """

    # Scalar input: a flux ratio of 100 corresponds to -5 magnitudes
    assert flux_ratio_to_magnitudes(100) == -5

    # Array input: the function must vectorize over numpy arrays
    ratios = np.array([100, 0.01])
    expected = np.array([-5, 5])
    assert np.allclose(flux_ratio_to_magnitudes(ratios), expected)
def test__magnitude_to_flux_ratio() -> None:
    """
    Test `hsr4hci.units.magnitude_to_flux_ratio`.
    """

    # Scalar input: -5 magnitudes corresponds to a flux ratio of 100
    assert magnitude_to_flux_ratio(-5) == 100

    # Array input: the function must vectorize over numpy arrays
    magnitudes = np.array([-5, 5])
    expected = np.array([100, 0.01])
    assert np.allclose(magnitude_to_flux_ratio(magnitudes), expected)
| 4,307 | 1,536 |
"""
Initialization code for apps using Praneeth's blender python module.
Also useful for initalizing blender's workspace.
Usage:
from bpn_init import *
pn.reload() # useful when developing the module alongside the app
env.reset() # if you want to delete objects, this is useful!
"""
#pylint:disable=unused-import
# Imports from the standard library
import os
import sys
import inspect
from importlib import reload
from timeit import default_timer as timer
# Installed using _requirements
import numpy as np
import pandas as pd
# Blender's library
import bpy #pylint: disable=import-error
import bmesh #pylint: disable=import-error
import mathutils #pylint: disable=import-error
# Peronal library
import pntools as pn
from pntools import sampled
import bpn
# modules
from bpn import new, env, demo, utils, turtle, vef, trf, io, mantle, core
# classes
from bpn.mantle import Pencil, Screen
# functions
from bpn.utils import get
from pntools import run
# # Convenience
# Matrix = mathutils.Matrix
# Vector = mathutils.Vector
# for using matplotlib from within blender
if os.name == 'nt':
    # On Windows, point 'multiprocess' at an external (conda) python
    # executable so that plotting can run in a process outside Blender's
    # embedded interpreter. locate_command returns CRLF-separated matches;
    # take the first one.
    import multiprocess
    multiprocess.set_executable(pn.locate_command('python', 'conda', verbose=False).split('\r\n')[0])
from plots import plot
def reset():
    """Reset the workspace: clear Blender's scene via env.reset() and
    re-initialize the module-level ThingDB registry in bpn.core."""
    env.reset()
    core.ThingDB = core._ThingDB()
# bridge to julia
# pylint: disable=no-name-in-module, wrong-import-order
# from julia import Pkg
# Pkg.activate(os.path.join(os.path.dirname(__file__), "SimPN"))
# from julia import SimPN as spn
# Pkg.activate() # switch back to the default environment in julia
# pylint: enable=no-name-in-module, wrong-import-order
# # bridge to MATLAB
# try:
# import matlab.engine
# ml_eng_installed = True
# except ImportError:
# print("Unable to detect MATLAB engine.")
# ml_eng_installed = False
# if ml_eng_installed:
# all_ml = matlab.engine.find_matlab()
# if len(all_ml) == 0:
# print("No shared MATLAB engine sessions found. Type matlab.engine.shareEngine at the MATLAB command prompt")
# elif len(all_ml) == 1:
# future = matlab.engine.connect_matlab(all_ml[0], background=True)
# eng = future.result()
# mw = eng.workspace
# elif len(all_ml) > 1:
# print("Multiple MATLAB sessions found. Please connect manually.")
| 2,309 | 740 |
import chevalley as chv

# Build the Chevalley/Lie-type data for type A2 and emit the full LaTeX
# report of its Curtis tables.
# NOTE(review): semantics of report_latex_all() inferred from its name only -
# confirm against the chevalley package documentation.
tmp = chv.LieType("a", 2)
tmp.curtis.report_latex_all()
| 81 | 36 |
import hashlib
import json
import logging
import warnings
from io import StringIO
from typing import TextIO, BinaryIO, Union, List, Optional, Dict, Any, Tuple
from pynmrstar import definitions, utils, loop as loop_mod, parser as parser_mod, saveframe as saveframe_mod
from pynmrstar._internal import _json_serialize, _interpret_file, _get_entry_from_database, write_to_file
from pynmrstar.exceptions import InvalidStateError
from pynmrstar.schema import Schema
class Entry(object):
"""An object oriented representation of a BMRB entry. You can initialize this
object several ways; (e.g. from a file, from the official database,
from scratch) see the class methods below. """
def __contains__(self, item: Any):
""" Check if the given item is present in the entry. """
# Prepare for processing
if isinstance(item, (list, tuple)):
to_process: List[Union[str, saveframe_mod.Saveframe, loop_mod.Loop]] = list(item)
elif isinstance(item, (loop_mod.Loop, saveframe_mod.Saveframe, str)):
to_process = [item]
else:
return False
for item in to_process:
if isinstance(item, saveframe_mod.Saveframe):
if item not in self._frame_list:
return False
elif isinstance(item, (loop_mod.Loop, str)):
found = False
for saveframe in self._frame_list:
if item in saveframe:
found = True
break
if not found:
return False
else:
return False
return True
def __delitem__(self, item: Union['saveframe_mod.Saveframe', int, str]) -> None:
"""Remove the indicated saveframe."""
if isinstance(item, int):
try:
del self._frame_list[item]
except IndexError:
raise IndexError(f'Index out of range: no saveframe at index: {item}')
else:
self.remove_saveframe(item)
def __eq__(self, other) -> bool:
"""Returns True if this entry is equal to another entry, false
if it is not equal."""
if not isinstance(other, Entry):
return False
return (self.entry_id, self._frame_list) == (other.entry_id, other._frame_list)
    def __getitem__(self, item: Union[int, str]) -> 'saveframe_mod.Saveframe':
        """Get the indicated saveframe.

        Integer (and slice) keys index the frame list directly; any key
        that the list rejects with TypeError is treated as a saveframe name.
        """
        try:
            return self._frame_list[item]
        except TypeError:
            return self.get_saveframe_by_name(item)
    def __init__(self, **kwargs) -> None:
        """ You should not directly instantiate an Entry using this method.
            Instead use the class methods:

              :py:meth:`Entry.from_database`, :py:meth:`Entry.from_file`,
              :py:meth:`Entry.from_string`, :py:meth:`Entry.from_scratch`,
              :py:meth:`Entry.from_json`, and :py:meth:`Entry.from_template`

        Recognized kwargs (mutually exclusive entry points):
          the_string -- parse NMR-STAR from this string
          file_name  -- parse from this file/URL
          all_tags   -- build a template entry from the schema (together
                        with entry_id, default_values and schema)
          entry_id   -- with no other kwargs, create a blank entry"""

        # Default initializations
        self._entry_id: Union[str, int] = 0
        self._frame_list: List[saveframe_mod.Saveframe] = []
        self.source: Optional[str] = None

        # They initialized us wrong
        if len(kwargs) == 0:
            raise ValueError("You should not directly instantiate an Entry using this method. Instead use the "
                             "class methods: Entry.from_database(), Entry.from_file(), Entry.from_string(), "
                             "Entry.from_scratch(), and Entry.from_json().")

        if 'the_string' in kwargs:
            # Parse from a string by wrapping it in StringIO
            star_buffer: StringIO = StringIO(kwargs['the_string'])
            self.source = "from_string()"
        elif 'file_name' in kwargs:
            # _interpret_file handles local paths, URLs and compression
            star_buffer = _interpret_file(kwargs['file_name'])
            self.source = f"from_file('{kwargs['file_name']}')"
        # Creating from template (schema)
        elif 'all_tags' in kwargs:
            self._entry_id = kwargs['entry_id']

            # Create one template saveframe per category, in schema order
            saveframe_categories: dict = {}
            schema = utils.get_schema(kwargs['schema'])
            schema_obj = schema.schema
            for tag in [schema_obj[x.lower()] for x in schema.schema_order]:
                category = tag['SFCategory']
                if category not in saveframe_categories:
                    saveframe_categories[category] = True
                    templated_saveframe = saveframe_mod.Saveframe.from_template(category, category + "_1",
                                                                                entry_id=self._entry_id,
                                                                                all_tags=kwargs['all_tags'],
                                                                                default_values=kwargs['default_values'],
                                                                                schema=schema)
                    self._frame_list.append(templated_saveframe)
            # Record which NMR-STAR version the template came from
            entry_saveframe = self.get_saveframes_by_category('entry_information')[0]
            entry_saveframe['NMR_STAR_version'] = schema.version
            entry_saveframe['Original_NMR_STAR_version'] = schema.version
            return
        else:
            # Initialize a blank entry
            self._entry_id = kwargs['entry_id']
            self.source = "from_scratch()"
            return

        # Load the BMRB entry from the file
        parser: parser_mod.Parser = parser_mod.Parser(entry_to_parse_into=self)
        parser.parse(star_buffer.read(), source=self.source, convert_data_types=kwargs.get('convert_data_types', False))
def __iter__(self) -> saveframe_mod.Saveframe:
""" Yields each of the saveframes contained within the entry. """
for saveframe in self._frame_list:
yield saveframe
    def __len__(self) -> int:
        """ Returns the number of saveframes in the entry."""
        return len(self._frame_list)
def __repr__(self) -> str:
"""Returns a description of the entry."""
return f"<pynmrstar.Entry '{self._entry_id}' {self.source}>"
def __setitem__(self, key: Union[int, str], item: 'saveframe_mod.Saveframe') -> None:
"""Set the indicated saveframe."""
# It is a saveframe
if isinstance(item, saveframe_mod.Saveframe):
# Add by ordinal
if isinstance(key, int):
self._frame_list[key] = item
# TODO: Consider stripping this behavior out - it isn't clear it is useful
else:
# Add by key
contains_frame: bool = False
for pos, frame in enumerate(self._frame_list):
if frame.name == key:
if contains_frame:
raise ValueError(f"Cannot replace the saveframe with the name '{frame.name} "
f"because multiple saveframes in the entry have the same name. "
f'This library does not allow that normally, as it is '
f'invalid NMR-STAR. Did you manually edit the Entry.frame_list '
f'object? Please use the Entry.add_saveframe() method instead to '
f'add new saveframes.')
self._frame_list[pos] = item
contains_frame = True
if not contains_frame:
raise ValueError(f"Saveframe with name '{key}' does not exist and therefore cannot be "
f"written to. Use the add_saveframe() method to add new saveframes.")
else:
raise ValueError("You can only assign a saveframe to an entry splice. You attempted to assign: "
f"'{repr(item)}'")
def __str__(self, skip_empty_loops: bool = False, skip_empty_tags: bool = False, show_comments: bool = True) -> str:
"""Returns the entire entry in STAR format as a string."""
sf_strings = []
seen_saveframes = {}
for saveframe_obj in self:
if saveframe_obj.category in seen_saveframes:
sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops,
skip_empty_tags=skip_empty_tags, show_comments=False))
else:
sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops,
skip_empty_tags=skip_empty_tags, show_comments=show_comments))
seen_saveframes[saveframe_obj.category] = True
return f"data_{self.entry_id}\n\n" + "\n".join(sf_strings)
@property
def category_list(self) -> List[str]:
""" Returns a list of the unique categories present in the entry. """
category_list = []
for saveframe in self._frame_list:
category = saveframe.category
if category and category not in category_list:
category_list.append(category)
return list(category_list)
@property
def empty(self) -> bool:
""" Check if the entry has no data. Ignore the structural tags."""
for saveframe in self._frame_list:
if not saveframe.empty:
return False
return True
    @property
    def entry_id(self) -> Union[str, int]:
        """ When read, fetches the entry ID.

        When set, updates the entry ID for the Entry, and updates all the tags which
        are foreign keys of the Entry_ID. (For example, Entry.ID and
        Citation.Entry_ID will be updated, if present.)
        """
        return self._entry_id
@entry_id.setter
def entry_id(self, value: Union[str, int]) -> None:
self._entry_id = value
schema = utils.get_schema()
for saveframe in self._frame_list:
for tag in saveframe.tags:
fqtn = (saveframe.tag_prefix + "." + tag[0]).lower()
try:
if schema.schema[fqtn]['entryIdFlg'] == 'Y':
tag[1] = self._entry_id
except KeyError:
pass
for loop in saveframe.loops:
for tag in loop.tags:
fqtn = (loop.category + "." + tag).lower()
try:
if schema.schema[fqtn]['entryIdFlg'] == 'Y':
loop[tag] = [self._entry_id] * len(loop[tag])
except KeyError:
pass
@property
def frame_dict(self) -> Dict[str, 'saveframe_mod.Saveframe']:
"""Returns a dictionary of saveframe name -> saveframe object mappings."""
fast_dict = dict((frame.name, frame) for frame in self._frame_list)
# If there are no duplicates then continue
if len(fast_dict) == len(self._frame_list):
return fast_dict
# Figure out where the duplicate is
frame_dict = {}
for frame in self._frame_list:
if frame.name in frame_dict:
raise InvalidStateError("The entry has multiple saveframes with the same name. That is not allowed in "
"the NMR-STAR format. Please remove or rename one. Duplicate name: "
f"'{frame.name}'. Furthermore, please use Entry.add_saveframe() and "
f"Entry.remove_saveframe() rather than manually editing the Entry.frame_list "
f"list, which will prevent this state from existing in the future.")
frame_dict[frame.name] = frame
return frame_dict
    @property
    def frame_list(self) -> List['saveframe_mod.Saveframe']:
        """ Returns the list of saveframes in the entry. Prefer
        add_saveframe()/remove_saveframe() over mutating this list directly,
        so duplicate saveframe names cannot be introduced. """
        return self._frame_list
    @classmethod
    def from_database(cls, entry_num: Union[str, int], convert_data_types: bool = False):
        """Create an entry corresponding to the most up to date entry on
        the public BMRB server. (Requires ability to initiate outbound
        HTTP connections.)

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        # Delegates to the shared download helper
        return _get_entry_from_database(entry_num, convert_data_types=convert_data_types)
    @classmethod
    def from_file(cls, the_file: Union[str, TextIO, BinaryIO], convert_data_types: bool = False):
        """Create an entry by loading in a file. If the_file starts with
        http://, https://, or ftp:// then we will use those protocols to
        attempt to open the file.

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        return cls(file_name=the_file, convert_data_types=convert_data_types)
@classmethod
def from_json(cls, json_dict: Union[dict, str]):
"""Create an entry from JSON (serialized or unserialized JSON)."""
# If they provided a string, try to load it using JSON
if not isinstance(json_dict, dict):
try:
json_dict = json.loads(json_dict)
except (TypeError, ValueError):
raise ValueError("The JSON you provided was neither a Python dictionary nor a JSON string.")
# Make sure it has the correct keys
if "saveframes" not in json_dict:
raise ValueError("The JSON you provide must be a hash and must contain the key 'saveframes' - even if the "
"key points to 'None'.")
if "entry_id" not in json_dict and "bmrb_id" not in json_dict:
raise ValueError("The JSON you provide must be a hash and must contain the key 'entry_id' - even if the"
" key points to 'None'.")
# Until the migration is complete, 'bmrb_id' is a synonym for
# 'entry_id'
if 'entry_id' not in json_dict:
json_dict['entry_id'] = json_dict['bmrb_id']
# Create an entry from scratch and populate it
ret = Entry.from_scratch(json_dict['entry_id'])
ret._frame_list = [saveframe_mod.Saveframe.from_json(x) for x in json_dict['saveframes']]
ret.source = "from_json()"
# Return the new loop
return ret
    @classmethod
    def from_string(cls, the_string: str, convert_data_types: bool = False):
        """Create an entry by parsing a string.

        Setting convert_data_types to True will automatically convert
        the data loaded from the file into the corresponding python type as
        determined by loading the standard BMRB schema. This would mean that
        all floats will be represented as decimal.Decimal objects, all integers
        will be python int objects, strings and vars will remain strings, and
        dates will become datetime.date objects. When printing str() is called
        on all objects. Other than converting uppercase "E"s in scientific
        notation floats to lowercase "e"s this should not cause any change in
        the way re-printed NMR-STAR objects are displayed."""

        return cls(the_string=the_string, convert_data_types=convert_data_types)
    @classmethod
    def from_scratch(cls, entry_id: Union[str, int]):
        """Create an empty entry that you can programmatically add to.
        You must pass a value corresponding to the Entry ID.
        (The unique identifier "xxx" from "data_xxx".)"""
        return cls(entry_id=entry_id)
@classmethod
def from_template(cls, entry_id, all_tags=False, default_values=False, schema=None) -> 'Entry':
""" Create an entry that has all of the saveframes and loops from the
schema present. No values will be assigned. Specify the entry
ID when calling this method.
The optional argument 'all_tags' forces all tags to be included
rather than just the mandatory tags.
The optional argument 'default_values' will insert the default
values from the schema.
The optional argument 'schema' allows providing a custom schema."""
schema = utils.get_schema(schema)
entry = cls(entry_id=entry_id, all_tags=all_tags, default_values=default_values, schema=schema)
entry.source = f"from_template({schema.version})"
return entry
def add_saveframe(self, frame) -> None:
"""Add a saveframe to the entry."""
if not isinstance(frame, saveframe_mod.Saveframe):
raise ValueError("You can only add instances of saveframes using this method. You attempted to add "
f"the object: '{repr(frame)}'.")
# Do not allow the addition of saveframes with the same name
# as a saveframe which already exists in the entry
if frame.name in self.frame_dict:
raise ValueError(f"Cannot add a saveframe with name '{frame.name}' since a saveframe with that "
f"name already exists in the entry.")
self._frame_list.append(frame)
def compare(self, other) -> List[str]:
"""Returns the differences between two entries as a list.
Non-equal entries will always be detected, but specific differences
detected depends on order of entries."""
diffs = []
if self is other:
return []
if isinstance(other, str):
if str(self) == other:
return []
else:
return ['String was not exactly equal to entry.']
elif not isinstance(other, Entry):
return ['Other object is not of class Entry.']
try:
if str(self.entry_id) != str(other.entry_id):
diffs.append(f"Entry ID does not match between entries: '{self.entry_id}' vs '{other.entry_id}'.")
if len(self._frame_list) != len(other.frame_list):
diffs.append(f"The number of saveframes in the entries are not equal: '{len(self._frame_list)}' vs "
f"'{len(other.frame_list)}'.")
for frame in self._frame_list:
other_frame_dict = other.frame_dict
if frame.name not in other_frame_dict:
diffs.append(f"No saveframe with name '{frame.name}' in other entry.")
else:
comp = frame.compare(other_frame_dict[frame.name])
if len(comp) > 0:
diffs.append(f"Saveframes do not match: '{frame.name}'.")
diffs.extend(comp)
except AttributeError as err:
diffs.append(f"An exception occurred while comparing: '{err}'.")
return diffs
def add_missing_tags(self, schema: 'Schema' = None, all_tags: bool = False) -> None:
""" Automatically adds any missing tags (according to the schema)
to all saveframes and loops and sorts the tags. """
for saveframe in self._frame_list:
saveframe.add_missing_tags(schema=schema, all_tags=all_tags)
def delete_empty_saveframes(self) -> None:
""" Deprecated. Please use `py:meth:pynmrstar.Entry.remove_empty_saveframes`. """
warnings.warn('Deprecated. Please use remove_empty_saveframes() instead.', DeprecationWarning)
return self.remove_empty_saveframes()
def format(self, skip_empty_loops: bool = True, skip_empty_tags: bool = False, show_comments: bool = True) -> str:
""" The same as calling str(Entry), except that you can pass options
to customize how the entry is printed.
skip_empty_loops will omit printing loops with no tags at all. (A loop with null tags is not "empty".)
skip_empty_tags will omit tags in the saveframes and loops which have no non-null values.
show_comments will show the standard comments before a saveframe."""
return self.__str__(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags,
show_comments=show_comments)
def get_json(self, serialize: bool = True) -> Union[dict, str]:
""" Returns the entry in JSON format. If serialize is set to
False a dictionary representation of the entry that is
serializeable is returned instead."""
frames = [x.get_json(serialize=False) for x in self._frame_list]
entry_dict = {
"entry_id": self.entry_id,
"saveframes": frames
}
if serialize:
return json.dumps(entry_dict, default=_json_serialize)
else:
return entry_dict
def get_loops_by_category(self, value: str) -> List['loop_mod.Loop']:
"""Allows fetching loops by category."""
value = utils.format_category(value).lower()
results = []
for frame in self._frame_list:
for one_loop in frame.loops:
if one_loop.category.lower() == value:
results.append(one_loop)
return results
def get_saveframe_by_name(self, saveframe_name: str) -> 'saveframe_mod.Saveframe':
"""Allows fetching a saveframe by name."""
frames = self.frame_dict
if saveframe_name in frames:
return frames[saveframe_name]
else:
raise KeyError(f"No saveframe with name '{saveframe_name}'")
def get_saveframes_by_category(self, value: str) -> List['saveframe_mod.Saveframe']:
"""Allows fetching saveframes by category."""
return self.get_saveframes_by_tag_and_value("sf_category", value)
def get_saveframes_by_tag_and_value(self, tag_name: str, value: Any) -> List['saveframe_mod.Saveframe']:
"""Allows fetching saveframe(s) by tag and tag value."""
ret_frames = []
for frame in self._frame_list:
results = frame.get_tag(tag_name)
if results != [] and results[0] == value:
ret_frames.append(frame)
return ret_frames
def get_tag(self, tag: str, whole_tag: bool = False) -> list:
""" Given a tag (E.g. _Assigned_chem_shift_list.Data_file_name)
return a list of all values for that tag. Specify whole_tag=True
and the [tag_name, tag_value] pair will be returned."""
if "." not in str(tag):
raise ValueError("You must provide the tag category to call this method at the entry level. For "
"example, you must provide 'Entry.Title' rather than 'Title' as the tag if calling"
" this at the Entry level. You can call Saveframe.get_tag('Title') without issue.")
results = []
for frame in self._frame_list:
results.extend(frame.get_tag(tag, whole_tag=whole_tag))
return results
def get_tags(self, tags: list) -> Dict[str, list]:
""" Given a list of tags, get all of the tags and return the
results in a dictionary."""
# All tags
if tags is None or not isinstance(tags, list):
raise ValueError("Please provide a list of tags.")
results = {}
for tag in tags:
results[tag] = self.get_tag(tag)
return results
    def normalize(self, schema: Optional['Schema'] = None) -> None:
        """ Sorts saveframes, loops, and tags according to the schema
        provided (or BMRB default if none provided).

        Also re-assigns ID tag values and updates tag links to ID values.

        Works in passes: (1) sort tags/rows/loops/saveframes, (2) reassign
        local ID and Entry_ID tags while recording every old->new value in
        'mapping', (3) rewrite foreign-key references via 'mapping' and
        resolve $framecode references to ID values, (4) renumber loop 'ID'
        columns."""

        # Assign all the ID tags, and update all links to ID tags
        my_schema = utils.get_schema(schema)

        # Sort the saveframes according to ID, if an ID exists. Otherwise, still sort by category
        ordering = my_schema.category_order

        def sf_key(_: saveframe_mod.Saveframe) -> [int, Union[int, float]]:
            """ Helper function to sort the saveframes.
            Returns (category order, saveframe order) """

            # If not a real category, generate an artificial but stable order > the real saveframes
            try:
                category_order = ordering.index(_.tag_prefix)
            except (ValueError, KeyError):
                if _.category is None:
                    category_order = float('infinity')
                else:
                    category_order = len(ordering) + abs(int(hashlib.sha1(str(_.category).encode()).hexdigest(), 16))

            # See if there is an ID tag, and it is a number
            saveframe_id = float('infinity')
            try:
                saveframe_id = int(_.get_tag("ID")[0])
            except (ValueError, KeyError, IndexError, TypeError):
                # Either there is no ID, or it is not a number. By default it will sort at the end of saveframes of its
                # category. Note that the entry_information ID tag has a different meaning, but since there should
                # only ever be one saveframe of that category, the sort order for it can be any value.
                pass

            return category_order, saveframe_id

        def loop_key(_) -> Union[int, float]:
            """ Helper function to sort the loops."""

            try:
                return ordering.index(_.category)
            except ValueError:
                # Generate an arbitrary sort order for loops that aren't in the schema but make sure that they
                # always come after loops in the schema
                return len(ordering) + abs(int(hashlib.sha1(str(_.category).encode()).hexdigest(), 16))

        # Go through all the saveframes
        for each_frame in self._frame_list:
            each_frame.sort_tags(schema=my_schema)
            # Iterate through the loops
            for each_loop in each_frame:
                each_loop.sort_tags(schema=my_schema)

                # See if we can sort the rows (in addition to tags)
                try:
                    each_loop.sort_rows("Ordinal")
                except ValueError:
                    pass
            each_frame.loops.sort(key=loop_key)
        self._frame_list.sort(key=sf_key)

        # Calculate all the categories present
        categories: set = set()
        for each_frame in self._frame_list:
            categories.add(each_frame.category)

        # tag_prefix -> tag -> original value -> mapped value
        # (keys are built as 'Category.Tag.old_value' without the leading '_')
        mapping: dict = {}

        # Reassign the ID tags first
        for each_category in categories:
            # First in the saveframe tags
            id_counter: int = 1
            for each_frame in self.get_saveframes_by_category(each_category):
                for tag in each_frame.tags:
                    tag_schema = my_schema.schema.get(f"{each_frame.tag_prefix}.{tag[0]}".lower())
                    if not tag_schema:
                        continue
                    # Make sure the capitalization of the tag is correct
                    tag[0] = tag_schema['Tag field']

                    if tag_schema['lclSfIdFlg'] == 'Y':
                        # If it's an Entry_ID tag, set it that way
                        if tag_schema['entryIdFlg'] == 'Y':
                            mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = self._entry_id
                            tag[1] = self._entry_id
                        # Must be an integer to avoid renumbering the chem_comp ID, for example
                        elif tag_schema['BMRB data type'] == "int":
                            prev_tag = tag[1]
                            # Preserve the str/int type of the original value
                            if isinstance(tag[1], str):
                                tag[1] = str(id_counter)
                                mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{prev_tag}'] = str(id_counter)
                            else:
                                tag[1] = id_counter
                                mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{prev_tag}'] = id_counter
                        # We need to still store all the other tag values too
                        else:
                            mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = tag[1]
                    else:
                        mapping[f'{each_frame.tag_prefix[1:]}.{tag[0]}.{tag[1]}'] = tag[1]

                # Then in the loop
                for loop in each_frame:
                    for x, tag in enumerate(loop.tags):
                        tag_schema = my_schema.schema.get(f"{loop.category}.{tag}".lower())
                        if not tag_schema:
                            continue
                        # Make sure the tags have the proper capitalization
                        loop.tags[x] = tag_schema['Tag field']

                        for row in loop.data:
                            # We don't re-map loop IDs, but we should still store them
                            mapping[f'{loop.category[1:]}.{tag}.{row[x]}'] = row[x]

                            if tag_schema['lclSfIdFlg'] == 'Y':
                                # If it's an Entry_ID tag, set it that way
                                if tag_schema['entryIdFlg'] == 'Y':
                                    row[x] = self._entry_id
                                # Must be an integer to avoid renumbering the chem_comp ID, for example
                                elif tag_schema['BMRB data type'] == "int":
                                    # NOTE(review): only null values are overwritten here,
                                    # unlike the saveframe-tag branch above - confirm intent
                                    if row[x] in definitions.NULL_VALUES:
                                        if isinstance(row[x], str):
                                            row[x] = str(id_counter)
                                        else:
                                            row[x] = id_counter
                                # Handle chem_comp and it's ilk
                                else:
                                    parent_id_tag = f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}"
                                    parent_id_value = each_frame.get_tag(parent_id_tag)[0]
                                    if isinstance(row[x], str):
                                        row[x] = str(parent_id_value)
                                    else:
                                        row[x] = parent_id_value

                # One ID per saveframe of this category
                id_counter += 1

        # Now fix any other references
        for saveframe in self:
            for tag in saveframe.tags:
                tag_schema = my_schema.schema.get(f"{saveframe.tag_prefix}.{tag[0]}".lower())
                if not tag_schema:
                    continue
                if tag_schema['Foreign Table'] and tag_schema['Sf pointer'] != 'Y':
                    if tag[1] in definitions.NULL_VALUES:
                        if tag_schema['Nullable']:
                            continue
                        else:
                            logging.warning("A foreign key tag that is not nullable was set to "
                                            f"a null value. Tag: {saveframe.tag_prefix}.{tag[1]} Primary key: "
                                            f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                            f"Value: {tag[1]}")
                    try:
                        tag[1] = mapping[f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}.{tag[1]}"]
                    except KeyError:
                        logging.warning(f'The tag {saveframe.tag_prefix}.{tag[0]} has value {tag[1]} '
                                        f'but there is no valid primary key.')

            # Now apply the remapping to loops...
            for loop in saveframe:
                for x, tag in enumerate(loop.tags):
                    tag_schema = my_schema.schema.get(f"{loop.category}.{tag}".lower())
                    if not tag_schema:
                        continue
                    if tag_schema['Foreign Table'] and tag_schema['Sf pointer'] != 'Y':
                        for row in loop.data:
                            if row[x] in definitions.NULL_VALUES:
                                if tag_schema['Nullable']:
                                    continue
                                else:
                                    logging.warning("A foreign key reference tag that is not nullable was set to "
                                                    f"a null value. Tag: {loop.category}.{tag} Foreign key: "
                                                    f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                                    f"Value: {row[x]}")
                            try:
                                row[x] = mapping[
                                    f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}.{row[x]}"]
                            except KeyError:
                                # Atom/Comp IDs legitimately reference external dictionaries
                                if (loop.category == '_Atom_chem_shift' or loop.category == '_Entity_comp_index') and \
                                        (tag == 'Atom_ID' or tag == 'Comp_ID'):
                                    continue
                                logging.warning(f'The tag {loop.category}.{tag} has value {row[x]} '
                                                f'but there is no valid primary key '
                                                f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']} "
                                                f"with the tag value.")

                    # If there is both a label tag and an ID tag, do the reassignment
                    # We found a framecode reference
                    if tag_schema['Foreign Table'] and tag_schema['Foreign Column'] == 'Sf_framecode':
                        # Check if there is a tag pointing to the 'ID' tag
                        for conditional_tag in loop.tags:
                            conditional_tag_schema = my_schema.schema.get(f"{loop.category}.{conditional_tag}".lower())
                            if not conditional_tag_schema:
                                continue
                            if conditional_tag_schema['Foreign Table'] == tag_schema['Foreign Table'] and \
                                    conditional_tag_schema['Foreign Column'] == 'ID' and \
                                    conditional_tag_schema['entryIdFlg'] != 'Y':
                                # We found the matching tag
                                tag_pos = loop.tag_index(conditional_tag)
                                for row in loop.data:
                                    # Check if the tag is null
                                    if row[x] in definitions.NULL_VALUES:
                                        if tag_schema['Nullable']:
                                            continue
                                        else:
                                            logging.info(f"A foreign saveframe reference tag that is not nullable was "
                                                         f"set to a null value. Tag: {loop.category}.{tag} "
                                                         "Foreign saveframe: "
                                                         f"{tag_schema['Foreign Table']}.{tag_schema['Foreign Column']}"
                                                         )
                                            continue
                                    try:
                                        # row[x] is '$framecode'; strip the '$' to look up the frame
                                        row[tag_pos] = self.get_saveframe_by_name(row[x][1:]).get_tag('ID')[0]
                                    except IndexError:
                                        logging.info(f"Getting {self.get_saveframe_by_name(row[x][1:]).get_tag('ID')}")
                                    except KeyError:
                                        logging.warning(f"Missing frame of type {tag} pointed to by {conditional_tag}")

        # Renumber the 'ID' column in a loop
        for each_frame in self._frame_list:
            for loop in each_frame.loops:
                if loop.tag_index('ID') is not None and loop.category != '_Experiment':
                    loop.renumber_rows('ID')
def print_tree(self) -> None:
"""Prints a summary, tree style, of the frames and loops in
the entry."""
print(repr(self))
frame: saveframe_mod.Saveframe
for pos, frame in enumerate(self):
print(f"\t[{pos}] {repr(frame)}")
for pos2, one_loop in enumerate(frame):
print(f"\t\t[{pos2}] {repr(one_loop)}")
def remove_empty_saveframes(self) -> None:
""" This method will remove all empty saveframes in an entry
(the loops in the saveframe must also be empty for the saveframe
to be deleted). "Empty" means no values in tags, not no tags present."""
for pos, entry in enumerate(self._frame_list):
if entry.empty:
del self._frame_list[pos]
def remove_saveframe(self, item: Union[str, List[str], Tuple[str], 'saveframe_mod.Saveframe',
List['saveframe_mod.Saveframe'], Tuple['saveframe_mod.Saveframe']]) -> None:
""" Removes one or more saveframes from the entry. You can remove saveframes either by passing the saveframe
object itself, the saveframe name (as a string), or a list or tuple of either."""
parsed_list: list
if isinstance(item, tuple):
parsed_list = list(item)
elif isinstance(item, list):
parsed_list = item
elif isinstance(item, (str, saveframe_mod.Saveframe)):
parsed_list = [item]
else:
raise ValueError('The item you provided was not one or more saveframe objects or saveframe names (strings).'
f' Item type: {type(item)}')
frames_to_remove = []
for saveframe in parsed_list:
if isinstance(saveframe, str):
try:
frames_to_remove.append(self.frame_dict[saveframe])
except KeyError:
raise ValueError('At least one saveframe specified to remove was not found in this saveframe. '
f'First missing saveframe: {saveframe}')
elif isinstance(saveframe, saveframe_mod.Saveframe):
if saveframe not in self._frame_list:
raise ValueError('At least one loop specified to remove was not found in this saveframe. First '
f'missing loop: {saveframe}')
frames_to_remove.append(saveframe)
else:
raise ValueError('One of the items you provided was not a saveframe object or saveframe name '
f'(string). Item: {repr(saveframe)}')
self._frame_list = [_ for _ in self._frame_list if _ not in frames_to_remove]
def rename_saveframe(self, original_name: str, new_name: str) -> None:
""" Renames a saveframe and updates all pointers to that
saveframe in the entry with the new name."""
# Strip off the starting $ in the names
if original_name.startswith("$"):
original_name = original_name[1:]
if new_name.startswith("$"):
new_name = new_name[1:]
# Make sure there is no saveframe called what the new name is
if [x.name for x in self._frame_list].count(new_name) > 0:
raise ValueError(f"Cannot rename the saveframe '{original_name}' as '{new_name}' because a "
f"saveframe with that name already exists in the entry.")
# This can raise a ValueError, but no point catching it since it really is a ValueError if they provide a name
# of a saveframe that doesn't exist in the entry.
change_frame = self.get_saveframe_by_name(original_name)
# Update the saveframe
change_frame.name = new_name
# What the new references should look like
old_reference = "$" + original_name
new_reference = "$" + new_name
# Go through all the saveframes
for each_frame in self:
# Iterate through the tags
for each_tag in each_frame.tags:
if each_tag[1] == old_reference:
each_tag[1] = new_reference
# Iterate through the loops
for each_loop in each_frame:
for each_row in each_loop:
for pos, val in enumerate(each_row):
if val == old_reference:
each_row[pos] = new_reference
def validate(self, validate_schema: bool = True, schema: 'Schema' = None,
validate_star: bool = True) -> List[str]:
"""Validate an entry in a variety of ways. Returns a list of
errors found. 0-length list indicates no errors found. By
default all validation modes are enabled.
validate_schema - Determines if the entry is validated against
the NMR-STAR schema. You can pass your own custom schema if desired,
otherwise the cached schema will be used.
validate_star - Determines if the STAR syntax checks are ran."""
errors = []
# They should validate for something...
if not validate_star and not validate_schema:
errors.append("Validate() should be called with at least one validation method enabled.")
if validate_star:
# Check for saveframes with same name
saveframe_names = sorted(x.name for x in self)
for ordinal in range(0, len(saveframe_names) - 2):
if saveframe_names[ordinal] == saveframe_names[ordinal + 1]:
errors.append(f"Multiple saveframes with same name: '{saveframe_names[ordinal]}'")
# Check for dangling references
fdict = self.frame_dict
for each_frame in self:
# Iterate through the tags
for each_tag in each_frame.tags:
tag_copy = str(each_tag[1])
if (tag_copy.startswith("$")
and tag_copy[1:] not in fdict):
errors.append(f"Dangling saveframe reference '{each_tag[1]}' in "
f"tag '{each_frame.tag_prefix}.{each_tag[0]}'")
# Iterate through the loops
for each_loop in each_frame:
for each_row in each_loop:
for pos, val in enumerate(each_row):
val = str(val)
if val.startswith("$") and val[1:] not in fdict:
errors.append(f"Dangling saveframe reference '{val}' in tag "
f"'{each_loop.category}.{each_loop.tags[pos]}'")
# Ask the saveframes to check themselves for errors
for frame in self:
errors.extend(frame.validate(validate_schema=validate_schema, schema=schema, validate_star=validate_star))
return errors
def write_to_file(self, file_name: str, format_: str = "nmrstar", show_comments: bool = True,
skip_empty_loops: bool = False, skip_empty_tags: bool = False) -> None:
""" Writes the entry to the specified file in NMR-STAR format.
Optionally specify:
show_comments=False to disable the comments that are by default inserted. Ignored when writing json.
skip_empty_loops=False to force printing loops with no tags at all (loops with null tags are still printed)
skip_empty_tags=True will omit tags in the saveframes and loops which have no non-null values.
format_=json to write to the file in JSON format."""
write_to_file(self, file_name=file_name, format_=format_, show_comments=show_comments,
skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags)
| 45,045 | 11,550 |
import os
from io import StringIO
from pytest import mark, raises
from preacher.compilation.yaml import YamlError, load
@mark.parametrize(('content', 'expected_message'), [
    ('!include []', '", line 1, column 1'),
    ('!include {}', '", line 1, column 1'),
])
def test_given_invalid_inclusion(content, expected_message):
    """An `!include` tag on a non-scalar node must fail with a YamlError
    that reports the offending location."""
    source = StringIO(content)
    with raises(YamlError) as error_info:
        load(source)
    actual_message = str(error_info.value)
    assert expected_message in actual_message
def test_given_recursive_inclusion_error(mocker):
    """An error inside an included file is reported with the offending tag
    and the locations in both the outer and the included documents."""
    inner_stream = StringIO('\n !foo')
    mocked_open = mocker.patch('builtins.open')
    mocked_open.return_value = inner_stream

    outer_stream = StringIO('!include foo.yml')
    with raises(YamlError) as error_info:
        load(outer_stream)

    message = str(error_info.value)
    for expected_fragment in ('!foo', '", line 1, column 1', '", line 2, column 2'):
        assert expected_fragment in message
def test_given_recursive_inclusion(mocker):
    """Included files resolve relative to `origin` and may themselves
    contain further `!include` tags, which are resolved recursively."""
    stream = StringIO('''
    list:
      - !include item.yml
      - key: !include value.yml
    recursive: !include recursive.yml
    ''')
    # Maps the path each open() will receive to the file content it returns;
    # recursive.yml itself includes inner.yml to exercise nesting.
    answer_map = {
        os.path.join('base', 'dir', 'item.yml'): 'item',
        os.path.join('base', 'dir', 'value.yml'): 'value',
        os.path.join('base', 'dir', 'recursive.yml'): '!include inner.yml',
        os.path.join('base', 'dir', 'inner.yml'): 'inner',
    }
    open_mock = mocker.patch('builtins.open')
    open_mock.side_effect = lambda path: StringIO(answer_map[path])
    actual = load(stream, origin=os.path.join('base', 'dir'))
    assert actual == {
        'list': [
            'item',
            {'key': 'value'},
        ],
        'recursive': 'inner',
    }
def test_given_wildcard_inclusion(mocker):
    """Paths containing glob wildcards are expanded through glob.iglob
    (with recursive=True) into a list of inclusions; paths without a
    recognized wildcard are opened directly.

    NOTE(review): per the assertions below, a lone '[' or ']' and an empty
    '[]' class are treated as literal paths; only a filled bracket class
    like '[abc]' (and '*', '**', '?') triggers globbing -- confirm against
    the loader implementation."""
    iglob_mock = mocker.patch('glob.iglob')
    iglob_mock.side_effect = lambda path, recursive: iter([f'glob:{path}:{recursive}'])

    stream = StringIO(r'''
    'asterisk': !include '*.yml'
    'double-asterisk': !include '**.yml'
    'question': !include '?.yml'
    'parenthesis-only-opening': !include '[.yml'
    'parenthesis-only-closing': !include '].yml'
    'empty-parenthesis': !include '[].yml'
    'filled-parenthesis': !include '[abc].yml'
    ''')
    open_mock = mocker.patch('builtins.open')
    open_mock.side_effect = lambda path: StringIO(path)
    actual = load(stream, origin='base/path/')
    assert isinstance(actual, dict)
    assert actual['asterisk'] == ['glob:base/path/*.yml:True']
    assert actual['double-asterisk'] == ['glob:base/path/**.yml:True']
    assert actual['question'] == ['glob:base/path/?.yml:True']
    assert actual['parenthesis-only-closing'] == 'base/path/].yml'
    assert actual['parenthesis-only-opening'] == 'base/path/[.yml'
    assert actual['empty-parenthesis'] == 'base/path/[].yml'
    assert actual['filled-parenthesis'] == ['glob:base/path/[abc].yml:True']
| 2,862 | 968 |
import tcod as libtcod
from render_functions import RenderOrder
from game_states import GameStates
from game_messages import Message
def kill_player(player):
    """Turn the player entity into a corpse glyph and end the game.

    Returns a (death Message, GameStates.PLAYER_DEAD) tuple."""
    # Render the player as a dark-red '%' corpse
    player.color = libtcod.dark_red
    player.char = '%'

    return Message('YOU DIED', libtcod.red), GameStates.PLAYER_DEAD
def kill_monster(monster):
    """Convert a slain monster into an inert corpse.

    Returns the death Message announcing the kill."""
    # Build the message before the name is changed to '... remains'
    death_message = Message('{0} has been slain.'.format(monster.name), libtcod.orange)

    # A corpse no longer blocks movement, fights, or thinks
    monster.char = '%'
    monster.color = libtcod.dark_red
    monster.blocks = False
    monster.fighter = None
    monster.ai = None
    monster.name += ' remains'
    monster.render_order = RenderOrder.CORPSE

    return death_message
#-*- coding: utf-8 -*-
#from datetime import datetime
#import facebook
import urllib2
import mechanize
import json
import sys, os
from config import fb_email, fb_pass, app_id, group_id, limit
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): Python 2 script (urllib2, ``except Exception, e`` syntax);
# it will not run under Python 3 without porting.
# Flow: log in via mechanize to obtain an OAuth access token, fetch the
# group feed from the Graph API, and render posts/comments to document.html.
facebook_app_link='https://www.facebook.com/dialog/oauth?scope=manage_pages,status_update,publish_stream&redirect_uri=http://strender.tistory.com&response_type=token&client_id='+str(app_id)
# Drive a scripted browser through the Facebook OAuth dialog login form
br_mech = mechanize.Browser()
br_mech.set_handle_robots(False)
br_mech.open(facebook_app_link)
br_mech.form = list(br_mech.forms())[0]
control = br_mech.form.find_control("email")
control.value=fb_email
control = br_mech.form.find_control("pass")
control.value=fb_pass
br_mech.submit()
# The access token is embedded in the post-login redirect URL
access_token = br_mech.geturl().split("token=")[1].split("&expires")[0]
group_post_link = 'https://graph.facebook.com/'+str(group_id)+'/feed?access_token='+access_token+'&limit='+str(limit)
saveFile = open('document.html', 'w')
# Static Bootstrap-based HTML page skeleton
start_str = "<!DOCTYPE html><html lang='en'><head><meta charset='UTF-8'><meta http-equiv='X-UA-Compatible' content='IE=edge'><meta name='viewport'content='width=device-width, initial-scale=1'><title>facebook group feed</title><link rel='stylesheet' href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css'></head><body>"
end_str = "<script src='https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js'></script><script src='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js'></script></body></html>"
saveFile.write(start_str)
# Fetch and decode the group feed JSON
j = urllib2.urlopen(group_post_link)
j = json.loads(j.read())
#print j
for x in j['data']:
    try:
        # Only posts that have both a message and comments are rendered
        if (x['message'] and x['comments']):
            saveFile.write("<div class='well'><h5>Post</h5><p>Date: %s Time: %s by %s </p>" % (x['created_time'].split('T')[0], x['created_time'].split('T')[1].split('+')[0], x['from']['name'].encode('utf-8')))
            saveFile.write("<p>%s</p><br />" % x['message'].encode('utf-8'))
            if x['comments']:
                saveFile.write("<h5>Comment</h5>")
                for y in x['comments']['data']:
                    saveFile.write("<p>%s</p>" % y['from']['name'].encode('utf-8'))
                    saveFile.write("<p>%s</p>" % y['message'].encode('utf-8'))
                saveFile.write("</div>")
            else:
                pass
            # saveFile.write("</div>")
        else:
            pass
            saveFile.write("</div>")
    except Exception, e:
        # NOTE(review): posts missing 'message'/'comments' raise KeyError and
        # are silently skipped; this also hides any write/encoding errors.
        pass
saveFile.write(end_str)
saveFile.close()
| 2,370 | 940 |
from daemonapp import DaemonApp
from tank import Tank
__version__ = "0.1.1"
| 77 | 29 |
import os
import sys
from datetime import datetime, timedelta
import boto3
from moto import mock_ssm
from . import TestBase
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from ssm_cache import SSMParameter, InvalidParam
@mock_ssm
class TestSSMCache(TestBase):
    """Tests for SSMParameter backed by a moto-mocked SSM service.

    _create_params and PARAM_VALUE are inherited from TestBase (not shown
    here); setUp seeds the mocked parameter store before each test."""

    def setUp(self):
        # Seed the fixture parameters used throughout the tests
        names = ["my_param", "my_param_1", "my_param_2", "my_param_3"]
        self._create_params(names)

    def test_creation(self):
        # single string
        cache = SSMParameter("my_param")
        self.assertEqual(1, len(cache._names))
        self.assertTrue(cache._with_decryption)
        self.assertIsNone(cache._max_age)
        self.assertIsNone(cache._last_refresh_time)
        # list of params
        cache = SSMParameter(["my_param_1", "my_param_2"])
        self.assertEqual(2, len(cache._names))
        # invalid params
        with self.assertRaises(ValueError):
            SSMParameter()
        with self.assertRaises(ValueError):
            SSMParameter(None)
        with self.assertRaises(ValueError):
            SSMParameter([])

    def test_should_refresh(self):
        # without max age: never considered stale
        cache = SSMParameter("my_param")
        self.assertFalse(cache._should_refresh())
        # with max age and no data: must refresh immediately
        cache = SSMParameter("my_param", max_age=10)
        self.assertTrue(cache._should_refresh())
        # with max age and last refreshed date OK
        cache._last_refresh_time = datetime.utcnow()
        self.assertFalse(cache._should_refresh())
        # with max age and last refreshed date KO (20s old > 10s max_age)
        cache._last_refresh_time = datetime.utcnow() - timedelta(seconds=20)
        self.assertTrue(cache._should_refresh())

    def test_main(self):
        cache = SSMParameter("my_param")
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_unexisting(self):
        # Unknown parameter names surface as InvalidParam
        cache = SSMParameter("my_param_invalid_name")
        with self.assertRaises(InvalidParam):
            cache.value()

    def test_not_configured(self):
        # Asking for a name the cache was not configured with is an error
        cache = SSMParameter(["param_1", "param_2"])
        with self.assertRaises(InvalidParam):
            cache.value("param_3")

    def test_main_with_expiration(self):
        cache = SSMParameter("my_param", max_age=300)  # 5 minutes expiration time
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_main_without_encryption(self):
        cache = SSMParameter("my_param", with_decryption=False)
        my_value = cache.value()
        self.assertEqual(my_value, self.PARAM_VALUE)

    def test_main_with_multiple_params(self):
        cache = SSMParameter(["my_param_1", "my_param_2", "my_param_3"])
        # one by one
        my_value_1 = cache.value("my_param_1")
        my_value_2 = cache.value("my_param_2")
        my_value_3 = cache.value("my_param_3")
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)
        self.assertEqual(my_value_3, self.PARAM_VALUE)
        with self.assertRaises(TypeError):
            cache.value()  # name is required
        # or all together
        my_value_1, my_value_2, my_value_3 = cache.values()
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)
        self.assertEqual(my_value_3, self.PARAM_VALUE)
        # or a subset
        my_value_1, my_value_2 = cache.values(["my_param_1", "my_param_2"])
        self.assertEqual(my_value_1, self.PARAM_VALUE)
        self.assertEqual(my_value_2, self.PARAM_VALUE)

    def test_main_with_explicit_refresh(self):
        cache = SSMParameter("my_param")  # will not expire

        class InvalidCredentials(Exception):
            pass

        def do_something():
            # Simulate a consumer that rejects the stale value
            my_value = cache.value()
            if my_value == self.PARAM_VALUE:
                raise InvalidCredentials()

        try:
            do_something()
        except InvalidCredentials:
            # manually update value
            self._create_params(["my_param"], "new_value")
            cache.refresh()  # force refresh
            do_something()  # won't fail anymore

    def test_main_lambda_handler(self):
        # Typical usage pattern: module-level cache, read inside the handler
        cache = SSMParameter("my_param")

        def lambda_handler(event, context):
            secret_value = cache.value()
            return 'Hello from Lambda with secret %s' % secret_value

        lambda_handler(None, None)
| 4,458 | 1,392 |
import abc
import atexit
import enum
import re
import OpenGL.error
from .variable import glsl_types
from .. import gl
import uuid
class BindSemanticObject(metaclass=abc.ABCMeta):
    """Context-manager mixin for objects with bind/unbind semantics.

    Subclasses implement _activate/_deactivate; `with obj:` activates on
    entry and deactivates on exit (including when an exception escapes)."""

    @abc.abstractmethod
    def _activate(self):
        raise NotImplementedError

    @abc.abstractmethod
    def _deactivate(self):
        raise NotImplementedError

    def __enter__(self):
        self._activate()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None lets any in-flight exception propagate
        self._deactivate()
class GpuObject:
    """Base class for objects owning a GPU-side resource handle.

    Stores the GL object id and type, and registers an atexit hook so the
    resource is released at interpreter shutdown if the context still exists."""

    def __init__(self, gl_type, gl_id):
        self._id = gl_id
        self._uuid = uuid.uuid1()
        self._type = gl_type
        # Release the GPU resource at interpreter exit
        atexit.register(self.__delete)

    def __delete(self):
        try:
            # if the context is alive, delete the resource from GPU
            self._delete()
        except OpenGL.error.NullFunctionError as error:
            # do nothing; context is not existing anymore
            pass
        except gl.GLError as error:
            # do nothing, context doesn't exist anymore
            # NOTE(review): 1282 is presumably GL_INVALID_OPERATION (0x0502),
            # expected when the context is already gone -- confirm
            if error.err != 1282:
                raise error

    @abc.abstractmethod
    def _delete(self):
        # Subclasses issue the actual GL delete call for their resource type
        raise NotImplementedError

    @property
    def handle(self):
        # Raw OpenGL object id
        return self._id

    @property
    def uuid(self):
        # Process-unique identity, independent of the (reusable) GL id
        return self._uuid

    @property
    def gl_type(self):
        return self._type

    def is_valid(self):
        # An id of 0 denotes "no object"; valid handles are positive
        return self._id > 0

    def delete(self):
        """Explicitly release the GPU resource now."""
        self._delete()
@enum.unique
class DrawingMode(enum.Enum):
    """OpenGL primitive types (GL_POINTS, GL_LINES, ...) used to interpret
    vertex streams when drawing."""
    Points = gl.GL_POINTS
    Lines = gl.GL_LINES
    LineStrip = gl.GL_LINE_STRIP
    LineLoop = gl.GL_LINE_LOOP
    Triangles = gl.GL_TRIANGLES
    TrianglesAdjacency = gl.GL_TRIANGLES_ADJACENCY
    TriangleStrip = gl.GL_TRIANGLE_STRIP
    TriangleStripAdjacency = gl.GL_TRIANGLE_STRIP_ADJACENCY
    TriangleFan = gl.GL_TRIANGLE_FAN
# Valid OpenGL buffer binding targets.
@enum.unique
class BufferBindingTarget(enum.Enum):
    """Targets to which a buffer object may be bound; the GL version
    requirements noted per member are taken from the original comments."""
    ArrayBuffer = gl.GL_ARRAY_BUFFER  # Vertex attributes
    AtomicCounterBuffer = gl.GL_ATOMIC_COUNTER_BUFFER  # Atomic counter storage, requires GL version >= 4.2
    CopyReadBuffer = gl.GL_COPY_READ_BUFFER  # Buffer copy source, requires GL version >= 3.1
    CopyWriteBuffer = gl.GL_COPY_WRITE_BUFFER  # Buffer copy destination
    DispatchIndirectBuffer = gl.GL_DISPATCH_INDIRECT_BUFFER  # Indirect compute dispatch commands, requires GL version >= 4.3
    DrawIndirectBuffer = gl.GL_DRAW_INDIRECT_BUFFER  # Indirect command arguments
    ElementArrayBuffer = gl.GL_ELEMENT_ARRAY_BUFFER  # Vertex array indices
    PixelPackBuffer = gl.GL_PIXEL_PACK_BUFFER  # Pixel read target
    PixelUnpackBuffer = gl.GL_PIXEL_UNPACK_BUFFER  # Texture data source
    QueryBuffer = gl.GL_QUERY_BUFFER  # Query result buffer, requires GL version >= 4.4
    ShaderStorageBuffer = gl.GL_SHADER_STORAGE_BUFFER  # Read-write storage for shaders, requires GL version >= 4.3
    TextureBuffer = gl.GL_TEXTURE_BUFFER  # Texture data buffer, requires GL version >= 3.1
    TransformFeedbackBuffer = gl.GL_TRANSFORM_FEEDBACK_BUFFER  # Transform feedback buffer
    UniformBuffer = gl.GL_UNIFORM_BUFFER  # Uniform block storage, requires GL version >= 3.1
# Valid OpenGL buffer data storage.
# https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glBufferStorage.xhtml
@enum.unique
class BufferStorageFlag(enum.Flag):
    """Bit flags for glBufferStorage; combinable with `|` (enum.Flag)."""
    Dynamic = gl.GL_DYNAMIC_STORAGE_BIT
    MapRead = gl.GL_MAP_READ_BIT
    MapWrite = gl.GL_MAP_WRITE_BIT
    MapPersistent = gl.GL_MAP_PERSISTENT_BIT
    MapCoherent = gl.GL_MAP_COHERENT_BIT
    ClientStorage = gl.GL_CLIENT_STORAGE_BIT
@enum.unique
class AccessPolicy(enum.Enum):
    """
    Access policy for OpenGL buffer object.
    """
    ReadOnly = gl.GL_READ_ONLY
    WriteOnly = gl.GL_WRITE_ONLY
    ReadWrite = gl.GL_READ_WRITE
@enum.unique
class DataUsage(enum.Enum):
    """
    Usage pattern of stored data.
    https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glBufferData.xhtml
    Frequency of access:
    - Stream: the data store contents will be modified once and used at most a few times.
    - Static: the data store contents will be modified once and used many times.
    - Dynamic: the data store contents will be modified repeatedly and used many times.
    Nature of access:
    - Draw:
        the data store contents are modified by the application, and used as the source for GL drawing
        and image specification commands.
    - Read:
        the data store contents are modified by reading data from the GL, and used to return that data
        when queried by the application.
    - Copy:
        the data store contents are modified by reading data from the GL, and used as the source for GL
        drawing and image specification commands.
    """
    StreamDraw = gl.GL_STREAM_DRAW
    StreamRead = gl.GL_STREAM_READ
    StreamCopy = gl.GL_STREAM_COPY
    StaticDraw = gl.GL_STATIC_DRAW
    StaticRead = gl.GL_STATIC_READ
    StaticCopy = gl.GL_STATIC_COPY
    DynamicDraw = gl.GL_DYNAMIC_DRAW
    DynamicRead = gl.GL_DYNAMIC_READ
    DynamicCopy = gl.GL_DYNAMIC_COPY
class ShaderCodeParser:
    """Regex-based GLSL source inspection: strips comments and collects
    'uniform' / 'in' declarations, expanding uniforms of user-defined struct
    types into their individual fields."""

    @classmethod
    def parse(cls, code):
        """Placeholder kept for interface compatibility; use preprocess()."""
        pass

    @classmethod
    def preprocess(cls, code: str):
        """Strip comments and extract declarations from GLSL source.

        Args:
            code (str): GLSL source (may be empty or None).

        Returns:
            Tuple ``(code, uniforms, attribs)`` where *code* has comments
            removed, *uniforms* holds (name, type) pairs of 'uniform'
            declarations and *attribs* holds (name, type) pairs of 'in'
            declarations.
        """
        uniforms = []
        attribs = []
        if code:
            # Bug fix: str is immutable, so the stripped text must be
            # re-assigned. Previously the return value was discarded and the
            # parsers below still saw the comments.
            code = cls.remove_comments(code)
            structs = cls.parse_defined_struct(code)
            uniforms = cls.parse_declarations(code, 'uniform', user_defined_types=structs)
            attribs = cls.parse_declarations(code, 'in')
        return code, uniforms, attribs

    @classmethod
    def remove_comments(cls, code: str) -> str:
        """
        Replace C-style comments in a GLSL code string.

        String literals are matched first (group 1) so comment markers inside
        them are preserved.

        Args:
            code (str): GLSL code string.
        Returns:
            GLSL code string without comments.
        """
        regex = re.compile(r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)", re.MULTILINE | re.DOTALL)
        return regex.sub(
            lambda matched: "" if matched.group(2) is not None else matched.group(1),
            code
        )

    @classmethod
    def parse_defined_struct(cls, code: str):
        """Return {struct_name: [(field_type, field_name), ...]} for every
        ``struct X { ... };`` definition found in *code*."""
        struct_regex = re.compile(r"(?s)struct\s*(?P<sname>\w+)\s*\{(?P<sdef>.*?)\};", re.MULTILINE)
        struct_field_regex = re.compile(r"\s+(?P<type>\w+)\s+(?P<name>[\w,\[\]\n = \.$]+);")
        structs = {}
        for matched in re.finditer(struct_regex, code):
            sname = matched.group('sname')
            structs[sname] = []
            for field in re.finditer(struct_field_regex, matched.group('sdef')):
                var_type = field.group('type')
                var_name = field.group('name')
                structs[sname].append((var_type, var_name))
        return structs

    @classmethod
    def remove_version(cls, code: str) -> str:
        """
        Remove the OpenGL ``#version`` directive.

        Args:
            code (str): GLSL code string.
        Returns:
            GLSL code string with the version directive replaced by a newline.
        """
        # Raw string fixes the invalid '\#' escape warning of the original
        # pattern; '\#' and '#' match identically in a regex.
        regex = re.compile(r'#\s*version[^\r\n]*\n', re.MULTILINE | re.DOTALL)
        return regex.sub('\n', code)

    @classmethod
    def parse_declarations(cls, code, qualifiers='', user_defined_types=None):
        """Extract declarations of one type qualifier from shader code.

        Note:
            Do NOT pass multiple qualifiers (not working for the moment).

        Args:
            code (str):
                Shader source code string.
            qualifiers (str):
                A single GLSL type qualifier such as 'uniform' or 'in'.
                See https://www.khronos.org/opengl/wiki/Type_Qualifier_(GLSL).
            user_defined_types (dict | None):
                Mapping produced by parse_defined_struct(); declarations whose
                type appears here (and not in glsl_types) are expanded into
                'var.field' entries.

        Returns:
            List of (name, type) tuples; empty when *qualifiers* is empty.
        """
        # Bug fix: a mutable default dict would be shared across calls.
        if user_defined_types is None:
            user_defined_types = {}
        # todo: deal with multiple qualifiers.
        if qualifiers != '':
            variables = []
            # rf-string: raw so the \s/\w escapes are taken literally by re.
            regex = re.compile(rf'{qualifiers}\s+(?P<type>\w+)\s+(?P<names>[\w,\[\]\n = \.$]+);')
            for matched in re.finditer(regex, code):
                var_type = matched.group('type')
                var_names = list(map(str.strip, matched.group('names').split(',')))
                if var_type not in glsl_types and var_type in user_defined_types:
                    # Expand struct-typed declarations into their fields.
                    user_defined = user_defined_types[var_type]
                    for var_name in var_names:
                        for field_type, field_name in user_defined:
                            variables.append((f'{var_name}.{field_name}', field_type))
                else:
                    for var_name in var_names:
                        variables.append((var_name, var_type))
            return variables
        # Consistency fix: always return a (possibly empty) list; the
        # original returned '' here, an inconsistent type.
        return []
| 8,797 | 2,755 |
"""Author: Brandon Trabucco, Copyright 2019
Implements dynamic computational graphs with an interface like pytorch.
Also uses the ADAM optimizer."""
import numpy as np
import autograd.nodes
####################
#### OPTIMIZERS ####
####################
class Adam(autograd.nodes.Optimizer):
    """ADAM optimizer node (Kingma & Ba, 2014) with bias-corrected moment
    estimates; moment buffers are lazily sized on the first forward pass."""

    def __init__(self, alpha=0.0001, beta_one=0.9, beta_two=0.999, epsilon=1e-8):
        """Creates an ADAM optimizer.

        Args:
            alpha: step size (learning rate).
            beta_one: decay rate for the first-moment estimate.
            beta_two: decay rate for the second-moment estimate.
            epsilon: small constant that guards against division by zero.
        """
        super(Adam, self).__init__("adam")
        self.t = 0  # update-step counter used for bias correction
        self.alpha = alpha
        self.beta_one = beta_one
        self.beta_two = beta_two
        self.epsilon = epsilon
        self.m = None  # first-moment (mean) accumulator, lazily allocated
        self.v = None  # second-moment (uncentered variance) accumulator

    def forward(self, variable):
        """Computes the result of this operation.

        Lazily allocates the moment buffers to match the variable's shape and
        passes the variable's data through unchanged.
        """
        if self.m is None:
            self.m = np.zeros(variable.shape)
        if self.v is None:
            self.v = np.zeros(variable.shape)
        return variable.data

    def backward(self, gradient, variable):
        """Computes the gradient with respect to *args.

        Returns the ADAM update direction for ``variable`` given ``gradient``.
        """
        self.t += 1
        self.m = self.beta_one * self.m + (1 - self.beta_one) * gradient
        self.v = self.beta_two * self.v + (1 - self.beta_two) * gradient**2
        # Bug fix: bias correction divides by (1 - beta**t), not (1 + beta**t);
        # see Algorithm 1 of the ADAM paper.
        m_hat = self.m / (1 - self.beta_one**self.t)
        v_hat = self.v / (1 - self.beta_two**self.t)
        # NOTE: epsilon inside the sqrt is a common variant of the update rule.
        return [self.alpha * m_hat / np.sqrt(v_hat + self.epsilon)]
| 1,403 | 494 |
"""
The authentication module holds the Auth class which is is not only responsible
for the action of logging in, but also providing accessor method to the data
obtained upon a successful login.
"""
import logging
from lcmap.client.api import routes
log = logging.getLogger(__name__)
context = routes.auth_context
login_context = context + "/login"
user_context = context + "/me"
class Auth(dict):
    """Dict-backed container for authentication data.

    Logs in during construction and exposes accessors for the fields the
    login endpoint returned (token, user-id, username, roles, email).
    """

    def __init__(self, cfg=None, http=None, username="", password="", data=None):
        # Bug fix: the original default ``data={}`` is a mutable default
        # argument shared across all Auth instances.
        super(Auth, self).__init__(data if data is not None else {})
        self.cfg = cfg    # config object supplying get_username()/get_password() fallbacks
        self.http = http  # HTTP client providing post() and set_auth()
        self.login(username, password)
        # Backwards-compatible alias: refreshing the token is a fresh login.
        self.refresh_token = self.login

    def login(self, username="", password=""):
        """POST credentials to the login endpoint, absorb the response into
        this dict, and register this Auth object on the HTTP client.

        Missing username/password fall back to values from ``self.cfg``.
        Returns self so calls can be chained.
        """
        log.debug("Logging in ...")
        log.debug(login_context)
        if not username:
            username = self.cfg.get_username()
        if not password:
            password = self.cfg.get_password()
        result = self.http.post(
            login_context,
            data={"username": username, "password": password})
        if result.errors:
            # Errors are logged but the result payload is still merged below,
            # matching the original behaviour.
            log.error("Login unsuccessful: {}".format(result.errors))
        self.update(result.result)
        self.http.set_auth(self)
        return self

    def get_token(self):
        """Session token returned by the login endpoint."""
        return self.get("token")

    def get_userid(self):
        return self.get("user-id")

    def get_username(self):
        return self.get("username")

    def get_roles(self):
        return self.get("roles")

    def get_email(self):
        return self.get("email")
| 1,524 | 440 |
from django.db import models
from django.utils.translation import ugettext as _
from datetime import date
# Create your models here.
class Pip(models.Model):
    """Proposal record identified by a generated id of the form 'P<year><seq>'."""

    class Meta:
        ordering = ['pipId']

    # Generated identifier, e.g. "P2024001"; filled in by save() when empty.
    pipId = models.CharField(_('Id'), max_length=8, unique=True)
    title = models.CharField(_('Título'), max_length=90, unique=True)
    orgUnit = models.CharField(_('Unidade Solicitante'), max_length=10)
    client = models.CharField(_('Cliente'), max_length=30)
    justification = models.TextField(_('Justificativa'))
    objectives = models.TextField(_('Objetivos'))
    cost_estimates = models.CharField(_('Estimativa de Gastos'), max_length=60, blank=True)

    def __str__(self):
        return "{} - {}".format(self.pipId, self.title)

    def __get_next_pipId(self):
        # Next id = "P" + current year + zero-padded (count of this year's ids + 1).
        # NOTE(review): count()-based numbering can collide under concurrent
        # saves or after deletions — confirm this is acceptable.
        q = Pip.objects.filter(pipId__startswith='P{}'.format(date.today().year))
        proposals_this_year = q.count()
        return "P{}{:03d}".format(date.today().year, proposals_this_year+1)

    def save(self, *args, **kwargs):
        # Assign a generated id only on first save (when pipId is empty).
        if not self.pipId:
            self.pipId = self.__get_next_pipId()
        super().save(*args, **kwargs)
'''
Print out all of the strings in the following array in alphabetical order, each on a separate line.
['Waltz', 'Tango', 'Viennese Waltz', 'Foxtrot', 'Cha Cha', 'Samba', 'Rumba', 'Paso Doble', 'Jive']
The expected output is:
'Cha Cha'
'Foxtrot'
'Jive'
'Paso Doble'
'Rumba'
'Samba'
'Tango'
'Viennese Waltz'
'Waltz'
You may use whatever programming language you'd like.
Verbalize your thought process as much as possible before writing any code. Run through the UPER problem solving framework while going through your thought process.
'''
def alpha_print(array):
    """Sort *array* alphabetically in place, then print one item per line."""
    array.sort()
    if array:
        print("\n".join(map(str, array)))
# Dance names, printed in alphabetical order (the list is sorted in place).
array_1 = [
    'Waltz', 'Tango', 'Viennese Waltz', 'Foxtrot',
    'Cha Cha', 'Samba', 'Rumba', 'Paso Doble', 'Jive',
]
array_1.sort()
for dance in array_1:
    print(dance)
| 872 | 303 |
import tensorflow as tf
import tensorflow_addons as tfa
import builders
from builders.layers.helpers import build_cnn, get_normalization_2d
from builders.layers import stylegan
from builders.layers.spectral import SNConv2D
from builders.layers.syncbn import SyncBatchNormalization
def build_mask_net(hidden_channel_dim, mask_size, norm='batch'):
    """Build a Sequential net that repeatedly doubles spatial resolution up to
    mask_size x mask_size and projects down to a single mask channel.

    Args:
        hidden_channel_dim: channel width of the intermediate conv layers.
        mask_size: target spatial size; must be a power of two.
        norm: normalization kind passed to get_normalization_2d.

    Raises:
        ValueError: if mask_size is not a power of two.
    """
    net = tf.keras.models.Sequential()
    size = 1
    # One upsample/conv/norm/relu stage per doubling step.
    while size < mask_size:
        net.add(tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='nearest'))
        net.add(tf.keras.layers.Conv2D(hidden_channel_dim, kernel_size=3, padding='same'))
        net.add(get_normalization_2d(norm))
        net.add(tf.keras.layers.Activation('relu'))
        size *= 2
    if size != mask_size:
        raise ValueError('Mask size must be a power of 2')
    # Final 1x1 projection to a single output channel.
    net.add(tf.keras.layers.Conv2D(1, kernel_size=1, padding='same'))
    return net
class AppearanceEncoder(tf.keras.layers.Layer):
    """CNN encoder mapping image crops to fixed-size appearance vectors."""

    def __init__(self, arch, normalization='none', activation='relu',
                 padding='same', vecs_size=1024, pooling='avg'):
        super().__init__()
        # Backbone comes from the shared build_cnn helper; the returned
        # channel count is unused here.
        backbone, channels = build_cnn(arch=arch,
                                       normalization=normalization,
                                       activation=activation,
                                       pooling=pooling,
                                       padding=padding)
        encoder = tf.keras.models.Sequential()
        encoder.add(backbone)
        encoder.add(tf.keras.layers.GlobalMaxPooling2D())
        encoder.add(tf.keras.layers.Dense(vecs_size))
        self.cnn = encoder

    def call(self, crops):
        """Encode a batch of crops into appearance vectors."""
        return self.cnn(crops)
class LayoutToImageGenerator(tf.keras.layers.Layer):
    """Encoder/decoder generator mapping a layout tensor to an image.

    Architecture: zero-padded 7x7 stem conv -> n_downsampling strided convs ->
    n_blocks residual blocks -> n_downsampling transposed convs ->
    zero-padded 7x7 output conv with *final_activation*.
    """

    def __init__(self, input_shape, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer='batch',
                 padding_type='same', final_activation='tanh', extra_mult=2):
        super().__init__()
        assert (n_blocks >= 0)
        if norm_layer == 'batch':
            norm_layer_builder = tf.keras.layers.BatchNormalization
        elif norm_layer == 'instance':
            norm_layer_builder = tfa.layers.InstanceNormalization
        else:
            # Bug fix: an unsupported value previously surfaced later as a
            # confusing UnboundLocalError; fail fast with a clear message.
            raise ValueError(f"Unsupported norm_layer: {norm_layer!r}")
        model = tf.keras.models.Sequential()
        # Zero-pad 3 px per spatial border so the 7x7 'valid' conv keeps size.
        model.add(tf.keras.layers.Lambda(
            lambda x: tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]]),
            input_shape=input_shape))
        model.add(tf.keras.layers.Conv2D(ngf, kernel_size=7, padding='valid'))
        model.add(norm_layer_builder())
        model.add(tf.keras.layers.Activation('relu'))
        # downsample: filters double at each stage (times extra_mult)
        for i in range(n_downsampling):
            n_filters = int(ngf * (2 ** i) * extra_mult)
            model.add(tf.keras.layers.Conv2D(n_filters, kernel_size=3, strides=2, padding='same'))
            model.add(norm_layer_builder())
            model.add(tf.keras.layers.Activation('relu'))
        # resnet blocks at the bottleneck resolution
        # NOTE(review): builders.layers.resnet is not imported at the top of
        # this file — confirm the submodule is available at runtime.
        n_filters = int(ngf * (2 ** (n_downsampling - 1)) * extra_mult)
        for i in range(n_blocks):
            model.add(builders.layers.resnet.ResidualBlock(n_filters, padding=padding_type, activation='relu', normalization=norm_layer))
        # upsample back to the input resolution
        for i in range(n_downsampling):
            n_filters = int(ngf * (2 ** (n_downsampling - i)) * extra_mult / 2)
            model.add(tf.keras.layers.Conv2DTranspose(n_filters, kernel_size=3, strides=2, padding='same', output_padding=1))
            model.add(norm_layer_builder())
            model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Lambda(
            lambda x: tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]])))
        model.add(tf.keras.layers.Conv2D(output_nc, kernel_size=7, padding='valid'))
        model.add(tf.keras.layers.Activation(final_activation))
        self.model = model

    def call(self, input):
        """Run the generator and return the synthesized image tensor."""
        return self.model(input)
class LayoutToImageGeneratorSPADESArch(tf.keras.layers.Layer):
    """SPADE-style layout-to-image generator: starts from a learned (or random)
    4x4 seed, alternates Mescheder residual blocks and bilinear upsampling, and
    modulates activations with the layout via LayoutBasedModulation."""

    def __init__(self, nf=512, image_size=128, input_c=205,
                 random_input=False, use_sn=False, add_noise=True,
                 norm='instance'):
        super().__init__()
        self.nf = nf
        self.image_size = image_size
        # Spectral normalization swaps in SNConv2D for every conv layer.
        self.Conv2D = tf.keras.layers.Conv2D if not use_sn else SNConv2D
        # NOTE(review): any value other than 'instance'/'batch' leaves
        # self.Norm unset and fails later with AttributeError — confirm inputs.
        if norm == 'instance':
            self.Norm = tfa.layers.InstanceNormalization
        elif norm == 'batch':
            self.Norm = SyncBatchNormalization
        self.use_sn = use_sn
        self.add_noise = add_noise
        self.random_input = random_input
        layout_in = tf.keras.Input((image_size, image_size, input_c))
        if not self.random_input:
            # Learned constant seed, tiled across the batch.
            self.initial_image = tf.Variable(tf.ones((1, 4, 4, self.nf)), trainable=True, name='initial_image', aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
            x = tf.tile(self.initial_image, [tf.shape(layout_in)[0], 1, 1, 1])
        else:
            # NOTE(review): this draws noise at graph-construction time using a
            # symbolic batch size — confirm it behaves as intended.
            self.initial_image = tf.random.normal((tf.shape(layout_in)[0], 256), 0., 1.)
            x = tf.keras.layers.Dense(self.nf * 4 * 4)(self.initial_image)
            x = tf.reshape(x, [tf.shape(layout_in)[0], 4, 4, self.nf])
        # Resolution doubles after each resblock; inline comments track size.
        x = self.mescheder_resblock(x, layout_in, c=self.nf)
        x = self.upsample(x, 2)  # 8
        x = self.adaptive_spacial_instance_norm(x, layout_in)
        x = self.mescheder_resblock(x, layout_in, c=self.nf)
        x = self.upsample(x, 2)  # 16
        x = self.adaptive_spacial_instance_norm(x, layout_in)
        x = self.mescheder_resblock(x, layout_in, c=self.nf)
        x = self.upsample(x, 2)  # 32
        # Channel count halves from here on while resolution keeps doubling.
        cur_nf = self.nf // 2
        x = self.adaptive_spacial_instance_norm(x, layout_in)
        x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
        x = self.upsample(x, 2)  # 64
        cur_nf = cur_nf // 2
        x = self.adaptive_spacial_instance_norm(x, layout_in)
        x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
        x = self.upsample(x, 2)  # 128
        if self.image_size == 256:
            # Two extra stages for the 256x256 variant.
            cur_nf = cur_nf // 2
            x = self.adaptive_spacial_instance_norm(x, layout_in)
            x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
            x = self.upsample(x, 2)  # 256
            cur_nf = cur_nf // 2
            x = self.adaptive_spacial_instance_norm(x, layout_in)
            x = self.mescheder_resblock(x, layout_in, c=cur_nf, learn_skip=True)
        # Output head: leaky-relu -> 3-channel conv -> tanh in [-1, 1].
        x = tf.keras.layers.LeakyReLU(0.2)(x)
        x = self.Conv2D(3, kernel_size=3, padding='same')(x)
        x = tf.keras.layers.Activation('tanh')(x)
        self.model = tf.keras.Model(
            name='stylegan_spades_mix_generator',
            inputs=[layout_in],
            outputs=x)

    def adaptive_spacial_instance_norm(self, x, layout):
        """Optionally inject per-channel noise, then normalize and modulate
        the activations with the layout tensor."""
        if self.add_noise:
            x = stylegan.AddNoiseToEachChannel()(x)
        x = tf.keras.layers.LeakyReLU(0.2)(x)
        x = self.Norm()(x)
        x = stylegan.LayoutBasedModulation(self.use_sn)([x, layout])
        return x

    def upsample(self, x, scale_factor=2):
        """Bilinearly resize *x* by *scale_factor* in both spatial dims."""
        h, w = x.shape[1], x.shape[2]
        new_size = [h * scale_factor, w * scale_factor]
        return tf.image.resize(x, size=new_size, method='bilinear')

    def mescheder_resblock(self, x, layout, c=1024, learn_skip=False):
        """Residual block of two modulated convs; when *learn_skip* is set the
        skip path is also convolved/modulated (used when channel count changes)."""
        out = self.Conv2D(c, kernel_size=3, padding='same')(x)
        out = self.adaptive_spacial_instance_norm(out, layout)
        out = self.Conv2D(c, kernel_size=3, padding='same')(out)
        out = self.adaptive_spacial_instance_norm(out, layout)
        if learn_skip:
            x = self.Conv2D(c, kernel_size=3, padding='same')(x)
            x = self.adaptive_spacial_instance_norm(x, layout)
        return x + out

    def call(self, input):
        """Run the generator on a layout tensor and return the image."""
        return self.model(input)
| 7,740 | 2,802 |
"""Base class for nbviewer tests.
Derived from IPython.html notebook test case in 2.0
"""
# -----------------------------------------------------------------------------
# Copyright (C) Jupyter Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------
import os
import sys
import time
from contextlib import contextmanager
from subprocess import DEVNULL as devnull
from subprocess import Popen
from unittest import skipIf
from unittest import TestCase
import requests
from tornado.escape import to_unicode
from tornado.log import app_log
from nbviewer.providers.github.client import AsyncGitHubClient
from nbviewer.utils import url_path_join
class NBViewerTestCase(TestCase):
    """A base class for tests that need a running nbviewer server."""

    # Port the spawned nbviewer subprocess listens on.
    port = 12341
    # Extra environment variables merged into the subprocess environment.
    environment_variables = {}

    def assertIn(self, observed, expected, *args, **kwargs):
        # Coerce bytes/str to unicode before delegating so mixed-type
        # comparisons work. NOTE(review): unittest's signature is
        # (member, container); the parameter names here suggest the opposite
        # order — confirm call sites pass (member, container).
        return super().assertIn(
            to_unicode(observed), to_unicode(expected), *args, **kwargs
        )

    def assertNotIn(self, observed, expected, *args, **kwargs):
        # Unicode-normalizing counterpart of assertIn; same ordering caveat.
        return super().assertNotIn(
            to_unicode(observed), to_unicode(expected), *args, **kwargs
        )

    @classmethod
    def wait_until_alive(cls):
        """Wait for the server to be alive"""
        # Busy-wait: poll the root URL every 100 ms until a request succeeds.
        while True:
            try:
                requests.get(cls.url())
            except Exception:
                time.sleep(0.1)
            else:
                break

    @classmethod
    def wait_until_dead(cls):
        """Wait for the server to stop getting requests after shutdown"""
        # Inverse of wait_until_alive: poll until requests start failing.
        while True:
            try:
                requests.get(cls.url())
            except Exception:
                break
            else:
                time.sleep(0.1)

    @classmethod
    def get_server_cmd(cls):
        # Launch nbviewer as a module with the current interpreter.
        return [sys.executable, "-m", "nbviewer", "--port=%d" % cls.port]

    @classmethod
    def setup_class(cls):
        # Start the server subprocess (output discarded) and block until it
        # answers HTTP requests.
        server_cmd = cls.get_server_cmd()
        cls.server = Popen(
            server_cmd,
            stdout=devnull,
            stderr=devnull,
            # Set environment variables if any
            env=dict(os.environ, **cls.environment_variables),
        )
        cls.wait_until_alive()

    @classmethod
    def teardown_class(cls):
        # Terminate the subprocess and wait for the port to close.
        cls.server.terminate()
        cls.wait_until_dead()

    @classmethod
    def url(cls, *parts):
        """Absolute URL on the test server built from the given path parts."""
        return url_path_join("http://localhost:%i" % cls.port, *parts)
class FormatMixin(object):
    """Mixin that routes test-server URLs through the /format/<key> prefix;
    subclasses supply ``key`` (and ``port`` via NBViewerTestCase)."""

    @classmethod
    def url(cls, *parts):
        base = "http://localhost:%i" % cls.port
        return url_path_join(base, "format", cls.key, *parts)
class FormatHTMLMixin(object):
    """FormatMixin key for the HTML renderer."""
    key = "html"
class FormatSlidesMixin(object):
    """FormatMixin key for the slides renderer."""
    key = "slides"
@contextmanager
def assert_http_error(status, msg=None):
    """Context manager asserting that the wrapped block raises
    requests.HTTPError with the given response status code.

    Args:
        status: expected HTTP status code.
        msg: optional substring that must appear in the error's text.

    Fails (AssertionError) when no HTTPError is raised, when the status
    differs, or when *msg* is missing from the error text.
    """
    try:
        yield
    except requests.HTTPError as e:
        real_status = e.response.status_code
        # Bug fix: the format arguments were swapped, producing a misleading
        # failure message (expected and actual were reversed).
        assert real_status == status, "Expected status %d, got %d" % (
            status,
            real_status,
        )
        if msg:
            assert msg in str(e), e
    else:
        assert False, "Expected HTTP error status"
def skip_unless_github_auth(f):
    """Decorates a function to skip a test unless credentials are available for
    AsyncGitHubClient to authenticate.

    Avoids noisy test failures on PRs due to GitHub API rate limiting with a
    valid token that might obscure test failures that are actually meaningful.

    Parameters
    ----------
    f: callable
        test function to decorate

    Returns
    -------
    callable
        unittest.skipIf decorated function
    """
    # Credentials may be a personal access token or an OAuth client id/secret pair.
    cl = AsyncGitHubClient(log=app_log)
    can_auth = "access_token" in cl.auth or (
        "client_id" in cl.auth and "client_secret" in cl.auth
    )
    return skipIf(not can_auth, "github creds not available")(f)
| 3,993 | 1,166 |
import sys
import codecs
import json
import os
from extraction.Landmark import RuleSet
from extraction import Landmark


# NOTE: this script uses Python 2 syntax (print statements below) and will not
# run under Python 3 without modification.
def main(argv=None):
    """Apply the Landmark rule set to every saved imfdb page and print the
    absolute URLs extracted from them (count first, then one URL per line)."""
    baseUrl = 'http://www.imfdb.org'
    # NOTE(review): hard-coded local paths — parameterize before reuse.
    rules_file = '/Users/bamana/Documents/InferLink/workspace/memex/memexpython/input/_rules/imfdb_gun_urls_rules.txt'
    imfdb_gun_urls_dir = '/Users/bamana/Documents/InferLink/workspace/memex/memexpython/input/imfdb_gun_urls'
    urls = []
    # Load the extraction rules from a JSON file.
    with codecs.open(rules_file, "r", "utf-8") as myfile:
        json_str = myfile.read().encode('utf-8')
    json_object = json.loads(json_str)
    rules = RuleSet(json_object)
    # Walk every saved page (skipping dot-files) and extract its URLs.
    for subdir, dirs, files in os.walk(imfdb_gun_urls_dir):
        for the_file in files:
            if the_file.startswith('.'):
                continue
            with codecs.open(os.path.join(subdir, the_file), "r", "utf-8") as myfile:
                page_str = myfile.read().encode('utf-8')
                result = rules.extract(page_str)
                result = Landmark.flattenResult(result)
                for extracted_url in result['urls']:
                    # Extracted hrefs are relative; prefix the site root.
                    urls.append(baseUrl + extracted_url)
    print len(urls)
    for url in urls:
        print url


if __name__ == '__main__':
    sys.exit(main())
# server.py
# coding:utf-8
# Import make_server from the wsgiref module.
from wsgiref.simple_server import make_server
# Import the application function we wrote ourselves:
from wsgi import application
# Create a server: empty IP (all interfaces), port 8000, handler is application:
httpd = make_server('', 8000, application)
print("Serving HTTP on port 8000...")
# Start listening for HTTP requests:
httpd.serve_forever()
# NOTE: serve_forever() blocks indefinitely, so this line is unreachable.
print("Listening")
from bxcommon.connections.abstract_connection import AbstractConnection
from bxcommon.messages.abstract_message import AbstractMessage
from bxcommon.services.broadcast_service import BroadcastService
class GatewayBroadcastService(BroadcastService[AbstractMessage, AbstractConnection]):
    """Gateway broadcast policy: every message goes to every connection."""

    def should_broadcast_to_connection(self, message: AbstractMessage, connection: AbstractConnection) -> bool:
        # gateway does not really care about network numbers
        return True
| 481 | 112 |
from vk_advanced_api.vkapi import VKAPI
from vk_advanced_api.Auth import Auth | 78 | 31 |
import datetime
import os
import pickle
from abc import ABC, abstractmethod
import gym
import numpy as np
class BasePolicy(ABC):
    """Abstract policy interface: maps observations to actions."""

    @abstractmethod
    def get_action(self, obs, deterministic=True):
        """Return the action for *obs*; subclasses must implement this."""
        raise NotImplementedError()

    def evaluate(self, env, N=10, rollout=True):
        """Average undiscounted return of this policy over *N* episodes in *env*."""
        if not rollout:
            print("Warning: Rolling out policy despite rollout=False")
        total = 0
        for _ in range(N):
            obs = env.reset()
            done = False
            while not done:
                obs, reward, done, _ = env.step(self.get_action(obs))
                total += reward
        return total / N
class EpsGreedyPolicy(BasePolicy):
    """Epsilon-greedy wrapper: explores with probability eps, otherwise
    delegates to the wrapped greedy policy."""

    def __init__(self, greedy_policy: BasePolicy, eps: float, action_space: gym.Space):
        self.greedy = greedy_policy
        self.eps = eps
        self.action_space = action_space

    def get_action(self, obs, deterministic=False):
        """Greedy action when deterministic (or with prob. 1-eps); random otherwise."""
        # Short-circuit keeps the RNG untouched in the deterministic case,
        # matching the original draw behavior exactly.
        explore = (not deterministic) and np.random.random() <= self.eps
        if explore:
            return self.action_space.sample()
        return self.greedy.get_action(obs, deterministic=True)
class TabularPolicy(BasePolicy):
    """Policy backed by a |S| x |A| matrix: argmax row for deterministic
    actions, row-as-distribution for stochastic ones."""

    def __init__(self, policy: np.ndarray):
        # Defensive copy so callers can't mutate the policy afterwards.
        self.matrix = np.copy(policy)

    def get_action(self, state, deterministic=True):
        row = self.matrix[state, :]
        if deterministic:
            return np.argmax(row)
        return np.random.choice(range(self.matrix.shape[1]), p=row)

    def evaluate(self, env, N=1, rollout=False):
        """Exact policy evaluation via the env, unless a rollout is requested."""
        assert env.observation_type == "state"
        if rollout:
            return super().evaluate(env, N)
        return env.evaluate_policy(self)

    def __eq__(self, other):
        return np.all(self.matrix == other.matrix)
class FixedPolicy(BasePolicy):
    """Open-loop policy: the action depends only on the timestep, which is
    read from the last component of the state vector."""

    def __init__(self, policy: np.ndarray):
        self.matrix = np.copy(policy)

    def get_action(self, state, deterministic=True):
        # state[-1] encodes the current timestep.
        step = int(state[-1])
        return self.matrix[step]

    def __eq__(self, other):
        return np.all(self.matrix == other.matrix)
class LinearPolicy(BasePolicy):
    """Deterministic linear policy: a = clip(w @ normalize(obs), low, high).

    Normalization statistics are optional; action bounds come from *env*'s
    action space when given, otherwise actions are unbounded.
    """

    def __init__(self, w, obs_mean=None, obs_std=None, env=None):
        self.w = w
        self.obs_mean = obs_mean
        self.obs_std = obs_std
        if env is not None:
            self.alow = env.action_space.low
            self.ahigh = env.action_space.high
        else:
            self.alow = -np.inf
            self.ahigh = np.inf

    def normalize(self, obs):
        """Standardize *obs* when both statistics are available, else pass through."""
        if self.obs_mean is not None and self.obs_std is not None:
            return (obs - self.obs_mean) / self.obs_std
        else:
            return obs

    def get_action(self, obs, deterministic=True):
        obs = self.normalize(obs)
        a = np.dot(self.w, obs)
        a = np.clip(a, self.alow, self.ahigh)
        return a

    def save(self, path):
        """Pickle the weights and normalization statistics to *path*.

        Bug fix: the original called list(self.obs_mean) unconditionally,
        which raised TypeError whenever the policy was created without
        normalization statistics (the constructor default).
        """
        policy_dict = {
            "w": list(self.w),
            "mean": None if self.obs_mean is None else list(self.obs_mean),
            "std": None if self.obs_std is None else list(self.obs_std),
        }
        with open(path, "wb") as f:
            pickle.dump(policy_dict, f)

    @classmethod
    def load(cls, path, env=None):
        """Reconstruct a LinearPolicy from a file written by save()."""
        with open(path, "rb") as f:
            policy_dict = pickle.load(f)
        policy = cls(
            policy_dict["w"],
            obs_mean=policy_dict["mean"],
            obs_std=policy_dict["std"],
            env=env,
        )
        return policy
class StableBaselinesPolicy(BasePolicy):
    """Adapter exposing a stable-baselines model through the BasePolicy API."""

    def __init__(self, model):
        # save and load the model as a workaround for creating a copy of the policy
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        filename = f"tmp_model_{timestamp}.zip"
        model.save(filename)
        self.model = model.__class__.load(filename)
        # Best-effort cleanup of the temporary file.
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass

    def get_action(self, obs, deterministic=True):
        # predict() returns (action, state); the recurrent state is discarded.
        a, _ = self.model.predict(obs, deterministic=deterministic)
        return a
class CombinedPolicy(BasePolicy):
    """Mixture policy: each query samples one sub-policy according to the
    weight vector p (uniform when p is omitted)."""

    def __init__(self, policies, p=None):
        self.policies = policies
        for member in self.policies:
            assert issubclass(member.__class__, BasePolicy)
        if p is None:
            count = len(self.policies)
            p = np.ones(count) / count
        self.p = p

    def get_action(self, obs, deterministic=True):
        """Pick a sub-policy by weight and delegate the action choice to it."""
        chosen = np.random.choice(np.arange(len(self.policies)), p=self.p)
        return self.policies[chosen].get_action(obs, deterministic=deterministic)
class GaussianNoisePolicy(BasePolicy):
    """Wraps another policy and perturbs its actions with i.i.d. Gaussian
    noise of standard deviation sigma."""

    def __init__(self, policy: BasePolicy, sigma: float):
        self.policy = policy
        self.sigma = sigma

    def get_action(self, obs, deterministic=False):
        action = self.policy.get_action(obs, deterministic=deterministic)
        jitter = np.random.normal(loc=0, scale=self.sigma, size=action.shape)
        # In-place add, matching the original's `action += noise`.
        action += jitter
        return action
import asyncio
import logging
import threading
import uuid
from cachetools import TTLCache
from pyre import Pyre
from serde.msgpack import from_msgpack, to_msgpack
from typing import List, Union
from ..boards.memory_board import MemoryBoard
from ..messages.append_entries import AppendEntriesMessage, LogEntry, Command
from ..messages.base import BaseMessage, Peer
from ..states.state import State
from .server import HashedLog, Server
logger = logging.getLogger("raft")
class ZREServer(Server):
    """Raft server transported over a Pyre/ZRE group.

    This implementation is suitable for multi-process testing. Messages are
    msgpack-serialized, prefixed with a 32-byte digest, and sent via ZRE
    shout (group broadcast) or whisper (point-to-point).
    """

    # Length in bytes of the digest prepended to every wire message.
    DIGEST_SIZE = 32

    def __init__(
        self,
        group,
        name,
        state: State,
        node: Pyre,
        # DBM file that stores stable storage state for raft
        stable_storage,
        log=None,
        messageBoard=None,
        parent=None,
    ):
        if log is None:
            log = HashedLog()
            log.append(LogEntry(term=0))  # According to the raft spec
        if messageBoard is None:
            messageBoard = MemoryBoard()
        super().__init__(
            node.uuid().hex,
            state,
            log,
            messageBoard,
            [],
            set(),
            set(),
            _stable_storage=stable_storage,
        )
        self.group = group
        self._node = node
        self._human_name = name
        # AppendEntries messages awaiting acknowledgement; entries expire
        # after 10 seconds.
        self._outstanding_index = TTLCache(maxsize=128, ttl=10)
        # Sometimes several instances of consensus are arranged in a
        # hierarchy. In order to become a candidate in the child consensus,
        # you have to be a leader in the parent. Note that in the presence
        # of failures, the parent and the child consensus could have
        # different leaders at the same time.
        self._parent = parent

    def add_neighbor(self, neighbor: Peer):
        """Schedule a quorum 'add' for *neighbor* and track it locally."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.quorum_set(str(neighbor), "add"))
        self._neighbors.append(neighbor)
        return task

    def remove_neighbor(self, neighbor: Peer):
        """Schedule a quorum 'remove' for *neighbor* and drop it from the
        local neighbor/quorum bookkeeping."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.quorum_set(str(neighbor), "remove"))
        self._neighbors.remove(neighbor)
        if neighbor in self._quorum:
            self._quorum.remove(neighbor)
        if neighbor in self._live_quorum:
            self._live_quorum.remove(neighbor)
        return task

    def quorum_update(self, entries: List[LogEntry]) -> None:
        """Apply committed QUORUM_PUT log entries to the quorum sets and
        recompute the node count."""
        for entry in entries:
            assert entry.command == Command.QUORUM_PUT
            if entry.value == "add":
                self._quorum.add(entry.key)
            elif entry.value == "remove":
                if entry.key in self._quorum:
                    self._quorum.remove(entry.key)
                if entry.key in self._live_quorum:
                    self._live_quorum.remove(entry.key)
                # TODO: if the leader is removed, needs to step down
        self._total_nodes = len(self._quorum)

    async def send_message(self, message: Union[BaseMessage, bytes]):
        """Serialize and route *message*: loop back to self, whisper to a
        specific receiver, or shout to the whole group."""
        logger.debug(f"sending: {self._state}: {message}")
        if isinstance(message, AppendEntriesMessage):
            # Remember outstanding AppendEntries for later acknowledgement.
            self._outstanding_index[message.id] = message
        if isinstance(message, bytes):
            # Pre-serialized payload: broadcast as-is.
            self._node.shout(self.group, b"/raft " + message)
        else:
            if message.receiver == self._name:
                # Message to ourselves: short-circuit the network.
                await self._receive_message(message)
                return
            elif message.receiver is not None:
                # Disambiguate in cases where a peer is in multiple groups
                message.group = self.group
            message_bytes = to_msgpack(message, ext_dict=BaseMessage.EXT_DICT_REVERSED)
            # Prepend the digest so receivers can verify integrity.
            digest = message.hash().digest()
            assert len(digest) == self.DIGEST_SIZE
            message_bytes = digest + message_bytes
            if message.receiver is None:
                self._node.shout(self.group, b"/raft " + message_bytes)
            else:
                if type(message.receiver) != str:
                    raise Exception(
                        f"Expected node.uuid().hex here, got: {message.receiver}"
                    )
                self._node.whisper(
                    uuid.UUID(message.receiver),  # type: ignore
                    b"/raft " + message_bytes,
                )

    async def receive_message(self, message_bytes: bytes):
        """Verify, deserialize, group-filter, and dispatch an incoming wire
        message; malformed or tampered messages are logged and dropped."""
        try:
            # Split the digest prefix from the msgpack payload.
            message_hash, message_bytes = (
                message_bytes[0 : self.DIGEST_SIZE],
                message_bytes[self.DIGEST_SIZE :],
            )
            message = from_msgpack(
                BaseMessage, message_bytes, ext_dict=BaseMessage.EXT_DICT
            )
            if message_hash != message.hash().digest():
                raise Exception(f"message hash {message_hash} doesn't match {message}")
        except Exception as e:
            logger.info(f"Got exception: {e}")
            return
        # Ignore traffic addressed to other groups.
        if message.group is not None and message.group != self.group:
            return
        await self._receive_message(message)

    async def _receive_message(self, message: BaseMessage):
        # Run the state machine first, then archive the message.
        await self.on_message(message)
        await self.post_message(message)

    async def post_message(self, message):
        await self._messageBoard.post_message(message)

    async def on_message(self, message):
        """Feed *message* to the current raft state and adopt the state it
        transitions to."""
        logger.debug(f"---------- on_message start -----------")
        logger.debug(f"{self._state}: {message}")
        state, response = await self._state.on_message(message)
        logger.debug(f"{state}: {response}")
        logger.debug(f"---------- on_message end -----------")
        self._state = state

    async def wait_for(self, expected_index, expected_id) -> None:
        """Block until the entry with *expected_id* is committed at
        *expected_index*, then apply any committed quorum changes and signal
        the waiting event."""
        def check_condition():
            return (
                self._commitIndex >= expected_index
                and self._log[expected_index].id == expected_id
            )
        async with self._condition:
            await self._condition.wait_for(check_condition)
            # NOTE(review): self._server._log looks inconsistent with the
            # plain self._log used elsewhere in this class — confirm.
            entries = [
                e
                for e in self._server._log[expected_index : self._commitIndex + 1]
                if e.command == Command.QUORUM_PUT
            ]
            self.quorum_update(entries)
            self._condition_event.set()

    async def set(self, key: str, value: str):
        """Submit a key/value write through the current leader.

        Returns a (wait_for, expected_index, entry_id) triple the caller can
        use to await commitment; raises when no leader is known.
        """
        leader = self._state.leader
        if leader is not None:
            append_entries = AppendEntriesMessage(
                self._name,
                leader,
                self._currentTerm,
                entries=[
                    LogEntry(
                        term=self._currentTerm,
                        index=self._commitIndex,
                        key=key,
                        value=value,
                    )
                ],
            )
            expected_index = self._commitIndex + 1
            await self.send_message(append_entries)
            self._condition_event = threading.Event()
            return (self.wait_for, expected_index, append_entries.id)
        else:
            raise Exception("Leader not found")

    async def get(self, key: str):
        """Read *key* from the message board."""
        return await self._messageBoard.get(key)

    async def quorum_set(self, neighbor: str, op: str):
        """Submit a quorum membership change ('add'/'remove' of *neighbor*).

        Only the leader appends the QUORUM_PUT entry; followers return a
        no-op coroutine. Raises when no leader is known in a non-zero term.
        """
        leader = self._state.leader
        if leader is not None:
            if leader != self._name:
                # Let the leader handle this
                async def nop():
                    pass
                return nop
            append_entries = AppendEntriesMessage(
                self._name,
                leader,
                self._currentTerm,
                id="set",  # Just so all nodes compute the same hash
                entries=[
                    LogEntry(
                        command=Command.QUORUM_PUT,
                        term=self._currentTerm,
                        index=self._commitIndex,
                        key=neighbor,
                        value=op,
                    )
                ],
            )
            expected_index = self._commitIndex + 1
            await self.send_message(append_entries)
            self._condition_event = threading.Event()
            return (self.wait_for, expected_index, append_entries.entries[0].id)
        else:
            if self._currentTerm > 0:
                raise Exception("Leader not found")
| 8,384 | 2,204 |
# ----------------------------------------------#
# Comparing the contents of two strings
# ----------------------------------------------#
# We have two different texts:
ilkMetin = "asdasfddgdhfjfdgdşfkgjdfklgşjdfklgjdfkghdfjghjklsdhajlsdhjkjhkhjjh"
ikinciMetin = "sdfsuıdoryeuıfsjkdfhdjklghjdfklruseldhfjlkdshfljskeeuf"
# Goal: print the characters that occur in ilkMetin but not in ikinciMetin.
for s in ilkMetin:  # for every character s in ilkMetin
    if not s in ikinciMetin:  # if s does not occur in ikinciMetin
        print(s)  # print it
# sample output:
#a
#a
#ş
#ş
#a
# Now the characters present in ikinciMetin but missing from ilkMetin:
for m in ikinciMetin:
    if not m in ilkMetin:
        print(m)
#u ı o r y e u ı r u e e e u
# To avoid printing the same character more than once, accumulate uniques:
fark = ""
for p in ikinciMetin:  # for every character p in ikinciMetin
    if not p in ilkMetin:  # if p is not in ilkMetin
        if not p in fark:  # and p has not been collected yet
            fark += p  # append it to the accumulator
print(fark)  #u ı o r y e
# Concatenating strings does not change the original variable:
a = "istihza"
print(a + ".com")  #istihza.com
print(a)  #istihza
# To make the change permanent, rebind the variable:
a = a +".com"  # could also be written as a += ".com"
print(a)  #istihza.com
# The same difference computation, written compactly:
firstString = "asadlaskdnlnceıfeşsdje9"
twoString = "asşdlmasejmşşvawldad"
cikarma = ""
for e in firstString:
    if not e in twoString and not e in cikarma:
        cikarma += e
print(cikarma)  #k n c ı f 9
"""
#--------------------------------------#
# DOSYALARIN İÇERİĞİNİ KARŞILAŞTIRMA
#--------------------------------------#
#değişkenlerimizi karşılaştırmıştık
#şimdi ise dosyaları karşılaştıralım
#elimizde isimler1.txt ve isimler2.txt adlı iki dosya var
d1 = open("isimler1.txt")#dosyayı açıyoruz
d1Satirlar = d1.readlines() #satırları okuyoruz
d2 = open("isimler2.txt")#dosyayı açıyoruz
d2Satirlar = d2.readlines()#satırları okuyoruz
for i in d2Satirlar:
if not i in d1Satirlar:
print(i)
d1.close()
d2.close()
"""
# -----------------------------------------#
# Counting characters in a string
# -----------------------------------------#
# Program that reports how many times a given letter occurs in the text:
metin = """Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı
tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,
isminin Python olmasına aldanarak, bu programlama dilinin, adını piton
yılanından aldığını düşünür. Ancak zannedildiğinin aksine bu programlama
dilinin adı piton yılanından gelmez. Guido Van Rossum bu programlama dilini,
The Monty Python adlı bir İngiliz komedi grubunun, Monty Python’s Flying
Circus adlı gösterisinden esinlenerek adlandırmıştır. Ancak her ne kadar
gerçek böyle olsa da, Python programlama dilinin pek çok yerde bir yılan
figürü ile temsil edilmesi neredeyse bir gelenek halini almıştır."""
harf = input("Sorgulamak istediğiniz harf: ")
number = ""
for l in metin:  # for every character l in the text
    if harf == l:  # if it equals the queried letter
        number += harf  # append one copy to the accumulator
print(len(number))
# If the letter occurs 5 times, number holds it 5 times ("aaaaa"),
# so printing its length gives the occurrence count.
# The next section shows an alternative way to write this.
metinOne =""" Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı
tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,
isminin Python olmasına aldanarak, bu programlama dilinin, adını piton
yılanından aldığını düşünür. Ancak zannedildiğinin aksine bu programlama
dilinin adı piton yılanından gelmez. Guido Van Rossum bu programlama dilini,
The Monty Python adlı bir İngiliz komedi grubunun, Monty Python’s Flying
Circus adlı gösterisinden esinlenerek adlandırmıştır. Ancak her ne kadar
gerçek böyle olsa da, Python programlama dilinin pek çok yerde bir yılan
figürü ile temsil edilmesi neredeyse bir gelenek halini almıştır."""
harfOne = input("Sorgulamak istediğin harf:")
sayi = 0
for i in metinOne:
if harfOne == i:
sayi += 1
print(sayi)
#eğer kullanıcıdan gelen harf kullanıldıysa ona bir ekle diyoru
#böylelikle o harf kaç kere kullanıldıysa sayinin değeri 1 artacak
#-----------------------------------------------#
# Counting Characters in a File
#-----------------------------------------------#
# Same counting as above, but the text is read from a file instead of a variable.
hakkinda = open("hakkında.txt",encoding="utf-8")  # open the file (UTF-8 text)
# BUG FIX: this input line had been commented out while the loop below still
# referenced `harfTwo`, which raised a NameError at runtime; restored.
harfTwo = input("Sorgulamak istenilen harf")  # ask the user for a letter
sayiOne = 0  # occurrence counter
for karakterDizisi in hakkinda:  # iterating a file yields one line (string) at a time
    for karakter in karakterDizisi:  # each character of that line
        if harfTwo == karakter:  # if it matches the queried letter
            sayiOne += 1  # count it
print(sayiOne)  # show the count (restored together with the input line)
hakkinda.close()  # close the file to release the handle
# To see that each line read from the file is a separate string, use repr().
# The triple-quoted strings below are disabled example code and explanatory
# notes from the original (Turkish) tutorial, kept verbatim.
"""
for karakterDizisi in hakkina:
    print(repr(karakterDizisi))
Çıktı:
'Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcı\n'
'tarafından 90’lı yılların başında geliştirilmeye başlanmıştır. Çoğu insan,\n'
"""
"""
Bu çıktıya çok dikkatlice bakın. repr() fonksiyonu sayesinde Python’ın alttan alta neler çevirdiğini bariz bir biçimde görüyoruz. Karakter dizisinin başlangıç ve bitişini gösteren tırnak işaretleri ve \n kaçış dizilerinin görünür vaziyette olması sayesinde her bir satırın ayrı bir karakter dizisi olduğunu daha net bir şekilde görebiliyoruz.
Biz yazdığımız kodlarda, kullanıcıdan bir harf girmesini istiyoruz. Kullandığımız algoritma gereğince bu harfi metindeki karakter dizileri içinde geçen her bir karakterle tek tek karşılaştırmamız gerekiyor. input() metodu aracılığıyla kullanıcıdan tek bir karakter alıyoruz. Kullandığımız for döngüsü ise bize bir karakter yerine her satırda bir karakter dizisi veriyor. Dolayısıyla mesela kullanıcı ‘a’ harfini sorgulamışsa, ilk for döngüsü bu harfin karşısına ‘Bu programlama dili Guido Van Rossum adlı Hollandalı bir programcın’ adlı karakter dizisini çıkaracaktır. Dolayısıyla bizim bir seviye daha alta inerek, ilk for döngüsünden elde edilen değişken üzerinde başka bir for döngüsü daha kurmamız gerekiyor. Bu yüzden şöyle bir kod yazıyoruz:
"""
"""
for karakter_dizisi in hakkında:
    for karakter in karakter_dizisi:
"""
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from tableschema import constraints
# Tests
@pytest.mark.parametrize(
    'constraint, value, result',
    [
        (0, [1], True),
        (1, [1], True),
        (2, [1], False),
    ],
)
def test_check_minLength(constraint, value, result):
    """check_minLength succeeds exactly when len(value) >= constraint."""
    actual = constraints.check_minLength(constraint, value)
    assert actual == result
| 476 | 154 |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from dopamine.discrete_domains import run_experiment
from dopamine.utils.example_viz_lib import MyRunner
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.utils import agent_visualizer
from dopamine.utils import atari_plotter
from dopamine.utils import bar_plotter
from dopamine.utils import line_plotter
from dopamine.utils import plotter
import gin
import numpy as np
import tensorflow.compat.v1 as tf
import pygame
import gin.tf
@gin.configurable
def create_runner(base_dir, schedule='continuous_train_and_eval', level=0):
    """Create a bubble-game runner (adapted from run_experiment.create_runner).

    Args:
      base_dir: experiment output directory; must not be None.
      schedule: 'continuous_train_and_eval' -> BubbleRunner,
        'continuous_train' -> TrainRunner.
      level: initial stage level used when the environment is reset.

    Returns:
      A runner instance matching `schedule`.

    Raises:
      ValueError: if `schedule` is not one of the two known values.
    """
    assert base_dir is not None
    from dopamine.discrete_domains.run_experiment import TrainRunner
    from dopamine.discrete_domains.run_experiment import create_agent
    if schedule == 'continuous_train_and_eval':
        # Alternates training and evaluation until max num_iterations is hit.
        return BubbleRunner(base_dir, create_agent, game_level=level)
    if schedule == 'continuous_train':
        # Training only, until max num_iterations is hit.
        return TrainRunner(base_dir, create_agent)
    raise ValueError('Unknown schedule: {}'.format(schedule))
@gin.configurable
class BubbleRunner(run_experiment.Runner):
    """Dopamine Runner customized for the bubble game.

    Adds on top of the stock Runner: (1) optional progress reporting through a
    multiprocessing queue, (2) a stage level forwarded to `env.reset()` and
    incremented once per iteration, and (3) optional early episode termination
    when the agent loses a life.

    Args:
      proc_queue: instance of `multiprocessing.Queue` used by `post_message`
        to report episode/phase progress (may be None).
      game_level: initial stage level passed to the environment reset.
    """
    def __init__(self, base_dir, create_agent_fn, proc_queue=None, game_level=0):
        '''Initialize logging, the base Runner, the IPC queue, and the level.'''
        print('! BubbleRunner(%s)' % (base_dir))
        assert create_agent_fn is not None
        # Attach a file handler to the TF logger before the base class starts logging.
        BubbleRunner.init_logger(base_dir)
        super(BubbleRunner, self).__init__(base_dir, create_agent_fn)
        self.proc_queue = proc_queue  # optional IPC queue for progress messages
        self.game_level = game_level  # stage level used on environment reset
    def post_message(self, data):
        # Best-effort progress report; a no-op when no queue was supplied.
        self.proc_queue.put(data) if self.proc_queue is not None else None
    def current(self):
        # Wall-clock time in whole milliseconds.
        import time
        return int(round(time.time() * 1000))
    def _initialize_episode(self):
        # Reset into the configured stage level when one is set (level 0 uses
        # the environment's default reset signature).
        env = self._environment
        obs = env.reset(self.game_level) if self.game_level > 0 else env.reset()
        return self._agent.begin_episode(obs)
    def _run_one_step(self, action):
        # NOTE: unlike the stock Runner, the env step here also returns `info`.
        observation, reward, is_terminal, info = self._environment.step(action)
        return observation, reward, is_terminal, info
    def _run_one_episode(self):
        """Run a single episode; returns (steps, return, score, level).

        NOTE(review): `self.end_on_death` is never assigned in this class --
        presumably gin-configured or set on the base Runner; confirm. The
        final `info` dict is assumed to carry 'score' and 'level' keys.
        """
        step_number = 0
        total_reward = 0.
        agent_lives = 0
        action = self._initialize_episode()
        is_terminal = False
        is_death = False
        # Keep interacting until we reach a terminal state.
        while True:
            observation, reward, is_terminal, info = self._run_one_step(action)
            curr_lives = int(info['lives']) if 'lives' in info else 0
            total_reward += reward
            step_number += 1
            #! latch death once the life count drops below the previous step's.
            is_death = True if curr_lives < agent_lives else is_death
            agent_lives = curr_lives
            #! end the episode early on death when configured to do so
            # (note: reward is NOT clipped on this break path).
            if (self.end_on_death and is_death):
                break
            # TODO(steve) - need to clip reward really?!!
            reward = np.clip(reward, -1, 1)
            if (self._environment.game_over or step_number == self._max_steps_per_episode):
                break
            elif is_terminal:
                self._agent.end_episode(reward)
                action = self._agent.begin_episode(observation)
            else:
                # NOTE(review): stock dopamine agents take (reward, observation);
                # passing `info` assumes a customized agent -- confirm.
                action = self._agent.step(reward, observation, info)
        self._end_episode(reward)
        #! report episode stats through the optional queue, then return them.
        self.post_message({'episode': {'length': step_number, 'return': total_reward}})
        return step_number, total_reward, int(info['score']), int(info['level'])
    def _run_one_phase(self, min_steps, statistics, run_mode_str):
        """Run episodes until at least `min_steps` environment steps elapsed.

        Returns (step_count, sum_returns, num_episodes) like the base Runner,
        and prints a one-line progress/ETA display after every episode.
        """
        step_count = 0
        num_episodes = 0
        sum_returns = 0.
        time_started = self.current()
        self.post_message({'phase': {'steps': min_steps, 'mode': run_mode_str, 'level':self.game_level }})
        while step_count < min_steps:
            episode_length, episode_return, episode_score, episode_level = self._run_one_episode()
            statistics.append({
                '{}_episode_lengths'.format(run_mode_str): episode_length,
                '{}_episode_returns'.format(run_mode_str): episode_return
            })
            step_count += episode_length
            sum_returns += episode_return
            num_episodes += 1
            # ETA from the average wall-clock seconds per step so far;
            # shown in minutes once more than five minutes remain.
            sec_per_step = ((self.current() - time_started)/1000.0/step_count)
            sec_remained = int((min_steps - step_count)*sec_per_step)
            time_display = '{:1.1f}m'.format(sec_remained/60) if sec_remained > 60*5 else '{}s'.format(sec_remained)
            sys.stdout.write('Steps: {:6.0f} {:2.0f}% '.format(step_count, step_count/min_steps*100.) +
                             'Remains: {} '.format(time_display) +
                             'Episode[{}].len: {} '.format(num_episodes, episode_length) +
                             'Return: {:.1f} S:{} L:{}'.format(episode_return, episode_score, episode_level)+
                             ' \r')
            sys.stdout.flush()
        return step_count, sum_returns, num_episodes
    def _run_one_iteration(self, iteration):
        """Run the base iteration, then advance the stage level (capped at 99)."""
        # print('! run_one_iteration({}) - L{}'.format(iteration, self.game_level))
        ret = super(BubbleRunner, self)._run_one_iteration(iteration)
        self.game_level = min(99, self.game_level + 1)
        return ret
    @staticmethod
    def init_logger(base_dir):
        '''Attach a file handler so the TF logger also writes tensorflow.log.'''
        import logging, os
        # get TF logger
        log = logging.getLogger('tensorflow')
        log.setLevel(logging.DEBUG)
        # create file handler which logs even debug messages
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # NOTE(review): the handler is only attached when tensorflow.log
        # ALREADY exists, so nothing is written on a fresh run -- this looks
        # inverted; confirm whether the existence check is intentional.
        if os.path.exists(os.path.join(base_dir, 'tensorflow.log')):
            fh = logging.FileHandler(os.path.join(base_dir, 'tensorflow.log'))
            fh.setLevel(logging.INFO)
            fh.setFormatter(formatter)
            log.addHandler(fh)
        # print log header..
        tf.logging.info('---'*32)
        tf.logging.info('BubbleRunner() starts!!')
        tf.logging.info('---'*32)
class VizBubbleRunner(BubbleRunner):
    """Runner that replays a trained agent from a checkpoint and records video.

    Evaluation-only: `_run_one_iteration` skips the training phase, and
    `visualize` composes frames with the plotter classes defined below
    (MyLinePlotter, MyObservationPlotter, MyBarPlotter).
    """
    def __init__(self, base_dir, trained_agent_ckpt_path, create_agent_fn, use_legacy_checkpoint = False, game_level = 0):
        print('! VizBubbleRunner({})'.format(base_dir))
        self._trained_agent_ckpt_path = trained_agent_ckpt_path  # checkpoint to reload
        self._use_legacy_checkpoint = use_legacy_checkpoint  # pre-Keras checkpoint format flag
        super(VizBubbleRunner, self).__init__(base_dir, create_agent_fn, game_level=game_level)
    def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
        # Instead of resuming training state, load the trained agent weights
        # and always start from iteration 0.
        self._agent.reload_checkpoint(self._trained_agent_ckpt_path, self._use_legacy_checkpoint)
        self._start_iteration = 0
    def _run_one_iteration(self, iteration):
        """Evaluation-only iteration (no training phase)."""
        from dopamine.discrete_domains import iteration_statistics
        statistics = iteration_statistics.IterationStatistics()
        tf.logging.info('Starting iteration %d', iteration)
        _, _ = self._run_eval_phase(statistics)
        return statistics.data_lists
    def _run_one_episode(self):
        """Like BubbleRunner._run_one_episode, without death/score tracking."""
        step_number = 0
        total_reward = 0.
        action = self._initialize_episode()
        is_terminal = False
        # Keep interacting until we reach a terminal state.
        while True:
            observation, reward, is_terminal, info = self._run_one_step(action)
            total_reward += reward
            step_number += 1
            reward = np.clip(reward, -1, 1)
            if (self._environment.game_over or step_number == self._max_steps_per_episode):
                break
            elif is_terminal:
                self._agent.end_episode(reward)
                action = self._agent.begin_episode(observation)
            else:
                # NOTE(review): passes `info` to agent.step -- assumes a
                # customized agent signature; confirm.
                action = self._agent.step(reward, observation, info)
        self._end_episode(reward)
        return step_number, total_reward
    def visualize(self, record_path, num_global_steps=500):
        '''Customized visualization for bubble.
        - originally from MyRunner.visualize()

        Args:
          record_path: directory frames/video are written to (created if missing).
          num_global_steps: total environment steps to record before encoding.
        '''
        print('RUN> visualize(%s, %d)'%(record_path, num_global_steps))
        if not tf.gfile.Exists(record_path):
            tf.gfile.MakeDirs(record_path)
        self._agent.eval_mode = True
        # Set up the game playback rendering.
        atari_params = {'environment': self._environment,
                        'width': 240,
                        'height': 224 }
        atari_plot = atari_plotter.AtariPlotter(parameter_dict=atari_params)
        # Plot the rewards received next to it.
        reward_params = {'x': atari_plot.parameters['width'],
                         'xlabel': 'Timestep',
                         'ylabel': 'Reward',
                         'title': 'Rewards',
                         'get_line_data_fn': self._agent.get_rewards}
        #reward_plot = line_plotter.LinePlotter(parameter_dict=reward_params)
        reward_plot = MyLinePlotter(parameter_dict=reward_params)
        action_names = ['Action {}'.format(x) for x in range(self._agent.num_actions)]
        # Plot the raw observation at the bottom-left.
        obsrv_params = {
            'x': atari_plot.parameters['x'],
            'y': atari_plot.parameters['height'] - 10,
            'width': atari_plot.parameters['width'],
            'height': atari_plot.parameters['height'],
        }
        obsrv_plot = MyObservationPlotter(parameter_dict=obsrv_params)
        # Plot Q-values (DQN family) or return distributions (Rainbow/IQN).
        q_params = {'x': atari_plot.parameters['width'],
                    'y': atari_plot.parameters['height'],
                    'legend': action_names }
        if 'DQN' in self._agent.__class__.__name__:
            q_params['xlabel'] = 'Timestep'
            q_params['ylabel'] = 'Q-Value'
            q_params['title'] = 'Q-Values'
            q_params['get_line_data_fn'] = self._agent.get_q_values
            q_plot = MyLinePlotter(parameter_dict = q_params)
        else:
            q_params['xlabel'] = 'Return'
            q_params['ylabel'] = 'Return probability'
            q_params['title'] = 'Return distribution'
            q_params['get_bar_data_fn'] = self._agent.get_probabilities
            q_plot = MyBarPlotter(parameter_dict = q_params)
        # Overall screen size is the 2x2 grid of the plots above.
        screen_width = (atari_plot.parameters['width'] + reward_plot.parameters['width'])
        screen_height = (atari_plot.parameters['height'] + q_plot.parameters['height'])
        # Dimensions need to be divisible by 2 (video encoder requirement):
        screen_width += 1 if screen_width % 2 > 0 else 0
        screen_height += 1 if screen_height % 2 > 0 else 0
        # build visualizer.
        visualizer = agent_visualizer.AgentVisualizer(
            record_path=record_path, plotters=[
                atari_plot, reward_plot, obsrv_plot, q_plot
            ],
            screen_width=screen_width, screen_height=screen_height)
        # Record one frame per environment step until num_global_steps is reached.
        global_step = 0
        while global_step < num_global_steps:
            initial_observation = self._environment.reset()
            action = self._agent.begin_episode(initial_observation)
            while True:
                observation, reward, is_terminal, info = self._environment.step(action)
                global_step += 1
                obsrv_plot.setObservation(observation)
                visualizer.visualize()
                if self._environment.game_over or global_step >= num_global_steps:
                    break
                elif is_terminal:
                    self._agent.end_episode(reward)
                    action = self._agent.begin_episode(observation)
                else:
                    action = self._agent.step(reward, observation, info)
            self._end_episode(reward)
        visualizer.generate_video()
class MyObservationPlotter(plotter.Plotter):
    """Plotter that renders the most recent raw observation.

    The observation is pushed in via `setObservation()` (typically once per
    environment step) and blitted into a pygame surface on `draw()`.
    """
    _defaults = { 'x': 0, 'y': 0 }
    def __init__(self, parameter_dict=None, screen_size=84):
        """Create the plotter.

        Args:
          parameter_dict: optional plotter parameters (may carry width/height).
          screen_size: side length (pixels) of the native observation surface.
        """
        # BUG FIX: the original used a mutable default argument (`{}`), which
        # is shared across all calls; use None and substitute a fresh dict.
        super(MyObservationPlotter, self).__init__(parameter_dict or {})
        # Fall back to the native screen size when no explicit size was given.
        self.width = self.parameters['width'] if 'width' in self.parameters else screen_size
        self.height = self.parameters['height'] if 'height' in self.parameters else screen_size
        self.game_surface = pygame.Surface((screen_size, screen_size))
        self.obs = None
    def setObservation(self, obs):
        """Store the latest observation to be rendered on the next draw()."""
        self.obs = obs
    def draw(self):
        """Copy the stored observation into the surface and scale it up."""
        numpy_surface = np.frombuffer(self.game_surface.get_buffer(), dtype=np.int32)
        if self.obs is not None:
            # assumes obs.ravel() matches the screen_size*screen_size int32
            # buffer in size and dtype -- TODO confirm upstream shape/dtype.
            np.copyto(numpy_surface, self.obs.ravel())
        return pygame.transform.scale(self.game_surface, (self.width, self.height))
class MyLinePlotter(line_plotter.LinePlotter):
    """LinePlotter with larger fonts and a secondary (right) y-axis.

    The last series returned by `get_line_data_fn` is drawn on the secondary
    axis (labelled 'Score'); all other series use the primary axis.
    """
    def __init__(self, parameter_dict):
        defaults = {'font': {
                        'family': 'DejaVu Sans',
                        'weight': 'regular',
                        'size': 26 },
                    'figsize': (12, 9),
                    }
        defaults.update(parameter_dict)
        super(MyLinePlotter, self).__init__(parameter_dict = defaults)
        #! secondary axis for the score series.
        # CLEANUP: the original guarded these with an always-true `if 1>0`
        # conditional expression and a side-effecting ternary; both replaced
        # with plain statements (behavior unchanged -- ax2 was always created).
        self.ax1 = self.plot.axes
        self.ax2 = self.ax1.twinx()
        self.ax2.set_ylabel('Score', color='b')
    def draw(self):
        """Draw the line plot.

        If `parameter_dict` contains a 'legend' key pointing to a list of
        labels, this will be used as the legend labels in the plot.

        Returns:
          A pygame surface to be rendered by AgentVisualizer.
        """
        self._setup_plot()  # draw
        num_colors = len(self.parameters['colors'])
        max_xlim = 0
        line_data = self.parameters['get_line_data_fn']()
        for i in range(len(line_data)):
            # Route the final series to the secondary (score) axis.
            plot_axes = self.ax2 if i + 1 >= len(line_data) else self.ax1
            plot_axes.plot(line_data[i],
                           linewidth=self.parameters['linewidth'],
                           color=self.parameters['colors'][i % num_colors])
            max_xlim = max(max_xlim, len(line_data[i]))
        # Scroll the x-window so only the most recent max_width points show.
        min_xlim = max(0, max_xlim - self.parameters['max_width'])
        self.plot.set_xlim(min_xlim, max_xlim)
        if 'legend' in self.parameters:
            self.plot.legend(self.parameters['legend'])
        self.fig.canvas.draw()
        # Now transfer the rendered figure into a pygame surface.
        # (The redundant local `import pygame` was removed; the module imports it.)
        width, height = self.fig.canvas.get_width_height()
        if self.plot_surface is None:
            self.plot_surface = pygame.Surface((width, height))
        plot_buffer = np.frombuffer(self.fig.canvas.buffer_rgba(), np.uint32)
        surf_buffer = np.frombuffer(self.plot_surface.get_buffer(),
                                    dtype=np.int32)
        np.copyto(surf_buffer, plot_buffer)
        return pygame.transform.smoothscale(
            self.plot_surface,
            (self.parameters['width'], self.parameters['height']))
class MyBarPlotter(bar_plotter.BarPlotter):
    """BarPlotter preconfigured with a larger DejaVu Sans font.

    CLEANUP: the original also overrode `draw()` with a pure pass-through to
    the superclass; that redundant override was removed (the inherited
    implementation is identical in behavior).
    """
    def __init__(self, parameter_dict):
        defaults = {'font': {
                        'family': 'DejaVu Sans',
                        'weight': 'regular',
                        'size': 26 },
                    }
        defaults.update(parameter_dict)
        super(MyBarPlotter, self).__init__(parameter_dict = defaults)
| 16,667 | 5,100 |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
class Client(BaseClient):
    """Thin Triage API client over CommonServerPython's BaseClient."""
    # The override only forwards its arguments to BaseClient; it is kept as a
    # project-local hook point for future customization.
    def __init__(self, base_url, *args, **kwarg):
        super().__init__(base_url, *args, **kwarg)
def test_module(client: Client) -> str:
r = client._http_request(
"GET", "users", resp_type="response", ok_codes=(200, 401, 404)
)
if r.status_code == 404:
return "Page not found, possibly wrong base_url"
if r.status_code == 401:
return "Bad API Key"
return "ok"
def query_samples(client, **args) -> CommandResults:
    """List samples, optionally restricted to a subset (e.g. owned/public)."""
    response = client._http_request(
        "GET", "samples", params={"subset": args.get("subset")}
    )
    return CommandResults(
        outputs_prefix="Triage.samples",
        outputs_key_field="id",
        outputs=response["data"],
    )
def submit_sample(client: Client, **args) -> CommandResults:
    """Submit a URL or an uploaded file to Triage for analysis.

    args:
      kind: 'url' or 'file'.
      data: the URL itself, or a War Room file entry ID when kind == 'file'.
      profiles: optional comma-separated analysis profile names.
    """
    data = {"kind": args.get("kind"), "interactive": False}
    if args.get("profiles", []):
        # Each requested profile is applied to the sample itself.
        profiles_data = []
        for i in args.get("profiles", "").split(","):
            profiles_data.append({"profile": i, "pick": "sample"})
        data["profiles"] = profiles_data
    if data["kind"] == "url":
        data.update({"url": args.get("data")})
        r = client._http_request("POST", "samples", json_data=data)
    elif data["kind"] == "file":
        # Resolve the War Room entry ID to a local file path, then upload it.
        file_path = demisto.getFilePath(demisto.args().get("data")).get("path")
        with open(file_path, "rb") as f:
            files = {"file": f}
            r = client._http_request("POST", "samples", json_data=data, files=files)
    else:
        # return_error reports the problem and halts execution (demisto
        # convention), so `r` is never read on this path.
        return_error(
            f'Type of sample needs to be selected, either "file" or "url", the selected type was: {data["kind"]}'
        )
    results = CommandResults(
        outputs_prefix="Triage.submissions", outputs_key_field="id", outputs=r
    )
    return results
def get_sample(client: Client, **args) -> CommandResults:
    """Fetch a single sample record by its ID."""
    response = client._http_request("GET", f'samples/{args.get("sample_id")}')
    return CommandResults(
        outputs_prefix="Triage.samples", outputs_key_field="id", outputs=response
    )
def get_sample_summary(client: Client, **args) -> CommandResults:
    """Fetch the short analysis summary of a sample."""
    response = client._http_request(
        "GET", f'samples/{args.get("sample_id")}/summary'
    )
    return CommandResults(
        outputs_prefix="Triage.sample-summaries",
        outputs_key_field="sample",
        outputs=response,
    )
def delete_sample(client: Client, **args) -> str:
    """Delete the sample identified by sample_id from Triage."""
    sid = args.get("sample_id")
    client._http_request("DELETE", f"samples/{sid}")
    return f"Sample {sid} successfully deleted"
def set_sample_profile(client: Client, **args) -> str:
    """Move a submitted sample from static analysis to behavioural analysis.

    Posts the profile selection the sample should run under.
    """
    sample_id = args.get("sample_id")
    payload = {
        "auto": argToBoolean(args.get("auto", True)),
        "pick": argToList(args.get("pick", [])),
    }
    if args.get("profiles"):
        payload["profiles"] = [{"profile": args.get("profiles", "")}]
    client._http_request(
        "POST", f"samples/{sample_id}/profile", data=json.dumps(payload)
    )
    return f"Profile successfully set for sample {sample_id}"
def get_static_report(client: Client, **args) -> CommandResults:
    """Fetch the static-analysis report for a given sample."""
    sample_id = args.get("sample_id")
    report = client._http_request("GET", f"samples/{sample_id}/reports/static")
    return CommandResults(
        outputs_prefix="Triage.sample.reports.static",
        outputs_key_field="sample.sample",
        outputs=report,
    )
def get_report_triage(client: Client, **args) -> CommandResults:
    """Fetch a task's triage report; its score can be mapped to a DBot score."""
    sample_id = args.get("sample_id")
    task_id = args.get("task_id")
    report = client._http_request(
        "GET", f"samples/{sample_id}/{task_id}/report_triage.json"
    )
    return CommandResults(
        outputs_prefix="Triage.sample.reports.triage",
        outputs_key_field="sample.id",
        outputs=report,
    )
def get_kernel_monitor(client: Client, **args) -> dict:
    """Download the kernel-monitor (onemon) log and return it as a file result."""
    sample_id = args.get("sample_id")
    task_id = args.get("task_id")
    log_text = client._http_request(
        "GET", f"samples/{sample_id}/{task_id}/logs/onemon.json", resp_type="text"
    )
    return_results("Kernel monitor results:")
    return fileResult(f"{sample_id}-{task_id}-kernel-monitor.json", log_text)
def get_pcap(client: Client, **args) -> dict:
    """Download a task's network capture as a .pcap file result."""
    sample_id = args.get("sample_id")
    task_id = args.get("task_id")
    response = client._http_request(
        "GET", f"samples/{sample_id}/{task_id}/dump.pcap", resp_type="response"
    )
    return_results("PCAP results:")
    return fileResult(f"{sample_id}.pcap", response.content)
def get_dumped_files(client: Client, **args) -> dict:
    """Download a file that was dumped during the analysis of a task."""
    sample_id = args.get("sample_id")
    task_id = args.get("task_id")
    file_name = args.get("file_name")
    content = client._http_request(
        "GET", f"samples/{sample_id}/{task_id}/{file_name}", resp_type="content"
    )
    return fileResult(f"{file_name}", content)
def get_users(client: Client, **args) -> CommandResults:
    """Fetch all users, or a single one when userID is supplied."""
    user_id = args.get("userID")
    url_suffix = f"users/{user_id}" if user_id else "users"
    response = client._http_request("GET", url_suffix)
    # The list endpoint nests its payload under 'data'; the single-user
    # endpoint returns the object directly.
    if response.get("data"):
        response = response["data"]
    return CommandResults(
        outputs_prefix="Triage.users", outputs_key_field="id", outputs=response
    )
def create_user(client: Client, **args) -> CommandResults:
    """Create a new Triage user from the supplied attributes."""
    payload = json.dumps({
        "username": args.get("username"),
        "first_name": args.get("firstName"),
        "last_name": args.get("lastName"),
        "password": args.get("password"),
        "permissions": argToList(args.get("permissions")),
    })
    response = client._http_request("POST", "users", data=payload)
    return CommandResults(
        outputs_prefix="Triage.users", outputs_key_field="id", outputs=response
    )
def delete_user(client: Client, **args) -> str:
    """Delete the user identified by userID."""
    client._http_request("DELETE", f'users/{args.get("userID")}')
    return "User successfully deleted"
def create_apikey(client: Client, **args) -> CommandResults:
    """Create a named API key for the given user."""
    user_id = args.get("userID")
    payload = json.dumps({"name": args.get("name")})
    response = client._http_request("POST", f"users/{user_id}/apikeys", data=payload)
    return CommandResults(
        outputs_prefix="Triage.apikey", outputs_key_field="key", outputs=response
    )
def get_apikey(client: Client, **args) -> CommandResults:
    """List the API keys belonging to the given user."""
    response = client._http_request("GET", f'users/{args.get("userID")}/apikeys')
    return CommandResults(
        outputs_prefix="Triage.apikey",
        outputs_key_field="key",
        outputs=response.get("data"),
    )
def delete_apikey(client: Client, **args) -> str:
    """Delete one API key (by name) belonging to the given user."""
    user_id = args.get("userID")
    key_name = args.get("name")
    client._http_request("DELETE", f"users/{user_id}/apikeys/{key_name}")
    return f"API key {key_name} was successfully deleted"
def get_profile(client: Client, **args) -> CommandResults:
    """Fetch all analysis profiles, or a single one when profileID is given."""
    profile_id = args.get("profileID")
    url_suffix = f"profiles/{profile_id}" if profile_id else "profiles"
    response = client._http_request("GET", url_suffix)
    # Only the list endpoint nests its payload under 'data'.
    if not profile_id and response.get("data"):
        response = response["data"]
    return CommandResults(
        outputs_prefix="Triage.profiles", outputs_key_field="id", outputs=response
    )
def create_profile(client: Client, **args) -> CommandResults:
    """Create an analysis profile (name, tags, timeout, network, browser)."""
    payload = json.dumps({
        "name": args.get("name"),
        "tags": argToList(args.get("tags")),
        "timeout": int(args.get("timeout", 120)),
        "network": args.get("network"),
        "browser": args.get("browser"),
    })
    response = client._http_request("POST", "profiles", data=payload)
    return CommandResults(
        outputs_prefix="Triage.profiles", outputs_key_field="id", outputs=response
    )
def update_profile(client: Client, **args) -> str:
    """Update an existing analysis profile.

    Only `timeout` (coerced to int) and `tags` (comma-separated -> list) are
    forwarded; all other keys in `args` (including profileID itself) are
    ignored, as in the original loop. NOTE(review): confirm whether
    name/network/browser were also meant to be updatable.
    """
    profileID = args.get("profileID")
    data = {}
    for arg in args:
        if arg == "timeout":
            # BUG FIX: the original had a second, duplicate `timeout` branch
            # that clobbered this int coercion with the raw string value;
            # removed so the API receives an integer (as create_profile does).
            data[arg] = int(args.get(arg, 60))
        elif arg == "tags":
            data[arg] = argToList(args.get(arg))
    client._http_request("PUT", f"profiles/{profileID}", data=json.dumps(data))
    return "Profile updated successfully"
def delete_profile(client: Client, **args) -> str:
    """Delete the analysis profile identified by profileID."""
    profile_id = args.get("profileID")
    client._http_request("DELETE", f"profiles/{profile_id}")
    return f"Profile {profile_id} successfully deleted"
def main():
    """Dispatch the invoked XSOAR command to its handler function."""
    params = demisto.params()
    args = demisto.args()
    client = Client(
        params.get("base_url"),
        verify=params.get("Verify SSL"),
        headers={"Authorization": f'Bearer {params.get("API Key")}'},
    )
    # Command name -> handler; every handler takes (client, **args).
    handlers = {
        "test-module": test_module,
        "triage-query-samples": query_samples,
        "triage-submit-sample": submit_sample,
        "triage-get-sample": get_sample,
        "triage-get-sample-summary": get_sample_summary,
        "triage-delete-sample": delete_sample,
        "triage-set-sample-profile": set_sample_profile,
        "triage-get-static-report": get_static_report,
        "triage-get-report-triage": get_report_triage,
        "triage-get-kernel-monitor": get_kernel_monitor,
        "triage-get-pcap": get_pcap,
        "triage-get-dumped-file": get_dumped_files,
        "triage-get-users": get_users,
        "triage-create-user": create_user,
        "triage-delete-user": delete_user,
        "triage-create-api-key": create_apikey,
        "triage-get-api-key": get_apikey,
        "triage-delete-api-key": delete_apikey,
        "triage-get-profiles": get_profile,
        "triage-create-profile": create_profile,
        "triage-update-profile": update_profile,
        "triage-delete-profile": delete_profile,
    }
    command = demisto.command()
    handler = handlers.get(command)
    if handler is not None:
        return_results(handler(client, **args))  # type: ignore
    else:
        return_error(f"Command {command} is not available in this integration")


if __name__ in ["__main__", "__builtin__", "builtins"]:
    main()
| 10,830 | 3,517 |
# Common vocabulary qualifiers, written as "<source>:<id type>" strings, used
# to qualify concept identifiers coming from different databases.
NCBI_GeneID = "NCBI:GeneID"
HGNC_GeneName = "HGNC:Symbol"
UMLS_ConceptID = "UMLS:CUI"
MedGen_ConceptID= "MedGen:UID"
class Vocab:
    """Annotator vocabulary source and type of concept.

    Examples: ncbi:GeneID, hgnc:GeneName, snomed:ClinicalTerm
    """

    def __init__(self, qualified_label, entry=None):
        self.qualified_label = qualified_label  # "<source>:<id type>" string
        self.entry = entry  # optional concrete identifier/value

    def __str__(self):
        # With a (truthy) entry, render as a one-item mapping; otherwise just
        # render the qualified label itself.
        return str({self.qualified_label: self.entry}) if self.entry else str(self.qualified_label)
"""
Mixin that overrides Pandas functions to retype.
"""
import pandas as pd
from pandas.core.frame import DataFrame as _InternalDataFrame
from typeddfs.df_errors import UnsupportedOperationError
class _RetypeMixin:
def __add__(self, other):
x = super().__add__(other)
return self._change_if_df(x)
def __radd__(self, other):
x = super().__radd__(other)
return self._change_if_df(x)
def __sub__(self, other):
x = super().__sub__(other)
return self._change_if_df(x)
def __rsub__(self, other):
x = super().__rsub__(other)
return self._change_if_df(x)
def __mul__(self, other):
x = super().__mul__(other)
return self._change_if_df(x)
def __rmul__(self, other):
x = super().__rmul__(other)
return self._change_if_df(x)
def __truediv__(self, other):
x = super().__truediv__(other)
return self._change_if_df(x)
def __rtruediv__(self, other):
x = super().__rtruediv__(other)
return self._change_if_df(x)
def __divmod__(self, other):
x = super().__divmod__(other)
return self._change_if_df(x)
def __rdivmod__(self, other):
x = super().__rdivmod__(other)
return self._change_if_df(x)
def __mod__(self, other):
x = super().__mod__(other)
return self._change_if_df(x)
def __rmod__(self, other):
x = super().__rmod__(other)
return self._change_if_df(x)
def __pow__(self, other):
x = super().__pow__(other)
return self._change_if_df(x)
def __rpow__(self, other):
x = super().__rpow__(other)
return self._change_if_df(x)
def drop_duplicates(self, **kwargs) -> __qualname__:
self._no_inplace(kwargs)
return self.__class__._change(super().drop_duplicates(**kwargs))
def reindex(self, *args, **kwargs) -> __qualname__:
self._no_inplace(kwargs)
return self.__class__._change(super().reindex(*args, **kwargs))
def sort_values(self, *args, **kwargs) -> __qualname__:
self._no_inplace(kwargs)
df = super().sort_values(*args, **kwargs)
return self.__class__._change(df)
def reset_index(self, *args, **kwargs) -> __qualname__:
self._no_inplace(kwargs)
df = super().reset_index(*args, **kwargs)
return self.__class__._change(df)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
) -> __qualname__:
self._no_inplace(dict(inplace=inplace))
if len(keys) == 0 and append:
return self
elif len(keys) == 0:
return self.__class__._change(super().reset_index(drop=drop))
df = super().set_index(
keys=keys,
drop=drop,
append=append,
inplace=inplace,
verify_integrity=verify_integrity,
)
return self.__class__._change(df)
# noinspection PyFinal
def asfreq(self, *args, **kwargs) -> __qualname__:
return super().asfreq(*args, **kwargs)
# noinspection PyFinal
def shift(self, *args, **kwargs) -> __qualname__:
return super().shift(*args, **kwargs)
# noinspection PyFinal
def tz_localize(self, *args, **kwargs) -> __qualname__:
return super().tz_localize(*args, **kwargs)
# noinspection PyFinal
def tz_convert(self, *args, **kwargs) -> __qualname__:
return super().tz_convert(*args, **kwargs)
# noinspection PyFinal
def to_timestamp(self, *args, **kwargs) -> __qualname__:
return super().to_timestamp(*args, **kwargs)
# noinspection PyFinal
def to_period(self, *args, **kwargs) -> __qualname__:
return super().to_period(*args, **kwargs)
    # noinspection PyFinal
    def convert_dtypes(self, *args, **kwargs) -> __qualname__:
        # Re-typed: dtype conversion preserves the subclass contract.
        df = super().convert_dtypes(*args, **kwargs)
        return self.__class__._change(df)

    # noinspection PyFinal
    def infer_objects(self, *args, **kwargs) -> __qualname__:
        df = super().infer_objects(*args, **kwargs)
        return self.__class__._change(df)

    def dropna(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().dropna(*args, **kwargs)
        return self.__class__._change(df)

    def fillna(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().fillna(*args, **kwargs)
        return self.__class__._change(df)

    # noinspection PyFinal
    def copy(self, deep: bool = False) -> __qualname__:
        # Note: default deep=False differs from pandas' default (deep=True).
        df = super().copy(deep=deep)
        return self.__class__._change(df)
    def assign(self, **kwargs) -> __qualname__:
        # Drop down to a vanilla frame first (vanilla_reset), assign the new
        # columns there, then re-type the result.
        df = self.vanilla_reset()
        df = df.assign(**kwargs)
        return self.__class__._change(df)

    def append(self, *args, **kwargs) -> __qualname__:
        df = super().append(*args, **kwargs)
        return self.__class__._change(df)

    def transpose(self, *args, **kwargs) -> __qualname__:
        df = super().transpose(*args, **kwargs)
        return self.__class__._change(df)

    def truncate(self, *args, **kwargs) -> __qualname__:
        df = super().truncate(*args, **kwargs)
        return self.__class__._change(df)
    # noinspection PyFinal
    def ffill(self, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().ffill(**kwargs)
        return self.__class__._change(df)

    # noinspection PyFinal
    def bfill(self, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().bfill(**kwargs)
        return self.__class__._change(df)

    # noinspection PyFinal
    def abs(self) -> __qualname__:
        return self.__class__._change(super().abs())

    def rename(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().rename(*args, **kwargs)
        return self.__class__._change(df)

    def replace(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().replace(*args, **kwargs)
        return self.__class__._change(df)

    def applymap(self, *args, **kwargs) -> __qualname__:
        df = super().applymap(*args, **kwargs)
        return self.__class__._change(df)

    def astype(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().astype(*args, **kwargs)
        return self.__class__._change(df)

    def drop(self, *args, **kwargs) -> __qualname__:
        self._no_inplace(kwargs)
        df = super().drop(*args, **kwargs)
        return self.__class__._change(df)
    @classmethod
    def _convert_typed(cls, df: pd.DataFrame):
        # not great, but works ok
        # if this is a BaseDf, use convert
        # otherwise, just use check_and_change
        if hasattr(cls, "convert"):
            return cls.convert(df)
        else:
            return cls._change(df)

    @classmethod
    def _change_if_df(cls, df):
        # Re-type only when the value actually is a DataFrame; scalars and
        # Series from arithmetic ops pass through untouched.
        if isinstance(df, _InternalDataFrame):
            df.__class__ = cls
        return df

    @classmethod
    def _change(cls, df) -> __qualname__:
        # Unconditionally retag the instance's class (no copy is made).
        df.__class__ = cls
        return df

    def _no_inplace(self, kwargs):
        # Guard shared by all overrides: this mixin's API is purely functional.
        if kwargs.get("inplace") is True:  # pragma: no cover
            raise UnsupportedOperationError("inplace not supported. Use vanilla() if needed.")
__all__ = ["_RetypeMixin"]  # public API of this module
| 7,390 | 2,389 |
from django.db import models
# BUG FIX: the list previously exported 'GenericAtiveModel' (typo), which does
# not match the class defined below ('GenericActiveModel'); `from module
# import *` would raise AttributeError.
__all__ = [
    'GenericModel',
    'GenericActiveModel',
]
class GenericModel(models.Model):
    """Abstract base model providing created/updated timestamps."""

    # auto_now_add: set once on insert; auto_now: refreshed on every save().
    created = models.DateTimeField('생성일', auto_now_add=True)
    updated = models.DateTimeField('수정일', auto_now=True)

    class Meta:
        abstract = True
class GenericActiveModel(models.Model):
    """Abstract base model with timestamps plus an is_active flag."""

    # Soft-enable/disable flag; rows default to active.
    is_active = models.BooleanField('활성화 여부', default=True)
    created = models.DateTimeField('생성일', auto_now_add=True)
    updated = models.DateTimeField('수정일', auto_now=True)

    class Meta:
        abstract = True
| 551 | 189 |
# Python 3 moved quote() into urllib.parse; fall back to the Python 2 location.
try:
    from urllib.parse import quote
except ImportError:
    from urllib import quote
| 89 | 25 |
import logging
from tasks.celery import registry_dispatcher_document
logger = logging.getLogger(__name__)
class Depositor(object):
    """Enqueues DOI-registry Celery tasks for documents identified by PID."""

    def deposit_by_pids(self, pids_list):
        """
        Receive a list of pids and collection to registry their dois.

        Each item is expected to be '<collection>_<code>'
        (e.g. 'scl' is presumably a collection acronym — TODO confirm).
        """
        for item in pids_list:
            collection, code = item.split('_')
            registry_dispatcher_document.delay(code, collection)
            logger.info('enqueued deposit for "%s"', item)
| 488 | 142 |
# Dump example.txt as a raw list, then echo only the non-blank lines.
with open('example.txt') as handle:
    contents = handle.readlines()

print(contents)

for current in contents:
    if current == "\n":
        continue
    print(current)
| 181 | 57 |
from pprint import pprint, pformat
import datetime
import os
from sgmock import Fixture
from sgmock import TestCase
# Backend selection: 'mock' (the default) uses sgmock's in-memory Shotgun;
# any other value connects to a real server through the registry.
_shotgun_server = os.environ.get('SHOTGUN', 'mock')
if _shotgun_server == 'mock':
    from sgmock import Shotgun, ShotgunError, Fault
else:
    from shotgun_api3 import ShotgunError, Fault
    import shotgun_api3_registry
    def Shotgun():
        # Factory shadowing the sgmock Shotgun name with the same call shape.
        return shotgun_api3_registry.connect('sgsession.tests', server=_shotgun_server)
from sgsession import Session, Entity
def mini_uuid():
    """Return 8 hex characters derived from 4 random bytes.

    BUG FIX: ``bytes.encode('hex')`` only exists on Python 2 and raises on
    Python 3; ``binascii.hexlify`` behaves identically on both.
    """
    import binascii  # local import keeps the fix self-contained
    return binascii.hexlify(os.urandom(4)).decode('ascii')
def timestamp():
    """Current local time compacted to a YYYYMMDDHHMMSS string."""
    now = datetime.datetime.now()
    return now.strftime('%Y%m%d%H%M%S')
def minimal(entity):
    """Strip an entity mapping down to its identifying type/id pair."""
    return {'type': entity['type'], 'id': entity['id']}
| 698 | 238 |
"""
SimData1.py
Ported to Python by Sandy Barbour - 28/04/2005
Ported to XPPython3 by Peter Buckner - 2-Aug-2020
This example demonstrates how to interact with X-Plane by reading and writing
data. This example creates menus items that change the nav-1 radio frequency.
"""
import xp
class PythonInterface:
    """XPPython3 plugin: adds menu items that change the nav-1 frequency."""

    def XPluginStart(self):
        """Register the menu and cache the nav1 dataref; return identity."""
        self.Name = "SimData1"
        self.Sig = "simData1.demos.xppython3"
        self.Desc = "A plugin that changes sim data."
        mySubMenuItem = xp.appendMenuItem(xp.findPluginsMenu(), "Python - Sim Data 1", 0)
        self.myMenu = xp.createMenu("Sim Data", xp.findPluginsMenu(), mySubMenuItem, self.MyMenuHandlerCallback, 0)
        # The refcon (+/-1000) doubles as the frequency increment in Hz*100.
        xp.appendMenuItem(self.myMenu, "Decrement Nav1", -1000)
        xp.appendMenuItem(self.myMenu, "Increment Nav1", +1000)
        self.DataRef = xp.findDataRef("sim/cockpit/radios/nav1_freq_hz")
        return self.Name, self.Sig, self.Desc

    def XPluginStop(self):
        # Clean up the menu we created in XPluginStart.
        xp.destroyMenu(self.myMenu)

    def XPluginEnable(self):
        return 1

    def XPluginDisable(self):
        pass

    def XPluginReceiveMessage(self, inFromWho, inMessage, inParam):
        pass

    def MyMenuHandlerCallback(self, inMenuRef, inItemRef):
        """
        This is our handler for the menu item. Our inItemRef is the refcon
        we registered in our XPLMAppendMenuItem calls. It is either +1000 or
        -1000 depending on which menu item is picked.
        """
        if (self.DataRef != 0):
            # We read the data ref, add the increment and set it again.
            # This changes the nav frequency.
            xp.setDatai(self.DataRef, xp.getDatai(self.DataRef) + inItemRef)
| 1,729 | 590 |
import os
from chimera_app.settings import Settings
import chimera_app.context as context
from chimera_app.ftp.server import Server as FTPServer
from chimera_app.authenticator import Authenticator, generate_password
from chimera_app.ssh_keys import SSHKeys
from chimera_app.steamgrid.steamgrid import Steamgrid
from chimera_app.streaming import StreamServer
from chimera_app.mangohud_config import MangoHudConfig
# Resource lookup: prefer the working directory, fall back to the system path.
RESOURCE_DIR = os.getcwd()
if not os.path.isfile(os.path.join(RESOURCE_DIR, 'views/base.tpl')):
    RESOURCE_DIR = "/usr/share/chimera"

AUTHENTICATOR_PATH = os.path.abspath('bin/chimera-authenticator')
if not os.path.isfile(AUTHENTICATOR_PATH):
    AUTHENTICATOR_PATH = "/usr/share/chimera/bin/chimera-authenticator"

# XDG-style data/config/cache locations, resolved via chimera_app.context.
SHORTCUT_DIR = context.SHORTCUT_DIRS
BANNER_DIR = context.DATA_HOME + '/chimera/banners'
CONTENT_DIR = context.DATA_HOME + '/chimera/content'
RECORDINGS_DIR = context.DATA_HOME + '/chimera/recordings'
SETTINGS_DIR = context.CONFIG_HOME + '/chimera'
UPLOADS_DIR = os.path.join(context.CACHE_HOME, 'chimera', 'uploads')
MANGOHUD_DIR = context.CONFIG_HOME + "/MangoHud"

# Map of platform slug -> human-readable display name.
PLATFORMS = {
    "32x": "32X",
    "3do": "3DO",
    "arcade": "Arcade",
    "atari-2600": "Atari 2600",
    "dreamcast": "Dreamcast",
    "epic-store": "Epic Games Store",
    "flathub": "Flathub",
    "gb": "Game Boy",
    "gba": "Game Boy Advance",
    "gbc": "Game Boy Color",
    "gc": "GameCube",
    "gog": "GOG",
    "sgg": "Game Gear",
    "genesis": "Genesis",
    "jaguar": "Jaguar",
    "sms": "Master System",
    "neo-geo": "Neo Geo",
    "nes": "Nintendo",
    "n64": "Nintendo 64",
    "ps1": "PlayStation",
    "ps2": "PlayStation 2",
    "psp": "PlayStation Portable",
    "saturn": "Saturn",
    "sega-cd": "Sega CD",
    "snes": "Super Nintendo",
    "tg-16": "TurboGrafx-16"
}

# Defaults applied when the settings file has no value for a key.
# Note: ftp_password is generated fresh at import time.
SETTINGS_DEFAULT = {
    "enable_ftp_server": False,
    "ftp_username": "gamer",
    "ftp_password": generate_password(12),
    "ftp_port": 2121,
    "keep_password": False,
    "recordings_dir": RECORDINGS_DIR,
    "sls_conf_file": RESOURCE_DIR + "/config/sls.conf",
    "ffmpeg_inputs":
        ["-f x11grab -i :0",
         "-f alsa -i pulse"],
    "ffmpeg_vcodec": "",
    "ffmpeg_acodec": ""
}

# Bottle/Beaker session configuration (in-memory, 2-hour timeout).
SESSION_OPTIONS = {
    'session.cookie_expires': True,
    'session.httponly': True,
    'session.timeout': 3600 * 2,
    'session.type': 'memory',
    'session.validate_key': True,
}

# Module-level singletons shared across the app (created at import time).
SETTINGS_HANDLER = Settings(SETTINGS_DIR, SETTINGS_DEFAULT)
AUTHENTICATOR = Authenticator(AUTHENTICATOR_PATH, password_length=8)
FTP_SERVER = FTPServer(SETTINGS_HANDLER)
SSH_KEY_HANDLER = SSHKeys(os.path.expanduser('~/.ssh/authorized_keys'))
STEAMGRID_HANDLER = Steamgrid("f092e3045f4f041c4bf8a9db2cb8c25c")
STREAMING_HANDLER = StreamServer(SETTINGS_HANDLER)
MANGOHUD_HANDLER = MangoHudConfig(MANGOHUD_DIR)
| 2,964 | 1,200 |
import time
import playsound
import os
import random
from gtts.lang import tts_langs
from deep_translator import (GoogleTranslator)
import pickle
import json
# my classes
import random_test
import mike
import lang_tests
import level_one
import level_two
import level_three
import extras
# game progress vars
# Per-topic completion flags: "0" = not done, "1" = done (kept as strings).
parts = {"l1_colours": "0", "l1_numbers": "0", "l1_animals": "0", "l2_greetings": "0", "l2_transport": "0",
         "l2_sports": "0", "l3_food": "0", "l3_clothes": "0", "l3_buildings": "0"}
# Whole save-game state; every value is stored as a string so it survives
# the pickle/JSON round-trips used by save_progress/check_saves.
progress = {"name": "", "language": "",
            "level": "0", "lvlpts": "0", "partsDone": parts, "points": "0", "saveTime": "0"}
def set_l1(p1=False, p2=False, p3=False, all=False):
    """Mark level-one topics done; `all` forces every topic to '1'."""
    if all:
        for topic in ('l1_colours', 'l1_animals', 'l1_numbers'):
            parts[topic] = '1'
    else:
        for topic, flag in (('l1_colours', p1), ('l1_animals', p2), ('l1_numbers', p3)):
            if flag:
                parts[topic] = flag
def set_l2(p1=False, p2=False, p3=False, all=False):
    """Mark level-two topics done; `all` forces every topic to '1'."""
    if all:
        for topic in ('l2_greetings', 'l2_transport', 'l2_sports'):
            parts[topic] = '1'
    else:
        for topic, flag in (('l2_greetings', p1), ('l2_transport', p2), ('l2_sports', p3)):
            if flag:
                parts[topic] = flag
def set_l3(p1=False, p2=False, p3=False, all=False):
    """Mark level-three topics done; `all` forces every topic to '1'."""
    if all:
        for topic in ('l3_food', 'l3_clothes', 'l3_buildings'):
            parts[topic] = '1'
    else:
        for topic, flag in (('l3_food', p1), ('l3_clothes', p2), ('l3_buildings', p3)):
            if flag:
                parts[topic] = flag
# LANG METHODS
def get_name():
    """Ask the player for their name over voice and confirm it aloud."""
    mike.mike('Welcome to uno lingo. What is your name?')
    name = mike.record_audio()
    print(name)
    while True:
        reply = mike.record_audio(
            "Your name is " + name + ". Is that correct? Say yes to confirm")
        if "yes" in reply:
            break
        name = mike.record_audio('What is your name?')
        print(name)
    mike.mike('Nice to meet you, ' + name + '.')
    mike.mike("My name is Mike.")
    return name
def choose_language():
    """List TTS-capable languages and voice-prompt the player to pick one.

    Returns the short language code, or 0 when lookup fails.
    NOTE(review): the bare except returns 0 on *any* error inside the loop,
    and the while flags never terminate without a confirmed choice — confirm
    this is the intended flow.
    """
    valid = False
    chosen = False
    langs_dict = GoogleTranslator.get_supported_languages(as_dict=True)
    print("Languages available: ")
    for key in langs_dict:
        # Only offer languages that gTTS can actually speak.
        if langs_dict[key] in tts_langs():
            print(key)
    while valid == False:
        while chosen == False:
            choice = mike.record_audio("Please select a new language to learn")
            try:
                languageToLearn = extras.get_language_short(choice)
                if languageToLearn in tts_langs():
                    valid = True
                if valid == True:
                    response = mike.record_audio(
                        "You have chosen " + choice+", say yes to start learning this language.")
                    if 'yes' in response:
                        chosen = True
                        return languageToLearn
            except:
                return 0
# SETUP - LEVELS
def get_level():
    """Determine the player's starting level via a placement test or by asking.

    Returns the level as a string ('1'-'3'), matching how `progress` stores
    all of its values.
    """
    response = ''
    name = progress['name']
    mike.mike(
        name + ", would you like to take a test to find out what language level you are on?")
    while len(response) < 1:
        response = mike.record_audio()
    if 'yes' in response:
        level = test_Level()
    else:
        # Treat anything that isn't 'yes' as 'no' so `level` is always bound
        # (previously an unmatched answer crashed with UnboundLocalError).
        mike.mike('What level are you on?')
        level = int(mike.record_audio())
        while level > 3 or level < 1:
            # BUG FIX: the re-recorded answer must be converted to int before
            # the numeric comparison (it previously stayed a string).
            # Also fixed the prompt: valid levels are one to three, not four.
            level = int(mike.record_audio(
                "I'm sorry, please choose a level between one and three."))
    # BUG FIX: set_parts() compares against the strings '1'/'2'/'3', so the
    # level must be passed as a string for the flags to be initialised.
    set_parts(str(level))
    progress['partsDone'] = parts
    return str(level)
def test_Level():
    """Run placement tests and announce the resulting level (1-3)."""
    score = lang_tests.test_l1(progress['language'])
    if score >= 70:
        score = lang_tests.test_l2(progress['language'])
        level = 3 if score >= 70 else 2
    elif score >= 40:
        level = 2
    else:
        level = 1
    mike.mike("The tests have determined you are at level " + str(level))
    return level
# PLAY - LEVELS
def begin_level():
    """Dispatch to the loader that matches the player's current level."""
    loaders = {'1': load_lvl_1, '2': load_lvl_2, '3': load_lvl_3}
    loader = loaders.get(progress['level'])
    if loader is not None:
        loader()
    return 0
def set_parts(lvl, p1=False, p2=False, p3=False):
    """Initialise topic-completion flags for the given level.

    Earlier levels are marked fully complete; the current level's topics
    take the supplied flags.
    """
    # BUG FIX: callers pass the level both as int (get_level) and as str
    # (easy_set); normalise so the string comparisons below always work.
    lvl = str(lvl)
    if lvl == "1":
        set_l1(p1, p2, p3)
    elif lvl == "2":
        set_l1(all="1")
        set_l2(p1, p2, p3)
    elif lvl == "3":
        set_l1(all="1")
        set_l2(all="1")
        set_l3(p1, p2, p3)
def part_choice(lvl):
    """Voice-prompt the player to pick a not-yet-finished topic for level `lvl`.

    Returns the topic index (0-2), or 10 when every topic is already done.
    Raises ValueError for an unknown level or unrecognisable choice.
    """
    user_input = " "
    top = []
    topicStr = ""
    validChoice = False
    if lvl == '1':
        top.append(progress['partsDone']['l1_colours'])
        top.append(progress['partsDone']['l1_animals'])
        top.append(progress['partsDone']['l1_numbers'])
        topics = ["colours", "animals", "numbers"]
    elif lvl == '2':
        top.append(progress['partsDone']['l2_greetings'])
        top.append(progress['partsDone']['l2_transport'])
        top.append(progress['partsDone']['l2_sports'])
        topics = ["greetings", "transport", "sports"]
    elif lvl == '3':
        print("level: ", lvl)
        top.append(progress['partsDone']['l3_food'])
        top.append(progress['partsDone']['l3_clothes'])
        top.append(progress['partsDone']['l3_buildings'])
        topics = ["food", "clothes", "buildings"]
    else:
        # BUG FIX: this branch was a bare `Exception` expression, which is a
        # no-op; raise so a bad level fails loudly instead of crashing later.
        raise ValueError("invalid level: " + str(lvl))
    # Collect the topics still marked "0" (not done) into a spoken menu.
    topic = []
    j = 0
    while j < 3:
        if top[j] == "0":
            topic.append(str(topics[j]))
            topicStr = topicStr + str(topics[j]) + ", "
        j = j + 1
    topicStr = topicStr[:-2]
    if topicStr == "":
        return 10
    user_input = mike.record_audio(
        "Please select a topic. Your options include " + topicStr)
    while validChoice == False:
        print("You said: " + user_input)
        if user_input == "":
            user_input = " "
        if user_input in topicStr:
            validChoice = True
        else:
            if user_input == " ":
                user_input = mike.record_audio(
                    "That is not a valid option. Please select one of the following: " + topicStr)
            else:
                user_input = mike.record_audio(
                    user_input + " is not a valid option. Please select one of the following: " + topicStr)
    # Map the confirmed choice back to its topic index.
    x = 0
    while x < len(topics):
        if user_input in str(topics[x]):
            return x
        x = x + 1
    # BUG FIX: previously fell through to `return part_no` with part_no
    # possibly unbound (UnboundLocalError); raise a clear error instead.
    raise ValueError("unrecognised topic: " + user_input)
# Easiest level, teach colours, numbers and animals
def load_lvl_1():
    """Run level one: loop over remaining topics, then hand off to level two."""
    save_progress()
    mike.mike("\n\nLevel One")
    print("---------------------------------------------------------")
    load_this = part_choice('1')
    if load_this == 0:
        l1_colours()
        load_lvl_1()
    if load_this == 1:
        l1_animals()
        load_lvl_1()
    if load_this == 2:
        l1_numbers()
        load_lvl_1()
    if load_this == 10:
        # 10 means every topic is done — advance to the next level.
        mike.mike("\n\nCongratulations, you have finished level one!")
        print("Total points this level: ", progress['points'])
        load_lvl_2()

def l1_numbers():
    """Play the level-one numbers lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_one.numbers(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l1_numbers'] = "1"
    progress['points'] = points

def l1_animals():
    """Play the level-one animals lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_one.animals(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l1_animals'] = "1"
    progress['points'] = points

def l1_colours():
    """Play the level-one colours lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_one.colours(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l1_colours'] = "1"
    progress['points'] = points
# Slightly more difficult level, teach more complex words and some adjectives
def load_lvl_2():
    """Run level two: loop over remaining topics, then hand off to level three."""
    save_progress()
    mike.mike("\n\nLevel Two")
    print("---------------------------------------------------------")
    load_this = part_choice('2')
    if load_this == 0:
        l2_greetings()
        load_lvl_2()
    if load_this == 1:
        l2_transport()
        load_lvl_2()
    if load_this == 2:
        l2_sports()
        load_lvl_2()
    if load_this == 10:
        mike.mike("\n\nCongratulations, you have finished level two!")
        print("Total points this level: ", progress['points'])
        load_lvl_3()

# Medium level, teach basic sentences
def l2_transport():
    """Play the level-two transport lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_two.transport(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l2_transport'] = "1"
    progress['points'] = points

def l2_sports():
    """Play the level-two sports lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_two.sports(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l2_sports'] = "1"
    progress['points'] = points

def l2_greetings():
    """Play the level-two greetings lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_two.greetings(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l2_greetings'] = "1"
    progress['points'] = points
def load_lvl_3():
    """Run level three: loop over remaining topics until all are done."""
    save_progress()
    mike.mike("\n\nLevel Three")
    print("---------------------------------------------------------")
    load_this = part_choice('3')
    if load_this == 0:
        l3_food()
        load_lvl_3()
    if load_this == 1:
        l3_clothes()
        load_lvl_3()
    if load_this == 2:
        l3_buildings()
        load_lvl_3()
    if load_this == 10:
        mike.mike("\n\nCongratulations, you have finished level three!")
        print("Total points this level: ", progress['points'])

def l3_food():
    """Play the level-three food lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_three.food(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l3_food'] = "1"
    progress['points'] = points

def l3_clothes():
    """Play the level-three clothes lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_three.clothes(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l3_clothes'] = "1"
    progress['points'] = points

def l3_buildings():
    """Play the level-three buildings lesson and accumulate its points."""
    points = progress['points']
    lan = progress['language']
    newP = level_three.buildings(lan)
    print("---------------------------------------------------------")
    print("Points this level: ", newP,
          "\nPossible points this level: 11")
    points = int(points)+newP
    print("Total points: ", points)
    points = str(points)
    # set this part to done so it won't be available again
    progress['partsDone']['l3_buildings'] = "1"
    progress['points'] = points
def graduate():
    """Congratulate the player and ask whether to start a new game."""
    mike.mike("Congratulations, you have completed the course")
    print("---------------------------------------------------------")
    return mike.record_audio("Say new game to learn a new language.")
# SAVES
def save_progress():
    """Persist `progress` to saves/<name>.txt, replacing any older record
    for the same language."""
    res = load_progress(progress['name'])
    newPrint = res
    # Fingerprint used to detect an existing record for this language.
    check = "'language': " + "'" + str(progress['language'] + "'")
    progress['saveTime'] = extras.make_time()
    if not (check in str(res)):
        # No save for this language yet: append a new pickled record.
        try:
            with open('saves/' + progress['name'] + '.txt', 'ab+') as f:
                pickle.dump(progress, f)
        except OSError:
            # saves/ directory missing — create it and retry once.
            os.makedirs("saves")
            with open('saves/' + progress['name'] + '.txt', 'ab+') as f:
                pickle.dump(progress, f)
    else:
        # Rewrite the file, swapping the stale record for the current state.
        with open('saves/' + progress['name'] + '.txt', 'wb') as f:
            pickle.dump("", f)
        x = 0
        while x < len(newPrint):
            if check in str(newPrint[x]):
                # BUG FIX: the original wrapped this comparison in str(),
                # which made the condition always true regardless of times.
                if progress['saveTime'] >= newPrint[x]['saveTime']:
                    if progress in newPrint:
                        del newPrint[x]
                    else:
                        newPrint[x] = progress
            x = x + 1
        for n in newPrint:
            with open('saves/' + progress['name'] + '.txt', 'ab+') as f:
                pickle.dump(n, f)
def load_progress(n):
    """Read every pickled record from saves/<n>.txt; 0 when unavailable."""
    records = []
    try:
        with open('saves/' + n + '.txt', 'rb') as handle:
            while True:
                try:
                    records.append(pickle.load(handle))
                except EOFError:
                    break
        return records
    except:
        print("no saved file")
        return 0
def check_saves(name, language=False):
    """Inspect saved games for `name`.

    With `language`: return that save's level (also restoring partsDone),
    or 0 on failure.  Without: return the list of saved language codes.
    NOTE(review): the pickle records are round-tripped through str() and
    quote-replacement into JSON — this relies on every value being a string
    and breaks if any value contains a quote; confirm before extending.
    """
    langsSaved = []
    savedP = load_progress(name)
    if language:
        try:
            savedP = str(savedP).replace('\'', '\"')
            ps = json.loads(str(savedP))
            for p in ps:
                if p['language'] == language:
                    level = p['level']
                    progress['partsDone'] = p['partsDone']
                    return level
        except:
            return 0
    else:
        try:
            # Drop empty-string placeholder records before parsing.
            savedP = str(savedP).replace("'',", "")
            savedP = str(savedP).replace('\'', '\"')
            ps = json.loads(str(savedP))
            for p in ps:
                langsSaved.append(p['language'])
            return langsSaved
        except:
            return langsSaved
def choose_save(saves):
    """Voice-prompt the player to pick one of their saved languages and
    restore its level/parts into `progress`."""
    valid = False
    mike.mike("saved languages include ")
    for s in saves:
        mike.mike(extras.get_language_long(s))
    language = mike.record_audio(
        "Which language would you like to continue learning?")
    print("You have chosen: " + language)
    while valid == False:
        try:
            language = extras.get_language_short(language)
        except:
            # Unrecognised language name: give up silently (returns None).
            return
        if language in saves:
            level = check_saves(progress['name'], language)
            progress['language'] = language
            progress['level'] = str(level)
            valid = True
        else:
            language = mike.record_audio(
                "I'm sorry, that is not a valid option. Choose a different language.")
            print("You have chosen: " + language)
# PART METHODS
def startup():
    """Greet the player, then either resume a saved game or start fresh."""
    # get users name
    name = get_name()
    progress['name'] = name
    # check if user has saved file
    saves = check_saves(name)
    if saves:
        answered = False
        answer = ""
        while answered == False:
            answer = mike.record_audio(
                "You have saved progress available. Would you like to continue progress?")
            # NOTE(review): these are two independent ifs — an answer
            # containing both 'yes' and 'no' ends up with progBool False.
            if "yes" in answer:
                answered = True
                progBool = True
            if "no" in answer:
                answered = True
                progBool = False
    else:
        progBool = False
    if progBool == True:
        # if yes - ask which save to load
        choose_save(saves)
        return
    else:
        # ask language to learn
        progress['language'] = choose_language()
        progress['level'] = get_level()
        save_progress()
        return
# TEST METHODS
def print_prog():
    """Dump the key progress fields for debugging."""
    for label, key in (("Name: ", 'name'), ("Language: ", 'language'), ("Level: ", 'level')):
        print(label + progress[key])
    print("Parts: ", progress['partsDone'])
def easy_set(name=False, lang=False, level=False):
    """Testing shortcut: set identity fields, defaulting to John/es/level 1."""
    if name and lang and level:
        progress.update(name=name, language=lang, level=level)
    else:
        progress.update(name="John", language='es', level='1')
    set_parts(progress['level'])
if __name__ == "__main__":
    # Keep starting new games until the player asks to quit.
    answer = "new game"
    while answer == "new game":
        startup()
        begin_level()
        answer = graduate()
        if answer == "quit game":
            # BUG FIX: mike.mike was called with two positional arguments
            # ("Goodbye, ", name) although every other call site passes a
            # single string; build the message before the call.
            mike.mike("Goodbye, " + progress['name'])
            exit()
| 18,234 | 5,625 |
# -*- coding: utf-8 -*-
# import module(s)
from . import pima
from . import iris
from . import mnist
from . import cifar
from . import digits
from . import boston
from . import fashion
from . import data_set
# import from data_utils.py
from .data_set import DataSet
| 268 | 89 |
import heapq
from flask import current_app
from typing import Any, Callable, cast, Dict, List, Tuple, TypeVar # NOQA
Normalized = TypeVar('Normalized')
def shard(objects, max_shards, object_stats, avg_time, normalize_object_name=cast(Callable[[str], Normalized], lambda x: x)):
    # type: (List[str], int, Dict[Normalized, int], int, Callable[[str], Normalized]) -> List[Tuple[int, List[str]]]
    """
    Breaks a set of objects into shards.

    Args:
        objects (list): A list of object names.
        max_shards (int): Maximum amount of shards over which to distribute the objects.
        object_stats (dict): A mapping from normalized object name to duration.
        avg_time (int): Average duration of a single object.
        normalize_object_name (str -> Tuple[str, ...]): a function that normalizes object names.
            This function can return anything, as long as it is consistent with `object_stats`.
    Returns:
        list: Shards. Each element is a pair containing the weight for that
        shard and the object names assigned to that shard.
    """
    def get_object_duration(test_name):
        # type: (str) -> int
        # Fall back to the average duration for objects with no recorded stat.
        normalized = normalize_object_name(test_name)
        result = object_stats.get(normalized)
        if result is None:
            if object_stats:
                current_app.logger.info('No existing duration found for test %r', test_name)
            result = avg_time
        return result
    # don't use more shards than there are objects
    num_shards = min(len(objects), max_shards)
    # Each element is a pair (weight, objects).
    groups = [(0, []) for _ in range(num_shards)]  # type: List[Tuple[int, List[str]]]
    # Groups is already a proper heap, but we'll call this to guarantee it.
    heapq.heapify(groups)
    # Greedy LPT scheduling: heaviest object first onto the lightest shard.
    weighted_tests = [(get_object_duration(t), t) for t in objects]
    for weight, test in sorted(weighted_tests, reverse=True):
        group_weight, group_tests = heapq.heappop(groups)
        # +1 biases against shards with many tiny objects (per-object overhead).
        group_weight += 1 + weight
        group_tests.append(test)
        heapq.heappush(groups, (group_weight, group_tests))
    return groups
| 2,135 | 637 |
from lpd.enums.metric_method import MetricMethod
import torch as T
from lpd.enums.confusion_matrix_based_metric import ConfusionMatrixBasedMetric
class MetricBase:
    """
    Base class for metrics computed per batch.

    Args:
        metric_method - from lpd.enums.MetricMethod, use this to dictate how this metric is behaving over the batches,
        whether its accumulates the MEAN, or the SUM, or taking the LAST value (for example in MetricConfusionMatrixBase)
    """
    def __init__(self, name: str, metric_method: MetricMethod):
        self.name = name
        self.metric_method = metric_method

    def __call__(self, y_pred: T.Tensor, y_true: T.Tensor):
        # Subclasses must compute and return the metric value for one batch.
        raise NotImplementedError('Missing __call__ implementation for metric')
class MetricConfusionMatrixBase(MetricBase):
    """
    Base for metrics derived from a shared confusion matrix.

    confusion_matrix_ is for INTERNAL USE ONLY!
    the confusion matrix is being handled by TrainerStats, that way there is only one
    confusion matrix per State (TRAIN/VAL/TEST).
    TrainerStats will inject the most updated confusion matrix here
    """
    # Class-level slot injected by TrainerStats (shared by all subclasses).
    confusion_matrix_ = None

    def __init__(self, name, num_classes, labels, predictions_to_classes_convertor, threshold):
        # LAST: a confusion-matrix metric reports the latest matrix state,
        # not a mean/sum over batches.
        super(MetricConfusionMatrixBase, self).__init__(name=name, metric_method=MetricMethod.LAST)
        self.num_classes = num_classes
        self.labels = labels
        if self.labels and len(self.labels) != num_classes:
            raise ValueError(f'[{self.name}] - expecting same number for labels as num_classes, but got num_classes = {num_classes}, and {len(self.labels)} labels')
        self.predictions_to_classes_convertor = predictions_to_classes_convertor
        self.threshold = threshold

    def _is_binary(self):
        return MetricConfusionMatrixBase.confusion_matrix_.num_classes == 2

    def get_stats(self, metric: ConfusionMatrixBasedMetric):
        # Collect the requested statistic for every class; binary problems
        # report only the positive class (index 1).
        stats = MetricConfusionMatrixBase.confusion_matrix_.get_stats()
        result_per_class = T.Tensor([stats_per_class[metric] for stats_per_class in stats.values()])
        if self._is_binary():
            return result_per_class[1]
        return result_per_class

    def get_confusion_matrix(self):
        return MetricConfusionMatrixBase.confusion_matrix_.get_confusion_matrix()
| 2,250 | 646 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygame
try:
import pygame._view
except ImportError:
pass
from choice import *
from bgm import *
from dialog import *
from settings import *
from text import *
from log import *
def main():
    """Visual-novel main loop: load assets, then run dialog/choice/render."""
    pygame.init()
    pygame.font.init()
    screen = pygame.display.set_mode((800, 600), 0, 32)
    pygame.display.set_caption('alpha')
    # Show the loading screen while assets stream in.
    imglib = {}
    imglib['load'] = pygame.image.load(resource_path('img/load.png')).convert_alpha()
    screen.blit(imglib['load'], (0, 0))
    pygame.display.update()
    # img.txt / sfx.txt: one "tag path" pair per line.
    imgres = open(resource_path('src/img.txt'), 'r')
    for img in imgres:
        tag, tar = map(str, img.strip().split(' '))
        imglib[tag] = pygame.image.load(resource_path(tar)).convert_alpha()
    sfxlib = {}
    sfxres = open(resource_path('src/sfx.txt'), 'r')
    for sfx in sfxres:
        tag, tar = map(str, sfx.strip().split(' '))
        sfxlib[tag] = resource_path(tar)
    sfplayer = Bgm('')
    ft18 = pygame.font.SysFont('simhei', 18)
    ft24 = pygame.font.SysFont('simhei', 24)
    ftpk = (ft24, ft18)
    setting = Settings(ft18)
    cho = Text(resource_path('src/cho.ga'))
    dia = Text(resource_path('src/dia.ga'))
    dialoglib = {}
    choicelib = {}
    dpos = 'main'
    cpos = '-1'
    pick = -1
    vmode = 0
    '''
    0 = normal
    1 = image
    2 = log
    '''
    clock = pygame.time.Clock()
    san = 0
    ddone = False
    # Parse the dialog script into dialoglib keyed by dialog id.
    if dia.has():
        while True:
            ne = dia.parse()
            if ne[0] == -1:
                break
            elif ne[0] == 0:
                dialoglib[ne[7]] = ne
        ddone = True
    del dia
    # Parse the choice script into choicelib keyed by choice id.
    if cho.has():
        while True:
            ne = cho.parse()
            if ne[0] == -1:
                break
            elif ne[0] == 1:
                choicelib[ne[2]] = ne
    del cho
    if not ddone:
        pygame.quit()
        sys.exit()
    ddone = False
    cdone = False
    ce = []
    log = Log()
    while True:
        # Lazily (re)build the current dialog and its choices when stale.
        if not ddone:
            dg = Dialog(dialoglib[dpos][1], dialoglib[dpos][2], dialoglib[dpos][3],
                        dialoglib[dpos][4], dialoglib[dpos][5], dialoglib[dpos][6],
                        dialoglib[dpos][8], dialoglib[dpos][9])
            log.add(dg.log())
            ddone = True
            cpos = dg.ask()
        if not cdone:
            if cpos != '-1':
                ce = []
                for chi in choicelib[cpos][1]:
                    ce.append(Choice(chi[0], ft18, chi[1], chi[2], chi[3]))
            cdone = True
        (x, y) = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Right click toggles image-only view; wheel toggles the log.
                if event.button == 3:
                    if vmode == 0:
                        vmode = 3
                    elif vmode == 3:
                        vmode = 0
                if event.button == 4:
                    if vmode == 0:
                        vmode = 2
                if event.button == 5:
                    if vmode == 2:
                        vmode = 0
                if event.button == 1:
                    scl = setting.click((x, y), dpos, cpos, san)
                    if scl[0] == 0:
                        #reverse show
                        pass
                    elif scl[0] == 1:
                        #save
                        pass
                    elif scl[0] == 2:
                        #load
                        dg.reset()
                        dpos = scl[1][0]
                        cpos = scl[1][1]
                        san = scl[1][2]
                    if vmode == 0 and scl[0] == -1:
                        if cpos != u'-1':
                            # A choice menu is up: hit-test the option boxes.
                            for c in ce:
                                (lx, ly) = cgetpos(c.id())
                                if (x >= lx and x <= lx + 350 and
                                        y >= ly and y <= ly + 50):
                                    pick = c.id()
                            if pick != -1:
                                pass
                        else:
                            # No choices pending: advance the dialog.
                            if dg.check():
                                if dg.nxt() != '-1':
                                    if dg.nxt() == '-2':
                                        pygame.quit()
                                        sys.exit()
                                    dg.reset()
                                    dpos = dg.next(san)
                                    ce = []
                                    ddone = False
                                    cdone = False
        # ---- render ----
        screen.blit(imglib['bk'], (0, 0))
        if vmode == 0:
            dg.blit(screen, whe(dg.wh()), imglib,
                    sfxlib, sfplayer, pygame.time.get_ticks(), ftpk)
            if len(ce) > 0:
                for c in ce:
                    (lx, ly) = cgetpos(c.id())
                    if (x >= lx and x <= lx + 350 and
                            y >= ly and y <= ly + 50):
                        c.blit(screen, (lx, ly), imglib['chiy'])
                    else:
                        c.blit(screen, (lx, ly), imglib['chin'])
        else:
            dg.showimg(screen, whe(dg.wh()), imglib, False)
        if vmode == 1:
            dg.showimg(screen, whe(dg.wh()), imglib, False)
        elif vmode == 2:
            screen.blit(imglib['lg'], (200, 100))
            log.blit(screen, ft24)
        setting.blit(screen, imglib, (x, y))
        pygame.display.update()
        # Apply a picked choice: log it, jump, adjust sanity, reset state.
        if pick != -1:
            pygame.time.delay(300)
            dg.reset()
            log.add(ce[pick].log())
            dpos = ce[pick].to()
            san += ce[pick].w()
            ddone = False
            cdone = False
            ce = []
            cpos = -1
            pick = -1
        clock.tick(60)
#if python says run, then we should run
if __name__ == '__main__':
    main()
| 5,989 | 1,893 |
import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from torch.nn import PReLU
from torch_geometric.nn import GCNConv, DeepGraphInfomax
from Result import Result
class Encoder(torch.nn.Module):
    """Single-layer GCN encoder with PReLU, used inside DeepGraphInfomax."""

    def __init__(self, hidden, data):
        super(Encoder, self).__init__()
        # cached=True: reuses the normalized adjacency across forward passes
        # (valid because the graph is static here).
        self.conv = GCNConv(data.num_features, hidden, cached=True)
        self.prelu = PReLU(hidden)

    def forward(self, x, edge_index):
        x = self.conv(x, edge_index)
        return self.prelu(x)
def corruption(x, edge_index):
    """DGI corruption: shuffle node features across rows, keep the graph."""
    perm = torch.randperm(x.size(0))
    return x[perm], edge_index
def main_model_dgi(data, hidden, if_all=False):
    """Train Deep Graph Infomax and evaluate with a logistic-regression readout.

    Args:
        data: graph object providing x, edge_index, y, num_class,
            split_train_valid() and mask_train / mask_valid / mask_test.
        hidden (int): embedding dimensionality.
        if_all (bool): if True, return one-hot predictions for every node;
            otherwise return raw class predictions for the test mask only.

    Returns:
        Result: predictions plus train/validation accuracies
        (losses are reported as -1, as before).
    """
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = DeepGraphInfomax(
        hidden_channels=hidden,
        encoder=Encoder(hidden, data),
        summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)),
        corruption=corruption)
    data.split_train_valid()
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
    # Start below any reachable accuracy so `result` is always bound after
    # the first epoch.  The original initialized best_acc_valid to 0 and
    # could leave `result` undefined (NameError) when every validation
    # accuracy was exactly 0.
    best_acc_valid = -1.0
    result = None
    for epoch in range(10):
        model.train()
        optimizer.zero_grad()
        pos_z, neg_z, summary = model(data.x, data.edge_index)
        # Probe current embedding quality with a frozen LR classifier.
        lr = LogisticRegression().fit(
            pos_z[data.mask_train].detach().cpu().numpy().reshape(-1, hidden),
            data.y[data.mask_train].cpu().numpy())
        valid_pred = lr.predict(
            pos_z[data.mask_valid].detach().cpu().numpy().reshape(-1, hidden))
        acc_valid = accuracy_score(data.y[data.mask_valid].cpu().numpy(),
                                   valid_pred)
        if acc_valid > best_acc_valid:
            best_acc_valid = acc_valid
            result = pos_z
        loss = model.loss(pos_z.to(device), neg_z.to(device), summary.to(device))
        loss.backward()
        optimizer.step()
    # Refit the readout on the best embedding seen during training.
    lr = LogisticRegression().fit(
        result[data.mask_train].detach().cpu().numpy().reshape(-1, hidden),
        data.y[data.mask_train].cpu().numpy())
    train_pred = lr.predict(
        result[data.mask_train].detach().cpu().numpy().reshape(-1, hidden))
    all_pred = lr.predict(result.detach().cpu().numpy().reshape(-1, hidden))
    acc_train = accuracy_score(data.y[data.mask_train].cpu().numpy(),
                               train_pred)
    if if_all:
        return Result(
            result=torch.tensor(np.eye(data.num_class)[all_pred]).float().cpu(),
            loss_train=-1,
            loss_valid=-1,
            acc_train=acc_train,
            acc_valid=best_acc_valid,
            epoch=10,
        )
    return Result(
        result=all_pred[data.mask_test],
        loss_train=-1,
        loss_valid=-1,
        acc_train=acc_train,
        acc_valid=best_acc_valid,
        epoch=10,
    )
| 3,045 | 1,006 |
import hashlib
from django.utils import timezone
from account.decorators import super_admin_required
from judge.dispatcher import process_pending_task
from judge.languages import languages, spj_languages
from options.options import SysOptions
from utils.api import APIView, CSRFExemptAPIView, validate_serializer
from .models import JudgeServer
from .serializers import (CreateEditWebsiteConfigSerializer,
CreateSMTPConfigSerializer, EditSMTPConfigSerializer,
JudgeServerHeartbeatSerializer,
JudgeServerSerializer, TestSMTPConfigSerializer)
class SMTPAPI(APIView):
    """Super-admin CRUD for the site's SMTP configuration.

    The password is write-only: it is stripped from GET responses and only
    replaced on PUT when a new one is supplied.
    """
    @super_admin_required
    def get(self, request):
        smtp = SysOptions.smtp_config
        if not smtp:
            return self.success(None)
        # Never leak the password; tolerate configs stored without one
        # (the original smtp.pop("password") raised KeyError in that case).
        smtp.pop("password", None)
        return self.success(smtp)

    @validate_serializer(CreateSMTPConfigSerializer)
    @super_admin_required
    def post(self, request):
        """Replace the whole SMTP config with the validated payload."""
        SysOptions.smtp_config = request.data
        return self.success()

    @validate_serializer(EditSMTPConfigSerializer)
    @super_admin_required
    def put(self, request):
        """Update connection fields; keep the old password unless a new one is sent."""
        smtp = SysOptions.smtp_config
        data = request.data
        for item in ["server", "port", "email", "tls"]:
            smtp[item] = data[item]
        if "password" in data:
            smtp["password"] = data["password"]
        SysOptions.smtp_config = smtp
        return self.success()
class SMTPTestAPI(APIView):
    """Validate a submitted SMTP configuration.

    NOTE(review): no mail is sent here — after serializer validation the
    endpoint always reports success; confirm this stub is intended.
    """
    @super_admin_required
    @validate_serializer(TestSMTPConfigSerializer)
    def post(self, request):
        # Validation happens in the decorator; the body is a stub.
        return self.success({"result": True})
class WebsiteConfigAPI(APIView):
    """Read and write the public website settings stored in SysOptions."""

    def get(self, request):
        """Return the subset of SysOptions shown on the website-config page."""
        option_keys = ("website_base_url", "website_name",
                       "website_name_shortcut", "website_footer",
                       "allow_register", "submission_list_show_all")
        return self.success({key: getattr(SysOptions, key)
                             for key in option_keys})

    @validate_serializer(CreateEditWebsiteConfigSerializer)
    @super_admin_required
    def post(self, request):
        """Persist every validated key/value pair onto SysOptions."""
        for key, value in request.data.items():
            setattr(SysOptions, key, value)
        return self.success()
class JudgeServerAPI(APIView):
    """Super-admin listing and removal of registered judge servers."""

    @super_admin_required
    def get(self, request):
        """Return the shared judge token and all servers, newest heartbeat first."""
        server_qs = JudgeServer.objects.all().order_by("-last_heartbeat")
        payload = {
            "token": SysOptions.judge_server_token,
            "servers": JudgeServerSerializer(server_qs, many=True).data,
        }
        return self.success(payload)

    @super_admin_required
    def delete(self, request):
        """Delete the server matching ?hostname=...; succeed either way."""
        hostname = request.GET.get("hostname")
        if hostname:
            JudgeServer.objects.filter(hostname=hostname).delete()
        return self.success()
class JudgeServerHeartbeatAPI(CSRFExemptAPIView):
    """Receive periodic heartbeats from judge servers.

    Authentication uses a shared token (sha256 of the configured judge
    server token in a request header) rather than a session, hence the
    CSRF exemption.
    """
    @validate_serializer(JudgeServerHeartbeatSerializer)
    def post(self, request):
        data = request.data
        client_token = request.META.get("HTTP_X_JUDGE_SERVER_TOKEN")
        # The client sends sha256(token); compare against our own digest.
        if hashlib.sha256(SysOptions.judge_server_token.encode("utf-8")).hexdigest() != client_token:
            return self.error("Invalid token")
        service_url = data.get("service_url")
        try:
            # Known server: refresh its status fields in place.
            server = JudgeServer.objects.get(hostname=data["hostname"])
            server.judger_version = data["judger_version"]
            server.cpu_core = data["cpu_core"]
            server.memory_usage = data["memory"]
            server.cpu_usage = data["cpu"]
            server.service_url = service_url
            server.ip = request.META["HTTP_X_REAL_IP"]
            server.last_heartbeat = timezone.now()
            server.save()
        except JudgeServer.DoesNotExist:
            # First heartbeat from this hostname: register the server.
            # NOTE(review): the update path reads HTTP_X_REAL_IP while this
            # create path reads REMOTE_ADDR — confirm the asymmetry is intended.
            JudgeServer.objects.create(hostname=data["hostname"],
                                       judger_version=data["judger_version"],
                                       cpu_core=data["cpu_core"],
                                       memory_usage=data["memory"],
                                       cpu_usage=data["cpu"],
                                       ip=request.META["REMOTE_ADDR"],
                                       service_url=service_url,
                                       last_heartbeat=timezone.now(),
                                       )
        # A server just came online/checked in: process queued judge tasks so
        # they do not wait forever when no new submissions arrive.
        process_pending_task()
        return self.success()
class LanguagesAPI(APIView):
    """Expose the supported judge languages and special-judge languages."""
    def get(self, request):
        # Both lists are static configuration from judge.languages.
        return self.success({"languages": languages, "spj_languages": spj_languages})
| 4,496 | 1,262 |
import os
import re
import sys
import shutil
import logging
logger = logging.getLogger(__name__)
import configparser
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from ..utils.obslog import read_obslog
from ..utils.misc import write_system_info
from . import common
from . import (feros, foces, hds, hires, levy, lhrs, sarg, xinglong216hrs)
# Registry of supported pipelines: (module name, telescope, instrument).
# The module name must match one of the pipeline modules imported above;
# dispatch functions select a row by matching the telescope/instrument
# pair found in the local config file.
instrument_lst = [
    ('foces', 'Fraunhofer', 'FOCES'),
    ('xinglong216hrs', 'Xinglong216', 'HRS'),
    ('hires', 'Keck-I', 'HIRES'),
    ('levy', 'APF', 'Levy'),
    ('hds', 'Subaru', 'HDS'),
    ('lhrs', 'LAMOST', 'HRS'),
    ('feros', 'MPG/ESO-2.2m', 'FEROS'),
    ]
def reduce_echelle():
    """Automatically select the instrument and reduce echelle spectra
    accordingly.

    Available instruments include:

    * *FOCES*: FOCES on 2m Fraunhofer Telescope in Wendelstein Observatory,
      Germany.
    * *Xinglong216HRS*: HRS on 2.16m telescope in Xinglong Station, China.
    """
    log_filename = 'gamse.log'

    # format of the running log
    log_fmt = ' '.join(['*',
                        '%(asctime)s.%(msecs)03d',
                        '[%(levelname)s]',
                        '%(name)s - %(lineno)d - %(funcName)s():'+os.linesep,
                        ' %(message)s'+os.linesep+'-'*80,
                        ])

    # if a log file already exists, rename it with its creation time so this
    # run starts with a fresh gamse.log
    if os.path.exists(log_filename):
        time_str = None
        # find the first ISO time string in the old log
        # (use a context manager so the handle is closed even on error;
        # the pattern is a raw string to avoid invalid-escape warnings)
        with open(log_filename) as file1:
            for row in file1:
                mobj = re.search(r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})', row)
                if mobj:
                    time_str = mobj.group()
                    break
        if time_str is None:
            # time string not found: rename to gamse.N.log with the first
            # unused integer N
            i = 1
            while(True):
                newfilename = 'gamse.{}.log'.format(i)
                if os.path.exists(newfilename):
                    i += 1
                    continue
                else:
                    break
        else:
            # time string found: rename to gamse.YYYY-MM-DDTHH-MM-SS.log
            # (':' is not allowed in filenames on some systems)
            time_str = time_str.replace(':', '-')
            newfilename = 'gamse.{}.log'.format(time_str)
        # rename the existing gamse.log file
        shutil.move(log_filename, newfilename)

    # load all *.cfg config files in the current directory
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.cfg')]
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation           = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)

    # the level of the running log depends on the mode in the config;
    # 'normal' and any unknown mode fall back to INFO
    mode = config['reduce']['mode']
    if mode == 'debug':
        level = logging.DEBUG
    else:
        level = logging.INFO

    # initialize the running log
    logging.basicConfig(
            filename = log_filename,
            level    = level,
            format   = log_fmt,
            datefmt  = '%Y-%m-%dT%H:%M:%S',
            )
    logger = logging.getLogger(__name__)

    # write some system info into the running log
    write_system_info()

    # find telescope and instrument from the config file
    section = config['data']
    telescope  = section['telescope']
    instrument = section['instrument']
    logger.info('Start reducing {}, {} data'.format(telescope, instrument))

    for modname, tel, inst in instrument_lst:
        if telescope == tel and instrument == inst:
            # look the pipeline module up by name instead of eval()
            globals()[modname].reduce_rawdata()
            exit()
    print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_obslog():
    """Scan the path to the raw FITS files and generate an observing log.

    Before generating the observing log file, this function scans the local
    directory for *all* files whose names end with ".cfg" and reads them as
    config files.  The config files are used to find the name of the
    instrument the data was obtained with.
    """
    # collect every local config file
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                       if fname.endswith('.cfg')]

    # load ALL local config files at once
    config = configparser.ConfigParser(
        inline_comment_prefixes=(';', '#'),
        interpolation=configparser.ExtendedInterpolation(),
    )
    config.read(config_file_lst)

    # find the telescope and instrument name
    section = config['data']
    telescope = section['telescope']
    instrument = section['instrument']

    # dispatch to the matching instrument pipeline
    for modname, tel, inst in instrument_lst:
        if telescope == tel and instrument == inst:
            eval(modname).make_obslog()
            exit()
    print('Unknown Instrument: {} - {}'.format(telescope, instrument))
def make_config():
    """Print a list of supported instruments and generate a config file
    according to the user's selection.
    """
    # display a list of supported instruments
    print('List of supported instruments:')
    for i, row in enumerate(instrument_lst):
        telescope = row[1]
        instrument = row[2]
        print('[{}] {}/{}'.format(i+1, telescope, instrument))

    # ask until the user enters a valid instrument number.
    # The original accepted any digit string, so "0" silently selected the
    # last instrument (index -1) and large numbers raised IndexError.
    while(True):
        string = input('Select the instrument: ')
        if string.isdigit() and 1 <= int(string) <= len(instrument_lst):
            select = int(string)
            break
        else:
            print('Error: invalid input')
            continue

    # delegate to the chosen pipeline's own make_config()
    modulename = instrument_lst[select-1][0]
    eval(modulename).make_config()
def show_onedspec():
    """Show 1-D spectra in a pop-up window.

    Command-line arguments (``sys.argv[2:]``) are either filenames of 1-D
    spectra, or frame IDs resolved through the observing log.  Use the
    Up/Down keys in the window to step through the echelle orders.
    """
    # load the observing log, if one exists in the current directory
    logname_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.obslog')]
    if len(logname_lst)==0:
        logtable = None
    else:
        logtable = read_obslog(logname_lst[0])

    # load config files in the current directory
    config_file_lst = [fname for fname in os.listdir(os.curdir)
                        if fname.endswith('.cfg')]
    config = configparser.ConfigParser(
                inline_comment_prefixes = (';','#'),
                interpolation           = configparser.ExtendedInterpolation(),
                )
    config.read(config_file_lst)

    filename_lst = []
    for arg in sys.argv[2:]:
        # first, check if argument is a filename.
        if os.path.exists(arg):
            filename_lst.append(arg)
        # if not a filename, try to find the corresponding items in obslog
        else:
            # fall back to the shared loaders in the common module.
            # (The original called the undefined names load_config /
            # load_obslog here, which raised NameError when reached.)
            if config is None:
                config = common.load_config(r'\S*\.cfg$')
            if logtable is None:
                logtable = common.load_obslog(r'\S*\.obslog$')

            # if arg is a number, find the corresponding filename in obslog
            if arg.isdigit():
                arg = int(arg)
                section = config['reduce']
                for logitem in logtable:
                    if arg == logitem['frameid']:
                        # get the path to the 1d spectra
                        odspath = section.get('odspath', None)
                        if odspath is None:
                            odspath = section.get('oned_spec')
                        # get the filename suffix for 1d spectra
                        oned_suffix = config['reduce'].get('oned_suffix')
                        fname = '{}_{}.fits'.format(
                                logitem['fileid'], oned_suffix)
                        filename = os.path.join(odspath, fname)
                        if os.path.exists(filename):
                            filename_lst.append(filename)
                        break

    if len(filename_lst)==0:
        # nothing to plot
        exit()

    spec_lst = []
    for filename in filename_lst:
        data = fits.getdata(filename)

        # determine the column name of flux that will be shown
        if 'flux' in data.dtype.names:
            flux_key = 'flux'
        elif 'flux_sum' in data.dtype.names:
            flux_key = 'flux_sum'
        else:
            flux_key = ''

        if 'fiber' in data.dtype.names:
            # multi fiber: one spectrum per fiber value
            for fiber in np.unique(data['fiber']):
                spec = {}
                mask = data['fiber']==fiber
                for row in data[mask]:
                    order = row['order']
                    wave = row['wavelength']
                    flux = row[flux_key]
                    spec[order] = (wave, flux)
                label = os.path.basename(filename) + ' Fiber {}'.format(fiber)
                spec_lst.append((spec, label))
        else:
            spec = {}
            for row in data:
                order = row['order']
                wave = row['wavelength']
                flux = row[flux_key]
                spec[order] = (wave, flux)
            label = os.path.basename(filename)
            spec_lst.append((spec, label))

    ################################################
    fig = plt.figure(figsize=(15, 8), dpi=150)
    ax = fig.add_axes([0.07, 0.1, 0.88, 0.8])

    def plot_order(order):
        """Redraw the axes with every loaded spectrum of the given order."""
        ax.cla()
        ax.currentorder = order
        wave_min, wave_max = 1e9, 0
        flux_min = 1e9
        for i, (spec, label) in enumerate(spec_lst):
            if order in spec:
                wave = spec[order][0]
                flux = spec[order][1]
                ax.plot(wave, flux, '-', alpha=0.8, lw=0.8, label=label)
                wave_min = min(wave_min, wave.min())
                wave_max = max(wave_max, wave.max())
                flux_min = min(flux_min, flux.min())
        leg = ax.legend(loc='upper right')
        leg.get_frame().set_alpha(0.1)
        ax.set_xlabel(u'Wavelength (\xc5)', fontsize=12)
        ax.set_ylabel('Flux', fontsize=12)
        ax.set_title('Order %d'%(order), fontsize=14)
        ax.set_xlim(wave_min, wave_max)
        ax.axhline(y=0, color='k', ls='--', lw=0.5)
        if flux_min > 0:
            # keep the zero level visible for all-positive spectra
            ax.set_ylim(0,)
        ax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
        ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
        fig.canvas.draw()

    def on_key(event):
        """Step to the adjacent order on Up/Down key presses."""
        if event.key == 'up':
            can_plot = False
            for spec, label in spec_lst:
                if ax.currentorder + 1 in spec:
                    can_plot=True
                    break
            if can_plot:
                plot_order(ax.currentorder + 1)
        elif event.key == 'down':
            can_plot = False
            for spec, label in spec_lst:
                if ax.currentorder - 1 in spec:
                    can_plot=True
                    break
            if can_plot:
                plot_order(ax.currentorder - 1)
        else:
            pass

    # start from the first order of the first spectrum
    order0 = list(spec_lst[0][0].keys())[0]
    plot_order(order0)
    fig.canvas.mpl_connect('key_press_event', on_key)
    plt.show()
def plot_spectra1d():
    """Dispatch 1-D spectra plotting to the configured instrument pipeline."""
    # read every local *.cfg file
    config = configparser.ConfigParser(
        inline_comment_prefixes=(';', '#'),
        interpolation=configparser.ExtendedInterpolation(),
    )
    config.read([fname for fname in os.listdir(os.curdir)
                 if fname.endswith('.cfg')])

    # find telescope and instrument from the config file
    section = config['data']
    telescope = section['telescope']
    instrument = section['instrument']

    for modname, tel, inst in instrument_lst:
        if telescope == tel and instrument == inst:
            eval(modname).plot_spectra1d()
            exit()
def convert_onedspec():
    """Convert one-dimensional spectra from FITS to per-order ASCII files.

    With no extra command-line arguments, every FITS file in the onedspec
    path is converted; otherwise arguments are filenames, frame IDs, or
    file IDs resolved through the observing log.
    """
    config = common.load_config(r'\S*\.cfg$', verbose=False)
    logtable = common.load_obslog(r'\S*\.obslog$', fmt='astropy', verbose=False)

    section = config['reduce']
    odspath = section.get('odspath', None)
    oned_suffix = section.get('oned_suffix')

    # build the list of FITS files to convert
    filename_lst = []
    if len(sys.argv)==2:
        # no additional args. convert all of the onedspec
        for fname in sorted(os.listdir(odspath)):
            if fname.endswith('.fits') or fname.endswith('.fit'):
                filename = os.path.join(odspath, fname)
                filename_lst.append(filename)
    else:
        for arg in sys.argv[2:]:
            if os.path.exists(arg):
                filename_lst.append(arg)
            elif os.path.exists(os.path.join(odspath, arg)):
                filename_lst.append(os.path.join(odspath, arg))
            else:
                if arg.isdigit():
                    arg = int(arg)
                for logitem in logtable:
                    if arg == logitem['frameid'] or arg == logitem['fileid']:
                        pattern = str(logitem['fileid'])+r'\S*'
                        for fname in sorted(os.listdir(odspath)):
                            filename = os.path.join(odspath, fname)
                            if os.path.isfile(filename) \
                                and re.match(pattern, fname):
                                filename_lst.append(filename)

    for filename in filename_lst:
        data = fits.getdata(filename)

        # determine the flux column.  Skip files with neither column: the
        # original left flux_key unset here ("else: pass"), which raised
        # NameError on the first file or silently re-used the previous
        # file's column name.
        if 'flux' in data.dtype.names:
            flux_key = 'flux'
        elif 'flux_sum' in data.dtype.names:
            flux_key = 'flux_sum'
        else:
            print('Warning: no flux column found in {}. Skipped'.format(
                filename))
            continue

        ascii_prefix = os.path.splitext(os.path.basename(filename))[0]
        target_path = os.path.join(odspath, ascii_prefix)
        if not os.path.exists(target_path):
            os.mkdir(target_path)

        # write one ASCII file per echelle order
        for row in data:
            order = row['order']
            wave = row['wavelength']
            flux = row[flux_key]
            # make the wavelength axis monotonically increasing
            if wave[0]> wave[-1]:
                wave = wave[::-1]
                flux = flux[::-1]

            target_fname = '{}_order_{:03d}.txt'.format(ascii_prefix, order)
            target_filename = os.path.join(target_path, target_fname)
            if os.path.exists(target_filename):
                print('Warning: {} is overwritten'.format(target_filename))
            with open(target_filename, 'w') as outfile:
                for w, f in zip(wave, flux):
                    outfile.write('{:11.5f} {:+16.8e}'.format(w, f)+os.linesep)

        print('Convert {} to {} files with ASCII formats in {}'.format(
            filename, len(data), target_path))
| 15,026 | 4,457 |
import os
from django.db import models
from django.utils import timezone
from django.utils.module_loading import import_string
from smartmin.models import SmartModel
def generate_file_path(instance, filename):
    """Build the storage path for an uploaded CSV, capped at 100 characters.

    Args:
        instance: the model instance the file is attached to (unused).
        filename: the uploaded file's original name.

    Returns:
        str: "csv_imports/<name><ext>" whose total length is < 100 when
        truncation is needed.
    """
    file_path_prefix = 'csv_imports/'
    name, extension = os.path.splitext(filename)
    # The original compared only name+extension against the limit, so paths
    # between 100 and 100+len(prefix) characters escaped truncation even
    # though the truncation math already accounted for the prefix.
    if len(file_path_prefix) + len(name) + len(extension) >= 100:
        name = name[:100-len(extension)-len(file_path_prefix)]
    return "%s%s%s" % (file_path_prefix, name, extension)
class ImportTask(SmartModel):
    """Tracks a single CSV import job handed off to a background task.

    Stores the uploaded file, the target model's dotted path, free-form
    JSON parameters/results, and the state of the queued task.
    """
    # task_status values
    PENDING = 'PENDING'
    STARTED = 'STARTED'
    RUNNING = 'RUNNING'
    SUCCESS = 'SUCCESS'
    FAILURE = 'FAILURE'

    # states in which the task has finished (successfully or not)
    READY_STATES = [SUCCESS, FAILURE]

    csv_file = models.FileField(upload_to=generate_file_path, verbose_name="Import file",
                                help_text="A comma delimited file of records to import")
    model_class = models.CharField(max_length=255, help_text="The model we are importing for")
    import_params = models.TextField(blank=True, null=True, help_text="JSON blob of form parameters on task creation")
    import_log = models.TextField()
    import_results = models.TextField(blank=True, null=True, help_text="JSON blob of result values on task completion")
    task_id = models.CharField(null=True, max_length=64)
    task_status = models.CharField(max_length=32, default=PENDING)

    def start(self):
        """Queue the import as a background task and record its task id."""
        from .tasks import csv_import
        self.log("Queued import at %s" % timezone.now())
        self.task_status = self.STARTED
        # save status before enqueueing so the worker sees STARTED
        self.save(update_fields=['import_log', 'task_status'])
        result = csv_import.delay(self.pk)
        self.task_id = result.task_id
        self.save(update_fields=['task_id'])

    def done(self):
        """Return True once the queued task reached a final state.

        NOTE(review): returns None (falsy) rather than False when no task
        has been queued yet — confirm callers only rely on truthiness.
        """
        if self.task_id:
            return self.task_status in self.READY_STATES

    def status(self):
        """Return the raw task status string."""
        return self.task_status

    def log(self, message):
        """Append a line to the import log and persist it immediately."""
        self.import_log += "%s\n" % message
        self.modified_on = timezone.now()
        self.save(update_fields=['import_log', 'modified_on'])

    def __unicode__(self):
        # Python 2-style display name, e.g. "Contact Import"
        return "%s Import" % import_string(self.model_class)._meta.verbose_name.title()
| 2,137 | 688 |
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Run parse/geocode/match processing over a range of list IDs."""
    help = 'Run all processing steps on data loaded from fixtures'

    def add_arguments(self, parser):
        """Register the --startid/--endid bounds of the list ID range."""
        parser.add_argument(
            '-s',
            '--startid',
            type=int,
            help='The start of the list ID range to process',
            default=2,
        )
        parser.add_argument(
            '-e',
            '--endid',
            type=int,
            help='The end of the list ID range to process',
            default=16,
        )

    def handle(self, *args, **options):
        # NOTE(review): range() excludes end_id, so the defaults process
        # IDs 2..15 — confirm the exclusive upper bound is intended.
        start_id = options['startid']
        end_id = options['endid']
        for list_id in range(start_id, end_id):
            # each list goes through all three batch_process actions in order
            for action in ('parse', 'geocode', 'match'):
                call_command('batch_process',
                             '--list-id', list_id,
                             '--action', action)
| 975 | 262 |
#!/usr/bin/env python
# encoding: utf-8
"""
changefiles.py
Created by Peter Bakker on 2017-10-04.
Copyright (c) 2017 . All rights reserved.
"""
import sys
import os
import numpy as np
import pandas as pd
import datetime
from pytz import timezone
import pytz
import zipline.utils.calendars as cal
from zipline.data.session_bars import SessionBarReader
from zipline.data.bar_reader import (NoDataAfterDate,NoDataBeforeDate,NoDataOnDate)
from zipline.utils.calendars import get_calendar
from pandas import (DataFrame,DatetimeIndex,isnull,NaT,read_csv,read_sql,to_datetime,Timestamp,)
# module-level configuration
utc = pytz.utc
# directories containing the raw minute and daily CSV exports
dirs = ['/root/data/minute','/root/data/daily']
# NYSE trading calendar (also available through get_calendar below)
calcal = cal.get_calendar( name='NYSE')
def check_sessions(table, frequency='daily'):
    """Align a price table with the NYSE trading calendar.

    For 'minute' data, rows outside official trading minutes are dropped.
    For 'daily' data, missing sessions are filled by copying the previous
    session's row with volume set to 0, and extra sessions are dropped.

    Args:
        table: pandas DataFrame of OHLCV rows indexed by UTC timestamps.
        frequency: 'daily' or 'minute'.

    Returns:
        The calendar-aligned DataFrame.
    """
    calendar = get_calendar('NYSE')
    # from IPython import embed; embed()
    earliest_date = table.index[0]
    # Calculate the index into the array of the first and last row
    # for this asset. This allows us to efficiently load single
    # assets when querying the data back out of the table.
    asset_first_day = table.index[0]
    asset_last_day = table.index[-1]
    sessions = calendar.sessions_in_range(asset_first_day ,asset_last_day)
    asset_sessions = sessions[sessions.slice_indexer(asset_first_day, asset_last_day)]
    if frequency =='minute':
        minutes_passed = len(table)
        # snap endpoints to real sessions before listing trading minutes
        asset_first_day = calendar.minute_to_session_label(asset_first_day, direction='next')
        asset_last_day = calendar.minute_to_session_label(asset_last_day, direction='previous')
        minutes_in_session = calendar.minutes_for_sessions_in_range(asset_first_day, asset_last_day)
        # keep only rows that fall on official trading minutes
        table = table[table.index.isin(minutes_in_session)]
        # NOTE(review): the message compares the pre-filter row count with
        # the calendar's minute count, not the number of rows removed.
        if (minutes_passed) > len(minutes_in_session): print 'Removed '+ str((minutes_passed) - len(minutes_in_session))+' minutes'
    elif frequency =='daily' and len(table) != len(asset_sessions):
        missing_sessions = asset_sessions.difference(to_datetime(np.array(table.index),unit='s',utc=True,) ).tolist()
        extra_sessions = to_datetime(np.array(table.index),unit='s',utc=True,).difference(asset_sessions).tolist()
        for missing_session in missing_sessions:
            #add stuff: copy the previous session's row forward
            prev_date = calendar.previous_session_label(missing_session)
            row_to_copy = table[(table.index == prev_date)]
            row_to_copy_val = row_to_copy.values
            # from IPython import embed; embed()
            table.loc[missing_session] = row_to_copy_val[0]
            # NOTE(review): chained assignment may write to a temporary copy
            # (pandas SettingWithCopy); confirm volume is actually zeroed.
            table.loc[missing_session].volume = 0
            #row = row_to_copy
            #table.append(row)
            print 'Added session at '+str(missing_session)
        for extra_session in extra_sessions:
            #delete stuff
            # NOTE(review): DataFrame.drop is not in-place and the result is
            # discarded, so extra sessions are not actually removed here.
            table.drop(extra_session)
            print 'Removed session at '+str(extra_session)
    return table
# Walk the minute and daily export directories, normalize every
# *_TRADES_RTH.csv file into calendar-aligned OHLCV, write it out as
# <SYMBOL>_<EXCHANGE>_<TYPE>.csv, then move the original into temp/.
for dir in dirs:
    print "parsing ", dir
    for item in os.listdir(dir):
        if item.endswith('TRADES_RTH.csv'):
            symbol = item.split('_')[0]
            # defaults; overridden for known ETFs and indices below
            exchange = 'SMART'
            asset_type = 'STK'
            if symbol in ['GDX','GLD']: exchange = 'ARCA'
            if symbol in ['VIX','VIX3M', 'VXMT', 'VXST', 'GVZ']:
                exchange = 'CBOE'
                asset_type ='IND'
            print "parsing ", symbol, " in file ",item
            if dir == dirs[1]:
                # daily files: date in column 0, then OHLCV + placeholder
                dfData=pd.read_csv(dir+"/"+item, parse_dates={'dates':[0]}, header=None, index_col=0).sort_index()
                dfData.rename(columns={1:'open',2:'high', 3:'low',4:'close',5:'volume',6:'temp'}, inplace=True)
                dfData.index=pd.to_datetime(dfData.index, utc=True)
                dfData=dfData.tz_localize(utc, axis=0, level=None, copy=False, ambiguous='raise')
                dfData = check_sessions(dfData)
            else:
                # minute files: date and time split across columns 0-1
                dfData=pd.read_csv(dir+"/"+item, parse_dates={'dates':[0, 1]}, header=None, index_col=0).sort_index()
                dfData.rename(columns={2:'open',3:'high', 4:'low',5:'close',6:'volume',7:'temp'}, inplace=True)
                dfData=dfData.tz_localize(utc, axis=0, level=None, copy=False, ambiguous='raise')
                dfData =check_sessions(dfData,'minute')
            # remove= []
            # for dt in dfData.index:
            # # if not calcal.is_open_on_minute(dt):
            # # remove.append(pd.to_datetime(str(dt)))
            # dfData = dfData[~dfData.index.isin(remove)]
            # drop the trailing placeholder column if present
            dfData = dfData.drop('temp', axis=1, errors='ignore')
            # dfData = check_sessions(dfData)
            # from IPython import embed; embed()
            #dfData.index=dfData.index.tz_localize('UTC')
            #dfData.index=dfData.index.tz_convert('UTC')
            # strip timezone info before writing out
            dfData.index=dfData.index.tz_convert(None)
            with open(dir+'/'+symbol+'_'+exchange+'_'+asset_type+'.csv','w') as f:
                dfData.to_csv(f)
                f.close()
            os.rename(dir+'/'+item, dir+'/temp/'+item)
| 4,835 | 1,645 |
# Goes through coordinates of body pieces and find where they are
# relative to the head.
#
# The coordinate values were manually found out by looking at the image
# in gimp. There was a fully "mounted" character at ~2400,~370 on the image
# (y goes up from top->bottom in these coordinates). The values were looked up
# by hand and written here.
#
# If you suspect this is wrong, check tatermand's art and manually look at the
# image. The absolute values are not really important.
# Reference bounding box of the head: (name, x0, y0, x1, y1) in image
# coordinates (y grows downward).
head = ("head", 2440, 399, 2496, 456)
# center of the head box
# NOTE(review): Python 2 integer division floors the center of boxes
# whose coordinate sums are odd — presumably acceptable at this scale.
head_x = abs(2440 + 2496)/2
head_y = abs(399 + 456)/2

# bounding boxes of all body pieces, same format as `head`
pieces = [
    ("head", 2440, 399, 2496, 456),
    ("shoulder left", 2392, 407, 2463, 483),
    ("shoulder right", 2482, 403, 2542, 485),
    ("torso", 2419, 402, 2520, 502),
    ("arm left", 2372, 379, 2449, 495),
    ("weapon", 2460, 306, 2542, 455),
    ("arm right", 2480, 339, 2537, 452)
]

# emit each piece's center offset from the head as C++ initializer lines
for piece in pieces:
    # center of this piece's bounding box
    x = piece[1] + piece[3]
    x = abs(x/2)
    y = piece[2] + piece[4]
    y = abs(y/2)
    # y - head_y is negated so that y is "more" for up and "less" for
    # down (instead of "less" for up [0,0 is usually point UP and left in
    # screens] and "more" for down). This will make the computed delta
    # more useful. This is because if y is ABOVE head_y, then the
    # result will end up being positive. If negation wasn't there,
    # it'd have been negative.
    print " //%s\n { Float2(%s.0f, %s.0f) }," % \
        (piece[0], x - head_x, -(y - head_y))
| 1,422 | 622 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# --------------------------
# Author fzl
# Date 2017/7/20 15:13
# EMAIL fangjalylong@qq.com
# Desc
# --------------------------
import json
import requests
import sys
from ExeUtil import ExeUtil
# Console encoding used to re-encode the banner strings below
# (Python 2: the parenthesized string is decoded/encoded, not print's
# return value).
# NOTE(review): `type` shadows the builtin of the same name.
type = sys.getfilesystemencoding()
print( "***************************************************************")
print( "* *").decode('utf-8').encode(type)
print( "*******************欢迎使用免费代理更新程序********************").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "*******************正在更新配置文件,请稍后********************").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "***************************************************************")
# fetch the current proxy account list as JSON from the remote service
url = 'http://your url:ort/path/getAccount'
data = requests.get(url)
beans=json.loads(data.text)
# rewrite the Shadowsocks GUI config with one "configs" entry per account
file=open("gui-config.json","w+")
file.write('''{
"configs": ['''+'\n')
for index in range(len(beans)):
    file.writelines('{')
    file.write('''"server": "'''+beans[index]['address'].strip()+'''"'''+',\n')
    file.write('''"server_port": "'''+beans[index]['port'].strip()+'''"'''+',\n')
    file.write('''"password": "'''+beans[index]['password'].strip()+'''"'''+',\n')
    file.write('''"method": "'''+beans[index]['method'].strip()+'''"'''+',\n')
    file.write('''"remarks": "'''+beans[index]['address'].strip()+'''"'''+',\n')
    file.write('''"timeout": "'''+'5'+'''"'''+'\n')
    # no trailing comma after the last entry
    if index==len(beans)-1:
        file.writelines('}')
    else:
        file.writelines('},')
# static tail of the GUI config (defaults for proxy/hotkey/logging)
file.write(''' ],
"strategy": null,
"index": 0,
"global": true,
"enabled": false,
"shareOverLan": true,
"isDefault": false,
"localPort": 1080,
"pacUrl": null,
"useOnlinePac": false,
"secureLocalPac": true,
"availabilityStatistics": false,
"autoCheckUpdate": true,
"checkPreRelease": false,
"isVerboseLogging": true,
"logViewer": {
"topMost": false,
"wrapText": true,
"toolbarShown": false,
"Font": "Consolas, 8pt",
"BackgroundColor": "Black",
"TextColor": "White"
},
"proxy": {
"useProxy": false,
"proxyType": 0,
"proxyServer": "",
"proxyPort": 0,
"proxyTimeout": 3
},
"hotkey": {
"SwitchSystemProxy": "",
"SwitchSystemProxyMode": "",
"SwitchAllowLan": "",
"ShowLogs": "",
"ServerMoveUp": "",
"ServerMoveDown": ""
}
}''')
file.close()
print( "* *").decode('utf-8').encode(type)
print( "* SS默认选中第一个代理账号,如不可用请尝试切换其他账号 *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "* 配置文件已经更新,Shadowsocks已经启动 *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "* by:flyou *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "* http://www.flyou.ren *").decode('utf-8').encode(type)
print( "* *").decode('utf-8').encode(type)
print( "***************************************************************")
# launch the Shadowsocks client with the freshly written config
exeUtil=ExeUtil('.','Shadowsocks.exe')
exeUtil.openExe()
| 3,773 | 1,203 |
import unittest
from unittest import mock
from tests import mocks
from hpstatus import status
class SystemTest(unittest.TestCase):
    """Check the system status readers return well-formed row dicts."""

    def _assert_rows_have_keys(self, data, keys):
        """Shared shape check: non-empty list whose first row carries `keys`."""
        self.assertIsInstance(data, list)
        self.assertNotEqual(len(data), 0)
        first = data[0]
        self.assertIsInstance(first, dict)
        for key in keys:
            self.assertIn(key, first)

    @mock.patch("hpstatus.status._get_system_feature", side_effect=mocks._get_system_feature)
    def test_get_fans(self, mock_get_system_feature):
        self._assert_rows_have_keys(
            status.get_fans(),
            ("id", "location", "present", "speed", "percentage",
             "redundant", "partner", "hot_pluggable"))

    @mock.patch("hpstatus.status._get_system_feature", side_effect=mocks._get_system_feature)
    def test_get_powermeter(self, mock_get_system_feature):
        self._assert_rows_have_keys(status.get_powermeter(), ("id", "reading"))

    @mock.patch("hpstatus.status._get_system_feature", side_effect=mocks._get_system_feature)
    def test_get_powersupply(self, mock_get_system_feature):
        self._assert_rows_have_keys(
            status.get_powersupply(),
            ("id", "present", "redundant", "condition", "hotplug", "reading"))

    @mock.patch("hpstatus.status._get_system_feature", side_effect=mocks._get_system_feature)
    def test_get_temp(self, mock_get_system_feature):
        self._assert_rows_have_keys(
            status.get_temp(), ("id", "location", "temp", "threshold"))
class StorageTest(unittest.TestCase):
    """Check the storage status readers return well-formed row dicts."""

    def _assert_rows_have_keys(self, data, keys):
        """Shared shape check: non-empty list whose first row carries `keys`."""
        self.assertIsInstance(data, list)
        self.assertNotEqual(len(data), 0)
        first = data[0]
        self.assertIsInstance(first, dict)
        for key in keys:
            self.assertIn(key, first)

    @mock.patch("hpstatus.status._get_storage_controllers", side_effect=mocks._get_storage_controllers)
    def test_get_storage_controllers(self, mock_get_storage_controllers):
        self._assert_rows_have_keys(
            status.get_storage_controllers(),
            ("id", "model", "status", "cache", "battery"))

    @mock.patch("hpstatus.status._get_storage_drives", side_effect=mocks._get_storage_drives)
    def test_get_storage_drives(self, mock_get_storage_drives):
        self._assert_rows_have_keys(
            status.get_storage_drives(1),
            ("location", "port", "box", "bay", "size", "status"))

    @mock.patch("hpstatus.status._get_storage_drives_detail", side_effect=mocks._get_storage_drives_detail)
    def test_get_storage_drives_detail(self, mock_get_storage_drives_detail):
        self._assert_rows_have_keys(
            status.get_storage_drives_detail(1),
            ("location", "port", "box", "bay", "size", "status",
             "serial", "temp", "max_temp"))
if __name__ == "__main__":
    # allow running this test module directly with `python <file>`
    unittest.main()
| 3,651 | 1,421 |
#!/bin/env python
"""
language_model.py.
Written by joshualoehr.
https://github.com/joshualoehr/ngram-language-model
Edited by Brighton Pauli, 4/20/2022.
"""
import argparse
from itertools import product
from pathlib import Path
import numpy as np
import nltk
from preprocess import preprocess, EOS, UNK
def load_data(data_dir):
    """Load train and test corpora from a directory.

    The directory must contain two files: train.txt and test.txt.
    Surrounding whitespace (including newlines) is stripped from each line.

    Args:
        data_dir (Path) -- pathlib.Path of the directory to use.

    Returns:
        The train and test sets, as lists of sentences.
    """
    def _read_lines(name):
        # keep the absolute-POSIX-path behavior of the original loader
        path = data_dir.joinpath(name).absolute().as_posix()
        with open(path, 'r') as handle:
            return [line.strip() for line in handle]

    return _read_lines('train.txt'), _read_lines('test.txt')
class LanguageModel:
    """An n-gram language model trained on a given corpus.

    For a given n and given training corpus, constructs an n-gram language
    model for the corpus by:
    1. preprocessing the corpus (adding SOS/EOS/UNK tokens)
    2. calculating (smoothed) probabilities for each n-gram

    Also contains methods for calculating the perplexity of the model
    against another corpus, and for generating sentences.
    """

    def __init__(self, train_data, n_val, laplace=1):
        """Create a LanguageModel object.

        Args:
            train_data (list of str): list of sentences comprising the
                training corpus.
            n_val (int): the order of language model to build (i.e. 1 for
                unigram, 2 for bigram, etc.).
            laplace (int): lambda multiplier to use for laplace smoothing
                (default 1 for add-1 smoothing).
        """
        self.n_val = n_val
        self.laplace = laplace
        self.tokens = preprocess(train_data, n_val)
        self.vocab = nltk.FreqDist(self.tokens)
        self.model = self._create_model()
        # Bitmasks used by _convert_oov, ordered so the all-ones mask
        # (i.e. the unmodified n-gram) is tried first.
        self.masks = list(reversed(list(product((0, 1), repeat=n_val))))

    def _smooth(self):
        """Apply Laplace smoothing to n-gram frequency distribution.

        Here, n_grams refers to the n-grams of the tokens in the training
        corpus, while m_grams refers to the first (n-1) tokens of each n-gram.

        Returns:
            dict: Mapping of each n-gram (tuple of str) to its
                Laplace-smoothed probability (float).
        """
        vocab_size = len(self.vocab)

        n_grams = nltk.ngrams(self.tokens, self.n_val)
        n_vocab = nltk.FreqDist(n_grams)

        m_grams = nltk.ngrams(self.tokens, self.n_val-1)
        m_vocab = nltk.FreqDist(m_grams)

        def smoothed_count(n_gram, n_count):
            # Add-lambda estimate of P(w_n | w_1..w_{n-1})
            m_gram = n_gram[:-1]
            m_count = m_vocab[m_gram]
            numer = (n_count + self.laplace)
            denom = (m_count + self.laplace * vocab_size)
            return numer / denom

        return {n_gram: smoothed_count(n_gram, count)
                for n_gram, count in n_vocab.items()}

    def _create_model(self):
        """Create a probability distribution for vocab of the training corpus.

        If building a unigram model, the probabilities are simple relative
        frequencies of each token with the entire corpus.
        Otherwise, the probabilities are Laplace-smoothed relative frequencies.

        Returns:
            A dict mapping each n-gram (tuple of str) to its probability
            (float).
        """
        if self.n_val == 1:
            num_tokens = len(self.tokens)
            return {(unigram,): count / num_tokens
                    for unigram, count in self.vocab.items()}
        return self._smooth()

    def _convert_oov(self, ngram):
        """Convert, if necessary, a given n-gram to one known by the model.

        Starting with the unmodified ngram, check each possible permutation of
        the n-gram with each index of the n-gram containing either the original
        token or <UNK>. Stop when the model contains an entry for that
        permutation.

        This is achieved by creating a 'bitmask' for the n-gram tuple, and
        swapping out each flagged token for <UNK>. Thus, in the worst case,
        this function checks 2^n possible n-grams before returning.

        Returns:
            The n-gram with <UNK> tokens in certain positions such that the
            model contains an entry for it.

        Raises:
            LookupError: if no masked variant of the n-gram is in the model.
        """
        def mask(ngram, bitmask):
            return tuple(
                token if flag else UNK for token, flag in zip(ngram, bitmask)
            )

        ngram = (ngram,) if isinstance(ngram, str) else ngram
        for possible_known in [mask(ngram, bitmask) for bitmask in self.masks]:
            if possible_known in self.model:
                return possible_known
        raise LookupError(f"Model failed to find n-gram {str(ngram)}.")

    def perplexity(self, test_data):
        """Calculate the perplexity of the model against a given test corpus.

        Args:
            test_data (list of str): sentences comprising the training corpus.

        Returns:
            The perplexity of the model as a float.
        """
        test_tokens = preprocess(test_data, self.n_val)
        test_ngrams = nltk.ngrams(test_tokens, self.n_val)
        total = len(test_tokens)

        known_ngrams = (self._convert_oov(ngram) for ngram in test_ngrams)
        probabilities = [self.model[ngram] for ngram in known_ngrams]

        # Perplexity = exp(-1/N * sum(log P))
        return np.exp((-1/total) * sum(map(np.log, probabilities)))

    def sentence_surprisal(self, sent):
        """Return the surprisal for each token in the sentence.

        Args:
            sent (tuple OR str): sequence of words to get surprisals of.

        Returns:
            numpy array of the same length as sent, where each number
            corresponds to the surprisal of the token at the same index.
        """
        if isinstance(sent, str):
            sent = sent.split()

        probs = []
        # Sliding window of the previous (n-1) tokens, padded with SOS
        prev = ["<s>"] * (self.n_val - 1)
        for word in sent:
            prev.append(word)
            key = self._convert_oov(prev)
            # Bug fixed: a leftover debug print of `key` polluted stdout here.
            probs.append(self.model[key])
            del prev[0]

        return -np.log(np.array(probs))

    def _best_candidate(self, prev, i, without=None):
        """Choose the most likely next token given the previous (n-1) tokens.

        If selecting the first word of the sentence (after the SOS tokens),
        the i'th best candidate will be selected, to create variety.
        If no candidates are found, the EOS token is returned with a
        probability of 1.

        Args:
            prev (tuple of str): the previous n-1 tokens of the sentence.
            i (int): which candidate to select if not the most probable one.
            without (list of str): tokens to exclude from the candidates list.

        Returns:
            A tuple with the next most probable token and its corresponding
            probability.
        """
        # Bug fixed: the previous version blacklisted the literal string
        # "UNK", which never matches the actual sentinel token imported from
        # preprocess; use the UNK constant itself.
        blacklist = [UNK]
        if without:
            blacklist += without

        candidates = ((ngram[-1], prob) for ngram, prob in self.model.items()
                      if ngram[:-1] == prev)
        candidates = filter(
            lambda candidate: candidate[0] not in blacklist, candidates)
        candidates = sorted(
            candidates, key=lambda candidate: candidate[1], reverse=True)

        if len(candidates) == 0:
            return (EOS, 1)
        return candidates[0 if prev != () and prev[-1] != "<s>" else i]

    def generate_sentences(self, num, min_len=12, max_len=24):
        """Generate num random sentences using the language model.

        Sentences always begin with the SOS token and end with the EOS token.
        While unigram model sentences will only exclude the UNK token, n>1
        models will also exclude all other words already in the sentence.

        Args:
            num (int): the number of sentences to generate.
            min_len (int): minimum allowed sentence length.
            max_len (int): maximum allowed sentence length.

        Yields:
            A tuple with the generated sentence and the combined probability
            (in log-space) of all of its n-grams.
        """
        for i in range(num):
            sent, total_prob = ["<s>"] * max(1, self.n_val-1), 1
            while sent[-1] != EOS:
                prev = () if self.n_val == 1 else tuple(sent[-(self.n_val-1):])
                # Forbid EOS until the sentence reaches its minimum length
                blacklist = sent + ([EOS] if len(sent) < min_len else [])
                next_token, next_prob = self._best_candidate(
                    prev, i, without=blacklist)
                sent.append(next_token)
                total_prob *= next_prob

                if len(sent) >= max_len:
                    sent.append(EOS)

            yield ' '.join(sent), -1/np.log(total_prob)
if __name__ == '__main__':
    parser = argparse.ArgumentParser("N-gram Language Model")
    parser.add_argument('--data', type=str, required=True,
                        help='Location of the data directory containing '
                             'train.txt and test.txt')
    parser.add_argument('--n', type=int, required=True,
                        help='Order of N-gram model to create (i.e. 1 for '
                             'unigram, 2 for bigram, etc.)')
    parser.add_argument('--laplace', type=float, default=0.01,
                        help='Lambda parameter for Laplace smoothing (default '
                             'is 0.01 -- use 1 for add-1 smoothing)')
    parser.add_argument('--num', type=int, default=10,
                        help='Number of sentences to generate (default 10)')
    args = parser.parse_args()

    # Load and prepare train/test data
    data_path = Path(args.data)
    train, test = load_data(data_path)

    # Consistency fix: the file mixed str.format and f-strings; f-strings
    # (already used below) are now used throughout.
    print(f"Loading {args.n}-gram model...")
    lm = LanguageModel(train, args.n, laplace=args.laplace)
    print(f"Vocabulary size: {len(lm.vocab)}")

    # print("Generating sentences...")
    # for sentence, prob in lm.generate_sentences(args.num):
    #     print(f"{sentence} ({prob:.5f})")

    print("Generating surprisals...")
    sent1 = "I brought salt and pepper ."
    print(f"\t{sent1}")
    print('\t', lm.sentence_surprisal(sent1))
    sent2 = "I brought pepper and salt ."
    print(f"\t{sent2}")
    print('\t', lm.sentence_surprisal(sent2))

    perplexity = lm.perplexity(test)
    print(f"Model perplexity: {perplexity:.3f}")
    print("")
| 10,703 | 3,181 |
"""
Collection of functions for helping to segment microtubes in tomograms
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 1.07.17
"""
import csv
import vtk
from .utils import *
from sklearn.cluster import MeanShift
__author__ = 'Antonio Martinez-Sanchez'
# Remove everything inside a directory while preserving the directory itself.
# dir: directory path
def clean_dir(dir):
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
def get_sub_copy(tomo, sub_pt, sub_shape):
    '''
    Returns a subvolume of a tomogram from a center and a shape

    :param tomo: input tomogram (3D numpy array)
    :param sub_pt: subvolume center point (3 coordinates)
    :param sub_shape: output subvolume shape (all dimensions must be even)
    :return: a newly allocated copy with the subvolume; regions that fall
             outside the tomogram remain zero-filled
    '''
    # Initialization
    nx, ny, nz = int(sub_shape[0]), int(sub_shape[1]), int(sub_shape[2])
    mx, my, mz = tomo.shape[0], tomo.shape[1], tomo.shape[2]
    mx1, my1, mz1 = mx - 1, my - 1, mz - 1
    hl_x, hl_y, hl_z = int(nx * .5), int(ny * .5), int(nz * .5)
    x, y, z = int(round(sub_pt[0])), int(round(sub_pt[1])), int(round(sub_pt[2]))

    # Compute bounding restriction: clamp the source window to the tomogram
    # and track the matching offsets inside the output array
    off_l_x, off_l_y, off_l_z = x - hl_x, y - hl_y, z - hl_z
    off_h_x, off_h_y, off_h_z = x + hl_x, y + hl_y, z + hl_z
    dif_l_x, dif_l_y, dif_l_z = 0, 0, 0
    dif_h_x, dif_h_y, dif_h_z = nx, ny, nz
    if off_l_x < 0:
        dif_l_x = abs(off_l_x)
        off_l_x = 0
    if off_l_y < 0:
        dif_l_y = abs(off_l_y)
        off_l_y = 0
    if off_l_z < 0:
        dif_l_z = abs(off_l_z)
        off_l_z = 0
    if off_h_x >= mx:
        dif_h_x = nx - off_h_x + mx1
        off_h_x = mx1
    if off_h_y >= my:
        dif_h_y = ny - off_h_y + my1
        off_h_y = my1
    if off_h_z >= mz:
        dif_h_z = nz - off_h_z + mz1
        off_h_z = mz1

    # Make the subvolume copy
    # Bug fixed: np.int was removed in NumPy 1.24; the built-in int is the
    # documented replacement for the deprecated alias.
    hold_sv = np.zeros(shape=np.asarray(sub_shape, dtype=int), dtype=tomo.dtype)
    hold_sv[dif_l_x:dif_h_x, dif_l_y:dif_h_y, dif_l_z:dif_h_z] = tomo[off_l_x:off_h_x, off_l_y:off_h_y, off_l_z:off_h_z]

    return hold_sv
# Read microtubes centerline samples and group them by microtube ID
# fname: CSV file name
# coords_cols: X, Y and Z column numbers in the CSV file
# id_col: microtube ID column number in the CSV file
# Returns: a dictionary indexed by microtube ID with centerline coordinates in a list
def read_csv_mts(fname, coords_cols, id_col):
    # Open the file to read
    with open(fname, 'r') as in_file:
        reader = csv.reader(in_file)
        # Reading loop: collect every sample coordinate and its microtube ID
        coords, ids = list(), list()
        for row in reader:
            x, y, z, idx = float(row[coords_cols[0]]), float(row[coords_cols[1]]), float(row[coords_cols[2]]),\
                           int(row[id_col])
            # Bug fixed: np.float was removed in NumPy 1.24; the built-in
            # float is the documented replacement for the deprecated alias.
            coords.append(np.asarray((x, y, z), dtype=float))
            ids.append(idx)
        # Dictionary creation: one list of centerline points per microtube ID
        mt_dict = dict.fromkeys(set(ids))
        for key in mt_dict.keys():
            mt_dict[key] = list()
        for key, coord in zip(ids, coords):
            mt_dict[key].append(coord)
    return mt_dict
# Converts a set of points into a binary mask
# points: iterable with the points coordinates
# mask_shape: shape of the output mask
# inv: if False (default) then True-fg and False-bg, otherwise these values are inverted
# Returns: a 3D numpy binary array
def points_to_mask(points, mask_shape, inv=False):
    # Bug fixed: np.bool was removed in NumPy 1.24; the built-in bool is the
    # documented replacement for the deprecated alias.
    mask = np.zeros(shape=mask_shape, dtype=bool)
    for point in points:
        i, j, k = int(round(point[0])), int(round(point[1])), int(round(point[2]))
        # Silently skip points that fall outside the mask bounds
        if (i < 0) or (j < 0) or (k < 0) or \
                (i >= mask_shape[0]) or (j >= mask_shape[1]) or (k >= mask_shape[2]):
            continue
        mask[i, j, k] = True
    return np.invert(mask) if inv else mask
# Mean-shift clustering for points in 3D space
# coords: point coordinates as a numpy array with size [n_points, 3]
# bandwidth: bandwidth of the RBF kernel used by MeanShift
# cluster_all: if True, orphan points (label -1) are reassigned to new
#              one-point ("trivial") clusters after fitting
# Returns: cluster labels array [n_points]
def cluster_3d_mean_shift(coords, bandwidth, cluster_all=False):
    # Input parsing: coords must be an [n_points, 3] array, bandwidth positive
    if (not isinstance(coords, np.ndarray)) or (len(coords.shape) != 2) or (coords.shape[1] != 3):
        error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
        raise pexceptions.PySegInputError(expr='cluster_3D_mean_shift', msg=error_msg)
    if bandwidth <= 0:
        error_msg = 'Input bandwith must be greater than zero.'
        raise pexceptions.PySegInputError(expr='cluster_3D_mean_shift', msg=error_msg)
    bw_f = float(bandwidth)
    # Call to MeanShift
    # NOTE(review): cluster_all=True is hard-coded here, so scikit-learn never
    # produces -1 labels and the orphan-handling loop below appears dead;
    # this may have been meant to be cluster_all=cluster_all — confirm intent.
    mshift = MeanShift(bandwidth=bw_f, cluster_all=True, bin_seeding=True)
    mshift.fit(coords)
    labels = np.asarray(mshift.labels_)
    # Orphans processing: give each -1-labelled point its own new cluster id
    if cluster_all:
        labels_max = labels.max()
        for i, lbl in enumerate(labels):
            if lbl == -1:
                labels_max += 1
                labels[i] = labels_max
    return labels
# Computes center of gravity for every cluster
# coords: coordinates array [n_points, 3]
# labels: cluster labels array [n_points]
# Returns: array [n_unique_labels, 3] with each cluster centroid, ordered by
#          ascending label value
def clusters_cg(coords, labels):
    # Input parsing
    if (not isinstance(coords, np.ndarray)) or (len(coords.shape) != 2) or (coords.shape[1] != 3):
        error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
        raise pexceptions.PySegInputError(expr='clusters_cg', msg=error_msg)
    if (not isinstance(labels, np.ndarray)) or (len(labels.shape) != 1) or \
            (labels.shape[0] != coords.shape[0]):
        error_msg = 'Input labels must be array with size=[n_points].'
        raise pexceptions.PySegInputError(expr='clusters_cg', msg=error_msg)

    # Accumulate per-cluster coordinate sums and point counts
    # Bug fixed: np.float was removed in NumPy 1.24; use the built-in float.
    u_labels = np.unique(labels)
    n_points_lut = dict.fromkeys(u_labels)
    cgs = dict.fromkeys(u_labels)
    for lbl in u_labels:
        cgs[lbl] = np.zeros(shape=3, dtype=float)
        n_points_lut[lbl] = 0
    for point, lbl in zip(coords, labels):
        cgs[lbl] += point
        n_points_lut[lbl] += 1

    # Averaging loop
    for lbl in u_labels:
        cgs[lbl] *= (1. / float(n_points_lut[lbl]))
    return np.asarray(list(cgs.values()), dtype=float)
# Converts clusters of points into a vtkPolyData
# points: array with 3D points coordinates [n_points, 3]
# labels: array with point labels [n_points] with cluster labels,
#         if None (default) every point corresponds with its own cluster
# centers: cluster centers array (default None); when given, the centers are
#          appended as extra vertices flagged with label -1 and center 1
def clusters_to_poly(points, labels=None, centers=None):
    # Input parsing
    if (not isinstance(points, np.ndarray)) or (len(points.shape) != 2) or (points.shape[1] != 3):
        error_msg = 'Input coords must be numpy array of 3D coordinates (size=[n_points, 3]).'
        raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)
    if labels is not None:
        if (not isinstance(labels, np.ndarray)) or (len(labels.shape) != 1) or \
                (labels.shape[0] != points.shape[0]):
            error_msg = 'Input labels must be array with size=[n_points].'
            raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)
    if centers is not None:
        if not isinstance(centers, np.ndarray):
            error_msg = 'Input centers must be array with size=[n_unique_labels].'
            raise pexceptions.PySegInputError(expr='points_to_poly', msg=error_msg)

    # Initialization: vertices plus two per-cell integer attributes
    # ('label' = cluster id, 'center' = 1 for centers, -1/1 flags for points)
    poly = vtk.vtkPolyData()
    p_points = vtk.vtkPoints()
    p_cells = vtk.vtkCellArray()
    plabels = vtk.vtkIntArray()
    plabels.SetNumberOfComponents(1)
    plabels.SetName('label')
    pcenters = vtk.vtkIntArray()
    pcenters.SetNumberOfComponents(1)
    pcenters.SetName('center')

    # Points loop: one vertex cell per input point
    for i, point in enumerate(points):
        p_points.InsertNextPoint(point)
        p_cells.InsertNextCell(1)
        p_cells.InsertCellPoint(i)
        if labels is None:
            # No labels given: each point is its own cluster
            plabels.InsertTuple1(i, i)
        else:
            plabels.InsertTuple1(i, labels[i])
        if centers is None:
            pcenters.InsertTuple1(i, 1)
        else:
            # Regular points are flagged -1 when explicit centers exist
            pcenters.InsertTuple1(i, -1)

    # Inserting centers: appended after the points, label -1 / center 1
    if centers is not None:
        for i in range(centers.shape[0]):
            p_i = points.shape[0] + i
            p_points.InsertNextPoint(centers[i])
            p_cells.InsertNextCell(1)
            p_cells.InsertCellPoint(p_i)
            plabels.InsertTuple1(p_i, -1)
            pcenters.InsertTuple1(p_i, 1)

    # Building the polydata
    poly.SetPoints(p_points)
    poly.SetVerts(p_cells)
    poly.GetCellData().AddArray(plabels)
    poly.GetCellData().AddArray(pcenters)

    return poly
# Computes rotation angles of from an input vector to fit reference [0,0,1] vector having a free Euler angle
# in Relion format
# First Euler angle (Rotation) is assumed 0
# v_in: input vector
# mode: either 'active' (default) or 'passive'
# Returns: a 3-tuple with the Euler angles (rot, tilt, psi) in Relion format
def vect_to_zrelion(v_in, mode='active'):
    # Normalization (note the X/Y component swap before normalizing)
    v_m = np.asarray((v_in[1], v_in[0], v_in[2]), dtype=np.float32)
    # Bug fixed: dividing a NumPy array by the Python float 0.0 does not raise
    # ZeroDivisionError (it yields inf/nan plus a warning), so the previous
    # try/except never fired; test the module explicitly instead.
    v_norm = math.sqrt((v_m * v_m).sum())
    if v_norm == 0:
        print('WARNING (vect_rotation_ref): vector with module 0 cannot be rotated!')
        return 0., 0., 0.
    n = v_m / v_norm

    # Computing angles in Extrinsic ZYZ system
    alpha = np.arccos(n[2])
    beta = np.arctan2(n[1], n[0])

    # Transform to Relion system (intrinsic ZY'Z'' where rho is free)
    rot, tilt, psi = 0., unroll_angle(math.degrees(alpha), deg=True), \
                     unroll_angle(180. - math.degrees(beta), deg=True)

    # By default is active, invert if passive
    if mode == 'passive':
        M = rot_mat_relion(rot, tilt, psi, deg=True)
        rot, tilt, psi = rot_mat_eu_relion(M.T, deg=True)

    return rot, tilt, psi
def randomize_voxel_mask(vol, mask, ref='fg'):
    """
    Replace background voxel values with randomly sampled reference values.

    Every voxel where mask is False ('bg') receives the density of a voxel
    drawn (with replacement) from the reference region; foreground voxels
    are left untouched. The input volume is not modified.

    :param vol: volume with the density map
    :param mask: volume with the binary mask (fg: True, bg: False)
    :param ref: region to sample replacement values from: 'fg' (default)
                samples from the foreground, anything else samples from the
                background itself
    :return: a copy of vol with randomized background voxels
    """
    # Initialization
    o_vol = np.copy(vol)

    # Index tuples for the background (overwritten) and reference (sampled)
    bg_ids = np.where(mask == False)
    if ref == 'fg':
        ref_ids = np.where(mask)
    else:
        ref_ids = np.where(mask == False)

    # Randomization, vectorized: the original per-voxel Python loop produced
    # the same result with needless interpreter overhead
    rnd_ids = np.random.randint(0, len(ref_ids[0]), size=len(bg_ids[0]))
    o_vol[bg_ids] = vol[ref_ids[0][rnd_ids], ref_ids[1][rnd_ids], ref_ids[2][rnd_ids]]

    return o_vol
| 11,484 | 4,303 |
import os
import time
import pyfiglet  # kept: imported by the original script (currently unused)

os.system("clear")
time.sleep(1)
print(" ############################")
print(" # Script By Mr.Jøhñ #")
print(" # INDIAN HACKERS #")
print(" ############################")
print()
print(" ##################################")
print()

# Collect the ten characters with one prompt per ordinal instead of ten
# hand-written input() lines.
ORDINALS = ["First", "Second", "Third", "Fourth", "Fifth",
            "Sixth", "Seventh", "Eighth", "Ninth", "Tenth"]
chars = [input("Enter %s Character >" % word) for word in ORDINALS]

time.sleep(1)
os.system("clear")


def show(indices):
    """Pause two seconds, then print the entered characters in the given order."""
    os.system("sleep 2")
    print("".join(chars[i] for i in indices))


# All ten clockwise rotations of the entered characters (the original script
# spelled each one out by hand).
for shift in range(10):
    show([(shift + i) % 10 for i in range(10)])

# Extra mixing patterns from the original script, expressed as index orders.
show([0, 2, 4, 6, 8, 1, 3, 5, 7, 9])
show(list(range(9, -1, -1)))
show([1, 3, 5, 7, 9, 0, 2, 4, 6, 8])
show([0, 9, 1, 8, 2, 7, 3, 6, 4, 5])

time.sleep(1)
i = input("Press Enter to quit...")
if i == "":
    time.sleep(2)
    os.system("clear")
    print(" #######################")
    print(" # Thanks For using #")
    print(" # Like It Please 🙏.. #")
    print(" #######################")
    print()
| 2,518 | 922 |
# ------------------------------
# 396. Rotate Function
#
# Description:
# Given an array of integers A and let n to be its length.
#
# Assume Bk to be an array obtained by rotating the array A k positions clock-wise,
# we define a "rotation function" F on A as follow:
#
# F(k) = 0 * Bk[0] + 1 * Bk[1] + ... + (n-1) * Bk[n-1].
#
# Calculate the maximum value of F(0), F(1), ..., F(n-1).
#
# Note:
# n is guaranteed to be less than 105.
#
# Example:
# A = [4, 3, 2, 6]
#
# F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
# F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
# F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
# F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
#
# So the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.
#
# Version: 1.0
# 10/09/19 by Jianfa
# ------------------------------
class Solution:
    def maxRotateFunction(self, A: List[int]) -> int:
        """Return the maximum of F(k) over all clockwise rotations of A.

        F(k) = sum(i * Bk[i]) where Bk is A rotated k positions clockwise.
        Uses the recurrence F(k) = F(k-1) + sum(A) - n * A[n-k], giving an
        O(n)-time, O(1)-extra-space scan.
        """
        if not A:
            return 0
        n = len(A)
        total = sum(A)
        current = sum(i * v for i, v in enumerate(A))  # F(0)
        best = current
        for k in range(1, n):
            current += total - n * A[n - k]
            best = max(best, current)
        return best
# Used for testing: instantiate the solution for ad-hoc experiments when the
# file is run directly (no assertions are executed here).
if __name__ == "__main__":
    test = Solution()
# ------------------------------
# Summary:
# Math solution, O(3n) complexity
# F(k) - F(k-1) = summ - n * A[n-k]
# F(k) = F(k-1) + summ - n * A[n-k] | 1,725 | 762 |
#
# @lc app=leetcode id=932 lang=python3
#
# [932] Beautiful Array
#
# https://leetcode.com/problems/beautiful-array/description/
#
# algorithms
# Medium (61.61%)
# Likes: 650
# Dislikes: 933
# Total Accepted: 29K
# Total Submissions: 46K
# Testcase Example: '4'
#
# An array nums of length n is beautiful if:
#
#
# nums is a permutation of the integers in the range [1, n].
# For every 0 <= i < j < n, there is no index k with i < k < j where 2 *
# nums[k] == nums[i] + nums[j].
#
#
# Given the integer n, return any beautiful array nums of length n. There will
# be at least one valid answer for the given n.
#
#
# Example 1:
# Input: n = 4
# Output: [2,1,4,3]
# Example 2:
# Input: n = 5
# Output: [3,1,2,5,4]
#
#
# Constraints:
#
#
# 1 <= n <= 1000
#
#
#
# @lc code=start
class Solution:
    def beautifulArray(self, n: int) -> List[int]:
        """Iteratively double a beautiful array.

        If `res` is beautiful, then (2*res - 1) + (2*res) is beautiful too:
        the left half is all odd, the right half all even, so no arithmetic
        triple can straddle the halves.
        """
        beautiful = [1]
        while len(beautiful) < n:
            odds = [2 * v - 1 for v in beautiful]
            evens = [2 * v for v in beautiful]
            beautiful = odds + evens
        return [v for v in beautiful if v <= n]

    def beautifulArrayRecursion(self, n: int) -> List[int]:
        """Divide-and-conquer variant: odds from ceil(n/2), evens from n//2."""
        if n == 1:
            return [1]
        odd_part = [2 * v - 1 for v in self.beautifulArrayRecursion((n + 1) // 2)]
        even_part = [2 * v for v in self.beautifulArrayRecursion(n // 2)]
        return [v for v in odd_part + even_part if v <= n]

    def beautifulArrayBinary(self, n: int) -> List[int]:
        """Sort 1..n by the reversed binary representation of each value."""
        return sorted(range(1, n + 1), key=lambda value: bin(value)[:1:-1])
# @lc code=end
| 1,482 | 611 |
from konlpy.tag import Mecab
from gensim.models.word2vec import Word2Vec
from W2VData import tokenize, W2VData
import logging
# Log training progress (gensim reports via the logging module).
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# Tokenization options passed to W2VData (presumably: append POS tags and
# lowercase tokens — confirm against W2VData's implementation).
ADD_POS = True
LOWER = True
# Word2Vec hyperparameters: per gensim's documentation, sg=0 selects CBOW
# (1 would be skip-gram); `iter` is the number of training epochs.
SG = 0
ITER = 10
MIN_COUNT = 3     # ignore tokens rarer than this
EMBED_SIZE = 100  # embedding dimensionality (gensim `size`)
VOCAB_SIZE = 10000

tokenizer = Mecab()
sentences = W2VData('./data/nsmc/ratings_all.txt', tokenizer, tokenize, ADD_POS, LOWER)

# train
model = Word2Vec(sentences = sentences, size = EMBED_SIZE, window = 5, min_count = MIN_COUNT, max_vocab_size = VOCAB_SIZE, max_final_vocab = VOCAB_SIZE, workers = 4, sg = SG, iter = ITER)

# save lots of memory (not trainable)
model.init_sims(replace = True)

# save KeyedVectors (small and fast load, not trainable) instead of full model
model.wv.save("./model/word2vec_sg%s_mc%s_es%s_vc%s_pos_lower.kv" % (SG, MIN_COUNT, EMBED_SIZE, len(model.wv.vocab)))
| 892 | 372 |
class NicBotException(Exception):
    """Base class for this package's exceptions."""
    pass
class UpdateNewFile(NicBotException):
    """Package-specific error related to updating a new file.

    NOTE(review): the precise condition is defined by the raising call
    sites, which are not visible here — confirm before documenting further.
    """
    pass
| 94 | 33 |
# encoding: utf-8
import sys
sys.path.append(r'./Ind')
from IndStatePlotter import IndStatePlotter
from IndTypes import IndType
from ModellingMode import ModellingMode
import Meta as cc
from Ind import IndParser
from Ind import IndStateAnalyzer as Isa
# from IndStateAnalyzer import IndStateAnalyzer
DEFAULT_MODE = ModellingMode.FIRST_SECOND


# PEP 8 (E731): named `def`s instead of lambda assignments so tracebacks show
# useful function names; names and signatures are unchanged.
def drawChart(pdf, stateCode):
    """Draw the basic chart for a state's series."""
    return IndStatePlotter.basicChart(pdf, stateCode)


def lorentz(isa, df, stateCode):
    """Fit the Lorentzian model (`df` unused, kept for a uniform signature)."""
    return fitter(stateCode, isa.lorentzianModel)


def poly(isa, df, stateCode):
    """Fit the polynomial model (`df` unused, kept for a uniform signature)."""
    return fitter(stateCode, isa.polyModel)


def exp(isa, df, stateCode):
    """Fit the exponential model (`df` unused, kept for a uniform signature)."""
    return fitter(stateCode, isa.expModel)


def gauss(isa, df, stateCode):
    """Fit the Gaussian model (`df` unused, kept for a uniform signature)."""
    return fitter(stateCode, isa.gaussianModel)
def model(stateCode, fittingFunc=poly, mode=DEFAULT_MODE):
    """Chart a state's series and fit the chosen curve model to it.

    fittingFunc: one of the fitter wrappers (poly, lorentz, exp, gauss).
    """
    [idf, sdf, isa] = ind(stateCode, mode=mode)
    IndStatePlotter.chartSingleSeries(sdf, stateCode)
    # The triple-quoted block below is intentionally dead code kept by the
    # author as a quick way to re-enable the multi-state chart.
    """
    IndStatePlotter.chartMultipleSeries(idf[idf['Status'].str.contains(IndType.CONFIRMED.value)],
                                        [*cc.IndStateAbbrMap.keys()])
    """
    fittingFunc(isa, sdf, stateCode)
    # IndStatePlotter.drawAllCharts()
def ind(stateCode, routine=None, mode=DEFAULT_MODE):
    """Fetch state-wise data and compute the smoothed confirmed-cases metric.

    routine: optional smoothing callable applied to the series; when None a
        5-sample moving average is used.
    Returns: [full dataframe, per-state metric dataframe, analyzer instance].
    """
    indParser = IndParser.IndParser()
    df = indParser.fetchStateWiseData()
    # Debug aids intentionally kept by the author as dead code:
    """
    print(df.columns)
    print(df.head(2))
    print(Meta.StateAbbrMap['UP'])
    """
    isa = Isa.IndStateAnalyzer(df, mode)
    print ('Running for ' + stateCode)
    if routine is None:
        # Default smoothing: 5-sample moving average
        pdf = isa.singleStateMetric(stateCode,
                                    IndType.CONFIRMED.value,
                                    lambda series : Isa.movavg(5, series))
    else:
        pdf = isa.singleStateMetric(stateCode,
                                    IndType.CONFIRMED.value,
                                    lambda series : routine(series))
    return [df, pdf, isa]
def csp(countryName, provinceName):
    """Run the cov19sir plotting pipeline for a country/province pair."""
    global pcs, snl # Just to make it easy when running via iPython
    # Imported lazily so the cov19sir dependency is only needed when used.
    from cov19sir import PlottingCs
    pcs = PlottingCs.PlottingCs()
    snl = pcs.loadOneCountry(countryName, provinceName)
    pcs.trendPeltEbf(snl, 7)
    pcs.defaultEstimate(snl, 10, '8th')
    return snl
def indChart(smoothingFunc, stateCode):
    """Fetch a state's data, smooth its series, and draw the basic chart."""
    idf, sdf, isa = ind(stateCode)
    drawChart(smoothingFunc(sdf), stateCode)
    return [idf, sdf, isa]
def fitter(stateCode, fitterFunction):
    """Run `fitterFunction` for the state and plot the fitted model plus a forecast."""
    params, model, result = fitterFunction(stateCode)
    IndStatePlotter.chartLmfitModel(result)
    IndStatePlotter.predict(model, params)
if __name__ == '__main__':
    province = None
    # Alternative runs kept by the author for quick switching:
    # country = 'USA'
    country = 'IN'
    # country = 'UK'
    # province = 'KL'
    # csp(cc.longName(country), cc.inStateName(province))
    model('UP')
    print ('Done')
## To run it in an interactive Python Shell
## exec(open('main.py').read())
## OR
## In IPython
## import main as m
## [idf, sdf] = m.indChart(lambda x: x.tail(60), 'KL')
## OR for the lorentzianModel() :: [idf, sdf] = m.indChart(lambda x: x.tail(60), 'KL')
"""
https://stackoverflow.com/questions/3433486/how-to-do-exponential-and-logarithmic-curve-fitting-in-python-i-found-only-poly
https://lmfit.github.io/lmfit-py/builtin_models.html
"""
| 3,338 | 1,121 |
'''
Copyright (c) 2022 Marco Diniz Sousa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
'''
import unittest
from antlr4_verilog import InputStream, CommonTokenStream, ParseTreeWalker
from antlr4_verilog.verilog import VerilogLexer, VerilogParser, VerilogParserListener
from antlr4_verilog.systemverilog import SystemVerilogLexer, SystemVerilogParser, SystemVerilogParserListener
class TestVerilog(unittest.TestCase):
    """Listener-based checks against the classic Verilog grammar."""

    def setUp(self):
        # Minimal half-adder design shared by all tests in this class.
        design = '''
            module ha(a, b, sum, c);
                input a, b;
                output sum, c;

                assign sum = a ^ b;
                assign c = a & b;
            endmodule
            '''
        token_stream = CommonTokenStream(VerilogLexer(InputStream(design)))
        self.tree = VerilogParser(token_stream).source_text()
        self.walker = ParseTreeWalker()

    def test_module_identifier(self):
        """The parsed module should be named 'ha'."""
        class _IdentifierCollector(VerilogParserListener):
            def exitModule_declaration(self, ctx):
                self.identifier = ctx.module_identifier().getText()

        collector = _IdentifierCollector()
        self.walker.walk(collector, self.tree)
        self.assertEqual(collector.identifier, 'ha')

    def test_module_inputs(self):
        """All declared input ports should be collected in source order."""
        class _InputCollector(VerilogParserListener):
            def __init__(self):
                self.declarations = []

            def exitInput_declaration(self, ctx):
                ports = ctx.list_of_port_identifiers().getChildren()
                self.declarations.extend(
                    port.identifier().getText() for port in ports
                    if isinstance(port, VerilogParser.Port_identifierContext))

        collector = _InputCollector()
        self.walker.walk(collector, self.tree)
        self.assertEqual(collector.declarations, ['a', 'b'])
class TestSystemVerilog(unittest.TestCase):
    """Listener-based checks against the SystemVerilog grammar."""

    def setUp(self):
        # Minimal design exercising a string variable declaration and a
        # system task ($display) call.
        design = '''
            module hello;
                string s = "Hello";
                initial begin
                    $display("%s", s);
                end
            endmodule
            '''
        lexer = SystemVerilogLexer(InputStream(design))
        stream = CommonTokenStream(lexer)
        parser = SystemVerilogParser(stream)
        self.tree = parser.source_text()
        self.walker = ParseTreeWalker()

    def test_module_identifier(self):
        # The SystemVerilog grammar nests the identifier under the ANSI header.
        class ModuleIdentifierListener(SystemVerilogParserListener):
            def exitModule_declaration(self, ctx):
                self.identifier = ctx.module_ansi_header().module_identifier().getText()

        listener = ModuleIdentifierListener()
        self.walker.walk(listener, self.tree)
        self.assertEqual(listener.identifier, 'hello')

    def test_variable_assignment(self):
        # Capture both sides of the `string s = "Hello";` declaration.
        class VariableAssignmentListener(SystemVerilogParserListener):
            def exitVariable_decl_assignment(self, ctx):
                self.identifier = ctx.variable_identifier().getText()
                self.expression = ctx.expression().getText()

        listener = VariableAssignmentListener()
        self.walker.walk(listener, self.tree)
        self.assertEqual(listener.identifier, 's')
        self.assertEqual(listener.expression, '"Hello"')

    def test_system_task(self):
        # The $display call should surface as a system task/function call.
        class SystemTaskListener(SystemVerilogParserListener):
            def exitSystem_tf_call(self, ctx):
                self.identifier = ctx.system_tf_identifier().getText()

        listener = SystemTaskListener()
        self.walker.walk(listener, self.tree)
        self.assertEqual(listener.identifier, '$display')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 4,203 | 1,080 |
# -*- coding: utf-8 -*-
"""
Test module for User endpoint responses
"""
__author__ = "@canimus"
__license__ = "MIT"
__revision__ = "1.0"
import unittest
from test.ep.utils import ApiRequest as http_client
class FileEndPointTest(unittest.TestCase):
    '''Files endpoint tests'''

    def test_file_upload(self):
        """
        Upload a text valid file
        """
        # POST to the files endpoint; the DSL string appears to encode a form
        # field ("user_id=1") and a file attachment ("file@<path>") — confirm
        # against ApiRequest's implementation.
        # NOTE(review): the absolute path below is machine-specific, so this
        # test only passes on hosts with that exact checkout location.
        file_uploaded = http_client().post(":5000/files", "user_id=1 file@/sw/apps2/qualibrate-api/LICENSE")
        # The service is expected to report the LICENSE file as plain text.
        self.assertTrue(file_uploaded['mime'] == 'text/plain')
| 543 | 189 |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: QuickSelect
Description :
Author : zdf
date: 2018/9/26
-------------------------------------------------
Change Activity:
2018/9/26:13:11
-------------------------------------------------
"""
def partition(test, left, right, mid):
    """Partition test[left..right] in place around the value at index mid.

    Moves the pivot to its final sorted position and returns that index:
    afterwards every element left of the returned index is <= pivot and
    every element at or right of it is >= pivot.

    Bug fixed: the previous version returned the untouched ``mid`` index,
    which is not in general the pivot's final position (e.g. for
    [5, 1, 4, 2, 3] it returned index 2 while the pivot 4 ended at index 3),
    so quickselect could report a wrong order statistic.
    """
    pivot = test[mid]
    # Move the pivot out of the way, to the end of the range
    test[mid], test[right] = test[right], test[mid]
    store = left
    for i in range(left, right):
        if test[i] < pivot:
            test[i], test[store] = test[store], test[i]
            store += 1
    # Put the pivot into its final position and report it
    test[store], test[right] = test[right], test[store]
    return store
def quickselect(test, left, right, k):
    """Return the k-th (0-based) order statistic of test[left..right].

    Partially reorders `test` in place; relies on partition() to place a
    pivot and recurses into the side that still contains index k.
    """
    if left == right:
        return test[left]
    pivot_index = partition(test, left, right, (left + right) // 2)
    if k == pivot_index:
        return test[k]
    elif k < pivot_index:
        return quickselect(test, left, pivot_index - 1, k)
    else:
        return quickselect(test, pivot_index + 1, right, k)
if __name__ == "__main__":
    test = [1, 4, 2, 3.6, -1, 0, 25, -34, 8, 9, 1, 0]
    print("Sorted list:", sorted(test))
    # Bug fixed: k is a 0-based rank, but the old loop ran k = 1..11 and
    # labelled rank k as the "k th" smallest — it skipped the minimum and
    # was off by one in every printed line.
    for k in range(len(test)):
        print("The", k + 1, "th smallest number is",
              quickselect(test, 0, len(test) - 1, k))
| 1,286 | 433 |
import os
import io
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models,transforms
from PIL import Image
import time
from flask import jsonify
import logging
logging.basicConfig(level=logging.INFO)
# lazy global
device = None
model = None
imagenet_class_index = None
def img_to_tensor(image_bytes):
    """Converts a byte array to a torch.Tensor with inference transforms.

    Args:
    -----
    image_bytes: bytes
        input image as raw bytes
    Returns:
    --------
    img_tensor: torch.Tensor
        1x3x224x224 normalized tensor on the global ``device``
    """
    # Standard ImageNet normalization constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    # Force 3-channel RGB: grayscale or RGBA uploads would otherwise fail
    # (or mis-broadcast) inside the 3-channel Normalize step.
    img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    img_tensor = transform(img)
    # Add the batch dimension the model expects.
    img_tensor = img_tensor.unsqueeze(0)
    return img_tensor.to(device)
def get_prediction(image_bytes):
    """Run the globally loaded model on raw image bytes.

    Args:
    -----
    image_bytes: bytes
        raw image bytes received via POST
    Returns:
    --------
    (class_id, class_name, prob): the ImageNet id/name of the top class
    looked up in ``imagenet_class_index`` plus its softmax confidence.
    """
    batch = img_to_tensor(image_bytes=image_bytes)
    scores = F.softmax(model.forward(batch), dim=1)
    confidence, top = scores.max(1)
    idx_key = str(top.item())
    class_id, class_name = imagenet_class_index[idx_key]
    return class_id, class_name, confidence.item()
def handler(request):
    """Entry point for cloud function.

    Lazily initializes the device, model, and class-index map on first
    call (cold start), then classifies the uploaded image.

    Args:
    -----
    request: Flask.request
        contains incoming data via HTTP POST (multipart field 'file')
    Return:
    -------
    inference results as Flask.jsonify object, or a plain-text prompt
    when no image is supplied
    """
    global device, model, imagenet_class_index
    if device is None:
        logging.info("device created")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    if model is None:
        logging.info("creating resnet18 model")
        model = models.resnet18(pretrained=True)
        model.eval()  # inference mode: disable dropout/batchnorm updates
        model.to(device)
    if imagenet_class_index is None:
        logging.info("loading imagenet class names ")
        imagenet_class_index = json.load(open('imagenet_class_index.json'))
    if request.method=='POST':
        logging.info("postrequest received")
        # .get avoids a KeyError (HTTP 500) when the field is missing.
        file = request.files.get('file')
        if file is None:
            return "Please specify image"
        img_bytes = file.read()
        class_id, class_name, prob = get_prediction(image_bytes=img_bytes)
        # 'prob' was computed but silently dropped before; include it.
        return jsonify({'class_id': class_id, 'class_name': class_name,
                        'prob': prob})
    else:
        return "Please specify image"
| 3,009 | 926 |
import random
import string
from datetime import date
from django.db import models
from django.core.validators import validate_email
from django.core.validators import validate_slug
from django.core.validators import validate_unicode_slug
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from utils.validators.identity.validator import IdentityValidator
User = get_user_model()
def _random_slug():
    """Return a fresh random 60-character slug.

    Passed as a *callable* default so Django evaluates it per new
    instance.  The original inlined the join expression, which Django
    evaluates once at import time — every new Organization then shared
    the same default slug and collided with the unique=True constraint
    after the first save.
    """
    return ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(60))


class Organization(models.Model):
    """Organization Model definitions"""
    owner = models.ForeignKey(
        User,
        verbose_name=_('Owner'),
        related_name="organizations",
        on_delete=models.DO_NOTHING
    )
    cnpj = models.CharField(
        verbose_name='CNPJ',
        blank=True, null=True,
        max_length=30, unique=True,
        validators=[IdentityValidator()]
    )
    name = models.CharField(
        unique=True,
        verbose_name='Nome da Organização',
        max_length=150, blank=False, null=False,
    )
    intro = models.CharField(
        max_length=255, verbose_name='Apresentação',
        blank=False, null=False
    )
    about = models.TextField(
        verbose_name='Sobre a Organização',
        blank=False, null=False
    )
    founder = models.CharField(
        max_length=150, blank=False, null=False,
        verbose_name='Fundador',
    )
    founded_at = models.IntegerField(
        null=False, blank=False, verbose_name='Desde',
        help_text='Ano em que a Organização foi fundada.',
        # Choices are built once at import; valid years 1900..current.
        choices=[(i, i) for i in range(1900, date.today().year + 1)],
    )
    causes = models.ManyToManyField(
        'core.Cause', blank=True,
        verbose_name='Causas',
        related_name='organization_causes'
    )
    address = models.OneToOneField(
        'core.Address', blank=True, null=True,
        verbose_name='Endereço',
        on_delete=models.DO_NOTHING,
    )
    website = models.URLField(
        blank=True, null=True,
        verbose_name='Web Site',
        help_text='Digite a URL completa do web site.'
    )
    email = models.EmailField(
        blank=True, null=True,
        validators=[validate_email],
        verbose_name='E-mail para Contatos',
        help_text='Digite um e-mail válido.'
    )
    members = models.ManyToManyField(
        'core.Member',
        verbose_name='Membros',
        blank=True
    )
    social_networks = models.ManyToManyField(
        'core.SocialNetwork',
        # Was 'Membros' — a copy/paste of the members field's label.
        verbose_name='Redes Sociais',
        blank=True
    )
    slug = models.SlugField(
        verbose_name='Slug', max_length=60,
        help_text='URL de exibição da ONG.',
        unique=True, null=False, blank=False,
        auto_created=True, allow_unicode=False,
        validators=[validate_slug, validate_unicode_slug],
        default=_random_slug
    )
    logo = models.ForeignKey(
        'core.ImageUploader',
        on_delete=models.DO_NOTHING, null=True,
        verbose_name='Logomarca da ONG',
        related_name='logomarca_ong'
    )
    cover_image = models.ForeignKey(
        'core.ImageUploader',
        on_delete=models.DO_NOTHING, null=True,
        verbose_name='Imagem da Capa',
        related_name='coverimage_ong'
    )
    # Date
    # NOTE: created_at/updated_at were declared twice in the original
    # class body; only the later pair survived (class-body dict overwrite),
    # so the duplicate first pair was removed.
    created_at = models.DateTimeField(
        verbose_name='Criado em', auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        verbose_name='Editado em', auto_now=True
    )
| 3,686 | 1,142 |
import os
import pytest
files = ['test1.yaml', 'test2.yaml', 'test3.yaml']
@pytest.fixture(scope='module')
def desired_config_files():
    """Absolute paths of the expected YAML config files under ./data_test."""
    base = os.path.join(os.getcwd(), 'data_test')
    return [os.path.join(base, name) for name in files]
# One expected sub-config per test file, index-aligned with `files`.
params_configs = [
    {
        'mail_server': {
            'host': 'example11.securevison.intra.net',
            'port': '28031'
        }
    },
    {
        'kafka': {
            'bootstrap_servers': 'example22.securevision.intra.net'
        }
    },
    {
        'sql_alchemy': {
            'host': 'example33.securevision.intra.net',
            'port': '27017',
            'user': 'test',
            'password': 'test'
        }
    }
]
# Pair each filename with its expected config.  zip() already yields
# tuples, so the original list(tuple(zip(...))) double-wrapping was
# redundant (tuple() merely materialized the iterator before list()).
params_files_configs = list(zip(files, params_configs))
@pytest.fixture(scope='module', params=params_files_configs)
def desired_configs(request):
    """Yield one (filename, expected-config) pair per parametrized test run."""
    return request.param
@pytest.fixture(scope='module')
def whole_config():
    """All per-file expected configs merged into a single dict."""
    return {key: value
            for conf in params_configs
            for key, value in conf.items()}
| 988 | 348 |
import logging
from src.config import get_env_var, get_yaml_file
from src.discord.bot import DiscordBot
from src.service import AppService
if __name__ == '__main__':
    # Logging must be configured before anything else logs.
    base_config = get_yaml_file('base')
    logging.basicConfig(**base_config['logging'])
    try:
        logging.info('Starting Bot')
        discord_config = get_yaml_file('discord')
        token = get_env_var('DISCORD_TOKEN')
        # run() blocks until the bot disconnects or the process is interrupted.
        DiscordBot(discord_config, AppService()).run(token)
    except KeyboardInterrupt:
        logging.info('Stop keys detected')
    except Exception as err:
        # Broad catch is deliberate at this top-level boundary: log and fall through.
        logging.exception(err)
    finally:
        logging.info('Stopping Bot')
| 643 | 198 |
"""
Views:
- `sign_up (/signup)`: Register a new user in the system
- `login (/login)`: Sign in to an existing account
- `logout (/logout)`: Sing out from current account
- `profile (/profile)`: User profile with functions:
- Shows user vacancies
- Create a new vacancy
"""
# pylint: disable=ungrouped-imports
# pylint: disable=logging-fstring-interpolation
import json
import logging
from flask import Blueprint, request, redirect, url_for, render_template, flash
from flask_login import login_user, logout_user, login_required, current_user
from flask_admin.contrib.sqla import ModelView
from app.service.auth_service import util_signup, util_login
from app.service.vacancy_service import VacancyService
from app.service.user_service import UserService
from app.configs.config import TestBaseConfig
auth_view = Blueprint("auth", __name__)
@auth_view.route("/signup", methods=["GET", "POST"])
def sign_up():
    """
    Registration account
    :return: rendered template
    """
    if request.method == "POST":
        logging.info("User POST data through registration form")
        new_user = util_signup(
            request.form.get("emailAddress"),
            request.form.get("password1"),
            request.form.get("password2"),
        )
        if new_user:
            login_user(new_user)
            return redirect(url_for("base.home"))
        flash("Problem with registration", category='error')
    # GET, or a failed POST, falls through to the registration form.
    return render_template("user/registration.html")
@auth_view.route("/login", methods=["GET", "POST"])
def login():
    """
    Log in account
    :return: rendered template
    """
    if request.method == "POST":
        logging.info("User POST data through login form")
        ok, user_error_msg, account = util_login(
            request.form.get("emailAddress"),
            request.form.get("password"),
        )
        if ok:
            logging.info("Login user")
            login_user(account, remember=True)
            return redirect(url_for("auth.profile"))
        flash(f"Problem - {user_error_msg}", category='error')
    # GET, or a failed POST, falls through to the login form.
    return render_template("user/login.html")
@auth_view.route("/logout", methods=["GET"])
@login_required
def logout():
    """
    Sing out
    :return: rendered template
    """
    logging.info("Logout user")
    logout_user()
    # Back to the public home page after ending the session.
    return redirect(url_for("base.home"))
@auth_view.route("/profile", methods=["GET", "DELETE"])
@login_required
def profile():
    """
    Profile of the user with his vacancies and the ability to create new ones
    Can delete own vacancies
    :return: rendered template
    """
    if request.method == "DELETE":
        payload = json.loads(request.data)
        logging.info(f"Deleted data - {payload['name']}")
        if payload["name"]:
            VacancyService.delete_vacancy_by_name_user(payload["name"], current_user.id)
        return "Deleted"
    logging.info("User open profile")
    account = UserService.find_user_by_email(current_user.email)
    return render_template(
        "user/profile.html",
        user=current_user,
        exists_vacancies=account.vacancies,
    )
class JobAdminModelView(ModelView):
    """
    User admin check
    """

    def is_accessible(self):
        """
        Check whether the user is registered and logged in as an administrator
        """
        return (current_user.is_authenticated
                and current_user.email == TestBaseConfig.ADMIN_MAIL)

    def inaccessible_callback(self, name, **kwargs):
        """
        Redirects to the registration page
        """
        return redirect(url_for('auth.login'))
| 3,691 | 1,067 |
import numpy as np
import pandas as pd
import lightgbm as lgb
from pathlib import Path
from functools import reduce
from sklearn.metrics import roc_auc_score
import hyperopt
from hyperopt import STATUS_OK, Trials, hp, space_eval, tpe
import utils, feature_selector, preprocess
# Pipeline-wide knobs shared by every stage below.
global_params = {
    'CK_var_threshold': 1,            # variance filter threshold for the CK dataset
    'NASA_var_threshold': 30,         # variance filter threshold for the NASA dataset
    'na_threshold': 0.5,              # drop columns with more than this NA fraction
    'col_threshold': 0.98,            # collinearity cutoff for remove_collinear_col
    'remove_collinear_threshold': 700,  # skip collinearity removal above this many rows
    'lgb_num_round': 200,             # max boosting rounds per training run
    'lgb_early_stop_rounds': 100,     # early-stopping patience
    'hyperopt_rounds': 50,            # hyperopt fmin evaluations
    'model_num': 15,                  # ensemble size (incl. final full-data model)
    'hyperopt_per_mode': False,       # re-tune hyperparams per ensemble member
    'remove_non_label_cols': True,    # passed to preprocess.process_extra_label
    'last_model_weight': 8,           # scale factor for the final model's predictions
    'global_metric':'auc'             # metric key used for lightgbm/hyperopt scoring
}
def train_val_split(X_train, y_train, random_seed):
    """Stratified 70/30 train/validation split on the -1/+1 label.

    Samples 30% of each label class (deterministically, via random_seed)
    into the validation set and keeps the remainder for training.
    Returns (X_trn, y_trn, X_val, y_val) aligned on index.
    """
    neg_sample = y_train[y_train == -1].sample(frac=0.3, random_state=random_seed)
    pos_sample = y_train[y_train == 1].sample(frac=0.3, random_state=random_seed)
    y_val = pd.concat([neg_sample, pos_sample])
    y_trn = y_train.drop(y_val.index)
    X_trn = X_train.loc[y_trn.index, :]
    X_val = X_train.loc[y_val.index, :]
    return X_trn, y_trn, X_val, y_val
def train_lightgbm(params, X_trn, y_trn, X_val, y_val): # , test_data, test_label):
    """Train one booster with early stopping on (X_val, y_val).

    NOTE(review): the ``early_stopping_rounds``/``verbose_eval`` keyword
    arguments were removed from lgb.train in LightGBM 4.x — confirm the
    pinned LightGBM version still supports them.
    """
    train_data = lgb.Dataset(X_trn, label=y_trn)
    val_data = lgb.Dataset(X_val, label=y_val)
    model = lgb.train(params, train_data, global_params['lgb_num_round'], val_data,
                      early_stopping_rounds=global_params['lgb_early_stop_rounds'],
                      verbose_eval=100)
    return model
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, X_val, y_val):
    """Tune learning_rate and num_leaves with hyperopt TPE.

    Returns a 3-tuple:
      - full lightgbm param dict (fixed params merged with the best trial),
      - the union of features that had zero gain importance across trials,
      - the best trial's best_iteration (for sizing the final model).
    """
    ## fixed lightgbm params
    params = {
        "objective": "binary",
        "metric": global_params['global_metric'],
        "verbosity": -1,
        "seed": 1,
        "num_threads": 4,
        "feature_fraction": .6,
        "bagging_fraction": 0.8,
        "bagging_freq": 5,
        "reg_alpha": 0.1,
        "reg_lambda": 0.1,
        # "learning_rate": 0.1,
        # "num_leaves": 32
    }
    ## space for var_threshold search
    # NOTE(review): space1 and var_series are defined but never used below.
    space1 = hp.choice('var_threshold', np.linspace(0, 20, 15, dtype=int))
    ## space for lightgbm hyperparam search
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.2)),
        # "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(16, 64, 4, dtype=int)),
        # "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 1, dtype=int)),
        # "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        # "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        # "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
        # "scale_pos_weight": hp.uniform('x', 0, 5),
    }
    var_series = X_train.var()

    def objective(hyperparams):
        """One hyperopt trial: train, score, and record zero-gain features."""
        # X_trn=X_train.loc[:,var_series>hyperparams]
        # X_trn = X_train
        # X_trn, y_trn, X_val, y_val = train_val_split(X_trn, y_train,random_seed)
        model = train_lightgbm({**params, **hyperparams}, X_train, y_train, X_val, y_val)
        # model=train_lightgbm(params,X_trn,y_trn,X_val,y_val)
        score = model.best_score["valid_0"][global_params['global_metric']]
        to_drop = X_train.columns[model.feature_importance('gain') == 0]
        print(f'to drop:{len(to_drop)}')
        # in classification, less is better
        return {'loss': -score, 'status': STATUS_OK, "drop_feature": to_drop, "best_iter": model.best_iteration}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=global_params['hyperopt_rounds'], verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    print(f"hyperopt auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    # Union of zero-gain features over every trial.
    drop_feature = \
        reduce(lambda r1, r2: {'drop_feature': r1['drop_feature'].union(r2['drop_feature'])}, trials.results)[
            'drop_feature']
    print(f'drop features:{len(drop_feature)}')
    return {**params, **hyperparams}, drop_feature, trials.best_trial['result']['best_iter']
def train_all_data(hyperparams, best_num_round, X_train, y_train):
    """Train a final booster on the full training set (no validation/early stop)."""
    train_data = lgb.Dataset(X_train, label=y_train)
    model = lgb.train(hyperparams, train_data, best_num_round, verbose_eval=100)
    return model
def lightgbm_predict(models, X_test, y_test):
    """Average the ensemble's predictions and return the test ROC AUC.

    NOTE(review): the last model's column is scaled by last_model_weight,
    but the row mean still divides by the plain column count, so this is
    not a normalized weighted average — confirm that is intended.
    """
    res_dict = {}
    for index, model in enumerate(models):
        ypred = model.predict(X_test)
        res_dict[f'model_{index}'] = ypred
        print(f'model_{index} predict finished')
    res_df = pd.DataFrame(res_dict)
    # Up-weight the final full-data model (last column).
    res_df.iloc[:, -1] = res_df.iloc[:, -1]*global_params['last_model_weight']
    return roc_auc_score(y_test, res_df.mean(axis=1))
def test_dataset(dataset_name, train_file_path, test_file_path):
    """Full pipeline for one train/test split.

    Loads both CSVs, runs feature selection, tunes hyperparameters,
    trains an ensemble plus a final full-data model, and returns the
    test-set AUC.  'l' is the label column produced by preprocessing.
    """
    df = pd.read_csv(str(train_file_path))
    df = preprocess.process_extra_label(df, global_params['remove_non_label_cols'])
    print('finish reading train data')
    df_test = pd.read_csv(str(test_file_path))
    df_test = preprocess.process_extra_label(df_test, global_params['remove_non_label_cols'])
    print('finish reading test data')
    df_test_data = df_test.drop(columns=['l'])
    df_test_label = df_test['l']
    # df.isnull().sum().any()
    X_train = df.drop(columns=['l'])
    y_train = df['l']
    y_trn = y_train
    # Feature selection: NA-heavy, constant, then low-variance columns.
    X_trn = feature_selector.remove_many_na_col(X_train, global_params['na_threshold'])
    print('finish remove na cols')
    X_trn = feature_selector.remove_single_unique(X_trn)
    print('finish remove single unique cols')
    X_trn = feature_selector.remove_small_variance(X_trn, global_params[f'{dataset_name}_var_threshold'])
    print('finish remove small var cols')
    # Collinearity removal is expensive; only run it on small tables.
    if len(X_trn.index) < global_params['remove_collinear_threshold']:
        X_trn = feature_selector.remove_collinear_col(X_trn, global_params['col_threshold'])
        print('finish remove collinear cols')
    X_trn, y_trn, X_val, y_val = train_val_split(X_trn, y_trn, 0)
    print('finish split data')
    hyperparams, drop_features, best_num_round = hyperopt_lightgbm(X_trn, y_trn, X_val, y_val)
    print(f'drop_features: {drop_features}')
    X_trn = X_trn.drop(columns=drop_features)
    X_val = X_val.drop(columns=drop_features)
    # Greedy backward elimination on the surviving features.
    to_drop=feature_importance_iter(hyperparams,X_trn,y_trn,X_val,y_val)
    X_trn=X_trn.drop(columns=to_drop)
    print(f'X_trn columns:{X_trn.columns}')
    print(f'X_trn columns:{len(X_trn.columns)}')
    models, num_round_list = train_multiple_models(hyperparams, best_num_round, X_trn, y_trn)
    num_round_list.append(best_num_round)
    # Final model trains on all data for the mean of the observed best iterations.
    final_model = train_all_data(hyperparams, int(np.mean(num_round_list)), X_trn, y_trn)
    #final_model = train_all_data(hyperparams, global_params['lgb_num_round'], X_trn, y_trn)
    print("train all data finished")
    models.append(final_model)
    # Test features are restricted to the columns the ensemble was trained on.
    return lightgbm_predict(models, df_test_data.loc[:, X_trn.columns], df_test_label)
def train_multiple_models(hyperparams, num_rounds, X_train, y_train):
    """Train model_num - 1 boosters on different random train/val splits.

    Returns the models plus each one's best_iteration (used later to
    size the final full-data model).  ``num_rounds`` is currently unused.
    """
    models = []
    num_round_list = []
    for i in range(0, global_params['model_num'] - 1):
        # Seed the split with the loop index so each member sees different data.
        X_trn, y_trn, X_val, y_val = train_val_split(X_train, y_train, i)
        if global_params['hyperopt_per_mode']:
            # Optional per-member hyperparameter re-search (off by default).
            hyperparams, drop_features, best_num_round = hyperopt_lightgbm(X_trn, y_trn, X_val, y_val)
        model = train_lightgbm(hyperparams, X_trn, y_trn, X_val, y_val)
        num_round_list.append(model.best_iteration)
        models.append(model)
        print(f"Train model_{i} finished")
    return models, num_round_list
def feature_importance_iter(hyperparams,X_trn,y_trn,X_val,y_val):
    """Greedy backward feature elimination by ascending gain importance.

    Repeatedly drops the least-important remaining feature while the
    validation score does not degrade; stops at the first drop that
    hurts.  X_trn/X_val are only rebound locally — callers must apply
    the returned list of dropped feature names themselves.
    """
    #X_trn,y_trn,X_val,y_val=train_val_split(X_train,y_train,0)
    model=train_lightgbm(hyperparams, X_trn,y_trn,X_val,y_val)
    best_score = model.best_score["valid_0"][global_params['global_metric']]
    importance_df=pd.DataFrame()
    importance_df['feature']=X_trn.columns
    importance_df['importance']=model.feature_importance('gain')
    # Ascending sort: least important candidates are tried first.
    importance_df=importance_df.sort_values('importance')
    to_drop=[]
    for row in importance_df.iterrows():
        X_trn=X_trn.drop(columns=[row[1]['feature']])
        X_val=X_val.drop(columns=[row[1]['feature']])
        model=train_lightgbm(hyperparams, X_trn,y_trn,X_val,y_val)
        score = model.best_score["valid_0"][global_params['global_metric']]
        if score>=best_score:
            # Dropping this feature did not hurt — keep it dropped.
            to_drop.append(row[1]['feature'])
            best_score=score
        else:
            break
    print(f'best_score: {best_score}')
    print(f'to_drop_imp_features: {to_drop}')
    return to_drop
def run_on_data(ds_name, percentage):
    """Evaluate every train/test split pair for one dataset and persist AUCs."""
    train_dir = Path(f'./data_split/{ds_name}/{ds_name}Train/{percentage}')
    test_dir = Path(f'./data_split/{ds_name}/{ds_name}Test/{percentage}')
    aucs = {}
    for train_file in train_dir.iterdir():
        matching_test = test_dir / train_file.name.replace('train', 'test')
        aucs[train_file.name] = test_dataset(ds_name, train_file, matching_test)
    print(aucs)
    utils.write_to_file(aucs, Path(f'./results/{ds_name}_{percentage}_results.json'))
@utils.debug_wrapper
def main():
    """Single-run entry point on the CK 30% 'all' split (kept for debugging)."""
    ds_name = 'CK'
    per = '30'
    train_path = Path(f'./data_split/{ds_name}/{ds_name}Train/{per}')
    test_path = Path(f'./data_split/{ds_name}/{ds_name}Test/{per}')
    train_file_path = train_path / 'alltrain.csv'
    test_file_path = test_path / 'alltest.csv'
    auc = test_dataset(ds_name, train_file_path, test_file_path)
    print(f'auc: {auc}')
#main()
# Batch evaluation across both datasets and all split percentages.
run_on_data('CK',10)
run_on_data('CK',20)
run_on_data('CK',30)
run_on_data('NASA',30)
run_on_data('NASA',20)
run_on_data('NASA',10)
| 9,821 | 3,820 |
import logging
# Expose the logging *module* under the name ``logger`` so importers can
# call logger.info(...) etc. as a module-level convenience alias.
logger = logging
logger.basicConfig(
    format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
# Raise the root logger to INFO (basicConfig above set only the format).
logger.getLogger().setLevel(logging.INFO)
| 174 | 65 |
from imdb import IMDB
from pascal_voc import PascalVOC
from coco import coco
from retina import retina
| 103 | 33 |
'''A python script for batch converting heic files.'''
import argparse
import os
import subprocess
import time
CMD = "heif-convert"
QUALITY = "-q"
QUALITY_ARG = "99"
AUX_FILE_SUFFIX = "-urn:com:apple:photo:2020:aux:hdrgainmap"
def parse_args() -> argparse.Namespace:
    """Parse the command-line flags controlling deletion and threading."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--daux", action="store_true", help="Delete aux files")
    parser.add_argument("--dorig", action="store_true", help="Delete original files")
    parser.add_argument("--mt", action="store_true", help="Run in multi-threaded mode")
    parser.add_argument("-workers", type=int, default=4, help="Number of workers to use in multi-threaded mode")
    return parser.parse_args()
def find_all_heics() -> list:
    '''Return all .heic/.HEIC files in the current directory.

    The original annotation claimed ``-> None`` although a list is
    returned.  Only the exact lower/upper-case extensions are matched,
    preserving the original behavior (mixed case like '.Heic' is not).
    '''
    # endswith accepts a tuple of suffixes — one call instead of an `or` chain.
    return [name for name in os.listdir('.')
            if name.endswith(('.heic', '.HEIC'))]
def convert_and_delete(file_name: str, delete_aux: bool = False, delete_orig: bool = False) -> None:
    '''Convert one HEIC file to JPG via heif-convert, optionally deleting
    the aux (HDR gain map) file and/or the original afterwards.'''
    base_file = file_name[:-5]
    original_file_extension = file_name[-5:]
    # create the new file
    new_file = base_file + ".JPG"
    args = [CMD, QUALITY, QUALITY_ARG, file_name, new_file]
    # subprocess.run drains stdout safely.  The original Popen.wait() with
    # stdout=PIPE can deadlock if the child fills the pipe buffer (see the
    # subprocess documentation's warning on wait()).
    result = subprocess.run(args, stdout=subprocess.PIPE)
    print(result.stdout.decode("utf-8"))
    if delete_aux:
        # delete the auxiliary file emitted alongside the conversion
        aux_file_name = base_file + AUX_FILE_SUFFIX + ".JPG"
        os.remove(aux_file_name)
        print(f"Deleted aux file: {aux_file_name}")
    if delete_orig:
        # delete the original file
        orig_file_name = base_file + original_file_extension
        os.remove(orig_file_name)
        print(f"Deleted original file: {orig_file_name}\n")
if __name__ == "__main__":
    start_time = time.time()
    args = parse_args()
    heics = find_all_heics()
    print(f'Found {len(heics)} heic files in this directory.')
    if args.mt:
        print("Running in multi-threaded mode.")
        # Imported lazily: only needed for the threaded path.
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=args.workers) as executor:
            for heic in heics:
                executor.submit(convert_and_delete, heic, args.daux, args.dorig)
        # The `with` block waits for all submitted conversions to finish.
    else:
        for file in heics:
            convert_and_delete(file, args.daux, args.dorig)
    end_time = time.time()
    print(f'Converted {len(heics)} heic files in {end_time - start_time} seconds.')
# Script to create www/assets/sprites/{box,stack}_*.png
# This file is NOT a part of the www/ folder and DOES NOT need to be deployed. It is used for development only.
from PIL import Image
SOURCE_DIRECTORY = "www/assets/matches gens_1-12"
DEST_DIRECTORY = "www/assets/sprites"
# Generate box_{i}.png and stack_{i}.png sprites for generations 1..12.
for i in range(1, 13):
    original_filename = SOURCE_DIRECTORY + f"/{i}MatchInside.png"
    im = Image.open(original_filename)
    # save the image for the box
    # Crop box empirically chosen for these specific source images.
    (left, upper, right, lower) = (486, 272, 1920, 1356)
    box = im.crop((left, upper, right, lower)) # we crop the image because the originals have a lot of transparent space around the box
    new_box_filename = DEST_DIRECTORY + f"/box_{i}.png"
    box.save(new_box_filename)
    # now we construct the image for the stack, by stacking `i` copies of the box on top of each other
    box_height = 140 # how much higher is one box than the previous
    stack_height = box.height + box_height * (i - 1)
    # Transparent canvas tall enough for all i boxes.
    stack = Image.new('RGBA', (box.width, stack_height), (0, 0, 0, 0))
    for j in range(i):
        # Boxes are composited bottom-up so later (higher) boxes overlap earlier ones.
        y = stack_height - box.height - (box_height * j)
        stack.alpha_composite(box, (0, y))
    new_stack_filename = DEST_DIRECTORY + f"/stack_{i}.png"
    stack.save(new_stack_filename)
print("DONE")
"""
Copyright 2021-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import dateutil
import tldextract
from datetime import datetime, timedelta
from flask import abort, request
from flask_restx import Resource
from backend.app.namespace.authorization import validate_token_header
from backend.app.shared.client import query_acme_challenge_records
from backend.app.shared.network import Device
from backend.app.shared import client
from backend.app.models import (api_namespace, authentication_parser,
asset_output, asset_input,
developer_permissions,
privileged_permissions,
admin_permissions, PUBLIC_KEY)
# HTTP Status Codes: https://docs.python.org/3/library/http.html
CONF_ROUTE_FILE = os.path.join(
os.path.dirname(__file__), '../config/route.json')
dynamodb_client = client.DynamoDBClient(
region_name=os.environ['AWS_DEFAULT_REGION'], table_name=os.environ['TABLE'])
def authentication_header_parser(value, secret):
    """Validate an Authorization header token; abort with 401 when invalid.

    Returns the decoded token payload (the caller's role) on success.
    """
    payload = validate_token_header(value, secret)
    if payload is None:
        abort(401)
    return payload
def filter(primary_index, secondary_index):
    """Intersect two query result sets on their system_name keys.

    Keeps the records from secondary_index whose system_name also
    appears in primary_index.  (Name shadows the builtin ``filter`` but
    is kept for compatibility with existing callers.)
    """
    wanted = {record['system_name'] for record in primary_index}
    return [record for record in secondary_index
            if record['system_name'] in wanted]
def query_expired_certificates(days_until_expiration):
    """Scan the table for hosts whose certificate expires within N days.

    NOTE(review): the final comparison is between ISO-8601 *strings*;
    this is only correct while both sides share the same format and
    timezone convention — confirm stored timestamps are naive UTC.
    """
    response = dynamodb_client.scan_table()
    data = response['Items']
    expired_certificates = list()
    days_until_expiration = int(days_until_expiration)
    for host in data:
        # 'None' is stored as a literal string for hosts without a certificate.
        if host['certificate_expiration'] != 'None':
            # Expiration minus the grace window, rendered as an ISO string.
            expiration_calculation = (dateutil.parser.parse(
                host['certificate_expiration']) - timedelta(days=days_until_expiration)).isoformat()
            if datetime.utcnow().isoformat() > expiration_calculation:
                expired_certificates.append(host)
    return expired_certificates
@api_namespace.route('/v1/search', methods=['GET'])
class Search(Resource):
    """Read-only search over the asset inventory's secondary indexes."""
    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('search', responses={403: 'Invalid Role'}, params={'host_platform': {'description': 'Host OS Platform', 'in': 'query', 'type': 'str', 'required': False}, 'data_center': {'description': 'Data Center', 'in': 'query', 'type': 'str', 'required': False}, 'ip_address': {'description': 'IPv4 Address: [10.0.0.1]', 'in': 'query', 'type': 'str', 'required': False}, 'system_name': {'description': 'System Name: [subdomain.example.com]', 'in': 'query', 'type': 'str', 'required': False}, 'days_until_expiration': {'description': 'Number of Days (i.e. 30) Or Less Days Until Certificate Expires', 'in': 'query', 'type': 'int', 'required': False}, 'origin': {'description': 'Source of Asset: [API]', 'in': 'query', 'type': 'str', 'required': False}}, description='Search Asset Inventory')
    @api_namespace.response(model=asset_output, code=200, description='Success', as_list=True)
    def get(self):
        """Intersect results from every supplied query parameter.

        Each non-null query string filter is resolved through its DynamoDB
        global secondary index; successive result sets are intersected on
        system_name via filter().  days_until_expiration (if given)
        seeds the result set from a full table scan first.
        """
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        try:
            if role in developer_permissions:
                ip_address = request.args.get('ip_address')
                system_name = request.args.get('system_name')
                data_center = request.args.get('data_center')
                host_platform = request.args.get('host_platform')
                days_until_expiration = request.args.get(
                    'days_until_expiration')
                origin = request.args.get('origin')
                # Maps each index name to the supplied value (None if absent).
                conversion = {
                    'ip_address': ip_address,
                    'system_name': system_name,
                    'data_center': data_center,
                    'host_platform': host_platform,
                    'origin': origin
                }
                query = ['ip_address', 'system_name',
                         'data_center', 'host_platform', 'origin']
                unique_list_output = None
                # Scan Table for Expiration
                if days_until_expiration is not None:
                    unique_list_output = query_expired_certificates(
                        days_until_expiration)
                    # No Certificate Expiration Match
                    if len(unique_list_output) == 0:
                        return unique_list_output
                # Query Table Based on Global Secondary Indexes
                for elem in query:
                    if conversion[elem] is not None and unique_list_output is None:
                        # First filter: seed the result set from this index.
                        unique_list_output = dynamodb_client.query_index(
                            '{}_index'.format(elem), elem, conversion[elem])['Items']
                    elif conversion[elem] is not None:
                        # Subsequent filters: intersect with prior results.
                        query_output = dynamodb_client.query_index(
                            '{}_index'.format(elem), elem, conversion[elem])['Items']
                        unique_list_output = filter(
                            query_output, unique_list_output)
                    else:
                        pass
                return unique_list_output
            else:
                return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
        except Exception as error:
            api_namespace.abort(
                500, error.__doc__, statusCode='500')
@api_namespace.route('/v1/assets', methods=['POST', 'PUT'])
class Assets(Resource):
    """Create (POST) and update (PUT) devices in the asset inventory."""

    @staticmethod
    def _device_from_payload(json_data):
        """Build a Device from a request JSON body.

        Shared by post() and put(), which previously duplicated this
        field-by-field extraction.  When no subject_alternative_name is
        supplied it defaults to a single-entry list holding common_name.
        """
        common_name = json_data.get('common_name')
        subject_alternative_name = json_data.get('subject_alternative_name')
        if not subject_alternative_name:
            subject_alternative_name = [common_name]
        return Device(
            ip_address=json_data.get('ip_address'),
            system_name=json_data.get('system_name'),
            common_name=common_name,
            certificate_authority=json_data.get('certificate_authority'),
            host_platform=json_data.get('host_platform'),
            os_version=json_data.get('os_version'),
            data_center=json_data.get('data_center'),
            device_model=json_data.get('device_model'),
            subject_alternative_name=subject_alternative_name,
            origin='API')

    @api_namespace.doc('create_asset', responses={403: 'Invalid Role', 500: 'Input Validation Error'}, description='Add Device to Asset Inventory')
    @api_namespace.response(model=asset_input, code=201, description='Success')
    @api_namespace.expect(asset_input, authentication_parser)
    def post(self):
        """Register a new device; aborts if the system_name already exists."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in privileged_permissions:
            json_data = request.json
            system_name = json_data.get('system_name')
            host = self._device_from_payload(json_data)
            device = dynamodb_client.query_primary_key(
                system_name).get('Items')
            if bool(device):
                api_namespace.abort(500, status="Device Exists: {device}".format(
                    device=system_name), statusCode='500')
            else:
                dynamodb_client.create_item(host)
                return api_namespace.marshal(host, asset_input), 201
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500

    # The original responses dict had 500 listed twice ({500: 'Invalid Role',
    # 500: 'Input Validation Error'}); the first entry was silently dropped.
    @api_namespace.doc('update_asset', responses={403: 'Invalid Role', 500: 'Input Validation Error'}, description='Update Device in Asset Inventory')
    @api_namespace.response(model=asset_input, code=200, description='Success')
    @api_namespace.expect(asset_input, authentication_parser)
    def put(self):
        """Update an existing device; aborts if the system_name is unknown."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in privileged_permissions:
            json_data = request.json
            system_name = json_data.get('system_name')
            host = self._device_from_payload(json_data)
            device = dynamodb_client.query_primary_key(
                system_name).get('Items')
            if bool(device):
                dynamodb_client.update_item(host)
                return api_namespace.marshal(host, asset_input), 200
            else:
                api_namespace.abort(500, status="Device Does Not Exist: {device}".format(
                    device=system_name), statusCode='500')
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
@api_namespace.route('/v1/assets/delete/<string:system_name>', methods=['DELETE'])
class DeleteAsset(Resource):
    """Remove a single device from the asset inventory."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('delete_asset', responses={204: 'Success', 200: 'Invalid Host', 500: 'Invalid Role'}, description='Delete Device in Asset Inventory')
    def delete(self, system_name):
        """Delete the device identified by system_name, if it exists."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role not in privileged_permissions:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
        matches = dynamodb_client.query_primary_key(
            system_name).get('Items')
        if not matches:
            return {'Invalid Host': '{}'.format(system_name)}, 200
        response = dynamodb_client.delete_item(system_name)
        if response['ResponseMetadata']['HTTPStatusCode'] == 200:
            return '', 204
@api_namespace.route('/v1/certificate/rotate/<string:system_name>', methods=['POST'])
class RotateExpiredCertificate(Resource):
    """Start certificate rotation for one device via client.start_execution."""
    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('rotate_expired_certificate', responses={200: 'Invalid Host', 204: 'Success', 403: 'Invalid Role'}, description='Rotate Certificate for Device')
    def post(self, system_name):
        """Validate the host and its ACME DNS delegation, then start rotation."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in privileged_permissions:
            query = dynamodb_client.query_primary_key(system_name)
            if not query['Items']:
                return {'Invalid Host': '{}'.format(system_name)}, 200
            else:
                device = query.get('Items')[0]
                common_name = device.get('common_name')
                # Route53 DNS Mapping
                # Split the common name into registered domain + subdomain.
                output = tldextract.extract(common_name)
                domain = output.domain + '.' + output.suffix
                subdomain = output.subdomain
                # Rotation requires the ACME challenge DNS record to exist.
                if not query_acme_challenge_records(domain, subdomain):
                    return {'Route53 Error': 'DNS CNAME Record Not Found for {}'.format(common_name)}, 200
                client.start_execution(device)
                return '', 204
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
@api_namespace.route('/v1/management/certificate-validation/unset/<string:system_name>', methods=['PATCH'])
class UnsetCertificateValidation(Resource):
    """Disable certificate validation for a device's next execution."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('unset_certificate_validation', responses={200: 'Success', 403: 'Invalid Role'}, description='Set Database to Allow HTTP Requests Against Target Device with Self-Signed or Invalid Certificates')
    def patch(self, system_name):
        """Set certificate validation to 'False' for *system_name*.

        Requires an admin role; returns 200 on success or unknown host,
        403 for insufficient permissions, 500 if the DynamoDB update fails.
        """
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role not in admin_permissions:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
        query = dynamodb_client.query_primary_key(system_name)
        if not query['Items']:
            return {'Invalid Host': '{}'.format(system_name)}, 200
        response = dynamodb_client.set_certificate_validation(
            system_name=system_name, status='False')
        # BUG FIX: the original returned an implicit None (empty body) when
        # DynamoDB reported a non-200 status; surface the failure instead.
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            return {'Update Failed': '{}'.format(system_name)}, 500
        # The f-prefix on the dict key carried no placeholders and was dropped.
        return {'Certificate Validation Unset': f'Certificate validation disabled for the next execution on {system_name}. Please ensure this endpoint was only executed if the current certification on {system_name} is either a self-signed or an invalid certificate.'}, 200
@api_namespace.route('/v1/management/certificate-validation/set/<string:system_name>', methods=['PATCH'])
class SetCertificateValidation(Resource):
    """Re-enable certificate validation for a device."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('set_certificate_validation', responses={200: 'Success', 403: 'Invalid Role'}, description='Set Database to Allow Certificate Verification for HTTP Requests on Target Device')
    def patch(self, system_name):
        """Set certificate validation to 'True' for *system_name*.

        Requires an admin role; returns 200 on success or unknown host,
        403 for insufficient permissions, 500 if the DynamoDB update fails.
        """
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role not in admin_permissions:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
        query = dynamodb_client.query_primary_key(system_name)
        if not query['Items']:
            return {'Invalid Host': '{}'.format(system_name)}, 200
        response = dynamodb_client.set_certificate_validation(
            system_name=system_name, status='True')
        # BUG FIX: the original returned an implicit None (empty body) when
        # DynamoDB reported a non-200 status; surface the failure instead.
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            return {'Update Failed': '{}'.format(system_name)}, 500
        # The f-prefix on the dict key carried no placeholders and was dropped.
        return {'Certificate Validation Enabled': f'Certificate validation enabled on {system_name}. Please ensure {system_name} does not currently have a self-signed or invalid certificate.'}, 200
| 15,237 | 4,152 |
#!/usr/bin/env python3
import os
import yaml
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def get_package_file(package, file_path):
    """Return the absolute path of a file installed in an ament package."""
    share_dir = get_package_share_directory(package)
    return os.path.join(share_dir, file_path)
def load_file(file_path):
    """Load the contents of a file into a string"""
    try:
        with open(file_path, 'r') as handle:
            contents = handle.read()
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        return None
    return contents
def load_yaml(file_path):
    """Load a yaml file into a dictionary"""
    try:
        with open(file_path, 'r') as handle:
            parsed = yaml.safe_load(handle)
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        return None
    return parsed
def run_xacro(xacro_file):
    """Run xacro and output a file in the same directory with the same name, w/o a .xacro suffix.

    :param xacro_file: path to a ``.xacro`` input file.
    :returns: path of the generated URDF file.
    :raises RuntimeError: if the input does not end in ``.xacro`` or xacro fails.
    """
    urdf_file, ext = os.path.splitext(xacro_file)
    if ext != '.xacro':
        raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
    # BUG FIX: os.system's exit status was ignored, so a failed xacro run
    # silently returned a stale or missing URDF path. Check it explicitly.
    status = os.system(f'xacro {xacro_file} -o {urdf_file}')
    if status != 0:
        raise RuntimeError(f'xacro failed with exit status {status} for {xacro_file}')
    return urdf_file
def generate_launch_description():
    """Build the launch description: convert the xacro model and start Gazebo."""
    # Convert the robot's xacro model to a plain URDF file next to it.
    xacro_file = get_package_file('my_robot', 'urdf/my_robot.urdf.xacro')
    urdf_file = run_xacro(xacro_file)
    # Reuse the stock gazebo_ros launch file to bring up Gazebo.
    pkg_gazebo_ros = get_package_share_directory('gazebo_ros')
    gazebo = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(pkg_gazebo_ros, 'launch', 'gazebo.launch.py'),
        )
    )
    pkg_name = 'my_robot'
    world_file_name = 'my_robot.world'
    model_xacro_file_name = 'my_robot.urdf.xacro'
    pkg_dir = get_package_share_directory(pkg_name)
    # NOTE(review): 'world' and 'xacro_path' are computed but never used below —
    # presumably leftovers from an earlier revision; confirm before removing.
    world = os.path.join(pkg_dir, 'world', world_file_name)
    xacro_path = os.path.join(pkg_dir, 'urdf', model_xacro_file_name)
    # Node is built but not included in the returned description (see the
    # commented-out entry in the list below).
    my_robot_node = Node(
        package='my_robot',
        # NOTE(review): 'node_executable' is the pre-Foxy launch_ros argument
        # name; newer distros expect 'executable' — confirm the target distro.
        node_executable='my_robot',
        output='screen',
        arguments=[urdf_file],
    )
    print(xacro_file)
    return LaunchDescription([
        DeclareLaunchArgument(
            'world',
            default_value=[os.path.join(pkg_name, 'world', 'my_robot.world'), ''],
            description='SDF world file for the robot'
        ),
        gazebo
        # my_robot_node
    ])
| 2,803 | 918 |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from models import GrandChallengeGame, GrandChallengeUser
from wouso.core.ui import register_sidebar_block
from wouso.interface import render_string
@login_required
def index(request):
    """ Shows all rounds played by the current user """
    # BUG FIX: ``_`` was never imported in this module, so reaching the
    # error branch raised NameError instead of showing the message.
    from django.utils.translation import ugettext as _
    profile = request.user.get_profile()
    gc_user = profile.get_extension(GrandChallengeUser)
    active = gc_user.get_active()
    played = gc_user.get_played()
    # Users outside the tournament's base query get a friendly error page.
    if gc_user not in GrandChallengeGame.base_query():
        messages.error(request, _('We are sorry, you are not part of the tournament'))
        return render(request, 'grandchallenge/message.html')
    return render_to_response('grandchallenge/index.html',
                              {'active': active, 'played': played, 'gcuser': gc_user, 'gc': GrandChallengeGame},
                              context_instance=RequestContext(request))
def sidebar_widget(context):
    """Render the Grand Challenge sidebar block, or '' when it should be hidden."""
    viewer = context.get('user', None)
    game = GrandChallengeGame
    # Hide the widget when the game is disabled or there is no logged-in user.
    if game.disabled() or not viewer or not viewer.is_authenticated():
        return ''
    gc_user = viewer.get_profile().get_extension(GrandChallengeUser)
    return render_string('grandchallenge/sidebar.html', {'gc': game, 'gcuser': gc_user, 'id': 'grandchallenge'})
# Expose the widget in the site-wide sidebar under the 'grandchallenge' slot.
register_sidebar_block('grandchallenge', sidebar_widget)
from __future__ import division, print_function
import os
from my_utils.tests import test_function
import string
def spin(inst_string, programs):
    """Rotate the whole line right by the given count, in place.

    Mutates *programs* and returns the same list for chaining.
    """
    count = int(inst_string)
    if count:
        programs[:] = programs[-count:] + programs[:-count]
    return programs
def exchange(inst_string, programs):
    """Swap the programs at the two positions given as 'A/B', in place."""
    left, right = (int(part) for part in inst_string.split('/'))
    programs[left], programs[right] = programs[right], programs[left]
    return programs
def partner(inst_string, programs):
    """Swap the positions of the two named programs, given as 'a/b', in place."""
    name_a, name_b = inst_string.split('/')
    pos_a = programs.index(name_a)
    pos_b = programs.index(name_b)
    programs[pos_a], programs[pos_b] = name_b, name_a
    return programs
# Dispatch table: first letter of each dance instruction -> handler function.
MOVE_FUN = {
    's': spin,
    'x': exchange,
    'p': partner
}
def part_1(inst_string, programs=None):
    """Function which calculates the solution to part 1

    Arguments
    ---------
    inst_string : str
        Comma-separated dance instructions.
    programs : list of str, optional
        Starting program order; defaults to the first 16 lowercase letters.

    Returns
    -------
    str
        The program order after one full dance.
    """
    # BUG FIX: the original used a mutable default argument; the move
    # functions mutate the list in place, so repeated calls relying on the
    # default started from the previous call's end state.
    if programs is None:
        programs = list(string.ascii_lowercase[:16])
    instruction_list = inst_string.split(',')
    for instr in instruction_list:
        programs = MOVE_FUN[instr[0]](instr[1:], programs)
    return ''.join(programs)
def part_2(inst_string, nr_dances=int(1e9), programs=None):
    """Function which calculates the solution to part 2

    Repeats the dance ``nr_dances`` times, using cycle detection so the
    full billion iterations never have to be simulated.

    Arguments
    ---------
    inst_string : str
        Comma-separated dance instructions.
    nr_dances : int, optional
        Number of dance repetitions (default one billion).
    programs : list of str, optional
        Starting program order; defaults to the first 16 lowercase letters.

    Returns
    -------
    str
        The program order after all dances.
    """
    # BUG FIX: the original used a mutable default argument which the move
    # functions mutate in place, corrupting later calls made with the default.
    if programs is None:
        programs = list(string.ascii_lowercase[:16])
    configs = {''.join(programs): 0}
    cycle_detected = 0
    for ii in range(1, nr_dances+1):
        this_programs = part_1(inst_string, programs=programs)
        if this_programs in configs:
            cycle_detected = 1
            break
        else:
            configs[this_programs] = ii
            programs = list(this_programs)
    if cycle_detected:
        programs = list(this_programs)
        first_occurence = configs[this_programs]
        cycle_len = ii - first_occurence
        # Only the leftover iterations after the last full cycle matter.
        remaining_iters = (nr_dances - ii) % cycle_len
        for jj in range(remaining_iters):
            programs = list(part_1(inst_string, programs=programs))
    return ''.join(programs)
def main(test_datas, functions, puzzle_input=None, test_functions=None):
    """Run the example tests and, when puzzle input is given, print solutions."""
    if test_functions is None:
        test_functions = functions
    for idx, (data, test_fun) in enumerate(zip(test_datas, test_functions), start=1):
        if test_function(test_fun, data) == 0:
            print('Pt. {} Tests Passed'.format(idx))
    if puzzle_input is None:
        return
    script_name = os.path.basename(__file__)
    for idx, solver in enumerate(functions, start=1):
        print('{} Pt. {} Solution: {}'.format(script_name, idx, solver(puzzle_input)))
if __name__ == "__main__":
    # Testing data:
    # - each element of input list will be passed to function
    # - the relative element in output list is the expected output
    test_data1 = {
        'inputs': ['s1,x3/4,pe/b'],
        'outputs': ['baedc']
    }
    test_data2 = {
        'inputs': ['s1,x3/4,pe/b'],
        'outputs': ['ceadb'] # contains a cycle of length 4
        # this answer at iter 2 % 4
    }
    # Code to import the actual puzzle input
    with open('./inputs/day_16.txt') as f:
        puzzle_input = f.read().strip()
        # puzzle_input = [line.rstrip('\n') for line in f]
    # Test variants run the dance on only 5 programs, matching the examples.
    part_1_test = lambda x: part_1(x, programs=list(string.ascii_lowercase[:5]))
    part_2_test = lambda x: part_2(x, nr_dances=18, programs=list(string.ascii_lowercase[:5]))
    # Main call: performs testing and calculates puzzle outputs
    main(test_datas=[test_data2],
         functions=[part_2],
         puzzle_input=puzzle_input,
         test_functions=[part_2_test])
    # main(test_datas=[test_data1, test_data2],
    #      functions=[part_1, part_2],
    #      puzzle_input=puzzle_input)
| 4,091 | 1,375 |
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RequestHandlers for starter project."""
__author__ = 'alainv@google.com (Alain Vongsouvanh)'
import jinja2
import webapp2
import util
# Module-level Jinja2 environment: loads templates from ./templates and
# HTML-escapes rendered variables by default.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader('templates'),
    autoescape=True)
class IndexHandler(webapp2.RequestHandler):
    """Request handler to display the index page."""

    def get(self):
        """Display the index page."""
        # 'approvalPrompt=force' in the query string forces re-approval and
        # shows the sign-in button; anything else keeps the defaults.
        force_prompt = self.request.get('approvalPrompt') == 'force'
        template_data = {
            'approvalPrompt': 'force' if force_prompt else 'auto',
            'buttonDisplay': 'block' if force_prompt else 'none',
            'clientId': util.get_client_id(),
            'scope': ' '.join(util.SCOPES),
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_data))
# webapp2 route table mapping the site root to the index handler.
INDEX_ROUTES = [
    ('/', IndexHandler),
]
| 1,513 | 488 |
import os
from bot import bot
from dotenv import load_dotenv
# Read environment variables from .env
load_dotenv()
if __name__ == "__main__":
    # Start the bot
    # NOTE(review): ``bot.bot`` implies the ``bot`` module exposes an object
    # also named ``bot`` with a ``run`` method — confirm against the package.
    bot.bot.run(os.getenv("DISCORD_TOKEN"))
| 208 | 73 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
def web_socket_do_extra_handshake(request):
    """pywebsocket handshake hook: delay the handshake, then accept."""
    time.sleep(1)  # Wait for 1 second and then always accept.
def web_socket_transfer_data(request):
    """pywebsocket data hook: keep the connection open, exchange no data."""
    pass  # do nothing
| 346 | 114 |
from netapp.connection import NaConnection
from extension_list_info import ExtensionListInfo # 1 properties
from event_name import EventName # 0 properties
from fpolicy_policy_get_iter_key_td import FpolicyPolicyGetIterKeyTd # 2 properties
from monitored_operation_info import MonitoredOperationInfo # 1 properties
from fpolicy_event_options_config import FpolicyEventOptionsConfig # 6 properties
from secondary_server_info import SecondaryServerInfo # 1 properties
from fpolicy_policy_event_get_iter_key_td import FpolicyPolicyEventGetIterKeyTd # 2 properties
from fpolicy_proto import FpolicyProto # 0 properties
from fpolicy_policy_status_info import FpolicyPolicyStatusInfo # 4 properties
from fpolicy_volumes_list_info import FpolicyVolumesListInfo # 1 properties
from fpolicy_filter import FpolicyFilter # 0 properties
from fpolicy_policy_info import FpolicyPolicyInfo # 7 properties
from engine_name import EngineName # 0 properties
from policy_info import PolicyInfo # 10 properties
from fpolicy_policy_external_engine_get_iter_key_td import FpolicyPolicyExternalEngineGetIterKeyTd # 2 properties
from fpolicy_external_engine_info import FpolicyExternalEngineInfo # 17 properties
from fpolicy_server_status_info import FpolicyServerStatusInfo # 9 properties
from fpolicy_operation import FpolicyOperation # 0 properties
from server_info import ServerInfo # 11 properties
from fpolicy_server_type import FpolicyServerType # 0 properties
from fpolicy_policy_status_get_iter_key_td import FpolicyPolicyStatusGetIterKeyTd # 2 properties
from common_name import CommonName # 0 properties
from fpolicy_ssl_opts import FpolicySslOpts # 0 properties
from fpolicy_scope_config import FpolicyScopeConfig # 11 properties
from fpolicy_server_status_get_iter_key_td import FpolicyServerStatusGetIterKeyTd # 4 properties
from monitored_protocol_info import MonitoredProtocolInfo # 1 properties
from fpolicy_policy_scope_get_iter_key_td import FpolicyPolicyScopeGetIterKeyTd # 2 properties
from fpolicy_server_status import FpolicyServerStatus # 0 properties
from external_engine_type import ExternalEngineType # 0 properties
class FpolicyConnection(NaConnection):
def fpolicy_server_disconnect(self, node, policy_name, server):
"""
Terminate connection to FPolicy server
:param node: Cluster node name.
:param policy_name: Name of the policy.
:param server: FPolicy server.
"""
return self.request( "fpolicy-server-disconnect", {
'node': [ node, 'node', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
}, {
} )
def fpolicy_volume_list_set(self, policy_name, list_type, volumes):
"""
Manipulate a list of volumes in an exclude or include set.
This limits the set of volumes for which client requests
trigger (include) or suppress (exclude) fpolicy processing
for the provided policy.
The list provided will replace the list currently in place,
if any. Note that if a policy has both an exclude list and
an include list, the include list is ignored by the filer.
:param policy_name: Name of the policy.
:param list_type: Defines to which set (exclude or include) a list
will be applied.
Possible values: "exclude", "include".
:param volumes: List of volume specifications.
"""
return self.request( "fpolicy-volume-list-set", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'list_type': [ list_type, 'list-type', [ basestring, 'None' ], False ],
'volumes': [ volumes, 'volumes', [ FpolicyVolumesListInfo, 'None' ], True ],
}, {
} )
def fpolicy_set_required(self, policy_name, required):
"""
Sets policy's "required" option to on/off.
:param policy_name: Name of the policy.
:param required: Indicator if the policy is required. If set to true,
the request will fail if there is no server to evaluate it.
If it's false, the request will succeed.
"""
return self.request( "fpolicy-set-required", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'required': [ required, 'required', [ bool, 'None' ], False ],
}, {
} )
def fpolicy_enable(self):
"""
Sets options fpolicy enable to on.
"""
return self.request( "fpolicy-enable", {
}, {
} )
def fpolicy_server_stop(self, server_ip, policy_name):
"""
Stops specific primary server serving the policy.
Effectively, this will unregister the fpolicy server.
:param server_ip: The ip address, in dotted-decimal format, of the server.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-server-stop", {
'server_ip': [ server_ip, 'server-ip', [ basestring, 'ip-address' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
} )
def fpolicy_server_connect(self, node, policy_name, server):
"""
Make a connection to FPolicy server
:param node: Cluster node name.
:param policy_name: Name of the policy.
:param server: FPolicy server.
"""
return self.request( "fpolicy-server-connect", {
'node': [ node, 'node', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
}, {
} )
def fpolicy_get_required_info(self, policy_name):
"""
Shows current options for the policy.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-get-required-info", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
'is-required': [ bool, False ],
} )
def fpolicy_disable_policy(self, policy_name):
"""
Disables a specific named policy.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-disable-policy", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
} )
def fpolicy_enable_policy(self, policy_name, sequence_number):
"""
Enables a specific named policy. The operation will fail
if the policy doesn't exist.
:param policy_name: Name of the policy.
:param sequence_number: Policy Sequence Number
"""
return self.request( "fpolicy-enable-policy", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'sequence_number': [ sequence_number, 'sequence-number', [ int, 'None' ], False ],
}, {
} )
def fpolicy_policy_modify(self, policy_name, engine_name=None, privileged_user_name=None, events=None, is_mandatory=None, allow_privileged_access=None):
"""
Modify a policy.
:param policy_name: Name of the policy.
:param engine_name: Name of the Engine. Default Engine is 'native'.
:param privileged_user_name: User name for privileged access. No default value is set for this
attribute.
:param events: Events for file access monitoring.
:param is_mandatory: Indicator if the screening with this policy is required, i.e. it
will fail if no servers are able process the notification
registered as a part of external engine. If set to true, the
request will fail if there is no server to evaluate it. If it's
false, the request will succeed. Default value is true.
:param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
registered for the policy. Default Value is no.
"""
return self.request( "fpolicy-policy-modify", {
'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'events': [ events, 'events', [ basestring, 'event-name' ], True ],
'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
}, {
} )
def fpolicy_policy_create(self, engine_name, policy_name, events, privileged_user_name=None, return_record=None, is_mandatory=None, allow_privileged_access=None):
"""
Create a policy.
:param engine_name: Name of the Engine. Default Engine is 'native'.
:param policy_name: Name of the policy.
:param events: Events for file access monitoring.
:param privileged_user_name: User name for privileged access. No default value is set for this
attribute.
:param return_record: If set to true, returns the fpolicy-policy on successful
creation.
Default: false
:param is_mandatory: Indicator if the screening with this policy is required, i.e. it
will fail if no servers are able process the notification
registered as a part of external engine. If set to true, the
request will fail if there is no server to evaluate it. If it's
false, the request will succeed. Default value is true.
:param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
registered for the policy. Default Value is no.
"""
return self.request( "fpolicy-policy-create", {
'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
'events': [ events, 'events', [ basestring, 'event-name' ], True ],
'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
}, {
'result': [ FpolicyPolicyInfo, False ],
} )
def fpolicy_policy_event_modify(self, event_name, volume_operation=None, protocol=None, file_operations=None, filter_string=None):
"""
Set FPolicy event options. FPolicy event is consist of protocol,
file operation, volume operation and f
ilters.
:param event_name: Name of the Event.
:param volume_operation: Indicator if the volume operation required for the event.Default
Value is false.
:param protocol: Name of protocol for which event is created. By default no
protocol is selected.
Possible values:
<ul>
<li> "cifs" - CIFS protocol,
<li> "nfsv3" - NFSv3 protocol,
<li> "nfsv4" - NFSv4 protocol
</ul>
:param file_operations: Name of file operations. By default no operations are monitored.
Possible values:
<ul>
<li> "close" - File close operation,
<li> "create" - File create operation,
<li> "create_dir" - File create directory operation,
<li> "delete" - File delete operation,
<li> "delete_dir" - Directory delete operation,
<li> "getattr" - Get attribute operation,
<li> "link" - Link operation,
<li> "lookup" - Lookup operation,
<li> "open" - File open operation,
<li> "read" - File read operation,
<li> "write" - File write operation,
<li> "rename" - File rename operation,
<li> "rename_dir" - Directory rename operation,
<li> "setattr" - Set attribute operation,
<li> "symlink" - Symbolic link operation
</ul>
:param filter_string: Name of filters. It is notification filtering parameters. By
default no filters are selected.
Possible values:
<ul>
<li> "monitor_ads" - Monitor alternate data
stream,
<li> "close_with_modification" - Filter close with
modification,
<li> "close_without_modification" - Filter close without
modification,
<li> "first_read" - Filter first read,
<li> "first_write" - Filter first write,
<li> "offline_bit" - Filter offline bit set,
<li> "open_with_delete_intent" - Filter open with delete
intent,
<li> "open_with_write_intent" - Filter open with write
intent,
<li> "write_with_size_change" - Filter write with size
change
</ul>
"""
return self.request( "fpolicy-policy-event-modify", {
'volume_operation': [ volume_operation, 'volume-operation', [ bool, 'None' ], False ],
'protocol': [ protocol, 'protocol', [ basestring, 'fpolicy-proto' ], False ],
'file_operations': [ file_operations, 'file-operations', [ basestring, 'fpolicy-operation' ], True ],
'event_name': [ event_name, 'event-name', [ basestring, 'event-name' ], False ],
'filter_string': [ filter_string, 'filter-string', [ basestring, 'fpolicy-filter' ], True ],
}, {
} )
def fpolicy_operations_list_set(self, monitored_operations, policy_name, monitored_protocols, force=None, offline_only=None):
"""
Manipulate a list of operations and network protocols
for a policy.
This determines which user requests cause the filer to
notify fpolicy servers for this policy.
The list provided will replace the list currently in place,
if any. Note that this can be confusing to a server which has
already connected to a policy and provided a list of
operations. For example, it may have requested notifications
when users open files, but start receiving notifications
when users create symlinks.
This API is provided in support of "native file blocking"
in which there is no server connected to the filer for a
policy.
Note that it is possible to get the list of operations and
protocols currently set for a policy with the
fpolicy-list-info API.
:param monitored_operations: List of operations related values.
:param policy_name: Name of the policy.
:param monitored_protocols: List of protocol related values.
:param force: If a server is connected to the filer and has already
set the list of operations, should this API override
the server's setting? If "force" is "true", the policy's
set of operations will be dropped and replaced with the
values provided by this API.
Default value is false.
:param offline_only: Sets the state of offline filtering. If offline filtering
is set, then only user requests for files which are marked
"offline" cause notifications.
Default value is false.
"""
return self.request( "fpolicy-operations-list-set", {
'monitored_operations': [ monitored_operations, 'monitored-operations', [ MonitoredOperationInfo, 'None' ], True ],
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
'force': [ force, 'force', [ bool, 'None' ], False ],
'offline_only': [ offline_only, 'offline-only', [ bool, 'None' ], False ],
'monitored_protocols': [ monitored_protocols, 'monitored-protocols', [ MonitoredProtocolInfo, 'None' ], True ],
}, {
} )
def fpolicy_volume_list_info(self, policy_name):
"""
Returns a volume-regular-expression list for an exclude
or include set.
The list describes limits to the set of volumes for which
client requests trigger (include) or suppress (exclude)
fpolicy processing for the provided policy.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-volume-list-info", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
'include-volumes': [ FpolicyVolumesListInfo, True ],
'exclude-volumes': [ FpolicyVolumesListInfo, True ],
} )
def fpolicy_policy_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
"""
Returns information about policies.
:param max_records: The maximum number of records to return in this call.
Default: 20
:param query: A query that specifies which objects to return.
A query could be specified on any number of attributes in the
fpolicy-policy object.
All fpolicy-policy objects matching this query up to
'max-records' will be returned.
:param tag: Specify the tag from the last call.
It is usually not specified for the first call. For subsequent
calls, copy values from the 'next-tag' obtained from the previous
call.
:param desired_attributes: Specify the attributes that should be returned.
If not present, all attributes for which information is available
will be returned.
If present, only the desired attributes for which information is
available will be returned.
"""
return self.request( "fpolicy-policy-get-iter", {
'max_records': max_records,
'query': [ query, 'query', [ FpolicyPolicyInfo, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyPolicyInfo, 'None' ], False ],
}, {
'attributes-list': [ FpolicyPolicyInfo, True ],
} )
def fpolicy_policy_event_delete(self, event_name):
"""
Delete FPolicy event.
:param event_name: Name of the Event.
"""
return self.request( "fpolicy-policy-event-delete", {
'event_name': [ event_name, 'event-name', [ basestring, 'event-name' ], False ],
}, {
} )
def fpolicy_extensions_list_info(self, policy_name):
"""
Returns information on existing extension sets.
:param policy_name: Name of the policy.
"""
return self.request( "fpolicy-extensions-list-info", {
'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
}, {
'exclude-extensions': [ ExtensionListInfo, True ],
'include-extensions': [ ExtensionListInfo, True ],
} )
def fpolicy_policy_external_engine_create(self, engine_name, port_number, primary_servers, ssl_option, certificate_serial=None, server_progress_timeout=None, secondary_servers=None, certificate_ca=None, request_cancel_timeout=None, return_record=None, certificate_common_name=None, keep_alive_interval=None, extern_engine_type=None, max_connection_retries=None, request_abort_timeout=None, max_server_requests=None, status_request_interval=None):
"""
Create an external engine.
:param engine_name: Name of the external engine.
:param port_number: Port number of the FPolicy server application.
:param primary_servers: Primary FPolicy servers.
:param ssl_option: SSL option for external communication. No default value is set
for this field.
Possible values:
<ul>
<li> "no_auth" - Communication over TCP,
<li> "server_auth" - Authentication of FPolicy server only,
<li> "mutual_auth" - Mutual authentication of storage system
and FPolicy server
</ul>
:param certificate_serial: Serial number of certificate. No default value is set for this
field.
:param server_progress_timeout: Timeout in seconds in which a throttled FPolicy server must
complete at least one screen request. If no request is processed
within the timeout, connection to FPolicy server is terminated.
Default value set for this field is 60 seconds.
:param secondary_servers: Secondary FPolicy servers. No default value is set for this
field.
:param certificate_ca: Certificate authority name. No default value is set for this
field.
:param request_cancel_timeout: Timeout in seconds for a screen request to be processed by an
FPolicy server. Default value set for this field is 20 seconds.
:param return_record: If set to true, returns the fpolicy-policy-external-engine on
successful creation.
Default: false
:param certificate_common_name: FQDN or custom common name of certificate. No default value is
set for this field.
:param keep_alive_interval: Interval time in seconds for storage appliance to send keep-alive
message to FPolicy server. Default value set for this field is 10
seconds.
:param extern_engine_type: External engine type. If the engine is asynchronous, no reply is
sent from FPolicy servers. Default value set for this field is
synchronous.
Possible values:
<ul>
<li> "synchronous" - Synchronous External Engine,
<li> "asynchronous" - Asynchronous External Engine
</ul>
:param max_connection_retries: Number of times storage appliance will attempt to establish a
broken connection to FPolicy server. Default value set for this
field is 5.
:param request_abort_timeout: Timeout in seconds for a screen request to be aborted by storage
appliance. Default value set for this field is 40 seconds.
:param max_server_requests: Maximum number of outstanding screen requests that will be queued
for an FPolicy Server. Default value set for this field is 50.
:param status_request_interval: Interval time in seconds for storage appliance to query status
request from FPolicy server. Default value set for this field is
10 seconds.
"""
return self.request( "fpolicy-policy-external-engine-create", {
'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
'certificate_serial': [ certificate_serial, 'certificate-serial', [ basestring, 'None' ], False ],
'server_progress_timeout': [ server_progress_timeout, 'server-progress-timeout', [ int, 'None' ], False ],
'secondary_servers': [ secondary_servers, 'secondary-servers', [ basestring, 'ip-address' ], True ],
'certificate_ca': [ certificate_ca, 'certificate-ca', [ basestring, 'None' ], False ],
'request_cancel_timeout': [ request_cancel_timeout, 'request-cancel-timeout', [ int, 'None' ], False ],
'port_number': [ port_number, 'port-number', [ int, 'None' ], False ],
'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
'certificate_common_name': [ certificate_common_name, 'certificate-common-name', [ basestring, 'common-name' ], False ],
'keep_alive_interval': [ keep_alive_interval, 'keep-alive-interval', [ int, 'None' ], False ],
'primary_servers': [ primary_servers, 'primary-servers', [ basestring, 'ip-address' ], True ],
'extern_engine_type': [ extern_engine_type, 'extern-engine-type', [ basestring, 'external-engine-type' ], False ],
'max_connection_retries': [ max_connection_retries, 'max-connection-retries', [ int, 'None' ], False ],
'request_abort_timeout': [ request_abort_timeout, 'request-abort-timeout', [ int, 'None' ], False ],
'ssl_option': [ ssl_option, 'ssl-option', [ basestring, 'fpolicy-ssl-opts' ], False ],
'max_server_requests': [ max_server_requests, 'max-server-requests', [ int, 'None' ], False ],
'status_request_interval': [ status_request_interval, 'status-request-interval', [ int, 'None' ], False ],
}, {
'result': [ FpolicyExternalEngineInfo, False ],
} )
def fpolicy_policy_status_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """Return FPolicy policy status information, one page at a time.

    :param max_records: Maximum number of records to return per call.
        Default: 20.
    :param query: Restricts the result to fpolicy-policy-status objects
        matching the query; any number of attributes may be constrained.
    :param tag: Pagination tag. Usually omitted on the first call; on
        subsequent calls pass the 'next-tag' value from the previous call.
    :param desired_attributes: When present, only these attributes (where
        available) are returned; otherwise all available attributes are.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [FpolicyPolicyStatusInfo, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [FpolicyPolicyStatusInfo, 'None'], False],
    }
    api_output = {
        'attributes-list': [FpolicyPolicyStatusInfo, True],
    }
    return self.request("fpolicy-policy-status-get-iter", api_args, api_output)
def fpolicy_policy_delete(self, policy_name):
    """Delete an FPolicy policy.

    :param policy_name: Name of the policy to delete.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'policy-name'], False],
    }
    return self.request("fpolicy-policy-delete", api_args, {})
def fpolicy_set_policy_options(self, policy_name, reqcancel_timeout=None, is_required=None, is_ads_monitored=None, secondary_servers=None, serverprogress_timeout=None, is_cifs_disconnect_check_enabled=None, is_cifs_setattr_enabled=None):
    """Set a policy's options on/off.

    :param policy_name: Name of the policy.
    :param reqcancel_timeout: Timeout (secs, range [0..4294967]) for a screen
        request to be processed by an FPolicy server.
    :param is_required: When true, screening requests fail if no server is
        registered to evaluate them; when false they succeed. Default: false.
    :param is_ads_monitored: Whether the policy monitors CIFS operations on
        Alternate Data Streams. Default: false.
    :param secondary_servers: IP addresses; servers registered from these IPs
        are treated as secondary servers.
    :param serverprogress_timeout: Timeout (secs, range [0..4294967]) in which
        a throttled FPolicy server must complete at least one screen request.
    :param is_cifs_disconnect_check_enabled: 'true' to skip screening of
        requests associated with disconnected CIFS sessions.
    :param is_cifs_setattr_enabled: When true, cifs setattr operations are
        screened. Default: false.
    """
    api_args = {
        'reqcancel_timeout': [reqcancel_timeout, 'reqcancel-timeout', [int, 'None'], False],
        'is_required': [is_required, 'is-required', [bool, 'None'], False],
        'is_ads_monitored': [is_ads_monitored, 'is-ads-monitored', [bool, 'None'], False],
        'secondary_servers': [secondary_servers, 'secondary-servers', [SecondaryServerInfo, 'None'], True],
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
        'serverprogress_timeout': [serverprogress_timeout, 'serverprogress-timeout', [int, 'None'], False],
        'is_cifs_disconnect_check_enabled': [is_cifs_disconnect_check_enabled, 'is-cifs-disconnect-check-enabled', [bool, 'None'], False],
        'is_cifs_setattr_enabled': [is_cifs_setattr_enabled, 'is-cifs-setattr-enabled', [bool, 'None'], False],
    }
    return self.request("fpolicy-set-policy-options", api_args, {})
def fpolicy_set_secondary_servers(self, secondary_servers, policy_name):
    """Set a policy's secondary servers as a list of IP addresses.

    These servers are used when no primary server is available, which
    increases system availability.

    :param secondary_servers: List of servers' IP addresses.
    :param policy_name: Name of the policy.
    """
    api_args = {
        'secondary_servers': [secondary_servers, 'secondary-servers', [SecondaryServerInfo, 'None'], True],
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    return self.request("fpolicy-set-secondary-servers", api_args, {})
def fpolicy_server_status_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """Return FPolicy server status information, one page at a time.

    :param max_records: Maximum number of records to return per call.
        Default: 20.
    :param query: Restricts the result to fpolicy-server-status objects
        matching the query; any number of attributes may be constrained.
    :param tag: Pagination tag. Usually omitted on the first call; on
        subsequent calls pass the 'next-tag' value from the previous call.
    :param desired_attributes: When present, only these attributes (where
        available) are returned; otherwise all available attributes are.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [FpolicyServerStatusInfo, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [FpolicyServerStatusInfo, 'None'], False],
    }
    api_output = {
        'attributes-list': [FpolicyServerStatusInfo, True],
    }
    return self.request("fpolicy-server-status-get-iter", api_args, api_output)
def fpolicy_policy_external_engine_modify(self, engine_name, certificate_serial=None, server_progress_timeout=None, secondary_servers=None, certificate_ca=None, request_cancel_timeout=None, port_number=None, certificate_common_name=None, keep_alive_interval=None, primary_servers=None, extern_engine_type=None, max_connection_retries=None, request_abort_timeout=None, ssl_option=None, max_server_requests=None, status_request_interval=None):
    """Modify an external engine.

    An external engine can be modified only while none of the enabled
    policies are using it.

    :param engine_name: Name of the external engine.
    :param certificate_serial: Serial number of the certificate. No default.
    :param server_progress_timeout: Seconds in which a throttled FPolicy
        server must complete at least one screen request; otherwise the
        connection to the server is terminated. Default: 60.
    :param secondary_servers: Secondary FPolicy servers. No default.
    :param certificate_ca: Certificate authority name. No default.
    :param request_cancel_timeout: Seconds for a screen request to be
        processed by an FPolicy server. Default: 20.
    :param port_number: Port number of the FPolicy server application.
    :param certificate_common_name: FQDN or custom common name of the
        certificate. No default.
    :param keep_alive_interval: Seconds between keep-alive messages sent by
        the storage appliance to the FPolicy server. Default: 10.
    :param primary_servers: Primary FPolicy servers.
    :param extern_engine_type: External engine type, "synchronous" (default)
        or "asynchronous"; an asynchronous engine sends no replies.
    :param max_connection_retries: Number of attempts to re-establish a broken
        connection to the FPolicy server. Default: 5.
    :param request_abort_timeout: Seconds before the storage appliance aborts
        a screen request. Default: 40.
    :param ssl_option: SSL option for external communication, one of
        "no_auth" (plain TCP), "server_auth" (server-only authentication) or
        "mutual_auth" (mutual authentication). No default.
    :param max_server_requests: Maximum number of outstanding screen requests
        queued per FPolicy server. Default: 50.
    :param status_request_interval: Seconds between status queries sent by the
        storage appliance to the FPolicy server. Default: 10.
    """
    api_args = {
        'engine_name': [engine_name, 'engine-name', [basestring, 'engine-name'], False],
        'certificate_serial': [certificate_serial, 'certificate-serial', [basestring, 'None'], False],
        'server_progress_timeout': [server_progress_timeout, 'server-progress-timeout', [int, 'None'], False],
        'secondary_servers': [secondary_servers, 'secondary-servers', [basestring, 'ip-address'], True],
        'certificate_ca': [certificate_ca, 'certificate-ca', [basestring, 'None'], False],
        'request_cancel_timeout': [request_cancel_timeout, 'request-cancel-timeout', [int, 'None'], False],
        'port_number': [port_number, 'port-number', [int, 'None'], False],
        'certificate_common_name': [certificate_common_name, 'certificate-common-name', [basestring, 'common-name'], False],
        'keep_alive_interval': [keep_alive_interval, 'keep-alive-interval', [int, 'None'], False],
        'primary_servers': [primary_servers, 'primary-servers', [basestring, 'ip-address'], True],
        'extern_engine_type': [extern_engine_type, 'extern-engine-type', [basestring, 'external-engine-type'], False],
        'max_connection_retries': [max_connection_retries, 'max-connection-retries', [int, 'None'], False],
        'request_abort_timeout': [request_abort_timeout, 'request-abort-timeout', [int, 'None'], False],
        'ssl_option': [ssl_option, 'ssl-option', [basestring, 'fpolicy-ssl-opts'], False],
        'max_server_requests': [max_server_requests, 'max-server-requests', [int, 'None'], False],
        'status_request_interval': [status_request_interval, 'status-request-interval', [int, 'None'], False],
    }
    return self.request("fpolicy-policy-external-engine-modify", api_args, {})
def fpolicy_get_secondary_servers_info(self, policy_name):
    """Show the current secondary-server options for a policy.

    :param policy_name: Name of the policy.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    api_output = {
        'secondary-servers': [SecondaryServerInfo, True],
    }
    return self.request("fpolicy-get-secondary-servers-info", api_args, api_output)
def fpolicy_disable(self):
    """Set the 'fpolicy enable' option to off."""
    return self.request("fpolicy-disable", {}, {})
def fpolicy_create_policy(self, policy_name, policy_type):
    """Create a new policy.

    :param policy_name: Name of the policy.
    :param policy_type: Type of the policy. Possible values: "screen".
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
        'policy_type': [policy_type, 'policy-type', [basestring, 'None'], False],
    }
    return self.request("fpolicy-create-policy", api_args, {})
def fpolicy_server_list_info(self, policy_name):
    """Show the list of primary servers serving a policy.

    :param policy_name: Name of the policy.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    api_output = {
        'servers': [ServerInfo, True],
    }
    return self.request("fpolicy-server-list-info", api_args, api_output)
def fpolicy_policy_event_create(self, event_name, protocol=None, volume_operation=None, return_record=None, filter_string=None, file_operations=None):
    """Create an FPolicy event.

    :param event_name: Name of the event.
    :param protocol: Protocol the event is created for; none is selected by
        default. Possible values: "cifs", "nfsv3", "nfsv4".
    :param volume_operation: Whether the volume operation is required for the
        event. Default: false.
    :param return_record: When true, returns the fpolicy-policy-event on
        successful creation. Default: false.
    :param filter_string: Notification filter names; none are selected by
        default. Possible values: "monitor_ads" (alternate data streams),
        "close_with_modification", "close_without_modification",
        "first_read", "first_write", "offline_bit" (offline bit set),
        "open_with_delete_intent", "open_with_write_intent",
        "write_with_size_change".
    :param file_operations: File operations to monitor; none are monitored by
        default. Possible values: "close", "create", "create_dir", "delete",
        "delete_dir", "getattr", "link", "lookup", "open", "read", "write",
        "rename", "rename_dir", "setattr", "symlink".
    """
    api_args = {
        'protocol': [protocol, 'protocol', [basestring, 'fpolicy-proto'], False],
        'volume_operation': [volume_operation, 'volume-operation', [bool, 'None'], False],
        'return_record': [return_record, 'return-record', [bool, 'None'], False],
        'event_name': [event_name, 'event-name', [basestring, 'event-name'], False],
        'filter_string': [filter_string, 'filter-string', [basestring, 'fpolicy-filter'], True],
        'file_operations': [file_operations, 'file-operations', [basestring, 'fpolicy-operation'], True],
    }
    api_output = {
        'result': [FpolicyEventOptionsConfig, False],
    }
    return self.request("fpolicy-policy-event-create", api_args, api_output)
def fpolicy_policy_scope_create(self, policy_name, export_policies_to_include=None, volumes_to_exclude=None, file_extensions_to_exclude=None, export_policies_to_exclude=None, check_extensions_on_directories=None, return_record=None, volumes_to_include=None, shares_to_exclude=None, file_extensions_to_include=None, shares_to_include=None):
    """Set FPolicy scope options. A scope consists of shares, volumes,
    export policies and file extensions.

    Every include/exclude list is empty by default. Volume lists accept
    regular-expression items such as 'vol*' or 'user?'; when a policy has
    both an exclude list and an include list, the include list is ignored
    by the filer when processing user requests.

    :param policy_name: Name of the policy.
    :param export_policies_to_include: Export policies monitored for file
        access.
    :param volumes_to_exclude: Volumes that are inactive for the file policy.
    :param file_extensions_to_exclude: File extensions excluded from
        screening.
    :param export_policies_to_exclude: Export policies excluded from file
        access monitoring.
    :param check_extensions_on_directories: Whether directory names are also
        subjected to the extension check, like file names. Default: false.
    :param return_record: When true, returns the fpolicy-policy-scope on
        successful creation. Default: false.
    :param volumes_to_include: Volumes that are active for the file policy.
    :param shares_to_exclude: Shares excluded from file access monitoring.
    :param file_extensions_to_include: File extensions included for screening.
    :param shares_to_include: Shares included for file access monitoring.
    """
    api_args = {
        'export_policies_to_include': [export_policies_to_include, 'export-policies-to-include', [basestring, 'None'], True],
        'volumes_to_exclude': [volumes_to_exclude, 'volumes-to-exclude', [basestring, 'None'], True],
        'file_extensions_to_exclude': [file_extensions_to_exclude, 'file-extensions-to-exclude', [basestring, 'None'], True],
        'policy_name': [policy_name, 'policy-name', [basestring, 'policy-name'], False],
        'export_policies_to_exclude': [export_policies_to_exclude, 'export-policies-to-exclude', [basestring, 'None'], True],
        'check_extensions_on_directories': [check_extensions_on_directories, 'check-extensions-on-directories', [bool, 'None'], False],
        'return_record': [return_record, 'return-record', [bool, 'None'], False],
        'volumes_to_include': [volumes_to_include, 'volumes-to-include', [basestring, 'None'], True],
        'shares_to_exclude': [shares_to_exclude, 'shares-to-exclude', [basestring, 'None'], True],
        'file_extensions_to_include': [file_extensions_to_include, 'file-extensions-to-include', [basestring, 'None'], True],
        'shares_to_include': [shares_to_include, 'shares-to-include', [basestring, 'None'], True],
    }
    api_output = {
        'result': [FpolicyScopeConfig, False],
    }
    return self.request("fpolicy-policy-scope-create", api_args, api_output)
def fpolicy_policy_scope_modify(self, policy_name, export_policies_to_include=None, volumes_to_exclude=None, file_extensions_to_exclude=None, export_policies_to_exclude=None, check_extensions_on_directories=None, volumes_to_include=None, shares_to_exclude=None, file_extensions_to_include=None, shares_to_include=None):
    """Modify FPolicy scope options. A scope consists of shares, volumes,
    export policies and file extensions.

    Every include/exclude list is empty by default. Volume lists accept
    regular-expression items such as 'vol*' or 'user?'; when a policy has
    both an exclude list and an include list, the include list is ignored
    by the filer when processing user requests.

    :param policy_name: Name of the policy.
    :param export_policies_to_include: Export policies monitored for file
        access.
    :param volumes_to_exclude: Volumes that are inactive for the file policy.
    :param file_extensions_to_exclude: File extensions excluded from
        screening.
    :param export_policies_to_exclude: Export policies excluded from file
        access monitoring.
    :param check_extensions_on_directories: Whether directory names are also
        subjected to the extension check, like file names. Default: false.
    :param volumes_to_include: Volumes that are active for the file policy.
    :param shares_to_exclude: Shares excluded from file access monitoring.
    :param file_extensions_to_include: File extensions included for screening.
    :param shares_to_include: Shares included for file access monitoring.
    """
    api_args = {
        'export_policies_to_include': [export_policies_to_include, 'export-policies-to-include', [basestring, 'None'], True],
        'volumes_to_exclude': [volumes_to_exclude, 'volumes-to-exclude', [basestring, 'None'], True],
        'file_extensions_to_exclude': [file_extensions_to_exclude, 'file-extensions-to-exclude', [basestring, 'None'], True],
        'policy_name': [policy_name, 'policy-name', [basestring, 'policy-name'], False],
        'export_policies_to_exclude': [export_policies_to_exclude, 'export-policies-to-exclude', [basestring, 'None'], True],
        'check_extensions_on_directories': [check_extensions_on_directories, 'check-extensions-on-directories', [bool, 'None'], False],
        'volumes_to_include': [volumes_to_include, 'volumes-to-include', [basestring, 'None'], True],
        'shares_to_exclude': [shares_to_exclude, 'shares-to-exclude', [basestring, 'None'], True],
        'file_extensions_to_include': [file_extensions_to_include, 'file-extensions-to-include', [basestring, 'None'], True],
        'shares_to_include': [shares_to_include, 'shares-to-include', [basestring, 'None'], True],
    }
    return self.request("fpolicy-policy-scope-modify", api_args, {})
def fpolicy_list_info(self, policy_name=None):
    """Return the list of existing policies.

    :param policy_name: When set, the returned policies are restricted to the
        named policy; if no such policy exists, the list is empty.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    api_output = {
        'policies': [PolicyInfo, True],
    }
    return self.request("fpolicy-list-info", api_args, api_output)
def fpolicy_policy_scope_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """Return FPolicy scope rows (shares, volumes, export policies, file
    extensions), one page at a time.

    :param max_records: Maximum number of records to return per call.
        Default: 20.
    :param query: Restricts the result to fpolicy-policy-scope objects
        matching the query; any number of attributes may be constrained.
    :param tag: Pagination tag. Usually omitted on the first call; on
        subsequent calls pass the 'next-tag' value from the previous call.
    :param desired_attributes: When present, only these attributes (where
        available) are returned; otherwise all available attributes are.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [FpolicyScopeConfig, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [FpolicyScopeConfig, 'None'], False],
    }
    api_output = {
        'attributes-list': [FpolicyScopeConfig, True],
    }
    return self.request("fpolicy-policy-scope-get-iter", api_args, api_output)
def fpolicy_destroy_policy(self, policy_name):
    """Destroy an existing policy.

    :param policy_name: Name of the policy.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    return self.request("fpolicy-destroy-policy", api_args, {})
def fpolicy_policy_external_engine_delete(self, engine_name):
    """Delete an external engine.

    :param engine_name: Name of the external engine.
    """
    api_args = {
        'engine_name': [engine_name, 'engine-name', [basestring, 'engine-name'], False],
    }
    return self.request("fpolicy-policy-external-engine-delete", api_args, {})
def fpolicy_status(self):
    """Report the state of the 'fpolicy enable' option.

    The result element 'is-enabled' carries the status.
    """
    api_output = {
        'is-enabled': [bool, False],
    }
    return self.request("fpolicy-status", {}, api_output)
def fpolicy_policy_scope_delete(self, policy_name):
    """Delete the scope of a policy.

    :param policy_name: Name of the policy.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'policy-name'], False],
    }
    return self.request("fpolicy-policy-scope-delete", api_args, {})
def fpolicy_extensions(self, policy_name, set_name, command, extensions=None):
    """Manipulate the list of extensions in the exclude or include set.

    The exclude set defines extension patterns that will not trigger
    fpolicy processing.

    :param policy_name: Name of the policy.
    :param set_name: Set the command applies to, "exclude" or "include".
        For instance command="add" with set_name="include" adds the given
        extensions to the include set.
    :param command: Operation applied to the set. Supported values: "add",
        "remove", "set", "reset".
    :param extensions: List of extensions; required when the command is
        "add", "set" or "remove".
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
        'set_name': [set_name, 'set-name', [basestring, 'None'], False],
        'command': [command, 'command', [basestring, 'None'], False],
        'extensions': [extensions, 'extensions', [ExtensionListInfo, 'None'], True],
    }
    return self.request("fpolicy-extensions", api_args, {})
def fpolicy_policy_external_engine_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """Return information on external engines, one page at a time.

    :param max_records: Maximum number of records to return per call.
        Default: 20.
    :param query: Restricts the result to fpolicy-policy-external-engine
        objects matching the query; any number of attributes may be
        constrained.
    :param tag: Pagination tag. Usually omitted on the first call; on
        subsequent calls pass the 'next-tag' value from the previous call.
    :param desired_attributes: When present, only these attributes (where
        available) are returned; otherwise all available attributes are.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [FpolicyExternalEngineInfo, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [FpolicyExternalEngineInfo, 'None'], False],
    }
    api_output = {
        'attributes-list': [FpolicyExternalEngineInfo, True],
    }
    return self.request("fpolicy-policy-external-engine-get-iter", api_args, api_output)
def fpolicy_get_policy_options(self, policy_name):
    """Show the values of a policy's options.

    :param policy_name: Name of the policy.
    """
    api_args = {
        'policy_name': [policy_name, 'policy-name', [basestring, 'None'], False],
    }
    api_output = {
        'reqcancel-timeout': [int, False],
        'is-required': [bool, False],
        'is-ads-monitored': [bool, False],
        'secondary-servers': [SecondaryServerInfo, True],
        'serverprogress-timeout': [int, False],
        'is-cifs-disconnect-check-enabled': [bool, False],
        'is-cifs-setattr-enabled': [bool, False],
    }
    return self.request("fpolicy-get-policy-options", api_args, api_output)
def fpolicy_policy_event_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """Return FPolicy event rows (protocol, file operations, volume
    operation and filters), one page at a time.

    :param max_records: Maximum number of records to return per call.
        Default: 20.
    :param query: Restricts the result to fpolicy-policy-event objects
        matching the query; any number of attributes may be constrained.
    :param tag: Pagination tag. Usually omitted on the first call; on
        subsequent calls pass the 'next-tag' value from the previous call.
    :param desired_attributes: When present, only these attributes (where
        available) are returned; otherwise all available attributes are.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [FpolicyEventOptionsConfig, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [FpolicyEventOptionsConfig, 'None'], False],
    }
    api_output = {
        'attributes-list': [FpolicyEventOptionsConfig, True],
    }
    return self.request("fpolicy-policy-event-get-iter", api_args, api_output)
| 61,477 | 15,819 |
DEBUG = False
import os
import sys
import re
import ast
try:
from MirroredDirectory import MirroredDirectory
from Std import Std
from FileSystem import FileSystem
from Importer import Importer
except ImportError:
from .MirroredDirectory import MirroredDirectory
from .Std import Std
from .FileSystem import FileSystem
from .Importer import Importer
class TemplateFileCreator:
    """Creates a new source file from a template matching its extension.

    For a file whose extension is ``ext`` and whose kind is ``k``, the
    template directory is expected to hold ``ext/k.template`` (the file
    body), ``ext/k.variables`` (a Python literal describing the placeholder
    variables) and ``ext/functions.py`` (a module whose ``FunctionCollection``
    class computes the replacement values).
    """

    def __init__(self, fileName = "", defaultFileExtension = ""):
        self._settings = None            # injected later via setSettings()
        self.fileSystem = FileSystem()
        self._fileComponents = MirroredDirectory()
        self.importer = Importer()
        self._templateDir = None         # injected later via setTemplateDir()
        self.set(fileName, defaultFileExtension)

    def set(self, fileName, defaultFileExtension = ""):
        """Re-target this creator at fileName and reset recorded cursors."""
        if DEBUG: print("TemplateFileCreator: setting dir to: '" + fileName + "'")
        self._fileComponents.set(fileName)
        if DEBUG: print("TemplateFileCreator: dir set to: '" + str(self._fileComponents.getOriginalFileName()) + "'")
        self._cursors = []

    def createFromTemplate(self):
        """Render the template for the current file and write it to disk.

        Returns whatever FileSystem.createFile() returns for the new file.
        """
        # All three inputs live under <templateDir>/<extension>/.
        kindDir = os.path.join(self.getTemplateDir(), self._fileComponents.getExtension())
        templatePath = os.path.join(kindDir, self.classifyKind() + ".template")
        variablePath = os.path.join(kindDir, self.classifyKind() + ".variables")
        functionPath = os.path.join(kindDir, "functions.py")
        templateContent = self.fileSystem.getFileContent(templatePath)
        variableContent = self.fileSystem.getFileContent(variablePath)
        functionCollectionObject = self.importer.getObjectInstance(functionPath, "FunctionCollection")()
        content = self.getReplacementContent(templateContent, variableContent,
                                             functionCollectionObject)
        if DEBUG: print("TemplateFileCreator: creating file: " + self._fileComponents.getOriginalFileName())
        return self.fileSystem.createFile(self._fileComponents.getOriginalFileName(), content)

    def setBasePath(self, basePath):
        """Forward the base path to the mirrored-directory components."""
        self._fileComponents.setBasePath(basePath)

    def setSettings(self, settings):
        """Inject the settings object read by getArgsDictFromVarContent()."""
        self._settings = settings

    def setTemplateDir(self, templateDir):
        """Set the root directory holding the per-extension templates."""
        self._templateDir = templateDir

    def getCursors(self):
        """Return the (line, column) tuples recorded by the last render."""
        return self._cursors

    def getFileName(self):
        """Return the original file name this creator targets."""
        return self._fileComponents.getOriginalFileName()

    def setDefaultExtension(self, fileExtension):
        """Set the extension used when the target file name has none."""
        self._fileComponents.setDefaultExtension(fileExtension)

    def getArgsDictFromVarContent(self, VarContent):
        """Parse a .variables literal into a {variableName: args} mapping.

        Each entry of the parsed literal must provide "variable" and
        "command" keys and may name settings to forward via "fromSettings".

        :raises TypeError: if VarContent is not a valid Python literal.
        """
        result = dict()
        try:
            varDictionary = ast.literal_eval(VarContent)
        except Exception as error:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the parse failure is kept as the cause.
            raise TypeError("the content of VarContent could not be converted to a dict.") from error
        for templateVar in varDictionary:
            variableName = templateVar["variable"]
            settingsValues = dict()
            if "fromSettings" in templateVar:
                for settingsVariable in templateVar["fromSettings"]:
                    settingsValues[settingsVariable] = self._settings.get(settingsVariable)
            args = dict()
            args["settings"] = str(settingsValues)
            args["name"] = variableName
            args["dir"] = self._fileComponents.getFileName()
            args["command"] = templateVar["command"]
            result[variableName] = args
        return result

    def getReplacements(self, args, functionCollectionObject):
        """Map "/* @name */" placeholders to the values produced by the
        corresponding FunctionCollection command."""
        # TODO: this check has loopholes...
        if isinstance(functionCollectionObject, (int, float, complex, str)) or functionCollectionObject is None:
            raise Exception("The functionCollectionObject argument must be an instance of an object, " + str(type(functionCollectionObject)) + " passed instead.")
        result = dict()
        for name, arg in Std.getIterItems(args):
            function = getattr(functionCollectionObject, arg["command"])
            result["/* @" + name + " */"] = function(arg)
        return result

    def getCursorsFromContent(self, templateContent):
        """Return the (line, column) positions of every "/* @cursor */"
        marker, as they will lie once the markers are removed."""
        lines = templateContent.splitlines()
        cursorString = "/* @cursor */"
        lineNbr = 0
        cursors = []
        for line in lines:
            # A line may carry several markers; record each at the column it
            # occupies after the preceding markers on the line are removed.
            while cursorString in line:
                row = line.find(cursorString)
                line = line[:row] + line[row + len(cursorString):]
                cursors.append((lineNbr, row))
            lineNbr += 1
        return cursors

    def getSearchStringForNone(self, templateContent, searchString):
        """Widen searchString to the whole matching line when that line holds
        nothing but the placeholder and comment punctuation, so that
        replacing it with "" removes the line entirely."""
        # re.escape() handles every regex metacharacter, not just '/' and
        # '*' as the previous hand-rolled escaping did.
        regexString = ".*(" + re.escape(searchString) + ").*\n?\r?"
        match = re.search(regexString, templateContent)
        if match:
            line = match.group()
            residue = line.replace(searchString, "")
            for token in ("*", "/", "#", "\"\"\"", "'''"):
                residue = residue.replace(token, "")
            if len(residue.strip(' \n\r\t')) < 1:
                searchString = line
        return searchString

    def getReplacementContent(self, templateContent, variableContent, functionCollectionObject):
        """Apply all placeholder replacements and record cursor positions."""
        args = self.getArgsDictFromVarContent(variableContent)
        replacements = self.getReplacements(args, functionCollectionObject)
        for searchString, replacement in Std.getIterItems(replacements):
            if replacement is None:
                # A None replacement deletes the whole line when the line
                # contains only the placeholder.
                replacement = ""
                searchString = self.getSearchStringForNone(templateContent, searchString)
            templateContent = templateContent.replace(searchString, replacement)
        self._cursors = self.getCursorsFromContent(templateContent)
        templateContent = templateContent.replace("/* @cursor */", "")
        return templateContent

    def getTemplateDir(self):
        """Return the template root directory (None until injected)."""
        return self._templateDir

    def classifyKind(self):
        """Return the template kind derived from the file components."""
        return self._fileComponents.getKind()

    def setKind(self, kind):
        """Override the template kind on the file components."""
        self._fileComponents.setKind(kind)
import requests
import os
from bs4 import BeautifulSoup, Comment
import pandas as pd
import numpy as np
import enchant
from sportrefpy.nfl.league import NFL
from sportrefpy.errors.not_found import PlayerNotFound
class NFLPlayer(NFL):
    """A single NFL player resolved against pro-football-reference's player index."""

    def __init__(self, player):
        """Look up *player* on the site's alphabetical last-name index.

        :param player: full player name (case-sensitive), e.g. "Tom Brady".
        :raises PlayerNotFound: if the name is absent from the index; the
            message includes a spelling suggestion when one is available.
        """
        super().__init__()
        # Personal word list of known names, used only for "did you mean" hints.
        # os.path.join keeps the path portable (the old "\\" literal was Windows-only).
        player_dict = enchant.PyPWL(
            os.path.join(os.path.dirname(os.path.dirname(__file__)), "assets", "nfl_players.txt")
        )
        # Index pages are keyed by the first letter of the last name.
        # NOTE(review): split()[1] assumes exactly "First Last"; a single-token
        # name raises IndexError and a suffix ("... Jr.") mis-resolves — confirm
        # what callers pass.
        first_letter_last_name = player.split()[1][0].upper()
        response = requests.get(self.url + f"/players/{first_letter_last_name}")
        soup = BeautifulSoup(response.text, features="lxml")
        players = soup.find("div", attrs={"id": "div_players"})
        if player in players.text:
            for choice in players:
                if player in choice.text:
                    self.full_name = player
                    self.player_url = self.url + choice.find("a")["href"]
        else:
            try:
                suggestion = player_dict.suggest(player)[0]
                message = f"""<{player}> not found.
Is it possible you meant {suggestion}?
Player names are case-sensitive."""
            except Exception:
                # No usable suggestion (e.g. empty suggest() result); fall back
                # to the plain message. (Was a bare except:, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                message = f"""<{player}> not found.
Player names are case-sensitive."""
            raise PlayerNotFound(message)
# def regular_season_batting(self, season=None, stat=None):
# '''
# Returns a players regular seasons batting stats by career.
# '''
# if not self.pitcher:
# batting = pd.read_html(self.player_url, attrs={'id': 'batting_standard'})[0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.contains('Yrs|yrs|yr|Avg')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# elif self.pitcher:
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'batting_standard' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# batting = tables[0][0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.contains('Yrs|yrs|yr|Avg')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# if season:
# try:
# return batting.loc[season]
# except KeyError:
# return None
# return batting
# def regular_season_pitching(self, season=None):
# '''
# Returns a players regular seasons pitching stats by career.
# '''
# if self.pitcher:
# pitching = pd.read_html(self.player_url, attrs={'id': 'pitching_standard'})[0]
# pitching.dropna(how='any', axis='rows', subset='Year', inplace=True)
# pitching = pitching[~pitching['Year'].str.contains('Yrs|yrs|yr|Avg')]
# pitching = pitching[pitching['Lg'].str.contains('NL|AL|MLB')]
# pitching = pitching.apply(pd.to_numeric, errors='ignore')
# pitching.set_index('Year', inplace=True)
# if season:
# try:
# return pitching.loc[season]
# except KeyError:
# return None
# return pitching
# else:
# return None
# def regular_season_fielding(self, season=None):
# '''
# Returns a players regular seasons fielding stats by career.
# '''
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'standard_fielding' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# fielding = tables[0][0]
# fielding.dropna(how='any', axis='rows', subset='Year', inplace=True)
# fielding = fielding[~fielding['Year'].str.contains('Seasons')]
# fielding = fielding[fielding['Lg'].str.contains('NL|AL|MLB')]
# fielding = fielding.apply(pd.to_numeric, errors='ignore')
# fielding.set_index('Year', inplace=True)
# if season:
# try:
# return fielding.loc[season]
# except KeyError:
# return None
# return fielding
# def post_season_batting(self, season=None):
# if not self.playoffs:
# return None
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'batting_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# batting = tables[0][0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.\
# contains('ALWC|NLWC|ALDS|NLDS|ALCS|NLCS|WS')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# if season:
# try:
# return batting.loc[season]
# except KeyError:
# return None
# return batting
# def post_season_pitching(self, season=None):
# if not self.pitcher:
# return None
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'pitching_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# pitching = tables[0][0]
# pitching.dropna(how='any', axis='rows', subset='Year', inplace=True)
# pitching = pitching[~pitching['Year'].str.\
# contains('ALWC|NLWC|ALDS|NLDS|ALCS|NLCS|WS')]
# pitching = pitching[pitching['Lg'].str.contains('NL|AL|MLB')]
# pitching = pitching.apply(pd.to_numeric, errors='ignore')
# pitching.set_index('Year', inplace=True)
# if season:
# try:
# return pitching.loc[season]
# except KeyError:
# return None
# return pitching
# def career_totals_pitching(self, stat=None):
# if self.pitcher:
# reg = pd.read_html(self.player_url, attrs={'id': 'pitching_standard'})[0]
# reg = reg[reg['Year'].str.contains('Yrs', na=False)]
# reg = reg.apply(pd.to_numeric, errors='ignore')
# reg.reset_index(drop=True, inplace=True)
# reg.drop(columns={'Year', 'Age', 'Tm', 'Lg', 'Awards'},
# inplace=True)
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'pitching_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# post = tables[0][0]
# post = post[post['Year'].str.contains('Yrs', na=False)]
# post = post.apply(pd.to_numeric, errors='ignore')
# post.drop(columns={'Year', 'Age', 'Tm', 'Lg'},
# inplace=True)
# career = reg.merge(post, how='outer')
# career.drop(columns={'Series', 'Rslt', 'Opp', 'WPA', 'cWPA'}, inplace=True)
# career = pd.DataFrame(career.sum())
# career.columns = ['Totals']
# if stat:
# try:
# return career.loc[stat]
# except KeyError:
# return None
# return career
# else:
# return None
| 9,801 | 2,995 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""argparse and entry point script for undiscord flask/cheroot server"""
import argparse
import os
import sys
from logging import getLogger
from cheroot.wsgi import Server as WSGIServer, PathInfoDispatcher
import undiscord.server.server
from undiscord.common import add_log_parser, init_logging
__log__ = getLogger(__name__)
def get_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the undiscord flask/cheroot server."""
    parser = argparse.ArgumentParser(
        description="Start the UnDiscord flask/cheroot server",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    server_group = parser.add_argument_group("server")
    server_group.add_argument(
        "-d", "--host", default='0.0.0.0',
        help="Hostname to listen on")
    server_group.add_argument(
        "-p", "--port", default=8080, type=int,
        help="Port of the webserver")
    server_group.add_argument(
        "-g", "--graph-dir", dest="graph_dir", default="graph",
        help="Directory to store generated graphs")
    server_group.add_argument(
        "--debug", action="store_true",
        help="Run the server in Flask debug mode")
    # Shared logging options (verbosity, log file) live in the common module.
    add_log_parser(parser)
    return parser
def main(argv=None) -> int:
    """Main entry point for the undiscord flask/cheroot server.

    :param argv: command-line arguments; defaults to sys.argv[1:], resolved at
        call time rather than frozen at import time (the old default
        ``argv=sys.argv[1:]`` was evaluated once when the module loaded).
    :return: process exit code.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = get_parser()
    args = parser.parse_args(argv)
    init_logging(args, "undiscord_server.log")
    graph_dir = os.path.abspath(args.graph_dir)
    os.makedirs(graph_dir, exist_ok=True)
    # Lazy %-style args avoid building the message when INFO is disabled.
    __log__.info("starting server: host: %s port: %s graph_dir: %s",
                 args.host, args.port, graph_dir)
    undiscord.server.server.GRAPH_DIR = graph_dir
    if args.debug:
        # Flask's built-in development server (reloader + debugger).
        undiscord.server.server.APP.run(
            host=args.host,
            port=args.port,
            debug=True
        )
    else:
        # Production path: cheroot WSGI server behind a path dispatcher.
        path_info_dispatcher = PathInfoDispatcher({'/': undiscord.server.server.APP})
        server = WSGIServer((args.host, args.port), path_info_dispatcher)
        try:
            server.start()
        except KeyboardInterrupt:
            __log__.info("stopping server: KeyboardInterrupt detected")
            server.stop()
            return 0
        except Exception:
            __log__.exception("stopping server: unexpected exception")
            raise
    return 0


if __name__ == "__main__":
    sys.exit(main())
| 2,395 | 730 |
from .compute_changed_event_data import ComputeChangedEventData
from .compute_client import ComputeClient
from .compute_client_impl import ComputeClientImpl
from .compute_requirement_helper import ComputeRequirementHelper
from .compute_requirement_instances_changed_event_listener import \
ComputeRequirementInstancesChangedEventListener
from .compute_requirement_instances_changed_event_listener_impl import \
ComputeRequirementInstancesChangedEventListenerImpl
from .compute_service_proxy import ComputeServiceProxy
from .predicated_compute_subscription_event_listener import PredicatedComputeSubscriptionEventListener
# Public API, kept in alphabetical order to match the import list above.
__all__ = [
    "ComputeChangedEventData",
    "ComputeClient",
    "ComputeClientImpl",
    "ComputeRequirementHelper",
    "ComputeRequirementInstancesChangedEventListener",
    "ComputeRequirementInstancesChangedEventListenerImpl",
    "ComputeServiceProxy",
    "PredicatedComputeSubscriptionEventListener",
]
| 943 | 244 |
"""
Contains the logic needed to run a game of Connect Four.
"""
# Built-in modules
from copy import deepcopy
# User-defined modules
from .components import ConnectFourGrid, Disc
from .exceptions import IllegalAction, IllegalState, InvalidSpace
from ..utilities import js_callback
# Hex colors assigned to player discs, in player-id order; the list length
# also caps the number of players a game can have.
DISC_COLORS = [
    '#F5473E', # red
    '#FEEC49', # yellow
    '#048B44', # green
    '#293777', # blue
]
class Player:
    """A Connect Four participant, identified by numeric id and display name."""

    def __init__(self, player_id: int, name: str):
        self.id = player_id
        self.name = name

    def __deepcopy__(self, memodict=None):
        # Player holds only immutable fields, so a fresh instance is a full
        # deep copy; the memo dict is accepted for copy-protocol compatibility
        # but not needed. (None default replaces the old shared mutable {}.)
        return Player(self.id, self.name)

    @property
    def state(self):
        """JSON-serializable snapshot of this player."""
        return { 'id' : self.id, 'name' : self.name }
class ConnectFourGame:
    """
    Encapsulates logic for setting up the initial parameters of a Connect Four game and establishing its rules.

    Tracks the players, the grid, whose turn it is, and the winner (if any);
    gameplay is driven by dropping discs into columns until one player lines
    up the required number of discs.
    """
def __init__(self, player_names:'list[str]'=['Player One', 'Player Two'], width=7, height=6, victory_condition=4):
"""
Sets up a game of Connect Four.
:param `player_names`: List of the names of the players participating in this game.
:param `width`: Width of the grid.
:param `height`: Height of the grid.
:param `victory_condition`: Number of discs that need to line up horizontally,
vertically, or diagonally on the grid for a single player to win the game.
"""
# Checks for valid number of players
if len(player_names) < 2:
raise IllegalState('Game cannot be setup without at least two players.')
elif len(player_names) > len(DISC_COLORS):
raise IllegalState('Game cannot be setup with more than {0} players.'.format(len(DISC_COLORS)))
self.players = [Player(index, name) for index, name in enumerate(player_names)]
self.current_player = 0 # Starts with first player
self.discs = [Disc(player.id, DISC_COLORS[index]) for index, player in enumerate(self.players)]
self.grid = ConnectFourGrid(width, height)
self.victory_condition = victory_condition
self.winner_id = None
def __repr__(self):
"""
Produces visual representation of the Connect Four grid (from top to bottom), displaying _ for empty spaces,
and player ids wherever a player's disc is inserted. It will also show the id of the player who is next to move.
:return: String representing state of the Connect Four grid.
"""
board_repr = str(self.grid)
board_repr += '-------------------------\n'
board_repr += 'Next Player: {0}'.format(self.players[self.current_player].name)
return board_repr
def __deepcopy__(self, memodict={}):
game = ConnectFourGame(height=0, width=0) # Short-circuits initial setup logic for efficiency
game.players = deepcopy(self.players)
game.current_player = self.current_player
game.discs = deepcopy(self.discs)
game.grid = deepcopy(self.grid)
game.victory_condition = self.victory_condition
game.winner_id = self.winner_id
return game
@js_callback
def get_state(self):
state = {
'players' : [player.state for player in self.players],
'current_player' : self.current_player,
'discs' : [disc.state for disc in self.discs],
'grid' : self.grid.state,
'victory_condition' : self.victory_condition,
'winner_id' : self.winner_id
}
return state
def _get_player_chain(self, player_id: int, start_row: int, start_col: int, row_inc: int, col_inc: int):
"""
Gets a list of discs that belong to the given player, starting from the given start row and column,
continuing into a given direction based on the given row and column increments, and ending once either a disc
belonging to a different player is reached or the edge of the grid is reached.
:param `player_id`: Player id whose discs are being checked for.
:param `start_row`: Starting row to check for discs.
:param `start_col`: Starting column to check for discs.
:param `row_inc`: Increments the row after every check is made for a disc.
:param `col_inc`: Increments the column after every check is made for a disc.
:return: A list of discs belonging to the given player, within a direction determined by the row and column
increments.
"""
chain = []
row = start_row
col = start_col
while row >= 0 and row < self.grid.height and col >= 0 and col < self.grid.width:
disc = self.grid.grid_spaces[col][row].disc
if disc is not None and disc.player_id == player_id:
chain.append(self.grid.grid_spaces[col][row].disc)
row += row_inc
col += col_inc
else:
break
return chain
@js_callback
def check_for_discs_in_row(self, row: int, col: int, discs_in_row: int, player_id: int = None):
"""
Checks for a line of horizontal, vertical, or diagonal discs that are at least the
given number of discs in a row for a single player.
:param `row`: Starting row to check for discs in a row from.
:param `col`: Starting column to check for discs in a row from.
:param `discs_in_row`: Number of discs in a row to check for.
:param `player_id`: Player id whose discs are being looked for. If none is provided, the player id
of the disc at the given row and column will be used instead.
:return: Player id with the given number of discs in a row, or None if given discs in a row can't be found
at given starting row and column.
"""
if row < 0 or row > self.grid.height or col < 0 or col > self.grid.width:
raise InvalidSpace("Attempted to check a space that doesn't exist on the grid!")
player_id = player_id if player_id is not None else self.grid.grid_spaces[col][row].disc.player_id
# Checks for vertical line of discs
upper = self._get_player_chain(player_id, row + 1, col, 1, 0)
lower = self._get_player_chain(player_id, row - 1, col, -1, 0)
if len(upper) + len(lower) + 1 >= discs_in_row:
return player_id
# Checks for horizontal line of discs
left = self._get_player_chain(player_id, row, col - 1, 0, -1)
right = self._get_player_chain(player_id, row, col + 1, 0, 1)
if len(left) + len(right) + 1 >= discs_in_row:
return player_id
# Checks for downward-right diagonal line of discs
upper_left = self._get_player_chain(player_id, row + 1, col - 1, 1, -1)
lower_right = self._get_player_chain(player_id, row - 1, col + 1, -1, 1)
if len(upper_left) + len(lower_right) + 1 >= discs_in_row:
return player_id
# Checks for upward-right diagonal line of discs
lower_left = self._get_player_chain(player_id, row - 1, col - 1, -1, -1)
upper_right = self._get_player_chain(player_id, row + 1, col + 1, 1, 1)
if len(lower_left) + len(upper_right) + 1 >= discs_in_row:
return player_id
return None
@js_callback
def change_player(self, player_id: int = None):
"""
Changes player. If player id is given, that player id is explicitly set, otherwise goes to the next
player in the list of players.
:param `player_id`: Id of player to set.
:return: Id of player being changed to.
"""
if player_id is None:
self.current_player = self.current_player + 1 if self.current_player + 1 < len(self.players) else 0
else:
if player_id >= len(self.players):
raise IllegalAction('Player id does not exist in the list of players')
self.current_player = player_id
return self.current_player
@js_callback
def drop_disc(self, col_num: int):
"""
Drops disc belonging to the current player in the given column, and switches to the next player.
:return: Player id if player has won the game, None otherwise.
"""
disc = self.discs[self.current_player]
row_num = self.grid.drop_disc(disc, col_num)
player_id = self.check_for_discs_in_row(row_num, col_num, self.victory_condition)
# Has next player make move if current player has not won
if player_id is None:
self.change_player()
else:
self.winner_id = player_id
return player_id
@js_callback
def reset_game(self):
"""
Starts a new game, reverting grid and game conditions to their initial states.
:return: State of game after reset.
"""
self.grid.setup_grid()
self.current_player = 0
self.winner_id = None
return self.get_state() | 8,943 | 2,672 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from clinicgen.text.textfilter import LowerTextFilter
class TestLowerTextFilter(unittest.TestCase):
    """Unit tests for LowerTextFilter."""

    def test_filter(self):
        """A mixed-case string should come back fully lower-cased."""
        lower_filter = LowerTextFilter()
        self.assertEqual(lower_filter.filter('Hello NLP!'), 'hello nlp!')
| 317 | 103 |
# -*- coding: utf-8 -*-
"""
Transformer: handles the Dublin Core recommendation for XHTML for adding DC values. What this means is that:
- DC namespaces are defined via C{<link rel="schema.XX" value="...."/>}
- The 'XX.term' is used much like QNames in C{<link>} and C{<meta>} elements. For the latter, the namespaced names are added to a C{@property} attribute.
This transformer adds "real" namespaces and changes the DC references in link and meta elements to abide to the
RDFa namespace syntax.
@summary: Dublin Core transformer
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: Ivan Herman, ivan@w3.org
"""
"""
@version: $Id: DublinCore.py,v 1.4 2012-01-18 14:16:44 ivan Exp $
$Date: 2012-01-18 14:16:44 $
"""
def DC_transform(html, options, state) :
    """
    Rewrite Dublin Core "XX.term" references in link/meta elements into RDFa
    prefix:term CURIEs, adding the corresponding xmlns declarations on head.
    @param html: a DOM node for the top level html element
    @param options: invocation options
    @type options: L{Options<pyRdfa.options>}
    @param state: top level execution state
    @type state: L{State<pyRdfa.state>}
    """
    from ..host import HostLanguage
    if not( options.host_language in [ HostLanguage.xhtml, HostLanguage.html5, HostLanguage.xhtml5 ] ) :
        return

    # the head element is necessary; to be sure, the namespaces are set
    # on that level only
    try :
        head = html.getElementsByTagName("head")[0]
    except IndexError :
        # no head element at all; nothing to transform
        # (was a bare except:, which also hid unrelated errors)
        return

    # At first, the DC namespaces must be found
    dcprefixes = {}
    for link in html.getElementsByTagName("link") :
        if link.hasAttribute("rel") :
            rel = link.getAttribute("rel")
            uri = link.getAttribute("href")
            if uri is not None and rel is not None and rel.startswith("schema.") :
                # bingo...
                try :
                    localname = rel.split(".")[1]
                    head.setAttributeNS("", "xmlns:"+localname,uri)
                    dcprefixes[localname] = uri
                except Exception :
                    # problem with the split or the DOM update; just ignore
                    pass

    def _remap(value) :
        # Rewrite space-separated "prefix.term" tokens into "prefix:term"
        # CURIEs for every prefix registered above; other tokens pass through
        # unchanged. (This used to be duplicated for link and meta elements.)
        newProp = ""
        for token in value.strip().split() :
            # see if there is '.' to separate the attributes
            if token.find(".") != -1 :
                key = token.split(".",1)[0]
                lname = token.split(".",1)[1]
                if key in dcprefixes and lname != "" :
                    # yep, this is one of those...
                    newProp += " " + key + ":" + lname
                else :
                    newProp += " " + token
            else :
                newProp += " " + token
        return newProp.strip()

    # get the link elements now to find the dc elements
    for link in html.getElementsByTagName("link") :
        if link.hasAttribute("rel") :
            link.setAttribute("rel", _remap(link.getAttribute("rel")))

    # do almost the same with the meta elements...
    for meta in html.getElementsByTagName("meta") :
        if meta.hasAttribute("name") :
            meta.setAttribute("property", _remap(meta.getAttribute("name")))
from snake import Environment, GUI, PlayerController
if __name__ == '__main__':
    gui = None  # assigned only once GUI construction succeeds
    try:
        env = Environment()
        gui = GUI(env)
        # The controller hooks player input to the environment; keep the
        # reference alive for the duration of the loop.
        controller = PlayerController(env)
        running, _ = env.reset()
        # Step the environment until it reports it is no longer running.
        while running:
            running, snake_pos = env.step()
            print('running', running)
    finally:
        # Always restore the terminal; skip teardown if the GUI was never
        # built. (Replaces the old NameError catch and the pointless
        # 'except Exception as e: raise e' re-raise.)
        if gui is not None:
            gui._terminate_curses()