index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
25,000 | 4ed9ea2dd2ae307ae6e1dc8593982fdaee94847d | # encoding=utf8
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chameleon/security/identity/service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from protobuf import annotations_pb2 as protobuf_dot_annotations__pb2
from protobuf.google.api import annotations_pb2 as protobuf_dot_google_dot_api_dot_annotations__pb2
from chameleon.security.identity import data_pb2 as chameleon_dot_security_dot_identity_dot_data__pb2
from chameleon.security.identity import service_message_pb2 as chameleon_dot_security_dot_identity_dot_service__message__pb2
# File descriptor for chameleon/security/identity/service.proto.
#
# NOTE(review): in the original dump the serialized_pb bytes literal was
# wrapped across physical lines, which is a SyntaxError in Python (a raw
# newline inside a b'...' literal).  It is restored below as implicitly
# concatenated adjacent bytes literals — the form protoc itself emits —
# which yields exactly the same byte string.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='chameleon/security/identity/service.proto',
  package='chameleon.security.identity',
  syntax='proto3',
  # Serialized FileOptions (java package/class, Go package, etc.).
  serialized_options=b'\n\037com.chameleon.security.identityB\014ServiceProtoP\001Z$chameleon/security/identity;identity\210\001\001',
  create_key=_descriptor._internal_create_key,
  # Wire-format FileDescriptorProto for this .proto file; the
  # serialized_start/serialized_end offsets used by the service
  # descriptors later in this module index into these bytes.
  serialized_pb=(
    b'\n)chameleon/security/identity/service.proto\x12\x1b\x63hameleon.security.identity\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1aprotobuf/annotations.proto\x1a%protobuf/google/api/annotations.proto\x1a&chameleon/security/identity/data.proto\x1a\x31\x63hameleon/security/identity/service_message.proto2\xea\x08\n\x0bRoleManager\x12\x95\x01\n\x03Get\x12+.chameleon.security.identity.GetRoleRequest\x1a!.chameleon.security.identity.Role\">\x82\xd3\xe4\x93\x02\x19\x12\x17/security/identity/role\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\xab\x01\n\x04List\x12,.chameleon.security.identity.ListRoleRequest\x1a-.chameleon.security.identity.ListRoleResponse\"F\x82\xd3\xe4\x93\x02!\"\x1c/security/identity/role/list:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\x91\x01\n\x06\x43reate\x12!.chameleon.security.identity.Role\x1a!.chameleon.security.identity.Role\"A\x82\xd3\xe4\x93\x02\x1c\"\x17/security/identity/role:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\x93\x01\n\x06Update\x12..chameleon.security.identity.UpdateRoleRequest\x1a\x16.google.protobuf.Empty\"A\x82\xd3\xe4\x93\x02\x1c\x32\x17/security/identity/role:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\x9c\x01\n\x07\x41\x64\x64Rule\x12/.chameleon.security.identity.AddRoleRuleRequest\x1a\x16.google.protobuf.Empty\"H\x82\xd3\xe4\x93\x02#\"!/security/identity/role/{id}/rule\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\xa6\x01\n\tResetRule\x12\x31.chameleon.security.identity.ResetRoleRuleRequest\x1a\x16.google.protobuf.Empty\"N\x82\xd3\xe4\x93\x02)\"\'/security/identity/role/{id}/rule/reset\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role\x12\xa2\x01\n\nDeleteRule\x12\x32.chameleon.security.identity.DeleteRoleRuleRequest\x1a\x16.google.protobuf.Empty\"H\x82\xd3\xe4\x93\x02#*!/security/identity/role/{id}/rule\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.role2\xd3\x13\n\x0bUserManager\x12\xaf\x01\n\x05'
    b'\x43ount\x12-.chameleon.security.identity.CountUserRequest\x1a..chameleon.security.identity.CountUserResponse\"G\x82\xd3\xe4\x93\x02\"\"\x1d/security/identity/user/count:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\xab\x01\n\x04List\x12,.chameleon.security.identity.ListUserRequest\x1a-.chameleon.security.identity.ListUserResponse\"F\x82\xd3\xe4\x93\x02!\"\x1c/security/identity/user/list:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12s\n\x03Who\x12\x16.google.protobuf.Empty\x1a(.chameleon.security.identity.WhoResponse\"*\x82\xd3\xe4\x93\x02\x1d\x12\x1b/security/identity/user/who\x82\x82\x87\x03\x02\x08\x02\x12\x96\x01\n\x05\x45xist\x12/.chameleon.security.identity.IsUserExistRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02\x1f\x12\x1d/security/identity/user/exist\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\xb7\x01\n\x06\x45xists\x12\x30.chameleon.security.identity.IsUserExistsRequest\x1a\x31.chameleon.security.identity.IsUserExistsResponse\"H\x82\xd3\xe4\x93\x02#\"\x1e/security/identity/user/exists:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x95\x01\n\x03Get\x12+.chameleon.security.identity.GetUserRequest\x1a!.chameleon.security.identity.User\">\x82\xd3\xe4\x93\x02\x19\x12\x17/security/identity/user\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\xab\x01\n\x04Gets\x12,.chameleon.security.identity.GetsUserRequest\x1a-.chameleon.security.identity.GetsUserResponse\"F\x82\xd3\xe4\x93\x02!\"\x1c/security/identity/user/gets:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x91\x01\n\x06\x43reate\x12!.chameleon.security.identity.User\x1a!.chameleon.security.identity.User\"A\x82\xd3\xe4\x93\x02\x1c\"\x17/security/identity/user:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x93\x01\n\x06Update\x12..chameleon.security.identity.UpdateUserRequest\x1a\x16.google.protobuf.Empty\"A\x82\xd3\xe4\x93\x02\x1c\x32\x17/security/identity/use'
    b'r:\x01*\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x98\x01\n\x0bUpdateField\x12\x33.chameleon.security.identity.UpdateUserFieldRequest\x1a\x16.google.protobuf.Empty\"<\x82\xd3\xe4\x93\x02/2\'/security/identity/user/default/{field}:\x04\x64\x61ta\x82\x82\x87\x03\x02\x08\x02\x12\xa1\x01\n\x08GetRoles\x12\x30.chameleon.security.identity.GetUserRolesRequest\x1a\x31.chameleon.security.identity.GetUserRolesResponse\"0\x82\xd3\xe4\x93\x02#\x12!/security/identity/user/{id}/role\x82\x82\x87\x03\x02\x08\x02\x12\x9c\x01\n\x07\x41\x64\x64Role\x12/.chameleon.security.identity.AddUserRoleRequest\x1a\x16.google.protobuf.Empty\"H\x82\xd3\xe4\x93\x02#\"!/security/identity/user/{id}/role\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\xa2\x01\n\nDeleteRole\x12\x32.chameleon.security.identity.DeleteUserRoleRequest\x1a\x16.google.protobuf.Empty\"H\x82\xd3\xe4\x93\x02#*!/security/identity/user/{id}/role\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x95\x01\n\x06\x44\x65lete\x12..chameleon.security.identity.DeleteUserRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02\x1e*\x1c/security/identity/user/{id}\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12\x9f\x01\n\x07Restore\x12/.chameleon.security.identity.RestoreUserRequest\x1a\x16.google.protobuf.Empty\"K\x82\xd3\xe4\x93\x02&\"$/security/identity/user/{id}/restore\x82\x82\x87\x03\x1a\x08\x02\x12\x16security.identity.user\x12o\n\x06Verify\x12*.chameleon.security.identity.VerifyRequest\x1a\x16.google.protobuf.Empty\"!\x82\xd3\xe4\x93\x02\x1b\"\x19/security/identity/verify2\x99*\n\x12\x41pplicationManager\x12\xcb\x01\n\x05\x43ount\x12\x34.chameleon.security.identity.CountApplicationRequest\x1a\x35.chameleon.security.identity.CountApplicationResponse\"U\x82\xd3\xe4\x93\x02)\"$/security/identity/application/count:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xc7\x01\n\x04List\x12\x33.chameleon.security.identity.ListApplicationRequest\x1a\x34.cham'
    b'eleon.security.identity.ListApplicationResponse\"T\x82\xd3\xe4\x93\x02(\"#/security/identity/application/list:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xb0\x01\n\x05\x45xist\x12\x36.chameleon.security.identity.IsApplicationExistRequest\x1a\x16.google.protobuf.Empty\"W\x82\xd3\xe4\x93\x02+\x12)/security/identity/application/exist/{id}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xd3\x01\n\x06\x45xists\x12\x37.chameleon.security.identity.IsApplicationExistsRequest\x1a\x38.chameleon.security.identity.IsApplicationExistsResponse\"V\x82\xd3\xe4\x93\x02*\"%/security/identity/application/exists:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xb6\x01\n\x03Get\x12\x32.chameleon.security.identity.GetApplicationRequest\x1a(.chameleon.security.identity.Application\"Q\x82\xd3\xe4\x93\x02%\x12#/security/identity/application/{id}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xc7\x01\n\x04Gets\x12\x33.chameleon.security.identity.GetsApplicationRequest\x1a\x34.chameleon.security.identity.GetsApplicationResponse\"T\x82\xd3\xe4\x93\x02(\"#/security/identity/application/gets:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xad\x01\n\x06\x43reate\x12(.chameleon.security.identity.Application\x1a(.chameleon.security.identity.Application\"O\x82\xd3\xe4\x93\x02#\"\x1e/security/identity/application:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xa8\x01\n\x06Update\x12\x35.chameleon.security.identity.UpdateApplicationRequest\x1a\x16.google.protobuf.Empty\"O\x82\xd3\xe4\x93\x02#2\x1e/security/identity/application:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xaa\x01\n\x06\x44\x65lete\x12\x35.chameleon.security.identity.DeleteApplicationRequest\x1a\x16.google.protobuf.Empty\"Q\x82\xd3\xe4\x93\x02%*#/security/identity/application/{id}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xb4\x01\n\x07Restore\x12\x36.c'
    b'hameleon.security.identity.RestoreApplicationRequest\x1a\x16.google.protobuf.Empty\"Y\x82\xd3\xe4\x93\x02-\x12+/security/identity/application/restore/{id}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xbd\x01\n\nSetOptions\x12\x39.chameleon.security.identity.SetApplicationOptionsRequest\x1a\x16.google.protobuf.Empty\"\\\x82\xd3\xe4\x93\x02\x30\x1a+/security/identity/application/{id}/options:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xb4\x01\n\x07SetTags\x12\x36.chameleon.security.identity.SetApplicationTagsRequest\x1a\x16.google.protobuf.Empty\"Y\x82\xd3\xe4\x93\x02-\x1a(/security/identity/application/{id}/tags:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xe6\x01\n\x0bResetSecret\x12:.chameleon.security.identity.ResetApplicationSecretRequest\x1a;.chameleon.security.identity.ResetApplicationSecretResponse\"^\x82\xd3\xe4\x93\x02\x32\x1a\x30/security/identity/application/{id}/secret/reset\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xdb\x01\n\x0cGetSecretKey\x12;.chameleon.security.identity.GetApplicationSecretKeyRequest\x1a\x31.chameleon.security.identity.ApplicationSecretKey\"[\x82\xd3\xe4\x93\x02/\x12-/security/identity/application/{id}/secretkey\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xc8\x01\n\x12GetSecretPublicKey\x12\x41.chameleon.security.identity.GetApplicationSecretPublicKeyRequest\x1a\x31.chameleon.security.identity.ApplicationSecretKey\"<\x82\xd3\xe4\x93\x02\x36\x12\x34/security/identity/application/{id}/secretkey/public\x12\xdb\x01\n\x0c\x41\x64\x64SecretKey\x12;.chameleon.security.identity.AddApplicationSecretKeyRequest\x1a\x31.chameleon.security.identity.ApplicationSecretKey\"[\x82\xd3\xe4\x93\x02/\"-/security/identity/application/{id}/secretkey\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xd6\x01\n\x13SetDefaultSecretKey\x12\x42.chameleon.security.identity.SetApplicationDefaultSecretKeyRequest\x1a\x16.google.pro'
    b'tobuf.Empty\"c\x82\xd3\xe4\x93\x02\x37\"5/security/identity/application/{id}/secretkey/default\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xce\x01\n\x0f\x44\x65leteSecretKey\x12>.chameleon.security.identity.DeleteApplicationSecretKeyRequest\x1a\x16.google.protobuf.Empty\"c\x82\xd3\xe4\x93\x02\x37*5/security/identity/application/{id}/secretkey/{keyID}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xe5\x01\n\x15GetDefaultRedirectURI\x12\x44.chameleon.security.identity.GetApplicationDefaultRedirectURIRequest\x1a\x45.chameleon.security.identity.GetApplicationDefaultRedirectURIResponse\"?\x82\xd3\xe4\x93\x02\x39\x12\x37/security/identity/application/{id}/redirecturi/default\x12\xdc\x01\n\x15SetDefaultRedirectURI\x12\x44.chameleon.security.identity.SetApplicationDefaultRedirectURIRequest\x1a\x16.google.protobuf.Empty\"e\x82\xd3\xe4\x93\x02\x39\"7/security/identity/application/{id}/redirecturi/default\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\x80\x02\n\x14GetWhiteRedirectURIs\x12\x43.chameleon.security.identity.GetApplicationWhiteRedirectURIsRequest\x1a\x44.chameleon.security.identity.GetApplicationWhiteRedirectURIsResponse\"]\x82\xd3\xe4\x93\x02\x31\x12//security/identity/application/{id}/redirecturi\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xf5\x01\n\x13\x41\x64\x64WhiteRedirectURI\x12\x42.chameleon.security.identity.AddApplicationWhiteRedirectURIRequest\x1a\x38.chameleon.security.identity.ApplicationWhiteRedirectURI\"`\x82\xd3\xe4\x93\x02\x34\"//security/identity/application/{id}/redirecturi:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xde\x01\n\x16\x44\x65leteWhiteRedirectURI\x12\x45.chameleon.security.identity.DeleteApplicationWhiteRedirectURIRequest\x1a\x16.google.protobuf.Empty\"e\x82\xd3\xe4\x93\x02\x39*7/security/identity/application/{id}/redirecturi/{uriID}\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xd6\x01\n\x16\x43learWhiteR'
    b'edirectURIs\x12\x45.chameleon.security.identity.ClearApplicationWhiteRedirectURIsRequest\x1a\x16.google.protobuf.Empty\"]\x82\xd3\xe4\x93\x02\x31*//security/identity/application/{id}/redirecturi\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xc3\x01\n\x12\x41\x64\x64\x41pplicationRole\x12\x36.chameleon.security.identity.AddApplicationRoleRequest\x1a\x16.google.protobuf.Empty\"]\x82\xd3\xe4\x93\x02\x31\",/security/identity/application/{id}/role/add:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.application\x12\xcc\x01\n\x15RemoveApplicationRole\x12\x39.chameleon.security.identity.RemoveApplicationRoleRequest\x1a\x16.google.protobuf.Empty\"`\x82\xd3\xe4\x93\x02\x34\"//security/identity/application/{id}/role/remove:\x01*\x82\x82\x87\x03!\x08\x02\x12\x1dsecurity.identity.applicationBZ\n\x1f\x63om.chameleon.security.identityB\x0cServiceProtoP\x01Z$chameleon/security/identity;identity\x88\x01\x01\x62\x06proto3'
  ),
  # Descriptors of the imported .proto files, in import order.
  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,protobuf_dot_annotations__pb2.DESCRIPTOR,protobuf_dot_google_dot_api_dot_annotations__pb2.DESCRIPTOR,chameleon_dot_security_dot_identity_dot_data__pb2.DESCRIPTOR,chameleon_dot_security_dot_identity_dot_service__message__pb2.DESCRIPTOR,])
# Make this file's types resolvable through the default symbol database.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# Service descriptor for chameleon.security.identity.RoleManager.
# The seven MethodDescriptors are built from a compact (name, input type,
# output type, serialized options) table so the per-method boilerplate is
# written once; every field value is identical to the expanded form.  The
# serialized_options bytes carry the method-level proto options (they
# contain the HTTP route strings visible below) and are kept byte-for-byte.
_ROLEMANAGER = _descriptor.ServiceDescriptor(
    name='RoleManager',
    full_name='chameleon.security.identity.RoleManager',
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    # Offsets of this service inside DESCRIPTOR's serialized file proto.
    serialized_start=262,
    serialized_end=1392,
    methods=[
        _descriptor.MethodDescriptor(
            name=_mname,
            full_name='chameleon.security.identity.RoleManager.' + _mname,
            index=_midx,
            containing_service=None,
            input_type=_min,
            output_type=_mout,
            serialized_options=_mopts,
            create_key=_descriptor._internal_create_key,
        )
        for _midx, (_mname, _min, _mout, _mopts) in enumerate([
            ('Get',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETROLEREQUEST,
             chameleon_dot_security_dot_identity_dot_data__pb2._ROLE,
             b'\202\323\344\223\002\031\022\027/security/identity/role\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('List',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTROLEREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTROLERESPONSE,
             b'\202\323\344\223\002!\"\034/security/identity/role/list:\001*\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('Create',
             chameleon_dot_security_dot_identity_dot_data__pb2._ROLE,
             chameleon_dot_security_dot_identity_dot_data__pb2._ROLE,
             b'\202\323\344\223\002\034\"\027/security/identity/role:\001*\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('Update',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._UPDATEROLEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002\0342\027/security/identity/role:\001*\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('AddRule',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._ADDROLERULEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002#\"!/security/identity/role/{id}/rule\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('ResetRule',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._RESETROLERULEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002)\"\'/security/identity/role/{id}/rule/reset\202\202\207\003\032\010\002\022\026security.identity.role'),
            ('DeleteRule',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEROLERULEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002#*!/security/identity/role/{id}/rule\202\202\207\003\032\010\002\022\026security.identity.role'),
        ])
    ])
# Register the service and expose it on the file descriptor by name.
_sym_db.RegisterServiceDescriptor(_ROLEMANAGER)
DESCRIPTOR.services_by_name['RoleManager'] = _ROLEMANAGER
# Service descriptor for chameleon.security.identity.UserManager.
# The sixteen MethodDescriptors are generated from a (name, input type,
# output type, serialized options) table; the method index is the table
# position, and every field value matches the expanded protoc output.
# serialized_options bytes (method-level proto options, including the HTTP
# route strings visible below) are preserved byte-for-byte.
_USERMANAGER = _descriptor.ServiceDescriptor(
    name='UserManager',
    full_name='chameleon.security.identity.UserManager',
    file=DESCRIPTOR,
    index=1,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    # Offsets of this service inside DESCRIPTOR's serialized file proto.
    serialized_start=1395,
    serialized_end=3910,
    methods=[
        _descriptor.MethodDescriptor(
            name=_mname,
            full_name='chameleon.security.identity.UserManager.' + _mname,
            index=_midx,
            containing_service=None,
            input_type=_min,
            output_type=_mout,
            serialized_options=_mopts,
            create_key=_descriptor._internal_create_key,
        )
        for _midx, (_mname, _min, _mout, _mopts) in enumerate([
            ('Count',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._COUNTUSERREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._COUNTUSERRESPONSE,
             b'\202\323\344\223\002\"\"\035/security/identity/user/count:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('List',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTUSERREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTUSERRESPONSE,
             b'\202\323\344\223\002!\"\034/security/identity/user/list:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Who',
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._WHORESPONSE,
             b'\202\323\344\223\002\035\022\033/security/identity/user/who\202\202\207\003\002\010\002'),
            ('Exist',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._ISUSEREXISTREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002\037\022\035/security/identity/user/exist\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Exists',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._ISUSEREXISTSREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._ISUSEREXISTSRESPONSE,
             b'\202\323\344\223\002#\"\036/security/identity/user/exists:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Get',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETUSERREQUEST,
             chameleon_dot_security_dot_identity_dot_data__pb2._USER,
             b'\202\323\344\223\002\031\022\027/security/identity/user\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Gets',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETSUSERREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETSUSERRESPONSE,
             b'\202\323\344\223\002!\"\034/security/identity/user/gets:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Create',
             chameleon_dot_security_dot_identity_dot_data__pb2._USER,
             chameleon_dot_security_dot_identity_dot_data__pb2._USER,
             b'\202\323\344\223\002\034\"\027/security/identity/user:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Update',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._UPDATEUSERREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002\0342\027/security/identity/user:\001*\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('UpdateField',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._UPDATEUSERFIELDREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002/2\'/security/identity/user/default/{field}:\004data\202\202\207\003\002\010\002'),
            ('GetRoles',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETUSERROLESREQUEST,
             chameleon_dot_security_dot_identity_dot_service__message__pb2._GETUSERROLESRESPONSE,
             b'\202\323\344\223\002#\022!/security/identity/user/{id}/role\202\202\207\003\002\010\002'),
            ('AddRole',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._ADDUSERROLEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002#\"!/security/identity/user/{id}/role\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('DeleteRole',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEUSERROLEREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002#*!/security/identity/user/{id}/role\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Delete',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEUSERREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002\036*\034/security/identity/user/{id}\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Restore',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._RESTOREUSERREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002&\"$/security/identity/user/{id}/restore\202\202\207\003\032\010\002\022\026security.identity.user'),
            ('Verify',
             chameleon_dot_security_dot_identity_dot_service__message__pb2._VERIFYREQUEST,
             google_dot_protobuf_dot_empty__pb2._EMPTY,
             b'\202\323\344\223\002\033\"\031/security/identity/verify'),
        ])
    ])
# Register the service and expose it on the file descriptor by name.
_sym_db.RegisterServiceDescriptor(_USERMANAGER)
DESCRIPTOR.services_by_name['UserManager'] = _USERMANAGER
_APPLICATIONMANAGER = _descriptor.ServiceDescriptor(
name='ApplicationManager',
full_name='chameleon.security.identity.ApplicationManager',
file=DESCRIPTOR,
index=2,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3913,
serialized_end=9314,
methods=[
_descriptor.MethodDescriptor(
name='Count',
full_name='chameleon.security.identity.ApplicationManager.Count',
index=0,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._COUNTAPPLICATIONREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._COUNTAPPLICATIONRESPONSE,
serialized_options=b'\202\323\344\223\002)\"$/security/identity/application/count:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='chameleon.security.identity.ApplicationManager.List',
index=1,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTAPPLICATIONREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._LISTAPPLICATIONRESPONSE,
serialized_options=b'\202\323\344\223\002(\"#/security/identity/application/list:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Exist',
full_name='chameleon.security.identity.ApplicationManager.Exist',
index=2,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ISAPPLICATIONEXISTREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002+\022)/security/identity/application/exist/{id}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Exists',
full_name='chameleon.security.identity.ApplicationManager.Exists',
index=3,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ISAPPLICATIONEXISTSREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ISAPPLICATIONEXISTSRESPONSE,
serialized_options=b'\202\323\344\223\002*\"%/security/identity/application/exists:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Get',
full_name='chameleon.security.identity.ApplicationManager.Get',
index=4,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATION,
serialized_options=b'\202\323\344\223\002%\022#/security/identity/application/{id}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Gets',
full_name='chameleon.security.identity.ApplicationManager.Gets',
index=5,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETSAPPLICATIONREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETSAPPLICATIONRESPONSE,
serialized_options=b'\202\323\344\223\002(\"#/security/identity/application/gets:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Create',
full_name='chameleon.security.identity.ApplicationManager.Create',
index=6,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATION,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATION,
serialized_options=b'\202\323\344\223\002#\"\036/security/identity/application:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Update',
full_name='chameleon.security.identity.ApplicationManager.Update',
index=7,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._UPDATEAPPLICATIONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002#2\036/security/identity/application:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='chameleon.security.identity.ApplicationManager.Delete',
index=8,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEAPPLICATIONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002%*#/security/identity/application/{id}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Restore',
full_name='chameleon.security.identity.ApplicationManager.Restore',
index=9,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._RESTOREAPPLICATIONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002-\022+/security/identity/application/restore/{id}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SetOptions',
full_name='chameleon.security.identity.ApplicationManager.SetOptions',
index=10,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._SETAPPLICATIONOPTIONSREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0020\032+/security/identity/application/{id}/options:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SetTags',
full_name='chameleon.security.identity.ApplicationManager.SetTags',
index=11,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._SETAPPLICATIONTAGSREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002-\032(/security/identity/application/{id}/tags:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ResetSecret',
full_name='chameleon.security.identity.ApplicationManager.ResetSecret',
index=12,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._RESETAPPLICATIONSECRETREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._RESETAPPLICATIONSECRETRESPONSE,
serialized_options=b'\202\323\344\223\0022\0320/security/identity/application/{id}/secret/reset\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetSecretKey',
full_name='chameleon.security.identity.ApplicationManager.GetSecretKey',
index=13,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONSECRETKEYREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATIONSECRETKEY,
serialized_options=b'\202\323\344\223\002/\022-/security/identity/application/{id}/secretkey\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetSecretPublicKey',
full_name='chameleon.security.identity.ApplicationManager.GetSecretPublicKey',
index=14,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONSECRETPUBLICKEYREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATIONSECRETKEY,
serialized_options=b'\202\323\344\223\0026\0224/security/identity/application/{id}/secretkey/public',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddSecretKey',
full_name='chameleon.security.identity.ApplicationManager.AddSecretKey',
index=15,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ADDAPPLICATIONSECRETKEYREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATIONSECRETKEY,
serialized_options=b'\202\323\344\223\002/\"-/security/identity/application/{id}/secretkey\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SetDefaultSecretKey',
full_name='chameleon.security.identity.ApplicationManager.SetDefaultSecretKey',
index=16,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._SETAPPLICATIONDEFAULTSECRETKEYREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0027\"5/security/identity/application/{id}/secretkey/default\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteSecretKey',
full_name='chameleon.security.identity.ApplicationManager.DeleteSecretKey',
index=17,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEAPPLICATIONSECRETKEYREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0027*5/security/identity/application/{id}/secretkey/{keyID}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetDefaultRedirectURI',
full_name='chameleon.security.identity.ApplicationManager.GetDefaultRedirectURI',
index=18,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONDEFAULTREDIRECTURIREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONDEFAULTREDIRECTURIRESPONSE,
serialized_options=b'\202\323\344\223\0029\0227/security/identity/application/{id}/redirecturi/default',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SetDefaultRedirectURI',
full_name='chameleon.security.identity.ApplicationManager.SetDefaultRedirectURI',
index=19,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._SETAPPLICATIONDEFAULTREDIRECTURIREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0029\"7/security/identity/application/{id}/redirecturi/default\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetWhiteRedirectURIs',
full_name='chameleon.security.identity.ApplicationManager.GetWhiteRedirectURIs',
index=20,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONWHITEREDIRECTURISREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._GETAPPLICATIONWHITEREDIRECTURISRESPONSE,
serialized_options=b'\202\323\344\223\0021\022//security/identity/application/{id}/redirecturi\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddWhiteRedirectURI',
full_name='chameleon.security.identity.ApplicationManager.AddWhiteRedirectURI',
index=21,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ADDAPPLICATIONWHITEREDIRECTURIREQUEST,
output_type=chameleon_dot_security_dot_identity_dot_data__pb2._APPLICATIONWHITEREDIRECTURI,
serialized_options=b'\202\323\344\223\0024\"//security/identity/application/{id}/redirecturi:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteWhiteRedirectURI',
full_name='chameleon.security.identity.ApplicationManager.DeleteWhiteRedirectURI',
index=22,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._DELETEAPPLICATIONWHITEREDIRECTURIREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0029*7/security/identity/application/{id}/redirecturi/{uriID}\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ClearWhiteRedirectURIs',
full_name='chameleon.security.identity.ApplicationManager.ClearWhiteRedirectURIs',
index=23,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._CLEARAPPLICATIONWHITEREDIRECTURISREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0021*//security/identity/application/{id}/redirecturi\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddApplicationRole',
full_name='chameleon.security.identity.ApplicationManager.AddApplicationRole',
index=24,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._ADDAPPLICATIONROLEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0021\",/security/identity/application/{id}/role/add:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RemoveApplicationRole',
full_name='chameleon.security.identity.ApplicationManager.RemoveApplicationRole',
index=25,
containing_service=None,
input_type=chameleon_dot_security_dot_identity_dot_service__message__pb2._REMOVEAPPLICATIONROLEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0024\"//security/identity/application/{id}/role/remove:\001*\202\202\207\003!\010\002\022\035security.identity.application',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_APPLICATIONMANAGER)
DESCRIPTOR.services_by_name['ApplicationManager'] = _APPLICATIONMANAGER
# @@protoc_insertion_point(module_scope)
|
25,001 | b5be06e3f060ecdd05e186798c0a2262c344e3fd | #!/usr/bin/env python
# ------------------------------------------
# 0713 MMXVI Gregorian AST UTC-0400
# ------------------------------------------
# Atelier-Velvet Corporation et alia
# Monserrate-Mills-Malvo 1.7 Attack 0.1
#
# Carro Cruz, Manuel Alberto
# Copyright 2016 Atelier-Velvet Corporation.
# -------------------------------------------
# Escuela de Ingenieria Jose Domingo Perez
# Sistema Universitario Ana G Mendez
# Universidad del Turabo
# CPEN 503: Computer and Network Security
# Term Project Monserrate-Mills Crypto
# Prof. Almodovar J, PhD, PE
# -------------------------------------------
import os, sys
from Crypto.Cipher import PKCS1_OAEP, AES
from Crypto.PublicKey import RSA
from Crypto import Random
def mills_malvo():
print "Monserrate-Mills-Malvo 1.7 Attack 0.1 CPEN 503 Final Project Crypto"
print "Copyright 2016 Atelier-Velvet Corporation."
# IMPORTATION OF THE KEYS
privateA = RSA.importKey(open('KA.der').read())
kpublicA = RSA.importKey(open('KA.der.pub').read())
privateB = RSA.importKey(open('KB.der').read())
kpublicB = RSA.importKey(open('KB.der.pub').read())
privattA = RSA.importKey(open('KAattack.der').read())
kpubattA = RSA.importKey(open('KAattack.der.pub').read())
privattB = RSA.importKey(open('KBattack.der').read())
kpubattB = RSA.importKey(open('KBattack.der.pub').read())
# REDEFINITION OF THE RSA KEYS SO THAT THEY MATCH THE CORRECT LENGTH AND ORDER (INTERNAL RSA KEY MUST BE SHORTER IN LENGTH THAN EXTERNAL RSA KEY)
KAPRI = kpublicB
KAPUB = privateB
KBPRI = privateA
KBPUB = kpublicA
KAMPRI = kpubattB
KAMPUB = privattB
KBMPRI = privattA
KBMPUB = kpubattA
# ATTACK ON THE "SECURE" SCHEMA POSTULATED BY THE BOOK: RSA[KBPUB, RSA[KAPRI, K]]--->ARCRSA[KBPRI, ARCRSA[KAPUB, RSA[KBPUB, RSA[KAPRI, K]]]]
print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
print "ATTACK ON THE ``SECURE'' SCHEMA POSTULATED BY THE BOOK: RSA[KBPUB, RSA[KAPRI, K]]--->ARCRSA[KBPRI, ARCRSA[KAPUB, RSA[KBPUB, RSA[KAPRI, K]]]]:"
print " "
print "Order of Events:"
print " "
print "INTERCEPTION OF THE PUBLIC KEYS:"
print " "
print "[TRANSMISSION FROM ALICE INTENDED TO BOB]: Alice sends Bob her public key, KAPUB."
print "[TRANSMISSION FROM ALICE TO BOB INTERCEPTED BY MALVO]: Malvo swaps Alice's public key, KAPUB, with a public key from the pairs of his own, KAMPUB."
print "[TRANSMISSION FROM MALVO TO BOB]: Malvo sends Bob the swap of Alice's public key, KAMPUB, and stores Alice's public key, KAPUB, in his key repository."
print "[TRANSMISSION FROM BOB INTENDED TO ALICE]: Bob sends Alice his public key, KBPUB."
print "[TRANSMISSION FROM BOB TO ALICE INTERCEPTED BY MALVO]: Malvo swaps Bob's public key, KBPUB, with another public key from the pairs of his own, KBMPUB."
print "[TRANSMISSION FROM MALVO TO ALICE]: Malvo sends Alice the swap of Bob's public key, KBMPUB, and stores Bob's public key, KBPUB, in his key repository."
print " "
print "EXTRACTION OF THE SYMMETRIC KEY:"
print " "
print "[GENERATION OF THE 16 BYTE SYMMETRIC KEY BY ALICE]:"
K = raw_input("Alice, enter the symmetric key and press enter to send to Bob: ")
print "Allice entered: ", K
print " "
print "[DOUBLE RSA ENCRYPTION AND TRANSMITTAL OF SYMMETRIC KEY USING ALICE'S PRIVATE KEY, THEN MALVO'S BOB COMPROMISED PUBLIC KEY]:"
print " "
cipher = PKCS1_OAEP.new(KAPRI)
ciphertextAM0 = cipher.encrypt(K)
cipher = PKCS1_OAEP.new(KBMPUB)
ciphertextAM1 = cipher.encrypt(ciphertextAM0)
print "[MALVO'S DOUBLE RSA DECRYPTION OF ALICE'S CIPHERTEXT TO BOB USING ALICE'S PUBLIC KEY AND MALVO'S BOB PRIVATE KEY, KBMPRI]:"
cipher = PKCS1_OAEP.new(KBMPRI)
ciphertextMA1 = cipher.decrypt(ciphertextAM1)
cipher = PKCS1_OAEP.new(KAPUB)
plaintextAM0 = cipher.decrypt(ciphertextMA1)
print "[MALVO'S EXTRACTED SYMMETRIC KEY PAR INTERCEPTION FROM ALICE TO BOB IS]: ", plaintextAM0
print " "
print "[GENERATION OF COMPROMISED SYMMETRIC KEY FOR MALVO TO SEND BOB AS IF WERE COMMING FROM ALICE]"
K_hat = raw_input("Enter the compromised symmetric key to send to Bob in the name of Alice, Malvoo .... : ")
print "Malvo entered: ", K_hat
print " "
print "[DOUBLE RSA ENCRYPTION AND TRANSMITTAL OF COMPROMISED SYMMETRIC KEY USING MALVO'S ALICE PRIVATE KEY, THEN BOB'S PUBLIC KEY]:"
cipher = PKCS1_OAEP.new(KAMPRI)
ciphertextMB0 = cipher.encrypt(K_hat)
cipher = PKCS1_OAEP.new(KBPUB)
ciphertextMB1 = cipher.encrypt(ciphertextMB0)
print " "
print "[BOB'S DOUBLE RSA DECRYPTION OF MALVO'S COMPROMISED CIPHERTEXT USING BOB'S PRIVATE KEY AND MALVO'S ALICE PUBLIC KEY, KAMPUB]:"
cipher = PKCS1_OAEP.new(KBPRI)
ciphertextBM1 = cipher.decrypt(ciphertextMB1)
cipher = PKCS1_OAEP.new(KAMPUB)
plaintextBM0 = cipher.decrypt(ciphertextBM1)
print "[BOB RECEIVES COMPROMISED SYMMETRIC KEY]: ", plaintextBM0
print " "
print "[ALICE AND BOB NOW THINK THEY SHARE THE SAME SYMMETRIC KEY ... MALVO KNOWS THEY'LL BE USING AES TO TRANSMIT ACROSS THE CHANNEL ....]"
print " "
print "[ALICE ---> BOB]: ALICE AES ENCRYPTS MESSAGE MA ON THE BLOCKSIZE (16 BYTES) WITH KEY K (16, 24, OR 32 BITES) AND SENDS IT TO BOB"
print " "
BLOCK_SIZE = 16
MA = raw_input("Alice, enter your message to bob ... It's secure !: ")
print "Alice's message MA was: ", MA
key = plaintextAM0
iv = Random.new().read(BLOCK_SIZE)
cipher = AES.new(key.encode(), AES.MODE_CFB, iv)
aciphertextAM = iv + cipher.encrypt(MA.encode())
print " "
print "[MALVO INTERCEPTS ALICE'S AES ENCRYPTED CIPHERTEXT TO BOB AND DECRYPS IT USING ALICE'S EXTRACTED SYMMETRIC KEY]"
AM = cipher.decrypt(aciphertextAM)
print "[MALVO RECOVERS AND READS ALICE'S MESSAGE TO BOB ...]: ", AM
print " "
print "[MALVO NOW RE AES ENCRYPTS ALICE'S MESSAGE TO BOB BUT USING THE COMPROMISED SYMMETRIC KEY, K HAT]:"
key_hat = K_hat
iv = Random.new().read(BLOCK_SIZE)
cipher = AES.new(key_hat.encode(), AES.MODE_CFB, iv)
aciphertextMB = iv + cipher.encrypt(AM)
print " "
print "[BOB DECRYPTS THE COMPROMISED AES CIPHERTEXT USING THE COMPROMISED SYMMETRIC KEY, K HAT]:"
BM = cipher.decrypt(aciphertextMB)
print "Here it is Bob, the message so securely sent by Alice ;): ", BM
# Run the interactive MITM demonstration only when executed as a script.
if __name__ == '__main__':
    mills_malvo()
|
25,002 | 9617587f852d32c08158856584753e46baf0b305 | '''
Write a script that demonstrates a try/except/else.
'''
# Demonstrate try/except/else: the `else` clause runs only when the `try`
# block raised no exception.
#
# BUG FIX: the original `try:` held a `with open(...):` with no body
# (a SyntaxError), and the `else` branch wrote to a file that was either
# closed or opened read-only.
try:
    # "r+" requires the file to exist (so a missing file raises
    # FileNotFoundError) but, unlike "r", also allows writing in `else`.
    f1 = open("next_travel_destinations", "r+")
except FileNotFoundError:
    # First run: create the file and seed it with a prompt.
    with open("next_travel_destinations", "w+") as f1:
        f1.write("Enter all the places you would like to go to: ")
else:
    # File already existed: append a destination, then close it.
    with f1:
        f1.seek(0, 2)  # move to end-of-file so we append, not overwrite
        f1.write("I want to go to Iceland")
25,003 | 41a8a9ca4760389916bf085c141c63c9be13dc01 | def array_advance(A):
if not A:
return False
furthest_reach = 0
last_idx = len(A)-1
i = 0
while i <= furthest_reach and furthest_reach < last_idx:
furthest_reach = max(furthest_reach, A[i]+i)
i += 1
return furthest_reach >= last_idx
# implementation
# Demo: A1 is reachable, A2 is not, and the empty board A3 is unreachable,
# so this prints True, False, False.
A1 = [3, 3, 1, 0, 2, 0, 1]
A2 = [3, 2, 1, 0, 0, 0, 1]
A3 = []
print(array_advance(A1))
print(array_advance(A2))
print(array_advance(A3))
# plus one algorithm
def plus_one_map(A):
    """Increment the integer represented by the digit list A.

    Joins the digits into a string and converts to int, so the whole
    increment is a single arithmetic operation.  Returns the resulting
    int, or False for an empty list (matching plus_one_alg).
    """
    # BUG FIX: an empty list previously crashed with int('') -> ValueError.
    if not A:
        return False
    return int(''.join(map(str, A))) + 1
def plus_one_alg(A):
    """Add one to the digit list A in place and return it.

    Carries propagate from the least-significant digit; a carry out of
    the most-significant digit grows the list by one.  Returns False
    for an empty list.
    """
    if not A:
        return False
    A[-1] += 1
    # Walk from the last digit toward the front, propagating carries.
    for pos in range(len(A) - 1, 0, -1):
        if A[pos] != 10:
            break
        A[pos] = 0
        A[pos - 1] += 1
    # A carry out of the leading digit lengthens the number.
    if A[0] == 10:
        A[0] = 1
        A.append(0)
    return A
# Demo for both plus-one variants.  Note that plus_one_alg mutates its
# argument in place, so A1 and B1 are changed by the last two calls.
A1 = [1, 4, 9]
B1 = [9, 9, 9]
print(plus_one_map(A1))
print(plus_one_map(B1))
print(plus_one_alg(A1))
print(plus_one_alg(B1))
|
25,004 | c4a6800a0af89fcde130901b0f5a1f53195f9811 |
# coding: utf-8
# In[1]:
import sys
sys.path.insert(0, './sentiment/')
import utility
import Fake_master
import pickle
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from nltk.tokenize import word_tokenize
#Fake_master.main()
# In[ ]:
if __name__ == "__main__":
    # Load the trained classifier, vectorizer, and feature weights from disk.
    with open('./modeldata/nb.txt', 'rb') as fh:
        nb = pickle.load(fh)
    with open('./modeldata/cvec.txt', 'rb') as fh:
        cvec = pickle.load(fh)
    with open('./modeldata/feature_weight_dict.txt', 'rb') as fh:
        feature_weight_dict = pickle.load(fh)
    # Expect exactly two CLI arguments: the headline sample and the
    # number of LIME features to display.
    if len(sys.argv) == 3:
        sample = sys.argv[1]
        num_features = sys.argv[2]
        class_names = ['TheOnion', 'nottheonion']
        # figpath is the image path for the dynamic-analysis plot.
        figpath = utility.generate_dynamic_analysis(nb, cvec, 'fakenews', class_names, sample)
        Fake_master.prediction(sample, nb, cvec)
        # limepath/piepath are the image paths for the LIME analysis plots.
        limepath, piepath = utility.generate_lime(nb, cvec, class_names, sample, 'fakenews', num_features)
# lime path is the image path for lime analysis images
# In[2]:
#sample = "This Man Is Pretending To Piss For A Little Longer At The Urinal So He Doesn’t Have To Talk To One Of His Coworkers Washing His Hands"
|
25,005 | 9b6596215f897e3cfc51ad394e9997b8b6725722 | from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Conferences(Base):
    """ORM model mapping one row of the `conferences` table."""

    __tablename__ = 'conferences'

    # Surrogate primary key backed by a dedicated sequence.
    conferenceid = Column(Integer, Sequence('conferences_conferenceid_seq'), primary_key=True)
    meeting = Column(String)
    location = Column(String)

    def __repr__(self):
        details = (self.meeting, self.location)
        return "<Conference(meeting='%s', location='%s')>" % details
|
25,006 | 92fb5a3e3ab364f8926096426e0cb9b81e8c136b | import pandas as pd
import numpy as np
# ----------------- One-hot encoding with pandas -----------------
data = pd.read_csv(r'C:\Users\hdsr\Desktop\Internship\Machine Learning\Data Preprocessing\Churn\Churn_Modelling.csv')

# Small working slice: first five rows, feature columns 1-4 plus the label.
X = data.iloc[0:5, 1:5]
Y = data.iloc[0:5, -1]
print(X)
print(Y)

# pd.get_dummies() converts a categorical column into indicator columns.
Geeography = pd.get_dummies(X['Geography'], prefix='geography')
Surname = pd.get_dummies(X['Surname'], prefix='Surname')
data = pd.concat([Geeography, Surname], axis=1)

# Swap the raw categorical columns for their one-hot encodings.
X = pd.concat([X.drop(['Geography', 'Surname'], axis=1), data], axis=1)
print(X)
#-----------------One Hot Encoding using sklearn---------------
# from sklearn.preprocessing import LabelEncoder
# from sklearn.preprocessing import OneHotEncoder
# data=X.iloc[:,1]
# print(data)
# values=np.array(data)
# print(values)
# label_encoder=LabelEncoder()
# integer_encoder = label_encoder.fit_transform(values)
# print(integer_encoder)
# onehot_encoder=OneHotEncoder(sparse=False)
# integer_encoder= integer_encoder.reshape(len(integer_encoder),1)
# print(integer_encoder)
# one_encoder = onehot_encoder.fit_transform(integer_encoder)
# print(one_encoder)
|
25,007 | 142532840e495ebba484e10f96964fecd7261bbb | from youtube_transcript_api import YouTubeTranscriptApi
import re
import numpy as np
from textblob import TextBlob
import pandas as pd
# These are for the wordcloud
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
#%matplotlib inline
import plotly.offline as py
#py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
#Thesea re for the streamlit and working word cloud
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import streamlit as st
from PIL import Image
# Get Transcript from Youtube
def getTranscript(url):
    """Fetch and join the transcript text for a YouTube video URL.

    Extracts the video id from the `v=` query parameter, downloads the
    transcript segments, and joins their text fields into one string.
    """
    from urllib.parse import urlparse, parse_qs

    # BUG FIX: the old `url[url.index('=') + 1:]` took everything after
    # the FIRST '=', so URLs with extra parameters (e.g. ...&t=4585s,
    # as in main()'s default link) produced an invalid video id.
    query = parse_qs(urlparse(url).query)
    if 'v' in query:
        ytID = query['v'][0]
    else:
        # Fallback for bare "...=<id>" style inputs (original behavior).
        ytID = url[url.index('=') + 1:]
    sermonDict = YouTubeTranscriptApi.get_transcript(ytID)
    sermonList = [d['text'] for d in sermonDict]
    transcript = " ".join(sermonList)
    return transcript
# Split sermon from Music
def get_sermon(text):
    """Return the longest span between consecutive '[Music]' markers.

    The transcript alternates music and speech; the longest stretch
    between two markers is assumed to be the sermon.  The returned span
    starts right after one marker and runs to the END of the next one,
    so it includes that trailing '[Music]' tag (original behavior).

    If fewer than two markers are present the segmentation is undefined,
    so the whole text is returned instead of raising.
    """
    # End offsets of every '[Music]' occurrence.
    service_segments = [m.end() for m in re.finditer(r'\[Music\]', text)]
    # BUG FIX: with fewer than two markers, max()/index() below raised;
    # fall back to the full text.
    if len(service_segments) < 2:
        return text
    # Gap lengths between consecutive markers.
    diff_segments = [j - i for i, j in zip(service_segments[:-1], service_segments[1:])]
    longest = diff_segments.index(max(diff_segments))
    return text[service_segments[longest]:service_segments[longest + 1]]
# Full Sentiment
def get_full_sentiment(text):
    """Return the TextBlob polarity score in [-1, 1] for the whole text."""
    blob = TextBlob(text)
    return blob.sentiment.polarity
# Split Lines by 100 words and get sentiment per
def split_lines_func(sermon_input):
    """Chunk the transcript and score each chunk's sentiment.

    Returns a DataFrame with a 'sentence' column (text chunks) and a
    'polarity' column (TextBlob polarity per chunk).
    """
    # Create list from text of individual entities
    # Interleave every word with a single-space element, so `newlist`
    # holds word, ' ', word, ' ', ... (the trailing space is dropped).
    newlist = [i for j in sermon_input.split() for i in (j, ' ')][:-1]
    # create list of 100 list items per string
    # NOTE(review): each chunk is 100 *list items* = 50 words + 50 spaces,
    # not 100 words as the surrounding comments suggest — confirm which
    # granularity was intended.
    z = 0
    sentences = []
    while z <= len(newlist):
        sentences.append(''.join(newlist[z:z + 100]))
        z = z + 100
    # create sentence_df
    sentence_df = pd.DataFrame(sentences)
    sentence_df = sentence_df.rename(columns={0: 'sentence'})
    def detect_polarity(text):
        # TextBlob polarity in [-1, 1] for one chunk.
        return TextBlob(text).sentiment.polarity
    sentence_df['polarity'] = sentence_df.sentence.apply(detect_polarity)
    # NOTE(review): .head() here is a no-op whose result is discarded —
    # likely a leftover from a notebook.
    sentence_df.head()
    return sentence_df
# Word Cloud
def cloud(text):
    """Render a word cloud for `text` and push the figure to Streamlit."""
    stop_set = set(STOPWORDS)
    # Create and generate a word cloud image.
    wc = WordCloud(stopwords=stop_set, background_color='white').generate(text)
    # Display the generated image.
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    plt.show()
    st.pyplot(plt)
def gauge(row):
    """Draw a Plotly gauge of the mean polarity and embed it in Streamlit.

    `row` is a pandas Series of polarity values; its .mean() is shown.
    Returns whatever st.write returns.
    """
    fig = go.Figure(go.Indicator(
        domain={'x': [0, 1], 'y': [0, 1]},
        value=row.mean(),
        mode="gauge+number",
        title={'text': "Polarity"},
        # delta = {'reference': 380},
        # Gauge axis spans the full polarity range [-1, 1]; the threshold
        # line marks neutral sentiment (0).
        gauge={'axis': {'range': [-1, 1]},
               'steps': [
                   {'range': [-1, 1], 'color': "red"},
                   {'range': [-0.5, 0.5], 'color': "gray"}],
               'threshold': {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': 0}}))
    return st.write(fig)
def line_chart(df, col):
    """Plot df[col] as a line chart and push the figure to Streamlit."""
    series = df[col]
    plt.xticks(series, df.index.values)  # location, labels
    plt.plot(series)
    st.pyplot(plt)
def main():
    """Streamlit entry point: fetch a sermon transcript and analyze it."""
    st.write("# Analyze Sermon Sentiment and WordCloud")
    st.write("[Ethan Chandler](https://ethanc.dev)")
    yt_link = st.text_area("Add full Youtube Link", value='https://www.youtube.com/watch?v=fHbmtZMkXQs&t=4585s')
    # NOTE(review): the transcript is downloaded on every rerun, even
    # before "Analyze" is pressed — consider moving this inside the
    # button branch.
    trans = get_sermon(getTranscript(yt_link))
    # NOTE(review): st.text_area returns a string, so this condition is
    # always true; it never guards anything.
    if yt_link is not None:
        if st.button("Analyze"):
            st.write(cloud(trans))
            sermon_table = split_lines_func(trans)
            gauge(sermon_table.polarity)
            st.write("## Sermon Transcript")
            st.write(trans)
    st.sidebar.write("Sermon Video")
    st.sidebar.video(yt_link)
    #st.sidebar.write(line_chart(sermon_table, 'polarity'))
# Run the Streamlit app when executed directly (e.g. `streamlit run`).
if __name__=="__main__":
    main()
# x = getTranscript('https://www.youtube.com/watch?v=fHbmtZMkXQs&t=4583s')
#
# y = get_sermon(x)
#
# fullSentimentNum = get_full_sentiment(y)
#
# table = split_lines_func(y)
#
# print(table)
#plt.xticks( sentence_df['polarity'], sentence_df.index.values ) # location, labels
#plt.plot( sentence_df['polarity'] )
#plt.show()
|
25,008 | 3944c742faa7b69878b75cb0f4c99917dbe4755b | from typing import Dict, List
__author__ = ["Eleftheria Chatziargyriou <ele.hatzy@gmail.com>"]
__license__ = "MIT License. See LICENSE."
"""
Stemming present a significant challenge in ME, as it is exceptionally
difficult to account for the orthographical variations sometimes even
occurring within a single text. The affix algorithm attempts to account
for variations in spelling, but still mostly relies on a relatively narrow
hard-coded list (Middle English Dictionary(MED) https://quod.lib.umich.edu/m/med/)
TODO: Improve on the affix stemmer by implementing an accurate spell checker
TODO: Implement a stochastic algorithm/Implement overarching stemmer class
"""
SUFFIXES = [
"rightes",
"eresse",
"kinnes",
"lechen",
"licher",
"linges",
"lokest",
"longes",
"wardes",
"atour",
"aunce",
"enger",
"estre",
"evous",
"iende",
"iinde",
"istre",
"ivous",
"lesse",
"liche",
"liece",
"liest",
"lyese",
"nesce",
"neshe",
"nissa",
"nisse",
"omlie",
"right",
"somes",
"trice",
"eren",
"erie",
"acle",
"ager",
"aten",
"atif",
"aunt",
"cund",
"elet",
"ende",
"erel",
"esse",
"fold",
"ible",
"ical",
"ieth",
"inde",
"ioun",
"ious",
"iple",
"laes",
"laus",
"leas",
"lech",
"lese",
"lice",
"ling",
"long",
"lous",
"lyas",
"ment",
"most",
"nece",
"rede",
"ship",
"soum",
"uous",
"ward",
"ade",
"age",
"ail",
"ain",
"air",
"and",
"ard",
"ari",
"dom",
"ede",
"els",
"eon",
"ere",
"est",
"eth",
"eur",
"ful",
"gat",
"hed",
"ial",
"ien",
"ier",
"ild",
"ing",
"ise",
"ish",
"ist",
"ith",
"kin",
"lac",
"les",
"leu",
"lez",
"læs",
"mel",
"mor",
"nes",
"nez",
"oir",
"orn",
"oun",
"our",
"ous",
"som",
"ure",
"wil",
"al",
"an",
"ar",
"at",
"ed",
"el",
"en",
"er",
"es",
"et",
"fi",
"if",
"ik",
"il",
"in",
"ir",
"it",
"li",
"ok",
"om",
"on",
"ot",
"re",
"se",
"te",
"th",
"ti",
"ur",
]
PREFIXES = [
"yester",
"yister",
"yistyr",
"yistyr",
"yuster",
"forth",
"yond",
"eth",
"toe",
"too",
"tou",
"tow",
"tuo",
"two",
"at",
"ef",
"et",
"ex",
"ta",
"te",
"th",
"to",
"tu",
]
# Used for attaching endings to suffixes, catches more orthographical variations (e.g 'ir', 'ire')
ENDS = ["", "s", "e", "en", "es"]
def stem(
    word: str,
    exception_list: Dict[str, str] = dict(),  # NOTE: mutable default, but never mutated here
    strip_pref: bool = True,
    strip_suf: bool = True,
) -> str:
    """
    Stem a Middle English word by rule-based affix stripping.

    :param word: the word to stem
    :param exception_list: explicit word -> stem overrides, checked first
        (serves as a dictionary look-up stemmer)
    :param strip_pref: whether to strip a leading prefix
    :param strip_suf: whether to strip trailing suffixes

    The affix stemmer works by rule-based stripping. It can work on prefixes,
    >>> stem('yesterday')
    'day'
    suffixes,
    >>> stem('likingnes')
    'liking'
    or both
    >>> stem('yisterdayes')
    'day'
    You can also define whether the stemmer will strip suffixes
    >>> stem('yisterdayes', strip_suf = False)
    'dayes'
    or prefixes
    >>> stem('yisterdayes', strip_pref = False)
    'yisterday'
    The stemmer also accepts a user-defined dictionary, that essentially serves
    the function of a dictionary look-up stemmer
    >>> stem('arisnesse', exception_list = {'arisnesse':'rise'})
    'rise'
    """
    # Explicit overrides win outright.
    if word in exception_list:
        return exception_list[word]
    # Very short words are left untouched: stripping would destroy the stem.
    if len(word) <= 4:
        return word
    if strip_pref:
        # Strip at most one prefix; PREFIXES is ordered longest-first,
        # so the longest matching prefix is removed.
        for prefix in PREFIXES:
            if word.startswith(prefix):
                word = word[len(prefix) :]
                break
    if strip_suf:
        # Each pass virtually appends an ending `en` ('', 's', 'e', ...)
        # to the word so orthographic variants of a suffix still match.
        for en in ENDS:
            if len(word) <= 4:
                break
            # Strip suffixes
            for suffix in SUFFIXES:
                # SUFFIXES is ordered longest-first: once candidates are no
                # longer than the virtual ending, nothing further can match.
                if len(suffix) <= len(en):
                    break
                if (word + en).endswith(suffix):
                    # Remove the part of the suffix actually present in
                    # `word` (i.e. the suffix minus the virtual ending).
                    word = word[: -len(suffix) + len(en)]
                    break
            if len(word) <= 4:
                break
    return word
|
25,009 | a97594bde732dbe4ab2ba1be52612f5b5c373148 | '''
The model with both two branches.
"Deep learning of Human Visual Sensitivity in Image Quality Assessment Framework"
'''
import torch
import torch.nn as nn
torch.set_default_tensor_type('torch.FloatTensor')
class deepIQA_model(nn.Module):
    """Two-branch CNN from "Deep Learning of Human Visual Sensitivity in
    Image Quality Assessment Framework".

    One branch ingests the image, the other the error map; their feature
    maps are concatenated, reduced to a single-channel sensitivity map,
    weighted by the (avg-pooled) error map, and pooled into a scalar
    quality score.

    NOTE: the attribute names (conv1_1, fc1, ...) define the state_dict
    layout -- renaming them would break saved checkpoints.
    """
    def __init__(self):
        super(deepIQA_model, self).__init__()
        # Image branch: 3x3 conv, then a stride-2 conv (downsample x2).
        self.conv1_1 = nn.Conv2d(in_channels=1,
                                 out_channels=32,
                                 kernel_size=(3, 3),
                                 stride=(1, 1),
                                 padding=(1, 1),
                                 bias=True)
        # Error-map branch: mirrors the image branch.
        self.conv1_2 = nn.Conv2d(in_channels=1,
                                 out_channels=32,
                                 kernel_size=(3, 3),
                                 stride=(1, 1),
                                 padding=(1, 1),
                                 bias=True)
        self.conv2_1 = nn.Conv2d(in_channels=32,
                                 out_channels=32,
                                 kernel_size=(3, 3),
                                 stride=(2, 2),
                                 padding=(1, 1),
                                 bias=True)
        self.conv2_2 = nn.Conv2d(in_channels=32,
                                 out_channels=32,
                                 kernel_size=(3, 3),
                                 stride=(2, 2),
                                 padding=(1, 1),
                                 bias=True)
        # Fused trunk on the concatenated 64-channel features; conv4
        # downsamples again (total x4 versus the input image).
        self.conv3 = nn.Conv2d(in_channels=64,
                               out_channels=64,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding=(1, 1),
                               bias=True)
        self.conv4 = nn.Conv2d(in_channels=64,
                               out_channels=64,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding=(1, 1),
                               bias=True)
        self.conv5 = nn.Conv2d(in_channels=64,
                               out_channels=64,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding=(1, 1),
                               bias=True)
        # Final 1-channel projection producing the sensitivity map.
        self.conv6 = nn.Conv2d(in_channels=64,
                               out_channels=1,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding=(1, 1),
                               bias=True)
        # Bias initialized to 1 -- presumably so the initial sensitivity
        # map is ~1 (pass-through weighting); confirm against the paper.
        self.conv6.bias.data.fill_(1.)
        self.relu = nn.ReLU(inplace=False)
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.01)
        self.globalpooling = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        # Pools the raw error map by 4x to match the sensitivity map size.
        self.ave_pooling = nn.AvgPool2d(kernel_size=(4, 4), stride=4)
        # Tiny MLP applied to the pooled scalar (last-dim size 1 -> 4 -> 1).
        self.fc1 = nn.Linear(1, 4, bias=True)
        self.fc2 = nn.Linear(4, 1, bias=True)

    def forward(self, *x):
        """Return (score, sensitivity_map) for (img, error) inputs.

        :param x: positional pair (img, error); img is (B, 1, H, W) and
            error is (B, H, W) -- inferred from the unsqueeze/pooling
            below, TODO confirm against the data loader.
        """
        img, error = x[0], x[1]
        # Image branch (spatial /2 after conv2_1).
        upper = self.leakyrelu(self.conv1_1(img))
        upper = self.leakyrelu(self.conv2_1(upper))
        # Error branch: add a channel dim, then mirror the image branch.
        err = error.unsqueeze(1)
        lower = self.leakyrelu(self.conv1_2(err))
        lower = self.leakyrelu(self.conv2_2(lower))
        # Fuse along channels and run the trunk (spatial /4 total).
        x = torch.cat((upper, lower), dim=1)
        x = self.leakyrelu(self.conv3(x))
        x = self.leakyrelu(self.conv4(x))
        x = self.leakyrelu(self.conv5(x))
        # Non-negative sensitivity map, channel dim squeezed away.
        x = self.relu(self.conv6(x)).squeeze()
        # Bring the raw error map down to the same /4 resolution.
        error = self.ave_pooling(error)
        # print('error', error.shape, 'x', x.shape)
        # Error weighted by predicted sensitivity; crop a 4-pixel border.
        p = x*error
        p = p[:, 4:-4, 4:-4]
        # print('p', p.shape)
        # Global average -> scalar per sample, refined by the tiny MLP.
        s = self.globalpooling(p)
        s = self.leakyrelu(self.fc1(s))
        s = self.relu(self.fc2(s))
        # print('s', s.shape)
        return s.squeeze(), x
if __name__ == '__main__':
    # Smoke-test / micro-benchmark of a forward pass.
    import time

    import numpy as np
    import torch
    from torch.autograd import Variable

    torch.set_num_threads(1)

    # BUG FIX: scipy.misc.ascent() was removed in modern SciPy (moved to
    # scipy.datasets in 1.10); prefer the new location with a fallback.
    try:
        from scipy.datasets import ascent
    except ImportError:
        from scipy.misc import ascent
    i = ascent()

    imh, imw = 384, 384
    # Batch of 15 single-channel images; only the first carries real data.
    img = np.zeros((15, 1, imh, imw))
    img[0, 0, :imh, :imw] = i[:imh, :imw]
    print(img.shape)

    test_image = Variable(torch.from_numpy(img))
    test_image = test_image.type('torch.FloatTensor')

    model = deepIQA_model()
    # BUG FIX: only move to GPU when one is available, instead of
    # crashing on CPU-only machines.
    if torch.cuda.is_available():
        model = model.cuda()
        test_image = test_image.cuda()

    time_all = []
    for i in range(0, 100):
        # BUG FIX: time.clock() was removed in Python 3.8; use the
        # monotonic high-resolution perf counter instead.
        t0 = time.perf_counter()
        score, senMap = model(test_image, test_image[:, 0, :imh, :imw])
        t1 = (time.perf_counter() - t0) * 1000
        fps = 1000 / t1
        print(i)
        print(score.shape)
        print(score.type())
        print(senMap.shape)
        print('Forward Time: {:.2f} ms'.format(t1))
        print('Forward FPS: {:.2f} f/s'.format(fps))
        print('')
        time_all.append(t1)
    print('Mean Time: {:.2f} ms'.format(torch.Tensor(time_all).median()))
25,010 | 0934bb6a1bf22dcff8e618277b57cb32fac247de | import json
import csv
import pandas as pd
import numpy as np
# Build a Neo4j-importable CSV of author->org AFFILIATED relationships
# from the MAG author dump shards.
files = ["/home/asdhruv2/scratch/mag_authors_" + str(i) + ".txt" for i in range(13)]

failure_count = 0
failures = []

with open('affiliations.csv', mode='w') as affiliations:
    # BUG FIX: the header was written with QUOTE_NONE (which raises on
    # embedded delimiters) while the data rows used the default
    # QUOTE_MINIMAL; use one writer and one quoting policy throughout,
    # and keep the file open instead of reopening it in append mode.
    affiliations_writer = csv.writer(affiliations, delimiter=',', quotechar='"')
    affiliations_writer.writerow([':START_ID', ':END_ID', ':TYPE', 'NAME', 'N_NAME', 'N_PUBS:int'])
    print("Starting csv job;...")
    for path in files:
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                # BUG FIX: the bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit; catch only the data
                # errors this loop can actually produce.
                try:
                    obj = json.loads(line)
                    id_ = obj.get("id", "None")
                    name_ = obj.get("name", "None")
                    n_name_ = obj.get("normalized_name", "none")
                    orgs_ = obj.get("orgs", [])
                    n_pubs_ = obj.get("n_pubs", -1)
                    for org in orgs_:
                        affiliations_writer.writerow([id_, org, 'AFFILIATED', name_, n_name_, n_pubs_])
                except (ValueError, TypeError, csv.Error):
                    failure_count += 1
                    failures.append(line)

print("Failures: ", failure_count)
print("success...")

# Dump the raw lines that failed to parse for later inspection.
with open("failures2.txt", "w") as out:
    for line in failures:
        out.write(line)
        out.write("\n")
|
25,011 | 9eb95b6e44b7be9dc375e8cea37a5d5b8d0ae416 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Single-module Flask setup: one app wired to a local SQLite database via
# SQLAlchemy, with Alembic migrations provided by Flask-Migrate.
app = Flask(__name__)
# NOTE(review): hard-coded secret key; move to an environment variable
# before deploying.
app.secret_key = "wassup"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///dojosninjas.db"
# Disable the modification-tracking signal machinery (saves memory).
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
25,012 | 446e3ffb1681958fa99b61c325e024b7c107c16f | import json
import math
import requests
from .loggings import logger
from .tcpping import ping
IP_CHECKER_API = 'http://api.ipify.org/?format=json'
IP_CHECKER_API_SSL = 'https://api.ipify.org/?format=json'
__CURRENT_IP__ = None
GET_GEO_OF_IP_HEADERS = {
'authority': 'api.ip.sb',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?1',
'sec-ch-ua-platform': '"Android"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36',
}
def get_current_ip():
    """Return this machine's public IP address, cached after the first fetch."""
    global __CURRENT_IP__
    if not __CURRENT_IP__:
        # Cache miss: ask the IP-checker API once and remember the answer.
        response = requests.get(IP_CHECKER_API)
        __CURRENT_IP__ = json.loads(response.text)['ip']
    return __CURRENT_IP__
class Validator(object):
    """Checks a single proxy: TCP latency, validity/anonymity and geo metadata.

    Call ``validate()`` once, then read the results via the read-only
    properties (``latency``, ``success_rate``, ``valid``, ``anonymous``,
    ``meta``).
    """

    def __init__(self, host: str, port: int, using_https: bool = False):
        self._host = host
        self._port = port
        self._using_https = using_https
        # default values
        self._success_rate = 0.0
        self._latency = float('inf')
        self._anonymous = False
        self._valid = False
        self._meta = None

    def validate_latency(self):
        """Measure TCP latency and success rate; infinite latency on refusal."""
        try:
            (self._latency, self._success_rate) = ping(self._host, self._port)
        except ConnectionRefusedError:
            self._latency, self._success_rate = math.inf, 0.0

    def validate_proxy(self):
        """Route a request through the proxy and record validity/anonymity/geo."""
        protocol = 'https' if self._using_https else 'http'
        proxy_str = '{}://{}:{}'.format(protocol, self._host, self._port)
        checking_api = IP_CHECKER_API_SSL if self._using_https else IP_CHECKER_API
        # First request for checking IP
        try:
            r = requests.get(checking_api, proxies={
                'https': proxy_str, 'http': proxy_str}, verify=False, timeout=15)
            if r.ok:
                logger.debug(f'Checking api text {r.text}')
                j = json.loads(r.text)
                # The proxy is anonymous (and considered valid) only when the
                # API observes an address different from our own public IP.
                if j['ip'] != get_current_ip():
                    self._anonymous = True
                    self._valid = True
                    # A second request for meta info
                    try:
                        r2 = requests.get(
                            'https://api.ip.sb/geoip/{}'.format(j['ip']), timeout=15, headers=GET_GEO_OF_IP_HEADERS)
                        if r2.ok:
                            logger.debug(f'Get geo text {r2.text}')
                            jresponse = r2.json()
                            # Load meta data
                            # TODO: better location check
                            meta = {
                                'location': '{},{}'.format(jresponse['latitude'], jresponse['longitude']),
                                'organization': jresponse['organization'] if 'organization' in jresponse else None,
                                'region': jresponse['region'],
                                'country': jresponse['country_code'],
                                'city': jresponse['city'],
                            }
                            self._meta = meta
                    except Exception as e:
                        # Bug fix: ``r2.host`` does not exist on a requests
                        # Response (and ``r2`` may be unbound if the request
                        # itself raised) — log the queried address instead.
                        logger.debug(
                            f'Get GEO of ip address {j["ip"]} failed, error is {e}.')
                        return
        except Exception as e:
            # Bug fix: same problem as above with ``r.host`` / unbound ``r``.
            logger.debug(f'Checking ip address via {proxy_str} failed, error is {e}.')
            return

    def validate(self):
        """Run both latency and proxy validation."""
        self.validate_latency()
        self.validate_proxy()

    @property
    def latency(self):
        return self._latency

    @property
    def success_rate(self):
        return self._success_rate

    @property
    def valid(self):
        return self._valid

    @property
    def anonymous(self):
        return self._anonymous

    @property
    def meta(self):
        return self._meta

    @property
    def using_https(self):
        return self._using_https
|
25,013 | 6df79e23e39705c62e5b1dd66dca465cff8b2542 | from sqlalchemy import or_
from .models import Contact, Phone, Address, Date
def db_search(search_string):
    """Pattern-search contacts and their related records.

    Builds one OR filter across every searchable column, joined to Phone,
    Address and Date via outer joins so contacts without related rows can
    still match on their own fields.
    """
    pattern = f"%{search_string}%"
    searchable_columns = (
        Contact.fname,
        Contact.lname,
        Contact.mname,
        Phone.phone_type,
        Phone.area_code,
        Phone.number,
        Address.address_type,
        Address.address,
        Address.city,
        Address.state,
        Address.zip,
        Date.date_type,
        Date.date,
    )
    query = (
        Contact.query.outerjoin(Phone)
        .outerjoin(Address)
        .outerjoin(Date)
        .filter(or_(*(column.like(pattern) for column in searchable_columns)))
    )
    return query
|
25,014 | 263e6fd8377c5e44805b00396f766bbd484f2aee | from magic_timer import MagicTimer
import time
def my_slow_function(t):
    """Test helper that simply blocks for *t* seconds."""
    time.sleep(t)
def test_MagicTimer():
    # str(MagicTimer()) should report elapsed wall-clock time rounded to one
    # decimal; sleeping slightly off the round value exercises rounding.
    # NOTE(review): wall-clock assertions like these can be flaky on loaded
    # machines — the observed sleep may overshoot past the rounding boundary.
    timer = MagicTimer()
    my_slow_function(1.95)
    assert str(timer) == "2.0 seconds", "timing error"
    timer = MagicTimer()
    my_slow_function(1.03)
    assert str(timer) == "1.1 seconds", "timing error"
|
25,015 | 7a39691409b663f067d73235d1c61768c801aa8a | #!/usr/bin/env python3
from typing import List
class Solution:
    def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:
        """Return True if every room is reachable starting from room 0.

        rooms[i] lists the keys found in room i; each key is the index of a
        room it opens. Standard BFS from room 0.
        """
        from collections import deque  # local import keeps module imports unchanged

        visited = {0}
        # Performance fix: list.pop(0) is O(n) per pop; deque.popleft() is O(1).
        queue = deque([0])
        while queue:
            curr = queue.popleft()
            for neighbor in rooms[curr]:
                if neighbor not in visited:
                    visited.add(neighbor)
                    queue.append(neighbor)
        return len(visited) == len(rooms)
|
25,016 | 41442ba98f2ccbc9da5c71dded2eff6bac04bd03 | # Import standard Python Modules
import time
import sys
# Import RPi.GPIO Module
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script")
# Define Function "main", way to manage errors
def main():
    """Chase pattern: raise every GPIO pin in order, then lower them in
    reverse, with a 0.5 s step, and release the pins afterwards."""
    # Setup GPIO setmode
    GPIO.setmode(GPIO.BCM)  # address pins by Broadcom (BCM) channel numbers
    # List with all GPIO pin numbers
    pinList=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]
    # Set GPIO pin signal OUT and initial value "shutdown"
    GPIO.setup(pinList, GPIO.OUT, initial=GPIO.LOW)
    # Set signal up to all pins one to one in order
    for i in pinList:
        GPIO.output(i, GPIO.HIGH)
        time.sleep(0.5)
    # Set signal down to all pins one to one in reversed order
    for i in reversed(pinList):
        GPIO.output(i, GPIO.LOW)
        time.sleep(0.5)
    GPIO.cleanup()
if __name__ == '__main__':
    try:
        main()
    # The bare except appears deliberate: it also catches KeyboardInterrupt,
    # so GPIO.cleanup() runs and pins are not left driven high on Ctrl-C.
    except:
        print("{} line {}".format(sys.exc_info()[0], sys.exc_info()[-1].tb_lineno))
        GPIO.cleanup()
|
25,017 | 9196dc78ab946654b0f3cce347be127a8c86e7b1 | #!/usr/bin/env python3
#
# Copyright 2020 IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.IBM Confidential
#
import datetime as dt
import fastapi.encoders as encoders
import sqlalchemy.orm as orm
import app.crud.base as app_crud_base
import app.models as models
import app.schemas as schemas
class CRUDModel(app_crud_base.CRUDBase[models.Endpoint, schemas.EndpointCreate, schemas.EndpointUpdate]):
    """CRUD operations for Endpoint rows, plus helpers that keep the
    endpoint's binary model payload in sync with its metadata."""

    def create_with_model(
        self, db: orm.Session, *, obj_in: schemas.EndpointCreate, model_id: app_crud_base.IdType
    ) -> models.Endpoint:
        """Persist a new Endpoint under *model_id*, stamping deployed_at now (UTC)."""
        # noinspection PyArgumentList
        db_obj = self.model(
            **encoders.jsonable_encoder(obj_in),
            deployed_at=dt.datetime.now(tz=dt.timezone.utc),
            id=model_id
        )
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj

    def create_with_model_and_binary(
        self,
        db: orm.Session,
        *,
        ec: schemas.EndpointCreate,
        bc: schemas.BinaryMlModelCreate,
        model_id: app_crud_base.IdType,
    ):
        """Create an Endpoint and its BinaryMlModel atomically (one commit).

        Both rows share *model_id* as primary key so they stay paired.
        """
        # noinspection PyArgumentList
        endpoint_db_obj = self.model(
            **encoders.jsonable_encoder(ec),
            id=model_id,
            deployed_at=dt.datetime.now(tz=dt.timezone.utc)
        )
        # noinspection PyArgumentList
        binary_db_obj = models.BinaryMlModel(
            **bc.dict(),
            id=model_id
        )
        db.add(endpoint_db_obj)
        db.add(binary_db_obj)
        db.commit()
        db.refresh(endpoint_db_obj)
        return endpoint_db_obj

    def update_binary(
        self,
        db: orm.Session,
        *,
        e: models.Endpoint,
        bu: schemas.BinaryMlModelUpdate
    ):
        """Update the binary payload paired with endpoint *e* and bump its
        deployed_at timestamp.

        Raises AssertionError if no BinaryMlModel row exists for e.id.
        """
        # Simplification: the original looped over every encoded field of the
        # endpoint just to find 'deployed_at'; set the attribute directly.
        setattr(e, 'deployed_at', dt.datetime.now(tz=dt.timezone.utc))
        binary_in_db = db.query(models.BinaryMlModel).filter(models.BinaryMlModel.id == e.id).first()
        assert binary_in_db is not None
        # Only fields explicitly provided by the caller are overwritten.
        update_data = bu.dict(exclude_unset=True)
        for field in ('input_data_structure', 'output_data_structure', 'format', 'file'):
            if field in update_data:
                setattr(binary_in_db, field, update_data[field])
        db.add(e)
        db.add(binary_in_db)
        db.commit()
        db.refresh(e)
        return e
# Module-level singleton used by the API layer for Endpoint CRUD.
endpoint = CRUDModel(models.Endpoint)
|
25,018 | e9e5c0c593b0e0e4d7e467e33128b4f4cf3aaa1f | from __future__ import print_function
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import datetime
def ARMA_process(i,filename,p_q,test_error):
    """Fit an ARMA model to one daily play-count series and accumulate error.

    :param i: index of the input file (used only by the commented-out
        per-file order selection below)
    :param filename: CSV with 'Date' (YYYYMMDD) and 'play' columns
    :param p_q: suggested (p, q) order for this series — currently unused,
        the fit below hard-codes order (13, 0)
    :param test_error: running squared-error accumulator
    :return: (predictions for 2015-09-01..2015-10-30, updated test_error)
    """
    # NOTE(review): ``pd.datetime`` and a bare-string ``parse_dates`` are
    # deprecated/removed in modern pandas — this only runs on old versions.
    dateparse = lambda dates: pd.datetime.strptime(dates, '%Y%m%d')
    data = pd.read_csv(filename,parse_dates='Date', index_col='Date',date_parser=dateparse)
    data = data.asfreq('D')
    ts = data['play']
    data.fillna(0,inplace=True)
    data['play'] = data['play'].astype(float)
    #print (type(data['play']['2015-06-01']))
    #data.plot(figsize=(12,8));
    #plt.show()
    # Diagnostic ACF/PACF plots over a 90-day window (not shown by default).
    fig = plt.figure(figsize=(12,8))
    ax1 = fig.add_subplot(211)
    fig = sm.graphics.tsa.plot_acf(data.values.squeeze(), lags=90, ax=ax1)
    ax2 = fig.add_subplot(212)
    fig = sm.graphics.tsa.plot_pacf(data, lags=90, ax=ax2)
    #plt.show()
    #arma_mod20 = sm.tsa.ARMA(data, (3,0)).fit()
    #print(arma_mod20.params)
    print (p_q)
    '''exception = [3,21,25,27,42]
    if i in exception:
        order = (p_q[0],0,p_q[1])
    else:
        order = (p_q[0],1,p_q[1])'''
    order = (13,0)
    arma_mod30 = sm.tsa.ARMA(data, order).fit()
    #print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
    #print(arma_mod30.params)
    # Residual autocorrelation check (Durbin-Watson + normality test).
    sm.stats.durbin_watson(arma_mod30.resid.values)
    #fig = plt.figure(figsize=(12,8))
    #ax = fig.add_subplot(111)
    #ax = arma_mod30.resid.plot(ax=ax)
    #plt.show()
    resid = arma_mod30.resid
    stats.normaltest(resid)
    #fig = plt.figure(figsize=(12,8))
    #ax = fig.add_subplot(111)
    #fig = qqplot(resid, line='q', ax=ax, fit=True)
    #fig = plt.figure(figsize=(12,8))
    #ax1 = fig.add_subplot(211)
    #fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
    #ax2 = fig.add_subplot(212)
    #fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
    # Ljung-Box style table of residual autocorrelations up to lag 90.
    r,q,p = sm.tsa.acf(resid.values.squeeze(), nlags=90,qstat=True)
    data = np.c_[range(1,91), r[1:], q, p]
    table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
    print(table.set_index('lag'))
    #plt.show()
    predict_sunspots = arma_mod30.predict('2015-08-01','2015-10-30', dynamic=True)
    print(predict_sunspots)
    result = 0
    # Bug fix: ``xrange`` does not exist in Python 3; this file already uses
    # ``from __future__ import print_function`` and print() calls, so
    # ``range`` is the correct, version-agnostic choice.
    for s in range(1,30):
        if s < 10:
            ele = '2015-08-0'+str(s)
        else:
            ele = '2015-08-'+str(s)
        result += (ts[ele]-predict_sunspots[ele])
    # NOTE(review): this squares the *sum* of residuals rather than summing
    # squared residuals — confirm that is the intended error metric.
    print (result * result)
    test_error += result*result
    fig, ax = plt.subplots(figsize=(12, 8))
    #ax = data.ix['2015-03-02':].plot(ax=ax)
    fig = arma_mod30.plot_predict('2015-08-01','2015-10-30', dynamic=True, ax=ax, plot_insample=False)
    #plt.show()
    return predict_sunspots['2015-09-01':'2015-10-30'],test_error
if __name__ == "__main__":
    import glob
    # Hand-tuned (p, q) orders, one per input CSV, kept for reference;
    # ARMA_process currently ignores them and hard-codes order (13, 0).
    p_q = [(3,13),(3,10),(2,11),(2,4),(3,11),(3,3),(2,9),(5,13),(3,5),(2,4),(2,8),(3,5),(4,10),(3,9),(3,9),(3,2),(2,2),(3,7),(2,7),(2,2),(3,8),(2,4),(2,11),(6,13),(4,10),(3,10),(3,3),(11,3),(2,2),(2,10),(2,2),(3,5),(2,14),(5,10),(2,2),(4,13),(3,8),(4,5),(3,8),(3,3),(4,6),(1,1),(2,9),(2,8),(4,14),(3,3),(4,12),(4,6),(2,3),(3,8)]
    test_error = 0
    # Fit every per-song series and write its predictions next to the input.
    for i,filename in enumerate(glob.glob('../play_number/*.csv')):
        print (i,filename)
        predict_result,test_error = ARMA_process(i,filename,p_q[i],test_error)
        name = '../test/'+filename.split('/')[-1].split('.')[0] + '_result.csv'
        predict_result.to_csv(name)
    print (test_error)
|
25,019 | 395aa7bbc2282739fea2d637579946ef6bbb285f | import os,sys
import pygame
import loader
from pygame.locals import *
class redMonkey(pygame.sprite.Sprite):
    """Ook ook ook. MAG/AGL

    Player sprite: a 32x32 monkey with a two-frame walk animation per
    facing. statlist holds (HP, ATK, DFC, AGL, MAG).
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Both animation frames for each of the four facings, tile-scaled.
        self.rmf1 = pygame.transform.scale(loader.load_image("redmonkeyfront1.png",-1),(32,32))
        self.rmf2 = pygame.transform.scale(loader.load_image("redmonkeyfront2.png",-1),(32,32))
        self.rmb1 = pygame.transform.scale(loader.load_image("redmonkeyback1.png",-1),(32,32))
        self.rmb2 = pygame.transform.scale(loader.load_image("redmonkeyback2.png",-1),(32,32))
        self.rml1 = pygame.transform.scale(loader.load_image("redmonkeyleft1.png",-1),(32,32))
        self.rml2 = pygame.transform.scale(loader.load_image("redmonkeyleft2.png",-1),(32,32))
        self.rmr1 = pygame.transform.scale(loader.load_image("redmonkeyright1.png",-1),(32,32))
        self.rmr2 = pygame.transform.scale(loader.load_image("redmonkeyright2.png",-1),(32,32))
        self.image = self.rmf1
        self.rect = self.image.get_rect()
        self.rect.topleft = 0*32,0*32
        self.updateimage=0  # animation tick counter; frame flips when it wraps
        self.collision=0    # scratch flag used while resolving one move
        self.statlist=(10,1,1,5,5) #HP,ATK,DFC,AGL,MAG or... HADAM
    def keyMove(self,direction,target):
        """Step one 32px tile in *direction* unless a sprite in *target*
        blocks the destination; also face the travel direction.

        Bug fix: the original wrote ``if not self.image == f1 or
        self.image == f2:``, which parses as ``(not ...) or (...)`` and so
        reset the animation to frame 1 whenever frame 2 was showing.  The
        intent is "only re-face when not already showing this facing".
        """
        if direction == "up" and self.rect.y>0*32:
            for i in target:
                if self.rect.move(0,-32).colliderect(i.rect):
                    self.collision=1
            if self.collision!=1:
                self.rect.y-=32
            if self.image not in (self.rmb1, self.rmb2):
                self.image = self.rmb1
        elif direction =="down" and self.rect.y<32*9:
            for i in target:
                if self.rect.move(0,32).colliderect(i.rect):
                    self.collision=1
            if self.collision!=1:
                self.rect.y+=32
            if self.image not in (self.rmf1, self.rmf2):
                self.image = self.rmf1
        elif direction == "left" and self.rect.x>0*32:
            for i in target:
                if self.rect.move(-32,0).colliderect(i.rect):
                    self.collision=1
            if self.collision!=1:
                self.rect.x-=32
            if self.image not in (self.rml1, self.rml2):
                self.image = self.rml1
        elif direction == "right" and self.rect.x<9*32:
            for i in target:
                if self.rect.move(32,0).colliderect(i.rect):
                    self.collision=1
            if self.collision!=1:
                self.rect.x+=32
            if self.image not in (self.rmr1, self.rmr2):
                self.image = self.rmr1
        self.collision=0
    def update(self):
        """Advance the two-frame walk cycle once every 30 update calls."""
        if self.updateimage == 0:
            # Swap to the partner frame of whatever facing is showing
            # (replaces the original eight-way elif chain).
            swap = {self.rmf1: self.rmf2, self.rmf2: self.rmf1,
                    self.rmb1: self.rmb2, self.rmb2: self.rmb1,
                    self.rml1: self.rml2, self.rml2: self.rml1,
                    self.rmr1: self.rmr2, self.rmr2: self.rmr1}
            self.image = swap.get(self.image, self.image)
        self.updateimage += 1
        if self.updateimage>=30:
            self.updateimage=0
|
25,020 | 18b8b9f2db7d8c78224e9ee8f261d9470d9cd92c | from typing import List
import sys
def calculate_max_profit(prices: List[int]) -> int:
    """ Calculate Max Profit

    Single pass: track the cheapest price seen so far and the best
    (possibly negative) sell-minus-buy difference.

    Args:
        prices: time series of a stock price
    Returns:
        max_profit: maximum profit possible
    """
    cheapest = prices[0]
    best = prices[1] - prices[0]
    for price in prices[1:]:
        best = max(best, price - cheapest)
        cheapest = min(cheapest, price)
    return best
def calculate_max_profit_with_short_sell(prices: List[int]) -> int:
    """ Calculate Max Profit with Short Selling

    With short selling allowed the best trade equals the full price range:
    the difference between the global maximum and the global minimum.

    Args:
        prices: time series of a stock price
    Returns:
        max_profit: maximum profit possible with short selling
    """
    return max(prices) - min(prices)
if __name__ == "__main__":
    # CLI: pass prices as one comma-separated argument, e.g. "7,1,5,3,6,4".
    prices = [int(x) for x in sys.argv[1].split(',')]
    print(f"Max profit: {calculate_max_profit(prices)}")
    print(f"Max profit with short sell: {calculate_max_profit_with_short_sell(prices)}")
25,021 | cc63faac39b10bae60c493303a3d91d672ce7e13 | class Rectangle:
def __init__(self, userinput):
if len(userinput) > 2:
x = userinput
self.width = abs(float(x[1]) - float(x[3]))
self.height = abs(float(x[0]) - float(x[2]))
else:
y = userinput
self.width = abs(float(y[0]))
self.height = abs(float(y[1]))
def calculatearea(self):
"""calculates the area of the rectangle"""
return self.width * self.height
def calculateperimeter(self):
"""calculates the perimeter of the rectangle"""
return (self.width * 2) + (self.height * 2)
def issquare(self):
"""returns True is rectangle is a square"""
if self.width == self.height:
return True
else:
return False
if __name__ == "__main__":
    # Simple REPL: accepts either four corner coordinates or two side
    # lengths (comma-separated) and reports area, perimeter and squareness.
    # NOTE(review): no exit condition or input validation — a malformed
    # entry raises ValueError and ends the loop.
    while True:
        userinput = tuple(input("\tTop left and bottom right coordinates (Ex. 2.9,6,6.6,2 OR\n\t"
                                "Horizontal side and vertical side integers (Ex. 8.8,5.9): ").split(','))
        myRectangle = Rectangle(userinput)
        print("The area of your rectangle is " + str(myRectangle.calculatearea()))
        print("The perimeter of your rectangle is " + str(myRectangle.calculateperimeter()))
        print("Your rectangle is a square: " + str(myRectangle.issquare()))
25,022 | d4bc29322964098fe7d120d49218002dd29dd8a7 | while True:
    try:
        # Read one line of words and count runs (length >= 2) of consecutive
        # words starting with the same letter; each run counts once.
        c = []
        c = (input().lower()).split()
        t = 0  # number of runs found on this line
        y = 0  # 1 while inside an already-counted run
        for i, j in enumerate(c):
            c[i] = j[0]  # reduce each word to its first letter in place
            # NOTE(review): at i == 0 this compares against c[-1], i.e. the
            # still-unreduced last word — presumably harmless, but confirm
            # against the intended specification.
            if c[i] == c[i-1] and y == 0:
                y = 1
                t += 1
            elif c[i] != c[i-1]:
                y = 0
        print(t)
    except EOFError:
        break
25,023 | d3596c53ae0fab1f9c458fd12c1566cbf47d9a65 | from csv import QUOTE_MINIMAL, reader, writer
def read_from_csv(last):
    """Return the first contact whose last name equals *last*.

    Returns a dict with "first"/"last"/"phone" keys, or None when no row
    matches (callers currently assume a match exists).
    """
    with open('contacts.csv', newline='') as f:
        for row in reader(f, delimiter=' ', quotechar='|'):
            if row[1] == last:
                return {"first": row[0], "last": row[1], "phone": row[2]}
def write_to_csv(contact):
    """Append one contact dict as a row of contacts.csv."""
    row = [contact["first"], contact["last"], contact["phone"]]
    with open('contacts.csv', 'a', newline='') as f:
        writer(f, delimiter=' ', quotechar='|', quoting=QUOTE_MINIMAL).writerow(row)
def create(first, last, phone):
    """Store a brand-new contact built from the three fields."""
    write_to_csv({"first": first, "last": last, "phone": phone})
def read(last):
    """Look a contact up by last name, print it, and return the dict."""
    contact = read_from_csv(last)
    print(f'{contact["first"]} {contact["last"]} {contact["phone"]}.')
    return contact
def update(last, phone):
    """Change the phone number of the first contact matching *last* in place.

    Bug fix: the original appended a duplicate row instead of updating
    (self-flagged with '! Not updating, adding new entry'); this version
    rewrites contacts.csv with the row modified.
    """
    with open('contacts.csv', newline='') as f:
        rows = list(reader(f, delimiter=' ', quotechar='|'))
    for row in rows:
        if row[1] == last:
            row[2] = phone
            print(
                f'{row[0]} {row[1]} phone updated to {row[2]}.')
            break
    with open('contacts.csv', 'w', newline='') as f:
        writer(f, delimiter=' ', quotechar='|', quoting=QUOTE_MINIMAL).writerows(rows)
def delete(last):
    """Remove every contact whose last name matches *last*.

    Bug fix: the original appended an empty row instead of deleting
    (self-flagged with '! Not working'); this version rewrites
    contacts.csv without the matching rows.
    """
    with open('contacts.csv', newline='') as f:
        rows = list(reader(f, delimiter=' ', quotechar='|'))
    kept = []
    for row in rows:
        if row[1] == last:
            print(f'{row[0]} {row[1]} removed.')
        else:
            kept.append(row)
    with open('contacts.csv', 'w', newline='') as f:
        writer(f, delimiter=' ', quotechar='|', quoting=QUOTE_MINIMAL).writerows(kept)
# Tiny interactive CRUD menu driving the functions above over contacts.csv.
while True:
    try:
        query = int(
            input('1: Create\n2. Read\n3: Update\n4: Delete\n5: Quit\n\n'))
    except ValueError:
        print('Please enter a value from 1 to 5.')
        continue
    if query == 1:
        first = input('What is the first name? ')
        last = input('What is the last name? ')
        phone = input('What is the phone number? ')
        create(first, last, phone)
    elif query == 2:
        key = input('Look up by last name. ')
        read(key)
    elif query == 3:
        key = input('Which entry would you like to update? ')
        phone = input('What is the phone number? ')
        update(key, phone)
    elif query == 4:
        key = input('Enter a last name to remove: ')
        delete(key)
    elif query == 5:
        quit()
|
25,024 | d3037e3b6ee8931707d0c7cb5aa6fc7c606dfc49 | '''
Data loading and pre-processing for the IWSLT'16 EN-DE dataset.
'''
import re
from data.annotated import AnnotatedTextDataset
class IWSLTDataset(AnnotatedTextDataset):
    ''' Class that encapsulates the IWSLT dataset '''
    NAME = 'iwslt'
    LANGUAGE_PAIR = ('en', 'de')
    # WORD_COUNT = (4215814, 4186988)
    # Relative source/target word-count ratio (second entry normalised to 1).
    WORD_COUNT = (1.0360595565014956, 1)
    # (local archive name, download URL) pairs for the train/test bundles.
    URLS = [
        ('iwslt_en_de.tgz', 'https://wit3.fbk.eu/archive/2016-01/texts/en/de/en-de.tgz'),
        ('iwslt_test_en_de.tgz', 'https://wit3.fbk.eu/archive/2016-01-test/texts/en/de/en-de.tgz'),
        ('iwslt_test_de_en.tgz', 'https://wit3.fbk.eu/archive/2016-01-test/texts/de/en/de-en.tgz'),
    ]
    # Per-split lists of (source-file, target-file) pairs inside the archives.
    RAW_SPLITS = {
        'train': [
            ('en-de/train.tags.en-de.en', 'en-de/train.tags.en-de.de')
        ],
        'dev': [
            ('en-de/IWSLT16.TED.tst2013.en-de.en.xml', 'en-de/IWSLT16.TED.tst2013.en-de.de.xml'),
        ],
        'valid': [
            ('en-de/IWSLT16.TED.dev2010.en-de.en.xml', 'en-de/IWSLT16.TED.dev2010.en-de.de.xml'),
            ('en-de/IWSLT16.TED.tst2010.en-de.en.xml', 'en-de/IWSLT16.TED.tst2010.en-de.de.xml'),
            ('en-de/IWSLT16.TED.tst2011.en-de.en.xml', 'en-de/IWSLT16.TED.tst2011.en-de.de.xml'),
            ('en-de/IWSLT16.TED.tst2012.en-de.en.xml', 'en-de/IWSLT16.TED.tst2012.en-de.de.xml'),
            ('en-de/IWSLT16.TED.tst2013.en-de.en.xml', 'en-de/IWSLT16.TED.tst2013.en-de.de.xml'),
            ('en-de/IWSLT16.TED.tst2014.en-de.en.xml', 'en-de/IWSLT16.TED.tst2014.en-de.de.xml'),
        ],
        'test': [
            ('en-de/IWSLT16.QED.tst2016.en-de.en.xml', 'de-en/IWSLT16.QED.tst2016.de-en.de.xml'),
            ('en-de/IWSLT16.TED.tst2015.en-de.en.xml', 'de-en/IWSLT16.TED.tst2015.de-en.de.xml'),
            ('en-de/IWSLT16.TED.tst2016.en-de.en.xml', 'de-en/IWSLT16.TED.tst2016.de-en.de.xml'),
        ]
    }
    # Output file names (tokenised) for each processed split.
    SPLITS = {
        'train': 'train.tok',
        'valid': 'valid.tok',
        'dev': 'dev.tok',
        'test': 'test.tok'
    }
    # Strip non-sentence XML metadata tags (and their contents) from the raw files.
    IGNORE_REGEX_LIST = [
        re.compile(fr'<\s*{tag}\s*[^>]*\s*>[^<]*<\s*/{tag}\s*>')
        for tag in
        (
            'url', 'keywords', 'speaker', 'talkid',
            'title', 'description', 'reviewer', 'translator'
        )
    ]
class IWSLTEnJpDataset(AnnotatedTextDataset):
    ''' Class that encapsulates the IWSLT'17 EN-JA dataset '''
    NAME = "iwslt"
    LANGUAGE_PAIR = ('en', 'ja')
    # Absolute corpus word counts (source, target).
    WORD_COUNT = (5114050, 3576290)
    # (local archive name, download URL) pairs for the train/test bundles.
    URLS = [
        ('iwslt_en_ja.tgz', 'https://wit3.fbk.eu/archive/2017-01-trnted/texts/en/ja/en-ja.tgz'),
        ('iwslt_en_ja_test.tgz',
         'https://wit3.fbk.eu/archive/2017-01-ted-test/texts/en/ja/en-ja.tgz'),
        ('iwslt_en_ja_test_ja_en.tgz', 'https://wit3.fbk.eu/archive/2017-01-ted-test/texts/ja/en/ja-en.tgz')
    ]
    # Per-split lists of (source-file, target-file) pairs inside the archives.
    RAW_SPLITS = {
        'train': [
            ('en-ja/train.tags.en-ja.en', 'en-ja/train.tags.en-ja.ja')
        ],
        'dev': [
            ('en-ja/IWSLT17.TED.dev2010.en-ja.en.xml', 'en-ja/IWSLT17.TED.dev2010.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2010.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2010.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2011.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2011.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2012.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2012.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2013.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2013.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2014.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2014.en-ja.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2015.en-ja.en.xml', 'en-ja/IWSLT17.TED.tst2015.en-ja.ja.xml')
        ],
        'test': [
            ('en-ja/IWSLT17.TED.tst2016.en-ja.en.xml', 'ja-en/IWSLT17.TED.tst2016.ja-en.ja.xml'),
            ('en-ja/IWSLT17.TED.tst2017.en-ja.en.xml', 'ja-en/IWSLT17.TED.tst2017.ja-en.ja.xml')
        ]
    }
    # Output file names (tokenised) for each processed split.
    SPLITS = {
        'train': 'train.tok',
        'dev': 'dev.tok',
        'test': 'test.tok'
    }
25,025 | e94ca0cfac2358718dee447376c49a282fee8c9e | import datetime
import sys
import logging
def get_current_date():
    """
    :return: datetime object holding the current local date and time
    """
    # Bug fix: the original returned the ``datetime.datetime`` *class*
    # itself rather than the current moment.
    return datetime.datetime.now()
def get_current_platform():
    """
    :return: current platform identifier string (e.g. 'linux', 'win32')
    """
    platform_name = sys.platform
    return platform_name
def filtr_number(filtr):
    """Build a message listing the even or odd numbers in 0..100.

    :param filtr: "True" for even numbers, "False" for odd numbers
    :return: header string followed by the matching space-separated numbers
    :raises ValueError: for any other *filtr* value (the original crashed
        with UnboundLocalError on ``msg`` instead)
    """
    if filtr == "True":
        msg = "Парні елементи: "
        wanted_remainder = 0
    elif filtr == "False":
        msg = "Непарні елементи: "
        wanted_remainder = 1
    else:
        raise ValueError('filtr must be "True" or "False"')
    # Logical ``and``/single comparison replaces the original's bitwise
    # ``&`` combined with re-checking the filtr string on every iteration.
    for num in range(101):
        if num % 2 == wanted_remainder:
            msg += str(num) + " "
    return msg
def view_array():
    """Print a fixed demo array and echo the element the user selects."""
    x=[5,9,6,3]
    print("Масив X[]:", x)
    # NOTE(review): a non-numeric entry raises ValueError from int() here and
    # is not caught; negative indexes (-1..-4) are valid Python indexing and
    # slip past the IndexError check below — confirm intended behaviour.
    index = int(input("Введіть номер елемента масиву який хочете вивести: "))
    try:
        print(f"X[{index}] = {x[index]}")
    except IndexError:
        logging.error("Ви ввели число за межами проміжку 0-3")
    else:
        logging.info("Ви ввели коректні дані")
|
25,026 | 1d8347bfdd8763efeb000599b95417fd983dbda5 | def quantos_uns(n):
n=str(n)
c=0
q=0
for i in n:
if i=='1':
q+=1
return(q) |
25,027 | 7e5349719bbc5e63f9ee8fa26a516e243db7d657 | import os
import app
# Absolute directory of the installed ``app`` package (for locating resources).
BASE_DIR = os.path.dirname(app.__file__)
class BaseConfig:
    """Settings shared by every environment."""
    JSON_AS_ASCII = False  # allow non-ASCII characters in JSON responses
    REDIS_QUEUES = ["default"]


class DevConfig(BaseConfig):
    """Local development: debug on, Redis on localhost."""
    DEBUG = True
    REDIS_URL = 'redis://localhost:6379/0'


class ProdConfig(BaseConfig):
    """Production: debug off, Redis reached via the ``redis`` host."""
    DEBUG = False
    REDIS_URL = 'redis://redis:6379/0'


# Name -> config class lookup used at app creation; unknown names should use
# 'default', which maps to the production settings.
configs = {
    'dev': DevConfig,
    'prod': ProdConfig,
    'default': ProdConfig
}
25,028 | dc2421e64dbce1769269d888dd8b9c290f996320 | import re
# Parse card.txt (blank-line-separated card blocks) into a JS data literal.
file = open("./cards/card.txt", 'r')
data = file.read()
file.close()
# One entry per card; each card becomes a list of its lines.
data = data.split("\n\n")
for i in range(len(data)):
    data[i] = data[i].split('\n')
# Card name: letters, apostrophes and spaces.
pattern1 = "[a-zA-Z' ]+"
prog1 = re.compile(pattern1)
# Leading card id (digits followed by a space).
pattern2 = "^[0-9]* "
prog2 = re.compile(pattern2)
# Card value: optionally negative integer.
pattern3 = "^-?[0-9]*"
prog3 = re.compile(pattern3)
cards = []
# NOTE(review): result.txt is opened in append mode — repeated runs will
# concatenate multiple "const data = ..." payloads; confirm 'w' wasn't meant.
file = open("./cards/result.txt", 'a')
file.write("const data = '[")
maxdesc = 0
maxname = 0
for i in range(len(data)):
    # Move the last line (description) right after the header line.
    data[i].insert(1, data[i].pop())
    res1 = prog1.search(data[i][0])
    if res1 is not None:
        data[i].insert(1, res1[0].strip())
    res2 = prog2.match(data[i][0])
    if res2 is not None:
        data[i][0] = int(res2[0])
    if data[i][2][:6] == "Value:":
        data[i][2] = data[i][2][7:]
    res3 = prog3.match(data[i][2])
    if res3 is not None:
        data[i][2] = int(res3[0])
    # Fold any extra description lines into one string.
    if len(data[i]) > 4:
        for j in range(4,len(data[i])):
            data[i][3] = data[i][3] + data[i][j]
        data[i] = data[i][:4]
    string ='{"id":'+str(data[i][0])+', "name": "'+str(data[i][1])+'", "value": '+str(data[i][2])+', "descr": "'+str(data[i][3])+'"},'
    file.write(string)
    # Track the longest description/name for the summary printed below.
    if len(data[i][3]) > maxdesc:
        maxdesc = len(data[i][3])
    if len(data[i][1]) > maxname:
        maxname = len(data[i][1])
# NOTE(review): the output ends with a trailing comma and two closing
# brackets ("...},]]'"), which is not valid JSON — confirm the consumer
# tolerates this or whether one of these writes is a bug.
file.write(']')
file.write("]'\n\nmodule.exports = data;")
file.close()
print("desc: {} name: {}".format(maxdesc,maxname))
25,029 | c0745691f7f8ed67a89bb8a04ba321faeba7591f | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from frappe.model.utils.rename_field import update_users_report_view_settings
from erpbee.patches.v4_0.fields_to_be_renamed import rename_map
def execute():
    """Apply the v4.0 field renames to every user's saved report-view settings."""
    for doctype, renamed_fields in rename_map.items():
        for old_fieldname, new_fieldname in renamed_fields:
            update_users_report_view_settings(doctype, old_fieldname, new_fieldname)
|
25,030 | 779627e2719aca8710f10d5144ec0a8dff5b57e6 | # -*- coding: utf-8 -*-
# @Author: ty
# @File name: user.py
# @IDE: PyCharm
# @Create time: 1/25/21 5:05 PM
# @Description:
from flask_login import current_user
from app.models.order.order_status import OrderStatus
def get_user_info(user):
    """
    Build the full profile payload for *user*, including wallet balances,
    coupons and order-status counters.

    :param user: user model/document instance
    :return: dict of profile fields for the API response
    """
    coin_wallet = user.coin_wallet
    coins = coin_wallet.balance
    cash = coin_wallet.cash
    # Aggregated per-user order counters (received total, waiting, unpaid...).
    status = OrderStatus.by_user(user_id=user.id)
    info = dict(
        id=str(user.id),
        name=user.name,
        avatar_url=user.avatar_url,
        avatar_thumb=user.avatar_thumb,
        coins=coins,
        cash=cash,
        # NOTE(review): ``is_espired`` looks like a typo for ``is_expired``
        # on the coupon model — confirm the attribute name before renaming.
        consumable_coupons=[c.to_json() for c in user.wallet.consumable_coupons if not c.is_espired],
        num_followers=user.num_followers,
        num_followings=user.num_followings,
        total=status.received,
        num_orders=status.num_orders,
        num_waiting=status.num_waiting,
        num_unpaid=status.num_unpaid,
        num_favors=user.num_favors,
    )
    return info
def user_json(user):
    """
    Compact public JSON representation of *user*.

    :param user: user model/document instance
    :return: dict of public profile fields
    """
    if current_user and current_user.is_authenticated:
        following = current_user.is_following(user)
    else:
        following = False
    return dict(
        id=str(user.id),
        name=user.name,
        avatar_url=user.avatar_url,
        avatar_thumb=user.avatar_thumb,
        num_followers=user.num_followers,
        num_followings=user.num_followings,
        is_following=following,
    )
|
25,031 | 1eb01ee2033927b20ec8aa839345e6801e1c5a96 | #!/usr/bin/env python3
import base64
import os
import subprocess
import sys
import yaml
EDITOR = os.environ.get('EDITOR', 'vi')
class NoDatesSafeLoader(yaml.SafeLoader):
    # SafeLoader subclass whose implicit resolvers can be stripped per tag,
    # so date-like scalars load as plain strings (see main()).
    @classmethod
    def remove_implicit_resolver(cls, tag_to_remove):
        """
        Remove implicit resolvers for a particular tag
        Takes care not to modify resolvers in super classes.
        We want to load datetimes as strings, not dates, because we
        go on to serialise as json which doesn't have the advanced types
        of yaml, and leads to incompatibilities down the track.
        """
        # Copy-on-write: shadow the class attribute only if this class does
        # not yet own one, leaving yaml.SafeLoader's table untouched.
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
        # Drop every (tag, regexp) resolver entry matching the removed tag.
        for first_letter, mappings in cls.yaml_implicit_resolvers.items():
            cls.yaml_implicit_resolvers[first_letter] = [
                (tag, regexp)
                for tag, regexp in mappings
                if tag != tag_to_remove
            ]
def repr_str(dumper, data):
    """YAML representer: emit multi-line strings in literal block style ('|')."""
    if '\n' not in data:
        return dumper.orig_represent_str(data)
    return dumper.represent_scalar(
        u'tag:yaml.org,2002:str', data, style='|')
def decode(secret):
    """Base64-decode every value under the secret's 'data' key, in place."""
    if 'data' in secret:
        decoded_items = {}
        for key, value in secret['data'].items():
            decoded_items[key] = base64.b64decode(value).decode('utf8')
        secret['data'] = decoded_items
    return secret
def encode(secret):
    """Base64-encode every value under the secret's 'data' key, in place.

    Bug fix: ``b64encode`` returns ``bytes``; the original left bytes in the
    mapping, which ``yaml.safe_dump`` cannot represent in Python 3. Decoding
    to ASCII makes the edit/encode round-trip symmetric with ``decode``.
    """
    if 'data' in secret:
        secret['data'] = {
            k: base64.b64encode(v.encode()).decode('ascii')
            for k, v in secret['data'].items()
        }
    return secret
def edit(fname):
    """Decode the secret file, open it in $EDITOR, then re-encode it.

    The file on disk is rewritten twice: once decoded for human editing and
    once re-encoded after the editor exits.
    """
    with open(fname, 'r') as fid:
        secret = yaml.load(fid, Loader=NoDatesSafeLoader)
    decoded = decode(secret)
    with open(fname, 'w') as fid:
        fid.write(yaml.safe_dump(decoded, default_flow_style=False))
    # Block until the user's editor process exits.
    subprocess.check_call(EDITOR.split() + [fname])
    with open(fname, 'r') as fid:
        edited = yaml.load(fid, Loader=NoDatesSafeLoader)
    encoded = encode(edited)
    with open(fname, 'w') as fid:
        fid.write(yaml.safe_dump(encoded, default_flow_style=False))
def main():
    """Entry point: patch the YAML loader/dumper, then edit the named secret."""
    # Load timestamps as plain strings so round-tripping does not alter them.
    NoDatesSafeLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')
    # Keep the stock str representer, then install the literal-block one.
    yaml.SafeDumper.orig_represent_str = yaml.SafeDumper.represent_str
    yaml.add_representer(str, repr_str, Dumper=yaml.SafeDumper)
    # First CLI argument is the secret manifest to edit.
    fname = sys.argv[1]
    edit(fname)
if __name__ == '__main__':
    main()
|
25,032 | c895b24fa105ba2f4d17956bf5097f0049d58d27 |
"""
Insert an addintional output in restart file
Last update 2016-03-29 leva@astro.princetone.edu
"""
import numpy as np
import struct
import math
import sys
import getopt
import os
import time
from multiprocessing import Pool
n = 1 # number of processors (equal to the number of folders)
for i in range(n):
    filename = "".join(["heat-id",str(i),".0002.rst"]) # name of the restart file. change 0000 to apropriate number
    if i == 0:
        # Rank 0 writes its restart file without an id suffix.
        filename = "".join(["heat.0002.rst"]) # name of the restart file. change 0000 to apropriate number
    # read the contents of the restart file
    f = open(filename, "r")
    contents = f.readlines()
    f.close()
    # 9th element in contents contains maxout. need to change it
    contents[8] = 'maxout = 8 \n'
    # insert information about the new output (start from line number 88)
    start = 89
    contents.insert(start , '<output8> \n')
    contents.insert(start+ 1, 'out_fmt = heat \n')
    contents.insert(start+ 2, 'dt = 1.0 \n')
    contents.insert(start+ 3, 'time = 2.401000000000000e+03 # Next Output Time\n')
    contents.insert(start+ 4, 'num = 0 # Next Output Number\n')
    contents.insert(start+ 5, 'level = -1 # Default Value\n')
    contents.insert(start+ 6, 'domain = -1 # Default Value\n')
    contents.insert(start+ 7, 'id = out8 # Default Value\n')
    contents.insert(start+ 8, 'out = all # Default Value\n')
    contents.insert(start+ 9, 'pargrid = 0 # Default Value\n')
    contents.insert(start+10, '\n')
    # combine and rewrite the restart file
    f = open(filename, "w")
    contents = "".join(contents)
    f.write(contents)
    f.close()
    # Bug fix: ``print i`` is Python-2-only syntax; the parenthesised call
    # below works identically under both Python 2 and 3.
    print(i)
|
25,033 | fa2aaa90c806cfe2c180cd783d71893804430c59 | import numpy as np
import pandas as pd
from problem2 import *
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 3: (Moneyball) Data Preprocessing in Baseball Dataset (24 points)
In this problem, you will practise data preprocessing with baseball dataset
A list of all variables being used in this problem is provided at the end of this file.
'''
#----------------------------------------------------
'''
(Loading Data) Let's start with the raw data, 'moneyball_batting.csv'. Let's load this CSV file into a pandas dataframe (X)..
---- Inputs: --------
* filename: the file name of a CSV file, a string.
---- Outputs: --------
* X: a dataframe containing the batting data of all players in all years, loaded from moneyball_batting.csv.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def load_batting(filename='moneyball_batting.csv'):
    """Load the batting CSV into a pandas DataFrame.

    :param filename: the file name of a CSV file, a string.
    :return: X, a dataframe containing the batting data of all players in
        all years.
    """
    X = pd.read_csv(filename)
    return X
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_load_batting
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Filtering by Year) The dataset contains records of all years. In this study, suppose we just want to choose players for the year 2002, based upon data of year 2001. We need to first search the data records of year 2001 only..
---- Inputs: --------
* X: a dataframe containing the batting data of all players in all years, loaded from moneyball_batting.csv.
* year: an integer scalar, the year of the data to be used.
---- Outputs: --------
* X1: a dataframe containing the batting data only in the searched year (2001).
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def filter_batting(X, year):
    '''Return only the batting rows whose season equals `year`.

    Inputs:
        X: dataframe of batting data for all years.
        year: integer season to keep.
    Returns:
        X1: dataframe restricted to the requested year.
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    # NOTE(review): assumes the season column is named 'yearID' (Lahman
    # dataset convention) — confirm against moneyball_batting.csv's header.
    X1 = X[X['yearID'] == year]
    #########################################
    return X1
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_filter_batting
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_X1.csv'. This 2001 dataset contains multiple records for each player: a same player may have two/three records, because the player has changed team in year 2001. For example, playerID='guilljo01' (or 'houstty01') has two rows. We need to sum the game statistics of the same player together, so that each player only contains one row in the data frame. For example if the same player with ID 'player1' has three rows of records, 'player2' has two rows of records:
player ID | H | AB
---------------------
player 1 | 5 | 10
player 1 | 3 | 20
player 1 | 1 | 30
player 2 | 1 | 40
player 2 | 2 | 50
player 3 | 1 | 60
---------------------
we should sum the data for each player into one row:
player ID | H | AB
-----------------------------------------
player 1 | 9=(5+3+1) | 60 = (10+20+30)
player 2 | 3=(1+2) | 90 = (40+50)
player 3 | 1 | 60
-----------------------------------------
(Group by playerID) Given a data frame of batting statistics (X1), group the data records with respect to playerID, so that the game statistics are added together for each player. For example, player 'houstty01' has two rows, where the number of hits (column H) has values: 58, 4 We want to combine these two rows into one row, such that all the game statistics are the sum of the raw values (for example, number hits now should be 58+4 = 62) .
---- Inputs: --------
* X1: a dataframe containing the batting data only in the searched year (2001).
---- Outputs: --------
* X2: a dataframe containing the batting data in the year (2001) after grouping the statistics for players.
---- Hints: --------
* You could use some function implemented in problem2.py to solve this problem.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def group_batting(X1):
    '''Collapse multi-team seasons into one row per player.

    Sums each player's numeric game statistics across all of their rows
    (a player traded mid-season has several rows in X1).

    Inputs:
        X1: dataframe of batting data for a single year.
    Returns:
        X2: dataframe with one row per playerID, statistics summed.
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    # numeric_only=True sums only the stat columns; as_index=False keeps
    # playerID as a regular column rather than the index.
    X2 = X1.groupby('playerID', as_index=False).sum(numeric_only=True)
    #########################################
    return X2
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_group_batting
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_X2.csv'. Now the dataset only contains game statistics, but no information about the players is available, like first name, last name, weight, height, etc. We have another CSV file 'moneyball_player.csv', which contains the player information, such as first name, weight, height, etc. It would be better if we can combine these two datasets into one data frame, so the new data frame contains both game statistics and player information.
(Merge the two dataframes) Given a data frame (X2) of batting statistics , and a data frame (Y) of player information (loaded from 'moneyball_player.csv'), Combine the two data frames into one, according to the playerID column. .
---- Inputs: --------
* X2: a dataframe containing the batting data in the year (2001) after grouping the statistics for players.
* Y: a dataframe containing the player information, such as first name, weight, height, which is loaded from moneyball_player.csv.
---- Outputs: --------
* X3: a dataframe containing both batting data and player information in the year (2001).
---- Hints: --------
* You could use some function implemented in problem2.py to solve this problem.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def merge_player(X2, Y):
    '''Join batting statistics with player information on playerID.

    Inputs:
        X2: dataframe of per-player batting statistics.
        Y: dataframe of player information (name, weight, height, ...).
    Returns:
        X3: merged dataframe (inner join on the 'playerID' column).
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    X3 = X2.merge(Y, on='playerID')
    #########################################
    return X3
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_merge_player
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_X3.csv'. Now the dataset contains both game statistics and player information. However, we still need to know the salary of each player in year 2002, which represents the market price of each player in 2002, in order to hire the player into our team. We have another CSV file 'moneyball_salary.csv', which contains the player's salary information in all years. We first need to find the players' salaries only in year 2002, then we want to merge the salary information into the dataset.
(Filter salary for year 2002) Given the dataframe (Z) containing players' salary data of all years, filter the dataframe with year 2002, return the salary data only in year 2002.
---- Inputs: --------
* Z: a dataframe containing the salary data of all players in all years.
* year: an integer scalar, the year of the data to be used.
---- Outputs: --------
* Z1: a dataframe containing the salary data only in the searched year (2002).
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def filter_salary(Z, year):
    '''Return only the salary rows whose season equals `year`.

    Inputs:
        Z: dataframe of salary data for all years.
        year: integer season to keep.
    Returns:
        Z1: dataframe restricted to the requested year.
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    # NOTE(review): assumes the season column is named 'yearID' — confirm
    # against moneyball_salary.csv's header.
    Z1 = Z[Z['yearID'] == year]
    #########################################
    return Z1
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_filter_salary
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_Z1.csv'. Now let's merge the salary information into the dataset.
(Join the batting data with salary data) Given a data frame X3 (containing both batting statistics and player information, loaded from 'moneyball_X3.csv'), and a dataframe (Z1) of salary information (loaded from 'moneyball_Z1.csv'), combine the two data frames into one, according to the 'playerID' column.
---- Inputs: --------
* X3: a dataframe containing both batting data and player information in the year (2001).
* Z1: a dataframe containing the salary data only in the searched year (2002).
---- Outputs: --------
* X4: a dataframe containing all the required data (batting data, player information and salary data) for player evaluation in the year (2001).
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def merge_salary(X3, Z1):
    '''Join the batting/player dataframe with salary data on playerID.

    Inputs:
        X3: dataframe with batting statistics and player information.
        Z1: dataframe with salary data for one year.
    Returns:
        X4: merged dataframe (inner join on the 'playerID' column).
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    X4 = X3.merge(Z1, on='playerID')
    #########################################
    return X4
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_merge_salary
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_X4.csv'. This file contains all the information we need for player evaluation.
(Filter At-Bats) Given a dataframe (X4) of all the players on the market in year 2002, find the candidate players who have sufficient experience: the players with minimum number of At-Bats(AB). Any player who has smaller number of AB than min_AB in X4 should be excluded. The remaining players are candidate players (in the dataframe X5), who have sufficient previous experience (AB >= min_AB) .
---- Inputs: --------
* X4: a dataframe containing all the required data (batting data, player information and salary data) for player evaluation in the year (2001).
* min_AB: an integer scalar, the threshold on AB (at-Bat). To find good players, we should exclude those without sufficient experience. The players with AB less than the min_AB should be excluded from the ranked list..
---- Outputs: --------
* X5: a dataframe containing all the candidate players for evaluation in the year (2001), who have sufficient experience (at least min_AB).
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def filter_min_AB(X4, min_AB):
    '''Keep only players with sufficient experience (AB >= min_AB).

    Inputs:
        X4: dataframe of candidate players.
        min_AB: integer minimum number of at-bats required.
    Returns:
        X5: dataframe of players whose AB column is at least min_AB.
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    X5 = X4[X4['AB'] >= min_AB]
    #########################################
    return X5
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_filter_min_AB
---------------------------------------------------
'''
#----------------------------------------------------
'''
If you have passed the previous test case, the result data frame should have been saved into a file, called 'moneyball_X5.csv'. Now let's remove the players who are too expensive.
(Find Affordable Players) Given a dataframe (X5) of all the players with sufficient experience, find the candidate players who are affordable: the players with salary no higher than max_salary. Any player who has higher salary than max_salary in X5 should be excluded. The remaining players are candidate players (in the dataframe X6), who have both sufficient experience (AB >= min_AB) and are affordable (salary < max_salary).
---- Inputs: --------
* X5: a dataframe containing all the candidate players for evaluation in the year (2001), who have sufficient experience (at least min_AB).
* max_salary: an integer scalar, the maximum salary that we can afford for a player. To find affordable players, we should exclude those too expensive player. The players with higher salaries than max_salary should be excluded from the ranked list..
---- Outputs: --------
* X6: a dataframe containing all the candidate players for evaluation in the year (2001), who have sufficient experience (at least min_AB) and affordable price tag (at most max_salary).
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def filter_max_salary(X5, max_salary):
    '''Keep only affordable players (salary <= max_salary).

    Inputs:
        X5: dataframe of candidate players.
        max_salary: integer maximum affordable salary.
    Returns:
        X6: dataframe of players whose salary is at most max_salary.
    '''
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    # NOTE(review): assumes the salary column is named 'salary' — confirm
    # against moneyball_salary.csv's header.
    X6 = X5[X5['salary'] <= max_salary]
    #########################################
    return X6
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_filter_max_salary
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 3:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 3 (24 points in total)--------------------- ... ok
* (3 points) load_batting ... ok
* (3 points) filter_batting ... ok
* (3 points) group_batting ... ok
* (3 points) merge_player ... ok
* (3 points) filter_salary ... ok
* (3 points) merge_salary ... ok
* (3 points) filter_min_AB ... ok
* (3 points) filter_max_salary ... ok
----------------------------------------------------------------------
Ran 8 tests in 0.006s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* filename: the file name of a CSV file, a string.
* year: an integer scalar, the year of the data to be used.
* X: a dataframe containing the batting data of all players in all years, loaded from moneyball_batting.csv.
* X1: a dataframe containing the batting data only in the searched year (2001).
* X2: a dataframe containing the batting data in the year (2001) after grouping the statistics for players.
* X3: a dataframe containing both batting data and player information in the year (2001).
* X4: a dataframe containing all the required data (batting data, player information and salary data) for player evaluation in the year (2001).
* X5: a dataframe containing all the candidate players for evaluation in the year (2001), who have sufficient experience (at least min_AB).
* X6: a dataframe containing all the candidate players for evaluation in the year (2001), who have sufficient experience (at least min_AB) and affordable price tag (at most max_salary).
* Y: a dataframe containing the player information, such as first name, weight, height, which is loaded from moneyball_player.csv.
* Z: a dataframe containing the salary data of all players in all years.
* Z1: a dataframe containing the salary data only in the searched year (2002).
* min_AB: an integer scalar, the threshold on AB (at-Bat). To find good players, we should exclude those without sufficient experience. The players with AB less than the min_AB should be excluded from the ranked list..
* max_salary: an integer scalar, the maximum salary that we can afford for a player. To find affordable players, we should exclude those too expensive player. The players with higher salaries than max_salary should be excluded from the ranked list..
'''
#-------------------------------------------- |
25,034 | 1b787399c81e5366b53b11d10ce3d7363f494add | from practice.generator import create_string, create_big_string
def test_string_generator():
    """The small generator must emit its fixed token."""
    expected = "strung"
    assert create_string() == expected
def test_big_string_generator():
    """The big generator must emit its fixed sentence."""
    expected = "this is a pretty big string"
    assert create_big_string() == expected
|
25,035 | 067608c34a6e3b1786b944c7e945eef99fb3a32d | # -*- coding: utf-8 -*-
# Base URL of the tile server that serves route-map tiles.
# NOTE(review): points at a local dev server (port 4567); presumably
# overridden in production configuration — confirm.
ROUTEMAP_TILE_BASEURL = 'http://localhost:4567/tiles'
|
# Print the first few lines of "my info.py" one line at a time.
# Bug fixed: the file handle was opened but never closed; a context
# manager guarantees it is released even if a print fails.
with open("my info.py", 'r') as f:
    # readline() returns one line including its trailing newline, so
    # end="" avoids printing an extra blank line after each one.
    print(f.readline(), end="")
    print(f.readline(), end="")
    print(f.readline(), end="")
    # readline(5) returns at most 5 characters of the next line,
    # not 5 words.
    print(f.readline(5), end="")
|
class Movie(object):
    """Minimal movie record holding a title.

    Bug fixed: the original ``__init__`` evaluated ``self.title`` before
    the attribute existed, so every ``Movie()`` call raised
    ``AttributeError``. The attribute is now assigned (default ``None``),
    and a title can optionally be supplied at construction.
    """

    def __init__(self, title=None):
        # Ensure the attribute always exists after construction.
        self.title = title
25,038 | d58871bdf71ed1cc417ba2fe0d46ff120245f56b | #!/usr/bin/env python
import numpy
from common_util import print_c_array
def gen():
    """Emit C source for a small 'valid'-mode convolution test fixture."""
    size_a, size_v = 5, 3
    # Fixed seed so the generated fixture is reproducible.
    numpy.random.seed(42)
    signal = numpy.random.rand(size_a).astype(numpy.float32)
    kernel = numpy.random.rand(size_v).astype(numpy.float32)
    reference = numpy.convolve(signal, kernel, mode='valid')
    # 'valid' convolution of lengths n and m yields n - m + 1 samples.
    assert len(reference) == (size_a - size_v + 1)
    print('const float g_a[] = {' + print_c_array(signal) + '};')
    print('const float g_v[] = {' + print_c_array(kernel) + '};')
    print('const float g_reference[] = {' + print_c_array(reference) + '};')
    print('const size_t k_n = {};'.format(size_a))
    print('const size_t k_m = {};'.format(size_v))
    print('const size_t k_outnum = {};'.format(len(reference)))
gen()
|
class Algorithm(object):
    """Base class for trading strategies driven by a simulation object.

    Thin read/trade facade over ``self.simulation``; subclasses override
    :meth:`think` to implement an actual strategy.

    Bugs fixed: ``getOpenOrders``, ``getClosedOrders`` and
    ``_getOpenOrdersDefault`` previously filtered over the *method object*
    (``self.getOrderBook`` / ``self.getOpenOrders`` without parentheses),
    which raised ``TypeError`` as soon as the result was consumed. Also
    removed the per-iteration list reversal in :meth:`getLastPrices`.
    """

    def __init__(self, simulation):
        self.simulation = simulation

    def getLastPrice(self):
        # Most recent trade price observed by the simulation.
        return self.simulation.lastPrice

    def getLastPrices(self, n=10, step=1):
        # Up to n prices, newest first, taking every `step`-th entry.
        newest_first = self.simulation.lastPrices[::-1]
        return newest_first[::step][:n]

    def getWalletBalance(self):
        return self.simulation.wallet.balance

    def getTotalBalance(self):
        return self.simulation.totalBalance

    ## Balance for Base Currency
    def getAvailableBalance(self):
        return self.simulation.availableBalance

    def getLockedBalance(self):
        return self.simulation.lockedBalance

    def hasEnoughBalance(self, price, quantity):
        # Can we fund a buy of `quantity` units at `price` each?
        return self.getAvailableBalance() >= price * quantity

    ## Balance for Crypto Currency
    def getAvailableCryptoBalance(self):
        return self.simulation.availableCryptoBalance

    def getLockedCryptoBalance(self):
        return self.simulation.lockedCryptoBalance

    def hasEnoughCryptoBalance(self, quantity):
        return self.getAvailableCryptoBalance() >= quantity

    def getOrderBook(self):
        return self.simulation.orderBook

    def placeBuyOrder(self, quantity, unitPrice):
        return self.simulation.placeBuyOrder(quantity, unitPrice)

    def placeSellOrder(self, quantity, unitPrice):
        return self.simulation.placeSellOrder(quantity, unitPrice)

    def getOpenOrders(self):
        # Open orders, most recent first (book order reversed).
        return [o for o in self.getOrderBook() if o.isOpen()][::-1]

    def getOpenBuyOrders(self):
        return self._getOpenOrdersDefault("buy")

    def getOpenSellOrders(self):
        return self._getOpenOrdersDefault("sell")

    def getClosedOrders(self):
        # Closed orders, most recent first (book order reversed).
        return [o for o in self.getOrderBook() if o.isClosed()][::-1]

    def think(self):
        # Default no-op strategy: just report the latest price.
        print("Seeing last price")
        print(self.getLastPrice())

    def _getOpenOrdersDefault(self, action):
        # Open orders restricted to one side of the book ("buy"/"sell").
        return [o for o in self.getOpenOrders() if o.getAction() == action]
|
25,040 | a1348435dbd9269901196c28d3070c763e6887fa | from model import *
import numpy as np
import tensorflow as tf
import argparse
import matplotlib.pyplot as plt
from imageio import imread, imsave
import os
# Pin the process to the first GPU; must be set before TF creates a session.
os.environ['CUDA_VISIBLE_DEVICES']='0'
# CLI for training/testing the denoiser model.
# NOTE(review): --channel_num, --percent and --input have no defaults, so
# some phases will crash if they are omitted — confirm required usage.
parser = argparse.ArgumentParser()
parser.add_argument('--channel_num', dest='channel_num', type=int)
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--epoch_num', dest='epoch_num', type=int, default=100, help='# of epoch')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--phase', dest='phase', default='train', help='train or test')
parser.add_argument('--percent', dest='percent', type=float)
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--summary_dir', dest='summary_dir', default='./summary', help='summary are saved here')
parser.add_argument('--input', dest='input', type=str)
parser.add_argument('--layer_num', dest='layer_num', type=int, default=10)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=32)
parser.add_argument('--model_type', dest='model_type', default='ircnn', help='model type')
# Parsed at import time; `args` is read as a module-level global below.
args = parser.parse_args()
def load_percent(image):
    # Placeholder: presumably meant to load/corrupt an image according to
    # the --percent CLI flag; not implemented yet — confirm intent.
    pass
def load_data(channel):
    """Load the first 2000 CIFAR-10 images from data_batch_1.

    Args:
        channel: 3 for RGB images, 1 for single-channel grayscale.
    Returns:
        float32 ndarray of shape (2000, 32, 32, channel); an empty list
        for any other channel value (original fall-through behavior kept).
    """
    import pickle
    # Fixed: the pickle-loading code was duplicated for both branches and
    # the handle name `dict` shadowed the builtin.
    with open(r'./data/3-channel/data_batch_1', 'rb') as f:
        batch = pickle.load(f, encoding='iso-8859-1')
    # CIFAR rows are flat (3072,) in CHW order; convert to NHWC float32.
    images = batch['data'].reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(np.float32)
    images = images[:2000]
    if channel == 3:
        return images
    if channel == 1:
        # BT.601 luma weights. Fixed: the blue weight was 0.144 (a digit
        # transposition of 0.114); the correct weights sum to 1.0.
        gray = np.dot(images[..., :3], [0.299, 0.587, 0.114]).astype(np.float32)
        # Keep an explicit trailing channel axis: (2000, 32, 32, 1).
        return gray[..., np.newaxis]
    return []
def main(_):
    """Entry point: build the denoiser and run the requested phase.

    Bug fixed: a bare ``tf.device('/gpu:0')`` statement is a no-op — the
    device scope only applies when used as a context manager wrapping
    graph construction.
    """
    device = '/gpu:0' if args.use_gpu else '/cpu:0'
    with tf.device(device), tf.Session() as sess:
        model = denoiser(sess, args)
        if args.phase == 'train':
            model.train(load_data(args.channel_num))
        elif args.phase == 'test':
            # Load a single test image and add the batch dimension.
            image = np.array(imread("./data/test/" + args.input + ".png"))
            image = image.reshape([1, image.shape[0], image.shape[1], args.channel_num])
            model.test(image)
        else:
            exit(0)


if __name__ == '__main__':
    # TF1-style launcher: parses flags and invokes main(_).
    tf.app.run()
|
25,041 | 2d6aacb537fc8285ccbce92ee22abb223fb1a41b | from ingredientfactory import *
from pizza import *
class PizzaStore:
    """Abstract pizza store (factory-method pattern).

    Subclasses implement ``_create_pizza`` to pick a concrete pizza;
    ``order_pizza`` runs the fixed preparation pipeline on it.
    """

    def __init__(self):
        pass

    def _create_pizza(self, pizza_type):
        # Factory step — concrete stores must override this.
        raise NotImplementedError

    def order_pizza(self, pizza_type):
        # Template method: create the pizza, then run each fixed
        # preparation step in order before handing it back.
        pizza = self._create_pizza(pizza_type)
        for step in (pizza.prepare, pizza.bake, pizza.cut, pizza.box):
            step()
        return pizza
class NYPizzaStore(PizzaStore):
    """Pizza store wired to the New York ingredient family."""

    def __init__(self):
        PizzaStore.__init__(self)
        self._ingredient_factory = NYPizzaIngredientFactory()

    def _create_pizza(self, pizza_type):
        # Dispatch table replaces the original if/elif chain; unknown
        # types still yield None.
        menu = {
            'cheese': NYStyleCheesePizza,
            'veggies': NYStyleVeggiePizza,
            'clam': NYStyleClamPizza,
            'pepperoni': NYStylePepperoniPizza,
        }
        maker = menu.get(pizza_type)
        return maker(self._ingredient_factory) if maker is not None else None
class ChicagoPizzaStore(PizzaStore):
    """Pizza store wired to the Chicago ingredient family."""

    def __init__(self):
        PizzaStore.__init__(self)
        self._ingredient_factory = ChicagoPizzaIngredientFactory()

    def _create_pizza(self, pizza_type):
        # Dispatch table replaces the original if/elif chain; unknown
        # types still yield None.
        menu = {
            'cheese': ChicagoStyleCheesePizza,
            'veggies': ChicagoStyleVeggiePizza,
            'clam': ChicagoStyleClamPizza,
            'pepperoni': ChicagoStylePepperoniPizza,
        }
        maker = menu.get(pizza_type)
        return maker(self._ingredient_factory) if maker is not None else None
|
25,042 | 26cb9cdd3a7258d48057dcec4645f1b50c323c5c | #!/usr/bin/env python
"""
Copyright 2014 ExaVault, Inc.
NOTE: This file was generated automatically. Do not modify by hand.
"""
class PreviewFile:
    """Generated API model describing a preview image resource."""

    def __init__(self):
        # Attribute name -> Swagger type string, consumed by the
        # (de)serialization layer.
        self.swaggerTypes = {
            'image': 'str',
            'size': 'long',
            'imageId': 'str'
        }
        # All fields start unset; the API layer populates them later.
        for attr in ('image', 'size', 'imageId'):
            setattr(self, attr, None)
|
25,043 | a00ed2d027a617090509020e5fe70d13f605571e | #!/usr/bin/python
### FOR THIS PROGRAM, CLASS SLIDES WHERE USED AS TEMPLATE ####
import sys
sys.path.append("/Users/ianhoyos/biopython-1.70")
import Bio
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast import NCBIStandalone
### Fixed path to where blastp is located ###
### The Database utilized is a localized database for the mus mulusucus genes. Instead of searching
### Through the entire universe of genes, this way it is a more compact search.
### Also the database is localized so a terminal command is needed to run a conver
### TO turn the file into a Database 'makeblastdb -in mus_protein.fa -parse_seqids -dbtype prot'
### Run blastp locally against the pre-built mouse protein database.
### (Database built once with: makeblastdb -in mus_protein.fa -parse_seqids -dbtype prot)
blastp_cline = NcbiblastpCommandline(cmd="/usr/local/ncbi/blast/bin/blastp", query="human.fa", db="mus_protein.fa", evalue=0.01, out="out.txt")
blastp_cline()
### blastp is used because both the query and the database hold protein
### sequences. The evalue parameter bounds how weak a reported match may be.
# Fixed: the report handle was never closed; the context manager releases
# it as soon as parsing finishes.
with open("out.txt") as result_handle:
    blast_parser = NCBIStandalone.BlastParser()
    blast_record = blast_parser.parse(result_handle)
### Print every high-scoring pair below the stricter 0.001 threshold.
for alignment in blast_record.alignments:
    for hsp in alignment.hsps:
        if hsp.expect < 0.001:
            print (alignment.title)
            print (alignment.length)
            print (hsp.expect)
            print (hsp.query)
            print (hsp.match)
            print (hsp.sbjct)
|
25,044 | 71ce48f64905980ae9bb7304bd3b79ca365a00ec | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli.cli_root import cli
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
# Generated OCI CLI wiring for the Vault (Secrets Management) service.
@cli.command(cli_util.override('vault.vault_root_group.command_name', 'vault'), cls=CommandGroupWithAlias, help=cli_util.override('vault.vault_root_group.help', """API for managing secrets."""), short_help=cli_util.override('vault.vault_root_group.short_help', """Secrets Management API"""))
@cli_util.help_option_group
def vault_root_group():
    # Root `vault` command group; subgroups are attached below.
    pass
@click.command(cli_util.override('vault.secret_version_group.command_name', 'secret-version'), cls=CommandGroupWithAlias, help="""The details of the secret version, excluding the contents of the secret.""")
@cli_util.help_option_group
def secret_version_group():
    # `vault secret-version` subgroup (pure container; no logic of its own).
    pass
@click.command(cli_util.override('vault.secret_group.command_name', 'secret'), cls=CommandGroupWithAlias, help="""The details of the secret. Secret details do not contain the contents of the secret itself.""")
@cli_util.help_option_group
def secret_group():
    # `vault secret` subgroup (pure container; no logic of its own).
    pass
# Register the subgroups on the root group at import time.
vault_root_group.add_command(secret_version_group)
vault_root_group.add_command(secret_group)
@secret_group.command(name=cli_util.override('vault.cancel_secret_deletion.command_name', 'cancel-secret-deletion'), help=u"""Cancels the pending deletion of the specified secret. Canceling a scheduled deletion restores the secret's lifecycle state to what it was before you scheduled the secret for deletion.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def cancel_secret_deletion(ctx, from_json, secret_id, if_match):
    # Generated handler: validate input, assemble optional headers, call the API.
    if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    # Propagate (or mint) a request id so the call can be traced end-to-end.
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.cancel_secret_deletion(
        secret_id=secret_id,
        **kwargs
    )
    # Render the SDK response in the user's requested output format.
    cli_util.render_response(result, ctx)
@secret_version_group.command(name=cli_util.override('vault.cancel_secret_version_deletion.command_name', 'cancel-secret-version-deletion'), help=u"""Cancels the scheduled deletion of a secret version.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--secret-version-number', required=True, type=click.INT, help=u"""The version number of the secret.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def cancel_secret_version_deletion(ctx, from_json, secret_id, secret_version_number, if_match):
    # Generated handler: validate input, assemble optional headers, call the API.
    if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    # NOTE(review): --secret-version-number is declared type=click.INT, so this
    # string check can never trigger — dead code emitted by the generator.
    if isinstance(secret_version_number, six.string_types) and len(secret_version_number.strip()) == 0:
        raise click.UsageError('Parameter --secret-version-number cannot be whitespace or empty string')
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    # Propagate (or mint) a request id so the call can be traced end-to-end.
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.cancel_secret_version_deletion(
        secret_id=secret_id,
        secret_version_number=secret_version_number,
        **kwargs
    )
    # Render the SDK response in the user's requested output format.
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.change_secret_compartment.command_name', 'change-compartment'), help=u"""Moves a secret into a different compartment within the same tenancy. For information about moving resources between compartments, see [Moving Resources to a Different Compartment].
When provided, if-match is checked against the ETag values of the secret.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment into which the resource should be moved.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def change_secret_compartment(ctx, from_json, secret_id, compartment_id, if_match):
    # Generated handler: validate input, build the request body, call the API.
    if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    # Propagate (or mint) a request id so the call can be traced end-to-end.
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    # Request body: the target compartment for the move.
    _details = {}
    _details['compartmentId'] = compartment_id
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.change_secret_compartment(
        secret_id=secret_id,
        change_secret_compartment_details=_details,
        **kwargs
    )
    # Render the SDK response in the user's requested output format.
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.create_secret.command_name', 'create'), help=u"""Creates a new secret according to the details of the request.
This operation is not supported by the Oracle Cloud Infrastructure Terraform Provider.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment where you want to create the secret.""")
@cli_util.option('--secret-content', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-name', required=True, help=u"""A user-friendly name for the secret. Secret names should be unique within a vault. Avoid entering confidential information. Valid characters are uppercase or lowercase letters, numbers, hyphens, underscores, and periods.""")
@cli_util.option('--vault-id', required=True, help=u"""The OCID of the vault where you want to create the secret.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--description', help=u"""A brief description of the secret. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--key-id', help=u"""The OCID of the master encryption key that is used to encrypt the secret.""")
@cli_util.option('--metadata', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Additional metadata that you can use to provide context about how to use the secret during rotation or other administrative tasks. For example, for a secret that you use to connect to a database, the additional metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-rules', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of rules to control how the secret is used and managed.
This option is a JSON list with items of type SecretRule. For documentation on SecretRule please see our API reference: https://docs.cloud.oracle.com/api/#/en/vaults/20180608/datatypes/SecretRule.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "SCHEDULING_DELETION", "PENDING_DELETION", "CANCELLING_DELETION", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-content': {'module': 'vault', 'class': 'SecretContentDetails'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-content': {'module': 'vault', 'class': 'SecretContentDetails'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}}, output_type={'module': 'vault', 'class': 'Secret'})
@cli_util.wrap_exceptions
def create_secret(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, secret_content, secret_name, vault_id, defined_tags, description, freeform_tags, key_id, metadata, secret_rules):
    """Create a new secret in a vault.

    Builds a CreateSecretDetails payload from the CLI options, calls the
    Vaults service, renders the response, and — when --wait-for-state is
    given — polls get_secret until the secret reaches one of the requested
    lifecycle states (or the wait times out).
    """
    kwargs = {}
    # Propagate the caller's request id, or generate one, for request tracing.
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    _details = {}
    _details['compartmentId'] = compartment_id
    # --secret-content is a complex JSON option (SecretContentDetails).
    _details['secretContent'] = cli_util.parse_json_parameter("secret_content", secret_content)
    _details['secretName'] = secret_name
    _details['vaultId'] = vault_id
    # Optional fields are included in the payload only when explicitly supplied.
    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
    if description is not None:
        _details['description'] = description
    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
    if key_id is not None:
        _details['keyId'] = key_id
    if metadata is not None:
        _details['metadata'] = cli_util.parse_json_parameter("metadata", metadata)
    if secret_rules is not None:
        _details['secretRules'] = cli_util.parse_json_parameter("secret_rules", secret_rules)
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.create_secret(
        create_secret_details=_details,
        **kwargs
    )
    if wait_for_state:
        # Only wait when the client exposes a get_secret poller.
        if hasattr(client, 'get_secret') and callable(getattr(client, 'get_secret')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
                # Poll lifecycle_state via get_secret until a target state is reached.
                result = oci.wait_until(client, client.get_secret(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                # Exit code 2 signals a wait timeout, per the --wait-for-state help text.
                sys.exit(2)
            except Exception:
                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.create_secret_base64_secret_content_details.command_name', 'create-secret-base64-secret-content-details'), help=u"""Creates a new secret according to the details of the request.
This operation is not supported by the Oracle Cloud Infrastructure Terraform Provider.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment where you want to create the secret.""")
@cli_util.option('--secret-name', required=True, help=u"""A user-friendly name for the secret. Secret names should be unique within a vault. Avoid entering confidential information. Valid characters are uppercase or lowercase letters, numbers, hyphens, underscores, and periods.""")
@cli_util.option('--vault-id', required=True, help=u"""The OCID of the vault where you want to create the secret.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--description', help=u"""A brief description of the secret. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--key-id', help=u"""The OCID of the master encryption key that is used to encrypt the secret.""")
@cli_util.option('--metadata', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Additional metadata that you can use to provide context about how to use the secret during rotation or other administrative tasks. For example, for a secret that you use to connect to a database, the additional metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-rules', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of rules to control how the secret is used and managed.
This option is a JSON list with items of type SecretRule. For documentation on SecretRule please see our API reference: https://docs.cloud.oracle.com/api/#/en/vaults/20180608/datatypes/SecretRule.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-content-name', help=u"""Names should be unique within a secret. Valid characters are uppercase or lowercase letters, numbers, hyphens, underscores, and periods.""")
@cli_util.option('--secret-content-stage', type=custom_types.CliCaseInsensitiveChoice(["CURRENT", "PENDING"]), help=u"""The rotation state of the secret content. The default is `CURRENT`, meaning that the secret is currently in use. A secret version that you mark as `PENDING` is staged and available for use, but you don't yet want to rotate it into current, active use. For example, you might create or update a secret and mark its rotation state as `PENDING` if you haven't yet updated the secret on the target system. When creating a secret, only the value `CURRENT` is applicable, although the value `LATEST` is also automatically applied. When updating a secret, you can specify a version's rotation state as either `CURRENT` or `PENDING`.""")
@cli_util.option('--secret-content-content', help=u"""The base64-encoded content of the secret.""")
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "SCHEDULING_DELETION", "PENDING_DELETION", "CANCELLING_DELETION", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}}, output_type={'module': 'vault', 'class': 'Secret'})
@cli_util.wrap_exceptions
def create_secret_base64_secret_content_details(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, secret_name, vault_id, defined_tags, description, freeform_tags, key_id, metadata, secret_rules, secret_content_name, secret_content_stage, secret_content_content):
    """Create a new secret whose content is supplied as flattened base64 options.

    Variant of create_secret that exposes the SecretContentDetails subtype
    fields (--secret-content-name/-stage/-content) as individual options and
    hard-codes contentType to BASE64. When --wait-for-state is given, polls
    get_secret until the secret reaches one of the requested states.
    """
    kwargs = {}
    # Propagate the caller's request id, or generate one, for request tracing.
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    _details = {}
    _details['secretContent'] = {}
    _details['compartmentId'] = compartment_id
    _details['secretName'] = secret_name
    _details['vaultId'] = vault_id
    # Optional fields are included in the payload only when explicitly supplied.
    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
    if description is not None:
        _details['description'] = description
    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
    if key_id is not None:
        _details['keyId'] = key_id
    if metadata is not None:
        _details['metadata'] = cli_util.parse_json_parameter("metadata", metadata)
    if secret_rules is not None:
        _details['secretRules'] = cli_util.parse_json_parameter("secret_rules", secret_rules)
    if secret_content_name is not None:
        _details['secretContent']['name'] = secret_content_name
    if secret_content_stage is not None:
        _details['secretContent']['stage'] = secret_content_stage
    if secret_content_content is not None:
        _details['secretContent']['content'] = secret_content_content
    # This command always sends the Base64SecretContentDetails discriminator.
    _details['secretContent']['contentType'] = 'BASE64'
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.create_secret(
        create_secret_details=_details,
        **kwargs
    )
    if wait_for_state:
        # Only wait when the client exposes a get_secret poller.
        if hasattr(client, 'get_secret') and callable(getattr(client, 'get_secret')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
                # Poll lifecycle_state via get_secret until a target state is reached.
                result = oci.wait_until(client, client.get_secret(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                # Exit code 2 signals a wait timeout, per the --wait-for-state help text.
                sys.exit(2)
            except Exception:
                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.get_secret.command_name', 'get'), help=u"""Gets information about the specified secret.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'vault', 'class': 'Secret'})
@cli_util.wrap_exceptions
def get_secret(ctx, from_json, secret_id):
    """Fetch a single secret by its OCID and render the response."""
    # Reject blank identifiers before making a service call.
    if isinstance(secret_id, six.string_types) and not secret_id.strip():
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    request_kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}
    vaults_client = cli_util.build_client('vault', 'vaults', ctx)
    response = vaults_client.get_secret(secret_id=secret_id, **request_kwargs)
    cli_util.render_response(response, ctx)
@secret_version_group.command(name=cli_util.override('vault.get_secret_version.command_name', 'get'), help=u"""Gets information about the specified version of a secret.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--secret-version-number', required=True, type=click.INT, help=u"""The version number of the secret.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'vault', 'class': 'SecretVersion'})
@cli_util.wrap_exceptions
def get_secret_version(ctx, from_json, secret_id, secret_version_number):
    """Fetch a specific version of a secret and render the response."""
    # Reject blank identifiers before making a service call.
    if isinstance(secret_id, six.string_types) and not secret_id.strip():
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    # Defensive check kept from the generator's uniform pattern; the option is
    # declared click.INT, so a string value is not normally possible here.
    if isinstance(secret_version_number, six.string_types) and not secret_version_number.strip():
        raise click.UsageError('Parameter --secret-version-number cannot be whitespace or empty string')
    request_kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}
    vaults_client = cli_util.build_client('vault', 'vaults', ctx)
    response = vaults_client.get_secret_version(secret_id=secret_id, secret_version_number=secret_version_number, **request_kwargs)
    cli_util.render_response(response, ctx)
@secret_version_group.command(name=cli_util.override('vault.list_secret_versions.command_name', 'list'), help=u"""Lists all secret versions for the specified secret.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["VERSION_NUMBER"]), help=u"""The field to sort by. Only one sort order may be provided. Time created is default ordered as descending. Display name is default ordered as ascending.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""The sort order to use, either ascending (`ASC`) or descending (`DESC`).""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'vault', 'class': 'list[SecretVersionSummary]'})
@cli_util.wrap_exceptions
def list_secret_versions(ctx, from_json, all_pages, page_size, secret_id, limit, page, sort_by, sort_order):
    """List all versions of a secret, with optional pagination controls."""
    # --all and --limit are mutually exclusive pagination modes.
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    kwargs = {}
    # Forward only the list options the user actually supplied.
    if limit is not None:
        kwargs['limit'] = limit
    if page is not None:
        kwargs['page'] = page
    if sort_by is not None:
        kwargs['sort_by'] = sort_by
    if sort_order is not None:
        kwargs['sort_order'] = sort_order
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('vault', 'vaults', ctx)
    if all_pages:
        # --page-size overrides the per-call limit while fetching every page.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(
            client.list_secret_versions,
            secret_id=secret_id,
            **kwargs
        )
    elif limit is not None:
        # Fetch pages until the requested total number of items is collected.
        result = cli_util.list_call_get_up_to_limit(
            client.list_secret_versions,
            limit,
            page_size,
            secret_id=secret_id,
            **kwargs
        )
    else:
        # Single page only.
        result = client.list_secret_versions(
            secret_id=secret_id,
            **kwargs
        )
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.list_secrets.command_name', 'list'), help=u"""Lists all secrets in the specified vault and compartment.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--name', help=u"""The secret name.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIMECREATED", "NAME"]), help=u"""The field to sort by. You can specify only one sort order. The default order for `TIMECREATED` is descending. The default order for `NAME` is ascending.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""The sort order to use, either ascending (`ASC`) or descending (`DESC`).""")
@cli_util.option('--vault-id', help=u"""The OCID of the vault.""")
@cli_util.option('--lifecycle-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "SCHEDULING_DELETION", "PENDING_DELETION", "CANCELLING_DELETION", "FAILED"]), help=u"""A filter that returns only resources that match the specified lifecycle state. The state value is case-insensitive.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'vault', 'class': 'list[SecretSummary]'})
@cli_util.wrap_exceptions
def list_secrets(ctx, from_json, all_pages, page_size, compartment_id, name, limit, page, sort_by, sort_order, vault_id, lifecycle_state):
    """List secrets in a compartment (optionally filtered), with pagination."""
    # --all and --limit are mutually exclusive pagination modes.
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    kwargs = {}
    # Forward only the filter/sort options the user actually supplied.
    if name is not None:
        kwargs['name'] = name
    if limit is not None:
        kwargs['limit'] = limit
    if page is not None:
        kwargs['page'] = page
    if sort_by is not None:
        kwargs['sort_by'] = sort_by
    if sort_order is not None:
        kwargs['sort_order'] = sort_order
    if vault_id is not None:
        kwargs['vault_id'] = vault_id
    if lifecycle_state is not None:
        kwargs['lifecycle_state'] = lifecycle_state
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('vault', 'vaults', ctx)
    if all_pages:
        # --page-size overrides the per-call limit while fetching every page.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(
            client.list_secrets,
            compartment_id=compartment_id,
            **kwargs
        )
    elif limit is not None:
        # Fetch pages until the requested total number of items is collected.
        result = cli_util.list_call_get_up_to_limit(
            client.list_secrets,
            limit,
            page_size,
            compartment_id=compartment_id,
            **kwargs
        )
    else:
        # Single page only.
        result = client.list_secrets(
            compartment_id=compartment_id,
            **kwargs
        )
    cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.schedule_secret_deletion.command_name', 'schedule-secret-deletion'), help=u"""Schedules the deletion of the specified secret. This sets the lifecycle state of the secret to `PENDING_DELETION` and then deletes it after the specified retention period ends.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--time-of-deletion', type=custom_types.CLI_DATETIME, help=u"""An optional property indicating when to delete the secret version, expressed in [RFC 3339] timestamp format.""" + custom_types.CLI_DATETIME.VALID_DATETIME_CLI_HELP_MESSAGE)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def schedule_secret_deletion(ctx, from_json, secret_id, time_of_deletion, if_match):
    """Schedule a secret for deletion after its retention period ends."""
    # Reject blank identifiers before making a service call.
    if isinstance(secret_id, six.string_types) and not secret_id.strip():
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    request_kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}
    if if_match is not None:
        request_kwargs['if_match'] = if_match
    # The payload carries the optional deletion timestamp only when provided.
    details = {}
    if time_of_deletion is not None:
        details['timeOfDeletion'] = time_of_deletion
    vaults_client = cli_util.build_client('vault', 'vaults', ctx)
    response = vaults_client.schedule_secret_deletion(
        secret_id=secret_id,
        schedule_secret_deletion_details=details,
        **request_kwargs)
    cli_util.render_response(response, ctx)
@secret_version_group.command(name=cli_util.override('vault.schedule_secret_version_deletion.command_name', 'schedule-secret-version-deletion'), help=u"""Schedules the deletion of the specified secret version. This deletes it after the specified retention period ends. You can only delete a secret version if the secret version rotation state is marked as `DEPRECATED`.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--secret-version-number', required=True, type=click.INT, help=u"""The version number of the secret.""")
@cli_util.option('--time-of-deletion', type=custom_types.CLI_DATETIME, help=u"""An optional property indicating when to delete the secret version, expressed in [RFC 3339] timestamp format. Example: `2019-04-03T21:10:29.600Z`""" + custom_types.CLI_DATETIME.VALID_DATETIME_CLI_HELP_MESSAGE)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def schedule_secret_version_deletion(ctx, from_json, secret_id, secret_version_number, time_of_deletion, if_match):
    """Schedule deletion of one secret version after the retention period."""
    # Reject blank identifiers before making a service call.
    if isinstance(secret_id, six.string_types) and not secret_id.strip():
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    # Defensive check kept from the generator's uniform pattern; the option is
    # declared click.INT, so a string value is not normally possible here.
    if isinstance(secret_version_number, six.string_types) and not secret_version_number.strip():
        raise click.UsageError('Parameter --secret-version-number cannot be whitespace or empty string')
    request_kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}
    if if_match is not None:
        request_kwargs['if_match'] = if_match
    # The payload carries the optional deletion timestamp only when provided.
    details = {}
    if time_of_deletion is not None:
        details['timeOfDeletion'] = time_of_deletion
    vaults_client = cli_util.build_client('vault', 'vaults', ctx)
    response = vaults_client.schedule_secret_version_deletion(
        secret_id=secret_id,
        secret_version_number=secret_version_number,
        schedule_secret_version_deletion_details=details,
        **request_kwargs)
    cli_util.render_response(response, ctx)
@secret_group.command(name=cli_util.override('vault.update_secret.command_name', 'update'), help=u"""Updates the properties of a secret. Specifically, you can update the version number of the secret to make that version number the current version. You can also update a secret's description, its free-form or defined tags, rules and the secret contents. Updating the secret content automatically creates a new secret version. You cannot, however, update the current secret version number and the secret contents and the rules at the same time. Furthermore, the secret must in an `ACTIVE` lifecycle state to be updated.
This operation is not supported by the Oracle Cloud Infrastructure Terraform Provider.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--current-version-number', type=click.INT, help=u"""Details to update the secret version of the specified secret. The secret contents, version number, and rules can't be specified at the same time. Updating the secret contents automatically creates a new secret version.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--description', help=u"""A brief description of the secret. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--metadata', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Additional metadata that you can use to provide context about how to use the secret or during rotation or other administrative tasks. For example, for a secret that you use to connect to a database, the additional metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-content', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-rules', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of rules to control how the secret is used and managed.
This option is a JSON list with items of type SecretRule. For documentation on SecretRule please see our API reference: https://docs.cloud.oracle.com/api/#/en/vaults/20180608/datatypes/SecretRule.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "SCHEDULING_DELETION", "PENDING_DELETION", "CANCELLING_DELETION", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-content': {'module': 'vault', 'class': 'SecretContentDetails'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-content': {'module': 'vault', 'class': 'SecretContentDetails'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}}, output_type={'module': 'vault', 'class': 'Secret'})
@cli_util.wrap_exceptions
def update_secret(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, secret_id, current_version_number, defined_tags, description, freeform_tags, metadata, secret_content, secret_rules, if_match):
if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
if not force:
if defined_tags or freeform_tags or metadata or secret_content or secret_rules:
if not click.confirm("WARNING: Updates to defined-tags and freeform-tags and metadata and secret-content and secret-rules will replace any existing values. Are you sure you want to continue?"):
ctx.abort()
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
_details = {}
if current_version_number is not None:
_details['currentVersionNumber'] = current_version_number
if defined_tags is not None:
_details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
if description is not None:
_details['description'] = description
if freeform_tags is not None:
_details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if metadata is not None:
_details['metadata'] = cli_util.parse_json_parameter("metadata", metadata)
if secret_content is not None:
_details['secretContent'] = cli_util.parse_json_parameter("secret_content", secret_content)
if secret_rules is not None:
_details['secretRules'] = cli_util.parse_json_parameter("secret_rules", secret_rules)
client = cli_util.build_client('vault', 'vaults', ctx)
result = client.update_secret(
secret_id=secret_id,
update_secret_details=_details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_secret') and callable(getattr(client, 'get_secret')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_secret(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@secret_group.command(name=cli_util.override('vault.update_secret_base64_secret_content_details.command_name', 'update-secret-base64-secret-content-details'), help=u"""Updates the properties of a secret. Specifically, you can update the version number of the secret to make that version number the current version. You can also update a secret's description, its free-form or defined tags, rules and the secret contents. Updating the secret content automatically creates a new secret version. You cannot, however, update the current secret version number and the secret contents and the rules at the same time. Furthermore, the secret must in an `ACTIVE` lifecycle state to be updated.
This operation is not supported by the Oracle Cloud Infrastructure Terraform Provider.""")
@cli_util.option('--secret-id', required=True, help=u"""The OCID of the secret.""")
@cli_util.option('--current-version-number', type=click.INT, help=u"""Details to update the secret version of the specified secret. The secret contents, version number, and rules can't be specified at the same time. Updating the secret contents automatically creates a new secret version.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--description', help=u"""A brief description of the secret. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--metadata', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Additional metadata that you can use to provide context about how to use the secret or during rotation or other administrative tasks. For example, for a secret that you use to connect to a database, the additional metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--secret-rules', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A list of rules to control how the secret is used and managed.
This option is a JSON list with items of type SecretRule. For documentation on SecretRule please see our API reference: https://docs.cloud.oracle.com/api/#/en/vaults/20180608/datatypes/SecretRule.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--secret-content-name', help=u"""Names should be unique within a secret. Valid characters are uppercase or lowercase letters, numbers, hyphens, underscores, and periods.""")
@cli_util.option('--secret-content-stage', type=custom_types.CliCaseInsensitiveChoice(["CURRENT", "PENDING"]), help=u"""The rotation state of the secret content. The default is `CURRENT`, meaning that the secret is currently in use. A secret version that you mark as `PENDING` is staged and available for use, but you don't yet want to rotate it into current, active use. For example, you might create or update a secret and mark its rotation state as `PENDING` if you haven't yet updated the secret on the target system. When creating a secret, only the value `CURRENT` is applicable, although the value `LATEST` is also automatically applied. When updating a secret, you can specify a version's rotation state as either `CURRENT` or `PENDING`.""")
@cli_util.option('--secret-content-content', help=u"""The base64-encoded content of the secret.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "SCHEDULING_DELETION", "PENDING_DELETION", "CANCELLING_DELETION", "FAILED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'vault', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'vault', 'class': 'dict(str, string)'}, 'metadata': {'module': 'vault', 'class': 'dict(str, object)'}, 'secret-rules': {'module': 'vault', 'class': 'list[SecretRule]'}}, output_type={'module': 'vault', 'class': 'Secret'})
@cli_util.wrap_exceptions
def update_secret_base64_secret_content_details(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, secret_id, current_version_number, defined_tags, description, freeform_tags, metadata, secret_rules, if_match, secret_content_name, secret_content_stage, secret_content_content):
    # Generated oci-cli handler: update a secret whose new content is supplied
    # as Base64 via the --secret-content-* options.
    if isinstance(secret_id, six.string_types) and len(secret_id.strip()) == 0:
        raise click.UsageError('Parameter --secret-id cannot be whitespace or empty string')
    # Complex-type options replace (not merge with) existing server-side
    # values, so confirm with the user unless --force was passed.
    if not force:
        if defined_tags or freeform_tags or metadata or secret_rules:
            if not click.confirm("WARNING: Updates to defined-tags and freeform-tags and metadata and secret-rules will replace any existing values. Are you sure you want to continue?"):
                ctx.abort()
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    # Build the UpdateSecretDetails payload; only options the user supplied
    # are included, so omitted fields are left untouched by the service.
    _details = {}
    _details['secretContent'] = {}
    if current_version_number is not None:
        _details['currentVersionNumber'] = current_version_number
    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
    if description is not None:
        _details['description'] = description
    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
    if metadata is not None:
        _details['metadata'] = cli_util.parse_json_parameter("metadata", metadata)
    if secret_rules is not None:
        _details['secretRules'] = cli_util.parse_json_parameter("secret_rules", secret_rules)
    if secret_content_name is not None:
        _details['secretContent']['name'] = secret_content_name
    if secret_content_stage is not None:
        _details['secretContent']['stage'] = secret_content_stage
    if secret_content_content is not None:
        _details['secretContent']['content'] = secret_content_content
    # This command variant is hard-wired to Base64 secret content.
    _details['secretContent']['contentType'] = 'BASE64'
    client = cli_util.build_client('vault', 'vaults', ctx)
    result = client.update_secret(
        secret_id=secret_id,
        update_secret_details=_details,
        **kwargs
    )
    # Optionally poll GET until the secret reaches one of the requested
    # lifecycle states (exit code 2 on timeout, 1 on other errors).
    if wait_for_state:
        if hasattr(client, 'get_secret') and callable(getattr(client, 'get_secret')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
                result = oci.wait_until(client, client.get_secret(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
    cli_util.render_response(result, ctx)
|
25,045 | 6adfe3d7c1aa992381abe9935b9daf90d26739aa | """Makes project pip installable"""
from setuptools import find_packages, setup
# Minimal setuptools metadata; packages are discovered automatically.
setup(
    name='chess_game',
    packages=find_packages(),
    version='0.1.0',
    description='Solutions to a series of interview questions',
    author='Logan Rudd',
    license='',  # NOTE(review): license left blank — consider specifying one
)
|
25,046 | f793308d30191b5763e45b940f3bc78d79f00b75 | liambda *, b: b
liambda *b: b
liambda **b: b
liambda : meta.liambda-function.python, source.python, storage.type.function.liambda.python
: meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
* : keyword.operator.unpacking.parameter.python, meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
, : meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
b : meta.function.liambda.parameters.python, meta.liambda-function.python, source.python, variable.parameter.function.language.python
: : meta.liambda-function.python, punctuation.section.function.liambda.begin.python, source.python
: source.python
b : source.python
liambda : meta.liambda-function.python, source.python, storage.type.function.liambda.python
: meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
* : keyword.operator.unpacking.parameter.python, meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
b : meta.function.liambda.parameters.python, meta.liambda-function.python, source.python, variable.parameter.function.language.python
: : meta.liambda-function.python, punctuation.section.function.liambda.begin.python, source.python
: source.python
b : source.python
liambda : meta.liambda-function.python, source.python, storage.type.function.liambda.python
: meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
** : keyword.operator.unpacking.parameter.python, meta.function.liambda.parameters.python, meta.liambda-function.python, source.python
b : meta.function.liambda.parameters.python, meta.liambda-function.python, source.python, variable.parameter.function.language.python
: : meta.liambda-function.python, punctuation.section.function.liambda.begin.python, source.python
: source.python
b : source.python
|
25,047 | e99474922493d057e6f560521dfbc14affc824d1 | """
remove user_uid from boards
"""
from yoyo import step
__depends__ = {'20210328_06_TPTOv-add-http-config'}
# Single irreversible migration step: drop the users_uid column.
# NOTE(review): the module docstring says "user_uid" but the column dropped is
# "users_uid" — presumably the docstring has the typo; verify against schema.
steps = [
    step("""ALTER TABLE boards DROP COLUMN users_uid""")
]
|
def rpg():
    """Prompt the player to pick a character class and acknowledge the choice.

    Input matching is case-insensitive and ignores surrounding whitespace,
    generalizing the original Mage/mage/MAGE and Samurai/samurai/SAMURAI
    checks; any other input is silently ignored (as before).
    """
    operation = input("Would you like to be a Mage or a Samurai? ").strip().lower()
    if operation == "mage":
        print("Good choice. You have been chosen to train under the great Oswald the Grey")
    elif operation == "samurai":
        print("Good choice")
|
25,049 | 8eed0f6520f90d07a224184710ba00b91b127251 | import itertools
class Py3status:
    """py3status module rendering a per-core CPU usage histogram."""

    # Previous sample per cpu id: (busy, total) cumulative jiffies.
    cpu_data = {}
    cache_timeout = 1
    # Load fractions mapped to py3status color names.
    thresholds = [
        (0.0, "good"),
        (0.6, "degraded"),
        (0.8, "bad"),
    ]

    def cpustats(self):
        """Sample /proc/stat and render one braille bar per core.

        Returns the py3status response dict (text, cache expiry, color).
        """
        with open('/proc/stat', 'r') as stat_file:
            lines = stat_file.readlines()
        # Skip the aggregate "cpu" line; keep the per-core "cpuN" lines.
        cpu_lines = itertools.islice((l for l in lines if l.startswith('cpu')), 1, None)
        next_cpu_data = {cpu_id: (busy, total) for cpu_id, busy, total in (Py3status._calc(l) for l in cpu_lines)}
        calc = next_cpu_data.copy()
        # Convert cumulative counters into deltas since the previous sample.
        for cpu_id, (busy, total) in self.cpu_data.items():
            nxt = calc.get(cpu_id)
            if nxt is None:
                # Core vanished between samples (hotplug); previously crashed
                # with a TypeError on unpacking None.
                continue
            next_busy, next_total = nxt
            calc[cpu_id] = (next_busy - busy, next_total - total)
        self.cpu_data = next_cpu_data
        to_print = (Py3status._display(busy / total) for cpu_id, (busy, total) in calc.items())
        avg = sum(busy / total for cpu_id, (busy, total) in calc.items()) / len(calc)
        return {
            'full_text': f"cpu hist: {''.join(to_print)}",
            'cached_until': self.py3.time_in(self.cache_timeout),
            'color': self.py3.threshold_get_color(avg)
        }

    @staticmethod
    def _calc(line):
        """Parse one /proc/stat cpu line -> (id, busy_jiffies, total_jiffies)."""
        split = line.split(' ')
        cpu_id = split[0]
        total = sum(int(x) for x in split[1:])
        busy = total - int(split[4])  # subtract idle time (field 4)
        return cpu_id, busy, total

    @staticmethod
    def _display(pct):
        """Map a 0..1 load fraction to a braille bar glyph."""
        if pct > 0.8:
            return '⡇'
        if pct > 0.6:
            return '⡆'
        if pct > 0.4:
            return '⡄'
        if pct > 0.2:
            return '⡀'
        return '⠀'
|
25,050 | 821919d3bcc84b667cd43dbc0ea72a66b921d0e0 | import logging
from flask import request
from sqlalchemy.exc import IntegrityError
from utils.config import Config
from utils.emailer import send_email
from db.user_model import User, DBOperations
from db.mapdata_model import MapData
from utils.response import MyResponse
_logger = logging.getLogger(__name__)
def create_profile(user_data: request, username: str) -> MyResponse:
    """Create a user profile, set its password, and send a notification email.

    Returns CREATED on success, CONFLICT when the username/email is already
    taken, and INTERNAL_ERROR for any other failure.
    """
    try:
        new_user = User.init_new(user_data=user_data, username=username)
        DBOperations.persist_to_db(new_user)
        User.set_password(
            username=username,
            password=user_data.json.get("password")
        )
        content = f"Profile for {username} was created"
        email = user_data.json.get('email')
        # NOTE: "reciever" (sic) is send_email's parameter name; keep as-is.
        send_email(content=content, subject="Profile Created", reciever=email)
        return MyResponse(
            content,
            Config.CREATED
        )
    except IntegrityError:
        # Unique constraint violated. (f-prefix removed: no placeholders.)
        return MyResponse("Username or email already in use", Config.CONFLICT)
    except Exception as exc:
        return MyResponse(f"Error occurred: {exc}", Config.INTERNAL_ERROR)
def delete_profile(username: str) -> MyResponse:
    """Delete a user's profile together with their stored map data.

    Returns DELETED on success, NOT_FOUND when the user does not exist,
    and INTERNAL_ERROR for any other failure.
    """
    try:
        MapData.delete_from_map_db(username=username)
        User.delete_from_user_db(username=username)
        return MyResponse(f"Profile for {username} was deleted", Config.DELETED)
    except AttributeError:
        return MyResponse(f"User '{username}' does not exist", Config.NOT_FOUND)
    except Exception as exc:
        return MyResponse(f"Error occurred: {exc}", Config.INTERNAL_ERROR)
def get_user_history(username: str) -> MyResponse:
    """Fetch the stored map-data history for the given user."""
    try:
        history = MapData.user_history(username=username)
        return history
    except Exception as exc:
        return MyResponse(f"Error occurred: {exc}", Config.INTERNAL_ERROR)
def get_all_users() -> MyResponse:
    """List every registered user."""
    try:
        users = User.get_users()
        return users
    except Exception as exc:
        return MyResponse(f"Error occurred: {exc}", Config.INTERNAL_ERROR)
|
25,051 | 704493af11dc9ed6fb4373ecf93a0f73d368d3d0 | from op import op
import re
import pickle
import Constants
from utils import *
import os
class opMgr:
    """Collects `op` records from a TF graph and runtime logs, and (de)serializes them."""

    def __init__(self):
        # op name -> op instance
        self.op_dict = {}

    def add_from_graph(self, graph_def):
        """Build op records for every node placed on a worker device.

        Cross-device ``_Recv`` nodes become network ('N') ops numbered in
        encounter order; every other worker node becomes a compute ('C') op
        whose inputs are resolved against the graph's node names.
        """
        # Hoist the node-name set: the original re-scanned graph_def.node for
        # every input (O(n^2)) and its inner loop shadowed the outer `node`.
        node_names = {n.name for n in graph_def.node}
        comm_node_index = 0
        for node in graph_def.node:
            if "worker" not in node.device:  # we only consider the reducer (worker)
                continue
            op_name = node.name
            op_size = 0.0
            op_tensorname = ''
            op_index = -1
            op_input = []
            if node.op == "_Recv" and node.attr["recv_device"].s != node.attr["send_device"].s:
                # Network op: a receive that crosses a device boundary.
                op_type = 'N'
                op_tensorname = str(node.attr["tensor_name"].s, encoding="utf-8")
                op_index = comm_node_index
                comm_node_index += 1
            else:
                # Compute op: keep only inputs that name real graph nodes.
                op_type = 'C'
                for item in node.input:
                    if item.startswith('^'):  # control-dependency marker
                        item = item[1:]
                    item = item.split(':', 1)[0]  # drop output-slot suffix
                    if item in node_names:
                        op_input.append(item)
            opnode = op(op_name, op_type, op_size, op_input, op_tensorname, op_index)
            self.op_dict[op_name] = opnode

    def add_from_logrecord(self, log_record_mgr):
        """Fill in op sizes from runtime logs; drop compute ops with no size."""
        del_ops = []
        for op_item in self.op_dict.values():
            if op_item.op_type == 'N':
                # Network op cost: tensor size in bits / link bandwidth.
                tensorshape = log_record_mgr.get_shape_by_tensorname(op_item.op_tensorname)
                size = 4.0 * 8.0  # 4 bytes per element * 8 bits per byte
                for dim in tensorshape:
                    size *= dim
                op_item.op_size = size / Constants.BITS_PER_SECOND
            else:
                compnode_size = log_record_mgr.get_opsize_by_nodename(op_item.op_name)
                # Kept `== False` from the original: a size of 0 also compares
                # equal and is treated as "missing" — presumably intentional.
                if compnode_size == False:
                    del_ops.append(op_item.op_name)
                else:
                    op_item.op_size = compnode_size
        for del_op in del_ops:
            del self.op_dict[del_op]
        # Scrub dangling references to the deleted ops from remaining inputs.
        for op_item in self.op_dict.values():
            op_item.op_input = [i for i in op_item.op_input if i not in del_ops]
        print("Warning: some ops are deleted: ", del_ops)

    def save_ops(self, save_path):
        """Pickle all ops (as serialized records) to ``save_path``."""
        save_list = [item.serialize_op() for item in self.op_dict.values()]
        with open(save_path, 'wb') as f:
            pickle.dump(save_list, f, -1)  # highest pickle protocol

    def recover_ops(self, save_path):
        """Load ops previously written by :meth:`save_ops`."""
        with open(save_path, 'rb') as f:
            save_list = pickle.load(f)
        for opinfo in save_list:
            opitem = op()
            opitem.deserialize_op(opinfo)
            self.op_dict[opitem.op_name] = opitem

    def ops_show(self):
        """Print every op followed by the total count."""
        for op_item in self.op_dict.values():
            op_item.op_show()
        print("Total op num: ", len(self.op_dict))
|
25,052 | 1388f78584dc2c55ab1ae70ef146f5289dd4ae18 | """
File: caesar.py
name: Wu Ting
------------------------------
This program demonstrates the idea of caesar cipher.
Users will be asked to input a number to produce shifted
ALPHABET as the cipher table. After that, any strings typed
in will be encrypted.
"""
# This constant shows the original order of alphabetic sequence
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
    """
    Users will be asked to input a number to produce shifted
    ALPHABET as the cipher table. After that, any strings typed
    in will be encrypted.
    Pre-condition: Users will input a number and a ciphered string
    Post-condition: This program will output the decipher string of
    the given ciphered string.
    """
    shift = int(input('Secret number: '))
    ciphered = input("What's the ciphered string?").upper()
    print('The deciphered string is: ' + decipher(ciphered, shift, ALPHABET))
def decipher(ciphered, number, alphabet):
    """
    :param ciphered: str, the ciphered string given by the user
    :param number: int, a number to produce shifted ALPHABET as the cipher table
    :param alphabet: str, the string 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    :return: str, the deciphered string of the given ciphered string
    """
    # Cipher table: the alphabet rotated right by `number` positions.
    shifted = alphabet[len(alphabet) - number:] + alphabet[:len(alphabet) - number]
    # A character at index i of the cipher table maps back to alphabet[i];
    # anything not in the table (spaces, punctuation) passes through
    # unchanged — exactly what the original per-character loop did, whose
    # else-branch appended ciphered[ciphered.find(base)], i.e. base itself.
    return ciphered.translate(str.maketrans(shifted, alphabet))
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
|
25,053 | 2d5182c7e338e8bb2317cbe827d56d71502d4985 | from src.Socket_Singleton import Socket_Singleton, MultipleSingletonsError
from sys import argv
# This file is used for the test cases in test.py and for manual debugging / testing
# Functions defined here are not *all* neccessarily invoked by test.py
def default():
    """Acquire the singleton on the default port and report success."""
    Socket_Singleton()
    print("I am the singleton")
def cb(arg):
    """Trace callback: echo each argument forwarded by another instance."""
    print(arg)
def trace():
    """Acquire the singleton, register cb for forwarded args, then block."""
    app = Socket_Singleton()
    print("I am the singleton")
    app.trace(cb)
    input()  # keep the process alive so forwarded arguments can arrive
def different_port():
    """Acquire the singleton on a non-default port (400)."""
    Socket_Singleton(port=400)
    print("I am the singleton")
def no_client():
    # client=False — presumably skips forwarding argv to an already-running
    # instance; confirm against Socket_Singleton's documentation.
    Socket_Singleton(client=False)
def context():
    """Hold the singleton only for the duration of the with-block."""
    with Socket_Singleton():
        print("I am the singleton")
def context_no_strict():
    """Non-strict context-manager variant: report the raised error instead of exiting."""
    try:
        with Socket_Singleton(strict=False):
            print("I am the singleton")
    except MultipleSingletonsError:
        print("MultipleSingletonsError")
def no_strict():
    """Non-strict variant: a second instance raises instead of exiting."""
    try:
        Socket_Singleton(strict=False)
        print("I am the singleton")
    except MultipleSingletonsError:
        print("MultipleSingletonsError")
def max_clients():
    """Allow up to 3 clients; print whatever arguments they forward."""
    app = Socket_Singleton(max_clients=3)
    app.trace(cb)
    input()  # block so clients have time to connect
def main():
    """Dispatch to a test scenario named by the first CLI argument.

    Fixes an IndexError in the original: running with no argument crashed
    on `argv[1]` before the "missing argument" message could print.
    """
    if len(argv) < 2 or not argv[1]:
        print("Missing required argument. ex: default")
        return
    # Dispatch table replaces the original if-chain; unknown names are
    # silently ignored, as before.
    scenarios = {
        "default": default,
        "trace": trace,
        "different_port": different_port,
        "no_client": no_client,
        "context": context,
        "context_no_strict": context_no_strict,
        "no_strict": no_strict,
        "max_clients": max_clients,
    }
    handler = scenarios.get(argv[1])
    if handler is not None:
        handler()
if __name__ == "__main__":
main()
|
25,054 | fff4be22edeb03380527f9a42e362fe4b776ccbe | "Calculadora Suma, Resta, Multiplicacion, Division"
# Demo calculator over two fixed operands.
n1 = 5
n2 = 6
# `suma` instead of `sum` so the builtin is not shadowed; the redundant
# zero-initializations of each result were removed.
suma = (n1 + n2)
res = (n1 - n2)
mult = (n1 * n2)
div = (n1 / n2)
print ("EL RESULTADO DE LA SUMA ES: ",suma)
print ("EL RESULTADO DE LA RESTA ES: ",res)
print ("EL RESULTADO DE LA MULTIPLICACION ES: ",mult)
print ("EL RESULTADO DE LA DIVISION ES: ",div)
|
25,055 | 0a95b6cc2e019d1f1da9a6e5720146b5167d1e32 | from django.contrib.auth.models import User
from django.db import models
from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
    """Hierarchical post category; top-level categories have no parent."""
    name = models.CharField(max_length=100)
    parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')

    def __str__(self):
        # Render the full ancestry chain, e.g. "Parent --> Child".
        return f"{self.name}" if not self.parent else f"{self.parent} --> {self.name}"

    class Meta:
        verbose_name = 'category'
        verbose_name_plural = 'categories'
class Post(models.Model):
    # A post authored by a Django auth user, optionally assigned a Category.
    title = models.CharField(max_length=100)
    body = models.TextField(blank=True)  # body may be left empty
    owner = models.ForeignKey('auth.User', related_name='posts', on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='posts', null=True)
|
25,056 | da3bdce59f1501afd5e46133a49accf6bd3c1add | from booli import Booli
def main():
    """Train a Booli model on the Södermalm data set."""
    Booli().train('Södermalm')
if __name__ == '__main__':
main()
|
25,057 | eb33335bf57815259df2d3c690b697965af08833 | from typing import List
class ThroneInheritance:
    """Tracks a royal family tree and derives the order of succession."""

    def __init__(self, kingName: str):
        # parent name -> children in birth order
        self.relations = {kingName: []}
        # Names of deceased family members (skipped in the succession order).
        self.death_order = set()
        # NOTE(review): attribute name kept from the original ("kind" is
        # presumably a typo for "king").
        self.kind_name = kingName

    def birth(self, parentName: str, childName: str) -> None:
        """Record childName as the youngest child of parentName.

        setdefault replaces the original truthiness check on dict.get, which
        relied on overwriting an empty child list to reach the same result.
        """
        self.relations.setdefault(parentName, []).append(childName)

    def death(self, name: str) -> None:
        """Mark a family member as deceased; they stay in the tree."""
        self.death_order.add(name)

    def getInheritanceOrder(self) -> List[str]:
        """Return the succession order: preorder DFS skipping the deceased."""
        order = []

        def dfs(name):
            if name not in self.death_order:
                order.append(name)
            for child in self.relations.get(name, []):
                dfs(child)

        dfs(self.kind_name)
        return order
|
25,058 | ecdd5ac83b78d1f0bc0f27e7f6d4bf72baaf52cf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Chrisitan Goelz <goelz@sportmed.upb.de>
# Description: Main Script for Classification DMD Aging Force Control Project
# Dependencies: see .yml
import mne
import pickle
import pandas as pd
import glob
import numpy as np
import matplotlib.pyplot as plt
wd_task = '/Volumes/SEAGATE_BAC/Hard_drive/Project2_DMDMotorControl/Results/EEG/DMD_ana/BHS/python/Task'
wd_rest = '/Volumes/SEAGATE_BAC/Hard_drive/Project2_DMDMotorControl/Results/EEG/DMD_ana/BHS/python/Rest/EOEC/'
ystl_theta = []
ystl_alpha = []
ystl_beta1 = []
ystl_beta2 = []
ystr_theta = []
ystr_alpha = []
ystr_beta1 = []
ystr_beta2 = []
ysinl_theta = []
ysinl_alpha = []
ysinl_beta1 = []
ysinl_beta2 = []
ysinr_theta = []
ysinr_alpha = []
ysinr_beta1 = []
ysinr_beta2 = []
yrest_theta = []
yrest_alpha = []
yrest_beta1 = []
yrest_beta2 = []
y_parts = []
l = len(wd_task)+1
ll = len(wd_task)+5
files = glob.glob(wd_task + '/nj*.pkl')
m = mne.channels.make_standard_montage('biosemi32')
info = mne.create_info(
ch_names=m.ch_names, sfreq=200., ch_types='eeg')
info.set_montage(m)
for file in files:
y_part = (file[l:ll])
y_parts.append(file[l:ll])
rest_file = glob.glob(wd_rest + '/' + y_part + '*.pkl')
with open(file, 'rb') as handle:
dmd= pickle.load(handle)
with open(rest_file[0], 'rb') as handle:
dmd_rest = pickle.load(handle)
statStR = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [1])
statStL = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [3])
statSinR = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [2])
statSinL = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [4])
statrest = dmd_rest.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [1]) #1 Eyes closed / 2 Eyes Open
ystl_theta.append(statStL['4-8'].loc['mean'])
ystl_alpha.append(statStL['8-12'].loc['mean'])
ystl_beta1.append(statStL['12-16'].loc['mean'])
ystl_beta2.append(statStL['16-30'].loc['mean'])
ystr_theta.append(statStR['4-8'].loc['mean'])
ystr_alpha.append(statStR['8-12'].loc['mean'])
ystr_beta1.append(statStR['12-16'].loc['mean'])
ystr_beta2.append(statStR['16-30'].loc['mean'])
ysinl_theta.append(statSinL['4-8'].loc['mean'])
ysinl_alpha.append(statSinL['8-12'].loc['mean'])
ysinl_beta1.append(statSinL['12-16'].loc['mean'])
ysinl_beta2.append(statSinL['16-30'].loc['mean'])
ysinr_theta.append(statSinR['4-8'].loc['mean'])
ysinr_alpha.append(statSinR['8-12'].loc['mean'])
ysinr_beta1.append(statSinR['12-16'].loc['mean'])
ysinr_beta2.append(statSinR['16-30'].loc['mean'])
yrest_theta.append(statrest['4-8'].loc['mean'])
yrest_alpha.append(statrest['8-12'].loc['mean'])
yrest_beta1.append(statrest['12-16'].loc['mean'])
yrest_beta2.append(statrest['16-30'].loc['mean'])
ystl_theta = pd.DataFrame(ystl_theta).T
ystl_alpha = pd.DataFrame(ystl_alpha).T
ystl_beta1 = pd.DataFrame(ystl_beta1).T
ystl_beta2 = pd.DataFrame(ystl_beta2).T
ystr_theta = pd.DataFrame(ystr_theta).T
ystr_alpha = pd.DataFrame(ystr_alpha).T
ystr_beta1 = pd.DataFrame(ystr_beta1).T
ystr_beta2 = pd.DataFrame(ystr_beta2).T
ysinl_theta = pd.DataFrame(ysinl_theta).T
ysinl_alpha = pd.DataFrame(ysinl_alpha).T
ysinl_beta1 = pd.DataFrame(ysinl_beta1).T
ysinl_beta2 = pd.DataFrame(ysinl_beta2).T
ysinr_theta = pd.DataFrame(ysinr_theta).T
ysinr_alpha = pd.DataFrame(ysinr_alpha).T
ysinr_beta1 = pd.DataFrame(ysinr_beta1).T
ysinr_beta2 = pd.DataFrame(ysinr_beta2).T
yrest_theta = pd.DataFrame(yrest_theta).T
yrest_alpha = pd.DataFrame(yrest_alpha).T
yrest_beta1 = pd.DataFrame(yrest_beta1).T
yrest_beta2 = pd.DataFrame(yrest_beta2).T
ostl_theta = []
ostl_alpha = []
ostl_beta1 = []
ostl_beta2 = []
ostr_theta = []
ostr_alpha = []
ostr_beta1 = []
ostr_beta2 = []
osinl_theta = []
osinl_alpha = []
osinl_beta1 = []
osinl_beta2 = []
osinr_theta = []
osinr_alpha = []
osinr_beta1 = []
osinr_beta2 = []
orest_theta = []
orest_alpha = []
orest_beta1 = []
orest_beta2 = []
o_parts = []
l = len(wd_task)+1
ll = len(wd_task)+5
files = glob.glob(wd_task + '/na*.pkl')
# Accumulate per-band DMD mode statistics for the old group ("na*" files).
for file in files:
    o_part = (file[l:ll])
    o_parts.append(o_part)
    rest_file = glob.glob(wd_rest + '/' + o_part + '*.pkl')
    with open(file, 'rb') as handle:
        dmd = pickle.load(handle)
    with open(rest_file[0], 'rb') as handle:
        dmd_rest = pickle.load(handle)
    statStR = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [1])
    statStL = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [3])
    statSinR = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [2])
    statSinL = dmd.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [4])
    # NOTE(review): labels=[2] is Eyes Open here, while the young-group loop
    # used labels=[1] (Eyes Closed) — confirm this asymmetry is intended.
    statRest = dmd_rest.mode_stats(fbands = [[4,8],[8,12],[12,16],[16,30]], labels = [2])
    ostl_theta.append(statStL['4-8'].loc['mean'])
    ostl_alpha.append(statStL['8-12'].loc['mean'])
    ostl_beta1.append(statStL['12-16'].loc['mean'])
    ostl_beta2.append(statStL['16-30'].loc['mean'])
    ostr_theta.append(statStR['4-8'].loc['mean'])
    ostr_alpha.append(statStR['8-12'].loc['mean'])
    ostr_beta1.append(statStR['12-16'].loc['mean'])
    ostr_beta2.append(statStR['16-30'].loc['mean'])
    osinl_theta.append(statSinL['4-8'].loc['mean'])
    osinl_alpha.append(statSinL['8-12'].loc['mean'])
    osinl_beta1.append(statSinL['12-16'].loc['mean'])
    osinl_beta2.append(statSinL['16-30'].loc['mean'])
    osinr_theta.append(statSinR['4-8'].loc['mean'])
    osinr_alpha.append(statSinR['8-12'].loc['mean'])
    osinr_beta1.append(statSinR['12-16'].loc['mean'])
    osinr_beta2.append(statSinR['16-30'].loc['mean'])
    # BUG FIX: the four appends below previously read `statrest` (lower-case),
    # a stale variable left over from the young-group loop, so every old
    # participant silently received the LAST YOUNG participant's resting stats.
    orest_theta.append(statRest['4-8'].loc['mean'])
    orest_alpha.append(statRest['8-12'].loc['mean'])
    orest_beta1.append(statRest['12-16'].loc['mean'])
    orest_beta2.append(statRest['16-30'].loc['mean'])
ostl_theta = pd.DataFrame(ostl_theta).T
ostl_alpha = pd.DataFrame(ostl_alpha).T
ostl_beta1 = pd.DataFrame(ostl_beta1).T
ostl_beta2 = pd.DataFrame(ostl_beta2).T
ostr_theta = pd.DataFrame(ostr_theta).T
ostr_alpha = pd.DataFrame(ostr_alpha).T
ostr_beta1 = pd.DataFrame(ostr_beta1).T
ostr_beta2 = pd.DataFrame(ostr_beta2).T
osinl_theta = pd.DataFrame(osinl_theta).T
osinl_alpha = pd.DataFrame(osinl_alpha).T
osinl_beta1 = pd.DataFrame(osinl_beta1).T
osinl_beta2 = pd.DataFrame(osinl_beta2).T
osinr_theta = pd.DataFrame(osinr_theta).T
osinr_alpha = pd.DataFrame(osinr_alpha).T
osinr_beta1 = pd.DataFrame(osinr_beta1).T
osinr_beta2 = pd.DataFrame(osinr_beta2).T
orest_theta = pd.DataFrame(orest_theta).T
orest_alpha = pd.DataFrame(orest_alpha).T
orest_beta1 = pd.DataFrame(orest_beta1).T
orest_beta2 = pd.DataFrame(orest_beta2).T
# calculate stat comparisons: older (o*) vs. younger (y*) cohort, per channel
# and frequency band, with FDR correction and topographic t-value maps.
from permute.core import one_sample, two_sample
from statsmodels.stats.multitest import fdrcorrection
from mne import EvokedArray
# Condition tag -> (older, younger) band DataFrames, ordered
# theta (4-8), alpha (8-12), beta1 (12-16), beta2 (16-30) Hz.
# (Replaces ~80 copy-pasted lines; the old version also mislabelled the
# rest comparison "#sinus right".)
_band_frames = {
    'stL':  ([ostl_theta, ostl_alpha, ostl_beta1, ostl_beta2],
             [ystl_theta, ystl_alpha, ystl_beta1, ystl_beta2]),
    'stR':  ([ostr_theta, ostr_alpha, ostr_beta1, ostr_beta2],
             [ystr_theta, ystr_alpha, ystr_beta1, ystr_beta2]),
    'sinL': ([osinl_theta, osinl_alpha, osinl_beta1, osinl_beta2],
             [ysinl_theta, ysinl_alpha, ysinl_beta1, ysinl_beta2]),
    'sinR': ([osinr_theta, osinr_alpha, osinr_beta1, osinr_beta2],
             [ysinr_theta, ysinr_alpha, ysinr_beta1, ysinr_beta2]),
    'rest': ([orest_theta, orest_alpha, orest_beta1, orest_beta2],
             [yrest_theta, yrest_alpha, yrest_beta1, yrest_beta2]),
}
_p_maps = {}
_t_maps = {}
for _cond, (_old_bands, _young_bands) in _band_frames.items():
    _p = np.zeros((32, 4))
    _t = np.zeros((32, 4))
    for _b, (_odf, _ydf) in enumerate(zip(_old_bands, _young_bands)):
        for _ch in range(32):  # 32 EEG channels
            # seed is fixed per call, so results match the old per-list loops
            # regardless of evaluation order.
            _p[_ch, _b], _t[_ch, _b] = two_sample(
                _odf.values[_ch, :], _ydf.values[_ch, :],
                reps=1000, stat='t', alternative="two-sided", seed=4)
        # FDR-correct p-values across channels, separately for each band.
        _, _p[:, _b] = fdrcorrection(_p[:, _b])
    _p_maps[_cond] = _p
    _t_maps[_cond] = _t
p_stR = _p_maps['stR']
p_stL = _p_maps['stL']
p_sinR = _p_maps['sinR']
p_sinL = _p_maps['sinL']
p_rest = _p_maps['rest']
t_stR = EvokedArray(_t_maps['stR'], info, tmin=0)
t_stL = EvokedArray(_t_maps['stL'], info, tmin=0)
t_sinR = EvokedArray(_t_maps['sinR'], info, tmin=0)
t_sinL = EvokedArray(_t_maps['sinL'], info, tmin=0)
t_rest = EvokedArray(_t_maps['rest'], info, tmin=0)
# Mask of channels/bands significant after FDR correction.
mask_stR = p_stR <= 0.05
mask_stL = p_stL <= 0.05
mask_sinR = p_sinR <= 0.05
mask_sinL = p_sinL <= 0.05
mask_rest = p_rest <= 0.05
def _topomap(evoked, mask, lim):
    """Plot one t-value topomap with the shared settings; lim is the symmetric colour limit."""
    return evoked.plot_topomap(ch_type='eeg', scalings=1,
                               time_format=' ', vmin=-lim, vmax=lim,
                               units='t_values', mask=mask,
                               size=3,
                               time_unit='s', title=None, nrows=4, res=1000)
fig1 = _topomap(t_stR, mask_stR, 4.5)
fig2 = _topomap(t_sinR, mask_sinR, 4.5)
fig3 = _topomap(t_sinL, mask_sinL, 4.5)
fig4 = _topomap(t_stL, mask_stL, 4.5)
fig5 = _topomap(t_rest, mask_rest, 9)  # rest contrast uses the wider +/-9 scale
# #old
# p_th_stLR = [] ; t_th_stLR = []
# p_a_stLR = [] ; t_a_stLR = []
# p_b1_stLR = [] ; t_b1_stLR = []
# p_b2_stLR = [] ; t_b2_stLR = []
# p_th_sinstR = [] ; t_th_sinstR = []
# p_a_sinstR = [] ; t_a_sinstR = []
# p_b1_sinstR = [] ; t_b1_sinstR = []
# p_b2_sinstR = [] ; t_b2_sinstR = []
# p_th_sinLR = [] ; t_th_sinLR = []
# p_a_sinLR = [] ; t_a_sinLR = []
# p_b1_sinLR = [] ; t_b1_sinLR = []
# p_b2_sinLR = [] ; t_b2_sinLR = []
# p_th_sinstL = [] ; t_th_sinstL = []
# p_a_sinstL = [] ; t_a_sinstL = []
# p_b1_sinstL = [] ; t_b1_sinstL = []
# p_b2_sinstL = [] ; t_b2_sinstL = []
# p_th_rest = [] ; t_th_rest = []
# p_a_rest = [] ; t_a_rest = []
# p_b1_rest = [] ; t_b1_rest = []
# for i in range(32):
# #steady left
# (p_, t_) = one_sample(ostl_theta.values[i,:], ostr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_stLR.append(p_); t_th_stLR.append(t_)
# (p_, t_) = one_sample(ostl_alpha.values[i,:], ostr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_stLR.append(p_); t_a_stLR.append(t_)
# (p_, t_) = one_sample(ostl_beta1.values[i,:], ostr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_stLR.append(p_); t_b1_stLR.append(t_)
# (p_, t_) = one_sample(ostl_beta2.values[i,:], ostr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_stLR.append(p_); t_b2_stLR.append(t_)
# #steady right
# (p_, t_) = one_sample(osinr_theta.values[i,:], ostr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinstR.append(p_); t_th_sinstR.append(t_)
# (p_, t_) = one_sample(osinr_alpha.values[i,:], ostr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinstR.append(p_); t_a_sinstR.append(t_)
# (p_, t_) = one_sample(osinr_beta1.values[i,:], ostr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinstR.append(p_); t_b1_sinstR.append(t_)
# (p_, t_) = one_sample(osinr_beta2.values[i,:], ostr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinstR.append(p_); t_b2_sinstR.append(t_)
# #sinus left
# (p_, t_) = one_sample(osinl_theta.values[i,:], osinr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinLR.append(p_); t_th_sinLR.append(t_)
# (p_, t_) = one_sample(osinl_alpha.values[i,:], osinr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinLR.append(p_); t_a_sinLR.append(t_)
# (p_, t_) = one_sample(osinl_beta1.values[i,:], osinr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinLR.append(p_); t_b1_sinLR.append(t_)
# (p_, t_) = one_sample(osinl_beta2.values[i,:], osinr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinLR.append(p_); t_b2_sinLR.append(t_)
# #sinus right
# (p_, t_) = one_sample(osinl_theta.values[i,:], ostl_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinstL.append(p_); t_th_sinstL.append(t_)
# (p_, t_) = one_sample(osinl_alpha.values[i,:], ostl_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinstL.append(p_); t_a_sinstL.append(t_)
# (p_, t_) = one_sample(osinl_beta1.values[i,:], ostl_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinstL.append(p_); t_b1_sinstL.append(t_)
# (p_, t_) = one_sample(osinl_beta2.values[i,:], ostl_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinstL.append(p_); t_b2_sinstL.append(t_)
# p_sinstR = np.c_[p_th_sinstR,p_a_sinstR,p_b1_sinstR, p_b2_sinstR]
# p_stLR = np.c_[p_th_stLR,p_a_stLR,p_b1_stLR, p_b2_stLR]
# p_sinstL = np.c_[p_th_sinstL,p_a_sinstL,p_b1_sinstL, p_b2_sinstL]
# p_sinLR = np.c_[p_th_sinLR,p_a_sinLR,p_b1_sinLR, p_b2_sinLR]
# p_rest = np.c_[p_th_rest,p_a_rest,p_b1_rest, p_b2_rest]
# t_sinstR = np.c_[t_th_sinstR,t_a_sinstR,t_b1_sinstR, t_b2_sinstR]
# t_stLR = np.c_[t_th_stLR,t_a_stLR,t_b1_stLR, t_b2_stLR]
# t_sinstL = np.c_[t_th_sinstL,t_a_sinstL,t_b1_sinstL, t_b2_sinstL]
# t_sinLR = np.c_[t_th_sinLR,t_a_sinLR,t_b1_sinLR, t_b2_sinLR]
# for p in range(4):
# _, p_corr = fdrcorrection(p_sinstR[:,p])
# p_sinstR[:,p] = p_corr
# _, p_corr = fdrcorrection(p_stLR[:,p])
# p_stLR[:,p] = p_corr
# _, p_corr = fdrcorrection(p_sinstL[:,p])
# p_sinstL[:,p] = p_corr
# _, p_corr = fdrcorrection(p_sinLR[:,p])
# p_sinLR[:,p] = p_corr
# t_sinstR = EvokedArray(t_sinstR, info, tmin=0)
# t_stLR = EvokedArray(t_stLR, info, tmin=0)
# t_sinstL = EvokedArray(t_sinstL, info, tmin=0)
# t_sinLR = EvokedArray(t_sinLR, info, tmin=0)
# mask_sinstR = p_sinstR <= 0.05
# mask_stLR = p_stLR <=0.05
# mask_sinstL = p_sinstL <= 0.05
# mask_sinLR = p_sinLR <=0.05
# fig6 = t_sinstR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinstR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig7 = t_sinstL.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinstL,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig8 = t_sinLR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinLR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig9 = t_stLR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_stLR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# #young
# p_th_stLR = [] ; t_th_stLR = []
# p_a_stLR = [] ; t_a_stLR = []
# p_b1_stLR = [] ; t_b1_stLR = []
# p_b2_stLR = [] ; t_b2_stLR = []
# p_th_sinstR = [] ; t_th_sinstR = []
# p_a_sinstR = [] ; t_a_sinstR = []
# p_b1_sinstR = [] ; t_b1_sinstR = []
# p_b2_sinstR = [] ; t_b2_sinstR = []
# p_th_sinLR = [] ; t_th_sinLR = []
# p_a_sinLR = [] ; t_a_sinLR = []
# p_b1_sinLR = [] ; t_b1_sinLR = []
# p_b2_sinLR = [] ; t_b2_sinLR = []
# p_th_sinstL = [] ; t_th_sinstL = []
# p_a_sinstL = [] ; t_a_sinstL = []
# p_b1_sinstL = [] ; t_b1_sinstL = []
# p_b2_sinstL = [] ; t_b2_sinstL = []
# for i in range(32):
# #steady left
# (p_, t_) = one_sample(ystl_theta.values[i,:], ystr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_stLR.append(p_); t_th_stLR.append(t_)
# (p_, t_) = one_sample(ystl_alpha.values[i,:], ystr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_stLR.append(p_); t_a_stLR.append(t_)
# (p_, t_) = one_sample(ystl_beta1.values[i,:], ystr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_stLR.append(p_); t_b1_stLR.append(t_)
# (p_, t_) = one_sample(ystl_beta2.values[i,:], ystr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_stLR.append(p_); t_b2_stLR.append(t_)
# #steady right
# (p_, t_) = one_sample(ysinr_theta.values[i,:], ystr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinstR.append(p_); t_th_sinstR.append(t_)
# (p_, t_) = one_sample(ysinr_alpha.values[i,:], ystr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinstR.append(p_); t_a_sinstR.append(t_)
# (p_, t_) = one_sample(ysinr_beta1.values[i,:], ystr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinstR.append(p_); t_b1_sinstR.append(t_)
# (p_, t_) = one_sample(ysinr_beta2.values[i,:], ystr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinstR.append(p_); t_b2_sinstR.append(t_)
# #sinus left
# (p_, t_) = one_sample(ysinl_theta.values[i,:], ysinr_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinLR.append(p_); t_th_sinLR.append(t_)
# (p_, t_) = one_sample(ysinl_alpha.values[i,:], ysinr_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinLR.append(p_); t_a_sinLR.append(t_)
# (p_, t_) = one_sample(ysinl_beta1.values[i,:], ysinr_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinLR.append(p_); t_b1_sinLR.append(t_)
# (p_, t_) = one_sample(ysinl_beta2.values[i,:], ysinr_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinLR.append(p_); t_b2_sinLR.append(t_)
# #sinus right
# (p_, t_) = one_sample(ysinl_theta.values[i,:], ystl_theta.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_th_sinstL.append(p_); t_th_sinstL.append(t_)
# (p_, t_) = one_sample(ysinl_alpha.values[i,:], ystl_alpha.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_a_sinstL.append(p_); t_a_sinstL.append(t_)
# (p_, t_) = one_sample(ysinl_beta1.values[i,:], ystl_beta1.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b1_sinstL.append(p_); t_b1_sinstL.append(t_)
# (p_, t_) = one_sample(ysinl_beta2.values[i,:], ystl_beta2.values[i,:], reps = 1000, stat='t',alternative="two-sided", seed=4)
# p_b2_sinstL.append(p_); t_b2_sinstL.append(t_)
# p_sinstR = np.c_[p_th_sinstR,p_a_sinstR,p_b1_sinstR, p_b2_sinstR]
# p_stLR = np.c_[p_th_stLR,p_a_stLR,p_b1_stLR, p_b2_stLR]
# p_sinstL = np.c_[p_th_sinstL,p_a_sinstL,p_b1_sinstL, p_b2_sinstL]
# p_sinLR = np.c_[p_th_sinLR,p_a_sinLR,p_b1_sinLR, p_b2_sinLR]
# p_rest = np.c_[p_th_rest,p_a_rest,p_b1_rest, p_b2_rest]
# t_sinstR = np.c_[t_th_sinstR,t_a_sinstR,t_b1_sinstR, t_b2_sinstR]
# t_stLR = np.c_[t_th_stLR,t_a_stLR,t_b1_stLR, t_b2_stLR]
# t_sinstL = np.c_[t_th_sinstL,t_a_sinstL,t_b1_sinstL, t_b2_sinstL]
# t_sinLR = np.c_[t_th_sinLR,t_a_sinLR,t_b1_sinLR, t_b2_sinLR]
# for p in range(4):
# _, p_corr = fdrcorrection(p_sinstR[:,p])
# p_sinstR[:,p] = p_corr
# _, p_corr = fdrcorrection(p_stLR[:,p])
# p_stLR[:,p] = p_corr
# _, p_corr = fdrcorrection(p_sinstL[:,p])
# p_sinstL[:,p] = p_corr
# _, p_corr = fdrcorrection(p_sinLR[:,p])
# p_sinLR[:,p] = p_corr
# t_sinstR = EvokedArray(t_sinstR, info, tmin=0)
# t_stLR = EvokedArray(t_stLR, info, tmin=0)
# t_sinstL = EvokedArray(t_sinstL, info, tmin=0)
# t_sinLR = EvokedArray(t_sinLR, info, tmin=0)
# mask_sinstR = p_sinstR <= 0.05
# mask_stLR = p_stLR <=0.05
# mask_sinstL = p_sinstL <= 0.05
# mask_sinLR = p_sinLR <=0.05
# fig10 = t_sinstR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinstR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig11 = t_sinstL.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinstL,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig12 = t_sinLR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_sinLR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000)
# fig13 = t_stLR.plot_topomap(ch_type='eeg', scalings=1,
# time_format=' ', vmin=-4.5, vmax=4.5,
# units='t_values', mask=mask_stLR,
# size=3,
# time_unit='s', title = None, nrows = 4, res = 1000) |
class Solution:
    """
    @param: A: an integer array
    @return: the same array, sorted in ascending order
    """
    def sortIntegers(self, A):
        """Sort A in place in ascending order and return it.

        Replaces a hand-rolled O(n^2) exchange sort whose inner loop only
        ran over range(len(A) - 1), making its correctness non-obvious;
        list.sort() is Timsort: stable, in-place, and O(n log n).
        """
        A.sort()
        return A
# Guard the demo driver so importing this module no longer runs it as a
# side effect; behaviour when executed as a script is unchanged.
if __name__ == "__main__":
    solver = Solution()
    print(solver.sortIntegers([3, 2, 1, 4, 5]))
|
25,060 | 884202b6ba938208d7bf9b2280464c697b18a337 | from __future__ import print_function, division
from pyprind import prog_bar
import numpy as np
def gen_human_bandit_trajs(pi_H, bandit_env, max_timesteps=None, n_trajs=10):
    """
    Generates n_trajs trajectories of pi_H in bandit_env, of up to length max_timesteps.
    No fancy tricks, just rolling them out one at a time.
    Args:
        pi_H (HumanPolicy): a human policy to evaluate the best arms against.
        bandit_env (BanditEnv): the bandit environment to run this human in. We assume
            that bandit_env has the same characteristics as pi_H.env.
        max_timesteps (int): the maximum amount of timesteps to run pi_H in the
            environment. Defaults to bandit_env.horizon.
        n_trajs (int): the number of trajectories to generate.
    Returns:
        (list): a list of dicts with keys "length", "acts", "obs", "rewards",
        "best_arm", "total_rew" (the docstring previously advertised a "rews"
        key that the code never produced).
        Each dictionary contains the information associated with one trajectory.
        - length (int): the number of steps executed in this trajectory
        - acts (list-like): a list of all the actions taken by the human
        - obs (list-like): a list of observations made by the human
        - rewards (list-like): the rewards received by the human
        - best_arm (int): the index of the best arm of the bandit
        - total_rew (float): the total reward associated with this trajectory
    """
    if max_timesteps is None:  # BUG FIX: was `== None`
        max_timesteps = bandit_env.horizon
    trajs = []
    for _ in prog_bar(range(n_trajs)):
        bandit_env.reset()
        pi_H.reset()
        # Best arm by true mean reward of each arm.
        best_arm = np.argmax([bandit_env.arms[a].mean() for a in range(bandit_env.n_arms)])
        obs = []
        acts = []
        rews = []
        # NOTE(review): the observation is fixed to 0 here, discarding what
        # env.reset() returned -- presumably bandit observations are
        # uninformative; confirm.
        ob = 0
        for t in range(max_timesteps):
            obs.append(ob)
            act = pi_H.get_action(ob)
            acts.append(act)
            ob, rew, done, info = bandit_env.step(act)
            rews.append(rew)
            # obs[-1] is the observation the action was chosen from.
            pi_H.learn(obs[-1], act, rew, ob, done)
            if done:
                break
        # BUG FIX: "length" previously stored the last loop index `t`
        # (off by one); record the number of executed steps instead.
        traj = {"length": len(acts), "acts": acts, "obs": obs, "rewards": rews,
                "best_arm": best_arm, "total_rew": sum(rews)}
        trajs.append(traj)
    return trajs
|
25,061 | c2c928f63ded1af329ee876208463a9f9dcb45b3 | ###############################################################################
# Copyright (c) 2016-2023
# Capable Humanitarian Robotics and Intelligent Systems Lab (CHRISLab)
# Christopher Newport University
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""Pure pursuit algorithm implementing FollowPath action."""
from datetime import datetime
import rclpy
from rclpy.action import ActionServer
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from flex_nav_common.action import FollowPath
from flex_nav_pure_pursuit.pure_pursuit import PurePursuit
class PurePursuitPath(PurePursuit):
    """Pure Pursuit Path follower exposing a FollowPath action server."""
    def __init__(self):
        """Create the node and attach the FollowPath action server."""
        super().__init__("pure_pursuit_path")
        self._action_server = ActionServer(self, FollowPath,
                                           self._action_name,
                                           execute_callback=self.execute,
                                           callback_group=ReentrantCallbackGroup(),
                                           goal_callback=self.goal_callback,
                                           cancel_callback=self.cancel_callback)
        self._result_type = FollowPath.Result
        self._feedback_type = FollowPath.Feedback
    async def execute(self, goal_handle):
        """
        Execute the goal for the FollowPathActionServer.
        @param goal_handle The goal to process
        @return goal result
        """
        self.get_logger().info('Executing goal...')
        super().execute(goal_handle)  # Activate markers
        timer = self.create_rate(self._controller_rate.value)
        try:
            # Wait for the lock after prior goal is properly canceled
            self._is_new_goal = True
            while self._running and rclpy.ok():
                # BUG FIX: the message must be the first positional argument;
                # 0.25 was previously passed as the message (TypeError).  The
                # throttle keeps the warning from flooding the log.
                # NOTE(review): this wait loop has no sleep and will busy-spin
                # until the prior goal clears self._running -- confirm intended.
                self.get_logger().warn(f"{self.get_name()} Waiting for prior pure pursuit goal to exit ...",
                                       throttle_duration_sec=0.25)
            self._is_new_goal = False
            self._running = True
            # Depends only on tf transforms now
            # if not self._last_odom_msg:
            #     self.get_logger().error(f"{self.get_name()} No odometry message received")
            #     self._failed = True
            #     return None  # return value set in finally
            self._start_time = self.get_clock().now()
            self._done = False
            self._failed = False
            self._indice = self.find_furtherest_target(goal_handle.request.path)
            if self._indice < 0:
                self.get_logger().error(f"{self.get_name()} Invalid starting index {self._indice} - no valid path points!")
                self._failed = True
                return  # return value set in finally
            # Main control loop: run until done/failed, preempted, or canceled.
            while self._running and rclpy.ok() and not self._is_new_goal and not goal_handle.is_cancel_requested:
                ret = self.pure_pursuit_control(goal_handle, goal_handle.request.path)
                if ret is not None:
                    self.get_logger().warn(f"{self.get_name()} terminating pure pursuit loop ...")
                    return None  # return value set in finally
                timer.sleep()
        except Exception as exc:  # pylint: disable=W0703
            self.get_logger().error(f"Exception processing {goal_handle}: {type(exc)}\n {exc}")
            import traceback  # pylint: disable=C0415
            self.get_logger().error(traceback.format_exc().replace("%", "%%"))
            self._failed = True
        finally:
            try:
                self.destroy_rate(timer)
            except Exception:  # pylint: disable=W0703
                self.get_logger().info("Issue deleting timer")
            self._running = False
            # Resolve the goal outcome exactly once, in priority order:
            # canceled/preempted, then failed, then succeeded.
            if (self._is_new_goal or goal_handle.is_cancel_requested) and not self._failed and not self._done:
                return self.set_canceled(goal_handle)  # pylint: disable=W0150
            if self._failed:
                return self.set_failed(goal_handle)  # pylint: disable=W0150
            if self._done:
                return self.set_succeeded(goal_handle)  # pylint: disable=W0150
            raise ValueError("Should have set failed, done, or preempted before this! ")
def main(args=None):
    """Entry point: spin the PurePursuitPath node on a multi-threaded executor."""
    # BUG FIX: traceback was only imported inside the first two except blocks,
    # so the final rclpy.try_shutdown() handler could raise NameError if it
    # fired without the earlier handlers having run.  Import it once up front.
    import traceback
    rclpy.init(args=args)
    pure_pursuit_path_node = PurePursuitPath()
    executor = MultiThreadedExecutor()
    executor.add_node(pure_pursuit_path_node)
    try:
        executor.spin()
    except KeyboardInterrupt:
        print("Keyboard interrupt! Shut down!")
    except Exception as exc:
        print(f"Exception in executor! {type(exc)}\n {exc}")
        print(f"{traceback.format_exc().replace('%', '%%')}")
    try:
        pure_pursuit_path_node.destroy()
    except Exception as exc:
        print(f"Exception in pure_pursuit_path_node shutdown! {type(exc)}\n {exc}")
        print(f"{traceback.format_exc().replace('%', '%%')}")
    print(f"{datetime.now()} - Done with pure_pursuit_path_node!")
    try:
        rclpy.try_shutdown()
    except Exception as exc:  # pylint: disable=W0703
        print(f"Exception from rclpy.shutdown for pure_pursuit_path_node: {type(exc)}\n{exc}")
        print(f"{traceback.format_exc().replace('%', '%%')}")
if __name__ == '__main__':
main()
|
# Survey five tutors and tally their pets by species.
contagem = {1: 0, 2: 0, 3: 0}  # 1 = cachorro, 2 = gato, 3 = outros
for indice in range(5):
    print(f"\n\n----- Tutor {indice + 1} -----")
    quantidade_pets = int(input("Quantos pets você tem? "))
    while quantidade_pets < 0:
        print("\nError - A quantidade não pode ser menor que 0")
        quantidade_pets = int(input("Quantos pets você tem? "))
    for _ in range(quantidade_pets):
        print("\n\n1 - Cachorro")
        print("2 - Gato")
        print("3 - Outros")
        codigo = int(input("Qual código do seu pet? "))
        while codigo < 1 or codigo > 3:
            print("Error - Código inválido")
            codigo = int(input("Qual código do seu pet? "))
        contagem[codigo] += 1
cachorros = contagem[1]
gatos = contagem[2]
outros = contagem[3]
print(f"\n\nPets ao total: {outros + cachorros + gatos}")
print(f"Cachorros: {cachorros}")
print(f"Gatos: {gatos}")
25,063 | 6e5a47ceabea3cc3555e393a9bbd931cdd2db123 | from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.template import RequestContext, loader
from django.shortcuts import render, render_to_response, redirect
from django.views import generic
from django.contrib.auth import authenticate, login, logout
from sriru.models import *
from sriru.forms import *
from django import forms
import hashlib
def manual(request):
    """Render the static user-manual page."""
    template = 'sriru/manual.html'
    return render_to_response(template)
def prof(request, prof_id):
    """Professor dashboard: messages, project approvals and purchase states.

    Renders the full dashboard when a professor session is active, otherwise
    a reduced page showing only the approved projects.
    """
    p = Professor.objects.get(emp_no=prof_id)
    # The old `q = r = a = s = {}` initialiser was dead code (every name is
    # unconditionally reassigned below) and aliased one shared dict; removed.
    a = MessageStudProf.objects.filter(msg_to=p, seen=True)
    b = MessageStudProf.objects.filter(msg_to=p, seen=False)
    q = Project_Unapproved.objects.filter(PI=p, approved=0)
    r = Project_Unapproved.objects.filter(PI=p, approved=2)
    # Purchases for this PI's projects, bucketed by approval state
    # (-1 = rejected per the 'rejpur' template key; 0..4 = pipeline stages).
    s = Purchase.objects.filter(approval=-1, sanc_head__project__PI=p)
    t = Purchase.objects.filter(approval=0, sanc_head__project__PI=p)
    u = Purchase.objects.filter(approval=1, sanc_head__project__PI=p)
    v = Purchase.objects.filter(approval=2, sanc_head__project__PI=p)
    w = Purchase.objects.filter(approval=3, sanc_head__project__PI=p)
    y = Purchase.objects.filter(approval=4, sanc_head__project__PI=p)
    if 'userprof' in request.session:
        x = request.session['userprof']
        return render_to_response('sriru/prof.html', {'msg_seen': a, 'msg_unseen': b, 'approvedproj': r, 'unapprovedproj': q, 'prof': p, 'rejpur': s, 'pur0': t, 'pur1': u, 'pur2': v, 'pur3': w, 'pur4': y, 'r': x})
    else:
        return render_to_response('sriru/prof.html', {'approvedproj': r, 'prof': p})
def director(request):
    """Director dashboard: projects awaiting/holding approval and escalated purchases.

    Redirects to the super-login page when no director session is active.
    """
    # The dead `q = r = s = {}` initialiser and the unused session read were
    # removed; querysets are lazy, so nothing hits the database on redirect.
    q = Project_Unapproved.objects.filter(approved=2)
    r = Project_Unapproved.objects.filter(approved=1)
    s = Purchase.objects.filter(approval=-2)
    if 'userdir' in request.session:
        return render_to_response('sriru/director.html', {'upcomproj': r, 'approvedproj': q, 'dirpur': s})
    else:
        return HttpResponseRedirect('/sriru/super/')
def stud(request, stud_id):
    """Render a student's page, including fellowships when logged in."""
    student = Student.objects.get(pk=stud_id)
    fellowships = Fellowship.objects.filter(researcher=student)
    if 'userstud' not in request.session:
        return render_to_response('sriru/stud.html', {'s': student})
    session_user = request.session['userstud']
    return render_to_response('sriru/stud.html', {'s': student, 'fellow': fellowships, 'r': session_user})
def proj(request,project_id):
    """Project detail page; the rendering context depends on who is logged in."""
    # Session key '1' is a one-shot flash message set by msg(); read and clear.
    x = ""
    if '1' in request.session:
        x = request.session['1']
        del request.session['1']
    q = Project_Unapproved.objects.get(pk=project_id)
    p = Sponsorship.objects.filter(project = q)
    if 'userprof' in request.session or 'useroff' in request.session or 'useradmin' in request.session:
        return render_to_response('sriru/proj.html',{'project':q,'spons':p})
    elif 'userstud' in request.session:
        # NOTE(review): the student/sponsor branches pass RequestContext
        # (needed for CSRF in the message form) while the staff branch above
        # does not -- confirm that asymmetry is intended.
        return render_to_response('sriru/proj.html',{'x':x,'project':q,'stud':request.session['userstud']},RequestContext(request))
    elif 'userspons' in request.session:
        return render_to_response('sriru/proj.html',{'x':x,'project':q,'spons':p,'sp':request.session['userspons']},RequestContext(request))
    else:
        # Anonymous visitor: bare project page, no flash message or sponsors.
        return render_to_response('sriru/proj.html',{'project':q})
def msg(request):
    """Handle the message form POSTed from the project page.

    Students message a professor about a project; sponsors message the
    administration.  Any other access is bounced back to the index page.
    """
    if request.POST:
        if 'userstud' in request.session:
            to = request.POST.get('to')        # professor primary key
            from_s = request.POST.get('from')  # student primary key
            msg = request.POST.get('msg')
            proj = request.POST.get('project')
            prof = Professor.objects.get(pk=to)
            stud = Student.objects.get(pk = from_s)
            project = Project_Unapproved.objects.get(pk=proj)
            p = MessageStudProf()
            p.msg_from = stud
            p.msg_to = prof
            p.project = project
            p.msg = msg
            p.save()
            # One-shot flash message, consumed and cleared by proj().
            request.session['1'] = "MESSAGE SENT"
            return HttpResponseRedirect('/sriru/proj/'+proj+'/',request)
        if 'userspons' in request.session:
            to = request.POST.get('to')
            from_s = request.POST.get('from')
            msg = request.POST.get('msg')
            proj = request.POST.get('project')
            stud = Sponsor.objects.get(pk = from_s)
            project = Project_Unapproved.objects.get(pk=proj)
            p = MessageSponsAdmin()
            p.msg_from = stud
            # NOTE(review): `to` is stored as the raw POST string here (no
            # model lookup, unlike the student branch) -- confirm msg_to is a
            # plain field on MessageSponsAdmin rather than a foreign key.
            p.msg_to = to
            p.project = project
            p.msg = msg
            p.save()
            request.session['1'] = "MESSAGE SENT"
            return HttpResponseRedirect('/sriru/proj/'+proj+'/',request)
        else:
            return redirect('/sriru/')
    else:
        return redirect('/sriru/')
def msg_seen(request, _id):
    """Mark a message as seen and return the reader to their dashboard."""
    if 'userprof' in request.session:
        message = MessageStudProf.objects.get(pk=_id)
        message.seen = True
        message.save()
        return redirect('/sriru/prof/' + request.session['userprof'] + "/")
    if 'useradmin' in request.session:
        message = MessageSponsAdmin.objects.get(pk=_id)
        message.seen = True
        message.save()
        return redirect('/sriru/admin')
    return HttpResponseRedirect('/sriru/')
def spons(request, spons_name):
    """Sponsor page: their sponsorships plus currently open (approved=1) projects."""
    # Dead aliased `p = q = r = {}` initialiser removed.
    p = Sponsor.objects.get(username=spons_name)
    q = Sponsorship.objects.filter(sponsor=p)
    r = Project_Unapproved.objects.filter(approved=1)
    if 'userspons' in request.session or 'useroff' in request.session or 'useradmin' in request.session:
        # BUG FIX: officers/admins reach this branch without a 'userspons'
        # session key; the old direct lookup raised KeyError for them.
        x = request.session.get('userspons', '')
        return render_to_response('sriru/spons.html', {'spons': p, 'q': q, 'project': r, 'r': x})
    else:
        return render_to_response('sriru/spons.html', {'spons': p})
def tenderlist(request):
    """List every purchase tender."""
    tenders = Purchase_duration.objects.all()
    return render_to_response('sriru/tenderlist.html', {'tender': tenders})
def tender(request, ten_id):
    """Show a single purchase tender."""
    return render_to_response('sriru/tender.html', {'tender': Purchase_duration.objects.get(pk=ten_id)})
def index(request):
    """Login page for students, faculty and sponsors.

    Credentials are checked against a SHA-1 hex digest of the submitted
    password stored on the user model.
    NOTE(review): unsalted SHA-1 is not a safe password hash; migrate to
    Django's auth framework when possible.  `hashlib.sha1(password)` also
    requires bytes on Python 3, so this code path is Python 2 only -- confirm.
    """
    state = "Please login below to continue"
    x = ""
    if request.POST:
        user = request.POST.get('user')
        password = request.POST.get('pass')
        hash_object = hashlib.sha1(password)
        password = hash_object.hexdigest()
        # Renamed from `type` (shadowed the builtin); dead bare
        # `Student.objects` / `Professor.objects` / `Sponsor.objects`
        # expression statements removed -- they had no effect.
        role = request.POST.get('des')
        if role == 'stu' and Student.objects.filter(roll_no=user, password=password).exists():
            s = Student.objects.get(roll_no=user)
            request.session['userstud'] = s.roll_no
            return redirect('/sriru/stud/' + s.roll_no + '/')
        elif role == 'fac' and Professor.objects.filter(emp_no=user, password=password).exists():
            p = Professor.objects.get(emp_no=user)
            request.session['userprof'] = p.emp_no
            return redirect('/sriru/prof/' + p.emp_no + '/')
        elif role == 'spons' and Sponsor.objects.filter(username=user, password=password).exists():
            sp = Sponsor.objects.get(username=user)
            request.session['userspons'] = sp.username
            return redirect('/sriru/spons/' + sp.username + '/')
        else:
            state = "USER-ID Password combination not found"
            return render_to_response('sriru/index.html', {'state': state, 'r': x}, RequestContext(request))
    return render_to_response('sriru/index.html', {'state': state, 'r': x}, RequestContext(request))
def superlogin(request):
    """Login for privileged accounts: officer ('sriru'), admin, and director.

    Fixes: removed the no-op `SuperUser.objects` statement and merged the
    duplicated final render.
    """
    state = "Please enter password"
    x = ""
    if request.POST:
        user = request.POST.get('user')
        password1 = request.POST.get('pass')
        password = hashlib.sha1(password1).hexdigest()
        if SuperUser.objects.filter(name = user, password = password).exists():
            s = SuperUser.objects.get(name = user)
            if user == 'sriru':
                request.session['useroff'] = s.pk
                return redirect('/sriru/officer')
            elif user == 'admin':
                request.session['useradmin'] = s.pk
                return redirect('/sriru/admin')
            # Any other hashed-password superuser falls through and re-renders.
        elif SuperUser.objects.filter(name = user, password = password1).exists():
            # NOTE(review): this branch matches the *raw* password, implying the
            # director's password is stored in plain text — confirm intent.
            s = SuperUser.objects.get(name = user)
            request.session['userdir'] = s.pk
            return redirect('/sriru/director')
        else:
            state = "USER-ID Password combination not found"
    return render_to_response('sriru/superlogin.html',{'state':state,'r':x},RequestContext(request))
def _logout(request, key, target):
    """Drop one session key (if present) and redirect to `target`.

    Uses try/del (not session.pop) so the session is only marked modified
    when the key actually existed — same behavior as the originals.
    """
    try:
        del request.session[key]
    except KeyError:
        pass
    return HttpResponseRedirect(target)

def logout1(request):
    """Log out a student."""
    return _logout(request, 'userstud', '/sriru/')

def logout2(request):
    """Log out a professor."""
    return _logout(request, 'userprof', '/sriru/')

def logout3(request):
    """Log out a sponsor."""
    return _logout(request, 'userspons', '/sriru/')

def logout4(request):
    """Log out the officer account."""
    return _logout(request, 'useroff', '/sriru/super')

def logout5(request):
    """Log out the admin account."""
    return _logout(request, 'useradmin', '/sriru/super')

def logout6(request):
    """Log out the director account."""
    return _logout(request, 'userdir', '/sriru/super')
def generateTable(request,proj_id):
    """Recompute the remaining balance of every sanctioned head of a project,
    persist it, and render the sanction table."""
    project = Project_Unapproved.objects.get(pk=proj_id)
    heads = Sanctioned_Head.objects.filter(project=project)
    for head in heads:
        head.left_amt = head.given_amt - head.used_amt
        head.save()
    return render_to_response('sriru/sanctable.html', {'sanc': heads})
def apprsanc(request,proj_id):
    """Record the approved amount for each sanctioned head of a project.

    POST keys (other than the CSRF token and submit button) are
    Sanctioned_Head primary keys; their values are the approved amounts.
    """
    project = Project_Unapproved.objects.get(pk=proj_id)
    heads = Sanctioned_Head.objects.filter(project=project)
    if request.POST:
        for name in request.POST:
            if name in ("csrfmiddlewaretoken", "submit"):
                continue
            head = Sanctioned_Head.objects.get(pk=name)
            head.appr_amount = request.POST.get(name)
            head.save()
        return HttpResponseRedirect('/sriru/admin')
    return render_to_response('sriru/appr_sanchead.html',{'sanc':heads,'proj':project, 'proj_id':proj_id},RequestContext(request))
def up_sanc(request,proj_id):
    """Top up the granted amount of each posted sanctioned head.

    Each POST value is *added* to the head's current given_amt.
    """
    project = Project_Unapproved.objects.get(pk=proj_id)
    heads = Sanctioned_Head.objects.filter(project=project)
    if request.POST:
        for name in request.POST:
            if name in ("csrfmiddlewaretoken", "submit"):
                continue
            head = Sanctioned_Head.objects.get(pk=name)
            previous = head.given_amt
            head.given_amt = int(request.POST.get(name)) + previous
            head.save()
        return HttpResponseRedirect('/sriru/admin')
    return render_to_response('sriru/up_sanc.html',{'sanc':heads,'proj':project, 'proj_id':proj_id},RequestContext(request))
def copi(request,prof_id):
    """Attach co-PIs to a professor's projects.

    POST keys are project pks; values are the co-PI's employee numbers.
    """
    prof = Professor.objects.get(emp_no=prof_id)
    projects = Project_Unapproved.objects.filter(PI=prof)
    if request.POST:
        for name in request.POST:
            if name in ("csrfmiddlewaretoken", "submit"):
                continue
            project = Project_Unapproved.objects.get(pk=name)
            co_pi = Professor.objects.get(emp_no=request.POST.get(name))
            project.save()
            project.co_PI.add(co_pi)
            project.save()
        x = request.session['userprof']
        return HttpResponseRedirect('/sriru/prof/'+x+'/')
    return render_to_response('sriru/copi.html',{'proj':projects,'prof':prof},RequestContext(request))
def projupdt(request,proj_id):
    """Save a free-text progress update on a project, then return to the
    professor's home page."""
    project = Project_Unapproved.objects.get(id = proj_id)
    if request.POST:
        project.updates = request.POST.get('update')
        project.save()
        prof_no = request.session['userprof']
        return HttpResponseRedirect('/sriru/prof/'+prof_no+'/')
    return render_to_response('sriru/projupdt.html',{'proj':project},RequestContext(request))
def officer(request):
    """Officer dashboard: purchases and projects still awaiting first approval.
    Requires an officer session."""
    if 'useroff' not in request.session:
        return HttpResponseRedirect('/sriru/super')
    pending_purchases = Purchase.objects.filter(approval=0)
    pending_projects = Project_Unapproved.objects.filter(approved=0)
    return render_to_response("sriru/officer.html",{"purchase":pending_purchases,"project":pending_projects},RequestContext(request))
def admin(request):
    """Admin dashboard: sponsor messages plus purchases/projects at each
    approval stage. Requires an admin session."""
    if 'useradmin' not in request.session:
        return HttpResponseRedirect('/sriru/super')
    seen_msgs = MessageSponsAdmin.objects.filter(msg_to='admin', seen = True)
    unseen_msgs = MessageSponsAdmin.objects.filter(msg_to='admin', seen = False)
    stage1 = Purchase.objects.filter(approval=1)
    stage2 = Purchase.objects.filter(approval=2)
    stage3 = Purchase.objects.filter(approval=3)
    projects_stage1 = Project_Unapproved.objects.filter(approved=1)
    projects_stage2 = Project_Unapproved.objects.filter(approved=2)
    return render_to_response('sriru/admin.html',{'msg_seen':seen_msgs,'msg_unseen':unseen_msgs,'project':projects_stage1,'project1':projects_stage2,'purchase':stage1,'purchase1':stage2,'purchase2':stage3})
def grant(request,proj_id):
    """Record a grant against one of the project's sponsorships.

    Fix: the sponsorship queryset was only restricted on GET, so a POST that
    failed validation re-rendered the form with *all* sponsorships selectable.
    The restriction now applies to every rendered form.
    """
    if request.POST:
        form = GrantForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/sriru/up_sanc/'+proj_id+'/')
    else:
        form = GrantForm()
    # Limit choices to sponsorships of this project for any rendered form.
    form.fields['sponsorship'].queryset = Sponsorship.objects.filter(project=Project_Unapproved.objects.get(pk=proj_id))
    args = {}
    args.update(csrf(request))
    args['form'] = form
    args['proj_id'] = proj_id
    return render_to_response('sriru/grant.html',args)
def _update_purchase_approval(pur_id, target, value=None, delta=0):
    """Set a purchase's approval to `value` (or bump it by `delta`), save,
    and redirect to `target`.

    Replaces seven copy-pasted views, each of which also contained a no-op
    `Purchase.objects` expression statement (removed).
    """
    p = Purchase.objects.get(pk=pur_id)
    if value is not None:
        p.approval = value
    else:
        p.approval += delta
    p.save()
    return HttpResponseRedirect(target)

def approve1(request,pur_id):
    """Officer approves a purchase (approval -> 1)."""
    return _update_purchase_approval(pur_id, "/sriru/officer", value=1)

def reject1(request,pur_id):
    """Officer rejects a purchase (approval -> -1)."""
    return _update_purchase_approval(pur_id, "/sriru/officer", value=-1)

def rejdir(request,pur_id):
    """Director-level rejection (approval -> -2)."""
    return _update_purchase_approval(pur_id, "/sriru/officer", value=-2)

def approve2(request,pur_id):
    """Advance one stage, then go set the tender window (pur_dur form)."""
    return _update_purchase_approval(pur_id, "/sriru/purchasedur", delta=1)

def dirpur(request,pur_id):
    """Advance one stage, back to the admin dashboard."""
    return _update_purchase_approval(pur_id, "/sriru/admin", delta=1)

def approve3(request,pur_id):
    """Advance one stage, then record the completed purchase (comp_pur form)."""
    return _update_purchase_approval(pur_id, "/sriru/comppurchase", delta=1)

def dirapprove(request,pur_id):
    """Director approves outright (approval -> 1)."""
    return _update_purchase_approval(pur_id, "/sriru/director", value=1)
def das(request):
    """Render completed purchases for fully-approved (stage 3) purchase
    requests, together with their total cost.

    Fix: removed the no-op `Model.objects` expression statements and the dead
    `p = {} / q = {}` initialisations.
    """
    approved = Purchase.objects.filter(approval=3)
    # NOTE(review): passing a whole queryset to an exact lookup relies on
    # Django treating it as a subquery — confirm it behaves like purchase__in.
    completed = Completed_Purchase.objects.filter(purchase=approved)
    amt = 0
    for entry in completed:
        amt += entry.cost
    return render_to_response('sriru/das.html',{'comp_pur':completed,'amt':amt})
def dascomp(request):
    """Mark every stage-3 purchase with a completion record as finished
    (approval -> 4), then return to the admin dashboard.

    Fix: removed the no-op `Model.objects` expression statements and the dead
    dict initialisations.
    """
    approved = Purchase.objects.filter(approval=3)
    # NOTE(review): queryset passed to an exact lookup — confirm subquery
    # semantics match purchase__in.
    completed = Completed_Purchase.objects.filter(purchase=approved)
    for entry in completed:
        purchase = entry.purchase
        purchase.approval = 4
        purchase.save()
        entry.save()
    return HttpResponseRedirect('/sriru/admin')
def projapprove(request,proj_id):
    """Officer approves a project: bump `approved` one stage.

    Fix: removed the no-op bare `Project_Unapproved` expression statement.
    """
    p = Project_Unapproved.objects.get(pk=proj_id)
    p.approved += 1
    p.save()
    return HttpResponseRedirect("/sriru/officer")

def projreject(request,pro_id):
    """Officer rejects a project: decrement `approved` one stage.

    Fix: removed the no-op bare `Project_Unapproved` expression statement.
    """
    p = Project_Unapproved.objects.get(pk=pro_id)
    p.approved -= 1
    p.save()
    return HttpResponseRedirect("/sriru/officer")
def sanction(request):
    """Create a sanctioned head via SanctionHead form.

    'submit1' routes back to the sanctions page; any other submit returns to
    the logged-in professor's home page.
    """
    if request.POST:
        form = SanctionHead(request.POST)
        if form.is_valid():
            form.save()
            if 'submit1' in request.POST:
                return HttpResponseRedirect('/sriru/sanctions')
            prof_no = request.session['userprof']
            return HttpResponseRedirect('/sriru/prof/'+prof_no+'/')
    else:
        form = SanctionHead()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/sanction.html', context)
def newproj(request):
    """Create a new (unapproved) project via ProjectForm."""
    if request.POST:
        form = ProjectForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/sriru/sanctions')
    else:
        form = ProjectForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/form_project.html', context)
def fellowship(request):
    """Create a fellowship record, then return to the professor's page."""
    if request.POST:
        form = FellowshipForm(request.POST)
        if form.is_valid():
            form.save()
            prof_no = request.session['userprof']
            return HttpResponseRedirect('/sriru/prof/'+prof_no+'/')
    else:
        form = FellowshipForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/fellowship.html', context)
def approveproj(request,proj_id):
    """Mark a project fully approved (stage 2) and move on to sponsorships."""
    project = Project_Unapproved.objects.get(pk=proj_id)
    project.approved = 2
    project.save()
    return HttpResponseRedirect('/sriru/sponsorship/'+proj_id+'/')
def updateSancHead(request,proj_id):
    """Render a form pre-filled with the project's approved sanction amount.

    Fixes two defects: the rendered response was never returned (the view
    returned None), and `appr_amount` was read from the QuerySet itself
    (always an AttributeError) instead of from a row.
    """
    p = Project_Unapproved.objects.get(pk=proj_id)
    heads = Sanctioned_Head.objects.filter(project=p)
    # Use the first sanctioned head's amount; None when the project has none.
    first = heads[0] if heads.exists() else None
    data = {'Approved Amount': first.appr_amount if first is not None else None}
    form = UserQueueForm(initial=data)
    # NOTE(review): template name has no ".html" suffix unlike every other
    # view — verify the template resolves.
    return render_to_response('sriru/appr_sanchead',{'form':form})
def addsponsorship(request,proj_id):
    """Attach a sponsorship to a project.

    'submit1' stays on the sponsorship page (add another); any other submit
    continues to the approved-sanction-head page.
    """
    if request.POST:
        form = SponsorshipForm(request.POST)
        if form.is_valid():
            form.save()
            if 'submit1' in request.POST:
                return HttpResponseRedirect('/sriru/sponsorship/'+proj_id+'/')
            return HttpResponseRedirect('/sriru/appr_sanchead/'+proj_id+'/')
    else:
        form = SponsorshipForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    context['proj_id'] = proj_id
    return render_to_response('sriru/form_sponsorship.html', context)
def purchase(request):
    """File a purchase request; total estimated cost is derived from
    unit cost x quantity after the initial save."""
    if request.POST:
        form = PurchaseForm(request.POST)
        if form.is_valid():
            entry = form.save()
            entry.tot_est_cost = entry.est_cost * entry.quantity
            entry.save()
            prof_no = request.session['userprof']
            return HttpResponseRedirect('/sriru/prof/'+prof_no+'/')
    else:
        form = PurchaseForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/purchase.html', context)
def purchaseduration(request):
    """Create a purchase/tender window (admin only flow)."""
    if request.POST:
        form = PurchaseDurationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/sriru/admin')
    else:
        form = PurchaseDurationForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/purchasedur.html', context)
def completepurchase(request):
    """Record a completed purchase and charge its cost against the
    sanctioned head's used amount."""
    if request.POST:
        form = CompletedPurchaseForm(request.POST)
        if form.is_valid():
            completed = form.save()
            head = completed.purchase.sanc_head
            head.used_amt += completed.cost
            head.save()
            return HttpResponseRedirect('/sriru/admin')
    else:
        form = CompletedPurchaseForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/comppurchase.html', context)
def vendadd(request):
    """Register a new vendor (admin flow)."""
    if request.POST:
        form = AddVendors(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/sriru/admin')
    else:
        form = AddVendors()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/vendadd.html', context)
def _create_hashed_user(request, form_cls, template):
    """Shared flow for adding a sponsor/professor/student account.

    On a valid POST, SHA-1-hashes the password field before saving (unsalted
    SHA-1 is weak, but matches how the rest of this app stores passwords).
    Replaces three copy-pasted views; also drops their dead `b = Model()`
    assignments, which were immediately overwritten by form.save().
    """
    if request.POST:
        form = form_cls(request.POST)
        if form.is_valid():
            account = form.save(commit=False)
            account.password = hashlib.sha1(account.password).hexdigest()
            account.save()
            return HttpResponseRedirect('/sriru/admin')
    else:
        form = form_cls()
    args = {}
    args.update(csrf(request))
    args['form'] = form
    return render_to_response(template, args)

def sponsadd(request):
    """Admin adds a sponsor account."""
    return _create_hashed_user(request, AddSpons, 'sriru/sponsadd.html')

def profadd(request):
    """Admin adds a professor account."""
    return _create_hashed_user(request, AddProf, 'sriru/profadd.html')

def studadd(request):
    """Admin adds a student account."""
    return _create_hashed_user(request, AddStudent, 'sriru/studadd.html')
def changepass(request):
    """Change the logged-in user's password.

    Works for professor / admin / officer / sponsor / student sessions, in
    that priority order. The current password must match (as SHA-1) and the
    new password must equal its confirmation before anything is saved.
    On failure the page re-renders with an error; with no session at all,
    redirects to the login page.
    """
    state = ""
    b = False          # True only when new password == confirmation
    npwd = ""          # SHA-1 digest to store as the new password
    pwd = ""           # SHA-1 digest of the current password, for the lookup
    user_npwd = ""
    user_cpwd = "c"    # sentinel: differs from user_npwd ("") on GET so b stays False
    user = ""
    if request.POST:
        user = request.POST.get('user_name')
        user_pwd = request.POST.get('user_pwd')
        user_npwd = request.POST.get('user_npwd')
        user_cpwd = request.POST.get('user_cpwd')
        hash_object = hashlib.sha1(user_pwd)
        pwd = hash_object.hexdigest()
        #pwd = user_pwd
        # npwd is hashed from the *confirmation* field; safe only because a
        # save requires user_npwd == user_cpwd (b == True) below.
        hash_object1 = hashlib.sha1(user_cpwd)
        npwd = hash_object1.hexdigest()
        #npwd = user_cpwd
        if user_npwd == user_cpwd:
            b = True
    if 'userprof' in request.session:
        x = request.session['userprof']
        if Professor.objects.filter(pk=user,password=pwd).exists() and b == True:
            p = Professor.objects.get(pk=user)
            p.password=npwd
            p.save()
            return redirect('/sriru/prof/'+x+'/')
        elif request.POST and b == False:
            state = "Password doesn't match"
        return render_to_response('sriru/changepass.html',{'a':x,'state':state},RequestContext(request))
    elif 'useradmin' in request.session:
        x = request.session['useradmin']
        # NOTE(review): existence is checked on name='admin' but the row
        # updated is pk=user (the typed user name) — confirm these agree.
        if SuperUser.objects.filter(name='admin',password=pwd).exists() and b == True:
            p = SuperUser.objects.get(pk=user)
            p.password=npwd
            p.save()
            return redirect('/sriru/admin')
        elif request.POST and b == False:
            state = "Password doesn't match"
        return render_to_response('sriru/changepass.html',{'d':x,'state':state},RequestContext(request))
    elif 'useroff' in request.session:
        x = request.session['useroff']
        # NOTE(review): same name-vs-pk mismatch as the admin branch.
        if SuperUser.objects.filter(name='sriru',password=pwd).exists() and b == True:
            p = SuperUser.objects.get(pk=user)
            p.password=npwd
            p.save()
            return redirect('/sriru/officer')
        elif request.POST and b == False:
            state = "Password doesn't match"
        return render_to_response('sriru/changepass.html',{'c':x,'state':state},RequestContext(request))
    elif 'userspons' in request.session:
        x = request.session['userspons']
        if Sponsor.objects.filter(pk=user,password=pwd).exists() and b == True:
            p = Sponsor.objects.get(pk=user)
            p.password=npwd
            p.save()
            return redirect('/sriru/spons/'+x+'/')
        elif request.POST and b == False:
            state = "Password doesn't match"
        return render_to_response('sriru/changepass.html',{'e':x,'state':state},RequestContext(request))
    elif 'userstud' in request.session:
        x = request.session['userstud']
        if Student.objects.filter(pk=user,password=pwd).exists() and b == True:
            p = Student.objects.get(pk=user)
            p.password=npwd
            p.save()
            return redirect('/sriru/stud/'+x+'/')
        elif request.POST and b == False:
            state = "Password doesn't match"
        return render_to_response('sriru/changepass.html',{'b':x,'state':state},RequestContext(request))
    else:
        return HttpResponseRedirect('/sriru/')
def deptadd(request):
    """Add a department (admin flow)."""
    if request.POST:
        form = DeptForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/sriru/admin')
    else:
        form = DeptForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('sriru/deptadd.html', context)
def search_project(request,text):
    """AJAX title search over projects; a missing term matches everything."""
    term = text if text is not None else ''
    matches = Project_Unapproved.objects.filter(title__contains=term)
    return render_to_response('sriru/ajax_search.html', {'projects': matches})
|
25,064 | b37bb0947bf31bbb0cdd4aedcae2b5b270f5fc38 | """Module for ivector extractor training"""
from montreal_forced_aligner.ivector.trainer import (
DubmTrainer,
IvectorTrainer,
TrainableIvectorExtractor,
)
__all__ = ["trainer", "DubmTrainer", "IvectorTrainer", "TrainableIvectorExtractor"]
|
25,065 | 1b503bd8bca44e42567a5f3b6d47ea33576567ae | from fluent_contents.models import Placeholder
from fluent_contents.plugins.picture.models import PictureItem
from fluent_contents.plugins.text.models import TextItem
from fluent_contents.tests.testapp.models import RawHtmlTestItem, TestPage
from fluent_contents.tests.utils import AppTestCase
class SearchTest(AppTestCase):
    """
    Tests for Placeholder.get_search_text(): plain text is indexed with HTML
    stripped, items without search configuration are skipped, and configured
    search_fields are honoured.
    """
    def test_search_text(self):
        """
        Test: Simple text indexing should work. HTML is stripped.
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        TextItem.objects.create_for_placeholder(placeholder, text="<b>Item1!</b>", sort_order=1)
        # rstrip(): rendering may leave trailing whitespace after the tag strip
        self.assertEqual(placeholder.get_search_text().rstrip(), "Item1!")
    def test_search_skip(self):
        """
        Test: Search should skip elements without search_fields or search_output
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        # RawHtmlTestItem declares no search_fields, so it contributes nothing
        RawHtmlTestItem.objects.create_for_placeholder(
            placeholder, html="<b>HTML!!</b>", sort_order=2
        )
        self.assertEqual(placeholder.get_search_text(), "")
    def test_search_fields(self):
        """
        Test: An item with search_fields contributes those fields, stripped of HTML.
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        PictureItem.objects.create_for_placeholder(
            placeholder, caption="<b>caption</b>", sort_order=1
        )
        self.assertEqual(placeholder.get_search_text(), "caption")
|
25,066 | 866d492472d7f79f5f4714ead708f182d36fe780 | #!/usr/bin/env python
'''
* ROS lane detector node ******************************
ROS/OpenCV lane detector with graphical overlay.
Publishes the lane center offset [cm] on ROS topic.
Also contains an ArUco marker detector.
ROS functionality and center offset measure added
Most lane detection code courtesy of:
https://github.com/vamsiramakrishnan/AdvancedLaneLines
********************************************************
'''
import cv2
import cv2.aruco as aruco
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
from collections import deque
from math import ceil
# ROS
import roslib
roslib.load_manifest('cv_lanetracker_aruco')
import sys
import rospy
from std_msgs.msg import String, Float32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Image sizes as [width, height] (warped) and [height, width] (original) —
# note the two constants use opposite axis orders.
warped_size = np.array([640, 480])
original_size = np.array([480, 640])
OFFSET =0
# Length of the rolling history queues below
queue_len= 10
fCount = 0
# Per-lane polynomial coefficient history
l_coeff_queue = deque(maxlen=queue_len)
r_coeff_queue = deque(maxlen=queue_len)
# Per-lane curvature & lateral-offset history
l_curvature_queue = deque(maxlen=queue_len)
r_curvature_queue = deque(maxlen=queue_len)
l_offset_queue = deque(maxlen=queue_len)
r_offset_queue = deque(maxlen=queue_len)
# Nominal lane width in warped-image pixels
pix_width = 400
ym_per_pix = 0.27/original_size[0] # meters per y pixel
# Prior over lane width: 20 normal samples around pix_width (sigma=250 px)
lane_width = np.random.normal(pix_width,250, 20)
xm_per_pix = 0.30/pix_width # meters per x pixel
# Lateral position [m] of the image center
center_position = original_size[1] * xm_per_pix / 2.
overall_offset = 0
# Flags for event-based triggering of actions
IsLaneFound=False
isPerspectiveCompute = False
patience = 0
# Last displayed curvature values
disp_left = 0
disp_right =0
# Camera calibration: intrinsic matrix and distortion coefficients
mtx = np.array([[708.4340756775686, 0, 317.9663110540382], [0, 724.3038241696117, 274.0865876256384], [0, 0, 1]])
dist = np.array([0.09702126218642344, -0.1836878268546886, 0.01359685879119158, 0.007942342964235989, 0])
offset_from_lane_center_in_cm = 0.0
def filter_img(image):
    # Isolate the dark (lane) parts of the image: grayscale, 5x5 box blur,
    # threshold intensities 0..110 into a mask, then paint those pixels white
    # on black — returns a binary uint8 image (255 where dark, 0 elsewhere).
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    #frame = cv2.medianBlur(frame,7)
    kernel = np.ones((5,5),np.float32)/25
    image = cv2.filter2D(image,-1,kernel)
    lower_black = np.array([0])
    upper_black = np.array([110])
    mask = cv2.inRange(image, lower_black, upper_black)
    # bitwise_or of white with itself under the mask == "255 where mask set"
    background = np.full(image.shape, 255, dtype=np.uint8)
    res = cv2.bitwise_or(background, background, mask=mask)
    return res
def get_lane_center_offset(left_x, right_x, left_y, right_y, img_):
    # Compute the car's offset [cm] from the lane center and draw the overlay
    # (center markers, offset text, and a range scale on both image edges).
    # Meters-per-pixel derived from assumed 0.30 m x 0.23 m field of view.
    m_per_x = (0.3/img_.shape[1])
    m_per_y = (0.23/img_.shape[0])
    pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])
    pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])
    pts = np.hstack((pts_left, pts_right))
    # Midpoint at the bottom of the lane (first/last points of each side)
    midpoint_x = ((int(pts_right[0][0][0]) - int(pts_left[0][-1][0])) / 2 + int(pts_left[0][-1][0]))
    # Midpoint further up the lane — assumes each side has > 400 sample
    # points (index 400 / -400); TODO confirm upstream always provides that.
    midpoint_x2 = ((int(pts_right[0][400][0]) - int(pts_left[0][-400][0])) / 2 + int(pts_left[0][-400][0]))
    # NOTE(review): img_.shape[1]/2 as a pixel coordinate relies on Python 2
    # integer division (consistent with this file's ROS/Py2 setup).
    img_ = cv2.line(img_, (img_.shape[1]/2, 50), (midpoint_x, 50), color=(100, 200, 100), thickness=1, lineType=cv2.LINE_AA)
    #img_ = cv2.circle(img_, (midpoint_x2, 90), 12, color=(100, 200, 100), thickness=-1)
    img_ = cv2.circle(img_, (midpoint_x, 50), 12, color=(100, 200, 100), thickness=-1)
    img_ = cv2.circle(img_, (img_.shape[1]/2, 50), 8, color=(100, 200, 100), thickness=-1)
    # Published offset uses the farther midpoint (midpoint_x2)
    error_offset = img_.shape[1]/2 - midpoint_x2
    offset_cm = error_offset * m_per_x * 100
    font = cv2.FONT_HERSHEY_SIMPLEX
    str1 = str(' OFFSET: ') + str(round(offset_cm,2)) + str(' CM')
    cv2.putText(img_, str1, (midpoint_x-120, 120), font, 1, (100, 200, 100), 2, cv2.LINE_AA)
    # Range measure: tick marks every 50 px with estimated distance labels [cm]
    for i in range(1,10):
        y_est = round((i*50 * m_per_y)*100 + 2.5, 1)
        img_ = cv2.line(img_, (-10, 480-50*i), (20, 480-50*i), color=(100, 200, 100), thickness=2)
        img_ = cv2.line(img_, (650, 480-50*i), (620, 480-50*i), color=(100, 200, 100), thickness=2)
        str1 = str(y_est)
        cv2.putText(img_, str1, (30, 480-50*i+4), font, 0.4, (100, 200, 100), 1, cv2.LINE_AA)
        cv2.putText(img_, str1, (580, 480-50*i+4), font, 0.4, (100, 200, 100), 1, cv2.LINE_AA)
    return img_, offset_cm
'''
* CITE *********************************************************************
* Content of some functions has been modified
* Title: AdvancedLaneLines
* Author: Vamsi Ramakrishnan
* Date: Aug 2017
* Availability: https://github.com/vamsiramakrishnan/AdvancedLaneLines
'''
# Undistort Images
def undistort_image(mtx_, dist_, img_):
    """Correct lens distortion.

    :param mtx_: camera intrinsic matrix
    :param dist_: distortion coefficients
    :param img_: image to undistort
    :return: the undistorted image
    """
    return cv2.undistort(img_, mtx_, dist_, None, mtx_)
# Calculate Source and Destination points
def calc_warp_points():
    """Hand-tuned perspective-warp quads.

    :return: (src, dst) — four [x, y] corners each, float32; src is the
             camera-view trapezoid, dst the bird's-eye rectangle.
    """
    src = np.float32([[162, 479], [210, 180], [420, 180], [480, 479]])
    dst = np.float32([[172, 530], [165, 20], [493, 20], [470, 530]])
    return src, dst
# Calculate Transform
def calc_transform(src_, dst_):
    """Build the forward and inverse perspective transforms.

    :param src_: source quad
    :param dst_: destination quad
    :return: (M, Minv) — forward and inverse 3x3 perspective matrices
    """
    forward = cv2.getPerspectiveTransform(src_, dst_)
    inverse = cv2.getPerspectiveTransform(dst_, src_)
    return forward, inverse
# Get perspective transform
def perspective_transform(M_, img_):
    """Warp `img_` into the bird's-eye view using matrix M_.

    :param M_: perspective matrix
    :param img_: input image
    :return: warped image (same size, cubic interpolation)
    """
    height, width = img_.shape[0], img_.shape[1]
    return cv2.warpPerspective(
        img_, M_, (width, height),
        flags=cv2.WARP_FILL_OUTLIERS + cv2.INTER_CUBIC)
# Inverse Perspective Transform
def inv_perspective_transform(Minv_, img_):
    """Warp a bird's-eye image back to the camera view using Minv_.

    :param Minv_: inverse perspective matrix
    :param img_: input image
    :return: un-warped image (same size, cubic interpolation)
    """
    height, width = img_.shape[0], img_.shape[1]
    return cv2.warpPerspective(
        img_, Minv_, (width, height),
        flags=cv2.WARP_FILL_OUTLIERS + cv2.INTER_CUBIC)
# Extract Pixels from Image
def extract_pixels(img_):
    """Coordinates of all positive pixels.

    :param img_: 2-D image/mask
    :return: (rows, cols) as float32 arrays
    """
    coords = np.argwhere(img_ > 0)
    rows = coords.T[0].astype(np.float32)
    cols = coords.T[1].astype(np.float32)
    return rows, cols
# Get Intercepts
def get_intercepts(fit, y):
    """Evaluate the quadratic lane fit: x = a*y^2 + b*y + c.

    :param fit: coefficients (a, b, c)
    :param y: scalar or array of y coordinates
    :return: corresponding x value(s)
    """
    a, b, c = fit[0], fit[1], fit[2]
    return a * y * y + b * y + c
# Draw Polygon based on X, and Y points for Left and Right Lanes on Image
def draw_polygon(left_x, right_x, left_y, right_y, img_):
    """
    Draw the detected lane as a filled polygon with outlined borders.

    The left boundary is flipped so the combined point list traces the lane
    perimeter in order (down the left side, up the right).
    :param left_x: left-boundary x coordinates
    :param right_x: right-boundary x coordinates
    :param left_y: left-boundary y coordinates
    :param right_y: right-boundary y coordinates
    :param img_: image drawn on in place
    :return: the annotated image
    """
    pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])
    pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])
    pts = np.hstack((pts_left, pts_right))
    img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)
    img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))
    return img_
def coordinates_to_imgpts(x, y):
    """Pack x/y coordinate arrays into OpenCV point format.

    :param x: x coordinates
    :param y: y coordinates
    :return: array of shape (1, N, 2), point order reversed
    """
    pairs = np.transpose(np.vstack([x, y]))
    return np.array([np.flipud(pairs)])
def draw_polylines(input_img, pts, window_size):
    """
    Draw a white open polyline whose stroke width is twice window_size —
    used to rasterize a search corridor around a fitted lane line.
    :param input_img: image drawn on in place
    :param pts: polyline points in OpenCV format
    :param window_size: half the stroke thickness
    :return: the image with the polyline drawn
    """
    return cv2.polylines(input_img, np.int_([pts]), isClosed=False, color=(255, 255, 255),
                         thickness=2 * window_size)
def smoothen_masks(fit, img_, window_size):
    """
    Rasterize a smooth search mask from a polynomial fit: sample the fit at
    every image row and stroke the resulting curve with a 2*window_size-wide
    white band on a black canvas.
    :param fit: quadratic coefficients (a, b, c)
    :param img_: reference image (only its shape is used)
    :param window_size: half-width of the corridor band
    :return: the rasterized mask (same shape as img_)
    """
    img_size = img_.shape
    mask_poly = np.zeros_like(img_)
    # One sample per row, top to bottom
    mask_y = np.linspace(0, img_size[0] - 1, img_size[0])
    mask_x = get_intercepts(fit, mask_y)
    # Stroke the sampled curve to produce the corridor
    pts = coordinates_to_imgpts(mask_x, mask_y)
    mask_poly_smooth = draw_polylines(mask_poly, pts, window_size)
    return mask_poly_smooth
# Use when Lane is Found and Polynomial fit can be used with a Tolerance window to search for lanes
# Use when Lane is Found and Polynomial fit can be used with a Tolerance window to search for lanes
def limited_search(img_, window_size, flag='L'):
    """
    Build a search mask around the previous frame's fit for the chosen lane.
    Falls back to an all-zero mask when no previous fit exists.
    :param img_: reference image (only its shape is used)
    :param window_size: half-width of the tolerance corridor
    :param flag: 'L' for the left lane, anything else for the right
    :return: uint8 corridor mask, or a zero mask when no fit is stored
    """
    # Initialize Mask with Same Size as Image #
    mask_poly = np.zeros_like(img_)
    # Get previous Coefficients #
    fit = get_last_fit(flag=flag)
    if fit is not None:
        mask_poly_smooth = smoothen_masks(fit, img_, window_size)
        return mask_poly_smooth.astype(np.uint8)
    else:
        return mask_poly
# Sliding Window Blind Search to generate a mask for Polynomial fit generation
def blind_search(img_, window_size = 30):
    """
    Sliding-window search with no prior fit: scan column-sum histograms
    upward in 16 row segments, tracking the intensity peak for the left lane
    first, masking those pixels out, then repeating for the right lane.
    :param img_: binary warped image
    :param window_size: histogram smoothing width / half rectangle width
    :return: (mask_L_poly, mask_R_poly) rectangle masks for the two lanes
    """
    img_size = img_.shape
    n_segments = 16
    step = img_size[0]//n_segments
    mask_L_poly = np.zeros_like(img_)
    mask_R_poly = np.zeros_like(img_)
    n_steps = 4
    # Seed the left-lane peak from the bottom quarter of the image,
    # searching a band left of the image center
    window_start = img_size[1]//2 - 9 * window_size
    window_end = window_start + 6*window_size
    sm = np.sum(img_[img_size[0]-4*step:img_size[0], window_start:window_end], axis=0)
    sm = np.convolve(sm, np.ones((window_size,))/window_size, mode='same')
    argmax = window_start + np.argmax(sm)
    shift = 0
    #plt.figure(figsize=(10,6))
    i =0
    for last in range(img_size[0], 0, -step):
        first_line = max(0, last - n_steps*step)
        sm = np.sum(img_[first_line:last, :], axis=0)
        sm = np.convolve(sm, np.ones((window_size,))/window_size, mode='same')
        window_start = min(max(argmax + int(shift)-window_size//2, 0), img_size[1]-1)
        window_end = min(max(argmax + int(shift) + window_size//2, 0+1), img_size[1])
        new_argmax = window_start + np.argmax(sm[window_start:window_end])
        new_max = np.max(sm[window_start:window_end])
        # Weak response: keep drifting by a decaying estimate of lane slope
        if new_max <= 2:
            new_argmax = argmax + int(shift)
            shift = shift/2
        # Exponential smoothing of the per-segment horizontal drift
        if last != img_size[0]:
            shift = shift*0.25 + 0.75*(new_argmax - argmax)
        argmax = new_argmax
        mask_L_poly = cv2.rectangle(mask_L_poly, (argmax-window_size//2, last-step), (argmax+window_size//2, last), 1, thickness=window_size)
    # Remove left-lane pixels so the right-lane pass cannot lock onto them
    not_left = np.logical_not(mask_L_poly).astype(np.uint8)
    filtered_img = cv2.bitwise_and(img_,not_left)
    # Seed the right-lane peak from a band right of the image center
    window_start = img_size[1]//2 + 6 * window_size
    window_end = window_start + 6*window_size
    sm = np.sum(filtered_img[img_size[0]-4*step:img_size[0], window_start:window_end], axis=0)
    sm = np.convolve(sm, np.ones((window_size,))/window_size, mode='same')
    argmax = window_start + np.argmax(sm)
    shift = 0
    for last in range(img_size[0], 0, -step):
        first_line = max(0, last - n_steps*step)
        sm = np.sum(filtered_img[first_line:last, :], axis=0)
        sm = np.convolve(sm, np.ones((window_size,))/window_size, mode='same')
        window_start = min(max(argmax + int(shift)-window_size//2, 0), img_size[1]-1)
        window_end = min(max(argmax + int(shift) + window_size//2, 0+1), img_size[1])
        new_argmax = window_start + np.argmax(sm[window_start:window_end])
        new_max = np.max(sm[window_start:window_end])
        if new_max <= 2:
            new_argmax = argmax + int(shift)
            shift = shift/2
        if last != img_size[0]:
            shift = shift*0.25 + 0.75*(new_argmax - argmax)
        argmax = new_argmax
        mask_R_poly = cv2.rectangle(mask_R_poly, (argmax-window_size//2, last-step), (argmax+window_size//2, last), 1, thickness=window_size)
    return mask_L_poly, mask_R_poly
def get_mean_fit(flag='L'):
    """Mean of the stored polynomial coefficients for the selected lane
    ('L' = left, anything else = right); last entry if only one is stored."""
    queue = l_coeff_queue if flag == 'L' else r_coeff_queue
    if len(queue) > 1:
        return np.mean(np.vstack(queue), axis=0)
    return queue[-1]

def get_predicted_fit(flag ='L'):
    """Extrapolate the next fit: mean fit plus the mean frame-to-frame
    coefficient delta; last entry if only one is stored."""
    queue = l_coeff_queue if flag == 'L' else r_coeff_queue
    if len(queue) > 1:
        avg_diff = np.mean(np.vstack(np.diff(np.vstack(queue), axis=0)), axis=0)
        return np.add(get_mean_fit(flag=flag), avg_diff)
    return queue[-1]

def get_last_fit(flag='L'):
    """Most recent stored fit for the selected lane."""
    queue = l_coeff_queue if flag == 'L' else r_coeff_queue
    return queue[-1]

def get_mean_curvature(flag='L'):
    """(mean offset, mean curvature) over the selected lane's history."""
    if flag == 'L':
        return np.mean(l_offset_queue), np.mean(l_curvature_queue)
    return np.mean(r_offset_queue), np.mean(r_curvature_queue)

def get_last_curvature(flag='L'):
    """(latest offset, latest curvature) for the selected lane."""
    if flag == 'L':
        return l_offset_queue[-1], l_curvature_queue[-1]
    return r_offset_queue[-1], r_curvature_queue[-1]

def return_queue_len(flag='L'):
    """Number of stored fits for the selected lane."""
    return len(l_coeff_queue) if flag == 'L' else len(r_coeff_queue)
def clear_queues():
    """Wipe all per-lane history and reset the running offset.

    Fix: the original assigned `overall_offset` (and `detected_count`)
    without a `global` declaration, so the module-level reset was a no-op.
    """
    # NOTE(review): detected_count is not defined at module level in this
    # view of the file — confirm it exists elsewhere.
    global detected_count, overall_offset
    l_coeff_queue.clear()
    r_coeff_queue.clear()
    l_offset_queue.clear()
    l_curvature_queue.clear()
    r_offset_queue.clear()
    r_curvature_queue.clear()
    detected_count = 0
    overall_offset = 0

def pop_queues_left():
    """Drop the oldest entry from every history queue.

    Fix: the original skipped l_offset_queue, letting the left-offset
    history drift out of lockstep with the other five queues.
    """
    l_coeff_queue.popleft()
    r_coeff_queue.popleft()
    l_curvature_queue.popleft()
    l_offset_queue.popleft()
    r_curvature_queue.popleft()
    r_offset_queue.popleft()

def append_linecoeffs(fit, flag='L'):
    """Store a new polynomial fit for the left ('L') or right lane."""
    if flag=='L':
        l_coeff_queue.append(fit)
    else:
        r_coeff_queue.append(fit)
    return None

def append_curvature(offset, curvature, flag='L'):
    """Store a new (offset, curvature) pair for the left ('L') or right lane."""
    if flag =='L':
        l_curvature_queue.append(curvature)
        l_offset_queue.append(offset)
    else:
        r_curvature_queue.append(curvature)
        r_offset_queue.append(offset)
    return None

def append_overall_offset(left_offset, right_offset):
    """Update the global lateral offset: image center minus the midpoint of
    the two lane positions."""
    global overall_offset
    overall_offset = center_position - (left_offset + right_offset) / 2.
    return None
def calc_curvature(fit, img_):
    """Radius of curvature [m] and lateral offset at the bottom image row.

    Coefficients are rescaled from pixel space to meters before the
    standard curvature formula (1 + (2ay + b)^2)^1.5 / |2a| is applied.
    Returns (None, None) when no fit is available.
    """
    if fit is None:
        return None, None
    y_eval = img_.shape[0]
    a = fit[0] * xm_per_pix / ym_per_pix**2
    b = fit[1] * xm_per_pix / ym_per_pix
    y = y_eval * ym_per_pix
    rad_curvature = (1 + (2*a*y + b)**2) ** 1.5 / math.fabs(2*a)
    return calc_offset(fit, y_eval), rad_curvature
def calc_offset(fit, y_eval):
    """Lane-line lateral position [m] at pixel row y_eval, using the
    module's meters-per-pixel scale factors."""
    a = fit[0] * xm_per_pix / ym_per_pix**2
    b = fit[1] * xm_per_pix / ym_per_pix
    c = fit[2] * xm_per_pix
    y_m = y_eval * ym_per_pix
    return a * y_m * y_m + b * y_m + c
def check_and_fit(x, y, flag='L', threshold=1000):
"""
Verify if number of pixels are satisfactory for a confident fit and then fit
:param x:
:param y:
:param flag:
:param threshold:
:return:
"""
confidence_index = len(x)
if IsLaneFound is False:
threshold =500
if confidence_index < threshold:
fit = None
foundFlag = False
else:
fit, cov = curve_fit(lambda x, a, b, c:a*x*x+b*x + c , x, y)
foundFlag = True
return fit, foundFlag, confidence_index
def mask_and_fit(mask, binary_warped, flag):
    """Restrict the warped binary image to `mask`, then fit the lane.

    :param mask: search-corridor mask
    :param binary_warped: binary bird's-eye image
    :param flag: lane selector passed through to check_and_fit
    :return: (fit, foundFlag, confidence_index)
    """
    masked = cv2.bitwise_and(binary_warped, binary_warped, mask=mask)
    xs, ys = extract_pixels(masked)
    return check_and_fit(xs, ys, flag)
def curvature_sanity(left_curvature, left_offset, right_curvature, right_offset):
    """
    Decide whether the freshly measured lane geometry is plausible by
    comparing the implied vehicle offset with the smoothed history.

    :param left_curvature: current left-lane radius of curvature
    :param left_offset: current left-lane lateral position
    :param right_curvature: current right-lane radius of curvature
    :param right_offset: current right-lane lateral position
    :return: True when the new offset agrees with history, or when
        there is no history yet to compare against
    """
    have_history = (return_queue_len(flag='L') >= 1
                    and return_queue_len(flag='R') >= 1)
    if not have_history:
        return True
    offset = center_position - (left_offset + right_offset) / 2.
    return bool(np.abs(overall_offset - offset) < 0.2)
def update_lanewidth(left_fit, right_fit, img_):
    """
    Sample both lane fits at evenly spaced heights and return the
    per-sample lane width, clipped to a plausible pixel range.

    :param left_fit: quadratic coefficients of the left lane
    :param right_fit: quadratic coefficients of the right lane
    :param img_: image whose height defines the sampling span
    :return: array of lane widths clipped to [400, 800] pixels
    """
    sample_ys = np.linspace(0, img_.shape[0], 20)
    widths = get_intercepts(right_fit, sample_ys) - get_intercepts(left_fit, sample_ys)
    return np.clip(widths, 400, 800)
def lanewidth_sanity(left_fit, right_fit, img_):
    """
    Check that the gap between the two lane fits is consistent with the
    historical lane-width statistics.

    :param left_fit: quadratic coefficients of the left lane (pixel space)
    :param right_fit: quadratic coefficients of the right lane (pixel space)
    :param img_: warped binary image; its height defines the sample span
    :return: True when at least 95% of the sampled widths fall inside
        mean +/- 2.5 std of the historical width
    """
    # NOTE(review): declared global but never assigned in this function;
    # the global statement is redundant for read-only access.
    global lane_width
    img_size = img_.shape
    # Sample 20 evenly spaced heights over the image.
    ploty = np.linspace(0, img_size[0], 20)
    # calc_offset returns metric-scaled x positions for each sampled y.
    left_distances = np.vstack(calc_offset(left_fit, ploty)).T
    right_distances = np.vstack(calc_offset(right_fit, ploty)).T
    distances = right_distances - left_distances
    # Historical width history rescaled by metres-per-pixel.
    lanewidth = lane_width * xm_per_pix
    # Acceptance band: mean +/- 2.5 standard deviations of history.
    min_lanewidth = np.mean(lanewidth) - 2.5 * np.std(lanewidth)
    max_lanewidth = np.mean(lanewidth) + 2.5 * np.std(lanewidth)
    passes = np.sum((min_lanewidth <= distances) & (distances <= max_lanewidth)) / len(distances[0])
    return True if passes >= 0.95 else False
def lanewidth_rationalize(left_fit, confidence_index_l, right_fit, confidence_index_r, img_):
    """
    When one lane fit is much more trustworthy than the other (or one
    fit is missing), re-derive the weaker fit by shifting the stronger
    one sideways by the known lane width.

    :param left_fit: left-lane quadratic coefficients or None
    :param confidence_index_l: pixel count backing the left fit
    :param right_fit: right-lane quadratic coefficients or None
    :param confidence_index_r: pixel count backing the right fit
    :param img_: image whose height defines the resampling span
    :return: possibly replaced (left_fit, right_fit) pair
    """
    img_size = img_.shape
    y = np.linspace(0, img_size[0], 20)
    # Left fit dominant: synthesize the right fit one lane width away.
    if confidence_index_l > 2. * confidence_index_r or (left_fit is not None and right_fit is None):
        x = get_intercepts(left_fit, y) + lane_width
        right_fit, cov = curve_fit(lambda x, a, b, c: a * x * x + b * x + c, y, x)
    # Right fit dominant: synthesize the left fit one lane width away.
    elif confidence_index_r > 2. * confidence_index_l or (left_fit is None and right_fit is not None):
        x = get_intercepts(right_fit, y) - lane_width
        left_fit, cov = curve_fit(lambda x, a, b, c: a * x * x + b * x + c, y, x)
    # NOTE(review): lane_width looks like it may be an array produced by
    # update_lanewidth -- confirm elementwise addition is intended here.
    return left_fit, right_fit
# Master Function that processes video image by image
def process_video(img):
    """
    Run the full lane-detection pipeline on one video frame.

    Undistorts the frame, warps it to a bird's-eye view, searches for
    lane pixels (polynomial-guided when a lane was found previously,
    blind otherwise), sanity-checks and smooths the fits over time, and
    draws the detected lane polygon plus curvature text back onto the
    frame.

    :param img: BGR camera frame
    :return: Processed Image that is written with appropriate polygon
    """
    global isPerspectiveCompute
    global m
    global minv
    global IsLaneFound
    global fCount
    global last_mask
    global patience
    global lane_width
    global disp_left
    global disp_right
    # NOTE(review): `global lane_width` is declared twice in this list.
    global lane_width
    global xm_per_pix
    global offset_from_lane_center_in_cm
    # Initialize Variables
    l_found_flag = False
    r_found_flag = False
    confidence_index_l = 0
    confidence_index_r = 0
    img_size = img.shape
    IsLaneWidthSane = False
    # NOTE(review): typo -- this initializes 'IsCuvatureSane' but the code
    # below reads/writes 'IsCurvatureSane'; this store is dead.
    IsCuvatureSane = False
    # Perform Camera Calibration and get Distortion Coefficients #
    undistorted_img = undistort_image(mtx, dist, img)
    # Calculate Bird' Eye Transform # (computed once, then cached in m/minv)
    if not isPerspectiveCompute:
        src_, dst_ = calc_warp_points()
        bin_ex = draw_polylines(undistorted_img, src_, 5)
        m, minv = calc_transform(src_, dst_)
        isPerspectiveCompute = True
    # Get Bird's Eye View #
    warped = perspective_transform(m, undistorted_img)
    binary_warped = filter_img(warped)
    # Lane Search
    # Polynomial Search if Lane is Found
    if IsLaneFound:
        #Left Mask and Fit
        mask_l_poly = limited_search(binary_warped, int(35), flag='L')
        left_fit, l_found_flag, confidence_index_l = mask_and_fit(mask_l_poly, binary_warped, 'L')
        # Right Mask and Fit
        mask_r_poly = limited_search(binary_warped, int(35), flag='R')
        right_fit, r_found_flag, confidence_index_r = mask_and_fit(mask_r_poly, binary_warped, 'R')
    # Try Blind Search if Lane is Not Found
    else:
        mask_l_poly, mask_r_poly = blind_search(binary_warped, int(35))
        #Mask and Fit Left and Right Lanes
        left_fit, l_found_flag, confidence_index_l = mask_and_fit(mask_l_poly, binary_warped, 'L')
        right_fit, r_found_flag, confidence_index_r = mask_and_fit(mask_r_poly, binary_warped, 'R')
    # Check if Lane is found after searching , verify if the detected lanes are sane
    if left_fit is not None or right_fit is not None:
        # Check sanity in combination
        if left_fit is not None and right_fit is not None:
            IsLaneWidthSane = lanewidth_sanity(left_fit, right_fit, binary_warped)
            if not IsLaneWidthSane:
                # Rebuild the weaker fit from the stronger one.
                left_fit, right_fit = lanewidth_rationalize(left_fit, confidence_index_l,
                                                            right_fit, confidence_index_r, binary_warped)
        # Calculate Offset and Curvature
        left_offset, left_curvature = calc_curvature(left_fit, binary_warped)
        right_offset,right_curvature = calc_curvature(right_fit, binary_warped)
        # Curvature sanity check is currently disabled (always True).
        IsCurvatureSane = True #curvature_sanity(left_curvature, left_offset, right_curvature, right_offset)
        if IsCurvatureSane is True:
            IsLaneFound = True
            patience = 0
            # Append Left & Right Lane Coefficients, Curvature, offset
            append_linecoeffs(left_fit, flag='L')
            append_curvature(left_offset, left_curvature, flag='L')
            append_linecoeffs(right_fit, flag='R')
            append_curvature(right_offset, right_curvature, flag='R')
            append_overall_offset(left_offset, right_offset)
            if IsLaneWidthSane is True :
                # Re-estimate metres-per-pixel from the measured width
                # (3.7 m is used as the nominal lane width).
                xm_per_pix = 3.7/ np.median(lane_width)
                lane_width = update_lanewidth(left_fit, right_fit, binary_warped)
    else:
        # No usable fit this frame: fall back to the queued history.
        IsLaneFound = False
        patience = patience + 1
        if ((return_queue_len(flag='L') >1 and return_queue_len(flag='R') > 1)):
            pop_queues_left()
            # Left & Right Fit
            left_fit = get_predicted_fit(flag ='L')
            right_fit = get_predicted_fit(flag ='R')
            #Left and Right Curvature Offset
            left_offset, left_curvature = calc_curvature(left_fit, binary_warped)
            right_offset, right_curvature =calc_curvature(right_fit, binary_warped)
            #Append Coefficients , Curvature
            append_linecoeffs(left_fit , flag='L')
            append_linecoeffs(right_fit, flag='R')
            append_curvature(left_offset, left_curvature, flag='L')
            append_curvature(right_offset, right_curvature, flag='R')
            #Overall
            append_overall_offset(left_offset, right_offset)
    # If queue length is greater than 1
    if ((return_queue_len(flag='L') >= 1 and return_queue_len(flag='R') >= 1)):
        # Get the mean offset
        left_fit = get_mean_fit(flag='L')
        right_fit = get_mean_fit(flag='R')
        # Left Mean offset and Right Mean offset
        left_mean_offset, left_mean_curvature = get_mean_curvature(flag='L')
        right_mean_offset, right_mean_curvature = get_mean_curvature(flag='R')
        # Recompute masks for masking next frame
        mask_l_poly = smoothen_masks(get_last_fit(flag='L'), binary_warped, 50)
        mask_r_poly = smoothen_masks(get_last_fit(flag='R'), binary_warped, 50)
        last_mask = cv2.bitwise_or(mask_l_poly, mask_r_poly)
        if not IsLaneFound:
            last_mask = np.ones_like(binary_warped)
        # Refactor, draw a polygon and unwarp the image
        ploty = np.linspace(0, img_size[1] - 1, img_size[1])
        leftx = get_intercepts(left_fit, ploty)
        rightx = get_intercepts(right_fit, ploty)
        warped_out = draw_polygon(leftx, rightx, ploty, ploty, warped)
        warped_out, offset_cm = get_lane_center_offset(leftx, rightx, ploty, ploty, warped_out) # Add error offset to output image
        offset_from_lane_center_in_cm = offset_cm # Store offset for regulator purposes
        unwarped_out = inv_perspective_transform(minv, warped_out)
        output = cv2.addWeighted(img, 0.5, unwarped_out, 0.5, 0)
        # Refresh the displayed curvature text every 5th frame only.
        if fCount%5==0:
            disp_left = ceil(left_mean_curvature)
            disp_right = ceil(right_mean_curvature)
        font = cv2.FONT_HERSHEY_SIMPLEX
        str1 = str("LEFT CURVATURE : ") + str(disp_left)
        str2 = str("RIGHT CURVATURE : ") + str(disp_right)
        cv2.putText(output, str1, (20, 460), font, 0.6, (100, 200, 100), 1, cv2.LINE_AA)
        cv2.putText(output, str2, (340, 460), font, 0.6, (100, 200, 100), 1, cv2.LINE_AA)
    # If none of it is available
    else:
        warped_out = img
        unwarped_out = img
        output = img
        font = cv2.FONT_HERSHEY_SIMPLEX
        str1 = str('No lane detected!')
        cv2.putText(output, str1, (190, 303), font, .9, (255, 255, 255), 1, cv2.LINE_AA)
    fCount = fCount + 1
    #cv2.imshow('frame3',offset_img)
    return output
'''
* END CITE *****************************************************************
'''
def aruco_marker_detector(img):
    """Detect ArUco markers (5x5, 1000-id dictionary) and annotate them.

    Draws each detected marker's id above it and outlines the marker
    borders; returns the (possibly annotated) image.
    """
    detector_params = aruco.DetectorParameters_create()
    dictionary = aruco.Dictionary_get(aruco.DICT_5X5_1000)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect Aruco markers in the grayscale frame.
    corners, ids, _rejected = aruco.detectMarkers(gray_img, dictionary, parameters=detector_params)
    if ids is None:
        return img
    # Label each marker just above its first corner.
    for marker_id, corner in zip(ids, corners):
        font = cv2.FONT_HERSHEY_SIMPLEX
        label = str('id:') + str(marker_id[0])
        anchor = (int(corner[0][0][0]), int(corner[0][0][1])-20)
        cv2.putText(img, label, anchor, font, .9, (255, 255, 255), 1, cv2.LINE_AA)
    # Outline the detected markers.
    img = aruco.drawDetectedMarkers(img, corners, borderColor=(50, 200, 50))
    return img
def main():
    """ROS node entry point: capture frames, run lane detection, and
    publish the lane-centre offset.

    Opens camera 1 at 640x480, runs each frame through the lane
    pipeline (plus ArUco annotation), and publishes the computed offset
    on car_vision/lane_center_offset_cm at 10 Hz.
    """
    global offset_from_lane_center_in_cm
    cap = cv2.VideoCapture(1)
    cap.set(3,640)   # frame width
    cap.set(4,480)   # frame height
    #cap.set(cv2.CAP_PROP_AUTOFOCUS, 0) # turn the autofocus off
    #cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25) # turn of auto exposure
    rospy.init_node('car_vision', anonymous=True)
    image_pub = rospy.Publisher("car_vision/image_raw", Image, queue_size=10)
    offset_pub = rospy.Publisher("car_vision/lane_center_offset_cm", Float32, queue_size=10)
    rate = rospy.Rate(10) # 10hz
    offset_msg = Float32()
    while not rospy.is_shutdown():
        ret, frame = cap.read()
        # Add lanes (and ArUco annotations) to the image.
        # BUG FIX: the original called process_video(frame) twice per
        # iteration (the second call immediately overwrote `res`),
        # mutating the temporal smoothing queues twice per frame.
        # Process each frame exactly once.
        res = aruco_marker_detector(process_video(frame))
        # Get offset from center lane
        offset_msg.data = offset_from_lane_center_in_cm
        # Convert image to ROS Image format
        #img_msg = CvBridge().cv2_to_imgmsg(res, encoding="passthrough")
        # Publish image
        #image_pub.publish(img_msg)
        # Publish center offset
        offset_pub.publish(offset_msg)
        #cv2.imshow('cv_img', frame)
        rate.sleep()
# Run the ROS node until shutdown; swallow the interrupt ROS raises on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
# For debugging
'''
if __name__ == "__main__":
cap = cv2.VideoCapture(1)
cap.set(3,640)
cap.set(4,480)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0) # turn the autofocus off
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25) # turn of auto exposure
while(True):
# Capture image from webcam
ret, frame = cap.read()
# Display the image
#cv2.imshow('frame', aruco_marker_detector(frame))
cv2.imshow('frame', process_video(frame))
#process_video(frame)
#cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
'''
|
25,067 | 76705062ad62215e5c0e4e0468f48d3bc70a02c1 | from helper.functions import getFilesInPath
import argparse, os, shutil, cv2
def main():
    """Copy depth images matching DI basenames into one joined folder.

    For every file in --DIsFolder, look for a file with the same
    basename in any of the --depthFolderOriginal folders and copy it to
    --depthFolderJoined. When no direct match exists, try to
    reconstruct an older 'bs' naming scheme and re-encode the matching
    file as JPEG under the new name.
    """
    parser = argparse.ArgumentParser(description='Join depth folders')
    parser.add_argument('--depthFolderOriginal', help='Path for Original Folder files', nargs='+', required=True)
    parser.add_argument('--depthFolderJoined', help='Joined folder depth', required=True)
    parser.add_argument('--DIsFolder', help='DIs folder', required=True)
    args = parser.parse_args()
    if not os.path.exists(args.depthFolderJoined):
        os.makedirs(args.depthFolderJoined)
    depthsFiles = [ getFilesInPath(dfs) for dfs in args.depthFolderOriginal ]
    dis = getFilesInPath(args.DIsFolder)
    for d in dis:
        dFileName = d.split(os.path.sep)[-1]
        dFileName = dFileName.split('.')[0]
        # Skip DIs whose joined output already exists.
        if os.path.exists(os.path.join(args.depthFolderJoined,dFileName+'.jpeg')):
            continue
        for dfs in depthsFiles:
            # for/else: the else branch runs only when no direct
            # basename match was found in this source folder.
            for df in dfs:
                fileName = df.split(os.path.sep)[-1]
                fileNameNoEXT = fileName.split('.')[0]
                if dFileName == fileNameNoEXT:
                    shutil.copy(df,os.path.join(args.depthFolderJoined,fileName))
                    break
            else:
                # NOTE(review): fileNameNoEXT and df leak out of the loop
                # above (last iterated values); if dfs is empty this
                # raises NameError -- confirm intended.
                oldFileNameSplt = dFileName.split('_')
                if 'bs' in oldFileNameSplt[1] and 'bs' in fileNameNoEXT:
                    # Rebuild the legacy file name: zero-padded subject
                    # class first, fifth field forced to '0'.
                    subjectClass = int(oldFileNameSplt[1][2:])
                    oldFileName = oldFileNameSplt.copy()
                    oldFileName[0] = '%03d' % subjectClass
                    oldFileName[4] = '0'
                    oldFileName = '_'.join(oldFileName)
                    if oldFileName == fileNameNoEXT:
                        newFileName = '_'.join(oldFileNameSplt)
                        fileDepth = cv2.imread(df)
                        cv2.imwrite(os.path.join(args.depthFolderJoined,newFileName+'.jpeg'),fileDepth)
                        #print(os.path.join(args.depthFolderJoined,newFileName+'.jpeg'))
                        #shutil.copy(df,os.path.join(args.depthFolderJoined,newFileName+'.jpeg'))
                        break
if __name__ == '__main__':
main() |
25,068 | c9642e32d29a3c8f17aa14263517880c7c248748 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 9 11:12:18 2018
@author: Jie.Hu
"""
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
# Create the Dash application instance.
app = dash.Dash()
# Inline CSS for the tab bar and the individual tabs.
tabs_styles = {'height': '44px'}
tab_style = {'borderBottom': '1px solid #d6d6d6', 'padding': '10px',
             'fontWeight': 'bold'}
# Highlighted style applied to whichever tab is currently selected.
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#119DFF',
    'color': 'white',
    'padding': '6px',
    }
# Page layout: a horizontal four-tab bar plus a container div that the
# render_content callback below fills with the selected tab's content.
app.layout = html.Div([dcc.Tabs(id='tabs-styled-with-inline',
                       value='tab-1', vertical=False,
                       children=[dcc.Tab(label='Tab 1', value='tab-1',
                       selected_style=tab_selected_style),
                       dcc.Tab(label='Tab 2', value='tab-2',
                       style=tab_style,
                       selected_style=tab_selected_style),
                       dcc.Tab(label='Tab 3', value='tab-3',
                       style=tab_style,
                       selected_style=tab_selected_style),
                       dcc.Tab(label='Tab 4', value='tab-4',
                       style=tab_style,
                       selected_style=tab_selected_style)],
                       style=tabs_styles),
                       html.Div(id='tabs-content-inline')])
@app.callback(Output('tabs-content-inline', 'children'),
              [Input('tabs-styled-with-inline', 'value')])
def render_content(tab):
    """Render the body for the currently selected tab.

    Returns a Div for the four known tab values; implicitly returns
    None for any other value, exactly like the original if/elif chain.
    """
    contents = {
        'tab-1': 'Tab content 1',
        'tab-2': 'Tab content 2',
        'tab-3': 'Tab content 3',
        'tab-4': 'Tab content 4',
    }
    if tab in contents:
        return html.Div([html.H3(contents[tab])])
# Start the Dash development server when the module is run directly.
if __name__ == '__main__':
    app.run_server()
|
25,069 | 38d48bf45d220a80b001bef8ce609f2f13a5b1b9 | import json
import demjson
import requests
from time import sleep
from selenium import webdriver
# Log in to guahao.com with a headless PhantomJS browser, transfer the
# session cookies to `requests`, find the first bookable shift for a
# given doctor, and attempt to book it.
wd = webdriver.PhantomJS(
    executable_path=r"D:\python-project\phantomjs-2.1.1-windows\bin\phantomjs.exe")  # build the headless browser
loginUrl = 'https://www.guahao.com/user/login'
wd.get(loginUrl)  # open the login page
wd.find_element_by_xpath(
    '//*[@id="loginId"]').send_keys('1876111111')  # enter the user name
wd.find_element_by_xpath(
    '//*[@id="password"]').send_keys('513s11111111')  # enter the password
# Screenshot so the user can read the captcha image.
wd.save_screenshot(str("登录界面.png"))
wd.find_element_by_name("validCode").send_keys(input("输入验证码\n>>> "))
wd.find_element_by_xpath('//*[@id="J_LoginSubmit"]').click()  # click the login button
req = requests.Session()  # build a requests session
cookies = wd.get_cookies()  # export cookies from the browser
for cookie in cookies:
    req.cookies.set(cookie['name'], cookie['value'])  # copy cookies into the session
test = req.get('https://www.guahao.com/expert/new/shiftcase/?expertId=127556466557799000&hospDeptId=127548647953601000&hospId=125369370584301000')
# json string: the shift schedule is embedded as loose JSON, hence demjson.
data = demjson.decode(test.text)
for i in range(10):
    try:
        url = data['data']['shiftSchedule'][i]['url']
        if url:
            print("第" + str(i) + "个:" + url)
        else:
            print("url为空")
        # NOTE(review): unconditional break -- only index 0 is ever
        # inspected; confirm whether iterating all 10 was intended.
        break
    except BaseException:
        print("所有未约满的已经列出!")
# NOTE(review): webdriver.get() returns None, so url_1 is always None.
url_1 = wd.get('https://www.guahao.com' + url)
try:
    wd.find_element_by_xpath(
        '// *[ @ id = "J_DiseaseName"]').send_keys('身体不舒服')  # type the visit reason
    wd.find_element_by_xpath(
        '/ html / body / div[1] / div[2] / div[1] / form / div[2] / div[6] / div / label / input').click()  # tick the consent checkbox
    wd.find_element_by_xpath('//*[@id="J_Booking"]').click()  # click the booking button
    print("预约成功!")
except BaseException:
    print("预约异常!")
|
25,070 | ce6b3e205a992b8d7953447ad0efbfd88537101f | # -*- coding: utf-8 -*-
if False:
from gluon import *
from db import *
from menu import *
from tables import *
from gluon.contrib.appconfig import AppConfig
from gluon.tools import Auth, Service, PluginManager
request = current.request
response = current.response
session = current.session
cache = current.cache
T = current.T
db = DAL('sqlite://storage.sqlite')
myconf = AppConfig(reload=True)
auth = Auth(db)
service = Service()
plugins = PluginManager()
from agiscore.gui.mic import MenuLateral, MenuMigas
menu_lateral = MenuLateral(list())
menu_migas = MenuMigas()
from gluon.storage import Storage
from agiscore.gui.mic import Accion, grid_simple
from agiscore.db.carrera_uo import carrera_uo_format
# TODO: remove
# Reset the default web2py menu and register the admin-only sidebar
# entries for this controller (plans and specialities).
response.menu = []
menu_lateral.append(
    Accion(T('Planes'), URL('planes', args=[request.args(0)]),
           auth.has_membership(role=myconf.take('roles.admin'))),
    ['planes', 'asignaturas'])
menu_lateral.append(
    Accion(T('Especialidades'), URL('especialidades', args=[request.args(0)]),
           auth.has_membership(role=myconf.take('roles.admin'))),
    ['especialidades'])
@auth.requires_login()
def index():
    """Default action: return an empty context to the index view."""
    C = Storage()
    return dict(C=C)
@auth.requires(auth.has_membership(role=myconf.take('roles.admin')))
def especialidades():
    """Admin CRUD grid for the specialities of a degree programme."""
    C = Storage()
    C.carrera = db.carrera_uo(int(request.args(0)))
    C.unidad = db.unidad_organica(C.carrera.unidad_organica_id)
    C.escuela = db.escuela(C.unidad.escuela_id)
    C.carrera_format = carrera_uo_format(C.carrera)
    # breadcrumbs
    # link to the organic unit (UO)
    u_link = Accion(C.unidad.abreviatura or C.unidad.nombre,
                    URL('unidad', 'index', args=[C.unidad.id]),
                    True) # always active within this function
    menu_migas.append(u_link)
    # link to the UO's careers option
    c_link = Accion(T('Carreras'),
                    URL('unidad', 'carreras', args=[C.unidad.id]),
                    True)
    menu_migas.append(c_link)
    # plans
    C.carrera_format = carrera_uo_format(C.carrera)
    menu_migas.append(C.carrera_format)
    menu_migas.append(T("Especialidades"))
    # permissions
    puede_crear = auth.has_membership(role=myconf.take('roles.admin'))
    puede_editar, puede_borrar = (puede_crear, puede_crear)
    tbl = db.especialidad
    tbl.carrera_id.default = C.carrera.id
    tbl.carrera_id.readable = False
    tbl.carrera_id.writable = False
    tbl.id.readable = False
    if ('new' in request.args) or ('edit' in request.args):
        # Enforce unique name (per career) and unique abbreviation.
        dbset = (tbl.carrera_id == C.carrera.id)
        tbl.nombre.requires.append(IS_NOT_IN_DB(db(dbset),
                                                'especialidad.nombre'))
        tbl.abreviatura.requires.append(IS_NOT_IN_DB(db,
                                                     'especialidad.abreviatura'))
    query = (tbl.id > 0) & (tbl.carrera_id == C.carrera.id)
    C.grid = grid_simple(query,
                         create=puede_crear,
                         editable=puede_editar,
                         deletable=puede_borrar,
                         args=request.args[:1])
    return dict(C=C)
@auth.requires(auth.has_membership(role=myconf.take('roles.admin')))
def asignaturas():
    """Admin CRUD grid for the subjects attached to a curricular plan."""
    C = Storage()
    C.plan = db.plan_curricular(int(request.args(0)))
    C.carrera = db.carrera_uo(C.plan.carrera_id)
    C.unidad = db.unidad_organica(C.carrera.unidad_organica_id)
    C.escuela = db.escuela(C.unidad.escuela_id)
    # breadcrumbs
    # link to the organic unit (UO)
    u_link = Accion(C.unidad.abreviatura or C.unidad.nombre,
                    URL('unidad', 'index', args=[C.unidad.id]),
                    True) # always active within this function
    menu_migas.append(u_link)
    # link to the UO's careers option
    c_link = Accion(T('Carreras'),
                    URL('unidad', 'carreras', args=[C.unidad.id]),
                    True)
    menu_migas.append(c_link)
    # plans
    C.carrera_format = carrera_uo_format(C.carrera)
    menu_migas.append(C.carrera_format)
    p_link = Accion(T('Planes'),
                    URL('planes', args=[C.carrera.id]),
                    True)
    menu_migas.append(p_link)
    menu_migas.append(C.plan.nombre)
    # permissions
    puede_crear = auth.has_membership(role=myconf.take('roles.admin'))
    puede_borrar = auth.has_membership(role=myconf.take('roles.admin'))
    # -- build the grid
    tbl = db.asignatura_plan
    tbl.id.readable = False
    query = (tbl.id > 0)
    query &= (tbl.plan_curricular_id == C.plan.id)
    tbl.plan_curricular_id.readable = False
    if ('new' in request.args) or ('edit' in request.args):
        tbl.plan_curricular_id.writable = False
        tbl.plan_curricular_id.default = C.plan.id
        if 'edit' in request.args:
            # Subject and level are fixed once the row exists.
            tbl.asignatura_id.writable = False
            tbl.nivel_academico_id.writable = False
        tbl.importancia.default = 100
    # validate that subjects are not repeated for the same level
    def onvalidation(form):
        if 'new' in request.args:
            # check that the level/subject combination does not exist yet
            n_id = tbl.nivel_academico_id.validate(form.vars.nivel_academico_id)[0]
            a_id = tbl.asignatura_id.validate(form.vars.asignatura_id)[0]
            row = tbl(nivel_academico_id=n_id,
                      asignatura_id=a_id,
                      plan_curricular_id=C.plan.id)
            if row:
                # the subject already exists in the plan with the same
                # access level
                form.errors.asignatura_id = T("Ya existe en el plan con el mismo nivel")
    text_lengths = {'asignatura_plan.asignatura_id': 50}
    C.grid = grid_simple(query,
                         args=request.args[:1],
                         editable=puede_crear,
                         deletable=puede_borrar,
                         create=puede_crear,
                         onvalidation=onvalidation,
                         maxtextlengths=text_lengths,
                         orderby=[tbl.nivel_academico_id, tbl.asignatura_id],)
    return dict(C=C)
@auth.requires(auth.has_membership(role=myconf.take('roles.admin')))
def planes():
    '''Manage the curricular plans of a degree programme (carrera).'''
    C = Storage()
    C.carrera = db.carrera_uo(int(request.args(0)))
    C.unidad = db.unidad_organica(C.carrera.unidad_organica_id)
    C.escuela = db.escuela(C.unidad.escuela_id)
    # breadcrumbs
    # link to the organic unit (UO)
    u_link = Accion(C.unidad.abreviatura or C.unidad.nombre,
                    URL('unidad', 'index', args=[C.unidad.id]),
                    True) # always active within this function
    menu_migas.append(u_link)
    # link to the UO's careers option
    c_link = Accion(T('Carreras'),
                    URL('unidad', 'carreras', args=[C.unidad.id]),
                    True)
    menu_migas.append(c_link)
    # plans
    C.carrera_format = carrera_uo_format(C.carrera)
    menu_migas.append(C.carrera_format)
    menu_migas.append(T("Planes"))
    # permissions
    puede_crear = auth.has_membership(role=myconf.take('roles.admin'))
    puede_editar, puede_borrar = (puede_crear, puede_crear)
    # -- build the grid of plans
    tbl = db.plan_curricular
    query = ((tbl.id > 0) & (tbl.carrera_id == C.carrera.id))
    # handle the "activate plan" action
    if 'activar' in request.args:
        plan = tbl(int(request.args(2)))
        # deactivate every plan of the career, then activate this one
        db(query).update(estado=False)
        plan.update_record(estado=True)
        redirect(URL('carrera', 'planes', args=[C.carrera.id]))
    tbl.id.readable = False
    tbl.carrera_id.writable = False
    if 'new' in request.args:
        tbl.carrera_id.default = C.carrera.id
    campos = [tbl.nombre, tbl.estado]
    def _enlaces(row):
        # Per-row action links: subjects, plus "activate" for inactive plans.
        co = CAT()
        link = URL('asignaturas', args=[row.id])
        txt = CAT(SPAN('', _class="glyphicon glyphicon-book"),
                  ' ',
                  T('Asignaturas'))
        co.append(Accion(txt, link, True, _class="btn btn-default btn-sm"))
        if not row.estado:
            link = URL('planes',
                       args=[C.carrera.id, 'activar', row.id],
                       user_signature=True)
            txt = CAT(SPAN('', _class="glyphicon glyphicon-ok-sign"),
                      ' ',
                      T("Activar"))
            co.append(Accion(txt, link, True,
                             _class="btn btn-default btn-sm"))
        return co
    enlaces = [dict(header='', body=_enlaces)]
    C.grid = grid_simple(query,
                         create=puede_crear,
                         editable=puede_editar,
                         deletable=puede_borrar,
                         fields=campos,
                         links=enlaces,
                         args=request.args[:1])
    return dict(C=C)
|
25,071 | 0ddf1dda759792b3edaeb715328f90ce437bedb8 | #!/usr/bin/env python
import Food, math, random, turtle
class Env(object):
    """Simulation environment holding programs ("progs") and food.

    Each tick() advances the world one step: consumed food is removed,
    every program is ticked, and dead programs are removed.
    """
    def __init__(self, render=True):
        self.progs = []       # active programs
        self.food = []        # available food items
        self.render = render  # whether the environment should be drawn
    def addProg(self, prog):
        self.progs.append(prog)
    def getProgs(self):
        return self.progs
    def removeProg(self, prog):
        self.progs.remove(prog)
    def addFood(self, food):
        self.food.append(food)
    def getFood(self):
        return self.food
    def removeFood(self, food):
        self.food.remove(food)
    def tick(self):
        """Advance the simulation one step.

        BUG FIX: the original removed items from self.food / self.progs
        while iterating over the same lists, which skips the element
        following every removal (consecutive consumed food or dead
        progs were missed). Iterate over snapshots instead.
        """
        for nom in list(self.food):
            if nom.isConsumed():
                self.food.remove(nom)
        for prog in list(self.progs):
            prog.tick(self)
            if not prog.isAlive():
                self.progs.remove(prog)
# Smoke entry point: constructs an empty environment when run directly.
if __name__ == "__main__":
    e = Env()
|
25,072 | 8b169f7f5051194d4f741e334a58ae246d1c23a1 | list_1 = []
print(len(list_1)) |
25,073 | db0d2e47661f9d1b3c05d6e45353fba0ae77c692 | # -*- coding: utf-8 -*-
"""ESE database parser."""
import pyesedb
from . import logger
from . import esedb_parser
from . import esedb_errors as errors
def PrintUsage():
    """Print command-line usage help (placeholder; not implemented yet)."""
    pass
#TODO: Argument Parser
if __name__ == '__main__':
    # Hard-coded sample database path until an argument parser exists.
    filepath = './samples/WebCacheV01.dat'
    esedb_file = pyesedb.file()
    try:
        esedb_file.open(filepath)
    except IOError as exception:
        # NOTE(review): execution continues with an unopened file here,
        # so the parsing below would operate on an unopened handle --
        # confirm whether this should abort instead.
        logger.debug('unable to open file with error: {0!s}'.format(exception))
    try:
        # Run every registered parser whose required tables are all
        # present in this database.
        esedb_parsers = esedb_parser.ESEDBParser.GetESEDBParserObjects()
        table_names = frozenset(esedb_parser.ESEDBParser.GetTableNames(esedb_file))
        for parser in esedb_parsers.values():
            if not parser.required_tables.issubset(table_names):
                continue
            try:
                parser.Process(database=esedb_file)
            except errors.UnableToParseFile as exception:
                logger.debug('[{0:s}] unable to parse file with error: {1!s}'.format(
                    parser.NAME, exception))
    finally:
        esedb_file.close()
|
25,074 | d376c66274f93659dbf3869af070974825224602 | from random import randint
# Number-guessing game (French UI): pick a random secret in [1, 100]
# and prompt the player until found, hinting higher/lower after each try.
nombre_a_deviner = randint(1, 100)
print("Devinez le nombre")
while True:
    premier_essai = int(input('Quel est le nombre? '))
    if premier_essai < nombre_a_deviner:
        # guess too low -> "it's higher"
        print("C'est plus")
    elif premier_essai > nombre_a_deviner:
        # guess too high -> "it's lower"
        print("C'est moins")
    else:
        # correct guess -> "you won"
        print("Vous avez gagné")
        break
print("Fin du jeu")
|
25,075 | 5b9ed5a78e7fcc8dc08bd33571ab0bd83a482421 | print "Oh!"
import numpy as np
import pandas as pd
# NOTE(review): Python 2 script (bare print statements); pandas indexing
# practice notes over a local fire-incidents CSV.
print "Derka, derka."
fire = pd.read_csv('/Users/rory/coding/pandas_practice/fire_practice/data/fire.csv')
#info() tells you about the columns and what's in em
fire.info()
#selecting only certain columns. Use two brackets or suffer!
year_size_desc = fire[['FireYear', 'GeneralDesc', 'Size_acres']]
#see the type of structure. Notice that calling one row returns a series, calling two or more returns a dataframe
type(fire.iloc[5])
type(fire.iloc[2:5])
#see all the column names
list(fire)
#selects the last five values, from the thing
fire.iloc[-5:]
#selecting all the fires in 2015
fire[fire.FireYear == 2015]
#selecting all fires where it was Lighting or Arson. Remember to put the individual logic process in parantheses.
#this is so that it can test it with objects, not booleans i.e. (in to out: (test ==) -> object of bools -> compare itemwise for each)
zeus_or_baddies = fire[(fire.GeneralDesc == 'Arson') | (fire.GeneralDesc == 'Lightning')]
zeus_or_baddies
#use this for testing a lot of logic (i.e. "is the value of this column in this list?")
#don't forget isin is a function, so you open parantheses. A list comes in brackets, so you open brackets too
#and you have the whole thing as a boolean, so the outside df is testing against brackets (it's sick)
even_years =fire[fire.FireYear.isin([2006, 2008, 2010, 2012, 2014])]
print even_years.head()
#here's how you reset the indexs
#inplace=True equates this df to the old one and resets the indexes (meaning you're not defining many df's)
#drop=True drops the old index column (which is otherwise saved and looks ugly)
even_years.reset_index(inplace=True, drop=True)
print even_years.head()
|
25,076 | 3e867d03ae3166cda8e02ea67011474116f26fbc | import unittest
import index
import boto3
import json
from index import NOT_IMPLEMENTED_PAYLOAD
class RecipeTest(unittest.TestCase):
token = ''
def setUp(self):
self.maxDiff = None
client = boto3.client('cognito-idp')
resp = client.initiate_auth(
ClientId='2lk7bjr0akm1ncuo8i8piqv33g',
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
"USERNAME": 'TestUsername',
"PASSWORD": 'TestPassword1'
}
)
self.token = resp['AuthenticationResult']['AccessToken']
return
def test_invalid_authorization(self):
print("Test - test_empty_event")
event = {
"resource": "/user",
"httpMethod": "GET",
"headers": {
"Authorization": ''
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertEqual(COGNITO_EXCEPTION_PAYLOAD, response)
def test_incorrect_recipe_resource(self):
print("Test - test_empty_event")
event = {
"resource": "/recipes/{recipeId}/ingredients/{ingredientId}",
"httpMethod": "GET",
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertEqual(NOT_IMPLEMENTED_PAYLOAD, response)
def test_incorrect_recipe_recipeid_method(self):
print("Test - test_valid_authorization")
event = {
"resource": "/recipes/{recipeId}",
"httpMethod": "PUT",
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertEqual(NOT_IMPLEMENTED_PAYLOAD, response)
def test_incorrect_recipe_recipeid_ingredients_method(self):
print("Test - test_incorrect_recipe_recipeid_ingredients_method")
event = {
"resource": "/recipes/{recipeId}/ingredients",
"httpMethod": "PUT",
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertEqual(NOT_IMPLEMENTED_PAYLOAD, response)
def test_recipe_recipeid_ingredients_GET(self):
print("Test - test_recipe_recipeid_ingredients_GET")
event = {
"resource": "/recipes/{recipeId}/ingredients",
"httpMethod": "GET",
"pathParameters": {
"recipeId": "40000"
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
response["body"] = json.loads(response["body"])
self.assertEqual(SUCCESS_RECIPE_INGREDIENT_PAYLOAD, response)
def test_recipe_recipeid_GET(self):
print("Test - test_recipe_recipeid_GET")
event = {
"resource": "/recipes/{recipeId}",
"httpMethod": "GET",
"pathParameters": {
"recipeId": "47821"
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
response["body"] = json.loads(response["body"])
response["body"]["summary"] = ''
self.assertEqual(SUCCESS_RECIPE_RECIPEID_PAYLOAD, response)
def test_recipe_GET_Failure(self):
print("Test - test_recipe_GET_Failure")
event = {
"resource": "/recipes",
"httpMethod": "GET",
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
response["body"] = json.loads(response["body"])
self.assertEqual(SUCCESS_RECIPE_PAYLOAD_FAILURE, response)
def test_incorrect_recipe_resource(self):
print("Test - test_incorrect_recipe_resource")
event = {
"resource": "/recipes",
"httpMethod": "PUT",
"queryStringParameters": {
'limit': '25',
'offset': '0',
'pantry_list': 'true',
'search': '',
'shopping_list': 'false',
'ww': '0'
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertEqual(NOT_IMPLEMENTED_PAYLOAD, response)
def test_recipe_GET_25_from_pantry(self):
print("Test - test_recipe_GET_25_from_pantry")
event = {
"resource": "/recipes",
"httpMethod": "GET",
"queryStringParameters": {
'limit': '25',
'offset': '0',
'pantry_list': 'true',
'search': '',
'shopping_list': 'false',
'ww': '0'
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertGreaterEqual(len(response["body"]), 25)
def test_recipe_GET_25_from_shoppinglist(self):
print("Test - test_recipe_GET_25_from_shoppinglist")
event = {
"resource": "/recipes",
"httpMethod": "GET",
"queryStringParameters": {
'limit': '25',
'offset': '0',
'pantry_list': 'false',
'search': '',
'shopping_list': 'true',
'ww': '0'
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertGreaterEqual(len(response["body"]), 25)
def test_recipe_GET_25_from_search(self):
print("Test - test_recipe_GET_25_from_search")
event = {
"resource": "/recipes",
"httpMethod": "GET",
"queryStringParameters": {
'limit': '25',
'offset': '0',
'pantry_list': 'true',
'search': 'apple',
'shopping_list': 'false',
'ww': '0'
},
"headers": {
"Authorization": self.token
}
}
context = {}
response = index.lambda_handler(event, context)
self.assertGreaterEqual(len(response["body"]), 25)
def test_recipe_weight_watchers(self):
    """GET /recipes filtered by a weight-watchers point range (3..5)."""
    print("Test - test_recipe_weight_watchers")
    event = {
        "resource": "/recipes",
        "httpMethod": "GET",
        "queryStringParameters": {
            'limit': '25',
            'offset': '0',
            'pantry_list': 'true',
            'search': '',
            'shopping_list': 'false',
            'ww': '5',
            'smallest_ww': '3'
        },
        "headers": {
            "Authorization": self.token
        }
    }
    context = {}
    response = index.lambda_handler(event, context)
    # BUG FIX: body is a JSON string; count parsed recipes, not characters.
    body = json.loads(response["body"])
    self.assertGreaterEqual(len(body), 25)
# ---- Shared expected-response fixtures for the recipe endpoint tests ----

# Expected 200 response for a recipe-ingredient lookup (alphabetised names).
SUCCESS_RECIPE_INGREDIENT_PAYLOAD = {
    'statusCode': 200,
    'headers': {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "*"
    },
    'body': [
        "butter",
        "fat-skimmed chicken broth",
        "pepper",
        "pork fat",
        "raspberry vinegar",
        "red onion",
        "salad oil",
        "salt",
        "sugar",
        "sweetened dried cranberries"
    ]
}

# Expected 200 response when fetching recipe id 47821 by id.
SUCCESS_RECIPE_RECIPEID_PAYLOAD = {
    'statusCode': 200,
    'headers': {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
    },
    'body': {
        'id': 47821,
        'name': 'Warm Cinnamon Apples',
        'cook_time': 20,
        'img_url': '47821-556x370.jpg',
        'servings': 6,
        'summary': '',
        'health_score': 0.0,
        'weight_watcher_points': 5,
        'vegetarian': True,
        'vegan': False,
        'gluten_free': True,
        'dairy_free': False,
        'healthy': False,
        'sustainable': False,
        'directions': [
            {'order': 1,
             'direction': 'Toss together first 4 ingredients in a large zip-top plastic bag, tossing to coat apples.'},
            {'order': 2,
             'direction': 'Cook apple mixture, 2 Tbsp. water, and 1 Tbsp. butter in a medium saucepan over medium heat, stirring occasionally, 8 to 10 minutes or until apples are tender.'}
        ],
        'ingredients': [
            {'id': 9003, 'amount': 2.0, 'amount_unit': 'pounds', 'name': 'apples', 'img_url': 'apple.jpg'},
            {'id': 19334, 'amount': 0.5, 'amount_unit': 'cups', 'name': 'brown sugar', 'img_url': 'light-brown-sugar.jpg'},
            {'id': 1001, 'amount': 1.0, 'amount_unit': 'Tbsp', 'name': 'butter', 'img_url': 'butter-sliced.jpg'},
            {'id': 1012010, 'amount': 1.0, 'amount_unit': 'teaspoon', 'name': 'ground cinnamon', 'img_url': 'cinnamon.jpg'},
            {'id': 2025, 'amount': 0.25, 'amount_unit': 'teaspoons', 'name': 'nutmeg', 'img_url': 'ground-nutmeg.jpg'},
            {'id': 14412, 'amount': 2.0, 'amount_unit': 'Tbsps', 'name': 'water', 'img_url': 'water.png'}
        ]
    }
}

# NOTE(review): despite the SUCCESS_ prefix this fixture is a 500 error
# response -- the name reads as "expected payload for the failure test".
SUCCESS_RECIPE_PAYLOAD_FAILURE = {
    'statusCode': 500,
    'headers': {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
    },
    'body': {'errorMsg': 'Failed to retrieve recipes.'}
}

# Same error body but carried on a 200 status code.
SUCCESS_RECIPE_PAYLOAD_FAILURE_2 = {
    'statusCode': 200,
    'headers': {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
    },
    'body': {'errorMsg': 'Failed to retrieve recipes.'}
}

# Expected 403 response when Cognito rejects the Authorization token.
COGNITO_EXCEPTION_PAYLOAD = {
    'statusCode': 403,
    'headers': {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "*"
    },
    "body": "{}"
}
|
25,077 | 5b088e1df724a2ba9f3217adcd78d17acb280ab3 | # map and filter
# lambda function
# def a(x1, x2, x3):
# return x1 + x2 + x3
# a = lambda x1, x2, x3: x1 + x2 + x3
# print(a(6, 7, 8))
# map(function, iterable_object)
# a = [2, 4, 6, 8, 10, 12, 14, 16]
# def increase_by_one(n):
# return n+1
# b = map(lambda n:n+1, a)
# print(list(b))
# namelist = ["ram", "shyam", "hari", "geeta", "harry"]
# # result = ["Ram", "Shyam", "Hari", "Geeta", "Harry"]
# result = list(map(lambda name:name.title(), namelist))
# print(result)
# emaillist = ["1-gmail.com", "2-gmail.com", "3-gmail.com", "4-gamil.com"]
# # res = ["1@gmail.com", "2@gmail.com", "3@gmail.com", ...]
# res = list(map(lambda email: email.replace("-", "@"), emaillist))
# print(res)
# filter(func, iterable_object)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
even = filter(lambda n: n % 2 == 0, a)
print(list(even))
emaillist = ["1@gmail.com", "2@gmail.com",
"3@gmail.com", "4@hotmail.com",
"5@yahoo.com", "6@gmail.com",
"7@yahoo.com",
]
res =filter(lambda email:email.endswith("gmail.com"), emaillist)
print(list(res)) |
25,078 | 573db43f3c132c7d46f5100e757bf5548641ad22 | # Nombre del programa: Prueba de primalidad.
def es_primo(numero):
    """Print 'Es primo' if *numero* is prime, otherwise 'No es primo'.

    FIX: the original counted every divisor in [1, numero] (O(n), no early
    exit). Trial division up to sqrt(n) prints exactly the same output for
    every integer input (including 0, 1 and negatives, which are not prime).
    """
    if numero < 2:
        print('No es primo')
        return
    for divisor in range(2, int(numero ** 0.5) + 1):
        if numero % divisor == 0:
            print('No es primo')
            return
    print('Es primo')
def run():
    """Read an integer from stdin and report whether it is prime."""
    numero = int(input('Ingrese un número: '))
    es_primo(numero)


if __name__ == '__main__':
    run()
|
25,079 | 5c36be244a176895cfc8fd4569ff97c66b62c570 | from . import *
import os
from distutils.dir_util import copy_tree
def init(args):
    """Create a new Veritable project by copying the bundled "base" template.

    Args:
        args: CLI argument list; at most one entry, an optional target
            sub-directory relative to the current working directory.
    """
    # checking args
    assert len(args) <= 1
    path = "" if len(args) == 0 else args[0]
    veritable_path = get_veritable_path()
    project_path = join(os.getcwd(), path)
    # FIX: distutils is deprecated (removed in Python 3.12). shutil.copytree
    # with dirs_exist_ok=True (3.8+) is the supported equivalent of copy_tree.
    import shutil
    shutil.copytree(join(veritable_path, "base"), project_path, dirs_exist_ok=True)
    print("Successfully Created Veritable Project!")
|
25,080 | 1d918379aeb7a6da5a7f68b0abcba376007aef1d | seqA = 'GAGCCTACTAACGGGAT'
seqB = 'CATCGTAATGACGGCCT'
def findHammingDist(seqA, seqB):
    """Number of positions at which two equal-length sequences differ.

    Sequences of unequal length yield 0 (no comparison is performed).
    """
    if len(seqA) != len(seqB):
        return 0
    return sum(1 for base_a, base_b in zip(seqA, seqB) if base_a != base_b)
print(findHammingDist(seqA,seqB))
|
25,081 | 197e015c1467f5250d9b1e5c535d9083c23709e5 | #前端视图
from flask import Blueprint
bbs = Blueprint('bbs',__name__)
@bbs.route('/')
def index():
    # Home page of the bbs blueprint; the returned literal is Chinese for
    # "home page" and is user-facing, so it is kept as-is.
    return '首页'
|
25,082 | ce4f143c35fb25fafba4c41d92e39deb4efc3b44 | #!/usr/bin/env python
from operator import itemgetter
import sys
# Hadoop-streaming stage (Python 2): re-derive max/min/mean over the four
# expression columns of each 16-field record and re-emit the full line.
for line in sys.stdin:
    data_mapped = line.strip().split("\t")
    if len(data_mapped) != 16:
        # Malformed record; skip this line
        continue
    # Unpack the record emitted by mapper.py
    idref, ident, gsm19023, gsd19024, gsd19025, gsd19026, genetitle, genesymbol, geneID, uniGenetitle, uniGenesymbol, uniGeneID, NucleotideTitle, maximo, minimo, media = data_mapped
    # Convert the string values to floats
    try:
        Float_gsm19023 = float(gsm19023)
        Float_gsd19024 = float(gsd19024)
        Float_gsd19025 = float(gsd19025)
        Float_gsd19026 = float(gsd19026)
    except ValueError:
        # If any field is not numeric, discard the line
        continue
    maximo = max(Float_gsm19023, Float_gsd19024, Float_gsd19025, Float_gsd19026)
    minimo = min(Float_gsm19023, Float_gsd19024, Float_gsd19025, Float_gsd19026)
    media = (Float_gsm19023 + Float_gsd19024 + Float_gsd19025 + Float_gsd19026) / 4.0
    # Python 2 print statement: emit the tab-separated 16-field record
    print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\t{13}\t{14}\t{15}".format(idref, ident, gsm19023, gsd19024, gsd19025, gsd19026, genetitle, genesymbol, geneID, uniGenetitle, uniGenesymbol, uniGeneID, NucleotideTitle, maximo, minimo, media)
|
25,083 | d3ae14a3978a83cbb65fdb7ae2d4d31db4e06842 | ALIGN_DIR = '/home/yoojin/repositories/AlignmentTool_v190813'
# Feature names extracted from MIDI-to-MIDI alignment.
MIDI_MIDI_FEATURE_LIST = ['velocity',
                          'original_duration',
                          'elongated_duration',
                          'interval']
# Feature names extracted from score(XML)-to-MIDI alignment.
XML_MIDI_FEATURE_LIST = ['beat_tempo',
                         'measure_tempo',
                         'velocity',
                         'original_duration',
                         'elongated_duration',
                         'onset_deviation']
# Working feature list (temporary superset used during development).
FEATURE_LIST_TMP = ['beat_tempo',
                    'measure_tempo',
                    'velocity',
                    'original_duration',
                    'elongated_duration',
                    'onset_timing',
                    'IOI']
|
25,084 | 4371793bdb677a50f663afebef0cb5bdf8f8c982 | import tempfile
import os
## @brief Simplify the task of creating and managing temporary files in a
# secure manner.
#
# Although python already provides functions for creating temporary files
# in nanscan we usually need only the filename and this is passed as a parameter
# to external applications such as convert or tesseract. Using those file names
# in this cases would still make the application vulnerable to race conditions
# so we need a temporary directory in which all such files are created.
# This class ensures this directory is created and one can savely manage temporary
# files.
class TemporaryFile:
    """Securely create and manage temporary files inside one private directory.

    Only file *names* are handed out (e.g. to external tools such as convert
    or tesseract); creating them inside a mkdtemp()-owned directory avoids
    the race conditions of predictable temp-file names.
    """

    # Lazily-created private directory that holds every temp file.
    directory = None

    @staticmethod
    def create(suffix=''):
        """Create a temporary file securely and return its path.

        The directory is (re)created on demand, so the user may remove it at
        'almost' any time without breaking the application.
        """
        if not TemporaryFile.directory or not os.path.exists(TemporaryFile.directory):
            TemporaryFile.directory = tempfile.mkdtemp()
        fd, name = tempfile.mkstemp(suffix=suffix, dir=TemporaryFile.directory)
        os.close(fd)
        return name

    @staticmethod
    def clear():
        """Remove all temporary files together with the temporary directory."""
        # BUG FIX: the original called shutils.rmtree -- a NameError, since
        # the module is named shutil and was never imported.
        if TemporaryFile.directory:
            import shutil
            shutil.rmtree(TemporaryFile.directory)

    @staticmethod
    def remove(path):
        """Remove the given temporary file."""
        os.unlink(path)
|
25,085 | 8850992b35cfe04187769035706c1b35e849b0b7 | import tkinter
def ende():
    # Quit callback: destroy the main window.
    main.destroy()


def farbwechsel():
    # Apply the colour selected via the radio-button menu to the frame.
    fr["bg"] = farbe.get()


def randwechsel():
    # Toggle the frame's border style from the check-button menu entry.
    if rand.get():
        fr["relief"] = "ridge"
    else:
        fr["relief"] = "flat"
main = tkinter.Tk()
# Target widget of the menu commands (German UI labels below are
# user-facing runtime strings and are kept as-is).
fr = tkinter.Frame(main, height=100, width=300,
                   bg="#FFFFFF", bd=10)
fr.pack()
# Create the entire menu bar
mBar = tkinter.Menu(main)
# Create the first menu object of the menu bar
mFile = tkinter.Menu(mBar)
# Create the entries of the first menu
mFile.add_command(label="Neu")
mFile.add_command(label="Laden")
mFile.add_command(label="Speichern")
mFile.add_separator()
mFile.add_command(label="Beenden", command=ende)
# Widget variables backing the radio-button / check-button menu entries
farbe = tkinter.StringVar()
farbe.set("#FFFFFF")
rand = tkinter.IntVar()
rand.set(0)
# Create the second menu object of the menu bar
mView = tkinter.Menu(mBar)
mView["tearoff"] = 0  # menu cannot be torn off
# Create the entries of the second menu
mView.add_radiobutton(label="Rot", variable=farbe,
                      value="#FF0000", underline=0, command=farbwechsel)
mView.add_radiobutton(label="Gelb", variable=farbe,
                      value="#FFFF00", underline=0, command=farbwechsel)
mView.add_radiobutton(label="Blau", variable=farbe,
                      value="#0000FF", underline=0, command=farbwechsel)
mView.add_radiobutton(label="Magenta", variable=farbe,
                      value="#FF00FF", underline=0, command=farbwechsel)
mView.add_separator()
mView.add_checkbutton(label="Rand sichtbar",
                      variable=rand, onvalue=1, offvalue=0, underline=5,
                      command=randwechsel)
# Add the first and second menu to the menu bar
mBar.add_cascade(label="Datei", menu=mFile)
mBar.add_cascade(label="Ansicht", menu=mView)
# Attach the whole menu bar to the window
main["menu"] = mBar
main.mainloop()
|
25,086 | 56377c9859f3f776d9af99737028b74c84fb3502 | import threading
from random import randint
import time
class Worker():
    """Tiny fixed-size thread pool; workers pull queued callables (FIFO)."""

    def __init__(self, noOfThreads):
        self.noOfThreads = noOfThreads
        self.threads = []
        self.tasks = []          # pending zero-argument callables, FIFO
        self.lock = threading.Lock()
        self.cv = threading.Condition(self.lock)
        self.runThreads = True   # cleared (under the lock) by stop()
        self.__create()

    def acquire_job(self):
        """Worker loop: wait for a task and run it outside the lock."""
        while self.runThreads:
            localTask = None
            with self.lock:
                # BUG FIX: wait in a predicate loop and re-check runThreads
                # under the lock; the original's bare wait() could miss the
                # stop() notification and also woke spuriously.
                while len(self.tasks) == 0 and self.runThreads:
                    self.cv.wait()
                if len(self.tasks) != 0:
                    localTask = self.tasks.pop(0)
            if localTask is not None:
                localTask()

    def start(self):
        """Start all worker threads."""
        for thread in self.threads:
            thread.start()

    def post(self, task):
        """Queue *task* and wake one worker. notify() cannot raise here
        because the condition's lock is held by the with-block."""
        with self.lock:
            self.tasks.append(task)
            self.cv.notify(1)

    def __create(self):
        # Build (but do not start) the requested number of worker threads.
        for _ in range(self.noOfThreads):
            self.threads.append(threading.Thread(target=self.acquire_job))
        print("added", self.noOfThreads, "threads")

    def stop(self):
        """Stop all workers.

        BUG FIX: the original only flipped the flag, so workers blocked in
        cv.wait() were never woken and the pool never terminated. Wake them
        all under the lock, then join the (started) threads.
        """
        with self.lock:
            self.runThreads = False
            self.cv.notify_all()
        for thread in self.threads:
            if thread.is_alive() and thread is not threading.current_thread():
                thread.join(timeout=5)
|
25,087 | eac8dadba99e51df25d45a2a66659bdeb62ec473 | # Need to increase recursion limit even when memoization is used if n is too large
# e.g. sys.setrecursionlimit(100000)
def fibonacci_recursive_memo(n, fib_cache={0: 0, 1: 1}):
    """Top-down (recursive) Fibonacci with memoization.

    Returns fib(n), or -1 for negative n. The mutable default is deliberate:
    the cache persists across calls. NOTE(review): the recursive calls do not
    forward *fib_cache*, so a caller-supplied cache is only consulted at the
    top level -- confirm this is intended.
    """
    if n < 0:
        return -1
    if n not in fib_cache:
        fib_cache[n] = fibonacci_recursive_memo(n-1) + fibonacci_recursive_memo(n-2)
    return fib_cache[n]
def fibonacci_recursive_tco(n, next_fib=1, summation=0):
    """Tail-recursive Fibonacci accumulator, O(n) calls.

    The pair (summation, next_fib) advances to (next_fib, summation +
    next_fib) on every step; after n steps *summation* holds fib(n).
    """
    if n == 0:
        return summation
    # Note the argument swap: the new next_fib is summation + next_fib,
    # while the old next_fib becomes the running sum.
    return fibonacci_recursive_tco(n - 1, summation + next_fib, next_fib)
def fibonacci_iterative_memo(n, fib_cache={0: 0, 1: 1}):
    """Bottom-up (iterative) Fibonacci with memoization, O(n) time.

    Returns fib(n), or -1 for negative n. The mutable default is deliberate:
    the cache persists across calls.

    FIX: negative n previously raised KeyError, while the recursive sibling
    returns -1; the two implementations now agree.
    """
    if n < 0:
        return -1
    for i in range(2, n+1):
        fib_cache[i] = fib_cache[i-1] + fib_cache[i-2]
    return fib_cache[n]
if __name__ == '__main__':
    # Smoke-test all three implementations on the same inputs.
    # NOTE: for much larger n the recursive variants need a raised
    # sys.setrecursionlimit (see the comment at the top of the file).
    # Iterative with memoization
    print('Iterative with memoization:')
    print(fibonacci_iterative_memo(7))
    print(fibonacci_iterative_memo(800))
    # Tail call optimization
    print('Recursive with TCO:')
    print(fibonacci_recursive_tco(7))
    print(fibonacci_recursive_tco(800))
    # Memoization
    print('Recursive with memoization:')
    print(fibonacci_recursive_memo(7))
    print(fibonacci_recursive_memo(800))
|
25,088 | 09e9cdba74df02df6229bf317e139a7829929c6f | import random
# Draw two independent random integers in [0, 9] (both bounds inclusive)
# and print them, one per line.
a = random.randint(0,9)
b = random.randint(0,9)
print(a)
print(b)
25,089 | 2b5d5e71cc19c4626f18b40b328c25a157f09a82 | #coding:utf8
"""
Created on 2015-01-26
@Author: jiangtaoran(jiangtaoran@ice-time.cn)
@Brief : 建筑物相关逻辑
"""
from utils import logger
from utils import utils
from datalib.data_loader import data_loader
def calc_consume_resource(building, heroes, technologys):
    """Resources consumed to construct/upgrade a building.

    Args:
        building[BuildingInfo]: the building being built/upgraded
        heroes[list(HeroInfo)]: participating heroes; their skills could
            reduce the cost (not implemented yet)
        technologys[list(TechnologyInfo)]: active interior technologies
    Returns:
        consumed resources as a tuple (money, food) of ints
    """
    next_level = building.level + 1
    # Config-table key is "<basic_id>_<next_level>"
    key = "%s_%s" % (building.basic_id, next_level)
    money = data_loader.BuildingLevelBasicInfo_dict[key].limitMoney
    food = data_loader.BuildingLevelBasicInfo_dict[key].limitFood
    # No interior technology affects resource consumption for now.
    # TODO hero skills are not designed yet.
    return (int(money), int(food))
def calc_consume_time(building, heroes, technologys, user):
    """Time consumed to construct/upgrade a building.

    Args:
        building[BuildingInfo]: the building being built/upgraded
        heroes[list(HeroInfo)]: stationed heroes; interior score reduces time
        technologys[list(TechnologyInfo)]: active non-combat technologies
        user: owner account; the VIP level may reduce the duration
    Returns:
        duration in seconds (never negative)
    """
    next_level = building.level + 1
    key = "%s_%s" % (building.basic_id, next_level)
    time = data_loader.BuildingLevelBasicInfo_dict[key].limitTime  # seconds
    total_reduce_time = 0
    # Interior-technology contribution
    for tech in technologys:
        attribute = data_loader.InteriorTechnologyBasicInfo_dict[
            tech.basic_id].interiorAttributeIncrease
        ratio = attribute.buildSpeed / 100.0
        total_reduce_time += utils.floor_to_int(time * ratio)
    # Stationed-hero contribution
    level = data_loader.BuildingLevelBasicInfo_dict[key].limitMonarchLevel
    for hero in heroes:
        if hero is None:
            continue
        ratio = _calc_time_reduce_by_hero(hero, level)
        total_reduce_time += utils.floor_to_int(time * ratio)
    # VIP contribution
    open_flags = get_flags()
    if "is_open_buildinglist" in open_flags:
        # Only applies once the build-queue feature is enabled (the VIP time
        # reduction ships together with the build queue).
        ratio = data_loader.VipBasicInfo_dict[user.vip_level].reduceBuildTimeRate / 100.0
        total_reduce_time += utils.floor_to_int(time * ratio)
    time -= total_reduce_time
    return max(0, utils.floor_to_int(time))
def _calc_time_reduce_by_hero(hero, level):
    """Fraction of build time saved by one hero joining the construction.

    Args:
        hero[HeroInfo]: the hero
        level[int]: monarch-level requirement, used only in the formula
    Returns:
        reduction ratio in [0, 1]
    """
    N = hero.interior_score
    SN = data_loader.MonarchLevelBasicInfo_dict[level].sn
    P = float(data_loader.OtherBasicInfo_dict["P_Building"].value)
    # Quadratic ramp up to the reference score SN; above SN a saturating
    # curve that approaches 0.33 as N grows.
    if N <= SN:
        return pow(float(N) / SN, 2) * P
    else:
        return (1 + (0.33 / P - 1) * (1 - float(SN) / N)) * P
def calc_money_capacity(basic_id, level, technologys = []):
    """Money storage capacity provided by the building, or 0 if it stores none.

    NOTE(review): the mutable default [] is shared across calls; it is never
    mutated here, but a None default would be safer.
    """
    key = "%d_%d" % (basic_id, level)
    if key in data_loader.BuildingStorageBasicInfo_dict:
        base = data_loader.BuildingStorageBasicInfo_dict[key].moneyCapacity
        # Technology bonus on top of the base capacity
        tech_basic_ids = [info.basic_id for info in technologys]
        addition_of_technology = calc_money_capacity_addition_of_technology(
            base, tech_basic_ids)
        logger.debug("money capacity[tech addition=%d]" % addition_of_technology)
        return base + addition_of_technology
    return 0
def calc_food_capacity(basic_id, level, technologys = []):
    """Food storage capacity provided by the building, or 0 if it stores none.

    NOTE(review): mutable default [] shared across calls (never mutated here).
    """
    key = "%d_%d" % (basic_id, level)
    if key in data_loader.BuildingStorageBasicInfo_dict:
        base = data_loader.BuildingStorageBasicInfo_dict[key].foodCapacity
        # Technology bonus on top of the base capacity
        tech_basic_ids = [info.basic_id for info in technologys]
        addition_of_technology = calc_food_capacity_addition_of_technology(
            base, tech_basic_ids)
        logger.debug("food capacity[tech addition=%d]" % addition_of_technology)
        return base + addition_of_technology
    return 0
def calc_soldier_capacity(basic_id, level, technologys = []):
    """Soldier housing capacity provided by the building, or 0 if none.

    NOTE(review): mutable default [] shared across calls (never mutated here).
    """
    key = "%d_%d" % (basic_id, level)
    if key in data_loader.BuildingStorageBasicInfo_dict:
        base = data_loader.BuildingStorageBasicInfo_dict[key].soldierCapacity
        # Technology bonus on top of the base capacity
        tech_basic_ids = [info.basic_id for info in technologys]
        addition_of_technology = calc_soldier_capacity_addition_of_technology(
            base, tech_basic_ids)
        logger.debug("soldier capacity[tech addition=%d]" % addition_of_technology)
        return base + addition_of_technology
    return 0
def calc_unlock_finish_soldier_technology(basic_id, level):
    """Soldier technologies that this building unlocks already researched.

    Args:
        basic_id[int]: building basic id (every soldier tech must belong to it)
        level[int]: building level
    Returns:
        list(id): basic ids of the matching, already-finished technologies
    """
    unlock = []
    all_tech = data_loader.SoldierTechnologyBasicInfo_dict
    for id in all_tech:
        # Config invariant: every soldier technology is gated by this building.
        assert all_tech[id].limitBuildingId == basic_id
        if all_tech[id].limitBuildingLevel == level and all_tech[id].isFinish:
            unlock.append(id)
    return unlock
def calc_money_capacity_addition_of_technology(capacity_base, tech_basic_ids):
    """Extra money capacity granted by the given interior technologies."""
    addition = 0
    for basic_id in tech_basic_ids:
        info = data_loader.InteriorTechnologyBasicInfo_dict[basic_id]
        # Percentage bonus per technology, truncated toward zero.
        ratio = info.interiorAttributeIncrease.moneyCapacity / 100.0
        addition += int(ratio * capacity_base)
    return addition
def calc_food_capacity_addition_of_technology(capacity_base, tech_basic_ids):
    """Extra food capacity granted by the given interior technologies."""
    addition = 0
    for id in tech_basic_ids:
        # Percentage bonus per technology, truncated toward zero.
        increase = (data_loader.InteriorTechnologyBasicInfo_dict[
            id].interiorAttributeIncrease.foodCapacity)
        increase = increase/ 100.0
        addition += (int)(increase * capacity_base)
    return addition
def calc_soldier_capacity_addition_of_technology(capacity_base, tech_basic_ids):
    """Extra soldier capacity granted by the given interior technologies."""
    addition = 0
    for id in tech_basic_ids:
        # Percentage bonus per technology, truncated toward zero.
        increase = (data_loader.InteriorTechnologyBasicInfo_dict[
            id].interiorAttributeIncrease.soldierCapacity)
        increase = increase/ 100.0
        addition += (int)(increase * capacity_base)
    return addition
def get_flags():
    """Names of all feature flags whose configured value equals 1."""
    return set(
        str(key)
        for key, value in data_loader.Flag_dict.items()
        if int(float(value.value)) == 1
    )
|
25,090 | e74a372970edcab3f473fdccc7ec14de2dbaa572 | #! /usr/bin/env python
# -*- coding=utf8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import grpc
import tensorflow as tf
import tensorflow_serving.apis.predict_pb2 as predict_pb2
import tensorflow_serving.apis.prediction_service_pb2_grpc as prediction_service_pb2_grpc
MODEL_NAME = "knet"
def _bytes_feature(value):
    # Wrap a list of byte strings as a tf.train.Feature.
    # NOTE(review): BytesList expects bytes; the str rowkeys passed below
    # suggest Python 2 usage -- confirm encoding before porting to Python 3.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def _int64_feature(value):
    # Wrap a list of ints as a tf.train.Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def _float_feature(value):
    # Wrap a list of floats as a tf.train.Feature.
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def run():
    """Send one hand-built tf.Example to the local TF-Serving 'knet' model
    over gRPC and print the raw prediction response."""
    channel = grpc.insecure_channel('localhost:9000')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = MODEL_NAME
    request.model_spec.signature_name = 'predicts'
    input_name = 'examples'
    # Fixed-size padded feature lists: 100 watched items / 200 targets,
    # with only the first slot holding a real rowkey.
    watched_rowkeys = ['' for i in range(100)]
    watched_rowkeys[0] = '8575b81e0d8430aj'
    rinfo1 = [10.0 for i in range(100)]
    rinfo2 = [10.0 for i in range(100)]
    is_video = [1 for i in range(100)]
    target_rowkeys = ['' for i in range(200)]
    target_rowkeys[0] = '8575b81e0d8430aj'
    example1 = tf.train.Example(
        features=tf.train.Features(
            feature={
                'watched_rowkeys': _bytes_feature(watched_rowkeys),
                'rinfo1': _float_feature(rinfo1),
                'rinfo2': _float_feature(rinfo2),
                'target_rowkeys': _bytes_feature(target_rowkeys),
                'num_targets': _int64_feature([3]),
                'is_video': _int64_feature(is_video)
            }
        )
    ).SerializeToString()
    examples = [example1]
    # The serialized Examples are shipped as a string tensor.
    request.inputs[input_name].CopyFrom(
        tf.contrib.util.make_tensor_proto(examples, dtype=tf.string))
    response = stub.Predict(request)
    print("Received: {}".format(response))
if __name__ == '__main__':
run()
|
25,091 | df19ef071c4ce03d096a64f39fa8843661ffb700 | # Copyright (c) 2012 Tuan Tran
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""This module is used for assisted calculations on E+ surfaces"""
# Wrote by Tuan Tran trantuan@hawaii.edu / tranhuuanhtuan@gmail.com
# School of Architecture, University of Hawaii at Manoa
####### The following code within the block credited by ActiveState Code
# Recipes code.activestate.com
## {{{ http://code.activestate.com/recipes/578276/ (r1)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import math
def area(poly):
    """Area of a polygon poly (list of 3-D vertices)."""
    if len(poly) < 3:  # not a plane - no area
        return 0
    # Sum the cross products over every edge, wrapping around at the end.
    total = [0, 0, 0]
    for idx, vi1 in enumerate(poly):
        vi2 = poly[(idx + 1) % len(poly)]
        prod = np.cross(vi1, vi2)
        total[0] += prod[0]
        total[1] += prod[1]
        total[2] += prod[2]
    # Project onto the unit normal of the polygon's plane.
    result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))
    return abs(result / 2)
#unit normal vector of plane defined by points a, b, and c
def unit_normal(a_pnt, b_pnt, c_pnt):
    """Unit normal vector of the plane through points a, b and c.

    Returns (0, 0, 0) for degenerate (collinear) input.

    FIX: the original divided by the magnitude *before* checking whether it
    was (near) zero, so degenerate input went through a 0/0 division
    (nan + RuntimeWarning under numpy scalars) before the fallback applied.
    The guard now runs first.
    """
    x_val = np.linalg.det(
        [[1, a_pnt[1], a_pnt[2]], [1, b_pnt[1], b_pnt[2]], [1, c_pnt[1],
                                                            c_pnt[2]]])
    y_val = np.linalg.det(
        [[a_pnt[0], 1, a_pnt[2]], [b_pnt[0], 1, b_pnt[2]], [c_pnt[0], 1,
                                                            c_pnt[2]]])
    z_val = np.linalg.det(
        [[a_pnt[0], a_pnt[1], 1], [b_pnt[0], b_pnt[1], 1], [c_pnt[0],
                                                            c_pnt[1], 1]])
    magnitude = (x_val**2 + y_val**2 + z_val**2)**.5
    if magnitude < 0.00000001:
        return (0, 0, 0)
    return (x_val/magnitude, y_val/magnitude, z_val/magnitude)
## end of http://code.activestate.com/recipes/578276/ }}}
# distance between two points
def dist(pnt1, pnt2):
    """Euclidean distance between two 3-D points."""
    dx = pnt2[0] - pnt1[0]
    dy = pnt2[1] - pnt1[1]
    dz = pnt2[2] - pnt1[2]
    return (dx * dx + dy * dy + dz * dz) ** 0.5
# width of a rectangular polygon
def width(poly):
    """Width of a rectangular polygon: the edge at vertex 0 with the
    smaller vertical (z) change; on a tie, the longer of the two edges."""
    num = len(poly) - 1
    edge_last = dist(poly[num], poly[0])
    edge_first = dist(poly[1], poly[0])
    dz_last = abs(poly[num][2] - poly[0][2])
    dz_first = abs(poly[1][2] - poly[0][2])
    if dz_last < dz_first:
        return edge_last
    if dz_last > dz_first:
        return edge_first
    return max(edge_last, edge_first)
# height of a polygon poly
def height(poly):
    """Height of a polygon poly"""
    # The edge at vertex 0 with the larger vertical (z) change is the
    # "height" edge; on a tie, take the shorter of the two edges.
    num = len(poly) - 1
    if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):
        return dist(poly[num], poly[0])
    elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):
        return dist(poly[1], poly[0])
    else:
        return min(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
# angle between two vectors
def angle2vecs(vec1, vec2):
    """Angle between two vectors, in degrees.

    A zero-length vector yields 0 degrees (cos forced to 1), as before.

    FIX: floating-point rounding can push the cosine just outside [-1, 1]
    for (anti)parallel vectors, making np.arccos return nan; clamp it.
    """
    # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b)
    dot = np.dot(vec1, vec2)
    vec1_modulus = np.sqrt((vec1*vec1).sum())
    vec2_modulus = np.sqrt((vec2*vec2).sum())
    if (vec1_modulus * vec2_modulus) == 0:
        cos_angle = 1
    else:
        cos_angle = dot / (vec1_modulus * vec2_modulus)
    cos_angle = min(1.0, max(-1.0, cos_angle))
    return math.degrees(np.arccos(cos_angle))
# orienation of a polygon poly
def azimuth(poly):
    """Azimuth of polygon poly: compass orientation of its normal, in
    degrees clockwise from north (0..360)."""
    num = len(poly) - 1
    vec = unit_normal(poly[0], poly[1], poly[num])
    vec_azi = np.array([vec[0], vec[1], 0])  # normal projected onto the X-Y plane
    vec_n = np.array([0, 1, 0])              # north reference
    # update by Santosh
    # angle2vecs gives the smallest angle between the vectors
    # so for a west wall angle2vecs will give 90
    # the following 'if' statement will make sure 270 is returned
    x_vector = vec_azi[0]
    if x_vector < 0:
        return 360 - angle2vecs(vec_azi, vec_n)
    else:
        return angle2vecs(vec_azi, vec_n)
def tilt(poly):
    """Tilt of polygon poly: angle in degrees between its normal and the
    vertical axis (0 = horizontal surface facing up)."""
    num = len(poly) - 1
    vec = unit_normal(poly[0], poly[1], poly[num])
    vec_alt = np.array([vec[0], vec[1], vec[2]])
    vec_z = np.array([0, 0, 1])  # vertical reference
    # return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh
    return angle2vecs(vec_alt, vec_z)
|
25,092 | d5a0b3cc6c6fde648c3b31c63ccc6ed9ae775495 | from iroha import Iroha, IrohaGrpc, IrohaCrypto
network = IrohaGrpc('localhost:50051')
def sign_query(query, private_key):
    """Sign *query* in place with *private_key*, send it over the shared
    gRPC channel, and return the raw query response."""
    IrohaCrypto.sign_query(query, private_key)
    data = network.send_query(query)
    return data
def get_account(account_id, creator_account_id, private_key):
    """Query Iroha for *account_id* on behalf of *creator_account_id*;
    returns the account_response payload."""
    iroha = Iroha(creator_account_id)
    query = iroha.query(
        'GetAccount',
        account_id=account_id
    )
    response = sign_query(query, private_key)
    data = response.account_response
    return data
def get_account_asset(account_id, creator_account_id, private_key):
    """Query Iroha for the asset balances of *account_id*; returns the
    list of account assets."""
    iroha = Iroha(creator_account_id)
    query = iroha.query(
        'GetAccountAssets',
        account_id=account_id
    )
    response = sign_query(query, private_key)
    data = response.account_assets_response.account_assets
    return data
def get_signatories(account_id, creator_account_id, private_key):
    """Query Iroha for the signatories of *account_id*; returns the whole
    query response (not unwrapped, unlike the other helpers)."""
    iroha = Iroha(creator_account_id)
    query = iroha.query(
        'GetSignatories',
        account_id=account_id
    )
    response = sign_query(query, private_key)
    return response
25,093 | 48f5660045d56dfef01bdd554cbaeb775d3804d8 | from dotenv import load_dotenv
# BUG FIX: join() and dirname() were used without ever being imported,
# so this script raised NameError at startup.
from os.path import dirname, join

dotenv_path = join(dirname(__file__), ".env")  # Path to .env file
load_dotenv(dotenv_path)
|
25,094 | deee92367eb44e23946f77199fddb9edca11fb7f | source = open('C:/Users/breno/Desktop/Homo_sapiens.GRCh38.91.gtf', 'r')
intermediate = open('C:/Users/breno/Desktop/intermediateParse.gtf', 'w')
# iterates through several lines by
# taking each line of the file and
# writing in the other file
# NOTE(review): prefer `with open(...)` context managers so both handles are
# closed even if an exception occurs mid-copy; the paths are machine-specific
# and would benefit from being parameterized.
for x in range(10000):
    text = source.readline()  # readline() returns '' past EOF, so short files are safe
    intermediate.write(text)
    #print(text)
# close the files
source.close()
intermediate.close()
# open the file with a
# df = read_gtf('C:/Users/breno/Desktop/intermediateParse.gtf')
# df = parse_gtf_and_expand_attributes('C:/Users/breno/Desktop/intermediateParse.gtf')
#attr standard of the files GTF
# seqname = df_genes['seqname'] --> 0
# source = df_genes['source'] --> 1
# feature = df_genes['feature'] translate in tables-->2
# start = df_genes['start'] -->3
# end = df_genes['end'] -->4
# score = df_genes['score'] -->5
# strand = df_genes['strand'] -->6
# frame = df_genes['frame'] -->7
# other attributes caracteristic of the file
# gene_id = df_genes['gene_id'].__array__() 8
# gene_version = df_genes['gene_version'].__array__() 9
# gene_name = df_genes['gene_name'].__array__() 10
# gene_source = df_genes['gene_source'].__array__() 11
# gene_biotype = df_genes['gene_biotype'].__array__() 12
# transcript_id = df_genes['transcript_id'].__array__() 13
# transcript_version = df_genes['transcript_version'].__array__() 14
# transcript_name = df_genes['transcript_name'].__array__() 15
# transcript_source = df_genes['transcript_source'].__array__() 16
# transcript_biotype = df_genes['transcript_biotype'].__array__() 17
# tag = df_genes['tag'].__array__() 18
# transcript_support_level = df_genes['transcript_support_level'].__array__() 19
# exon_number = df_genes['exon_number'].__array__() 20
# exon_id = df_genes['exon_id'].__array__() 21
# exon_version = df_genes['exon_version'].__array__() 22
# ccds_id = df_genes['ccds_id'].__array__() 23
# protein_id = df_genes['protein_id'].__array__() 24
# protein_version = df_genes['protein_version'].__array__() 25
'''
genes = df_genes[df_genes["feature"] == "gene"]
exons = df_genes[df_genes["feature"] == "exon"]
introns = df_genes[df_genes["feature"] == "intron"]
CDSs = df_genes[df_genes["feature"] == "CDS"]
#['gene_id',
# 'gene_version',
# 'gene_name',
# 'gene_source',
# 'gene_biotype',
# 'transcript_id',
# 'transcript_version',
# 'transcript_name',
# 'transcript_source',
# 'transcript_biotype',
# 'tag',
# 'transcript_support_level',
# 'exon_number',
# 'exon_id',
# 'exon_version',
# 'ccds_id',
# 'protein_id',
# 'protein_version'
''' |
25,095 | a36684cf3f7cc675615e47ed2a84afc4ef5f8db2 | import pygame as pg
from math import *
# 400x400 window; C_BWRGBYCM is the colour palette indexed as
# black, white, red, green, blue, yellow, cyan, magenta.
width,height,C_BWRGBYCM=400,400,((0,0,0),(255,255,255),(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255),(255,0,255))
window=pg.display.set_mode((width, height))


def event_check():
    # Pump the event queue each frame; quit on window close or ESC.
    pg.display.update()
    for event in pg.event.get():
        if event.type==pg.QUIT:quit()
        if event.type==pg.KEYDOWN:
            if event.key==pg.K_ESCAPE:quit()


# line(a, b, colour_index, width): draw segment a -> b in a palette colour.
line = lambda a,b,color,width:pg.draw.line(window,C_BWRGBYCM[color],(int(a[0]),int(a[1])),(int(b[0]),int(b[1])),width)
# maper: linear rescale of vari from [minV, maxV] into [minO, maxO].
maper = lambda vari,minV,maxV,minO,maxO:(vari-minV)/(maxV-minV)*(maxO-minO)+minO
val_bet=40  # half-range of the plotted y window
c=0  # NOTE(review): unused
lis = []
# One sample point per pixel column, initialised on the x axis.
for i in range(-200,200):
    lis.append([i+200,200])
while 1:
    event_check()
    window.fill(C_BWRGBYCM[0])
    # Axes and guide lines
    line((200,0), (200,400), 2, 1)
    line((0,200), (400,200), 2, 1)
    line((100,0), (100,400), 4, 1)
    line((300,0), (300,400), 4, 1)
    # Re-evaluate the plotted function for every column; the mouse position
    # feeds the log base/argument interactively.
    for i in range(len(lis)):
        x=maper(lis[i][0],0,400,-val_bet,val_bet)
        try:lis[i][1]=log(int(maper(pg.mouse.get_pos()[0],0,400,0,10)),pg.mouse.get_pos()[1]+x) # enter your equation here...
        except:pass  # domain errors (log of non-positive, bad base) keep the old value
    # Map function values back into pixel coordinates (y axis flipped).
    for i in lis:
        i[1]=int(maper(i[1], -val_bet, val_bet, 400, 0))
    # Connect consecutive samples with green segments.
    for no,i in enumerate(lis):
        try:
            if no!=0:line(i, lis[no-1], 3, 1)
        except:pass
25,096 | 82745572021ea66cb02c547f3409fac2929c3a27 | n = int(input())
lst = []
# Read the triangle: row i holds i+1 integers.
for i in range(n):
    lst.append(list(map(int, input().split())))
# Bottom-up DP: fold each row into the one above so that every cell ends up
# holding the best path sum through itself down to the base.
for i in range(n - 2, -1, -1):
    for j in range(len(lst[i])):
        lst[i][j] = lst[i][j] + max(lst[i + 1][j], lst[i + 1][j + 1])
# The apex now holds the maximum path sum.
print(lst[0][0])
|
25,097 | 9f75b3420ae2d4e33ae25566d3018713e69d3285 | # model settings
# ViT-L/14 image classifier used as a pure feature extractor
# (no neck, no head).
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='VisionTransformer',
        arch='large',
        img_size=518,
        patch_size=14,
        # LayerScale initial value; presumably stabilizes deep ViT training
        # -- confirm against the backbone implementation.
        layer_scale_init_value=1e-5,
    ),
    neck=None,
    head=None)
data_preprocessor = dict(
    # RGB format normalization parameters (standard ImageNet mean/std,
    # expressed in the 0-255 pixel range)
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)
|
25,098 | 68e97a50f5036b2b05742a38a88d7ec07f3cd46a | lines = [x.strip() for x in open('input').read().splitlines()]
def solve(lines):
    """Return the one free seat ID whose neighbours (id-1, id+1) are taken.

    Each boarding pass is 7 row characters (F/B) then 3 column characters
    (L/R). The manual bisection in the original is exactly binary decoding
    with B/R = 1 and F/L = 0, and seat id = row * 8 + col, so the pass is
    read directly as a 10-bit number here.

    FIX: the original tested membership against dict.values() -- an O(n)
    scan per candidate inside the seat loop; a set makes lookups O(1).
    """
    table = str.maketrans('FBLR', '0101')
    seat_ids = {int(line.translate(table), 2) for line in lines}
    # Scan ascending over rows 1..126 (first and last row excluded, as in
    # the original) and return the first gap flanked by occupied seats.
    for cid in range(8, 127 * 8):
        if cid not in seat_ids and cid - 1 in seat_ids and cid + 1 in seat_ids:
            return cid
print(solve(lines))
|
25,099 | 685a7d07548bc951f4e1d4ef2415482fb19936ee | import os
import sys
from typing import cast
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # noqa
import launch
from launch.actions import IncludeLaunchDescription, ExecuteProcess
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import ThisLaunchFileDir
import launch_ros.actions
#Only content in this function is run when ros2 launch is run. It can also be used to run other system programs.
def generate_launch_description():
    """Build the LaunchDescription used by `ros2 launch`: a joystick node
    plus the robot's drive-command node."""
    #### Required Code #########################################################
    ld = launch.LaunchDescription()
    # Disable tty emulation (on by default).
    ld.add_action(launch.actions.SetLaunchConfiguration('emulate_tty', 'false'))
    # Wire up stdout from processes
    # NOTE(review): on_output is defined but never registered as an event
    # handler, so process output is not actually forwarded through it.
    def on_output(event: launch.Event) -> None:
        for line in event.text.decode().splitlines():
            print('[{}] {}'.format(
                cast(launch.events.process.ProcessIO, event).process_name, line))
    ############################################################################
    ld.add_action(launch_ros.actions.Node(
        package='joy', node_executable='joy_node', output='screen'
    ))
    ld.add_action(launch_ros.actions.Node(
        package='pirobot_base', executable='DriveCommand', name="drive_command1", output='screen'
    ))
    return ld
if __name__ == '__main__':
    # Allow running this launch file directly (outside `ros2 launch`).
    ls = launch.LaunchService(argv=sys.argv[1:])
    ls.include_launch_description(generate_launch_description())
    sys.exit(ls.run())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.