blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43008fcf3f9717797c9c87f53e21bb7da22f9c96 | fb006584fd88b7d1b542320b930d1249e4214daf | /app/tables/equipe9/documento/documento_modelo.py | 4772a28f1eb3c510232758303eade1fc449f4144 | [] | no_license | jucimarjr/zelda | 51d3b84f17ea7538412edf6429334049ca9de9d6 | 2a854c73742262e244b603ef1cc3f41ae4d4ec67 | refs/heads/master | 2021-01-20T12:16:51.316714 | 2017-12-12T18:18:57 | 2017-12-12T18:18:57 | 101,705,786 | 4 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | from ....cursor import db
from ..processo.processo_modelo import Processo
from app import app
class Documento:
    """Domain object for one row of the 'documento' table.

    When constructed with an id, the record is loaded through the project's
    ``db`` cursor module; otherwise an empty, unsaved instance is created.
    """

    def __init__(self, documento_id=None):
        # Start from a blank, unsaved state.
        self.__documento_id = None
        self.__processo = None
        self.documento_desc = None
        self.documento_tipo = None
        self.documento_caminho = None
        if documento_id is not None:
            rows = db.get_documento(documento_id)
            if rows:
                row = rows[0]
                self.__documento_id = documento_id
                self.__processo = Processo(processo_id = row['processo_id'])
                self.documento_desc = row['descricao']
                self.documento_tipo = row['tipo']
                self.documento_caminho = row['caminho']

    def get_id(self):
        """Return the database id, or None if the document was never saved."""
        return self.__documento_id

    def get_processo(self):
        """Return the associated Processo object (or None)."""
        return self.__processo

    def set_processo(self, processo):
        """Attach a Processo, but only if it has already been persisted."""
        if processo.get_id() is not None:
            self.__processo = processo

    def get_documento_tipo(self):
        """Return the document type."""
        return self.documento_tipo

    def get_documento_desc(self):
        """Return the document description."""
        return self.documento_desc

    def get_documento_caminho(self):
        """Return the document file path."""
        return self.documento_caminho

    def deleta(self):
        """Delete the row from the database; no-op for unsaved instances."""
        if self.get_id() is not None:
            db.deleta_documento(self.get_id())

    def salva(self):
        """Insert the row if new (capturing the generated id), else update it."""
        if self.get_id() is None:
            self.__documento_id = db.cadastra_documento(self)
        else:
            db.edita_documento(self)
| [
"ldsm.snf16@uea.edu.br"
] | ldsm.snf16@uea.edu.br |
57b76ded4c046a47dade461d6824ad9153bdf8d2 | d8a3e33a356436be3a30eefbba8b80a38dfd1333 | /2018 Herbstsemester/Machine Learning/Machine Learning - Programming/Übung 01/Assignment_1/assignment1/features.py | 0d17d9e5494dc3dd1b4c919a17121eebbe6fc499 | [] | no_license | M4RZ1997/University | cb59e85e7751a9eeb862b694ee995ddd1998081b | c7a8bfd6941f86f843124a2970dad1d02d8f0352 | refs/heads/master | 2023-08-15T12:28:25.212987 | 2023-05-16T20:16:54 | 2023-05-16T20:16:54 | 221,927,703 | 6 | 0 | null | 2023-07-20T14:58:47 | 2019-11-15T13:17:19 | C++ | UTF-8 | Python | false | false | 1,617 | py | from skimage.feature import hog
import numpy as np
def hog_features(X):
    """
    Extract HOG descriptors (with a leading intercept term) for every image.

    Args:
        X: Data matrix of shape [num_train, 577]; column 0 is an intercept
           term and the remaining 576 columns are a flattened 24x24 image.

    Returns:
        hogs: array of shape [num_train, num_hog_features + 1], where each
              row is ``[1, hog_descriptor...]``.
    """
    descriptors = []
    for row in X:
        image = row[1:].reshape(24, 24)
        descriptor = hog(image, orientations=8, pixels_per_cell=(8, 8), block_norm='L2-Hys',
                         cells_per_block=(1, 1), visualize=False, multichannel=False)
        # Prepend the intercept term expected by the downstream classifier.
        descriptors.append(np.insert(descriptor, 0, 1))
    return np.stack(descriptors, axis=0)
"44601347+M4RZ1997@users.noreply.github.com"
] | 44601347+M4RZ1997@users.noreply.github.com |
8782c7c8bb56b0118c1f5cd84e030e48d23be1d5 | 6a0abe2f4172f680415d83f1946baaf85e5711b7 | /aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/DescribeAccessControlListsRequest.py | d63e244ab470b758431d5842a789854ee82f0557 | [
"Apache-2.0"
] | permissive | brw123/aliyun-openapi-python-sdk | 905556b268cbe4398f0f57b48422b713d9e89a51 | 8c77db6fd6503343cffa3c86fcb9d11770a64ca2 | refs/heads/master | 2020-05-01T16:26:49.291948 | 2019-03-21T09:11:55 | 2019-03-21T09:11:55 | 177,572,187 | 1 | 0 | null | 2019-03-25T11:21:59 | 2019-03-25T11:21:59 | null | UTF-8 | Python | false | false | 2,788 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeAccessControlListsRequest(RpcRequest):
	"""RPC request for SLB ``DescribeAccessControlLists`` (API version 2014-05-15).

	Auto-generated accessor boilerplate: each get_*/set_* pair reads or
	writes a single query parameter of the underlying RpcRequest.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Slb', '2014-05-15', 'DescribeAccessControlLists','slb')

	# --- credential / ownership parameters ---
	def get_access_key_id(self):
		return self.get_query_params().get('access_key_id')

	def set_access_key_id(self,access_key_id):
		self.add_query_param('access_key_id',access_key_id)

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	# --- ACL filter parameters ---
	def get_AclName(self):
		return self.get_query_params().get('AclName')

	def set_AclName(self,AclName):
		self.add_query_param('AclName',AclName)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	def get_AddressIPVersion(self):
		return self.get_query_params().get('AddressIPVersion')

	def set_AddressIPVersion(self,AddressIPVersion):
		self.add_query_param('AddressIPVersion',AddressIPVersion)

	# --- pagination / tagging parameters ---
	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')

	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)

	def get_Tags(self):
		return self.get_query_params().get('Tags')

	def set_Tags(self,Tags):
		self.add_query_param('Tags',Tags)

	def get_PageSize(self):
		return self.get_query_params().get('PageSize')

	def set_PageSize(self,PageSize):
		self.add_query_param('PageSize',PageSize)
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
29d6a9c763c36c451e48ff3a95546812691604dc | 90abe44f78ffd66cc91c91815639ece016d4d76c | /bin/nwispy-script.py | 9395e43e07abdfbf7a2da6aa3fb8a4666c277254 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | jlant/nwispy | 631abe9874f3c197553b9ce6340c7bdaf7caaa1e | 1c9e7be134aa0933375324959641efa19b3cb4aa | refs/heads/master | 2020-04-19T07:01:10.325793 | 2019-01-28T21:07:27 | 2019-01-28T21:07:27 | 168,035,046 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from context import nwispy
# call main function of nwispy/nwispy.py
nwispy.nwispy.main() | [
"jlant@usgs.gov"
] | jlant@usgs.gov |
486318df6aa913f5aefffee90c5c6156380673f3 | d9ab75c5d530b5318bf274d4e0893137b970827e | /python/nova/GeneratedPy/GenBattleEvent.py | d8e966db842638845e96b69eacfb783039b5e1fb | [] | no_license | kgilpin/nova | 6a1413aa0a72415d9557e80e37ada20df40311e3 | bcff0fefc87ad5a4c72e9e01ee7747006ce0c14a | refs/heads/master | 2023-01-21T08:26:14.951384 | 2020-12-02T18:57:27 | 2020-12-02T18:57:27 | 317,962,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,504 | py | '''
GenBattleEvent.py
Tue Mar 29 11:44:59 2005
Generated by MiddleKit.
'''
# MK attribute caches for setFoo() methods
_gameAttr = None
_executionTimeAttr = None
_eventCounterAttr = None
_playerAttr = None
_statusAttr = None
_starAttr = None
_attackerAttr = None
_defenderAttr = None
_victorAttr = None
_numAttackingShipsAttr = None
_numDefendingShipsAttr = None
_numShipsLostAttr = None
import types
from mx import DateTime
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))))
from nova.Event import Event
del sys.path[0]
from types import InstanceType, LongType
from MiddleKit.Run.SQLObjectStore import ObjRefError
class GenBattleEvent(Event):
	"""Generated MiddleKit model class for a BattleEvent (Python 2 code).

	Do not edit by hand: this file is emitted by the MiddleKit code
	generator. Object-reference attributes (star, attacker, defender,
	victor) are stored as long obj-refs and lazily dereferenced through
	``self._mk_store`` on first access; integer attributes are plain ints.
	"""

	def __init__(self):
		Event.__init__(self)
		# All attributes start unset; construct() fills the required ones.
		self._star = None
		self._attacker = None
		self._defender = None
		self._victor = None
		self._numAttackingShips = None
		self._numDefendingShips = None
		self._numShipsLost = None

	def construct(self, game, executionTime, player, star, attacker, defender, victor, numAttackingShips, numDefendingShips, numShipsLost):
		# Construct the BattleEvent with all the required attributes that do not have default values
		self.setGame( game )
		self.setExecutionTime( executionTime )
		self.setPlayer( player )
		self.setStar( star )
		self.setAttacker( attacker )
		self.setDefender( defender )
		self.setVictor( victor )
		self.setNumAttackingShips( numAttackingShips )
		self.setNumDefendingShips( numDefendingShips )
		self.setNumShipsLost( numShipsLost )

	def star(self):
		# Lazily swap a stored obj-ref (a long) for the fetched Star object.
		# Writing through __dict__ avoids re-triggering any attribute machinery.
		if self._star is not None and type(self._star) is not InstanceType:
			try:
				self.__dict__['_star'] = self._mk_store.fetchObjRef(self._star)
			except ObjRefError, e:
				self.__dict__['_star'] = self.objRefErrorWasRaised(e, 'BattleEvent', 'star')
		return self._star

	def setStar(self, value):
		assert value is not None
		# A LongType value is an un-dereferenced obj-ref and is accepted as-is;
		# otherwise the value must be an instance of nova.Star.Star.
		if value is not None and type(value) is not LongType:
			if not type(value) is InstanceType:
				raise TypeError, 'expecting InstanceType, but got value %r of type %r instead' % (value, type(value))
			from nova.Star import Star
			if not isinstance(value, Star):
				raise TypeError, 'expecting Star, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._star
		self._star = value

		# MiddleKit machinery: record the change so the store can persist it.
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _starAttr
			if _starAttr is None:
				# Cache the klass attribute lookup at module level (0 = "never register").
				_starAttr = self.klass().lookupAttr('star')
				if not _starAttr.shouldRegisterChanges():
					_starAttr = 0
			if _starAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['star'] = _starAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def attacker(self):
		# Same lazy obj-ref dereference pattern as star().
		if self._attacker is not None and type(self._attacker) is not InstanceType:
			try:
				self.__dict__['_attacker'] = self._mk_store.fetchObjRef(self._attacker)
			except ObjRefError, e:
				self.__dict__['_attacker'] = self.objRefErrorWasRaised(e, 'BattleEvent', 'attacker')
		return self._attacker

	def setAttacker(self, value):
		assert value is not None
		if value is not None and type(value) is not LongType:
			if not type(value) is InstanceType:
				raise TypeError, 'expecting InstanceType, but got value %r of type %r instead' % (value, type(value))
			from nova.Player import Player
			if not isinstance(value, Player):
				raise TypeError, 'expecting Player, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._attacker
		self._attacker = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _attackerAttr
			if _attackerAttr is None:
				_attackerAttr = self.klass().lookupAttr('attacker')
				if not _attackerAttr.shouldRegisterChanges():
					_attackerAttr = 0
			if _attackerAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['attacker'] = _attackerAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def defender(self):
		if self._defender is not None and type(self._defender) is not InstanceType:
			try:
				self.__dict__['_defender'] = self._mk_store.fetchObjRef(self._defender)
			except ObjRefError, e:
				self.__dict__['_defender'] = self.objRefErrorWasRaised(e, 'BattleEvent', 'defender')
		return self._defender

	def setDefender(self, value):
		assert value is not None
		if value is not None and type(value) is not LongType:
			if not type(value) is InstanceType:
				raise TypeError, 'expecting InstanceType, but got value %r of type %r instead' % (value, type(value))
			from nova.Player import Player
			if not isinstance(value, Player):
				raise TypeError, 'expecting Player, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._defender
		self._defender = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _defenderAttr
			if _defenderAttr is None:
				_defenderAttr = self.klass().lookupAttr('defender')
				if not _defenderAttr.shouldRegisterChanges():
					_defenderAttr = 0
			if _defenderAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['defender'] = _defenderAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def victor(self):
		if self._victor is not None and type(self._victor) is not InstanceType:
			try:
				self.__dict__['_victor'] = self._mk_store.fetchObjRef(self._victor)
			except ObjRefError, e:
				self.__dict__['_victor'] = self.objRefErrorWasRaised(e, 'BattleEvent', 'victor')
		return self._victor

	def setVictor(self, value):
		assert value is not None
		if value is not None and type(value) is not LongType:
			if not type(value) is InstanceType:
				raise TypeError, 'expecting InstanceType, but got value %r of type %r instead' % (value, type(value))
			from nova.Player import Player
			if not isinstance(value, Player):
				raise TypeError, 'expecting Player, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._victor
		self._victor = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _victorAttr
			if _victorAttr is None:
				_victorAttr = self.klass().lookupAttr('victor')
				if not _victorAttr.shouldRegisterChanges():
					_victorAttr = 0
			if _victorAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['victor'] = _victorAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def numAttackingShips(self):
		return self._numAttackingShips

	def setNumAttackingShips(self, value):
		assert value is not None
		# Plain int attribute: longs are narrowed to int, other types rejected.
		if value is not None:
			if type(value) is types.LongType:
				value = int(value)
			elif type(value) is not types.IntType:
				raise TypeError, 'expecting int type, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._numAttackingShips
		self._numAttackingShips = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _numAttackingShipsAttr
			if _numAttackingShipsAttr is None:
				_numAttackingShipsAttr = self.klass().lookupAttr('numAttackingShips')
				if not _numAttackingShipsAttr.shouldRegisterChanges():
					_numAttackingShipsAttr = 0
			if _numAttackingShipsAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['numAttackingShips'] = _numAttackingShipsAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def numDefendingShips(self):
		return self._numDefendingShips

	def setNumDefendingShips(self, value):
		assert value is not None
		if value is not None:
			if type(value) is types.LongType:
				value = int(value)
			elif type(value) is not types.IntType:
				raise TypeError, 'expecting int type, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._numDefendingShips
		self._numDefendingShips = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _numDefendingShipsAttr
			if _numDefendingShipsAttr is None:
				_numDefendingShipsAttr = self.klass().lookupAttr('numDefendingShips')
				if not _numDefendingShipsAttr.shouldRegisterChanges():
					_numDefendingShipsAttr = 0
			if _numDefendingShipsAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['numDefendingShips'] = _numDefendingShipsAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)

	def numShipsLost(self):
		return self._numShipsLost

	def setNumShipsLost(self, value):
		assert value is not None
		if value is not None:
			if type(value) is types.LongType:
				value = int(value)
			elif type(value) is not types.IntType:
				raise TypeError, 'expecting int type, but got value %r of type %r instead' % (value, type(value))

		# set the attribute
		origValue = self._numShipsLost
		self._numShipsLost = value

		# MiddleKit machinery
		self._mk_changed = 1 # @@ original semantics, but I think this should be under "if not self._mk_initing..."
		if not self._mk_initing and self._mk_serialNum>0 and value is not origValue:
			global _numShipsLostAttr
			if _numShipsLostAttr is None:
				_numShipsLostAttr = self.klass().lookupAttr('numShipsLost')
				if not _numShipsLostAttr.shouldRegisterChanges():
					_numShipsLostAttr = 0
			if _numShipsLostAttr:
				# Record that it has been changed
				if self._mk_changedAttrs is None:
					self._mk_changedAttrs = {} # maps name to attribute
				self._mk_changedAttrs['numShipsLost'] = _numShipsLostAttr # changedAttrs is a set
				# Tell ObjectStore it happened
				self._mk_store.objectChanged(self)
| [
"kgilpin@gmail.com"
] | kgilpin@gmail.com |
d4cbfc2426de53c78c4c7822c92566a5082ccb05 | 9a851652385c40b0e01503a34b3a1bc4421023c0 | /python_education/decorators/decorators2.py | bc162056de3bbcdb2809178373570a454c52a9f9 | [] | no_license | jordano1/python | c1e7a4871eabf0140e2a48b1091eeaa4a202097d | c5fa751dc2a115a38324e57d116e469c4244e143 | refs/heads/master | 2023-02-08T08:49:56.420232 | 2020-12-31T22:19:14 | 2020-12-31T22:19:14 | 297,722,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # my_list = []
# for char in 'hello':
# my_list.append(char)
# print(my_list)
def my_decorator(func):
    """Wrap *func* so a banner of asterisks is printed around each call.

    Improvements over the original:
    - uses functools.wraps so the wrapped function keeps its __name__ and
      __doc__ (the original wrapper reported itself as 'wrap_func');
    - forwards *args/**kwargs and the return value, so the decorator also
      works on functions that take arguments or return something
      (backward compatible with zero-argument functions like hello()).
    """
    import functools

    @functools.wraps(func)
    def wrap_func(*args, **kwargs):
        print('******')
        result = func(*args, **kwargs)
        print('******')
        return result
    return wrap_func


@my_decorator
def hello():
    print('hello')


hello()
"jordanolson1@gmail.com"
] | jordanolson1@gmail.com |
7bec075f31ca3ddb81d54d0863a5042e62638f05 | 95bc3ca917a8c7805aefa6d58ea9908f8d2bf6ae | /model.py | 50a60d45601c7a729f7de999622d69ecdcb140f0 | [
"MIT"
] | permissive | BenoitCorsini/mallows-trees | 8a188fd2f643ad5891a203c071ab805a4e43729b | af240bb83d2abf62109d8c70f59a85055bd50833 | refs/heads/main | 2023-03-06T03:03:47.770511 | 2021-02-15T15:17:49 | 2021-02-15T15:17:49 | 338,856,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,746 | py | import numpy as np
import numpy.random as npr
class MallowsPermutation(object):
    def __init__(self, n, q, seed=None):
        '''
        Create a random Mallows permutation with parameters 'n' and 'q'.
        '''
        # n: size of the permutation; q: Mallows parameter (q=0 -> identity,
        # q "infinite" -> reversal; see q_is_inf). seed feeds numpy's RNG.
        self.n = n
        self.q = q
        self.seed = seed
        npr.seed(self.seed)
        # Human-readable parameter tag, e.g. for filenames/labels.
        self.params = 'n={};q={}'.format(self.n,self.q)
        self.__permutation__()
    def __permutation__(self):
        '''
        Generates the permutation.
        It first creates a vector 'G' such that 0 <= G[i] <= n-1-i and G[i] ~ Geometric(q).
        This vector is then used to create the permutation by choosing the image of i as the G[i]-th available value.
        '''
        if self.q == 0:
            # q = 0 degenerates to the identity permutation.
            self.permutation = np.arange(self.n)
        elif self.q_is_inf():
            # q = "infinity" degenerates to the reversal permutation.
            self.permutation = np.arange(self.n-1, -1, -1)
        else:
            U = npr.rand(self.n)
            N = np.arange(self.n, 0, -1)  # number of values still available at each step
            if self.q == 1:
                # q = 1 is the uniform case: G[i] uniform on {0, ..., N[i]-1}.
                G = np.floor(N*U).astype(int)
            else:
                # Inverse-CDF sampling of a truncated geometric with ratio q.
                G = np.floor(np.log(1 - U*(1 - self.q**N))/np.log(self.q)).astype(int)
            available_values = np.arange(self.n)
            self.permutation = np.zeros(self.n)
            for i in range(self.n):
                # Pick the G[i]-th smallest value not used yet, then remove it.
                self.permutation[i] = available_values[G[i]]
                available_values = np.delete(available_values, G[i])
            # argsort turns the sampled sequence into an int permutation array
            # (inverting the construction so indexing gives the image of i).
            self.permutation = np.argsort(self.permutation).astype(int)
    def __getitem__(self, key):
        '''
        Calling 'MallowsPermutation()[i]' returns the image of i through the permutation.
        '''
        return self.permutation[key]
    def __iter__(self):
        '''
        Iterating through 'MallowsPermutation()' goes through the images of 0, 1, 2, ...
        '''
        return iter(self.permutation)
    def q_is_inf(self):
        '''
        Checks if 'q' is infinity.
        '''
        # Treat q as infinite once it exceeds max(n^2, 1e10) -- beyond that
        # the reversal permutation is (numerically) certain.
        return self.q > max(self.n**2, 1e10)
class MallowsTree(MallowsPermutation):
    def __init__(self, n, q, seed=None):
        '''
        Creates a random Mallows tree: a binary search tree obtained by
        inserting the entries of a Mallows permutation one after the other.
        '''
        super().__init__(n, q, seed)
        self.__tree__()

    def __tree__(self):
        '''
        Builds the binary search tree of the permutation.
        Nodes are encoded as strings of '0'/'1' (left/right steps from the
        root). 'int_to_node' maps values to nodes, 'node_to_int' maps nodes
        back to values, and 'E' collects the (parent, child) edges.
        '''
        self.int_to_node = {}
        self.node_to_int = {}
        self.E = []
        for value in self:
            node = ''
            parent = -1
            # Walk down from the root until an empty position is found.
            while node in self.node_to_int:
                parent = self.node_to_int[node]
                node += '1' if value > parent else '0'
            self.int_to_node[value] = node
            self.node_to_int[node] = value
            if parent >= 0:
                self.E.append((parent, value))

    def __call__(self, key):
        '''
        Calling 'MallowsTree()(i)' returns the node string of i.
        '''
        return self.int_to_node[key]

    def height(self):
        '''
        Height of the tree, i.e. the largest depth over all nodes.
        '''
        return max(len(node) for node in self.int_to_node.values())

    def depth(self, v):
        '''
        Depth of the value 'v' in the tree (the root has depth 0).
        '''
        return len(self.int_to_node[v])

    def parent(self, v):
        '''
        Value stored at the parent of 'v'; raises for the root.
        '''
        node = self.int_to_node[v]
        if not node:
            raise Exception('The root has no parent')
        return self.node_to_int[node[:-1]]

    def path_from_root(self, v, edge_list=False):
        '''
        Path from the root down to 'v', either as a list of values or,
        when 'edge_list' is True, as the corresponding list of edges.
        '''
        node_v = self.int_to_node[v]
        path = [self.node_to_int[node_v[:i]] for i in range(len(node_v) + 1)]
        if edge_list:
            return list(zip(path[:-1], path[1:]))
        return path

    @staticmethod
    def c_star():
        '''
        Computes c*, solution to clog(2e/c)=1 with c>=2.
        This corresponds to the ratio height of random binary search tree over log(n).
        '''
        return 4.311070407001
"noreply@github.com"
] | BenoitCorsini.noreply@github.com |
42068b4e7b1f2e463a43462214166bfc9ec4875a | f22ece4fff5555de54c53f46fdbf0892eb04cf1c | /Models/resnet.py | a791cdada6edab32aedeb9859e39b2d9e4de0afe | [] | no_license | tiqq111/Face-Alignment | 7b8ec3456798d71c2749fb90fab672e1682e1717 | 76831af679b344837bb11e4c7bc6be5d71178e4c | refs/heads/master | 2022-03-13T10:05:57.201007 | 2019-11-15T12:46:50 | 2019-11-15T12:46:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,345 | py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet34', 'resnet101']
model_urls = {
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block of two 3x3 convs with BN/ReLU and a skip connection.

    If `downsample` is given, it is applied to the input to match the shape
    of the residual branch before the addition.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Submodules are created in the same order as the reference
        # implementation so state_dict layouts stay compatible.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    The output has `planes * 4` channels; when the input shape differs,
    `downsample` must project the shortcut accordingly.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Same submodule order as the reference implementation (state_dict
        # compatible).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Generic ResNet backbone (He et al., 2015).

    Args:
        block: residual block class (e.g. BasicBlock or Bottleneck); must
            expose an integer class attribute ``expansion``.
        layers: four ints, the number of blocks in each residual stage.
        num_classes: output size of the final fully-connected layer.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Global average pooling. For the canonical 224x224 input this is
        # identical to the original AvgPool2d(7, stride=1), but it also
        # accepts other input resolutions (the fixed 7x7 window only worked
        # for inputs whose final feature map is exactly 7x7). Parameter-free,
        # so checkpoints remain compatible.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style initialization for convolutions; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the shortcut when spatial size or channel count changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Run stem, residual stages, pooling and classifier.

        Returns logits of shape (N, num_classes).
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(pretrained=False, modelpath='./models', **kwargs):
    """Construct a ResNet-18 model.

    Bug fix: ResNet-18 is defined with BasicBlock (two 3x3 convs per block),
    not Bottleneck -- the original code built a non-standard Bottleneck stack
    with a 4x-wider head. Stage depths [2, 2, 2, 2] are unchanged.

    Note: no pretrained-weight URL is wired up for this variant, so the
    `pretrained` flag is accepted for signature consistency but ignored
    (as in the original, where the loading code was commented out).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model
def resnet34(pretrained=False, modelpath='./models', **kwargs):
    """Construct a ResNet-34 model, optionally loading ImageNet weights."""
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Download (or reuse a cached copy of) the reference checkpoint.
        state = model_zoo.load_url(model_urls['resnet34'], model_dir=modelpath)
        model.load_state_dict(state)
    return model
def resnet101(pretrained=False, modelpath='./models', **kwargs):
    """Construct a ResNet-101 model, optionally loading ImageNet weights."""
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # Download (or reuse a cached copy of) the reference checkpoint.
        state = model_zoo.load_url(model_urls['resnet101'], model_dir=modelpath)
        model.load_state_dict(state)
    return model
"9982733@qq.com"
] | 9982733@qq.com |
457217febe5da4d0f157e8d403170e9569c532f6 | 4aeea5475bf0d07a7b63b9ef39cc894d5e00e2d7 | /Greedy/베이비진 게임.py | 403862fe74e4731a4a673b18c27ed0fa593ed6ce | [] | no_license | seeker1207/Algorithm-Test | a16295a0693b7df4f98496c152b1329be59cd3ef | 802cda567f36937f563d4acaf24974a27ec11059 | refs/heads/master | 2023-04-17T23:14:58.120939 | 2023-04-10T00:47:24 | 2023-04-10T00:47:24 | 233,829,081 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | def is_run(input_list):
for j in range(0, len(input_list)-2):
if input_list[j] >= 1 and input_list[j+1] >= 1 and input_list[j+2] >= 1:
return True
return False
def is_triplet(input_list):
    """Return True if any card value occurs at least three times (a triplet).

    `input_list` is a count-per-value histogram (index = card value).
    """
    return any(count >= 3 for count in input_list)
# Baby-gin game driver: for each test case, cards are dealt alternately to
# player 1 (even positions) and player 2 (odd positions); the first player
# whose histogram contains a run or a triplet wins. Reads from stdin.
for i in range(int(input())):
    num_list = map(int, input().split())
    # Count-per-value histograms for the two players (card values 0..9).
    player1 = [0]*10
    player2 = [0]*10
    winner = ''
    for idx, num in enumerate(num_list):
        if (idx+1) % 2:
            # Even 0-based index -> card goes to player 1.
            player1[num] += 1
            if idx >= 2:
                # A win is only possible once a player holds >= 2 cards;
                # checking from idx 2 onward is a cheap (harmless) guard.
                if is_run(player1) or is_triplet(player1):
                    winner = 'player1'
                    break
        else:
            player2[num] += 1
            if idx >= 2:
                if is_run(player2) or is_triplet(player2):
                    winner = 'player2'
                    break
    # Output format: "#<case> 1" / "#<case> 2" for a winner, "#<case> 0" for none.
    if winner == 'player1':
        print(f'#{i+1} 1')
    elif winner == 'player2':
        print(f'#{i+1} 2')
    else:
        print(f'#{i+1} 0')
"noreply@github.com"
] | seeker1207.noreply@github.com |
94e88112d08132d9d2ecb7f026a2c346bbb6b7ad | 04ce9939a13dab2dd306d06f2c420e074de87a03 | /SiamDW_LT/test/utils/analyse_results.py | 85dfd46fa80b689d1740953da914300369a55281 | [
"MIT"
] | permissive | cy-sohn/VOT2019 | d0ae7a083dc96eb2a1bd6f9340dbf36b1583cfc7 | eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36 | refs/heads/master | 2022-03-03T07:43:10.778201 | 2019-10-23T06:32:24 | 2019-10-23T06:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,143 | py | import os
import numpy as np
import os.path as osp
# Sequence names of the supported long-term tracking benchmarks, keyed by
# benchmark id ('18LT' = VOT2018-LT, '19LT' = VOT2019-LT).
video_keys = dict()
video_keys['18LT'] = ['ballet', 'bicycle', 'bike1', 'bird1', 'car1', 'car3', 'car6', 'car8', 'car9', 'car16',
                      'carchase', 'cat1', 'cat2', 'dragon', 'following', 'freestyle', 'group1', 'group2', 'group3',
                      'liverRun', 'longboard', 'nissan', 'person2', 'person4', 'person5', 'person7', 'person14',
                      'person17', 'person19', 'person20', 'rollerman', 'skiing', 'tightrope', 'uav1', 'yamaha']
video_keys['19LT'] = ['ballet','bicycle','bike1','bird1','boat','bull',
                      'car1','car3','car6','car8','car9','car16','carchase','cat1',
                      'cat2','deer','dog','dragon','f1','following','freesbiedog','freestyle',
                      'group1','group2','group3','helicopter','horseride','kitesurfing',
                      'liverRun','longboard','nissan','parachute','person2','person4',
                      'person5','person7','person14','person17','person19','person20',
                      'rollerman','sitcom','skiing','sup','tightrope','uav1','volkswagen',
                      'warmup','wingsuit','yamaha']
# Input/output directories, resolved relative to this file's location.
pwd = osp.dirname(__file__)
result_dir = osp.join(pwd, "../tracking_results")
save_dir = osp.join(pwd, "../analyse_results")
def read_recordtxt(path):
    """Parse a record file of comma-separated floats.

    Each non-empty line "a,b,..." becomes a list of floats; rows are
    returned in file order.
    """
    data = []
    with open(path, "r") as f:
        for line in f:
            # FIX: strip() is robust to '\n', '\r\n' and stray spaces --
            # the old manual split("\n") left '\r' behind on Windows files.
            line = line.strip()
            if not line:
                # FIX: a trailing blank line used to crash with float('').
                continue
            data.append([float(x) for x in line.split(",")])
    return data
def split(record_data, thr, gt_thr):
    """Count records whose prediction agrees with the ground truth.

    A record [score, gt, ...] agrees when both values fall on the same
    side of their thresholds: score >= thr with gt >= gt_thr, or
    score <= thr with gt <= gt_thr.
    """
    def agrees(item):
        pred, gt = item[0], item[1]
        both_pos = pred >= thr and gt >= gt_thr
        both_neg = pred <= thr and gt <= gt_thr
        return both_pos or both_neg

    return sum(1 for item in record_data if agrees(item))
def split_neg(record_data, thr, gt_thr):
    """Accuracy counts on ground-truth negatives.

    Returns (hits, total): `total` records have gt <= gt_thr, of which
    `hits` were also predicted negative (score <= thr).
    """
    gt_negatives = [item for item in record_data if item[1] <= gt_thr]
    hits = sum(1 for item in gt_negatives if item[0] <= thr)
    return hits, len(gt_negatives)
def split_pos(record_data, thr, gt_thr):
    """Accuracy counts on ground-truth positives.

    Returns (hits, total): `total` records have gt >= gt_thr, of which
    `hits` were also predicted positive (score >= thr).
    """
    gt_positives = [item for item in record_data if item[1] >= gt_thr]
    hits = sum(1 for item in gt_positives if item[0] >= thr)
    return hits, len(gt_positives)
def get_pre_rec(pos, neg, pos_gt, neg_gt):
    """Precision/recall of a predicted-positive set against ground truth.

    `pos` and `pos_gt` are sets; precision = |pos & pos_gt| / |pos| and
    recall = |pos & pos_gt| / |pos_gt|, each 0 when the denominator set
    is empty. Note: `neg` and `neg_gt` are accepted for interface
    symmetry but are not used in the computation.
    """
    overlap = len(pos & pos_gt)
    precision = float(overlap) / len(pos) if len(pos) > 0 else 0
    recall = float(overlap) / len(pos_gt) if len(pos_gt) > 0 else 0
    return precision, recall
def draw(arrx, arry, path, sub_name):
    """Scatter-plot precision (arrx) against recall (arry), annotating
    points with their thresholds, and save as <path>/<sub_name>.png.
    """
    import matplotlib.pyplot as plt
    # FIX: removed `from scipy.interpolate import spline` -- it was never
    # used, and `spline` was dropped from SciPy >= 1.3, so the import
    # itself raised ImportError on modern installs.
    threshold = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    T = arry
    power = arrx
    plt.gca().set_ylabel('precision')
    plt.gca().set_xlabel('recall')
    plt.scatter(T, power)
    # FIX: guard against inputs shorter than the fixed 9-entry threshold
    # list -- the old loop indexed T[i]/power[i] unconditionally and
    # raised IndexError on shorter arrays.
    for i in range(min(len(threshold), len(T), len(power))):
        plt.annotate(threshold[i], xy=(T[i], power[i]), xytext=(T[i] + 0.001, power[i] + 0.001))
    plt.savefig(os.path.join(path, "{}.png".format(sub_name)))
    plt.clf()
def calculate(record_data, gt_thr=0.7):
    """Agreement ratio (see split) at each threshold 0.05, 0.10, ..., 0.95.

    Returns one ratio per threshold; raises ZeroDivisionError on empty
    record_data, matching the original behaviour.
    """
    n_records = float(len(record_data))
    thresholds = np.arange(0.05, 1, 0.05).tolist()
    return [split(record_data, thr, gt_thr) / n_records for thr in thresholds]
def calculate_neg(record_data, gt_thr=0.7):
    """Per-threshold accuracy on ground-truth positives and negatives.

    Returns a list of [pos_accuracy, neg_accuracy] pairs, one for each
    threshold in 0.05, 0.10, ..., 0.95.
    """
    precision = []
    threshold = np.arange(0.05, 1, 0.05).tolist()
    for thr in threshold:
        cnt, total = split_neg(record_data, thr, gt_thr)
        # FIX: guard the empty-class case -- the original divided by zero
        # whenever the data contained no ground-truth negatives.
        pre_neg = float(cnt) / float(total) if total else 0.0
        cnt, total = split_pos(record_data, thr, gt_thr)
        # Same guard for the no-ground-truth-positives case.
        pre_pos = float(cnt) / float(total) if total else 0.0
        precision.append([pre_pos, pre_neg])
    return precision
def analyse_record(draw_pic=False):
    """Aggregate every video's record file for each result and print
    per-threshold precision statistics.

    NOTE(review): `draw_pic` is never used in this function -- confirm
    whether the plotting path (see draw()) was meant to be wired in.
    """
    # (box_iou, gt_iou)
    tracker = "testiounet"
    results = ["res50_rpnbox_000"]
    dataset = '18LT'
    rpnbox_pos = False
    for result in results:
        type = result.split("_")[-2]  # NOTE(review): shadows builtin `type`
        # NOTE(review): hard-coded, user-specific absolute path.
        save_pic_path = "/data/home/v-had/Tmsrasia_MSM/v-had/pre_rec/{}".format(result)
        if not os.path.exists(save_pic_path): os.mkdir(save_pic_path)
        record_dir = osp.join(result_dir, tracker, result)
        save_t_dir = osp.join(save_dir, tracker)
        if not osp.exists(save_t_dir): os.mkdir(save_t_dir)
        save_t_r_dir = osp.join(save_t_dir, result)
        if not osp.exists(save_t_r_dir): os.mkdir(save_t_r_dir)
        # Concatenate every video's rows into one flat record list.
        record_data = []
        for video in video_keys[dataset]:
            record_path = osp.join(record_dir, "{}_record.txt".format(video))
            record_datav = read_recordtxt(record_path)
            # pos_pre, pos_rec, neg_pre, neg_rec = calculate(record_datav, save_pic_path)
            # print("done")
            # while True: continue
            # print("For video: {}".format(video))
            # threshold = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
            # for thr, pos_pre, pos_rec, neg_pre, neg_rec in zip(threshold, pos_pre, pos_rec, neg_pre, neg_rec):
            #     print("    For thr: {}".format(thr))
            #     print("        pos_pre: {:.4f}, pos_rec: {:.4f}".format(pos_pre, pos_rec))
            #     print("        neg_pre: {:.4f}, neg_rec: {:.4f}".format(neg_pre, neg_rec))
            record_data += record_datav
        print("For result: {}".format(result))
        threshold = np.arange(0.05, 1, 0.05).tolist()
        # Pick the precision metric according to the result-name suffix.
        if type == 'fixed01':
            precision = calculate(record_data, gt_thr=0.7) # []
        elif type == 'fixedgap':
            precision = calculate(record_data, gt_thr=0.5)
        elif type == 'rpnbox':
            gt = [0.1, 0.3, 0.5]
            if rpnbox_pos:
                precisions = [calculate(record_data, gt_thr=x) for x in gt]
            else:
                precisions = [calculate_neg(record_data, gt_thr=x) for x in gt]
        else:
            raise ValueError('Unsupported Type.')
        if type == 'rpnbox':
            for precision, gtt in zip(precisions, gt):
                precision = np.array(precision)
                print("*--gt_thr: {}".format(gtt))
                sum = 0  # NOTE(review): shadows builtin `sum`
                for i in range(len(threshold)):
                    sum += precision[i]
                    print("*----threshold: {}, precision: {}".format(threshold[i], precision[i]))
                # NOTE(review): despite the name, this averages the LAST 10
                # thresholds (>= 0.5), not 5 -- confirm which was intended.
                top5_mean = (precision[-1] + precision[-2] + precision[-3] + precision[-4] + precision[-5]
                             + precision[-6] + precision[-7] + precision[-8] + precision[-9] + precision[-10]) / 10.
                print("*----precision mean: {}".format(sum / len(threshold)))
                print("*---->=0.5 precision mean: {}".format(top5_mean))
        else:
            sum = 0  # NOTE(review): shadows builtin `sum`
            for i in range(len(threshold)):
                sum += precision[i]
                print("*threshold: {}, precision: {}".format(threshold[i], precision[i]))
            top5_mean = (precision[-1] + precision[-2] + precision[-3] + precision[-4] + precision[-5]) / 5.
            # NOTE(review): threshold has 19 entries, so dividing by 9.
            # does NOT give the mean -- looks like a leftover from an
            # earlier 9-threshold list; verify.
            print("*precision mean: {}".format(sum / 9.))
            print("*>=0.5 precision mean: {}".format(top5_mean))
# Script entry point: run the full result analysis when executed directly.
if __name__ == '__main__':
    analyse_record()
| [
"henry.hw.peng@gmail.com"
] | henry.hw.peng@gmail.com |
352263ea15b834a57908183b227037d2162a2b54 | 773e5f7a307b322e82a37fb4f9199e93532798e2 | /parrotsec/wsgi.py | e798fc70c1d8fb099a53688a1a9cf06dd0f582d5 | [] | no_license | mark-ndonye/parrotsec-clone | 157ae697fd02b981c34f585a3cea7c4a98d03974 | 0c45d2fe3268852d97eb305be77f822804cdf4a3 | refs/heads/master | 2022-12-08T11:25:52.294237 | 2020-08-25T16:52:25 | 2020-08-25T16:52:25 | 290,271,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for parrotsec project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parrotsec.settings')
# Module-level WSGI callable picked up by servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"rainey@localhost.localdomain"
] | rainey@localhost.localdomain |
660fb803c45459f81ec12dded0ca2bcdc1611bde | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_016/ch31_2020_03_30_19_43_47_008102.py | d7542220ac3217770604bf2f6d5102dfb4b58bc3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | def eh_primo(x):
r = x%2
return r
exercicio = True
# NOTE(review): `r` is never defined at module scope -- it is local to
# eh_primo, which is never called -- so this loop raises NameError at
# runtime. Presumably `r = eh_primo(<some x>)` was intended; verify.
# Also note that when r == 0 the loop never flips `exercicio`, so it
# would print 'True' forever.
while exercicio:
    if r == 0:
        print('True')
    else:
        exercicio = False
| [
"you@example.com"
] | you@example.com |
aece84afd74ef9555f501e9d59b03d5c1195a33a | b961c648475fd3803181ee2cd9ec53fef374708a | /baoand模块/p02.py | 9e9b9844170df19b36080f27a85a7d03a7d70d07 | [] | no_license | weininianqing/rr | 8c59eaabf8234a864cfe6d6213f7d57a8725fc67 | c804139d7fac19d31d41b87c1238c73cab291ea6 | refs/heads/master | 2022-03-08T08:53:32.922339 | 2019-11-14T09:21:17 | 2019-11-14T09:21:17 | 197,883,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | import p01
# Build a student via the p01 module and exercise its instance API.
stu = p01.student("小佳佳",20)
stu.sayhello()
p01.say() | [
"740056981@qq.com"
] | 740056981@qq.com |
aa05fee6767f8d2b64701b1265fa4f2b83d792da | 927c86941a56b77ee3ba3ef0483aa3da209a8d61 | /case/run_All_Case.py | 56c30fb46ade4e12a4499bae2b80c00e1d3e02e1 | [] | no_license | GaoQD/BasicAutomationFramework | bde8cb5bc4b5ae69c2addbe154494058b02ed746 | e35aa18faa0f2fadc8739bbbc0f383cab17523dd | refs/heads/master | 2020-03-24T22:35:26.887411 | 2018-08-28T02:50:36 | 2018-08-28T02:50:36 | 143,092,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | '''
-*- coding: utf-8 -*-
@Author : Admin
@Time : 18-7-25 下午3:57
@Software: PyCharm Community Edition
@File : run_All_Case.py
'''
import unittest
import time
import HTMLTestRunner
import common.common
if __name__ == '__main__':
    # BUG FIX: the format was "%Y%m%M%H%M%S" -- it repeated the minutes
    # field (%M) where the day of month (%d) was clearly intended, so
    # report names collided across days. Use year-month-day + time.
    now_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    suite = unittest.TestSuite()
    # Collect every test_*.py case under the current directory.
    all_cases = unittest.defaultTestLoader.discover('.', 'test_*.py')
    for case in all_cases:
        suite.addTest(case)
    fp = open('..//report//' + now_time + '.html', 'wb')
    runner = HTMLTestRunner.HTMLTestRunner(
        stream=fp,
        title='Test MeiTu',
        description='Test Case Run Result')
    runner.run(suite)
    # FIX: close the report file so it is fully flushed before the
    # e-mail step that follows.
    fp.close()
common.sendEmail.send_email(now_time) | [
"lixinghui@example.com"
] | lixinghui@example.com |
28239ffc539f84c6b04063f2b2eb51b1ae3a3bf7 | 69012a2437d4aa4ee5655376c068c2f8288ddebc | /event_management/models/eventtype.py | 1c49d09866be6f48f7c5cf67955116f1194e4e83 | [] | no_license | sreelakshmipm21/MyProjectOdoo | 957860da4c7b22d39858fbf185383d341458403b | fa0b4b6ad608a9e82a97222ed48cfaeb6748ccc2 | refs/heads/master | 2023-08-08T03:46:05.876095 | 2021-09-22T11:21:41 | 2021-09-22T11:21:41 | 409,162,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from odoo import fields, models
class EventTypes(models.Model):
    """Master-data model listing the available event types."""

    _name = "event.type"
    _description = "Event Types"

    # BUG FIX: the label keyword for Odoo fields is lowercase `string`;
    # the original passed `String=...`, which Odoo silently ignores, so
    # views fell back to the raw field names as labels.
    name = fields.Char(string='Name')      # display name of the event type
    code = fields.Char(string='Code')      # short internal code
    image = fields.Binary(string='Image')  # illustrative image/icon
| [
"sreelakshmipm2104@gmail.com"
] | sreelakshmipm2104@gmail.com |
4668a42fea619ea81fa4b864ff73b0174570782d | 32c99d9077bf5b553baa50554d41f5cf22441e0d | /MISPELL/MISPELL.py | 0ae4d10f13ab68b4ec8cd87016229ba7b48dc506 | [] | no_license | free-lunch/algospot-solving | ed9af366f6383ebc2715aec44c496fd4765446f0 | 156cdc1cbb3607552904a55bba1771c9fb0f6ec9 | refs/heads/master | 2020-12-11T09:05:11.182329 | 2016-08-30T09:12:10 | 2016-08-30T09:12:10 | 59,334,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | import sys
if __name__ == "__main__":
    # NOTE: Python 2 code (xrange, print statement) -- do not run under py3.
    rl = lambda : sys.stdin.readline()
    d = []
    # First stdin line: number of cases. Each case line is "n word";
    # the answer drops the n-th character of word (1-indexed).
    for _ in xrange(int(rl())):
        input = rl().split()  # NOTE(review): shadows the builtin `input`
        n, str = int(input[0]), input[1]  # NOTE(review): shadows builtin `str`
        d.append(str[:n-1]+str[n:])
    # Print "#<case> <answer>" for every case, in order.
    for i, v in enumerate(d):
        print i+1, v
| [
"jjh783@gmail.com"
] | jjh783@gmail.com |
c978f3e1dd8e4c792e995c5c3dd5c03e9e1d01f8 | bdda26e6380fe9b4de71638426be316afbf20309 | /main.py | 5cc9ac5b007db75deb6e77509e4862f310a33444 | [] | no_license | nellbag/cyoa | f9546ba24038476a823386904f087d3b02204b33 | c1b1713b68cd2fde88e35141c716b31bac9886d9 | refs/heads/master | 2022-06-14T16:39:28.596947 | 2020-05-01T10:06:56 | 2020-05-01T10:06:56 | 259,860,626 | 1 | 0 | null | 2020-05-01T10:01:48 | 2020-04-29T07:53:09 | null | UTF-8 | Python | false | false | 9,901 | py |
##
## Author: Neil MacPhail ## Started: 28/04/2020
##
## Description:
## Choose your own adventure game, welll at least
## an attempt at it as I am a NOOB.
##
## See references.txt for ideas/resources used
##
import time
import sys
import random
global hp  # NOTE(review): `global` at module scope is a no-op -- hp is already global
# World map: each location lists its display name, exits (direction ->
# location key), ground items, optional NPC, and narration text.
locations = {
    'jungle': {
        'name': 'doon the Jungle',
        'north': 'main_st',
        'south': 'glasgow_rd',
        'items': ['iphone', 'wallet'],
        'npc_name': '',
        'npc_text': '',
        'desc': "Everything seems creepily quiet.\nThe trees don't seem to be moving at all.\nThis is spooky as fuck.",
        'text': 'You see a path to the north leading to the\nMain St and a path to the south leading to\nAuld Glasgow Rd.'},
    'main_st': {
        'name': 'up the Main St',
        'east': 'top_glass',
        'south': 'jungle',
        'items': [],
        'npc_name': 'that burd',
        'npc_text': "In the distance you see a wumin shuffling towards you...\nShit! It's that burd!",
        'desc': "All the street lights are out, probably just a power cut\nor the young team have smashed them all. Wee bams.",
        'text': 'To the east you can head towards Top Glass.\nTo the south you can go back doon the Jungle.'},
    'glasgow_rd': {
        'name': 'auld Glasgow Rd',
        'east': 'spur_tunnel',
        'west': 'hoose',
        'north': 'jungle',
        'items': [],
        'npc_name': "",
        'npc_text': "",
        'desc': "",
        'text': ""}
}
# Enemy stats; `attack` is a pool of damage values to pick from.
enemies = {'that burd':
               {'description': 'That psycho you winched up paps.',
                'contents': None,
                'hp': 18,
                'attack': [3, 7, 4, 5, 1, 9, 3]}
           }
#random.choice(items['that burd']['attack'])
#set player details and stats to begin game
player = ''
bird = ''
hp = 100
inventory = []
current_location = locations['jungle']
directions = ['north', 'south', 'east', 'west']
keep_going = True
loc_count = -1  # move counter; -1 so the first loop iteration makes it 0
def delay_print_slow(string):
    """Type out *string* one character at a time, pausing 0.15 s per char."""
    write = sys.stdout.write
    flush = sys.stdout.flush
    for ch in string:
        write(ch)
        flush()
        time.sleep(0.15)
def delay_print(string):
    """Type out *string* at the normal narration speed (0.10 s per char)."""
    pause = 0.10
    for ch in string:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(pause)
def delay_print_fast(string):
    """Near-instant typing effect used for the ASCII banner (0.5 ms/char)."""
    out = sys.stdout
    for ch in string:
        out.write(ch)
        out.flush()
        time.sleep(0.0005)
def whitey():
    """Past the 4th move there is a 15% chance the player spews.

    A spew costs 3 HP (module global `hp`) and returns the narration
    text; otherwise an empty string is returned. The die is rolled on
    every call, matching the original behaviour.
    """
    global hp
    roll = random.randrange(100)
    if loc_count <= 4 or roll < 85:
        return ''
    hp = hp - 3
    return 'You begin to get hot and sweaty.\nYou start to feel dizzy.\nSuddenly you spew. It is an absolute honkin mess.\nHP now: ' + str(hp) + '\n'
#intro and name getting
delay_print_fast("|----------------------------------------------------------------|\n")
delay_print_fast("|8888888888888888888888888888888888888888888888888888888888888888|\n")
delay_print_fast("|88|--------------------------------------------------------- |88|\n")
delay_print_fast("|88| .___________. __ __ _______ |88|\n")
delay_print_fast("|88| | || | | | | ____| |88|\n")
delay_print_fast("|88| `---| |----`| |__| | | |__ |88|\n")
delay_print_fast("|88| | | | __ | | __| |88|\n")
delay_print_fast("|88| | | | | | | | |____ |88|\n")
delay_print_fast("|88| |__| |__| |__| |_______| |88|\n")
delay_print_fast("|88| |88|\n")
delay_print_fast("|88| ____ ____ ____ ____ .___________. |88|\n")
delay_print_fast("|88| \ \ / / \ \ / / | | |88|\n")
delay_print_fast("|88| \ \/ / \ \/ / `---| |----` |88|\n")
delay_print_fast("|88| \ / \_ _/ | | |88|\n")
delay_print_fast("|88| \ / | | | | |88|\n")
delay_print_fast("|88| \__/ |__| |__| |88|\n")
delay_print_fast("|88| |88|\n")
delay_print_fast("|88| _______. ___ _______ ___ |88|\n")
delay_print_fast("|88| / | / \ / _____| / \ |88|\n")
delay_print_fast("|88| | (----` / ^ \ | | __ / ^ \ |88|\n")
delay_print_fast("|88| \ \ / /_\ \ | | |_ | / /_\ \ |88|\n")
delay_print_fast("|88| .----) | / _____ \ | |__| | / _____ \ |88|\n")
delay_print_fast("|88| |_______/ /__/ \__\ \______| /__/ \__\ |88|\n")
delay_print_fast("|88| |88|\n")
delay_print_fast("|88| |88|\n")
delay_print_fast("|88|--------------------------------------------------------- |88|\n")
delay_print_fast("|8888888888888888888888888888888888888888888888888888888888888888|\n")
delay_print_fast("|----------------------------------------------------------------|\n")
delay_print_fast("By repl.it/@nellbag\n")
# Opening narration: scene-setting, then ask the player for a name.
print('')
print('')
delay_print_slow("It’s cold as fuck....\n")
print('')
time.sleep(2) #2
delay_print_slow("It’s dark....\n")
print('')
time.sleep(2) #2
delay_print("You appear to be lying on the ground.\n")
delay_print("WTF, what even happened last night?!\n")
print('')
time.sleep(2) #2
delay_print("The last thing you remember is tanning a bottle\nof wine and 4 cans a Dragon Soop......\n")
print('')
time.sleep(3) #3
print('Can you even remember your own name?')
#player = 'Testy McDebug'
player = input('Name: ').strip()
player = player.lower()
print('')
print("Suddenly you get a flashback from last night, you\nremember winching that psycho burd fae the scheme up Paps :/")
time.sleep(3) #3
print("Fuck sake " + str(player) + ", she's fuckin' mental.")
print('')
print("Anyway.... you slowly open your eyes and come to.")
time.sleep(2) #2
print("Your heid is absolutely banging and you feel a whitey coming on.")
time.sleep(4) #4
print('')
print("Fuck this shit, you need to find the troops\nand work out what happened last night.")
time.sleep(4) #4
delay_print_slow(".......\n")
delay_print_slow(".......\n")
delay_print_slow(".......\n")
delay_print_slow(".......\n")
print('')
# Main game loop: narrate the current location, read a command, act on it.
while keep_going == True:
    print('  || ') #change these
    print('  || ')
    print('  || ')
    print('  \ / ')
    print('  \/ ')
    print('')
    ###
    ### Movement
    ###
    loc_count += 1
    print('You are {}.'.format(current_location['name']))
    print(current_location['desc'])
    if current_location['npc_text']:
        print(current_location['npc_text'])
        print('')
    print(current_location['text'])
    print('')
    print(whitey())
    if current_location['items']:
        print('Through your fuzzy hungover eyes you\nsee these items: {}'.format(', '.join(current_location['items'])))
    # BUG FIX: the old parser did `command = command.lower().split()[1]`,
    # which (a) raised IndexError on one-word input such as "north" and
    # (b) threw the verb away, so the get/drop/hp/inventory branches
    # below could never match. Parse a verb and an argument explicitly.
    words = input("What do you do?\n").strip().lower().split()
    verb = words[0] if words else ''
    arg = words[1] if len(words) > 1 else ''
    # movement: accept both "north" and "go north" / "walk north"
    direction = arg if arg in directions else verb
    if direction in directions:
        if direction in current_location:
            current_location = locations[current_location[direction]]
        else:
            # bad movement
            print('')
            print(' xxxxxxxxxxxxxxxxxxxxxxxx')
            print(" Cannae go that way mate.")
            print(' xxxxxxxxxxxxxxxxxxxxxxxx')
            print('')
    # quit game
    elif verb in ('q', 'quit') or arg in ('q', 'quit'):
        print('')
        print(' xxxxxxxxxxxxxxxxxxxxxxxx')
        print(" xxx SHAT IT, GOODBYE xxx")
        print(' xxxxxxxxxxxxxxxxxxxxxxxx')
        print('')
        break
    # gather objects
    elif verb == 'get':
        item = arg
        if item in current_location['items']:
            current_location['items'].remove(item)
            inventory.append(item)
            print('')
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print(" xxx You've lifted '" + str(item) + "' xxx")
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print('')
        else:
            print('')
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print(" xxxx That's no here dafty xxxx")
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print('')
    # get rid of objects
    elif verb == 'drop':
        item = arg
        if item in inventory:
            current_location['items'].append(item)
            inventory.remove(item)
            print('')
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print(" xxx You've drapped " + str(item) + " xxx")
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print('')
        else:
            print('')
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print(" xx You've no goat that hing xx")
            print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
            print('')
    # check HP (accepts "hp" or "check hp")
    elif 'hp' in (verb, arg):
        print('')
        print(' xxxxxxxxxxxxxxxxxxxxxxxx')
        print(" xxx Current HP: " + str(hp) + " xxxx")
        print(' xxxxxxxxxxxxxxxxxxxxxxxx')
        print('')
    # check Inventory (accepts "inventory" or "check inventory")
    elif 'inventory' in (verb, arg):
        print('')
        print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        print(' Items currently in inventory:')
        print(' \n '.join(inventory))
        print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        print('')
    # bad command
    else:
        print('')
        print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        print(" xx Nae idea what you're oan about mate xx")
        print(' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        print('')
    # keep_going = False
| [
"nellbag@hotmail.co.uk"
] | nellbag@hotmail.co.uk |
bdba43f40b7128a2e5d9f1816e2e80cafacdf000 | 49513f4a7e7ba91835c0e55863d00c83c9afff5e | /07.LSTM/test/email-file.py | 173844f536fd77a55823e9be685f2b8acc6e3733 | [] | no_license | tfedohk/dohyung | 4aff34a9c2b9d74a5b20ee65d3408999e2a66b53 | 14a396384008e5434bb2f98e97a13106937a4bdc | refs/heads/master | 2022-07-11T06:24:06.705736 | 2020-10-08T00:46:07 | 2020-10-08T00:46:07 | 127,891,694 | 2 | 0 | null | 2022-06-24T02:27:57 | 2018-04-03T10:31:11 | Jupyter Notebook | UTF-8 | Python | false | false | 1,000 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
# Connect to Naver's SMTP server over implicit SSL and authenticate.
smtp = smtplib.SMTP_SSL('smtp.naver.com', 465)
smtp.ehlo() # say Hello
# smtp.starttls() # required when using TLS instead of implicit SSL
# NOTE(review): placeholder credentials hard-coded here -- move to
# environment variables / a secrets store before real use.
smtp.login('@.com', '@')
# msg = MIMEText('본문 테스트 메시지')
# Build a multipart message: a text body plus one binary attachment.
msg = MIMEMultipart()
part = MIMEText('본문 테스트 메시지')
msg.attach(part)
path = '../evaluate_result/for_email_transfer/BCH_10_25_1_0.1_param.pickle'
msg['Subject'] = '테스트'
msg['To'] = 'ulujo_dohk@naver.com'
# NOTE(review): no 'From' header is set on the message; some servers
# reject or rewrite such mail -- verify.
with open(path, 'rb') as f:
    part = MIMEBase("application", "octet-stream")
    part.set_payload(f.read()) #payload: osi 7-layers
    encoders.encode_base64(part) #base64-encode so binary (video/image) data travels as text
    part.add_header('Content-Disposition', 'attachment', filename=path)
    msg.attach(part)
smtp.sendmail('dhgdohk@naver.com', 'ulujo_dohk@naver.com', msg.as_string())
smtp.quit() | [
"gwondohyung@gmail.com"
] | gwondohyung@gmail.com |
a5fbd0f35dfb82d0b0e1fc5a1581cfd39752caec | 004207c38a539f94ef7910ef9011336040d6276b | /User.py | e65481f1c47b089b30e3c6ee6715bc77f9ea459f | [
"MIT"
] | permissive | davospots/PassLocker | cf0bfb5d32cf3826d263ed37336735f4e2c0ffb2 | 2b9bd57ad92b899a0c26c6b4b903c7b9eaf0c2f9 | refs/heads/main | 2023-08-20T22:28:43.035490 | 2021-10-25T07:33:01 | 2021-10-25T07:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | import random
import string
import pyperclip
class User:
    """A login user. Saved users live in the class-level registry."""

    user_list = []

    def __init__(self, user_name, password):
        """Create a user with the given name and password."""
        self.user_name, self.password = user_name, password

    def save_user(self):
        """Append this user to the shared registry."""
        self.user_list.append(self)

    @classmethod
    def display_user(cls):
        """Return the registry of saved users."""
        return cls.user_list
class Credential:
    """An account credential: service name + username + password.

    All credentials are kept in the class-level `credential_list`.
    """
    credential_list = []

    @classmethod
    def verify_user(cls, user_name, password):
        """Return `user_name` if it matches a saved User, else ''."""
        current_user = ''
        for user in User.user_list:
            if user.user_name == user_name and user.password == password:
                # BUG FIX: the original wrote `current_user == user.user_name`
                # (a no-op comparison) instead of an assignment, so
                # verification always returned ''.
                current_user = user.user_name
        return current_user

    def __init__(self, account, user_name, password):
        self.account = account
        self.user_name = user_name
        self.password = password

    def save_account(self):
        """Store this credential in the shared list."""
        Credential.credential_list.append(self)

    def del_account(self):
        """Remove this credential from the shared list."""
        Credential.credential_list.remove(self)

    @classmethod
    def find_account(cls, account):
        """Return the first credential for `account`, or None."""
        for credential in cls.credential_list:
            if credential.account == account:
                return credential

    @classmethod
    def display_account(cls):
        """Return the list of all saved credentials."""
        return cls.credential_list

    @classmethod
    def copy_account(cls, account):
        """Copy the password for `account` to the system clipboard."""
        # NOTE(review): raises AttributeError when the account is unknown
        # (find_account returns None) -- confirm desired behaviour.
        found_acc = Credential.find_account(account)
        pyperclip.copy(found_acc.password)

    @classmethod
    def find_by_acc(cls, account):
        """Duplicate of find_account, kept for backward compatibility."""
        for credential in cls.credential_list:
            if credential.account == account:
                return credential
def generate_password(size=8, char=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Return a random password of `size` characters drawn from `char`.

    SECURITY FIX: uses the `secrets` module (CSPRNG) instead of `random`,
    which is not suitable for generating passwords.
    """
    import secrets  # local import keeps the module's import block unchanged
    return ''.join(secrets.choice(char) for _ in range(size))
| [
"david.mathaga@student.moringaschool.com"
] | david.mathaga@student.moringaschool.com |
ba2c241b5f20b60721a0c95ffcebcc0ef2f0261c | 05a249bd9d45f691df5599816b0929770fb47bf7 | /scripts/methods/03-est_pars-SERGIO.py | 289df4c8a5c82ecefefe85492a0c5f0e503ad3bc | [] | no_license | jligm-hash/simulation-comparison | 457dbdfae7c09e7e4aef74af3639858b4f6566fc | 0724439875168fb497bf9ada0742a6082a77b5ac | refs/heads/master | 2023-04-16T08:04:59.746920 | 2021-04-30T08:21:22 | 2021-04-30T08:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from SERGIO.SERGIO import sergio
# from SERGIO import sergio
import numpy as np
import pandas as pd
# import sys
# print(sys.path)
# DEMO
def simulate(targets_path="/Users/sarahmorillo/anaconda3/envs/sim_comp/lib/python3.8/site-packages/SERGIO/Demo/steady-state_input_GRN.txt",
             regs_path="/Users/sarahmorillo/anaconda3/envs/sim_comp/lib/python3.8/site-packages/SERGIO/Demo/steady-state_input_MRs.txt"):
    """Run a steady-state SERGIO demo simulation.

    GENERALIZED: the GRN/master-regulator input files are now parameters
    (defaulting to the previous hard-coded demo paths), so the function
    works outside the original machine.

    Returns the clean expression matrix as a genes x cells numpy array
    (the per-bin matrices concatenated along the cell axis).
    """
    sim = sergio.sergio(number_genes=100,
                        number_bins=9,
                        number_sc=300,
                        noise_params=1,
                        decays=0.8,
                        sampling_state=15,
                        noise_type='dpd')
    # NOTE: `input_file_taregts` is spelled this way in SERGIO's own API.
    sim.build_graph(input_file_taregts=targets_path,
                    input_file_regs=regs_path,
                    shared_coop_state=2)
    sim.simulate()
    expr = sim.getExpressions()
    expr_clean_ss = np.concatenate(expr, axis=1)
    return expr_clean_ss
| [
"helena.crowell@uzh.ch"
] | helena.crowell@uzh.ch |
7146769607b95efff637a6b789c6ee70848820c2 | 536bec877f564703cf5ab1aaba87c2a4c272b8a7 | /older_ver/ver6/config.py | 5cc41c43e2bfb109a24f1b0807ecacea8dc7f6c7 | [] | no_license | sys3948/flask_blog | 2ade431d8c845050fdfb67c61b14c2d880741c12 | 31f4d2b1f3ccaa769c934dd72b083c59b9f618ec | refs/heads/master | 2022-04-29T12:31:59.422800 | 2022-04-05T01:39:48 | 2022-04-05T01:39:48 | 192,293,756 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | # config.py는 application의 설정값이 지정되어있는 파일이며
# 개발, 테스트, 배포 작업에 필요한 설정값이 있는 파일이다.
class Config:
# 개발, 테스트, 배포 작업에 공통되는 설정값이 지정되어있는 class이다.
SECRET_KEY = 'secret key in flasky example'
EXTENTION_FILES = set({'png', 'jpg', 'jpeg', 'gif'})
UPLOAD_FOLDERS = 'app/static/icon'
UPLOAD_POST_PATH_DEFAULT = 'app/templates/'
UPLOAD_POST_PATH = 'postFiles/'
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
# 개발 작업에 필요한 설정값이 지정되어있는 class이다.
# 추후 추가가될 것이다.
pass
class TestingConfig(Config):
# 테스트 작업에 필요한 설정값이 지정되어있는 class이다.
# 추후 추가가될 것이다.
pass
class ProductionConfig(Config):
# 배포 작업에 필요한 설정값이 지정되어있는 class이다.
# 추후 추가가될 것이다.
pass
# app 실행시 어느 환경으로 실행할 것인가를 선택하기 위한 딕셔너리.
# 이 딕셔너리를 통해 위의 설정 class를 선택한다.
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'product': ProductionConfig,
'default': DevelopmentConfig
} | [
"sys394880@gmail.com"
] | sys394880@gmail.com |
79661751089250ffe479fa80d5e2d9b644cfab1d | 59fb085f0d8fb60b9b631f36b7e863e90fe73945 | /image_cropper_align.py | 0e0bcf8d65308dfe8cfdb5fce3c4b5c3d774f2f9 | [] | no_license | grvm20/deepbirdsnap | cfa98ece5170fe39c1fbf570d3e356a5cb340167 | 3abf1e552252c19dcc8dc53c194286626ce6fefb | refs/heads/master | 2021-03-24T12:00:19.117254 | 2017-10-01T08:22:28 | 2017-10-01T08:22:28 | 87,668,889 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,492 | py |
# coding: utf-8
# In[16]:
from concurrent.futures import ThreadPoolExecutor
import cv2
from PIL import Image
import numpy as np
from utils import img_parts_generator
import pickle
import scipy
import os
import math
from progress.bar import Bar
def rotate_image(img, body_point, head_point, target_angle, flipped):
head_x = head_point[0]
head_y = head_point[1]
body_x = body_point[0]
body_y = body_point[1]
slope = float(-head_y + body_y)/ (head_x - body_x)
image_angle = math.degrees(math.atan(slope))
if(flipped):
image_angle = np.abs(image_angle)
if image_angle < 0 and head_y < body_y:
img = np.fliplr(img)
flipped = True
image_angle = np.abs(image_angle)
image_center = tuple(reversed((np.array(img.shape)/2)[:-1]))
rot_mat = cv2.getRotationMatrix2D(image_center,-image_angle+target_angle, 1.0)
img = cv2.warpAffine(img, rot_mat, (img.shape[1], img.shape[0]),flags=cv2.INTER_LINEAR)
return img, image_angle, flipped
def flip_image_if_needed(img, parts):
beak_x = parts[6]
left_eye_x = parts[18]
if beak_x != -1 and left_eye_x != -1 and beak_x < left_eye_x:
return np.fliplr(img), True
else:
return img, False
def get_save_path(path):
save_img_dir = "/".join([save_path] + path[3:].split('/')[:2])
if not os.path.exists(save_img_dir):
os.makedirs(save_img_dir)
save_img_path = save_path + path[2:]
return save_img_path
def crop(img, t_l_x, t_l_y, b_r_x, b_r_y):
return img[t_l_y:b_r_y, t_l_x:b_r_x]
def resize(img, target_dim):
return scipy.misc.imresize(img, target_dim)
# In[25]:
def get_bird_body_head_location(parts):
body_x = []
body_y = []
head_x = []
head_y = []
for i in range(4, len(parts)):
if parts[i] != -1:
if i%2 == 0:
if(i/2 in body_index):
body_x.append(parts[i])
elif(i/2 in head_index):
head_x.append(parts[i])
else:
print("Location neither bodyx nor headx: ", i)
else:
if(i//2 in body_index):
body_y.append(parts[i])
elif(i//2 in head_index):
head_y.append(parts[i])
else:
print("Location neither bodyy nor heady: ", i)
return (np.mean(body_x),np.mean(body_y)) , (np.mean(head_x),np.mean(head_y))
# In[26]:
def rotate_bb_point(p_x, p_y, angle, y, x):
center = x/2, y/2
top_left_distance = math.hypot(center[0] - p_x, center[1] - p_y)
ydiff = float(-p_y+center[1])
xdiff = float(p_x-center[0])
top_left_slope = ydiff/ xdiff
top_left_angle = math.degrees(math.atan(top_left_slope))
if ydiff>=0 and xdiff >= 0:
top_left_angle = top_left_angle
elif ydiff >=0:
top_left_angle = 180 + top_left_angle
elif xdiff >=0:
top_left_angle = 360 + top_left_angle
else:
top_left_angle = 180 + top_left_angle
top_left_angle = top_left_angle - angle
p_x = center[0] + top_left_distance * math.cos(math.radians(top_left_angle))
p_y = center[1] - top_left_distance * math.sin(math.radians(top_left_angle))
return (p_x, p_y)
def align_crop(imgs, paths, parts):
for i in range(len(imgs)):
img = imgs[i]
path = paths[i]
t_l_x = parts[i][0]
t_l_y = parts[i][1]
b_r_x = parts[i][2]
b_r_y = parts[i][3]
body_point, head_point = get_bird_body_head_location(parts[i])
img, flipped = flip_image_if_needed(img, parts[i])
img, image_angle, flipped = rotate_image(img, body_point, head_point, target_angle, flipped)
if flipped:
y, x, rgb = img.shape
temp = t_l_x
t_l_x = x - b_r_x
b_r_x = x - temp
t_l = rotate_bb_point(t_l_x, t_l_y, -target_angle + image_angle, img.shape[0], img.shape[1])
b_l = rotate_bb_point(t_l_x, b_r_y, -target_angle + image_angle, img.shape[0], img.shape[1])
t_r = rotate_bb_point(b_r_x, t_l_y, -target_angle + image_angle, img.shape[0], img.shape[1])
b_r = rotate_bb_point(b_r_x, b_r_y, -target_angle + image_angle, img.shape[0], img.shape[1])
t_l_x = int(max(min(t_l[0], b_l[0]), 0))
t_l_y = int(max(min(t_l[1], t_r[1]), 0))
b_r_x = int(min(max(t_r[0], b_r[0]), img.shape[1]))
b_r_y = int(min(max(b_l[1], b_r[1]), img.shape[0]))
img = crop(img, t_l_x, t_l_y, b_r_x, b_r_y)
resized_cropped_img = resize(img, (299, 299))
save_img_path = get_save_path(path)
image = Image.fromarray(resized_cropped_img)
image.save(save_img_path)
bar.next()
part_file_name = 'parts_info.txt'
data_dir = 'validation/'
batch_size = 1
steps=4
target_dim=None
cache=False
save_path = '../cropped_aligned'
target_angle = 50
generator = img_parts_generator(part_file_name, data_dir, batch_size=20, load_image=True, target_dim=target_dim, cache=False, load_paths=True, load_parts=True, bb_only=False)
body_index = set([2, 4, 5, 10, 11, 15, 16, 17])
head_index = set([3, 6, 7, 8, 9, 12, 13, 14, 18])
bar = Bar('Cropping aligned image', max=3000)
with ThreadPoolExecutor(max_workers=100) as executor:
for imgs, paths, parts in generator:
executor.submit(align_crop, imgs, paths, parts)
bar.finish()
| [
"kartikeya1994@gmail.com"
] | kartikeya1994@gmail.com |
cc14b6eaaffadd74381b2c758e2324c947a2db53 | 3a6cbe6940b657ac6b608ce93d8d41ffeb6b9e65 | /rocon_python_comms/src/rocon_python_comms/subscriber_proxy.py | 307b3a19e2da6b0f0d8015cf88ad17466399e082 | [] | no_license | robotics-in-concert/rocon_tools | cdfc4ccfc04b79262fb151640966a33bd0b5f498 | 1f182537b26e8622eefaf6737d3b3d18b1741ca6 | refs/heads/devel | 2021-01-17T01:58:12.163878 | 2018-02-06T15:20:29 | 2018-02-06T15:20:29 | 15,774,638 | 7 | 22 | null | 2017-08-16T06:39:47 | 2014-01-09T18:02:42 | Python | UTF-8 | Python | false | false | 4,285 | py | #
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_tools/license/LICENSE
#
##############################################################################
# Description
##############################################################################
"""
.. module:: subscriber_proxy
:platform: Unix
:synopsis: Request-response style communication with a latched publisher.
This module provides a means of interacting with a ros latched publisher
in the same style as you would a ros service (request-response).
----
"""
##############################################################################
# Imports
##############################################################################
import time
import rospy
import threading
##############################################################################
# Subscriber Proxy
##############################################################################
class SubscriberProxy():
    '''
    Works like a service proxy, but using a latched subscriber instead (regular
    subscribers will also work, but this is especially useful for latched
    subscribers since they typically always provide data).
    If no timeout is specified when calling, it blocks indefinitely on a
    100ms loop until a message arrives. Alternatively it will return with None
    if a specified timeout is reached.
    **Usage:**
    .. code-block:: python
        from rocon_python_comms import SubscriberProxy
        try:
            gateway_info = SubscriberProxy('gateway_info', gateway_msgs.GatewayInfo)(rospy.Duration(0.5))
            if gateway_info is not None:
                # do something
        except rospy.exceptions.ROSInterruptException: # make sure to handle a Ros shutdown
            # react something
    :todo: upgrade to make use of python events instead of manual loops
    '''
    def __init__(self, topic, msg_type):
        '''
        :param str topic: the topic name to subscriber to
        :param str msg_type: any ros message type (e.g. std_msgs/String)
        '''
        self._data = None
        # Guards _data: it is written from the subscriber callback thread and
        # read from the caller's thread.
        self._lock = threading.Lock()
        self._subscriber = rospy.Subscriber(topic, msg_type, self._callback)

    def __call__(self, timeout=None):
        '''
        Returns immediately with the latest data or blocks to a timeout/indefinitely
        until the next data arrives.
        :param rospy.Duration timeout: time to wait for data, polling at 10Hz (None = /infty)
        :returns: latest data or None if the timeout was reached first
        '''
        if timeout is not None:
            # everything in floating point calculations
            timeout_time = time.time() + timeout.to_sec()
        with self._lock:
            data = self._data
        while not rospy.is_shutdown() and data is None:
            rospy.rostime.wallsleep(0.1)
            if timeout is not None:
                if time.time() > timeout_time:
                    return None
            # check to see if there is new data
            with self._lock:
                data = self._data
        return data

    def wait_for_next(self, timeout=None):
        '''
        Makes sure any current data is cleared and waits for new data.
        :param rospy.Duration timeout: time to wait for data, polling at 10Hz.
        :returns: latest data or None
        '''
        # FIX: clear the cached message while holding the lock, consistent
        # with every other access to ``_data`` in this class, so the clear
        # cannot interleave with a concurrent callback delivery.
        with self._lock:
            self._data = None
        return self.__call__(timeout)

    def wait_for_publishers(self):
        '''
        Blocks until publishers are seen.
        :raises: rospy.ROSInterruptException if we are in shutdown.
        '''
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self._subscriber.get_num_connections() != 0:
                return
            else:
                r.sleep()
        # we are shutting down
        raise rospy.exceptions.ROSInterruptException

    def _callback(self, data):
        # Runs on the rospy subscriber thread; store under the lock.
        with self._lock:
            self._data = data

    def unregister(self):
        '''
        Unregister the subscriber so future instantiations of this class can pull a
        fresh subscriber (important if the data is latched).
        '''
        self._subscriber.unregister()
| [
"d.stonier@gmail.com"
] | d.stonier@gmail.com |
e8055cef7954132336a38efaaff3f88a5092ec0d | 66d184a2b36ab1db564305ea36be891aaf0e236b | /py/function_local.py | 32bd23b710843314480e40414153008ee5c24f6f | [] | no_license | joyDDT/python_code | bef57936a1167fa65e28b6c52ab7857b34dc74a8 | 3aae56c51660579a4eaaa087ac2459c9bf2f2e23 | refs/heads/master | 2021-10-30T10:22:21.328633 | 2019-04-26T04:45:01 | 2019-04-26T04:45:01 | 112,004,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | x = 50
def func(x):
    # ``x`` here is the function's *local* parameter: it shadows the
    # module-level ``x`` for the duration of the call.
    print('x is', x)
    # Rebinding only changes the local name; the module-level ``x`` is
    # unaffected.
    x = 2
    print('Changed local x to',x)


# Demonstration of Python scoping: the call mutates only its local ``x``,
# then the module-level ``x`` is shown to be unchanged.
func(x)
print('x is still ',x)
| [
"15894500833@163.com"
] | 15894500833@163.com |
aaf162b4058461280df61455f113c5dd8d8184a2 | b6bd2ef337ae5f2fe8d65978af32fda8f6089c15 | /mall/apps/verifications/urls.py | ce25c1a210b573362e04447da0694f5095a72e62 | [] | no_license | GODsdgg/- | f6acb16abc94a6157dca939c7fc573ecb5a2606d | db9b745e247b9e54b7bdc8e3f3ba5b20d05c731a | refs/heads/master | 2020-04-06T13:42:24.668186 | 2018-11-14T07:45:44 | 2018-11-14T07:45:44 | 157,511,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # GET verifications/imagecodes/(?P<image_code_id>.+)/
    # Serves the image verification code addressed by a client-generated id
    # (presumably a captcha image -- confirm against RegisterImageCodeView).
    url(r'^imagecodes/(?P<image_code_id>.+)/$',views.RegisterImageCodeView.as_view(),name='imagecode'),
    # GET verifications/smscodes/<mobile>/ -- sends an SMS verification code;
    # the pattern accepts 11-digit mainland-China mobile numbers only.
    url(r'^smscodes/(?P<mobile>1[345789]\d{9})/$',views.RegisterSmscodeView.as_view()),
] | [
"m15130638277@163.com"
] | m15130638277@163.com |
816f7c9573750b418355b515fae6a322cac506a0 | d842a95213e48e30139b9a8227fb7e757f834784 | /gcloud/google-cloud-sdk/lib/surface/spanner/databases/create.py | 85d2e132651f4d9072e81b0f73b363d8ae6a24bf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/JobSniperRails | f37a15edb89f54916cc272884b36dcd83cdc868a | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | refs/heads/master | 2022-11-22T18:12:37.972441 | 2019-09-20T22:43:14 | 2019-09-20T22:43:14 | 282,293,504 | 0 | 0 | MIT | 2020-07-24T18:47:35 | 2020-07-24T18:47:34 | null | UTF-8 | Python | false | false | 2,348 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner databases create."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import databases
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.spanner import flags
from googlecloudsdk.command_lib.spanner import resource_args
class Create(base.CreateCommand):
    """Create a Cloud Spanner database."""

    @staticmethod
    def Args(parser):
        """See base class."""
        resource_args.AddDatabaseResourceArg(parser, 'to create')
        flags.Ddl(help_text='Semi-colon separated DDL (data definition language) '
                  'statements to run inside the '
                  'newly created database. If there is an error in any statement, '
                  'the database is not created. Full DDL specification is at '
                  'https://cloud.google.com/spanner/docs/data-definition-language'
                  ).AddToParser(parser)
        base.ASYNC_FLAG.AddToParser(parser)
        parser.display_info.AddCacheUpdater(flags.DatabaseCompleter)

    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to this
            command invocation.

        Returns:
          The in-flight operation when --async is set, otherwise the result of
          waiting for the database-creation operation.
        """
        database_ref = args.CONCEPTS.database.Parse()
        instance_ref = database_ref.Parent()
        op = databases.Create(instance_ref, args.database,
                              flags.SplitDdlIntoStatements(args.ddl or []))
        # BUG FIX: ``async`` is a reserved keyword from Python 3.7 onwards, so
        # the original ``args.async`` attribute access is a SyntaxError there.
        # Fetch the flag value by name via getattr instead.
        if getattr(args, 'async'):
            return op
        return database_operations.Await(op, 'Creating database')
| [
"luizfper@gmail.com"
] | luizfper@gmail.com |
fadeb446465b4999b64736ca9098b091c2f71a47 | 753c8b96d240f6470a29dcfe10ab7cfe0172bea6 | /access_control/models/domain_update.py | 95e7c7dc448984c5fcdb2744607d75f29e42d3dc | [
"BSD-3-Clause"
] | permissive | girleffect/core-authentication-service | a939eaa28ccec72f91b32782053cf7b43001fd61 | 1e020370e4fe3f1b915da9604389727979746c9b | refs/heads/develop | 2022-11-26T06:57:55.971369 | 2019-08-26T12:33:15 | 2019-08-26T12:33:15 | 112,724,628 | 1 | 1 | BSD-3-Clause | 2022-11-22T02:18:13 | 2017-12-01T10:01:56 | Python | UTF-8 | Python | false | false | 4,788 | py | # coding: utf-8
"""
Access Control API
# The Access Control API ## Overview The Access Control API is an API exposed to other core components. It uses an API Key in an HTTP header to perform authentication and authorisation. Most of the API calls facilitates CRUD of the entities defined in the Access Control component. Others calls allows the retrieval of information in a form that is convenient for other components (most notably the Management Layer) to consume. # noqa: E501
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DomainUpdate(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'parent_id': 'int',
        'name': 'str',
        'description': 'str'
    }

    attribute_map = {
        'parent_id': 'parent_id',
        'name': 'name',
        'description': 'description'
    }

    def __init__(self, parent_id=None, name=None, description=None):  # noqa: E501
        """DomainUpdate - a model defined in Swagger"""  # noqa: E501
        self._parent_id = None
        self._name = None
        self._description = None
        self.discriminator = None
        if parent_id is not None:
            self.parent_id = parent_id
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description

    @property
    def parent_id(self):
        """Gets the parent_id of this DomainUpdate.  # noqa: E501

        :return: The parent_id of this DomainUpdate.  # noqa: E501
        :rtype: int
        """
        return self._parent_id

    @parent_id.setter
    def parent_id(self, parent_id):
        """Sets the parent_id of this DomainUpdate.

        :param parent_id: The parent_id of this DomainUpdate.  # noqa: E501
        :type: int
        """
        self._parent_id = parent_id

    @property
    def name(self):
        """Gets the name of this DomainUpdate.  # noqa: E501

        :return: The name of this DomainUpdate.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this DomainUpdate.

        :param name: The name of this DomainUpdate.  # noqa: E501
        :type: str
        :raises ValueError: if the name is longer than 100 characters.
        """
        if name is not None and len(name) > 100:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `100`")  # noqa: E501
        self._name = name

    @property
    def description(self):
        """Gets the description of this DomainUpdate.  # noqa: E501

        :return: The description of this DomainUpdate.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this DomainUpdate.

        :param description: The description of this DomainUpdate.  # noqa: E501
        :type: str
        """
        self._description = description

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Modernised: iterate the dict directly instead of six.iteritems();
        # the project runs on Python 3 where `six` is unnecessary here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DomainUpdate):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"ruan.desousa@praekeltconsulting.com"
] | ruan.desousa@praekeltconsulting.com |
9738634e64730b4d9ef7134d6187e6d089f937e9 | 553af5b6c023eec3e8f114973067c098999d81b4 | /src/rulebased_argid/argI_kfn.py | f605345a6e4dc5ee2ad51a0cae9560b503d95fa0 | [] | no_license | machinereading/KFparser | 96cf43b9eff1f401713dbf57cfd45540900f1f45 | 57c0959e751df6756ed66fedee1689f13c52b544 | refs/heads/master | 2020-03-21T00:01:56.875424 | 2018-08-16T07:23:01 | 2018-08-16T07:23:01 | 137,874,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,790 | py | import json
import pprint
from nltk.corpus import framenet as fn
import os
import re
def load_kfn():
    """Load the Korean FrameNet resources shipped next to this module.

    Returns a tuple ``(kolus, annos, s_annos)``: the lexical-unit lexicon,
    the native KFN annotations, and the annotations converted from the
    Sejong corpus.
    """
    dir_path = os.path.dirname(os.path.abspath(__file__))
    #print(type(dir_path))
    with open(dir_path+'/resource/KFN_lus.json','r',encoding='utf-8') as f:
        kolus = json.load(f)
    with open(dir_path+'/resource/KFN_annotations.json','r',encoding='utf-8') as f:
        annos = json.load(f)
    with open(dir_path+'/resource/KFN_annotations_from_sejong.json','r',encoding='utf-8') as f:
        s_annos = json.load(f)
    return kolus,annos,s_annos


# Module-level data: loaded once at import time and shared by every lookup
# function below.
kolus,annos,s_annos = load_kfn()
print(len(kolus))
def lus():
    """Return the full list of Korean FrameNet lexical-unit records."""
    return kolus
def get_lu_id(lexicalUnit, frame, lu_data=None):
    """Look up the lu_id for an exact (lu, frameName) pair.

    :param lexicalUnit: LU entry string, e.g. ``'먹다.v'``.
    :param frame: frame name the LU must evoke.
    :param lu_data: optional list of LU records to search; defaults to the
        module-level ``kolus`` lexicon (backward compatible).
    :returns: the matching ``lu_id``, or ``False`` when no record matches.
    """
    entries = kolus if lu_data is None else lu_data
    for entry in entries:
        if lexicalUnit == entry['lu'] and frame == entry['frameName']:
            return entry['lu_id']
    return False
def lus_by_lemma(lemma, lu_data=None):
    """Find LU records by lemma.

    First tries an exact match against each record's ``lexeme``; if nothing
    matches, falls back to a substring match against the full ``lu`` entry.

    :param lemma: lemma string to search for.
    :param lu_data: optional list of LU records; defaults to the module-level
        ``kolus`` lexicon (backward compatible).
    :returns: list of ``{'lu_id': ..., 'lu': ...}`` dicts (possibly empty).
    """
    entries = kolus if lu_data is None else lu_data
    # Exact lexeme match first.
    lu_list = [{'lu_id': e['lu_id'], 'lu': e['lu']}
               for e in entries if lemma == e['lexeme']]
    if not lu_list:
        # Fallback: substring match anywhere in the LU entry.
        lu_list = [{'lu_id': e['lu_id'], 'lu': e['lu']}
                   for e in entries if lemma in e['lu']]
    return lu_list
def lu(lu_id, lu_data=None):
    """Return the full LU record for *lu_id*, or ``False`` if unknown.

    :param lu_id: lexical-unit identifier to look up.
    :param lu_data: optional list of LU records; defaults to the module-level
        ``kolus`` lexicon (backward compatible).
    """
    entries = kolus if lu_data is None else lu_data
    for entry in entries:
        if lu_id == entry['lu_id']:
            return entry
    return False
def annotations_by_lu(lu_id):
    """Return all annotation ids (native KFN + Sejong-derived) for an LU.

    :param lu_id: lexical-unit identifier.
    :returns: concatenated list of annotation ids; empty when the LU is
        unknown (previously an unknown id raised ``TypeError`` because
        ``lu()`` returns ``False``).
    """
    lexicalUnit = lu(lu_id)
    if not lexicalUnit:
        return []
    ko_annotation = lexicalUnit['ko_annotation_id']
    sejong_annotation = lexicalUnit['sejong_annotation_id']
    return ko_annotation + sejong_annotation
def annotation(lu_id):
    """Collect the full annotation records for a lexical unit.

    String annotation ids refer to Sejong-derived annotations (``s_annos``);
    integer ids refer to native KFN annotations (``annos``).
    """
    aids = annotations_by_lu(lu_id)
    result = []
    for i in aids:
        if type(i) == str:
            # Sejong-derived annotation: keep only records with a real span
            # (``begin == -1`` marks an empty placeholder denotation).
            # NOTE(review): ``k`` is appended once per non-empty denotation,
            # so a record can appear multiple times -- confirm intended.
            for s_anno in s_annos:
                for k in s_anno['annotations']:
                    if i == k['ko_annotation_id']:
                        for d in k['denotations']:
                            if d['span']['begin'] != -1:
                                result.append(k)
        elif type(i) == int:
            # Native KFN annotation: attach the Korean sentence text before
            # returning the record.
            for anno in annos:
                for k in anno['frameAnnotation']['ko_annotations']:
                    if i == k['ko_annotation_id']:
                        k['text'] = anno['text']['ko_text']
                        result.append(k)
        else:
            # Ids of any other type are silently ignored.
            pass
    return result
def surface_to_lu_id(surface, frame, CoNLL = True):
    """Resolve a surface form plus frame name to the best matching lu_id.

    Candidate LUs are gathered from ``kolus``; among them, verb LUs are
    preferred, and ties are broken by the number of native Korean
    annotations. Returns ``False`` when nothing matches.
    """
    # spc = ['\,','.','!','?']
    # if len(surface) >1:
    #     if surface[-1] in spc:
    #         surface = re.sub('[,.?!]','',surface)
    result = []
    if CoNLL == True:
        # CoNLL mode: match the surface form against the LU's known
        # surface_forms within the requested frame.
        for i in kolus:
            if frame == i['frameName']:
                if surface in i['surface_forms']:
                    result.append(i['lu_id'])
    else:
        # NOTE(review): in this branch ``frame`` is compared against the LU
        # name itself and ``surface`` is ignored -- confirm this is intended.
        for i in kolus:
            if frame == i['lu']:
                result.append(i['lu_id'])
    result = list(set(result))
    ds = []
    for i in result:
        lu_info = lu(i)
        p = lu_info['lu'].split('.')[1]       # part-of-speech tag of the LU
        n = len(lu_info['ko_annotation_id'])  # number of native annotations
        d = (i, p, n)
        ds.append(d)
    if len(ds) > 0:
        luid = ds[0]
        # First pass: prefer any verb LU over the default first candidate.
        for i in ds:
            if i[1] == 'v':
                luid = i
        # Second pass: if the current best is a verb, keep the most-annotated
        # verb; otherwise keep the most-annotated candidate of any POS.
        for i in ds:
            if luid[1] == 'v':
                if i[1] == 'v':
                    if i[2] > luid[2]:
                        luid = i
                    else:
                        pass
                else:
                    pass
            else:
                if i[2] > luid[2]:
                    luid = i
    else:
        # No candidates at all: wrap False so the final index works.
        luid = [False]
    return luid[0]
| [
"nuclear852@kaist.ac.kr"
] | nuclear852@kaist.ac.kr |
bad4f8f73261aa4c8e9738bd063f8568b1a71066 | 898567269017131f7609aa1f546bdd5ab4dcb645 | /students/DiaoJunyu65233768/House.py | 5ffe1ab6701fe84d838a95b656b94f09d9ae1a31 | [] | no_license | sht2018/itm | f40578fc9a7c9fd62eb2fc5c7fece4dd3ea256e2 | 4b0461ecc20b519e4da2cc4077e0145cae5c01f4 | refs/heads/master | 2020-04-06T09:18:40.120687 | 2018-12-29T12:08:17 | 2018-12-29T12:08:17 | 157,336,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | # Adventure 3: buildHouse.py
# From the book: "Adventures in Minecraft"
# written by David Whale and Martin O'Hanlon, Wiley, 2014
# http://eu.wiley.com/WileyCDA/WileyTitle/productCd-111894691X.html
#
# This program builds a single house, with a doorway, windows,
# a roof, and a carpet.
# Import necessary modules
import mcpi.minecraft as minecraft
import mcpi.block as block
# Connect to Minecraft
mc = minecraft.Minecraft.create()
#mc.player.setTilePos(-30,10,-40)
def bu(SIZE):
    """Build one hollow SIZE x SIZE x SIZE house centred on the player.

    The house gets a doorway, two glass windows, a wooden roof, lamps and a
    red woollen carpet. ``SIZE`` is the edge length in blocks.
    NOTE(review): under Python 3, ``SIZE/2`` yields a float coordinate --
    confirm mcpi coerces it as expected.
    """
    # Get the players position
    pos = mc.player.getTilePos()
    # Decide where to start building the house, slightly away from player
    x = pos.x - SIZE/2
    y = pos.y
    z = pos.z - SIZE/2
    # Calculate the midpoints of the front face of the house
    midx = x+SIZE/2
    midy = y+SIZE/2
    # Build the outer shell of the house
    # NOTE(review): 85 is a raw numeric block id (fence?) -- confirm against
    # the mcpi block table.
    mc.setBlocks(x, y, z, x+SIZE, y+SIZE, z+SIZE, 85)
    # Carve the insides out with AIR
    mc.setBlocks(x+1, y, z+1, x+SIZE-2, y+SIZE-1, z+SIZE-2, block.AIR.id)
    # Carve out a space for the doorway (block id 50 -- presumably torch;
    # TODO confirm, it is reused for the lamps below)
    mc.setBlocks(midx-1, y, z, midx+1, y+3, z, 50)
    # Carve out the left hand window
    mc.setBlocks(x+3, y+SIZE-3, z, midx-3, midy+3, z, block.GLASS.id)
    # Carve out the right hand window
    mc.setBlocks(midx+3, y+SIZE-3, z, x+SIZE-3, midy+3, z, block.GLASS.id)
    # Add a wooden roof
    mc.setBlocks(x, y+SIZE, z, x+SIZE, y+SIZE, z+SIZE, block.WOOD.id)
    #Add lamps
    mc.setBlocks(x, y+SIZE+1, z, x+SIZE, y+SIZE+1, z+SIZE, 50)
    mc.setBlocks(x, y+SIZE, z-1, x+SIZE, y+SIZE, z-1,50)
    # Add a woolen carpet, the colour is 14, which is red.
    mc.setBlocks(x+1, y-1, z+1, x+SIZE-2, y-1, z+SIZE-2, block.WOOL.id, 14)
# END


# Build three nested houses around the player, largest first.
bu(22)
bu(14)
bu(6)
| [
"noreply@github.com"
] | sht2018.noreply@github.com |
47d4c7cb7eb9933b0369b1e45e59ea87c9b72b5f | f1d01d9074ace289e7e43530079b0c34ccdde4c4 | /ontology/creation/data/patents/concatenate_features.py | 5d008e8f60662834f64b614c5873f3ab50efcfec | [] | no_license | techknowledgist/techknowledgist | 4889300a92aad8fa940d1246ddd75036d90a6563 | 7d422ac38a9212670d0ce6e26e1446fb46740837 | refs/heads/master | 2021-04-26T22:35:50.390037 | 2015-11-23T19:52:26 | 2015-11-23T19:52:26 | 124,117,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | """
Takes a corpus and concatenates all phr_feats files into a single file
Usage:
$ python concatenate_features.py CORPUS OUTFILE
TODO: this script, and others, should probably live in another directory
"""
import os, sys, codecs, glob, gzip
corpus = sys.argv[1]
outfile = sys.argv[2]
fh_out = codecs.open(outfile, 'w', encoding='utf-8')
feats_dir = os.path.join(corpus, 'data', 'd3_phr_feats', '01', 'files')
regexp = "%s/WoS.out.*/*.xml.gz" % feats_dir
fnames = glob.glob(regexp)
count = 0
for fname in fnames:
count += 1
print "%05d %s" % (count, fname)
gzipfile = gzip.open(fname, 'rb')
reader = codecs.getreader('utf-8')
fh = reader(gzipfile)
for line in fh:
fh_out.write(line)
| [
"marc@cs.brandeis.edu"
] | marc@cs.brandeis.edu |
68893da9c3d3ee4d0d4f11bd5f1f098ed94dfa93 | 186e4a95c0a49cb81254d0d76c8d428d095960b0 | /cpskin/minisite/browser/interfaces.py | d6bc9ec74c914462e16582582220be393e4c00c6 | [] | no_license | IMIO/cpskin.minisite | 727fb328e015831b34db5ece64af1fbed6762dab | f84d5fbd0213726b9c001d5f726f7036350e3a4f | refs/heads/master | 2023-01-06T06:27:21.888332 | 2020-12-09T19:45:40 | 2020-12-09T19:45:40 | 20,135,118 | 0 | 1 | null | 2022-12-27T15:37:49 | 2014-05-24T17:15:41 | Python | UTF-8 | Python | false | false | 713 | py | from zope import schema
from zope.interface import Interface
class IHNavigationActivationView(Interface):
    """ Horizontal navigation activation.

    Contract for the view that toggles the horizontal navigation viewlet:
    two read-only booleans report whether each toggle is currently allowed,
    and two methods perform the toggling.
    """
    # True when the viewlet is currently disabled and may be switched on.
    can_enable_hnavigation = schema.Bool(
        u'Can enable horizontal navigation viewlet',
        readonly=True
    )
    # True when the viewlet is currently enabled and may be switched off.
    can_disable_hnavigation = schema.Bool(
        u'Can disable horizontal navigation viewlet',
        readonly=True
    )

    def enable_hnavigation():
        """ Enable horizontal navigation viewlet
        """

    def disable_hnavigation():
        """ Disable horizontal navigation viewlet
        """
class IHNavigationActivated(Interface):
    """
    Marker interface recording that horizontal navigation has been
    activated on an object.
    """
| [
"christophe.boulanger@imio.be"
] | christophe.boulanger@imio.be |
3e3cec46a5d299b728b7ec4fe4bd5e4fa651b295 | a8c7da9ad9da68146fc319467a9ca80b5fba5ab8 | /Account/migrations/0006_auto_20210501_2158.py | 741185715a2f45ceaf39398c58af72806e01cf2a | [] | no_license | MinhHoang2000/Project-2-School-Management | 359b2632b7b8d43934f90cdb1ae21caa561baef2 | c6174183aad2a594d8ebdbc848e3ee26d75203b0 | refs/heads/main | 2023-05-11T11:50:17.120515 | 2021-06-05T16:17:13 | 2021-06-05T16:17:13 | 359,300,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.1.7 on 2021-05-01 14:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.7): widen ``Account.password`` to 500 chars."""

    dependencies = [
        ('Account', '0005_auto_20210428_1949'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='password',
            field=models.CharField(max_length=500),
        ),
    ]
| [
"minhhoang14072000@gmail.com"
] | minhhoang14072000@gmail.com |
1b82e86c123a177a8f2ea505f676028ecdf4d35f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_caretakers.py | 24865dfee38f564211700a1151c979c6df6fd6e9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _CARETAKERS():
def __init__(self,):
self.name = "CARETAKERS"
self.definitions = caretaker
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['caretaker']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
21da9c74630b4ec861d28fce6da0d2f504fc15a0 | 8665386267a7abd69f27da671926703198e065ea | /Battery Testing Script Sample Output.py | b830d12221a91699e59b3983ecdaf0a57ba2c58f | [] | no_license | nlyman9/GPS-Battery-Saving-Project-CS-1980 | f62cda297a2d5030274751eab8033aa00fc8c9cf | a582efd213dc45aa27b4c1e737be040ab9f3a4e2 | refs/heads/master | 2020-12-22T17:18:37.829338 | 2020-05-01T18:48:19 | 2020-05-01T18:48:19 | 236,871,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | #NOTICE : This is the simplified version of the script solely for generating sample output before we do the actual presentation
#Import statements
import datetime
# This will be the power testing script for testing if the GPS can turn on and off
now = datetime.datetime.now()
# Timestamped output file name; ':' is not filename-safe, so swap it for '-'.
log_name = ("powerlogsamplefile " + str(now.date()) + " "
            + str(now.time())[:8].replace(":", "-") + ".txt")
# Declaring variables, currently in testing mode with flat values
amps = 0.5
watts = 1
volts = 5
trial = 1
logline = ""
powerline = ""
# Declaring time variables.
curtime = 0
# FIX: use a context manager so the log file is flushed and closed even if
# the run aborts part-way through (previously closed only at the very end).
with open(log_name, "w") as logfile:
    logfile.write("Data Format:\n")
    logfile.write("GPS ON/OFF | Current Time (Seconds) | Screen On/Off | Trial # | Power Statistics for current time | TimeStamp\n")
    # Basic For loop structure
    for gpsStat in ["On", "Off"]:
        # Control the total amount of time. Currently in seconds
        # Minutes - 1, 2
        for endtime in [20, 40]:
            for screenstatus in ["On", "Off"]:
                trial = 1
                while trial <= 2:
                    while curtime <= endtime:
                        logline = ""
                        # Simulated power readings; in production replace this
                        # branch with reads from the power meter.
                        if curtime % 2 == 0:
                            amps = amps + 1.6
                            volts = volts + 5.1
                        else:
                            amps = amps / 2.0
                            volts = volts / 2
                        watts = watts + 1
                        powerline = str("%.2f" % watts) + "W, " + str("%.2f" % amps) + "A, " + str("%.2f" % volts) + "V"
                        # Get the current datestamp
                        now = datetime.datetime.now()
                        # Convert the data to a loggable form
                        logline = gpsStat + " | " + str(curtime) + "s" + " | " + screenstatus + " | " + "#" + str(trial) + " | " + powerline + " | " + str(now)
                        curtime = curtime + 1
                        print(logline)
                        # Write to the logfile
                        logfile.write(logline + "\n")
                    curtime = 0
                    trial = trial + 1
| [
"nlyman9@gmail.com"
] | nlyman9@gmail.com |
e46b01983cfffde74a55b3887e6d3b2e7f2fed39 | 79402289f1ea9ca837c28344bc8a7c9e8691be80 | /Project/migrations/0001_initial.py | a7a88cbeb9d343c26ee71aee13daff9b16d4ccbd | [] | no_license | Kingapple21/YouPark | b6f27d0d89428639953efc47fe5739a2cb6a05cf | febdb4ba9667a034744f65b050b0542ba8489756 | refs/heads/master | 2020-04-08T10:48:57.318469 | 2018-11-27T08:55:01 | 2018-11-27T08:55:01 | 159,283,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-14 17:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Account, ParkingLot and
    ReserveParking tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('User_id', models.AutoField(primary_key=True, serialize=False)),
                ('Username', models.CharField(max_length=255, unique=True)),
                ('Password', models.CharField(max_length=255)),
                ('Verify_Password', models.CharField(max_length=255)),
                ('Fname', models.CharField(max_length=255)),
                ('Lname', models.CharField(max_length=255)),
                ('Email', models.EmailField(max_length=255)),
                ('Address', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='ParkingLot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Availability', models.BooleanField(default=False)),
                ('Slot_no', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='ReserveParking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('User_id', models.IntegerField()),
                ('Time_in', models.TimeField()),
                ('Date_in', models.DateField()),
                ('Slot_no', models.IntegerField()),
                ('Plate_no', models.CharField(max_length=7, unique=True)),
                ('Type_of_Vehicle', models.CharField(max_length=255)),
            ],
        ),
    ]
| [
"ricardocalura94@gmail.com"
] | ricardocalura94@gmail.com |
7f3dc2eb57b3190f8010e6c0bdaf16ea89358dcb | 99f9483263dfb2f6989ffc70e9d2fcfacc365bc8 | /noccwiki/urls.py | 73ff0ba967c037a22d8e773a712fc3cac28c1da0 | [] | no_license | shaktijeet-ego/noccguide | 9b53afd47bcb6388f1b1df85695a9cee64198a08 | c123ee3a3f8d058eada3cb42d64e4f8282566afe | refs/heads/master | 2023-07-01T00:44:14.686615 | 2021-08-02T06:34:26 | 2021-08-02T06:34:26 | 391,839,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | """noccwiki URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin site; no app-specific routes are registered yet.
    path('admin/', admin.site.urls),
]
| [
"shaktijeet.tripathi@gmail.com"
] | shaktijeet.tripathi@gmail.com |
f43edb4afec86010c5691b6f2c80cffe27d30a66 | 41812b03f2b05569ff9767f26a67f78850992230 | /blog/urls.py | 87a748e425ce98c819ce61a36c21a6f1c45584f6 | [] | no_license | huleos/my-first-blog | 9bb288197728f806a307f16d9af6cc523e97f6f9 | 90e032718cee97b9fbe94153adfac343d93749d4 | refs/heads/master | 2021-01-01T03:42:53.121825 | 2016-05-27T23:24:35 | 2016-05-27T23:24:35 | 59,703,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.post_list),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
] | [
"creativo@686studio.com"
] | creativo@686studio.com |
c8939291ca72e27ded4934d3af80fe3336b879db | 021f9a9719d1f86552ac62a123dab72a61f7665b | /firebase.py | 7ba1e53940a3f686a90f59ebbc24aea0006065d9 | [] | no_license | auxo86/py2_0829 | d582d8d3f648fe866311b6c6ed0582997c19b9b2 | b4243bb3d7fbc6b26267c0fe98e17bb2ec21a67b | refs/heads/master | 2021-01-20T21:06:24.974305 | 2017-08-29T13:33:38 | 2017-08-29T13:33:38 | 101,747,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | # encoding=utf-8
import requests
import datetime
# Root URL of the target Firebase Realtime Database (REST API); values are
# written at paths below this root with HTTP PUT.  (Python 2 script.)
url = 'https://cool-cf19a.firebaseio.com/'

# PUT a plain ASCII string value.
request1 = requests.put(url + '/data1_string.json', json='Hello World 0829')
print request1.status_code, request1.content

# PUT a unicode Chinese string (u'' literal).
request2 = requests.put(url + '/data2_utf_chinese.json', json=u'中文0829')
print request2.status_code, request2.content

# PUT a byte-string Chinese value (no u'' prefix; this module is Python 2).
request3 = requests.put(url + '/data3_chinese.json', json='中文非utf八月二十九號')
print request3.status_code, request3.content

# PUT a JSON object.
object1 = {'name':'Mark', 'age': 42}
request4 = requests.put(url + '/data4_obj.json', json=object1)
print request4.status_code, request4.content

# Insert None and element index 3 disappears (translated from the original
# Chinese comment) -- Firebase drops null entries from stored arrays.
array1 = ['Mark', True, 3.1415926, None, '2.968', 0, 2.968]
request5 = requests.put(url + '/data5_array.json', json=array1)
print request5.status_code, request5.content

# '/' denotes a path (translated): intermediate keys are created on demand.
request6 = requests.put(url + '/can/be/any/path/sample.json', json='Hello World ' + str(datetime.datetime.now()))
print request6.status_code, request6.content | [
"auxo86@gmail.com"
] | auxo86@gmail.com |
004685c045e5c27a6895a96304a7e41c9c1ea678 | 72957e8908905aab51b7a0b6be2ce1558bc677b6 | /src/glove.py | b36943a0b9cdcd366effb5bfe44cb5f937ad2d68 | [] | no_license | rekcahpassyla/nlp-project-2021 | 2f914d1045d455bc2800126560fa3fa0fca9d086 | 22ea5f9a1e860e721d18878b35701b1625f60c87 | refs/heads/main | 2023-04-23T21:04:15.258547 | 2021-05-17T14:15:26 | 2021-05-17T14:15:26 | 342,647,373 | 1 | 1 | null | 2021-04-28T15:44:35 | 2021-02-26T17:17:56 | Jupyter Notebook | UTF-8 | Python | false | false | 7,285 | py | import os
import socket
from copy import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# specify GPU
GPU = torch.cuda.is_available()
# NOTE(review): this unconditionally overrides the CUDA probe above, forcing
# CPU execution even when a GPU is present -- presumably a temporary toggle.
GPU = False
# If you have a problem with your GPU, set this to "cpu" manually
device = torch.device("cuda:0" if GPU else "cpu")
from collections import Counter, defaultdict
import numpy as np
import torch
class GloveWordsDataset:
    # TODO: Need to refactor so that all datasets take a co-occurrence matrix
    # instead of building in here
    def __init__(self, text, n_words=200000, window_size=5, device='cpu'):
        """Build a vocabulary and weighted co-occurrence data from raw text.

        :param text: one big string of space-separated tokens.
        :param n_words: cap on the number of tokens read from *text*.
        :param window_size: context window on each side of the centre word.
        :param device: torch device the index/value tensors are placed on.
        """
        self._window_size = window_size
        self._tokens = text.split(" ")[:n_words]
        # Idiom: Counter accepts the iterable directly (was build-then-update).
        word_counter = Counter(self._tokens)
        # Ids are assigned by descending frequency (ties keep first-seen order).
        self._word2id = {w: i for i, (w, _) in enumerate(word_counter.most_common())}
        self._id2word = {i: w for w, i in self._word2id.items()}
        self._vocab_len = len(self._word2id)
        self.concept_len = self._vocab_len
        self._id_tokens = [self._word2id[w] for w in self._tokens]
        self.device = device
        self._create_coocurrence_matrix()
        print("# of words: {}".format(len(self._tokens)))
        print("Vocabulary length: {}".format(self._vocab_len))

    def _create_coocurrence_matrix(self):
        """Populate the sparse co-occurrence tensors _i_idx, _j_idx, _xij.

        Each co-occurrence within the window is weighted by 1/distance,
        following the GloVe construction.
        """
        device = self.device
        cooc_mat = defaultdict(Counter)
        n_tokens = len(self._id_tokens)  # hoisted out of the loop
        for i, w in enumerate(self._id_tokens):
            start_i = max(i - self._window_size, 0)
            end_i = min(i + self._window_size + 1, n_tokens)
            for j in range(start_i, end_i):
                if i != j:
                    c = self._id_tokens[j]
                    cooc_mat[w][c] += 1 / abs(j - i)
        # Flatten the nested counters into parallel index/value lists, then
        # convert to tensors once (avoids rebinding attributes mid-build).
        i_idx = list()
        j_idx = list()
        xij = list()
        for w, cnt in cooc_mat.items():
            for c, v in cnt.items():
                i_idx.append(w)
                j_idx.append(c)
                xij.append(v)
        self._i_idx = torch.LongTensor(i_idx).to(device)
        self._j_idx = torch.LongTensor(j_idx).to(device)
        self._xij = torch.FloatTensor(xij).to(device)
        self.N = len(self._i_idx)

    def get_batches(self, batch_size):
        """Yield (x_ij, i_idx, j_idx) minibatches in a fresh random order."""
        rand_ids = torch.LongTensor(np.random.choice(self.N, self.N, replace=False))
        for p in range(0, self.N, batch_size):
            batch_ids = rand_ids[p:p + batch_size]
            yield self._xij[batch_ids], self._i_idx[batch_ids], self._j_idx[batch_ids]
class GloveModel(nn.Module):
    """GloVe factorisation model: centre/context embeddings plus biases."""

    def __init__(self, num_embeddings, embedding_dim):
        super(GloveModel, self).__init__()
        # Two independent embedding tables: centre word (wi) / context (wj).
        self.wi = nn.Embedding(num_embeddings, embedding_dim)
        self.wj = nn.Embedding(num_embeddings, embedding_dim)
        # Per-word scalar biases, stored as width-1 embeddings.
        self.bi = nn.Embedding(num_embeddings, 1)
        self.bj = nn.Embedding(num_embeddings, 1)
        for table in (self.wi, self.wj):
            table.weight.data.uniform_(-1, 1)
        for bias in (self.bi, self.bj):
            bias.weight.data.zero_()

    def forward(self, i_indices, j_indices):
        """Return w_i . w_j + b_i + b_j for each (i, j) index pair."""
        centre = self.wi(i_indices)
        context = self.wj(j_indices)
        dot = (centre * context).sum(dim=1)
        return dot + self.bi(i_indices).squeeze() + self.bj(j_indices).squeeze()
def weight_func(x, x_max, alpha):
    """GloVe weighting f(x) = min((x / x_max) ** alpha, 1), elementwise."""
    scaled = torch.pow(x / x_max, alpha)
    capped = torch.clamp(scaled, max=1.0)
    return capped.to(device)
def wmse_loss(weights, inputs, targets):
    """Weighted mean-squared error: mean of weights * (inputs - targets)^2."""
    per_element = F.mse_loss(inputs, targets, reduction='none')
    return (weights * per_element).mean().to(device)
def train(model, dataset, n_epochs, batch_size, x_max=100, alpha=0.75,
          output_filename='glove'):
    """Train a GloVe model with Adagrad, checkpointing the best-loss weights.

    :param model: a GloveModel instance.
    :param dataset: object exposing ``N`` and ``get_batches(batch_size)``.
    :param n_epochs: number of passes over the co-occurrence data.
    :param batch_size: minibatch size over the sparse co-occurrence entries.
    :param x_max: GloVe weighting cutoff.
    :param alpha: GloVe weighting exponent.
    :param output_filename: checkpoint prefix; the best weights are written
        to ``<output_filename>_min.pt``.
    """
    # BUG FIX: the optimizer was built from the module-level ``glove``
    # variable instead of the ``model`` argument, so training any model other
    # than the global one silently updated the wrong parameters.
    optimizer = optim.Adagrad(model.parameters(), lr=0.05)
    n_batches = int(dataset.N / batch_size)
    loss_values = list()
    min_loss = np.inf
    last_loss = np.inf
    for e in range(1, n_epochs + 1):
        batch_i = 0
        for x_ij, i_idx, j_idx in dataset.get_batches(batch_size):
            batch_i += 1
            optimizer.zero_grad()
            outputs = model(i_idx, j_idx)
            weights_x = weight_func(x_ij, x_max, alpha)
            loss = wmse_loss(weights_x, outputs, torch.log(x_ij))
            # BUG FIX: gradients were never computed before optimizer.step(),
            # so no learning could take place.
            loss.backward()
            optimizer.step()
            last_loss = loss.item()
            loss_values.append(last_loss)
            #if batch_i % 1024 == 0:
            print("Epoch: {}/{} \t Batch: {}/{} \t Loss: {}".format(e, n_epochs, batch_i, n_batches, np.mean(loss_values[-20:])))
        # Checkpoint whenever the most recent batch loss improves on the best
        # seen so far (evaluated once per epoch).
        if last_loss < min_loss:
            min_loss = last_loss
            torch.save(model.state_dict(), f"{output_filename}_min.pt")
    #torch.save(model.state_dict(), f"{output_filename}.pt")
if __name__ == '__main__':
    import os
    # Run-time configuration: toggle training vs. loading a checkpoint, and
    # whether to produce a t-SNE scatter plot of the learned embeddings.
    cfg = {
        "train": True,
        "plot": True,
        "co_occurrence_file": None,
        "glove_options": {
            "words_dataset": True,
            # I tried various values; only got good clustering with 3-5
            "embed_dim": 3,
            "n_epochs": 100,
            "batch_size": 1000000,
            "x_max": 100,
            "alpha": 0.75,
            "output_file": None
        }
    }
    # just run for all the text files in the datasets/glove directory
    basepath = os.path.abspath(os.path.join("..", "datasets", "glove"))
    files = os.listdir(basepath)
    outdir = os.path.join(basepath, "embeddings")
    glove_opts = cfg['glove_options']
    for fn in files:
        if not fn.endswith(".txt"):
            continue
        print(f"Processing: {fn}")
        inputfile = os.path.join(basepath, fn)
        # Output path keeps the input's base name, minus the .txt suffix.
        outputfile = os.path.join(outdir,
                                  os.path.basename(inputfile).replace('.txt', ''))
        dataset = GloveWordsDataset(open(inputfile).read(), 10000000, device=device)
        glove = GloveModel(dataset.concept_len, glove_opts['embed_dim']).to(device)
        if cfg['train']:
            train(glove, dataset, glove_opts['n_epochs'], glove_opts['batch_size'],
                  glove_opts['x_max'], glove_opts['alpha'],
                  outputfile)
        else:
            # Load a previously saved best checkpoint instead of training.
            kws = {}
            if device == 'cpu':
                kws['map_location'] = device
            glove.load_state_dict(
                torch.load(f"{outputfile}_min.pt", **kws))
        # plotting is auxiliary so not in a function yet
        if cfg['plot']:
            import matplotlib.pyplot as plt
            from sklearn.manifold import TSNE
            # Sum of the two embedding tables is used as the final word vector.
            emb_i = glove.wi.weight.cpu().data.numpy()
            emb_j = glove.wj.weight.cpu().data.numpy()
            emb = emb_i + emb_j
            top_k = 500
            tsne = TSNE(metric='cosine', random_state=123)
            embed_tsne = tsne.fit_transform(emb[:top_k, :])
            fig, ax = plt.subplots(figsize=(30, 30))
            for idx in range(top_k):
                plt.scatter(*embed_tsne[idx, :], color='steelblue')
                plt.annotate(dataset._id2word[idx],
                             (embed_tsne[idx, 0], embed_tsne[idx, 1]),
                             alpha=0.7)
            plt.savefig(f'{outputfile}.png')
| [
"corno@alum.mit.edu"
] | corno@alum.mit.edu |
b3e8b2e4d08c57d6fc06cf70e8ca5fc6190b709e | d9435de440750ab1fcf1ac846d718e4e0b2981bc | /Mainproject/Mainapp/views.py | a2043c5683814615ca24f773b7645dd7cb100779 | [] | no_license | Snex-Thadeus/snex1 | a7a0344ad9de82abb6cdd7797a627d171c096024 | fbaa5df0c13bc7d727ccc35463f511ef3965c330 | refs/heads/master | 2022-12-25T02:29:33.780923 | 2020-08-08T13:53:00 | 2020-08-08T13:53:00 | 286,052,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def register(request):
    """Render the registration page."""
    template_name = 'register.html'
    return render(request, template_name)
def login(request):
    """Render the login page."""
    template_name = 'login.html'
    return render(request, template_name)
"lwande94@gmail.com"
] | lwande94@gmail.com |
40a784449252db3c7223622cf4d268cd424600a5 | 4fc73ab81ffa1c21d9cde182daa59f6e931627d9 | /app/main/forms.py | 7ba60878671966f6576dc1b3c4ba5ccc33c27893 | [] | no_license | billyconnerjr/psc-project | d42d046ee90f2bed370dfeaa48917d9b1c0b3c21 | a5359689000b2ae5f53ba4548ab06bd5cbf37bd9 | refs/heads/master | 2022-09-25T21:57:35.950696 | 2020-03-05T17:13:42 | 2020-03-05T17:13:42 | 245,132,287 | 0 | 0 | null | 2022-09-16T18:19:17 | 2020-03-05T10:24:26 | Python | UTF-8 | Python | false | false | 828 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired, Length, Email
from app.models import Book
class BookForm(FlaskForm):
    """Create/edit form for a book entry (title, author, purchase date, notes)."""
    title = StringField('Title', validators=[DataRequired(), Length(max=140, message='Title text is too long!')])
    author = StringField('Author', validators=[DataRequired(), Length(max=64, message='Author text is too long!')])
    # Expects ISO-style dates (YYYY-MM-DD).
    date_purchased = DateField('Date Purchased', format='%Y-%m-%d')
    notes = TextAreaField('Notes', validators=[Length(max=200, message='Notes text is too long!')])
    submit = SubmitField('Submit')
class ShareBookListForm(FlaskForm):
    """Form for sharing a book list with another user, identified by email."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    submit = SubmitField('Share')
"billychaos@gmail.com"
] | billychaos@gmail.com |
70d98d3a2e98864827e096b6d36d39057c150cf3 | 638db831149040650a383d5908025b4fc2be1aa6 | /alien/day9/game_stats.py | e531649fae2331b89767e0942b553e13f6317b97 | [] | no_license | oubingkun/pi4_tutorial | e5487a33209f0650c05d0ed82975a60887eb992e | f60aebd2bd81f17b1142fb5e66f4c38ca4e7a0f9 | refs/heads/master | 2022-03-30T08:00:50.768736 | 2019-12-17T07:00:03 | 2019-12-17T07:00:03 | 258,737,532 | 1 | 0 | null | 2020-04-25T09:48:21 | 2020-04-25T09:48:20 | null | UTF-8 | Python | false | false | 212 | py |
class GameStats():
    """Track statistics for the game session."""
    def __init__(self, ai_settings):
        """Keep a reference to the settings object and initialise the stats."""
        self.ai_settings = ai_settings
        # The game starts out in the active state.
        self.game_active = True
        self.reset_stats()
    def reset_stats(self):
        """Reset the statistics that can change while the game is running."""
        # Number of ships the player has left, per the configured limit.
        self.ships_left = self.ai_settings.ship_limit
| [
"rcdrones@163.com"
] | rcdrones@163.com |
10f3f0ca057e6383b33ac0929fc7f212d2521e61 | b9e4bf5c00ac0d6c1a6e6038e8dc18041819ff99 | /Python3/0224_Basic_Calculator.py | 8dc3ecb6218b2b585509e0dca74cd886059e4f2d | [] | no_license | kiranani/playground | 98fdb70a3ca651436cc1eede0d2ba1b1ea9aba1d | 12f62a218e827e6be2578b206dee9ce256da8d3d | refs/heads/master | 2021-06-03T12:43:29.388589 | 2020-06-12T15:43:45 | 2020-06-12T15:43:45 | 149,614,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | from operator import add, sub
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate a basic expression with integers, '+', '-', parentheses and spaces.

        A recursive-descent parse: each parenthesised group is handled by a
        recursive call; pending '+'/'-' operators are folded left-to-right.
        A sentinel 0 at the bottom of the operand stack makes a leading
        unary minus (e.g. "-2+1") work without special-casing.
        """
        op_table = {"+": add, "-": sub}
        length = len(s)

        def reduce_stacks(operands, operators):
            # Fold pending binary operators left-to-right while possible.
            while operators and len(operands) > 1:
                rhs = operands.pop()
                lhs = operands.pop()
                operands.append(op_table[operators.pop()](lhs, rhs))

        def parse(pos):
            # Parse from `pos` until end of string or a closing parenthesis;
            # return (index of last consumed char, value of the subexpression).
            operands, operators = [0], []
            while pos < length:
                ch = s[pos]
                if ch.isdigit():
                    # Consume the whole multi-digit number.
                    end = pos
                    while end + 1 < length and s[end + 1].isdigit():
                        end += 1
                    operands.append(int(s[pos:end + 1]))
                    pos = end
                elif ch in op_table:
                    reduce_stacks(operands, operators)
                    operators.append(ch)
                elif ch == "(":
                    pos, inner = parse(pos + 1)
                    operands.append(inner)
                elif ch == ")":
                    break
                pos += 1
            reduce_stacks(operands, operators)
            return pos, operands[-1]

        return parse(0)[1]
| [
"noreply@github.com"
] | kiranani.noreply@github.com |
b78f528ab314388dc0798d4a3220cb253e8cc030 | 4928bbf9a2bf834b13488046373adad39d604c26 | /home/admin.py | 2bce5cf50a363c684c9e0e42fb3d2258696ee672 | [] | no_license | alinadir/futbolhaber | 4ff56c6e8366a092e1f94f538b48421b870d21af | da11039e8ff9a11762c80ebffb5196de930c15ad | refs/heads/master | 2020-03-06T23:19:40.438568 | 2018-05-03T11:36:31 | 2018-05-03T11:36:31 | 127,129,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | from django.contrib import admin
from .models import SosyalMedia,Yorum,Karakterler,Yazi,Yazarlar,Konu,Puan
from django.utils.text import slugify
# Register your models here.
class PuanAdmin(admin.ModelAdmin):
    """Admin list configuration for the Puan (points table) model."""
    list_display = ['isim','puan','averaj']
    list_display_links = ['isim']
    list_filter = ['puan','averaj']
    search_fields = ['isim']
    # Fields listed here must not be rendered as links
    # (i.e. must not overlap list_display_links).
    list_editable = ['puan','averaj',]
    # NOTE(review): ModelAdmin does not read an inner Meta.model; this inner
    # class appears to have no effect -- TODO confirm intent.
    class Meta:
        model = Puan
class SosyalAdmin(admin.ModelAdmin):
    """Admin list configuration for the SosyalMedia model."""
    list_display = ['isim','content1']
    list_display_links = ['isim']
    list_filter = ['publishing_date']
    search_fields = ['isim']
    # Fields listed here must not be rendered as links
    # (i.e. must not overlap list_display_links).
    list_editable = ['content1',]
    # NOTE(review): ModelAdmin does not read an inner Meta.model -- TODO confirm intent.
    class Meta:
        model = SosyalMedia
class YazıAdmin(admin.ModelAdmin):
    """Admin list configuration for the Yazi (article) model."""
    list_display = ['yazar','yazının_baslıgı']
    list_display_links = ['yazar']
    list_filter = ['yazar']
    search_fields = ['ana_content','yazar','yazının_baslıgı']
    # Fields listed here must not be rendered as links
    # (i.e. must not overlap list_display_links).
    list_editable = ['yazının_baslıgı',]
    # NOTE(review): ModelAdmin does not read an inner Meta.model -- TODO confirm intent.
    class Meta:
        model = Yazi
class YorumAdmin(admin.ModelAdmin):
    """Admin list configuration for the Yorum (comment) model."""
    list_display = ['isim','yorumunuz']
    list_display_links = ['isim']
    list_filter = ['isim']
    search_fields = ['isim','yorumunuz',]
    # Fields listed here must not be rendered as links
    # (i.e. must not overlap list_display_links).
    list_editable = ['yorumunuz',]
    # NOTE(review): ModelAdmin does not read an inner Meta.model -- TODO confirm intent.
    class Meta:
        model = Yorum
# Attach each model to the Django admin; models registered without a custom
# ModelAdmin fall back to the default admin options.
admin.site.register(SosyalMedia,SosyalAdmin)
admin.site.register(Puan,PuanAdmin)
admin.site.register(Yazi,YazıAdmin)
admin.site.register(Yorum,YorumAdmin)
admin.site.register(Karakterler)
admin.site.register(Yazarlar)
admin.site.register(Konu)
| [
"erdgn.shn57@gmail.com"
] | erdgn.shn57@gmail.com |
744e9561f7d4d3eef9a94b6e8640cd338572b27e | 9ca6885d197aaf6869e2080901b361b034e4cc37 | /DQM/EcalBarrelMonitorTasks/python/TowerStatusTask_cfi.py | 8f95685d078f8314dcec7f90e2f74a551bc5503f | [] | no_license | ktf/cmssw-migration | 153ff14346b20086f908a370029aa96575a2c51a | 583340dd03481dff673a52a2075c8bb46fa22ac6 | refs/heads/master | 2020-07-25T15:37:45.528173 | 2013-07-11T04:54:56 | 2013-07-11T04:54:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | import FWCore.ParameterSet.Config as cms
# Monitor-element definitions for the ECAL tower status task.
# Two groups are configured -- DAQ status (switchable via doDAQInfo) and DCS
# status (doDCSInfo) -- each with an Ecal-wide scalar summary, a per-DCC 2D
# map, and a per-supermodule report.
ecalTowerStatusTask = cms.untracked.PSet(
    doDAQInfo = cms.untracked.bool(True),
    doDCSInfo = cms.untracked.bool(True),
    MEs = cms.untracked.PSet(
        # Ecal-wide DAQ summary value.
        DAQSummary = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DAQSummary'),
            otype = cms.untracked.string('Ecal'),
            btype = cms.untracked.string('Report'),
            kind = cms.untracked.string('REAL'),
            description = cms.untracked.string('')
        ),
        # 2D DAQ status map binned by DCC.
        DAQSummaryMap = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DAQSummaryMap'),
            otype = cms.untracked.string('Ecal'),
            btype = cms.untracked.string('DCC'),
            kind = cms.untracked.string('TH2F'),
            description = cms.untracked.string('')
        ),
        # One DAQ report per supermodule (%(sm)s expands to the SM name).
        DAQContents = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DAQContents/Ecal_%(sm)s'),
            otype = cms.untracked.string('SM'),
            btype = cms.untracked.string('Report'),
            kind = cms.untracked.string('REAL'),
            description = cms.untracked.string('')
        ),
        # Ecal-wide DCS summary value.
        DCSSummary = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DCSSummary'),
            otype = cms.untracked.string('Ecal'),
            btype = cms.untracked.string('Report'),
            kind = cms.untracked.string('REAL'),
            description = cms.untracked.string('')
        ),
        # 2D DCS status map binned by DCC.
        DCSSummaryMap = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DCSSummaryMap'),
            otype = cms.untracked.string('Ecal'),
            btype = cms.untracked.string('DCC'),
            kind = cms.untracked.string('TH2F'),
            description = cms.untracked.string('')
        ),
        # One DCS report per supermodule.
        DCSContents = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/EventInfo/DCSContents/Ecal_%(sm)s'),
            otype = cms.untracked.string('SM'),
            btype = cms.untracked.string('Report'),
            kind = cms.untracked.string('REAL'),
            description = cms.untracked.string('')
        )
    )
)
| [
"sha1-b63dc01a642d380a16da36b4e834787802efdeb4@cern.ch"
] | sha1-b63dc01a642d380a16da36b4e834787802efdeb4@cern.ch |
4e2bc237b656dec73be3da3e08e027e4576fbbc3 | 7f414ca0b82277d6b16145a78530364cb4513925 | /tutorial/api/templates.py | c7cf5ef8cbd1620942906d5f6d6d294ba1147286 | [
"MIT"
] | permissive | mrchi/fastapi-tutorial | c8e01148366a6eea56decaba97c90e66a3ed2436 | b8453e65a9b7ec2b488c6ef4b72a3be06f79446f | refs/heads/master | 2023-07-02T06:18:04.166989 | 2021-08-13T12:04:18 | 2021-08-13T12:04:18 | 382,529,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # coding=utf-8
from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
from starlette.responses import HTMLResponse
router = APIRouter()
# Jinja2 template loader rooted at the project's template directory.
tpls = Jinja2Templates(directory="tutorial/templates")
@router.get("/hello", response_class=HTMLResponse, summary="Render jinja2 templates")
async def hello(request: Request):
    """Render hello.html with a fixed context.

    Starlette's TemplateResponse requires the request object inside the
    context dict, alongside any template variables (here just ``id``).
    """
    return tpls.TemplateResponse("hello.html", {"request": request, "id": 3})
| [
"chiqingjun@gmail.com"
] | chiqingjun@gmail.com |
4bd3fea9df0d339760410abcb7bc705831ee1022 | b40e5c6c1787dd222702b3dc817537c059e3abf7 | /interlink/migrations/0004_auto__add_field_incomingmail_owner.py | bca828735df8a4344e1f4d980a41e9d1b5389875 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | tevatia/nadine | 2e1d4908cab37c2f893f68ce59d939b5621c14a0 | e1db914ae70d3f7f3de719c8fb54b0e2198e2b56 | refs/heads/master | 2021-01-18T11:15:48.083446 | 2014-04-18T22:39:42 | 2014-04-18T22:39:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,572 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add a nullable ``owner`` FK (-> auth.User)
        to the interlink_incomingmail table."""
        # Adding field 'IncomingMail.owner'
        db.add_column('interlink_incomingmail', 'owner', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User'], null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the FK column (stored as ``owner_id``)."""
        # Deleting field 'IncomingMail.owner'
        db.delete_column('interlink_incomingmail', 'owner_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'interlink.incomingmail': {
'Meta': {'object_name': 'IncomingMail'},
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailing_list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'incoming_mails'", 'to': "orm['interlink.MailingList']"}),
'origin_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'sent_time': ('django.db.models.fields.DateTimeField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'default': "'raw'", 'max_length': '10'}),
'subject': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'interlink.mailinglist': {
'Meta': {'object_name': 'MailingList'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_opt_out': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'moderated_mailing_lists'", 'blank': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'pop_host': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'pop_port': ('django.db.models.fields.IntegerField', [], {'default': '995'}),
'smtp_host': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'smtp_port': ('django.db.models.fields.IntegerField', [], {'default': '587'}),
'subject_prefix': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subscribed_mailing_lists'", 'blank': 'True', 'to': "orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'interlink.outgoingmail': {
'Meta': {'ordering': "['-created']", 'object_name': 'OutgoingMail'},
'attempts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mailing_list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outgoing_mails'", 'to': "orm['interlink.MailingList']"}),
'moderators_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original_mail': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['interlink.IncomingMail']", 'blank': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['interlink']
| [
"trevor@trevor.smith.name"
] | trevor@trevor.smith.name |
b3c615e65519ecbd43687d83e8216c9f86283458 | 4bf7abfb2d02be6cce23828d0554ccb575b91c3a | /step02/bookings.py | bc2ee7c8312d9977f6c5975d40040a7a883b6119 | [] | no_license | OneScreenfulOfPython/booking-system | 5c6471ec1e4c0103bd871d1e7a45a474a30f7e91 | e4daf5d3eb6e8620acb164f35ad09cb9081612aa | refs/heads/master | 2021-06-01T23:14:25.649607 | 2014-09-24T08:01:32 | 2014-09-24T08:01:32 | 24,347,319 | 10 | 14 | null | 2020-10-01T16:53:56 | 2014-09-22T22:04:53 | Python | UTF-8 | Python | false | false | 3,003 | py | #!python3
import os, sys
import sqlite3
#
# Ensure we're using the same database filename throughout.
# It doesn't matter what this is called or where it is:
# sqlite3 will just accept anything.
#
DATABASE_FILEPATH = "bookings.db"
def create_database():
    """(Re)build the SQLite database from the statements in create.sql.

    Any existing database file is deleted first, so every run starts from
    a clean schema.  create.sql holds multiple statements separated by
    semicolons; each one is executed individually through a cursor.
    """
    # Start from scratch: sqlite3.connect() below will recreate the file.
    if os.path.exists(DATABASE_FILEPATH):
        os.remove(DATABASE_FILEPATH)

    # A cursor is Python's mechanism for running SQL against a database.
    connection = sqlite3.connect(DATABASE_FILEPATH)
    cursor = connection.cursor()

    # Read create.sql in one gulp, then split on the semicolon so each
    # individual statement can be executed on its own.
    ddl = open("create.sql").read()
    for statement in ddl.split(";"):
        cursor.execute(statement)

    # Tidy up: close the cursor, commit the schema, close the connection.
    cursor.close()
    connection.commit()
    connection.close()
def populate_database():
    """Populate the database with some valid test data
    """
    db = sqlite3.connect(DATABASE_FILEPATH)
    q = db.cursor()
    # Users must exist before bookings reference them below.
    sql = "INSERT INTO users(id, name, email_address) VALUES(?, ?, ?)"
    q.execute(sql, [1, "Mickey Mouse", "mickey.mouse@example.com"])
    q.execute(sql, [2, "Donald Duck", "donald.duck@example.com"])
    q.execute(sql, [3, "Kermit the Frog", None])
    # Rooms likewise; a NULL location is allowed (Main Hall).
    sql = "INSERT INTO rooms(id, name, location) VALUES(?, ?, ?)"
    q.execute(sql, [1, "Room A", "Next to the stairway"])
    q.execute(sql, [2, "Room B", "On the Second Floor"])
    q.execute(sql, [3, "Main Hall", None])
    #
    # Triple-quoted strings can cross lines
    # NB the column order doesn't matter if you specify it
    #
    sql = """
    INSERT INTO
        bookings
    (
        room_id, user_id, booked_on, booked_from, booked_to
    )
    VALUES(
        ?, ?, ?, ?, ?
    )"""
    q.execute(sql, [1, 1, '2014-09-25', '09:00', '10:00']) # Room A (1) booked by Mickey (1) from 9am to 10am on 25th Sep 2014
    q.execute(sql, [3, 1, '2015-09-25', None, None]) # Main Hall (3) booked by Mickey (1) all day on 25th Sep 2015
    q.execute(sql, [2, 3, '2014-09-22', '12:00', None]) # Room B (2) booked by Kermit (3) from midday onwards on 22nd Sep 2014
    q.execute(sql, [1, 2, '2015-02-14', '09:30', '10:00']) # Room A (1) booked by Donald (2) from 9.30am to 10am on 14th Feb 2015
    q.close()
    db.commit()
    db.close()
if __name__ == '__main__':
    # Rebuild the schema from create.sql, then load the sample rows.
    print("About to create database %s" % DATABASE_FILEPATH)
    create_database()
    print("About to populate database %s" % DATABASE_FILEPATH)
    populate_database()
    print("Finished")
| [
"mail@timgolden.me.uk"
] | mail@timgolden.me.uk |
886adf173237513d78cfb2fca8a029481ea0ee3a | 75952c75cf062910c9ced72ff22d7a3d5ea1513d | /javascript/javascript-python-ipc/iris.py | 1c9365e2972788ad538548b9fd2c7c4c3d55c721 | [] | no_license | GabrieleMaurina/workspace | f01a412f001f63c8e41dc8e6d21941be83bf404e | 37d98a99c83d36c07c51990c5b6ec2a8acaac33e | refs/heads/master | 2022-09-26T16:48:49.169262 | 2022-08-30T20:09:18 | 2022-08-30T20:09:18 | 155,683,393 | 1 | 0 | null | 2021-02-26T14:42:44 | 2018-11-01T08:16:45 | Python | UTF-8 | Python | false | false | 102 | py | import pandas as pd
# Load the iris dataset (no header row in the file) and dump it to stdout.
df = pd.read_csv('iris.data', header=None)
print(df)
# Read one line from stdin and echo it back.
data = input()
print(data)
| [
"gabriele@localhost-live.lan"
] | gabriele@localhost-live.lan |
ea474e3e6556d493379e349ccdd48a5d1f59ce81 | e1f0c304087a7180e1abe23d47d1f1208e80966e | /kaggle_import.py | 2baf0ba132a9300638d8f2492f9970e6590e53fe | [] | no_license | sinixy/DBWorkshop-3 | 44e40ef9916e63c6745413b359074e0ea599f108 | 4c35c98119e5dcdf1aa5545d8d9909ec900790d3 | refs/heads/master | 2022-07-07T16:08:45.130451 | 2020-05-08T12:45:33 | 2020-05-08T12:45:33 | 260,464,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,061 | py | '''
УВАГА
Даний скрипт імпортує всі 5 млн записів із .csv файлу, що займає дуже багато часу. Для перевірки краще запускати kaggle_import
_first40000rows.py - імпорт лише перших 40000 записів
'''
import cx_Oracle
from csv import reader
import re
# Connection settings for the local Oracle XE instance.
# NOTE(review): hard-coded admin credentials; load from environment/config
# for anything beyond local testing.
username = 'system'
password = 'databon'
dsn = 'localhost/xe'
connection = cx_Oracle.connect(username, password, dsn)
cursor = connection.cursor()
# Have the session parse timestamp literals as 2016-01-07 ... rather than
# the default 2016-JAN-07 format.
cursor.execute("""ALTER SESSION SET nls_timestamp_format = 'YYYY-MM-DD HH24:MI:SS.FF'""")
# Допоміжні функції для валідації
def isFloat(num):
    """Return True if *num* is a signed decimal with a fractional part (e.g. '-2.5')."""
    pattern = r'^[+-]?[0-9]+\.[0-9]+$'
    return bool(re.search(pattern, num))
def isZipcode(zipcode):
    """Return True if *zipcode* is a string of exactly 4 or 5 digits."""
    pattern = r'^[0-9]{4,5}$'
    return bool(re.search(pattern, zipcode))
with open('US_WeatherEvents_2016-2019.csv') as file:
    reader = reader(file, delimiter=',')
    header = next(reader)
    rowCnt = 1
    # Caches of values already written, so each dimension row is inserted only once.
    typeLst, severityLst, airportLst, locationLst, periodLst, cityLst, countyLst, stateLst, zipLst = [[] for i in range(9)]
    try:
        for row in reader:
            # Pull the needed columns (CSV column 5 is skipped).
            eventId, eType, severity, startTime,\
            endTime, airportCode, locationLat, locationLng,\
            city, county, state, zipcode = [row[i].strip() for i in range(13) if i != 5]
            # Coerce the numeric columns, substituting defaults for bad input.
            if isFloat(locationLat):
                locationLat = float(locationLat)
            else:
                # Fallback default of 0.1 for an unparsable coordinate.
                locationLat = 0.1
            if isFloat(locationLng):
                locationLng = float(locationLng)
            else:
                locationLng = 0.1
            if isZipcode(zipcode):
                zipcode = int(zipcode)
            else:
                zipcode = 111
            location = (locationLat, locationLng)
            period = (startTime, endTime)
            # Insert each dimension value only if it has not been seen yet,
            # replacing missing values with explicit "Undefined" placeholders.
            # The raw (pre-placeholder) value is what gets cached, so an empty
            # field maps onto its placeholder row exactly once.
            if eType not in typeLst:
                typeLst.append(eType)
                if not eType:
                    eType = 'Undefined Type'
                query = '''INSERT INTO EventType (eType) VALUES (:eType)'''
                cursor.execute(query, eType=eType)
            if severity not in severityLst:
                severityLst.append(severity)
                if not severity:
                    # NOTE(review): placeholder says 'Undefined Type' -- looks like a
                    # copy-paste from the eType branch; confirm whether
                    # 'Undefined Severity' was intended.
                    severity = 'Undefined Type'
                query = '''INSERT INTO EventSeverity (severity) VALUES (:severity)'''
                cursor.execute(query, severity=severity)
            if period not in periodLst:
                periodLst.append(period)
                if not startTime:
                    startTime = '2000-01-01 22:00:00'
                if not endTime:
                    endTime = '2000-01-01 23:00:00'
                query = '''INSERT INTO EventPeriod (starttime, endtime) VALUES (:starttime, :endtime)'''
                cursor.execute(query, starttime=startTime, endtime=endTime)
            # Geography dimensions are inserted parent-first: state -> county
            # -> city, since each references the previous level.
            if state not in stateLst:
                stateLst.append(state)
                if not state:
                    state = 'Undefined state'
                query = '''INSERT INTO States (statename) VALUES (:state)'''
                cursor.execute(query, state=state)
            if county not in countyLst:
                countyLst.append(county)
                if not county:
                    county = 'Undefined county'
                query = '''INSERT INTO Okrug (county, statename) VALUES (:county, :state)'''
                cursor.execute(query, county=county, state=state)
            if city not in cityLst:
                cityLst.append(city)
                if not city:
                    city = 'Undefined city'
                query = '''INSERT INTO City (city, county) VALUES (:city, :county)'''
                cursor.execute(query, city=city, county=county)
            if zipcode not in zipLst:
                zipLst.append(zipcode)
                query = '''INSERT INTO ZipCode (zipcode) VALUES (:zipcode)'''
                cursor.execute(query, zipcode=zipcode)
            if location not in locationLst:
                locationLst.append(location)
                query = '''INSERT INTO Location (locationlat, locationlng, city, zipcode)
                VALUES (:locationlat, :locationlng, :city, :zipcode)'''
                cursor.execute(query, locationlat=locationLat, locationlng=locationLng, city=city, zipcode=zipcode)
            if airportCode not in airportLst:
                airportLst.append(airportCode)
                if not airportCode:
                    airportCode = 'Undefined code'
                query = '''INSERT INTO Airport (airportcode, locationlat, locationlng)
                VALUES (:airportcode, :locationlat, :locationlng)'''
                cursor.execute(query, airportcode=airportCode, locationlat=locationLat, locationlng=locationLng)
            # Fact row: one Event per CSV record, referencing the dimensions above.
            query = '''INSERT INTO Event (eventId, airportCode, startTime, endTime, severity, eType)
            VALUES (:eventId, :airportCode, :startTime, :endTime, :severity, :eType)'''
            cursor.execute(query,
            eventId=eventId,
            airportCode=airportCode,
            startTime=startTime,
            endTime=endTime,
            severity=severity,
            eType=eType)
            rowCnt += 1
    except Exception as e:
        # Report the failing line number; processing stops at the first error.
        print(f'Error! {e}\nLine {rowCnt}')
# Commit once at the end (single transaction for the whole import).
connection.commit()
cursor.close()
connection.close()
| [
"noreply@github.com"
] | sinixy.noreply@github.com |
3a58744c056a4224e8c07b5f68fc0dbfff184534 | 1e4eee5e5649cfad99a295fc20d0c2643324f9f0 | /python/ch_15_play_sound.py | cef5328f06adbe99903a6701ad9a0e1b8e99f65a | [
"MIT"
] | permissive | zeitguy/raspberrypi_cookbook_ed3 | 2ccfd6bfeab71378c3ecb78240b3aa3d78f9e46d | bccf35ea309f493c1a45c3b96dec45dc7cebb0d9 | refs/heads/master | 2020-06-14T04:19:52.506327 | 2019-06-28T15:46:42 | 2019-06-28T15:46:42 | 194,896,377 | 2 | 0 | MIT | 2019-07-02T16:10:37 | 2019-07-02T16:10:36 | null | UTF-8 | Python | false | false | 171 | py | import subprocess
sound_file = '/home/pi/raspberrypi_cookbook_ed3/python/school_bell.mp3'
# Audio output for omxplayer's -o option ('local' = analog jack).
sound_out = 'local'
# Play the MP3 with omxplayer; subprocess.run blocks until playback ends.
subprocess.run(['omxplayer', '-o', sound_out, sound_file])
| [
"srmonk@gmail.com"
] | srmonk@gmail.com |
ed9a521d925b3b887a69d6c8b857a69eca06014e | 118eac712fedef2004679b4578929ca53ef9de16 | /ehcs_test.py | 96fa0785483a02def1817b0b0a9f5de01679767c | [] | no_license | syakirali/stemming | 200d50239e2a649421ed40c03828c158e9eb6436 | 849b6c577f7d901a926208ab97fb11ac3f602748 | refs/heads/master | 2020-05-03T01:35:35.388989 | 2019-05-22T11:38:00 | 2019-05-22T11:38:00 | 178,341,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from EHCS import EHCS
stemmer = EHCS()
# Load the test terms, one per line, stripping trailing newlines.
terms = [line.rstrip('\n') for line in open('term.txt')]
for t in terms:
    print(t)
    # process() returns a dict; print the value under its 'stem' key.
    print(stemmer.process(t)['stem'])
| [
"email02081997@gmail.com"
] | email02081997@gmail.com |
9b9fd9d1617f1c1bc52d136a916dff43ab07573c | 4ef670c0cadd086e2096ba1074d96ee09c87e217 | /poc/__init__.py | 909411c3a9018b2c5076688e23350853b9a0e759 | [] | no_license | JasonLOU/WeblogicScan-master | 86808a42d859d758b1c58bb78fa49fc950209158 | 8cd227ca2cc838c02687157b907c6c8a98deb47a | refs/heads/master | 2020-05-30T05:59:13.231776 | 2019-05-31T09:40:30 | 2019-05-31T09:40:30 | 189,568,866 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\
'''
| [
"noreply@github.com"
] | JasonLOU.noreply@github.com |
cca570aa241c988d1edd83041d542e10b419c813 | 65df497ef4c6bd44d70e766bff22c5ffd97f6166 | /example_simple_distilled.py | 34fa5c27d282304c88ec291f64f414b11bc0a4ad | [] | no_license | imnotamember/python-egi | a49d8d070e1ee699c37d171f66dafe1801204a46 | 75145546a8a7d473acefb8d44e64077ad5cd5bc3 | refs/heads/master | 2020-06-21T12:11:40.670168 | 2019-09-18T15:59:29 | 2019-09-18T15:59:29 | 197,446,678 | 2 | 0 | null | 2019-07-17T19:01:56 | 2019-07-17T19:01:55 | null | UTF-8 | Python | false | false | 830 | py |
# >>> import and initialization >>>
import egi.simple as egi
## import egi.threaded as egi
# ms_localtime = egi.egi_internal.ms_localtime
ms_localtime = egi.ms_localtime
ns = egi.Netstation()
ns.connect('11.0.0.42', 55513) # sample address and port -- change according to your network settings
## ns.initialize('11.0.0.42', 55513)
ns.BeginSession()
ns.sync()
ns.StartRecording()
# >>> send many events here >>>
## # optionally can perform additional synchronization
## ns.sync()
ns.send_event( 'evt_', label="event", timestamp=egi.ms_localtime(), table = {'fld1' : 123, 'fld2' : "abc", 'fld3' : 0.042} )
# >>> we have sent all we wanted, time to go home >>>
ns.StopRecording()
ns.EndSession()
ns.disconnect()
## ns.EndSession()
## ns.finalize()
# >>> that's it !
| [
"gaelenh@gmail.com"
] | gaelenh@gmail.com |
662b6a31b9f1142ef6d5e1adfaecdae19bd7bb1e | 6fe8cf62f157672715fae94af6cfae08432d826d | /gl_metr_scr/mon.py | 89125f0d299f3ac4306535249219deb576208d61 | [] | no_license | monergeim/python | 20c7268d5edf397c329532f61d759c6fc342ead6 | 2c504fca506a0c38b3b69f48d0e211bb5479ab9d | refs/heads/master | 2021-06-24T23:04:09.295166 | 2021-03-22T21:52:54 | 2021-03-22T21:52:54 | 185,149,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | #!/usr/bin/env python
import psutil
import argparse
# Parse arguments: one positional argument selecting the metric group.
myparser = argparse.ArgumentParser()
myparser.add_argument("arg", help="enter argument mem or cpu to see details")
myargs = myparser.parse_args()
# Use named fields instead of tuple indices: the field order of psutil's
# named tuples varies between platforms (e.g. index 9 of virtual_memory()
# is 'shared' only on Linux), so numeric indexing is fragile.
if myargs.arg == "mem":
    res_virt = psutil.virtual_memory()
    res_swap = psutil.swap_memory()
    print("virtual total", res_virt.total)
    print("virtual used", res_virt.used)
    print("virtual free", res_virt.free)
    print("virtual shared", res_virt.shared)
    print("swap total", res_swap.total)
    print("swap used", res_swap.used)
    print("swap free", res_swap.free)
elif myargs.arg == "cpu":
    result = psutil.cpu_times_percent(interval=1)
    print("system.cpu.idle", result.idle)
    print("system.cpu.user", result.user)
    print("system.cpu.guest", result.guest)
    print("system.cpu.iowait", result.iowait)
    print("system.cpu.stolen", result.steal)
    print("system.cpu.system", result.system)
else:
    print("Incorrect argument, please use -h for help")
| [
"ext-sergii.lypnytskyi@here.com"
] | ext-sergii.lypnytskyi@here.com |
6a3c0325baab2735fcefafaf2edc1ad0cbd8c95f | 91c53528f81f89488173d3fc4de6814a964742fc | /src/d3vz/urls.py | 9e383db00f67b55667c6bffe8fdae34c2cc89e47 | [] | no_license | jbigdataspr/d3api | 06312125fca4f7d50341aa79d509f15e57736c20 | 4d469704ed8dbebd6099881ca65700e595eb3fe9 | refs/heads/master | 2020-09-14T16:11:40.122670 | 2020-08-01T15:51:14 | 2020-08-01T15:51:14 | 223,179,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,434 | py | """d3vz URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic.base import RedirectView
from d3e.views import home_view, rrect_view, burst_view, ttip_view, ttip2_view, \
piechart_view, d3dbar_view, relig_view, bar2_view, bar3_view, \
raindrop_view, debts_view, barcharts_view, tmp_view, \
heatmap_view, globtemp_view, realine_view, contour1_view, \
contour2_view, contour3_view, contour4_view, howard_view, md_view
# Route table: the admin site, the d3 demo views (each mounted under the
# /d3/ prefix), and a root redirect to /d3/.
# NOTE(review): 'd3/globtemp/' reuses the route name 'heatmap' (already
# used by 'd3/heatmap/'), and 'd3/debts' lacks a trailing slash — confirm
# both are intentional.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('d3/', home_view, name='d3 home'),
    path('d3/pie/', piechart_view, name='piechart'),
    path('d3/roundedrect/', rrect_view, name='RoundedRectangles'),
    path('d3/sunburst/', burst_view, name='sunburst'),
    path('d3/raindrop/', raindrop_view, name='raindrop'),
    path('d3/debts', debts_view, name='debts'),
    path('d3/bar/', barcharts_view, name = 'barcharts'),
    path('d3/bar2/', bar2_view, name = 'bar2'),
    path('d3/bar3/', bar3_view, name = 'bar3'),
    path('d3/d3dbar/', d3dbar_view, name = 'd3dBar'),
    path('d3/ttip/', ttip_view, name = 'tooltips'),
    path('d3/ttip2/', ttip2_view, name = 'tooltips2'),
    path('d3/howard/', howard_view, name = 'howard_tooltips2'),
    path('d3/md/', md_view, name = 'md_tooltips2'),
    path('d3/religions/', relig_view, name = 'religions'),
    path('d3/heatmap/', heatmap_view, name = 'heatmap'),
    path('d3/globtemp/', globtemp_view, name = 'heatmap'),
    path('d3/realine/', realine_view, name = 'RealTime'),
    path('d3/contour1/', contour1_view, name = 'contour1'),
    path('d3/contour2/', contour2_view, name = 'contour2'),
    path('d3/contour3/', contour3_view, name = 'contour3'),
    path('d3/contour4/', contour4_view, name = 'contour4'),
    path('d3/tmp/', tmp_view, name='tmp'),
    path('', RedirectView.as_view(url='d3/', permanent=False), name='index'),
]
| [
"jbigdataspr@gmail.com"
] | jbigdataspr@gmail.com |
3abe7b337d5075c12a0b8bc503be657c2c0e65fb | 2faafbb83794cfdb117bc2f9d6c60257b4d6f75c | /learn_python/03_OOP/oop_14_类方法.py | ffd9acea7aed5ba3871668126f3c676f598ad69e | [] | no_license | HongbinW/learn_python | dd0e79cee1036c0ad8b323e5996d8654bfd56bb2 | d63355443eff6a45752bec6b0784fcecafd4b785 | refs/heads/master | 2020-04-13T03:54:04.713328 | 2018-12-24T03:41:58 | 2018-12-24T03:41:58 | 152,084,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | class Tool(object):
#使用类属性,记录所有工具对象的数量
count = 0
@classmethod
def show_count(cls):
print("工具对象的数量 %d"% cls.count)
def __init__(self,name):
Tool.count += 1
self.count = 1
tool1 = Tool("斧头")
tool2 = Tool("锤子")
tool3 = Tool("剪刀")
print(Tool.count)
print(tool2.count)
print(tool3.count)
#调用类方法
Tool.show_count()
| [
"501245176@qq.com"
] | 501245176@qq.com |
1078f867a59a2e17e6d26d2944b95c7c5d2e46a6 | f159d66b093713ffff8125b029e303526a222e60 | /Q3.py | 39942481a4e6209b23d0eb0c022f93784b70818d | [] | no_license | Rnazx/end-sem | 116246b0243a8f0842eb98c12fc64920cb29ac79 | 9d1344e62d9d7568e7d7da04f36715186ddd44ec | refs/heads/main | 2023-02-27T10:36:22.057652 | 2021-02-04T07:37:59 | 2021-02-04T07:37:59 | 335,875,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | #first part
import math

# Measured data: times T and the corresponding measurements W.
T = [0.00, 0.30, 0.60, 0.90, 1.20, 1.50, 1.80, 2.10, 2.40, 2.70, 3.00, 3.30]
W = [2.2, 1.96, 1.72, 1.53, 1.36, 1.22, 1.10, 1.00, 0.86, 0.75, 0.65, 0.60]


def linear_fit(xs, ys):
    """Least-squares straight-line fit of ys against xs.

    Returns (slope, intercept) of y = slope*x + intercept.
    Raises ZeroDivisionError if all xs are identical.
    """
    # Uses the builtin sum() — the original shadowed it with a hand-rolled
    # version tied to the module-level N, which broke reuse.
    n = len(xs)
    sx = sum(xs)
    sy = sum(ys)
    sxx = sum(x * x for x in xs)
    sxy = sum(x * y for x, y in zip(xs, ys))
    denom = n * sxx - sx * sx
    slope = (n * sxy - sx * sy) / denom
    # Standard least-squares intercept (the original formula used Sx*Sxy
    # where Sxx*Sy belongs, giving a wrong intercept).
    intercept = (sy * sxx - sx * sxy) / denom
    return slope, intercept


# First part: straight-line fit W = slope*T + intercept.
slope, intercept = linear_fit(T, W)
print("The slope and the intercept obtained are", slope, intercept)

# Second part: exponential model W = A*exp(wc*T), fitted by a linear fit
# of ln(W) against T (slope -> wc, intercept -> ln(A)).  The original
# crashed here on an undefined name `X2` and never computed A.
wc, log_a = linear_fit(T, [math.log(w) for w in W])
a = math.exp(log_a)
print("The wc and the A obtained are", wc, a)
"noreply@github.com"
] | Rnazx.noreply@github.com |
c7b53899be6d574060b96f06147189d36c08824b | 186ef411f83f7fec097df8903ed8f34aa6dcc602 | /data/generator.py | 2aae44d9a08486645acb17bf297d30926d654cd1 | [] | no_license | AshleighC/motivate-me | 635a06dbebd19476c922a342344d0eee19144dce | 26b731912565d112ac4bf7370deafc074053cb94 | refs/heads/master | 2021-01-10T19:15:51.529768 | 2013-09-30T02:33:05 | 2013-09-30T02:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from json import loads, dumps
def print_quote(quote):
    """Print the quote text in double quotes, then its author (Python 2)."""
    print '"' + quote["quote"] + '"'
    print " - " + quote["author"]
def get_word():
    """Prompt on stdin and return one word; an empty string ends input."""
    return raw_input(">> ")
def get_quote_words(quote):
    """Show *quote*, then collect words typed by the user until a blank line."""
    print_quote(quote)
    words = []
    word = get_word()
    while word != "":
        words.append(word)
        word = get_word()
    return words
def get_word_list():
    """Load quotes.json and interactively gather a word list per quote."""
    data = loads(open("quotes.json").read())
    wordlist = []
    print  # blank line before the first quote
    for quote in data:
        wordlist.append(get_quote_words(quote))
    print  # blank line after the last quote
    return wordlist
# Write the collected word lists next to the quotes.
# NOTE(review): the name `json` is misleading — it is the output file
# handle for words.json, not the json module.
json = open("words.json", "w")
json.write(dumps(get_word_list()))
json.close()
| [
"ashleigh.cushman@gmail.com"
] | ashleigh.cushman@gmail.com |
3658e297ae12af76ec880a9fe4fcd892b28e26a5 | cbc3743a4e19bda0ff56ab6ca65c766f62daa78c | /sis/model/__init__.py | f4526be22db2ca5888bbfada1bdd3dc7ea606b56 | [] | no_license | kuba/SIS | ef51ecc486c2e8e084cac9634e3421632580ce5b | 327ef2fd4f65db471c408a26095439630c53139e | refs/heads/master | 2021-04-26T12:12:40.048846 | 2013-11-07T12:48:48 | 2013-11-07T12:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | """The application's model objects."""
from sis.model.meta import Session, Base
from sis.model.basic import Person, Educator, Subject, Group, Lesson, Student, \
GroupMembership, SchoolYear, Schedule
from sis.model.subs import Substitution
from sis.model.lucky import LuckyNumber
from sis.model.auth import AuthUser, AuthGroup, AuthPermission
__all__ = [
"Session", "Base", "Person", "Educator", "Subject", "Group", "Lesson",
"Student", "GroupMembership", "SchoolYear", "Schedule", "Substitution",
"LuckyNumber", "AuthUser", "AuthGroup", "AuthPermission"
]
def init_model(engine):
    """Call me before using any of the tables or classes in the model.

    Binds the SQLAlchemy ``Session`` factory to *engine*.
    """
    Session.configure(bind=engine)
| [
"jakub.warmuz@gmail.com"
] | jakub.warmuz@gmail.com |
18fc9f0de056ced36bd6b8d58e3c8770a8c28f8f | 82452c2c25560fcc069b57ad22cf41668d081a98 | /abhiyaan_workspace/src/Adarsh_Abhiyaan/Nodes/Node3_listener.py | 13551eea99404f2fbd30071849b76b9d64cba7e6 | [] | no_license | pyrated03/Abhiyaan_Application | aa8b42774f5247d59f8aead2903f09475f4cc318 | 97ccc60d14efe917bf4498b66907d26af6c67758 | refs/heads/main | 2023-04-17T19:27:33.851424 | 2021-05-01T07:28:11 | 2021-05-01T07:28:11 | 362,343,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | #!/usr/bin/env python3
# importing necessary libraries
import rospy
from std_msgs.msg import String
#Callback Function to assign the message recieved from Node1 in the global variable
def callback_node1(data):
    """Cache the latest message from node1 in the module-global `data_node1`."""
    global data_node1 #Setting it global so that it is accessible from other functions as well
    data_node1 = data.data
#Callback Function to append the 2 messages from the 2 nodes, and print it.
def callback_node2(data):
    """Append node2's message to the latest node1 message and print it.

    If no message from node1 has arrived yet, `data_node1` does not exist
    and concatenation would raise NameError, so the message is skipped
    until the first node1 message is cached by callback_node1.
    """
    if 'data_node1' not in globals():
        return
    data_node2 = data.data
    final = data_node1 + data_node2  # Appending the 2 messages
    print(final)
#Function to listen to the messages by subscribing the corresponding nodes
def listener():
    """Subscribe to both publishers and block until the node is shut down."""
    print(">>> Listening:")
    rospy.init_node('node3', anonymous=True) # initializing node node3 as unique(using anonymous=True)
    rospy.Subscriber('/team_abhiyaan', String, callback_node1) # initializing the subscriber, to topic '/team_abhiyaan' to get the message from node1 and call the function callback_node1
    rospy.Subscriber('/autonomy', String, callback_node2) # initializing the subscriber, to topic '/autonomy' to get the message from node2 and call the function callback_node2
    rospy.spin() # wait till user terminates the execution
if __name__ == "__main__":
    listener()
"adarsh@ubuntu.ubuntu-domain"
] | adarsh@ubuntu.ubuntu-domain |
7ba0b046bbd66ab68fb468bcc87715231c4b3920 | c7d018b4ffc9a837bde653ead8fe3c3d8f2177f9 | /examples/Tile.py | 0dc1b2178ac5c5b1697d0c432d6413bb276f1a7c | [] | no_license | Gray430/Dungeons-Level-Format | 68a59852b12282789ea4f6cdee10da78b68c1f38 | 9eb650235d65d506eaa17ac9de4c6a71e34ee18a | refs/heads/master | 2023-06-09T12:52:34.937444 | 2021-06-23T03:19:27 | 2021-06-23T03:19:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,225 | py | import zlib
import base64
from array import array
from itertools import chain, zip_longest
"""
This module contains useful classes for different tile-related objects.
The documentation here isn't great, but hopefully most of the function names
are self-explanatory.
There are still some parts that aren't implemented yet and parts that could be
optimized better.
"""
def decompress(s):
    """Inverse of compress(): base64-decode *s*, then zlib-inflate it."""
    raw = base64.b64decode(s)
    return zlib.decompress(raw)
def compress(b):
    """zlib-deflate *b* at maximum compression, base64-encode, return str."""
    deflated = zlib.compress(b, 9)
    return base64.b64encode(deflated).decode('utf-8')
def pairwise(iterable):
    """Yield non-overlapping pairs: s -> (s0, s1), (s2, s3), (s4, s5), ..."""
    halves = [iter(iterable)] * 2
    return zip(*halves)
class Boundary:
    """
    Tile boundary, which is a column of invisible, solid blocks.
    Boundaries only have solid walls; the top and bottom are not solid.
    """
    def __init__(self, x, y, z, h):
        self.x = x
        self.y = y
        self.z = z
        self.h = h

    @staticmethod
    def from_bytes(bytes_):
        """Returns a Boundary object with properties from the given bytes."""
        # Decode four big-endian unsigned 16-bit values: x, y, z, h.
        fields = [bytes_[i] << 8 | bytes_[i + 1] for i in range(0, 8, 2)]
        return Boundary(*fields)

    def bytes(self):
        """Returns the boundary represented as bytes."""
        # Encode the four fields back to big-endian 16-bit pairs.
        packed = bytearray()
        for value in (self.x, self.y, self.z, self.h):
            packed.append(value >> 8 & 0xff)
            packed.append(value & 0xff)
        return bytes(packed)
class Door:
    """
    Tile door, which is a tile connection or teleport point.

    Attributes:
        pos: [x, y, z] position of the door.
        size: [w, h, d] size of the door.
        name, tags: optional metadata, set only when present in the input.
    """
    def __init__(self, pos = None, size = None):
        # None-sentinel instead of mutable list defaults: with `pos=[0,0,0]`
        # every Door() created without arguments shared ONE list, so mutating
        # one door's position mutated them all.
        self.pos = [0, 0, 0] if pos is None else pos
        self.size = [1, 1, 1] if size is None else size

    @staticmethod
    def from_dict(dict_door):
        """Returns a Door object with properties from the given dict."""
        door = Door(dict_door['pos'], dict_door['size'])
        if 'name' in dict_door:
            door.name = dict_door['name']
        if 'tags' in dict_door:
            door.tags = dict_door['tags']
        return door

    def dict(self):
        """Returns the door represented as a dict."""
        result = {}
        if hasattr(self, 'name'):
            result['name'] = self.name
        if hasattr(self, 'tags'):
            result['tags'] = self.tags
        result['pos'] = self.pos
        result['size'] = self.size
        return result
class Region:
    """
    Tile region, which is an area marker that can be used set up triggers
    or place objects in the level.
    Not yet implemented:
    - 'locked' property

    Attributes:
        pos: [x, y, z] position of the region.
        size: [w, h, d] size of the region.
        name, tags, type: optional metadata, set only when present.
    """
    def __init__(self, pos = None, size = None):
        # None-sentinel instead of mutable list defaults: with `pos=[0,0,0]`
        # every Region() created without arguments shared ONE list, so
        # mutating one region's position mutated them all.
        self.pos = [0, 0, 0] if pos is None else pos
        self.size = [1, 1, 1] if size is None else size

    @staticmethod
    def from_dict(dict_region):
        """Returns a Region object with properties from the given dict."""
        region = Region(dict_region['pos'], dict_region['size'])
        if 'name' in dict_region:
            region.name = dict_region['name']
        if 'tags' in dict_region:
            region.tags = dict_region['tags']
        if 'type' in dict_region:
            region.type = dict_region['type']
        return region

    def dict(self):
        """Returns the region represented as a dict."""
        result = {}
        if hasattr(self, 'name'):
            result['name'] = self.name
        if hasattr(self, 'tags'):
            result['tags'] = self.tags
        if hasattr(self, 'type'):
            result['type'] = self.type
        result['pos'] = self.pos
        result['size'] = self.size
        return result
class Tile:
    """
    A tile is a cuboid chunk of blocks. They are pieced together to create
    the levels in Dungeons.
    Not yet implemented:
    - 'is-leaky' property
    - 'locked' property
    - 'tags' property
    """
    def __init__(self, name, size):
        self.id = name
        self.size = size
        self.volume = size[0] * size[1] * size[2]
        self.blocks = array('H', [0] * self.volume) # unsigned 16-bit int array
        self.block_data = bytearray([0] * self.volume)  # one 4-bit data value per block, stored one-per-byte
        self.region_plane = bytearray([0] * (size[0] * size[2]))    # per-column region markers (x-z plane)
        self.region_y_plane = bytearray([0] * (size[0] * size[2]))  # per-column region heights
        self.region_y_plane_copy_height = True  # if True, dict() reuses the generated height-plane
        self.walkable_plane = bytearray([0] * (size[0] * size[2]))
        self.write_walkable_plane = False  # only serialize walkable-plane when it was loaded
        self.y = 0
        self.pos = None
        self.boundaries = []
        self.doors = []
        self.regions = []
    @staticmethod
    def from_dict(dict_tile):
        """Returns a Tile object with properties from the given dict."""
        # A tile is sized either explicitly ('size') or by a pos/pos2 corner pair.
        if 'size' in dict_tile:
            tile = Tile(dict_tile['id'], dict_tile['size'])
            if 'pos' in dict_tile:
                tile.pos = dict_tile['pos']
        elif 'pos' in dict_tile and 'pos2' in dict_tile:
            tile = Tile(dict_tile['id'], [abs(a-b) + 1 for a, b in zip(dict_tile['pos'], dict_tile['pos2'])])
            tile.pos = [min(a, b) for a, b in zip(dict_tile['pos'], dict_tile['pos2'])]
        else:
            raise Exception('Tile is missing the size property.')
        if 'blocks' in dict_tile:
            decompressed_blocks = decompress(dict_tile['blocks'])
            # If the number of bytes is greater than 2 times the tile volume, the tile must be using the 16-bit format
            if len(decompressed_blocks) > tile.volume * 2:
                # IDs are the first {tile.volume} 16-bit ints
                tile.blocks = array('H', [x[0] << 8 | x[1] for x in pairwise(decompressed_blocks[:tile.volume*2])])
                # Data values are only 4 bits each, so we need to split each byte in 2 and create a 1D list from that
                tile.block_data = bytearray(chain.from_iterable([(d >> 4, d & 0xf) for d in decompressed_blocks[tile.volume*2:]]))
            else:
                # IDs are simply the first {tile.volume} bytes
                tile.blocks = array('H', iter(decompressed_blocks[:tile.volume]))
                # Data values are only 4 bits each, so we need to split each byte in 2 and create a 1D list from that
                tile.block_data = bytearray(chain.from_iterable([(d >> 4, d & 0xf) for d in decompressed_blocks[tile.volume:]]))
        if 'region-plane' in dict_tile:
            tile.region_plane = bytearray(decompress(dict_tile['region-plane']))
        if 'region-y-plane' in dict_tile:
            tile.region_y_plane = bytearray(decompress(dict_tile['region-y-plane']))
            tile.region_y_plane_copy_height = False
        if 'walkable-plane' in dict_tile:
            tile.walkable_plane = bytearray(decompress(dict_tile['walkable-plane']))
            tile.write_walkable_plane = True
        if 'y' in dict_tile:
            tile.y = dict_tile['y']
        if 'doors' in dict_tile:
            tile.doors = [Door.from_dict(d) for d in dict_tile['doors']]
        if 'regions' in dict_tile:
            tile.regions = [Region.from_dict(r) for r in dict_tile['regions']]
        if 'boundaries' in dict_tile:
            # Old uncompressed boundaries format
            if isinstance(dict_tile['boundaries'], list):
                tile.boundaries = [Boundary(*b) for b in dict_tile['boundaries']]
            else: # Normal compressed format
                # Each boundary is 8 bytes (four big-endian 16-bit fields).
                boundaries_bytes = decompress(dict_tile['boundaries'])
                for i in range(0, len(boundaries_bytes), 8):
                    tile.boundaries.append(Boundary.from_bytes(boundaries_bytes[i:i+8]))
        return tile
    def dict(self):
        """Returns the tile represented as a dict.
        The height-plane property is automatically generated.
        """
        obj = {
            'id': self.id,
            'size': self.size
        }
        if self.pos != None:
            obj['pos'] = self.pos
        if any([x > 0xff for x in self.blocks]): # Requires 16-bit format
            obj['blocks'] = compress(
                bytearray(chain.from_iterable([(x >> 8, x & 0xff) for x in self.blocks])) +
                bytearray([a << 4 | b & 0xf for a, b in zip_longest(self.block_data[::2], self.block_data[1::2], fillvalue=0)])
            )
        else: # Can use 8-bit format
            obj['blocks'] = compress(
                bytearray(tuple(self.blocks)) +
                bytearray([a << 4 | b & 0xf for a, b in zip_longest(self.block_data[::2], self.block_data[1::2], fillvalue=0)])
            )
        obj['region-plane'] = compress(self.region_plane)
        obj['height-plane'] = compress(bytes(self.get_height_map()))
        # When no explicit region-y-plane was loaded, reuse the generated
        # height map for it.
        if self.region_y_plane_copy_height:
            obj['region-y-plane'] = obj['height-plane']
        else:
            obj['region-y-plane'] = compress(self.region_y_plane)
        if self.write_walkable_plane:
            obj['walkable-plane'] = compress(self.walkable_plane)
        if len(self.boundaries) > 0:
            boundaries = bytearray()
            for boundary in self.boundaries:
                boundaries.extend(boundary.bytes())
            obj['boundaries'] = compress(boundaries)
        if self.y != 0:
            obj['y'] = self.y
        if len(self.doors) > 0:
            obj['doors'] = [d.dict() for d in self.doors]
        if len(self.regions) > 0:
            obj['regions'] = [r.dict() for r in self.regions]
        return obj
    def resize(self, x, y, z):
        """Resizes the tile to the given size.
        Anything that is dependant on the tile's size is reset.
        """
        self.size = [x, y, z]
        self.volume = x * y * z
        self.blocks = array('H', [0] * self.volume)
        self.block_data = bytearray([0] * self.volume)
        self.region_plane = bytearray([0] * (x * z))
        self.region_y_plane = bytearray([0] * (x * z))
    def get_block_index(self, x, y, z):
        """Returns the index of the block at the given position.
        This function is useful if you need to get both the block ID and data value
        at the same time. get_block_id and get_block_data are more convenient, but
        when iterating over all blocks in the tile, using this will be faster."""
        return (y * self.size[2] + z) * self.size[0] + x
    def get_block_id(self, x, y, z):
        """Returns the ID of the block at the given position."""
        # We could use self.get_block_index(x, y, z) here, but this is faster
        return self.blocks[(y * self.size[2] + z) * self.size[0] + x]
    def get_block_data(self, x, y, z):
        """Returns the data value of the block at the given position."""
        # We could use self.get_block_index(x, y, z) here, but this is faster
        return self.block_data[(y * self.size[2] + z) * self.size[0] + x]
    def set_block(self, x, y, z, block_id, block_data = 0):
        """Sets the block at the given position to the given block ID and data value."""
        idx = (y * self.size[2] + z) * self.size[0] + x
        self.blocks[idx] = block_id
        self.block_data[idx] = block_data
    def get_region_value(self, x, z):
        """Returns the value of the region plane at the given position."""
        return self.region_plane[z * self.size[0] + x]
    def set_region_value(self, x, z, value):
        """Sets the value of the region plane at the given position to the given value."""
        self.region_plane[z * self.size[0] + x] = value
    def get_region_y_value(self, x, z):
        """Returns the value of the region-y plane at the given position."""
        return self.region_y_plane[z * self.size[0] + x]
    def set_region_y_value(self, x, z, value):
        """Sets the value of the region-y plane at the given position to the given value."""
        self.region_y_plane[z * self.size[0] + x] = value
    def get_height_map(self):
        """Returns a height map of the tile as a 1D list."""
        # Heights are capped at 255 so each entry fits in a single byte
        # when dict() serializes the plane.
        zr = range(0, self.size[2])
        yr = range(min(self.size[1] - 1, 254), -1, -1)
        height_map = [0] * (self.size[0] * self.size[2])
        for x in range(0, self.size[0]):
            for z in zr: # zr and yr are created only once above to save time
                for y in yr: # Start at the top and go down until a solid block is found
                    if self.get_block_id(x, y, z) != 0: # Block is not air
                        height_map[z * self.size[0] + x] = y + 1
                        break
        return height_map
"eventorset@gmail.com"
] | eventorset@gmail.com |
6689bda17969ce7d5c74c76e31523e8c509ba831 | ebd24e400986c57b4bb1b9578ebd8807a6db62e8 | /InstaGrade-FormBuilder/xlsxwriter/test/worksheet/test_extract_filter_tokens.py | f42d6ade4765867a416adf7c334a45d6f2c4965c | [] | no_license | nate-parrott/ig | 6abed952bf32119a536a524422037ede9b431926 | 6e0b6ac0fb4b59846680567150ce69a620e7f15d | refs/heads/master | 2021-01-12T10:15:15.825004 | 2016-12-13T21:23:17 | 2016-12-13T21:23:17 | 76,399,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestExtractFilterTokens(unittest.TestCase):
    """
    Test the Worksheet _extract_filter_tokens() method.
    """

    def setUp(self):
        # Write worksheet output to an in-memory buffer instead of a file.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def test_extract_filter_tokens(self):
        """Test the _extract_filter_tokens() method"""
        # Each testcase is [filter expression, expected token list].
        # Doubled quotes ("") inside a quoted string are the Excel escape
        # for a literal double quote.
        testcases = [
            [
                None,
                [],
            ],
            [
                '',
                [],
            ],
            [
                '0 < 2001',
                ['0', '<', '2001'],
            ],
            [
                'x < 2000',
                ['x', '<', '2000'],
            ],
            [
                'x > 2000',
                ['x', '>', '2000'],
            ],
            [
                'x == 2000',
                ['x', '==', '2000'],
            ],
            [
                'x > 2000 and x < 5000',
                ['x', '>', '2000', 'and', 'x', '<', '5000'],
            ],
            [
                'x = "goo"',
                ['x', '=', 'goo'],
            ],
            [
                'x = moo',
                ['x', '=', 'moo'],
            ],
            [
                'x = "foo baz"',
                ['x', '=', 'foo baz'],
            ],
            [
                'x = "moo "" bar"',
                ['x', '=', 'moo " bar'],
            ],
            [
                'x = "foo bar" or x = "bar foo"',
                ['x', '=', 'foo bar', 'or', 'x', '=', 'bar foo'],
            ],
            [
                'x = "foo "" bar" or x = "bar "" foo"',
                ['x', '=', 'foo " bar', 'or', 'x', '=', 'bar " foo'],
            ],
            [
                'x = """"""""',
                ['x', '=', '"""'],
            ],
            [
                'x = Blanks',
                ['x', '=', 'Blanks'],
            ],
            [
                'x = NonBlanks',
                ['x', '=', 'NonBlanks'],
            ],
            [
                'top 10 %',
                ['top', '10', '%'],
            ],
            [
                'top 10 items',
                ['top', '10', 'items'],
            ],
        ]

        for testcase in testcases:
            expression = testcase[0]
            exp = testcase[1]
            got = self.worksheet._extract_filter_tokens(expression)
            self.assertEqual(got, exp)
| [
"nateparro2t@gmail.com"
] | nateparro2t@gmail.com |
186d6c74a11f3723f7afa9fee9fabb8293b12090 | a7122df9b74c12a5ef23af3cd38550e03a23461d | /Elementary/Popular Words.py | b195f9aa52413e4b553b3d220df0730845fc443b | [] | no_license | CompetitiveCode/py.checkIO.org | c41f6901c576c614c4c77ad5c4162448828c3902 | e34648dcec54364a7006e4d78313e9a6ec6c498b | refs/heads/master | 2022-01-09T05:48:02.493606 | 2019-05-27T20:29:24 | 2019-05-27T20:29:24 | 168,180,493 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #Answer to Popular Words - https://py.checkio.org/en/mission/popular-words/
import re
def popular_words(text: str, words: list) -> dict:
    """Count case-insensitive whole-word occurrences of each word in *text*.

    Returns a dict mapping every entry of *words* to its match count
    (0 when the word does not appear at all).
    """
    return {
        word: len(re.findall(r'\b%s\b' % re.escape(word), text, re.I))
        for word in words
    }
# Self-test harness from the py.checkio.org mission template.
if __name__ == '__main__':
    print("Example:")
    print(popular_words('''
When I was One
I had just begun
When I was Two
I was nearly new
''', ['i', 'was', 'three', 'near']))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert popular_words('''
When I was One
I had just begun
When I was Two
I was nearly new
''', ['i', 'was', 'three', 'near']) == {
        'i': 4,
        'was': 3,
        'three': 0,
        'near': 0
    }
    print("Coding complete? Click 'Check' to earn cool rewards!")
"admin@remedcu.com"
] | admin@remedcu.com |
2909dee689cf910623e0d1009c4341bc11275cfa | 20044db9ab2c773cc80caa4a5a1175ee8148269d | /test/test_win.py | dd916746e989802e3b643dfe3860a27d8b315dfd | [] | no_license | senganal/mpi4py | a4e5dbf2d7e6cf9b6eb783f1e5f1c523326d667f | 28a2e95506844a32efb6b14238ca60173fe7c5a2 | refs/heads/master | 2021-01-13T08:02:43.439643 | 2017-06-16T15:03:44 | 2017-06-16T15:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,255 | py | from mpi4py import MPI
import mpiunittest as unittest
try:
    from sys import getrefcount
except ImportError:
    # Fallback for interpreters without sys.getrefcount (e.g. PyPy): a
    # stub whose comparisons always succeed and whose +/- return itself,
    # so refcount-based assertions in the tests become no-ops.
    class getrefcount(object):
        def __init__(self, arg):
            pass
        def __eq__(self, other):
            return True
        def __add__(self, other):
            return self
        def __sub__(self, other):
            return self
def memzero(m):
    """Fill the writable buffer *m* with zero bytes."""
    try:
        m[:] = 0
    except IndexError:  # cffi buffer: slice-assign zero bytes instead
        size = len(m)
        m[0:size] = b'\0' * size
class BaseTestWin(object):
    """Shared checks for MPI.Win objects.

    Subclasses supply COMM/INFO and create the window (and its backing
    memory, exposed as self.memory) in setUp().
    """
    COMM = MPI.COMM_NULL
    INFO = MPI.INFO_NULL
    CREATE_FLAVOR = MPI.UNDEFINED
    def testGetAttr(self):
        # The window attributes must describe the backing memory exactly.
        base = MPI.Get_address(self.memory)
        size = len(self.memory)
        unit = 1
        self.assertEqual(size, self.WIN.Get_attr(MPI.WIN_SIZE))
        self.assertEqual(unit, self.WIN.Get_attr(MPI.WIN_DISP_UNIT))
        self.assertEqual(base, self.WIN.Get_attr(MPI.WIN_BASE))
    def testMemory(self):
        # win.memory and win.attrs must agree on base/size/disp-unit.
        memory = self.WIN.memory
        pointer = MPI.Get_address(memory)
        length = len(memory)
        base, size, dunit = self.WIN.attrs
        self.assertEqual(size, length)
        self.assertEqual(dunit, 1)
        self.assertEqual(base, pointer)
    def testAttributes(self):
        base, size, unit = self.WIN.attrs
        self.assertEqual(size, len(self.memory))
        self.assertEqual(unit, 1)
        self.assertEqual(base, MPI.Get_address(self.memory))
    def testGetGroup(self):
        # The window's group must be identical to the communicator's group.
        cgroup = self.COMM.Get_group()
        wgroup = self.WIN.Get_group()
        grpcmp = MPI.Group.Compare(cgroup, wgroup)
        cgroup.Free()
        wgroup.Free()
        self.assertEqual(grpcmp, MPI.IDENT)
    def testGetSetInfo(self):
        #info = MPI.INFO_NULL
        #self.WIN.Set_info(info)
        info = MPI.Info.Create()
        self.WIN.Set_info(info)
        info.Free()
        info = self.WIN.Get_info()
        self.WIN.Set_info(info)
        info.Free()
    def testGetSetErrhandler(self):
        # Cycle through predefined error handlers twice; Free() on a
        # predefined handler only nullifies the local handle.
        for ERRHANDLER in [MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN,
                           MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN,]:
            errhdl_1 = self.WIN.Get_errhandler()
            self.assertNotEqual(errhdl_1, MPI.ERRHANDLER_NULL)
            self.WIN.Set_errhandler(ERRHANDLER)
            errhdl_2 = self.WIN.Get_errhandler()
            self.assertEqual(errhdl_2, ERRHANDLER)
            errhdl_2.Free()
            self.assertEqual(errhdl_2, MPI.ERRHANDLER_NULL)
            self.WIN.Set_errhandler(errhdl_1)
            errhdl_1.Free()
            self.assertEqual(errhdl_1, MPI.ERRHANDLER_NULL)
    def testGetSetName(self):
        # Window naming is optional in some MPI implementations.
        try:
            name = self.WIN.Get_name()
            self.WIN.Set_name('mywin')
            self.assertEqual(self.WIN.Get_name(), 'mywin')
            self.WIN.Set_name(name)
            self.assertEqual(self.WIN.Get_name(), name)
        except NotImplementedError:
            pass
    def testCreateFlavor(self):
        # Skipped when the MPI implementation predates MPI-3 window flavors.
        if MPI.WIN_CREATE_FLAVOR == MPI.KEYVAL_INVALID: return
        flavors = (MPI.WIN_FLAVOR_CREATE,
                   MPI.WIN_FLAVOR_ALLOCATE,
                   MPI.WIN_FLAVOR_DYNAMIC,
                   MPI.WIN_FLAVOR_SHARED,)
        flavor = self.WIN.Get_attr(MPI.WIN_CREATE_FLAVOR)
        self.assertTrue (flavor in flavors)
        self.assertEqual(flavor, self.WIN.flavor)
        self.assertEqual(flavor, self.CREATE_FLAVOR)
    def testMemoryModel(self):
        # Skipped when the MPI implementation predates MPI-3 memory models.
        if MPI.WIN_MODEL == MPI.KEYVAL_INVALID: return
        models = (MPI.WIN_SEPARATE, MPI.WIN_UNIFIED)
        model = self.WIN.Get_attr(MPI.WIN_MODEL)
        self.assertTrue (model in models)
        self.assertEqual(model, self.WIN.model)
class BaseTestWinCreate(BaseTestWin):
    """Window created with MPI.Win.Create over user-allocated memory."""
    CREATE_FLAVOR = MPI.WIN_FLAVOR_CREATE
    def setUp(self):
        self.memory = MPI.Alloc_mem(10)
        memzero(self.memory)
        self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
    def tearDown(self):
        # Free the window before the memory backing it.
        self.WIN.Free()
        MPI.Free_mem(self.memory)
class BaseTestWinAllocate(BaseTestWin):
    """Window created with MPI.Win.Allocate; MPI owns the backing memory."""
    CREATE_FLAVOR = MPI.WIN_FLAVOR_ALLOCATE
    def setUp(self):
        self.WIN = MPI.Win.Allocate(10, 1, self.INFO, self.COMM)
        self.memory = self.WIN.memory
        memzero(self.memory)
    def tearDown(self):
        self.WIN.Free()
class BaseTestWinAllocateShared(BaseTestWin):
    """Window created with MPI.Win.Allocate_shared (shared-memory flavor)."""
    CREATE_FLAVOR = MPI.WIN_FLAVOR_SHARED
    def setUp(self):
        self.WIN = MPI.Win.Allocate_shared(10, 1, self.INFO, self.COMM)
        self.memory = self.WIN.memory
        memzero(self.memory)
    def tearDown(self):
        self.WIN.Free()
    def testSharedQuery(self):
        memory = self.WIN.memory
        address = MPI.Get_address(memory)
        length = len(memory)
        memories = self.COMM.allgather((address, length))
        rank = self.COMM.Get_rank()
        size = self.COMM.Get_size()
        for i in range(size):
            # NOTE(review): Shared_query is called with `rank` (own rank)
            # on every iteration — presumably `i` was intended; the
            # `if i == rank` guard below masks the difference. Confirm.
            mem, disp = self.WIN.Shared_query(rank)
            base = MPI.Get_address(mem)
            size = len(mem)
            if i == rank:
                self.assertEqual(base, memories[i][0])
                self.assertEqual(size, memories[i][1])
                self.assertEqual(disp, 1)
class BaseTestWinCreateDynamic(BaseTestWin):
    """Window created with MPI.Win.Create_dynamic: starts with no memory;
    memory is attached and detached explicitly."""
    CREATE_FLAVOR = MPI.WIN_FLAVOR_DYNAMIC
    def setUp(self):
        self.WIN = MPI.Win.Create_dynamic(self.INFO, self.COMM)
    def tearDown(self):
        self.WIN.Free()
    def testGetAttr(self):
        # A dynamic window reports zero base and size until memory is attached.
        base = self.WIN.Get_attr(MPI.WIN_BASE)
        size = self.WIN.Get_attr(MPI.WIN_SIZE)
        disp = self.WIN.Get_attr(MPI.WIN_DISP_UNIT)
        self.assertEqual(base, 0)
        self.assertEqual(size, 0)
        #self.assertEqual(disp, 1)
    def testMemory(self):
        self.assertTrue(self.WIN.memory is None)
    def testAttributes(self):
        # The base-class attribute check does not apply to dynamic windows.
        pass
    def testAttachDetach(self):
        # Attach/detach memory regions in several orders; win.memory stays
        # None throughout (checked via testMemory).
        mem1 = MPI.Alloc_mem(8)
        mem2 = MPI.Alloc_mem(16)
        mem3 = MPI.Alloc_mem(32)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
            self.testMemory()
            self.WIN.Detach(mem)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
        self.testMemory()
        for mem in (mem1, mem2, mem3):
            self.WIN.Detach(mem)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
        self.testMemory()
        for mem in (mem3, mem2, mem1):
            self.WIN.Detach(mem)
        MPI.Free_mem(mem1)
        MPI.Free_mem(mem2)
        MPI.Free_mem(mem3)
# Concrete test cases: each pairs one window-creation flavor with either
# COMM_SELF or COMM_WORLD.
class TestWinCreateSelf(BaseTestWinCreate, unittest.TestCase):
    COMM = MPI.COMM_SELF
class TestWinCreateWorld(BaseTestWinCreate, unittest.TestCase):
    COMM = MPI.COMM_WORLD
class TestWinAllocateSelf(BaseTestWinAllocate, unittest.TestCase):
    COMM = MPI.COMM_SELF
class TestWinAllocateWorld(BaseTestWinAllocate, unittest.TestCase):
    COMM = MPI.COMM_WORLD
class TestWinAllocateSharedSelf(BaseTestWinAllocateShared, unittest.TestCase):
    COMM = MPI.COMM_SELF
class TestWinAllocateSharedWorld(BaseTestWinAllocateShared, unittest.TestCase):
    COMM = MPI.COMM_WORLD
class TestWinCreateDynamicSelf(BaseTestWinCreateDynamic, unittest.TestCase):
    COMM = MPI.COMM_SELF
class TestWinCreateDynamicWorld(BaseTestWinCreateDynamic, unittest.TestCase):
    COMM = MPI.COMM_WORLD
# Feature probes: drop the test classes for any window flavor this MPI
# implementation does not support.
try:
    MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
    del TestWinCreateSelf, TestWinCreateWorld
try:
    MPI.Win.Allocate(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
    del TestWinAllocateSelf, TestWinAllocateWorld
try:
    MPI.Win.Allocate_shared(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
    del TestWinAllocateSharedSelf, TestWinAllocateSharedWorld
try:
    MPI.Win.Create_dynamic(MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
    del TestWinCreateDynamicSelf, TestWinCreateDynamicWorld
# Vendor-specific workarounds for known-broken configurations.
name, version = MPI.get_vendor()
if name == 'Open MPI':
    if version < (1,4,0):
        if MPI.Query_thread() > MPI.THREAD_SINGLE:
            del TestWinCreateWorld
            del TestWinAllocateWorld
if name == 'MPICH2':
    import sys
    if sys.platform.startswith('win'):
        del BaseTestWin.testAttributes
if __name__ == '__main__':
    unittest.main()
| [
"dalcinl@gmail.com"
] | dalcinl@gmail.com |
63d532f2b05888c32f6a68377442ab9daa35236c | 70762d2602bc69c28da31a2505d48ed636f14d8d | /Voice-Conversion-with-TF2.0/eval.py | 0598b4eecd0d1c25fa51d57eca79a482f01a73c0 | [
"MIT"
] | permissive | Acemyzoe/Voice-Conversion-with-VCC2016 | 0fbd378e8d138b51bffa35af26e7576ddb89ff4e | 6fc3b927b5f71576416f9b9bd39b1d4fbf8d5dc3 | refs/heads/master | 2022-07-16T11:58:07.545860 | 2020-05-27T02:24:55 | 2020-05-27T02:24:55 | 267,199,159 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,521 | py | import tensorflow as tf
import numpy as np
import os
import pickle
import librosa
from librosa import display
import matplotlib.pyplot as plt
from model import CycleGAN2
from hyperparameter import Hyperparameter as hp
from preprocess import pitch_conversion, world_speech_synthesis
from preprocess import world_decompose, world_encode_spectral_envelop, world_decode_spectral_envelop
def seg_and_pad(src, n_frames):
n_origin = src.shape[1]
n_padded = (n_origin // n_frames + 1) * n_frames
left_pad = (n_padded - n_origin) // 2
right_pad = n_padded - n_origin - left_pad
src = np.pad(src, [(0, 0), (left_pad, right_pad)], 'constant', constant_values=0)
src = np.reshape(src, [-1, hp.num_mceps, n_frames])
return src
model = CycleGAN2()
latest = tf.train.latest_checkpoint(hp.weights_dir)
model.load_weights(latest)
print('Loading cached data...')
with open('./datasets/JSUT/jsut.p', 'rb') as f:
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std, log_f0s_mean_A, log_f0s_std_A = pickle.load(f)
with open('./datasets/target_voice/target_voice.p', 'rb') as f:
coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std, log_f0s_mean_B, log_f0s_std_B = pickle.load(f)
wav, _ = librosa.load('./outputs/100002.wav', sr=hp.rate)
f0, timeaxis, sp, ap = world_decompose(wav, hp.rate)
f0_converted = pitch_conversion(f0, log_f0s_mean_A, log_f0s_std_A, log_f0s_mean_B, log_f0s_std_B)
coded_sp = world_encode_spectral_envelop(sp, hp.rate, hp.num_mceps)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std
coded_sp_norm = seg_and_pad(coded_sp_norm, hp.n_frames)
wav_forms = []
for i, sp_norm in enumerate(coded_sp_norm):
sp_norm = np.expand_dims(sp_norm, axis=-1)
coded_sp_converted_norm = model([sp_norm, sp_norm])[1][0]
coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean
coded_sp_converted = np.array(coded_sp_converted, dtype=np.float64).T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decode_sp_converted = world_decode_spectral_envelop(coded_sp_converted, hp.rate)
if len(f0) < (i + 1) * hp.output_size:
decode_sp_converted = decode_sp_converted[:len(f0) % hp.output_size]
f0_piece = f0_converted[i * hp.output_size:i * hp.output_size + len(f0) % hp.output_size]
ap_piece = ap[i * hp.output_size:i * hp.output_size + len(f0) % hp.output_size]
wav_transformed = world_speech_synthesis(f0_piece, decode_sp_converted, ap_piece, hp.rate, hp.duration)
wav_forms.append(wav_transformed)
break
else:
f0_piece = f0_converted[i * hp.output_size:(i + 1) * hp.output_size]
ap_piece = ap[i * hp.output_size:(i + 1) * hp.output_size]
wav_transformed = world_speech_synthesis(f0_piece, decode_sp_converted, ap_piece, hp.rate, hp.duration)
wav_forms.append(wav_transformed)
wav_forms = np.concatenate(wav_forms)
print(wav_forms)
if not os.path.exists('outputs'):
os.mkdir('outputs')
import soundfile as sf
sf.write('./outputs/test.wav', wav_forms, hp.rate)
#librosa.output.write_wav('./outputs/test.wav', wav_forms, hp.rate)
def plot_db(path):
test_wav, _ = librosa.load(path, sr=hp.rate)
D = librosa.amplitude_to_db(np.abs(librosa.stft(test_wav)), ref=np.max)
plt.figure(figsize=(12, 8))
display.specshow(D, x_axis='time', y_axis='log')
plt.colorbar(format='%+2.0f dB')
plt.savefig(path+'_fig.png')
plot_db('./outputs/test.wav')
plot_db('./outputs/100002.wav')
| [
"acemyzoe@github.com"
] | acemyzoe@github.com |
74f844764738da0244858435d7e738a04f5e0542 | 1ea679d61ab8a48ddb98e11748b343e966774496 | /moe/stubs.py | 931b6479911f18eeb0882cfa16cabbf0058f87bc | [
"Apache-2.0"
] | permissive | NeoTim/MOE-py | ca5307cb623dba378148360ba6529bee19f13e82 | b433ba61d2f7b32d06eb7df8db38ba545827ad5e | refs/heads/master | 2022-02-24T04:51:28.846001 | 2015-05-13T19:09:04 | 2015-05-13T19:09:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub entry points for MOE programs."""
__author__ = 'dborowitz@google.com (Dave Borowitz)'
from google.apputils import run_script_module
def RunPushCodebase():
from moe import push_codebase
return run_script_module.RunScriptModule(push_codebase)
def RunManageCodebases():
from moe import manage_codebases
return run_script_module.RunScriptModule(manage_codebases)
def RunInitCodebases():
from moe import init_codebases
return run_script_module.RunScriptModule(init_codebases)
def RunScrubber():
from moe.scrubber import scrubber
return run_script_module.RunScriptModule(scrubber)
def RunMoe():
from moe import moe_main
return run_script_module.RunScriptModule(moe_main)
| [
"dbentley@google.com@74c1791e-9d01-7665-3648-555f95fc0228"
] | dbentley@google.com@74c1791e-9d01-7665-3648-555f95fc0228 |
67bed8ee2cf8654efc0ac980515699f555298c21 | bcb178eabc006d2d3dcff3d35bb3a47dc1573841 | /tools/nicythize | 01143856430ef42cc335ccecd619ec488c98f407 | [
"BSD-3-Clause"
] | permissive | poldrack/nipy | dc34e341ce07ecd6a1240b84582186073a1348a7 | cffbb76c86e648a647327e654f0d33220035c6ec | refs/heads/master | 2022-04-24T00:23:12.054157 | 2020-04-23T15:29:34 | 2020-04-23T15:29:34 | 258,041,266 | 0 | 0 | NOASSERTION | 2020-04-22T23:12:22 | 2020-04-22T23:12:21 | null | UTF-8 | Python | false | false | 5,989 | #!/usr/bin/env python
""" nicythize
Cythonize pyx files into C files as needed.
Usage: nicythize [root_dir]
Default [root_dir] is 'nipy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files if:
* The pyx file is modified compared to the version checked into git, and the C
file is not
* The pyx file was committed to the repository more recently than the C file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed (using dates seem to fail with
frequent change of git branch).
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
import os
from os.path import join as pjoin, abspath
import sys
import hashlib
import pickle
from subprocess import Popen, PIPE
from distutils.version import LooseVersion
try:
import Cython
except ImportError:
raise OSError('We need cython for this script')
from Cython.Compiler.Version import version as cython_version
HAVE_CYTHON_0p14 = LooseVersion(cython_version) >= LooseVersion('0.14')
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'nipy'
EXTRA_FLAGS = '-I {}'.format(
abspath(pjoin('lib', 'fff_python_wrapper')))
#
# Rules
#
def process_pyx(fromfile, tofile):
if HAVE_CYTHON_0p14:
opt_str = '--fast-fail'
else:
opt_str = ''
if os.system('cython %s %s -o "%s" "%s"' % (
opt_str, EXTRA_FLAGS, tofile, fromfile)) != 0:
raise Exception('Cython failed')
def process_tempita_pyx(fromfile, tofile):
import tempita
with open(fromfile, 'rt') as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, 'w') as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
rules = {
# fromext : (toext, function)
'.pyx' : ('.c', process_pyx),
'.pyx.in' : ('.c', process_tempita_pyx)
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
with open(filename, 'rb') as f:
hashes = pickle.load(f)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'wb') as f:
pickle.dump(hash_db, f)
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, 'rb') as f:
h.update(f.read())
return h.hexdigest()
#
# interface with git
#
def execproc(cmd):
assert isinstance(cmd, (list, tuple))
pp = Popen(cmd, stdout=PIPE, stderr=PIPE)
result = pp.stdout.read().strip()
err = pp.stderr.read()
retcode = pp.wait()
if retcode != 0:
return None
else:
return result
def git_last_commit_to(filename):
out = execproc(['git', 'log', '-1', '--format=format:%H', filename])
if out == '':
out = None
return out
def git_is_dirty(filename):
out = execproc(['git', 'status', '--porcelain', filename])
assert out is not None
return (out != '')
def git_is_child(parent_sha, child_sha):
out = execproc(['git', 'rev-list', child_sha, '^%s^' % parent_sha])
assert out is not None
for line in out.split('\n'):
if line == parent_sha:
return True
return False
#
# Main program
#
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(fullfrompath, None):
print('%s has not changed' % fullfrompath)
return
from_sha = git_last_commit_to(fullfrompath)
to_sha = git_last_commit_to(fulltopath)
if (from_sha is not None and to_sha is not None and
not git_is_dirty(fullfrompath)):
# Both source and target is under revision control;
# check with revision control system whether we need to
# update
if git_is_child(from_sha, to_sha):
hash_db[fullfrompath] = current_hash
print('%s is up to date (according to git)' % fullfrompath)
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print('Processing %s' % fullfrompath)
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[fullfrompath] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
for fromext, rule in rules.items():
if filename.endswith(fromext):
toext, function = rule
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(cur_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| [
"matthew.brett@gmail.com"
] | matthew.brett@gmail.com | |
ef2e88d29cfe089cc1be831f33bd70a3202da7d4 | f854eb5b82e1cc025d72274a1065f797d1255786 | /user/migrations/0011_auto_20150206_1753.py | ceece9b989a92d091994d6be564de7b094b83762 | [] | no_license | societyofcoders/cetaa.org | 3827c9a51d8d30cb79cf5209b00ddd3acc4313b4 | d15fbb7a96a4f3c4a8031cbe4ca27cc1d7ee320f | refs/heads/master | 2021-01-10T19:34:27.041952 | 2015-04-05T17:51:04 | 2015-04-05T17:51:04 | 30,086,667 | 3 | 5 | null | 2015-02-06T06:34:38 | 2015-01-30T19:08:17 | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0010_auto_20150206_1752'),
]
operations = [
migrations.AlterField(
model_name='user',
name='dob',
field=models.DateField(auto_now_add=True),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='passyear',
field=models.DateField(auto_now_add=True),
preserve_default=True,
),
]
| [
"kmjayadeep@gmail.com"
] | kmjayadeep@gmail.com |
9694cf60afff5d48760b1f7fa81c10c2aa2ccfa7 | 20d1e0ad2985e5d9882c3bad61101c6fa8f66905 | /scanner.py | 1adafc7c0e9b370ac3d0a33c9f86422db906a4a8 | [] | no_license | alphageek9788/Mini-stuff-with-python | 7ac3ab67b9f738a253538d2a69fe211f15fd8be2 | 96311a5e591d709b3b79b363a48c81a38175a4fe | refs/heads/master | 2020-03-26T16:22:47.525540 | 2019-12-23T08:25:25 | 2019-12-23T08:25:25 | 145,098,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import nmap
nm = nmap.PortScanner()
print("Welcome!!!")
print("<---------------------------------------------------->")
ip_addr = input("Enter the target IP address: ")
print("The entered IP address is: ", ip_addr)
type(ip_addr)
nm.scan(ip_addr, '22-443')
for host in nm.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, nm[host].hostname()))
print('State : %s' % nm[host].state())
| [
"noreply@github.com"
] | alphageek9788.noreply@github.com |
50abe42a3a46645944c24b95a934d4056d2a0bac | 8029a03d3725a162e157502b43be68b829c5e6dd | /lft/touristPredict.py | 8607625157c0d7e46310228a7d16b1b74a17a169 | [] | no_license | qinyaoting/python3-study | 71914da36922038f28421dd49a3315ca09b6a253 | ac082bd775e0521a4d9b64529fb97b9a5c8d35da | refs/heads/master | 2021-05-05T00:25:06.368125 | 2018-04-24T10:55:22 | 2018-04-24T10:55:22 | 119,491,608 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,381 | py | import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.metrics import mean_absolute_error,mean_squared_error
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
import matplotlib.pyplot as plt
from numpy import shape
import matplotlib.ticker as ticker
# %matplotlib inline
df = pd.read_excel("datatest.xlsx")
print("Length of original data : ", len(df))
_ = df.fillna(0, inplace=True)
data = df.iloc[:,[2,3,4,5,6,7,8,13,14,15,16,17,18,19,20,21,22]].values
rnn_unit = 32 #10 #hidden layer units/ hidden dim
input_size = 16 #5 #input dim
output_size=1 # output dim
lr=0.0006 # learning rate
# batch_size = 14
# time_step = 6 #需要测试调参
batch_size = 25
time_step = 5 #需要测试调参
train_begin=0
train_end = 1500
test_begin = 1501
test_len = 200
iter_time = 50
# RNN output node weights and biases
weights = {
'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),
'out':tf.Variable(tf.random_normal([rnn_unit,1]))
}
biases = {
'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),
'out':tf.Variable(tf.constant(0.1,shape=[1,]))
}
# Get train data function: load training data for LSTM
# Input: batch_size, time_step, train_begin, train_end
# Output: batch_index, train_x, train_y
def get_train_data(batch_size, time_step, train_begin, train_end):
batch_index = []
# normalize the data
scaler_for_x = MinMaxScaler(feature_range=(0, 1))
scaled_x_data = scaler_for_x.fit_transform(data)
scaler_for_y = MinMaxScaler(feature_range=(0, 1))
scaled_y_data = scaler_for_y.fit_transform(data[:, 0].reshape(-1, 1))
# get train data
normalized_train_data = scaled_x_data[train_begin:train_end]
print("scaled_x_data:",scaled_x_data,scaled_x_data.shape)
print("normalized_train_data",normalized_train_data,normalized_train_data.shape)
train_x, train_y = [], []
for i in range(len(normalized_train_data) - time_step):
if i % batch_size == 0:
batch_index.append(i)
x = normalized_train_data[i:i + time_step, 1:]
y = normalized_train_data[i+1:i + time_step+1, 0, np.newaxis]
train_x.append(x.tolist())
train_y.append(y.tolist())
batch_index.append((len(normalized_train_data) - time_step))
return batch_index, train_x, train_y
# Get test data function: load testing data for LSTM
# Input: time_step, test_begin, test_len
# Output: test_x, test_y, scaler_for_x, scaler_for_y
def get_test_data(time_step, test_begin, test_len):
# normalize the data
scaler_for_x = MinMaxScaler(feature_range=(0, 1))
scaler_for_y = MinMaxScaler(feature_range=(0, 1))
scaled_x_data = scaler_for_x.fit_transform(data)
scaled_y_data = scaler_for_y.fit_transform(data[:, 0].reshape(-1, 1))
# get test data
size = test_len // time_step
normalized_test_data = scaled_x_data[test_begin: (test_begin + test_len)]
normalized_test_lable = scaled_x_data[test_begin + 1: (test_begin + test_len + 1)]
test_y = normalized_test_lable[:, 0]
# 把测试集 分成几段
test_x = []
for i in range(size):
x = normalized_test_data[i * time_step:(i + 1) * time_step, 1:]
test_x.append(x.tolist())
return test_x, test_y, scaler_for_x, scaler_for_y
# LSTM function: definition of recurrent neural network
# Input: X
# Output: pred, final_states
def lstm(X):
batch_size = tf.shape(X)[0]
time_step = tf.shape(X)[1]
w_in = weights['in']
b_in = biases['in']
# reshape to (batch_size * time_step, input_size)
input = tf.reshape(X, [-1, input_size]) #turn tensor to 3D-Array as the input of hidden layer
input_rnn = tf.matmul(input, w_in) + b_in
input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])
# create an LSTM cell to be unrolled
cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
# cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
# cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
# At each time step, reinitialising the hidden state
init_state = cell.zero_state(batch_size, dtype=tf.float32)
# generate prediction
# create a dynamic RNN object in TensorFlow.
# This object will dynamically perform the unrolling of the LSTM cell over each time step
output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state,
dtype=tf.float32)
output = tf.reshape(output_rnn, [-1, rnn_unit])
w_out = weights['out']
b_out = biases['out']
## Get the last output
pred = tf.matmul(output, w_out) + b_out
return pred, final_states
# train_lstm function: train the LSTM model, make predictions, and calculate the error of predication
# Input: batch_size, time_step, train_begin, train_end, test_begin, iter_time, test_len
# Output: test_y, test_predict, loss_list, rmse, mae
def train_lstm(batch_size, time_step, train_begin, train_end, test_begin, iter_time, test_len):
# set up the state storage / extraction
X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, time_step, output_size])
batch_index, train_x, train_y = get_train_data(batch_size, time_step, train_begin, train_end)
print("Training parameters:***************************************************")
print("batch size: ", batch_size)
print("Number of batches: ", len(batch_index))
print("Shape of training samples:", shape(train_x))
print("Shape of training labels:", shape(train_y))
pred, _ = lstm(X)
## Loss and optimizer
loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
loss_list = []
print("Training begins: *****************************************************")
## Training step optimization
"""
The loss are accumulated to monitor the progress of the training.
20 iteration is generally enough to achieve an acceptable accuracy.
"""
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# repeat training 100 times
for epoch in range(iter_time):
for step in range(len(batch_index) - 2):
## Calculate batch loss
_, loss_ = sess.run([train_op, loss], feed_dict={X: train_x[batch_index[step]:batch_index[step + 1]],
Y: np.array(train_y[batch_index[step]:batch_index[step + 1]], dtype=float).reshape(batch_size,time_step,1)})
loss_list.append(loss_)
# Show loss every 5 iterations
if epoch % 5 == 0:
print("Epoch:", epoch, " loss:",loss_)
#if step%100==0:
#print('Epoch:', epoch, 'steps: ', step, 'loss:', loss_)
print("Training Optimization Finished! ***************************************")
"""Testing the model"""
print("Prediction Begins: ****************************************************")
test_x, test_y, scaler_for_x, scaler_for_y = get_test_data(time_step, test_begin, test_len)
print("Shape of testing samples:", shape(test_x))
test_predict = []
for step in range(len(test_x)):
prob = sess.run(pred, feed_dict={X: [test_x[step]]})
predict = prob.reshape((-1))
test_predict.extend(predict)
#test_predict = scaler_for_y.inverse_transform(np.array(test_predict).reshape(-1,1))
#test_y = scaler_for_y.inverse_transform(np.array(test_y).reshape(-1,1))
test_y = np.array(test_y).reshape(-1,1)
test_predict = np.array(test_predict).reshape(-1,1)
print("Shape of testing lables:", shape(test_predict))
test_predict = scaler_for_y.inverse_transform(test_predict).reshape(-1,1)
test_y= scaler_for_y.inverse_transform(test_y).reshape(-1,1)
#calculate the error of predication
rmse = np.sqrt(mean_squared_error(test_predict, test_y))
mae = mean_absolute_error(y_pred=test_predict, y_true=test_y)
print ("Mean absolute error:", "{:.3f}".format(mae),
"Root mean squared error:", "{:.3f}".format(rmse))
# visualization
figure = plt.figure(figsize=(8,5))
axes = figure.add_subplot(1, 1, 1, facecolor='ghostwhite')
axes.plot(test_y, lw='2', c = 'red', label='Real Testing Data')
axes.plot(test_predict, lw='2', c = 'darkblue', label='LSTM Forecasted Data')
axes.set_title('Real Testing vs. Forecasted Wind Power', fontsize = 12)
axes.set_xlabel('Time Series', fontsize = 12)
axes.set_ylabel('Wind Power(MWh)', fontsize = 12)
axes.grid(ls = '--' , c = 'darkgrey', alpha=0.5)
axes.legend(fontsize=12)
plt.show()
return test_y, test_predict, loss_list, rmse, mae
# %%time
if __name__ == '__main__':
test_y, test_predict, loss_list, rmse, mae = train_lstm(batch_size, time_step, train_begin, train_end, test_begin, iter_time, test_len) | [
"qinyaoting@gmail.com"
] | qinyaoting@gmail.com |
4fb63461d25aae70e78349f22ea21fa4ac0d836a | 1b0a729f6e20c542a6370785a49c181c0675e334 | /mint/_versions/20151206231927 router/mint/host.py | 2ead8cb0368f35e377b1077b2efe0c88d58c32b6 | [] | no_license | fans656/mint-dev | 68125c4b41ab64b20d54a2b19e8bf0179dc4636b | 408f6f055670b15a3f3ee9c9ec086b1090cce372 | refs/heads/master | 2021-05-04T11:43:44.740116 | 2016-09-07T13:43:44 | 2016-09-07T13:43:44 | 45,515,119 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | import mint
from mint.interface import Interface
from mint.routetable import RouteTable
from mint.core import EntityWithNIC
from mint.libsocket import LibSocket
from mint.pdus import (
Frame, Packet,
IP,
Discomposer,
)
class Host(EntityWithNIC):
def __init__(self, **kwargs):
super(Host, self).__init__(n_interfaces=1)
self.libsocket = LibSocket(self)
ip = kwargs.get('ip', '192.168.0.1{:02}'.format(self.index))
mask = kwargs.get('mask', '255.255.255.0')
mac = kwargs.get('mac', None)
self.interfaces = [Interface(self, nic, ip, mask, mac)
for nic in self.nics]
self.interface = self.interfaces[0]
self.interface.report.connect(self.report)
self.interface.on_ipv4.connect(self.on_ipv4)
self.routes = RouteTable(self)
def send(self, data, ip, protocol=Packet.Protocol.Raw):
dst_ip = IP(ip)
_, _, gateway, interface = self.routes.find(dst_ip)
if gateway:
self.report('{} is non-local, beg {} to deliver', dst_ip, gateway)
else:
self.report('{} is local, send directly', dst_ip)
packet = Packet(
src_ip=interface.ip,
dst_ip=dst_ip,
protocol=protocol,
payload=data,
)
interface.send_packet(
packet.raw,
gateway if gateway else dst_ip,
Frame.EtherType.IPv4,
)
@property
def ip(self):
return self.interface.ip
@ip.setter
def ip(self, val):
self.interface.ip = val
@property
def mask(self):
return self.interface.mask
@mask.setter
def mask(self, val):
self.interface.mask = val
@property
def mac(self):
return self.interface.mac
@mac.setter
def mac(self, val):
self.interface.mac = val
@property
def default_gateway(self):
return self._default_gateway
@default_gateway.setter
def default_gateway(self, val):
gateway = IP(val)
self._default_gateway = gateway
self.routes[-1].gateway = gateway
def on_ipv4(self, frame, **_):
packet = Packet(frame.payload)
self.send('wooo..', packet.src_ip)
def run(self):
while True:
self.interface.do_send()
self.interface.do_recv()
mint.elapse(1)
@property
def status(self):
nic_title = 'IP:{}/{} MAC:{}'.format(
self.ip, self.mask.net_len, self.mac)
return [
(nic_title, self.nic),
('O', Discomposer(lambda: self.nic.odata)),
('I', Discomposer(lambda: self.nic.idata)),
]
| [
"fans656@yahoo.com"
] | fans656@yahoo.com |
abdfbeb837d66279e5eef19a86763585c7d558ea | 745f2596ce4fe528ba93df6a0f8b7effec7cf879 | /views/HomeView.py | 2bc2d0471038ee7311dfb05395ba3c2ca4ab6a64 | [] | no_license | bmorenos/Examen-Final-de-Programacion-3 | 7f4e9d8ae6709b79127e3ceebe66065976d2603c | 32232d314dbfc8bf8b19b84b7467209dd9eaf6fc | refs/heads/master | 2022-10-22T17:00:57.234813 | 2020-06-14T04:24:50 | 2020-06-14T04:24:50 | 272,130,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from django.http import HttpResponse
from django.template.loader import get_template
class HomeView():
def home(self):
plantilla = get_template('index.html')
return HttpResponse(plantilla.render())
def pagina1(self):
return HttpResponse('hola desde una nueva ruta')
| [
"kaiksoku@gmail.com"
] | kaiksoku@gmail.com |
2120527b5cd6c7eb1d1588f9ee4ee4c4fca4eafc | 3aa05ed64f0866c7d0e3426e8871df98a36c24f1 | /Tarifa.py | 5ba64fc00e317a5e7bc1955a306e27f89767fc17 | [] | no_license | GavinSidhu/KattisSolutions | ae6900fd1163303b557639a041662e2ff6e3e9c7 | 51a93fff5cf16dbeaa9c889f70c36d69dbc5100f | refs/heads/master | 2020-07-19T03:31:29.300104 | 2019-09-18T16:37:42 | 2019-09-18T16:37:42 | 206,366,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | monthlyMegabyte = int(input())
numMonths = int(input())
dataAllotted = monthlyMegabyte * numMonths
dataAllotted = dataAllotted + monthlyMegabyte
month = 1
total = 0
while (month < numMonths + 1):
total += int(input())
month += 1
print(dataAllotted - total) | [
"gsidhu6666@gmail.com"
] | gsidhu6666@gmail.com |
2f08b370506572d5ceddd916a1b2224ca168f9ba | 5ba78d15c0da7ea138230fae9159fbbacde7fbac | /backslashx90/tests/interfere.py | b027d03bad8b91e1c87188995d432d8b46ef6818 | [] | no_license | nbroeking/Basic-Python-Compiler | f8fe23bdfffc85f7142ed0dfaeca830bc25a3e69 | 8e38a1d8f389a183bf4b61a3bd554ca5320a851a | refs/heads/master | 2020-12-07T13:41:55.425269 | 2015-04-30T03:01:38 | 2015-04-30T03:01:38 | 29,210,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | a = 5
b = 3
c = 4
d = a + b
e = b + a
f = 2 + a
g = 5 + f
h = a
i = b + a + h
z = a + b + c + d + e + f + g + h + i
| [
"nbroeking@me.com"
] | nbroeking@me.com |
cfa734dc45fd018c8e7f698ec5da162694aa8ce6 | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/collections_deque_rotate.py | bcc8cb420d285ce582d140a4f19d8855221efbf4 | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # collections_deque_rotate.py
import collections
d = collections.deque(range(10))
print('Normale :', d)
d = collections.deque(range(10))
d.rotate(2)
print('Rotazione destra:', d)
d = collections.deque(range(10))
d.rotate(-2)
print('Rotazione sinistra:', d)
| [
"roberto.pauletto@gmail.com"
] | roberto.pauletto@gmail.com |
244c6f3bb61e659a079e842e9b933fbcbc9b1d94 | 644b13f90d43e9eb2fae0d2dc580c7484b4c931b | /programmers/level1/최소공배수, 최대공약수.py | 9ae0e0c7b50f0b452035698c8c30943f5df4074d | [] | no_license | yeonnseok/ps-algorithm | c79a41f132c8016655719f74e9e224c0870a8f75 | fc9d52b42385916344bdd923a7eb3839a3233f18 | refs/heads/master | 2020-07-09T11:53:55.786001 | 2020-01-26T02:27:09 | 2020-01-26T02:27:09 | 203,962,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def solution(n, m):
answer = []
a, b = max(n, m), min(n, m)
if a == b:
return a
while a % b != 0:
a, b = b, a % b
answer.append(b)
answer.append(n * m // b)
return answer | [
"smr603@snu.ac.kr"
] | smr603@snu.ac.kr |
13684e7cf69dca373b7213f649506f032c6765e6 | c517aa2aee6897768e8f146a9e1fffeb76e6feb2 | /cuenta/migrations/0001_initial.py | 9592cafd5eccbf86616d928f028d9e5a9b585ad3 | [] | no_license | farojas85/sistema_ventas | 13c526bc6e6b8b1a476185de2eebdddba9e3b7a3 | a832380adf8ce896f27db6f2e036978aba4312ae | refs/heads/master | 2023-02-25T03:34:40.787842 | 2021-02-01T18:33:38 | 2021-02-01T18:33:38 | 307,830,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,966 | py | # Generated by Django 2.2 on 2020-12-12 00:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('emp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Acceso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(help_text='Nombre de Acceso', max_length=191)),
('slug', models.CharField(help_text='Ruta Amigable Acceso', max_length=191)),
('descripcion', models.CharField(blank=True, help_text='Descripción de Acceso', max_length=191, null=True)),
('estado', models.BooleanField(default=True)),
('fecha_creada', models.DateTimeField(auto_now_add=True)),
('fecha_modificada', models.DateTimeField(auto_now=True)),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
],
options={
'verbose_name_plural': 'accesos',
},
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(help_text='Nombre del Menú', max_length=191)),
('enlace', models.CharField(help_text='Ruta Menú', max_length=191)),
('icono', models.CharField(blank=True, help_text='Icono del Menú', max_length=191, null=True)),
('padre', models.IntegerField(default=0, help_text='Padre del Menú')),
('orden', models.SmallIntegerField(blank=True, help_text='Orden de menú', null=True)),
('estado', models.BooleanField(default=True)),
('fecha_creada', models.DateTimeField(auto_now_add=True)),
('fecha_modificada', models.DateTimeField(auto_now=True)),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
],
options={
'verbose_name_plural': 'menus',
},
),
migrations.CreateModel(
name='Permiso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(help_text='Nombre de Acceso', max_length=191)),
('slug', models.CharField(help_text='Ruta Amigable Acceso', max_length=191)),
('descripcion', models.CharField(blank=True, help_text='Descripción de Acceso', max_length=191, null=True)),
('estado', models.BooleanField(default=True)),
('fecha_creada', models.DateTimeField(auto_now_add=True)),
('fecha_modificada', models.DateTimeField(auto_now=True)),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
],
options={
'verbose_name_plural': 'permisos',
},
),
migrations.CreateModel(
name='Sexo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(help_text='Descripción Sexo', max_length=191)),
],
options={
'verbose_name_plural': 'sexos',
},
),
migrations.CreateModel(
name='TipoDocumento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estado', models.BooleanField(default=True)),
('fecha_creacion', models.DateTimeField(auto_now_add=True)),
('fecha_modificacion', models.DateTimeField(auto_now=True)),
('tipo', models.CharField(blank=True, help_text='Código Tipo Documento', max_length=2, null=True)),
('nombre_corto', models.CharField(blank=True, help_text='Descripción Corta', max_length=191, null=True)),
('nombre_largo', models.CharField(blank=True, help_text='Descripción Larga', max_length=191, null=True)),
('longitud', models.PositiveSmallIntegerField(help_text='Longitud Documento')),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
],
options={
'verbose_name_plural': 'tipo_documentos',
},
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(blank=True, help_text='Nombre de Rol', max_length=191, null=True)),
('slug', models.CharField(blank=True, help_text='Ruta Amigable Rol', max_length=191, null=True)),
('descripcion', models.CharField(blank=True, help_text='Descripción de Rol', max_length=191, null=True)),
('estado', models.BooleanField(default=True)),
('fecha_creada', models.DateTimeField(auto_now_add=True)),
('fecha_modificada', models.DateTimeField(auto_now=True)),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
('acceso', models.ForeignKey(blank=True, help_text='Persona Usuario', null=True, on_delete=django.db.models.deletion.CASCADE, to='cuenta.Acceso')),
('menus', models.ManyToManyField(to='cuenta.Menu')),
('permisos', models.ManyToManyField(to='cuenta.Permiso')),
],
options={
'verbose_name_plural': 'roles',
},
),
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estado', models.BooleanField(default=True)),
('fecha_creacion', models.DateTimeField(auto_now_add=True)),
('fecha_modificacion', models.DateTimeField(auto_now=True)),
('numero_documento', models.CharField(help_text='Número Documento', max_length=15)),
('nombres', models.CharField(help_text='Nombres', max_length=191)),
('apellidos', models.CharField(help_text='Apellidos', max_length=191)),
('telefono', models.CharField(blank=True, help_text='Teléfono', max_length=50, null=True)),
('direccion', models.CharField(blank=True, help_text='Dirección', max_length=191, null=True)),
('sexo', models.ForeignKey(blank=True, help_text='Sexo Persona', null=True, on_delete=django.db.models.deletion.CASCADE, to='cuenta.Sexo')),
('tipo_documento', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cuenta.TipoDocumento')),
],
options={
'verbose_name_plural': 'personas',
},
),
migrations.CreateModel(
name='Usuario',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(help_text='Nombre Usuario', max_length=191, unique=True)),
('email', models.EmailField(blank=True, help_text='Correo Electrónico', max_length=191, null=True, unique=True)),
('imagen', models.ImageField(blank=True, help_text='Imagen de Perfil', max_length=191, null=True, upload_to='media/imagen/')),
('estado', models.BooleanField(default=True)),
('fecha_creada', models.DateTimeField(auto_now_add=True)),
('fecha_modificada', models.DateTimeField(auto_now=True)),
('fecha_eliminada', models.DateTimeField(blank=True, help_text='Fecha Eliminada', null=True)),
('empresa', models.ForeignKey(blank=True, help_text='Persona Usuario', null=True, on_delete=django.db.models.deletion.CASCADE, to='emp.Empresa')),
('persona', models.OneToOneField(blank=True, help_text='Persona Usuario', null=True, on_delete=django.db.models.deletion.CASCADE, to='cuenta.Persona')),
('roles', models.ManyToManyField(to='cuenta.Role')),
],
options={
'abstract': False,
},
),
]
| [
"frerojas@vonneumann.pe"
] | frerojas@vonneumann.pe |
94eceb862738372e3128c77562888de88e2c221a | 742b2a850a5ba3f4d291f7d3bd21d5a88237b4f9 | /mtsgo/geocalc.py | 6aae4edf1ef71353b178a772fea3814ed2a05c44 | [] | no_license | adolfoeliazat/mtsgo-server | 3ee317867ed98ef88a87763a03f3af3b670aa2b6 | 3e59f0d118e2678200da957356bf53446b3fbb64 | refs/heads/master | 2021-07-22T04:31:59.659258 | 2017-10-30T22:49:18 | 2017-10-30T22:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | # -*- coding: utf8 -*-
from math import radians, cos, sin, asin, sqrt
import numpy as np
import matplotlib.path as mplPath
def geo_distance_between_points(p1, p2):
    """Compute the great-circle distance between two points on Earth.

    Points are given in decimal degrees as (longitude, latitude, ...)
    sequences; the haversine formula is used.

    :param p1: First point.
    :param p2: The other point.
    :return: Distance in metres.
    """
    earth_radius_m = 6371008  # mean Earth radius in metres
    # Decimal degrees -> radians, one point at a time.
    lam1, phi1 = radians(p1[0]), radians(p1[1])
    lam2, phi2 = radians(p2[0]), radians(p2[1])
    # Haversine formula, expressed through the half angular differences.
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    chord = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2
    central_angle = 2 * asin(sqrt(chord))
    return earth_radius_m * central_angle
def geo_point_in_polygon(point, poly):
    """Check whether a point lies inside a polygon.

    **Note:** this works within the approximation of the Campus; outside
    that approximation the validity of the algorithm is not guaranteed.

    :param point: Point described by a tuple of two to three floats.
    :param poly: List of points describing the vertices of the polygon.
    :return: True if the point belongs to the polygon, False otherwise.
    """
    # Keep only (x, y) for each vertex and build a matplotlib Path.
    vertices = np.array([np.array([v[0], v[1]]) for v in poly])
    boundary = mplPath.Path(vertices)
    return boundary.contains_point((point[0], point[1]))
| [
"a.boudhar@outlook.com"
] | a.boudhar@outlook.com |
c79bce9232fc7f27eb05d3910c031414eeee1a53 | 9dad718cc19c5384874871d16861d2fc8662954a | /Save_pictures.py | ba7c85678d78837bb7c585f6ea62fbd8e4d902c5 | [] | no_license | chiehpower/Web-Scraping | 84d7c7daabf6832262b5c761ad62515fe8a4ddfd | 4d37aa597ebad46094c0ac6a230441eac19e73bc | refs/heads/master | 2021-07-04T13:28:15.426337 | 2020-10-19T15:33:40 | 2020-10-19T15:33:40 | 189,037,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import requests
from bs4 import BeautifulSoup
# This file has two purposes: showing how to request a picture over HTTP,
# and how to save the binary image data to disk.
# It also compares two different approaches:
# first, using the `content` attribute of a requests response to save the picture;
# second, using `shutil.copyfileobj` to save it.
def content(url):
    """Download *url* and save the body using ``Response.content``.

    The output file is named ``content_img_<last URL path segment>.jpg``.

    :param url: URL of the picture to download.
    """
    r = requests.get(url)
    file_name = url.split('/')[-1]
    # Context manager guarantees the file is closed even if the write fails
    # (the original open()/close() pair leaked the handle on error).
    with open('content_img_' + file_name + '.jpg', 'wb') as pic_out:
        pic_out.write(r.content)
def shutil(url):
    """Download *url* as a stream and save it with ``shutil.copyfileobj``.

    The output file is named ``shutil_img_<last URL path segment>.jpg``.
    NOTE(review): this function shadows the stdlib ``shutil`` module name.

    :param url: URL of the picture to download.
    """
    # Local import, aliased because the function itself is named "shutil".
    import shutil as _shutil
    sh = requests.get(url, stream=True)
    file_name = url.split('/')[-1]
    # Context manager guarantees the file is closed even if the copy fails
    # (the original open()/close() pair leaked the handle on error).
    with open('shutil_img_' + file_name + '.jpg', 'wb') as pic_out:
        _shutil.copyfileobj(sh.raw, pic_out)
# First URL: the last path segment carries a query string ("...jpg?_w=750"),
# so the saved file name includes it.
url = 'https://s1.imgs.cc/img/aaaaaAlIU.jpg?_w=750'
content(url)
shutil(url)
print('We can see the shutil which cannot get the picture from this page, but content can get successfully.')
# Second URL: a plain .jpg path; both download strategies are exercised again.
url = 'https://files.ckcdn.com/attachments/forum/201905/02/013301re4yvl9vvv191hgy.png.thumb.jpg'
content(url)
shutil(url)
print('But if our url has the name of jpg, then both can get successfully.')
print('So I will say content method is more flexible on here.')
| [
"iloveberit@gmail.com"
] | iloveberit@gmail.com |
f92f1dc064ee15db14477b6d7d43f81fc28eb2ce | 36ac195ecceb868e78372bc8e976066cc9ff0fae | /torch_glow/tests/nodes/bitwise_not_test.py | 638abae4da1c69f85aee374d8a864635a2f08b75 | [
"Apache-2.0"
] | permissive | jeff60907/glow | d283d65bc67e0cc9836854fa7e4e270b77023fff | 34214caa999e4428edbd08783243d29a4454133f | refs/heads/master | 2021-09-23T07:30:29.459957 | 2021-09-14T01:47:06 | 2021-09-14T01:48:00 | 216,199,454 | 0 | 0 | Apache-2.0 | 2019-10-19T12:00:31 | 2019-10-19T12:00:31 | null | UTF-8 | Python | false | false | 1,295 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from parameterized import parameterized
from tests import utils
class SimpleBitwiseNotModule(torch.nn.Module):
    """Tiny module applying ``torch.bitwise_not`` twice (an involution)."""

    def __init__(self):
        super(SimpleBitwiseNotModule, self).__init__()

    def forward(self, a):
        # not(not(a)) == a; the double application exists so the graph
        # contains two aten::bitwise_not nodes for fusion testing.
        return torch.bitwise_not(torch.bitwise_not(a))
class TestBitwiseNot(utils.TorchGlowTestCase):
    """Parameterized tests for lowering aten::bitwise_not to Glow."""

    # Each lambda yields a (name, input tensor) pair; int32, int64 and bool
    # inputs are covered, including a large random int64 tensor.
    @utils.deterministic_expand(
        [
            lambda: ("basic", torch.tensor([-1, -2, 3], dtype=torch.int32)),
            lambda: ("basic_int64", torch.tensor([-1, -2, 3], dtype=torch.int64)),
            lambda: (
                "rand_int",
                torch.randint(-1000000000, 1000000000, (2, 3), dtype=torch.int64),
            ),
            lambda: ("bool_ts", torch.zeros((2, 2, 3), dtype=torch.bool)),
            lambda: ("bool_fs", torch.ones((2, 2, 3), dtype=torch.bool)),
            lambda: ("bool_tf", torch.tensor([False, True], dtype=torch.bool)),
        ]
    )
    def test_bitwise_not(self, _, x):
        """Tests of the PyTorch Bitwise Not Node on Glow."""
        # Compares eager vs. traced execution and checks the op fuses.
        utils.compare_tracing_methods(
            SimpleBitwiseNotModule(),
            x,
            fusible_ops={"aten::bitwise_not"},
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e61ecdf487b93ae8f74b98cd762a928e04184341 | fe940e1234ac6f3da7650ef5a43678d2a7e37d61 | /python_work/python_function/8.12.py | 28aa0aca116f183be5e7b8356af9df2c0e8e3b32 | [] | no_license | vikramsinghal/python_project | e92d8d313b8e9d7756281eaa491bb2153c941832 | 1e1d46e1fed26807e63ce26310735e7030b8a02e | refs/heads/master | 2020-08-22T00:30:46.082817 | 2019-11-20T18:44:23 | 2019-11-20T18:44:23 | 216,280,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def make_sandwich(type_od_sandwich,*items):
print(f"\nYour {type_od_sandwich.title()} sandwich is being ordered with the following items:")
for item in items:
print(f"- {item}")
make_sandwich("Beef pastrami","Mayo")
| [
"thevikramsinghal@gmail.com"
] | thevikramsinghal@gmail.com |
11ee931e40d0bb015d6763cf38814d5e8f796363 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/85/usersdata/216/58702/submittedfiles/funcoes1.py | e3b878f3470c294280f93e1db209d477acd78ca7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | # -*- coding: utf-8 -*-
def crescente (lista):
    """Return True if some adjacent pair of *lista* is strictly increasing.

    Note: this does NOT check that the whole list is sorted; it reports
    whether at least one element is smaller than its successor.
    """
    return any(lista[j] < lista[j + 1] for j in range(len(lista) - 1))
def decrescente(lista):
    """Return True if some adjacent pair of *lista* is strictly decreasing.

    Note: this does NOT check that the whole list is sorted descending; it
    reports whether at least one element is larger than its successor.
    """
    return any(lista[j] > lista[j + 1] for j in range(len(lista) - 1))
def numiguais(lista):
    """Return True if some adjacent pair of *lista* holds equal values.

    Only neighbouring positions are compared, so [1, 2, 1] yields False.
    """
    for i in range(0, len(lista) - 1, 1):
        if lista[i] == lista[i + 1]:
            # The 'break' that used to follow this return was unreachable
            # dead code and has been removed.
            return True
    return False
# Read the element count, then fill three lists (a, b, c) with n floats each.
n=int(input('Digite a quantidade de elementos:'))
a=[]
b=[]
c=[]
for i in range(0,n,1):
    x=float(input('Digite os elementos:'))
    a.append(x)
for i in range(0,n,1):
    y=float(input('Digite os elementos:'))
    b.append(y)
for i in range(0,n,1):
    w=float(input('Digite os elementos:'))
    c.append(w)
# For each list, print 'S'/'N' for: has an increasing adjacent pair,
# has a decreasing adjacent pair, has an equal adjacent pair.
if crescente (a):
    print('S')
else:
    print('N')
if decrescente (a):
    print('S')
else:
    print('N')
if numiguais (a):
    print('S')
else:
    print('N')
if crescente (b):
    print('S')
else:
    print('N')
if decrescente (b):
    print('S')
else:
    print('N')
if numiguais (b):
    print('S')
else:
    print('N')
if crescente (c):
    print('S')
else:
    print('N')
if decrescente (c):
    print('S')
else:
    print('N')
if numiguais (c):
    print('S')
else:
    print('N')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
690118aba3b85f3b271c545b319c76f1c48fcb58 | 45bc0ddf35e3cc2968badf6245b391df1a6407cd | /remove exclamation mark from end of string.py | 9b2c544432bb3904589f2198cce2ba9e00612c8b | [] | no_license | erikac613/CodewarsKata | 417df2b16506b2ee961702c0be4c286f628dabf5 | 2b23d2f1e869cac033e4ebd7e5d856eff2b45da6 | refs/heads/master | 2021-08-19T07:08:09.888789 | 2017-11-25T04:13:16 | 2017-11-25T04:13:16 | 110,899,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | def remove(s):
if s == "":
return s
if s[-1] == '!':
return s[:-1]
return s
| [
"noreply@github.com"
] | erikac613.noreply@github.com |
2ea9a395f409873f7d49408e8d7930d37e0ff437 | 361127f6765c5a89bf0fdc70ae215736987e9f57 | /manage.py | c184a14909a0bcc2fe1a2c903b7d853f83434b26 | [] | no_license | Davont/PatentMiner | 8f45befce19f248bd652f57d846875c4d3a60e9d | 18c41be4c26a96cacbd5283d929e636b6b4b16a0 | refs/heads/master | 2020-04-27T17:43:11.898728 | 2019-10-18T04:04:31 | 2019-10-18T04:04:31 | 174,534,023 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
import os
import sys
# Django command-line entry point: select the project settings module and
# delegate to Django's management commands (runserver, migrate, ...).
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PatentMiner.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"zhang980511@outlook.com"
] | zhang980511@outlook.com |
62a9a663cbb9700f013aa19c11d5484a289e57c0 | cbf9eb8fa36a59f80023d1f6c0997fc2d8830f7c | /examples/fig03.py | c7bd2bd345a530f1c102f4bbc2a046cd1d95af44 | [
"MIT"
] | permissive | LRydin/MFDFA | ee417ce3436b898ed7f6cbe9124c43978c9571fe | ef652e12174541e5b2ad96260b919b40f8b99d9e | refs/heads/master | 2022-08-29T09:46:42.182956 | 2022-08-15T14:36:29 | 2022-08-15T14:36:29 | 224,135,077 | 110 | 24 | MIT | 2022-08-15T14:36:30 | 2019-11-26T08:01:06 | Python | UTF-8 | Python | false | false | 4,035 | py | # created by Leonardo Rydin Gorjão. Most python libraries are standard (e.g. via
# Anaconda). If TeX is not present in the system comment out lines 13 to 16.
import numpy as np
import pandas as pd

# to install MFDFA just run pip install MFDFA
from MFDFA import MFDFA
from MFDFA import fgn

import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pgf.texsystem'] = 'pdflatex'
matplotlib.rcParams.update({'font.family': 'serif', 'font.size': 18,
    'axes.labelsize': 20,'axes.titlesize': 24, 'figure.titlesize' : 28})
matplotlib.rcParams['text.usetex'] = True

colours = ['#1b9e77','#d95f02','#7570b3']

# %% ############################ Sunspot data #################################
# Load the semicolon-separated sunspot series. `sep` is passed by keyword:
# positional use beyond the filepath was deprecated and removed in pandas 2.0.
sun = pd.read_csv('sunspots.csv', sep=';', header=None)

time = np.array(sun[0])
# select column 5 (daily total sunspot number)
sun = np.array(sun[5])

# mask -1 as np.nan (-1 means no entry recorded)
sun[sun ==-1.] = np.nan
sun_m = np.ma.masked_invalid(sun)

# %%
# Log-spaced segment sizes (drop the smallest 30) and a symmetric q range
# excluding q = 0, which is undefined for MFDFA's fluctuation average.
lag = np.unique(np.round(np.logspace(.4, 4, 100)))[30:]
q_list = np.linspace(-10,10,41)
q_list = q_list[q_list!=0.0]

lag, dfa_sun = MFDFA(sun_m, lag, q=q_list, order=1)

# %% ################################## FIG03 ##################################
# Panel a): fluctuation functions F_q(s); panel b): generalised Hurst h(q)
# with tau(q) inset; panel c): singularity spectrum D(alpha).
fig, ax = plt.subplots(1,3, figsize=(16,4))

ax[0].loglog(lag[:], dfa_sun[:,[0]], '^', markersize=6, markerfacecolor='none',
    color=colours[0], label=r'$q=-10$')
ax[0].loglog(lag[:], dfa_sun[:,[10]], 'D', markersize=6, markerfacecolor='none',
    color=colours[0], label=r'$q=-5$')
ax[0].loglog(lag[:], dfa_sun[:,[17]], 'v', markersize=6, markerfacecolor='none',
    color=colours[0], label=r'$q=-2$')
ax[0].loglog(lag[:], dfa_sun[:,[23]], '<', markersize=6, markerfacecolor='none',
    color=colours[1], label=r'$q=2$')
ax[0].loglog(lag[:], dfa_sun[:,[29]], 'H', markersize=6, markerfacecolor='none',
    color=colours[1], label=r'$q=5$')
ax[0].loglog(lag[:], dfa_sun[:,[39]], '>', markersize=6, markerfacecolor='none',
    color=colours[1], label=r'$q=10$')

ax[0].set_ylabel(r'$F_q(s)$',labelpad=7,fontsize=24)
ax[0].set_xlabel(r'segment size $s$',labelpad=3,fontsize=24)

# Slopes of log F_q(s) vs log s over the scaling range give h(q).
slopes_sun = np.polynomial.polynomial.polyfit(np.log(lag)[20:55],np.log(dfa_sun)[20:55],1)[1]

ax[1].plot(q_list[:20], slopes_sun[:20],'o', markersize=9,
    markerfacecolor='none', color=colours[0])
ax[1].plot(q_list[20:], slopes_sun[20:],'o', markersize=9,
    markerfacecolor='none', color=colours[1])
ax[1].set_ylim([None,2.5])
ax[1].set_ylabel(r'$h(q)$',labelpad=5,fontsize=24)
ax[1].set_xlabel(r'$q$',labelpad=3,fontsize=24)

# Inset: mass exponent tau(q) = q*h(q) - 1.
axi2 = fig.add_axes([0.52, 0.6, 0.135, .37])
axi2.plot(q_list[:20], q_list[:20]*slopes_sun[:20]-1,'o', markersize=6,
    color=colours[0], markerfacecolor='none')
axi2.plot(q_list[20:], q_list[20:]*slopes_sun[20:]-1,'o', markersize=6,
    color=colours[1], markerfacecolor='none')
axi2.set_xlabel(r'$q$',labelpad=3,fontsize=24)
axi2.set_ylabel(r'$\tau(q)$',labelpad=-3,fontsize=24)
axi2.set_yticks([-20,-10,0,10])

# Legendre transform: alpha = d tau/dq, D(alpha) = q*alpha - tau.
t_sun = q_list * slopes_sun - 1
hq_sun = np.gradient(t_sun) / np.gradient(q_list)
f_sun = q_list * hq_sun - t_sun

ax[2].plot(hq_sun[5:20], f_sun[5:20],'o', markersize=9,
    markerfacecolor='none', color=colours[0])
ax[2].plot(hq_sun[20:], f_sun[20:],'o', markersize=9,
    markerfacecolor='none', color=colours[1])
ax[2].set_xlabel(r'$\alpha$',labelpad=3,fontsize=24)
ax[2].set_ylabel(r'$D(\alpha)$',labelpad=-5,fontsize=24)

ax[0].legend(loc=4, handletextpad=.3, handlelength=.5, ncol=2,
    columnspacing=.65)

locmaj = matplotlib.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax[0].yaxis.set_major_locator(locmaj)
locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
    numticks=100)
ax[0].yaxis.set_minor_locator(locmin)
ax[0].yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())

fig.text(.005, .92, r'a)', fontsize=28)
fig.text(.34, .92, r'b)', fontsize=28)
fig.text(.67, .92, r'c)', fontsize=28)
fig.subplots_adjust(left=.07, bottom=.17, right=.99, top=.99, hspace=.06,
    wspace=.25)
# fig.savefig('fig03.pdf', transparent=True)  # kwarg spelling fixed ("trasparent")
| [
"leonardo.rydin@gmail.com"
] | leonardo.rydin@gmail.com |
66cbc44fcbfa83158cbb865c345d3648987f82cb | 7ad07a904ef68535ed9b00f2f153375dc11946ee | /arangopipe/arangopipe/arangopipe/arangopipe_storage/arangopipe_admin_api.py | bf4fd445e9df1b1288e71b7b0f7e4515e720e6aa | [] | no_license | nicodev-git/arangopipe | 163d685ff925f44e253380aa43ae6b2102427101 | 1044f8327987f8094f56be97207a20a18efe8e09 | refs/heads/master | 2023-04-30T09:17:41.170262 | 2021-03-09T11:17:09 | 2021-03-09T11:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,064 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 09:30:33 2019
@author: Rajiv Sambasivan
"""
from arango import ArangoClient, DatabaseListError
import logging
from arangopipe.arangopipe_storage.arangopipe_config import ArangoPipeConfig
from arangopipe.arangopipe_storage.custom_http_client import CustomHTTPClient
from arangopipe.arangopipe_storage.managed_service_conn_parameters import ManagedServiceConnParam
import json
import requests
#import traceback
import sys
# Module-level logger: DEBUG and above go to 'arangopipeadmin.log',
# ERROR and above are echoed to the console as well.
logger = logging.getLogger('arangopipe_admin_logger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('arangopipeadmin.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
class ArangoPipeAdmin:
    """Administrative interface to an Arangopipe ArangoDB back end.

    Handles provisioning of the database connection (new managed-service
    database or reuse of persisted/supplied credentials), creation of the
    machine-learning metadata graph, and vertex/edge/database management.
    """

    def __init__(self, reuse_connection=True, config=None, persist_conn=True):
        """Set up a database connection and provision the ML graph.

        :param reuse_connection: reuse persisted (or supplied) connection
            information instead of requesting a new managed-service database.
        :param config: an ArangoPipeConfig with connection details; required
            when reuse_connection is False.
        :param persist_conn: write the resulting connection config to disk.
        """
        self.reuse_connection = reuse_connection
        self.db = None
        self.emlg = None
        self.config = None
        self.cfg = None
        self.mscp = ManagedServiceConnParam()
        self.use_supp_config_to_reconnect = False
        if reuse_connection:
            info_msg = "If a config is provided, it will be used for setting up the connection"
            if config is None:
                self.config = self.create_config()
                self.cfg = self.config.get_cfg()
                self.use_supp_config_to_reconnect = False
            else:
                self.config = config
                self.cfg = config.cfg
                self.use_supp_config_to_reconnect = True
            logger.info(info_msg)
        else:
            assert config is not None,\
                "You must provide connection information for new connections"
            self.config = config
            self.cfg = config.cfg

        # Mandatory connection parameters; fail loudly when any is missing.
        try:
            db_serv_host = self.cfg['arangodb'][self.mscp.DB_SERVICE_HOST]
            db_serv_port = self.cfg['arangodb'][self.mscp.DB_SERVICE_PORT]
            db_end_point = self.cfg['arangodb'][self.mscp.DB_SERVICE_END_POINT]
            db_serv_name = self.cfg['arangodb'][self.mscp.DB_SERVICE_NAME]
        except KeyError as k:
            logger.error("Connection information is missing : " + k.args[0])
            logger.error(
                "Please try again after providing the missing information !")
            raise Exception("Key error associated with missing " + k.args[0])

        # check if connection preferences are indicated
        if 'dbName' in self.cfg['arangodb']:
            logger.info("DB name for connection: " + \
                str(self.cfg['arangodb'][self.mscp.DB_NAME]))
            db_dbName = self.cfg['arangodb'][self.mscp.DB_NAME]
        else:
            db_dbName = ''
        if 'username' in self.cfg['arangodb']:
            logger.info("user name for connection: " +\
                str(self.cfg['arangodb'][self.mscp.DB_USER_NAME]))
            db_user_name = self.cfg['arangodb'][self.mscp.DB_USER_NAME]
        else:
            db_user_name = ''
        if 'password' in self.cfg['arangodb']:
            logger.info("A specific password was requested !")
            db_password = self.cfg['arangodb'][self.mscp.DB_PASSWORD]
        else:
            db_password = ''
        if self.mscp.DB_CONN_PROTOCOL in self.cfg['arangodb']:
            db_conn_protocol = self.cfg['arangodb'][self.mscp.DB_CONN_PROTOCOL]
        else:
            # Default to plain HTTP when no protocol is configured.
            db_conn_protocol = "http"

        if self.mscp.DB_REPLICATION_FACTOR in self.cfg['arangodb']:
            db_replication_factor = self.cfg['arangodb'][
                self.mscp.DB_REPLICATION_FACTOR]
        else:
            db_replication_factor = None
        if self.mscp.DB_ROOT_USER in self.cfg['arangodb']:
            logger.info("A root user was specified, persisting...")
        if self.mscp.DB_ROOT_USER_PASSWORD in self.cfg['arangodb']:
            logger.info("A root user password was specified, persisting...")

        self.create_db(db_serv_host, db_serv_port,\
                       db_serv_name, db_end_point,\
                       db_dbName, db_user_name, db_password, db_conn_protocol)
        # If you could create a DB, proceed with provisioning the graph. Otherwise you
        # had an issue creating the database.
        if self.db is not None:
            self.create_enterprise_ml_graph(db_replication_factor)
        if persist_conn:
            self.config.dump_data()
        return

    def check_repeated_creation(self, api_data):
        """Return True when *api_data* reuses a username, password or db
        name already present in the current config."""
        if not api_data:
            repeated_connection = False
        else:
            try:
                user_name_equal = api_data[self.mscp.DB_USER_NAME] ==\
                    self.cfg['arangodb'][self.mscp.DB_USER_NAME]
                password_equal = api_data[self.mscp.DB_PASSWORD] ==\
                    self.cfg['arangodb'][self.mscp.DB_PASSWORD]
                db_name_equal = api_data[self.mscp.DB_NAME] ==\
                    self.cfg['arangodb'][self.mscp.DB_NAME]
                repeated_connection = user_name_equal or password_equal or db_name_equal
                if user_name_equal:
                    logger.info(
                        "Attempting to create a connection with an existing username"
                    )
                if db_name_equal:
                    logger.info(
                        "Attempting to create a connection with an existing db name"
                    )
            except KeyError:
                # Missing keys mean there is nothing to clash with.
                repeated_connection = False
        return repeated_connection

    def set_connection_params(self, config):
        """Replace the current config and persist it to disk.

        NOTE(review): this stores the config *object* in ``self.cfg`` while
        elsewhere ``self.cfg`` holds the config dict — confirm intended.
        """
        self.cfg = config
        self.cfg.dump_data()
        return

    def create_config(self):
        """Return a fresh ArangoPipeConfig instance."""
        apc = ArangoPipeConfig()
        return apc

    def get_config(self):
        """Return the config object currently held by this admin."""
        return self.config

    def create_db(self, db_srv_host, db_srv_port, db_serv_name,\
                  db_end_point, db_dbName, db_user_name, db_password,\
                  db_conn_protocol):
        """Obtain (or reuse) a database and store a client handle in self.db.

        For a new connection, a managed-service endpoint is asked to
        provision a database; for a reused connection the credentials come
        from the supplied config or the one persisted on disk.
        """
        host_connection = db_conn_protocol + "://" + db_srv_host + ":" + str(
            db_srv_port)
        client = ArangoClient(hosts=host_connection)
        logger.debug("Connection reuse: " + str(self.reuse_connection))
        if not self.reuse_connection:
            API_ENDPOINT = host_connection + "/_db/_system/" + db_end_point + \
                "/" + db_serv_name
            print("API endpoint: " + API_ENDPOINT)
            if db_dbName:
                logger.info("DB name preferrence: " + str(db_dbName))
            if db_user_name:
                logger.info("DB user name preferrence: " + str(db_user_name))
            if db_password:
                logger.info(
                    "Password preference for managed connection was indicated !"
                )
            api_data = {self.mscp.DB_NAME : db_dbName,\
                        self.mscp.DB_USER_NAME: db_user_name,\
                        self.mscp.DB_PASSWORD: db_password }
            logger.info("Requesting a managed service database...")
            # NOTE(review): TLS verification is disabled for this request
            # (verify=False) — confirm this is acceptable for the deployment.
            r = requests.post(url=API_ENDPOINT, json=api_data, verify=False)
            if r.status_code == 409 or r.status_code == 400:
                logger.error(
                    "It appears that you are attempting to connecting using \
existing connection information. So either set reconnect = True when you create ArangoPipeAdmin or recreate a connection config and try again!"
                )
                return
            assert r.status_code == 200, \
                "Managed DB endpoint is unavailable !, reason: " + r.reason + " err code: " +\
                str(r.status_code)
            result = json.loads(r.text)
            logger.info("Managed service database was created !")
            ms_dbName = result['dbName']
            ms_user_name = result['username']
            ms_password = result['password']
            # Persist the provisioned credentials into the config.
            self.cfg['arangodb'][self.mscp.DB_NAME] = ms_dbName
            self.cfg['arangodb'][self.mscp.DB_USER_NAME] = ms_user_name
            self.cfg['arangodb'][self.mscp.DB_PASSWORD] = ms_password
            self.cfg['arangodb'][self.mscp.DB_SERVICE_HOST] = db_srv_host
            self.cfg['arangodb'][self.mscp.DB_SERVICE_NAME] = db_serv_name
            self.cfg['arangodb'][self.mscp.DB_SERVICE_END_POINT] = db_end_point
            self.cfg['arangodb'][self.mscp.DB_SERVICE_PORT] = db_srv_port
            self.cfg['arangodb'][self.mscp.DB_CONN_PROTOCOL] = db_conn_protocol
        else:
            if self.use_supp_config_to_reconnect:
                # Credentials come from the config supplied by the caller.
                ms_dbName = self.cfg['arangodb'][self.mscp.DB_NAME]
                ms_user_name = self.cfg['arangodb'][self.mscp.DB_USER_NAME]
                ms_password = self.cfg['arangodb'][self.mscp.DB_PASSWORD]
            else:
                # Credentials come from the config persisted on disk.
                disk_cfg = ArangoPipeConfig()
                pcfg = disk_cfg.get_cfg()  # persisted config values
                ms_dbName = pcfg['arangodb'][self.mscp.DB_NAME]
                ms_user_name = pcfg['arangodb'][self.mscp.DB_USER_NAME]
                ms_password = pcfg['arangodb'][self.mscp.DB_PASSWORD]
                self.config = disk_cfg
                self.cfg = disk_cfg.cfg
        # Connect to arangopipe database as administrative user.
        #This returns an API wrapper for "test" database.
        print("Host Connection: " + str(host_connection))
        client = ArangoClient(hosts= host_connection,\
                              http_client=CustomHTTPClient(username = ms_user_name,\
                                                           password = ms_password))
        db = client.db(ms_dbName, ms_user_name, ms_password)
        self.db = db
        return

    def create_enterprise_ml_graph(self, db_replication_factor):
        """Create (or fetch) the ML metadata graph with its vertex and edge
        collections; records the replication factor in the config."""
        cl = ['project', 'models', 'datasets', 'featuresets', 'modelparams', 'run',\
              'devperf', 'servingperf', 'deployment']

        if self.reuse_connection:
            # Graph is assumed to exist already for reused connections.
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
            return

        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])

        for col in cl:
            if not self.emlg.has_vertex_collection(col):
                self.db.create_collection(col, db_replication_factor)
                self.emlg.create_vertex_collection(col)

        # Parallel lists: edge_names[i] connects from_list[i] -> to_list[i].
        from_list = ['project', 'models', 'run', 'run', 'run', 'run',\
                     'deployment', 'deployment', 'deployment', 'deployment',\
                     'featuresets']
        to_list = ['models', 'run', 'modelparams', 'datasets', 'devperf',\
                   'featuresets', 'servingperf', 'models', 'modelparams',\
                   'featuresets', 'datasets']
        edge_names = ['project_models', 'run_models', 'run_modelparams', 'run_datasets',\
                      'run_devperf', 'run_featuresets', 'deployment_servingperf', \
                      'deployment_model', 'deployment_modelparams', 'deployment_featureset',\
                      'featureset_dataset']

        for edge, fromv, tov in zip(edge_names, from_list, to_list):
            if not self.emlg.has_edge_definition(edge):
                self.db.create_collection(edge, edge = True,\
                    replication_factor = db_replication_factor)
                self.emlg.create_edge_definition(edge_collection = edge,\
                    from_vertex_collections = [fromv],\
                    to_vertex_collections = [tov] )

        self.cfg['arangodb'][
            self.mscp.DB_REPLICATION_FACTOR] = db_replication_factor
        return

    def register_project(self, p):
        """Insert project document *p* into the 'project' vertex collection
        and return the registration result."""
        projects = self.emlg.vertex_collection("project")
        proj_reg = projects.insert(p)
        return proj_reg

    def delete_arangomldb(self):
        """Stub: no-op placeholder for database deletion."""
        return

    def register_deployment(self, dep_tag):
        """Record a deployment for the run tagged *dep_tag*, linking it to
        that run's model, model parameters and featureset."""
        # Execute the query
        cursor = self.db.aql.execute(
            'FOR doc IN run FILTER doc.deployment_tag == @value RETURN doc',
            bind_vars={'value': dep_tag})
        run_docs = [doc for doc in cursor]
        the_run_doc = run_docs[0]

        # Get the model params for the run
        rmpe = self.emlg.edge_collection("run_modelparams")
        edge_dict = rmpe.edges(the_run_doc, direction="out")
        tmp_id = edge_dict["edges"][0]["_to"]
        mpc = self.emlg.edge_collection("modelparams")
        tagged_model_params = mpc.get(tmp_id)

        # Get the model for the run
        rme = self.emlg.edge_collection("run_models")
        edge_dict = rme.edges(the_run_doc, direction="in")
        tm_id = edge_dict["edges"][0]["_from"]
        mc = self.emlg.edge_collection("models")
        tagged_model = mc.get(tm_id)

        # Get the featureset for the run
        rfse = self.emlg.edge_collection("run_featuresets")
        edge_dict = rfse.edges(the_run_doc, direction="out")
        tfid = edge_dict["edges"][0]["_to"]
        tfc = self.emlg.edge_collection("featuresets")
        tagged_featureset = tfc.get(tfid)

        # Create a deployment artifact
        deployment = self.emlg.vertex_collection("deployment")
        deploy_info = {"tag": dep_tag}
        dep_reg = deployment.insert(deploy_info)

        #Link the deployment to the model parameters
        dep_model_params_edge = self.emlg.edge_collection(
            "deployment_modelparams")
        dep_model_params_key = dep_reg["_key"] + "-" + tagged_model_params[
            "_key"]
        the_dep_model_param_edge = { "_key": dep_model_params_key,\
                                     "_from": dep_reg["_id"],\
                                     "_to": tagged_model_params["_id"]}
        dep_mp_reg = dep_model_params_edge.insert(the_dep_model_param_edge)

        # Link the deployment to the featureset
        dep_featureset_edge = self.emlg.edge_collection(
            "deployment_featureset")
        dep_featureset_key = dep_reg["_key"] + "-" + tagged_featureset["_key"]
        the_dep_featureset_edge = { "_key": dep_featureset_key,\
                                    "_from": dep_reg["_id"],\
                                    "_to": tagged_featureset["_id"]}
        dep_fs_reg = dep_featureset_edge.insert(the_dep_featureset_edge)

        # Link the deployment to the model
        dep_model_edge = self.emlg.edge_collection("deployment_model")
        # NOTE(review): dep_featureset_key is reused here for the model edge
        # key — presumably intentional key construction; verify.
        dep_featureset_key = dep_reg["_key"] + "-" + tagged_model["_key"]
        the_dep_model_edge = { "_key": dep_featureset_key,\
                               "_from": dep_reg["_id"],\
                               "_to": tagged_model["_id"]}
        dep_model_reg = dep_model_edge.insert(the_dep_model_edge)
        return dep_model_reg

    def add_vertex_to_arangopipe(self, vertex_to_create):
        """Add a new vertex collection to the ML graph if it is absent."""
        rf = self.cfg['arangodb'][self.mscp.DB_REPLICATION_FACTOR]
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        #Check if vertex exists in the graph, if not create it
        if not self.emlg.has_vertex_collection(vertex_to_create):
            self.db.create_collection(vertex_to_create, rf)
            self.emlg.create_vertex_collection(vertex_to_create)
        else:
            logger.error("Vertex, " + vertex_to_create + " already exists!")
        return

    def remove_vertex_from_arangopipe(self, vertex_to_remove, purge=True):
        """Remove a vertex collection from the ML graph; *purge* also drops
        the underlying collection."""
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        #Check if vertex exists in the graph, if not create it
        if self.emlg.has_vertex_collection(vertex_to_remove):
            self.emlg.delete_vertex_collection(vertex_to_remove, purge)
            logger.info("Vertex collection " + vertex_to_remove +
                        " has been deleted!")
        else:
            logger.error("Vertex, " + vertex_to_remove + " does not exist!")
        return

    def add_edge_definition_to_arangopipe(self, edge_col_name, edge_name,
                                          from_vertex_name, to_vertex_name):
        """Create an edge definition between two existing vertex
        collections; logs and aborts when either vertex is missing."""
        rf = self.cfg['arangodb'][self.mscp.DB_REPLICATION_FACTOR]
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        #Check if all data needed to create an edge exists, if so, create it
        if not self.emlg.has_vertex_collection(from_vertex_name):
            logger.error("Source vertex, " + from_vertex_name +\
                " does not exist, aborting edge creation!")
            return
        elif not self.emlg.has_vertex_collection(to_vertex_name):
            logger.error("Destination vertex, " + to_vertex_name +\
                " does not exist, aborting edge creation!")
            return
        else:
            if not self.emlg.has_edge_definition(edge_name):
                if not self.emlg.has_edge_collection(edge_col_name):
                    self.db.create_collection(edge_col_name, edge = True,\
                        replication_factor = rf)
                self.emlg.create_edge_definition(edge_collection = edge_col_name,\
                    from_vertex_collections=[from_vertex_name],\
                    to_vertex_collections=[to_vertex_name] )
            else:
                logger.error("Edge, " + edge_name + " already exists!")
        return

    def add_edges_to_arangopipe(self, edge_col_name, from_vertex_list,
                                to_vertex_list):
        """Create an edge collection (if absent) and define edges from every
        collection in *from_vertex_list* to every one in *to_vertex_list*."""
        rf = self.cfg['arangodb'][self.mscp.DB_REPLICATION_FACTOR]
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        #Check if all data needed to create an edge exists, if so, create it
        if not self.emlg.has_edge_collection(edge_col_name):
            msg = "Edge collection %s did not exist, creating it!" % (
                edge_col_name)
            logger.info(msg)
            self.db.create_collection(edge_col_name, edge = True,\
                replication_factor = rf)
            ed = self.emlg.create_edge_definition(edge_collection = edge_col_name,\
                from_vertex_collections= from_vertex_list,\
                to_vertex_collections= to_vertex_list )
        return

    def remove_edge_definition_from_arangopipe(self, edge_name, purge=True):
        """Delete an edge definition; *purge* also drops the collection."""
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        if self.emlg.has_edge_definition(edge_name):
            self.emlg.delete_edge_definition(edge_name, purge)
        else:
            logger.error("Edge definition " + edge_name + " does not exist!")
        return

    def has_vertex(self, vertex_name):
        """Return True when the ML graph has the named vertex collection."""
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        result = self.emlg.has_vertex_collection(vertex_name)
        return result

    def has_edge(self, edge_name):
        """Return True when the ML graph has the named edge definition."""
        if not self.db.has_graph(self.cfg['mlgraph']['graphname']):
            self.emlg = self.db.create_graph(self.cfg['mlgraph']['graphname'])
        else:
            self.emlg = self.db.graph(self.cfg['mlgraph']['graphname'])
        result = self.emlg.has_edge_definition(edge_name)
        return result

    def delete_all_databases(self,\
                             preserve = ['arangopipe', 'facebook_db', \
                                         'fb_node2vec_db', 'node2vecdb', '_system']):
        """Delete every database except the ones in *preserve*.

        Requires root credentials in the config.
        NOTE(review): *preserve* is a mutable default argument and is
        mutated below (append of '_system'); the appended entry persists
        across calls — confirm this is acceptable.
        """
        db_srv_host = self.cfg['arangodb'][self.mscp.DB_SERVICE_HOST]
        db_srv_port = self.cfg['arangodb'][self.mscp.DB_SERVICE_PORT]
        try:
            root_user = self.cfg['arangodb'][self.mscp.DB_ROOT_USER]
            root_user_password = self.cfg['arangodb'][
                self.mscp.DB_ROOT_USER_PASSWORD]
        except KeyError as k:
            msg = "Root credentials are unvailable, try again " + \
                "with a new connection and credentials for root provided"
            logger.error(msg)
            logger.error("Credential information that is missing : " +
                         k.args[0])
            raise Exception("Key error associated with missing " + k.args[0])
        db_conn_protocol = self.cfg['arangodb'][self.mscp.DB_CONN_PROTOCOL]
        host_connection = db_conn_protocol + "://" + \
            db_srv_host + ":" + str(db_srv_port)
        if not root_user and not root_user_password:
            msg = "You will need to provide root credentials while connecting to perform" + \
                " deletes of databases ! Please try again after doing so."
            logger.info(msg)
            return
        client = ArangoClient(hosts= host_connection,\
                              http_client=CustomHTTPClient(username=root_user,\
                                                           password = root_user_password))
        # '_system' must never be deleted; force it into the preserve list.
        if not '_system' in preserve:
            preserve.append('_system')
        sys_db = client.db('_system',\
                           username = root_user,\
                           password = root_user_password)
        try:
            all_db = sys_db.databases()
            print("There were " + str(len(all_db) - 4) + " databases!")
            for the_db in all_db:
                if not the_db in preserve:
                    sys_db.delete_database(the_db)
        except DatabaseListError as err:
            logger.error(err)
            print("Error code: " + str(err.error_code) + " received !")
            print("Error Message: " + str(err.error_message))
        return

    def delete_database(self, db_to_delete):
        """Delete a single database by name; requires root credentials."""
        db_srv_host = self.cfg['arangodb'][self.mscp.DB_SERVICE_HOST]
        db_srv_port = self.cfg['arangodb'][self.mscp.DB_SERVICE_PORT]
        try:
            root_user = self.cfg['arangodb'][self.mscp.DB_ROOT_USER]
            root_user_password = self.cfg['arangodb'][
                self.mscp.DB_ROOT_USER_PASSWORD]
        except KeyError as k:
            msg = "Root credentials are unvailable, try again " + \
                "with a new connection and credentials for root provided"
            logger.error(msg)
            logger.error("Credential information that is missing : " +
                         k.args[0])
            raise Exception("Key error associated with missing " + k.args[0])
        db_conn_protocol = self.cfg['arangodb'][self.mscp.DB_CONN_PROTOCOL]
        host_connection = db_conn_protocol + "://" + \
            db_srv_host + ":" + str(db_srv_port)
        if not root_user and not root_user_password:
            msg = "You will need to provide root credentials while connecting to perform" + \
                " deletes of databases ! Please try again after doing so."
            logger.info(msg)
            return
        client = ArangoClient(hosts= host_connection,\
                              http_client=CustomHTTPClient(username=root_user,\
                                                           password = root_user_password))
        sys_db = client.db('_system',\
                           username = root_user,\
                           password = root_user_password)
        try:
            if sys_db.has_database(db_to_delete):
                sys_db.delete_database(db_to_delete)
            else:
                logger.error("The database, " + db_to_delete +
                             ", does not exist !")
        except DatabaseListError as err:
            logger.error(err)
            print("Error code: " + str(err.error_code) + " received !")
            print("Error Message: " + str(err.error_message))
        return
| [
"noreply@github.com"
] | nicodev-git.noreply@github.com |
9bad7ba88b49290861ae215cd0a3410b76a09167 | ee41311a11a1c6baedafd9a914d5a1f8330fe8a9 | /SANEF_LIVE/agentBulkTransaction.py | 2d2bc2ee94b1cacb4deac3295236f24beb6442f9 | [] | no_license | sethnanati/CodeRepoPython | 2dffb7263620bd905bf694f348485d894a9513db | b55e66611d19b35e9926d1b1387320cf48e177c8 | refs/heads/master | 2023-07-07T11:16:12.958401 | 2021-02-13T10:09:48 | 2021-02-13T10:09:48 | 376,531,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,118 | py | import requests
import requests
import json
import time
from signatureEncoding256 import encrypt_string
import authorizationEncoding64
from cryptoAESLatest import encrypt
from config import endpoints
from ResponseErrorLog import ErrorLog
from iLogs import iLog
def createAgent():
    """Build a bulk transaction-report payload, encrypt and POST it to the
    'transactionReportBulk' endpoint, then decode and log the response.

    All eleven records share the same figures and differ only in their
    agent code, so the payload is generated from the list of agent codes
    instead of repeating the literal eleven times.

    Fixes over the original: the unreachable ``requests.ConnectTimeout``
    handler (a subclass of the handlers above it) is removed, and the
    reporting section no longer blows up with a NameError/AttributeError
    when the HTTP request never completed.
    """
    # One record per agent; every field except agentCode is identical.
    agent_codes = ["99988171", "99988172", "99988173", "99988170",
                   "99988169", "99988168", "99988167", "99988166",
                   "99988165", "99988164", "99988163"]
    bulkdata = [
        {"transactionDate": "2018-07-12", "cashInCount": "1000",
         "cashInValue": "20000", "cashOutCount": "1000",
         "cashOutValue": "2000", "accountOpeningCount": "2000",
         "accountOpeningValue": "4000", "billsPaymentCount": "2000",
         "billsPaymentValue": "3000", "airtimeRechargeCount": "20000",
         "airtimeRechargeValue": "4000", "fundTransferCount": "2000",
         "fundTransferValue": "3000", "bvnEnrollmentCount": "1000",
         "bvnEnrollmentValue": "3000", "othersCount": "4000",
         "othersValue": "30000", "additionalService1Count": "",
         "additionalService1Value": "", "additionalService2Count": "",
         "additionalService2Value": "", "agentCode": code}
        for code in agent_codes
    ]
    response = None
    responsetext = None
    try:
        data = str(json.dumps(bulkdata)).encode('utf-8')
        print(data)
        url = endpoints()['transactionReportBulk']
        print('url:', url)
        base_data = encrypt(data)
        print('request:', base_data)
        iLog(base_data)
        headers = {"Authorization": authorizationEncoding64.authdecoded,
                   "Signature": encrypt_string(),
                   "Content-Type": 'application/json',
                   'HTTP method': 'POST',
                   'Signature_Meth': 'SHA256'}
        response = requests.post(url=url, data=base_data, headers=headers)
        responsetext = response.text
        print(responsetext)
    except requests.ConnectionError as e:
        print("OOPS! Connection Error:", e)
    except requests.RequestException as e:
        print("OOPS! Request Error:", e)
    if response is None:
        # The request never completed; there is no status/body to report.
        return
    try:
        getErrorMessage = ErrorLog()[str(response.status_code)]
        try:
            errorDesc = ErrorLog()[json.loads(responsetext)['responseCode']]
        except KeyError:
            # Either the decoded body had no 'responseCode' or the code is
            # not in the error log; keep the original sentinel behaviour.
            errorDesc = KeyError
        result = ('Status_Code:', response.status_code, 'Status_Msg:',
                  getErrorMessage, 'Response:', responsetext, errorDesc)
        print(result)
        iLog(result)
    except json.JSONDecodeError as e:
        print('JSON Error:', e)
createAgent()
# print('-----Initializing Create Agent-------')
# while True:
# if ping.pingConnection()[0] == 200:
# time.sleep(2)
# createAgent()
#
# else:
# ping.pingConnection()
# print('...Service is unavailable retrying in 60sec.....')
# time.sleep(60)
# def createAgent(pingresp): | [
"adeyemiadenuga@gmail.com"
] | adeyemiadenuga@gmail.com |
fe85918273b3ffd9dc2339a9b0f97a381f0ab2db | 22f80b809204010da7e8217374a2ca78a5613308 | /files/ResourceTools.py | 8f03f0628ceb8a13b409ba83e82ad66a8f46bbb8 | [
"BSD-3-Clause"
] | permissive | frohro/pysam | 23421f506c25e3f2a57ef2533029e64dc856612d | cac4423410d948d886b3f19c83a73ac29ab618ae | refs/heads/master | 2021-02-09T03:46:56.540560 | 2020-03-17T13:32:05 | 2020-03-17T13:32:05 | 244,236,139 | 0 | 0 | BSD-3-Clause | 2020-03-01T22:49:09 | 2020-03-01T22:49:08 | null | UTF-8 | Python | false | false | 6,820 | py | import csv
import os
from collections import defaultdict
def TMY_CSV_to_solar_data(filename):
    """
    Format a TMY csv file as 'solar_resource_data' dictionary for use in PySAM.

    :param: filename:
        any csv resource file formatted according to NSRDB
    :return: dictionary for PySAM.Pvwattsv7.Pvwattsv7.SolarResource, and other models
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename + " does not exist.")
    columns = defaultdict(list)
    with open(filename) as file_in:
        # The first two lines are the metadata header: field names, then values.
        header_fields = file_in.readline().split(",")
        header_values = file_in.readline().split(",")
        if "Time Zone" not in header_fields:
            raise ValueError("`Time Zone` field not found in solar resource file.")
        latitude = header_values[header_fields.index("Latitude")]
        longitude = header_values[header_fields.index("Longitude")]
        tz = header_values[header_fields.index("Time Zone")]
        elev = header_values[header_fields.index("Elevation")]
        # Remaining lines: a column-name row followed by the time series.
        for row in csv.DictReader(file_in):
            for name, value in row.items():
                if len(name) > 0:
                    columns[name].append(float(value))
    # Translate NSRDB column names into the keys PySAM expects.
    weather = {
        'tz': float(tz),
        'elev': float(elev),
        'lat': float(latitude),
        'lon': float(longitude),
        'year': columns.pop('Year'),
        'month': columns.pop('Month'),
        'day': columns.pop('Day'),
        'hour': columns.pop('Hour'),
        'minute': columns.pop('Minute'),
        'dn': columns.pop('DNI'),
        'df': columns.pop('DHI'),
        'gh': columns.pop('GHI'),
        'wspd': columns.pop('Wind Speed'),
        'tdry': columns.pop('Temperature'),
    }
    return weather
def SRW_to_wind_data(filename):
    """
    Format as 'wind_resource_data' dictionary for use in PySAM.

    :param: filename:
        srw wind resource file
    :return: dictionary for PySAM.Windpower.Windpower.Resource
    :raises FileNotFoundError: if *filename* does not exist.
    :raises ValueError: if a column header is not one of Temperature,
        Pressure, Speed or Direction.
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename + " does not exist.")
    # PySAM encodes each measured quantity as a 1-based index into this tuple.
    # (The original also defined an unused `fields_id` tuple; removed.)
    field_names = ('Temperature', 'Pressure', 'Speed', 'Direction')
    data_dict = dict()
    with open(filename) as file_in:
        # Skip the two description lines at the top of the file.
        file_in.readline()
        file_in.readline()
        fields = file_in.readline().strip().split(',')
        file_in.readline()  # units line, ignored
        heights = file_in.readline().strip().split(',')
        data_dict['heights'] = [float(h) for h in heights]
        data_dict['fields'] = []
        for field_name in fields:
            if field_name not in field_names:
                raise ValueError(field_name + " required for wind data")
            data_dict['fields'].append(field_names.index(field_name) + 1)
        # Everything after the header block is the measurement matrix.
        data_dict['data'] = [[float(v) for v in row]
                             for row in csv.reader(file_in)]
    return data_dict
def URDBv7_to_ElectricityRates(urdb_response):
    """
    Formats response from Utility Rate Database API version 7 for use in PySAM
        i.e.
            model = PySAM.UtilityRate5.new()
            rates = PySAM.ResourceTools.URDBv7_to_ElectricityRates(urdb_response)
            model.ElectricityRates.assign(rates)

    :param: urdb_response
        dictionary with response fields following https://openei.org/services/doc/rest/util_rates/?version=7
    :return: dictionary for PySAM.UtilityRate5.UtilityRate5.ElectricityRates
    """
    def try_get_schedule(urdb_name, data_name):
        # Copy a 12x24 month-by-hour schedule into `data`, shifting each
        # period index from URDB's 0-based to SAM's 1-based numbering.
        if urdb_name in urdb_response.keys():
            data[data_name] = urdb_response[urdb_name]
            for i in range(12):
                for j in range(24):
                    data[data_name][i][j] += 1

    def try_get_rate_structure(urdb_name, data_name):
        # Flatten a URDB period/tier structure into SAM's matrix rows:
        # (period, tier, tier max, 0.0, buy rate incl. adjustment, sell rate).
        mat = []
        if urdb_name in urdb_response.keys():
            structure = urdb_response[urdb_name]
            for i, period in enumerate(structure):
                for j, entry in enumerate(period):
                    rate = entry['rate']
                    if 'adj' in entry.keys():
                        rate += entry['adj']
                    # 1e38 acts as "no upper limit" for the tier.
                    tier_max = 1e38
                    if 'max' in entry.keys():
                        tier_max = entry['max']
                    sell = 0
                    if 'sell' in entry.keys():
                        sell = entry['sell']
                    units = ['kwh', 'kw']
                    if 'unit' in entry.keys():
                        if entry['unit'].lower() not in units:
                            raise RuntimeError("UtilityRateDatabase error: unrecognized unit in rate structure")
                    mat.append((i + 1, j + 1, tier_max, 0.0, rate, sell))
            data[data_name] = mat

    data = dict()
    data['en_electricity_rates'] = 1

    # NOTE(review): 'dgrules' is read without a presence check, unlike every
    # other field here -- a response lacking it raises KeyError.
    rules = urdb_response['dgrules']
    if rules == "Net Metering":
        data['ur_metering_option'] = 0
    elif rules == "Net Billing Instantaneous":
        data['ur_metering_option'] = 2
    elif rules == "Net Billing Hourly":
        data['ur_metering_option'] = 3
    elif rules == "Buy All Sell All":
        data['ur_metering_option'] = 4

    # Normalize the fixed charge to a monthly figure.
    if 'fixedchargefirstmeter' in urdb_response.keys():
        fixed_charge = urdb_response['fixedchargefirstmeter']
        fixed_charge_units = urdb_response['fixedchargeunits']
        if fixed_charge_units == "$/day":
            fixed_charge *= 365/30
        elif fixed_charge_units == "$/year":
            fixed_charge /= 12
    data['ur_fixed_monthly_charge'] = fixed_charge

    # Minimum charge: yearly goes to the annual field, otherwise monthly
    # (with $/day scaled up to a month).
    if 'mincharge' in urdb_response.keys():
        min_charge = urdb_response['mincharge']
        min_charge_units = urdb_response['minchargeunits']
        if min_charge_units == "$/year":
            data['ur_annual_min_charge'] = min_charge
        else:
            if min_charge_units == "$/day":
                min_charge *= 365 / 30
            data['ur_monthly_min_charge'] = min_charge

    try_get_schedule('energyweekdayschedule', 'ur_ec_sched_weekday')
    try_get_schedule('energyweekendschedule', 'ur_ec_sched_weekend')

    if 'flatdemandmonths' in urdb_response.keys():
        data['ur_dc_enable'] = 1
        flat_mat = []
        flat_demand = urdb_response['flatdemandmonths']
        for i in range(12):
            # NOTE(review): month index is 0-based here while the rate
            # structures below use 1-based periods -- confirm intended.
            flat_mat.append([i, 1, 1e38, flat_demand[i]])
        data['ur_dc_flat_mat'] = flat_mat

    try_get_rate_structure('energyratestructure', 'ur_ec_tou_mat')
    # NOTE(review): if 'flatdemandstructure' is present this overwrites the
    # 'ur_dc_flat_mat' built from 'flatdemandmonths' just above.
    try_get_rate_structure('flatdemandstructure', 'ur_dc_flat_mat')
    try_get_rate_structure('demandratestructure', 'ur_dc_tou_mat')
    try_get_schedule('demandweekdayschedule', 'ur_dc_sched_weekday')
    try_get_schedule('demandweekendschedule', 'ur_dc_sched_weekend')

    return data
| [
"dguittet@nrel.gov"
] | dguittet@nrel.gov |
5c4c384c9943f0631fa5d0ec566776deaf77389f | 4899dda2ae16a734fb38315f5022fc013ca07d80 | /ex7.py | c9e2c2d4568465e8f03f6bdfffdb8d0ebddca9f4 | [] | no_license | phantomphildius/python-the-hard-way | 953149dc908a6b592c205eaa00a2ae415d4b12eb | 79c25790f0a028804fb1340d06ea6d2d40a16b66 | refs/heads/master | 2021-01-15T10:35:24.312268 | 2017-08-14T21:17:14 | 2017-08-14T21:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | print("mary had a little lamb")
print("it's fleece was white as {}".format("snow"))
print('it went everywher that Mary went')
print("," * 10)
print("this is a line break \n")
print("i didn't type a space")
print('''this is a multiline string
I used three sets of quotes
check it out mannnnnnnnnnn''')
| [
"afreeman@simplereach.com"
] | afreeman@simplereach.com |
d179a925c5a2f7940abe471a664f8e61ad086732 | 5205a6640b3c4e51f909d0d5f85bbacbe27e4a89 | /PYTHON LOG/PYTHON ADVANGE/SOCKETS/servidor_udp.py | 5dab69e006e85d52f334701a0fa4f2dc049848ad | [] | no_license | lfvelascot/Analysis-and-design-of-algorithms | bd3fa1498c9a76586905af92ab3cd78cf0974bb9 | 3e7eec42ba58921a034501da776c1675d14af488 | refs/heads/master | 2021-09-24T14:51:04.564024 | 2021-09-14T22:21:04 | 2021-09-14T22:21:04 | 252,166,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import socket
class UDPserver:
def __init__(self,h,p):
self.host=h
self.port=p
def job(self):
sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_add=(self.host,self.port)
sock.bind(server_add)
while True:
print('waitiing for message.....')
data,address=sock.recvfrom(4096)
if data:
sent=sock.sendto(data,address)
print(data,sent,address)
else:
print('done')
break
def main():
    """Entry point: run the UDP echo server on the hard-coded address."""
    UDPserver('192.168.0.14', 5000).job()


if __name__ == '__main__':
    main()
"60698380+lfvelascot@users.noreply.github.com"
] | 60698380+lfvelascot@users.noreply.github.com |
89e89281ebd3d63ddec32bd7a2cc3a73e1e85b30 | 560a7f534d2a2733d66b6f07cde113fbde1d9aa2 | /CED_Cagliari/CED/var/targets.py | cbef52d6a73b4440b806f748305a4fd7bb6a1d64 | [] | no_license | Nicolalorettu/CED_Python_2019 | 9419b36f2ec90bce264b0450caafb8414a64f54c | 767ae0ea3ef57883c7f5951a4ffe15be0855e20c | refs/heads/main | 2023-04-10T11:18:54.691181 | 2021-04-21T11:23:47 | 2021-04-21T11:23:47 | 360,141,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | # c87 targets
oldkpic87 = {#"FE_RicS": 24.49, "FE_RicMC": 22.99, "FE_RicC": 24.99,
#"FE_ORic": 28.49,
"FE_RHF": 14.99, "FE_RHD": 16.49, "FE_IBOHF": 11.99,
"FE_IBOHD": 14.99, "FE_IOFHF": 26.99, "FE_IOFHD": 13.49,
"FE_RO": 14.49, "FE_IBOO": 15.49, "FE_IOFO": 21.99,
"BO_RF": 21.99, "BO_RA": 21.99, "BO_RF": 21.99,
"BO_IOFHF": 27.49, "BO_IOFHA": 12.99, "BO_IOFHFi": 11.99,
"BO_CTFD": 15.99}
opera_kpi_names = {"FEH": ["Rework Home Fonia", "Rework Home Dati",
"Invio BO Home Fonia", "Invio BO Home Dati",
"Invio OF Home Fonia", "Invio OF Home Dati"],
"FEO": ["Rework Office", "Invio BO Office", "Invio OF Office"],
"BO": ["Rework Fonia", "Rework ADSL", "Rework Fibra",
"Invio OF Home Fonia", "Invio OF Home ADSL",
"Invio OF Home FIBRA", "C TEAM FONIA + DATI"]}
opera_kpi_targets = {"FEH": [14.99, 16.49, 11.99, 14.99, 26.99, 13.49],
"FEO": [14.49, 15.49, 21.99],
"BO": [21.99, 21.99, 21.99, 27.49, 12.99, 11.99, 15.99]}
ibia_kpi_targets = {"Home": {"FONIA": [55, 4, 6, 1, 5, 10, 27, 27, 0, 2, "na", 1, 3],
"ADSL": [61, 5.5, 8, 1, 6, 15, 16, 15, 1.5, 2, "na", 1, 4.5],
"FIBRA": [61, 5.5, 10, 1, 4, 15, 16, 15, 1.5, 2, "na", 1, 4.5]},
"Bus": {"FONIA": [51, 5, 9.5, 1, 5, 15.5, 26, 22, 0, 2, "na", 1, 4.5],
"ADSL": [56, 8, 9.5, 1, 5 , 15.5, 20, 22, 0, 2, "na", 1, 4.5],
"FIBRA": [56, 8, 10.5, 1, 4, 15.5, 20, 22, 0, 2, "na", 1, 4.5]}}
newtargetrichHOME = [23, 23, 23]
newtargetrichOFF = [23, 22]
newkpic87 = {#"FE_RicS": 23, "FE_RicMC": 23, "FE_RicC": 23,
#"FE_ORicMC": 23, "FE_ORicC": 22,
"FE_RHF": 15.5, "FE_RHD": 20, "FE_RHFi": 15.5,
"FE_ONT_FH": 7, "FE_ONT_DH": 8.5, "FE_ONT_FiH": 6.5,
"FE_IBOHF": 14, "FE_IBOHD": 14, "FE_IBOHFi": 15,
"FE_IOFHF": 28, "FE_IOFHD": 16, "FE_IOFHFi": 11,
"FE_RMOF": 16.5, "FE_RCOF": 15.5, "FE_ONT_MOF": 9,
"FE_ONT_COF": 8, "FE_IBOOFM": 16, "FE_IBOOFC": 15,
"FE_IOFOFM": 22, "FE_IOFOFC": 11, "BO_VR7F": 15.5,
"BO_VR7D": 15.5, "BO_VR7Fi": 15.5, "BO_ONT_F": 8.5,
"BO_ONT_D": 9, "BO_ONT_Fi": 6.5, "BO_IOFF": 18,
"BO_IOFD": 14, "BO_IOFFi": 10, "BOCTFD": 16,
"BO_Rip_3GCol": 4.8}
newivrc87 = {"Semplici": [6.51, 7.51, 8.51], "MedioCom": [7.51, 8.51, 9.01], "Complesse": [8.01, 9.01, 9.51]}
oldivrc87 = {"Semplici": 7.41, "MedioCom": 7.41, "Complesse": 7.41}
# TABLES var
columnkpo = { "FEH": ["Ivr Semplici", "Ivr Medio Comp.", "Ivr Complesse", "Richiamate Semplici",
"Richiamate Medio Comp.", "Richiamate Complesse", "Rework FONIA HOME",
"Rework DATI HOME", "Rework FIBRA HOME", "ONT Fonia", "ONT ADSL", "ONT Fibra",
"Invio BO Fonia", "Invio BO ADSL", "Invio BO Fibra", "Invio OF Fonia",
"Invio OF ADSL", "Invio OF Fibra"],
"FEO": ["Ivr Medio Comp.", "Ivr Complesse", "Richiamate Medio Comp.",
"Richiamate Complesse", "Rework Medie", "Rework Complesse", "ONT Medie",
"ONT Complesse", "Invio BO Medie", "Invio BO Complesse", "Invio OF Medie",
"Invio OF Complesse"],
"BO": ["BO REWORK (VN+RIP3GG)FONIA", "BO REWORK (VN+RIP3GG)ADSL",
"BO REWORK (VN+RIP3GG)FIBRA", "ONT Fonia", "ONT ADSL", "ONT Fibra",
"BO INVIO OF FONIA", "BO INVIO OF ADSL", "BO INVIO OF FIBRA",
"C TEAM FONIA + DATI", "RIPETIZIONE A 33 GG SU COLLAUDI"]}
operadb = {"MonthYear": [["1", "4"]]}
ivrdb = {"DATA_INT": [["4", "2"], ["9", "2"]]}
pacodb = {"Data": [["4", "2"], ["7", "2"]]}
sosibiadb = {"DATA_TK": [["1", "6"]]}
fakedb = {"Data_Invio": [["6", "2"], ["3", "2"]]}
non3xxdb = {"DATA_ETT": [["6", "2"], ["3", "2"]]}
rispostedb = {"Data_Invio_Risposta_Cliente": [["6", "2"], ["3", "2"]]}
fakedbw = {"Data_Invio": [["9", "2"], ["6", "2"], ["3", "2"]]}
non3xxdbw = {"DATA_ETT": [["9", "2"], ["6", "2"], ["3", "2"]]}
rispostedbw = {"Data_Invio_Risposta_Cliente": [["9", "2"], ["6", "2"], ["3", "2"]]}
| [
"nicola.lorettu@outlook.it"
] | nicola.lorettu@outlook.it |
ec302f6666b63aafc4a5281f17731332d073b014 | 0ea3612486194b2f5cee893d7ccc08014255fc9f | /lifeapp/migrations/0010_auto_20190527_1142.py | 6c1c832a0716914c8723994997d54d61ad5099a5 | [] | no_license | Naifraz/django_lifestyleblog | 6ced177e356433e5119ce4443154caa6390789e6 | a778d977ce74652bd52ffb7d6bce595a8ccdb0f8 | refs/heads/master | 2020-12-23T16:33:45.178297 | 2020-01-30T12:12:41 | 2020-01-30T12:12:41 | 237,205,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # Generated by Django 2.1.3 on 2019-05-27 05:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lifeapp', '0009_auto_20190527_1106'),
]
operations = [
migrations.AddField(
model_name='article',
name='height_field',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='article',
name='width_field',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='article',
name='image',
field=models.ImageField(blank=True, height_field='height_field', null=True, upload_to='', width_field='width_field'),
),
migrations.AlterField(
model_name='article',
name='introduction',
field=models.TextField(max_length=100),
),
]
| [
"muhammed1096m@gmail.com"
] | muhammed1096m@gmail.com |
1c252e869f1dcff8c40c182c0779b6a502827fbf | ae158e336fdf3ce548a216b92ab974df29e233ca | /resources/data-generator.py | 792a45b4e21b062102e0a241eb8be18d28a2b735 | [] | no_license | nhinguyen78/Project1_Group1_education | 690f4cfbc27dc1e4b281f5a37aa67716c8ede152 | 079b7b8e7d49e3d8e62b7bcb659d93ef6092ed1b | refs/heads/main | 2023-07-10T21:20:27.306093 | 2021-08-11T10:32:55 | 2021-08-11T10:32:55 | 392,250,320 | 0 | 0 | null | 2021-08-11T10:32:56 | 2021-08-03T08:46:17 | Python | UTF-8 | Python | false | false | 6,378 | py | import csv
import os
import random
from datetime import datetime
import time
from decimal import Decimal
import random
from faker import Faker
fake = Faker()
#1. Instructors Info Data
def create_csv_file_Instructors_Info():
    """Write InstructorData-<timestamp>.csv with 20 fake instructor records."""
    stamp = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
    out_dir = os.path.dirname(__file__)
    with open(f'{out_dir}\InstructorData-{stamp}.csv', 'w', newline='') as csvfile:
        fieldnames = ['Instructor_ID', 'Instructor_Name', 'Instructor_email',
                      'Instructor_address', 'Instructor_PhoneNum', 'Instructor_level',
                      'Total_courses_released', 'Instructor_ranked', 'Gender']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for instructor_id in range(20):
            row = {
                'Instructor_ID': instructor_id,
                'Instructor_Name': fake.name(),
                'Gender': random.choice(['Male', 'Female']),
                'Instructor_email': fake.email(),
                'Instructor_address': fake.address(),
                'Instructor_PhoneNum': fake.msisdn(),
                'Instructor_level': random.choice(['None', 'Bacherlor', 'Master',
                                                   'Doctor', 'Professor', 'Other']),
                'Total_courses_released': fake.random_int(1, 20),
                'Instructor_ranked': fake.random_int(1, 5),
            }
            writer.writerow(row)
#2. Courses Info Data
def create_csv_file_Courses_Info():
    """Write CoursesData-<timestamp>.csv with 30 fake course records."""
    stamp = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
    out_dir = os.path.dirname(__file__)
    course_names = [
        'Basic Computer Courses List', 'Basic C Programs',
        'The Fundamentals of Computers', 'Microsoft Office', 'Operating Systems',
        'Office Automation', 'Adobe Photoshop', 'Accounting Software',
        'Web Designing', 'Certification Course in Computer Technology',
        'VFX and Animation', 'Tally', 'Microsoft Office and Typing Courses',
        'Cyber Security Courses', 'Software and Programming Languages',
        'Diploma in IT or Computer Science', 'Hardware Maintenance',
        'Digital Marketing Course', 'Data Analytics Course',
        'Artificial intelligence course', 'Machine learning course',
        'Network Security Course', 'Moral Hacking Course',
        'Website optimization course', 'Full Stack Developer Course',
        'Web Development Course', 'Php and MySQL', 'Advanced UI Design',
        'Advanced UX Design', 'Graphic Design',
    ]
    with open(f'{out_dir}\CoursesData-{stamp}.csv', 'w', newline='') as csvfile:
        fieldnames = ['Course_ID', 'Course_Name', 'Instructor_ID',
                      'Month Duration', 'Price in $', 'Online']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for course_id in range(30):
            writer.writerow({
                'Course_ID': course_id,
                'Course_Name': random.choice(course_names),
                'Instructor_ID': fake.random_int(1, 20),
                'Month Duration': fake.random_int(1, 6),
                'Price in $': fake.random_int(299, 599),
                'Online': random.choice(['Yes', 'No']),
            })
#3. Transaction Info
def str_time_prop(start, end, time_format, prop):
    """Get a time at a proportion of a range of two formatted times.

    *start* and *end* are strings in *time_format* (strftime-style) giving
    the interval [start, end]; *prop* selects how far into the interval the
    result lies (0 -> start, 1 -> end).  Returns the chosen time formatted
    with *time_format*.
    """
    start_ts = time.mktime(time.strptime(start, time_format))
    end_ts = time.mktime(time.strptime(end, time_format))
    chosen = start_ts + prop * (end_ts - start_ts)
    return time.strftime(time_format, time.localtime(chosen))
def random_date(start, end, prop):
    """Return a '%m/%d/%Y' date lying *prop* of the way from *start* to *end*."""
    return str_time_prop(start, end, '%m/%d/%Y', prop)
def create_csv_file_Transactions():
    """Write TracsactionData-<timestamp>.csv with 2000 fake enrolment records.

    Each row links a random student, course and instructor with a random
    date between 2017-01-01 and 2021-06-01.
    """
    stamp = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
    out_dir = os.path.dirname(__file__)
    # File name typo ("Tracsaction") kept for compatibility with consumers.
    with open(f'{out_dir}\TracsactionData-{stamp}.csv', 'w', newline='') as csvfile:
        fieldnames = ['Transaction_ID', 'Instructor_ID', 'Course_ID',
                      'Student_ID', 'Date']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for tx_id in range(2000):
            writer.writerow({
                'Transaction_ID': tx_id,
                'Instructor_ID': fake.random_int(1, 10),
                'Course_ID': fake.random_int(1, 30),
                'Student_ID': fake.random_int(1, 1000),
                'Date': random_date("1/1/2017", "6/1/2021", random.random()),
            })
#4. Student Info Data
def create_csv_file_Students_Info():
    """Write StudentData-<timestamp>.csv with 2000 fake student records."""
    stamp = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
    out_dir = os.path.dirname(__file__)
    with open(f'{out_dir}\StudentData-{stamp}.csv', 'w', newline='') as csvfile:
        fieldnames = ['Student_ID', 'Name', 'Gender', 'DOB', 'Email', 'Address',
                      'PhoneNumber', 'Final_Score', 'Rating_class',
                      'Rating_teacher', 'Class_recommendation']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for student_id in range(2000):
            writer.writerow({
                'Student_ID': student_id,
                'Name': fake.name(),
                'Gender': random.choice(['Male', 'Female']),
                'DOB': random_date("1/1/1989", "1/1/2012", random.random()),
                'Email': fake.email(),
                'Address': fake.address(),
                'PhoneNumber': fake.msisdn(),
                'Final_Score': fake.random_int(1, 10),
                'Rating_class': fake.random_int(1, 5),
                'Rating_teacher': fake.random_int(1, 5),
                'Class_recommendation': bool(random.getrandbits(1)),
            })
if __name__ == '__main__':
    # Generate all four fake datasets in one run.
    print('Creating a fake data...')
    create_csv_file_Instructors_Info()
    create_csv_file_Students_Info()
    create_csv_file_Courses_Info()
    create_csv_file_Transactions()
"noreply@github.com"
] | nhinguyen78.noreply@github.com |
d8073efab7ed5a3e482774d76ba15846a4152f77 | edf2cdb14814ae40c0f448360af23d414db444ab | /tools/logger.py | 8479cd6a57d7bcf92bacc0a979f89e5c3fe01157 | [] | no_license | Savant-Dev/BurgerBot | 7cb6cdc26b1ad5aed30afbbc4de1ab0663ed3e3b | 81788d71435c2b03ab382af825267e07731954c6 | refs/heads/master | 2022-12-18T06:16:00.066700 | 2020-09-29T02:08:43 | 2020-09-29T02:08:43 | 299,479,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | '''
This file initializes the event logger
Available Methods:
- get (returns an instance of CustomLogger)
- fetch (returns the application's event/error logs)
Definitions:
- CustomLogger
Event Output Handler
Optional Colored Terminal Logs based on priority level
File Outputs Mandatory
'''
| [
"simplysavant@outlook.com"
] | simplysavant@outlook.com |
de8ecd8638791145efbd6cea008a094fb7ef19ea | ea8e945af461ae6e5a2dcd9dce244391f14ec695 | /koyama/chapter06/knock50.py | cd481e55b4ab3b896f5ce19fed8524e547f6d314 | [] | no_license | tmu-nlp/100knock2020 | b5a98485e52b88003fa97966c8d6eef292c9f036 | 1133fa833ea32ad3e54833e420bcb1433f3ec2f3 | refs/heads/master | 2023-04-09T06:48:04.571566 | 2020-08-13T05:38:25 | 2020-08-13T05:38:25 | 258,825,143 | 1 | 2 | null | 2020-08-12T15:56:56 | 2020-04-25T16:43:13 | Python | UTF-8 | Python | false | false | 3,466 | py | # 50. データの入手・整形
# News Aggregator Data Setをダウンロードし、以下の要領で学習データ(train.txt),検証データ(valid.txt),評価データ(test.txt)を作成せよ.
# ダウンロードしたzipファイルを解凍し,readme.txtの説明を読む.
# 情報源(publisher)が”Reuters”, “Huffington Post”, “Businessweek”, “Contactmusic.com”, “Daily Mail”の事例(記事)のみを抽出する.
# 抽出された事例をランダムに並び替える.
# 抽出された事例の80%を学習データ,残りの10%ずつを検証データと評価データに分割し,それぞれtrain.txt,valid.txt,test.txtというファイル名で保存する.
# ファイルには,1行に1事例を書き出すこととし,カテゴリ名と記事見出しのタブ区切り形式とせよ(このファイルは後に問題70で再利用する).
# 学習データと評価データを作成したら,各カテゴリの事例数を確認せよ.
from sklearn.model_selection import train_test_split
import pandas as pd
import collections
if __name__ == "__main__":
    # Load the raw News Aggregator corpus (TSV, no header row).
    corpus_path = "newsCorpora.csv"
    corpus = pd.read_csv(corpus_path, header=None, sep="\t")
    # Attach the column names documented in the dataset's readme.txt.
    column_names = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"]
    corpus.columns = column_names
    # Keep only the articles from the five selected publishers.
    selected_publishers = ["Reuters", "Huffington Post", "Businessweek", "Contactmusic.com", "Daily Mail"]
    corpus = corpus[corpus["PUBLISHER"].isin(selected_publishers)]
    # Shuffle the remaining rows: frac=1 resamples every row, and
    # random_state=0 pins the seed so the order is reproducible.
    corpus = corpus.sample(frac=1, random_state=0)
    # Task: predict Y = "CATEGORY" from X = "TITLE".
    X = corpus["TITLE"]
    Y = corpus["CATEGORY"]
    # Split train:valid:test = 8:1:1 in two steps, stratifying each split so
    # every subset keeps the corpus's category proportions:
    #   1) whole -> train (80%) + rest (20%)
    #   2) rest  -> valid (half of rest) + test (half of rest)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y, random_state=0)
    X_valid, X_test, Y_valid, Y_test = train_test_split(X_test, Y_test, test_size=0.5, stratify=Y_test, random_state=0)
    # Re-join titles and labels column-wise (axis=1) for writing.
    XY_train = pd.concat([X_train, Y_train], axis=1)
    XY_valid = pd.concat([X_valid, Y_valid], axis=1)
    XY_test = pd.concat([X_test, Y_test], axis=1)
    # Persist each split as a tab-separated file, one example per line.
    XY_train.to_csv("train.txt", sep="\t", index=False, header=None)
    XY_valid.to_csv("valid.txt", sep="\t", index=False, header=None)
    XY_test.to_csv("test.txt", sep="\t", index=False, header=None)
    # Report the per-category example counts of each split.
    print(collections.Counter(Y_train))  # Counter({'b': 4502, 'e': 4223, 't': 1219, 'm': 728})
    print(collections.Counter(Y_valid))  # Counter({'b': 562, 'e': 528, 't': 153, 'm': 91})
    print(collections.Counter(Y_test))  # Counter({'b': 563, 'e': 528, 't': 152, 'm': 91})
| [
"aomiabracadabra@gmail.com"
] | aomiabracadabra@gmail.com |
ef0664987e727ca3d22bc36a3e8a4d9660e92da3 | 642ba1746fed0b722a127b8426eca987df6efc61 | /share/lib/python/neuron/config.py | 5c53169d7ec682aa6a2f3e1f432cb00382dae13a | [
"BSD-3-Clause"
] | permissive | neuronsimulator/nrn | 23781d978fe9253b0e3543f41e27252532b35459 | b786c36d715ba0f6da1ba8bdf5d2338c939ecf51 | refs/heads/master | 2023-08-09T00:13:11.123525 | 2023-08-04T13:11:02 | 2023-08-04T13:11:02 | 71,627,569 | 313 | 171 | NOASSERTION | 2023-09-14T17:48:03 | 2016-10-22T08:47:37 | C++ | UTF-8 | Python | false | false | 1,255 | py | def _convert_value(key, value):
    """Convert a string representing a CMake variable value into a Python value.
    This does some basic conversion of values that CMake interprets as boolean
    values into Python's True/False, and includes some special cases for
    variables that are known to represent lists. See also:
    https://cmake.org/cmake/help/latest/command/if.html#basic-expressions.
    """
    # Known list-valued keys: split CMake's ";"-separated list into a tuple.
    if key.upper() in {"NRN_ENABLE_MODEL_TESTS"}:
        return tuple(value.split(";"))
    # CMake truthy spellings (case-insensitive) -> Python True.
    elif value.upper() in {"ON", "YES", "TRUE", "Y"}:
        return True
    # CMake falsy spellings (case-insensitive) -> Python False.
    elif value.upper() in {"OFF", "NO", "FALSE", "N"}:
        return False
    # Otherwise: an integer when the text parses as one, else the raw string.
    try:
        return int(value)
    except ValueError:
        return value
def _parse_arguments(h):
    """Map the C++ structure neuron::config::arguments into Python.

    The Python version is accessible as neuron.config.arguments.
    """
    global arguments
    arguments = {}
    # The HOC interface reports the count as a double; insist that it is an
    # exact integer before iterating.
    raw_count = h.nrn_num_config_keys()
    count = int(raw_count)
    assert float(count) == raw_count
    for index in range(count):
        name = h.nrn_get_config_key(index)
        text = h.nrn_get_config_val(index)
        # A duplicate key would silently clobber an earlier entry.
        assert name not in arguments
        arguments[name] = _convert_value(name, text)
| [
"noreply@github.com"
] | neuronsimulator.noreply@github.com |
11904ef28fb8158f2089b88efb705ee390701ba2 | 47008724082fd7fa39b87416bcd1d7633e9d8ef7 | /04-使用Item封装数据/example/example/pipelines.py | 65d9fcd59f07f0c6977eae5bb000f890f65df5e8 | [
"Apache-2.0"
] | permissive | gy0109/matser-scrapy-liushuo | 1909d5902dcaf9a119a1cbf42dff9c9434fb58cc | 99afa51aa30248282bf6d86f8a98a28b086f54ff | refs/heads/master | 2020-05-16T09:25:13.429519 | 2019-04-25T12:34:30 | 2019-04-25T12:34:30 | 182,947,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ExamplePipeline(object):
    """Pass-through pipeline: hands every scraped item on unchanged."""

    def process_item(self, item, spider):
        # No transformation — return the item as-is to the next pipeline stage.
        return item
# pipeline不需要继承什么基类,只需要实现特定的方法 open_spider close_spider process_item
# process_item是必须要有的 用来处理spider怕取到的数据 item: 爬取到的一项数据 spider 爬取的spider对象
class BookPipeline(object):
    """Convert a book's price from GBP (e.g. '£51.77') to CNY ('¥441.64').

    Bug fix: ``exchange_rate`` was declared (comment: 汇率 = exchange rate)
    but never applied — the original code only swapped the currency symbol,
    leaving the numeric value unconverted.
    """

    # GBP -> CNY exchange rate (汇率)
    exchange_rate = 8.5309

    def process_item(self, item, spider):
        # item['price'] looks like '£51.77': drop the leading currency symbol,
        # apply the exchange rate, then reformat to two decimals in CNY.
        price = float(item['price'][1:]) * self.exchange_rate
        item['price'] = '¥%.2f' % price
        return item
| [
"1974326896@qq.com"
] | 1974326896@qq.com |
680f27f3508814e50b45f6f517c70cd13d3ff1fb | a5039e73f9c00ff27b8bd7c1aed8eb0ad2fb7336 | /mysite/mysite/settings.py | 70409c4ce34a87b250f375b9f0c229e0f719ceb6 | [] | no_license | stopic13/lazy-light-backend | 9451fe92a0a77bec6d7562085fdf49a2631cd42a | 9995cf063f7d9fa7b31d3ce2cb2a8001d92e9617 | refs/heads/master | 2020-04-16T01:27:05.085949 | 2019-01-13T04:33:44 | 2019-01-13T04:33:44 | 165,173,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for any real
# deployment it should be loaded from the environment and this key rotated.
SECRET_KEY = 'at!-)^6q5bdsi9rb)y0tgq5o(kf(bz0+i3k$w!td1^ugw=a=zs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; production deployments must list their hosts here.
ALLOWED_HOSTS = []
# Application definition
# 'lazy_light.apps.LazyLightConfig' is the project's own app; the rest are
# the Django contrib defaults.
INSTALLED_APPS = [
    'lazy_light.apps.LazyLightConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Default middleware stack, unchanged from `django-admin startproject`.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development default: a SQLite file next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"sara.topic.18@dartmouth.edu"
] | sara.topic.18@dartmouth.edu |
6a67ae9b12bce403eab3825b7d7072f3c0feae52 | 7c9f28ef0cdfc0aacde231d821578f90e924dcac | /Array/Two pointer/15. 3Sum.py | c4e54cba7df30777bb0cd05f4dc3a174c33f04ad | [] | no_license | Azure-Whale/Kazuo-Leetcode | f42fa902c98d1c6143a2cc7d583d38160dc605cf | 7f778244d3a8bd5157b05ac0f2b59ada199d7443 | refs/heads/main | 2023-08-19T09:31:14.039666 | 2021-10-03T19:26:13 | 2021-10-03T19:26:13 | 308,467,543 | 0 | 0 | null | 2021-03-02T17:02:08 | 2020-10-29T22:46:20 | Python | UTF-8 | Python | false | false | 1,347 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : 15. 3Sum.py
@Time : 1/3/2021 3:45 PM
@Author : Kazuo
@Email : azurewhale1127@gmail.com
@Software: PyCharm
'''
class Solution:
    # NOTE(review): the annotations use `List`, which requires
    # `from typing import List`; that import is not visible in this chunk —
    # confirm it exists at the top of the file.
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets in nums that sum to zero.

        Sort first; then for each unique first element run a two-pointer
        scan (twoSumII) over the remainder.  Sorting makes duplicate
        skipping a simple neighbour comparison.  Note: nums is sorted
        in place, so the caller's list order is modified.
        """
        res = []
        nums.sort()
        for i in range(len(nums)):
            if nums[i] > 0:
                # Sorted array: once the smallest remaining element is
                # positive, no triplet can sum to zero — stop early.
                break
            if i == 0 or nums[i - 1] != nums[i]:  # skip duplicate first elements
                self.twoSumII(nums, i, res)
        return res
    def twoSumII(self, nums: List[int], i: int, res: List[List[int]]):
        """Append to res every pair (lo, hi) with lo > i such that
        nums[i] + nums[lo] + nums[hi] == 0, skipping duplicate pairs.
        Assumes nums is sorted ascending; mutates res in place.
        """
        lo, hi = i + 1, len(nums) - 1
        while (lo < hi):
            # NOTE(review): `sum` shadows the builtin of the same name here.
            sum = nums[i] + nums[lo] + nums[hi]
            # Too small, or lo repeats the previous candidate: advance lo.
            if sum < 0 or (lo > i + 1 and nums[lo] == nums[lo - 1]):
                lo += 1
            # Too large, or hi repeats the previous candidate: retreat hi.
            elif sum > 0 or (hi < len(nums) - 1 and nums[hi] == nums[hi + 1]):
                hi -= 1
            else:
                # Exact hit: record the triplet, then move both pointers.
                res.append([nums[i], nums[lo], nums[hi]])
                lo += 1
                hi -= 1 | [
"Azure-Whale@github.com"
] | Azure-Whale@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.