hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a33e27da81bb7bce9e69f05f780fb86f4b322234 | 501 | py | Python | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | 1 | 2021-05-22T17:29:17.000Z | 2021-05-22T17:29:17.000Z | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | null | null | null | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import sys
from time import time
# Make the package under test importable when the suite is run straight from
# the tests/ directory: add the repository root (parent dir) to sys.path.
try:
    thisdir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(thisdir, '..'))
except:  # NOTE(review): bare except — presumably guards against __file__ being undefined (e.g. interactive use); confirm and narrow to NameError
    sys.path.append('..')
import trinomial
def test_basic():
    """anon() is deterministic: same key and same input give the same value."""
    trinomial.set_unique_key('x')
    assert trinomial.anon('foo@bar.com') == '55086f20ea'
    # A second call with identical input must reproduce the value exactly.
    assert trinomial.anon('foo@bar.com') == '55086f20ea'
def test_repeat():
    """Setting the key again yields the same value as the first run."""
    trinomial.set_unique_key('x')
    assert trinomial.anon('foo@bar.com') == '55086f20ea'
| 22.772727 | 56 | 0.686627 |
a33e48a10e11e80e4f5b747b61f4474ae03b0923 | 667 | py | Python | openprocurement/tender/openeu/views/award_complaint_document.py | leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | 1 | 2016-02-02T09:55:08.000Z | 2016-02-02T09:55:08.000Z | openprocurement/tender/openeu/views/award_complaint_document.py | Leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | 2 | 2021-03-26T00:35:37.000Z | 2022-03-21T22:21:31.000Z | openprocurement/tender/openeu/views/award_complaint_document.py | leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from openprocurement.api.utils import opresource
from openprocurement.tender.openua.views.award_complaint_document import TenderUaAwardComplaintDocumentResource
# Registers the UA award-complaint-document resource under the EU procurement
# method type; all handler behavior is inherited unchanged from the UA class.
@opresource(name='Tender EU Award Complaint Documents',
            collection_path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}/documents',
            path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}/documents/{document_id}',
            procurementMethodType='aboveThresholdEU',
            description="Tender award complaint documents")
class TenderEUAwardComplaintDocumentResource(TenderUaAwardComplaintDocumentResource):
    """Tender EU award complaint document resource (reuses the UA implementation)."""
    pass
| 51.307692 | 111 | 0.773613 |
a33e4ece404ced51ee4f1506f207476b0d455c63 | 2,398 | py | Python | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 147 | 2019-12-23T02:52:04.000Z | 2022-03-06T16:30:43.000Z | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 4 | 2020-12-18T12:47:21.000Z | 2021-05-21T02:18:01.000Z | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 32 | 2020-01-08T13:48:50.000Z | 2022-03-12T06:31:13.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
def get_acti_func(acti_func, params):
    """Instantiate a ``torch.nn`` activation module by (case-insensitive) name.

    :param acti_func: activation name, e.g. ``'relu'``, ``'leakyrelu'``,
        ``'softplus'`` — matched case-insensitively.
    :param params: dict of optional hyper-parameters; each key is prefixed
        with the activation name (e.g. ``'leakyrelu_negative_slope'``).
        Missing keys fall back to fixed defaults.
    :return: a freshly constructed ``nn.Module`` instance.
    :raises ValueError: if the name is not a supported activation.
    """
    name = acti_func.lower()
    # One builder per supported activation. Each lambda reads only its own
    # (name-prefixed) entries from the params dict.
    builders = {
        'relu': lambda p: nn.ReLU(p.get('relu_inplace', False)),
        'leakyrelu': lambda p: nn.LeakyReLU(
            p.get('leakyrelu_negative_slope', 1e-2),
            p.get('leakyrelu_inplace', False)),
        'prelu': lambda p: nn.PReLU(
            p.get('prelu_num_parameters', 1),
            p.get('prelu_init', 0.25)),
        'rrelu': lambda p: nn.RReLU(
            p.get('rrelu_lower', 1.0 / 8),
            p.get('rrelu_upper', 1.0 / 3),
            p.get('rrelu_inplace', False)),
        'elu': lambda p: nn.ELU(
            p.get('elu_alpha', 1.0),
            p.get('elu_inplace', False)),
        'celu': lambda p: nn.CELU(
            p.get('celu_alpha', 1.0),
            p.get('celu_inplace', False)),
        'selu': lambda p: nn.SELU(p.get('selu_inplace', False)),
        'glu': lambda p: nn.GLU(p.get('glu_dim', -1)),
        'sigmoid': lambda p: nn.Sigmoid(),
        'logsigmoid': lambda p: nn.LogSigmoid(),
        'tanh': lambda p: nn.Tanh(),
        'hardtanh': lambda p: nn.Hardtanh(
            p.get('hardtanh_min_val', -1.0),
            p.get('hardtanh_max_val', 1.0),
            p.get('hardtanh_inplace', False)),
        'softplus': lambda p: nn.Softplus(
            p.get('softplus_beta', 1.0),
            p.get('softplus_threshold', 20)),
        'softshrink': lambda p: nn.Softshrink(p.get('softshrink_lambda', 0.5)),
        'softsign': lambda p: nn.Softsign(),
    }
    try:
        build = builders[name]
    except KeyError:
        raise ValueError("Not implemented: {0:}".format(name))
    return build(params)
a33eb973d0edc831eea7bb11066042e56e9c2e88 | 3,359 | py | Python | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | 9 | 2022-03-15T02:02:30.000Z | 2022-03-18T16:16:59.000Z | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | null | null | null | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Amado Tejada
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt5.QtCore import QPoint, QRect, QSize, Qt
from PyQt5.QtWidgets import QLayout, QSizePolicy
class FlowLayout(QLayout):
    """A Qt layout that lays items out left-to-right and wraps to a new row
    when the next item would not fit inside the layout rectangle."""
    def __init__(self, parent=None, margin=0, spacing=-1):
        super(FlowLayout, self).__init__(parent)
        if parent is not None:
            self.setContentsMargins(margin, margin, margin, margin)
        self.setSpacing(spacing)
        # Owned layout items, in insertion order.
        self.itemList = []
    def __del__(self):
        # Drain the item list so child items are released with the layout.
        item = self.takeAt(0)
        while item:
            item = self.takeAt(0)
    def addItem(self, item):
        """QLayout hook: take ownership of a new item."""
        self.itemList.append(item)
    def count(self):
        """QLayout hook: number of managed items."""
        return len(self.itemList)
    def itemAt(self, index):
        """QLayout hook: item at *index*, or None when out of range."""
        if 0 <= index < len(self.itemList):
            return self.itemList[index]
        return None
    def takeAt(self, index):
        """QLayout hook: remove and return the item at *index*, or None."""
        if 0 <= index < len(self.itemList):
            return self.itemList.pop(index)
        return None
    def expandingDirections(self):
        # The layout itself does not want extra space in either direction.
        return Qt.Orientations(Qt.Orientation(0))
    def hasHeightForWidth(self):
        # Height depends on width because rows wrap.
        return True
    def heightForWidth(self, width):
        # Dry-run the layout (testOnly=True) to compute the required height.
        height = self.doLayout(QRect(0, 0, width, 0), True)
        return height
    def setGeometry(self, rect):
        super(FlowLayout, self).setGeometry(rect)
        self.doLayout(rect, False)
    def sizeHint(self):
        return self.minimumSize()
    def minimumSize(self):
        # Large enough for the biggest single item plus the margins.
        size = QSize()
        for item in self.itemList:
            size = size.expandedTo(item.minimumSize())
        margin, _, _, _ = self.getContentsMargins()
        size += QSize(2 * margin, 2 * margin)
        return size
    def doLayout(self, rect, testOnly):
        """Place items row by row inside *rect*; with testOnly=True only the
        total height is computed and no geometry is changed. Returns the
        height consumed."""
        x = rect.x()
        y = rect.y()
        lineHeight = 0
        for item in self.itemList:
            wid = item.widget()
            # Style-provided spacing between adjacent push-button-like widgets.
            spaceX = self.spacing() + wid.style().layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton,
                                                                Qt.Horizontal)
            spaceY = self.spacing() + wid.style().layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton,
                                                                Qt.Vertical)
            nextX = x + item.sizeHint().width() + spaceX
            # Wrap to the next row when the item would overflow the rect
            # (never wrap the first item of a row: lineHeight > 0 guard).
            if nextX - spaceX > rect.right() and lineHeight > 0:
                x = rect.x()
                y = y + lineHeight + spaceY
                nextX = x + item.sizeHint().width() + spaceX
                lineHeight = 0
            if not testOnly:
                item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))
            x = nextX
            lineHeight = max(lineHeight, item.sizeHint().height())
        return y + lineHeight - rect.y()
| 31.688679 | 112 | 0.575171 |
a340df3cf71eb1be1675fbe29cece65cbcc98d43 | 3,183 | py | Python | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 1 | 2020-04-18T11:16:02.000Z | 2020-04-18T11:16:02.000Z | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 6 | 2020-04-13T18:38:04.000Z | 2022-03-12T00:55:56.000Z | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 1 | 2020-07-02T04:47:00.000Z | 2020-07-02T04:47:00.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:29:26 2020
@author: Walter Dempsey & Jamie Yap
"""
#%%
###############################################################################
# Build a RJMCMC class
###############################################################################
from pymc import Stochastic, Deterministic, Node, StepMethod
from numpy import ma, random, where
from numpy.random import random
from copy import deepcopy
class smartdumbRJ(StepMethod):
    """
    S = smartdumbRJ(stochs, indicator, p, rp, g, q, rq, inv_q, Jacobian)

    smartdumbRJ can control single indicatored-array-valued stochs. The
    indicator indicates which stochs (events) are currently 'in the model;'
    if stoch.value.indicator[index] = True, that index is currently being
    excluded. indicatored-array-valued stochs and their children should
    understand how to cope with indicatored arrays when evaluating their
    log-probabilities. The prior for the indicatored-array-valued stoch may
    depend explicitly on the indicator.

    Callable arguments (notation similar to Waagepetersen et al.):
      p / rp     : indicator-jump probability and its sampler
      q / rq     : auxiliary-variable density and its sampler
      g          : deterministic mapping applied to (indicator, u, stochs)
      inv_q      : log-density correction for the reverse auxiliary draw
      Jacobian   : optional log-|Jacobian| term for the mapping g (may be None)
    """
    def __init__(self, stochs, indicator, p, rp, g, q, rq, inv_q, Jacobian):
        StepMethod.__init__(self, nodes = stochs)
        self.g = g
        self.q = q
        self.rq = rq
        self.p = p
        self.rp = rp
        self.inv_q = inv_q
        self.Jacobian = Jacobian
        # Name -> stochastic lookup, passed to g() as keyword arguments.
        self.stoch_dict = {}
        for stoch in stochs:
            self.stoch_dict[stoch.__name__] = stoch
        self.indicator = indicator
    def propose(self):
        """
        Sample a new indicator and value for the stoch.
        """
        self.rp(self.indicator)
        self._u = self.rq(self.indicator)
        self.g(self.indicator, self._u, **self.stoch_dict)
    def step(self):
        # Local import: the module header imports neither math nor a log()
        # function ('log' and 'Inf' were previously undefined names here).
        import math
        # log-probability and log-likelihood for the stoch's current value:
        logp = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp
        loglike = self.loglike
        # Sample a candidate value for the value and indicator of the stoch.
        self.propose()
        # log-probability for the stoch's proposed value:
        logp_p = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp
        # Skip the rest (and restore state) if a bad value is proposed.
        # BUG FIX: 'Inf' was an undefined name; use float('-inf').
        if logp_p == float('-inf'):
            for stoch in self.stochs: stoch.revert()
            return
        loglike_p = self.loglike
        # Metropolis-Hastings-Green acceptance ratio, on the log scale.
        test_val = logp_p + loglike_p - logp - loglike
        test_val += self.inv_q(self.indicator)
        test_val += self.q(self.indicator, self._u)
        if self.Jacobian is not None:
            test_val += self.Jacobian(self.indicator, self._u, **self.stoch_dict)
        # Reject: restore the previous state of every stochastic.
        if math.log(random()) > test_val:
            for stoch in self.stochs:
                # BUG FIX: was 'stoch.revert' (attribute access only, a no-op).
                stoch.revert()
    def tune(self):
        """No adaptive tuning for this step method."""
        pass
| 31.514851 | 88 | 0.602576 |
a34205264c406b528a6fcfa5ac69debf00a2b02c | 2,021 | py | Python | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | null | null | null | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | 7 | 2018-09-08T20:07:43.000Z | 2021-12-13T19:54:53.000Z | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | null | null | null | from unittest.mock import MagicMock
from slack.user import User
from baseball.team import Team
# Shared fixtures. reusableUser has no team; testTeam is a deliberately bad
# team (0-162, last in its division) so the formatted strings are predictable.
reusableUser = User(token='blah', id='UB00123', team=None)
testTeam = Team(abbreviation='CN', location='City Name',
                full_name='City Name Players', record='0W-162L', division='CL Beast',
                wins=0, losses=162, standing=5, todays_game_text='CN@BOB',
                todays_game_score='1-0')
def test_init():
    """The constructor stores the given id."""
    u = User(token='gooblygook', id='ABC123', team=None)
    assert u.id == 'ABC123'
def test_status_calls_updater():
    """User.status() delegates to the status updater's display_status()."""
    reusableUser.su.display_status = MagicMock(return_value="Test status")
    reusableUser.status()
    reusableUser.su.display_status.assert_called_with()
def test_emoji_calls_updater():
    """User.emoji() delegates to the status updater's display_status_emot()."""
    reusableUser.su.display_status_emot = MagicMock(return_value=":cat:")
    reusableUser.emoji()
    reusableUser.su.display_status_emot.assert_called_with()
def test_simple_team_and_record_status():
    """simple_team_and_record() posts '<abbr> | <record>' as the status."""
    expected = 'CN | 0W-162L'
    u = User(token='blah', id='UB00123', team=testTeam)
    u.su.update_status = MagicMock()
    u.simple_team_and_record()
    u.su.update_status.assert_called_once_with(status=expected)
def test_todays_game_and_standings_status():
    """todays_game_and_standings() posts '<game> | <record> | #<n> in <division>'.

    BUG FIX: this test was accidentally defined twice with identical bodies;
    the second definition silently shadowed the first. The duplicate has been
    removed.
    """
    expected = 'CN@BOB | 0W-162L | #5 in CL Beast'
    u = User(token='blah', id='UB00123', team=testTeam)
    u.su.update_status = MagicMock()
    u.todays_game_and_standings()
    u.su.update_status.assert_called_once_with(status=expected)
def test_todays_game_score_and_standings_status():
    """todays_game_score_and_standings() includes the final score in the status."""
    expected = 'CN@BOB (Final: 1-0) | 0W-162L | #5 in CL Beast'
    u = User(token='blah', id='UB00123', team=testTeam)
    u.su.update_status = MagicMock()
    u.todays_game_score_and_standings()
    u.su.update_status.assert_called_once_with(status=expected)
| 34.844828 | 74 | 0.730332 |
a342151afcda4ba72f2d257247a2de01de22ba98 | 1,934 | py | Python | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | 2 | 2018-02-05T01:27:07.000Z | 2018-06-10T02:02:25.000Z | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | null | null | null | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import logging
import time
import kaptan
from .. import Window, config, exc
from ..workspacebuilder import WorkspaceBuilder, freeze
from .helpers import TmuxTestCase
logger = logging.getLogger(__name__)
# Directory of this test module and the project root two levels up.
current_dir = os.path.abspath(os.path.dirname(__file__))
example_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))  # NOTE(review): unused in this chunk
class FreezeTest(TmuxTestCase):
    """Builds a live tmux session from yaml_config and freezes it back into a
    config, verifying the frozen result validates and exports cleanly."""
    # tmuxp session description used as the build input for the freeze test.
    yaml_config = """
    session_name: sampleconfig
    start_directory: '~'
    windows:
    - layout: main-vertical
      panes:
      - shell_command:
        - vim
        start_directory: '~'
      - shell_command:
        - echo "hey"
        - cd ../
      window_name: editor
    - panes:
      - shell_command:
        - tail -F /var/log/syslog
        start_directory: /var/log
      window_name: logging
    - window_name: test
      panes:
      - shell_command:
        - htop
    """
    def test_focus(self):
        # assure the built yaml config has focus
        pass
    def test_freeze_config(self):
        """Round-trip: build a session, freeze it, validate and export it."""
        sconfig = kaptan.Kaptan(handler='yaml')
        sconfig = sconfig.import_config(self.yaml_config).get()
        builder = WorkspaceBuilder(sconf=sconfig)
        builder.build(session=self.session)
        assert(self.session == builder.session)
        # Local import is needed: the module header does `from time import
        # time`, which binds the function, not the module.
        import time
        time.sleep(1)  # give tmux a moment to settle before freezing
        session = self.session
        sconf = freeze(session)
        # The frozen config must pass schema validation and survive inlining.
        config.validate_schema(sconf)
        sconf = config.inline(sconf)
        kaptanconf = kaptan.Kaptan()
        kaptanconf = kaptanconf.import_config(sconf)
        # Export in both formats to ensure the frozen config serializes.
        json = kaptanconf.export(
            'json',
            indent=2
        )
        yaml = kaptanconf.export(
            'yaml',
            indent=2,
            default_flow_style=False,
            safe=True
        )
        #logger.error(json)
        #logger.error(yaml)
| 23.585366 | 80 | 0.609617 |
a343d4acc1180ec43471b02424e6695cc4893a9e | 9,132 | py | Python | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 36 | 2020-11-19T07:23:42.000Z | 2022-03-30T03:35:57.000Z | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 4 | 2021-01-30T09:49:10.000Z | 2021-12-05T12:49:11.000Z | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 6 | 2020-11-23T07:54:47.000Z | 2021-07-09T07:20:15.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
GCL + OMEGA = 180 / 512.
{'0.6': {'ground-track-field': 0.573582489319409, 'harbor': 0.3891521609424017, 'bridge': 0.2563337419887201, 'small-vehicle': 0.5648505388890961, 'plane': 0.8953705097216129, 'baseball-diamond': 0.6304525425142407, 'tennis-court': 0.9068133847959017, 'roundabout': 0.5504477682851595, 'storage-tank': 0.7818913345802345, 'swimming-pool': 0.39985514157699587, 'mAP': 0.5792389738191542, 'soccer-ball-field': 0.624200360919821, 'basketball-court': 0.5216235844619704, 'large-vehicle': 0.5246429570098051, 'ship': 0.7314627227976299, 'helicopter': 0.3379053694843169},
'0.8': {'ground-track-field': 0.2640926811979444, 'harbor': 0.0994356798615974, 'bridge': 0.09090909090909091, 'small-vehicle': 0.14845898197949595, 'plane': 0.5189377689746963, 'baseball-diamond': 0.14224201616818288, 'tennis-court': 0.7850084962037644, 'roundabout': 0.2161224596513639, 'storage-tank': 0.4032224420253035, 'swimming-pool': 0.021645021645021644, 'mAP': 0.25175554640925113, 'soccer-ball-field': 0.38894355893358884, 'basketball-court': 0.361673373734271, 'large-vehicle': 0.08588614768791838, 'ship': 0.18384638625743577, 'helicopter': 0.06590909090909092},
'mmAP': 0.35923286694026607,
'0.7': {'ground-track-field': 0.4385066163040262, 'harbor': 0.2004849369462918, 'bridge': 0.13189991198289955, 'small-vehicle': 0.41173024457583235, 'plane': 0.7905792123899915, 'baseball-diamond': 0.33846255142519494, 'tennis-court': 0.9031235090086663, 'roundabout': 0.45296468077000096, 'storage-tank': 0.6792869554877644, 'swimming-pool': 0.1969023557455042, 'mAP': 0.4448856961613535, 'soccer-ball-field': 0.5147552299156577, 'basketball-court': 0.47906270045099153, 'large-vehicle': 0.3334752568068329, 'ship': 0.5709906745500424, 'helicopter': 0.23106060606060608},
'0.9': {'ground-track-field': 0.013986013986013986, 'harbor': 0.002932551319648094, 'bridge': 0.000282326369282891, 'small-vehicle': 0.0031978072179077205, 'plane': 0.12144979203802733, 'baseball-diamond': 0.09090909090909091, 'tennis-court': 0.3105592596206337, 'roundabout': 0.09090909090909091, 'storage-tank': 0.043532372020744114, 'swimming-pool': 0.00029231218941829873, 'mAP': 0.05292676216204492, 'soccer-ball-field': 0.05524475524475524, 'basketball-court': 0.045454545454545456, 'large-vehicle': 0.006060606060606061, 'ship': 0.009090909090909092, 'helicopter': 0.0},
'0.65': {'ground-track-field': 0.5256384950288536, 'harbor': 0.2916501930015581, 'bridge': 0.17809220559814648, 'small-vehicle': 0.5129586251041002, 'plane': 0.8894034686906369, 'baseball-diamond': 0.5249010996303538, 'tennis-court': 0.9050013758244457, 'roundabout': 0.504625741843787, 'storage-tank': 0.7537275931713616, 'swimming-pool': 0.2889168538278225, 'mAP': 0.5213593647460195, 'soccer-ball-field': 0.5539343130129118, 'basketball-court': 0.5139638068449094, 'large-vehicle': 0.4321755180088217, 'ship': 0.6335125302514466, 'helicopter': 0.3118886513511373},
'0.5': {'ground-track-field': 0.5817047190853409, 'harbor': 0.5423160296407179, 'bridge': 0.37985530785380944, 'small-vehicle': 0.6212558927508246, 'plane': 0.8991382954230245, 'baseball-diamond': 0.6884909042118417, 'tennis-court': 0.9074714532809276, 'roundabout': 0.6247024980791215, 'storage-tank': 0.7908352165588822, 'swimming-pool': 0.5101446981453137, 'mAP': 0.6433669597686625, 'soccer-ball-field': 0.709771501950316, 'basketball-court': 0.5437748871261118, 'large-vehicle': 0.6161368250574863, 'ship': 0.8084240148818748, 'helicopter': 0.4264821524843431},
'0.55': {'ground-track-field': 0.575700748371701, 'harbor': 0.48360728773857997, 'bridge': 0.32298317197853993, 'small-vehicle': 0.6060592932618177, 'plane': 0.8978626322707085, 'baseball-diamond': 0.657004331905233, 'tennis-court': 0.907337369076047, 'roundabout': 0.6011977619793185, 'storage-tank': 0.7885043330695543, 'swimming-pool': 0.48472692462266914, 'mAP': 0.6140150681924789, 'soccer-ball-field': 0.6472686724945429, 'basketball-court': 0.5309924718578253, 'large-vehicle': 0.5552623519506533, 'ship': 0.750600756135258, 'helicopter': 0.40111791617473436},
'0.95': {'ground-track-field': 0.0, 'harbor': 0.0, 'bridge': 0.0, 'small-vehicle': 0.00010078613182826043, 'plane': 0.004102785575469661, 'baseball-diamond': 0.0, 'tennis-court': 0.09090909090909091, 'roundabout': 0.0016835016835016834, 'storage-tank': 0.003621876131836291, 'swimming-pool': 0.0, 'mAP': 0.007933510175509946, 'soccer-ball-field': 0.018181818181818184, 'basketball-court': 0.0, 'large-vehicle': 0.00025826446280991736, 'ship': 0.00014452955629426219, 'helicopter': 0.0},
'0.85': {'ground-track-field': 0.12179691653375865, 'harbor': 0.00818181818181818, 'bridge': 0.011363636363636364, 'small-vehicle': 0.020008904011782284, 'plane': 0.3041595005123823, 'baseball-diamond': 0.10876623376623376, 'tennis-court': 0.6415239979360767, 'roundabout': 0.1266637317484775, 'storage-tank': 0.21079632046855917, 'swimming-pool': 0.004329004329004329, 'mAP': 0.1360229133672777, 'soccer-ball-field': 0.17866004962779156, 'basketball-court': 0.18620689655172412, 'large-vehicle': 0.02561482058270067, 'ship': 0.07928485690820646, 'helicopter': 0.012987012987012986},
'0.75': {'ground-track-field': 0.38324233567107485, 'harbor': 0.11957411957411958, 'bridge': 0.10577255444175597, 'small-vehicle': 0.2773328982910034, 'plane': 0.6717961393802804, 'baseball-diamond': 0.18744781108289382, 'tennis-court': 0.80974614279133, 'roundabout': 0.3273415371813541, 'storage-tank': 0.5539919596357566, 'swimming-pool': 0.0639939770374553, 'mAP': 0.3408238746009085, 'soccer-ball-field': 0.4580894506562955, 'basketball-court': 0.42804302074314954, 'large-vehicle': 0.2186913819763849, 'ship': 0.3686584269144099, 'helicopter': 0.13863636363636364}}
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_DCL_G_2x_20200929'
NET_NAME = 'resnet50_v1d'  # backbone; alternative: 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"  # visible GPU ids, comma separated
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 2000
SAVE_WEIGHTS_INTE = 20673 * 2  # checkpoint interval; also the unit for DECAY_STEP below
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
# Map the backbone name to its pretrained-checkpoint file name.
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0  # if None, will not clip
# Loss-term weights.
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
# Learning-rate schedule expressed in checkpoint intervals.
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)  # NOTE(review): likely a typo for WARM_STEP; keep for compatibility
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTATrain'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
OMEGA = 180 / 512.  # angle-bin granularity (degrees per code step)
ANGLE_MODE = 1
# Data augmentation toggles.
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
# Focal-loss style prior: bias so the initial foreground probability is PROBABILITY.
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180  # 90 or 180
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
| 71.34375 | 583 | 0.727551 |
a34438fcd2d05af774f8b7d208037ebd093f49f3 | 1,488 | py | Python | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 6 | 2016-07-20T07:37:07.000Z | 2022-01-14T06:35:26.000Z | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 1 | 2020-03-29T05:13:58.000Z | 2020-03-29T05:13:58.000Z | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 1 | 2020-03-29T04:29:36.000Z | 2020-03-29T04:29:36.000Z | """
Test cases for AST calculator
"""
from unittest import TestCase
from calc import evaluate
class TestCaclEvaluate(TestCase):
    """
    Test cases for AST calculator - evaluation
    """
    def test_simple_expression(self):
        """
        Test expression without functions or constants
        """
        data = [
            ("84-9*3", 57),
            ("8**4", 4096),
            ("3*(2*5)**3/(123-32+9)", 30),
        ]
        for expression, expected in data:
            result = evaluate(expression)
            msg = "{} evaluated to: {}. Expected {}".format(
                expression, result, expected)
            # assertEqual: assertEquals is a deprecated alias.
            self.assertEqual(result, expected, msg)
    def test_complex_expression(self):
        """
        Test expression with functions or constants
        """
        data = [
            ("2*log(exp(2))", 4),
            ("cos(2*pi)", 1),
            ("log(8,2)", 3),
        ]
        for expression, expected in data:
            result = evaluate(expression)
            msg = "{} evaluated to: {}. Expected {}".format(
                expression, result, expected)
            self.assertEqual(result, expected, msg)
    def test_invalid_expression(self):
        """
        Make sure code will behave correctly for invalid input
        """
        data = [
            "1/0",
            "import os",
        ]
        for expression in data:
            # BUG FIX: StandardError is Python-2-only and raises NameError on
            # Python 3; expect the generic Exception base class instead.
            with self.assertRaises(Exception):
                evaluate(expression)
| 23.619048 | 62 | 0.511425 |
a3446186d570e00d5586a746c5e62060ac9246b6 | 315 | py | Python | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 2 | 2019-06-10T19:30:06.000Z | 2020-04-30T01:05:04.000Z | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | null | null | null | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 3 | 2019-01-23T21:37:29.000Z | 2020-04-08T13:22:29.000Z | from flask import request
from app.main.extensions import cache
def clear_cache(key_prefix):
    """Delete every cache entry whose key starts with *key_prefix*.

    NOTE(review): reaches into the private `cache.cache._cache` mapping, so
    this only works with cache backends exposing that attribute — verify.
    """
    keys = [key for key in cache.cache._cache.keys() if key.startswith(key_prefix)]
    cache.delete_many(*keys)
def cache_json_keys():
    """Return the current request's JSON body as a sorted, hashable tuple of
    (key, value) pairs (suitable for use as a cache key)."""
    json_data = tuple(sorted(request.get_json().items()))
    return json_data
| 19.6875 | 83 | 0.733333 |
a344e089a4efbd0afbd6c50e23cff7269d7dd9c8 | 1,931 | py | Python | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 16 | 2015-05-04T18:47:33.000Z | 2021-02-03T17:10:40.000Z | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 4 | 2015-09-08T14:48:31.000Z | 2016-09-09T09:49:41.000Z | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 2 | 2015-05-04T18:39:23.000Z | 2016-04-18T14:35:47.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from gum.utils import elasticsearch_connection
class ElasticsearchManager(object):
    """Like a `ModelManager`, gives the user methods to run Elasticsearch
    queries for a specific model / mapping type.
    """
    def __init__(self, model=None, mapping_type=None, urls=None):
        self.model = model
        self.mapping_type = mapping_type
        self.urls = urls  # Elasticsearch endpoint URLs; used when no mapping_type is set
    def get_elasticsearch_connection(self):
        """Gets the Elasticsearch connection, preferring the mapping type's
        own connection and falling back to one built from ``urls``."""
        if self.mapping_type is not None:
            return self.mapping_type.get_elasticsearch_connection()
        return elasticsearch_connection(urls=self.urls)
    def search(self, **kwargs):
        """Partial application of `search` from the Elasticsearch client:
        fills in ``index`` and ``doc_type`` from the mapping type when the
        caller does not provide them.

        :param kwargs: forwarded to ``Elasticsearch.search``
        """
        es = self.get_elasticsearch_connection()
        if 'index' not in kwargs:
            kwargs["index"] = self.mapping_type.index
        if 'doc_type' not in kwargs:
            kwargs["doc_type"] = self.mapping_type.get_type()
        return es.search(**kwargs)
    def index(self, instance):
        """Shortcut to index a model instance via the mapping type.

        :param instance: model instance to index
        :return: result of ``mapping_type.index_document``
        """
        return self.mapping_type.index_document(instance)
class GenericElasticsearchManager(ElasticsearchManager):
    """Generic Elasticsearch manager to make queries without using MappingTypes."""
    def search(self, **kwargs):
        """For this manager it's mandatory to specify index and doc_type on each call:

        >>> elasticsearch = GenericElasticsearchManager()
        >>> elasticsearch.search(index="index-name", doc_type="mapping-type-name")

        :param kwargs: must include ``index`` and ``doc_type``
        :return: search response
        """
        # NOTE(review): asserts are stripped under `python -O`; consider
        # raising ValueError for required-argument validation instead.
        assert "index" in kwargs
        assert "doc_type" in kwargs
        return super(GenericElasticsearchManager, self).search(**kwargs)
| 32.183333 | 86 | 0.658726 |
a34991845be5613841f0b124224655a27cd95755 | 1,732 | py | Python | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | null | null | null | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | null | null | null | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | 1 | 2021-09-23T19:42:36.000Z | 2021-09-23T19:42:36.000Z | import pickle
import json
import numpy as np
from flask import Flask, request, jsonify
# Flask application serving the house-price prediction API.
app = Flask(__name__)
# Load the trained regressor once at import time so every request reuses
# the same in-memory estimator.
with open('models/regressor.pkl', 'rb') as f:
    model = pickle.load(f)
def __process_input(posted_data) -> np.array:
    '''
    Transforms JSON data acquired from the request body into the 2D array
    the model understands.

    The payload must look like ``{"features": [... 13 numbers ...]}`` for a
    single sample, or ``{"features": [[...13...], [...13...]]}`` for a batch.

    :param posted_data: raw JSON string/bytes from the request body
    :return: np.array of shape (n_samples, 13), or None when the input is invalid
    '''
    try:
        payload = json.loads(posted_data)
        # dtype=float rejects non-numeric feature values up front instead of
        # letting them reach model.predict() and surface as a 500.
        data_item = np.array(payload['features'], dtype=float)
    except (KeyError, json.JSONDecodeError, ValueError, TypeError):
        # Missing "features" key, malformed JSON, or ragged/non-numeric data.
        return None
    if data_item.ndim == 1:  # single sample: promote to a 1-row matrix
        data_item = data_item.reshape(1, -1)
    if data_item.ndim != 2:
        # Scalars (ndim 0) previously crashed on shape[-1] with an uncaught
        # IndexError; anything above 2 dimensions is invalid as before.
        return None
    if data_item.shape[-1] == 13:  # the model expects exactly 13 features
        return data_item
    return None
@app.route('/')
def index() -> str:
    """Landing/health endpoint: plain welcome message with HTTP 200."""
    return 'Welcome to the house prediction interface', 200
@app.route('/predict', methods=['POST'])
def predict() -> (str, int):
    '''
    Feeds the request body to the model and returns the predicted price(s).

    Expects a JSON body with a "features" key (see __process_input).
    :return: JSON string and HTTP status: 200 with predictions,
             400 for invalid input, 500 when prediction fails.
    '''
    try:
        data_str = request.data
        predict_params = __process_input(data_str)
        if predict_params is not None:
            prediction = model.predict(predict_params)
            # tolist() makes the numpy result JSON-serializable
            return json.dumps({'predicted house price(s) (in dollars)': prediction.tolist()}), 200
        return json.dumps({'Error': 'Invalid input'}), 400
    except (KeyError, json.JSONDecodeError, AssertionError):
        return json.dumps({'Error': 'Unable to predict'}), 500
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run()
| 28.393443 | 105 | 0.639723 |
a34ab44ceb198f7ffec0e7a91a4d37823eb68330 | 4,734 | py | Python | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
import math
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from sobit_bringup.msg import *
#---グローバル変数-----------------------------
motion = [0]*21
TIME = 0.1
serial_joint = Serial_motion()
state_jointstate = JointState()
state_jointstate.name =["L_wheel","R_wheel","L_shoulder_roll","L_shoulder_pitch","L_elbow_yaw","L_shoulder_pitch","R_shoulder_roll","R_shoulder_pitch","R_elbow_yaw","R_elbow_pitch","neck_pitch","neck_roll","neck_yaw","L_hand_twist","L_hand_thumb","L_hand_index","L_hand_mid","L_hand_ring","L_hand_pinky","R_hand_twist"]
####[Upper-body motion calculation]-------------------------------------------------------------------
def cul_upper_motion(position):
    # Converts joint angles (radians) into 16-bit hex servo targets stored in
    # the global `motion` list. 57.29 ~= 180/pi (rad -> deg); the per-joint
    # multipliers and the 32768 midpoint are servo-protocol constants.
    # NOTE(review): Python 2 code ('%04x' with a float operand, print stmts).
    motion_deg = [0]*21
    print "\n[CUL_UPEER_MOTION]"
    #print "position:",position
    #rad2deg
    motion_deg[2] = position[2] * 57.29 #<L_shoulder_roll>
    motion_deg[3] = position[3] * 57.29 #<L_shoulder_pitch>
    motion_deg[4] = position[4] * 57.29 #<L_elbow_yaw>
    motion_deg[5] = position[5] * 57.29 #<L_elbow_pitch>
    motion_deg[6] = position[6] * 57.29 #<R_shoulder_roll>
    motion_deg[7] = position[7] * 57.29 #<R_shoulder_pitch>
    motion_deg[8] = position[8] * 57.29 #<R_elbow_yaw>
    motion_deg[9] = position[9] * 57.29 #<R_elbow_pitch>
    motion_deg[10] = position[10] * 57.29 #<neck_pitch>
    motion_deg[11] = position[11] * 57.29 #<neck_roll>
    motion_deg[12] = position[12] * 57.29 #<neck yaw>
    motion_deg[13] = position[13] * 57.29 #<L_hand_twist>
    motion_deg[14] = position[14] * 57.29 #<L_hand_thumb>
    motion_deg[15] = position[15] * 57.29 #<L_hand_index>
    motion_deg[16] = position[16] * 57.29 #<L_hand_middle>
    motion_deg[17] = position[17] * 57.29 #<L_hand_ring>
    motion_deg[18] = position[18] * 57.29 #<L_hand_pinky>
    motion_deg[19] = position[19] * 57.29 #<R_hand_twist>
    # decimal -> hexadecimal (4-digit, zero-padded servo positions)
    motion[0] = '%04x' %(TIME * 40)
    motion[3] = '%04x' %(32768 + motion_deg[2] * 97) #<L_shoulder_roll>
    motion[4] = '%04x' %(32768 + motion_deg[3] * 86) #<L_shoulder_pitch>
    motion[5] = '%04x' %(32768 + motion_deg[4] * 58) #<L_elbow_yaw>
    motion[6] = '%04x' %(32768 - motion_deg[5] * 105) #<L_elbow_pitch>
    motion[7] = '%04x' %(32768 - motion_deg[6] * 97) #<R_shoulder_roll>
    motion[8] = '%04x' %(32768 - motion_deg[7] * 86) #<R_shoulder_pitch>
    motion[9] = '%04x' %(32768 - motion_deg[8] * 58) #<R_elbow_yaw>
    motion[10] = '%04x' %(32768 + motion_deg[9] * 105) #<R_elbow_pitch>
    motion[11] = '%04x' %(32768 + motion_deg[10] * 110) #<neck_pitch>
    motion[12] = '%04x' %(32768 + motion_deg[11] * 112) #<neck_roll>
    motion[13] = '%04x' %(32768 + motion_deg[12] * 246) #<neck yaw>
    motion[14] = '%04x' %(32768 - motion_deg[13] * 91) #<L_hand_twist>
    motion[15] = '%04x' %(32768 - motion_deg[14] * 91) #<L_hand_thumb>
    motion[16] = '%04x' %(26624 - motion_deg[15] * 68) #<L_hand_index>
    motion[17] = '%04x' %(38912 + motion_deg[16] * 68) #<L_hand_middle>
    motion[18] = '%04x' %(26624 - motion_deg[17] * 68) #<L_hand_ring>
    motion[19] = '%04x' %(38912 + motion_deg[18] * 68) #<L_hand_pinky>
    motion[20] = '%04x' %(32768 + motion_deg[19] * 91) #<R_hand_twist>
    print "motion:",motion
    return motion
####[JOINT_STATE CALLBACK]-------------------------------------------------------------------------------------
def callback1(jointstate):
    # Receives a JointState message, converts it to an upper-body serial
    # command and publishes it on 'serial_msg'.
    # NOTE(review): UPPER_FLAG is declared global but never defined/used here;
    # the Publisher is also re-created on every callback — presumably it
    # should be created once at startup.
    global state_jointstate, UPPER_FLAG
    print "\n\n[JOINT:CALLBACK]"
    #print jointstate
    # Drop jointstate messages that are more than one second old
    now = rospy.get_rostime()
    test = now.nsecs - jointstate.header.stamp.nsecs
    print "test:",test
    if now.secs - jointstate.header.stamp.secs > 1:
        print "skip"
        return
    # Store the position data
    state_jointstate.position = jointstate.position
    print state_jointstate.position
    # Compute the upper-body motion (fills the global `motion` list)
    motion = cul_upper_motion(state_jointstate.position)
    print "upper_motion:",motion
    serial_joint.name = "JOINT"
    serial_joint.serial = "@"+motion[0]+":::T"+motion[3]+"::T"+motion[5]+":T"+motion[6]+":T"+motion[7]+":T"+motion[8]+":T"+motion[9]+":T"+motion[10]+":T"+motion[11]+":T"+motion[12]+":T"+motion[13]+"::::::T"+motion[14]+":T"+motion[15]+":T"+motion[16]+":T"+motion[17]+":T"+motion[18]+":T"+motion[19]+":T"+motion[20]+":::::\n"
    print serial_joint
    # Send the serial command
    pub = rospy.Publisher('serial_msg', Serial_motion , queue_size=5) # publisher definition
    pub.publish(serial_joint)
####[Main]#################################################################################################################
if __name__ == '__main__':
    # Subscribe to the robot's joint states and relay them as serial motions.
    rospy.init_node('joint_listner')
    sub = rospy.Subscriber('sobit/joint_states', JointState, callback1) #joint_state
    rospy.spin()
| 37.872 | 322 | 0.603929 |
a34d2a23f38ff576e6a5ef0f805165729d2fc6ef | 2,789 | py | Python | scalex/metrics.py | jsxlei/SCALEX | 021c6d35a0cebeaa1f59ea53b9b9e22015ce6e5f | [
"MIT"
] | 11 | 2021-04-09T02:46:29.000Z | 2022-01-04T16:42:44.000Z | scale/metrics.py | QingZhan98/SCALE_v2 | 69bb02beee40ec085684335f356798d4dcb53fbc | [
"MIT"
] | 2 | 2021-04-18T02:30:18.000Z | 2022-03-05T10:40:00.000Z | scale/metrics.py | QingZhan98/SCALE_v2 | 69bb02beee40ec085684335f356798d4dcb53fbc | [
"MIT"
] | 4 | 2021-03-29T12:34:47.000Z | 2022-03-06T12:42:45.000Z | #!/usr/bin/env python
"""
# Author: Xiong Lei
# Created Time : Thu 10 Jan 2019 07:38:10 PM CST
# File Name: metrics.py
# Description:
"""
import numpy as np
import scipy
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
def batch_entropy_mixing_score(data, batches, n_neighbors=100, n_pools=100, n_samples_per_pool=100):
    """
    Calculate batch entropy mixing score
    Algorithm
    -----
    * 1. Calculate the regional mixing entropies at the location of 100 randomly chosen cells from all batches
    * 2. Define 100 nearest neighbors for each randomly chosen cell
    * 3. Calculate the mean mixing entropy as the mean of the regional entropies
    * 4. Repeat above procedure for 100 iterations with different randomly chosen cells.
    Parameters
    ----------
    data
        np.array of shape nsamples x nfeatures.
    batches
        batch labels of nsamples.
    n_neighbors
        The number of nearest neighbors for each randomly chosen cell. By default, n_neighbors=100.
    n_samples_per_pool
        The number of randomly chosen cells from all batches per iteration. By default, n_samples_per_pool=100.
    n_pools
        The number of iterations with different randomly chosen cells. By default, n_pools=100.
    Returns
    -------
    Batch entropy mixing score (normalized to [0, 1] by dividing by log2 of
    the number of batches; NOTE(review): uses numpy's global RNG, so results
    vary between calls unless np.random.seed is set by the caller).
    """
    # print("Start calculating Entropy mixing score")
    def entropy(batches):
        # Regional entropy of the batch labels in one neighborhood, weighted
        # by the global batch proportions P so imbalanced batches don't
        # dominate. `batches_`, `N_batches` and `P` are closed over below.
        p = np.zeros(N_batches)
        adapt_p = np.zeros(N_batches)
        a = 0
        for i in range(N_batches):
            p[i] = np.mean(batches == batches_[i])
            a = a + p[i]/P[i]
        entropy = 0
        for i in range(N_batches):
            adapt_p[i] = (p[i]/P[i])/a
            # +1e-8 guards log(0) for batches absent from the neighborhood
            entropy = entropy - adapt_p[i]*np.log(adapt_p[i]+10**-8)
        return entropy
    # Cap n_neighbors to the dataset size (minus the cell itself).
    n_neighbors = min(n_neighbors, len(data) - 1)
    nne = NearestNeighbors(n_neighbors=1 + n_neighbors, n_jobs=8)
    nne.fit(data)
    # Subtract the identity so a cell is not its own neighbor.
    kmatrix = nne.kneighbors_graph(data) - scipy.sparse.identity(data.shape[0])
    score = 0
    batches_ = np.unique(batches)
    N_batches = len(batches_)
    if N_batches < 2:
        raise ValueError("Should be more than one cluster for batch mixing")
    # Global proportion of each batch.
    P = np.zeros(N_batches)
    for i in range(N_batches):
        P[i] = np.mean(batches == batches_[i])
    for t in range(n_pools):
        # Sample cells (with replacement) and average their regional entropies.
        indices = np.random.choice(np.arange(data.shape[0]), size=n_samples_per_pool)
        score += np.mean([entropy(batches[kmatrix[indices].nonzero()[1]
                                           [kmatrix[indices].nonzero()[0] == i]])
                           for i in range(n_samples_per_pool)])
    Score = score / float(n_pools)
    return Score / float(np.log2(N_batches))
from sklearn.metrics import silhouette_score | 34.8625 | 114 | 0.639656 |
a34d8a9377344d0edaae44dd4947affb31816584 | 189 | py | Python | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | from miniciti.bilding import Bilding
class ColorBilding(Bilding):
def __init__(self, color="bli"):
self.color = color
def isBli(self):
return self.color == "bli"
| 18.9 | 36 | 0.650794 |
a34e461868bd92e65252352e4554823a69ea35c7 | 2,603 | py | Python | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | import pandas as pd
import random
import time
# Source: https://stackoverflow.com/a/553320/556935
def str_time_prop(start, end, date_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, date_format))
etime = time.mktime(time.strptime(end, date_format))
ptime = stime + prop * (etime - stime)
return time.strftime(date_format, time.localtime(ptime))
def random_date(start, end):
    """Return a uniformly random date between `start` and `end` (mm/dd/YYYY)."""
    fraction = random.random()
    return str_time_prop(start, end, '%m/%d/%Y', fraction)
def basic(n=1000):
    """Write `basic.csv` with `n` synthetic patient rows.

    Columns: PatientID (1..n), PatientAge (18-100), PatientGender (M/F),
    PatientCategory (A/B/C). RNG calls are made row by row, in the same
    order as before, so seeded runs stay reproducible.
    """
    columns = ('PatientID', 'PatientAge', 'PatientGender', 'PatientCategory')
    data = {name: [] for name in columns}
    for patient_id in range(1, n + 1):
        data['PatientID'].append(patient_id)
        data['PatientAge'].append(random.randint(18, 100))
        data['PatientGender'].append(random.choice(['M', 'F']))
        data['PatientCategory'].append(random.choice(['A', 'B', 'C']))
    pd.DataFrame(data).to_csv('basic.csv', index=False)
def encounters(n=1000):
    """Write `encounters.csv`: 2-15 encounters per patient for `n` patients.

    Each encounter carries a random date in 2015-2019; the patient's age is
    shifted by the encounter year so it stays consistent across rows.
    """
    data = {
        'PatientID': [],
        'PatientAge': [],
        'PatientGender': [],
        'PatientCategory': [],
        'EncounterDate': [],
        'Diagnosis1': [],
        'Diagnosis2': [],
        'Diagnosis3': [],
    }
    for i in range(1, n + 1):
        # Per-patient demographics are fixed across all their encounters.
        age = random.randint(18, 100)
        gender = random.choice(['M', 'F'])
        category = random.choice(['A', 'B', 'C'])
        for _ in range(random.randint(2, 15)): # Random number of encounters
            date = random_date('01/01/2015', '12/31/2019')
            year = int(date[-4:])  # mm/dd/YYYY -> YYYY
            data['PatientID'].append(i)
            data['PatientAge'].append(age + (year - 2015))  # age at encounter
            data['PatientGender'].append(gender)
            data['PatientCategory'].append(category)
            data['EncounterDate'].append(date)
            # Two-letter diagnosis codes drawn from {A,B,C}x{A,B,C}.
            data['Diagnosis1'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
            data['Diagnosis2'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
            data['Diagnosis3'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
    df = pd.DataFrame(data)
    df.to_csv('encounters.csv', index=False)
if __name__ == '__main__':
    # Fixed seed so the generated CSVs are reproducible across runs.
    random.seed(3)
    basic()
    encounters()
| 31.743902 | 102 | 0.579332 |
a350ecde028977958b337223398f9351c3e4bbec | 1,317 | py | Python | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
#ccpc20qhd-f => 最大联通子图
#如果都是联通的,所有节点都要放进去,
#友好值=联通子图中边的个数-点的个数
#应该所有(友好值>0)联通子图加起来?
#DFS搜索,或者是并查集? 数一数有多少联通块?
#最短路用广搜,全部解用深搜
连通图的复杂度是O(V+E)..
为什么会Runtime Error?
分析:
解法1: DFS做联通块
解法2: 看不包含哪些人,相当于走个捷径!
'''
def f(n, l):
    """Sum, over all connected components, of max(0, edges - nodes).

    Nodes are 1-based in `l`; each edge is stored once, from its smaller
    endpoint to the larger, and parallel edges are counted every time.

    Fixes vs. the previous version:
    * removed the leftover debug ``print(el)`` (it corrupted the judged
      output, which must be exactly the "Case #i: v" lines);
    * replaced the fixed-size ``[0]*n`` stack with a growable list — the
      old stack overflowed (IndexError) when parallel edges pushed the
      same unvisited node more than n times.

    :param n: number of nodes (1..n)
    :param l: list of [u, v] edges
    :return: the total value
    """
    adjacency = [[] for _ in range(n)]
    for x, y in l:
        if x > y:
            x, y = y, x
        adjacency[x - 1].append(y - 1)  # make sure edge is from small to BIG!
    used = [False] * n
    total = 0
    for start in range(n):
        if used[start]:
            continue
        stack = [start]
        node_count = 0
        edge_count = 0
        while stack:
            node = stack.pop()  # pop a node as the DFS source
            if used[node]:
                continue
            node_count += 1
            used[node] = True
            for neighbour in adjacency[node]:
                # Every stored edge is counted exactly once: its smaller
                # endpoint is processed exactly once.
                edge_count += 1
                if not used[neighbour]:
                    stack.append(neighbour)
        total += max(0, edge_count - node_count)
    return total
# Contest I/O: first line is the number of test cases; each case gives
# "n m" followed by m edge lines, and prints "Case #i: value".
t = int(input())
for i in range(t):
    n,m = list(map(int,input().split()))
    l = [list(map(int,input().split())) for _ in range(m)]
    print('Case #%d: %s'%((i+1), f(n,l)))
| 23.105263 | 82 | 0.430524 |
a3525d2e36b057b387fd2a242a0be1258c2a7481 | 2,920 | py | Python | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | null | null | null | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | 1 | 2021-10-19T08:09:44.000Z | 2021-10-19T08:09:44.000Z | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from code.feature_extraction.list_counter import PhotosNum, URLsNum, HashtagNum, MentionNum, TokenNum
from code.util import COLUMN_PHOTOS, COLUMN_URLS, COLUMN_HASHTAGS, COLUMN_MENTIONS
class PhotosNumTest(unittest.TestCase):
    """PhotosNum should count the photo URLs in a stringified list."""
    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_PHOTOS
        self.extractor = PhotosNum(self.INPUT_COLUMN)
    def test_photos_num(self):
        """Three photo URLs in the column yield a count of 3."""
        input_data = '''['www.hashtag.de/234234.jpg', 'www.yolo.us/g5h23g45f.png', 'www.data.it/246gkjnbvh2.jpg']'''
        input_df = pd.DataFrame([COLUMN_PHOTOS])
        input_df[COLUMN_PHOTOS] = [input_data]
        expected_output = [3]
        output = self.extractor.fit_transform(input_df)
        self.assertEqual(expected_output, output)
class URLsNumTest(unittest.TestCase):
    """URLsNum should count the URLs in a stringified list."""
    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_URLS
        self.extractor = URLsNum(self.INPUT_COLUMN)
    def test_url_num(self):
        """Four URLs in the column yield a count of 4."""
        input_data = '''['www.google.com', 'www.apple.com', 'www.uos.de', 'www.example.com']'''
        input_df = pd.DataFrame([COLUMN_URLS])
        input_df[COLUMN_URLS] = [input_data]
        expected_output = [4]
        output = self.extractor.fit_transform(input_df)
        self.assertEqual(expected_output, output)
class HashtagNumTest(unittest.TestCase):
    """HashtagNum should count the hashtags in a stringified list."""
    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_HASHTAGS
        self.extractor = HashtagNum(self.INPUT_COLUMN)
    def test_hashtag_num(self):
        """Three hashtags in the column yield a count of 3."""
        input_data = '''['hashtag', 'yolo', 'data']'''
        input_df = pd.DataFrame([COLUMN_HASHTAGS])
        input_df[COLUMN_HASHTAGS] = [input_data]
        expected_output = [3]
        output = self.extractor.fit_transform(input_df)
        self.assertEqual(expected_output, output)
class MentionNumTest(unittest.TestCase):
    """MentionNum should count the mention dicts in a stringified list."""
    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_MENTIONS
        self.extractor = MentionNum(self.INPUT_COLUMN)
    def test_mention_num(self):
        """Two mention entries in the column yield a count of 2."""
        input_data = '''[{'id': '2235729541', 'name': 'dogecoin', 'screen_name': 'dogecoin'}, {'id': '123432342', 'name': 'John Doe', 'screen_name': 'jodoe'}]'''
        input_df = pd.DataFrame([COLUMN_MENTIONS])
        input_df[COLUMN_MENTIONS] = [input_data]
        expected_output = [2]
        output = self.extractor.fit_transform(input_df)
        self.assertEqual(expected_output, output)
class TokenNumTest(unittest.TestCase):
    """TokenNum should count the tokens in a stringified token list."""
    def setUp(self) -> None:
        self.INPUT_COLUMN = "input"
        self.extractor = TokenNum(self.INPUT_COLUMN)
    def test_token_length(self):
        """A five-token sentence yields a count of 5."""
        input_text = "['This', 'is', 'an', 'example', 'sentence']"
        output = [5]
        input_df = pd.DataFrame()
        input_df[self.INPUT_COLUMN] = [input_text]
        token_length = self.extractor.fit_transform(input_df)
        self.assertEqual(output, token_length)
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()
| 30.736842 | 161 | 0.667466 |
a352f55dcd4b6a9dcf2653a39663d590b4d79e27 | 926 | py | Python | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 4 | 2022-03-05T20:51:38.000Z | 2022-03-15T17:10:22.000Z | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | null | null | null | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 1 | 2022-03-08T13:45:22.000Z | 2022-03-08T13:45:22.000Z | import unittest
from operator import index
from EasyMCDM.models.Promethee import Promethee
class TestPrometheeMethods(unittest.TestCase):
    """Regression test pinning Promethee's phi rankings on a known CSV."""
    def test_str_str_str(self):
        """Solving the fixture dataset reproduces the expected phi lists."""
        d = "data/partiels_donnees.csv"
        p = Promethee(data=d, verbose=False)
        res = p.solve(
            weights=[0.3, 0.2, 0.2, 0.1, 0.2],
            prefs=["min","min","max","max","max"]
        )
        assert res["phi_negative"] == [('A', 0.8), ('C', 1.4000000000000001), ('D', 1.7), ('E', 2.4), ('B', 3.0999999999999996)], "Phi Negative are differents!"
        assert res["phi_positive"] == [('A', 3.0), ('C', 2.2), ('D', 1.9), ('E', 1.4000000000000001), ('B', 0.9)], "Phi positive are differents!"
        assert res["phi"] == [('A', 2.2), ('C', 0.8), ('D', 0.19999999999999996), ('E', -0.9999999999999998), ('B', -2.1999999999999997)], "Phi are differents!"
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()
a354ea47baa38abfde41024d2fd179d6d96966cf | 1,207 | py | Python | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 1 | 2021-05-25T09:33:28.000Z | 2021-05-25T09:33:28.000Z | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 6 | 2020-06-06T01:59:09.000Z | 2021-06-10T20:17:56.000Z | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | null | null | null | '''
Vortex OpenSplice
This software and documentation are Copyright 2006 to TO_YEAR ADLINK
Technology Limited, its affiliated companies and licensors. All rights
reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from functools import wraps
import redis
from api.emulation import (
Config,
EmulationStatus
)
def abort_handled(fun):
    """Decorator that short-circuits `fun` while the emulation is aborted.

    Before each call it reads the ``emulation_status`` key from the frontend
    Redis instance; when it equals ``EmulationStatus.ABORT`` the wrapped
    function is skipped and ``None`` is returned instead.
    """
    @wraps(fun)
    def wrapper(*args, **kwargs):
        # NOTE(review): a fresh Redis connection is opened on every call —
        # presumably acceptable here, but consider reusing one connection.
        redis_connection = redis.StrictRedis(
            host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password=Config.REDIS_PASSWORD,
            encoding="utf-8", decode_responses=True)
        if redis_connection.get('emulation_status') == EmulationStatus.ABORT:
            return  # aborted: skip the wrapped call, implicitly return None
        return fun(*args, **kwargs)
    return wrapper
| 32.621622 | 92 | 0.729909 |
a3551761361e06ddd937cee500aed18df74cd70f | 2,027 | py | Python | torchtraining/functional/metrics/regression.py | szymonmaszke/torchtraining | 1ddf169325b7239d6d6686b20072a406b69a0180 | [
"MIT"
] | 3 | 2020-08-26T06:11:58.000Z | 2020-08-27T08:11:15.000Z | torchtraining/functional/metrics/regression.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2020-08-25T19:19:43.000Z | 2020-08-25T19:19:43.000Z | torchtraining/functional/metrics/regression.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2021-04-15T18:55:57.000Z | 2021-04-15T18:55:57.000Z | import typing
import torch
from .. import utils
@utils.docs
def regression_of_squares(
output: torch.Tensor,
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.sum,
) -> torch.Tensor:
return reduction(output - torch.mean(target)) ** 2
@utils.docs
def squares_of_residuals(
output: torch.Tensor,
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.sum,
) -> torch.Tensor:
return reduction(output - target) ** 2
@utils.docs
def r2(output: torch.Tensor, target: torch.Tensor,) -> torch.Tensor:
return 1 - squares_of_residuals(output, target) / total_of_squares(target)
@utils.docs
def absolute_error(
output: torch.Tensor,
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.mean,
) -> torch.Tensor:
return reduction(torch.nn.functional.l1_loss(output, target, reduction="none"))
@utils.docs
def squared_error(
output: torch.Tensor,
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.mean,
) -> torch.Tensor:
return reduction(torch.nn.functional.mse_loss(output, target, reduction="none"))
@utils.docs
def squared_log_error(
output: torch.Tensor,
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.mean,
) -> torch.Tensor:
return reduction((torch.log(1 + target) - torch.log(1 + output)) ** 2)
@utils.docs
def adjusted_r2(output: torch.Tensor, target: torch.Tensor, p: int) -> torch.Tensor:
numel = output.numel()
return 1 - (1 - r2(output, target)) * ((numel - 1) / (numel - p - 1))
@utils.docs
def max_error(output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.max(torch.abs(output - target))
@utils.docs
def total_of_squares(
target: torch.Tensor,
reduction: typing.Callable[[torch.Tensor,], torch.Tensor,] = torch.sum,
) -> torch.Tensor:
return reduction(target - torch.mean(target)) ** 2
| 27.026667 | 84 | 0.693143 |
a3552615d55b8131f79fc858dd41da8c30cf2d71 | 6,028 | py | Python | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | 1 | 2019-11-27T18:00:00.000Z | 2019-11-27T18:00:00.000Z | #!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""hold.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
ホールドピース管理モジュール。
"""
import pieces as _pieces
import utils.const as _const
import utils.layouter as _layouter
class Hold(object):
    u"""Hold-piece management: stores the puzzle piece the player set aside.
    """
    __slots__ = (
        "__id", "__item_state", "__is_captured", "__keep", "__piece",
        "__system", "__window")
    # "#"-separated item-name lists used to classify a held pattern's blocks.
    __GOOD_ITEM_NAMES = (
        _const.STAR_NAMES+"#"+_const.SHARD_NAMES+"#" +
        _const.KEY_NAMES+"#"+_const.CHEST_NAMES+"#Maxwell")
    __BAD_ITEM_NAMES = (
        _const.IRREGULAR_NAMES+"#"+_const.DEMON_NAMES+"#" +
        _const.GHOST_NAMES+"#Pandora#Joker")
    def __init__(self, system):
        u"""Constructor.
        self.__id: used to decide where the hold window is placed.
        self.__keep: stores the held piece pattern.
        """
        import pygame as __pygame
        import window as __window
        self.__system = system
        self.__id = self.__system.id
        self.__piece = None
        self.__keep = _pieces.Array(length=2)
        self.__window = __window.Next(__pygame.Rect(
            (0, 0), _const.NEXT_WINDOW_SIZE))
        self.__is_captured = False
        self.__item_state = 0b0000
        self.__window.is_light = not self.__is_captured
        _layouter.Game.set_hold(self.__window, self.__id)
    def __display(self):
        u"""Show the held piece in the hold window.
        """
        self.__piece = _pieces.Falling(self.__keep[0], (0, 0))
        self.__window.piece = self.__piece
    def __set_item_state(self):
        u"""Set the state flags according to the items inside the pattern.
        0b0001: a held block exists.
        0b0010: a basic block exists.
        0b0100: a benign item exists.
        0b1000: a harmful item exists.
        """
        pattern, = self.__keep
        self.__item_state = (
            0b0001+(any(any(
                shape and shape.type in _const.BASIC_NAMES.split("#") for
                shape in line) for line in pattern) << 1) +
            (any(any(
                shape and shape.type in self.__GOOD_ITEM_NAMES.split("#") for
                shape in line) for line in pattern) << 2) +
            (any(any(
                shape and shape.type in self.__BAD_ITEM_NAMES.split("#") for
                shape in line) for line in pattern) << 3))
    def change(self, is_single, target):
        u"""Block change: apply a "new##old" transformation to the held pattern,
        unless the player's armor or group prevents it.
        """
        if not self.__keep.is_empty:
            new, old = target.split("##")
            self.__piece.clear()
            if self.__system.battle.player.armor.is_prevention(new):
                _, _, armor, _ = self.__system.battle.equip_huds
                armor.flash()
            elif not self.__system.battle.group.is_prevention(new):
                pattern, = self.__keep
                if is_single:
                    pattern.append(new, old)
                else:
                    pattern.change(new, old)
            self.__set_item_state()
            self.__display()
    def capture(self):
        u"""Acquire or swap the current puzzle piece into the hold slot.
        Only allowed once per drop (guarded by __is_captured).
        """
        import material.sound as __sound
        def __accessory_effect():
            u"""Accessory effect: the player's accessory may transform the
            newly held pattern, unless armor or group prevents it.
            """
            battle = self.__system.battle
            effect = battle.player.accessory.spell
            if effect:
                is_single, new, old = effect
                _, _, armor, accessory = battle.equip_huds
                if battle.player.armor.is_prevention(new):
                    armor.flash()
                elif not battle.group.is_prevention(new) and (
                    self.__keep[-1].append(new, old) if is_single else
                    self.__keep[-1].change(new, old)
                ):
                    accessory.flash()
        def __update():
            u"""Update hold state and refresh the display.
            """
            self.is_captured = True
            self.__set_item_state()
            self.__display()
        if not self.__is_captured:
            __sound.SE.play("hold")
            puzzle = self.__system.puzzle
            if self.__keep.is_empty:
                # First hold: store the falling piece and advance the queue.
                puzzle.piece.pattern.rotate(0)
                self.__keep.append(puzzle.piece.pattern)
                __accessory_effect()
                puzzle.piece.clear()
                puzzle.forward()
                __update()
            else:
                # Swap: only if the held piece fits at the current position.
                virtual = self.virtual
                virtual.topleft = puzzle.piece.state.topleft
                if not virtual.is_collide(puzzle.field):
                    self.__piece.clear()
                    puzzle.piece.clear()
                    puzzle.piece.pattern.rotate(0)
                    self.__keep.append(puzzle.piece.pattern)
                    __accessory_effect()
                    puzzle.piece.pattern = self.__keep.pop()
                    puzzle.update()
                    __update()
    def exchange(self, other):
        u"""Swap held pieces with another Hold (both must be non-empty).
        """
        if not self.__keep.is_empty and not other.__keep.is_empty:
            self.__piece.clear()
            other.__piece.clear()
            pattern, = self.__keep
            other_pattern, = other.__keep
            self.__keep[0] = other_pattern
            other.__keep[0] = pattern
            self.__set_item_state()
            other.__set_item_state()
            self.__display()
            other.__display()
    @property
    def virtual(self):
        u"""Get a virtual copy of the held piece for collision calculations.
        Returns None when nothing is held.
        """
        if not self.__keep.is_empty:
            pattern, = self.__keep
            return _pieces.Falling(pattern, is_virtual=True)
    @property
    def is_empty(self):
        u"""True when no piece is held.
        """
        return self.__keep.is_empty
    @property
    def is_captured(self):
        u"""True when a hold/swap has already happened for the current drop.
        """
        return self.__is_captured
    @is_captured.setter
    def is_captured(self, value):
        u"""Set the captured flag.
        Also updates the window highlighting accordingly.
        """
        self.__is_captured = value
        self.__window.is_light = not self.__is_captured
    @property
    def item_state(self):
        u"""Get the item state flags (see __set_item_state).
        """
        return self.__item_state
    @property
    def window(self):
        u"""Get the hold window.
        """
        return self.__window
| 31.233161 | 77 | 0.535169 |
a35602a1c5d4bcf343e77bdb5e4000c799357ee5 | 347 | py | Python | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | 8 | 2019-11-15T18:15:56.000Z | 2020-02-03T18:05:05.000Z | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | from urllib.parse import urlparse
def host(url):
if not url:
return ""
data = urlparse(url)
if data.netloc:
return data.netloc
value = data.path.split("/")[0]
if "@" not in value or ":" not in value:
return value
from_ = value.find("@") + 1
for_ = value.find(":")
return value[from_:for_]
| 21.6875 | 44 | 0.570605 |
a35af943a1738408edb737fd87daf987635bfda0 | 1,554 | py | Python | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | null | null | null | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | null | null | null | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | 2 | 2021-09-28T00:24:21.000Z | 2022-03-09T13:38:29.000Z | import cv2
import numpy as np
# draw_ped() function to draw bounding box with top labeled text
def draw_ped(img, label, x0, y0, xt, yt, font_size=0.4, alpha=0.5, bg_color=(255,0,0), ouline_color=(255,255,255), text_color=(0,0,0)):
overlay = np.zeros_like(img)
y0, yt = max(y0 - 15, 0) , min(yt + 15, img.shape[0])
x0, xt = max(x0 - 15, 0) , min(xt + 15, img.shape[1])
(w, h), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_size, 1)
cv2.rectangle(overlay,
(x0, y0 + baseline),
(max(xt, x0 + w), yt),
bg_color,
-1)
cv2.rectangle(img,
(x0, y0 + baseline),
(max(xt, x0 + w), yt),
ouline_color,
2)
pts = np.array([[x0, y0 - h - baseline], # top left
[x0 + w, y0 - h - baseline], # top right
[x0 + w + 10, y0 + baseline], # bolom right
[x0,y0 + baseline]]) # bottom left
cv2.fillPoly(img, [pts], ouline_color) # add label white fill
cv2.polylines(img, [pts], True, ouline_color, 2) # add label white border
cv2.putText(img,
label,
(x0, y0),
cv2.FONT_HERSHEY_SIMPLEX,
font_size,
text_color,
1,
cv2.LINE_AA)
img_blend = cv2.addWeighted(img, 1, overlay, alpha, 0.0)
return img_blend | 39.846154 | 135 | 0.47426 |
a35b39c11aff2330ec7aa6556e235a658417a015 | 2,204 | py | Python | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | """
Code modified from:
apps.fishandwhistle.net/archives/1155
"""
from __future__ import print_function
import serial
import sys
import glob
port_list = {}
def identifyPort(port):
"""
tests the port and identifies what device is attached to it from probing it
:param port:
:return: a port list dict with the tho porst for 'GPS' and 'Sonar'
"""
global port_list
try:
with serial.Serial(port, baudrate=4800, timeout=1) as ser:
# read 10 lines from the serial output
for i in range(10):
line = ser.readline().decode('ascii', errors='replace')
msg = line.split(',')
if msg[0] == '$GPRMC':
port_list['GPS'] = port
return
elif msg[0] == '$SDDBT':
port_list['Sonar'] = port
return
except Exception as e:
print(e)
def _scan_ports():
    """
    List candidate serial port names for the current platform
    (Windows, Linux/Cygwin, or macOS).
    :return: list of port names/paths
    :raises EnvironmentError: on an unsupported platform
    """
    if sys.platform.startswith('win'):
        print("scan Windows")
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        print("scan Linux")
        # this excludes your current terminal "/dev/tty"
        patterns = ('/dev/tty[A-Za-z]*', '/dev/ttyUSB*')
        ports = [glob.glob(pattern) for pattern in patterns]
        ports = [item for sublist in ports for item in sublist]  # flatten
    elif sys.platform.startswith('darwin'):
        print("scan Darwin")
        patterns = ('/dev/*serial*', '/dev/ttyUSB*', '/dev/ttyS*')
        ports = [glob.glob(pattern) for pattern in patterns]
        ports = [item for sublist in ports for item in sublist]  # flatten
    else:
        raise EnvironmentError('Unsupported platform')
    return ports
def getPorts():
    """Scan all candidate ports, probe each one, and return ``port_list``.

    :return: the module-level dict mapping 'GPS'/'Sonar' to device paths
    """
    candidates = _scan_ports()
    print(candidates)
    for candidate in candidates:
        identifyPort(candidate)
    # `global` kept from the original; it is a no-op here since port_list
    # is only read, never rebound.
    global port_list
    return port_list
def test():
    """Smoke test: run a full port scan and print the identified ports."""
    # Renamed the local from `list` (original), which shadowed the builtin.
    ports = getPorts()
    print(ports)


if __name__ == "__main__":
    test()
| 26.878049 | 79 | 0.583031 |
a35c7cddf46b7abcc142f392526fdba0c6a3aa7e | 112 | py | Python | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | from app.python.utils import get_datetime
def test_get_datetime():
    """get_datetime() must hand back a string."""
    result = get_datetime()
    assert isinstance(result, str)
| 18.666667 | 42 | 0.776786 |
a35f847cfae16fa50a6998fa4b3afcf7165085cb | 883 | py | Python | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 955 | 2018-05-16T17:10:12.000Z | 2022-03-30T20:14:26.000Z | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 490 | 2018-05-16T08:00:22.000Z | 2022-03-28T21:14:39.000Z | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 243 | 2018-05-17T11:07:24.000Z | 2022-03-27T18:01:07.000Z | from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.plugins.runners import RunnersPluginRegister
from nornir.plugins.inventory import SimpleInventory
from nornir.plugins.runners import SerialRunner, ThreadedRunner
from nornir_utils.plugins.inventory import YAMLInventory
class Test:
    """Verify that auto-registration exposes exactly the builtin plugins."""

    def test_registered_runners(self):
        """Runner registry lists only the builtin serial/threaded runners."""
        RunnersPluginRegister.deregister_all()
        RunnersPluginRegister.auto_register()
        expected = {
            "threaded": ThreadedRunner,
            "serial": SerialRunner,
        }
        assert RunnersPluginRegister.available == expected

    def test_registered_inventory(self):
        """Inventory registry lists only the builtin inventory plugins."""
        InventoryPluginRegister.deregister_all()
        InventoryPluginRegister.auto_register()
        expected = {
            "SimpleInventory": SimpleInventory,
            "YAMLInventory": YAMLInventory,
        }
        assert InventoryPluginRegister.available == expected
| 33.961538 | 65 | 0.737259 |
a3644bcfb5d4ed17d821b83cba8aacde7ddfe23f | 1,411 | py | Python | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | 2 | 2019-07-13T14:15:30.000Z | 2020-01-04T10:44:47.000Z | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | null | null | null | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed promptly;
# the original `open('README.rst', 'r').read()` inside the call leaked it.
with open('README.rst', 'r') as _readme:
    _long_description = _readme.read()

setup(
    name="tessled",
    version="0.0.1",
    url='http://github.com/hodgestar/tesseract-control-software',
    license='MIT',
    description="Tesseract control software and simulator.",
    long_description=_long_description,
    author='Simon Cross',
    author_email='hodgestar+tesseract@gmail.com',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'click',
        'numpy',
        'pillow',
        'zmq',
    ],
    extras_require={
        'simulator': ['faulthandler', 'pygame_cffi', 'PyOpenGL'],
        'spidev': ['wiringpi', 'spidev'],
    },
    entry_points={  # Optional
        'console_scripts': [
            'tesseract-effectbox=tessled.effectbox:main',
            'tesseract-simulator=tessled.simulator:main',
            'tesseract-spidev-driver=tessled.spidev_driver:main',
        ],
    },
    scripts=[
        'bin/tesseract-runner',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Games/Entertainment',
    ],
)
| 30.673913 | 65 | 0.593196 |
a364c15aa063e5f5b9ce9b053b0dc00b7991aba9 | 45 | py | Python | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | null | null | null | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | null | null | null | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | 1 | 2019-03-02T12:32:40.000Z | 2019-03-02T12:32:40.000Z | GITLAB_URL = "XXXXXX"
GITLAB_TOKEN = "XXXXX"
| 15 | 22 | 0.733333 |
a3670442cb8f6ed8744f92e8d59bbfa74b3455a4 | 481 | py | Python | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | null | null | null | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | 2 | 2015-10-08T15:41:56.000Z | 2015-10-08T15:50:48.000Z | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
def Epoch(data):
'''Patching Epoch timestamps.'''
for record in data:
record['last_updated'] = time.strftime('%Y-%m-%d', time.localtime(record['last_updated']))
return data
def Date(data):
'''Patching date stamps.'''
for record in data:
m = time.strptime(record['month_en'], '%B')
m = time.strftime('%m', m)
record['date'] = '{year}-{month}'.format(year=record['year'], month=m)
return data
| 20.041667 | 94 | 0.619543 |
a3686d9e544eb4ac435a125dc81bd7efb5af661e | 1,875 | py | Python | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | from nltk.tokenize import TweetTokenizer
import io
def read_en_lines(lines):
tknzr = TweetTokenizer()
result = []
for line in lines:
result.append(tknzr.tokenize(line))
return result
def read_mrl_lines(lines):
result = []
for line in lines:
tgt = ''
for i, ch in enumerate(line.strip()):
if ch == '(' or ch == ')' or ch == ',':
if tgt[-1] == ' ':
tgt = tgt + ch + ' '
else:
tgt = tgt + ' ' + ch + ' '
elif ch == ' ':
tgt = tgt + "_"
else:
tgt = tgt + ch
tgt_list = tgt.strip().split(' ')
result.append(tgt_list)
return result
def read_nlmap_data(en_path, mrl_path):
with open(en_path, "r") as lines:
en_result = read_en_lines(lines)
with open(mrl_path, "r") as lines:
mrl_result = read_mrl_lines(lines)
return en_result, mrl_result
def write_to_txt_file(src_list, tgt_list, fp):
fp.write(' '.join(src_list) + '\t' + ' '.join(tgt_list) + '\n')
def process_results(src_result, tgt_result, path):
txt_fp = io.open(path, "w")
for i, src_list in enumerate(src_result):
tgt_list = tgt_result[i]
write_to_txt_file(src_list, tgt_list, txt_fp)
dir_path = "../../data/nlmap/"
train_en_path = dir_path + "nlmaps.train.en"
train_mrl_path = dir_path + "nlmaps.train.mrl"
test_en_path = dir_path + "nlmaps.test.en"
test_mrl_path = dir_path + "nlmaps.test.mrl"
train_txt = dir_path + "train.txt"
test_txt = dir_path + "test.txt"
train_en_result, train_mrl_result = read_nlmap_data(train_en_path, train_mrl_path)
test_en_result, test_mrl_result = read_nlmap_data(test_en_path, test_mrl_path)
process_results(train_en_result, train_mrl_result, train_txt)
process_results(test_en_result, test_mrl_result, test_txt) | 29.296875 | 82 | 0.6208 |
a3697ddf813bc6d7c74b1660f1c7cbb233952678 | 2,228 | py | Python | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 13 | 2020-09-25T16:48:06.000Z | 2022-01-31T01:36:33.000Z | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 5 | 2021-01-19T09:36:59.000Z | 2022-03-25T06:56:08.000Z | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 7 | 2020-09-24T01:15:52.000Z | 2022-03-23T06:50:55.000Z | states = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA',
'GJ', 'HR', 'HP', 'JH', 'KA', 'KL',
'MP', 'MH', 'MN', 'ML', 'MZ', 'NL',
'OD', 'PB', 'RJ', 'SK', 'TN', 'TS',
'TR', 'UP', 'UK', 'WB', 'AN', 'CH',
'DD', 'DL', 'JK', 'LA', 'LD', 'PY']
def resultplate(plate):
result=""
j=0
for character in plate:
if character.isalnum():
result+=character
if character.isdigit():
j+=1
else:
j=0
if j==4:
break
if j!=4:
print('Couldn\'t extract number')
else:
while result[0:2] not in states and result!="":
result=result[2:]
if result=="":
print('Couldn\'t Recognize Plate. Try with a different plate')
else:
return result
def preprocess(plate):
plate = plate.replace('\n', '')
plate = plate.replace('INDIA', '')
plate = plate.replace('IND', '')
plate = plate.replace('IN', '')
return plate
def detect_text(path):
"""Detects text in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
#with open('results.txt', 'w', encoding='utf8') as f:
#result=""
#for text in texts:
# result+=text.description
# result+='\n"{}"'.format(text.description)
#vertices = (['({},{})'.format(vertex.x, vertex.y)
# for vertex in text.bounding_poly.vertices])
#result+='bounds: {}'.format(','.join(vertices))
#f.write(result)
plate = preprocess(texts[0].description)
plate = resultplate(plate)
print(plate)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
detect_text('numberplate.jpg')
| 30.520548 | 75 | 0.508528 |
a36a1929d767b48efa4751ceab577496580f2e66 | 667 | py | Python | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
from distutils.util import convert_path
main_ns = {}
vpath = convert_path('photorename/version.py')
with open(vpath) as vfile:
exec(vfile.read(), main_ns)
setup(
name='photorename',
version=main_ns['__version__'],
description='bulk rename photos in a dictionary',
author='Robert Lehmann',
author_email='lehmrob@posteo.net',
url='https://github.com/lehmrob',
packages=['photorename'],
entry_points = {
'console_scripts': ['phore=photorename.cli:main'],
},
install_requires=[
'exif',
],
test_suite='nose.collector',
tests_require=['nose'],
)
| 23.821429 | 58 | 0.667166 |
a36a758d49817dccb80abed74b7ead8eedf80c06 | 456 | py | Python | python_loop/groom_lt1.py | hesthers/self-python-practice- | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | 1 | 2021-09-07T00:26:56.000Z | 2021-09-07T00:26:56.000Z | python_loop/groom_lt1.py | hesthers/self-python-practice | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | null | null | null | python_loop/groom_lt1.py | hesthers/self-python-practice | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# UTF-8 encoding when using korean
import numpy as np
import math
input_l = []
while True:
user_input = int(input(''))
input_l.append(user_input)
if len(input_l[1:]) == input_l[0]:
#user_input = user_input.split('\n')
cnt_input = []
for i in range(1, len(input_l)):
if np.sqrt(input_l[i])-math.isqrt(input_l[i]) == 0:
cnt_input.append(input_l[i])
else:
pass
break
else:
next
print(len(cnt_input))
| 17.538462 | 54 | 0.640351 |
a36c8b3504ed6254b18b5d9848ed7acfd15782c9 | 454 | py | Python | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 8 | 2021-06-20T17:59:22.000Z | 2021-09-15T05:28:45.000Z | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 2 | 2021-12-02T20:33:08.000Z | 2021-12-28T14:23:17.000Z | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 3 | 2022-02-02T10:36:01.000Z | 2022-03-03T09:04:37.000Z | from setuptools import setup, find_packages
# Package metadata collected in one mapping, then handed to setup().
_METADATA = dict(
    name='ContextNet',
    version='latest',
    packages=find_packages(),
    description='ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context',
    author='Sangchun Ha',
    author_email='seomk9896@naver.com',
    url='https://github.com/hasangchun/ContextNet',
    install_requires=[
        'torch>=1.4.0',
    ],
    python_requires='>=3.6',
)

setup(**_METADATA)
a36d0ac9736ee7f0f87c898553b9622f6343c622 | 130 | py | Python | katas/kyu_7/product_of_main_diagonal.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/kyu_7/product_of_main_diagonal.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/kyu_7/product_of_main_diagonal.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | from operator import mul
def main_diagonal_product(matrix):
    """Return the product of the elements on the main diagonal of *matrix*.

    Rewritten without the Python-2-only builtins ``xrange`` and bare
    ``reduce`` so it runs on both Python 2 and 3, and it now returns 1
    (the empty product) for an empty matrix instead of raising.

    :param matrix: square matrix as a sequence of rows
    :return: product of matrix[i][i] for every row i
    """
    product = 1
    for i, row in enumerate(matrix):
        product *= row[i]
    return product
| 21.666667 | 67 | 0.730769 |
a36e85cc522d69fee1eb9747d2afca83c85e094a | 1,643 | py | Python | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z | from __future__ import annotations
import toolcli
from ctc.protocols import curve_utils
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': async_curve_pools_command,
'help': 'list curve pools',
'args': [
{
'name': '--verbose',
'help': 'include extra data',
'action': 'store_true',
},
],
}
async def async_curve_pools_command(verbose: bool) -> None:
import asyncio
factories = [
'0xB9fC157394Af804a3578134A6585C0dc9cc990d4',
'0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5',
'0x0959158b6040d32d04c301a72cbfd6b39e21c9ae',
'0x8F942C20D02bEfc377D41445793068908E2250D0',
'0xF18056Bbd320E96A48e3Fbf8bC061322531aac99',
]
# get data from each factory
coroutines = [
curve_utils.async_get_factory_pool_data(factory, include_balances=False)
for factory in factories
]
factories_data = await asyncio.gather(*coroutines)
items = [item for factory_data in factories_data for item in factory_data]
# print as table
completed = set()
items = sorted(items, key=lambda item: ', '.join(item['symbols']))
for item in items:
if item['address'] in completed:
continue
else:
completed.add(item['address'])
if not verbose:
skip = False
for symbol in item['symbols']:
if symbol.startswith('RC_'):
skip = True
if skip:
continue
print(item['address'] + ' ' + ', '.join(item['symbols']))
| 26.934426 | 80 | 0.593427 |
a3724c66e413effcdf21b1d39aedb643be084706 | 218 | py | Python | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | """
db constants
"""
# Database server connection endpoint.
DB_HOST = 'localhost'
# NOTE(review): 28015 looks like the RethinkDB driver default port —
# presumably a RethinkDB backend; confirm against the connection code.
DB_PORT = 28015
# Database is cavilling
DB_NAME = 'cavilling'
# Table names used by the application.
DB_TABLE_CAVILLS = 'cavills'
DB_TABLE_HAIRDOS = 'hairdos'
DB_TABLE_POLRUS = 'polrus'
DB_TABLE_USERS = 'users'
| 14.533333 | 28 | 0.733945 |
a3725144f31da3c2b8b26f9fa9ea6b635892f533 | 76,062 | py | Python | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | 1 | 2021-04-11T05:53:38.000Z | 2021-04-11T05:53:38.000Z | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | null | null | null | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | 1 | 2021-04-11T05:53:42.000Z | 2021-04-11T05:53:42.000Z | from flask import Blueprint, render_template, request, abort, jsonify, Response
from socfakerservice import status, HTMLRenderer, set_renderers
from socfakerservice.model import TokenModel
from socfaker import SocFaker
# Single shared SocFaker instance backing every route in this blueprint.
socfaker = SocFaker()
# Blueprint that the application registers to expose the API endpoints.
api_bp = Blueprint(
'api',
__name__
)
def validate_request(request):
    """Authorize *request* via its 'soc-faker' header token.

    Returns True when the token matches a registered TokenModel document;
    otherwise aborts the request with HTTP 401.
    """
    token = request.headers.get('soc-faker')
    if token and TokenModel.objects(token=token).first():
        return True
    abort(401)
@api_bp.errorhandler(401)
def unauthorized(error):
    """Render the response for requests rejected by validate_request."""
    # NOTE(review): the body is plain text although Content-Type claims
    # JSON — confirm whether clients depend on this before changing it.
    body = 'Unauthorized to access this resource'
    return Response(body, 401, {'Content-Type': 'application/json'})
@api_bp.route("/agent", methods=['GET'])
def socfaker_socfaker_agent():
    """GET /agent — the full fake endpoint-agent object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.agent))


@api_bp.route("/agent/ephermeral_id", methods=['GET'])
def socfaker_agent_ephermeral_id():
    """GET /agent/ephermeral_id — random 8-character hex ID that changes per call."""
    if validate_request(request):
        return {'value': socfaker.agent.ephermeral_id}


@api_bp.route("/agent/id", methods=['GET'])
def socfaker_agent_id():
    """GET /agent/id — static, unique 8-character agent ID."""
    if validate_request(request):
        return {'value': socfaker.agent.id}


@api_bp.route("/agent/name", methods=['GET'])
def socfaker_agent_name():
    """GET /agent/name — a custom agent name."""
    if validate_request(request):
        return {'value': socfaker.agent.name}


@api_bp.route("/agent/type", methods=['GET'])
def socfaker_agent_type():
    """GET /agent/type — a beats agent type (filebeat, winlogbeat, ...)."""
    if validate_request(request):
        return {'value': socfaker.agent.type}


@api_bp.route("/agent/version", methods=['GET'])
def socfaker_agent_version():
    """GET /agent/version — the agent version (currently static 7.8.0)."""
    if validate_request(request):
        return {'value': socfaker.agent.version}
### AGENT ROUTES ###
### ALERT ROUTES ###
@api_bp.route("/alert", methods=['GET'])
def socfaker_socfaker_alert():
    """GET /alert — a fake alert/detection object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.alert))


@api_bp.route("/alert/action", methods=['GET'])
def socfaker_alert_action():
    """GET /alert/action — a random action taken based on the alert."""
    if validate_request(request):
        return {'value': socfaker.alert.action}


@api_bp.route("/alert/direction", methods=['GET'])
def socfaker_alert_direction():
    """GET /alert/direction — network direction of the alert (from or to)."""
    if validate_request(request):
        return {'value': socfaker.alert.direction}


@api_bp.route("/alert/location", methods=['GET'])
def socfaker_alert_location():
    """GET /alert/location — a random country the alert originated from."""
    if validate_request(request):
        return {'value': socfaker.alert.location}


@api_bp.route("/alert/signature_name", methods=['GET'])
def socfaker_alert_signature_name():
    """GET /alert/signature_name — the signature name the alert triggered on."""
    if validate_request(request):
        return {'value': socfaker.alert.signature_name}


@api_bp.route("/alert/status", methods=['GET'])
def socfaker_alert_status():
    """GET /alert/status — whether the alert was successful or unsuccessful."""
    if validate_request(request):
        return {'value': socfaker.alert.status}


@api_bp.route("/alert/summary", methods=['GET'])
def socfaker_alert_summary():
    """GET /alert/summary — summary string (status, action, type, direction, location)."""
    if validate_request(request):
        return {'value': socfaker.alert.summary}


@api_bp.route("/alert/type", methods=['GET'])
def socfaker_alert_type():
    """GET /alert/type — a random alert type."""
    if validate_request(request):
        return {'value': socfaker.alert.type}
### ALERT ROUTES ###
### APPLICATION ROUTES ###
@api_bp.route("/application", methods=['GET'])
def socfaker_socfaker_application():
    """GET /application — a fake application object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.application))


@api_bp.route("/application/account_status", methods=['GET'])
def socfaker_application_account_status():
    """GET /application/account_status — enabled or disabled account status."""
    if validate_request(request):
        return {'value': socfaker.application.account_status}


@api_bp.route("/application/logon_timestamp", methods=['GET'])
def socfaker_application_logon_timestamp():
    """GET /application/logon_timestamp — past ISO 8601 logon timestamp."""
    if validate_request(request):
        return {'value': socfaker.application.logon_timestamp}


@api_bp.route("/application/name", methods=['GET'])
def socfaker_application_name():
    """GET /application/name — a common enterprise application name."""
    if validate_request(request):
        return {'value': socfaker.application.name}


@api_bp.route("/application/status", methods=['GET'])
def socfaker_application_status():
    """GET /application/status — Active, Inactive, or Legacy."""
    if validate_request(request):
        return {'value': socfaker.application.status}
### APPLICATION ROUTES ###
### CLOUD ROUTES ###
@api_bp.route("/cloud", methods=['GET'])
def socfaker_socfaker_cloud():
    """GET /cloud — fake cloud-infrastructure object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.cloud))


@api_bp.route("/cloud/id", methods=['GET'])
def socfaker_cloud_id():
    """GET /cloud/id — a random GUID for a cloud instance."""
    if validate_request(request):
        return {'value': socfaker.cloud.id}


@api_bp.route("/cloud/instance_id", methods=['GET'])
def socfaker_cloud_instance_id():
    """GET /cloud/instance_id — a random hex instance ID."""
    if validate_request(request):
        return {'value': socfaker.cloud.instance_id}


@api_bp.route("/cloud/name", methods=['GET'])
def socfaker_cloud_name():
    """GET /cloud/name — a generated VM/container instance name."""
    if validate_request(request):
        return {'value': socfaker.cloud.name}


@api_bp.route("/cloud/provider", methods=['GET'])
def socfaker_cloud_provider():
    """GET /cloud/provider — aws, azure, gcp, or digitalocean."""
    if validate_request(request):
        return {'value': socfaker.cloud.provider}


@api_bp.route("/cloud/region", methods=['GET'])
def socfaker_cloud_region():
    """GET /cloud/region — the region of a cloud instance."""
    if validate_request(request):
        return {'value': socfaker.cloud.region}


@api_bp.route("/cloud/size", methods=['GET'])
def socfaker_cloud_size():
    """GET /cloud/size — instance size (AWS naming convention)."""
    if validate_request(request):
        return {'value': socfaker.cloud.size}


@api_bp.route("/cloud/zone", methods=['GET'])
def socfaker_cloud_zone():
    """GET /cloud/zone — a generated cloud availability zone."""
    if validate_request(request):
        return {'value': socfaker.cloud.zone}
### CLOUD ROUTES ###
### COMPUTER ROUTES ###
@api_bp.route("/computer", methods=['GET'])
def socfaker_socfaker_computer():
    """GET /computer — fake computer-system object, serialized as a string.

    Fixed: the original returned {'value': socfaker.computer}, handing a
    non-JSON-serializable object to Flask; every other object-level route
    (/agent, /alert, /cloud, ...) uses jsonify(str(...)), so this one now
    does the same.
    """
    if validate_request(request):
        return jsonify(str(socfaker.computer))


@api_bp.route("/computer/architecture", methods=['GET'])
def socfaker_computer_architecture():
    """GET /computer/architecture — x86_64 or x86."""
    if validate_request(request):
        return {'value': socfaker.computer.architecture}


@api_bp.route("/computer/disk", methods=['GET'])
def socfaker_computer_disk():
    """GET /computer/disk — list of B/KB/MB/GB/TB disk sizes."""
    if validate_request(request):
        return {'value': socfaker.computer.disk}


@api_bp.route("/computer/ipv4", methods=['GET'])
def socfaker_computer_ipv4():
    """GET /computer/ipv4 — a random IPv4 address."""
    if validate_request(request):
        return {'value': socfaker.computer.ipv4}


@api_bp.route("/computer/mac_address", methods=['GET'])
def socfaker_computer_mac_address():
    """GET /computer/mac_address — a random MAC address."""
    if validate_request(request):
        return {'value': socfaker.computer.mac_address}


@api_bp.route("/computer/memory", methods=['GET'])
def socfaker_computer_memory():
    """GET /computer/memory — list of B/KB/MB/GB/TB memory sizes."""
    if validate_request(request):
        return {'value': socfaker.computer.memory}


@api_bp.route("/computer/name", methods=['GET'])
def socfaker_computer_name():
    """GET /computer/name — a random computer name."""
    if validate_request(request):
        return {'value': socfaker.computer.name}


@api_bp.route("/computer/os", methods=['GET'])
def socfaker_computer_os():
    """GET /computer/os — a random operating system version string."""
    if validate_request(request):
        return {'value': socfaker.computer.os}


@api_bp.route("/computer/platform", methods=['GET'])
def socfaker_computer_platform():
    """GET /computer/platform — platform type (e.g. workstation, server)."""
    if validate_request(request):
        return {'value': socfaker.computer.platform}
### COMPUTER ROUTES ###
### CONTAINER ROUTES ###
@api_bp.route("/container", methods=['GET'])
def socfaker_socfaker_container():
    """GET /container — a fake container object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.container))


@api_bp.route("/container/id", methods=['GET'])
def socfaker_container_id():
    """GET /container/id — a hex container ID."""
    if validate_request(request):
        return {'value': socfaker.container.id}


@api_bp.route("/container/name", methods=['GET'])
def socfaker_container_name():
    """GET /container/name — a randomly generated container name."""
    if validate_request(request):
        return {'value': socfaker.container.name}


@api_bp.route("/container/runtime", methods=['GET'])
def socfaker_container_runtime():
    """GET /container/runtime — docker or kubernetes."""
    if validate_request(request):
        return {'value': socfaker.container.runtime}


@api_bp.route("/container/tags", methods=['GET'])
def socfaker_container_tags():
    """GET /container/tags — a random list of container tags."""
    if validate_request(request):
        return {'value': socfaker.container.tags}
### CONTAINER ROUTES ###
### DNS ROUTES ###
@api_bp.route("/dns", methods=['GET'])
def socfaker_socfaker_dns():
    """GET /dns — a fake DNS request/response object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.dns))


@api_bp.route("/dns/answers", methods=['GET'])
def socfaker_dns_answers():
    """GET /dns/answers — a random list of DNS answers, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.dns.answers))


@api_bp.route("/dns/direction", methods=['GET'])
def socfaker_dns_direction():
    """GET /dns/direction — direction of a DNS request or response."""
    if validate_request(request):
        return {'value': socfaker.dns.direction}


@api_bp.route("/dns/header_flags", methods=['GET'])
def socfaker_dns_header_flags():
    """GET /dns/header_flags — a randomly selected DNS header flag."""
    if validate_request(request):
        return {'value': socfaker.dns.header_flags}


@api_bp.route("/dns/id", methods=['GET'])
def socfaker_dns_id():
    """GET /dns/id — a random DNS ID in the 10000-100000 range."""
    if validate_request(request):
        return {'value': socfaker.dns.id}


@api_bp.route("/dns/name", methods=['GET'])
def socfaker_dns_name():
    """GET /dns/name — a randomly generated DNS name."""
    if validate_request(request):
        return {'value': socfaker.dns.name}


@api_bp.route("/dns/op_code", methods=['GET'])
def socfaker_dns_op_code():
    """GET /dns/op_code — a random DNS op code."""
    if validate_request(request):
        return {'value': socfaker.dns.op_code}


@api_bp.route("/dns/question", methods=['GET'])
def socfaker_dns_question():
    """GET /dns/question — a random DNS question dict, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.dns.question))


@api_bp.route("/dns/record", methods=['GET'])
def socfaker_dns_record():
    """GET /dns/record — a random record type (A, CNAME, PTR, ...)."""
    if validate_request(request):
        return {'value': socfaker.dns.record}


@api_bp.route("/dns/response_code", methods=['GET'])
def socfaker_dns_response_code():
    """GET /dns/response_code — a random DNS response code."""
    if validate_request(request):
        return {'value': socfaker.dns.response_code}
### DNS ROUTES ###
### EMPLOYEE ROUTES ###
@api_bp.route("/employee", methods=['GET'])
def socfaker_socfaker_employee():
    """GET /employee — a fake employee object, serialized as a string."""
    if validate_request(request):
        return jsonify(str(socfaker.employee))


@api_bp.route("/employee/account_status", methods=['GET'])
def socfaker_employee_account_status():
    """GET /employee/account_status — account status (weighted toward enabled)."""
    if validate_request(request):
        return {'value': socfaker.employee.account_status}


@api_bp.route("/employee/department", methods=['GET'])
def socfaker_employee_department():
    """GET /employee/department — a random employee department."""
    if validate_request(request):
        return {'value': socfaker.employee.department}


@api_bp.route("/employee/dob", methods=['GET'])
def socfaker_employee_dob():
    """GET /employee/dob — an employee's date of birth."""
    if validate_request(request):
        return {'value': socfaker.employee.dob}


@api_bp.route("/employee/email", methods=['GET'])
def socfaker_employee_email():
    """GET /employee/email — an employee's email address."""
    if validate_request(request):
        return {'value': socfaker.employee.email}


@api_bp.route("/employee/first_name", methods=['GET'])
def socfaker_employee_first_name():
    """GET /employee/first_name — an employee's first name."""
    if validate_request(request):
        return {'value': socfaker.employee.first_name}


@api_bp.route("/employee/gender", methods=['GET'])
def socfaker_employee_gender():
    """GET /employee/gender — an employee's gender."""
    if validate_request(request):
        return {'value': socfaker.employee.gender}


@api_bp.route("/employee/language", methods=['GET'])
def socfaker_employee_language():
    """GET /employee/language — an employee's preferred language."""
    if validate_request(request):
        return {'value': socfaker.employee.language}


@api_bp.route("/employee/last_name", methods=['GET'])
def socfaker_employee_last_name():
    """GET /employee/last_name — an employee's last name."""
    if validate_request(request):
        return {'value': socfaker.employee.last_name}


@api_bp.route("/employee/logon_timestamp", methods=['GET'])
def socfaker_employee_logon_timestamp():
    """GET /employee/logon_timestamp — a past ISO 8601 logon timestamp."""
    if validate_request(request):
        return {'value': socfaker.employee.logon_timestamp}


@api_bp.route("/employee/name", methods=['GET'])
def socfaker_employee_name():
    """GET /employee/name — an employee's first and last name."""
    if validate_request(request):
        return {'value': socfaker.employee.name}
@api_bp.route("/employee/phone_number", methods=['GET'])
def socfaker_employee_phone_number():
"""
Phone number of an employee
Returns:
str: Returns a random phone number of an employee
"""
if validate_request(request):
return { 'value': socfaker.employee.phone_number }
@set_renderers(HTMLRenderer)
@api_bp.route("/employee/photo", methods=['GET'])
def socfaker_employee_photo():
    """
    Photo URL of an employee

    Returns:
        str: Returns an HTML page embedding a random employee photo URL
    """
    if validate_request(request):
        # Bug fix: the original markup was '<img src="{...}</h1>' — the src
        # attribute quote was never closed and the <img> tag never ended,
        # producing invalid HTML that browsers could not render.
        return f'<html><body><h1><img src="{socfaker.employee.photo}"></h1></body></html>'
@api_bp.route("/employee/ssn", methods=['GET'])
def socfaker_employee_ssn():
"""
SSN of an employee
Returns:
str: Returns the SSN of an employee
"""
if validate_request(request):
return { 'value': socfaker.employee.ssn }
@api_bp.route("/employee/title", methods=['GET'])
def socfaker_employee_title():
"""
Employee title
Returns:
str: Returns a random employee title
"""
if validate_request(request):
return { 'value': socfaker.employee.title }
@api_bp.route("/employee/user_id", methods=['GET'])
def socfaker_employee_user_id():
"""
User ID of an employee
Returns:
str: Returns a random user ID of an employee
"""
if validate_request(request):
return { 'value': socfaker.employee.user_id }
@api_bp.route("/employee/username", methods=['GET'])
def socfaker_employee_username():
"""
Username of an employee
Returns:
str: Returns the username of an employee
"""
if validate_request(request):
return { 'value': socfaker.employee.username }
### EMPLOYEE ROUTES ###
### FILE ROUTES ###
@api_bp.route("/file", methods=['GET'])
def socfaker_socfaker_file():
"""
A file object
Returns:
File: Returns an object with properties about a fake file object
"""
if validate_request(request):
return jsonify(str(socfaker.file))
@api_bp.route("/file/accessed_timestamp", methods=['GET'])
def socfaker_file_accessed_timestamp():
"""
The last accessed timestamp of a file in the past
Returns:
str: A randomly generated accessed timestamp is ISO 8601 format
"""
if validate_request(request):
return { 'value': socfaker.file.accessed_timestamp }
@api_bp.route("/file/attributes", methods=['GET'])
def socfaker_file_attributes():
"""
Attributes of the file
Returns:
list: A randomly selected list of file attributes
"""
if validate_request(request):
return jsonify(str(socfaker.file.attributes))
@api_bp.route("/file/build_version", methods=['GET'])
def socfaker_file_build_version():
"""
A build version of a file
Returns:
str: Returns the last digit in the version string
"""
if validate_request(request):
return { 'value': socfaker.file.build_version }
@api_bp.route("/file/checksum", methods=['GET'])
def socfaker_file_checksum():
"""
A MD5 checksum of a file
Returns:
str: Returns a MD5 of the file
"""
if validate_request(request):
return { 'value': socfaker.file.checksum }
@api_bp.route("/file/directory", methods=['GET'])
def socfaker_file_directory():
"""
The directory of a file
Returns:
str: The directory of a file
"""
if validate_request(request):
return { 'value': socfaker.file.directory }
@api_bp.route("/file/drive_letter", methods=['GET'])
def socfaker_file_drive_letter():
"""
The drive letter of a file
Returns:
str: A randomly selected drive letter of a file
"""
if validate_request(request):
return { 'value': socfaker.file.drive_letter }
@api_bp.route("/file/extension", methods=['GET'])
def socfaker_file_extension():
"""
The extension of a file
Returns:
str: The extension of a file
"""
if validate_request(request):
return { 'value': socfaker.file.extension }
@api_bp.route("/file/full_path", methods=['GET'])
def socfaker_file_full_path():
"""
The full path of a file
Returns:
str: A randomly selected file name path
"""
if validate_request(request):
return { 'value': socfaker.file.full_path }
@api_bp.route("/file/gid", methods=['GET'])
def socfaker_file_gid():
"""
The GID of a file
Returns:
str: A randomly generated GID of a file
"""
if validate_request(request):
return { 'value': socfaker.file.gid }
@api_bp.route("/file/hashes", methods=['GET'])
def socfaker_file_hashes():
"""
A dict containing MD5, SHA1, and SHA256 hashes
Returns:
str: A randomly generated dict containing MD5, SHA1, and SHA256 hashes
"""
if validate_request(request):
return { 'value': socfaker.file.hashes }
@api_bp.route("/file/install_scope", methods=['GET'])
def socfaker_file_install_scope():
"""
The install scope of a file
Returns:
str: Returns a random install scope of user-local or global for a file
"""
if validate_request(request):
return { 'value': socfaker.file.install_scope }
@api_bp.route("/file/md5", methods=['GET'])
def socfaker_file_md5():
"""
A random generated MD5 hash
Returns:
str: A randomly generated MD5 file hash
"""
if validate_request(request):
return { 'value': socfaker.file.md5 }
@api_bp.route("/file/mime_type", methods=['GET'])
def socfaker_file_mime_type():
"""
The mime type of a file
Returns:
str: A randomly selected file mime type
"""
if validate_request(request):
return { 'value': socfaker.file.mime_type }
@api_bp.route("/file/name", methods=['GET'])
def socfaker_file_name():
"""
The name of a file
Returns:
str: A randomly selected file name
"""
if validate_request(request):
return { 'value': socfaker.file.name }
@api_bp.route("/file/sha1", methods=['GET'])
def socfaker_file_sha1():
"""
A random generated SHA1 hash
Returns:
str: A randomly generated SHA1 file hash
"""
if validate_request(request):
return { 'value': socfaker.file.sha1 }
@api_bp.route("/file/sha256", methods=['GET'])
def socfaker_file_sha256():
"""
A random generated SHA256 hash
Returns:
str: A randomly generated SHA256 file hash
"""
if validate_request(request):
return { 'value': socfaker.file.sha256 }
@api_bp.route("/file/signature", methods=['GET'])
def socfaker_file_signature():
"""
The file signature
Returns:
str: Returns the signature name of Microsoft Windows
"""
if validate_request(request):
return { 'value': socfaker.file.signature }
@api_bp.route("/file/signature_status", methods=['GET'])
def socfaker_file_signature_status():
"""
The signature status of a file
Returns:
str: A randomly selected signature status of Verified, Unknown, or Counterfit
"""
if validate_request(request):
return { 'value': socfaker.file.signature_status }
@api_bp.route("/file/signed", methods=['GET'])
def socfaker_file_signed():
"""
Whether the file is signed or not
Returns:
str: Returns whether a file is signed or not
"""
if validate_request(request):
return { 'value': socfaker.file.signed }
@api_bp.route("/file/size", methods=['GET'])
def socfaker_file_size():
"""
The file size
Returns:
str: A randomly generated file size
"""
if validate_request(request):
return { 'value': socfaker.file.size }
@api_bp.route("/file/timestamp", methods=['GET'])
def socfaker_file_timestamp():
"""
The timestamp of a file in the past
Returns:
str: A randomly generated file timestamp is ISO 8601 format
"""
if validate_request(request):
return { 'value': socfaker.file.timestamp }
@api_bp.route("/file/type", methods=['GET'])
def socfaker_file_type():
"""
The type of a file
Returns:
str: A randomly selected file type
"""
if validate_request(request):
return { 'value': socfaker.file.type }
@api_bp.route("/file/version", methods=['GET'])
def socfaker_file_version():
"""
A random generated file version string
Returns:
str: A randomly generated file version string
"""
if validate_request(request):
return { 'value': socfaker.file.version }
### FILE ROUTES ###
@api_bp.route("/http", methods=['GET'])
def socfaker_socfaker_http():
"""
Data related to HTTP requests and responses
Returns:
HTTP: Returns an object with properties about HTTP requests and responses
"""
if validate_request(request):
return jsonify(str(socfaker.http))
@api_bp.route("/http/bytes", methods=['GET'])
def socfaker_http_bytes():
"""
Random bytes for an HTTP request
Returns:
int: Random bytes for an HTTP request
"""
if validate_request(request):
return { 'value': socfaker.http.bytes }
@api_bp.route("/http/method", methods=['GET'])
def socfaker_http_method():
"""
A randomly selected method for an HTTP request or response
Returns:
str: A randomly selected method for an HTTP request or response
"""
if validate_request(request):
return { 'value': socfaker.http.method }
@api_bp.route("/http/request", methods=['GET'])
def socfaker_http_request():
"""
A randomly generated request dictionary based on Elastic ECS format
Returns:
dict: A random request dictionary containing body, bytes, method and referrer information
"""
if validate_request(request):
return { 'value': socfaker.http.request }
@api_bp.route("/http/response", methods=['GET'])
def socfaker_http_response():
"""
A randomly generated response dictionary based on Elastic ECS format
Returns:
dict: A random response dictionary containing body, bytes, and status code information
"""
if validate_request(request):
return { 'value': socfaker.http.response }
@api_bp.route("/http/status_code", methods=['GET'])
def socfaker_http_status_code():
"""
A randomly selected status_code for an HTTP request or response
Returns:
str: A randomly selected status code for an HTTP request or response
"""
if validate_request(request):
return { 'value': socfaker.http.status_code }
### FILE ROUTES ###
### LOCATION ROUTES ###
@api_bp.route("/location", methods=['GET'])
def socfaker_socfaker_location():
    """
    Fake location data

    Returns:
        Location: Returns an object with properties containing location information
    """
    if not validate_request(request):
        return None
    location_repr = str(socfaker.location)
    return jsonify(location_repr)
@api_bp.route("/location/city", methods=['GET'])
def socfaker_location_city():
"""
A random city
Returns:
str: Returns a random city name
"""
if validate_request(request):
return { 'value': socfaker.location.city }
@api_bp.route("/location/continent", methods=['GET'])
def socfaker_location_continent():
"""
A random continent
Returns:
str: Returns a random continent
"""
if validate_request(request):
return { 'value': socfaker.location.continent }
@api_bp.route("/location/country", methods=['GET'])
def socfaker_location_country():
"""
A random country
Returns:
str: Returns a random country
"""
if validate_request(request):
return { 'value': socfaker.location.country }
@api_bp.route("/location/country_code", methods=['GET'])
def socfaker_location_country_code():
"""
A random country code
Returns:
str: Returns a random country code
"""
if validate_request(request):
return { 'value': socfaker.location.country_code }
@api_bp.route("/location/latitude", methods=['GET'])
def socfaker_location_latitude():
"""
Random Latitude coordinates
Returns:
str: Returns a random latitude coordinates
"""
if validate_request(request):
return { 'value': socfaker.location.latitude }
@api_bp.route("/location/longitude", methods=['GET'])
def socfaker_location_longitude():
"""
Random Longitude coordinates
Returns:
str: Returns a random longitude coordinates
"""
if validate_request(request):
return { 'value': socfaker.location.longitude }
### LOCATION ROUTES ###
### LOGS ROUTES ###
@api_bp.route("/logs/syslog", methods=['POST'])
def socfaker_logs_syslog(type='ransomware', count=1):
    """
    Generate random syslog messages of the requested type and count.

    Args:
        type (str, optional): The type of syslog traffic to generate;
            ransomware traffic is mixed in randomly. Defaults to 'ransomware'.
        count (int, optional): The number of logs to generate. Defaults to 1.

    Returns:
        list: Returns a list of generated syslogs
    """
    if validate_request(request):
        return jsonify(str(socfaker.logs.syslog(type=type, count=count)))
@api_bp.route("/logs/windows/eventlog", methods=['POST'])
def socfaker_windows_eventlog(count=1, computer_name=None, os_version='Windows', json=False):
    """
    Generate fake Windows event logs based on the provided inputs.

    Args:
        count (int, optional): The number of logs to generate. Defaults to 1.
        computer_name (str, optional): A computer name to use when generating logs. Defaults to None.
        os_version (str, optional): The Operating System version to use when generating logs. Defaults to 'Windows'.
        json (bool, optional): Whether or not to return data as JSON or XML. Defaults to False.

    Returns:
        list: Returns a list of generated Windows Event Logs
    """
    if validate_request(request):
        return jsonify(str(socfaker.logs.windows.eventlog(count=count, computer_name=computer_name, os_version=os_version, json=json)))
@api_bp.route("/logs/windows/sysmon", methods=['POST'])
def socfaker_sysmon_get(count=1):
    """
    Returns a list of generated sysmon logs.

    Args:
        count (int, optional): The number of sysmon logs to return. Defaults to 1.

    Returns:
        list: A list of generated sysmon logs
    """
    if validate_request(request):
        return jsonify(str(socfaker.logs.windows.sysmon(count=count)))
### LOGS ROUTES ###
### NETWORK ROUTES ###
@api_bp.route("/network", methods=['GET'])
def socfaker_socfaker_network():
"""
Access common generated network information
Returns:
Network: Returns an object with properties containing general
or common network information
"""
if validate_request(request):
return jsonify(str(socfaker.network))
@api_bp.route("/network/get_cidr_range", methods=['POST'])
def socfaker_network_get_cidr_range(cidr=None):
    """
    Returns an IPv4 range for the supplied CIDR value.

    The route rule defines no URL parameter, so Flask dispatch would raise a
    TypeError for the original required ``cidr`` argument; the value is now
    optional and, when missing, is read from the POST body (JSON key or form
    field ``cidr``). Direct callers passing ``cidr`` are unaffected.

    Args:
        cidr (str, optional): The CIDR value (e.g. '192.168.1.0/24').
            Defaults to None, in which case it is read from the request body.

    Returns:
        str: Returns CIDR range for an IPv4 address.
    """
    if validate_request(request):
        if cidr is None:
            body = request.get_json(silent=True) or {}
            cidr = body.get('cidr') or request.form.get('cidr')
        return jsonify(str(socfaker.network.get_cidr_range(cidr=cidr)))
@api_bp.route("/network/ipv4", methods=['GET'])
def socfaker_network_ipv4():
"""
Returns an IPv4 IP Address
Returns:
str: Returns an IPv4 Address. If private the address will be 10.x.x.x or 172.x.x.x or 192.168.x.x.
"""
if validate_request(request):
return { 'value': socfaker.network.ipv4 }
@api_bp.route("/network/ipv6", methods=['GET'])
def socfaker_network_ipv6():
"""
Returns an IPv6 IP Address
Returns:
dict: Returns a compressed and exploded IPv6 Address.
"""
if validate_request(request):
return { 'value': socfaker.network.ipv6 }
@api_bp.route("/network/netbios", methods=['GET'])
def socfaker_network_netbios():
"""
Returns a netbios name
Returns:
str: Returns a random netbios name
"""
if validate_request(request):
return { 'value': socfaker.network.netbios }
@api_bp.route("/network/port", methods=['GET'])
def socfaker_network_port():
    """
    Returns a dictionary map of a port and it's common name

    Returns:
        dict: A random port and it's common name
    """
    if not validate_request(request):
        return None
    port_map = str(socfaker.network.port)
    return jsonify(port_map)
@api_bp.route("/network/protocol", methods=['GET'])
def socfaker_network_protocol():
"""
Random network protocol
Returns:
dict: Returns a random network protocol and protocol number
"""
if validate_request(request):
return jsonify(str(socfaker.network.protocol))
### NETWORK ROUTES ###
### OPERATING_SYSTEM ROUTES ###
@api_bp.route("/operating_system", methods=['GET'])
def socfaker_socfaker_operating_system():
"""
Fake operating system information
Returns:
OperatingSystem: Returns an object with properties containing
Operating System information
"""
if validate_request(request):
return jsonify(str(socfaker.operating_system))
@api_bp.route("/operating_system/family", methods=['GET'])
def socfaker_operatingsystem_family():
"""
The operating system family
Returns:
str: Returns a random operating system family
"""
if validate_request(request):
return { 'value': socfaker.operating_system.family }
@api_bp.route("/operating_system/fullname", methods=['GET'])
def socfaker_operatingsystem_fullname():
"""
The operating system full name
Returns:
str: Returns a random operating system full name including name, type and version
"""
if validate_request(request):
return { 'value': socfaker.operating_system.fullname }
@api_bp.route("/operating_system/name", methods=['GET'])
def socfaker_operatingsystem_name():
"""
The operating system name
Returns:
str: Returns a random operating system name
"""
if validate_request(request):
return { 'value': socfaker.operating_system.name }
@api_bp.route("/operating_system/version", methods=['GET'])
def socfaker_operatingsystem_version():
"""
The operating system version
Returns:
str: Returns a random operating system version
"""
if validate_request(request):
return { 'value': socfaker.operating_system.version }
### OPERATING_SYSTEM ROUTES ###
### ORGANIZATION ROUTES ###
@api_bp.route("/organization", methods=['GET'])
def socfaker_socfaker_organization():
"""
Fake organization information
Returns:
Organization: Returns an object with properties containing common
organization information
"""
if validate_request(request):
return jsonify(str(socfaker.organization))
@api_bp.route("/organization/division", methods=['GET'])
def socfaker_organization_division():
"""
Returns a division within an organization
Returns:
str: Returns a division within an organization
"""
if validate_request(request):
return { 'value': socfaker.organization.division }
@api_bp.route("/organization/domain", methods=['GET'])
def socfaker_organization_domain():
"""
Returns a domain name based on the organization name
Returns:
str: Returns a domain name based on the organizational name
"""
if validate_request(request):
return { 'value': socfaker.organization.domain }
@api_bp.route("/organization/name", methods=['GET'])
def socfaker_organization_name():
"""
A randomly generated organization name
Returns:
str: A randomly generated organization name
"""
if validate_request(request):
return { 'value': socfaker.organization.name }
@api_bp.route("/organization/title", methods=['GET'])
def socfaker_organization_title():
"""
Returns a title within an organization
Returns:
str: Returns a title within an organization
"""
if validate_request(request):
return { 'value': socfaker.organization.title }
### ORGANIZATION ROUTES ###
### PCAP ROUTES ###
@api_bp.route("/pcap", methods=['POST'])
def socfaker_pcap_generate(count=1, port=9600):
    """
    Generate fake PCAP data.

    Args:
        count (int, optional): The number of PCAP entries to generate. Defaults to 1.
        port (int, optional): Accepted but not forwarded to socfaker.pcap()
            below — NOTE(review): looks vestigial; confirm intended use.

    Returns:
        str: JSON string of the generated PCAP data
    """
    if validate_request(request):
        return jsonify(str(socfaker.pcap(count=count)))
### PCAP ROUTES ###
### REGISTRY ROUTES ###
@api_bp.route("/registry", methods=['GET'])
def socfaker_socfaker_registry():
"""
Fake registry information
Returns:
Registry: Returns an object with properties containing
common Windows registry information
"""
if validate_request(request):
return jsonify(str(socfaker.registry))
@api_bp.route("/registry/hive", methods=['GET'])
def socfaker_registry_hive():
"""
A random registry hive
Returns:
str: Returns a random registry hive
"""
if validate_request(request):
return { 'value': socfaker.registry.hive }
@api_bp.route("/registry/key", methods=['GET'])
def socfaker_registry_key():
"""
A random registry key
Returns:
str: Returns a random registry key
"""
if validate_request(request):
return { 'value': socfaker.registry.key }
@api_bp.route("/registry/path", methods=['GET'])
def socfaker_registry_path():
"""
A full registry path
Returns:
str: Returns a random full registry path
"""
if validate_request(request):
return { 'value': socfaker.registry.path }
@api_bp.route("/registry/root", methods=['GET'])
def socfaker_registry_root():
"""
A random registry root path string
Returns:
str: Returns a random registry root path string
"""
if validate_request(request):
return { 'value': socfaker.registry.root }
@api_bp.route("/registry/type", methods=['GET'])
def socfaker_registry_type():
"""
A random registry key type
Returns:
str: A random registry key type
"""
if validate_request(request):
return { 'value': socfaker.registry.type }
@api_bp.route("/registry/value", methods=['GET'])
def socfaker_registry_value():
"""
A random registry key value
Returns:
str: A random registry key value
"""
if validate_request(request):
return { 'value': socfaker.registry.value }
### REGISTRY ROUTES ###
### TIMESTAMP ROUTES ###
@api_bp.route("/timestamp", methods=['GET'])
def socfaker_socfaker_timestamp():
"""
Fake timestamp information
Returns:
Timestamp: Returns an object with methods to generate fake
timestamps
"""
if validate_request(request):
return jsonify(str(socfaker.timestamp))
@api_bp.route("/timestamp/date_string", methods=['POST'])
def socfaker_timestamp_date_string(years=81, months=5, days=162):
"""
Returns a date string
Args:
years ([type], optional): The number of years subtracted from the current time. Defaults to random.randint(18,85).
months ([type], optional): The number of months subtracted from the current time. Defaults to random.randint(1,12).
days ([type], optional): The number of days subtracted from the current time. Defaults to random.randint(1,365).
Returns:
str: An date string for the generated timestamp
"""
if validate_request(request):
return {'value': socfaker.timestamp.date_string(years=years, months=months, days=days)}
@api_bp.route("/timestamp/in_the_future", methods=['POST'])
def socfaker_timestamp_in_the_future(years=0, months=0, days=4, hours=13, minutes=25, seconds=3):
"""
Generates a timestamp in the future
Args:
years (int, optional): The number of years to add from the current time. Defaults to 0.
months ([type], optional): The number of months to add from the current time. Defaults to random.randint(0,3).
days ([type], optional): The number of days to add from the current time. Defaults to random.randint(1,15).
hours ([type], optional): The number of hours to add from the current time. Defaults to random.randint(1,24).
minutes ([type], optional): The number of minutes to add from the current time. Defaults to random.randint(1,60).
seconds ([type], optional): The number of seconds to add from the current time. Defaults to random.randint(1,60).
Returns:
str: Returns an ISO 8601 timestamp string
"""
if validate_request(request):
return {'value': socfaker.timestamp.in_the_future(years=years, months=months, days=days, hours=hours, minutes=minutes, seconds=seconds)}
@api_bp.route("/timestamp/in_the_past", methods=['POST'])
def socfaker_timestamp_in_the_past(years=0, months=2, days=6, hours=19, minutes=37, seconds=5):
"""
Generates a timestamp in the past
Args:
years (int, optional): The number of years to subtract from the current time. Defaults to 0.
months ([type], optional): The number of months to subtract from the current time. Defaults to random.randint(0,3).
days ([type], optional): The number of days to subtract from the current time. Defaults to random.randint(1,15).
hours ([type], optional): The number of hours to subtract from the current time. Defaults to random.randint(1,24).
minutes ([type], optional): The number of minutes to subtract from the current time. Defaults to random.randint(1,60).
seconds ([type], optional): The number of seconds to subtract from the current time. Defaults to random.randint(1,60).
Returns:
str: Returns an ISO 8601 timestamp string
"""
if validate_request(request):
return {'value': socfaker.timestamp.in_the_past(years=years, months=months, days=days, hours=hours, minutes=minutes, seconds=seconds)}
@api_bp.route("/timestamp/current", methods=['GET'])
def socfaker_timestamp_current():
    """
    The current timestamp

    Returns:
        str: Returns the current timestamp in ISO 8601 format
    """
    if not validate_request(request):
        return None
    return dict(value=socfaker.timestamp.current)
### TIMESTAMP ROUTES ###
### USER_AGENT ROUTES ###
@api_bp.route("/user_agent", methods=['GET'])
def socfaker_socfaker_user_agent():
    """
    Fake user agent information

    Returns:
        UserAgent: Returns an object with methods to generate fake
        user agent strings
    """
    if not validate_request(request):
        return None
    agent_repr = str(socfaker.user_agent)
    return jsonify(agent_repr)
### USER_AGENT ROUTES ###
### VULNERABILITY ROUTES ###
@api_bp.route("/vulnerability/critical", methods=['GET'])
def socfaker_vulnerability_critical():
"""
Returns a list of critical vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of critical vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().critical))
@api_bp.route("/vulnerability/data", methods=['GET'])
def socfaker_vulnerability_data():
"""
Returns all vulnerability data
Returns:
json: Returns json of all vulnerability data
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().data))
@api_bp.route("/vulnerability/high", methods=['GET'])
def socfaker_vulnerability_high():
"""
Returns a list of high vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of high vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().high))
@api_bp.route("/vulnerability/informational", methods=['GET'])
def socfaker_vulnerability_informational():
"""
Returns a list of informational vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of informational vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().informational))
@api_bp.route("/vulnerability/low", methods=['GET'])
def socfaker_vulnerability_low():
"""
Returns a list of low vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of low vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().low))
@api_bp.route("/vulnerability/medium", methods=['GET'])
def socfaker_vulnerability_medium():
"""
Returns a list of medium vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of medium vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().medium))
@api_bp.route("/vulnerability/host", methods=['GET'])
def socfaker_vulnerability_host():
"""
Retrieve information about hosts found in a vulnerability scan
Returns:
VulnerabilityHost: Returns an object with properties for a vulnerable host
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host))
@api_bp.route("/vulnerability/host/checks_considered", methods=['GET'])
def socfaker_vulnerabilityhost_checks_considered():
"""
A count of how many vulnerability checks were considered for a host
Returns:
int: Returns a randomly integer for checks considered during a vulnerability scan
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().host.checks_considered }
@api_bp.route("/vulnerability/host/critical", methods=['GET'])
def socfaker_vulnerabilityhost_critical():
"""
Returns a list of critical vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of critical vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.critical))
@api_bp.route("/vulnerability/host/data", methods=['GET'])
def socfaker_vulnerabilityhost_data():
"""
Returns all vulnerability data
Returns:
json: Returns json of all vulnerability data
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.data))
@api_bp.route("/vulnerability/host/fqdn", methods=['GET'])
def socfaker_vulnerabilityhost_fqdn():
"""
A host FQDN
Returns:
str: Returns a randomly generated DNS name
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().host.fqdn }
@api_bp.route("/vulnerability/host/high", methods=['GET'])
def socfaker_vulnerabilityhost_high():
"""
Returns a list of high vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of high vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.high))
@api_bp.route("/vulnerability/host/host", methods=['GET'])
def socfaker_vulnerabilityhost_host():
"""
Retrieve information about hosts found in a vulnerability scan
Returns:
VulnerabilityHost: Returns an object with properties for a vulnerable host
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.host))
@api_bp.route("/vulnerability/host/host_id", methods=['GET'])
def socfaker_vulnerabilityhost_host_id():
"""
Returns a random host ID
Returns:
int: Returns a random host ID
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().host.host_id }
@api_bp.route("/vulnerability/host/informational", methods=['GET'])
def socfaker_vulnerabilityhost_informational():
"""
Returns a list of informational vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of informational vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.informational))
@api_bp.route("/vulnerability/host/low", methods=['GET'])
def socfaker_vulnerabilityhost_low():
"""
Returns a list of low vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of low vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.low))
@api_bp.route("/vulnerability/host/mac_address", methods=['GET'])
def socfaker_vulnerabilityhost_mac_address():
"""
A host MAC Address
Returns:
str: Returns a randomly generated MAC Address
"""
if validate_request(request):
return {'value': socfaker.vulnerability().host.mac_address}
@api_bp.route("/vulnerability/host/medium", methods=['GET'])
def socfaker_vulnerabilityhost_medium():
"""
Returns a list of medium vulnerabilities based on counts provided when instantiating the class
Returns:
list: Returns a list of medium vulnerabilities
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.medium))
@api_bp.route("/vulnerability/host/name", methods=['GET'])
def socfaker_vulnerabilityhost_name():
"""
Returns a computer name
Returns:
str: Returns a randomly generated computer name
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().host.name }
@api_bp.route("/vulnerability/host/percentage", methods=['GET'])
def socfaker_vulnerabilityhost_percentage():
"""
Returns a percentage of vulnerabilities found on a host
Returns:
dict: Returns a percentage of vulnerabilities found on a host
"""
if validate_request(request):
return {'value': socfaker.vulnerability().host.percentage}
@api_bp.route("/vulnerability/host/scan", methods=['GET'])
def socfaker_vulnerabilityhost_scan():
"""
A vulnerability scan
Returns:
VulnerabilityScan: Returns a vulnerability scan object with properties related a vulnerability scan
"""
if validate_request(request):
return jsonify(str(socfaker.vulnerability().host.scan))
@api_bp.route("/vulnerability/host/total_score", methods=['GET'])
def socfaker_vulnerabilityhost_total_score():
"""
The total score of a host during a vulnerability scan
Returns:
int: The total score for a host during a vulnerability scan
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().host.total_score }
@api_bp.route("/vulnerability/scan", methods=['POST'])
def socfaker_vulnerability_scan(host_count=1, critical=1, high=1, medium=1, low=1, informational=1):
    """Generate a fake vulnerability scan.

    Flask never fills a view function's keyword defaults from the request,
    so the original always used the defaults regardless of what the client
    POSTed.  The defaults are kept for backward compatibility, but a JSON
    body such as ``{"host_count": 3, "critical": 2}`` now overrides them.

    Returns:
        Response: JSON string of the generated vulnerability scan.
    """
    if validate_request(request):
        body = request.get_json(silent=True) or {}
        return jsonify(str(socfaker.vulnerability(
            host_count=body.get('host_count', host_count),
            critical=body.get('critical', critical),
            high=body.get('high', high),
            medium=body.get('medium', medium),
            low=body.get('low', low),
            informational=body.get('informational', informational)).scan))
@api_bp.route("/vulnerability/scan/end_time", methods=['GET'])
def socfaker_vulnerabilityscan_end_time():
"""
End time of a vulnerability scan
Returns:
str: The end time of a vulnerability scan in the future
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.end_time }
@api_bp.route("/vulnerability/scan/host_count", methods=['GET'])
def socfaker_vulnerabilityscan_host_count():
"""
A vulnerability scan host count
Returns:
int: The provided vulnerability scan host count
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.host_count }
@api_bp.route("/vulnerability/scan/id", methods=['GET'])
def socfaker_vulnerabilityscan_id():
"""
A vulnerability scan ID
Returns:
int: Returns a random vulnerability scan ID
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.id }
@api_bp.route("/vulnerability/scan/ip_list", methods=['GET'])
def socfaker_vulnerabilityscan_ip_list():
"""
A list of host IPs during a Vulnerability scan
Returns:
list: A randomly generated list of host IPs during a vulnerability scan
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.ip_list }
@api_bp.route("/vulnerability/scan/name", methods=['GET'])
def socfaker_vulnerabilityscan_name():
"""
A vulnerability scan name
Returns:
str: A randomly selected vulnerability scan name
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.name }
@api_bp.route("/vulnerability/scan/scan_uuid", methods=['GET'])
def socfaker_vulnerabilityscan_scan_uuid():
"""
A vulnerability scan UUID
Returns:
str: A random UUID for a vulnerability scan
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.scan_uuid }
@api_bp.route("/vulnerability/scan/scanner_name", methods=['GET'])
def socfaker_vulnerabilityscan_scanner_name():
"""
A vulnerability scaner name
Returns:
str: Returns a random vulnerability scanner name
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.scanner_name }
@api_bp.route("/vulnerability/scan/scanner_uuid", methods=['GET'])
def socfaker_vulnerabilityscan_scanner_uuid():
"""
A vulnerability scanner UUID
Returns:
str: A random UUID for a scanner
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.scanner_uuid }
@api_bp.route("/vulnerability/scan/start_time", methods=['GET'])
def socfaker_vulnerabilityscan_start_time():
"""
Start time of a vulnerability scan
Returns:
str: The start time of a vulnerability scan in the past
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.start_time }
@api_bp.route("/vulnerability/scan/status", methods=['GET'])
def socfaker_vulnerabilityscan_status():
"""
Vulnerability scan status
Returns:
str: A randomly selected scan status
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.status }
@api_bp.route("/vulnerability/scan/type", methods=['GET'])
def socfaker_vulnerabilityscan_type():
"""
The vulnerability scan type
Returns:
str: A randomly selected vulnerability scan type
"""
if validate_request(request):
return { 'value': socfaker.vulnerability().scan.type }
### VULNERABILITY ROUTES ###
### WORDS ROUTES ###
@api_bp.route("/words", methods=['GET'])
def socfaker_socfaker_words():
    """Return the socfaker fake-word generator.

    Returns:
        dict: ``{'value': socfaker.words}``.
        NOTE(review): unlike sibling routes this does not wrap the value in
        ``jsonify(str(...))`` -- confirm ``socfaker.words`` is actually
        JSON-serializable when returned from a Flask view.
    """
    if validate_request(request):
        return {'value': socfaker.words }
### WORDS ROUTES ###
### PRODUCT ROUTES ###
### PRODUCTS - AZURE - VM - DETAILS ###
@api_bp.route("/products/azure/details", methods=['GET'])
def socfaker_products_azure():
"""
Azure class contains properties related to Azure products
Returns:
Azure: Microsoft Azure object containing properties and methods for generating data about Microsoft Azure products and services
"""
if validate_request(request):
return jsonify(str(socfaker.products.azure.vm.details))
@api_bp.route("/products/azure/vm/details/location", methods=['GET'])
def socfaker_azureproperties_location():
"""
A location based on Microsoft Azure available locations
Returns:
str: Returns a Azure location
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.location }
@api_bp.route("/products/azure/vm/details/network_zone", methods=['GET'])
def socfaker_azureproperties_network_zone():
"""
Network zone type in Microsoft Azure
Returns:
str: Returns a random type for a network zone in Azure
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.network_zone }
@api_bp.route("/products/azure/vm/details/resource_group_id", methods=['GET'])
def socfaker_azureproperties_resource_group_id():
"""
Resource Group ID
Returns:
str: Returns a random resource group ID (GUID)
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.resource_group_id }
@api_bp.route("/products/azure/vm/details/resource_group_name", methods=['GET'])
def socfaker_azureproperties_resource_group_name():
"""
Resource Group Name in Azure
Returns:
str: Returns a three-word Resource Group name for Microsoft Azure
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.resource_group_name }
@api_bp.route("/products/azure/vm/details/score", methods=['GET'])
def socfaker_azureproperties_score():
"""
None
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.score }
@api_bp.route("/products/azure/vm/details/vm_name", methods=['GET'])
def socfaker_azureproperties_vm_name():
"""
A Azure VM Name
Returns:
str: Returns a random Azure VM name
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.details.vm_name }
### PRODUCTS - AZURE - VM - DETAILS ###
### PRODUCTS - AZURE - VM - METRICS ###
@api_bp.route("/products/azure/vm/metrics", methods=['POST'])
def socfaker_azurevmmetrics_generate():
"""
Returns a list of dicts containing Azure VM Metrics
Returns:
list: A list of dicts containing metrics for an Azure VM
"""
if validate_request(request):
return jsonify(str(socfaker.products.azure.vm.metrics.generate()))
@api_bp.route("/products/azure/vm/metrics/average", methods=['GET'])
def socfaker_azurevmmetrics_average():
"""
None
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.metrics.average }
@api_bp.route("/products/azure/vm/metrics/graphs", methods=['GET'])
def socfaker_azurevmmetrics_graphs():
"""
None
"""
if validate_request(request):
return { 'value': socfaker.products.azure.vm.metrics.graphs }
### PRODUCTS - AZURE - VM - METRICS ###
### PRODUCTS - AZURE - VM - TOPOLOGY ###
@api_bp.route("/products/azure/vm/topology", methods=['GET'])
def socfaker_azurevmtopology_get():
"""
None
"""
if validate_request(request):
return jsonify(str(socfaker.products.azure.vm.topology))
### PRODUCTS - AZURE - VM - TOPOLOGY ###
### PRODUCTS - ELASTIC ###
@api_bp.route("/products/elastic", methods=['GET'])
def socfaker_products_elastic():
"""
Elastic class contains properties related to Elastic products
Returns:
Elastic: Elastic object containing properties and methods for generating data about Elastic products and services
"""
if validate_request(request):
return { 'value': socfaker.products.elastic }
@api_bp.route("/products/elastic/document", methods=['POST'])
def socfaker_elasticecs_get(count=1):
    """Generate one or more Elastic Common Schema documents.

    Flask never fills a view function's keyword defaults from the request,
    so ``count`` was previously always 1.  The default is kept for backward
    compatibility, but a JSON body ``{"count": N}`` now overrides it.

    Args:
        count (int, optional): The number of documents you want
            generated. Defaults to 1.

    Returns:
        Response: JSON string of a list of ECS document dictionaries.
    """
    if validate_request(request):
        body = request.get_json(silent=True) or {}
        count = body.get('count', count)
        return jsonify(str(socfaker.products.elastic.document.get(count=count)))
@api_bp.route("/products/elastic/document/fields", methods=['GET'])
def socfaker_elasticecs_fields():
"""
None
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields))
@api_bp.route("/products/elastic/document/fields/agent", methods=['GET'])
def socfaker_elasticecsfields_agent():
"""
Returns an ECS agent dictionary
Returns:
dict: Returns a dictionary of agent
fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.agent))
@api_bp.route("/products/elastic/document/fields/base", methods=['GET'])
def socfaker_elasticecsfields_base():
"""
Returns an ECS base fields dictionary
Returns:
dict: Returns a dictionary of ECS base
fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.base))
@api_bp.route("/products/elastic/document/fields/client", methods=['GET'])
def socfaker_elasticecsfields_client():
"""
Returns an ECS Client dictionary
Returns:
dict: Returns a dictionary of ECS
client fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.client))
@api_bp.route("/products/elastic/document/fields/cloud", methods=['GET'])
def socfaker_elasticecsfields_cloud():
"""
Returns an ECS Cloud dictionary
Returns:
dict: Returns a dictionary of ECS
Cloud fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.cloud))
@api_bp.route("/products/elastic/document/fields/code_signature", methods=['GET'])
def socfaker_elasticecsfields_code_signature():
"""
Returns an ECS Code Signature dictionary
Returns:
dict: Returns a dictionary of ECS
Code Signature fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.code_signature))
@api_bp.route("/products/elastic/document/fields/container", methods=['GET'])
def socfaker_elasticecsfields_container():
"""
Returns an ECS container dictionary
Returns:
dict: Returns a dictionary of ECS
container fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.container))
@api_bp.route("/products/elastic/document/fields/destination", methods=['GET'])
def socfaker_elasticecsfields_destination():
"""
Returns an ECS destination dictionary
Returns:
dict: Returns a dictionary of ECS
destination fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.destination))
@api_bp.route("/products/elastic/document/fields/dll", methods=['GET'])
def socfaker_elasticecsfields_dll():
"""
Returns an ECS DLL dictionary
Returns:
dict: Returns a dictionary of ECS
DLL fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.dll))
@api_bp.route("/products/elastic/document/fields/dns", methods=['GET'])
def socfaker_elasticecsfields_dns():
"""
Returns an ECS DNS dictionary
Returns:
dict: Returns a dictionary of ECS
DNS fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.dns))
@api_bp.route("/products/elastic/document/fields/event", methods=['GET'])
def socfaker_elasticecsfields_event():
"""
Returns an ECS Event dictionary
Returns:
dict: Returns a dictionary of ECS
Event fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.event))
@api_bp.route("/products/elastic/document/fields/file", methods=['GET'])
def socfaker_elasticecsfields_file():
"""
Returns an ECS file dictionary
Returns:
dict: Returns a dictionary of ECS
file fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.file))
@api_bp.route("/products/elastic/document/fields/host", methods=['GET'])
def socfaker_elasticecsfields_host():
"""
Returns an ECS host dictionary
Returns:
dict: Returns a dictionary of ECS
host fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.host))
@api_bp.route("/products/elastic/document/fields/http", methods=['GET'])
def socfaker_elasticecsfields_http():
"""
Returns an ECS HTTP dictionary
Returns:
dict: Returns a dictionary of ECS
HTTP fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.http))
@api_bp.route("/products/elastic/document/fields/network", methods=['GET'])
def socfaker_elasticecsfields_network():
"""
Returns an ECS network dictionary
Returns:
dict: Returns a dictionary of ECS
network fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.network))
@api_bp.route("/products/elastic/document/fields/organization", methods=['GET'])
def socfaker_elasticecsfields_organization():
"""
Returns an ECS Organization dictionary
Returns:
dict: Returns a dictionary of ECS
organization fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.organization))
@api_bp.route("/products/elastic/document/fields/package", methods=['GET'])
def socfaker_elasticecsfields_package():
"""
Returns an ECS package dictionary
Returns:
dict: Returns a dictionary of ECS
package fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.package))
@api_bp.route("/products/elastic/document/fields/registry", methods=['GET'])
def socfaker_elasticecsfields_registry():
"""
Returns an ECS Windows Registry dictionary
Returns:
dict: Returns a dictionary of ECS
Windows Registry fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.registry))
@api_bp.route("/products/elastic/document/fields/server", methods=['GET'])
def socfaker_elasticecsfields_server():
"""
Returns an ECS server dictionary
Returns:
dict: Returns a dictionary of ECS
server fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.server))
@api_bp.route("/products/elastic/document/fields/source", methods=['GET'])
def socfaker_elasticecsfields_source():
"""
Returns an ECS source dictionary
Returns:
dict: Returns a dictionary of ECS
source fields/properties
"""
if validate_request(request):
return jsonify(str(socfaker.products.elastic.document.fields.source))
@api_bp.route("/products/elastic/hits", methods=['POST'])
def socfaker_elastic_hits(count=10):
    """Return a provided count of generated / fake Elasticsearch query hits.

    Flask never fills a view function's keyword defaults from the request,
    so ``count`` was previously always 10.  The default is kept for backward
    compatibility, but a JSON body ``{"count": N}`` now overrides it.

    Args:
        count (int, optional): The number of Elasticsearch query hits
            returned in a list. Defaults to 10.

    Returns:
        Response: JSON string of a list of Elasticsearch query hits.
    """
    if validate_request(request):
        body = request.get_json(silent=True) or {}
        count = body.get('count', count)
        return jsonify(str(socfaker.products.elastic.hits(count=count)))
### PRODUCTS - ELASTIC ###
### PRODUCTS - QUALYSGUARD ###
@api_bp.route("/products/qualysguard/scan", methods=['POST'])
def socfaker_qualysguard_scan(count=1, host_count=1):
    """Retrieve 1 or more QualysGuard VM scans for 1 or more hosts.

    Flask never fills a view function's keyword defaults from the request,
    so both counts were previously always 1.  Defaults are kept for backward
    compatibility, but a JSON body ``{"count": N, "host_count": M}`` now
    overrides them.

    Args:
        count (int, optional): The number of scans to return. Defaults to 1.
        host_count (int, optional): The number of hosts within a scan. Defaults to 1.

    Returns:
        Response: JSON string of a list of scans based on the provided inputs.
    """
    if validate_request(request):
        body = request.get_json(silent=True) or {}
        return jsonify(str(socfaker.products.qualysguard.scan(
            count=body.get('count', count),
            host_count=body.get('host_count', host_count))))
### PRODUCTS - QUALYSGUARD ###
### PRODUCTS - SERVICENOW ###
@api_bp.route("/products/servicenow/search", methods=['POST'])
def socfaker_servicenow_search(random_keyword=None):
    """Generate a fake response from a ServiceNow Incident Search.

    Flask never fills a view function's keyword defaults from the request,
    so ``random_keyword`` was previously always ``None``.  The default is
    kept for backward compatibility, but a JSON body
    ``{"random_keyword": "..."}`` now overrides it.

    Args:
        random_keyword (str, optional): Adds a random keyword string you
            provide to fields within the generated response object.
            Defaults to None.

    Returns:
        Response: JSON string of a ServiceNow Incident Search response object.
    """
    if validate_request(request):
        body = request.get_json(silent=True) or {}
        random_keyword = body.get('random_keyword', random_keyword)
        return jsonify(str(socfaker.products.servicenow.search(random_keyword=random_keyword)))
| 28.349609 | 162 | 0.639623 |
a37355a19aa8f440bb3300c6b512a843d8e672aa | 3,494 | py | Python | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 28 | 2019-06-18T15:56:53.000Z | 2021-11-09T13:11:13.000Z | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 2 | 2018-10-24T01:09:56.000Z | 2018-11-08T07:13:48.000Z | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 8 | 2019-01-11T01:12:15.000Z | 2021-03-12T10:15:43.000Z | # coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from jdit.trainer.single.classification import ClassificationTrainer
from jdit.model import Model
from jdit.optimizer import Optimizer
from jdit.dataset import FashionMNIST
from jdit.parallel import SupParallelTrainer
class SimpleModel(nn.Module):
    """Small all-convolutional classifier.

    Four stride/padding-configured conv layers with ReLU feed a final 4x4
    conv that maps straight to class logits; the spatial dimension must
    collapse to 1x1 at the last layer (e.g. 32x32 single-channel input).

    Args:
        depth (int): channel width of the first conv layer; later layers
            scale it by 2x, 4x and 8x.
        num_class (int): number of output classes.
    """
    def __init__(self, depth=64, num_class=10):
        super(SimpleModel, self).__init__()
        self.num_class = num_class
        # Attribute names are kept so existing checkpoints still load.
        self.layer1 = nn.Conv2d(1, depth, kernel_size=3, stride=1, padding=1)
        self.layer2 = nn.Conv2d(depth, depth * 2, kernel_size=4, stride=2, padding=1)
        self.layer3 = nn.Conv2d(depth * 2, depth * 4, kernel_size=4, stride=2, padding=1)
        self.layer4 = nn.Conv2d(depth * 4, depth * 8, kernel_size=4, stride=2, padding=1)
        self.layer5 = nn.Conv2d(depth * 8, num_class, kernel_size=4, stride=1, padding=0)
    def forward(self, x):
        """Return class logits of shape ``(batch, num_class)``."""
        hidden = x
        for conv in (self.layer1, self.layer2, self.layer3, self.layer4):
            hidden = F.relu(conv(hidden))
        logits = self.layer5(hidden)
        return logits.view(-1, self.num_class)
class FashionClassTrainer(ClassificationTrainer):
    """Fashion-MNIST classification trainer built on jdit's ClassificationTrainer."""
    def __init__(self, logdir, nepochs, gpu_ids, net, opt, dataset, num_class):
        super(FashionClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, dataset, num_class)
    def compute_loss(self):
        """Cross-entropy loss plus batch accuracy for the current batch.

        Returns:
            tuple: ``(loss, watched)`` where ``watched`` maps "CEP" to the
            cross-entropy loss tensor and "ACC" to the batch accuracy.
        """
        labels = self.ground_truth.squeeze().long()
        loss = nn.CrossEntropyLoss()(self.output, labels)
        # argmax over the class dimension -> predicted class per sample
        _, predictions = torch.max(self.output.detach(), 1)
        batch_size = predictions.size(0) * 1.0
        n_correct = predictions.eq(labels).cpu().sum().float()
        watched = {"CEP": loss, "ACC": n_correct / batch_size}
        return loss, watched
    def compute_valid(self):
        """Validation metrics: reuse the training metrics unchanged."""
        _, watched = self.compute_loss()
        return watched
def build_task_trainer(unfixed_params):
    """Build one FashionClassTrainer task from a dict of unfixed params.

    Only ``logdir``, ``gpu_ids_abs``, ``depth`` and ``lr`` vary between
    tasks; every other hyper-parameter is fixed inline below.

    Args:
        unfixed_params (dict): per-task parameters.

    Returns:
        FashionClassTrainer: a trainer ready to run.
    """
    logdir = unfixed_params['logdir']
    gpu_ids_abs = unfixed_params["gpu_ids_abs"]
    depth = unfixed_params["depth"]
    lr = unfixed_params["lr"]
    torch.backends.cudnn.benchmark = True
    # Fixed hyper-parameters shared by every task.
    mnist = FashionMNIST(root="datasets/fashion_data", batch_size=32, num_workers=2)
    net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method="kaiming", verbose=False)
    opt = Optimizer(net.parameters(), "RMSprop", 0.94, 1, position_type="epoch",
                    lr=lr, weight_decay=2e-5, momentum=0)
    return FashionClassTrainer(logdir, 100, gpu_ids_abs, net, opt, mnist, 10)
def trainerParallel():
    """Assemble the parallel trainer over four (task_id, depth, lr) combos.

    Returns:
        SupParallelTrainer: parallel trainer driving build_task_trainer.
    """
    combos = [
        (1, 4, 1e-3),
        (1, 8, 1e-2),
        (2, 4, 1e-2),
        (2, 8, 1e-3),
    ]
    unfixed_params = [
        {'task_id': task_id, 'gpu_ids_abs': [], 'depth': depth, 'lr': lr}
        for task_id, depth, lr in combos
    ]
    return SupParallelTrainer(unfixed_params, build_task_trainer)
def start_fashionClassPrarallelTrainer(run_type="debug"):
    """Entry point: build the parallel trainer and run all tasks.

    Args:
        run_type (str): kept for interface compatibility; currently unused.
    """
    trainerParallel().train()
if __name__ == '__main__':
start_fashionClassPrarallelTrainer()
| 32.351852 | 105 | 0.634516 |
a3741285d787f895d330b0560df8e8e215121b8a | 1,701 | py | Python | examples/plotter/lego.py | adamfazzari/pylgbst | 1ae2b761a9da3d8983a8d3cd9dd3dfd7a0b7540b | [
"MIT"
] | 468 | 2017-09-19T16:59:55.000Z | 2022-03-09T11:52:02.000Z | examples/plotter/lego.py | adamfazzari/pylgbst | 1ae2b761a9da3d8983a8d3cd9dd3dfd7a0b7540b | [
"MIT"
] | 110 | 2017-12-28T14:53:17.000Z | 2022-02-20T12:29:46.000Z | examples/plotter/lego.py | adamfazzari/pylgbst | 1ae2b761a9da3d8983a8d3cd9dd3dfd7a0b7540b | [
"MIT"
] | 120 | 2017-10-09T20:16:52.000Z | 2022-03-29T13:14:28.000Z | def lego(plotter, t):
h = t * 5.0
w = t * 3.0
plotter.move(-t * 2.0, 0)
l(h, plotter, t, w)
plotter.move(0, w + t)
e(h, plotter, t, w)
plotter.move(0, w + t)
g(plotter, t)
plotter.move(0, w + t)
o(plotter, t)
def o(plotter, t):
    """Draw the letter O: outer octagonal outline, then the inner hole."""
    outer = [
        (3 * t, 0), (t, t), (0, t), (-t, t),
        (-3 * t, 0), (-t, -t), (0, -t), (t, -t),
    ]
    inner = [(3 * t, 0), (0, t), (-3 * t, 0), (0, -t)]
    plotter.move(t, 0)
    for dx, dy in outer:
        plotter.line(dx, dy)
    plotter.move(0, t)
    for dx, dy in inner:
        plotter.line(dx, dy)
def g(plotter, t):
    """Draw the letter G as one continuous relative-move path.

    The exact order of the segments IS the drawing, so the literal sequence
    below is left untouched.  The fractional steps (0.25/0.75/1.25 * t)
    carve the short inner bar of the G.
    """
    # G
    plotter.move(t, 0)
    plotter.line(3 * t, 0)
    plotter.line(t, t)
    plotter.line(0, t)
    plotter.line(-t, t)
    plotter.line(-t, 0)
    plotter.line(0, -t)
    plotter.line(t, 0)
    plotter.line(0, -t)
    plotter.line(-3 * t, 0)
    plotter.line(0, t)
    # Inner bar of the G, drawn with quarter-step offsets.
    plotter.line(t * 0.25, 0)
    plotter.line(0, -t * 0.25)
    plotter.line(t * 0.75, 0)
    plotter.line(0, t * 1.25)
    plotter.line(-3 * t, 0)
    plotter.line(0, -t)
    plotter.line(t, 0)
    plotter.line(0, -t)
    plotter.line(t, -t)
    plotter.move(-t, 0)
def e(h, plotter, t, w):
    """Draw the letter E as a closed path of relative line segments."""
    path = [
        (h, 0), (0, w), (-t, 0), (0, -2 * t), (-t, 0), (0, t),
        (-t, 0), (0, -t), (-t, 0), (0, 2 * t), (-t, 0), (0, -w),
    ]
    for dx, dy in path:
        plotter.line(dx, dy)
def l(h, plotter, t, w):
    """Draw the letter L as a closed path of relative line segments."""
    path = [(h, 0), (0, t), (t - h, 0), (0, 2 * t), (-t, 0), (0, -w)]
    for dx, dy in path:
        plotter.line(dx, dy)
| 20.743902 | 30 | 0.522046 |
a37644a1e11006bb540b7235f3216f75efbca584 | 5,711 | py | Python | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | 1 | 2022-01-13T21:46:40.000Z | 2022-01-13T21:46:40.000Z | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | null | null | null | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | null | null | null | #Initialize packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.model_selection as model_selection
from sklearn import linear_model
import sklearn.metrics as metrics
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from sklearn.feature_selection import RFE
from sklearn.impute import KNNImputer
import warnings
#Read in the merged movie dataset.
#NOTE(review): r'\movie_data_final.csv' is a root-relative Windows path -- confirm intended.
df = pd.read_csv(r'\movie_data_final.csv')
#Treat revenue at or below $5000 as missing (likely unreported placeholder values).
df.loc[df['revenue'] <= 5000,'revenue'] = np.nan
#Impute missing revenue with 2-nearest-neighbour KNN over the numeric columns.
#Columns 0-1 are assumed to be the date and name columns -- TODO confirm.
imputer = KNNImputer(n_neighbors=2)
df.iloc[: , 2:] = imputer.fit_transform(df.iloc[: , 2:])
#Drop identifier/date columns that cannot be used as model features.
df=df.drop(['Logged_Date','Name','Logged_Year'], axis=1)
######################## Transformations ########################
#Plot correlation matrix
corrMatrix = df.corr()
plt.subplots(figsize=(20,15))
sns_plot = sns.heatmap(corrMatrix,cmap="RdBu",annot=True)
fig = sns_plot.get_figure()
fig.savefig("jupyter_heatmap.png")
#Scale non-boolean features
df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']] = StandardScaler().fit_transform(df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']])
#Plot potenitally problematic features
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);
#Remove outliers and replace with mean
replace = df['runtime'].mean()
df.loc[df['runtime'] >= 2,'runtime'] = np.nan
df['runtime'] = np.where(df['runtime'].isna(),replace,df['runtime'])
#Same process but with popularity
replace = df['popularity'].mean()
df.loc[df['popularity'] >= 2,'popularity'] = np.nan
df['popularity'] = np.where(df['popularity'].isna(),replace,df['popularity'])
#Transform problematic columns
df['movie_sentiment'] = df['movie_sentiment']**(1./3.)
#Recode bad values to mean
df.replace([np.inf, -np.inf], np.nan, inplace=True)
replace = df['movie_sentiment'].mean()
df['movie_sentiment'] = np.where(df['movie_sentiment'].isna(),replace,df['movie_sentiment'])
#Plot again to see change in features after transformation
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);
############ Research Question: Which factors impact revenue the most? ############
#Train/test split: features = everything but revenue; target = revenue (scaled above).
X=df.drop('revenue', axis=1)
y=df[['revenue']]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.3, random_state=24)
###### 1.1 OLS ######
#Ordinary least squares baseline.
lm = linear_model.LinearRegression()
lm.fit(X_train, y_train)
ols_fitted = lm.predict(X_test)
#Calculate R Squared on the held-out test set.
print("OLS R Squared: %s" % round(metrics.r2_score(y_test, ols_fitted),2))
###### 1.2 Elastic Net ######
#Grid-search alpha / l1_ratio with 10-fold CV on the training set.
search=model_selection.GridSearchCV(estimator=linear_model.ElasticNet(),param_grid={'alpha':np.logspace(-5,2,8),'l1_ratio':[.2,.4,.6,.8]},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10)
search.fit(X_train,y_train)
print(search.best_params_)
#Best parameters from the search above are hard-coded here -- re-run the search if the data changes.
#NOTE(review): ElasticNet(normalize=...) was removed in scikit-learn 1.2; on current versions
#this line needs a StandardScaler pipeline instead -- confirm the pinned sklearn version.
enet=linear_model.ElasticNet(normalize=True,alpha=0.001,l1_ratio=0.8)
enet.fit(X_train, y_train)
enet_fitted = enet.predict(X_test)
#Calculate R Squared on the held-out test set.
print("Elastic Net R Squared: %s" % round(metrics.r2_score(y_test, enet_fitted),2))
###### 1.3 RF ######
warnings.simplefilter("ignore")  # silence convergence/deprecation chatter during the RFE sweep
# Recursive feature elimination: find how many features maximise OLS test R^2.
nof_list = np.arange(1, 37)
high_score = 0
nof = 0
score_list = []
# The split is deterministic (fixed random_state), so hoist it out of the loop.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=24)
for n in nof_list:
    model = linear_model.LinearRegression()
    # n_features_to_select must be passed by keyword (positional form removed in sklearn >= 1.2).
    rfe = RFE(model, n_features_to_select=int(n))
    X_train_rfe = rfe.fit_transform(X_train, y_train)
    X_test_rfe = rfe.transform(X_test)
    model.fit(X_train_rfe, y_train)
    score = model.score(X_test_rfe, y_test)
    score_list.append(score)
    if score > high_score:
        high_score = score
        nof = n
print("Optimum number of features: %d" % nof)
print("Score with %d features: %f" % (nof, high_score))
#Optimum number of features: 35
#Score with 35 features: 0.645497
rf = RandomForestRegressor(max_features=35, n_estimators=100)
rf.fit(X_train, y_train)
rf_fitted = rf.predict(X_test)
# Map each feature name to its impurity-based importance from the fitted forest.
rev_importance = dict(zip(X_train.columns, rf.feature_importances_))
rev_importance = pd.DataFrame.from_dict(rev_importance, orient='index').rename(columns={0: 'Revenue_Importance'})
#Calculate R Squared on the held-out test set.
print("RF R Squared: %s" % round(metrics.r2_score(y_test, rf_fitted), 2))
################### Feature Importance ###################
#Plot Feature Importance table
print(rev_importance.sort_values(by='Revenue_Importance', ascending=False))
#Plot as bar chart
rev_importance.sort_values(by='Revenue_Importance', ascending=False).plot(kind='bar', rot=45)
| 38.073333 | 366 | 0.711959 |
a3767371ed8f0cd8ffdd0f52e641dd47e92c68df | 1,287 | py | Python | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 142 - Perfect Square Collection
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
import math
from itertools import combinations

import numpy as np
def run(N=1000000):
    """Solve Project Euler 142: smallest x + y + z (x > y > z > 0) such that
    x+y, x-y, x+z, x-z, y+z and y-z are all perfect squares.

    Any pair of squares j**2 > i**2 with an even difference gives a midpoint
    m = (j**2 + i**2) // 2 and offset d = (j**2 - i**2) // 2 with both m + d
    and m - d square.  x must be such a midpoint with two offsets y and z;
    y + z and y - z are square exactly when y is itself a midpoint with
    offset z.

    Args:
        N (int): search bound; squares are enumerated with roots below
            sqrt(N).  Defaults to the original 1,000,000.

    Returns:
        int or float: the smallest x + y + z found, or 1e20 when no
        collection exists within the bound.
    """
    candids = {}
    # math.isqrt is exact; int(np.sqrt(N)) can be off by one from float
    # rounding for large N.
    limit = math.isqrt(N)
    for i in range(1, limit):
        for j in range(i + 1, limit):
            diff_squares = j ** 2 - i ** 2
            if diff_squares % 2 == 0:
                midpoint = (j ** 2 + i ** 2) // 2
                d = diff_squares // 2
                candids.setdefault(midpoint, []).append(d)
    best_xyz = 1e20
    for x, offsets in candids.items():
        if len(offsets) < 2:
            continue
        for y, z in combinations(offsets, 2):
            if z > y:
                z, y = y, z
            # x+/-y and x+/-z are square by construction; require y to be a
            # midpoint with offset z so y+z and y-z are square too.
            if y in candids and z in candids[y]:
                best_xyz = min(best_xyz, x + y + z)
    return best_xyz
if __name__ == "__main__":
print(run())
| 28.6 | 79 | 0.529915 |
a3775d28ecda7be7aab9864818d4a6bf38e3387c | 6,032 | py | Python | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | """
Activity
========
Activities are self generated classes to which you can pass an identifier,
and a list of tasks to perform. The activities are in between the decider and
the task.
For ease, two types of task runners are available: SyncTasks and AsyncTasks. If
you need something more specific, you should either create your own runner, or
you should create a main task that will then split the work.
"""
from threading import Thread
import boto.swf.layer2 as swf
import json
ACTIVITY_STANDBY = 0
ACTIVITY_SCHEDULED = 1
ACTIVITY_COMPLETED = 2
class Activity(swf.ActivityWorker):
    """SWF activity worker: polls for a task, runs its garcon tasks and
    reports the JSON-serialized context back to SWF.

    Instances are configured through ``hydrate()`` rather than ``__init__``
    (see the ``create`` factory below).
    """
    version = '1.0'  # SWF activity type version.
    task_list = None  # Set by hydrate(); the SWF task list this worker polls.
    def run(self):
        """Poll SWF once and execute the received activity task, if any.

        The input of the previous activity (the flow context) arrives as a
        JSON string in the task's ``input`` field; the updated context is
        JSON-encoded back on completion.  On any exception the task is
        failed in SWF and the exception re-raised.

        Returns:
            bool: always True, so ``worker_runner`` keeps polling.
        """
        # poll() / complete() / fail() come from boto's swf.ActivityWorker.
        activity_task = self.poll()
        packed_context = activity_task.get('input')
        context = dict()
        if packed_context:
            context = json.loads(packed_context)
        # A poll can time out without a task; only then is 'activityId' absent.
        if 'activityId' in activity_task:
            try:
                context = self.execute_activity(context)
                self.complete(result=json.dumps(context))
            except Exception as error:
                self.fail(reason=str(error))
                raise error
        return True
    def execute_activity(self, context):
        """Execute the tasks within the activity.

        Args:
            context (dict): The flow context.

        Returns:
            dict: the context updated by the tasks runner.
        """
        return self.tasks.execute(context)
    def hydrate(self, data):
        """Fill in any unset attribute from the provided data.

        Existing values take precedence; ``data`` only supplies defaults.

        Args:
            data (dict): name, domain, requires, task_list and tasks values.
        """
        self.name = self.name or data.get('name')
        self.domain = getattr(self, 'domain', '') or data.get('domain')
        self.requires = getattr(self, 'requires', []) or data.get('requires')
        self.task_list = self.task_list or data.get('task_list')
        self.tasks = getattr(self, 'tasks', []) or data.get('tasks')
class ActivityWorker():
    """Polls the activities of a flow, optionally restricted to a subset."""
    def __init__(self, flow, activities=None):
        """Create a worker for a flow's activities.

        Every activity of the flow is discovered; when ``activities`` is a
        non-empty list of names, only those are polled — heavier tasks can
        thus be pinned to dedicated machines.

        Args:
            flow (module): the flow module.
            activities (list): optional activity names this worker handles.
        """
        self.flow = flow
        self.activities = find_activities(self.flow)
        self.worker_activities = activities
    def run(self):
        """Start one polling thread per handled activity."""
        for current in self.activities:
            # A falsy filter (None or empty) means: handle every activity.
            restricted = (self.worker_activities and
                          current.name not in self.worker_activities)
            if restricted:
                continue
            Thread(target=worker_runner, args=(current,)).start()
def worker_runner(worker):
    """Keep polling the worker until its run loop reports completion.

    Args:
        worker (object): the Activity worker; its run() is called repeatedly
            for as long as it returns a truthy value.
    """
    while worker.run():
        pass
def create(domain):
    """Helper method to create Activities.

    The helper method simplifies the creation of an activity by setting the
    domain, the task list, and the activity dependencies (what other
    activities) need to be completed before this one can run.

    Note:
        The task list is generated based on the domain and the name of the
        activity. Always make sure your activity name is unique.
    """
    def wrapper(**options):
        name = options.get('name')
        activity = Activity()
        activity.hydrate(dict(
            domain=domain,
            name=name,
            requires=options.get('requires', []),
            task_list=domain + '_' + name,
            tasks=options.get('tasks', []),
        ))
        return activity
    return wrapper
def find_available_activities(flow, history):
    """Find all available activities of a flow.

    The history contains all the information of our activities (their state).
    This method focuses on finding all the activities that need to run.

    Args:
        flow (module): the flow module.
        history (dict): the history information.

    Yield:
        activity: an activity whose requirements are all completed and which
            has no event in the history yet.
    """
    for activity in find_activities(flow):
        # If an event is already available for the activity, it means it is
        # not in standby anymore, it's either processing or has been completed.
        # The activity is thus not available anymore.
        if history.get(activity.name):
            continue
        # Only available when every requirement has completed.
        # (Idiom fix: was `if not requirement_evt == ACTIVITY_COMPLETED`;
        # the flag variable is replaced by a for/else.)
        for requirement in activity.requires:
            if history.get(requirement.name) != ACTIVITY_COMPLETED:
                break
        else:
            yield activity
def find_uncomplete_activities(flow, history):
    """Find uncomplete activities.
    Uncomplete activities are all the activities that are not marked as
    completed.
    Args:
        flow (module): the flow module.
        history (dict): the history information.
    Yield:
        activity: The available activity.
    """
    for activity in find_activities(flow):
        event = history.get(activity.name)
        # Yield anything without a history entry, or whose entry is not the
        # ACTIVITY_COMPLETED marker (covers standby and scheduled states).
        if not event or event != ACTIVITY_COMPLETED:
            yield activity
def find_activities(flow):
    """Retrieves all the activities from a flow.

    Args:
        flow (module): the flow module.
    Return:
        List of all the Activity instances found on the flow module.
    """
    members = (getattr(flow, attribute) for attribute in dir(flow))
    return [member for member in members if isinstance(member, Activity)]
| 28.587678 | 79 | 0.629145 |
a37b1669512f165099c1e03b767ae6863a2fb2c7 | 7,754 | py | Python | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | import sys
import csv
import json
import argparse
from collections import namedtuple
# diff info
# One record per detected difference between the two CSV files.
DiffInfo = namedtuple('DiffInfo', [
    'mark',  # diff kind: '!' changed cell, '-' row only in csv1, '+' row only in csv2
    'address',  # row/column addresses of diff
    'keyname',  # row/column key names of diff (empty for whole-row diffs)
    'value',  # values of diff ("left | right" for changed cells)
])
def main():
    """CLI entry point: parse arguments, diff the two CSV files, print result."""
    parser = argparse.ArgumentParser(description='Output the difference between two CSV files.')
    parser.add_argument('csv1', help='1st CSV file.')
    parser.add_argument('csv2', help='2nd CSV file.')
    parser.add_argument('-e', '--encoding', default='utf-8', help='Encoding for CSV files. (default: utf-8)')
    parser.add_argument('-p', '--primary-key', type=int, default=1, help='Column number as primary key. (range: 1-N, default: 1)')
    parser.add_argument('-t', '--has-title', action='store_true', help='Treat the first line as a header.')
    parser.add_argument('-f', '--format', default='normal', help='Set format. (normal, json)')
    parser.add_argument('--excel-style', action='store_true', help='Print addresses excel A1 style.')
    parser.add_argument('--hide-address', action='store_true', help='Do not print row/column addresses.')
    parser.add_argument('--hide-keyname', action='store_true', help='Do not print row/column key names.')
    parser.add_argument('--hide-value', action='store_true', help='Do not print difference values.')
    args = parser.parse_args()
    # read csv
    csv1, header1 = read_csv(args.csv1, args.encoding, args.has_title)
    csv2, header2 = read_csv(args.csv2, args.encoding, args.has_title)
    # check column count: the diff only makes sense for identical schemas
    if len(header1) != len(header2):
        print(f'error: different column count in CSV files. (csv1:{len(header1)}, csv2:{len(header2)})', file=sys.stderr)
        return
    # check primary key value (user supplies a 1-based column number)
    if not (0 < args.primary_key <= len(header1)):
        print(f'error: primary key invalid. (primary key:{args.primary_key}, column count:{len(header1)})', file=sys.stderr)
        return
    # correct column number to start with 0
    primary_key = args.primary_key - 1
    # sort by primary key — diff_csv assumes both inputs sorted on that column
    csv1.sort(key=lambda x: x[primary_key])
    csv2.sort(key=lambda x: x[primary_key])
    # get diff info
    diffs = diff_csv(csv1, header1, csv2, header2, primary_key, args.excel_style)
    # print result either as JSON or as the human-readable listing
    if args.format.lower() == 'json':
        print(json.dumps([d._asdict() for d in diffs]))
    else:
        print_diffs(diffs, args.hide_address, args.hide_keyname, args.hide_value)
def read_csv(fname: str, encoding: str, has_header: bool):
    """Load a CSV file and normalize every row to the same column count.

    Args:
        fname (str): CSV file.
        encoding (str): encoding for CSV File.
        has_header (bool): True if the first row is a header row.
    Returns:
        tuple[list[list[str]], list[str]]: (data rows, header row); the
        header is a list of empty strings when `has_header` is False.
    """
    with open(fname, 'r', encoding=encoding) as fh:
        rows = [list(record) for record in csv.reader(fh)]
    # Pad short rows so every row has the width of the widest one.
    width = max(len(row) for row in rows)
    for row in rows:
        row += [''] * (width - len(row))
    if has_header:
        return rows[1:], rows[0]
    return rows, [''] * len(rows[0])
def _row_only_diff(mark, ri, row, excel_style):
    """Build the DiffInfo for a row present in only one of the two files."""
    return DiffInfo(
        mark=mark,
        address=make_row_address(ri, excel_style),
        keyname='',
        value=','.join(row),
    )


def diff_csv(csv1: list[list[str]], header1: list[str],
             csv2: list[list[str]], header2: list[str],
             primary_key: int, excel_style: bool):
    """Diff CSV files (both inputs must already be sorted by primary key).

    Walks both files with a classic sorted-merge two-pointer scan.

    Args:
        csv1 (list[list[str]]): 1st CSV data.
        header1 (list[str]): 1st CSV header.
        csv2 (list[list[str]]): 2nd CSV data.
        header2 (list[str]): 2nd CSV header.
        primary_key (int): 0-based column number of primary key.
        excel_style (bool): format addresses Excel A1 style.
    Returns:
        list[DiffInfo]: list of diff infos.
    """
    diffs = []
    ri1 = ri2 = 0
    while True:
        # Current row of each file, or None once that file is exhausted.
        row1 = csv1[ri1] if len(csv1) > ri1 else None
        row2 = csv2[ri2] if len(csv2) > ri2 else None
        pkey1 = row1[primary_key] if row1 else None
        pkey2 = row2[primary_key] if row2 else None
        # Both inputs exhausted: done.
        if row1 is None and pkey2 is None:
            break
        # Row exists only in csv2 (csv1 exhausted, or its key sorts later).
        if pkey1 is None or (pkey2 is not None and pkey1 > pkey2):
            diffs.append(_row_only_diff('+', ri2, row2, excel_style))
            ri2 += 1
        # Row exists only in csv1 (csv2 exhausted, or csv1's key sorts first).
        elif pkey2 is None or pkey1 < pkey2:
            diffs.append(_row_only_diff('-', ri1, row1, excel_style))
            ri1 += 1
        # Same primary key in both files: compare cell by cell.
        else:  # pkey1 == pkey2
            for ci, (v1, v2) in enumerate(zip(row1, row2)):
                if v1 != v2:
                    diffs.append(DiffInfo(
                        mark='!',
                        address=make_cell_address(ri1, ri2, ci, excel_style),
                        keyname=f'{pkey1},{header1[ci]}',
                        value=f'{v1} | {v2}',
                    ))
            ri1 += 1
            ri2 += 1
    return diffs
def a1_address(ri, ci):
    """Make an Excel A1 style address from 0-based row/column indices.

    Column letters use bijective base-26: ci=0 -> 'A', ci=25 -> 'Z',
    ci=26 -> 'AA', ci=701 -> 'ZZ', ci=702 -> 'AAA'.

    BUG FIX: the original produced 'BA' for ci=26 (off by one) and only
    supported up to two letters; this version handles any column.
    """
    letters = ''
    n = ci + 1  # bijective base-26 works on 1-based column numbers
    while n:
        n, rem = divmod(n - 1, 26)
        letters = chr(ord('A') + rem) + letters
    return letters + str(ri + 1)
def make_row_address(ri, excel_style):
    """Format a whole-row address for display ('3:3' or 'R3')."""
    return f'{ri+1}:{ri+1}' if excel_style else f'R{ri+1}'
def make_cell_address(ri1, ri2, ci, excel_style):
    """Format the pair of cell addresses (one per input file) for display."""
    if excel_style:
        left = a1_address(ri1, ci)
        right = a1_address(ri2, ci)
    else:
        left = f'R{ri1+1},C{ci+1}'
        right = f'R{ri2+1},C{ci+1}'
    return f'{left} | {right}'
def print_diffs(diffs, hide_address, hide_keyname, hide_value):
    """Print diffs, one line per entry, followed by a total count.

    Args:
        diffs (list[DiffInfo]): list of diff infos.
        hide_address (bool): if true then do not print addresses.
        hide_keyname (bool): if true then do not print key names.
        hide_value (bool): if true then do not print values.
    """
    for diff in diffs:
        parts = [f'{diff.mark} ']
        if not hide_address and diff.address:
            parts.append(f'[{diff.address}] ')
        if not hide_keyname and diff.keyname:
            parts.append(f'[{diff.keyname}] ')
        if not hide_value and diff.value:
            parts.append(f'> {diff.value}')
        print(''.join(parts))
    print(f'(diff count: {len(diffs)})')
if __name__ == '__main__':
    # Script entry point, e.g. `python csvdiff.py a.csv b.csv`.
    main()
| 33.5671 | 131 | 0.554037 |
a37e426d249e4fa306b483523b559a9a0ae9cff3 | 2,563 | py | Python | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | null | null | null | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | 11 | 2020-11-13T18:29:37.000Z | 2022-02-10T00:25:15.000Z | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | null | null | null | import os
from time import time
import pandas as pd
from sqlalchemy import create_engine
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class AbstractDataSource:
    """Base class for all data sources."""

    def __init__(self):
        # BUG FIX: the constructor was misspelled __int__, so it never ran.
        pass
class AbstractDataSourcePointer(AbstractDataSource):
    """Base class for pointers that locate and iterate a data source."""

    def __init__(self):
        # BUG FIX: was misspelled __int__ and chained to super().__int__().
        super().__init__()

    def identify_pointer(self):
        """Return an iterator over the underlying source; subclasses override."""
        pass
class DataSourceGenerator:
    """Iterator that pages through `test_result_log` rows in fixed-size chunks.

    NOTE(review): iteration never raises StopIteration — once the table is
    exhausted it keeps returning empty DataFrames; callers must stop on their
    own. Left unchanged to avoid breaking existing callers.
    """

    def __init__(self, engine, offset=0, limit=10000, chunks=None):
        self.engine = engine  # SQLAlchemy engine used for the queries
        self.offset = offset  # current row offset into the table
        self.limit = limit    # rows fetched per page
        self.chunks = chunks  # expected number of pages (informational)

    def __iter__(self):
        return self

    def __next__(self):
        """Fetch the next page as a DataFrame and advance the row offset."""
        start = time()
        sql = """select id, content from test_result_log offset %s limit %s""" % (self.offset, self.limit)
        data = pd.read_sql(sql, con=self.engine)
        # BUG FIX: the original logged offset*limit (treating the row offset
        # as a page number), which was wrong after the first page.
        logger.info('--- Querying data offset {} to {} within {}'.format(self.offset,
                                                                         self.offset + self.limit,
                                                                         time() - start))
        self.offset += self.limit
        # BUG FIX: the original used `yield data`, which turned __next__ into
        # a generator function — next(gen) returned a generator object, not
        # the DataFrame, and the body never executed.
        return data
class PostgresDBSourcePointer(AbstractDataSourcePointer):
    """Pointer over a Postgres `test_result_log` table, paged by chunksize."""

    def __init__(self):
        # BUG FIX: the constructor was misspelled __int__, so it never ran and
        # identify_pointer() crashed later on the missing `self.chunksize`.
        super().__init__()
        # Connection settings come from the KI_* environment variables.
        self.host = os.getenv('KI_HOST')
        self.port = os.getenv('KI_PORT')
        self.db = os.getenv('KI_KITDB')
        self.user = os.getenv('KI_USER')
        self.password = os.getenv('KI_PASSWORD')
        self.chunksize = 10000

    def identify_pointer(self):
        """Estimate the number of pages and return a DataSourceGenerator."""
        logger.info('Estimating...')
        engine = create_engine('postgres://{}:{}@{}:{}/{}'.format(self.user, self.password,
                                                                  self.host, self.port, self.db))
        sql = """select count(*) from test_result_log """
        total_records = pd.read_sql(sql, con=engine)['count'][0]
        chunks = (total_records // self.chunksize) + 1
        logger.info('Estimate - Total records {} - Total chunks {} - '.format(total_records, chunks))
        generator = DataSourceGenerator(engine=engine, limit=self.chunksize, chunks=chunks)
        return generator
class DataSource:
    """Facade over a data-source pointer; validates the pointer's type."""

    def __init__(self, pointer):
        # BUG FIX: the constructor was misspelled __int__, so
        # `DataSource(pointer)` raised TypeError and the attribute was
        # never assigned.
        self.pointer = pointer
        self._check_variable_types_()

    def _check_variable_types_(self):
        assert isinstance(self.pointer, AbstractDataSourcePointer), "Pointer must be instance of " \
                                                                    "AbstractDataSourcePointer"
| 35.109589 | 106 | 0.587593 |
a37e9163f756c5b933aa7522cfc07f57edae5c1e | 3,431 | py | Python | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | [
"BSD-3-Clause"
] | null | null | null | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | [
"BSD-3-Clause"
] | 4 | 2020-08-19T08:39:55.000Z | 2021-03-31T08:23:26.000Z | setup.py | radiosilence/django-omnibus | c31337306c601e75fbdac9d6b9b62dcc980e04f5 | [
"BSD-3-Clause"
] | null | null | null | import codecs
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(*parts):
    """Return the UTF-8 text of the file at *parts*, relative to this file."""
    target = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(target, encoding='utf-8') as handle:
        return handle.read()
# Packages needed to run the test suite (pytest plus plugins).
test_requires = [
    'pytest>=2.5.2',
    'pytest-cov>=1.6',
    'pytest-flakes>=0.2',
    'pytest-pep8>=1.0.5',
    'pytest-django>=2.6',
    'mock==1.0.1',
    'pep8==1.4.6'
]
# Runtime dependencies of the package itself.
install_requires = [
    'Django>=1.4',
    'pyzmq==14.1.1',
    'tornado==3.1.1',
    'sockjs-tornado>=1.0.0',
]
# Extra tooling for local development.
dev_requires = [
    'tox',
]
# Extra packages required to build the documentation.
docs_requires = [
    'sphinx',
    'sphinx_rtd_theme'
]
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest.

    Adds coverage/junit options that are translated into pytest CLI flags.
    """
    user_options = [('cov=', None, 'Run coverage'),
                    ('cov-xml=', None, 'Generate junit xml report'),
                    ('cov-html=', None, 'Generate junit html report'),
                    ('junitxml=', None, 'Generate xml of test results'),
                    ('clearcache', None, 'Clear cache first')]
    boolean_options = ['clearcache']
    def initialize_options(self):
        # Distutils contract: declare defaults for every option above.
        TestCommand.initialize_options(self)
        self.cov = None
        self.cov_xml = False
        self.cov_html = False
        self.junitxml = None
        self.clearcache = False
    def run_tests(self):
        # Imported lazily so setup.py works without pytest installed.
        import pytest
        params = {'args': self.test_args}
        if self.cov is not None:
            params['plugins'] = ['cov']
            params['args'].extend(['--cov', self.cov, '--cov-report', 'term-missing'])
            # XML/HTML reports only make sense when coverage is enabled.
            if self.cov_xml:
                params['args'].extend(['--cov-report', 'xml'])
            if self.cov_html:
                params['args'].extend(['--cov-report', 'html'])
        if self.junitxml is not None:
            params['args'].extend(['--junitxml', self.junitxml])
        if self.clearcache:
            params['args'].extend(['--clearcache'])
        self.test_suite = True
        # Propagate pytest's exit status as the process exit code.
        errno = pytest.main(**params)
        sys.exit(errno)
# Package metadata and installation configuration for django-omnibus.
setup(
    name='django-omnibus',
    version='0.1.0',
    description='Django/JavaScript WebSocket Connections',
    long_description=read('README.md'),
    author='Stephan Jaekel, Norman Rusch',
    author_email='info@moccu.com',
    url='https://github.com/moccu/django-omnibus/',
    packages=find_packages(exclude=[
        'testing',
        'testing.pytests',
        'examples',
    ]),
    include_package_data=True,
    # Optional dependency groups: `pip install django-omnibus[docs]` etc.
    extras_require={
        'docs': docs_requires,
        'tests': test_requires,
        'dev': dev_requires,
    },
    test_suite='.',
    install_requires=install_requires,
    # Wire `python setup.py test` to the pytest-based command above.
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: Implementation :: CPython',
        'Framework :: Django',
    ],
    zip_safe=False,
)
| 27.669355 | 86 | 0.575051 |
a37f3e393c9a970f74e1fb50bf59be6bc0954abc | 504 | py | Python | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 105 | 2018-02-07T22:07:47.000Z | 2022-03-31T18:16:47.000Z | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 57 | 2018-02-07T23:07:41.000Z | 2021-11-21T17:14:06.000Z | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 10 | 2018-02-24T23:44:51.000Z | 2022-03-02T07:52:27.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_etc 1'] = '[{"lineno": 2, "value": "tup = (1, 2, 3)"}, {"lineno": 3, "source": ["tup\\n"], "value": "(1, 2, 3)"}, {"lineno": 5, "value": "False"}, {"lineno": 7, "value": "text = happy"}, {"lineno": 9, "source": ["text\\n"], "value": "happy"}, {"lineno": 12, "value": "x = foo\\nfaa"}, {"lineno": 15, "value": "a = 1"}]'
| 45.818182 | 335 | 0.56746 |
a37f43b419e3def4e72bb772a8952c0f709cee66 | 1,823 | py | Python | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | import cosypose
import os
import yaml
from joblib import Memory
from pathlib import Path
import getpass
import socket
import torch.multiprocessing
# Use the file_system strategy for sharing tensors between worker processes.
torch.multiprocessing.set_sharing_strategy('file_system')
# Identity of the current machine/user.
hostname = socket.gethostname()
username = getpass.getuser()
# Repository layout, resolved relative to the installed cosypose package.
PROJECT_ROOT = Path(cosypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
DATA_DIR = PROJECT_DIR / 'data'
LOCAL_DATA_DIR = PROJECT_DIR / 'local_data'
TEST_DATA_DIR = LOCAL_DATA_DIR
DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
EXP_DIR = LOCAL_DATA_DIR / 'experiments'
RESULTS_DIR = LOCAL_DATA_DIR / 'results'
DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
DEPS_DIR = PROJECT_DIR / 'deps'
CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'
# local_data must be provisioned before importing this module; subdirectories
# are created on demand.
assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
TEST_DATA_DIR.mkdir(exist_ok=True)
DASK_LOGS_DIR.mkdir(exist_ok=True)
SYNT_DS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)
ASSET_DIR = DATA_DIR / 'assets'
# joblib on-disk memoization cache.
MEMORY = Memory(CACHE_DIR, verbose=2)
# Conda introspection; CONDA_PREFIX_1 is present when an env is stacked on base.
CONDA_PREFIX = os.environ['CONDA_PREFIX']
if 'CONDA_PREFIX_1' in os.environ:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
    CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
else:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
    CONDA_ENV = 'base'
# NOTE(review): hard-coded, user-specific config file name — confirm that
# 'config_yann.yaml' exists in this checkout.
cfg = yaml.load((PROJECT_DIR / 'config_yann.yaml').read_text(), Loader=yaml.FullLoader)
SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
SLURM_QOS = cfg['slurm_qos']
DASK_NETWORK_INTERFACE = cfg['dask_network_interface']
# Kwai path
KWAI_PATH = "/data2/cxt/kwai/IMG_3486" | 30.383333 | 88 | 0.765222 |
a37f76a50ac5297fbe0ae2e72f8f20c6b13bb7e0 | 2,761 | py | Python | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | from copy import copy
import re
class Cpu:
    """Tiny register machine implementing the sixteen day-16 opcodes.

    Every instruction writes `mem[c]`; operands `a`/`b` are register indices
    for the `r`-suffixed forms and immediate values for the `i`-suffixed ones.
    """

    def __init__(self, mem):
        self.mem = mem

    def addr(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] + regs[b]

    def addi(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] + b

    def mulr(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] * regs[b]

    def muli(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] * b

    def banr(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] & regs[b]

    def bani(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] & b

    def borr(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] | regs[b]

    def bori(self, a, b, c):
        regs = self.mem
        regs[c] = regs[a] | b

    def setr(self, a, b, c):
        # b is ignored by the set instructions.
        self.mem[c] = copy(self.mem[a])

    def seti(self, a, b, c):
        self.mem[c] = a

    # Comparison opcodes store 1 when the test holds, otherwise 0.
    def gtir(self, a, b, c):
        self.mem[c] = int(a > self.mem[b])

    def gtri(self, a, b, c):
        self.mem[c] = int(self.mem[a] > b)

    def gtrr(self, a, b, c):
        self.mem[c] = int(self.mem[a] > self.mem[b])

    def eqir(self, a, b, c):
        self.mem[c] = int(a == self.mem[b])

    def eqri(self, a, b, c):
        self.mem[c] = int(self.mem[a] == b)

    def eqrr(self, a, b, c):
        self.mem[c] = int(self.mem[a] == self.mem[b])
# The sixteen opcode names implemented on Cpu.
opts = [
    "addr", "addi", "mulr", "muli", "banr", "bani", "borr", "bori", "setr",
    "seti", "gtir", "gtri", "gtrr", "eqir", "eqri", "eqrr"
]
inputs = []   # (before-registers, instruction, after-registers) samples
program = []  # the raw program from the second half of the input


def parse_mem(s):
    """Extract all integers from a 'Before:'/'After:' line.

    BUG FIX: the original pattern r"[0-9]" matched single digits only and
    silently corrupted any sample whose register values reach 10 or more.
    """
    return list(map(int, re.findall(r"[0-9]+", s)))


def parse_params(s):
    """Parse a space-separated instruction line into a list of ints."""
    return list(map(int, s.split(" ")))


with open('input.txt') as f:
    # First section: 4-line blocks (Before / instruction / After / blank),
    # terminated by an empty "Before" line.
    while True:
        before, args, after, _ = (f.readline().strip() for x in range(4))
        if not before:
            break
        inputs.append(
            (parse_mem(before), parse_params(args), parse_mem(after)))
    # Second section: one instruction per remaining line.
    while True:
        line = f.readline()
        if not line:
            break
        program.append(parse_params(line))
def test(opts):
    """Try every opcode in `opts` against all recorded samples.

    Returns:
        tuple: (count of samples matching 3+ opcodes — the part-1 answer,
        dict mapping opcode number -> name for samples with a unique match).
    """
    solved = {}
    more_than_3 = 0
    for before, args, after in inputs:
        possible = []
        for func_name in opts:
            # Replay the sample: start from `before`, run the candidate op.
            cpu = Cpu(copy(before))
            getattr(cpu, func_name)(*args[1:])
            if cpu.mem == after:
                possible.append(func_name)
        if len(possible) > 2:
            # 3 or more candidate opcodes behave like this sample.
            more_than_3 += 1
        elif len(possible) == 1:
            # Exactly one candidate: args[0] is the numeric opcode.
            solved[args[0]] = possible[0]
    return more_than_3, solved
# Part 1: number of samples compatible with three or more opcodes.
answer_1, opt_map = test(opts)
print(answer_1)
# Part 2: repeatedly pin down uniquely-identified opcodes, removing each
# solved name so the remaining ones become unique in a later pass.
# NOTE(review): loops forever if some pass yields no unique match — assumed
# not to happen for valid puzzle input.
while opts:
    _, solved = test(opts)
    opt_map.update(solved)
    for opt in solved.values():
        opts.remove(opt)
# Execute the program with the recovered opcode mapping.
cpu = Cpu([0, 0, 0, 0])
for args in program:
    func_name = opt_map[args[0]]
    getattr(cpu, func_name)(*args[1:])
print(cpu.mem[0])
| 23.801724 | 75 | 0.522637 |
a382674b28c095d002534f7e5a89fab99c7987b3 | 2,700 | py | Python | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | null | null | null | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | null | null | null | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | 1 | 2022-03-04T09:31:18.000Z | 2022-03-04T09:31:18.000Z | import streamlit as st
def app():
    """Render the static About page; the body is one verbatim markdown blob."""
    st.title("About")
    st.markdown('''
    During the Cyclone Fani which hit Odisha in 2019 a lot of places ran out of electricity, water and other basic necessities. During this period most of the rescue workers relied on data accumulated pre disaster to guide rescue services but this was far off from the on ground reality where many places were in much worse of a situation than what was predicted. This is why we have developed this application to help guide rescue services in realtime using data from twitter. Even during the cyclone many parts of the city had mobile data running even though electricity and satellite television was not available. This is where our application comes into use by accessing the data posted to twitter we help guide rescue services towards the areas that need it the most.
    ## What it does
    The application uses tweepy to retrieve tweets in real time using keywords given by the user. At the time of a disaster the city or state can be entered by the user into our application. Using the keyword received it retrieves all the tweets available, giving priority to the most recent tweets. These tweets we then run through a disaster prediction SVC model which was built using sagemaker. This model helps to eliminate tweets that are not disaster-related so that we only account for valid tweets. The newly generated set of tweets which contain only disaster related tweets is now run through a sentiment analysis model to determine the negativity or positivity of a tweet. Using the sentiment analysis model we assign a float value score between -1 and 1 to each tweet. Now we use NLTK to extract the locations present in each of the tweets and add the score from the tweet to determine a total score for each location based on the sentiment of the tweets describing these places. This is used to finally display a list of locations along with a score for each of them describing the severity of their situation. We then use the names of the locations obtained and look up their coordinates using HERE api to plot them on a map with appropriate markings visually describing the severity of each of the locations.
    ## How we built it
    1)SVC model from sklearn to determine whether a tweet is disaster-related
    2)BERT model to determine sentiment behind each of the tweets
    3)AWS Sagemaker to train both the SVC and BERT models
    3)NLTK to extract the location keywords from the tweets
    4)Tweepy to extract the tweets
    5)Streamlit to deploy the app with an UI
    6)HERE api to obtain the coordinates of each of the locations
    ## Challenges we ran into
    ## Accomplishments that we're proud of
    ## What we learned
    ## What's next for Sage Rescuer''') | 117.391304 | 1,320 | 0.802963 |
a3842c6138c7e752e05c72628b0129a00a3d511f | 1,617 | py | Python | tests/test_reduce_sum.py | gavinuhma/tf-encrypted | 4e18d78a151bbe91489a1773fb839b889ff5b460 | [
"Apache-2.0"
] | 3 | 2018-10-18T19:36:02.000Z | 2020-07-05T19:46:23.000Z | tests/test_reduce_sum.py | dropoutlabs/tf-encrypted | 48c9dc7419163425e736ad05bb19980d134fc851 | [
"Apache-2.0"
] | null | null | null | tests/test_reduce_sum.py | dropoutlabs/tf-encrypted | 48c9dc7419163425e736ad05bb19980d134fc851 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
import unittest
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
class TestReduceSum(unittest.TestCase):
def setUp(self):
tf.reset_default_graph()
def test_reduce_sum_1d(self):
t = [1, 2]
with tf.Session() as sess:
out = tf.reduce_sum(t)
actual = sess.run(out)
with tfe.protocol.Pond() as prot:
b = prot.define_private_variable(tf.constant(t))
out = prot.reduce_sum(b)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
final = sess.run(out.reveal())
np.testing.assert_array_equal(final, actual)
def test_reduce_sum_2d(self):
t = [[1, 2], [1, 3]]
with tf.Session() as sess:
out = tf.reduce_sum(t, axis=1)
actual = sess.run(out)
with tfe.protocol.Pond() as prot:
b = prot.define_private_variable(tf.constant(t))
out = prot.reduce_sum(b, axis=1)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
final = sess.run(out.reveal())
np.testing.assert_array_equal(final, actual)
def test_reduce_sum_huge_vector(self):
t = [1] * 2**13
with tf.Session() as sess:
out = tf.reduce_sum(t)
actual = sess.run(out)
with tfe.protocol.Pond() as prot:
b = prot.define_private_variable(tf.constant(t))
out = prot.reduce_sum(b)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
final = sess.run(out.reveal())
np.testing.assert_array_equal(final, actual)
if __name__ == '__main__':
    # Allow running this file directly with the unittest runner.
    unittest.main()
| 24.134328 | 54 | 0.650588 |
a38463fb4d443f7e3aa2457876c06216a04ae227 | 1,010 | py | Python | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null |
import pytest
from GraphModels.models.Sarah.model_agricultural_water import AgriculturalWaterNodes
from GraphModels.models.Sarah.model_freshwater_available import FreshwaterAvailableNodes
from GraphModels.models.Sarah.model_municipal_water import MunicipalWaterNodes
# All node definitions from the three water models, pooled for validation.
nodes_list = AgriculturalWaterNodes + FreshwaterAvailableNodes + MunicipalWaterNodes
# Subset of nodes that carry a 'computation' specification.
computationnal_nodes = [node for node in nodes_list if 'computation' in node.keys()]
@pytest.mark.parametrize(('node'), nodes_list)
def test_node_minimal_keys(node):
    """Every node must at least declare type, unit, id and name."""
    required = {'type', 'unit', 'id', 'name'}
    assert required <= set(node.keys())
@pytest.mark.parametrize(('node'), computationnal_nodes)
def test_node_computationnal(node):
    """A computation spec must contain exactly 'formula' and 'name'."""
    expected = {'formula', 'name'}
    assert expected == set(node['computation'].keys())
def test_inputs_computation():
    """Every input referenced by a node's 'in' list must be a known node id."""
    inputs_computation = [
        val
        for node in nodes_list if 'in' in node
        for val in node['in']
    ]
    node_ids = [node['id'] for node in nodes_list]
    assert set(inputs_computation) <= set(node_ids)
| 37.407407 | 116 | 0.773267 |
a3859a2bc6f5180117d2aa59a1b851252ca8c8a5 | 1,350 | py | Python | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | appheap/social-media-analyzer | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | [
"Apache-2.0"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | from django.db import transaction
from db.scaffold import Scaffold
from typing import List
from telegram import models as tg_models
from pyrogram import types
class GetUpdatedMessageEntityTypes(Scaffold):
def get_updated_message_entity_types(
self,
*,
db_message: 'tg_models.Message',
raw_message: 'types.Message'
) -> List['tg_models.EntityType']:
if db_message is None or raw_message is None:
return None
if raw_message.type == 'message' and raw_message.content.entities:
entity_types = set()
entities = raw_message.content.entities
for entity in entities:
entity_types.add(entity.type)
if len(entity_types):
db_entity_types = []
with transaction.atomic():
for raw_entity in entities:
db_entity_types.append(
self.tg_models.EntityType.objects.update_or_create_from_raw(
raw_entity=raw_entity,
db_message=db_message,
)
)
db_entity_types = list(filter(lambda obj: obj is not None, db_entity_types))
return db_entity_types
return None
| 32.926829 | 92 | 0.565926 |
a387303aea958c01ac561cbc1d4a035d79ef4112 | 486 | py | Python | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | from flask import Blueprint, request, jsonify
from web_util import assert_data_has_keys
from users.user import User
# Flask blueprint; all routes in this module are mounted under /api/user.
user_api = Blueprint('users_api', __name__, url_prefix='/api/user')
@user_api.route('/reset_password', methods=['POST'])
def sync():
    """Reset the authenticated user's password.

    Expects a JSON body with 'email', 'password' (current) and 'new_password'.
    NOTE(review): the function name `sync` does not match the route; renaming
    it would change the Flask endpoint name, so it is left as-is.
    """
    params = assert_data_has_keys(request, {'email', 'password', 'new_password'})
    user = User.authenticate(params['email'], params['password'])
    user.reset_password(params['new_password'])
    return jsonify({'message': 'OK'})
| 34.714286 | 81 | 0.73251 |
a389bd7328bfeb9809c940787f3815d94a0c7bd6 | 2,783 | py | Python | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | commands.py | abcxyz618/MovieGeek | 06029ed4202c63d3da4e306eb5d500ab81f2e1cb | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from omdb_api import *
from tmdb_api import *
class Commands(commands.Cog):
    """Discord commands that look up movie details via TMDB and OMDB."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def movie(self, ctx, *, in_str=""):  # input format: *movie {movie name} [index]
        """Reply with an embed describing the requested movie.

        Lookup errors are reported to the channel rather than raised.
        """
        if in_str == "":
            return await self.handle_empty(ctx)
        title, index = self.process_input(in_str)
        # BUG FIX: process_input may return (False, False) on malformed input,
        # and the original called `index.isdigit()` on that bool (AttributeError)
        # and forgot to await ctx.send, so the warning was never delivered.
        if not isinstance(index, str) or not index.isdigit():
            await ctx.send(":warning: Invalid input")
            return
        try:
            released, title, runtime, genres, t_bo, overview, poster_url, \
                comp_url, trailer_url, color, year = tmdb_search(title, int(index) - 1)
            release, rated, director_str, actor_str, d_bo, awards, ratings = omdb_search(title, year)
        except TypeError:
            await ctx.send(":x: No movie found!")
            return
        except IndexError:
            await ctx.send(":x: Invalid index!")
            return
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and task cancellation.
            await ctx.send(":x: Unidentified Error. Please relay error to I'm Peter #1327")
            return
        e = discord.Embed(color=discord.Color.from_rgb(color[0], color[1], color[2]), title=title)
        e.add_field(name="Released", value=release, inline=True)
        e.add_field(name="Duration", value=runtime if released else 'N/A', inline=True)
        e.add_field(name="Rated", value=rated if released else 'N/A', inline=True)
        e.add_field(name="Genres", value=genres, inline=True)
        e.add_field(name="Director", value=director_str, inline=True)
        e.add_field(name="Actors", value=actor_str, inline=True)
        e.add_field(name="Box Office", value=t_bo + '\n' + d_bo if released else 'N/A', inline=True)
        e.add_field(name="Awards", value=awards if released else 'N/A', inline=True)
        e.add_field(name="Rating", value=ratings if released else 'N/A', inline=True)
        e.add_field(name="Overview:", value=overview, inline=False)
        e.set_image(url=poster_url)
        e.set_thumbnail(url=comp_url)
        await ctx.send(embed=e)
        await ctx.send(f":movie_camera::clapper: Watch Movie Trailer here:\n{trailer_url}")

    @commands.command()
    async def actor(self, ctx, *, in_str):
        """Placeholder actor lookup command."""
        # BUG FIX: ctx.send is a coroutine and was not awaited.
        await ctx.send("Searching...")

    @staticmethod
    def process_input(in_str):
        """Split 'title [index]' input; returns (title, index-string).

        Defaults the index to '1'; returns (False, False) on malformed input.
        """
        if '[' not in in_str:
            return in_str, '1'
        li = [x.strip() for x in in_str.split('[')]
        if len(li) != 2:
            return False, False
        return li[0], li[1][:-1]

    @staticmethod
    async def handle_empty(ctx):
        """Tell the user that the command needs an argument."""
        await ctx.send(":warning: No input was given! ")
def setup(bot):
    """Extension entry point: attach this module's movie cog to *bot*."""
    movie_cog = Commands(bot)
    bot.add_cog(movie_cog)
| 36.618421 | 102 | 0.594682 |
a38a03f634375d52713a25701814579ff7b6e33e | 92,070 | py | Python | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/api/unified_endpoints_api.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.api_client import ApiClient, Endpoint as _Endpoint
from cryptoapis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from cryptoapis.model.get_address_details_r import GetAddressDetailsR
from cryptoapis.model.get_block_details_by_block_hash_r import GetBlockDetailsByBlockHashR
from cryptoapis.model.get_block_details_by_block_height_r import GetBlockDetailsByBlockHeightR
from cryptoapis.model.get_fee_recommendations_r import GetFeeRecommendationsR
from cryptoapis.model.get_last_mined_block_r import GetLastMinedBlockR
from cryptoapis.model.get_transaction_details_by_transaction_idr import GetTransactionDetailsByTransactionIDR
from cryptoapis.model.inline_response400 import InlineResponse400
from cryptoapis.model.inline_response40010 import InlineResponse40010
from cryptoapis.model.inline_response40015 import InlineResponse40015
from cryptoapis.model.inline_response40016 import InlineResponse40016
from cryptoapis.model.inline_response40017 import InlineResponse40017
from cryptoapis.model.inline_response40024 import InlineResponse40024
from cryptoapis.model.inline_response40026 import InlineResponse40026
from cryptoapis.model.inline_response40030 import InlineResponse40030
from cryptoapis.model.inline_response40037 import InlineResponse40037
from cryptoapis.model.inline_response4004 import InlineResponse4004
from cryptoapis.model.inline_response40042 import InlineResponse40042
from cryptoapis.model.inline_response40053 import InlineResponse40053
from cryptoapis.model.inline_response401 import InlineResponse401
from cryptoapis.model.inline_response40110 import InlineResponse40110
from cryptoapis.model.inline_response40115 import InlineResponse40115
from cryptoapis.model.inline_response40116 import InlineResponse40116
from cryptoapis.model.inline_response40117 import InlineResponse40117
from cryptoapis.model.inline_response40124 import InlineResponse40124
from cryptoapis.model.inline_response40126 import InlineResponse40126
from cryptoapis.model.inline_response40130 import InlineResponse40130
from cryptoapis.model.inline_response40137 import InlineResponse40137
from cryptoapis.model.inline_response4014 import InlineResponse4014
from cryptoapis.model.inline_response40142 import InlineResponse40142
from cryptoapis.model.inline_response40153 import InlineResponse40153
from cryptoapis.model.inline_response402 import InlineResponse402
from cryptoapis.model.inline_response403 import InlineResponse403
from cryptoapis.model.inline_response40310 import InlineResponse40310
from cryptoapis.model.inline_response40315 import InlineResponse40315
from cryptoapis.model.inline_response40316 import InlineResponse40316
from cryptoapis.model.inline_response40317 import InlineResponse40317
from cryptoapis.model.inline_response40324 import InlineResponse40324
from cryptoapis.model.inline_response40326 import InlineResponse40326
from cryptoapis.model.inline_response40330 import InlineResponse40330
from cryptoapis.model.inline_response40337 import InlineResponse40337
from cryptoapis.model.inline_response4034 import InlineResponse4034
from cryptoapis.model.inline_response40342 import InlineResponse40342
from cryptoapis.model.inline_response40353 import InlineResponse40353
from cryptoapis.model.inline_response404 import InlineResponse404
from cryptoapis.model.inline_response4041 import InlineResponse4041
from cryptoapis.model.inline_response4042 import InlineResponse4042
from cryptoapis.model.inline_response409 import InlineResponse409
from cryptoapis.model.inline_response415 import InlineResponse415
from cryptoapis.model.inline_response422 import InlineResponse422
from cryptoapis.model.inline_response429 import InlineResponse429
from cryptoapis.model.inline_response500 import InlineResponse500
from cryptoapis.model.list_all_unconfirmed_transactions_r import ListAllUnconfirmedTransactionsR
from cryptoapis.model.list_confirmed_transactions_by_address_r import ListConfirmedTransactionsByAddressR
from cryptoapis.model.list_latest_mined_blocks_r import ListLatestMinedBlocksR
from cryptoapis.model.list_transactions_by_block_hash_r import ListTransactionsByBlockHashR
from cryptoapis.model.list_transactions_by_block_height_r import ListTransactionsByBlockHeightR
from cryptoapis.model.list_unconfirmed_transactions_by_address_r import ListUnconfirmedTransactionsByAddressR
class UnifiedEndpointsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_address_details_endpoint = _Endpoint(
settings={
'response_type': (GetAddressDetailsR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/addresses/{address}',
'operation_id': 'get_address_details',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'address',
'context',
],
'required': [
'blockchain',
'network',
'address',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'address':
(str,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'address': 'address',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'address': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_block_details_by_block_hash_endpoint = _Endpoint(
settings={
'response_type': (GetBlockDetailsByBlockHashR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/hash/{blockHash}',
'operation_id': 'get_block_details_by_block_hash',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'block_hash',
'context',
],
'required': [
'blockchain',
'network',
'block_hash',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'block_hash':
(str,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'block_hash': 'blockHash',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'block_hash': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_block_details_by_block_height_endpoint = _Endpoint(
settings={
'response_type': (GetBlockDetailsByBlockHeightR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/height/{height}',
'operation_id': 'get_block_details_by_block_height',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'height',
'context',
],
'required': [
'blockchain',
'network',
'height',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'height':
(int,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'height': 'height',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'height': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_fee_recommendations_endpoint = _Endpoint(
settings={
'response_type': (GetFeeRecommendationsR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/mempool/fees',
'operation_id': 'get_fee_recommendations',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'context',
],
'required': [
'blockchain',
'network',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BITCOIN-CASH": "bitcoin-cash",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"LITECOIN": "litecoin",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_last_mined_block_endpoint = _Endpoint(
settings={
'response_type': (GetLastMinedBlockR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/last',
'operation_id': 'get_last_mined_block',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'context',
],
'required': [
'blockchain',
'network',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_transaction_details_by_transaction_id_endpoint = _Endpoint(
settings={
'response_type': (GetTransactionDetailsByTransactionIDR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/transactions/{transactionId}',
'operation_id': 'get_transaction_details_by_transaction_id',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'transaction_id',
'context',
],
'required': [
'blockchain',
'network',
'transaction_id',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'transaction_id':
(str,),
'context':
(str,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'transaction_id': 'transactionId',
'context': 'context',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'transaction_id': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_all_unconfirmed_transactions_endpoint = _Endpoint(
settings={
'response_type': (ListAllUnconfirmedTransactionsR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/address-transactions-unconfirmed',
'operation_id': 'list_all_unconfirmed_transactions',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'context',
'limit',
'offset',
],
'required': [
'blockchain',
'network',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"ZCASH": "zcash",
"BINANCE-SMART-CHAIN": "binance-smart-chain"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'context':
(str,),
'limit':
(int,),
'offset':
(int,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'context': 'context',
'limit': 'limit',
'offset': 'offset',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'context': 'query',
'limit': 'query',
'offset': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_confirmed_transactions_by_address_endpoint = _Endpoint(
settings={
'response_type': (ListConfirmedTransactionsByAddressR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/addresses/{address}/transactions',
'operation_id': 'list_confirmed_transactions_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'address',
'context',
'limit',
'offset',
],
'required': [
'blockchain',
'network',
'address',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM-CLASSIC": "ethereum-classic",
"ETHEREUM": "ethereum",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"MORDOR": "mordor",
"ROPSTEN": "ropsten"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'address':
(str,),
'context':
(str,),
'limit':
(int,),
'offset':
(int,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'address': 'address',
'context': 'context',
'limit': 'limit',
'offset': 'offset',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'address': 'path',
'context': 'query',
'limit': 'query',
'offset': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_latest_mined_blocks_endpoint = _Endpoint(
settings={
'response_type': (ListLatestMinedBlocksR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/last/{count}',
'operation_id': 'list_latest_mined_blocks',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'network',
'blockchain',
'count',
'context',
],
'required': [
'network',
'blockchain',
'count',
],
'nullable': [
],
'enum': [
'network',
'blockchain',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('network',): {
"TESTNET": "testnet",
"MORDOR": "mordor",
"MAINNET": "mainnet",
"ROPSTEN": "ropsten"
},
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"ETHEREUM-CLASSIC": "ethereum-classic",
"ETHEREUM": "ethereum",
"LITECOIN": "litecoin",
"DASH": "dash",
"DOGECOIN": "dogecoin",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZILLIQA": "zilliqa",
"ZCASH": "zcash",
"XRP": "xrp"
},
},
'openapi_types': {
'network':
(str,),
'blockchain':
(str,),
'count':
(int,),
'context':
(str,),
},
'attribute_map': {
'network': 'network',
'blockchain': 'blockchain',
'count': 'count',
'context': 'context',
},
'location_map': {
'network': 'path',
'blockchain': 'path',
'count': 'path',
'context': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_transactions_by_block_hash_endpoint = _Endpoint(
settings={
'response_type': (ListTransactionsByBlockHashR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/hash/{blockHash}/transactions',
'operation_id': 'list_transactions_by_block_hash',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'block_hash',
'context',
'limit',
'offset',
],
'required': [
'blockchain',
'network',
'block_hash',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"TESTNET": "testnet",
"MAINNET": "mainnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'block_hash':
(str,),
'context':
(str,),
'limit':
(int,),
'offset':
(int,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'block_hash': 'blockHash',
'context': 'context',
'limit': 'limit',
'offset': 'offset',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'block_hash': 'path',
'context': 'query',
'limit': 'query',
'offset': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_transactions_by_block_height_endpoint = _Endpoint(
settings={
'response_type': (ListTransactionsByBlockHeightR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/blocks/height/{height}/transactions',
'operation_id': 'list_transactions_by_block_height',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'height',
'context',
'limit',
'offset',
],
'required': [
'blockchain',
'network',
'height',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"ETHEREUM": "ethereum",
"DASH": "dash",
"DOGECOIN": "dogecoin",
"LITECOIN": "litecoin",
"BITCOIN-CASH": "bitcoin-cash",
"ETHEREUM-CLASSIC": "ethereum-classic",
"BINANCE-SMART-CHAIN": "binance-smart-chain",
"ZCASH": "zcash"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'height':
(int,),
'context':
(str,),
'limit':
(int,),
'offset':
(int,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'height': 'height',
'context': 'context',
'limit': 'limit',
'offset': 'offset',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'height': 'path',
'context': 'query',
'limit': 'query',
'offset': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_unconfirmed_transactions_by_address_endpoint = _Endpoint(
settings={
'response_type': (ListUnconfirmedTransactionsByAddressR,),
'auth': [
'ApiKey'
],
'endpoint_path': '/blockchain-data/{blockchain}/{network}/address-transactions-unconfirmed/{address}',
'operation_id': 'list_unconfirmed_transactions_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'blockchain',
'network',
'address',
'context',
'limit',
'offset',
],
'required': [
'blockchain',
'network',
'address',
],
'nullable': [
],
'enum': [
'blockchain',
'network',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('blockchain',): {
"BITCOIN": "bitcoin",
"BITCOIN-CASH": "bitcoin-cash",
"LITECOIN": "litecoin",
"DOGECOIN": "dogecoin",
"DASH": "dash",
"ETHEREUM": "ethereum",
"ETHEREUM-CLASSIC": "ethereum-classic",
"ZCASH": "zcash",
"BINANCE-SMART-CHAIN": "binance-smart-chain"
},
('network',): {
"MAINNET": "mainnet",
"TESTNET": "testnet",
"ROPSTEN": "ropsten",
"MORDOR": "mordor"
},
},
'openapi_types': {
'blockchain':
(str,),
'network':
(str,),
'address':
(str,),
'context':
(str,),
'limit':
(int,),
'offset':
(int,),
},
'attribute_map': {
'blockchain': 'blockchain',
'network': 'network',
'address': 'address',
'context': 'context',
'limit': 'limit',
'offset': 'offset',
},
'location_map': {
'blockchain': 'path',
'network': 'path',
'address': 'path',
'context': 'query',
'limit': 'query',
'offset': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def get_address_details(
self,
blockchain,
network,
address,
**kwargs
):
"""Get Address Details # noqa: E501
Through this endpoint the customer can receive basic information about a given address based on confirmed/synced blocks only. In the case where there are any incoming or outgoing **unconfirmed** transactions for the specific address, they **will not** be counted or calculated here. Applies only for coins. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_address_details(blockchain, network, address, async_req=True)
>>> result = thread.get()
Args:
blockchain (str): Represents the specific blockchain protocol name, e.g. Ethereum, Bitcoin, etc.
network (str): Represents the name of the blockchain network used; blockchain networks are usually identical as technology and software, but they differ in data, e.g. - \"mainnet\" is the live network with actual data while networks like \"testnet\", \"ropsten\" are test networks.
address (str): Represents the public address, which is a compressed and shortened form of a public key.
Keyword Args:
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GetAddressDetailsR
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['blockchain'] = \
blockchain
kwargs['network'] = \
network
kwargs['address'] = \
address
return self.get_address_details_endpoint.call_with_http_info(**kwargs)
def get_block_details_by_block_hash(
self,
blockchain,
network,
block_hash,
**kwargs
):
"""Get Block Details By Block Hash # noqa: E501
Through this endpoint customers can obtain basic information about a given mined block, specifically by using the `hash` parameter. These block details could include the hash of the specific, the previous and the next block, its transactions count, its height, etc. Blockchain specific data is information such as version, nonce, size, bits, merkleroot, etc. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_block_details_by_block_hash(blockchain, network, block_hash, async_req=True)
>>> result = thread.get()
Args:
blockchain (str): Represents the specific blockchain protocol name, e.g. Ethereum, Bitcoin, etc.
network (str): Represents the name of the blockchain network used; blockchain networks are usually identical as technology and software, but they differ in data, e.g. - \"mainnet\" is the live network with actual data while networks like \"testnet\", \"ropsten\" are test networks.
block_hash (str): Represents the hash of the block, which is its unique identifier. It represents a cryptographic digital fingerprint made by hashing the block header twice through the SHA256 algorithm.
Keyword Args:
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GetBlockDetailsByBlockHashR
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['blockchain'] = \
blockchain
kwargs['network'] = \
network
kwargs['block_hash'] = \
block_hash
return self.get_block_details_by_block_hash_endpoint.call_with_http_info(**kwargs)
def get_block_details_by_block_height(
    self,
    blockchain,
    network,
    height,
    **kwargs
):
    """Get Block Details By Block Height.

    Fetch basic information about the block at the given ``height`` on the
    specified blockchain/network pair: hashes of this, the previous and the
    next block, transaction count, height, plus chain-specific data such as
    version, nonce, size, bits and merkleroot.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        height (int): Number of blocks preceding this block; block numbers
            have no gaps and start at block 0, the "Genesis block".

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        GetBlockDetailsByBlockHeightR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['height'] = height
    return self.get_block_details_by_block_height_endpoint.call_with_http_info(**kwargs)
def get_fee_recommendations(
    self,
    blockchain,
    network,
    **kwargs
):
    """Get Fee Recommendations.

    Obtain real-time fee recommendations computed from Mempool data — gas
    price for Ethereum, fee per byte for Bitcoin, etc. — which makes them
    more accurate than fees derived from already-mined blocks.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        GetFeeRecommendationsR, or the request thread when ``async_req``
        is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    return self.get_fee_recommendations_endpoint.call_with_http_info(**kwargs)
def get_last_mined_block(
    self,
    blockchain,
    network,
    **kwargs
):
    """Get Last Mined Block.

    Fetch the most recently mined block on the given blockchain network,
    with its details: hashes of this, the previous and the next block,
    transaction count, height, plus chain-specific data such as version,
    nonce, size, bits and merkleroot.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        GetLastMinedBlockR, or the request thread when ``async_req`` is
        True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    return self.get_last_mined_block_endpoint.call_with_http_info(**kwargs)
def get_transaction_details_by_transaction_id(
    self,
    blockchain,
    network,
    transaction_id,
    **kwargs
):
    """Get Transaction Details By Transaction ID.

    Obtain details about a transaction by its unique identifier. In
    UTXO-based protocols such as Bitcoin there are distinct
    ``transactionId`` and transaction ``hash`` attributes; in protocols
    like Ethereum the ``hash`` is the single unique value.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        transaction_id (str): Unique transaction identifier —
            ``transactionId`` in UTXO-based protocols like Bitcoin,
            transaction ``hash`` in Ethereum.

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        GetTransactionDetailsByTransactionIDR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['transaction_id'] = transaction_id
    return self.get_transaction_details_by_transaction_id_endpoint.call_with_http_info(**kwargs)
def list_all_unconfirmed_transactions(
    self,
    blockchain,
    network,
    **kwargs
):
    """List All Unconfirmed Transactions.

    List every **unconfirmed** transaction for the specified blockchain
    and network.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        limit (int): Number of items to return per page; server default 50.
        offset (int): Starting index of the returned items; server
            default 0.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListAllUnconfirmedTransactionsR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    return self.list_all_unconfirmed_transactions_endpoint.call_with_http_info(**kwargs)
def list_confirmed_transactions_by_address(
    self,
    blockchain,
    network,
    address,
    **kwargs
):
    """List Confirmed Transactions By Address.

    List transactions by their ``address`` attribute. Each listed
    transaction includes additional details such as hash, height and Unix
    creation timestamp.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        address (str): Public address — a compressed, shortened form of a
            public key.

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        limit (int): Number of items to return per page; server default 50.
        offset (int): Starting index of the returned items; server
            default 0.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListConfirmedTransactionsByAddressR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['address'] = address
    return self.list_confirmed_transactions_by_address_endpoint.call_with_http_info(**kwargs)
def list_latest_mined_blocks(
    self,
    network,
    blockchain,
    count,
    **kwargs
):
    """List Latest Mined Blocks.

    List **up to 50** of the most recently mined blocks.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Note: unlike the other methods in this API, ``network`` precedes
    ``blockchain`` in the positional argument order.

    Args:
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        count (int): How many records are requested.

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListLatestMinedBlocksR, or the request thread when ``async_req``
        is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['network'] = network
    kwargs['blockchain'] = blockchain
    kwargs['count'] = count
    return self.list_latest_mined_blocks_endpoint.call_with_http_info(**kwargs)
def list_transactions_by_block_hash(
    self,
    blockchain,
    network,
    block_hash,
    **kwargs
):
    """List Transactions by Block Hash.

    List transactions by their ``transactionHash`` attribute. Each listed
    transaction includes additional details such as addresses, height and
    Unix creation timestamp.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        block_hash (str): Unique identifier of the block — a cryptographic
            fingerprint produced by hashing the block header twice with
            SHA256.

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        limit (int): Number of items to return per page; server default 50.
        offset (int): Starting index of the returned items; server
            default 0.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListTransactionsByBlockHashR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['block_hash'] = block_hash
    return self.list_transactions_by_block_hash_endpoint.call_with_http_info(**kwargs)
def list_transactions_by_block_height(
    self,
    blockchain,
    network,
    height,
    **kwargs
):
    """List Transactions by Block Height.

    List transactions by their ``blockHeight`` attribute. Each listed
    transaction includes additional details such as hash, addresses and
    Unix creation timestamp.

    Synchronous by default; pass ``async_req=True`` to execute
    asynchronously and receive the request thread instead.

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", or a test network such
            as "testnet" or "ropsten".
        height (int): Number of blocks preceding this block; block numbers
            have no gaps and start at block 0, the "Genesis block".

    Keyword Args:
        context (str): Optional caller-supplied value echoed back in the
            response, useful for correlating batched requests.
        limit (int): Number of items to return per page; server default 50.
        offset (int): Starting index of the returned items; server
            default 0.
        _return_http_data_only (bool): Return only response data, without
            status code and headers. Default True.
        _preload_content (bool): When False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connect, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the
            server. Default True.
        _content_type (str/None): Force the request body content type.
            Default None (predicted from allowed types and body).
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListTransactionsByBlockHeightR, or the request thread when
        ``async_req`` is True.
    """
    # Fill in framework defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_content_type', None)
    kwargs.setdefault('_host_index', None)
    # Positional arguments are forwarded to the endpoint as keywords.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['height'] = height
    return self.list_transactions_by_block_height_endpoint.call_with_http_info(**kwargs)
def list_unconfirmed_transactions_by_address(
    self,
    blockchain,
    network,
    address,
    **kwargs
):
    """List Unconfirmed Transactions by Address  # noqa: E501

    Lists transactions for the given ``address`` that are still **unconfirmed**.
    The call is synchronous by default; pass ``async_req=True`` to get back a
    request thread instead (use ``thread.get()`` to obtain the result).

    Args:
        blockchain (str): Blockchain protocol name, e.g. Ethereum, Bitcoin.
        network (str): Network name, e.g. "mainnet", "testnet", "ropsten".
        address (str): Public address (compressed, shortened public key).

    Keyword Args:
        context (str): Optional correlation token echoed back in the response.
        limit (int): Items per page. Server default: 50.
        offset (int): Starting index of the returned items. Server default: 0.
        _return_http_data_only (bool): Return the payload without status code
            and headers. Default True.
        _preload_content (bool): If False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default True.
        _request_timeout (int/float/tuple): Total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): Type-check data sent to the server.
            Default True.
        _check_return_type (bool): Type-check data received from the server.
            Default True.
        _content_type (str/None): Force the request body content-type.
            Default None (predicted from allowed content-types and body).
        _host_index (int/None): Index of the server to use. Default is read
            from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        ListUnconfirmedTransactionsByAddressR, or the request thread when
        called asynchronously.
    """
    # Fill in every client option the caller did not supply explicitly.
    option_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_content_type', None),
        ('_host_index', None),
    )
    for option, fallback in option_defaults:
        kwargs[option] = kwargs.get(option, fallback)
    # The positional arguments travel to the endpoint as keywords as well.
    kwargs['blockchain'] = blockchain
    kwargs['network'] = network
    kwargs['address'] = address
    return self.list_unconfirmed_transactions_by_address_endpoint.call_with_http_info(**kwargs)
| 42.704082 | 484 | 0.514945 |
a38b317b32dcbc6c9dff08940ace5dc60a5e39cd | 1,853 | py | Python | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | examples/run_ranch_baseline.py | pinjutien/DeepExplain | a80d85dcd5adc90968b6924a7ef39528170830f0 | [
"MIT"
] | null | null | null | """
RANdom CHoice baseline (RANCH): random image from the target class
"""
import random
import numpy as np
import tensorflow_datasets as tfds
from tqdm import tqdm
# --- RANCH configuration --------------------------------------------------
# Exactly one dataset block is active at a time; the commented blocks are the
# MNIST and SVHN alternatives with their own output paths and sample counts.
# output_pattern = '/home/ec2-user/gan_submission_1/mnist/mnist_v2/ranch_baselines_%d'
# tfds_name = 'mnist'
# target_size = [28, 28, 1]
# num_class = 10
# n_samples = 10000
# output_pattern = '/home/ec2-user/gan_submission_1/svhn/svhn_v2/ranch_baselines_%d'
# tfds_name = 'svhn_cropped'
# target_size = [32, 32, 3]
# num_class = 10
# n_samples = 26032
output_pattern = '/home/ec2-user/gan_submission_1/cifar10/cifar10_v2/ranch_baselines_%d'  # '%d' is filled with n_samples
tfds_name = 'cifar10'
target_size = [32, 32, 3]  # NOTE(review): unused in the visible script — confirm
num_class = 10
n_samples = 10000  # size of the CIFAR-10 test split
if __name__ == '__main__':
    # obtain train images (labels kept so we can pick baselines per class)
    data_train = list(tfds.as_numpy(tfds.load(tfds_name, split='train')))
    # obtain test images with target labels
    ds_test = tfds.load(tfds_name, split='test')
    dslist = list(tfds.as_numpy(ds_test.take(n_samples)))
    # Draw a target class per test image from the num_class-1 *other* classes:
    # sample in [0, num_class-2], then shift past the true label in the loop.
    ys_target = np.random.RandomState(seed=222).randint(num_class - 1, size=n_samples)
    xs, ys_label = [], []
    for ind, sample in enumerate(dslist):
        xs.append(sample['image'])
        ys_label.append(sample['label'])
        if ys_target[ind] >= sample['label']:
            ys_target[ind] += 1  # skip over the true label so target != label
    # Normalise the training pool to [0, 1]; baselines are emitted in this range.
    for ind in range(len(data_train)):
        data_train[ind]['image'] = data_train[ind]['image'] / 255.0
    xs = np.array(xs)
    xs = xs / 255.5  # NOTE(review): 255.5 looks like a typo for 255.0 — confirm (xs is unused below)
    ys_label = np.array(ys_label)
    # Index the training pool by class label for O(1) random choice.
    index_map = {i: [] for i in range(10)}
    for i, train_sample in enumerate(data_train):
        index_map[train_sample['label']].append(i)
    # RANCH baseline: for each test image, pick a random training image
    # belonging to the sampled target class.
    outputs = []
    for ind in tqdm(range(n_samples)):
        i = random.choice(index_map[ys_target[ind]])
        outputs.append(data_train[i]['image'])
    outputs = np.array(outputs)
np.save(output_pattern % n_samples, outputs) | 28.953125 | 88 | 0.67674 |
a38b4a3c4607025ed47cb0e6994bcee905fa97f0 | 359 | py | Python | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | 1 | 2020-05-18T21:59:39.000Z | 2020-05-18T21:59:39.000Z | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | null | null | null | pageOne.py | 3bru/qt-tkinter-Test | 41eefe7621c6a0bf3a25b4503df7a7451fc363b2 | [
"MIT"
] | null | null | null | import sqlite3, os
# Open (or create) the local SQLite database and record a sample note.
con = sqlite3.connect('database.sqlite')
im = con.cursor()
tablo = """CREATE TABLE IF NOT EXISTS writes(day, topic, texti)"""
deger = """INSERT INTO writes VALUES('oneDay', 'nmap', 'nmaple ilgili bisiler')"""
try:
    im.execute(tablo)
    im.execute(deger)
    con.commit()
    im.execute("""SELECT * FROM writes""")
    # Fetch before closing so `veriler` stays usable afterwards.
    veriler = im.fetchall()
    print(veriler)
finally:
    # Fix: the original never released the cursor/connection (resource leak).
    im.close()
    con.close()
| 22.4375 | 82 | 0.696379 |
a38b9d380fbd10ce2b7350457ab818a75b222fac | 6,075 | py | Python | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 14 | 2021-08-28T04:15:37.000Z | 2021-12-28T17:00:33.000Z | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 2 | 2021-09-26T01:27:06.000Z | 2021-12-24T19:06:09.000Z | basicsr/metrics/psnr_ssim.py | BCV-Uniandes/RSR | dad60eedd3560f2655e3d1ed444153ed2616af2e | [
"zlib-acknowledgement"
] | 1 | 2021-10-18T15:48:56.000Z | 2021-10-18T15:48:56.000Z | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
def calculate_psnr(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False):
"""Calculate PSNR (Peak Signal-to-Noise Ratio).
Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result.
"""
assert img1.shape == img2.shape, (
f'Image shapes are differnet: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20. * np.log10(255. / np.sqrt(mse))
def _ssim(img1, img2):
    """Compute SSIM for a single-channel image pair.

    Helper for :func:`calculate_ssim`; inputs are HW arrays in [0, 255].

    Args:
        img1 (ndarray): First image, range [0, 255], order 'HWC'.
        img2 (ndarray): Second image, range [0, 255], order 'HWC'.

    Returns:
        float: Mean SSIM over the valid (border-trimmed) region.
    """
    # Stabilising constants from the SSIM paper (K1=0.01, K2=0.03, L=255).
    c1 = (0.01 * 255)**2
    c2 = (0.03 * 255)**2

    a = img1.astype(np.float64)
    b = img2.astype(np.float64)

    # 11x11 Gaussian window with sigma=1.5, as in the reference implementation.
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())

    # Local means; a 5-pixel border is dropped to keep only valid filter output.
    mu_a = cv2.filter2D(a, -1, window)[5:-5, 5:-5]
    mu_b = cv2.filter2D(b, -1, window)[5:-5, 5:-5]
    mu_a_sq = mu_a**2
    mu_b_sq = mu_b**2
    mu_ab = mu_a * mu_b

    # Local (co)variances via E[x*y] - E[x]E[y].
    var_a = cv2.filter2D(a**2, -1, window)[5:-5, 5:-5] - mu_a_sq
    var_b = cv2.filter2D(b**2, -1, window)[5:-5, 5:-5] - mu_b_sq
    cov_ab = cv2.filter2D(a * b, -1, window)[5:-5, 5:-5] - mu_ab

    numerator = (2 * mu_ab + c1) * (2 * cov_ab + c2)
    denominator = (mu_a_sq + mu_b_sq + c1) * (var_a + var_b + c2)
    return (numerator / denominator).mean()
def calculate_ssim(img1,
                   img2,
                   crop_border,
                   input_order='HWC',
                   test_y_channel=False):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results match the officially released MATLAB code at
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.
    For three-channel images, SSIM is calculated per channel and averaged.

    Args:
        img1 (ndarray): Image with range [0, 255].
        img2 (ndarray): Image with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the SSIM calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: SSIM value.

    Raises:
        ValueError: If ``input_order`` is neither 'HWC' nor 'CHW'.
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')  # fixed typo "differnet"
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)

    # Per-channel SSIM, averaged over channels.
    ssims = [_ssim(img1[..., i], img2[..., i]) for i in range(img1.shape[2])]
    return np.array(ssims).mean()
import torch
import torch.nn as nn
import lpips
import torchvision
import numpy
# from misc.kernel_loss import shave_a2b
def calculate_lpips(output, gt, device):
    """Return the mean LPIPS distance between ``output`` and ``gt`` on ``device``.

    A fresh AlexNet-based :class:`LPIPS` metric is built per call (matches the
    original behaviour).
    """
    metric = LPIPS(net='alex', verbose=False).to(device)
    return metric(output, gt).cpu().numpy().mean()
class LPIPS(nn.Module):
    """Frozen wrapper around the ``lpips`` perceptual metric (AlexNet by default).

    With ``vgg19=True`` a ``VGGFeatureExtractor`` is used instead —
    NOTE(review): that class is not defined or imported in this file; confirm
    it is available before enabling ``vgg19``.
    """
    def __init__(self, net='alex', verbose=True, device='cpu', vgg19=False):
        super().__init__()
        if vgg19:
            self.lpips = VGGFeatureExtractor(device=device).to(device)
        else:
            self.lpips = lpips.LPIPS(net=net, verbose=verbose).to(device)
        # imagenet normalization for range [-1, 1]
        # Freeze the metric: eval mode and no gradients through its weights.
        self.lpips.eval()
        for param in self.lpips.parameters():
            param.requires_grad = False
    def perceptual_rec(self, x, y):
        # L1 reconstruction loss plus the LPIPS distance between x and y.
        loss_rgb = nn.L1Loss()(x, y)
        loss = loss_rgb + self(x, y)
        return loss
    @torch.no_grad()
    def forward(self, x, y):
        # normalization -1,+1
        # ``normalize=True`` tells the metric the inputs are in [0, 1] and
        # rescales them to [-1, 1] internally.
        # if x.size(-1) > y.size(-1):
        #     x = shave_a2b(x, y)
        # elif x.size(-1) < y.size(-1):
        #     y = shave_a2b(y, x)
        lpips_value = self.lpips(x, y, normalize=True) # True
return lpips_value.mean() | 33.379121 | 80 | 0.596708 |
a38ee10bfa692aa23805d2d2b99b5f0481e7ce48 | 14,224 | py | Python | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 7 | 2019-01-27T02:30:56.000Z | 2020-04-29T18:47:21.000Z | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 1 | 2020-01-28T04:40:15.000Z | 2020-05-01T02:37:40.000Z | data/dataset.py | limingwu8/Pneumonia-Detection | 8541e0f34a72f6e94773bf234cfd071732229b2b | [
"MIT"
] | 3 | 2019-08-09T09:16:00.000Z | 2021-07-01T11:45:00.000Z | import os
import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from skimage import io, transform
from utils.Config import opt
from skimage import exposure
import matplotlib.pylab as plt
from utils import array_tool as at
from sklearn.model_selection import train_test_split
from data.data_utils import read_image, resize_bbox, flip_bbox, random_flip, flip_masks
from utils.vis_tool import apply_mask_bbox
import matplotlib.patches as patches
# Label names for the detection classes; a single class: 'p' = Pneumonia.
# Fix: ('p') is just a parenthesised string — the trailing comma makes it a tuple.
DSB_BBOX_LABEL_NAMES = ('p',)  # Pneumonia
def inverse_normalize(img):
    """Undo dataset normalisation so a CHW image can be shown in [0, 255]."""
    if opt.caffe_pretrain:
        # Caffe path: add back the per-channel mean, flip channels, clamp.
        mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)
        restored = img + mean
        return restored[::-1, :, :].clip(min=0, max=255)
    # PyTorch path: approximate inverse of Normalize(mean~0.45, std~0.225).
    return (img * 0.225 + 0.45).clip(min=0, max=1) * 255
"""Transforms:
Data augmentation
"""
class Transform(object):
    """Resize/flip augmentation applied to dataset sample dicts.

    Samples that carry targets (more than the two ``img_id``/``image`` keys)
    get a scale-aware bbox resize plus a random horizontal flip; plain
    ``{img_id, image}`` samples are only rescaled.
    """

    def __init__(self, min_size=600, max_size=1000, train=True):
        self.min_size = min_size
        self.max_size = max_size
        self.train = train

    def __call__(self, in_data):
        has_targets = len(in_data.keys()) != 2
        if has_targets:
            img_id = in_data['img_id']
            img = in_data['image']
            bbox = in_data['bbox']
            label = in_data['label']
            _, H, W = img.shape
            img = preprocess(img, self.min_size, self.max_size, self.train)
            _, o_H, o_W = img.shape
            scale = o_H / H
            # Boxes are rescaled before the flip so both use the new size.
            bbox = resize_bbox(bbox, (H, W), (o_H, o_W))
            img, params = random_flip(img, x_random=True, y_random=False, return_param=True)
            bbox = flip_bbox(bbox, (o_H, o_W), x_flip=params['x_flip'], y_flip=params['y_flip'])
            label = label if label is None else label.copy()
            return {'img_id': img_id, 'image': img.copy(), 'bbox': bbox,
                    'label': label, 'scale': scale}
        img_id = in_data['img_id']
        img = in_data['image']
        _, H, W = img.shape
        img = preprocess(img, self.min_size, self.max_size, self.train)
        _, o_H, o_W = img.shape
        scale = o_H / H
        return {'img_id': img_id, 'image': img.copy(), 'scale': scale}
def preprocess(img, min_size=600, max_size=1000, train=True):
    """Scale a CHW image so the short edge reaches ``min_size`` without the
    long edge exceeding ``max_size``, then normalise it for the backbone.

    Args:
        img (ndarray): CHW image with values in [0, 255].
        min_size (int): Target for the shorter edge.
        max_size (int): Upper bound for the longer edge.
        train (bool): Unused here; kept for interface compatibility.

    Returns:
        ndarray: The resized, normalised image.
    """
    C, H, W = img.shape
    # Largest scale satisfying both edge constraints.
    scale = min(min_size / min(H, W), max_size / max(H, W))

    normalize = caffe_normalize if opt.caffe_pretrain else pytorch_normalze

    if opt.hist_equalize:
        # Histogram-equalisation path: equalise first (output already in
        # [0, 1]), then resize and normalise.
        equalized = exposure.equalize_hist(img)
        equalized = transform.resize(equalized, (C, H * scale, W * scale), mode='reflect')
        return normalize(equalized)

    scaled = img / 255.
    scaled = transform.resize(scaled, (C, H * scale, W * scale), mode='reflect')
    return normalize(scaled)
def pytorch_normalze(img):
    """Normalise an RGB image (values ~[0, 1]) with ImageNet statistics.

    See https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
    and https://github.com/pytorch/vision/issues/223; output is roughly in
    [-1, 1] per channel.
    """
    imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
    return imagenet(torch.from_numpy(img)).numpy()
def caffe_normalize(img):
    """Normalise a 3xHxW RGB image in [0, 1] the Caffe way.

    Channels are reordered to BGR, rescaled to [0, 255], and the per-channel
    mean is subtracted; the result is float32, roughly in [-125, 125].
    """
    # Reversing the channel axis swaps RGB -> BGR for a 3-channel image.
    bgr = img[::-1, :, :] * 255
    mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)
    return (bgr - mean).astype(np.float32, copy=True)
class RSNADataset(Dataset):
    """RSNA pneumonia samples read from ``root_dir/<img_id>/image.png``.

    Args:
        root_dir (string): Directory containing one sub-directory per image.
        img_id (list): Image ids (sub-directory names) to serve.
        transform (bool): Apply the resize/flip :class:`Transform` pipeline.
        train (bool): Passed through to :class:`Transform`. Samples whose
            directory contains ``bbox.npy`` also carry ``bbox``/``label``.
    """

    def __init__(self, root_dir, img_id, transform=True, train=True):
        self.root_dir = root_dir
        self.img_id = img_id
        self.transform = transform
        self.tsf = Transform(opt.min_size, opt.max_size, train)

    def __len__(self):
        return len(self.img_id)

    def __getitem__(self, idx):
        sample_dir = os.path.join(self.root_dir, self.img_id[idx])
        image = read_image(os.path.join(sample_dir, 'image.png'), np.float32, True)
        bbox_path = os.path.join(sample_dir, 'bbox.npy')
        if os.path.exists(bbox_path):
            bbox = np.load(bbox_path)
            # Every box belongs to the single "pneumonia" class -> label 0.
            label = np.zeros(len(bbox)).astype(np.int32)
            sample = {'img_id': self.img_id[idx], 'image': image.copy(),
                      'bbox': bbox, 'label': label}
        else:
            sample = {'img_id': self.img_id[idx], 'image': image.copy()}
        if self.transform:
            sample = self.tsf(sample)
        return sample
class RSNADatasetTest(Dataset):
    """Test-time RSNA dataset: every sub-directory of ``root_dir`` is a sample.

    Args:
        root_dir (string): Directory containing one sub-directory per image.
        transform (bool): Apply the resize :class:`Transform` pipeline.
        train (bool): Passed through to :class:`Transform` (default False).
    """

    def __init__(self, root_dir, transform=True, train=False):
        self.root_dir = root_dir
        self.img_id = os.listdir(root_dir)
        self.transform = transform
        self.tsf = Transform(opt.min_size, opt.max_size, train)

    def __len__(self):
        return len(self.img_id)

    def __getitem__(self, idx):
        image_path = os.path.join(self.root_dir, self.img_id[idx], 'image.png')
        image = read_image(image_path, np.float32, True)
        sample = {'img_id': self.img_id[idx], 'image': image.copy()}
        if self.transform:
            sample = self.tsf(sample)
        return sample
def get_train_loader(root_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
    """Build a DataLoader over every sample in ``root_dir``.

    (The original docstring documented ``split``/``val_ratio`` parameters that
    do not exist on this function and described a train/valid pair.)

    Args:
        root_dir: Root directory of the data set (one sub-directory per image).
        batch_size: Batch size.
        shuffle: Whether to shuffle the samples.
        num_workers: Number of loader workers (use 1 with CUDA).
        pin_memory: Pin CPU buffers rather than pageable memory (set True with CUDA).

    Returns:
        DataLoader over the full data set.
    """
    # Sort for a deterministic sample order when shuffle is off.
    img_ids = sorted(os.listdir(root_dir))
    transformed_dataset = RSNADataset(root_dir=root_dir, img_id=img_ids, transform=True, train=True)
    return DataLoader(transformed_dataset, batch_size=batch_size,
                      shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
def get_train_val_loader(root_dir, batch_size=16, val_ratio=0.2, shuffle=False, num_workers=4, pin_memory=False):
    """Split ``root_dir`` into train/validation sets and build a DataLoader for each.

    (The original docstring documented a nonexistent ``split`` parameter and a
    conditional return; this function always returns two loaders.)

    Args:
        root_dir: Root directory of the data set (one sub-directory per image).
        batch_size: Batch size for both loaders.
        val_ratio: Fraction of samples held out for validation.
        shuffle: Whether to shuffle when splitting and loading.
        num_workers: Number of loader workers (use 1 with CUDA).
        pin_memory: Pin CPU buffers rather than pageable memory (set True with CUDA).

    Returns:
        Tuple ``(train_loader, val_loader)`` of DataLoaders.
    """
    img_ids = sorted(os.listdir(root_dir))
    # Fixed random_state keeps the split reproducible across runs.
    train_id, val_id = train_test_split(img_ids, test_size=val_ratio, random_state=55, shuffle=shuffle)
    train_loader = DataLoader(
        RSNADataset(root_dir=root_dir, img_id=train_id, transform=True, train=True),
        batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
    val_loader = DataLoader(
        RSNADataset(root_dir=root_dir, img_id=val_id, transform=True, train=True),
        batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
    return train_loader, val_loader
def get_test_loader(test_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
    """Build a DataLoader over the test set in ``test_dir``.

    (The original docstring was copy-pasted from the train loader and claimed
    to return training/validation loaders.)

    Args:
        test_dir: Root directory of the test set (one sub-directory per image).
        batch_size: Batch size.
        shuffle: Whether to shuffle the samples.
        num_workers: Number of loader workers (use 1 with CUDA).
        pin_memory: Pin CPU buffers rather than pageable memory (set True with CUDA).

    Returns:
        DataLoader over the test set.
    """
    transformed_dataset = RSNADatasetTest(root_dir=test_dir)
    return DataLoader(transformed_dataset, batch_size=batch_size,
                      shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
def show_batch_train(sample_batched):
    """Plot the first image of a training batch with its bounding boxes.

    Only batches that carry targets (5 keys: img_id/image/bbox/label/scale)
    are drawn; other batches are ignored.
    """
    if len(sample_batched.keys()) == 5:
        img_id = sample_batched['img_id']
        image = sample_batched['image']
        bbox = sample_batched['bbox'][0, :]
        pixels = inverse_normalize(at.tonumpy(image))
        ax = plt.subplot(111)
        ax.imshow(np.transpose(np.squeeze(pixels / 255.), (1, 2, 0)))
        ax.set_title(img_id[0])
        for box in bbox:
            # Boxes are stored as (y1, x1, y2, x2).
            y1, x1, y2, x2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
            rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                     linewidth=1, edgecolor='r', facecolor='none')
            ax.add_patch(rect)
        plt.show()
def show_batch_test(sample_batch):
    """Display the first image of a test batch."""
    img_id, image = sample_batch['img_id'], sample_batch['image']
    pixels = inverse_normalize(at.tonumpy(image[0]))
    plt.figure()
    plt.imshow(np.transpose(at.tonumpy(pixels / 255), (1, 2, 0)))
    plt.show()
if __name__ == '__main__':
    # Smoke-test the data pipeline: build the train/validation split and
    # visualise each training batch with its ground-truth boxes.
    # (Removed ~35 lines of commented-out experiments: test-loader iteration,
    # densenet loaders and class-balance counting — dead code.)
    train_loader, val_loader = get_train_val_loader(opt.root_dir, batch_size=opt.batch_size, val_ratio=0.1,
                                                    shuffle=True, num_workers=opt.num_workers,
                                                    pin_memory=opt.pin_memory)
    for i_batch, sample in enumerate(train_loader):
        show_batch_train(sample)
| 41.228986 | 113 | 0.646161 |
a38f9c51d087930a15e07db3d41e43fedee278f9 | 8,344 | py | Python | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 67 | 2021-05-12T15:54:28.000Z | 2022-03-12T15:55:35.000Z | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 71 | 2021-05-01T06:07:37.000Z | 2022-01-28T16:54:46.000Z | make_dataset/kor_sample_dataset.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 14 | 2021-05-24T10:57:27.000Z | 2022-02-18T06:34:11.000Z | import json
import os.path as p
from collections import defaultdict
import pandas as pd
from datasets import load_dataset
from datasets import concatenate_datasets
from datasets import Sequence, Value, Features, Dataset, DatasetDict
from utils.tools import get_args
# Shared HuggingFace `datasets` schema for every SQuAD-style record built in
# this module: answers carry parallel text/answer_start lists, plus the usual
# id/context/question/title string columns.
f = Features(
    {
        "answers": Sequence(
            feature={"text": Value(dtype="string", id=None), "answer_start": Value(dtype="int32", id=None)},
            length=-1,
            id=None,
        ),
        "id": Value(dtype="string", id=None),
        "context": Value(dtype="string", id=None),
        "question": Value(dtype="string", id=None),
        "title": Value(dtype="string", id=None),
    }
)
def remove_multiple_indexes(rlist, indexes):
    """Delete the items at ``indexes`` from ``rlist`` in place and return it.

    The original required ``indexes`` to be pre-sorted in descending order and
    enforced that only with an ``assert`` (which disappears under ``python -O``).
    Deleting from the highest index down keeps the remaining positions valid,
    so the function now sorts internally and accepts indexes in any order —
    a backward-compatible generalisation.

    Args:
        rlist (list): List to mutate.
        indexes (list[int]): Positions to delete (any order, no duplicates).

    Returns:
        list: The same ``rlist`` object, with the items removed.
    """
    for index in sorted(indexes, reverse=True):
        del rlist[index]
    return rlist
def filtering_by_doc_len(kor_dataset, doc_len=512):
    """Drop every example whose context is shorter than ``doc_len`` characters.

    Returns a new ``Dataset`` (module schema ``f``) containing only the
    long-context rows.
    """
    drop = sorted(
        (idx for idx, context in enumerate(kor_dataset["context"]) if len(context) < doc_len),
        reverse=True,
    )
    kept = {key: remove_multiple_indexes(kor_dataset[key], drop)
            for key in kor_dataset.features.keys()}
    return Dataset.from_pandas(pd.DataFrame(kept), features=f)
def filtering_by_dup_question(kor_dataset, dup_limit=4):
    """Keep at most ``dup_limit`` questions per context.

    Occurrences of an already-seen context beyond the limit are dropped;
    returns a new ``Dataset`` with the module schema ``f``.
    """
    seen = defaultdict(int)
    drop = []
    for idx, context in enumerate(kor_dataset["context"]):
        seen[context] += 1
        if seen[context] > dup_limit:
            drop.append(idx)
    drop.sort(reverse=True)
    kept = {key: remove_multiple_indexes(kor_dataset[key], drop)
            for key in kor_dataset.features.keys()}
    return Dataset.from_pandas(pd.DataFrame(kept), features=f)
def sampling_by_ans_start_weights(kor_dataset, sample=8000):
    """Sample ``sample`` rows, favouring rare answer-start positions.

    ``answer_start`` values are bucketed per 100 characters; each bucket gets
    weight ``(1 - bucket_share) ** 6`` so under-represented positions are
    sampled more often. The draw is reproducible (``random_state=42``).
    """
    frame = kor_dataset.to_pandas()
    bucket = 100
    bucket_counts = defaultdict(int)
    for _, row in frame.iterrows():
        bucket_counts[row["answers"]["answer_start"][0] // bucket] += 1
    total = sum(bucket_counts.values())
    bucket_weights = defaultdict(float)
    for key, count in bucket_counts.items():
        bucket_weights[key] = (1 - (count / total)) ** 6  # exponent tuned by hand
    frame["weight"] = frame["answers"].apply(
        lambda answers: bucket_weights[answers["answer_start"][0] // bucket])
    frame = frame.sample(n=sample, weights="weight", random_state=42)
    return Dataset.from_pandas(frame, features=f)
def sampling_by_doc_lens(kor_dataset, sample):
    """Sample ``sample`` rows, favouring rare context lengths.

    Context lengths are bucketed per 100 characters and weighted by
    ``(1 - bucket_share) ** 6``, mirroring
    :func:`sampling_by_ans_start_weights`. Reproducible (``random_state=42``).
    """
    frame = kor_dataset.to_pandas()
    bucket = 100
    bucket_counts = defaultdict(int)
    for _, row in frame.iterrows():
        bucket_counts[len(row["context"]) // bucket] += 1
    total = sum(bucket_counts.values())
    bucket_weights = defaultdict(float)
    for key, count in bucket_counts.items():
        bucket_weights[key] = (1 - (count / total)) ** 6  # exponent tuned by hand
    frame["weight"] = frame["context"].apply(
        lambda context: bucket_weights[len(context) // bucket])
    frame = frame.sample(n=sample, weights="weight", random_state=42)
    return Dataset.from_pandas(frame, features=f)
def make_kor_dataset_v1(args):
    """Build KorQuad sample set v1 and save it under ``train_data_dir/kor_dataset``.

    Pipeline: (1) keep contexts of at least 512 characters (the KLUE MRC
    minimum), (2) cap questions at 4 per context, (3) weight-sample 8000 rows
    by answer-start position.

    Raises:
        FileExistsError: If the output directory already exists.
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path}는 이미 존재하는 파일입니다!")

    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    kor_dataset = sampling_by_ans_start_weights(kor_dataset, sample=8000)

    DatasetDict({"train": kor_dataset}).save_to_disk(kor_dataset_path)
    print(f"{kor_dataset_path}에 저장되었습니다!")
def make_kor_dataset_v2(args):
    """Build KorQuad sample set v2 and save it under ``train_data_dir/kor_dataset_v2``.

    (The original docstring was copy-pasted from v1 and titled
    "KorQuad Dataset V1".)  Pipeline: (1) keep contexts of at least 512
    characters, (2) cap questions at 4 per context, (3) weight-sample 8000
    rows by answer-start position, (4) weight-sample 4000 of those by
    context length.

    Raises:
        FileExistsError: If the output directory already exists.
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset_v2")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path}는 이미 존재하는 파일입니다!")
    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    # (1) Minimum context length matches KLUE MRC (512 chars).
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    # (2) At most 4 questions per context.
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    # (3) Weight-sample by answer_start (default sample=8000).
    kor_dataset = sampling_by_ans_start_weights(kor_dataset)
    # (4) Weight-sample 4000 rows by document length.
    kor_dataset = sampling_by_doc_lens(kor_dataset, sample=4000)
    kor_datasets = DatasetDict({"train": kor_dataset})
    kor_datasets.save_to_disk(kor_dataset_path)
    print(f"{kor_dataset_path}에 저장되었습니다!")
def get_etr_dataset(args):
    """Load the ETRI QA json and convert it to a SQuAD-style ``Dataset``.

    Expects ``train_data_dir/etr_qa_dataset.json``. Only the first paragraph
    of each article and the first answer of each question are used; records
    get synthetic ids ``etr-custom-<n>``.

    Raises:
        FileNotFoundError: If the json file is missing.
    """
    etr_path = p.join(args.path.train_data_dir, "etr_qa_dataset.json")
    if not p.exists(etr_path):
        raise FileNotFoundError(f"ETRI 데이터 셋 {etr_path}로 파일명 바꿔서 데이터 넣어주시길 바랍니다.")

    with open(etr_path, "r") as fp:
        etr_dict = json.load(fp)

    records = defaultdict(list)
    cnt = 0
    for datas in etr_dict["data"]:
        paragraph = datas["paragraphs"][0]
        for questions in paragraph["qas"]:
            first_answer = questions["answers"][0]
            records["id"].append(f"etr-custom-{cnt}")
            records["title"].append(datas["title"])
            records["context"].append(paragraph["context"])
            records["question"].append(questions["question"])
            records["answers"].append({
                "answer_start": [first_answer["answer_start"]],
                "text": [first_answer["text"]],
            })
            cnt += 1

    schema = Features(
        {
            "answers": Sequence(
                feature={"text": Value(dtype="string", id=None),
                         "answer_start": Value(dtype="int32", id=None)},
                length=-1,
                id=None,
            ),
            "id": Value(dtype="string", id=None),
            "context": Value(dtype="string", id=None),
            "question": Value(dtype="string", id=None),
            "title": Value(dtype="string", id=None),
        }
    )
    return Dataset.from_pandas(pd.DataFrame(records), features=schema)
def make_etr_dataset_v1(args):
    """Build and save version 1 of the ETRI dataset.

    Pipeline (translated from the original Korean notes):
    1. Keep only documents up to 512 characters (KLUE MRC minimum length).
    2. Drop duplicated contexts, keeping at most 4 questions per context.
    3. Weighted-sample 3000 examples by answer-start position.

    Raises FileExistsError if the output directory already exists, so an
    existing dataset is never silently overwritten.
    """
    etr_dataset_path = p.join(args.path.train_data_dir, "etr_dataset_v1")
    if p.exists(etr_dataset_path):
        raise FileExistsError(f"{etr_dataset_path}는 이미 존재하는 파일입니다!")
    etr_dataset = get_etr_dataset(args)
    # (1) document length: 512 is the KLUE MRC minimum length
    etr_dataset = filtering_by_doc_len(etr_dataset, doc_len=512)
    # (2) remove duplicated contexts: at most 4 questions per context
    etr_dataset = filtering_by_dup_question(etr_dataset, dup_limit=4)
    # (3) sample 3000 examples weighted by answer_start position
    etr_dataset = sampling_by_ans_start_weights(etr_dataset, sample=3000)
    # (4) save only the ETR dataset (as the "train" split)
    etr_datasets = DatasetDict({"train": etr_dataset})
    etr_datasets.save_to_disk(etr_dataset_path)
    print(f"{etr_dataset_path}에 저장되었습니다!")
def main(args):
    """Build every custom dataset variant in sequence."""
    for build_dataset in (make_kor_dataset_v1, make_kor_dataset_v2, make_etr_dataset_v1):
        build_dataset(args)
if __name__ == "__main__":
    # Entry point: parse command-line options, then build all dataset variants.
    args = get_args()
    main(args)
| 28.772414 | 112 | 0.65604 |
a3927c6d9fb19dc907aa3851f9fb6293c833eaf2 | 1,737 | py | Python | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 45 | 2018-07-08T09:49:30.000Z | 2022-03-23T07:01:15.000Z | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 59 | 2018-07-05T22:05:58.000Z | 2022-02-20T01:01:20.000Z | tests/test_simple.py | teosavv/pyembroidery | 00985f423e64ea1a454e5484012c19a64f26eb2c | [
"MIT"
] | 23 | 2018-08-10T17:58:04.000Z | 2022-03-29T03:41:46.000Z | import os
import shutil
import pyembroidery
import test_fractals
def test_simple():
    """Smoke test: generate a fractal pattern and write it in every supported format.

    Bug fix: "pes version" was assigned ``1,`` and ``6,`` — the stray
    trailing comma makes each a one-element tuple instead of the int the
    PES writer expects. Also, the write calls are now wrapped in
    try/finally so the temp directory is removed even when a writer fails.
    """
    pattern = pyembroidery.EmbPattern()
    pattern.add_thread({
        "rgb": 0x0000FF,
        "name": "Blue Test",
        "catalog": "0033",
        "brand": "PyEmbroidery Brand Thread"
    })
    pattern.add_thread({
        "rgb": 0x00FF00,
        "name": "Green",
        "catalog": "0034",
        "brand": "PyEmbroidery Brand Thread"
    })
    test_fractals.generate(pattern)
    settings = {
        "tie_on": True,
        "tie_off": True
    }
    temp_dir = "temp"
    if not os.path.isdir(temp_dir):
        os.mkdir(temp_dir)
    try:
        pyembroidery.write(pattern, temp_dir + "/generated.u01", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.pec", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.pes", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.exp", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.dst", settings)
        settings["extended header"] = True
        pyembroidery.write(pattern, temp_dir + "/generated-eh.dst", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.jef", settings)
        pyembroidery.write(pattern, temp_dir + "/generated.vp3", settings)
        settings["pes version"] = 1  # was `1,` — a tuple, not an int
        pyembroidery.write(pattern, temp_dir + "/generatedv1.pes", settings)
        settings["truncated"] = True
        pyembroidery.write(pattern, temp_dir + "/generatedv1t.pes", settings)
        settings["pes version"] = 6  # was `6,` — same trailing-comma typo
        pyembroidery.write(pattern, temp_dir + "/generatedv6t.pes", settings)
        pyembroidery.convert(temp_dir + "/generated.exp", temp_dir + "/genconvert.dst",
                             {"stable": False, "encode": False})
    finally:
        # Clean up even on failure so reruns start from a fresh directory.
        shutil.rmtree(temp_dir)
| 31.581818 | 84 | 0.651698 |
a392dab4e0208bcba731af6d1b6b1dd6d3c0e78a | 21,317 | py | Python | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | null | null | null | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | 1 | 2021-12-17T16:56:12.000Z | 2021-12-19T15:53:55.000Z | train.py | eapache/HawkEars | 3b979166ed09de9f9254b830bb57499e1da7a015 | [
"MIT"
] | 1 | 2021-12-17T16:59:04.000Z | 2021-12-17T16:59:04.000Z | # Train the selected neural network model on spectrograms for birds and a few other classes.
# Train the selected neural network model on spectrograms for birds and a few other classes.
# To see command-line arguments, run the script with -h argument.
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import shutil
import sys
import time
import zlib
from collections import namedtuple
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # 1 = no info, 2 = no warnings, 3 = no errors
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
from tensorflow import keras
from core import audio
from core import constants
from core import data_generator
from core import database
from core import plot
from core import util
from model import model_checkpoint
from model import efficientnet_v2
class Trainer:
    """Owns the full training workflow: loads spectrograms from the SQLite
    database, builds/compiles the model, runs model.fit, and writes reports.

    NOTE(review): __init__ publishes the instance through the module-level
    `trainer` global, which cos_lr_schedule reads — only one Trainer should
    exist at a time.
    """
    def __init__(self, parameters):
        """Store config, open the training database, load the class list,
        and populate the train/test arrays (see init())."""
        global trainer
        trainer = self
        self.parameters = parameters
        self.db = database.Database(f'data/{parameters.training}.db')
        self.classes = util.get_class_list()
        self.init()
    # create a plot and save it to the output directory
    def plot_results(self, dir, history, key1, key2 = None):
        """Plot history[key1] (and optionally history[key2]) and save to {dir}/{key1}.png."""
        plt.clf() # clear any existing plot data
        plt.plot(history.history[key1])
        if key2 != None:
            plt.plot(history.history[key2])
        plt.title(key1)
        plt.ylabel(key1)
        plt.xlabel('epoch')
        if key2 is None:
            plt.legend(['train'], loc='upper left')
        else:
            plt.legend(['train', 'test'], loc='upper left')
        plt.savefig(f'{dir}/{key1}.png')
    def run(self):
        """Build/compile the model, train it, write plots and summary files.

        Returns the best validation accuracy observed by the checkpoint
        callback (0 when no test data is configured).
        """
        # only use MirroredStrategy in a multi-GPU environment
        #strategy = tf.distribute.MirroredStrategy()
        strategy = tf.distribute.get_strategy()
        with strategy.scope():
            # define and compile the model
            if self.parameters.type == 0:
                model = keras.models.load_model(constants.CKPT_PATH)
            else:
                # sigmoid for multi-label (independent classes), softmax otherwise
                if self.parameters.multilabel:
                    class_act = 'sigmoid'
                else:
                    class_act = 'softmax'
                model = efficientnet_v2.EfficientNetV2(
                    model_type=self.parameters.eff_config,
                    num_classes=len(self.classes),
                    input_shape=(self.spec_height, constants.SPEC_WIDTH, 1),
                    activation='swish',
                    classifier_activation=class_act,
                    dropout=0.15,
                    drop_connect_rate=0.25)
            opt = keras.optimizers.Adam(learning_rate = cos_lr_schedule(0))
            if self.parameters.multilabel:
                loss = keras.losses.BinaryCrossentropy(label_smoothing = 0.13)
            else:
                loss = keras.losses.CategoricalCrossentropy(label_smoothing = 0.13)
            model.compile(loss = loss, optimizer = opt, metrics = 'accuracy')
        # create output directory
        dir = 'summary'
        if not os.path.exists(dir):
            os.makedirs(dir)
        # output text and graphical descriptions of the model
        with open(f'{dir}/table.txt','w') as text_output:
            model.summary(print_fn=lambda x: text_output.write(x + '\n'))
        if self.parameters.verbosity == 3:
            keras.utils.plot_model(model, show_shapes=True, to_file=f'{dir}/graphic.png')
        # initialize callbacks
        lr_scheduler = keras.callbacks.LearningRateScheduler(cos_lr_schedule)
        model_checkpoint_callback = model_checkpoint.ModelCheckpoint(
            constants.CKPT_PATH, self.parameters.ckpt_min_epochs, self.parameters.ckpt_min_val_accuracy,
            copy_ckpt=self.parameters.copy_ckpt, save_best_only=self.parameters.save_best_only)
        callbacks = [lr_scheduler, model_checkpoint_callback]
        # create the training and test datasets
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
        # DataGenerator expands/augments the raw training spectrograms lazily
        datagen = data_generator.DataGenerator(self.x_train, self.y_train, seed=self.parameters.seed,
            binary_classifier=self.parameters.binary_classifier, multilabel=self.parameters.multilabel)
        train_ds = tf.data.Dataset.from_generator(
            datagen,
            output_types=(tf.float16, tf.float16),
            output_shapes=([self.spec_height, constants.SPEC_WIDTH, 1],[len(self.classes)]))
        train_ds = train_ds.with_options(options)
        train_ds = train_ds.batch(self.parameters.batch_size)
        test_ds = tf.data.Dataset.from_tensor_slices((self.x_test, self.y_test))
        test_ds = test_ds.with_options(options)
        test_ds = test_ds.batch(self.parameters.batch_size)
        class_weight = self._get_class_weight()
        # run training
        if self.parameters.seed is None:
            workers = 2
        else:
            workers = 0 # run data augmentation in main thread to improve repeatability
        start_time = time.time()
        history = model.fit(train_ds, epochs = self.parameters.epochs, verbose = self.parameters.verbosity, validation_data = test_ds,
            workers = workers, shuffle = False, callbacks = callbacks, class_weight = class_weight)
        elapsed = time.time() - start_time
        # output loss/accuracy graphs and a summary report
        training_accuracy = history.history["accuracy"][-1]
        if len(self.x_test) > 0:
            self.plot_results(dir, history, 'accuracy', 'val_accuracy')
            self.plot_results(dir, history, 'loss', 'val_loss')
            scores = model.evaluate(self.x_test, self.y_test)
            test_accuracy = scores[1]
        else:
            self.plot_results(dir, history, 'accuracy')
            self.plot_results(dir, history, 'loss')
        if self.parameters.verbosity >= 2 and len(self.x_test) > 0:
            # report on misidentified test spectrograms
            predictions = model.predict(self.x_test)
            self.analyze_predictions(predictions)
        if self.parameters.verbosity > 0:
            with open(f'{dir}/summary.txt','w') as text_output:
                text_output.write(f'EfficientNetV2 config: {self.parameters.eff_config}\n')
                text_output.write(f'Batch size: {self.parameters.batch_size}\n')
                text_output.write(f'Epochs: {self.parameters.epochs}\n')
                text_output.write(f'Training loss: {history.history["loss"][-1]:.3f}\n')
                text_output.write(f'Training accuracy: {training_accuracy:.3f}\n')
                # scores/test_accuracy are only defined when test data exists
                if len(self.x_test) > 0:
                    text_output.write(f'Test loss: {scores[0]:.3f}\n')
                    text_output.write(f'Final test accuracy: {test_accuracy:.3f}\n')
                    text_output.write(f'Best test accuracy: {model_checkpoint_callback.best_val_accuracy:.4f}\n')
                minutes = int(elapsed) // 60
                seconds = int(elapsed) % 60
                text_output.write(f'Elapsed time for training = {minutes}m {seconds}s\n')
                print(f'Best test accuracy: {model_checkpoint_callback.best_val_accuracy:.4f}\n')
                print(f'Elapsed time for training = {minutes}m {seconds}s\n')
        return model_checkpoint_callback.best_val_accuracy
    # find and report on incorrect predictions;
    # always generate summary/stats.csv, but output misident/*.png only if verbosity >= 2;
    # this is based on the last epoch, which may not be the best saved model
    def analyze_predictions(self, predictions):
        """Compare predictions against y_test; write per-class precision/recall
        to summary/stats.csv and (at verbosity >= 2) images of each
        misidentified spectrogram to the misident/ folder."""
        # per-class tally of spectrogram counts and TP/FP/FN
        class ClassInfo:
            def __init__(self):
                self.spec_count = 0
                self.true_pos = 0
                self.false_pos = 0
                self.false_neg = 0
        misident_dir = 'misident'
        if os.path.exists(misident_dir):
            shutil.rmtree(misident_dir) # ensure we start with an empty folder
        os.makedirs(misident_dir)
        # collect data per class and output images if requested
        classes = {}
        for i in range(len(predictions)):
            actual_index = np.argmax(self.y_test[i])
            actual_name = self.classes[actual_index]
            predicted_index = np.argmax(predictions[i])
            predicted_name = self.classes[predicted_index]
            if actual_name in classes:
                actual_class_info = classes[actual_name]
            else:
                actual_class_info = ClassInfo()
                classes[actual_name] = actual_class_info
            if predicted_name in classes:
                predicted_class_info = classes[predicted_name]
            else:
                predicted_class_info = ClassInfo()
                classes[predicted_name] = predicted_class_info
            actual_class_info.spec_count += 1
            if predicted_index == actual_index:
                actual_class_info.true_pos += 1
            else:
                actual_class_info.false_neg += 1
                predicted_class_info.false_pos += 1
                if self.parameters.verbosity >= 2:
                    # prefer the recording/offset name recorded in init()
                    if i in self.spec_file_name.keys():
                        suffix = self.spec_file_name[i]
                    else:
                        suffix = i
                    spec = self.x_test[i].reshape(self.x_test[i].shape[0], self.x_test[i].shape[1])
                    plot.plot_spec(spec, f'{misident_dir}/{actual_name}_{predicted_name}_{suffix}.png')
        # output stats.csv containing data per class
        stats = 'class,count,TP,FP,FN,FP+FN,precision,recall,average\n'
        for class_name in sorted(classes):
            count = classes[class_name].spec_count
            tp = classes[class_name].true_pos
            fp = classes[class_name].false_pos
            fn = classes[class_name].false_neg
            if tp + fp == 0:
                precision = 0
            else:
                precision = tp / (tp + fp)
            if tp + fn == 0:
                recall = 0
            else:
                recall = tp / (tp + fn)
            stats += f'{class_name},{count},{tp:.3f},{fp:.3f},{fn:.3f},{fp + fn:.3f},{precision:.3f},{recall:.3f},{(precision+recall)/2:.3f}\n'
        with open(f'summary/stats.csv','w') as text_output:
            text_output.write(stats)
    # given the total number of spectrograms in a class, return a dict of randomly selected
    # indices to use for testing (indices not in the list are used for training)
    def get_test_indices(self, total):
        """Randomly pick ceil(test_portion * total) distinct indices in
        [0, total) via rejection sampling; returned as a dict for O(1) lookup."""
        num_test = math.ceil(self.parameters.test_portion * total)
        test_indices = {}
        while len(test_indices.keys()) < num_test:
            index = random.randint(0, total - 1)
            if index not in test_indices.keys():
                test_indices[index] = 1
        return test_indices
    # heuristic to adjust weights of classes;
    # data/weights.txt contains optional weight per class name;
    # format is "class-name,weight", e.g. "Noise,1.1";
    # classes not listed there default to a weight of 1.0
    def _get_class_weight(self):
        """Return {class index: weight} parsed from data/weights.txt.

        Returns None when the file cannot be read, in which case model.fit
        applies no class weighting."""
        input_weight = {}
        path = 'data/weights.txt'
        try:
            with open(path, 'r') as file:
                for line in file.readlines():
                    line = line.strip()
                    # skip blank lines and '#' comments
                    if len(line) > 0 and line[0] != '#':
                        tokens = line.split(',')
                        if len(tokens) > 1:
                            try:
                                weight = float(tokens[1])
                                input_weight[tokens[0].strip()] = weight
                            except ValueError:
                                print(f'Invalid input weight = {tokens[1]} for class {tokens[0]}')
        except IOError:
            print(f'Unable to open weights file "{path}"')
            return
        class_weight = {}
        for i in range(len(self.classes)):
            if self.classes[i] in input_weight.keys():
                print(f'Assigning weight {input_weight[self.classes[i]]} to {self.classes[i]}')
                class_weight[i] = input_weight[self.classes[i]]
            else:
                class_weight[i] = 1.0
        return class_weight
    def init(self):
        """Split each class's spectrograms into train/test sets and load them
        from the database into self.x_train/y_train/x_test/y_test.

        Training spectrograms stay in their raw stored form (expanded later
        by the data generator); test spectrograms are expanded here.
        """
        if self.parameters.binary_classifier:
            self.spec_height = constants.BINARY_SPEC_HEIGHT
        else:
            self.spec_height = constants.SPEC_HEIGHT
        # count spectrograms and randomly select which to use for testing vs. training
        num_spectrograms = []
        self.test_indices = []
        for i in range(len(self.classes)):
            total = self.db.get_num_spectrograms(self.classes[i])
            num_spectrograms.append(total)
            self.test_indices.append(self.get_test_indices(total))
        # get the total training and testing counts across all classes
        test_total = 0
        train_total = 0
        for i in range(len(self.classes)):
            test_count = len(self.test_indices[i].keys())
            train_count = num_spectrograms[i] - test_count
            test_total += test_count
            train_total += train_count
        if len(self.parameters.val_db) > 0:
            # when we just use a portion of the training data for testing/validation, it ends up being highly
            # correlated with the training data, so the validation percentage is artificially high and it's
            # difficult to detect overfitting;
            # adding separate test data from a validation database helps to counteract this;
            # there can be multiple, which must be comma-separated
            val_names = self.parameters.val_db.split(',')
            for val_name in val_names:
                validation_db = database.Database(f'data/{val_name}.db')
                num_validation_specs = 0
                for class_name in self.classes:
                    test_total += validation_db.get_num_spectrograms(class_name)
        print(f'# training samples: {train_total}, # test samples: {test_total}')
        # initialize arrays
        self.x_train = [0 for i in range(train_total)]
        self.y_train = np.zeros((train_total, len(self.classes)))
        self.x_test = np.zeros((test_total, self.spec_height, constants.SPEC_WIDTH, 1))
        self.y_test = np.zeros((test_total, len(self.classes)))
        self.input_shape = (self.spec_height, constants.SPEC_WIDTH, 1)
        # map test spectrogram indexes to file names for outputting names of misidentified ones
        self.spec_file_name = {}
        # populate from the database;
        # they will be selected randomly per mini batch, so no need to randomize here
        train_index = 0
        test_index = 0
        for i in range(len(self.classes)):
            results = self.db.get_recordings_by_subcategory_name(self.classes[i])
            spec_index = 0
            for result in results:
                recording_id, file_name, _ = result
                specs = self.db.get_spectrograms_by_recording_id(recording_id)
                for j in range(len(specs)):
                    spec, offset = specs[j]
                    if spec_index in self.test_indices[i].keys():
                        # test spectrograms are expanded here
                        self.spec_file_name[test_index] = f'{file_name}-{offset}' # will be used in names of files written to misident folder
                        self.x_test[test_index] = util.expand_spectrogram(spec, binary_classifier=self.parameters.binary_classifier)
                        self.y_test[test_index][i] = 1
                        test_index += 1
                    else:
                        # training spectrograms are expanded in data generator
                        self.x_train[train_index] = spec
                        self.y_train[train_index][i] = 1
                        train_index += 1
                    spec_index += 1
        if len(self.parameters.val_db) > 0:
            # append test data from the validation database(s)
            val_names = self.parameters.val_db.split(',')
            for val_name in val_names:
                validation_db = database.Database(f'data/{val_name}.db')
                for i in range(len(self.classes)):
                    specs = validation_db.get_spectrograms_by_name(self.classes[i])
                    for spec in specs:
                        self.x_test[test_index] = util.expand_spectrogram(spec[0], binary_classifier=self.parameters.binary_classifier)
                        self.y_test[test_index][i] = 1
                        test_index += 1
# learning rate schedule with cosine decay
def cos_lr_schedule(epoch):
    """Return the learning rate for `epoch`: cosine decay from a base rate
    scaled by batch size (reads the module-level `trainer`, which must be set)."""
    global trainer
    params = trainer.parameters
    if params.verbosity == 0:
        print(f'epoch: {epoch + 1} / {params.epochs}') # so there is at least some status info
    scaled_base = params.base_lr * params.batch_size / 64
    half_cosine = (1 + math.cos(epoch * math.pi / max(params.epochs, 1))) / 2
    return scaled_base * half_cosine
if __name__ == '__main__':
    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', type=int, default=32, help='Batch size. Default = 32.')
    parser.add_argument('-c', type=int, default=15, help='Minimum epochs before saving checkpoint. Default = 15.')
    parser.add_argument('-d', type=float, default=0.0, help='Minimum validation accuracy before saving checkpoint. Default = 0.')
    parser.add_argument('-e', type=int, default=10, help='Number of epochs. Default = 10.')
    parser.add_argument('-f', type=str, default='training', help='Name of training database. Default = training.')
    parser.add_argument('-g', type=int, default=1, help='If 1, make a separate copy of each saved checkpoint. Default = 1.')
    parser.add_argument('-j', type=int, default=0, help='If 1, save checkpoint only when val accuracy improves. Default = 0.')
    parser.add_argument('-m', type=int, default=1, help='Model type (0 = Load existing model, 1 = EfficientNetV2. Default = 1.')
    parser.add_argument('-m2', type=str, default='a0', help='Name of EfficientNetV2 configuration to use. Default = "a0". ')
    parser.add_argument('-r', type=float, default=.006, help='Base learning rate. Default = .006')
    parser.add_argument('-t', type=float, default=.01, help='Test portion. Default = .01')
    parser.add_argument('-u', type=int, default=0, help='1 = Train a multi-label classifier. Default = 0.')
    parser.add_argument('-v', type=int, default=1, help='Verbosity (0-2, 0 omits output graphs, 2 plots misidentified test spectrograms, 3 adds graph of model). Default = 1.')
    parser.add_argument('-x', type=str, default='', help='Name(s) of extra validation databases. "abc" means load "abc.db". "abc,def" means load both databases for validation. Default = "". ')
    parser.add_argument('-y', type=int, default=0, help='If y = 1, extract spectrograms for binary classifier. Default = 0.')
    parser.add_argument('-z', type=int, default=None, help='Integer seed for random number generators. Default = None (do not). If specified, other settings to increase repeatability will also be enabled, which slows down training.')
    args = parser.parse_args()
    # collect the parsed flags into an immutable config object for Trainer
    Parameters = namedtuple('Parameters', ['base_lr', 'batch_size', 'binary_classifier', 'ckpt_min_epochs', 'ckpt_min_val_accuracy',
        'copy_ckpt', 'eff_config', 'epochs', 'multilabel', 'save_best_only', 'seed', 'test_portion', 'training', 'type',
        'val_db', 'verbosity'])
    parameters = Parameters(base_lr=args.r, batch_size = args.b, binary_classifier=(args.y==1), ckpt_min_epochs=args.c, ckpt_min_val_accuracy=args.d,
        copy_ckpt=(args.g == 1), eff_config = args.m2, epochs = args.e, multilabel=(args.u==1), save_best_only=(args.j == 1), seed=args.z,
        test_portion = args.t, training=args.f, type = args.m, val_db = args.x, verbosity = args.v)
    # idiom fix: compare against None with `is not` (PEP 8), matching the
    # `is None` checks used elsewhere in this file
    if args.z is not None:
        # these settings make results more reproducible, which is very useful when tuning parameters
        os.environ['PYTHONHASHSEED'] = str(args.z)
        #os.environ['TF_DETERMINISTIC_OPS'] = '1'
        os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
        random.seed(args.z)
        np.random.seed(args.z)
        tf.random.set_seed(args.z)
        tf.config.threading.set_inter_op_parallelism_threads(1)
        tf.config.threading.set_intra_op_parallelism_threads(1)
    keras.mixed_precision.set_global_policy("mixed_float16") # trains 25-30% faster
    trainer = Trainer(parameters)
    trainer.run()
| 47.476615 | 233 | 0.598208 |
a3943fc348baced6fa934c762ac87be734e9ae13 | 2,002 | py | Python | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | limix/heritability/estimate.py | fpcasale/limix | a6bc2850f243fe779991bb53a24ddbebe0ab74d2 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from numpy import ascontiguousarray, copy, ones, var
from numpy_sugar.linalg import economic_qs
from glimix_core.glmm import GLMMExpFam
def estimate(pheno, lik, K, covs=None, verbose=True):
    r"""Estimate the so-called narrow-sense heritability.

    It supports Normal, Bernoulli, Binomial, and Poisson phenotypes.
    Let :math:`N` be the sample size and :math:`S` the number of covariates.

    Parameters
    ----------
    pheno : tuple, array_like
        Phenotype. Dimensions :math:`N\times 0`.
    lik : {'normal', 'bernoulli', 'binomial', 'poisson'}
        Likelihood name.
    K : array_like
        Kinship matrix. Dimensions :math:`N\times N`.
    covs : array_like
        Covariates. Default is an offset. Dimensions :math:`N\times S`.

    Returns
    -------
    float
        Estimated heritability.

    Examples
    --------
    .. doctest::

        >>> from numpy import dot, exp, sqrt
        >>> from numpy.random import RandomState
        >>> from limix.heritability import estimate
        >>>
        >>> random = RandomState(0)
        >>>
        >>> G = random.randn(50, 100)
        >>> K = dot(G, G.T)
        >>> z = dot(G, random.randn(100)) / sqrt(100)
        >>> y = random.poisson(exp(z))
        >>>
        >>> print('%.2f' % estimate(y, 'poisson', K, verbose=False))
        0.70
    """
    # Standardize the kinship background once and factor it for the GLMM.
    QS = economic_qs(_background_standardize(K))
    lik = lik.lower()
    # Binomial phenotypes arrive as a tuple of arrays; the sample count
    # comes from the first element.
    nsamples = len(pheno[0]) if lik == "binomial" else len(pheno)
    if covs is None:
        covs = ones((nsamples, 1))  # intercept-only design
    glmm = GLMMExpFam(pheno, lik, covs, QS)
    glmm.feed().maximize(verbose=verbose)
    # Variance decomposition: scale splits into genetic and residual parts
    # via delta; heritability is the genetic share of the total variance.
    genetic = glmm.scale * (1 - glmm.delta)
    residual = glmm.scale * glmm.delta
    return genetic / (var(glmm.mean()) + genetic + residual)
def _background_standardize(K):
    """Return a Gower-normalized, diagonal-scaled copy of kinship matrix K.

    The input is never modified: normalization happens on a C-contiguous
    float copy.
    """
    from ..stats.kinship import gower_norm
    Kn = ascontiguousarray(copy(K, "C"), dtype=float)
    gower_norm(Kn, Kn)  # in-place normalization into the copy
    Kn /= Kn.diagonal()
    return Kn
| 24.414634 | 76 | 0.580919 |
a394632989f95d229e000f46db6a73bbdcda0cf3 | 2,739 | py | Python | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | pyrat/__main__.py | gitmarek/pyrat | cbf918d5c23d5d39e62e00bb64b6d0596170c68b | [
"MIT"
] | null | null | null | import argparse, importlib, sys
import pyrat
from pyrat import name, version, logger
# Factory for sub-command handlers: each returned callable lazily imports
# pyrat.<tool_name> and hands control to that submodule's start(args).
def tool_(tool_name):
    def run_tool(args):
        module = importlib.import_module('pyrat.' + tool_name)
        module.start(args)
    return run_tool
if __name__ == '__main__':
    # create the top-level parser
    parser = argparse.ArgumentParser(prog=name,
        description='Raw tools for raw audio.',
        epilog= name+' <command> -h for more details.')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--quiet', action='store_true',
        help='takes precedence over \'verbose\'')
    parser.add_argument('-v', '--version', action='store_true',
        help='print version number and exit')
    subparsers = parser.add_subparsers(title="Commands")
    # create the parser for the "conv" command
    parser_conv = subparsers.add_parser('conv',
        description='''Convolve input signal with kernel.
Normalize the result and write it to outfile.''',
        help='Convolve input with a kernel.')
    parser_conv.add_argument('infile', type=argparse.FileType('r'))
    parser_conv.add_argument('kerfile', type=argparse.FileType('r'),
        help="kernel to be convolved with infile")
    parser_conv.add_argument('outfile', type=argparse.FileType('w'))
    parser_conv.set_defaults(func=tool_('conv'))
    # create the parser for the "randph" command
    parser_randph = subparsers.add_parser('randph',
        description='''Randomize phases of Fourier coefficients.
Calculate the FFT of the entire signal; then randomize the phases of each
frequency bin by multiplying the frequency coefficient by a random phase:
e^{2pi \phi}, where $\phi$ is distributed uniformly on the interval [0,b). By
default, b=0.1. The result is saved to outfile.''',
        help='Randomize phases of Fourier coefficients.')
    parser_randph.add_argument('infile', type=argparse.FileType('r'))
    parser_randph.add_argument('outfile', type=argparse.FileType('w'))
    parser_randph.add_argument('-b', type=float, default=0.1,
        help='phases distributed uniformly on [0,b)')  # typo fix: was "disttibuted"
    parser_randph.set_defaults(func=tool_('randph'))
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)
    args = parser.parse_args()
    if args.version:
        print(name + '-' + version)
        sys.exit(0)
    if args.verbose:
        logger.setLevel('INFO')
    else:
        logger.setLevel('WARNING')
    if args.quiet:
        logger.setLevel(60) # above 'CRITICAL'
    # Bug fix: an invocation like `pyrat --verbose` passes the argv-length
    # check above but selects no sub-command, so the Namespace has no `func`
    # attribute and the original code crashed with AttributeError. Fall back
    # to printing usage instead.
    func = getattr(args, 'func', None)
    if func is None:
        parser.print_usage()
        sys.exit(1)
    func(args)
    sys.exit(0)
| 36.039474 | 78 | 0.683826 |
a394774a260348220f0663c39347cf191a6da686 | 485 | py | Python | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | 4 | 2017-09-20T19:10:51.000Z | 2022-01-10T04:02:00.000Z | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | 2 | 2017-09-02T22:53:03.000Z | 2018-01-01T03:27:48.000Z | zof/event.py | byllyfish/pylibofp | 8e96caf83f57cab930b45a78eb4a8eaa6d9d0408 | [
"MIT"
] | null | null | null | from .objectview import to_json, from_json
def load_event(event):
    """Parse an incoming JSON event.

    On malformed input, return a synthetic EXCEPTION event instead of
    raising; empty bytes are reported with the reason 'EOF'.
    """
    try:
        return from_json(event)
    except ValueError as ex:
        reason = 'EOF' if event == b'' else str(ex)
        return {'event': 'EXCEPTION', 'reason': reason, 'input': event}
def dump_event(event):
    """Serialize an event to UTF-8 bytes; non-string events are JSON-encoded first."""
    text = event if isinstance(event, str) else to_json(event)
    return text.encode('utf-8')
| 26.944444 | 74 | 0.610309 |
a3955ee346d7a3a5338cd528fa6afbec24d5527c | 2,007 | py | Python | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/projecteuler/src/longest_collatz_sequence.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Longest Collatz sequence.
The following iterative sequence is defined
for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13,
we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1)
contains 10 terms. Although it has not been proved yet
(Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are
allowed to go above one million.
source: https://projecteuler.net/problem=14
"""
# Memo tables shared by the functions below, keyed by starting number.
CACHE = {1: [1]}         # starting number -> full Collatz sequence
CACHE_LENGTH = {1: 1}    # starting number -> number of terms in its sequence


def collatz_sequence(n) -> list:
    """Return the Collatz sequence from n down to 1 as a list.

    Fixes over the original: the return annotation claimed ``int`` although
    a list is returned, and the recursive implementation could raise
    RecursionError on very long chains. This version iterates instead,
    while still memoizing every visited value in CACHE exactly as before.
    """
    if n in CACHE:
        return CACHE[n]
    # Walk forward until we reach a value whose sequence is already cached.
    pending = []
    m = n
    while m not in CACHE:
        pending.append(m)
        m = m // 2 if m % 2 == 0 else 3 * m + 1
    # Build and cache the sequence for each pending value, deepest first.
    tail = CACHE[m]
    for value in reversed(pending):
        tail = [value] + tail
        CACHE[value] = tail
    return tail
def longest_collatz_sequence(limit: int) -> int:
    """Return the starting number in [1, limit] with the longest Collatz chain.

    Bug fix: the original took ``max()`` over every key of CACHE_LENGTH,
    which also contains intermediate chain values larger than ``limit``
    (odd n visits 3n + 1 > limit), so the result was not guaranteed to be a
    valid starting number. Track the best candidate while iterating the
    actual starting numbers instead.

    :return: number that generates the longest Collatz sequence.
    """
    best_start, best_length = 1, 1  # the chain of 1 is just [1]
    for start in range(2, limit + 1):
        length = collatz_sequence_length(start)
        if length > best_length:
            best_start, best_length = start, length
    return best_start
def collatz_sequence_length(n):
    """Return the number of terms in the Collatz sequence starting at n.

    Iterative rewrite of the recursive original: one stack frame instead of
    one per chain element, so long chains cannot raise RecursionError.
    Every value visited is memoized in CACHE_LENGTH, exactly as before.
    """
    unresolved = []
    while n not in CACHE_LENGTH:
        unresolved.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = CACHE_LENGTH[n]
    # Walk back up the chain, caching the length for each visited value.
    for value in reversed(unresolved):
        length += 1
        CACHE_LENGTH[value] = length
    return length
def main() -> int:
    """Solve Project Euler 14.

    :return: starting number under one million with the longest Collatz chain.
    """
    limit = 1000000
    return longest_collatz_sequence(limit)
if __name__ == "__main__":
lcs = main()
print(lcs, CACHE_LENGTH[lcs])
print(" → ".join(map(str, collatz_sequence(lcs))))
| 23.611765 | 71 | 0.659691 |
a396aa841a074ff27cad63b9fc597eb1d7fa8b7c | 1,823 | py | Python | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | examples/classify_pose.py | scottamain/aiy-maker-kit | 4cdb973067b83d27cf0601c811d887877d1bc253 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performs pose classification using the MoveNet model.
The MoveNet model identifies the body keypoints on a person, and then this
code passes those keypoints to a custom-trained pose classifier model that
classifies the pose with a label, such as the name of a yoga pose.
You must first complete the Google Colab to train the pose classification model:
https://g.co/coral/train-poses
And save the output .tflite and .txt files into the examples/models/ directory.
Then just run this script:
python3 classify_pose.py
For more instructions, see g.co/aiy/maker
"""
from aiymakerkit import vision
from pycoral.utils.dataset import read_label_file
import models
# Classifier model and labels produced by the pose-training Colab
# (see g.co/coral/train-poses in the module docstring).
MOVENET_CLASSIFY_MODEL = 'models/pose_classifier.tflite'
MOVENET_CLASSIFY_LABELS = 'models/pose_labels.txt'
# MoveNet finds body keypoints; the custom classifier maps keypoints to a pose label.
pose_detector = vision.PoseDetector(models.MOVENET_MODEL)
pose_classifier = vision.PoseClassifier(MOVENET_CLASSIFY_MODEL)
labels = read_label_file(MOVENET_CLASSIFY_LABELS)
# Main loop: process each camera frame — detect keypoints, then classify the pose.
for frame in vision.get_frames():
    # Detect the body points and draw the skeleton
    pose = pose_detector.get_pose(frame)
    vision.draw_pose(frame, pose)
    # Classify different body poses
    label_id = pose_classifier.get_class(pose)
    vision.draw_label(frame, labels.get(label_id))
| 35.745098 | 80 | 0.785518 |
a396f80d3df39bc129b954b6343810b69c00e0ea | 291 | py | Python | weldx/tags/measurement/source.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | 13 | 2020-02-20T07:45:02.000Z | 2021-12-10T13:15:47.000Z | weldx/tags/measurement/source.py | BAMWelDX/weldx | ada4e67fa00cdb80a0b954057f4e685b846c9fe5 | [
"BSD-3-Clause"
] | 675 | 2020-02-20T07:47:00.000Z | 2022-03-31T15:17:19.000Z | weldx/tags/measurement/source.py | CagtayFabry/weldx | 463f949d4fa54b5edafa2268cb862716865a62c2 | [
"BSD-3-Clause"
] | 5 | 2020-09-02T07:19:17.000Z | 2021-12-05T08:57:50.000Z | from weldx.asdf.util import dataclass_serialization_class
from weldx.measurement import SignalSource

# Public names exported by this serialization module.
__all__ = ["SignalSource", "SignalSourceConverter"]

# Converter for SignalSource built from its dataclass definition by
# dataclass_serialization_class (imported above), registered under the
# "measurement/source" schema name, version 0.1.0.
SignalSourceConverter = dataclass_serialization_class(
    class_type=SignalSource, class_name="measurement/source", version="0.1.0"
)
| 29.1 | 77 | 0.821306 |
a39715724a34e51cf7b15a4f030411898b87a5ec | 1,706 | py | Python | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | 1 | 2022-02-24T11:21:49.000Z | 2022-02-24T11:21:49.000Z | test/test_entities_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import unittest
import graphsense
from graphsense.api.entities_api import EntitiesApi # noqa: E501
class TestEntitiesApi(unittest.TestCase):
    """EntitiesApi unit test stubs.

    Skeleton produced by openapi-generator (see module docstring); each
    test body is an empty placeholder awaiting a real implementation.
    """

    def setUp(self):
        # Fresh API client per test.
        self.api = EntitiesApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_get_entity(self):
        """Test case for get_entity

        Get an entity, optionally with tags  # noqa: E501
        """
        pass

    def test_list_entity_addresses(self):
        """Test case for list_entity_addresses

        Get an entity's addresses  # noqa: E501
        """
        pass

    def test_list_entity_links(self):
        """Test case for list_entity_links

        Get transactions between two entities  # noqa: E501
        """
        pass

    def test_list_entity_neighbors(self):
        """Test case for list_entity_neighbors

        Get an entity's neighbors in the entity graph  # noqa: E501
        """
        pass

    def test_list_entity_txs(self):
        """Test case for list_entity_txs

        Get all transactions an entity has been involved in  # noqa: E501
        """
        pass

    def test_list_tags_by_entity(self):
        """Test case for list_tags_by_entity

        Get tags for a given entity for the given level  # noqa: E501
        """
        pass

    def test_search_entity_neighbors(self):
        """Test case for search_entity_neighbors

        Search deeply for matching neighbors  # noqa: E501
        """
        pass


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 21.871795 | 73 | 0.623681 |
a39afee8e197b6834391bc0d4c2a7ba0f29e4cdf | 622 | py | Python | tests/test_versions_in_sync.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 10 | 2018-08-27T04:15:53.000Z | 2021-08-18T09:45:35.000Z | tests/test_versions_in_sync.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 35 | 2018-08-27T04:17:44.000Z | 2021-09-22T05:39:57.000Z | tests/test_versions_in_sync.py | tim-mitchell/pure_interface | 46a2de2574f4543980303cafd89cfcbdb643fbbb | [
"MIT"
] | 3 | 2018-09-19T21:32:01.000Z | 2020-11-17T00:58:55.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import pure_interface
class TestVersionsMatch(unittest.TestCase):
    """Guard that ``pure_interface.__version__`` matches the version declared
    in the project's packaging metadata (setup.cfg)."""

    def test_versions(self):
        # The version is declared in setup.cfg (not setup.py), one level up
        # from this test file.
        setup_cfg = os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')
        with open(setup_cfg, 'r') as f:
            # Stream the file instead of loading it all with readlines().
            for line in f:
                if 'version =' in line:
                    self.assertIn(pure_interface.__version__, line)
                    break
            else:
                # Fixed stale message: the file read here is setup.cfg.
                self.fail('did not find version in setup.cfg')
| 29.619048 | 82 | 0.636656 |
a39ce7f687dbc4302e562228dd957da1ccaaa084 | 315 | py | Python | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/wfs/get_capabilities_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.wfs.get_capabilities_type_2 import GetCapabilitiesType2
__NAMESPACE__ = "http://www.opengis.net/wfs/2.0"
@dataclass
class GetCapabilities2(GetCapabilitiesType2):
    """GetCapabilities request element bound to the WFS 2.0 namespace."""

    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "GetCapabilities"
        namespace = "http://www.opengis.net/wfs/2.0"
| 26.25 | 69 | 0.755556 |
a39d78970a2b5428929cac47bbcd677dcd4fd411 | 2,169 | py | Python | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | 1 | 2017-12-15T17:36:58.000Z | 2017-12-15T17:36:58.000Z | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | null | null | null | timeStamps/admin.py | zandegran/django-timeStamp | 2c598d5543dc9b9198f41f0712406f22e60d5fa6 | [
"MIT"
] | null | null | null | """
This module is to define how TimeStamp model is represented in the Admin site
It also registers the model to be shown in the admin site
.. seealso:: :class:`..models.TimeStamp`
"""
from django.contrib import admin
from .models import TimeStamp
class FilterUserAdmin(admin.ModelAdmin):
    """Admin base class that hides each user's records from other users.

    Superusers keep full visibility; every other user only sees and edits
    objects whose ``user`` field points at themselves.
    """

    def save_model(self, request, obj, form, change):
        # Stamp ownership on the first save only; later edits keep the owner.
        if getattr(obj, 'user', None) is None:
            obj.user = request.user
        obj.save()

    def get_queryset(self, request):
        queryset = super(FilterUserAdmin, self).get_queryset(request)
        if not request.user.is_superuser:
            queryset = queryset.filter(user=request.user)
        return queryset

    def has_change_permission(self, request, obj=None):
        if not obj:
            # The change list itself: allow so the listing page renders.
            return True
        return request.user.is_superuser or obj.user == request.user
class TimeStampAdmin(FilterUserAdmin):
    """Admin configuration for the TimeStamp model.

    Inherits the per-user visibility filtering from FilterUserAdmin and
    makes records effectively read-only: no adding, editing or deleting.

    .. seealso:: :class:`FilterUserAdmin`
    """

    list_display = ('time_stamp', 'user')  # columns shown in the listing
    list_filter = ['user']                 # lets superusers narrow to one user

    def has_add_permission(self, request):
        """Timestamps cannot be created by hand from the admin."""
        return False

    def get_readonly_fields(self, request, obj=None):
        """Freeze all fields when editing an existing record."""
        extra = ('time_stamp', 'user') if obj else ()
        return self.readonly_fields + extra

    def has_delete_permission(self, request, obj=None):
        """Timestamps cannot be deleted from the admin."""
        return False


# Register the TimeStamp model with the per-user admin configuration.
admin.site.register(TimeStamp, TimeStampAdmin)
a39e36cdbd6fb2489b1dabdf74c900884f32c597 | 718 | py | Python | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | setup.py | Gearheart-team/django-accounts | e0c2f12d350846fa31143b6dbdb0cf6fa713fb11 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name='izeni-django-accounts',
    version='1.1.2a',
    # Shares the izeni / izeni.django namespaces with other distributions.
    namespace_packages=['izeni', 'izeni.django'],
    packages=find_packages(),
    include_package_data=True,
    author='Izeni, Inc.',
    author_email='django-accounts@izeni.com',
    # NOTE(review): README content is conventionally placed in
    # long_description rather than description.
    description=open('README.md').read(),
    url='https://dev.izeni.net/izeni/django-accounts',
    install_requires=[
        'Django==1.11.7',
        'djangorestframework>3.4',
        #'python-social-auth==0.2.13',
        'social-auth-app-django',
        'requests==2.8.1',
    ],
    # Pinned fork (per the URL, carries a Google auth fix); installing from
    # dependency_links requires pip's --process-dependency-links flag.
    dependency_links=[
        'https://github.com/izeni-team/python-social-auth.git@v0.2.21-google-fix#egg=python-social-auth-0',
    ]
)
| 29.916667 | 107 | 0.637883 |
a39ece0f6a490b1cd3625b5fef325786496075c3 | 2,973 | py | Python | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | 3 | 2020-11-16T20:21:25.000Z | 2021-06-11T13:09:30.000Z | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | null | null | null | train.py | Saaaber/urban-segmentation | fc893feb9208d3206d7c5329b1ccf4cfab97ed31 | [
"MIT"
] | 3 | 2020-11-11T23:43:15.000Z | 2022-03-17T09:03:42.000Z | # Copyright (c) Ville de Montreal. All rights reserved.
# Licensed under the MIT license.
# See LICENSE file in the project root for full license information.
import os
import json
import torch
import argparse
import datetime
from utils.factories import ModelFactory, OptimizerFactory, TrainerFactory
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Semantic Segmentation Training")
parser.add_argument('-c', '--config', default=None, type=str,
help="config file path (default: None)")
parser.add_argument('-r', '--resume', default=None, type=str,
help="path to latest checkpoint (default: None)")
parser.add_argument('-d', '--dir', default=None, type=str,
help="experiment dir path (default: None)")
args = parser.parse_args()
# Check for GPU
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
torch.backends.cudnn.deterministic = True
# Check if Colab run
COLAB = os.path.exists("/content/gdrive")
if args.config:
# Load config file
config = json.load(open(args.config))
elif args.resume:
# Load config file from checkpoint
config = torch.load(args.resume, map_location=device)['config']
# Change log dir if colab run
if COLAB is True:
config['trainer']['log_dir'] = "/content/gdrive/My Drive/colab_saves/logs/"
# Set experiment dir to current time if none provided
if args.dir:
experiment_dir = args.dir
else:
experiment_dir = datetime.datetime.now().strftime("%m%d_%H%M%S")
# Init model and optimizer from config with factories
model = ModelFactory.get(config['model'])
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = OptimizerFactory.get(config['optimizer'], params)
# Check if semi-supervised run
if config['semi'] is True:
# Init model_d and optimizer_d from config with factories
model_d = ModelFactory.get(config['model_d'])
params_d = filter(lambda p: p.requires_grad, model_d.parameters())
optimizer_d = OptimizerFactory.get(config['optimizer_d'], params_d)
# Init semi-supervised trainer object from config with factory
trainer = TrainerFactory.get(config)(
model,
model_d,
optimizer,
optimizer_d,
config=config,
resume=args.resume,
experiment_dir=experiment_dir,
**config['trainer']['options'])
else:
# Init supervised trainer object from config with factory
trainer = TrainerFactory.get(config)(
model,
optimizer,
config=config,
resume=args.resume,
experiment_dir=experiment_dir,
**config['trainer']['options'])
# Run a training experiment
trainer.train()
| 33.784091 | 83 | 0.636731 |
a3a01913f52507b8c2e9c60bffcef520ae43b4db | 1,036 | py | Python | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/wavecal/spectrographs/templ_soar_goodman.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | """ Generate the wavelength templates for SOAR Goodman"""
import os
from pypeit.core.wavecal import templates
from IPython import embed
def soar_goodman_400(overwrite=False):
    """Build the wavelength template for the SOAR Goodman 400_SYZY setup.

    Stitches two PypeIt wavelength calibrations into a single template file,
    cutting between them at 6800 Angstroms.

    Args:
        overwrite (bool): overwrite an existing template file if True.
    """
    binspec = 2
    outroot = 'soar_goodman_400_SYZY.fits'

    # PypeIt wavelength calibration files used as input.
    calib_dir = os.path.join(templates.template_path, 'SOAR_Goodman', '400_SYZY')
    wfiles = [
        os.path.join(calib_dir, fname)
        for fname in ('MasterWaveCalib_A_1_01_M1.fits',
                      'MasterWaveCalib_A_1_01_M2.fits')
    ]

    # One snippet per input file, joined at the wavelength cut.
    ifiles = [0, 1]
    slits = [495, 496]
    wv_cuts = [6800.]
    assert len(wv_cuts) == len(slits) - 1

    # No detector-dependent cuts for this setup.
    det_cut = None

    templates.build_template(wfiles, slits, wv_cuts, binspec, outroot,
                             ifiles=ifiles, det_cut=det_cut, chk=True,
                             normalize=True, lowredux=False,
                             subtract_conti=True, overwrite=overwrite,
                             shift_wave=True)


if __name__ == '__main__':
    soar_goodman_400(overwrite=True)
a3a1c89d1bcdd899b6c1712a17770e89aa6ef0b0 | 5,062 | py | Python | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | 2 | 2020-10-30T15:28:06.000Z | 2022-01-31T17:13:25.000Z | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | null | null | null | vivarium/lidar.py | Pyrofoux/vivarium | 90c07384929f6c34915f053fd8e95e91358c4e58 | [
"MIT"
] | null | null | null | from simple_playgrounds.entities.agents.sensors.sensor import *
from simple_playgrounds.entities.agents.sensors.semantic_sensors import *
from collections import defaultdict
from pymunk.vec2d import Vec2d
import math
#@SensorGenerator.register('lidar')
class LidarSensor(SemanticSensor):
    """Coarse lidar-like sensor.

    Splits the surroundings into angular cones and reports, per cone and per
    entity class name, an activation in [0, 1] that grows as the nearest
    entity of that class gets closer (1 = at the agent, 0 = none in range).
    """

    def __init__(self, anchor, invisible_elements=None,
                 remove_occluded=True, allow_duplicates=False, **sensor_params):
        self.sensor_type = 'lidar'
        # Todo later: add default config, as in visual_sensors
        sensor_param = {**sensor_params}
        super(LidarSensor, self).__init__(anchor, invisible_elements, sensor_param, fov=0)
        # Sensor parameters. TODO: make them configurable.
        self.FoV = sensor_param.get('FoV', 100)  # detection range, in pixels
        # Angular cones in degrees, relative to the agent's heading.
        self.angle_ranges = sensor_param.get('angle_ranges', [(0, 90), (90, 180), (180, 270), (270, 359)])
        self.cones_number = len(self.angle_ranges)
        self.observation = None
        self.anchor = anchor  # the agent this sensor is attached to

    def update_sensor(self, pg):
        """Recompute the observation from playground *pg*.

        Returns (and stores in ``self.observation``) a list with one dict per
        cone, mapping entity class name -> activation = 1 - distance/FoV of
        the closest instance of that class (0 when none is in range).
        """
        entities = pg.scene_elements
        agents = pg.agents
        current_agent = self.anchor
        # Initialising output: one activation dict per cone.
        output = [dict() for i in range(self.cones_number)]
        # Current agent's pose: (x, y, angle) — angle presumably in radians,
        # since it is combined with math.atan2 results below; confirm upstream.
        agent_position = current_agent.position
        agent_coord = Vec2d(agent_position[0], agent_position[1])
        agent_angle = agent_position[2]
        # Gather positions of entities and agents, grouped by class name.
        sorted_positions = dict()
        # Scene elements first.
        for entity in entities:
            key = type(entity).__name__  # class name used as the output key
            if not key in sorted_positions:
                sorted_positions[key] = []
            # Looks like the relevant Pymunk position is the last one.
            # To check in entity.py.
            sorted_positions[key].append(entity.position)
        # Then the other agents.
        for agent in agents:
            key = type(agent).__name__  # class name used as the output key
            if not key in sorted_positions:
                sorted_positions[key] = []
            # An agent shouldn't detect itself.
            if not agent is current_agent:
                sorted_positions[key].append(agent.position)
        # For each entity class present in the playground:
        for entity_type, positions in sorted_positions.items():
            # TODO: add a check here on whether entity_type can be detected.
            # Start every cone at activation 0 for this class.
            for i in range(self.cones_number):
                output[i][entity_type] = 0
            for position in positions:
                # Approximation: use the entity's centre instead of projecting
                # onto its surface (would be:
                #   query = shape.segment_query(agent_coord, shape_position)
                #   near_point = query.point).
                near_point = position
                # Distance check - is the object within the field of view?
                distance = agent_coord.get_distance(near_point)
                if distance > self.FoV:
                    continue
                # Angle check - in which cone does it fall?
                dy = (near_point[1] - agent_coord[1])
                dx = (near_point[0] - agent_coord[0])
                target_angle = math.atan2(dy, dx)
                relative_angle = target_angle - agent_angle  # subtract heading to account for rotation
                relative_angle_degrees = math.degrees(relative_angle) % 360  # avoid negatives and angles > 360
                cone = None
                # Find which configured range the relative bearing falls into.
                for i in range(len(self.angle_ranges)):
                    angle_range = self.angle_ranges[i]
                    if relative_angle_degrees >= angle_range[0] and relative_angle_degrees < angle_range[1]:
                        cone = i
                if cone is None:
                    # NOTE(review): with the default ranges, bearings in
                    # [359, 360) fall into no cone and are silently dropped.
                    continue
                if not entity_type in output[cone]:
                    output[cone][entity_type] = 0
                normalised_distance = distance / self.FoV
                activation = 1 - normalised_distance
                # Keep only the nearest instance = highest activation.
                if output[cone][entity_type] < activation:
                    output[cone][entity_type] = activation
        self.observation = output
        return output

    def get_shape_observation(self):
        # Not implemented for this sensor type.
        pass
| 34.435374 | 111 | 0.600356 |
a3a2b31e0b527f3675dc65a92359c7b90836c880 | 511 | py | Python | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | null | null | null | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | 2 | 2021-12-15T05:10:43.000Z | 2021-12-15T05:11:00.000Z | apilos_settings/models.py | MTES-MCT/apilos | 6404b94b0f668e39c1dc12a6421aebd26ef1c98b | [
"MIT"
] | 1 | 2021-12-28T13:06:06.000Z | 2021-12-28T13:06:06.000Z | from django.db import models
class DepartementManager(models.Manager):
    """Manager exposing the natural-key lookup Django's (de)serialization
    machinery expects (``Model._default_manager.get_by_natural_key``)."""

    def get_by_natural_key(self, code_insee):
        return self.get(code_insee=code_insee)


class Departement(models.Model):
    """Departement identified by its INSEE code, used as the natural key."""

    id = models.AutoField(primary_key=True)
    nom = models.CharField(max_length=255, unique=True)
    code_insee = models.CharField(max_length=3, unique=True)
    # NOTE(review): max_length=3 looks short for a full postal code —
    # confirm whether only a prefix is stored here.
    code_postal = models.CharField(max_length=3)

    objects = DepartementManager()

    def natural_key(self):
        # Serialized references use the INSEE code instead of the numeric pk.
        return (self.code_insee,)

    def get_by_natural_key(self, code_insee):
        # Backward-compatible shim: the original defined this on the model,
        # where ``self.get`` does not exist; delegate to the manager, which
        # is where Django actually looks for it.
        return type(self).objects.get(code_insee=code_insee)

    def __str__(self) -> str:
        return f"{self.code_insee} - {self.nom}"
| 26.894737 | 60 | 0.700587 |
a3a3cd19889c828efa32a912a6cda2aa73fb4ca6 | 4,310 | py | Python | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | null | null | null | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | 2 | 2021-07-07T08:40:56.000Z | 2022-01-06T16:10:27.000Z | bin/allplots.py | Gabaldonlab/karyon | ba81828921b83b553f126892795253be1fd941ba | [
"MIT"
] | null | null | null | #!/bin/python
import sys, os, re, subprocess, math
import argparse
import psutil
from pysam import pysam
from Bio import SeqIO
import numpy as np
import numpy.random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import seaborn as sns
import pandas as pd
import scipy.stats
from scipy.stats import gaussian_kde
from scipy import stats
from decimal import Decimal
import string, random
if __name__ == '__main__':
    # Command-line entry point: parse inputs, then build all karyon plots.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fasta', required=True, help="fasta file used as input")
    parser.add_argument('-d', '--output_directory', default="./", help='Directory where all the output files will be generated.')
    parser.add_argument('-o', '--output_name', required=True, help="Output prefix")
    parser.add_argument('-v', '--vcf', required=True, help="VCF file used as input")
    parser.add_argument('-p', '--pileup', required=True, help="Mpileup file used as input")
    parser.add_argument('-b', '--bam', required=True, help="Bam file used as input")
    parser.add_argument('-l', '--library', required=True, nargs='+', help="Illumina libraries used for the KAT plot")
    parser.add_argument('--configuration', default=False, help="Configuration file. By default will use ./configuration.txt as the configuration file.")
    parser.add_argument('-w', '--window_size', default=1000, help="Window size for plotting")
    parser.add_argument('-x', '--max_scaf2plot', default=20, help="Number of scaffolds to analyze")
    parser.add_argument('-s', '--scafminsize', default=False, help="Will ignore scaffolds with length below the given threshold")
    parser.add_argument('-S', '--scafmaxsize', default=False, help="Will ignore scaffolds with length above the given threshold")
    parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different programs. If false, the program will assign a name consisting of a string of 6 random alphanumeric characters.')
    args = parser.parse_args()

    # Normalize the output directory to an absolute path ending with "/".
    true_output = os.path.abspath(args.output_directory)
    if true_output[-1] != "/":
        true_output = true_output + "/"
def parse_config(config):
    """Parse a karyon configuration file.

    File format (one directive per line, first character is the tag):
      ``#``  comment (ignored)
      ``+name``  starts a new section called *name*
      ``@value`` sets the section's location (first occurrence wins)
      ``>value`` appends a path to the section (space-separated accumulation)
      ``?value`` sets the section's options (first occurrence wins)

    Args:
        config (str): path to the configuration file.

    Returns:
        dict: ``{section: [location, "path1 path2 ", "options "]}``.
    """
    config_dict = {}
    section = None
    # Use a context manager so the file handle is closed (the original
    # leaked it), and strip the newline explicitly instead of slicing
    # ``line[1:-1]``, which ate the last character of a file that does not
    # end with a newline.
    with open(config) as handle:
        for raw_line in handle:
            line = raw_line.rstrip('\r\n')
            if not line or line[0] == "#":
                continue
            tag, value = line[0], line[1:]
            if tag == "+":
                section = value
                config_dict[section] = ["", "", ""]
            elif section is None:
                # Directive before any '+' header: ignore instead of crashing.
                continue
            elif tag == "@":
                if config_dict[section][0] == "":
                    config_dict[section][0] = value
            elif tag == ">":
                config_dict[section][1] = config_dict[section][1] + value + " "
            elif tag == "?":
                if config_dict[section][2] == "":
                    config_dict[section][2] = value + " "
    return config_dict
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Build a random run identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
config_path = args.configuration
if not args.configuration:
    # Default configuration lives one directory above this script.
    selfpath = os.path.dirname(os.path.realpath(sys.argv[0]))
    config_path = selfpath[:selfpath.rfind('/')]
    # NOTE(review): the previous assignment is immediately overwritten here.
    config_path = selfpath[:selfpath.rfind('/')] + "/configuration.txt"
config_dict = parse_config(config_path)

counter = int(args.max_scaf2plot)
window_size = int(args.window_size)
step = window_size / 2

# Work inside the output directory; restored at the end.
true_output = os.path.abspath(args.output_directory)
cwd = os.path.abspath(os.getcwd())
os.chdir(true_output)

# Compress and index the VCF so it can be consumed downstream.
# NOTE(review): paths are interpolated unquoted into shell commands —
# paths with spaces or shell metacharacters will break or be unsafe.
os.system("bgzip -c " + args.vcf + " > " + args.vcf + ".gz")
os.system("tabix -p vcf " + args.vcf + ".gz")
#vcf_file = pysam.VariantFile(args.vcf+".gz", 'r')
bam_file = pysam.AlignmentFile(args.bam, 'rb')

# Karyon install root from the config; job_ID names intermediate files.
home = config_dict["karyon"][0]
job_ID = args.job_id if args.job_id else id_generator()
name = args.output_name if args.output_name else job_ID
kitchen = home + "tmp/" + job_ID

# Map each scaffold name in the FASTA to its sequence length.
lendict = {}
fastainput = SeqIO.index(args.fasta, "fasta")
for i in fastainput:
    lendict[i] = len(fastainput[i].seq)

from karyonplots import katplot, allplots
from report import report, ploidy_veredict

# Produce all per-scaffold plots, then the ploidy veredict and report.
df = allplots(window_size,
    args.vcf,
    args.fasta,
    args.bam,
    args.pileup,
    args.library[0],
    config_dict['nQuire'][0],
    config_dict["KAT"][0],
    kitchen,
    true_output,
    counter,
    job_ID, name,
    args.scafminsize,
    args.scafmaxsize, False)
df2 = ploidy_veredict(df, true_output, name, window_size)
report(true_output, name, df2, True, False, window_size, False, False)
df2.to_csv(true_output + "/Report/" + name + ".csv", index=False)
os.chdir(cwd)
| 35.916667 | 236 | 0.710905 |
a3a50e8b6b7936872866a8a4572b115958922c08 | 713 | py | Python | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 11 | 2016-05-04T11:55:01.000Z | 2018-09-29T01:00:05.000Z | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 21 | 2016-05-25T06:54:44.000Z | 2019-06-06T00:38:38.000Z | console/middleware.py | laincloud/Console | 9d4fb68ad5378279697803ca45a4eda58d72d9a3 | [
"MIT"
] | 16 | 2016-05-13T08:20:43.000Z | 2021-12-31T09:23:14.000Z | # -*- coding: utf-8
from django.http import JsonResponse, HttpResponse
# from commons.settings import ARCHON_HOST
class CORSMiddleware:
    """Old-style Django middleware that answers CORS preflight requests and
    marks every response as accessible from any origin."""

    def process_request(self, request):
        # Only OPTIONS preflight requests are short-circuited here.
        if request.method != 'OPTIONS':
            return None
        reply = HttpResponse('', content_type='text/plain', status=200)
        reply['Access-Control-Allow-Methods'] = 'DELETE, GET, PATCH, POST, PUT'
        reply['Access-Control-Allow-Headers'] = 'access-token, content-type'
        reply['Access-Control-Max-Age'] = 86400  # cache preflight for a day
        return reply

    def process_response(self, request, response):
        response['Access-Control-Allow-Origin'] = '*'
        return response
| 32.409091 | 100 | 0.605891 |
a3a546b361a588aac685878f310be185f371649f | 538 | py | Python | pypxl/errors.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | 1 | 2021-04-02T09:05:33.000Z | 2021-04-02T09:05:33.000Z | pypxl/errors.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | null | null | null | pypxl/errors.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | null | null | null | class PxlapiException(Exception):
"""
The base exception for anything related to pypxl
"""
pass
class InvalidFlag(PxlapiException):
pass
class InvalidFilter(PxlapiException):
pass
class InvalidEyes(PxlapiException):
pass
class TooManyCharacters(PxlapiException):
pass
class InvalidSafety(PxlapiException):
pass
class PxlObjectError(PxlapiException):
"""
A class which all errors originating from using the PxlOnject come from
"""
pass
class InvalidBytes(PxlObjectError):
pass | 18.551724 | 75 | 0.728625 |
a3a5bb350e05522589702afb78e2a9430fe6a8c4 | 1,061 | py | Python | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | test.py | vinsmokemau/NQueens | 7c9291f655b8e4f0ce4c6c5d07a80440f8f2c0a8 | [
"MIT"
] | null | null | null | import unittest
from algorithm import NQueens
class TestNQueens(unittest.TestCase):
    """Checks NQueens solution counts for board sizes 1-10.

    Expected counts follow the known N-queens sequence (OEIS A000170):
    1, 0, 0, 2, 10, 4, 40, 92, 352, 724.
    """

    def test_1_queen(self):
        self.assertEqual(NQueens(1).solutions, 1)

    def test_2_queen(self):
        self.assertEqual(NQueens(2).solutions, 0)

    def test_3_queen(self):
        self.assertEqual(NQueens(3).solutions, 0)

    def test_4_queen(self):
        self.assertEqual(NQueens(4).solutions, 2)

    def test_5_queen(self):
        self.assertEqual(NQueens(5).solutions, 10)

    def test_6_queen(self):
        self.assertEqual(NQueens(6).solutions, 4)

    def test_7_queen(self):
        self.assertEqual(NQueens(7).solutions, 40)

    def test_8_queen(self):
        self.assertEqual(NQueens(8).solutions, 92)

    def test_9_queen(self):
        self.assertEqual(NQueens(9).solutions, 352)

    def test_10_queen(self):
        self.assertEqual(NQueens(10).solutions, 724)

    def test_float_size(self):
        # Non-integer sizes are rejected: no solutions plus an error message.
        n_queen = NQueens(8.5)
        self.assertEqual(n_queen.solutions, 0)
        self.assertEqual(n_queen.error, "The size isn't a digit")
6e55e971b17323a0b8342354a7a6ad601469f01e | 18,524 | py | Python | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | 3 | 2021-01-06T08:24:47.000Z | 2021-02-27T08:08:07.000Z | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | null | null | null | syntropynac/resolve.py | SyntropyNet/syntropy-nac | 8beddcd606d46fd909f51d0c53044be496cec995 | [
"MIT"
] | null | null | null | import functools
from dataclasses import dataclass
from itertools import combinations
import click
import syntropy_sdk as sdk
from syntropy_sdk import utils
from syntropynac.exceptions import ConfigureNetworkError
from syntropynac.fields import ALLOWED_PEER_TYPES, ConfigFields, PeerState, PeerType
@dataclass
class ConnectionServices:
    """A pair of agent ids plus the service names configured on each side."""

    agent_1: int
    agent_2: int
    agent_1_service_names: list
    agent_2_service_names: list

    @classmethod
    def create(cls, link, endpoints):
        """Build an instance from an id pair and the two config endpoints."""
        first, second = endpoints
        services_1 = cls._get_services(first)
        services_2 = cls._get_services(second)
        return cls(link[0], link[1], services_1, services_2)

    @staticmethod
    def _get_services(endpoint):
        """Normalize the endpoint's services entry to a list of names.

        Accepts a missing entry (-> []), a single name (-> [name]) or a list
        of names; anything else raises ConfigureNetworkError.
        """
        names = endpoint[1].get(ConfigFields.SERVICES)
        if names is None:
            return []
        if isinstance(names, str):
            return [names]
        well_formed = isinstance(names, list) and all(
            isinstance(name, str) for name in names
        )
        if not well_formed:
            raise ConfigureNetworkError(
                f"Services parameter must be a list of service names for endpoint {endpoint[0]}"
            )
        return names

    def get_subnets(self, endpoint_id, agents):
        """Collect subnet ids of the configured services on side 1 or 2."""
        agent_id = getattr(self, f"agent_{endpoint_id}")
        service_names = getattr(self, f"agent_{endpoint_id}_service_names")
        subnets = []
        for service in agents[agent_id]["agent_services"]:
            if service["agent_service_name"] not in service_names:
                continue
            for subnet in service["agent_service_subnets"]:
                subnets.append(subnet["agent_service_subnet_id"])
        return subnets
@functools.lru_cache(maxsize=None)
def resolve_agent_by_name(api, name, silent=False):
    """Query the platform for agents filtered by *name*, returning their ids.

    Results are memoized per (api, name, silent) argument tuple.
    NOTE(review): *silent* is accepted for signature consistency with the
    other resolve helpers but is not used inside this function.
    """
    return [
        agent["agent_id"]
        for agent in utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
            filter=f"name:'{name}'", _preload_content=False
        )["data"]
    ]
@functools.lru_cache(maxsize=None)
def get_all_agents(api, silent=False):
    """Fetch every agent from the platform, keyed by agent id (memoized)."""
    response = utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
        _preload_content=False
    )
    agents_by_id = {}
    for agent in response["data"]:
        agents_by_id[agent["agent_id"]] = agent
    return agents_by_id
def resolve_agents(api, agents, silent=False):
    """Resolves endpoint names to ids inplace.

    Entries whose value is already set are left untouched; entries with a
    None value are looked up by name. Unresolvable names are reported
    (or raised, in silent mode) and left as None.

    Args:
        api (PlatformApi): API object to communicate with the platform.
        agents (dict): A dictionary containing endpoints.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
    """
    for name, id in agents.items():
        # Already resolved: keep the existing id.
        if id is not None:
            continue
        result = resolve_agent_by_name(api, name, silent=silent)
        if len(result) != 1:
            # Either no match or an ambiguous (multi-match) name.
            error = f"Could not resolve endpoint name {name}, found: {result}."
            if not silent:
                # Interactive mode: warn and leave the entry unresolved.
                click.secho(
                    error,
                    err=True,
                    fg="red",
                )
                continue
            else:
                # Silent (Ansible) mode: fail hard instead of printing.
                raise ConfigureNetworkError(error)
        agents[name] = result[0]
def get_peer_id(peer_name, peer_config):
    """Translate a config peer entry into an agent id, or None when it
    cannot be determined from the entry alone."""
    kind = peer_config.get(ConfigFields.PEER_TYPE, PeerType.ENDPOINT)
    if kind == PeerType.ENDPOINT:
        # Endpoint entries carry an explicit id field (may be absent -> None).
        return peer_config.get(ConfigFields.ID)
    if kind == PeerType.ID:
        # The name itself is the numeric id for this peer type.
        try:
            return int(peer_name)
        except ValueError:
            return None
    return None
def resolve_present_absent(agents, present, absent):
    """Resolve agent connections by objects into agent connections by ids.

    Present connections appear as "present" in the config and will be added
    to the network; absent connections appear as "absent" and will be
    removed. A present connection is dropped when it (in either direction)
    also appears as absent, or when it links an agent to itself.

    Args:
        agents (dict[str, int]): Agent map from name to id.
        present (list): Connections marked as present in the config.
        absent (list): Connections marked as absent in the config.

    Returns:
        tuple: (present id pairs, absent id pairs, ConnectionServices list
        corresponding to the kept present pairs). Id pairs are two-element
        lists of agent ids.
    """
    present_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in present]
    absent_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in absent]

    def _keep(link):
        # Shared predicate (previously duplicated verbatim in both
        # comprehensions): drop self-links and links that are absent in
        # either direction.
        return (
            link not in absent_ids
            and link[::-1] not in absent_ids
            and link[0] != link[1]
        )

    services = [
        ConnectionServices.create(link, conn)
        for link, conn in zip(present_ids, present)
        if _keep(link)
    ]
    kept = [link for link in present_ids if _keep(link)]
    removed = [link for link in absent_ids if link[0] != link[1]]
    return kept, removed, services
def validate_connections(connections, silent=False, level=0):
    """Check if the connections structure makes any sense.
    Recursively goes inside 'connect_to' dictionary up to 1 level.
    Args:
        connections (dict): A dictionary describing connections.
        silent (bool, optional): Indicates whether to suppress output to stderr.
        Raises ConfigureNetworkError instead. Defaults to False.
        level (int, optional): Recursion level depth. Defaults to 0.
    Raises:
        ConfigureNetworkError: If silent==True, then raise an exception in case of irrecoverable error.
    Returns:
        bool: Returns False in case of invalid connections structure.
    """
    # Nesting deeper than one 'connect_to' level is tolerated but ignored;
    # warn (unless silent) and treat the subtree as valid.
    if level > 1:
        # `silent or f(...)` short-circuits: the message is only printed
        # when silent is falsy.
        silent or click.secho(
            (
                f"Field {ConfigFields.CONNECT_TO} found at level {level + 1}. This will be ignored, "
                "however, please double check your configuration file."
            )
        )
        return True
    for name, con in connections.items():
        # Endpoint names must be non-empty strings or ints.
        if not name or not isinstance(name, (str, int)):
            error = f"Invalid endpoint name found."
            if not silent:
                click.secho(error, err=True, fg="red")
                return False
            else:
                raise ConfigureNetworkError(error)
        # Each entry value must itself be a config dictionary.
        if not isinstance(con, dict):
            error = f"Entry '{name}' in {ConfigFields.CONNECT_TO} must be a dictionary, but found {con.__class__.__name__}."
            if not silent:
                click.secho(error, err=True, fg="red")
                return False
            else:
                raise ConfigureNetworkError(error)
        # The peer type field is mandatory ...
        if ConfigFields.PEER_TYPE not in con:
            error = f"Endpoint '{name}' {ConfigFields.PEER_TYPE} must be present."
            if not silent:
                click.secho(error, err=True, fg="red")
                return False
            else:
                raise ConfigureNetworkError(error)
        # ... and must be one of the allowed peer types.
        if con[ConfigFields.PEER_TYPE] not in ALLOWED_PEER_TYPES:
            error = f"Endpoint '{name}' {ConfigFields.PEER_TYPE} '{con[ConfigFields.PEER_TYPE]}' is not allowed."
            if not silent:
                click.secho(error, err=True, fg="red")
                return False
            else:
                raise ConfigureNetworkError(error)
        # Heuristic: a name that parses as an int is probably an agent id.
        probably_an_id = False
        try:
            name_as_id = int(name)
            probably_an_id = True
        except ValueError:
            name_as_id = name
        # Numeric name declared as ENDPOINT: suspicious but not fatal - warn.
        if probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ENDPOINT:
            click.secho(
                (
                    f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ENDPOINT}, however, "
                    f"it appears to be an {PeerType.ID}."
                ),
                err=True,
                fg="yellow",
            )
        # Non-numeric name declared as ID: fatal, the id cannot be parsed.
        if not probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ID:
            error = (
                f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ID}, however, "
                f"it appears to be an {PeerType.ENDPOINT}."
            )
            if not silent:
                click.secho(error, err=True, fg="red")
                return False
            else:
                raise ConfigureNetworkError(error)
        # Optional explicit id: must be a non-empty int-parsable str/int,
        # and for ID-typed peers it must match the (numeric) name.
        if ConfigFields.ID in con and con[ConfigFields.ID] is not None:
            try:
                _ = int(con[ConfigFields.ID])
                id_valid = True
            except ValueError:
                id_valid = False
            if (
                not isinstance(con[ConfigFields.ID], (str, int))
                or not con[ConfigFields.ID]
                or not id_valid
            ):
                error = f"Endpoint '{name}' {ConfigFields.ID} is invalid."
                if not silent:
                    click.secho(error, err=True, fg="red")
                    return False
                else:
                    raise ConfigureNetworkError(error)
            if (
                con[ConfigFields.PEER_TYPE] == PeerType.ID
                and int(con[ConfigFields.ID]) != name_as_id
            ):
                error = f"Endpoint '{name}' {ConfigFields.ID} field does not match endpoint id."
                if not silent:
                    click.secho(error, err=True, fg="red")
                    return False
                else:
                    raise ConfigureNetworkError(error)
        # Optional services: must be a list/tuple of str/int names.
        if ConfigFields.SERVICES in con:
            if not isinstance(con[ConfigFields.SERVICES], (list, tuple)):
                error = (
                    f"Endpoint '{name}' {ConfigFields.SERVICES} must be a "
                    f"list, but found {con[ConfigFields.SERVICES].__class__.__name__}."
                )
                if not silent:
                    click.secho(error, err=True, fg="red")
                    return False
                else:
                    raise ConfigureNetworkError(error)
            for service in con[ConfigFields.SERVICES]:
                if not isinstance(service, (str, int)):
                    error = (
                        f"Endpoint '{name}' service must be a string"
                        f", but found {service.__class__.__name__}."
                    )
                    if not silent:
                        click.secho(error, err=True, fg="red")
                        return False
                    else:
                        raise ConfigureNetworkError(error)
        # Recurse one level into nested 'connect_to' entries.
        if ConfigFields.CONNECT_TO in con:
            if not validate_connections(
                con[ConfigFields.CONNECT_TO], silent, level + 1
            ):
                return False
    return True
def resolve_p2p_connections(api, connections, silent=False):
    """Resolves configuration connections for Point to Point topology.
    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections as described in the config file.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []
    absent = []
    agents = {}
    for src in connections.items():
        dst = src[1].get(ConfigFields.CONNECT_TO)
        if dst is None or len(dst.keys()) == 0:
            continue
        # Point-to-point: only the first destination entry is considered.
        dst = list(dst.items())[0]
        agents[src[0]] = get_peer_id(*src)
        agents[dst[0]] = get_peer_id(*dst)
        # ABSENT on either side wins over PRESENT.
        if (
            src[1].get(ConfigFields.STATE) == PeerState.ABSENT
            or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
        ):
            absent.append((src, dst))
        elif (
            src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
            or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
        ):
            present.append((src, dst))
        else:
            error = f"Invalid state for agents {src[0]} or {dst[0]}"
            if not silent:
                click.secho(error, fg="red", err=True)
            else:
                raise ConfigureNetworkError(error)
    resolve_agents(api, agents, silent=silent)
    # BUG FIX: the ids are the dict *values* (name -> id); iterating keys()
    # checked endpoint names, so unresolved (None) ids were never detected.
    # Also avoid shadowing the builtin `id`.
    if any(agent_id is None for agent_id in agents.values()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
def expand_agents_tags(api, dst_dict, silent=False):
    """Expand tag endpoints into individual endpoints.
    Args:
        api (PlatformApi): API object to communicate with the platform.
        dst_dict (dict): Connections dictionary that contain tags as endpoints.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
    Raises:
        ConfigureNetworkError: In case of any errors
    Returns:
        Union[dict, None]: Dictionary with expanded endpoints where key is the name and value is the config(id, state, type).
    """
    items = {}
    # First expand tags
    for name, dst in dst_dict.items():
        if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
            continue
        # Query the platform for every agent carrying this tag name.
        agents = utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
            filter=f"tags_names[]:{name}",
            _preload_content=False,
        )["data"]
        if not agents:
            error = f"Could not find endpoints by the tag {name}"
            if not silent:
                click.secho(error, err=True, fg="red")
                # Returning None signals failure to the caller.
                return
            else:
                raise ConfigureNetworkError(error)
        tag_state = dst.get(ConfigFields.STATE, PeerState.PRESENT)
        for agent in agents:
            agent_name = agent["agent_name"]
            # When an agent appears under several tags, an ABSENT tag state
            # overrides a previously recorded PRESENT one.
            if agent_name not in items or (
                tag_state == PeerState.ABSENT
                and items[agent_name][ConfigFields.STATE] == PeerState.PRESENT
            ):
                items[agent_name] = {
                    ConfigFields.ID: agent["agent_id"],
                    ConfigFields.STATE: tag_state,
                    ConfigFields.PEER_TYPE: PeerType.ENDPOINT,
                    ConfigFields.SERVICES: dst.get(ConfigFields.SERVICES),
                }
    # Then override with explicit configs
    # (an endpoint listed explicitly beats its tag-expanded entry).
    for name, dst in dst_dict.items():
        if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
            items[name] = dst
            continue
    return items
def resolve_p2m_connections(api, connections, silent=False):
    """Resolves configuration connections for Point to Multipoint topology. Also, expands tags.
    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections as described in the config file.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []
    absent = []
    agents = {}
    for src in connections.items():
        dst_dict = src[1].get(ConfigFields.CONNECT_TO)
        if dst_dict is None or len(dst_dict.keys()) == 0:
            continue
        # BUG FIX: forward `silent` so Ansible mode raises instead of printing.
        dst_dict = expand_agents_tags(api, dst_dict, silent=silent)
        if dst_dict is None:
            return resolve_present_absent({}, [], [])
        agents[src[0]] = get_peer_id(*src)
        for dst in dst_dict.items():
            agents[dst[0]] = get_peer_id(*dst)
            # ABSENT on either side wins over PRESENT.
            if (
                src[1].get(ConfigFields.STATE) == PeerState.ABSENT
                or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
            ):
                absent.append((src, dst))
            elif (
                src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
                or dst[1].get(ConfigFields.STATE, PeerState.PRESENT)
                == PeerState.PRESENT
            ):
                present.append((src, dst))
            else:
                error = f"Invalid state for agents {src[0]} or {dst[0]}"
                if not silent:
                    click.secho(error, fg="red", err=True)
                else:
                    raise ConfigureNetworkError(error)
    resolve_agents(api, agents, silent=silent)
    # BUG FIX: check the resolved ids (values), not the endpoint names (keys).
    if any(agent_id is None for agent_id in agents.values()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
def resolve_mesh_connections(api, connections, silent=False):
    """Resolves configuration connections for mesh topology. Also, expands tags.
    Args:
        api (PlatformApi): API object to communicate with the platform.
        connections (dict): A dictionary containing connections.
        silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
    Returns:
        list: A list of two item lists describing endpoint to endpoint connections.
    """
    present = []
    absent = []
    # BUG FIX: forward `silent` so Ansible mode raises instead of printing.
    connections = expand_agents_tags(api, connections, silent=silent)
    if connections is None:
        return resolve_present_absent({}, [], [])
    agents = {
        name: get_peer_id(name, connection) for name, connection in connections.items()
    }
    # NOTE: Assuming connections are bidirectional
    for src, dst in combinations(connections.items(), 2):
        # ABSENT on either side wins over PRESENT.
        if (
            src[1].get(ConfigFields.STATE) == PeerState.ABSENT
            or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
        ):
            absent.append((src, dst))
        elif (
            src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
            or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
        ):
            present.append((src, dst))
        else:
            error = f"Invalid state for agents {src[0]} or {dst[0]}"
            if not silent:
                click.secho(error, fg="red", err=True)
            else:
                raise ConfigureNetworkError(error)
    resolve_agents(api, agents, silent=silent)
    # BUG FIX: check the resolved ids (values), not the endpoint names (keys).
    if any(agent_id is None for agent_id in agents.values()):
        return resolve_present_absent({}, [], [])
    return resolve_present_absent(agents, present, absent)
| 36.608696 | 125 | 0.589721 |
6e56c45295d74ab6452768ca7c9600d73e511225 | 10,298 | py | Python | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/nodes/node_impl.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | """This module contains the implementation of the cluster node interface."""
import datetime
from typing import Optional, Any, Callable
import bitmath
import fabric.operations
import fabric.tasks
import fabric.decorators
from fabric.exceptions import CommandTimeout
from fabric.state import env
from idact.core.retry import Retry
from idact.core.config import ClusterConfig
from idact.core.jupyter_deployment import JupyterDeployment
from idact.core.node_resource_status import NodeResourceStatus
from idact.detail.auth.authenticate import authenticate
from idact.detail.helper.raise_on_remote_fail import raise_on_remote_fail
from idact.detail.helper.retry import retry_with_config
from idact.detail.helper.stage_info import stage_debug
from idact.detail.helper.utc_from_str import utc_from_str
from idact.detail.helper.utc_now import utc_now
from idact.detail.jupyter.deploy_jupyter import deploy_jupyter
from idact.detail.log.capture_fabric_output_to_log import \
capture_fabric_output_to_log
from idact.detail.log.get_logger import get_logger
from idact.detail.nodes.node_internal import NodeInternal
from idact.detail.nodes.node_resource_status_impl import NodeResourceStatusImpl
from idact.detail.serialization.serializable_types import SerializableTypes
from idact.detail.tunnel.build_tunnel import build_tunnel
from idact.detail.tunnel.get_bindings_with_single_gateway import \
get_bindings_with_single_gateway
from idact.detail.tunnel.ssh_tunnel import SshTunnel
from idact.detail.tunnel.tunnel_internal import TunnelInternal
from idact.detail.tunnel.validate_tunnel_ports import validate_tunnel_ports
ANY_TUNNEL_PORT = 0
class NodeImpl(NodeInternal):
    """Implementation of cluster node interface.
    :param config: Client cluster config.
    """
    def connect(self, timeout: Optional[int] = None):
        """Verifies the SSH connection by echoing a test string remotely."""
        result = self.run("echo 'Testing connection...'", timeout=timeout)
        if result != 'Testing connection...':
            raise RuntimeError("Unexpected test command output.")
    def __init__(self,
                 config: ClusterConfig):
        self._config = config
        # Allocation details; all None until make_allocated() is called.
        self._host = None  # type: Optional[str]
        self._port = None  # type: Optional[int]
        self._cores = None  # type: Optional[int]
        self._memory = None  # type: Optional[bitmath.Byte]
        self._allocated_until = None  # type: Optional[datetime.datetime]
    def _ensure_allocated(self):
        """Raises an exception if the node is not allocated."""
        if self._host is None:
            raise RuntimeError("Node is not allocated.")
        # A past deadline means the allocation job has already terminated.
        if self._allocated_until and self._allocated_until < utc_now():
            message = "'{node}' was terminated at '{timestamp}'."
            raise RuntimeError(message.format(
                node=self._host,
                timestamp=self._allocated_until.isoformat()))
    def run(self,
            command: str,
            timeout: Optional[int] = None) -> str:
        """Runs a shell command on the node and returns its output."""
        return self.run_impl(command=command,
                             timeout=timeout,
                             install_keys=False)
    def run_impl(self,
                 command: str,
                 timeout: Optional[int] = None,
                 install_keys: bool = False) -> str:
        """Runs a command on the node via a fabric task.

        :param command: Shell command to execute.
        :param timeout: Optional command timeout in seconds.
        :param install_keys: Whether to install shared SSH keys during
                             authentication.
        :raises TimeoutError: If the command exceeded the timeout.
        :raises RuntimeError: If the command failed for any other reason.
        """
        try:
            @fabric.decorators.task
            def task():
                """Runs the command with a timeout."""
                # pty=False keeps output clean; stdout/stderr are captured
                # into the idact log instead of the console.
                with capture_fabric_output_to_log():
                    return fabric.operations.run(command,
                                                 pty=False,
                                                 timeout=timeout)
            return self.run_task(task=task,
                                 install_keys=install_keys)
        except CommandTimeout as e:
            raise TimeoutError("Command timed out: '{command}'".format(
                command=command)) from e
        except RuntimeError as e:
            raise RuntimeError("Cannot run '{command}'".format(
                command=command)) from e
    def run_task(self,
                 task: Callable,
                 install_keys: bool = False) -> Any:
        """Executes a fabric task on this node and returns its result.

        Authenticates against the node first; any remote failure is
        converted into a RuntimeError.
        """
        try:
            self._ensure_allocated()
            with raise_on_remote_fail(exception=RuntimeError):
                with authenticate(host=self._host,
                                  port=self._port,
                                  config=self._config,
                                  install_shared_keys=install_keys):
                    result = fabric.tasks.execute(task)
            # fabric returns a host -> result map; there is a single host.
            output = next(iter(result.values()))
            return output
        except RuntimeError as e:
            raise RuntimeError("Cannot run task.") from e
    def make_allocated(self,
                       host: str,
                       port: int,
                       cores: Optional[int],
                       memory: Optional[bitmath.Byte],
                       allocated_until: Optional[datetime.datetime]):
        """Updates the allocation info.
        :param host: Hostname of the cluster node.
        :param port: SSH port of the cluster node.
        :param cores: Allocated core count.
        :param memory: Allocated memory.
        :param allocated_until: Timestamp for job termination. Must be UTC
                                or contain timezone info.
                                None is treated as unlimited allocation.
        """
        self._host = host
        self._port = port
        self._cores = cores
        self._memory = memory
        self._allocated_until = allocated_until
    def make_cancelled(self):
        """Updates the allocation info after the allocation was cancelled."""
        self._host = None
        self._port = None
        self._cores = None
        self._memory = None
        self._allocated_until = None
    def __str__(self):
        """Human-readable summary of the allocation state."""
        if not self._host:
            return "Node(NotAllocated)"
        return "Node({host}:{port}, {allocated_until})".format(
            host=self._host,
            port=self._port,
            allocated_until=self._allocated_until)
    def __repr__(self):
        return str(self)
    def tunnel(self,
               there: int,
               here: Optional[int] = None) -> TunnelInternal:
        """Opens an SSH tunnel from local port `here` to node port `there`."""
        try:
            log = get_logger(__name__)
            with stage_debug(log, "Opening tunnel %s -> %d to %s",
                             here, there, self):
                self._ensure_allocated()
                here, there = validate_tunnel_ports(here=here,
                                                    there=there)
                # Single-element list used as a mutable closure cell: the
                # first attempt uses the requested local port, retries fall
                # back to any free port.
                first_try = [True]
                def get_bindings_and_build_tunnel() -> TunnelInternal:
                    bindings = get_bindings_with_single_gateway(
                        here=here if first_try[0] else ANY_TUNNEL_PORT,
                        node_host=self._host,
                        node_port=self._port,
                        there=there)
                    first_try[0] = False
                    return build_tunnel(config=self._config,
                                        bindings=bindings,
                                        ssh_password=env.password,
                                        ssh_pkey=env.key_filename)
                with authenticate(host=self._host,
                                  port=self._port,
                                  config=self._config):
                    # No retry needed when any port is acceptable upfront.
                    if here == ANY_TUNNEL_PORT:
                        return get_bindings_and_build_tunnel()
                    return retry_with_config(
                        get_bindings_and_build_tunnel,
                        name=Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT,
                        config=self._config)
        except RuntimeError as e:
            raise RuntimeError(
                "Unable to tunnel {there} on node '{host}'.".format(
                    there=there,
                    host=self._host)) from e
    def tunnel_ssh(self,
                   here: Optional[int] = None) -> TunnelInternal:
        """Opens a tunnel to the node's own SSH port.

        NOTE(review): the `here` parameter appears to be ignored - the
        tunnel is opened with here=self.port. Confirm intended behavior.
        """
        return SshTunnel(tunnel=self.tunnel(here=self.port, there=self.port))
    def deploy_notebook(self, local_port: int = 8080) -> JupyterDeployment:
        """Deploys a Jupyter notebook on the node, tunnelled to local_port."""
        return deploy_jupyter(node=self,
                              local_port=local_port)
    @property
    def config(self) -> ClusterConfig:
        return self._config
    @property
    def host(self) -> Optional[str]:
        return self._host
    @property
    def port(self) -> Optional[int]:
        return self._port
    @property
    def cores(self) -> Optional[int]:
        return self._cores
    @property
    def memory(self) -> Optional[bitmath.Byte]:
        return self._memory
    @property
    def resources(self) -> NodeResourceStatus:
        return NodeResourceStatusImpl(node=self)
    def serialize(self) -> dict:
        """Serializes the allocation state into a plain dict."""
        return {'type': str(SerializableTypes.NODE_IMPL),
                'host': self._host,
                'port': self._port,
                'cores': self._cores,
                'memory': (None if self._memory is None
                           else str(self._memory)),
                'allocated_until': (None if self._allocated_until is None
                                    else self._allocated_until.isoformat())}
    @staticmethod
    def deserialize(config: ClusterConfig, serialized: dict) -> 'NodeImpl':
        """Reconstructs a NodeImpl from a dict produced by serialize()."""
        try:
            assert serialized['type'] == str(SerializableTypes.NODE_IMPL)
            node = NodeImpl(config=config)
            node.make_allocated(
                host=serialized['host'],
                port=serialized['port'],
                cores=serialized['cores'],
                memory=(None if serialized['memory'] is None
                        else bitmath.parse_string(serialized['memory'])),
                allocated_until=(
                    None if serialized['allocated_until'] is None
                    else utc_from_str(serialized['allocated_until'])))
            return node
        except KeyError as e:
            raise RuntimeError("Unable to deserialize.") from e
    @property
    def allocated_until(self) -> Optional[datetime.datetime]:
        return self._allocated_until
    def __eq__(self, other):
        # Structural equality over all allocation fields and config.
        return self.__dict__ == other.__dict__
| 37.721612 | 79 | 0.583026 |
6e5770f83af2ce49e0548c12ebb2126470694c34 | 2,012 | py | Python | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 17 | 2015-01-14T08:40:22.000Z | 2021-05-08T04:39:50.000Z | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 1,477 | 2015-01-05T09:58:41.000Z | 2022-03-18T11:07:09.000Z | geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py | arnaud-morvan/geoportailv3 | b9d676cf78e45e12894f7d1ceea99b915562d64f | [
"MIT"
] | 14 | 2015-07-24T07:33:13.000Z | 2021-03-02T13:51:48.000Z | """create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
    """Drop the account-hierarchy table and its helper SQL function."""
    # Target schema name comes from the alembic config ("schema" option).
    schema = context.get_context().config.get_main_option('schema')
    op.drop_table('lux_user_inheritance', schema=schema)
    # Remove the PL/pgSQL function created by upgrade().
    op.execute("DROP FUNCTION IF EXISTS "
               "%(schema)s.getMainAccount(VARCHAR)"
               % {"schema": schema})
def upgrade():
    """Create the login hierarchy table and a root-account lookup function."""
    # Target schema name comes from the alembic config ("schema" option).
    schema = context.get_context().config.get_main_option('schema')
    # Each row links a child login to its parent (father) login.
    op.create_table(
        'lux_user_inheritance',
        sa.Column(
            'login', sa.VARCHAR(), autoincrement=False,
            nullable=False),
        sa.Column(
            'login_father', sa.VARCHAR(), autoincrement=False,
            nullable=False),
        schema=schema
    )
    op.create_primary_key(
        "lux_user_inheritance_pkey", "lux_user_inheritance",
        ['login', 'login_father'],
        schema=schema
    )
    # getMainAccount(child_login) walks up the parent chain until no parent
    # row is found and returns the top-most (main) account login.
    op.execute(
        "CREATE OR REPLACE FUNCTION %(schema)s.getMainAccount "
        "(child_login VARCHAR)"
        "RETURNS VARCHAR AS "
        "$$ "
        "DECLARE "
        "cur_login_father VARCHAR;"
        "res_login_father VARCHAR;"
        "c_father Cursor (p_login VARCHAR) FOR "
        "Select login_father From %(schema)s.lux_user_inheritance Where "
        "login = p_login;"
        "BEGIN "
        "cur_login_father := child_login;"
        "LOOP "
        "OPEN c_father(cur_login_father);"
        "FETCH FIRST FROM c_father into res_login_father;"
        "IF FOUND THEN "
        "cur_login_father := res_login_father;"
        "END IF;"
        "CLOSE c_father;"
        "IF NOT FOUND THEN "
        "RETURN cur_login_father;"
        "END IF;"
        "END LOOP;"
        "END;"
        "$$"
        "LANGUAGE plpgsql;" % {"schema": schema})
| 27.944444 | 73 | 0.611332 |
6e596f23ab56bd2dd8dd6ce01540892f3e46cdad | 1,076 | py | Python | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | 1 | 2017-08-25T07:17:04.000Z | 2017-08-25T07:17:04.000Z | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | 1 | 2018-06-09T18:03:35.000Z | 2018-06-09T18:03:35.000Z | tests/test_migrate.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import os
from tests import FlexGetBase
class TestMigrate(FlexGetBase):
    # Minimal task config: mock a single entry and accept it.
    __yaml__ = """
        tasks:
          test:
            mock:
              - {title: 'foobar'}
            accept_all: yes
    """
    def setup(self):
        """Points the test at a copy of an old-schema sqlite database."""
        import logging
        logging.critical('TestMigrate.setup()')
        db_filename = os.path.join(self.base_path, 'upgrade_test.sqlite')
        # in case running on windows, needs double \\
        filename = db_filename.replace('\\', '\\\\')
        self.database_uri = 'sqlite:///%s' % filename
        super(TestMigrate, self).setup()
    # This fails on windows when it tries to delete upgrade_test.sqlite
    # WindowsError: [Error 32] The process cannot access the file because it is being used by another process: 'upgrade_test.sqlite'
    #@with_filecopy('db-r1042.sqlite', 'upgrade_test.sqlite')
    def test_upgrade(self):
        # TODO: for some reason this will fail
        # NOTE(review): test is disabled by the early return below.
        return
        self.execute_task('test')
        assert self.task.accepted
| 31.647059 | 132 | 0.636617 |
6e5a5481a3630f1bb09ba60f327038cb691a80cf | 2,422 | py | Python | src/challenges/CtCI/dynamic/P1_triple_step.py | Ursidours/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | 2 | 2021-11-13T01:30:25.000Z | 2022-02-11T18:17:22.000Z | src/challenges/CtCI/dynamic/P1_triple_step.py | arnaudblois/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | null | null | null | src/challenges/CtCI/dynamic/P1_triple_step.py | arnaudblois/pythonic_interviews | a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045 | [
"MIT"
] | null | null | null | """
Problem 1 of Chapter 8 in CtCi
Triple Step: A child is running up a staircase with N steps and can hop either
1 step, 2 steps, or 3 steps at a time. Return the number of possible ways exist
this can be done.
General idea of the solution: At any step N, the child must necessarily come
from the steps N-3, N-2 or N-1. The possible ways to go to N are therefore the
sums of the possible ways to come to N-3, N-2 and N-1. This is the definition
of the tribonacci numbers, a generalization of the Fibonacci sequence.
"""
from src.utils.decorators import Memoize
def tribonacci_number(N):
    """
    Closed-form (Binet-like) formula for the Nth tribonacci number,
    built from the real root of x^3 = x^2 + x + 1.
    Of course, no one would expect this in an interview :)
    """
    root33 = 33 ** 0.5
    u = (19 + 3 * root33) ** (1 / 3)
    v = (19 - 3 * root33) ** (1 / 3)
    w = (586 + 102 * root33) ** (1 / 3)
    # (u + v + 1) / 3 is the tribonacci constant (~1.8393).
    tribonacci_constant = (u + v + 1) / 3
    numerator = 3 * w * tribonacci_constant ** (N + 1)
    denominator = w * w - 2 * w + 4
    return round(numerator / denominator)
def triple_step_iterative(nb_of_steps):
    """
    Iterative solution keeping only the last three counts, rolling them
    forward once per step. Constant memory, linear time.
    """
    three_back, two_back, one_back = 0, 0, 1
    for _ in range(nb_of_steps):
        three_back, two_back, one_back = (
            two_back,
            one_back,
            three_back + two_back + one_back,
        )
    return one_back
def triple_step_bottom_up(nb_of_steps):
    """
    Bottom-up dynamic programming: seed the base cases, then fill the
    table up to nb_of_steps and read off the answer.
    """
    table = [1, 1, 2] + [None] * max(0, nb_of_steps - 2)
    for step in range(3, nb_of_steps + 1):
        table[step] = table[step - 1] + table[step - 2] + table[step - 3]
    return table[nb_of_steps]
@Memoize
def triple_step_top_down(nb_of_steps):
    """
    Top-down recursion: ways(N) = ways(N-1) + ways(N-2) + ways(N-3),
    with memoization (via @Memoize) avoiding recomputation.
    """
    if nb_of_steps == 0:
        return 1
    if nb_of_steps in (1, 2):
        return nb_of_steps
    return sum(triple_step_top_down(nb_of_steps - hop) for hop in (1, 2, 3))
| 31.454545 | 79 | 0.641618 |
6e5a95d6b33481e439c3c6dd74b69db486074c51 | 117 | py | Python | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | 1 | 2015-10-26T10:38:13.000Z | 2015-10-26T10:38:13.000Z | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null | lib/JumpScale/lib/docker/__init__.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null | from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
from Docker import Docker
j.tools.docker = Docker()
| 19.5 | 39 | 0.769231 |
6e5ab1e623f341546ab1d75882702a30b02894e2 | 3,704 | py | Python | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | null | null | null | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | 1 | 2020-02-20T12:37:38.000Z | 2020-02-20T17:04:53.000Z | scenes/capture/motor_pi/stepper/motorclient.py | tum-pbs/reconstructScalarFlows | 948efeaa99b90c3879f9fb544da9a596b0cb5852 | [
"Apache-2.0"
] | 3 | 2020-01-23T04:32:46.000Z | 2020-02-20T05:48:36.000Z | #!/usr/bin/env python2
import sys
import socket
import datetime
import math
import time
from time import sleep
# The c binary for controlling the stepper motor is loaded via ctypes
from ctypes import *
stepper_lib = cdll.LoadLibrary('./stepper.so')
# buffer containing the incomplete commands
recvBuffer = str()
# all my socket messages will follow the scheme: "<Control code>|<data>~"
def sendMsg(s, msg):
    """Send msg over socket s, terminated with the '~' message delimiter."""
    framed = "%s~" % msg
    s.sendall(framed)
# waits until a full message is received
def getMsg(s):
    """Block until one full '~'-terminated message arrives; return it
    without the delimiter. Returns '' on socket error or closed peer."""
    global recvBuffer
    while True:
        # receive until full message
        delim = recvBuffer.find("~")
        if(delim != -1):
            # full message -> extract it and remove from buffer
            result = recvBuffer[0:delim]
            recvBuffer = recvBuffer[delim + 1:]
            return result
        try:
            currentRecv = s.recv(4096, 0)
        except KeyboardInterrupt:
            print "Keyborad interrupt -> EXIT"
            s.close()
            sys.exit(0)
        # NOTE(review): bare except swallows all socket errors silently
        # and returns '' - callers treat '' as "connection terminated".
        except:
            return ""
        if(len(currentRecv) == 0):
            # this means a empty string was received -> this should not happen
            return ''
        print "recv: %s" % currentRecv
        recvBuffer = recvBuffer + currentRecv
# Init the native c library
def slide_init():
    """Initialise the native stepper library and centre the slide.

    Raises:
        Exception: if the native library fails to initialise.
    """
    # BUG FIX: the init call was commented out, leaving `res` undefined and
    # making the check below raise NameError; restore the call.
    res = stepper_lib.init()
    if res == 0:
        raise Exception("Failed to initialize stepper lib")
    # Park the slide in the middle of its travel.
    slide_set(0.5)
# set the slide to the given relative (0-1) position
def slide_set(pos):
    """Move the slide to a relative position in [0, 1]."""
    # Travel length of the slide, in motor steps.
    slide_length = 20000
    # Small safety margin so the slide never crashes into the end switch.
    slide_min_ofs = 30
    # Map the relative value onto the usable step range.
    steps = slide_min_ofs + pos * (slide_length - slide_min_ofs)
    if stepper_lib.set_position(c_long(int(steps))) == 0:
        raise Exception("Failed to set_position of the slide")
def main(argv):
    """Connect to the control host and service slide commands until EXIT.

    argv[1] must be the control host name/IP; protocol messages are
    '<code>|<data>~' frames (see sendMsg/getMsg).
    """
    if len(argv) <= 1:
        # it requires one argument (the host ip)
        print "Missing arguments!\nUsage: motorclient.py <control host>"
        return
    s = socket.socket()
    host = socket.gethostbyname(argv[1])
    try:
        # connect
        s.connect((host, 54321))
        # send HI messag with CS (for "callibration slide") as client id
        # The host will store this client as a non-camera client
        sendMsg(s, "HI|CS")
        # wait for answer...
        m = getMsg(s)
        # ... and check if answer is expected
        if(m != ("CON|CS")):
            print "Invalid answer from control host: %s" % m
            return
    # NOTE(review): bare except hides the actual connection error.
    except:
        print "Failed to connect to control host"
        return
    slide_init()
    # main loop
    try:
        while True:
            # get a command
            msg = getMsg(s)
            # split command
            delim = msg.find("|")
            if (msg == "" or delim == -1):
                # command invalid
                print "Connection terminated or received invalid command"
                s.close()
                sys.exit(0)
            # cmd ~ command
            # data ~ data for command
            cmd = msg[0:delim]
            data = msg[delim + 1:]
            print "CMD: \"%s\"" % cmd
            if(cmd == "EXIT"):
                # end program
                s.close()
                sys.exit(0)
            elif(cmd == "SET"):
                print "Set the slide to ", data
                # set slide position
                # the data is a float value defining the destination
                slide_set(float(data))
                sendMsg(s, "OK|SET")
    except KeyboardInterrupt:
        s.close()
        sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| 23.896774 | 78 | 0.560475 |
6e5d48ba91cb1100ebbf354d7f7d6405aa099be0 | 20,335 | py | Python | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | null | null | null | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | 27 | 2021-10-17T22:46:41.000Z | 2021-12-05T23:41:19.000Z | bots/invaders/agent.py | alv67/Lux-AI-challenge | 4fdd623a8ff578f769a6925ec0200170f84d4737 | [
"MIT"
] | 3 | 2021-11-14T19:22:16.000Z | 2021-12-04T06:46:33.000Z | import os
import math
import sys
from typing import List, Tuple
# for kaggle-environments
from abn.game_ext import GameExtended
from abn.jobs import Task, Job, JobBoard
from abn.actions import Actions
from lux.game_map import Position, Cell, RESOURCE_TYPES
from lux.game_objects import City
from lux.game_constants import GAME_CONSTANTS
from lux import annotate
## DEBUG ENABLE
DEBUG_SHOW_TIME = False
DEBUG_SHOW_CITY_JOBS = False
DEBUG_SHOW_CITY_FULLED = False
DEBUG_SHOW_EXPAND_MAP = True
DEBUG_SHOW_EXPAND_LIST = False
DEBUG_SHOW_INPROGRESS = True
DEBUG_SHOW_TODO = True
DEBUG_SHOW_ENERGY_MAP = False
DEBUG_SHOW_ENEMY_CITIES = False
DEBUG_SHOW_INVASION_MAP = False
DEBUG_SHOW_EXPLORE_MAP = False
MAX_CITY_SIZE = 10
DISTANCE_BETWEEN_CITIES = 5
def find_closest_city_tile(pos, player):
    """Return the player's city tile nearest to `pos`, or None if the
    player owns no cities."""
    best_tile = None
    best_dist = math.inf
    # Cities are stored as a dict mapping city id to City; each City holds
    # its tiles in `citytiles`.
    for city in player.cities.values():
        for tile in city.citytiles:
            dist = tile.pos.distance_to(pos)
            if dist < best_dist:
                best_dist = dist
                best_tile = tile
    return best_tile
def can_build_worker(player) -> int:
    """Number of additional workers the player may build: total city tiles
    minus current unit count, floored at zero."""
    total_tiles = sum(len(city.citytiles) for city in player.cities.values())
    return max(0, total_tiles - len(player.units))
def city_can_expand(city: City, jobs: JobBoard) -> bool:
    """Whether `city` may schedule another BUILD job.

    A city may expand only when it has enough fuel banked to survive the
    night (isFulled) and its current size plus already-queued BUILD jobs is
    still below MAX_CITY_SIZE.
    """
    has_energy = city.isFulled()
    pending_builds = jobs.count(Task.BUILD, city_id=city.cityid)
    below_cap = len(city.citytiles) + pending_builds < MAX_CITY_SIZE
    # FIX: use short-circuiting boolean `and` instead of bitwise `&`.
    return has_energy and below_cap
# Define global variables
# Module-level state shared across turns by agent().
game_state = GameExtended()
actions = Actions(game_state)
lets_build_city = False
build_pos = None
# Shortcut to the job board owned by the game state.
jobs = game_state.job_board
completed_cities = []
def agent(observation, configuration, DEBUG=False):
    """Compute one turn of actions for the Lux AI player.

    Called once per game step by the engine.  Updates the shared
    ``game_state`` from ``observation``, queues jobs for cities
    (BUILD/ENERGIZE), drives each worker through its current job's
    state machine (HARVEST/ENERGIZE/BUILD/SLEEP/EXPLORE/INVASION),
    and returns the accumulated list of engine action strings.
    """
    global game_state
    global actions
    global lets_build_city
    global build_pos
    global completed_cities
    ### Do not edit ###
    game_state._update(observation)
    actions.update()
    path: List[Tuple] = []
    ### AI Code goes down here! ###
    player = game_state.player
    opponent = game_state.opponent
    # width, height = game_state.map.width, game_state.map.height
    if DEBUG_SHOW_TIME:
        actions.append(annotate.sidetext(f"Time : {game_state.time}"))
        actions.append(annotate.sidetext(f" {game_state.lux_time}h till night"))
        if game_state.isMorning() : dbg = "Morning"
        elif game_state.isEvening() : dbg = "Evening"
        elif game_state.isNight() : dbg = "Night"
        else: dbg = "Daytime"
        actions.append(annotate.sidetext(f"it is {dbg}"))
    #---------------------------------------------------------------------------------------------------------
    # Cities Management
    #---------------------------------------------------------------------------------------------------------
    for _, city in player.cities.items():
        city_size = len(city.citytiles)
        #--- EXPAND THE CITY ---
        if DEBUG_SHOW_EXPAND_LIST:
            exp_pos = game_state.expand_map.get(city.cityid)
            actions.append(annotate.sidetext(f"{city.cityid} expand in "))
            for x, y, v in exp_pos:
                actions.append(annotate.sidetext(f" ({x}; {y}) {v}"))
        # Queue a BUILD job at the best expansion spot when the city can afford it.
        if city_can_expand(city, jobs) and city.isFulled():
            exp_pos = game_state.expand_map.get(city.cityid)
            if exp_pos:
                x, y, v = exp_pos[0]
                #if v: # expand only if there is a resource nearby
                jobs.addJob(Task.BUILD, Position(x, y), city_id=city.cityid)
            #else:
            # jobs.addJob(Task.INVASION, None, city_id=city.cityid)
        #--- SPAWN WORKERS OR RESEARCH ---
        for ct in city.citytiles:
            pxy = ct.pos
            if DEBUG_SHOW_CITY_FULLED:
                actions.append(annotate.text(pxy.x, pxy.y, f"{city.isFulled()}"))
            if ct.can_act():
                # Prefer spawning a worker; otherwise research (until uranium).
                if can_build_worker(player) - actions.new_workers > 0:
                    actions.build_worker(ct)
                    # actions.append(ct.build_worker())
                elif not player.researched_uranium():
                    actions.append(ct.research())
            if not city.isFulled(): # and not game_state.isNight():
                # Keep roughly one ENERGIZE job per two city tiles queued.
                if jobs.count(Task.ENERGIZE, city_id=city.cityid) < (city_size + 1) // 2:
                    dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
                    dbg2 = (city_size + 1) // 2
                    if DEBUG_SHOW_CITY_JOBS:
                        actions.append(annotate.sidetext(f"{city.cityid}: NRG {dbg} < {dbg2}"))
                    jobs.addJob(Task.ENERGIZE, ct.pos, city_id = city.cityid)
        # Debug jobs.count
        if DEBUG_SHOW_CITY_JOBS:
            dbg = jobs.count(Task.BUILD, city_id=city.cityid)
            actions.append(annotate.sidetext(f"{city.cityid}: {dbg} BLD"))
            dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
            actions.append(annotate.sidetext(f"{city.cityid}: {dbg} NRG"))
    #---------------------------------------------------------------------------------------------------------
    #---------------------------------------------------------------------------------------------------------
    # Units Management
    #---------------------------------------------------------------------------------------------------------
    if DEBUG_SHOW_INPROGRESS:
        actions.append(annotate.sidetext(f"[INPROGRESS]"))
    # Process units with the highest cooldown first.
    sorted_units = sorted(player.units, key=lambda u: u.cooldown, reverse=True)
    for unit in sorted_units:
        # if the unit is a worker (can mine resources) and can perform an action this turn
        if unit.is_worker():
            my_job = jobs.jobRequest(unit)
            if not unit.can_act():
                actions.stay(unit)
                if DEBUG_SHOW_INPROGRESS:
                    actions.append(annotate.sidetext(f"!{my_job}"))
                continue
            else:
                if DEBUG_SHOW_INPROGRESS:
                    actions.append(annotate.sidetext(f">{my_job}"))
            # Check if is evening time, if so, to survive, every
            # job with risk of not having enough energy is dropped
            # and a new HARVEST job is taken.
            # if game_state.isNight():
            #     if (my_job.task == Task.BUILD and my_job.subtask > 0) or \
            #        (my_job.task == Task.EXPLORE and my_job.subtask > 0):
            #         actions.stay(unit)
            #         jobs.jobDrop(unit.id)
            #         continue
            if my_job.task == Task.HARVEST:
                # if not in a city and in a cell with energy available stay here to harvest
                if game_state.getEnergy(unit.pos.x, unit.pos.y) != 0 and \
                        not game_state.map.get_cell_by_pos(unit.pos).citytile:
                    actions.stay(unit) # stay in the same position
                else: # find a new resource position
                    if unit.pos == my_job.pos:
                        tile = game_state.find_closest_resources(unit.pos)
                        if not tile: # no more resources to harvest
                            actions.stay(unit) # stay in the same position
                            jobs.jobDrop(unit.id)
                        else: # move to resource
                            my_job.pos = tile.pos
                    if unit.pos != my_job.pos:
                        move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
                        if not actions.move(unit, move.direction): # cannot move to a resource tile
                            jobs.jobReject(unit.id)
                # Full cargo means the harvest job is complete.
                if unit.get_cargo_space_left() == 0:
                    actions.stay(unit)
                    jobs.jobDone(unit.id)
            elif my_job.task == Task.ENERGIZE:
                if my_job.subtask == 0: # search for resource
                    if game_state.getEnergy(my_job.pos.x, my_job.pos.y) != 0:
                        # citytile is adiacent to a resource so go directly there
                        my_job.subtask = 1
                    # If unit is in the citytile and can grab energy then job is done (unit stay there)
                    elif unit.energy >= 10 * unit.light_upkeep:
                        # citytile is adiacent to a resource so go directly there
                        my_job.subtask = 1
                    elif unit.get_cargo_space_left() == 0:
                        my_job.subtask = 1
                    elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
                          game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
                        tile = game_state.find_closest_resources(unit.pos)
                        if not tile:
                            actions.stay(unit) # stay in the same position
                            jobs.jobReject(unit.id)
                        else:
                            move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
                            if not actions.move(unit, move.direction): # cannot move to a resource tile
                                jobs.jobReject(unit.id)
                if my_job.subtask == 1: # go to citytile
                    if unit.pos == my_job.pos:
                        actions.stay(unit) # stay in the same position
                        jobs.jobDone(unit.id)
                    else:
                        move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
                        if not actions.move(unit, move.direction):
                            jobs.jobReject(unit.id)
            elif my_job.task == Task.BUILD:
                if my_job.subtask == 0: # First need to full up unit
                    if unit.get_cargo_space_left() == 0:
                        my_job.subtask = 1
                    elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
                          game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
                        tile = game_state.find_closest_resources(unit.pos)
                        if not tile: # no reacheable resource
                            actions.stay(unit) # stay in the same position
                            jobs.jobDrop(unit.id)
                        else:
                            move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
                            if not actions.move(unit, move.direction):
                                jobs.jobDrop(unit.id)
                if my_job.subtask == 1: # Go to Build position
                    if unit.pos == my_job.pos:
                        if unit.get_cargo_space_left() > 0:
                            actions.stay(unit) # stay in the same position
                            jobs.jobDrop(unit.id)
                        else:
                            actions.build_city(unit)
                            my_job.subtask = 2
                    else:
                        move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True)
                        if move.path:
                            if not actions.move(unit, move.direction):
                                jobs.jobDrop(unit.id)
                            # actions.append(unit.move(move_dir))
                            # Draw the path
                            actions.append(annotate.x(my_job.pos.x, my_job.pos.y))
                            for i in range(len(move.path)-1):
                                actions.append(annotate.line(
                                    move.path[i][1], move.path[i][2],
                                    move.path[i+1][1], move.path[i+1][2]))
                        else: # not path found
                            jobs.jobDone(unit.id)
                elif my_job.subtask == 2:
                    # if city has adiacent energy then Unit Stay until new day
                    if game_state.getEnergy(unit.pos.x, unit.pos.y) > 0:
                        if game_state.time >= 39:
                            jobs.jobDone(unit.id)
                    else:
                        jobs.jobDone(unit.id)
            elif my_job.task == Task.SLEEP:
                if unit.pos == my_job.pos:
                    if game_state.time >= 39:
                        jobs.jobDone(unit.id)
                else:
                    move_dir = unit.pos.direction_to(my_job.pos)
                    if not actions.move(unit, move_dir):
                        jobs.jobReject(unit.id)
            elif my_job.task == Task.EXPLORE:
                # this is a multistate task so my_job.subtask is the state
                if my_job.subtask == 0: # find the position of resource (min 4 step from city)
                    # get position of city that emitted the job
                    if my_job.city_id in player.cities:
                        pos = player.cities[my_job.city_id].citytiles[0].pos
                    else:
                        pos = my_job.pos
                    explore_pos = game_state.getClosestExploreTarget(pos, min_distance=DISTANCE_BETWEEN_CITIES)
                    if explore_pos:
                        my_job.subtask = 1 # HARVEST resource from position
                        my_job.pos = explore_pos
                    else:
                        jobs.jobDone(unit.id)
                if my_job.subtask == 1: # HARVEST resource from position
                    if unit.pos == my_job.pos:
                        if unit.get_cargo_space_left() > 0:
                            if not game_state.map.get_cell_by_pos(unit.pos).has_resource:
                                #jobs.jobReject(unit.id)
                                jobs.jobDrop(unit.id)
                        else: # next subtask
                            my_job.pos = game_state.find_closest_freespace(unit.pos)
                            my_job.subtask = 2 # BUILD A NEW CITY
                    else:
                        # move_dir = unit.pos.direction_to(my_job.pos)
                        move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
                        if not actions.move(unit, move.direction):
                            # jobs.jobReject(unit.id)
                            jobs.jobDrop(unit.id)
                if my_job.subtask == 2: # BUILD A NEW CITY
                    if unit.pos == my_job.pos:
                        # TODO: need to wait until next day
                        actions.build_city(unit)
                        my_job.subtask = 3 # WAIT UNTIL NEXT DAY
                    else:
                        #move_dir = unit.pos.direction_to(my_job.pos)
                        move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True, playerid=game_state.id)
                        if not actions.move(unit, move.direction):
                            # NOTE(review): `action` is assigned but never used here — confirm intent
                            action = unit.build_city()
                            # jobs.jobReject(unit.id)
                            jobs.jobDrop(unit.id)
                if my_job.subtask == 3: # Now feed that city
                    my_job.task = Task.ENERGIZE
                    my_job.subtask = 0
                    actions.stay(unit)
            elif my_job.task == Task.INVASION:
                if my_job.subtask == 0:
                    # get an invasion target position
                    target_pos = game_state.getClosestInvasionTarget(unit.pos)
                    if not target_pos:
                        actions.stay(unit)
                        jobs.jobDone(unit.id)
                        continue
                    my_job.data["target"] = target_pos
                    if unit.get_cargo_space_left() == 0: # if unit is full
                        my_job.pos = target_pos
                        my_job.subtask = 2
                    else:
                        # find a resource in the halfway to the target
                        res_cell = game_state.find_closest_resources(unit.pos.halfway(target_pos))
                        if res_cell:
                            my_job.subtask = 1 # HARVEST resource from position
                            my_job.pos = res_cell.pos
                        else:
                            actions.stay(unit)
                            jobs.jobDone(unit.id)
                            continue
                if my_job.subtask == 1: # HARVEST resource from position
                    if unit.pos == my_job.pos:
                        if unit.get_cargo_space_left() == 0:
                            my_job.pos = my_job.data["target"]
                            my_job.subtask = 2 # BUILD A NEW CITY
                        elif not game_state.getEnergy(unit.pos.x, unit.pos.y) > 0:
                            res_cell = game_state.find_closest_resources(unit.pos)
                            if res_cell:
                                my_job.pos = res_cell.pos
                            else:
                                actions.stay(unit)
                                jobs.jobDone(unit.id)
                                continue
                        else: # next subtask
                            actions.stay(unit) # stay untill cargo is fulled
                    else:
                        # move_dir = unit.pos.direction_to(my_job.pos)
                        move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
                        if not actions.move(unit, move.direction): # no way to move
                            jobs.jobDrop(unit.id)
                if my_job.subtask == 2: # BUILD A NEW CITY
                    if unit.pos == my_job.pos:
                        actions.build_city(unit)
                        jobs.jobDone(unit.id)
                    else:
                        move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True, playerid=game_state.id)
                        if not actions.move(unit, move.direction):
                            if unit.get_cargo_space_left() == 0 and not game_state.map.get_cell_by_pos(unit.pos).has_resource:
                                actions.build_city(unit)
                                jobs.jobDone(unit.id)
                            else:
                                actions.stay(unit)
    ## Debug Text
    if DEBUG_SHOW_TODO:
        actions.append(annotate.sidetext(f"[TODO] {len(jobs.todo)}"))
        for task in jobs.todo:
            actions.append(annotate.sidetext(task))
    #--------------------------------------------------------------------------------------------------------
    # Debug "show expand map"
    if DEBUG_SHOW_EXPAND_MAP:
        for x, y, e in [ p for a in game_state.expand_map.values() for p in a]:
            actions.append(annotate.circle(x, y))
            actions.append(annotate.text(x, y, e))
    ## Debug "show energy map"
    if DEBUG_SHOW_ENERGY_MAP:
        for (x, y),v in game_state.energy_map.items():
            actions.append(annotate.text(x, y, v))
    ## Debug "show enemy map"
    if DEBUG_SHOW_ENEMY_CITIES:
        for x, y in game_state.enemy_map:
            actions.append(annotate.circle(x, y))
    ## Debug "show invasion map"
    if DEBUG_SHOW_INVASION_MAP:
        for x, y in game_state.invasion_map:
            actions.append(annotate.x(x, y))
    ## Debug "show explore map"
    if DEBUG_SHOW_EXPLORE_MAP:
        for x, y in game_state.explore_map:
            actions.append(annotate.x(x, y))
    # actions.append(annotate.sidetext(f"[INPROGRESS] {len(jobs.inprogress)}"))
    # for task in jobs.inprogress:
    #     actions.append(annotate.sidetext(jobs.inprogress[task]))
    # actions.append(annotate.sidetext("-[CEMETERY]-"))
    # for uid in jobs.rip:
    #     actions.append(annotate.sidetext(uid))
    return actions.actions
| 47.847059 | 126 | 0.503369 |
6e5de644fd911fb842013165cff69e62361a9159 | 12,503 | py | Python | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | PySRCG/src/Tabs/cyberware_tab.py | apampuch/PySRCG | bb3777aed3517b473e5860336c015e2e8d0905e9 | [
"MIT"
] | null | null | null | from copy import copy
from tkinter import *
from tkinter import ttk
from src import app_data
from src.CharData.augment import Cyberware
from src.Tabs.notebook_tab import NotebookTab
from src.statblock_modifier import StatMod
from src.utils import treeview_get, recursive_treeview_fill, calculate_attributes, get_variables
# list of attributes that we need to look for variables in, eg "Cost: rating * 500"
ATTRIBUTES_TO_CALCULATE = ["essence", "cost", "availability_rating", "availability_time", "mods"]
STRINGS_TO_IGNORE = [] # nyi (not yet implemented)
class CyberwareTab(NotebookTab):
    """Notebook tab for buying and selling cyberware.

    Layout (left to right): a treeview "library" of purchasable
    cyberware, a read-only description box, and a listbox holding the
    character's installed cyberware, plus grade radio buttons and
    Buy/Sell buttons.
    """

    @property
    def library_selected(self):
        """Cyberware object currently selected in the library tree (or None)."""
        return treeview_get(self.cyberware_library, self.tree_library_dict)

    @property
    def list_selected_index(self) -> int:
        """Index of the selected item in the owned-cyberware listbox.

        Returns None when nothing is selected.
        """
        selection = self.cyberware_list.curselection()
        # Fix: the original compared `len(selection) is 0` — identity
        # comparison with an int literal is implementation-dependent and
        # a SyntaxWarning since Python 3.8.
        if not selection:
            return None
        return selection[-1]

    def __init__(self, parent):
        """Build all widgets and populate the library tree from game data."""
        super().__init__(parent)

        # used to validate input (positive integers only)
        self.vcmd = (self.register(self.int_validate), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')

        # maps library terminal children iids to Cyberware objects
        self.tree_library_dict = {}

        # cyberware library
        self.cyberware_library = ttk.Treeview(self, height=20, show="tree")
        self.cyberware_library_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.cyberware_library.yview)

        # cyberware list
        self.cyberware_list = Listbox(self, width=40)
        self.cyberware_list_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.cyberware_list.yview)

        # description box
        self.desc_box = Text(self, width=40, state=DISABLED, bg='#d1d1d1')
        self.desc_box_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.desc_box.yview)

        # radio boxes for the cyberware grade
        self.grade_var = StringVar()
        self.grade_frame = LabelFrame(self, text="Grade")
        self.standard_radio = Radiobutton(self.grade_frame,
                                          text="Standard",
                                          variable=self.grade_var,
                                          value="standard")
        self.alpha_radio = Radiobutton(self.grade_frame,
                                       text="Alphaware",
                                       variable=self.grade_var,
                                       value="alpha")
        self.beta_radio = Radiobutton(self.grade_frame,
                                      text="Betaware",
                                      variable=self.grade_var,
                                      value="beta")
        self.delta_radio = Radiobutton(self.grade_frame,
                                       text="Deltaware",
                                       variable=self.grade_var,
                                       value="delta")

        # buttons
        self.buy_sell_frame = Frame(self)
        self.buy_button = Button(self.buy_sell_frame, text="Buy", command=self.on_buy_click)
        self.sell_button = Button(self.buy_sell_frame, text="Sell", command=self.on_sell_click)

        # variable objects frame and list
        self.variables_frame = Frame(self)
        self.variables_dict = {}

        # bind events
        self.cyberware_library["yscrollcommand"] = self.cyberware_library_scroll.set
        self.cyberware_library.bind("<<TreeviewSelect>>", self.on_tree_item_click)
        self.cyberware_list["yscrollcommand"] = self.cyberware_list_scroll.set
        self.cyberware_list.bind("<<ListboxSelect>>", self.on_inv_item_click)
        self.desc_box["yscrollcommand"] = self.desc_box_scroll.set

        # grids
        self.cyberware_library.grid(column=0, row=0, sticky=(N, S))
        self.cyberware_library_scroll.grid(column=1, row=0, sticky=(N, S))
        self.desc_box.grid(column=3, row=0, sticky=(N, S))
        self.desc_box_scroll.grid(column=4, row=0, sticky=(N, S))
        self.cyberware_list.grid(column=5, row=0, sticky=(N, S))
        self.cyberware_list_scroll.grid(column=6, row=0, sticky=(N, S))
        self.buy_sell_frame.grid(column=5, row=1, sticky=E)
        self.buy_button.grid(column=0, row=0, sticky=W)
        self.sell_button.grid(column=1, row=0, sticky=W)
        self.grade_frame.grid(column=0, row=1, sticky=W, columnspan=4)
        self.standard_radio.grid(column=0, row=0)
        self.alpha_radio.grid(column=1, row=0)
        self.beta_radio.grid(column=2, row=0)
        self.delta_radio.grid(column=3, row=0)
        self.standard_radio.select()
        self.standard_radio.invoke()
        self.variables_frame.grid(column=0, row=3)

        def augment_tab_recurse_check(val):
            # Category nodes lack an "essence" key; terminal items carry one.
            return "essence" not in val.keys()

        def augment_tab_recurse_end_callback(key, val, iid):
            # key is a string
            # val is a dict from a json
            try:
                self.tree_library_dict[iid] = Cyberware(name=key, **val)
            except TypeError as e:
                print("Error with cyberware {}:".format(key))
                print(e)
                print()

        recursive_treeview_fill(self.parent.game_data["Augments"]["Cyberware"], "", self.cyberware_library,
                                augment_tab_recurse_check, augment_tab_recurse_end_callback)

    def on_buy_click(self):
        """Buy the library-selected cyberware if essence and cash allow."""
        # TODO make this set rating value
        if self.library_selected is not None:
            current_essence = self.statblock.essence

            # make copies of info we need to copy from the dict
            cyber = copy(self.library_selected)
            cyber.grade = str(self.grade_var.get())

            # make a new dict from the variables dict that we can pass into parse_arithmetic()
            # because parse_arithmetic() can't take IntVars
            var_dict = {}
            for key in self.variables_dict.keys():
                var_dict[key] = self.variables_dict[key].get()

            # calculate any arithmetic expressions we have
            calculate_attributes(cyber, var_dict, ATTRIBUTES_TO_CALCULATE)
            cyber.essence = self.calc_essence_cost(cyber, cyber.grade)
            cyber.cost = int(self.calc_yen_cost(cyber, cyber.grade))

            # if we have enough essence
            if cyber.essence < current_essence:
                # if we have enough money
                if app_data.pay_cash(cyber.cost):
                    self.add_cyberware_item(cyber)
                    self.calculate_total()
                else:
                    # Fix: this branch fails on *cash*; the original
                    # printed the essence message here (and vice versa).
                    print("Can't get that!")
            else:
                print("Not enough essence left!")

    def on_sell_click(self):
        """Sell the selected owned cyberware and refund its full cost."""
        # don't do anything if nothing is selected
        # Fix: was `len(...) is 0` (identity comparison with int literal).
        if not self.cyberware_list.curselection():
            return

        # return cash value
        self.statblock.cash += self.statblock.cyberware[self.list_selected_index].cost
        self.remove_cyberware_item(self.list_selected_index)
        self.calculate_total()

    def add_cyberware_item(self, cyber):
        """Apply the item's stat mods and add it to the statblock and listbox.

        :type cyber: Cyberware
        """
        for key in cyber.mods.keys():
            value = cyber.mods[key]
            StatMod.add_mod(key, value)
        self.statblock.cyberware.append(cyber)
        self.cyberware_list.insert(END, cyber.name)

    def remove_cyberware_item(self, index):
        """Undo the item's stat mods and remove it from the statblock and listbox."""
        cyber = self.statblock.cyberware[index]
        for key in cyber.mods.keys():
            value = cyber.mods[key]
            StatMod.remove_mod(key, value)
        del self.statblock.cyberware[index]
        self.cyberware_list.delete(index)

    def calc_essence_cost(self, cyber, grade):
        """Essence cost of `cyber` after the grade discount and any "fits" rebate."""
        essence = cyber.essence
        if grade == "standard":
            pass
        elif grade == "alpha":
            essence *= 0.8
        elif grade == "beta":
            essence *= 0.6
        elif grade == "delta":
            essence *= 0.5
        else:
            raise ValueError("Invalid grade {}.".format(grade))

        # Items that don't fit into anything pay full price.
        if cyber.fits is None:
            return essence

        fit_dict = self.statblock.make_fit_dict()
        if cyber.fits in fit_dict.keys():
            hold_amount = fit_dict[cyber.fits][0]
            fit_amount = fit_dict[cyber.fits][1]
            # subtract fit amount from held amount to get the uncovered remainder
            subtotal = max(hold_amount - fit_amount, 0)
            total = max(essence - subtotal, 0)
            return total
        else:
            return essence

    def calc_yen_cost(self, cyber, grade):
        """Nuyen cost of `cyber` after the grade multiplier."""
        cost = cyber.cost
        if grade == "standard":
            pass
        elif grade == "alpha":
            cost *= 2
        elif grade == "beta":
            cost *= 4
        elif grade == "delta":
            cost *= 8
        else:
            raise ValueError("Invalid grade {}.".format(grade))
        return cost

    def fill_description_box(self, contents):
        """Clears the item description box and fills it with contents."""
        # temporarily unlock box, clear it, set the text, then re-lock it
        self.desc_box.config(state=NORMAL)
        self.desc_box.delete(1.0, END)
        self.desc_box.insert(END, contents)
        self.desc_box.config(state=DISABLED)

    def on_tree_item_click(self, event):
        """Show the selected library item's report and build its variable entries."""
        # only select the last one selected if we've selected multiple things
        selected = self.cyberware_library.selection()[-1]
        if selected in self.tree_library_dict.keys():
            selected_cyberware = self.tree_library_dict[selected]
            self.fill_description_box(selected_cyberware.report())

            # destroy all variable objects
            self.variables_dict = {}
            for child in self.variables_frame.winfo_children():
                child.destroy()

            # get any variables in the item
            self.variables_dict = get_variables(selected_cyberware, ATTRIBUTES_TO_CALCULATE)

            # make variable objects if any
            i = 0
            for var in self.variables_dict.keys():
                var_frame = Frame(self.variables_frame)
                Label(var_frame, text="{}:".format(var)).grid(column=0, row=0) # label
                Entry(var_frame, textvariable=self.variables_dict[var], validate="key", validatecommand=self.vcmd) \
                    .grid(column=1, row=0)
                var_frame.grid(column=0, row=i)
                i += 1

    def int_validate(self, action, index, value_if_allowed,
                     prior_value, text, validation_type, trigger_type, widget_name):
        """
        Validates if entered text can be an int and over 0.

        :return: True if text is valid (empty string is allowed while typing)
        """
        if value_if_allowed == "":
            return True
        if value_if_allowed:
            try:
                i = int(value_if_allowed)
                if i > 0:
                    return True
                else:
                    self.bell()
                    return False
            except ValueError:
                self.bell()
                return False
        else:
            self.bell()
            return False

    def on_inv_item_click(self, event):
        """Show the report for the clicked item in the owned-cyberware list."""
        curselection_ = self.cyberware_list.curselection()[-1]
        item_report = self.statblock.cyberware[curselection_].report()
        self.fill_description_box(item_report)

    def calculate_total(self):
        """Refresh the essence display in the top bar."""
        # unlike the other tabs places we directly manipulate the top bar
        # since this has nothing to do with the generation mode
        app_data.top_bar.update_karma_bar("{:.2f}".format(self.statblock.essence),
                                          self.statblock.base_attributes["essence"],
                                          "Augments Tab")
        # app_data.top_bar.karma_fraction.set(("{}/{}".format("{:.2f}".format(self.statblock.essence),
        #                                     self.statblock.base_attributes["essence"])))

    def on_switch(self):
        """Called when this tab becomes active."""
        self.calculate_total()

    def load_character(self):
        """Repopulate the owned-cyberware listbox from the current statblock."""
        # clear everything
        # self.tree_library_dict = {}
        self.cyberware_list.delete(0, END)

        # add stuff to the list
        for cyber in self.statblock.cyberware:
            self.cyberware_list.insert(END, cyber.name)
# self.on_switch() | 38.589506 | 116 | 0.589698 |
6e5f43493f76b33f089dfbae79e524b7b68ad4b5 | 337 | py | Python | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | myapp/mymetric/my-metric.py | affoliveira/hiring-engineers | 4064d8c7b6cead9a88197e95fcd6a0f2395e4d44 | [
"Apache-2.0"
] | null | null | null | from datadog import initialize, statsd
import time
import random
import os
options = {
'statsd_host':os.environ['DD_AGENT_HOST'],
'statsd_port':8125
}
initialize(**options)
i = 0
while(1):
i += 1
r = random.randint(0, 1000)
statsd.gauge('mymetric',r , tags=["environment:dev"])
time.sleep(int(os.environ['interval'])) | 17.736842 | 55 | 0.68546 |
6e5f8bfb8859c97984af510e67f81278396d3ad6 | 277 | py | Python | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | 1 | 2021-09-24T16:26:04.000Z | 2021-09-24T16:26:04.000Z | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null | 1 ano/logica-de-programacao/list-telefone-lucio.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null | n = [0,0,0,0,0,0,0,0,0,0]
t = [0,0,0,0,0,0,0,0,0,0]
c=0
while(c<10):
n[c]=input("Digite o nome")
t[c]=input("Digite o telefone")
c+=1
const=""
while(const!="fim"):
cons=input("Digite nome a consultar")
if(n[c]==const):
print(f"TEl: {t[c]}")
c+=1 | 21.307692 | 41 | 0.516245 |
6e61986199cea39f158bd8be59e6773d5f58be23 | 8,979 | py | Python | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 10 | 2017-11-18T04:01:03.000Z | 2022-03-06T21:07:09.000Z | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 17 | 2018-06-12T20:54:40.000Z | 2022-02-09T23:27:24.000Z | serve_tiny_performance_mdrnn.py | cpmpercussion/robojam | 8f9524be0ad850bdfc0c3459b0e4b677f5f70a84 | [
"MIT"
] | 2 | 2017-12-05T23:39:42.000Z | 2018-06-13T13:46:33.000Z | #!/usr/bin/env python3
"""A flask server for Robojam"""
import json
import time
from io import StringIO
import pandas as pd
import tensorflow as tf
import robojam
from tensorflow.compat.v1.keras import backend as K
from flask import Flask, request
from flask_cors import CORS
# Start server.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)  # set logging.
app = Flask(__name__)  # Flask application serving the prediction API
cors = CORS(app)  # allow cross-origin requests from browser clients
compute_graph = tf.compat.v1.Graph()  # dedicated TF1 graph for the MDRNN
with compute_graph.as_default():
    sess = tf.compat.v1.Session()  # session shared by load-time and request-time code
# Network hyper-parameters:
N_MIX = 5  # number of mixture components in the MDN output layer
N_LAYERS = 2  # LSTM layers in the MDRNN
N_UNITS = 512  # units per LSTM layer
TEMP = 1.5  # sampling temperature for the categorical mixture weights
SIG_TEMP = 0.01  # sampling temperature for the Gaussian sigmas
# MODEL_FILE = 'models/robojam-td-model-E12-VL-4.57.hdf5'
MODEL_FILE = 'models/robojam-metatone-layers2-units512-mixtures5-scale10-E30-VL-5.65.hdf5'
@app.route("/api/predict", methods=['POST'])
def reaction():
    """Produces a Reaction Performance using the MDRNN.

    Accepts the input performance either as a JSON request body with a
    'perf' key or as a form field named 'perf'; the performance itself
    is a CSV string.  Returns JSON with a 'response' CSV string.
    """
    tf.compat.v1.logging.info("Responding to a prediction request.")
    start = time.time()
    # Clients may POST either raw JSON in the body or form-encoded data.
    data = request.data.decode("utf-8")
    if data == "":
        params = request.form
        input_perf = json.loads(params['perf'])
    else:
        tf.compat.v1.logging.info("Perf in data as string.")
        params = json.loads(data)
        input_perf = params['perf']
    input_perf_df = pd.read_csv(StringIO(input_perf), parse_dates=False)
    input_perf_array = robojam.perf_df_to_array(input_perf_df)
    # Run the response prediction:
    # NOTE(review): `net` is created in the __main__ block; this handler
    # assumes the server was started via that entry point — confirm.
    K.set_session(sess)
    with compute_graph.as_default():
        net.reset_states() # reset LSTM state.
        out_perf = robojam.condition_and_generate(net, input_perf_array, N_MIX, temp=TEMP, sigma_temp=SIG_TEMP) # predict
    out_df = robojam.perf_array_to_df(out_perf)
    out_df.at[out_df[:1].index, 'moving'] = 0 # set first touch to a tap
    out_perf_string = out_df.to_csv()
    json_data = json.dumps({'response': out_perf_string})
    tf.compat.v1.logging.info("Completed request, time was: %f" % (time.time() - start))
    return json_data
if __name__ == "__main__":
    """Start a TinyPerformance MDRNN Server"""
    tf.compat.v1.logging.info("Starting RoboJam Server.")
    # Load the inference model into the shared session/graph before serving.
    K.set_session(sess)
    with compute_graph.as_default():
        net = robojam.load_robojam_inference_model(model_file=MODEL_FILE, layers=N_LAYERS, units=N_UNITS, mixtures=N_MIX)
    # Serve over HTTPS on all interfaces using the bundled certificate pair.
    app.run(host='0.0.0.0', ssl_context=('keys/cert.pem', 'keys/key.pem'))
# Command line tests.
# curl -i -k -X POST -H "Content-Type:application/json" https://127.0.0.1:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.005213, 0.711230, 0.070856, 25.524292, 0\n0.097298, 0.719251, 0.062834, 25.524292, 1\n0.126225, 0.719251, 0.057487, 25.524292, 1\n0.194616, 0.707219, 0.045455, 38.290771, 1\n0.212923, 0.704545, 0.045455, 38.290771, 1\n0.343579, 0.703209, 0.108289, 38.290771, 1\n0.495085, 0.701872, 0.070856, 38.290771, 1\n0.523921, 0.693850, 0.061497, 38.290771, 1\n0.712066, 0.711230, 0.155080, 38.290771, 1\n0.730294, 0.717914, 0.155080, 38.290771, 1\n0.896367, 0.696524, 0.041444, 38.290771, 1\n1.083786, 0.696524, 0.151070, 38.290771, 1\n1.301470, 0.684492, 0.049465, 38.290771, 1\n1.328134, 0.680481, 0.053476, 38.290771, 1\n1.504139, 0.705882, 0.136364, 38.290771, 1\n1.527875, 0.712567, 0.120321, 38.290771, 1\n1.702672, 0.675134, 0.076203, 38.290771, 1\n1.719294, 0.675134, 0.096257, 38.290771, 1\n1.901434, 0.715241, 0.145722, 38.290771, 1\n1.922717, 0.717914, 0.136364, 38.290771, 1\n2.062994, 0.684492, 0.109626, 38.290771, 1\n2.091680, 0.680481, 0.129679, 38.290771, 1\n2.231362, 0.697861, 0.207219, 38.290771, 1\n2.393213, 0.712567, 0.124332, 38.290771, 1\n2.525774, 0.632353, 0.149733, 38.290771, 1\n2.546701, 0.625668, 0.169786, 38.290771, 1\n2.686487, 0.585561, 0.360963, 38.290771, 1\n2.715316, 0.580214, 0.387701, 38.290771, 1\n2.867526, 0.490642, 0.633690, 38.290771, 1\n2.880361, 0.481283, 0.645722, 38.290771, 1\n3.054443, 0.319519, 0.689840, 38.290771, 1\n3.218741, 0.121658, 0.585561, 38.290771, 1\n3.230362, 0.102941, 0.557487, 38.290771, 1\n3.391456, 0.089572, 0.534759, 38.290771, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://138.197.179.234:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.005213, 0.711230, 0.070856, 25.524292, 0\n0.097298, 0.719251, 0.062834, 25.524292, 1\n0.126225, 0.719251, 0.057487, 25.524292, 1\n0.194616, 0.707219, 0.045455, 38.290771, 1\n0.212923, 0.704545, 0.045455, 38.290771, 1\n0.343579, 0.703209, 0.108289, 38.290771, 1\n0.495085, 0.701872, 0.070856, 38.290771, 1\n0.523921, 0.693850, 0.061497, 38.290771, 1\n0.712066, 0.711230, 0.155080, 38.290771, 1\n0.730294, 0.717914, 0.155080, 38.290771, 1\n0.896367, 0.696524, 0.041444, 38.290771, 1\n1.083786, 0.696524, 0.151070, 38.290771, 1\n1.301470, 0.684492, 0.049465, 38.290771, 1\n1.328134, 0.680481, 0.053476, 38.290771, 1\n1.504139, 0.705882, 0.136364, 38.290771, 1\n1.527875, 0.712567, 0.120321, 38.290771, 1\n1.702672, 0.675134, 0.076203, 38.290771, 1\n1.719294, 0.675134, 0.096257, 38.290771, 1\n1.901434, 0.715241, 0.145722, 38.290771, 1\n1.922717, 0.717914, 0.136364, 38.290771, 1\n2.062994, 0.684492, 0.109626, 38.290771, 1\n2.091680, 0.680481, 0.129679, 38.290771, 1\n2.231362, 0.697861, 0.207219, 38.290771, 1\n2.393213, 0.712567, 0.124332, 38.290771, 1\n2.525774, 0.632353, 0.149733, 38.290771, 1\n2.546701, 0.625668, 0.169786, 38.290771, 1\n2.686487, 0.585561, 0.360963, 38.290771, 1\n2.715316, 0.580214, 0.387701, 38.290771, 1\n2.867526, 0.490642, 0.633690, 38.290771, 1\n2.880361, 0.481283, 0.645722, 38.290771, 1\n3.054443, 0.319519, 0.689840, 38.290771, 1\n3.218741, 0.121658, 0.585561, 38.290771, 1\n3.230362, 0.102941, 0.557487, 38.290771, 1\n3.391456, 0.089572, 0.534759, 38.290771, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://138.197.179.234:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.002468, 0.106414, 0.122449, 20.000000, 0\n0.020841, 0.106414, 0.125364, 20.000000, 1\n0.043218, 0.107872, 0.137026, 20.000000, 1\n0.065484, 0.107872, 0.176385, 20.000000, 1\n0.090776, 0.107872, 0.231778, 20.000000, 1\n0.110590, 0.109329, 0.301749, 20.000000, 1\n0.133338, 0.115160, 0.357143, 20.000000, 1\n0.155677, 0.125364, 0.412536, 20.000000, 1\n0.178238, 0.134111, 0.432945, 20.000000, 1\n0.516467, 0.275510, 0.180758, 20.000000, 0\n0.542726, 0.274052, 0.205539, 20.000000, 1\n0.560772, 0.274052, 0.249271, 20.000000, 1\n0.583259, 0.282799, 0.316327, 20.000000, 1\n0.605750, 0.295918, 0.376093, 20.000000, 1\n0.628259, 0.309038, 0.415452, 20.000000, 1\n0.653835, 0.316327, 0.432945, 20.000000, 1\n0.673523, 0.325073, 0.440233, 20.000000, 1\n1.000294, 0.590379, 0.179300, 20.000000, 0\n1.022137, 0.593294, 0.183673, 20.000000, 1\n1.044706, 0.594752, 0.208455, 20.000000, 1\n1.067020, 0.606414, 0.279883, 20.000000, 1\n1.091137, 0.626822, 0.355685, 20.000000, 1\n1.111968, 0.647230, 0.425656, 20.000000, 1\n1.134535, 0.655977, 0.462099, 20.000000, 1\n1.156987, 0.657434, 0.485423, 20.000000, 1\n1.619212, 0.857143, 0.263848, 20.000000, 0\n1.642492, 0.854227, 0.281341, 20.000000, 1\n1.663123, 0.851312, 0.320700, 20.000000, 1\n1.685776, 0.846939, 0.413994, 20.000000, 1\n1.708192, 0.846939, 0.510204, 20.000000, 1\n1.730717, 0.858601, 0.591837, 20.000000, 1\n1.753953, 0.868805, 0.632653, 20.000000, 1\n1.775862, 0.876093, 0.660350, 20.000000, 1\n4.376275, 0.542274, 0.860058, 20.000000, 0\n4.419554, 0.543732, 0.860058, 20.000000, 1"}'
# curl -i -k -X POST -H "Content-Type:application/json" https://0.0.0.0:5000/api/predict -d '{"perf":"time,x,y,z,moving\n0.002468, 0.106414, 0.122449, 20.000000, 0\n0.020841, 0.106414, 0.125364, 20.000000, 1\n0.043218, 0.107872, 0.137026, 20.000000, 1\n0.065484, 0.107872, 0.176385, 20.000000, 1\n0.090776, 0.107872, 0.231778, 20.000000, 1\n0.110590, 0.109329, 0.301749, 20.000000, 1\n0.133338, 0.115160, 0.357143, 20.000000, 1\n0.155677, 0.125364, 0.412536, 20.000000, 1\n0.178238, 0.134111, 0.432945, 20.000000, 1\n0.516467, 0.275510, 0.180758, 20.000000, 0\n0.542726, 0.274052, 0.205539, 20.000000, 1\n0.560772, 0.274052, 0.249271, 20.000000, 1\n0.583259, 0.282799, 0.316327, 20.000000, 1\n0.605750, 0.295918, 0.376093, 20.000000, 1\n0.628259, 0.309038, 0.415452, 20.000000, 1\n0.653835, 0.316327, 0.432945, 20.000000, 1\n0.673523, 0.325073, 0.440233, 20.000000, 1\n1.000294, 0.590379, 0.179300, 20.000000, 0\n1.022137, 0.593294, 0.183673, 20.000000, 1\n1.044706, 0.594752, 0.208455, 20.000000, 1\n1.067020, 0.606414, 0.279883, 20.000000, 1\n1.091137, 0.626822, 0.355685, 20.000000, 1\n1.111968, 0.647230, 0.425656, 20.000000, 1\n1.134535, 0.655977, 0.462099, 20.000000, 1\n1.156987, 0.657434, 0.485423, 20.000000, 1\n1.619212, 0.857143, 0.263848, 20.000000, 0\n1.642492, 0.854227, 0.281341, 20.000000, 1\n1.663123, 0.851312, 0.320700, 20.000000, 1\n1.685776, 0.846939, 0.413994, 20.000000, 1\n1.708192, 0.846939, 0.510204, 20.000000, 1\n1.730717, 0.858601, 0.591837, 20.000000, 1\n1.753953, 0.868805, 0.632653, 20.000000, 1\n1.775862, 0.876093, 0.660350, 20.000000, 1\n4.376275, 0.542274, 0.860058, 20.000000, 0\n4.419554, 0.543732, 0.860058, 20.000000, 1"}'
| 121.337838 | 1,670 | 0.702639 |
6e63b1a8022fa7d3c4dd2cc0d17b00043e002831 | 1,024 | py | Python | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | youtube_sync/tasks.py | abhayagiri/youtube-sync | ce3861f1b0c1448b1d48e5ba17925f5c082f04a2 | [
"MIT"
] | null | null | null | from datetime import datetime
import os
import re
import subprocess
from . import app, celery, db
from .database import Job
@celery.task()
def make_audio(youtube_id):
    """Celery task: run worker.sh for the given YouTube video id.

    The worker script receives its configuration through environment
    variables copied out of the Flask app config.
    """
    script = os.path.join(app.root_path, 'worker.sh')
    extra_env = {
        key: app.config[key]
        for key in ('DYNAMIC_AUDIO_NORMALIZER_BIN', 'DESTINATION_SERVER_PATH')
    }
    job([script, youtube_id], env=extra_env)
def job(cmd, env=None):
    """Run *cmd* as a subprocess and record the run as a Job row.

    :param cmd: argument list passed to ``subprocess.check_output``.
    :param env: optional mapping of extra environment variables layered
        on top of a copy of the current process environment.
    :return: True when the command exited with status 0.
    """
    # Fix: the previous signature used a mutable default (`env={}`),
    # which is a classic Python pitfall; use None as the sentinel.
    job_env = os.environ.copy()
    if env:
        job_env.update(env)
    # Renamed from `job` to avoid shadowing this function's own name.
    record = Job(command=repr(cmd))
    db.session.add(record)
    db.session.commit()
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=job_env)
        return_code = 0
    except subprocess.CalledProcessError as e:
        # Keep the captured output even when the command fails.
        output = e.output
        return_code = e.returncode
    record.complete = True
    record.return_code = return_code
    record.output = output.decode('utf-8')
    record.completed_at = datetime.now()
    db.session.commit()
    return return_code == 0
| 26.947368 | 84 | 0.682617 |
6e6b8e97a66c01a64f2cca3a534d23843f440130 | 560 | py | Python | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | setup.py | garethrylance/python-sdk-example | 3f21c3a6c28f46050688ce1be66e33433a801e7c | [
"CC0-1.0"
] | null | null | null | from setuptools import setup
# Package metadata for the example SDK.
setup(
    name="python-sdk-example",
    version="0.1",
    description="The dispatch model loader - lambda part.",
    url="https://github.com/garethrylance/python-sdk-example",
    author="Gareth Rylance",
    author_email="gareth@rylance.me.uk",
    packages=["example_sdk"],
    install_requires=["pandas"],
    zip_safe=False,
    # Fix: the previous value [""] declared an empty console-script
    # entry, which is not a valid "name = module:func" spec and breaks
    # installation; declare no console scripts instead.
    entry_points={"console_scripts": []},
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    extras_require={"development": ["flake8", "black", "pytest", "snapshottest"]},
)
| 31.111111 | 82 | 0.671429 |
6e6bf3bcb9f6b04ecf66cf6829603687c806b677 | 4,140 | py | Python | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | 1 | 2020-03-17T23:34:17.000Z | 2020-03-17T23:34:17.000Z | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | null | null | null | markov.py | themichaelusa/zuckerkov | d68780f987b3f032d6382ea75118c84e7f205a39 | [
"MIT"
] | null | null | null | ### IMPORTS
import json
import glob
import string
import random
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
import markovify
### CONSTANTS/GLOBALS/LAMBDAS
# Characters that disqualify a word: all ASCII punctuation plus a stray
# soft-hyphen character ('\xad') seen in the message export.
SYMBOLS_TO_RM = tuple(list(string.punctuation) + ['\xad'])
# Digits also disqualify a word.
NUMBERS_TO_RM = tuple(string.digits)

# Prefer the GPU for spaCy when one is available, then load the small
# English pipeline used for tokenization / POS tagging below.
spacy.prefer_gpu()
NLP_ENGINE = spacy.load("en_core_web_sm")
def clean_word(word):
    """Return *word* lower-cased, or None when it is empty or contains
    any punctuation or digit character."""
    disallowed = set(SYMBOLS_TO_RM) | set(NUMBERS_TO_RM)
    if word and not (disallowed & set(word)):
        return word.lower()
    return None
def clean_set(raw_set, by_letters=False):
    """Clean every line of *raw_set* and return the non-empty results.

    The last whitespace-separated token of each line is discarded
    (matching the original corpus format).  Note: both values of
    *by_letters* currently follow the same code path; the flag is kept
    for interface compatibility.
    """
    cleaned_lines = []
    for line in raw_set:
        kept = []
        for token in line.split(' ')[:-1]:
            candidate = clean_word(token)
            if candidate is not None:
                kept.append(candidate)
        sentence = ' '.join(kept)
        if sentence:
            cleaned_lines.append(sentence)
    return cleaned_lines
def gen_user_corpus(sender, wpath):
    """Write a cleaned message corpus for *sender* to *wpath*.

    Scans every message_*.json export in the working directory, keeps
    only textual messages sent by *sender*, cleans them, and writes one
    sentence per line followed by every distinct word (so the Markov
    model also has single-word lines).
    """
    texts = []
    for export_path in glob.glob('message_*.json'):
        with open(export_path) as handle:
            raw_data = json.load(handle)
        # Keep only textual messages from the requested sender.
        for mesg in raw_data['messages']:
            if mesg['sender_name'] != sender:
                continue
            content = mesg.get('content')
            if content is not None:
                texts.append(content)
    sentences = clean_set(iter(texts))
    # Derive the vocabulary and append it to the corpus.
    vocabulary = {word for sent in sentences for word in sent.split(' ')}
    sentences.extend(vocabulary)
    # Emit the final corpus, one entry per line.
    with open(wpath, 'w+') as corpus:
        for entry in sentences:
            corpus.write(entry + '\n')
def build_mm_for_user(sender, corpus_path):
    """Build and compile a newline-delimited Markov model from the
    corpus at *corpus_path*.

    *sender* is accepted for symmetry with gen_user_corpus but is not
    used here.
    """
    with open(corpus_path, 'r') as handle:
        corpus_text = handle.read()
    return markovify.NewlineText(corpus_text).compile()
def gen_valid_sent(model, init_state=None):
    """Sample *model* until it produces a sentence.

    markovify's make_sentence can return None; keep retrying until it
    succeeds.  When *init_state* is given, generation is seeded from
    that word (wrapped in the markovify begin-state tuple).
    """
    state = None if init_state is None else ('___BEGIN__', init_state)
    while True:
        sentence = model.make_sentence(init_state=state)
        if sentence is not None:
            return sentence
def get_next_sent_subj(sent):
    """Pick a random noun from *sent*, or None when none exists.

    The sentence is lower-cased, stop words are removed via the spaCy
    vocabulary, and the remainder is re-parsed so POS tags reflect the
    filtered text.
    """
    lowered = [tok.text.lower() for tok in NLP_ENGINE(sent)]
    lexemes = [NLP_ENGINE.vocab[text] for text in lowered]
    content_words = [lex.text for lex in lexemes if not lex.is_stop]
    filtered_doc = NLP_ENGINE(' '.join(content_words))
    nouns = [tok.text for tok in filtered_doc if tok.pos_ == 'NOUN']
    if not nouns:
        return None
    return random.choice(nouns)
if __name__ == '__main__':
    # Build a cleaned corpus and a compiled Markov model for each
    # participant.  gen_user_corpus writes the corpus file and returns
    # None, so the mu/js bindings are unused.
    mu = gen_user_corpus('Michael Usachenko', 'mu_corpus.txt')
    mu_model = build_mm_for_user('Michael Usachenko', 'mu_corpus.txt')
    js = gen_user_corpus('Jonathan Shobrook', 'js_corpus.txt')
    js_model = build_mm_for_user('Jonathan Shobrook', 'js_corpus.txt')
    # generate starting sentence
    init_sent = gen_valid_sent(mu_model)
    init_subj = get_next_sent_subj(init_sent)
    # WIP: back and forth conversation. need to modify markovify libs
    # works for a few cycles, then errors
    past_init = False
    prior_resp = None
    """
    for i in range(100):
        if not past_init:
            past_init = True
            js_resp = gen_valid_sent(js_model, init_state=init_subj)
            print('JONATHAN:', js_resp)
            prior_resp = js_resp
        else:
            next_subj = get_next_sent_subj(prior_resp)
            mu_resp = gen_valid_sent(mu_model, init_state=next_subj)
            print('MICHAEL:', mu_resp)
            next_subj = get_next_sent_subj(mu_resp)
            js_resp = gen_valid_sent(js_model, init_state=next_subj)
            print('JONATHAN:', js_resp)
            prior_resp = js_resp
    """
    # NOTE(review): the loop below alternates *unseeded* samples from
    # the two models; the seeded hand-off is the commented-out WIP.
    for i in range(100):
        #next_subj = get_next_sent_subj(prior_resp)
        mu_resp = gen_valid_sent(mu_model)
        print('MICHAEL:', mu_resp)
        #next_subj = get_next_sent_subj(mu_resp)
        js_resp = gen_valid_sent(js_model)
        print('JONATHAN:', js_resp)
        #prior_resp = js_resp
| 23 | 67 | 0.717874 |
6e6ceb4b1bd05af797219ac67e3f71b01f520394 | 6,211 | py | Python | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | src/cnf_shuffler.py | jreeves3/BiPartGen-Artifact | d7c6db628cad25701a398da67ab87bb725513a61 | [
"MIT"
] | null | null | null | #/**********************************************************************************
# Copyright (c) 2021 Joseph Reeves and Cayden Codel, Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **************************************************************************************************/
# @file cnf_shuffler.py
#
# @usage python cnf_shuffler.py [-cnsv] <input.cnf>
#
# @author Cayden Codel (ccodel@andrew.cmu.edu)
#
# @bug No known bugs.
import random
import sys
import os
from optparse import OptionParser
# Command-line options selecting which shuffling transforms to apply to
# the input DIMACS CNF.
parser = OptionParser()
parser.add_option("-c", "--clauses", dest="clauses", action="store_true",
        help="Shuffle the order of the clause lines in the CNF")
parser.add_option("-n", "--names", dest="names", action="store_true",
        help="Shuffle the names of the literals in the clauses")
parser.add_option("-r", "--random", dest="seed",
        help="Provide a randomization seed")
parser.add_option("-s", "--signs", dest="signs",
        help="Switch the sign of literals with the provided prob")
parser.add_option("-v", "--variables", dest="variables",
        help="Shuffle the order of the variables with prob")
(options, args) = parser.parse_args()
# The CNF path is the last positional argument.
f_name = sys.argv[-1]
if len(sys.argv) == 1:
    print("Must supply a CNF file")
    exit()
# Parse the provided CNF file
if not os.path.exists(f_name) or os.path.isdir(f_name):
    print("Supplied CNF file does not exist or is directory", file=sys.stderr)
    exit()
cnf_file = open(f_name, "r")
cnf_lines = cnf_file.readlines()
cnf_file.close()
# Verify that the file has at least one line
if len(cnf_lines) == 0:
    print("Supplied CNF file is empty", file=sys.stderr)
    exit()
# Do treatment on the lines
cnf_lines = list(map(lambda x: x.strip(), cnf_lines))
# Verify that the file is a CNF file: the DIMACS header is "p cnf <vars> <clauses>".
header_line = cnf_lines[0].split(" ")
if header_line[0] != "p" or header_line[1] != "cnf":
    print("Supplied file doesn't follow DIMACS CNF convention")
    exit()
num_vars = int(header_line[2])
num_clauses = int(header_line[3])
# The header is echoed unchanged; only the clause lines are transformed.
print(" ".join(header_line))
cnf_lines = cnf_lines[1:]
# Seed the RNG: reproducible when -r is given, system entropy otherwise.
if options.seed is not None:
    random.seed(a=int(options.seed))
else:
    random.seed()

# If the -c option is specified, shuffle the clause order in place.
# Fix: random.shuffle() shuffles in place and returns None; the old
# code assigned its return value back to cnf_lines, wiping out the
# whole formula whenever -c was used.
if options.clauses:
    random.shuffle(cnf_lines)
# If the -v option is specified, permute the order of variables.
# NOTE(review): despite the option name, this shuffles the *literal
# order within each clause line* (with probability var_prob per line),
# not a global variable ordering.
if options.variables is not None:
    var_prob = float(options.variables)
    if var_prob <= 0 or var_prob > 1:
        print("Prob for var shuffling not between 0 and 1", file=sys.stderr)
        exit()
    # TODO this doesn't work if each line is a single variable, etc.
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        atoms = line.split(" ")
        # Skip comment lines, and skip this clause with prob 1 - var_prob.
        if atoms[0][0] == "c" or random.random() > var_prob:
            continue
        if atoms[-1] == "0":
            # Keep the DIMACS clause terminator "0" in its final position.
            atoms = atoms[:-1]
            random.shuffle(atoms)
            atoms.append("0")
        else:
            random.shuffle(atoms)
        cnf_lines[i] = " ".join(atoms)
# Now do one pass through all other lines to get the variable names.
# -n: apply a random permutation to the variable names, preserving each
# literal's sign.
if options.names:
    literals = {}
    for line in cnf_lines:
        if line[0] == "c":
            continue
        atoms = line.split(" ")
        for atom in atoms:
            # Collect the variable (absolute value); 0 is the clause
            # terminator, not a literal.
            lit = abs(int(atom))
            if lit != 0:
                literals[lit] = True
    # After storing all the literals, permute: map each variable name to
    # a shuffled partner drawn from the same set of names.
    literal_keys = list(literals.keys())
    p_keys = list(literals.keys())
    random.shuffle(p_keys)
    zipped = list(zip(literal_keys, p_keys))
    for k, p in zipped:
        literals[k] = p
    # Rewrite every literal through the permutation, keeping its sign.
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        if line[0] == "c":
            continue
        atoms = line.split(" ")
        for j in range(0, len(atoms)):
            if atoms[j] != "0":
                if int(atoms[j]) < 0:
                    atoms[j] = "-" + str(literals[abs(int(atoms[j]))])
                else:
                    atoms[j] = str(literals[int(atoms[j])])
        cnf_lines[i] = " ".join(atoms)
# -s: flip literal signs with the given probability.  The flip decision
# is memoized per literal *string* so every occurrence of the same
# signed literal is flipped consistently across the formula.
# NOTE(review): the memo key includes the sign, so "5" and "-5" make
# independent coin flips — confirm whether a variable-level flip (both
# polarities together) was intended.
if options.signs is not None:
    signs_prob = float(options.signs)
    if signs_prob < 0 or signs_prob > 1:
        print("Sign prob must be between 0 and 1", file=sys.stderr)
        exit()
    flipped_literals = {}
    for i in range(0, len(cnf_lines)):
        line = cnf_lines[i]
        if line[0] == "c":
            continue
        # For each symbol inside, flip weighted coin and see if flip
        atoms = line.split(" ")
        for j in range(0, len(atoms)):
            atom = atoms[j]
            if atom != "0":
                if flipped_literals.get(atom) is None:
                    # First time seeing this literal: decide once.
                    if random.random() <= signs_prob:
                        flipped_literals[atom] = True
                    else:
                        flipped_literals[atom] = False
                if flipped_literals[atom]:
                    atoms[j] = str(-int(atom))
        cnf_lines[i] = " ".join(atoms)
# Finally, output the transformed clause lines (the header was already
# printed above).
for line in cnf_lines:
    print(line)
| 34.893258 | 101 | 0.605378 |
6e6dbb5cefe12073382965816c2a9d3f10ed725c | 4,171 | py | Python | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | 1 | 2020-09-21T12:52:33.000Z | 2020-09-21T12:52:33.000Z | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | null | null | null | test/app_page_scraper_test.py | googleinterns/betel | 2daa56081ccc753f5b7eafbd1e9a48e3aca4b657 | [
"Apache-2.0"
] | 1 | 2020-07-31T09:55:33.000Z | 2020-07-31T09:55:33.000Z | import pathlib
import pytest
from betel import app_page_scraper
from betel import betel_errors
from betel import utils
# Minimal HTML fragments mimicking the relevant parts of a Play Store
# app page.  ICON_HTML has a '%s' slot for the icon URL.
ICON_HTML = """
<img src="%s" class="T75of sHb2Xb">
"""

CATEGORY_HTML = """
<a itemprop="genre">Example</a>
"""

FILTERED_CATEGORY_HTML = """
<a itemprop="genre">Filtered</a>
"""

# A page with neither an icon nor a category element.
SIMPLE_HTML = """
<p>Simple paragraph.</p>
"""

ICON_SUBDIR = pathlib.Path("icon_subdir")
APP_ID = "com.example"
ICON_NAME = "icon_com.example"
EXPECTED_CATEGORY = "example"
# file: URL scheme prefix used to serve the local fixtures as URLs.
FILE = "file:"
@pytest.fixture
def icon_dir(tmp_path_factory):
    # Temporary directory where the scraper stores downloaded icons.
    return tmp_path_factory.mktemp("icon_dir")


@pytest.fixture
def test_dir(tmp_path_factory):
    # Temporary directory that serves the fixture HTML pages via file: URLs.
    return tmp_path_factory.mktemp("test_dir")


@pytest.fixture
def play_scraper(icon_dir, test_dir):
    # Scraper under test, pointed at the local fixture directory and
    # configured with the "example" category filter.
    base_url = FILE + str(test_dir) + "/"
    return app_page_scraper.PlayAppPageScraper(base_url, icon_dir, ["example"])


@pytest.fixture
def input_dir(tmp_path_factory):
    # Spare temporary input directory (not used by the tests below).
    return tmp_path_factory.mktemp("input_dir")
class TestAppPageScraper:
    """Tests for PlayAppPageScraper against local file: fixture pages."""

    def test_get_icon(self, play_scraper, test_dir, icon_dir):
        # The icon referenced by the page is downloaded byte-for-byte
        # into icon_dir/ICON_SUBDIR.
        rand_icon = _create_icon(test_dir)
        _create_html_file(test_dir, ICON_HTML, icon_src=True)
        play_scraper.get_app_icon(APP_ID, ICON_SUBDIR)
        read_icon = icon_dir / ICON_SUBDIR / ICON_NAME
        assert read_icon.exists()
        assert read_icon.read_text() == rand_icon.read_text()

    def test_get_category(self, play_scraper, test_dir):
        # The category is read from the itemprop="genre" anchor; note the
        # expected value is lower-cased ("Example" -> "example").
        _create_html_file(test_dir, CATEGORY_HTML)
        genre = play_scraper.get_app_category(APP_ID)
        assert genre == EXPECTED_CATEGORY

    def test_missing_icon_class(self, play_scraper, test_dir):
        # A page without the icon <img> class raises PlayScrapingError.
        _create_html_file(test_dir, SIMPLE_HTML)
        with pytest.raises(betel_errors.PlayScrapingError) as exc:
            play_scraper.get_app_icon(APP_ID, ICON_SUBDIR)
        assert str(exc.value) == "Icon class not found in html."

    def test_missing_category_itemprop(self, play_scraper, test_dir):
        # A page without the genre itemprop raises PlayScrapingError.
        _create_html_file(test_dir, SIMPLE_HTML)
        with pytest.raises(betel_errors.PlayScrapingError) as exc:
            play_scraper.get_app_category(APP_ID)
        assert str(exc.value) == "Category itemprop not found in html."

    def test_invalid_base_url(self, icon_dir):
        # An unreachable base URL surfaces as an AccessError.
        random_url = "https://127.0.0.1/betel-test-invalid-base-url-835AHD/"
        play_scraper = app_page_scraper.PlayAppPageScraper(random_url, icon_dir)
        with pytest.raises(betel_errors.AccessError) as exc:
            play_scraper.get_app_category(APP_ID)
        assert "Can not open URL." in str(exc.value)

    def test_invalid_icon_url(self, play_scraper, test_dir):
        # The page exists but its icon src does not (no icon file was
        # created), so the icon fetch fails with AccessError.
        _create_html_file(test_dir, ICON_HTML, icon_src=True)
        with pytest.raises(betel_errors.AccessError) as exc:
            play_scraper.get_app_icon(APP_ID)
        assert "Can not retrieve icon." in str(exc.value)

    def test_store_app_info(self, play_scraper, test_dir, icon_dir):
        # store_app_info saves the icon and an "app_id,category" record
        # in the scraper info file.
        expected_info = f"{APP_ID},{EXPECTED_CATEGORY}"
        _create_html_file(test_dir, ICON_HTML + CATEGORY_HTML, icon_src=True)
        rand_icon = _create_icon(test_dir)
        play_scraper.store_app_info(APP_ID)
        retrieved_icon = icon_dir / ICON_NAME
        info_file = icon_dir / utils.SCRAPER_INFO_FILE_NAME
        assert retrieved_icon.exists()
        assert rand_icon.read_text() == retrieved_icon.read_text()
        assert expected_info in info_file.read_text()

    def test_store_app_info_filter(self, play_scraper, test_dir, icon_dir):
        # Apps whose category is not in the scraper's filter list are
        # skipped: no icon is stored.
        _create_html_file(test_dir, ICON_HTML + FILTERED_CATEGORY_HTML, icon_src=True)
        _create_icon(test_dir)
        play_scraper.store_app_info(APP_ID)
        retrieved_icon = icon_dir / ICON_NAME
        assert not retrieved_icon.exists()
def _create_html_file(test_dir, text, icon_src=False):
html_file = test_dir / "details?id=com.example"
if icon_src:
html_img_src = FILE + str(test_dir / ICON_NAME)
text = text % html_img_src
html_file.write_text(text)
def _create_icon(test_dir):
    """Create a fixture "icon" file with arbitrary content under
    *test_dir* and return its path."""
    icon_path = test_dir / ICON_NAME
    icon_path.write_text(str([15, 934, 8953, 409, 32]))
    return icon_path
| 28.182432 | 86 | 0.714217 |