| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
7180333833 | '''
Command line tool to use Github's API for automation.
Usage: githubot [--version] <command> [<args>...]
options:
-h --help Show this message and exit.
-v --version Show version.
Subcommands:
config Config management.
release Releases management.
file Files management.
'''
import sys
import runpy
from docopt import docopt
from githubot import __version__
from githubot import release, config
def set_default_args():
token = None
repo = None
for i, v in enumerate(sys.argv):
if '--token' in v:
token = (i, v)
if '--repo' in v:
repo = (i, v)
cfg = config.read_config()
if not token:
sys.argv.append(f'--token={cfg["token"]}')
if not repo:
sys.argv.append(f'--repo={cfg["repo"]}')
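# Illustrative effect (assuming cfg == {'token': 'T', 'repo': 'owner/name'}):
# running `githubot release create` rewrites sys.argv to
#   ['githubot', 'release', 'create', '--token=T', '--repo=owner/name']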
def main():
args = docopt(__doc__, version=__version__, options_first=True)
set_default_args()
if args['<command>'] == 'release':
from githubot import release
release.main()
elif args['<command>'] == 'file':
from githubot import file
file.main()
elif args['<command>'] == 'config':
from githubot import config
config.main()
else:
pass
if __name__ == '__main__':
main()
| WqyJh/githubot | githubot/githubot.py | githubot.py | py | 1,256 | python | en | code | 1 | github-code | 13 |
11792188193 | from dataclasses import dataclass, field
from sql import session_scope, engine_named, safe_check_dot_db
from sql.helpers import query_stmt, db_add_or_merge, ss_result_to_namedtuples
from sql.models import (
AcadDocumentBase,
DesignError,
AcDbAttributeBase,
TitleBlockEtc,
AcDbBlockReferenceBase,
RevStrip
)
from sqlalchemy.orm import ColumnProperty
from settings import constants as cn
from mylogger import logger
from typing import Optional
from operator import attrgetter
from itertools import groupby
import json
def processing_error(category: str, friendly: str, verbose: str, fatal: bool, session):
if fatal:
friendly = f'Unable to proceed! {friendly}'
logger.error(friendly)
else:
logger.warning(friendly)
de = DesignError(category=category, friendly=friendly, verbose=verbose, fatal=fatal)
session.add(de)
def matching_records_attrs(rows: list, model, field_name: str, content_field_name: str) -> Optional[dict]:
"""
Given rows of attributes in a block, get all values for a given field name and return which ones
match against columns in a given model
Expecting to use this when querying for all attributes in a block, and putting desired content from each attribute
into a block table
:param rows:
:param model:
:param field_name:
:param content_field_name:
:return:
"""
try:
# below dict will have column names as keys, not arg names
attr_dict_all = {getattr(r, field_name): getattr(r, content_field_name) for r in rows}
except AttributeError:
logger.error(f'object {rows[0]} is missing one or both attributes: {field_name}, {content_field_name}')
return
# we're checking the field names in the rows against the column names, BUT returning them on the arg name,
# so that it can be loaded into the model instance. This should be the same in most cases but varies
# i.e. arg->dwg, column->DWG#
column_arg_mapping = {prop.columns[0].name: prop.key for prop in model.__mapper__.iterate_properties if isinstance(prop, ColumnProperty)}
model_cols = list(column_arg_mapping.keys())
attr_dict = {column_arg_mapping[k]: v for k, v in attr_dict_all.items() if k in model_cols}
# handle case where target table class has document_name but source table (acad object) has document
try:
doc = rows[0].document
except AttributeError:
doc = None
if doc and 'document_name' in model_cols:
attr_dict['document_name'] = doc
return attr_dict
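# Illustrative sketch (hypothetical row): an attribute row with tag_string='DWG#'
# and text_string='E-101' passes through column_arg_mapping (column 'DWG#' -> arg
# 'dwg') to give attr_dict == {'dwg': 'E-101'}, ready for TitleBlockEtc(**attr_dict).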
def get_blocks_from_name(session, name):
dwgs = session.query(AcDbBlockReferenceBase).filter_by(name=name).distinct().all()
if not dwgs:
return
return dwgs
@dataclass
class PkgQuery(object):
"""
As a general rule, helper functions with preceding _ will take the session scope as an arg,
because it's expected this will run within a larger function that will initialize the session
"""
db_name: str
db: str = field(init=False)
def __post_init__(self):
self.db = safe_check_dot_db(self.db_name)
@staticmethod
def _get_owned_dwg_list(ss):
# looking for SSOE Logo block
blocks = get_blocks_from_name(ss, cn.OWNED_DWG_BLOCK_NAME)
if not blocks:
return
return [b.document for b in blocks]
@staticmethod
def _get_ifr_dwg_list(ss):
blocks = get_blocks_from_name(ss, cn.IFR_BLOCK_NAME)
if not blocks:
return
return [b.document for b in blocks]
@staticmethod
def _get_nfc_dwg_list(ss):
blocks = get_blocks_from_name(ss, cn.NFC_BLOCK_NAME)
if not blocks:
return
return [b.document for b in blocks]
def build_titleblocks(self):
with session_scope(engine_named(self.db)) as ss:
            # don't want to assume all sheets have the same title block name (especially IFR sheets by other firms)
# we'll look for an attribute with tag_string = 'DWG#', should be common attribute among all title blocks
tblock_dwg_attrs_q = ss.query(AcDbAttributeBase).filter_by(tag_string=cn.TITLE_BLOCK_REQUIRED_ATTR).group_by(AcDbAttributeBase.handle)
tblock_dwg_attrs = tblock_dwg_attrs_q.all()
if not tblock_dwg_attrs:
friendly = 'Titleblocks are not as expected'
verbose = f'{query_stmt(tblock_dwg_attrs_q)} returns 0 rows'
processing_error(category='title block', friendly=friendly, verbose=verbose, fatal=True, session=ss)
return
owned_dwg_list = self._get_owned_dwg_list(ss)
ifr_dwg_list = self._get_ifr_dwg_list(ss)
nfc_dwg_list = self._get_nfc_dwg_list(ss)
doc_list = []
for attr in tblock_dwg_attrs:
# query for all attrs under the block returned given the title block required attr query
tblock_attrs = ss.query(AcDbAttributeBase).filter_by(block_reference_handle_=attr.block_reference_handle_).group_by(AcDbAttributeBase.handle).all()
# get attr dictionary for all fields that match the TitleBlock table
                # (should still work for other firms' title blocks, but may not return as many values)
attr_dict = matching_records_attrs(rows=tblock_attrs, model=TitleBlockEtc, field_name='tag_string', content_field_name='text_string')
tb = TitleBlockEtc(**attr_dict)
if tb.document_name not in doc_list:
doc_list.append(tb.document_name)
if owned_dwg_list:
tb.owned_dwg_ = tb.document_name in owned_dwg_list
if ifr_dwg_list:
tb.ifr_dwg_ = tb.document_name in ifr_dwg_list
if nfc_dwg_list:
tb.nfc_dwg_ = tb.document_name in nfc_dwg_list
db_add_or_merge(instance=tb, session_scope=ss)
else:
logger.warning(f'{tb.document_name} was already added, are there multiple titleblocks in this drawing?')
def build_rev_strips(self):
with session_scope(engine_named(self.db)) as ss:
rev_strip_blocks_q = ss.query(AcDbBlockReferenceBase).filter(AcDbBlockReferenceBase.name.like(cn.REV_STRIP_NAME_SSOE)).group_by(AcDbBlockReferenceBase.handle)
rev_strip_blocks = rev_strip_blocks_q.all()
if not rev_strip_blocks:
friendly = 'no rev strips found'
verbose = f'{query_stmt(rev_strip_blocks_q)} returns 0 rows'
processing_error(category='rev strip error', friendly=friendly, verbose=verbose, fatal=True, session=ss)
return
# group rev strip blocks by document name, and order by y coordinate
# (not using coordinates table for this)
rev_strip_blocks.sort(key=lambda x: (x.document, x.insertion_point[1]))
rs_blocks_grouped = groupby(rev_strip_blocks, key=attrgetter('document'))
for doc, revs in rs_blocks_grouped:
# since we're ordering by y coord ascending, let's define a rank to easily identify and list what the rev order should be for each document
for rank, rev in enumerate(revs):
rev_attrs_q = ss.query(AcDbAttributeBase).filter_by(block_reference_handle_=rev.handle).group_by(AcDbAttributeBase.handle).order_by(AcDbAttributeBase.handle)
rev_attrs = rev_attrs_q.all()
if not rev_attrs:
friendly = 'revision strip has no attributes'
verbose = f'{query_stmt(rev_attrs_q)} returns 0 rows'
processing_error(category='rev strip error', friendly=friendly, verbose=verbose, fatal=False, session=ss)
attr_dict = matching_records_attrs(rows=rev_attrs, model=RevStrip, field_name='tag_string', content_field_name='text_string')
rs = RevStrip(**attr_dict)
# due to tag_string not having a unique requirement in a block, and allowing invalid chars, need to manually get the rev numbers under '#'
                    # there are two '#' tags, the main one used will have the larger height
hashes = [ra for ra in rev_attrs if ra.tag_string == '#']
hashes.sort(key=attrgetter('height'), reverse=True)
rs.hash_1 = hashes[0].text_string
rs.hash_2 = hashes[1].text_string
rs.rank_ = rank
rs.block_reference_handle_ = rev.handle
db_add_or_merge(instance=rs, session_scope=ss)
| cobujo/tieint-remote | processing/pkg.py | pkg.py | py | 8,735 | python | en | code | 0 | github-code | 13 |
3082718515 | import pytest
from company.tests import factories
from exportplan import serializers
@pytest.mark.django_db
def test_company_exportplan_serializer_save():
company = factories.CompanyFactory.create(number='01234567')
export_commodity_codes = [{'commodity_name': 'gin', 'commodity_code': '101.2002.123'}]
export_countries = [{'country_name': 'China', 'country_iso2_code': 'CN'}]
ui_options = {'target_ages': ['25-34', '35-44']}
serializer = serializers.CompanyExportPlanSerializer(
data={
'company': company.pk,
'sso_id': 5,
"export_commodity_codes": export_commodity_codes,
"export_countries": export_countries,
"ui_options": ui_options,
}
)
assert serializer.is_valid() is True
export_plan = serializer.save()
assert export_plan.company == company
assert export_plan.export_commodity_codes == export_commodity_codes
assert export_plan.export_countries == export_countries
assert export_plan.ui_options == ui_options
assert export_plan.sso_id == 5
@pytest.mark.django_db
def test_company_exportplan_serializer_export_countries_fail():
company = factories.CompanyFactory.create(number='01234567')
serializer = serializers.CompanyExportPlanSerializer(
data={
'company': company.pk,
'sso_id': 5,
"export_countries": [{'country_name': None, 'country_iso2_code': 'CN'}],
}
)
assert serializer.is_valid() is False
assert serializer.errors['export_countries']['country_name']
@pytest.mark.django_db
def test_company_exportplan_serializer_commodity_codes_fail():
company = factories.CompanyFactory.create(number='01234567')
serializer = serializers.CompanyExportPlanSerializer(
data={
'company': company.pk,
'sso_id': 5,
'export_commodity_codes': [{'commodity_name': None, 'commodity_code': '101.2002.123'}],
}
)
assert serializer.is_valid() is False
assert serializer.errors['export_commodity_codes']['commodity_name']
@pytest.mark.django_db
def test_export_plan_objectives_serializer_fail():
data = {
'companyexportplan': None,
'description': None,
}
serializer = serializers.CompanyObjectivesSerializer(data=data)
assert serializer.is_valid() is False
| uktrade/directory-api | exportplan/tests/test_serializers.py | test_serializers.py | py | 2,365 | python | en | code | 3 | github-code | 13 |
5290815776 | #!/usr/bin/env python3
#
# Python module that knows where all the data, models, and protocols are, and
# can load them.
#
from __future__ import division, print_function
import inspect
import myokit
import numpy as np
import os
# Get root of this project
try:
frame = inspect.currentframe()
ROOT = os.path.dirname(inspect.getfile(frame))
finally:
    del frame  # Must be manually deleted
ROOT = os.path.join(ROOT, '..')
# Data directory
DATA = os.path.join(ROOT, 'data')
# Model directory
MODEL = os.path.join(ROOT, 'model-and-protocols')
# Protocol directory
PROTO = os.path.join(ROOT, 'model-and-protocols')
def load(cell, protocol, cached=None, cap_filter=True):
"""
Returns data for the given cell and protocol, with capacitance filtering
applied.
Arguments:
``cell``
The cell to use (integer).
``protocol``
The protocol to use (integer)
``cached``
Optional cached data. If given, this will be returned directly.
``cap_filter``
Enable capacitance filtering (default: True)
Returns a myokit DataLog.
"""
if cached is not None:
return cached
# Get path to data file
trad = os.path.join(DATA, 'traditional-data')
data_files = {
1: os.path.join(trad, 'pr1-activation-kinetics-1-cell-' + str(cell)),
2: os.path.join(trad, 'pr2-activation-kinetics-2-cell-' + str(cell)),
3: os.path.join(trad, 'pr3-steady-activation-cell-' + str(cell)),
4: os.path.join(trad, 'pr4-inactivation-cell-' + str(cell)),
5: os.path.join(trad, 'pr5-deactivation-cell-' + str(cell)),
6: os.path.join(DATA, 'validation-data', 'ap-cell-' + str(cell)),
7: os.path.join(DATA, 'sine-wave-data', 'cell-' + str(cell)),
}
data_file = data_files[protocol]
# Load protocol for capacitance filtering.
variant = protocol < 3 and (cell == 7 or cell == 8)
if variant:
print('Loading variant protocol for capacitance filtering')
else:
print('Loading protocol for capacitance filtering')
protocol = load_myokit_protocol(protocol, variant)
# Load data from zip or csv
if os.path.exists(data_file + '.zip'):
print('Loading ' + data_file + '.zip')
log = myokit.DataLog.load(data_file + '.zip').npview()
else:
print('Loading ' + data_file + '.csv')
log = myokit.DataLog.load_csv(data_file + '.csv').npview()
log.save(data_file + '.zip')
# Apply capacitance filtering
voltage = 'voltage' in log
if cap_filter:
dt = 0.1
signals = [log.time(), log['current']]
if voltage:
signals.append(log['voltage'])
signals = capacitance(protocol, dt, *signals)
else:
signals = [log.time(), log['current']]
if voltage:
signals.append(log['voltage'])
log = myokit.DataLog()
log.set_time_key('time')
log['time'] = signals[0]
log['current'] = signals[1]
if voltage:
log['voltage'] = signals[2]
# Return
return log
def save(cell, protocol, log):
"""
Stores synthetic data for the given cell and protocol.
Arguments:
``cell``
The cell to use (integer).
``protocol``
The protocol to use (integer)
``log``
The log to store
"""
# Test cell index
if cell < 10:
raise ValueError('Artificial cell index must be 10 or greater.')
# Test log
for key in ['time', 'current', 'voltage']:
if key not in log:
raise ValueError('Missing log entry: ' + key)
# Get path to data file
trad = os.path.join(DATA, 'traditional-data')
data_files = {
1: os.path.join(trad, 'pr1-activation-kinetics-1-cell-' + str(cell)),
2: os.path.join(trad, 'pr2-activation-kinetics-2-cell-' + str(cell)),
3: os.path.join(trad, 'pr3-steady-activation-cell-' + str(cell)),
4: os.path.join(trad, 'pr4-inactivation-cell-' + str(cell)),
5: os.path.join(trad, 'pr5-deactivation-cell-' + str(cell)),
6: os.path.join(DATA, 'validation-data', 'ap-cell-' + str(cell)),
7: os.path.join(DATA, 'sine-wave-data', 'cell-' + str(cell)),
}
data_file = os.path.abspath(data_files[protocol])
# Store
print('Storing cell ' + str(cell) + ' data for protocol ' + str(protocol)
+ ' to ' + data_file)
log.save(data_file + '.zip')
log.save_csv(data_file + '.csv')
def load_myokit_model():
"""
Loads the HH version of the Beattie (Myokit) model.
"""
return myokit.load_model(os.path.join(MODEL, 'beattie-2017-ikr-hh.mmt'))
def load_myokit_protocol(protocol, variant=False):
"""
Loads the Myokit protocol with the given index (1-7). For Pr6 and Pr7, the
protocol only has the steps for capacitance filtering.
"""
protocol_files = {
1: os.path.join(PROTO, 'pr1-activation-kinetics-1.mmt'),
2: os.path.join(PROTO, 'pr2-activation-kinetics-2.mmt'),
3: os.path.join(PROTO, 'pr3-steady-activation.mmt'),
4: os.path.join(PROTO, 'pr4-inactivation.mmt'),
5: os.path.join(PROTO, 'pr5-deactivation.mmt'),
6: os.path.join(PROTO, 'pr6-ap-steps.mmt'),
7: os.path.join(PROTO, 'pr7-sine-wave-steps.mmt'),
}
# Load variants for Pr1 and Pr2 for cells 7 and 8
if variant:
if protocol == 1:
protocol = os.path.join(PROTO, 'pr1b.mmt')
elif protocol == 2:
protocol = os.path.join(PROTO, 'pr2b.mmt')
else:
raise ValueError('Variants only exist for Pr1 and Pr2')
else:
protocol = protocol_files[protocol]
# Load Myokit protocol
return myokit.load_protocol(protocol)
def load_ap_protocol():
"""
Returns a tuple ``(times, values)`` representing Pr6.
"""
data_file = os.path.join(DATA, 'validation-data', 'ap')
# Load data from zip or csv
if os.path.exists(data_file + '.zip'):
print('Loading ' + data_file + '.zip')
log = myokit.DataLog.load(data_file + '.zip').npview()
else:
print('Loading ' + data_file + '.csv')
log = myokit.DataLog.load_csv(data_file + '.csv').npview()
log.save(data_file + '.zip')
return log
def load_protocol_values(protocol, variant=False):
"""
Returns a (capacitance filtered) tuple ``(times, voltages)`` for the
selected ``protocol``.
"""
p = load_myokit_protocol(protocol, variant)
if protocol == 6:
log = load_ap_protocol().npview()
t, v = log['time'], log['voltage']
elif protocol == 7:
m = load_myokit_model()
m.get('membrane.V').set_rhs(
'if(engine.time >= 3000.1 and engine.time < 6500.1,'
+ ' - 30'
+ ' + 54 * sin(0.007 * (engine.time - 2500.1))'
+ ' + 26 * sin(0.037 * (engine.time - 2500.1))'
+ ' + 10 * sin(0.190 * (engine.time - 2500.1))'
+ ', engine.pace)')
p = load_myokit_protocol(protocol)
s = myokit.Simulation(m, p)
tmax = p.characteristic_time()
t = np.arange(0, tmax, 0.1)
v = s.run(tmax + 0.1, log=['membrane.V'], log_times=t)
v = np.array(v['membrane.V'])
else:
t = np.arange(0, p.characteristic_time(), 0.1)
v = np.array(p.value_at_times(t))
return capacitance(p, 0.1, t, v)
def capacitance(protocol, dt, *signals):
"""
Creates and applies a capacitance filter, based on a Myokit protocol.
Arguments:
``protocol``
A Myokit protocol.
``dt``
The sampling interval of the given signals.
``signals``
One or more signal files to filter.
Returns a filtered version of the given signals.
"""
cap_duration = 5 # Same as Kylie
fcap = np.ones(len(signals[0]), dtype=int)
steps = [step for step in protocol]
for step in steps[1:]:
i1 = int(step.start() / dt)
i2 = i1 + int(cap_duration / dt)
fcap[i1:i2] = 0
fcap = fcap > 0
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(signals[0], signals[1])
for step in steps[1:]:
plt.axvline(step.start(), color='k', alpha=0.25)
plt.show()
# Apply filter
return [x[fcap] for x in signals]
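# Minimal usage sketch (illustrative; assumes the protocol files exist on disk):
#   p = load_myokit_protocol(3)
#   t = np.arange(0, p.characteristic_time(), 0.1)
#   v = np.array(p.value_at_times(t))
#   t_f, v_f = capacitance(p, 0.1, t, v)  # drops cap_duration worth of samples after each step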
def model_path(model_file):
"""
Returns the path to the given Myokit model file.
"""
return os.path.join(MODEL, model_file)
def protocol_path(protocol_file):
"""
Returns the path to the given Myokit protocol file.
"""
return os.path.join(PROTO, protocol_file)
| CardiacModelling/FourWaysOfFitting | python/data.py | data.py | py | 8,572 | python | en | code | 4 | github-code | 13 |
15392761267 | import copy
import sys
import time
from .module import Module
import numpy
import numpy as np
import importlib.util as imp
if imp.find_spec("cupy"):
import cupy
import cupy as np
na = np.newaxis
# -------------------------------
# Sequential layer
# -------------------------------
class Sequential(Module):
'''
Top level access point and incorporation of the neural network implementation.
Sequential manages a sequence of computational neural network modules and passes
along in- and outputs.
'''
def __init__(self,modules):
'''
Constructor
Parameters
----------
modules : list, tuple, etc. enumerable.
an enumerable collection of instances of class Module
'''
Module.__init__(self)
self.modules = modules
#make sure to migrate py-modules and nn-modules to the same backend
if imp.find_spec("cupy"):
self.to_cupy()
else:
self.to_numpy()
def to_cupy(self):
global np
for m in self.modules:
m.to_cupy()
np = cupy # ensure correct numerics backend
def to_numpy(self):
global np
for m in self.modules:
m.to_numpy()
np = numpy # ensure correct numerics backend
def drop_softmax_output_layer(self):
'''
This function removes the softmax output layer from the model, if there is any.
'''
from .softmax import SoftMax
if isinstance(self.modules[-1], SoftMax):
print('removing softmax output mapping')
del self.modules[-1]
else:
print('output layer is not softmax. nothing to do')
def forward(self,X,lrp_aware=False):
'''
Realizes the forward pass of an input through the net
Parameters
----------
X : numpy.ndarray
a network input.
lrp_aware : bool
controls whether the forward pass is to be computed with awareness for multiple following
LRP calls. this will sacrifice speed in the forward pass but will save time if multiple LRP
calls will follow for the current X, e.g. wit different parameter settings or for multiple
target classes.
Returns
-------
X : numpy.ndarray
the output of the network's final layer
'''
for m in self.modules:
X = m.forward(X,lrp_aware=lrp_aware)
return X
def backward(self,DY):
for m in self.modules[::-1]:
DY = m.backward(DY)
return DY
def update(self,lrate):
for m in self.modules:
m.update(lrate)
def clean(self):
'''
Removes temporary variables from all network layers.
'''
for m in self.modules:
m.clean()
def train(self, X, Y, Xval = [], Yval = [], batchsize = 25, iters = 10000, lrate = 0.005, lrate_decay = None, lfactor_initial=1.0 , status = 250, convergence = -1, transform = None, silent=False):
'''
Provides a method for training the neural net (self) based on given data.
Parameters
----------
X : numpy.ndarray
the training data, formatted to (N,D) shape, with N being the number of samples and D their dimensionality
Y : numpy.ndarray
the training labels, formatted to (N,C) shape, with N being the number of samples and C the number of output classes.
Xval : numpy.ndarray
some optional validation data. used to measure network performance during training.
shaped (M,D)
Yval : numpy.ndarray
the validation labels. shaped (M,C)
batchsize : int
the batch size to use for training
iters : int
max number of training iterations
lrate : float
the initial learning rate. the learning rate is adjusted during training with increased model performance. See lrate_decay
lrate_decay : string
controls if and how the learning rate is adjusted throughout training:
'none' or None disables learning rate adaption. This is the DEFAULT behaviour.
'sublinear' adjusts the learning rate to lrate*(1-Accuracy**2) during an evaluation step, often resulting in a better performing model.
'linear' adjusts the learning rate to lrate*(1-Accuracy) during an evaluation step, often resulting in a better performing model.
lfactor_initial : float
specifies an initial discount on the given learning rate, e.g. when retraining an established network in combination with a learning rate decay,
it might be undesirable to use the given learning rate in the beginning. this could have been done better. TODO: do better.
Default value is 1.0
status : int
number of iterations (i.e. number of rounds of batch forward pass, gradient backward pass, parameter update) of silent training
until status print and evaluation on validation data.
convergence : int
number of consecutive allowed status evaluations with no more model improvements until we accept the model has converged.
Set <=0 to disable. Disabled by DEFAULT.
Set to any value > 0 to control the maximal consecutive number (status * convergence) iterations allowed without model improvement, until convergence is accepted.
transform : function handle
a function taking as an input a batch of training data sized [N,D] and returning a batch sized [N,D] with added noise or other various data transformations. It's up to you!
default value is None for no transformation.
expected syntax is, with X.shape == Xt.shape == (N,D)
def yourFunction(X):
Xt = someStuff(X)
return Xt
'''
def randperm(N,b):
'''
helper method for picking b unique random indices from a range [0,N[.
we do not use numpy.random.permutation or numpy.random.choice
due to known severe performance issues with drawing without replacement.
if the ratio of N/b is high enough, we should see a huge performance gain.
N : int
                range of indices [0,N[ to choose from.
b : the number of unique indices to pick.
'''
assert(b <= N) # if this fails no valid solution can be found.
I = numpy.arange(0)
while I.size < b:
I = numpy.unique(numpy.append(I,numpy.random.randint(0,N,[b-I.size,])))
return np.array(I)
t_start = time.time()
untilConvergence = convergence; learningFactor = lfactor_initial
bestAccuracy = 0.0; bestLayers = copy.deepcopy(self.modules)
bestLoss = np.Inf; bestIter = 0
N = X.shape[0]
for d in range(iters):
#the actual training:
#first, pick samples at random
samples = randperm(N,batchsize)
#transform batch data (maybe)
if transform == None:
batch = X[samples,:]
else:
batch = transform(X[samples,:])
#forward and backward propagation steps with parameter update
Ypred = self.forward(batch)
self.backward(Ypred - Y[samples,:]) #l1-loss
self.update(lrate*learningFactor)
#periodically evaluate network and optionally adjust learning rate or check for convergence.
if (d+1) % status == 0:
if not len(Xval) == 0 and not len(Yval) == 0: #if given, evaluate on validation data
Ypred = self.forward(Xval)
acc = np.mean(np.argmax(Ypred, axis=1) == np.argmax(Yval, axis=1))
l1loss = np.abs(Ypred - Yval).sum()/Yval.shape[0]
if not np == numpy: acc = np.asnumpy(acc); l1loss = np.asnumpy(l1loss)
if not silent: print('Accuracy after {0} iterations on validation set: {1}% (l1-loss: {2:.4})'.format(d+1, acc*100, l1loss))
else: #evaluate on the training data only
Ypred = self.forward(X)
acc = np.mean(np.argmax(Ypred, axis=1) == np.argmax(Y, axis=1))
l1loss = np.abs(Ypred - Y).sum()/Y.shape[0]
if not numpy == np: acc = np.asnumpy(acc); l1loss = np.asnumpy(l1loss)
if not silent: print('Accuracy after {0} iterations on training data: {1}% (l1-loss: {2:.4})'.format(d+1,acc*100,l1loss))
#save current network parameters if we have improved
#if acc >= bestAccuracy and l1loss <= bestLoss:
# only go by loss
if l1loss <= bestLoss:
if not silent: print(' New loss-optimal parameter set encountered. saving....')
bestAccuracy = acc
bestLoss = l1loss
bestLayers = copy.deepcopy(self.modules)
bestIter = d
#adjust learning rate
if lrate_decay == None or lrate_decay == 'none':
pass # no adjustment
elif lrate_decay == 'sublinear':
#slow down learning to better converge towards an optimum with increased network performance.
learningFactor = 1.-(acc*acc)
if not silent: print(' Adjusting learning rate to {0} ~ {1}% of its initial value'.format(learningFactor*lrate, numpy.round(learningFactor*100,2)))
elif lrate_decay == 'linear':
#slow down learning to better converge towards an optimum with increased network performance.
learningFactor = 1.-acc
if not silent: print(' Adjusting learning rate to {0} ~ {1}% of its initial value'.format(learningFactor*lrate, numpy.round(learningFactor*100,2)))
#refresh number of allowed search steps until convergence
untilConvergence = convergence
else:
untilConvergence-=1
if untilConvergence == 0 and convergence > 0:
if not silent: print(' No more recorded model improvements for {0} evaluations. Accepting model convergence.'.format(convergence))
break
t_elapsed = time.time() - t_start
percent_done = float(d+1)/iters #d+1 because we are after the iteration's heavy lifting
t_remaining_estimated = t_elapsed/percent_done - t_elapsed
t_m, t_s = divmod(t_remaining_estimated, 60)
t_h, t_m = divmod(t_m, 60)
t_d, t_h = divmod(t_h, 24)
timestring = '{}d {}h {}m {}s'.format(int(t_d), int(t_h), int(t_m), int(t_s))
if not silent: print(' Estimate time until current training ends : {} ({:.2f}% done)'.format(timestring, percent_done*100))
elif (d+1) % (status/10) == 0:
# print 'alive' signal
#sys.stdout.write('.')
l1loss = np.abs(Ypred - Y[samples,:]).sum()/Ypred.shape[0]
if not np == numpy: l1loss = np.asnumpy(l1loss)
if not silent:
sys.stdout.write('batch# {}, lrate {}, l1-loss {:.4}\n'.format(d+1,lrate*learningFactor,l1loss))
sys.stdout.flush()
#after training, either due to convergence or iteration limit
if not silent:
t_elapsed = time.time() - t_start
m, s = divmod(t_elapsed, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
timestring = '{}d {}h {}m {}s'.format(int(d), int(h), int(m), int(s))
print('Training terminated after {}'.format(timestring))
print('Setting network parameters to best encountered network state with {}% accuracy and a loss of {} from iteration {}.'.format(bestAccuracy*100, bestLoss, bestIter))
self.modules = bestLayers
def set_lrp_parameters(self,lrp_var=None,param=None):
for m in self.modules:
m.set_lrp_parameters(lrp_var=lrp_var,param=param)
def lrp(self,R,lrp_var=None,param=None):
'''
Performs LRP by calling subroutines, depending on lrp_var and param or
preset values specified via Module.set_lrp_parameters(lrp_var,lrp_param)
If lrp parameters have been pre-specified (per layer), the corresponding decomposition
will be applied during a call of lrp().
Specifying lrp parameters explicitly when calling lrp(), e.g. net.lrp(R,lrp_var='alpha',param=2.),
will override the preset values for the current call.
How to use:
        net.forward(X) #forward feed some data you wish to explain to populate the net.
then either:
net.lrp() #to perform the naive approach to lrp implemented in _simple_lrp for each layer
or:
for m in net.modules:
m.set_lrp_parameters(...)
net.lrp() #to preset a lrp configuration to each layer in the net
or:
net.lrp(somevariantname,someparameter) # to explicitly call the specified parametrization for all layers (where applicable) and override any preset configurations.
Parameters
----------
R : numpy.ndarray
final layer relevance values. usually the network's prediction of some data points
for which the output relevance is to be computed
dimensionality should be equal to the previously computed predictions
lrp_var : str
either 'none' or 'simple' or None for standard Lrp ,
'epsilon' for an added epsilon slack in the denominator
'alphabeta' or 'alpha' for weighting positive and negative contributions separately. param specifies alpha with alpha + beta = 1
'flat' projects an upper layer neuron's relevance uniformly over its receptive field.
            'ww' or 'w^2' only considers the square weights w_ij^2 as quantities to distribute relevances with.
param : double
the respective parameter for the lrp method of choice
Returns
-------
R : numpy.ndarray
the first layer relevances as produced by the neural net wrt to the previously forward
passed input data. dimensionality is equal to the previously into forward entered input data
Note
----
Requires the net to be populated with temporary variables, i.e. forward needed to be called with the input
for which the explanation is to be computed. calling clean in between forward and lrp invalidates the
temporary data
'''
for m in self.modules[::-1]:
R = m.lrp(R,lrp_var,param)
return R
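# Illustrative end-to-end sketch (layer classes such as Linear and Rect are
# assumed to exist alongside SoftMax in this package):
#   net = Sequential([Linear(784, 64), Rect(), Linear(64, 10), SoftMax()])
#   net.train(X, Y, Xval, Yval, batchsize=25, iters=10000, lrate=0.005)
#   ypred = net.forward(X)
#   R = net.lrp(ypred, lrp_var='epsilon', param=1e-2)  # relevances, same shape as X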
| sebastian-lapuschkin/lrp_toolbox | python/modules/sequential.py | sequential.py | py | 15,100 | python | en | code | 311 | github-code | 13 |
16817073036 | from django.db import models
from django.urls import reverse
from django.utils.html import format_html
from itertools import chain
from overview.make_gantt import *
class Group(models.Model):
""" Модель, описывающая группы туристов """
group_name = models.CharField(max_length=50,
verbose_name='Название группы'
)
date_of_arrival = models.DateField(verbose_name='Дата прибытия группы',
null=True,
blank=True
)
date_of_departure = models.DateField(verbose_name='Дата убытия группы',
null=True, blank=True
)
STATUS = (
('f', 'группа формируется'),
('c', 'группа прибыла'),
('g', 'группа уехала'),
)
status = models.CharField(
max_length=1,
choices=STATUS,
blank=True,
        default='f',
verbose_name='Статус группы',
)
def __str__(self):
return f' Группа {self.group_name}'
class Meta:
verbose_name_plural = "Группы"
ordering = ['date_of_arrival']
class Tourist(models.Model):
""" Модель, описывающая каждого туриста по отдельности """
name = models.CharField(max_length=200, verbose_name='ФИО Туриста')
phone = models.CharField(max_length=20, verbose_name='Телефон')
email = models.EmailField(
max_length=50,
blank=True,
verbose_name='email'
)
note = models.TextField(
max_length=100,
verbose_name='Примечание',
blank=True, null=True
)
visa = models.FileField(
blank=True,
null=True,
upload_to='files',
verbose_name='Копия визы'
)
insurance = models.FileField(
blank=True, null=True,
upload_to='files',
verbose_name='Копия страховки'
)
passport = models.FileField(
blank=True,
null=True,
upload_to='files',
verbose_name='Копия паспорта'
)
others = models.FileField(
blank=True, null=True,
upload_to='files',
verbose_name='Другие документы'
)
group = models.ForeignKey(
'Group',
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name='Группа'
)
STATUS = (
('w', 'ожидается приезд'),
('n', 'ничем не занят'),
('e', 'на экскурсии'),
('p', 'питается'),
('y', 'уехал'),
('g', 'не в группе'),
)
status = models.CharField(
max_length=1,
choices=STATUS,
blank=True,
default='w',
verbose_name='Статус туриста',
)
def colored_name(self):
if self.status == 'w': color = 'ff9900'
elif self.status == 'n': color = '66ff33'
elif self.status == 'e': color = '0000ff'
elif self.status == 'p': color = 'ffcc00'
elif self.status == 'y': color = '000000'
elif self.status == 'g': color = 'ff0000'
else:
color = 'grey'
return format_html('<b><span style="color: #{};">{}</span><b>',
color, self.name)
colored_name.short_description = 'ФИО Туриста'
colored_name.allow_tags = True
def gantt_to_html(self) -> str:
""" Функция берет список всех занятий туриста и рисует по ним диаграммы
возвращает строковое представление HTML странички с диаграммами """
all_business = chain(
# DatelineForHotel.objects.filter(tourist=self).values_list(
# 'hotel__name', 'time_from', 'time_to'),
TimelineForNutrition.objects.filter(tourist=self).values_list(
'nutrition__name', 'time_from', 'time_to'),
TimelineForExcursion.objects.filter(tourist=self).values_list(
'excursion__name', 'time_from', 'time_to')
)
list_of_business = [i for i in all_business]
return start_gantt(list_of_business)
def check_doc(self):
doc_pack = Tourist.objects.filter(
id=self.id
).values('visa', 'insurance', 'passport', 'others')
for pack in doc_pack:
for _, doc in pack.items():
if doc is None or doc == '':
return False
return True
def check_hotel(self):
hotels = DatelineForHotel.objects.filter(tourist=self.id).values_list('hotel__name', flat=True)
return hotels
def check_nutrition(self):
nutritions = TimelineForNutrition.objects.filter(tourist=self.id).values_list('nutrition__name', flat=True)
return nutritions
def __str__(self):
""" Функция, отображающая имя туриста и его телефон"""
return f'{self.name} {self.phone}'
class Meta:
verbose_name_plural = "Туристы"
permissions = (("can_edit", "Editing data"),
("can_get_report", "Getting report"), )
class Event(models.Model):
""" Модель, описывающая события, в которых могут участвовать туристы """
name = models.CharField(max_length=200, verbose_name='Название события')
manager = models.CharField(
max_length=200,
verbose_name='Менеджер группы туристов',
blank=True
)
manager_phone = models.CharField(max_length=20, blank=True)
STATUS = (
('p', 'планируется'),
('c', 'длится'),
('e', 'закончилось')
)
status = models.CharField(
max_length=1,
choices=STATUS,
blank=True,
default='p',
help_text='Статус события',
)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Событие'
verbose_name_plural = 'События'
class TimelineForNutrition(models.Model):
""" Промежуточная модель для хранения времени начала и окончания питания """
time_from = models.DateTimeField(verbose_name='Начало')
time_to = models.DateTimeField(verbose_name='Окончание')
tourist = models.ForeignKey(
'Tourist',
on_delete=models.CASCADE,
verbose_name='Турист'
)
nutrition = models.ForeignKey(
'Nutrition',
on_delete=models.CASCADE,
blank=True,
null=True,
verbose_name='Питание'
)
event = models.ForeignKey(
'Event',
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name='Событие'
)
class Meta:
get_latest_by = "date_from"
verbose_name_plural = "Время для питания"
class TimelineForExcursion(models.Model):
""" Промежуточная модель для хранения времени начала и окончания экскурсий """
time_from = models.DateTimeField(verbose_name='начало экскурсии')
time_to = models.DateTimeField(verbose_name='окончание экскурсии')
tourist = models.ForeignKey(
'Tourist',
on_delete=models.CASCADE,
verbose_name='Турист'
)
excursion = models.ForeignKey(
'Excursion',
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name='Экскурсии'
)
event = models.ForeignKey(
'Event',
on_delete=models.CASCADE,
blank=True,
null=True,
verbose_name='Событие'
)
class Meta:
get_latest_by = "date_from"
verbose_name_plural = "Время для экскурсий"
class DatelineForHotel(models.Model):
""" Промежуточная модель для хранения дат заселения и выселения из отеля """
tourist = models.ForeignKey('Tourist', on_delete=models.CASCADE)
hotel = models.ForeignKey('Hotel', on_delete=models.CASCADE)
time_from = models.DateField(verbose_name='Дата заселения')
time_to = models.DateField(verbose_name='Дата выселения')
class Meta:
get_latest_by = "time_from"
verbose_name_plural = "Временная ось для пребывания в отелях"
class Excursion(models.Model):
""" Модель описывающая экскурсии, которые посещает турист"""
name = models.CharField(
max_length=300,
help_text='Введите название экскурсии'
)
note = models.TextField(
max_length=500,
verbose_name='Описание',
blank=True, null=True
)
cost = models.DecimalField(max_digits=7, decimal_places=2)
timelines = models.ManyToManyField(Tourist,
through='TimelineForExcursion')
def __str__(self):
""" Функция, отображающая название экскурсии """
return self.name
class Meta:
verbose_name = 'Экскурсия'
verbose_name_plural = 'Экскурсии'
class Nutrition(models.Model):
""" Модель описывающая питание туриста """
name = models.CharField(
max_length=300,
help_text='Введите название питания'
)
note = models.TextField(
max_length=500,
verbose_name='Описание',
blank=True, null=True
)
cost = models.DecimalField(max_digits=7, decimal_places=2)
timelines = models.ManyToManyField(Tourist,
through='TimelineForNutrition')
def __str__(self):
""" Функция, отображающая наименование питания """
return self.name
class Meta:
verbose_name = 'Питание'
verbose_name_plural = 'Питание'
class Hotel(models.Model):
""" Модель описывающая отель для туристов """
name = models.CharField(
max_length=300,
verbose_name='Название отеля',
help_text='Введите название отеля'
)
addres = models.CharField(max_length=300)
phone = models.CharField(max_length=20)
cost_for_one_day = models.DecimalField(max_digits=7, decimal_places=2)
check_in = models.TimeField()
check_out = models.TimeField()
datelines = models.ManyToManyField(Tourist, through='DatelineForHotel')
def __str__(self):
""" Функция, отображающая название отеля """
return self.name
class Meta:
verbose_name = 'Отель'
verbose_name_plural = 'Отели'
class FeedFile(models.Model):
file = models.FileField(blank=True, null=True, upload_to="files/%Y/%m/%d")
feed = models.ForeignKey(Tourist, on_delete=models.CASCADE)
class Meta:
verbose_name = 'Другие документы'
verbose_name_plural = 'Другие документы'
| n1energy/tourist | tourists/models.py | models.py | py | 12,160 | python | ru | code | 0 | github-code | 13 |
8151351202 | import csv
from flask.ext.script import Command
from flask.ext.script import Option
from knotmarker.models import PolygonType, User
class TypesImporter(Command):
option_list = (
Option('--types', '-t', dest='types_csv',
help='File with types definition'),
)
def run(self, types_csv):
with open(types_csv) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
polygon_type = PolygonType(**row)
system_user = User.objects().get(email='system')
polygon_type.creator = system_user
polygon_type.save()
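# Illustrative CLI usage (the manage-script name and registered command name are assumptions):
#   python manage.py import_types --types types.csv
# Each CSV row's column headers are expected to match PolygonType field names.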
| TruePositiveLab/knotmarker | knotmarker/commands/import_types.py | import_types.py | py | 646 | python | en | code | 0 | github-code | 13 |
n=int(input())  # number of digits (read but not otherwise used)
lis=input().split(" ")
lis1=[]  # zero digits
lis2=[]  # non-zero digits
for i in lis:
    if i=="0":
        lis1.append(int(i))
    else:
        lis2.append(int(i))
num=""
num0=""
lis1.sort()
for y1 in lis1:
    num0+=str(y1)
lis2.sort()
for y2 in lis2:
    num+=str(y2)
# insert all zeros right after the smallest non-zero digit so the
# result is the smallest number without a leading zero
num=num[0]+num0+num[1:]
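# Worked example (illustrative): n=4, digits "3 0 1 0"
#   lis1=[0,0] -> num0='00'; lis2 sorted -> num='13'
#   num = '1' + '00' + '3' = '1003', the smallest arrangement with no leading zero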
print(int(num))
| Chonapatcc/beta-programming-thailand | numbers/Main.py | Main.py | py | 300 | python | en | code | 0 | github-code | 13 |
5776113398 | import torch
from torch import nn
import numpy as np
import torchvision.transforms as transforms
from torchvision import models
from torchvision.models import resnet50,ResNet50_Weights
from PIL import Image
i2l = { '1000': 'ЗА',
'0100': 'ПРОТИВ',
'0010': 'ВОЗДЕРЖАЛСЯ',
'0001': 'НЕГОЛОСОВАЛ',
'1100': 'И-ЗП',
'1010': "И-ЗВ",
'0110': 'И-ПВ',
'1110': 'И-ЗПВ',
'1101': 'И-ЗП',
'1011': 'И-ЗВ',
'0111': 'И-ПВ',
'1111': 'И-ЗПВ',
'0011': 'ВОЗДЕРЖАЛСЯ',
'1001': 'ЗА',
'0000': 'НЕГОЛОСОВАЛ'}
def reslabel(a):
r=''.join([str(s) for s in a.astype(int).tolist()])
return r
class Resnext50ml(torch.nn.Module):
def __init__(self, mpath,device,n_classes=4):
super().__init__()
resnet = models.resnext50_32x4d()
resnet.fc = torch.nn.Sequential(
nn.Dropout(p=0.2),
torch.nn.Linear(in_features=resnet.fc.in_features, out_features=n_classes)
)
self.base_model = resnet
        self.sigm = torch.nn.Sigmoid()  # multi-label output: one sigmoid per class instead of softmax
self.load_state_dict(torch.load(mpath,map_location=device))
# inference mode
self.eval()
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.device=device
self.transform = transforms.Compose([
transforms.Resize((256, 256)),
# transforms.ToTensor(),
# transforms.Normalize(self.mean, self.std)
])
def forward(self, x):
return self.sigm(self.base_model(x))
def preprocess(self, img, xmin, ymin, xmax, ymax):
(h,w,c) = img.shape
za = [xmin*w, ymin*h, xmax*w, ymax*h]
im = Image.fromarray(img).crop(za)
tim = torch.from_numpy(np.array(self.transform(im)))
tim = torch.permute(tim, (2, 0, 1)).unsqueeze(0)
tim = tim.type(torch.FloatTensor)
return(tim)
def predict(self,im,thr):
with torch.no_grad():
            im = im.to(self.device)
raw_prob = self.forward(im.float()).detach().numpy()[0]
if thr==None:
thr=0.4
raw_pred = np.array(raw_prob > thr, dtype=int)
pred_lab = i2l[reslabel(raw_pred)]
mlabel=str(raw_pred[0])+str(raw_pred[1])+str(raw_pred[2])+str(raw_pred[3])
rp = np.round(raw_prob,decimals=3)
r = [str(rp[0]),str(rp[1]),str(rp[2]),str(rp[3])]
print('r rounded list:',r)
return {'label': pred_lab, 'mlabel': mlabel, 'prob': r}
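# Illustrative inference sketch (the weights path and crop box are hypothetical):
#   model = Resnext50ml('resnext_votes.pth', torch.device('cpu'))
#   tim = model.preprocess(frame, 0.1, 0.1, 0.4, 0.4)  # frame: HxWx3 uint8 ndarray
#   model.predict(tim, thr=0.4)  # -> {'label': ..., 'mlabel': ..., 'prob': [...]}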
################################
class Resnet50s(torch.nn.Module):
def __init__(self,mpath,device,n_classes=2):
super().__init__()
resnet = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
resnet.fc = torch.nn.Sequential(
nn.Linear(2048, 128),
nn.ReLU(inplace=True),
nn.Linear(128, n_classes))
resnet.load_state_dict(torch.load(mpath,map_location=device))
self.base_model = resnet
# inference mode
self.eval()
self.device=device
self.transform = transforms.Compose([
transforms.Resize((256, 256)),
#transforms.ToTensor(),
])
def forward(self, x):
return self.base_model(x)
def preprocess(self, img, xmin, ymin, xmax, ymax):
print(type(img))
img=np.array(img)
print(xmin, ymin, xmax, ymax)
(h,w) = img.shape[:2]
print(h,w)
za = [xmin*w, ymin*h, xmax*w, ymax*h]
print(za)
im=Image.fromarray(img).crop(za)
im = torch.from_numpy(np.array(self.transform(im)))
im = torch.permute(im,(2, 0, 1)).unsqueeze(0)
return(im)
def predict(self,im):
with torch.no_grad():
            im = im.to(self.device)
raw_prob = self.forward(im.float()).detach().numpy()[0]
print(raw_prob)
raw_pred = np.argmax(raw_prob)
mlabel=str(raw_pred)
print(raw_pred)
if (raw_pred==1):
pred_lab = 'НЕ_ПОДПИСАН'
else:
pred_lab = 'ПОДПИСАН'
            rp = np.round(raw_prob,decimals=3)
r = [str(rp[0]),str(rp[1])]
print('r rounded list:',r)
return {'label': pred_lab, 'mlabel': mlabel, 'logit': r}
| terrainternship/Pragmatick_OCR_g | YURI_KOBYZEV/SITE/votemodels/votemodel.py | votemodel.py | py | 4,515 | python | en | code | 0 | github-code | 13 |
43702697672 | """
Run the shift algo by supplying a directory name
"""
# External Packages
import datetime as dt
import helper_functions as hf
import logging
import logging_config
import numpy as np
import os
import re
# Internal Packages
from . import shift_algo
from algorithm import auto_shift
# Set logger for this module
logging_config.configure_logger_env()
logger = logging.getLogger("controller")
def parse_files(f):
"""
Controls the data parsing process
:param list[str] f: A list of all the filenames to process
:return: The experiment date, a list containing the CCNC data, a list containing the SMPS data
:rtype: (str, list[list[str]], list[list[str]])
"""
ccnc_csv_files = [] # Should be hourly files # TODO Add error handling
smps_txt_files = [] # Should be one file # TODO Add error handling
# Acquire the smps and ccnc files from the input files
for a_file in f:
if a_file.lower().endswith('.csv'):
ccnc_csv_files.append(a_file)
elif a_file.lower().endswith('.txt'):
smps_txt_files.append(a_file)
# Stringify each item in the list
ccnc_csv_files = [str(val) for val in ccnc_csv_files]
smps_txt_files = [str(val) for val in smps_txt_files]
# Turn smps to a str instead of a list - Assumes only one file
smps_txt_files = smps_txt_files[0]
exp_date, ccnc_dat = hf.process_csv_files(ccnc_csv_files)
smps_dat = hf.process_tab_sep_files(smps_txt_files)
return exp_date, ccnc_dat, smps_dat
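# Illustrative sketch (mirrors the __main__ block below): given a directory of hourly
# CCNC .csv files plus one SMPS .txt file,
#   experiment_date, ccnc_data, smps_data = parse_files(filenames)
# yields the experiment date string and the two raw data tables.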
def get_smps_counts(smps_dat, c2c):
"""
Finds the basic SMPS counts data
:param list[list[str]] smps_dat: The raw SMPS data
:param float c2c: The flow rate conversion factor
:return: The raw SMPS counts
:rtype: list[float]
"""
# Determine where data is in file
scan_start_times = smps_dat[0]
    # create an empty list of the length needed.
# noinspection PyUnusedLocal
raw_smps_cts = [[] for k in range(len(scan_start_times))]
start_line_index = 0
end_line_index = len(smps_dat) - 1
# Find where second data section begins
for m in range(3, len(smps_dat)):
# Find beginning of middle text section
if re.search('[a-zA-Z]', smps_dat[m][0]):
for k in range(m + 1, len(smps_dat)):
# Find end of middle text section
if not re.search('[a-zA-Z]', smps_dat[k][0]):
start_line_index = k
break
break
target_time = 1
curr_line_index = start_line_index
count_by_scans = [0] * len(scan_start_times)
# Find values and update scans
while True:
curr_time = float(smps_dat[curr_line_index][0])
for k in range(0, len(scan_start_times)):
count = int(smps_dat[curr_line_index][k * 2 + 2])
count_by_scans[k] += count
if hf.are_floats_equal(curr_time, target_time) or curr_line_index == end_line_index:
target_time += 1
for k in range(0, len(scan_start_times)):
# self.scans[j].add_to_raw_smps_counts(count_by_scans[j])
raw_smps_cts[k].append(count_by_scans[k] * c2c)
count_by_scans = [0] * len(scan_start_times)
curr_line_index += 1
if curr_line_index >= end_line_index:
break
return raw_smps_cts
def get_ccnc_counts(ccnc_dat, smps_dat):
"""
Finds the basic CCNC counts data
:param list[list[str]] ccnc_dat: The raw CCNC data
:param list[list[str]] smps_dat: The raw SMPS data
:return: The raw CCNC counts
:rtype: list[list[float]]
"""
scan_start_times = smps_dat[0]
# noinspection PyUnusedLocal
raw_ccnc_cts = [[] for k in range(len(scan_start_times))]
# Get the first position of CCNC count in the ccnc file
scan_number = 0
curr_scan_start_time = dt.datetime.strptime(scan_start_times[scan_number], "%H:%M:%S")
# the index at which ccnc data is in sync with smps data
ccnc_index = 0
while True:
curr_ccnc_time = dt.datetime.strptime(ccnc_dat[ccnc_index][0], "%H:%M:%S")
if curr_ccnc_time > curr_scan_start_time:
scan_number += 1
curr_scan_start_time = dt.datetime.strptime(scan_start_times[scan_number], "%H:%M:%S")
elif curr_ccnc_time < curr_scan_start_time:
ccnc_index += 1
else: # the current ccnc_index is where ccnc starts being in sync with smps
break
finish_scanning_ccnc_dat = False
while not finish_scanning_ccnc_dat:
finish_scanning_ccnc_dat = False
duration = 135
# we do one thing at a time
for k in range(duration + duration // 4): # RESEARCH not evenly div by 4 - what's this?
curr_ccnc_index = ccnc_index + k
# if we reach out of ccnc data bound
if curr_ccnc_index >= len(ccnc_dat):
# stop scanning ccnc data
finish_scanning_ccnc_dat = True
break
# collect a bunch of data from ccnc file
ccnc_count = ccnc_dat[curr_ccnc_index][-3]
raw_ccnc_cts[scan_number].append(ccnc_count)
scan_number += 1
# if we run of out scans to compare with ccnc data, stop scanning ccnc data
if scan_number >= len(scan_start_times):
break
# find the next ccnc_index
# we got to based on the start time, since the duration values are always off
next_scan_start_time = dt.datetime.strptime(scan_start_times[scan_number], "%H:%M:%S")
while True:
curr_ccnc_time = dt.datetime.strptime(ccnc_dat[ccnc_index][0], "%H:%M:%S")
if curr_ccnc_time < next_scan_start_time:
ccnc_index += 1
# if we reach out of ccnc data bound
if ccnc_index >= len(ccnc_dat):
# stop scanning ccnc data
finish_scanning_ccnc_dat = True
break
else:
break
return raw_ccnc_cts
if __name__ == '__main__':
# Get files in directory
# directory = 'C:/Users/purpl/repos/TestData/O3 (150), VOC (150) TRIAL 6/Analysis'
directory = 'C:/Users/purpl/repos/TestData/Penn State 2019/Caryophyllene (150), Ozone (200), dry/ANALYSIS'
# Mark the algo index and pref index
# algo_index = [0, 0, 0, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 0, 0, 13, 13, 0, 12, 12, 0, 12, 0, 0, 0,
# 0, 0, 0, 12, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0]
# pref_index = [0, 0, 0, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0, 13, 13, 0, 0, 0, 0, 14, 14, 0, 13, 13, 0, 13, 0, 0, 0,
# 0, 0, 0, 13, 13, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0]
    # Set filenames for this dir
filenames = os.listdir(directory)
directory = os.path.abspath(directory)
for i in range(len(filenames)):
filenames[i] = os.path.join(directory, filenames[i])
filenames.sort()
experiment_date, ccnc_data, smps_data = parse_files(filenames)
all_raw_smps_counts = get_smps_counts(smps_data, c2c=1.2)
all_raw_ccnc_counts = get_ccnc_counts(ccnc_data, smps_data)
all_raw_ccnc_counts = [[float(j) for j in i] for i in all_raw_ccnc_counts]
all_pro1_smps_counts = np.asarray(all_raw_smps_counts)
all_pro1_ccnc_counts = np.asarray(all_raw_ccnc_counts)
scans = []
for i in range(len(all_raw_ccnc_counts)):
smps = np.asarray(all_pro1_smps_counts[i]).astype(float)
ccnc = np.asarray(all_pro1_ccnc_counts[i]).astype(float)
scans.append([smps, ccnc])
scan_up_time = 0
scan_down_time = 0
for i in range(len(smps_data)):
if ''.join(smps_data[i][0].split()).lower() == "scanuptime(s)":
scan_up_time = int(smps_data[i][1])
scan_down_time = int(smps_data[i + 1][1]) # this is the retrace time
break
debug = {"data": False, "peaks": False, "iter_details": False, "plot": False}
scan_index = None
result1 = shift_algo.process_autoshift(all_pro1_smps_counts, all_pro1_ccnc_counts, index=scan_index, debug=debug)
result2 = []
if scan_index is None:
shift_factors = []
for i in range(len(scans)):
a_scan = scans[i]
smps = a_scan[0]
ccnc = a_scan[1]
shift_factors.append(auto_shift.get_auto_shift(smps, ccnc, scan_up_time, 0)[0])
median_shift = sorted(shift_factors)[(len(shift_factors) + 1) // 2]
for i in range(len(scans)):
a_scan = scans[i]
smps = a_scan[0]
ccnc = a_scan[1]
shift_factor, err_msg = auto_shift.get_auto_shift(smps, ccnc, scan_up_time, median_shift)
result2.append([shift_factor, err_msg])
for index, value in enumerate(err_msg):
if index == 0:
logger.warning("get_auto_shift error on scan: " + str(i))
logger.warning(" (%d) %s" % (index, value))
else:
# noinspection PyTypeChecker
smps = scans[scan_index][0]
# noinspection PyTypeChecker
ccnc = scans[scan_index][1]
shift_factor, err_msg = auto_shift.get_auto_shift(smps, ccnc, scan_up_time, 0)
result2.append([shift_factor, err_msg])
for index, value in enumerate(err_msg):
if index == 0:
logger.warning("get_auto_shift error on scan: " + str(scan_index))
logger.warning(" (%d) %s" % (index, value))
for i in range(len(result2)):
if scan_index is None:
printstring = "Index: %2d" % i
else:
# noinspection PyStringFormat
printstring = "Index: %2d" % scan_index
if result1.iloc[i][0] == result2[i][0]:
printstring += " Match: %3d" % result1.iloc[i][0]
else:
printstring += " O: %3d N: %3d" % (result1.iloc[i][0], result2[i][0])
print(printstring)
| Lilyheart/LILAC | tests/shift_by_files.py | shift_by_files.py | py | 10,087 | python | en | code | 0 | github-code | 13 |
6792115640 | # import external libraries
import pandas as pd
import sys
import os
from cmath import nan
# import internal classes
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '.')))
from model import Model
from project import Project
from structuralMaterial import StructuralMaterial
from structuralCrossSection import StructuralCrossSection
from structuralPointConnection import StructuralPointConnection
class StructuralSchema(Model,
Project,
StructuralMaterial,
StructuralCrossSection,
StructuralPointConnection):
def __init__(self):
self.modelDict = {
'fields' : [
'Name',
'Description',
'Discipline',
'Level of detail',
'Status',
'Owner',
'Revision number',
'Created',
'Last update',
'Source type',
'Source application',
'Source company',
'Global coordinate system',
'LCS of cross-section',
'System of units',
'SAF Version',
'Module version',
'Ignored objects',
'Ignored groups',
'Id'
],
'responses': [
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
''
]
}
self.projectDict = {
'fields' : [
'Name',
'Description',
'Project nr',
'Created',
'Last update',
'Project type',
'Project kind',
'Building type',
'Status',
'Location'
],
'responses' :[
'',
'',
'',
'',
'',
'',
'',
'',
'',
''
]
}
self.structuralMaterialDict = {
'Name' : [],
'Type' : [],
'Subtype' : [],
'Quality' : [],
'Unit mass [kg/m3]' : [],
'E modulus [MPa]' : [],
'G modulus [MPa]' : [],
'Poisson Coefficient' : [],
'Thermal expansion [1/K]' : [],
'Design properties' : [],
'Id' : []
}
self.structuralCrossSectionDict = {
'Name' : [],
'Material' : [],
'Cross-section type' : [],
'Shape' : [],
'Parameters [mm]' : [],
'Profile' : [],
'Form code' : [],
'Description ID of the profile' : [],
'Iy [m4]' : [],
'Iz [m4]' : [],
'It [m4]' : [],
'Iw [m6]' : [],
'Wply [m3]' : [],
'Wplz [m3]' : [],
'Id' : []
}
self.compositeShapeDefDict = {
'Name' : [],
'Material Name 1' : [],
'Polygon contour 1' : [],
'Id' : []
}
self.structuralPointConnectionDict = {
'Name' : [],
'Coordinate X [m]' : [],
'Coordinate Y [m]' : [],
'Coordinate Z [m]' : [],
'Id' : []
}
self.structuralCurveEdge = {
'Name' : [],
'2D member' : [],
'Nodes' : [],
'Segments' : [],
'Id' : []
}
self.structuralCurveMemberDict = {
'Name': [],
'Type' : [],
'Cross section' : [],
            'Arbitrary definition' : [],
'Nodes' : [],
'Segments' : [],
'Begin node' : [],
'End node' : [],
'Internal nodes' : [],
'Length [m]' : [],
'Geometrical shape' : [],
'Parent ID' : [],
'LCS' : [],
'LCS Rotation [deg]' : [],
'Coordinate X [m]' : [],
'Coordinate Y [m]' : [],
'Coordinate Z [m]' : [],
'System line' : [],
'Eccentricity ey [mm]' : [],
'Eccentricity ez [mm]' : [],
'Layer' : [],
'Behaviour in analysis' : [],
'Color' : [],
'Id' : []
}
self.structuralCurveMemberVarying = {
'Name': [],
'Cross sections 1': [],
'Span 1': [],
'Alignment 1': [],
'Cross sections 2': [],
'Span 2': [],
'Alignment 2': [],
'Cross sections 3': [],
'Span 3': [],
'Alignment 3': [],
'Id': [],
}
self.structuralCurveMemberRib = {
'Name': [],
'2D Member': [],
'Cross Section': [],
'Nodes': [],
'Segments': [],
'Begin Node': [],
'End Node': [],
'Internal Nodes': [],
'Length [m]': [],
'Geometrical Shape': [],
'Alignment': [],
'Eccentricity ez [mm]': [],
'Type of connection': [],
'Shape of the rib': [],
'Layer': [],
'Behaviour in analysis': [],
'Effective width': [],
'Width left for check [mm]': [],
'Width left for internal forces [mm]': [],
'Color': [],
'Parent ID': [],
'Id': []
}
self.structuralSurfaceMemberDict = {
'Name' : [],
'Type' : [],
'Material' : [],
'Thickness type' : [],
'Thickness [mm]' : [],
'System plane at' : [],
'Nodes' : [],
'Internal nodes' : [],
'Edges' : [],
'Parent ID' : [],
'Layer' : [],
'LCS Type' : [],
'Coordinate X [m]' : [],
'Coordinate Y [m]' : [],
'Coordinate Z [m]' : [],
'LCS Rotation [deg]' : [],
'Eccentricity ez [mm]' : [],
'Shape' : [],
'Behavior in analysis' : [],
'Color' : [],
'Id' : []
}
self.structuralSurfaceMemberOpeningDict = {
'Name':[],
'2D Member':[],
'Nodes':[],
'Edges':[],
'Id':[]
}
self.structuralSurfaceMemberRegionDict = {
'Name': [],
'Material': [],
'Thickness [mm]': [],
'System plane at': [],
'2D Member': [],
'Nodes': [],
'Edges': [],
'Eccentricity ez [mm]': [],
'Area [m2]': [],
'Parent ID': [],
'Id': []
}
self.structuralStoreyDict = {
'Name' : [],
'Height level [m]' : [],
'Id' : []
}
self.structuralProxyElementDict = {
'Name': [],
'Material': [],
'Color': [],
'Layer': [],
'Id':[]
}
self.structuralProxyElementVerticesDict = {
'Structural proxy element': [],
'Index': [],
'X [m]': [],
'Y [m]': [],
'Z [m]': []
}
self.structuralProxyElementFacesDict = {
'Structural proxy element': [],
'Index': [],
'Definition': []
}
self.structuralPointSupportDict = {
'Name' : [],
'Type' : [],
'Node' : [],
'ux' : [],
'uy' : [],
'uz' : [],
'fix' : [],
'fiy' : [],
'fiz' : [],
'Stiffness X [MN/m]' : [],
'Stiffness Y [MN/m]' : [],
'Stiffness Z [MN/m]' : [],
'Stiffness Fix [MNm/rad]' : [],
            'Stiffness Fiy [MNm/rad]' : [],
'Stiffness Fiz [MNm/rad]' : [],
'Id' : []
}
self.structuralSurfaceConnectionDict = {
'Name': [],
'2D Member': [],
'2D Member Region': [],
'Subsoil': [],
'Description': [],
'C1x [MN/m3]': [],
'C1y [MN/m3]': [],
'C1z Spring': [],
'C1z [MN/m3]': [],
'C2x [MN/m]': [],
'C2y [MN/m]': [],
'Parent ID': [],
'Id': []
}
self.structuralCurveConnectionDict = {
'Name': [],
'Type': [],
'Member': [],
'Member rib': [],
'ux': [],
'uy': [],
'uz': [],
'fix': [],
'fiy': [],
'fiz': [],
'Stiffness X [MN/m2]': [],
'Stiffness Y [MN/m2]': [],
'Stiffness Z [MN/m2]': [],
'Stiffness Fix [MNm/rad/m]': [],
'Stiffness Fiy [MNm/rad/m]': [],
'Stiffness Fiz [MNm/rad/m]': [],
'Coordinate system': [],
'Coordinate definition': [],
'Origin': [],
'Start point [m]': [],
'End point [m]': [],
'Parent ID': [],
'Id': []
}
self.structuralEdgeConnectionDict = {
'Name' : [],
'Type' : [],
'2D Member' : [],
'Edge' : [],
'ux' : [],
'uy' : [],
'uz' : [],
'fix' : [],
'fiy' : [],
'fiz' : [],
'Stiffness X [MN/m2]' : [],
'Stiffness Y [MN/m2]' : [],
'Stiffness Z [MN/m2]' : [],
'Stiffness Fix [MNm/rad/m]' : [],
'Stiffness Fiy [MNm/rad/m]' : [],
'Stiffness Fiz [MNm/rad/m]' : [],
'Coordinate system' : [],
'Coordinate definition' : [],
'Origin' : [],
'Start point [m]' : [],
'End point [m]' : [],
'Id' : []
}
self.relConnectsStructuralMemberDict = {
'Name': [],
'Member': [],
'Position': [],
'ux': [],
'uy': [],
'uz': [],
'fix': [],
'fiy': [],
'fiz': [],
'Stiffness X [MN/m]': [],
'Stiffness Y [MN/m]': [],
'Stiffness Z [MN/m]': [],
'Stiffness Fix [MNm/rad]': [],
'Stiffness Fiy [MNm/rad]': [],
'Stiffness Fiz [MNm/rad]': [],
'Parent ID': [],
'Id': []
}
self.relConnectsSurfaceEdge = {
'Name': [],
'2D Member': [],
'Edge': [],
'ux': [],
'uy': [],
'uz': [],
'fix': [],
'fiy': [],
'fiz': [],
'Stiffness X [MN/m2]': [],
'Stiffness Y [MN/m2]': [],
'Stiffness Z [MN/m2]': [],
'Stiffness Fix [MNm/rad/m]': [],
'Stiffness Fiy [MNm/rad/m]': [],
'Stiffness Fiz [MNm/rad/m]': [],
'Coordinate definition': [],
'Origin': [],
'Start point [m]': [],
'End point [m]': [],
'Parent ID': [],
'Id': []
}
self.relConnectsRigidCrossDict = {
'Name': [],
'1D Members': [],
'Type': [],
'u1': [],
'u2': [],
'u': [],
'fi1': [],
'fi2': [],
'fi': [],
'Stiffness u1 [MN/m]': [],
'Resistance u1 [MN]': [],
'Stiffness u2 [MN/m]': [],
'Resistance u2 [MN]': [],
'Stiffness u [MN/m]': [],
'Resistance u [MN]': [],
'Stiffness fi1 [MNm/rad]': [],
'Resistance fi1 [MNm]': [],
'Stiffness fi2 [MNm/rad]': [],
'Resistance fi2 [MNm]': [],
'Stiffness fi [MNm/rad]': [],
'Resistance fi [MNm]': [],
'Parent ID': [],
'Id': []
}
self.relConnectsRigidLink = {
'Name': [],
'Nodes': [],
'Hinge position': [],
'ux': [],
'uy': [],
'uz': [],
'fix': [],
'fiy': [],
'fiz': [],
'Stiffness X [MN/m]': [],
'Resistance X [MN]': [],
'Stiffness Y [MN/m]': [],
'Resistance Y [MN]': [],
'Stiffness Z [MN/m]': [],
'Resistance Z [MN]': [],
'Stiffness Fix [MNm/rad]': [],
'Resistance Fix [MNm]': [],
'Stiffness Fiy [MNm/rad]': [],
'Resistance Fiy [MNm]': [],
'Stiffness Fiz [MNm/rad]': [],
'Resistance Fiz [MNm]': [],
'Id': []
}
self.relConnectsRigidMemberDict = {
'Name' : [],
'Node' : [],
'2D Members' : [],
'Edges' : [],
'Internal edge' : [],
'1D Members' : [],
'Type' : [],
'ux' : [],
'uy' : [],
'uz' : [],
'fix' : [],
'fiy' : [],
'fiz' : [],
'Stiffness X [MN/m2]' : [],
'Stiffness Y [MN/m2]' : [],
'Stiffness Z [MN/m2]' : [],
'Stiffness Fix [MNm/rad/m]' : [],
'Stiffness Fiy [MNm/rad/m]' : [],
'Stiffness Fiz [MNm/rad/m]' : [],
'Resistance Fiz [MNm/m]' : [],
'Id' : []
}
self.structuralLoadGroupDict = {
'Name' : [],
'Load group type' : [],
'Relation' : [],
'Load type' : [],
'Id' : []
}
self.structuralLoadCaseDict = {
'Name' : [],
'Description' : [],
'Action type' : [],
'Load group' : [],
'Load type' : [],
'Duration' : [],
'Id' : []
}
self.structuralLoadCombinationDict = {
'Name': [],
'Description': [],
'Category': [],
'National standard': [],
'Type': [],
'Load Factor 1': [],
'Multiplier 1': [],
'Load Case name 1': [],
'Load Factor 2': [],
'Multiplier 2': [],
'Load Case name 2': [],
'Id': []
}
self.structuralPointActionDict = {
'Name': [],
'Type': [],
'Direction': [],
'Force action': [],
'Reference node': [],
'Reference member': [],
'Value [kN]': [],
'Vector (X;Y;Z) [kN]': [],
'Load case': [],
'Coordinate system': [],
'Origin': [],
'Coordinate definition': [],
'Position x [m]': [],
'Repeat (n)': [],
'Delta x [m]': [],
'Id': []
}
self.structuralPointMomentDict = {
'Name': [],
'Type': [],
'Direction': [],
'Force action': [],
'Reference node': [],
'Value [kNm]': [],
'Load case': [],
'Coordinate system': [],
'Origin': [],
'Coordinate definition': [],
'Position X [m]': [],
'Repeat (n)': [],
'Delta x [m]': [],
'Id': [],
}
self.structuralCurveActionDict = {
'Name': [],
'Type': [],
'Force action': [],
'Distribution': [],
'Direction': [],
'Value 1 [kN/m]': [],
'Value 2 [kN/m]': [],
'Vector 1(X;Y;Z) [kN/m]': [],
'Vector 2(X;Y;Z) [kN/m]': [],
'Member': [],
'Member rib': [],
'2D Member': [],
'2D Member Region': [],
'2D Member Opening': [],
'Edge': [],
'Internal edge': [],
'Load case': [],
'Coordinate system': [],
'Location': [],
'Coordinate definition': [],
'Origin': [],
'Extent': [],
'Start point [m]': [],
'End point [m]': [],
'Eccentricity ey [mm]': [],
'Eccentricity ez [mm]': [],
'Parent ID': [],
'Id': []
}
        self.structuralCurveMomentDict = {
'Name': [],
'Type': [],
'Force action': [],
'Distribution': [],
'Direction': [],
'Value 1 [kNm/m]': [],
'Value 2 [kNm/m]': [],
'Member': [],
'Member rib': [],
'2D Member': [],
'2D Member Region': [],
'2D Member Opening': [],
'Edge': [],
'Internal edge': [],
'Load case': [],
'Coordinate system': [],
'Location': [],
'Coordinate definition': [],
'Origin': [],
'Extent': [],
'Start point [m]': [],
'End point [m]': [],
'Parent ID': [],
'Id': []
}
self.structuralSurfaceActionDict = {
'Name': [],
'Direction': [],
'Type': [],
'Force action': [],
'Value [kN/m2]': [],
'2D Member': [],
'2D Member Region': [],
'2D Member Distribution': [],
'Load case': [],
'Coordinate system': [],
'Location': [],
'Parent ID': [],
'Id': []
}
self.structuralSurfaceActionThermalDict = {
'Name': [],
'Variation': [],
'TempT [°C]': [],
'TempB [°C]': [],
'2D Member': [],
            '2D Member Region': [],
'Load case': [],
'Parent ID': [],
'Id': []
}
self.structuralCurveActionThermal = {
'Name': [],
'Force action': [],
'Variation': [],
'deltaT [°C]': [],
'TempL [°C]': [],
'TempR [°C]': [],
'TempB [°C]': [],
'Member': [],
'Member rib': [],
'Load case': [],
'Coordinate definition': [],
'Origin': [],
'Start point [m]': [],
'End point [m]': [],
'Parent ID': [],
'Id': []
}
self.structuralPointActionFreeDict = {
'Name': [],
'Type': [],
'Direction': [],
'Value [kN]': [],
'Vector (X;Y;Z) [kN]': [],
'Load case': [],
'Coordinate X [m]': [],
'Coordinate Y [m]': [],
'Coordinate Z [m]': [],
'Coordinate system': [],
'Id': []
}
self.structuralCurveActionFreeDict = {
'Name': [],
'Type': [],
'Distribution': [],
'Direction': [],
'Value 1 [kN/m]': [],
'Value 2 [kN/m]': [],
'Vector 1(X;Y;Z) [kN/m]': [],
'Vector 2(X;Y;Z) [kN/m]': [],
'Load case': [],
'Coordinate X [m]': [],
'Coordinate Y [m]': [],
'Coordinate Z [m]': [],
'Segments': [],
'Coordinate system': [],
'Location': [],
'Id': []
}
self.structuralSurfaceActionFreeDict = {
'Name': [],
'Direction': [],
'Type': [],
'Distribution': [],
'q [kN/m2]': [],
'Load case': [],
'Coordinate X [m]': [],
'Coordinate Y [m]': [],
'Coordinate Z [m]': [],
'Edges': [],
'Coordinate system': [],
'Location': [],
'Id': []
}
self.structuralSurfaceActionDistDict = {
'Name': [],
'Type': [],
'Nodes': [],
'Edges': [],
'Layer': [],
'LCS Type': [],
'Coordinate X [m]': [],
'Coordinate Y [m]': [],
'Coordinate Z [m]': [],
'LCS Rotation [deg]': [],
'Distribution to': [],
'Load applied to': [],
'Id': []
}
self.resultInternalForce1DDict = {
'Result on': [],
'Member': [],
'Member Rib': [],
'Result for': [],
'Load case': [],
'Load combination': [],
'Combination key': [],
'Section at [m]': [],
'Index': [],
'N [kN]': [],
'Vy [kN]': [],
'Vz [kN]': [],
'Mx [kNm]': [],
'My [kNm]': [],
'Mz [kNm]': []
}
def readExcel(self):
'''
Read an external xlsx file to define a StructuralSchema object.
'''
# read excel sheets
self.externalModel = pd.read_excel(r"C:\Users\KaratasD\Desktop\Folders\safpy\safGeometryTest.xlsx", sheet_name="Model", index_col=False)
self.externalProject = pd.read_excel(r"C:\Users\KaratasD\Desktop\Folders\safpy\safGeometryTest.xlsx", sheet_name="Project", index_col=False)
self.externalStructuralMaterial = pd.read_excel(r"C:\Users\KaratasD\Desktop\Folders\safpy\safGeometryTest.xlsx", sheet_name="StructuralMaterial")
# assign model information to object dictionary
if len(self.externalModel.columns) == 2:
self.modelResponses = self.externalModel.iloc[: ,1].to_list()
if self.externalModel.columns[1] == 'Unnamed: 1':
self.modelResponses.insert(0, nan)
else:
self.modelResponses.insert(0, self.externalModel.columns[1])
else:
pass
# assign project information to object dictionary
if len(self.externalProject.columns) == 2:
self.projectResponses = self.externalProject.iloc[: ,1].to_list()
if self.externalProject.columns[1] == 'Unnamed: 1':
self.projectResponses.insert(0, nan)
else:
self.projectResponses.insert(0, self.externalProject.columns[1])
else:
pass
# assign material information to the object dictionary
self.structuralMaterialDict['Name'] = self.externalStructuralMaterial['Name'].to_list()
self.structuralMaterialDict['Type'] = self.externalStructuralMaterial['Type'].to_list()
self.structuralMaterialDict['Subtype'] = self.externalStructuralMaterial['Subtype'].to_list()
self.structuralMaterialDict['Quality'] = self.externalStructuralMaterial['Quality'].to_list()
self.structuralMaterialDict['Unit mass [kg/m3]'] = self.externalStructuralMaterial['Unit mass [kg/m3]'].to_list()
        self.structuralMaterialDict['E modulus [MPa]'] = self.externalStructuralMaterial['E modulus [MPa]'].to_list()
self.structuralMaterialDict['G modulus [MPa]'] = self.externalStructuralMaterial['G modulus [MPa]'].to_list()
self.structuralMaterialDict['Poisson Coefficient'] = self.externalStructuralMaterial['Poisson Coefficient'].to_list()
self.structuralMaterialDict['Thermal expansion [1/K]'] = self.externalStructuralMaterial['Thermal expansion [1/K]'].to_list()
self.structuralMaterialDict['Design properties'] = self.externalStructuralMaterial['Design properties'].to_list()
self.structuralMaterialDict['Id'] = self.externalStructuralMaterial['Id'].to_list()
def exportExcel(self):
modelFrame = pd.DataFrame(self.modelDict)
projectFrame = pd.DataFrame(self.projectDict)
materialFrame = pd.DataFrame(self.structuralMaterialDict)
sectionFrame = pd.DataFrame(self.sectionDict)
pointFrame = pd.DataFrame(self.pointDict)
safFile = pd.ExcelWriter(r'C:\Users\KaratasD\Desktop\Folders\safpy\mySAF.xlsx')
modelFrame.to_excel(safFile, sheet_name='Model', index=False, header=False)
projectFrame.to_excel(safFile, sheet_name='Project', index=False, header=False)
materialFrame.to_excel(safFile, sheet_name='StructuralMaterial', index=False)
sectionFrame.to_excel(safFile, sheet_name='StructuralCrossSection', index=False)
pointFrame.to_excel(safFile, sheet_name='StructuralPointConnection', index=False)
safFile.save()
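# Hypothetical usage sketch (class name taken from the readExcel docstring above;
# the call order is assumed, not prescribed by the original file):
#   schema = StructuralSchema()
#   schema.readExcel()     # load the Model / Project / StructuralMaterial sheets
#   schema.exportExcel()   # write the populated dictionaries to mySAF.xlsx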
| dogukankaratas/safpy | safpy/structure.py | structure.py | py | 24,850 | python | en | code | 0 | github-code | 13 |
30361059047 | import socket
from rhizome.protocol.client import RhizomeClient
from rhizome.protocol.messages import BroadcastMessage
if __name__ == '__main__':
# Create client
sender_id = socket.gethostname()
rhizome_client = RhizomeClient(sender_id)
# Create message
message = BroadcastMessage(sender_id, socket.gethostname(), 54321)
# Send message
host = "localhost"
port = 54321
rhizome_client.send_message(message, host, port)
# Get responses
| scottbarnesg/rhizome | rhizome/run_client.py | run_client.py | py | 476 | python | en | code | 0 | github-code | 13 |
73488257297 | # https://leetcode.com/problems/number-of-valid-words-in-a-sentence/
# A sentence consists of lowercase letters ('a' to 'z'), digits ('0' to '9'), hyphens ('-'), punctuation marks ('!', '.', and ','),
# and spaces (' ') only. Each sentence can be broken down into one or more tokens separated by one or more spaces ' '.
# A token is a valid word if all three of the following are true:
# It only contains lowercase letters, hyphens, and/or punctuation (no digits).
# There is at most one hyphen '-'. If present, it must be surrounded by lowercase characters ("a-b" is valid, but "-ab" and "ab-" are not valid).
# There is at most one punctuation mark. If present, it must be at the end of the token ("ab,", "cd!", and "." are valid, but "a!b" and "c.,"
# are not valid).
# Examples of valid words include "a-b.", "afad", "ba-c", "a!", and "!".
# Given a string sentence, return the number of valid words in sentence.
# Example 1:
# Input: sentence = "cat and dog"
# Output: 3
# Explanation: The valid words in the sentence are "cat", "and", and "dog".
# Example 2:
# Input: sentence = "!this 1-s b8d!"
# Output: 0
# Explanation: There are no valid words in the sentence.
# "!this" is invalid because it starts with a punctuation mark.
# "1-s" and "b8d" are invalid because they contain digits.
# Example 3:
# Input: sentence = "alice and bob are playing stone-game10"
# Output: 5
# Explanation: The valid words in the sentence are "alice", "and", "bob", "are", and "playing".
# "stone-game10" is invalid because it contains digits.
class Solution:
def countValidWords(self, sentence: str) -> int:
# let's first split the string into words
words = sentence.split(' ')
# a helper function to check for hyphens and punctuation marks and digits
def hyph_punct_dig_violation(input_str):
if input_str[0] == '-' or input_str[len(input_str)-1] == '-':
return True
hyphen_found = False
for i in range(len(input_str)):
# check for hyphen
if input_str[i] == '-':
if not hyphen_found:
hyphen_found = True
else:
return True
# check for digit
elif input_str[i].isdigit():
return True
# check for punctuations
elif not input_str[i].isalpha():
if i == len(input_str)-1 and (input_str[len(input_str)-2] != '-'):
continue
else:
return True
return False
count = 0
for word in words:
if word == '':
continue
if not hyph_punct_dig_violation(word):
count += 1
return count
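# Minimal smoke test (added sketch; the expected values come from the examples
# quoted in the comments above):
if __name__ == "__main__":
    s = Solution()
    assert s.countValidWords("cat and dog") == 3
    assert s.countValidWords("!this 1-s b8d!") == 0
    assert s.countValidWords("alice and bob are playing stone-game10") == 5
    print("all examples pass")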
| aslamovamir/LeetCode | number_of_valid_words_in_a_sentence.py | number_of_valid_words_in_a_sentence.py | py | 2,981 | python | en | code | 0 | github-code | 13 |
73709421457 | # -*- coding: utf-8 -*-
import math
from pgmagick import CompositeOperator as co, Geometry
from pgmagick.api import Image as pgai, Draw
__author__ = 'myth'
LEFT_TOP = 'lt'
LEFT_BOTTOM = 'lb'
RIGHT_TOP = 'rt'
RIGHT_BOTTOM = 'rb'
WIDTH_GRID = 30.0
HEIGHT_GRID = 30.0
def dotted_line(start, end, step=5):
"""
虚线坐标
:param start:
:type start:
:param end:
:type end:
:param step:
:type step:
:return:
:rtype:
"""
xl, yl = start
xr, yr = end
w = abs(xl - xr)
# h = abs(yl - yr)
    # convert the coordinates to the third quadrant (negate y)
    yl, yr = -yl, -yr
    # slope
    k = float((yr - yl)) / (xr - xl)
    # intercept
    b = yl - k * xl
pos = [start]
pos_y = lambda x: k * x + b
# pos_x = lambda y: (y - b)/k
if step > 0:
step = math.ceil(step)
_steps = w / step
_steps = _steps-1 if _steps == math.floor(_steps) else math.floor(_steps)
steps = int(_steps)
for s in xrange(steps):
_s = s + 1
x = xl + _s * step
y = pos_y(x)
_pos = (x, -y)
pos.append(_pos)
pos.append(end)
return pos
def oblique_line(xy, width, height, obliquity=45):
"""
斜线坐标
:param xy:
:type xy:
:param width:
:type width:
:param height:
:type height:
:param obliquity:
:type obliquity:
:return:
:rtype:
"""
x, y = xy
    # convert the coordinates to the third quadrant (negate y)
    y = -y
    w, h = width, -height
    # degrees to radians
    # p = obliquity/180. * math.pi
    p = math.radians(obliquity)
    # slope
    k = math.tan(p)
    # intercept
    b = y - k*x
pos_y = lambda x: k * x + b
pos_x = lambda y: (y - b)/k
if k > 0:
        # left x coordinate
        xl = 0
        # left y coordinate
        yl = pos_y(xl)
        if yl < h:
            yl = h
            xl = pos_x(yl)
        # right y coordinate
        yr = 0
        # right x coordinate
        xr = pos_x(yr)
        if xr > w:
            xr = w
            yr = pos_y(xr)
else:
        # left y coordinate
        yl = 0
        # left x coordinate
        xl = pos_x(yl)
        if xl < 0:
            xl = 0
            yl = pos_y(xl)
        # right x coordinate
        xr = w
        # right y coordinate
        yr = pos_y(xr)
        if yr < h:
            yr = h
            xr = pos_x(yr)
pos = ((round(xl), round(-yl)), (round(xr), round(-yr)))
return pos
def draw_grid(im):
"""
图片绘制网格
"""
w, h = im.width, im.height
draw = Draw()
default_step = 50
step = w / 10
step = step if step > default_step else default_step
positives = dotted_line((0, 0), (w, h), step=step)
reverses = dotted_line((0, h), (w, 0), step=step)
    # stroke color
    draw.stroke_color((220, 220, 220))
    # stroke width
    draw.stroke_width(2)
    # opacity
    draw.stroke_opacity(0.2)
def _draw_lines(coordinates, draw, size, obliquity=45):
w, h = size
for _pos in coordinates:
pos = oblique_line(_pos, w, h, obliquity=obliquity)
start, end = pos
pos = dotted_line(start, end, step=30)
for i, p in enumerate(pos):
if i % 3 == 1:
start_x, start_y = start
end_x, end_y = p
draw.line(start_x, start_y, end_x, end_y)
if i % 3 == 0:
start = p
_draw_lines(positives[1:-1], draw, (w, h), obliquity=30)
_draw_lines(reverses[1:-1], draw, (w, h), obliquity=-30)
im.draw(draw)
def mark_layout(im, mark, layout=RIGHT_BOTTOM):
"""
设置水印位置
:param im:
:type im: pgmagick.api.Image
:param mark:
:type mark: pgmagick.api.Image
:param layout:
:type layout:
:return:
:rtype:
"""
im_width, im_height = im.width, im.height
mark_width, mark_height = mark.width, mark.height
coordinates = {LEFT_TOP: (int(im_width/WIDTH_GRID), int(im_height/HEIGHT_GRID)),
LEFT_BOTTOM: (int(im_width/WIDTH_GRID), int(im_height - mark_height - im_height/HEIGHT_GRID)),
RIGHT_TOP: (int(im_width - mark_width - im_width/WIDTH_GRID), int(im_height/HEIGHT_GRID)),
RIGHT_BOTTOM: (int(im_width - mark_width - im_width/WIDTH_GRID),
int(im_height - mark_height - im_height/HEIGHT_GRID))}
return coordinates[layout]
def set_mark_picture(im, mark):
"""
设置水印图片的大小
:param im: 原始图片
:type im: pgmagick.api.Image
:param mark: 水印图片
:type mark: pgmagick.api.Image
"""
im_width, im_height = im.width, im.height
mark_width, mark_height = mark.width, mark.height
min_scale = 0.18
max_scale = 0.3
x_scale = float(mark_width) / float(im_width)
y_scale = float(mark_height) / float(im_height)
is_scale = False
if x_scale < min_scale and y_scale < min_scale:
mark_width = int(min_scale * im_width)
mark_height = int(min_scale * im_height)
is_scale = True
if x_scale > max_scale or y_scale > max_scale:
mark_width = int(max_scale * im_width)
mark_height = int(max_scale * im_height)
is_scale = True
if is_scale:
        # resize the watermark image
mark.img.scale(Geometry(mark_width, mark_height))
mark.img.strokeAntiAlias(False)
def draw_watermark(im, mark, layout=RIGHT_BOTTOM):
"""
绘制水印
:param im: 原始图片
:type im: pgmagick.api.Image
:param mark: 水印图片
:type mark: pgmagick.api.Image
"""
set_mark_picture(im, mark)
x, y = mark_layout(im, mark, layout=layout)
geo = Geometry(0, 0, x, y)
color = im.img.pixelColor(x, y)
# print color.to_std_string()
b = color.blueQuantum()
r = color.redQuantum()
g = color.greenQuantum()
if r > 100 or g > 100 or b > 100:
op = co.MinusCompositeOp
else:
op = co.OverCompositeOp
im.composite(mark.img, geo, op)
if __name__ == '__main__':
MARK_IMAGE = './kuaiyin.png'
new_image_s_filename = '/home/myth/temp/tmp/test.jpg'
mark = pgai(MARK_IMAGE)
im = pgai(new_image_s_filename)
draw_grid(im)
draw_watermark(im, mark)
im.write('/home/myth/temp/tmp/b2.jpg')
| ederrafo/bottle | util/thumbnail/watermark.py | watermark.py | py | 6,283 | python | en | code | 0 | github-code | 13 |
13527458682 | from figuras.cuadrado import area_cuadrado, perimetro_cuadrado
from figuras.circulo import area_circulo, perimetro_circulo
lado = 4
cuadrado = {
"lado": lado,
"area": area_cuadrado(lado), #Al importar nos permite ejecutar las funciones que contienen
"perimetro": perimetro_cuadrado(lado)
}
print("cuadrado:", cuadrado)
perimetro = perimetro_cuadrado(lado)
print(perimetro)
radio = 5
circulo = {
"radio": radio,
"area": area_circulo(radio),
"perimetro": perimetro_circulo(radio)
}
print("circulo:", circulo) | alberto006-esp/curso-python-esencial | main.py | main.py | py | 534 | python | pt | code | 0 | github-code | 13 |
39778396552 | import constants as c
import shared_build_steps as u
def add_java_build_step(platform_config):
# after the maven build is complete, copy the JAR artifact to the central output directory
__add_maven_step(platform_config, c.build_j2v8_java, u.java_build_cmd, [u.copyOutput])
def add_java_test_step(platform_config):
# running maven tests by themselves usually does not generate any output we need to copy
__add_maven_step(platform_config, c.build_j2v8_test, u.java_tests_cmd)
def __add_maven_step(platform_config, build_step, step_cmd, post_step_cmds = []):
# add the common preparation sequence for a maven build-step to the platform-config
if not hasattr(platform_config, "prepare_maven"):
platform_config.prepare_maven = lambda config: \
u.clearNativeLibs(config) + \
u.copyNativeLibs(config) + \
u.setJavaHome(config)
#-----------------------------------------------------------------------
# add a build-step that involves running maven and requires some preparation
def java_build_step():
def build_func(config):
# update maven pom.xml settings
u.apply_maven_config_settings(config)
# add the extra step arguments to the command if we got some
step_args = getattr(config, "args", None)
step_args = " " + step_args if step_args else ""
post_cmds = []
# post-cmds can be strings or functions
for ps_cmd in post_step_cmds:
if callable(ps_cmd):
ps = ps_cmd(config)
post_cmds += ps
else:
post_cmds.append(ps_cmd)
# assemble the commands for this build-step
# includes the preparation commands for maven
# and also any commands that should be run after the maven command is finished
steps = \
platform_config.prepare_maven(config) + \
[step_cmd + step_args] + \
post_cmds
# the shell was already prepared for running maven,
            # if another Java step runs later, this does not need to be done again
platform_config.prepare_maven = lambda cfg: ["echo Native lib already copied..."]
return steps
return build_func
#-----------------------------------------------------------------------
platform_config.build_step(build_step, java_build_step())
#-----------------------------------------------------------------------
| eclipsesource/J2V8 | build_system/java_build_steps.py | java_build_steps.py | py | 2,560 | python | en | code | 2,446 | github-code | 13 |
20033826330 | import queue
"""
Observations:
For each proposed two-way road, compute the cost of a path from S to T; the proposal that yields the shortest path is the answer.
Note: done naively, every proposed two-way road requires re-running Dijkstra, which exceeds the time limit (TLE).
Instead, improve the approach so Dijkstra is not re-run for every attached road:
Run Dijkstra from S towards T, and again in the reverse direction from T towards S (two Dijkstra calls in total).
After running Dijkstra we have:
distS[u]: shortest distance from vertex S to vertex u (in the demo graph on the platform, 1->5).
distT[v]: shortest distance from vertex v to vertex T (in the demo graph on the platform, 3->4).
Attach each two-way road in turn and apply the formula below; it is equivalent to re-running Dijkstra after each attachment.
After attaching road d, if this sum is smaller than the current best path, it becomes the new shortest path.
"""
class Node:
def __init__(self, id, dist):
self.id = id
self.dist = dist
def __lt__(self, other):
        return self.dist < other.dist  # strict "less than"; equality should not report as smaller
def Dijkstra(s, dist, graph):
pq = queue.PriorityQueue()
pq.put(Node(s, 0))
dist[s] = 0
while pq.empty() == False:
top = pq.get()
u = top.id
w = top.dist
        if dist[u] != w:  # stale queue entry (a shorter distance was already found) - skip it
            continue
for neighbor in graph[u]:
if neighbor.dist + w < dist[neighbor.id]:
dist[neighbor.id] = neighbor.dist + w
pq.put(Node(neighbor.id, dist[neighbor.id]))
return dist
nb_dataset = int(input())
for i in range(nb_dataset):
N, M, K, S, T = map(int, input().split())
MAX = N + 1
INF = M * 1001
cost = INF
path_S_T = []
graph_S = [[] for i in range(MAX)]
graph_T = [[] for i in range(MAX)]
dist_ST = [INF for i in range(MAX)]
dist_TS = [INF for i in range(MAX)]
for m in range(M):
u, v, w = map(int, input().split())
graph_S[u].append(Node(v, w))
graph_T[v].append(Node(u, w))
dist_ST = Dijkstra(S, dist_ST, graph_S)
dist_TS = Dijkstra(T, dist_TS, graph_T)
for k in range(K):
u, v, w = map(int, input().split())
dist_S_U = dist_ST[u] # shortest path from s to u, in the road from s to t
dist_V_T = dist_TS[v] # shortest path from t to v, in the road from t to s
dist_S_V = dist_ST[v] # shortest path from s to v, in the road from s to t
dist_U_T = dist_TS[u] # shortest path from t to u, in the road from t to s
dist_S_U_V_T = dist_S_U + w + dist_V_T
dist_S_V_U_T = dist_S_V + w + dist_U_T
cost = min(cost, min(dist_S_U_V_T, dist_S_V_U_T))
if cost == INF:
print(-1)
else:
print(cost)
| luffy2106/bigO_coding | Lecture8_Dijkstra/TrafficNetwork.py | TrafficNetwork.py | py | 3,096 | python | vi | code | 0 | github-code | 13 |
35311452489 | import random
opcion = 0
numEscogido = 0
numAleatorio = 0
#----------------------------------------------------------------
def ahorcado():
with open("juego_penjat.txt","r") as file:
alltext=file.read()
words = list(map(str, alltext.split()))
palabraRandom=random.choice(words)
numeroLetras=len(palabraRandom)
palabra_sin_descubrir = numeroLetras * "_"
contador_intentos=numeroLetras*2
print(palabra_sin_descubrir)
lista=list(palabra_sin_descubrir)
count=0
while contador_intentos>0 and "_" in palabra_sin_descubrir:
letra=input("Selecciona la letra que crees que esta").upper()
count=0
        # check each letter of the word against the entered letter; on a match, keep the position and replace the "_" placeholder there with the letter
for i in palabraRandom:
if(i==letra):
lista[count]=letra
palabra_sin_descubrir="".join(lista)
count=count +1
print(palabra_sin_descubrir)
contador_intentos=contador_intentos-1
        if(contador_intentos==0):
            print("Te has quedado sin intentos has perdido")
            file.close()
            juego()
            return  # without this, the win message below would also run after losing
print(palabra_sin_descubrir)
print("Has ganado felicidades")
file.close()
juego()
#----------------------------------------------------------------
def numeroAleatorio():
jugada = 0
while(jugada < 3):
numAleatorio=random.randint(1, 10)
numEscogido = input("Indica un numero entero del 1 al 10 ")
jugada = jugada + 1
if(str.isdigit(numEscogido) == True):
numEscogido = int(numEscogido)
numAleatorio = int(numAleatorio)
if (numAleatorio != numEscogido):
print("No has acertado tu has escogido ",numEscogido," la maquina ha escogido ",numAleatorio)
else:
print("Has acertado tu has escogido ",numEscogido," la maquina ha escogido ",numAleatorio,"\n volviendo al menu principal")
juego()
else:
print (numEscogido , " no es un numero valido")
jugada = jugada - 1
print("Has superado los intentos maximos volviendo al menu principal")
juego()
#----------------------------------------------------------------
def piedraPapelTisores():
puntosJugador = 0
puntosMaquina = 0
while (puntosMaquina < 3 and puntosJugador < 3):
jugadaEscogida = input("Escoge una jugada: piedra, papel, tisores ")
jugadaEscogida = jugadaEscogida.lower()
jugadaPosibles = ["piedra","papel","tisores"]
jugadaAleatoria = random.randint(0,2)
jugadaMaquina=jugadaPosibles[jugadaAleatoria]
if(jugadaEscogida == "piedra" or jugadaEscogida == "papel" or jugadaEscogida == "tisores" ):
if (jugadaMaquina == jugadaEscogida):
print("Has empatado la maquina escogio ",jugadaMaquina," tu has escogido ",jugadaEscogida)
elif (jugadaMaquina == "tisores" and jugadaEscogida == "piedra" or jugadaMaquina == "papel" and jugadaEscogida == "tisores" or jugadaMaquina == "piedra" and jugadaEscogida == "papel"):
print("Has ganado la maquina escogio ",jugadaMaquina," tu has escogido ",jugadaEscogida)
puntosJugador+=1
else:
print("Has perdido la maquina escogio ",jugadaMaquina," tu has escogido ",jugadaEscogida)
puntosMaquina+=1
else:
print(jugadaEscogida ," no es una opcion valida")
piedraPapelTisores()
if (puntosJugador == 3):
print("Has ganado al mejor de tres")
juego()
elif(puntosMaquina == 3 ):
print("La maquina ha ganado al mejor de tres")
juego()
#----------------------------------------------------------------
def juego():
print("Selecione el juego al que quiere jugar \n 1.Adivina el numero del 1 al 10 \n 2.Paper pedra tisores \n 3.Ahorcado \n 4.Salir")
opcion = input("Indica el numero del juego al que quieres jugar ")
if(str.isdigit(opcion) == True):
opcion = int(opcion)
if (opcion >=1 and opcion <= 4 ):
if opcion == 1:
numeroAleatorio()
elif opcion == 2:
piedraPapelTisores()
elif opcion == 3:
ahorcado()
elif opcion == 4:
print("Hasta la proxima....")
else:
print("La opcion " ,opcion," no es valida vuelve a escoger ")
juego()
else:
print("La opcion " ,opcion," no es valida vuelve a escoger ")
juego()
#----------------------------------------------------------------
juego() | AdriaRodriguez/Actividad-1-PRogramacion | ACTIVIDAD1.py | ACTIVIDAD1.py | py | 5,221 | python | es | code | 0 | github-code | 13 |
1025455033 |
from typing import List
from .metadata import MetaData
from ..utils import get_yes_no_input
from .analogy import evaluate_analogy_folder
from .similarity import evaluate_similarity_folder
def add_metadata_file(folder: str, file_type: str, extension: str, attributes: List[str]):
with MetaData(
folder,
file_type,
attributes,
extension) as md:
if md.metadata_exists:
overwrite = get_yes_no_input("Do you want to overwrite the existing metadata file?")
if overwrite == "yes":
print("INFO: Existing metadata will be overwritten.")
else:
print("INFO: Exiting the program.")
exit()
for file in md.files_in_folder:
file_attrs = {}
for attr in attributes:
score = ""
while not isinstance(score, float):
score = input(f" -> `{attr}` metric for `{file}`: ")
try:
score = float(score)
except:
print("WARNING: Enter a valid float.")
score = ""
file_attrs[attr] = score
md.update_metadata(file, file_attrs)
| Turkish-Word-Embeddings/Word-Embeddings-Repository-for-Turkish | evaluation/package/experiment/__init__.py | __init__.py | py | 1,303 | python | en | code | 1 | github-code | 13 |
19904999357 | class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
return sum(nums[::2])
s = Solution()
a = s.arrayPairSum([1,4,3,2])
print(a) | littleliona/leetcode | easy/561.Array_partition_I.py | 561.Array_partition_I.py | py | 246 | python | en | code | 0 | github-code | 13 |
17122484874 | # -*- coding: utf-8 -*-
# base
# data science
import numpy as np
import pandas as pd
from scipy.stats import levene, ttest_ind, f_oneway
#from scipy.stats import ttest_rel, mannwhitneyu, skew, kurtosis
#from scipy.stats.mstats import kruskalwallis
#from statsmodels.formula.api import ols
#from statsmodels.stats.anova import anova_lm
from statsmodels.stats.multicomp import pairwise_tukeyhsd
# mechine learning
from sklearn.model_selection import GridSearchCV
from xgboost.sklearn import XGBRegressor
import xgboost as xgb
# plot
import matplotlib.pyplot as plt
import seaborn as sns
# Basic feature engineering - part 1
def data_transform_1(df, is_train):
    # handle missing values
    df.loc[df.Open.isnull(), "Open"] = 1      # stores with missing Open are treated as open by default
    df.loc[df.PromoInterval.isnull(), "PromoInterval"] = ""   # set NaN PromoInterval to ""
    # replace enumerated values; a = public holiday, b = Easter holiday, c = Christmas, 0 = None
mappings = {"0":0, "a":1, "b":2, "c":3, "d":4}
df.StoreType = df.StoreType.replace(mappings)
df.Assortment = df.Assortment.replace(mappings)
df.StateHoliday = df.StateHoliday.replace(mappings)
    # build calendar/time features
df["Year"] = df.Date.dt.year
df["Month"] = df.Date.dt.month
df["Day"] = df.Date.dt.day
df["DayOfWeek"] = df.Date.dt.dayofweek + 1
df["WeekOfYear"] = df.Date.dt.weekofyear
df["DayOfYear"] = df.Date.dt.dayofyear
df["Tenday"] = np.nan
df.loc[df.Day <= 10, "Tenday"] = 0
df.loc[(df.Day >= 11) & (df.Day <= 20), "Tenday"] = 1
df.loc[df.Day >= 21, "Tenday"] = 2
df.Tenday = df.Tenday.astype(np.int64)
    # days the competitor has been open as of the sales date, then binarized to 0/1
df["CompetitionOpen"] = 365*(df.Year-df.CompetitionOpenSinceYear) + 30*(df.Month-df.CompetitionOpenSinceMonth) + (df.Day-df.CompetitionOpenSinceDay)
df.CompetitionOpen = df.CompetitionOpen.apply(lambda x: 0 if x < 0 else x)
df.CompetitionOpen = df.CompetitionOpen.apply(lambda x: 1 if x >= 1 else 0)
    # use the open flag to adjust competitor distance: if the competitor has not opened yet, the distance is 0
df.loc[df.CompetitionOpen == 0, "CompetitionDistance"] = 0
    # months the long-term promotion (Promo2) has been running as of the sales date
df["Promo2Open"] = 12*(df.Year-df.Promo2SinceYear) + (df.WeekOfYear-df.Promo2SinceWeek)/4.0
df.Promo2Open = df.Promo2Open.apply(lambda x: 0 if x < 0 else x)
    # use Promo2 status and the current month to flag whether the store is in a Promo2 interval
MonthStr = {1:"Jan", 2:"Feb", 3:"Mar", 4:"Apr", 5:"May", 6:"Jun", 7:"Jul", 8:"Aug", 9:"Sept", 10:"Oct", 11:"Nov", 12:"Dec"}
df["MonthStr"] = df.Month.map(MonthStr)
df["InPromo2"] = 0
for interval in df.PromoInterval.unique():
if interval != "":
for month in interval.split(","):
df.loc[(df.MonthStr == month) & (df.Promo2Open > 0), "InPromo2"] = 1
    # feature list
if is_train:
features = ["Store","Date","DayOfWeek","Sales","Open","Promo","StateHoliday","SchoolHoliday",\
"StoreType","Assortment","CompetitionDistance","Year","Month","Day","WeekOfYear",\
"DayOfYear","Tenday","CompetitionOpen","InPromo2"]
else:
features = ["Id","Store","Date","DayOfWeek","Open","Promo","StateHoliday","SchoolHoliday",\
"StoreType","Assortment","CompetitionDistance","Year","Month","Day","WeekOfYear",\
"DayOfYear","Tenday","CompetitionOpen","InPromo2"]
df = df[features]
    # cast column dtypes
df.Store = df.Store.astype(np.int64)
df.DayOfWeek = df.DayOfWeek.astype(np.int64)
df.Open = df.Open.astype(np.int64)
df.Promo = df.Promo.astype(np.int64)
df.StateHoliday = df.StateHoliday.astype(np.int64)
df.SchoolHoliday = df.SchoolHoliday.astype(np.int64)
df.StoreType = df.StoreType.astype(np.int64)
df.Assortment = df.Assortment.astype(np.int64)
df.CompetitionDistance = df.CompetitionDistance.astype(np.float64)
df.Year = df.Year.astype(np.int64)
df.Month = df.Month.astype(np.int64)
df.Day = df.Day.astype(np.int64)
df.WeekOfYear = df.WeekOfYear.astype(np.float64)
df.DayOfYear = df.DayOfYear.astype(np.float64)
df.Tenday = df.Tenday.astype(np.int64)
df.CompetitionOpen = df.CompetitionOpen.astype(np.int64)
df.InPromo2 = df.InPromo2.astype(np.int64)
return df
# Basic feature engineering - part 2
def data_transform_2(df):
    # store list
store_list = list(set(df.Store))
    # new features
df["WillClosedTomorrow_TodayIsSat"] = 0
df["WillClosedTomorrow_TodayIsNotSat"] = 0
df["WasClosedYesterday_TodayIsMon"] = 0
df["WasClosedYesterday_TodayIsNotMon"] = 0
    # loop over every store
t0 = pd.Timestamp.now()
log = []
for store in store_list:
        # get the data for a single store
df_tmp = df[df.Store == store]
        # for "tomorrow", shift the Open column up by one step
df_tmp["Open_m1"] = df_tmp.Open.shift(-1)
df_tmp.loc[(df_tmp.DayOfWeek == 6) & (df_tmp.Open_m1 == 0), "WillClosedTomorrow_TodayIsSat"] = 1
df_tmp.loc[(df_tmp.DayOfWeek != 6) & (df_tmp.Open_m1 == 0), "WillClosedTomorrow_TodayIsNotSat"] = 1
        # get the matching indices
idx_1 = df_tmp[df_tmp.WillClosedTomorrow_TodayIsSat == 1].index
idx_2 = df_tmp[df_tmp.WillClosedTomorrow_TodayIsNotSat == 1].index
        # write the values at those indices back to the full table
df.loc[idx_1, "WillClosedTomorrow_TodayIsSat"] = 1
df.loc[idx_2, "WillClosedTomorrow_TodayIsNotSat"] = 1
        # for "yesterday", shift the Open column down by one step
df_tmp["Open_p1"] = df_tmp.Open.shift(1)
df_tmp.loc[(df_tmp.DayOfWeek == 1) & (df_tmp.Open_p1 == 0), "WasClosedYesterday_TodayIsMon"] = 1
df_tmp.loc[(df_tmp.DayOfWeek != 1) & (df_tmp.Open_p1 == 0), "WasClosedYesterday_TodayIsNotMon"] = 1
        # get the matching indices
idx_3 = df_tmp[df_tmp.WasClosedYesterday_TodayIsMon == 1].index
idx_4 = df_tmp[df_tmp.WasClosedYesterday_TodayIsNotMon == 1].index
        # write the values at those indices back to the full table
df.loc[idx_3, "WasClosedYesterday_TodayIsMon"] = 1
df.loc[idx_4, "WasClosedYesterday_TodayIsNotMon"] = 1
        # progress log
        log.append(store)
        if len(log) % 100 == 0:
            print("Processed %d stores" % len(log))
t1 = pd.Timestamp.now()
print(t1-t0)
    # cast column dtypes
df.WillClosedTomorrow_TodayIsSat = df.WillClosedTomorrow_TodayIsSat.astype(np.int64)
df.WillClosedTomorrow_TodayIsNotSat = df.WillClosedTomorrow_TodayIsNotSat.astype(np.int64)
df.WasClosedYesterday_TodayIsMon = df.WasClosedYesterday_TodayIsMon.astype(np.int64)
df.WasClosedYesterday_TodayIsNotMon = df.WasClosedYesterday_TodayIsNotMon.astype(np.int64)
return df
# Two independent-sample t-test
def two_sample_ttest(df, x, y, val_1, val_2, W, H, plot=True):
# mean_compare_table
mean_compare_table = df.groupby(x, as_index=False)[[y]].mean()
print(mean_compare_table)
a = df.loc[df[x] == val_1, y].tolist()
b = df.loc[df[x] == val_2, y].tolist()
if plot:
# plot-1
plt.figure(figsize=(W,H))
sns.violinplot(x, y, data=df)
# plot-2
plt.figure(figsize=(W,H))
sns.kdeplot(a, shade=True, label=val_1)
sns.kdeplot(b, shade=True, label=val_2)
# T-test
groups = [a, b]
levene_test = levene(*groups)
if levene_test.pvalue >= 0.05:
t_test = ttest_ind(a, b, equal_var=True) # standard independent 2 sample test
else:
t_test = ttest_ind(a, b, equal_var=False) # Welch's t-test
p_value = t_test.pvalue
    # conclusion
    if p_value <= 0.05:
        print(p_value)
        print("%s differs significantly across %s" % (y, x))
    else:
        print(p_value)
        print("%s does not differ significantly across %s" % (y, x))
return mean_compare_table
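# Hypothetical call (column names assumed from this project's training data):
#   two_sample_ttest(df, x="Promo", y="Sales", val_1=0, val_2=1, W=12, H=6)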
# One-way ANOVA
def oneway_anova(df, x, y, W, H, use_hsd=True, plot=True):
# mean_compare_table
mean_compare_table = df.groupby(x, as_index=False)[[y]].mean()
print(mean_compare_table)
if plot:
# plot
plt.figure(figsize=(W,H))
sns.violinplot(x, y, data=df)
# set group
val_list = list(set(df[x]))
groups = []
for val in val_list:
groups.append(df.loc[df[x] == val, y].tolist())
# anova
levene_test = levene(*groups)
    if levene_test.pvalue >= 0.05:
        print("variances are homogeneous")
        f_value, p_value = f_oneway(*groups)
    else:
        print("variances are not homogeneous")
        f_value, p_value = f_oneway(*groups) # f_oneway is used in both cases in practice
        #h_value, p_value = kruskalwallis(*groups)
    # conclusion
print(p_value)
if use_hsd:
hsd = pairwise_tukeyhsd(endog=df[y], groups=df[x], alpha=0.05)
print(hsd.summary())
return mean_compare_table
# Feature map file for XGBoost
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
# rmspe: root mean square percentage error, sqrt(mean((y_hat / y - 1)^2))
rmspe = lambda y, y_hat: np.sqrt(np.mean((y_hat/y-1)**2))
def rmspe_xg(y_hat, y):
y = np.expm1(y.get_label())
y_hat = np.expm1(y_hat)
return "rmspe", rmspe(y, y_hat)
# Training routine for a single model
def train(x_train, y_train, x_valid, y_valid, n_estimators_0, objective, eval_metric, scoring, rmspe_xg, kfold, esr):
    # 1 - set initial parameter values
    print("1 - set initial parameter values")
reg = XGBRegressor(
# General Parameters
booster="gbtree",
silent=1,
nthread=-1,
n_jobs=-1,
# Booster Parameters
learning_rate=0.1,
n_estimators=n_estimators_0,
gamma=0,
max_depth=7,
min_child_weight=0.001,
subsample=0.9,
colsample_bytree=0.9,
reg_alpha=0,
reg_lambda=1,
max_delta_step=0,
scale_pos_weight=1,
# Learning Task Parameters
objective=objective,
eval_metric=eval_metric,
seed=0
)
    # 2 - tune the optimal number of weak learners: n_estimators_1
    print("2 - tune the optimal number of weak learners: n_estimators_1")
xgb_param = reg.get_xgb_params()
d_train = xgb.DMatrix(x_train, y_train)
d_valid = xgb.DMatrix(x_valid, y_valid)
watchlist = [(d_train, "train"), (d_valid, "valid")]
t_begin = pd.Timestamp.now()
xgb_cv = xgb.cv(params=xgb_param,
dtrain=d_train,
num_boost_round=xgb_param["n_estimators"],
nfold=kfold,
feval=rmspe_xg,
#metrics=eval_metric,
early_stopping_rounds=int(xgb_param["n_estimators"]/esr),
verbose_eval=None)
t1 = pd.Timestamp.now()
n_estimators_1 = xgb_cv.shape[0]
reg.set_params(n_estimators=n_estimators_1)
xgb_param = reg.get_xgb_params()
print("分类器个数:%s, 用时:%s" % (n_estimators_1, (t1-t_begin)))
    # 3 - coarse grid search: learning_rate
    print("3 - coarse grid search: learning_rate")
param = {"learning_rate": [0.1, 0.2, 0.3]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_3 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
#model_3.grid_scores_; model_3.best_score_; model_3.best_estimator_
best_param = model_3.best_params_["learning_rate"]
reg.set_params(learning_rate=best_param)
xgb_param = reg.get_xgb_params()
print("learning_rate:%s, 用时:%s" % (best_param, (t1-t0)))
    # 4 - coarse grid search: max_depth, min_child_weight
    print("4 - coarse grid search: max_depth, min_child_weight")
param = {"max_depth": [3,5,7,9,11], "min_child_weight": [0.001, 0.01, 0.1, 1]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_4 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param_1 = model_4.best_params_["max_depth"]
best_param_2 = model_4.best_params_["min_child_weight"]
print("max_depth:%s,min_child_weight:%s,用时:%s" % (best_param_1, best_param_2, (t1-t0)))
    # 5 - fine grid search: max_depth
    print("5 - fine grid search: max_depth")
param = {"max_depth": [best_param_1-1, best_param_1, best_param_1+1]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_5 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param_1 = model_5.best_params_["max_depth"]
reg.set_params(max_depth=best_param_1)
xgb_param = reg.get_xgb_params()
print("max_depth:%s,用时:%s" % (best_param_1, (t1-t0)))
    # 6 - coarse grid search: gamma
    print("6 - coarse grid search: gamma")
param = {"gamma": [0, 0.5, 1, 1.5, 2, 2.5]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_6 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_6.best_params_["gamma"]
print("gamma:%s,用时:%s" % (best_param, (t1-t0)))
    # 7 - fine grid search: gamma
    print("7 - fine grid search: gamma")
if best_param == 0:
param = {"gamma": [0, 0.1, 0.2, 0.3, 0.4]}
else:
param = {"gamma": np.arange(best_param-0.2, best_param+0.3, 0.1)}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_7 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_7.best_params_["gamma"]
reg.set_params(gamma=best_param)
xgb_param = reg.get_xgb_params()
print("gamma:%s,用时:%s" % (best_param, (t1-t0)))
    # 8 - re-tune the optimal number of weak learners: n_estimators_2
    print("8 - re-tune the optimal number of weak learners: n_estimators_2")
reg.set_params(n_estimators=n_estimators_0)
xgb_param = reg.get_xgb_params()
t0 = pd.Timestamp.now()
xgb_cv = xgb.cv(params=xgb_param,
dtrain=d_train,
num_boost_round=xgb_param["n_estimators"],
nfold=kfold,
feval=rmspe_xg,
#metrics=eval_metric,
early_stopping_rounds=int(xgb_param["n_estimators"]/esr),
verbose_eval=None)
t1 = pd.Timestamp.now()
n_estimators_2 = xgb_cv.shape[0]
reg.set_params(n_estimators=n_estimators_2)
xgb_param = reg.get_xgb_params()
print("分类器个数:%s, 用时:%s" % (n_estimators_2, (t1-t0)))
    # 9 - coarse grid search: subsample, colsample_bytree
    print("9 - coarse grid search: subsample, colsample_bytree")
param = {"subsample": [0.6,0.7,0.8,0.9], "colsample_bytree": [0.6,0.7,0.8,0.9]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_8 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param_1 = model_8.best_params_["subsample"]
best_param_2 = model_8.best_params_["colsample_bytree"]
print("subsample:%s,colsample_bytree:%s,用时:%s" % (best_param_1, best_param_2, (t1-t0)))
    # 10 - fine grid search: subsample, colsample_bytree
    print("10 - fine grid search: subsample, colsample_bytree")
param = {"subsample": [best_param_1-0.05, best_param_1, best_param_1+0.05],
"colsample_bytree": [best_param_2-0.05, best_param_2, best_param_2+0.05]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_9 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param_1 = model_9.best_params_["subsample"]
best_param_2 = model_9.best_params_["colsample_bytree"]
reg.set_params(subsample=best_param_1, colsample_bytree=best_param_2)
xgb_param = reg.get_xgb_params()
print("subsample:%s,colsample_bytree:%s,用时:%s" % (best_param_1, best_param_2, (t1-t0)))
    # 11 - coarse grid search: reg_alpha
    print("11 - coarse grid search: reg_alpha")
param = {"reg_alpha": [0, 1, 2, 3]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_11 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_11.best_params_["reg_alpha"]
reg.set_params(reg_alpha=best_param)
xgb_param = reg.get_xgb_params()
print("reg_alpha:%s,用时:%s" % (best_param, (t1-t0)))
    # 12 - fine grid search: reg_alpha
    print("12 - fine grid search: reg_alpha")
if best_param == 0:
param = {"reg_alpha": [0, 0.1, 0.2, 0.3, 0.4, 0.5]}
else:
param = {"reg_alpha": np.arange(best_param-0.5, best_param+0.5, 0.2)}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_12 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_12.best_params_["reg_alpha"]
reg.set_params(reg_alpha=best_param)
xgb_param = reg.get_xgb_params()
print("reg_alpha:%s,用时:%s" % (best_param, (t1-t0)))
    # 13 - coarse grid search: reg_lambda
    print("13 - coarse grid search: reg_lambda")
param = {"reg_lambda": [1,3,5,7]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_13 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_13.best_params_["reg_lambda"]
reg.set_params(reg_lambda=best_param)
xgb_param = reg.get_xgb_params()
print("reg_lambda:%s,用时:%s" % (best_param, (t1-t0)))
    # 14 - fine grid search: reg_lambda
    print("14 - fine grid search: reg_lambda")
param = {"reg_lambda": np.arange(best_param-1, best_param+1, 0.2)}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_14 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param = model_14.best_params_["reg_lambda"]
reg.set_params(reg_lambda=best_param)
xgb_param = reg.get_xgb_params()
print("reg_lambda:%s,用时:%s" % (best_param, (t1-t0)))
    # 15 - fine grid search: max_delta_step, scale_pos_weight
    print("15 - fine grid search: max_delta_step, scale_pos_weight")
param = {"max_delta_step": [0, 1, 3, 5],
"scale_pos_weight": [1, 3, 5, 7]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_12 = reg_gscv.fit(x_train, y_train)
t1 = pd.Timestamp.now()
best_param_1 = model_12.best_params_["max_delta_step"]
best_param_2 = model_12.best_params_["scale_pos_weight"]
reg.set_params(max_delta_step=best_param_1, scale_pos_weight=best_param_2)
xgb_param = reg.get_xgb_params()
print("max_delta_step:%s,scale_pos_weight:%s,用时:%s" % (best_param_1, best_param_2, (t1-t0)))
    # 16 - re-tune the optimal number of weak learners: n_estimators_3
    print("16 - re-tune the optimal number of weak learners: n_estimators_3")
reg.set_params(n_estimators=n_estimators_0)
xgb_param = reg.get_xgb_params()
t0 = pd.Timestamp.now()
xgb_cv = xgb.cv(params=xgb_param,
dtrain=d_train,
num_boost_round=xgb_param["n_estimators"],
nfold=kfold,
feval=rmspe_xg,
#metrics=eval_metric,
early_stopping_rounds=int(xgb_param["n_estimators"]/esr),
verbose_eval=None)
t1 = pd.Timestamp.now()
n_estimators_3 = xgb_cv.shape[0]
reg.set_params(n_estimators=n_estimators_3)
xgb_param = reg.get_xgb_params()
print("分类器个数:%s, 用时:%s" % (n_estimators_3, (t1-t0)))
    # 17 - fine grid search: learning_rate
    print("17 - fine grid search: learning_rate")
lr = xgb_param["learning_rate"]
param = {"learning_rate": [lr-0.05, lr, lr+0.05]}
reg_gscv = GridSearchCV(estimator=reg, param_grid=param, scoring=scoring, n_jobs=-1, iid=False, cv=kfold)
t0 = pd.Timestamp.now()
model_16 = reg_gscv.fit(x_train, y_train)
t_1 = pd.Timestamp.now()
best_param = model_16.best_params_["learning_rate"]
reg.set_params(learning_rate=best_param)
xgb_param = reg.get_xgb_params()
print("learning_rate:%s,用时:%s" % (best_param, (t_1-t0)))
    # 18 - final training
    print("18 - final training")
model_res = xgb.train(params=xgb_param,
dtrain=d_train,
num_boost_round=xgb_param["n_estimators"],
evals=watchlist,
feval=rmspe_xg,
early_stopping_rounds=int(xgb_param["n_estimators"]/esr))
t_end = pd.Timestamp.now()
print("参数训练完毕,总用时:%s" % (t_end-t_begin))
return model_res, reg
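# Hypothetical usage sketch (argument values assumed, not from the original project;
# note that rmspe_xg applies expm1, so the targets are expected to be log1p-transformed):
#   model, reg = train(x_train, np.log1p(y_train), x_valid, np.log1p(y_valid),
#                      n_estimators_0=1000, objective="reg:linear",
#                      eval_metric="rmse", scoring="neg_mean_squared_error",
#                      rmspe_xg=rmspe_xg, kfold=5, esr=10)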
| Lukaschen1986/Udacity-Capstone-Project | 3-script/func_detail.py | func_detail.py | py | 21,156 | python | en | code | 0 | github-code | 13 |
5455567620 | from .models import Paciente, Perfil, Student  # Perfil and Student are used below; assumed to be defined in .models
import django
import sys
def imagen_usuario(request):
try:
imagen = None
usuario = request.usuario
up = Perfil.objects.get(perfil_usuario=usuario)
#print up
imagen = 'http://localhost:8000/media/%s'%up.imagen
    except Exception:
imagen = 'http://localhost:8000/media/debian.jpg'
return imagen
def myprocessors(request):
get_version_django = django.get_version()
get_version_python = sys.version
p = Student.objects.get(id=1)
    dic = {'valor':p, 'get_image_profile':imagen_usuario(request), 'django_version': get_version_django, 'python_version': get_version_python}
return dic
| guille1194/Django-Practices | practica23/demo/apps/home/processors.py | processors.py | py | 611 | python | es | code | 0 | github-code | 13 |
33246234689 | num = [4,3,2,7,9,2,3,1]
n = len(num)
num = set(num)
output = list()
for i in range(1, n + 1):  # candidates are 1..n, so the upper bound must be n + 1
if i in num:
continue
else:
output.append(i)
print(output)
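# Equivalent one-liner using set difference (num is already a set here):
#   print(sorted(set(range(1, n + 1)) - num))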
| Narek-Papyan/ml | Practical_5/find-all-numbers-disappeared-in-an-array.py | find-all-numbers-disappeared-in-an-array.py | py | 173 | python | en | code | 0 | github-code | 13 |
44993182135 | # -*- coding: utf-8 -*-
"""
Author: whung
This is the main script to run the fire spread model.
"""
import numpy as np
import pandas as pd
import os
'''Settings'''
namelist = pd.read_csv('./input/namelist', header=None, delimiter='=')
namelist = namelist[1]
## input/output files
frp_input = namelist[0].replace(' ', '')
model_input = namelist[1].replace(' ', '')
model_output = namelist[2].replace(' ', '')
## frp source
frp_source = int(namelist[3]) # 0: rave
## forecast period
time_start = str(namelist[4].replace(' ', '')) # yyyymmddHHMM
time_end = str(namelist[5].replace(' ', '')) # yyyymmddHHMM
time_freq = int(namelist[6].replace(' ', '')) # unit: hr
time = pd.date_range(start=time_start, end=time_end, freq=str(time_freq)+'H')
## domain
lat_lim = [float(namelist[7]), float(namelist[8])]
lon_lim = [float(namelist[9]), float(namelist[10])]
## function options
opt_frpgen = int(namelist[11]) # input generator option (0: off, 1: on)
opt_inputgen = int(namelist[12]) # input generator option (0: off, 1: on)
opt_forecast = int(namelist[13]) # forecast model option (0: off, 1: on)
opt_mapgen = int(namelist[14]) # FRP map generator option (0: off, 1: on)
opt_corr = int(namelist[15]) # model correction option (0: off, 1: on)
## output scaling option
scale_opt = int(namelist[16]) # output scale option (0: off, 1: on)
scale_val = float(namelist[17]) # output scale factor
TT = len(time)
print('--------------------')
print('---- Fire spread model initializing...')
print('---- Model cycle:', time_start, '-', time_end, ', freq=', time_freq, 'H, cycle=', TT)
print('---- Model domain:', lat_lim, lon_lim)
if not os.path.exists('./input/'+time_start):
os.makedirs('./input/'+time_start)
if not os.path.exists('./output/'+time_start):
os.makedirs('./output/'+time_start)
f_frp = './input/'+time_start+'/'+frp_input+'.'+time_start+'.nc'
'''Creating initial FRP file'''
if opt_frpgen == 1:
if frp_source == 0:
import rave_preprocessor
code = rave_preprocessor.preprocessor(frp_input, time_start, lat_lim, lon_lim)
if code == 0:
print('---- Initial FRP generated!')
elif code == 1:
print('No available or unknown initial FRP. Model Terminated!')
exit()
'''Running the Model'''
for i in np.arange(TT):
print('--------------------')
print('---- Cycle t+'+str(i+1), time[i].strftime('%Y%m%d%H%M'), ' running...')
f_input = './input/'+time_start+'/'+model_input+'.'+time_start+'.f'+('%02i'%i)+'.nc'
f_output = './output/'+time_start+'/'+model_output+'.'+time_start+'.f'+('%02i'%(i+1))+'.nc'
## generate model input based on gridded frp
if opt_inputgen == 1:
import fire_inputgen
code = fire_inputgen.main_driver(i, f_frp, f_input, lat_lim, lon_lim)
if code == 0:
print('---- Input generated!')
elif code == 1:
print('---- No available input. Model terminated!')
exit()
elif code == 2:
print('---- No available fire frame. Model terminated!')
exit()
## run model forecast + post-process
if opt_forecast == 1:
import fire_forecast
fire_forecast.spread_forecast(f_input, f_output)
print('---- Spread prediction generated!')
## model correction
if opt_corr == 1:
from fire_corr import mlr_correction
mlr_correction(f_input, f_output)
print('---- Intensity prediction generated!')
## generate predicted fire maps
if opt_mapgen == 1:
from fire_mapgen import fire_mapper
fire_mapper(f_frp, f_output, opt_corr, scale_opt, scale_val)
print('---- Fire map generated!')
## prepare for next cycle
if i != TT-1:
f_frp = f_output
print('---- Cycle t+'+str(i+1), time[i].strftime('%Y%m%d%H%M'), ' complete!!!')
del [f_input, f_output]
'''Create model complete file'''
with open('./output/'+time_start+'/complete_flag.txt', 'w') as file:
file.write('MODEL COMPLETE!\n')
| angehung5/fire-spread-model | src/fire_model.py | fire_model.py | py | 4,099 | python | en | code | 0 | github-code | 13 |
28298767939 | def check_rhythm(poem):
syllables = []
for phrase in poem.split():
phrase_syllables = []
for word in phrase.split('-'):
num_syllables = count_syllables(word)
phrase_syllables.append(num_syllables)
syllables.append(phrase_syllables)
return all(phrase == syllables[0] for phrase in syllables)
def count_syllables(word):
vowels = 'аеёиоуыэюя'
count = sum(1 for letter in word.lower() if letter in vowels)
return count
# Read in the poem
poem = input("Введите стихотворение Винни-Пуха: ")
# Check the number of words in the poem
if len(poem.split()) == 1:
print("Это одно слово, нужно ввести стихотворение")
else:
    # Check the rhythm
if check_rhythm(poem):
print("Парам пам-пам")
else:
print("Пам парам")
| AndreiZaRich/python_1 | Task_34.py | Task_34.py | py | 970 | python | en | code | 0 | github-code | 13 |
10573975877 | from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import scipy.io as sio
import numpy as np
# Process images of this size. Changing this number affects the entire model
# architecture.
IMAGE_SIZE = 24
# Global constants describing the SVHN data set
NUM_CLASSES = 11
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_svhn_mat_files(filenames):
data = None
labels = None
# Dict Keys from mat files
# mat file keys :['y', 'X', '__version__', '__header__', '__globals__' ]
for mat_file in filenames:
if not tf.gfile.Exists(mat_file):
raise ValueError('Failed to find file: ' + mat_file)
print(mat_file)
with open(mat_file, 'rb') as m:
save = sio.loadmat(m)
s_data = save['X']
s_labels = save['y']
del save
print('data set', s_data.shape, s_labels.shape)
        # samples are stacked on the last axis of X, shape (32, 32, 3, N), so concatenate there
        data = np.append(data, s_data, axis=3) if data is not None else s_data
labels = np.append(labels, s_labels, axis=0) if labels is not None else s_labels
print('Data set: ', data.shape, labels.shape)
    data_t = data.transpose(3, 0, 1, 2)  # (H, W, C, N) -> (N, H, W, C)
return data_t, labels
def read_svhn_to_queue(filenames):
data, labels= read_svhn_mat_files(filenames)
data_queue = tf.train.input_producer(data, shuffle=False)
labels_queue = tf.train.input_producer(labels, shuffle=False)
return data_queue, labels_queue
def read_svhn_reader(data_q, labels_q):
return data_q.dequeue(), labels_q.dequeue()
def read_svhn(filenames):
class SVHNRecord(object):
pass
result = SVHNRecord()
# Dimensions of the images in the svhn dataset.
# See http://ufldl.stanford.edu/housenumbers/ for a description of the
# input format.
label_bytes = 1
result.height = 32
result.width = 32
result.depth = 3
data_q, label_q = read_svhn_to_queue(filenames)
data, label = read_svhn_reader(data_q, label_q)
print(data.get_shape(), data.dtype)
print(label.get_shape(), label.dtype)
result.label = tf.cast(label, tf.int32)
# depth_major = tf.reshape(data, [result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
# result.uint8image = tf.transpose(depth_major, [1, 2, 0])
result.uint8image = data
print(data.get_shape(), data.dtype)
print(result.label.get_shape(), result.label.dtype)
return result
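# Note: in the official SVHN .mat files the digit '0' is stored as label 10
# (labels run 1..10), which is presumably why NUM_CLASSES is set to 11 above,
# so the raw labels can be used without remapping.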
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, shuffle):
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 16
if shuffle:
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples
#capacity=1280 + 3 * batch_size,
#min_after_dequeue=1280
)
else:
images, label_batch = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
tf.image_summary('images', images)
print ('mark: _generate_image_and_label_batch')
return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
filenames = [os.path.join(data_dir, 'train_32x32.mat')]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
print(filenames)
# Read examples from files in the filename queue.
read_input = read_svhn(filenames)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order of their application.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(distorted_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
    print('Filling queue with %d SVHN images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)
print ('mark: distorted_inputs')
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
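# Minimal usage sketch (illustrative only; assumes this file is importable as
# svhn_input, 'data/' holds the .mat files, and the old queue-based TF API):
#   images, labels = svhn_input.distorted_inputs('data', batch_size=128)
#   with tf.Session() as sess:
#       tf.train.start_queue_runners(sess=sess)
#       ...  # run training ops on images/labels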
def inputs(eval_data, data_dir, batch_size):
if not eval_data:
filenames = [os.path.join(data_dir, 'train_32x32.mat')]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_32x32.mat')]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Read examples from files in the filename queue.
read_input = read_svhn(filenames)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False)
| ccuulinay/udacity_deep_learning | svhn/svhn_input.py | svhn_input.py | py | 7,186 | python | en | code | 0 | github-code | 13 |
29294167278 | from collections import defaultdict
import math
input_file = 'day-20/input.txt'
def get_divisors(n):
small_divisors = [i for i in range(1, int(math.sqrt(n)) + 1) if n % i == 0]
    large_divisors = [n // d for d in small_divisors if n != d * d]
return small_divisors + large_divisors
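# Example: get_divisors(12) -> [1, 2, 3, 12, 6, 4] (order follows construction),
# so in part 1 house 12 receives 10 * (1 + 2 + 3 + 4 + 6 + 12) = 280 presents.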
def part1(input):
target = int(input)
house_no = 0
while True:
        house_no += 1
presents = sum(get_divisors(house_no)) * 10
if presents >= target:
return house_no
def part2(input):
target = int(input)
house_no = 0
while True:
        house_no += 1
presents = sum(d for d in get_divisors(house_no) if house_no / d <= 50) * 11
if presents >= target:
return house_no
if __name__ == "__main__":
with open(input_file) as f:
data = f.read()
print("Part 1: ", part1(data))
print("Part 2: ", part2(data)) | stevenhorsman/advent-of-code-2015 | day-20/infinite_elves.py | infinite_elves.py | py | 833 | python | en | code | 0 | github-code | 13 |
16541950589 | '''
* no. of inputs: 4
- 1 string (f) [the function which will be integrated]
- 3 numbers
- n [count of points in Gauss formula which may have value of 2 or 3 only]
- 2 limits of integration which may have values from -1e9 (negative infinity) to 1e9 (infinity)
* no. of outputs: 1 [Value of Integration]
'''
'''
Team members:
1- Omar Mohammed Mohammed
2- Walid Mohammed
3- Yahia Ali
'''
from math import *
def solve(symF, polyorder, a, b, flag = 0):
xs = None
Ws = None
if polyorder == 2:
xs = [-0.57735027, 0.57735027]
Ws = [1,1]
elif polyorder == 3:
Ws = [0.55555556, 0.88888889, 0.55555556]
xs = [-0.77459667,0,0.77459667]
if not xs or not Ws:
print("Error! Inappropriate number of points for gauss formula!")
return "error"
ans = 0.0
for i in range (len(Ws)):
expr = ((a+b) * 0.5) + ((b-a) * 0.5 * xs[i])
ans += (b-a)*0.5*Ws[i]*calculate(symF, expr, flag)
return ans
def calculate(f, n, flag):
if flag:
x = 1/n
return eval(f) * (-1/(n*n))
else:
x = n
return eval(f)
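# When flag is set, `calculate` evaluates the integrand obtained from the
# substitution x = 1/t (so dx = -dt/t**2): integrals with an infinite limit
# are mapped onto a finite interval and the Gauss nodes are applied there.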
def Integration_GaussLegendre(f, n, a, b):
f = str(f) # you should make type casting if you enter sympy string
sign = 1
if a > b:
a, b = b, a
sign = -1
if a == -1e9: # negative infinity
if b == 0:
ans = solve(f, n, 0, -1, flag = 1) + solve(f, n, -1, 0)
elif b == 1e9:
ans = solve(f, n, 0, 1, flag = 1) + solve(f, n, 1, 0, flag = 1)
else:
ans = solve(f, n, 0, 1/b, flag = 1)
elif a == 0:
if b == 1e9:
ans = solve(f, n, 0, 1) + solve(f, n, 1, 0, flag = 1)
else:
ans = solve(f, n, a, b)
else:
if b == 1e9: # infinity
ans = solve(f, n, 1/a, 0, flag = 1)
else:
ans = solve(f, n, a, b)
return ans * sign
'''
A test case:
equation = "1/(x**2 + 4)"
n = 2
a = 2
b = 1e9
print (Integration_GaussLegendre(equation, n, a, b))
''' | NumericalA/Numerical | Team 7 - Integration with Gauss-Legendre.py | Team 7 - Integration with Gauss-Legendre.py | py | 2,054 | python | en | code | 1 | github-code | 13 |
75077652176 | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Flatten
def dice_coefficient(y_true, y_pred,smooth = 100):
y_true_flatten = K.flatten(y_true)
y_pred_flatten = K.flatten(y_pred)
intersection = K.sum(y_true_flatten * y_pred_flatten)
union = K.sum(y_true_flatten) + K.sum(y_pred_flatten)
    return (2 * intersection + smooth) / (union + smooth)
def dice_coefficient_loss(y_true,y_pred,smooth = 100):
return -dice_coefficient(y_true,y_pred,smooth)
# We return the negative because a high dice coefficient means high prediction
# accuracy; the loss must be a quantity that is high when accuracy is low, so
# we take the opposite (negative) of the coefficient.
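# Minimal usage sketch (illustrative; assumes `model` is a tf.keras
# segmentation model whose outputs and masks share a shape):
#   model.compile(optimizer='adam',
#                 loss=dice_coefficient_loss,
#                 metrics=[dice_coefficient, iou])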
def iou(y_true, y_pred, smooth=100):
intersection = K.sum(y_true * y_pred)
sum = K.sum(y_true + y_pred)
iou = (intersection + smooth) / (sum - intersection + smooth)
return iou
def jaccured_distance(y_true,y_pred,smooth = 100):
y_true_flatten = K.flatten(y_true)
y_pred_flatten = K.flatten(y_pred)
return -iou(y_true_flatten,y_pred_flatten,smooth) | Vampaxx/brain_tumor_Unet | src/brain_tumor/utils/loss_functions.py | loss_functions.py | py | 1,159 | python | en | code | 0 | github-code | 13 |
74011888656 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 11:09:42 2021
@author: dingxu
"""
import numpy as np
from sklearn.cluster import KMeans,DBSCAN,AgglomerativeClustering
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import mixture
from matplotlib.pyplot import MultipleLocator
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
import imageio
from skimage import draw
data = np.loadtxt('NGC4337.txt')
print(len(data))
#data = data[data[:,2]>0]
#data = data[data[:,2]<1]
data = data[data[:,3]<15]
data = data[data[:,3]>-15]
data = data[data[:,4]<15]
data = data[data[:,4]>-15]
X = np.copy(data[:,0:5])
X = StandardScaler().fit_transform(X)
#X = MinMaxScaler().fit_transform(X)
data_zs = np.copy(X)
clt = DBSCAN(eps = 0.21, min_samples = 14)
datalables = clt.fit_predict(data_zs)
r1 = pd.Series(datalables).value_counts()
print(r1)
datapro = np.column_stack((data ,datalables))
highdata = datapro[datapro[:,8] == 0]
lowdata = datapro[datapro[:,8] == -1]
np.savetxt('highdata.txt', highdata)
np.savetxt('lowdata.txt', lowdata)
temp = [0 for i in range (50)]
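# lendata builds a radial density profile: it counts stars in 1-arcminute-wide
# annuli around (RAmean, DECmean) and normalises each count by the annulus
# area (this assumes the coordinates are given in degrees).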
def lendata(datax,RAmean, DECmean):
data = np.copy(datax)
lendata = len(data)
for i in range(lendata):
x = data[i][0]
y = data[i][1]
d = np.sqrt((x-RAmean)**2+(y-DECmean)**2)
for j in range(0,50):
if (d<(j+1)/60 and d>j/60):
temp[j] = temp[j]+1
for i in range(0,50):
        s = ((i + 1)**2 - i**2) * np.pi  # annulus area in arcmin^2
temp[i] = np.float64(temp[i])/s
return temp
plt.figure(1)
plt.scatter(lowdata[:,0], lowdata[:,1], c = 'b', marker='o', s=0.01)
plt.scatter(highdata[:,0], highdata[:,1], c ='r', marker='o', s=1)
plt.xlabel('RA',fontsize=14)
plt.ylabel('DEC',fontsize=14)
plt.plot(np.mean(highdata[:,0]),np.mean(highdata[:,1]),'o',c='g')
plt.figure(2)
plt.hist(lowdata[:,0], bins=500, density = 1, facecolor='blue', alpha=0.5)
plt.hist(highdata[:,0], bins=10, density = 1, facecolor='red', alpha=0.5)
print('RAmean = ', np.mean(highdata[:,0]))
print('RAstd = ', np.std(highdata[:,0]))
print('DECmean = ', np.mean(highdata[:,1]))
print('DECstd = ', np.std(highdata[:,1]))
RAmean = np.mean(highdata[:,0])
DECmean = np.mean(highdata[:,1])
temp = lendata(data,RAmean, DECmean)
plt.figure(3)
xr = np.arange(0,50,1)
#plt.plot(np.log10(xr), np.log10(temp), '.')
plt.plot(xr, temp, '.') | dingxu6207/NGC4337 | NGC4337/RDP.py | RDP.py | py | 2,517 | python | en | code | 0 | github-code | 13 |
42081421416 | def solution(array):
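    # Example: solution([1, 2, 3, 3, 3, 4]) -> 3; solution([1, 1, 2, 2]) -> -1 (tie)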
answer = 0
max_ = 0
visit = [0] * 1000
for i in array:
visit[i] += 1
    max_ = max(visit)
    answer = visit.index(max_)
    # a unique mode is required; ties mean there is no single answer
    if visit.count(max_) > 1:
        answer = -1
return answer | HotBody-SingleBungle/HBSB-ALGO | HB/pysrc/프로그래머스/레벨0/Day3/최빈값_구하기.py | 최빈값_구하기.py | py | 315 | python | en | code | 0 | github-code | 13 |
5074443035 |
import logging
from .url import UrlMgr
from .helper import urldecode, normalize_title
log = logging.getLogger(__name__)
# maintains lowlevel information about the video file
# basically name, title and stream object
class VideoInfo(object):
def __init__(self, url):
self.subdir = ""
self.flv_url = ""
self.title = ""
self.has_stream = False
# don't set the following values because they are set by getattr
# self.stream = None
if isinstance(url, UrlMgr):
self.stream_url = url.url
else:
self.stream_url = urldecode(url)
    def __hash__(self):
        # the numeric hash of the stream url; get_title() prefixes it with "h"
        # so the fallback still makes a good filename when title-extraction fails
        return hash(self.stream_url)
def __getattr__(self, key):
if key == 'subdir':
return self.get_subdir()
elif key == 'stream_url':
return self.get_stream()
elif key == 'stream':
self.get_stream()
return self.stream
elif key == 'flv_url':
return self.get_flv()
        elif key == 'flv_type':
            if self.has_stream:
                self.flv_type = self.stream.ename
            else:
                self.flv_type = None
            return self.flv_type
        # unknown attributes should raise instead of silently returning None
        raise AttributeError(key)
def __repr__(self):
return "%s: %s .-. %s" % (self.__class__.__name__, self.flv_type, self.title)
def get_subdir(self):
self.subdir = self.name
return self.subdir
def get_flv(self):
self.flv_url = self.stream.get(self)
return self.flv_url
def get_title(self):
log.error("TITLE must be downloaded from overviewpage")
if not self.title:
# it isn't fatal if we don't have the title, just use the own hash, which should be unique
# maybe in future, we should set a variable here, so that we know from outside,
# if the title is only the hash and we need to discover a better one
            self.title = 'h%s' % hash(self)  # "h" prefix keeps the fallback filename sane; normalize_title isn't needed
log.info('couldnt extract title - will now use the hash from this url: %s', self.title)
else:
self.title = normalize_title(self.title)
return self.title
def get_stream(self):
# to avoid recursive dependencies, import it here
from .plugins import getStreamByLink
stream = getStreamByLink(self.stream_url)
if self.flv_type: # normally a stream knows its flv_type - but redirection pages don't..
stream.flv_type = self.flv_type
# this would open the page and look for common flash embedding to find a link for the download
# I think this code doesn't belong here and should go to each individual page extractor (only if needed - most won't need this)
# if stream is None:
# streamData = extract_stream(UrlMgr(url=self.stream_url).data)
# if streamData and streamData['url']:
# stream = findStream(streamData['url'])
# self.stream_url = streamData['url']
if stream is None or stream.flvUrl is None:
log.warning('couldn\'t find a supported streamlink in: %s', self.stream_url)
self.stream_url = None
self.stream = None
return None
self.stream = stream
self.has_stream = True
return self.stream_url
| balrok/Flashget | flashget/videoinfo.py | videoinfo.py | py | 3,500 | python | en | code | 6 | github-code | 13 |
22295443747 | import json
import unittest
import ddt
import requests
from common import requests_handler
from middleware.handler import Handler
# 初始化
logger = Handler.logger
test_data = Handler.excel.read_data("shop_register")
env_data = Handler()
@ddt.ddt
class RegTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
# cls.token = env_data.token
# cls.member_id = env_data.member_id
def setUp(self) -> None:
self.db = env_data.MysqlDbClient()
self.session = env_data.login_easy()
def tearDown(self) -> None:
self.db.close()
self.session.close()
@ddt.data(*test_data)
def test_add_success(self, test_info):
data = json.loads(test_info["data"])
headers = json.loads(test_info["headers"])
        url = env_data.yaml["host"]["pf"] + "/" + test_info["interface"]
        print(self.session.cookies)
        reg_res = self.session.post(url=url, headers=headers, json=data)
        print(reg_res.status_code)
        print(reg_res.json())
        assert reg_res.status_code == 200
print(reg_res.json())
assert reg_res.json()["success"] is True
#
# if "#member_id#" in data:
# data = data.replace("#member_id#", str(self.member_id))
#
# headers = test_info["headers"]
#
# if "#token#" in headers:
# headers = headers.replace("#token#", self.token)
#
#
# if test_info["check_sql"]:
# before_loan = self.db.query(
# "SELECT * FROM futureloan.loan WHERE member_id={}".format(self.member_id),
# one=False
# )
#
# data = eval(data)
# headers = json.loads(headers)
#
# # 查询之前的余额
#
# resp = requests_handler.visit(
# url=env_data.yaml["host"] + test_info["url"],
# method=test_info["method"],
# headers=headers,
# json=data
# )
#
# expected = json.loads(test_info["expected"])
# self.assertEqual(expected["code"], resp["code"])
# self.assertEqual(expected["msg"], resp["msg"])
#
# if resp["code"] == 0:
# after_loan = self.db.query(
# "SELECT * FROM futureloan.loan WHERE member_id={}".format(self.member_id),
# one=False
# )
# self.assertEqual(len(before_loan) + 1, len(after_loan))
| indyix/auto_interface | tests/1test_reg.py | 1test_reg.py | py | 2,496 | python | en | code | 0 | github-code | 13 |
36441370954 | import justpy as jp
def app():
# Create the Quasar webpage
wp = jp.QuasarPage()
# Create QDiv components linked to the webpage
h1 = jp.QDiv(a=wp, text="Analysis of Course Reviews", classes="text-h3 text-center")
# Typography classes list in https://quasar.dev/style/typography
p1 = jp.QDiv(a=wp, text="These graphs represent course review analysis")
return wp
jp.justpy(app)
| daniel-ob/python-mega-course | app8_DataVisualisationWebApp/0-simple-app.py | 0-simple-app.py | py | 408 | python | en | code | 1 | github-code | 13 |
163708056 | import json
from PIL import Image, ImageDraw, ImageFont
from PIL import ImagePath
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
import pandas as pd
import sys
from matplotlib.backends.backend_pdf import PdfPages
import explore as ex
def get_font(fontsize=40):
"""Attempts to retrieve a reasonably-looking TTF font from the system.
We don't make much of an effort, but it's what we can reasonably do without
incorporating additional dependencies for this task.
"""
if sys.platform == 'win32':
font_names = ['Arial']
elif sys.platform in ['linux', 'linux2']:
font_names = ['DejaVuSans-Bold', 'DroidSans-Bold']
elif sys.platform == 'darwin':
font_names = ['Menlo', 'Helvetica']
font = None
for font_name in font_names:
try:
font = ImageFont.truetype(font_name,fontsize)
break
except IOError:
continue
return font
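# makeMask rasterises a polygon (a list of (x, y) tuples) into a boolean numpy
# array matching the image size, True inside the annotated region.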
def makeMask(size, tups):
    black = Image.new('1', size)
    imd = ImageDraw.Draw(black)
    imd.polygon(tups, fill="white", outline="white")
    return np.array(black)
res = []
pats = os.listdir("PreviewImages")
pats = [pat for pat in pats if "." not in pat]
pixthresh = 0
font = ImageFont.truetype("LEMONMILK-Regular.otf",75)
for pat in pats:
print(pat)
annotationfiles = os.listdir(os.path.join("PreviewImages",pat,"geojson_annotations"))
annotationfiles = [f for f in annotationfiles if ".geojson" in f]
imdir = os.path.join("PreviewImages",pat)
files = os.listdir(imdir)
ims = [f for f in files if (".jpg" in f) and ("_Q." not in f)]
im = Image.open(os.path.join(imdir,ims[0]))
allblack = Image.new('L', im.size)
imdall = ImageDraw.Draw(allblack)
for annotationfile in annotationfiles:
annroot = annotationfile.replace(".geojson","")
anndir = os.path.join("PreviewImages",pat,annroot)
if not os.path.exists(anndir):
os.mkdir(anndir)
black = Image.new('L', im.size)
red = Image.new('L', im.size)
with open(os.path.join("PreviewImages",pat,"geojson_annotations",annotationfile)) as f:
data = json.load(f)
Nann = len(data["features"])
coordvec = []
for f in range(0,Nann):
blob = data["features"][f]["geometry"]
if blob["type"] == "LineString":
coords = blob["coordinates"]
if blob["type"] == "Polygon":
coords = blob["coordinates"][0]
coordvec.append(coords)
tups = [tuple(coord) for coord in coords]
mask = makeMask(im.size,tups)
# Let's have a look at raw data in multipage tiff files
fnames = []
for root,subdir,files in os.walk(os.path.join("MultiTIFFs",pat)):
for fnm in files:
if ".tiff" in fnm:
fnames.append(os.path.join(root,fnm))
if len(fnames)>1:
print("Error: More than one .tiff file found for "+pat)
break
images,metals,unsorted_labels = ex.parseMultiTIFF(fnames[0])
labdict = {lab:i for i,lab in enumerate(unsorted_labels)}
labels = sorted(unsorted_labels, key=str.casefold)
area = np.sum(mask)
corrs = np.zeros((len(unsorted_labels),len(unsorted_labels)))
fig = plt.figure(constrained_layout=True, figsize=(25*0.75, 15*0.75), dpi=100)
for l,label in enumerate(labels):
i = labdict[label]
chanpix = images[i,:,:]
allpix = chanpix[mask]
qmax = 0.95
eps = 0.5
pixtqm = np.quantile(chanpix[mask],qmax)
pixtqa = np.quantile(chanpix,qmax)
# Write log intensity histogram to .pdf
mlabel = "log("+label+"+0.5)"
print("Plotting histogram: "+pat+" "+annroot+'_{:04d} '.format(f)+label)
sp = plt.subplot(6,10,l+1)
ldat = np.log(allpix+eps)
n, bins, patches = plt.hist(ldat, bins = np.linspace(-1,12,131), range=[np.log(pixthresh+eps),np.max(np.log(chanpix+eps))],density=True, facecolor='g', alpha=0.75, figure=fig,log=True)
changebin = 1+5+np.argmax(np.abs(np.diff(n[5:])))
pixtadapt = np.exp(bins[changebin])-eps
nadapt = n[changebin]
l = plt.axvline(np.log(np.ceil(pixthresh+eps)),color="red",linestyle="dashed")
l2 = plt.axvline(np.log(np.ceil(pixtadapt+eps)),color=(0,0,1,0.25),linestyle="dashed")
l3 = plt.axvline(np.log(np.ceil(pixtqm+eps)), color="pink",linestyle="dashed")
l4 = plt.axvline(np.log(np.ceil(pixtqa+eps)), color="orange",linestyle="dashed")
pt = plt.xlabel(mlabel,figure=fig)
tx = plt.text(1.05*np.log(np.ceil(pixtadapt+eps)),0.3*np.max(n),str(round(np.log(np.ceil(pixtadapt+eps)),3)),color="blue",ha="left")
pixthresh = pixtadapt
maskp = np.logical_and(mask,chanpix > pixthresh)
pospix = chanpix[maskp]
mean = np.mean(pospix)
median = np.median(pospix)
mean_all = np.mean(allpix)
median_all = np.median(allpix)
posarea = np.sum(maskp)
posfrac = float(posarea)/float(area)
for l2,label2 in enumerate(labels):
j = labdict[label2]
pospix2 = images[j,:,:][maskp]
corrs[i,j] = np.corrcoef(pospix,pospix2)[0,1]
summ = {
"image":pat,
"annotation_file":annotationfile,
"roi_number":f,
"label":label2,
"area":area,
"mean_intensity_pos_signal":mean,
"median_intensity_pos_signal":median,
"mean_intensity_all_pixels":mean_all,
"median_intensity_all_pixels":median_all,
"pos_area":posarea,
"pos_fraction":posfrac
}
res.append(summ)
st = fig.suptitle(pat+" "+annroot+'_{:04d} '.format(f), fontsize=14)
plt.savefig(os.path.join(anndir,pat+"_"+annroot+'_{:04d}.png'.format(f)),dpi=400)
print("Writing correlation matrix to file: "+'{:04d}'.format(f))
df = pd.DataFrame(corrs,columns=labels,index=labels)
df.to_csv(os.path.join(anndir,pat+"_"+annroot+"_CorrelationMatrix_"+'ROI{:04d}'.format(f)+".csv"))
# Convert to long format
b = np.tile(df.columns, len(df.index))
a = np.repeat(df.index, len(df.columns))
c = df.values.ravel()
df_long = pd.DataFrame({'label_a':a, 'label_b':b, 'correlation':c})
df_long = df_long[df_long.label_a!=df_long.label_b]
df_long = df_long.sort_values("correlation",ascending=False)
df_long.to_csv(os.path.join(anndir,pat+"_"+annroot+"_CorrelationsRanked_"+'ROI{:04d}'.format(f)+".csv"), index=False)
for lab in labels:
i = labdict[lab]
for j,coords in enumerate(coordvec):
tups = [tuple(coord) for coord in coords]
imdall.polygon(tups,fill="white",outline="white")
filename = pat+"_"+lab+".jpg"
arr = np.array(images[i,:,:])
posim = Image.fromarray(np.array((arr>pixthresh)*255,dtype=np.uint8),mode="L")
im = Image.open(os.path.join(imdir,filename))
maskp = np.logical_and(mask,images[i,:,:]>pixthresh)
imrgb = im.convert("RGB")
R, G, B = imrgb.split()
empty = Image.fromarray(np.zeros(posim.size[::-1],dtype=np.uint8),mode="L")
newImage = Image.merge("RGB", (posim,empty,allblack))
imd2 = ImageDraw.Draw(newImage)
for j,coords in enumerate(coordvec):
mid = np.average(coords,axis=0)
imd2.text((int(round(mid[0])),int(round(mid[1]))),'{:04d}'.format(j),fill="white",font=font)
newImage.save(os.path.join(anndir,annroot+"_"+filename))
outres = pd.DataFrame(res)
outres.to_csv(os.path.join("PreviewImages","ROI_summaries.csv"),index=False)
| CnrLwlss/manannan | src/annotation.py | annotation.py | py | 8,461 | python | en | code | 0 | github-code | 13 |
33596572108 | import matplotlib as mpl
import matplotlib.pyplot as plt
from random import randint
import time
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111)
points = []
Ttime=0
for i in range(0,50):
start = time.time()
Fib = 0
temp = 0
num = 1
num2 = 1
in1 = i
inp = in1-2
while inp>0:
temp = num
num = num + num2
num2 = temp
inp = inp-1
Fib = num
end = time.time()
print (str(Fib))
Ttime = Ttime + (end-start)
points.append(Ttime)
print ("Int = 93th para overflow, aunque se puede aumentar el limite por memoria")
ax.plot(points)
fig.savefig('graph.png')
plt.show()
| miacastroco/Iphyton | fibonacciPython.py | fibonacciPython.py | py | 664 | python | en | code | 0 | github-code | 13 |
13520881726 | """
Run like this : `AWS_DEFAULT_REGION='us-east-1' pytest`
"""
import os
import json
import sys
import zipfile
import io
import logging
import mock
import socket
import boto3
import sure
from moto import mock_autoscaling, mock_ec2, mock_iam, mock_lambda
sys.path.append('..')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
EXAMPLE_AMI_ID = "ami-12c6146b"
@mock_ec2
def setup_networking():
az = f"{os.environ['AWS_DEFAULT_REGION']}a"
ec2 = boto3.resource("ec2")
vpc = ec2.create_vpc(CidrBlock="10.1.0.0/16")
public_subnet = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock="10.1.1.0/24",
AvailabilityZone=f"{az}"
)
private_subnet = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock="10.1.2.0/24",
AvailabilityZone=f"{az}",
)
private_subnet_two = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock="10.1.3.0/24",
AvailabilityZone=f"{az}",
)
route_table = ec2.create_route_table(VpcId=vpc.id)
route_table_two = ec2.create_route_table(VpcId=vpc.id)
sg = ec2.create_security_group(GroupName="test-sg", Description="test-sg")
ec2_client = boto3.client("ec2")
allocation_id = ec2_client.allocate_address(Domain="vpc")["AllocationId"]
nat_gw_id = ec2_client.create_nat_gateway(
SubnetId=public_subnet.id,
AllocationId=allocation_id
)["NatGateway"]["NatGatewayId"]
eni = ec2_client.create_network_interface(
SubnetId=public_subnet.id, PrivateIpAddress="10.1.1.5"
)
ec2_client.associate_route_table(
RouteTableId=route_table.id,
SubnetId=private_subnet.id
)
ec2_client.create_route(
DestinationCidrBlock="0.0.0.0/0",
NetworkInterfaceId=eni["NetworkInterface"]["NetworkInterfaceId"],
RouteTableId=route_table.id
)
    ec2_client.associate_route_table(
        RouteTableId=route_table_two.id,
        SubnetId=private_subnet_two.id
    )
ec2_client.create_route(
DestinationCidrBlock="0.0.0.0/0",
NetworkInterfaceId=eni["NetworkInterface"]["NetworkInterfaceId"],
RouteTableId=route_table_two.id
)
return {
"vpc": vpc.id,
"public_subnet": public_subnet.id,
"private_subnet": private_subnet.id,
"private_subnet_two": private_subnet_two.id,
"nat_gw": nat_gw_id,
"route_table": route_table.id,
"route_table_two": route_table_two.id,
"sg": sg.id,
}
def verify_nat_gateway_route(mocked_networking):
ec2_client = boto3.client("ec2")
filters = [{"Name": "route-table-id", "Values": [mocked_networking["route_table"],mocked_networking["route_table_two"]]}]
route_tables = ec2_client.describe_route_tables(Filters=filters)["RouteTables"]
route_tables.should.have.length_of(2)
route_tables[0]["Routes"].should.have.length_of(2)
route_tables[1]["Routes"].should.have.length_of(2)
for rt in route_tables:
for route in rt["Routes"]:
if route["DestinationCidrBlock"] == "0.0.0.0/0":
zero_route = route
zero_route.should.have.key("NatGatewayId").equals(mocked_networking["nat_gw"])
@mock_autoscaling
@mock_ec2
def test_handler():
mocked_networking = setup_networking()
ec2_client = boto3.client("ec2")
template = ec2_client.create_launch_template(
LaunchTemplateName="test_launch_template",
LaunchTemplateData={"ImageId": EXAMPLE_AMI_ID, "InstanceType": "t2.micro"},
)["LaunchTemplate"]
autoscaling_client = boto3.client("autoscaling")
autoscaling_client.create_auto_scaling_group(
AutoScalingGroupName="alternat-asg",
VPCZoneIdentifier=mocked_networking["public_subnet"],
MinSize=1,
MaxSize=1,
LaunchTemplate={
"LaunchTemplateId": template["LaunchTemplateId"],
"Version": str(template["LatestVersionNumber"]),
},
)
from app import handler
script_dir = os.path.dirname(__file__)
with open(os.path.join(script_dir, "../sns-event.json"), "r") as file:
asg_termination_event = file.read()
az = f"{os.environ['AWS_DEFAULT_REGION']}a".upper().replace("-", "_")
os.environ[az] = ",".join([mocked_networking["route_table"],mocked_networking["route_table_two"]])
handler(json.loads(asg_termination_event), {})
verify_nat_gateway_route(mocked_networking)
@mock_iam
def get_role():
iam = boto3.client("iam")
return iam.create_role(
RoleName="my-role",
AssumeRolePolicyDocument="some policy",
Path="/my-path/",
)["Role"]["Arn"]
def get_test_zip_file1():
pfunc = """
def lambda_handler(event, context):
print("custom log event")
return event
"""
return _process_lambda(pfunc)
def _process_lambda(func_str):
zip_output = io.BytesIO()
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
@mock_lambda
@mock_ec2
@mock.patch('urllib.request.urlopen')
def test_connectivity_test_handler(mock_urlopen):
from app import connectivity_test_handler
mocked_networking = setup_networking()
lambda_client = boto3.client("lambda")
lambda_function_name = "alternat-connectivity-test"
lambda_client.create_function(
FunctionName=lambda_function_name,
Role=get_role(),
Code={"ZipFile": get_test_zip_file1()},
)
script_dir = os.path.dirname(__file__)
with open(os.path.join(script_dir, "../cloudwatch-event.json"), "r") as file:
cloudwatch_event = file.read()
class Context:
function_name = lambda_function_name
mock_urlopen.side_effect = socket.timeout()
os.environ["ROUTE_TABLE_IDS_CSV"] = ",".join([mocked_networking["route_table"], mocked_networking["route_table_two"]])
os.environ["PUBLIC_SUBNET_ID"] = mocked_networking["public_subnet"]
connectivity_test_handler(event=json.loads(cloudwatch_event), context=Context())
verify_nat_gateway_route(mocked_networking)
| 1debit/alternat | functions/replace-route/tests/test_replace_route.py | test_replace_route.py | py | 6,218 | python | en | code | 873 | github-code | 13 |
12663723982 | from bs4 import BeautifulSoup
def html(name, clean):
    with open(name, "r") as file:
        contents = file.read()
    soup = BeautifulSoup(contents, 'lxml')
    text = soup.text
    with open(clean, 'w') as out:
        out.write(text)
    return soup
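# Example: html('/path/to/page.html', 'page.txt') strips all tags from the
# input and writes only the plain text to page.txt.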
name = input("Absolute path ot file: ")
clean = input("Name of cleaned file: ")
print(html(name, clean))
| ls500pymaster/Python_Basics | py_basic_HM/Clear html in file_HW_31.py | Clear html in file_HW_31.py | py | 355 | python | en | code | 0 | github-code | 13 |
16184452473 | """
미확인 도착지(https://www.acmicpc.net/problem/9370)
- 입력 : 테스트 케이스의 개수 T(1 <= T <= 100)
첫번째 줄에 3개의 정수 n, m, t(2 <= n <= 2,000, 1 <= m <= 50,000, 1 <= 1t <= 100)
각각 교차로, 도로, 목적지 후보의 개수
두번째 줄에 3개의 정수 s, g, h(1 <= s, g, h <= n)
s는 예술가들의 출발지, g, h는 지나간 교차로의 사이(g != h)
이후 m개의 각 줄마다 3개의 정수 a, b, d(1 <= a < b <= n, 1 <\ d <= 1,000)
a와 b사이에 길이 d의 양방향 도로가 있다는 뜻
이후 t개의 각 줄마다 정수 x가 주어지는데, t개의 목적지 후보들을 의미
이 t개의 지점들은 서로 다른 위치이며 모두 s와 같지 않음
교차로 사이에는 도로가 많아봐야 1개, m개의 줄 중에서 g와 h 사이의 도로를 나타낸 것이 존재
또한 이 도로는 목적지 후보들 중 적어도 1개로 향하는 최단 경로의 일부
- 출력 : 각 테스트 케이스마다 입력에서 주어진 목적지 후보들 중 불가능한 경우를 제외한 목적지들을 공백으로 분리시킨
오름차순의 정수들로 출력
* 참고 : https://hooongs.tistory.com/165
"""
from sys import stdin
import heapq
INF = float(1e9)
def dijkstra(n, start, graph):
distance = [INF] * (n + 1)
distance[start] = 0
q = []
heapq.heappush(q, (0, start))
while q:
dist, now = heapq.heappop(q)
if distance[now] < dist:
continue
for i in graph[now]:
cost = dist + i[1]
if cost < distance[i[0]]:
distance[i[0]] = cost
heapq.heappush(q, (cost, i[0]))
return distance
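# Trick: the g-h road is shortened by 0.1 below, so any shortest path that
# uses it has a fractional (float) total length; a candidate destination
# qualifies exactly when its shortest distance is finite and of type float.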
for _ in range(int(stdin.readline().strip())):
n, m, t = map(int, stdin.readline().split())
s, g, h = map(int, stdin.readline().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b, d = map(int, stdin.readline().split())
if (a == g and b == h) or (a == h and b == g):
            d -= 0.1  # mark the g-h road by making its length fractional
graph[a].append((b, d))
graph[b].append((a, d))
targets = [int(stdin.readline().strip()) for _ in range(t)]
targets.sort()
dijkstra_results = dijkstra(n, s, graph)
for target in targets:
if dijkstra_results[target] != INF and type(dijkstra_results[target]) == float:
print(target, end=' ')
print()
| akana0321/Algorithm | BaekJoon/Shortest Path/unidentified_destination_9370.py | unidentified_destination_9370.py | py | 2,588 | python | ko | code | 0 | github-code | 13 |
5591423461 | # assumption: the engine's `glass` module provides the Key constants used
# below; importing it here so this settings table is self-contained
import glass
player = {
"Move Forward": [glass.Key.UP, ord("W")],
"Move Backward": [glass.Key.DOWN, ord("S")],
"Move Left": [glass.Key.LEFT, ord("A")],
"Move Right": [glass.Key.RIGHT, ord("D")],
"Jump": [glass.Key.SPACE, glass.Key.LEFT_CONTROL,glass.Key.RIGHT_CONTROL],
"Sprint": [glass.Key.LEFT_SHIFT],
"Attack / Action": [200], #left mouse
"Block / Leap": [201], #right mouse
"Rapid Attack": [ord("R")],
"Auto-Attack Toggle": [glass.Key.PAGE_DOWN],
"Select Primary Weapon": [ord("1")],
"Select Secondary Weapon": [ord("2")],
"Select Item 1": [ord("3")],
"Select Item 2": [ord("4")],
"Select Item 3": [ord("5")],
"Select Next Item/Weapon": [], #TODO MouseWheelUp
"Select Previous Item/Weapon": [], #TODO MouseWheelDown
"Enter Building": [ord("E")],
"Officer Mark": [ord("F")],
"Officer Mark Last Enemy": [ord("G")],
"Eject from Siege": [ord("X")],
"Unit Selection": [ord("L")],
"Request Buff 1": [],
"Request Buff 2": [],
"Request Buff 3": [],
"Zoom": [ord("Z")],
"Vote Yes": [glass.Key.F1],
"Vote No": [glass.Key.F2],
"Ignore Vote": [glass.Key.F3],
"Chat (All)": [ord("T")],
"Chat (Team)": [ord("Y")],
"Chat (Commander)": [ord("U")],
"Chat (Squad)": [ord("I")],
"Voice Chat": [ord("V")],
"Show Chat History": [ord("B")],
"Show Scoreboard": [glass.Key.TAB],
"Show Accuracy": [glass.Key.END],
"Show Research" : [],
"Show Graphics Options": [glass.Key.DELETE],
"Toggle Resources": [],
"Toggle Minimap": [ord("M")],
"Toggle Minimap Size": [ord("N")],
"Screenshot": [glass.Key.F9],
"Return to Lobby": [glass.Key.ESCAPE]
};
comm = {
"Scroll Up": [glass.Key.UP, ord("W")],
"Scroll Down": [glass.Key.DOWN, ord("S")],
"Scroll Left": [glass.Key.LEFT, ord("A")],
"Scroll Right": [glass.Key.RIGHT, ord("D")],
"Zoom In": [], #TODO MouseWheelUp
"Zoom Out": [], #TODO MouseWheelDown
"Rotate View CCW": [ord(",")],
"Rotate View CW": [ord(".")],
"Default View": [glass.Key.END],
"Vote Yes": [glass.Key.F1],
"Vote No": [glass.Key.F2],
"Hide Vote": [glass.Key.F3],
"Chat (All)": [ord("T")],
"Chat (Team)": [ord("Y")],
"Voice Chat": [ord("V")],
"Show Chat History": [ord("H")],
"Rotate Building CCW": [ord("Q")],
"Rotate Building CW": [ord("E")],
"Go To Base": [glass.Key.HOME],
"Go To Idle Worker": [ord("I")],
"Go To Last Message": [glass.Key.SPACE],
"Go To Officers": [],
"Go To Officer 1": [],
"Go To Officer 2": [],
"Go To Officer 3": [],
"Show Scoreboard": [glass.Key.TAB],
"Show Unit List": [glass.Key.TAB],
"Toggle Research": [ord("R")],
"Toggle Minimap": [ord("M")],
"Toggle Minimap Size": [ord("N")],
"Screenshot": [glass.Key.F9]
};
spec = {
"Move Forward": [glass.Key.UP, ord("W")],
"Move Backward": [glass.Key.DOWN, ord("S")],
"Move Left": [glass.Key.LEFT, ord("A")],
"Move Right": [glass.Key.RIGHT, ord("D")],
"Move Up": [glass.Key.SPACE, glass.Key.LEFT_CONTROL,glass.Key.RIGHT_CONTROL],
"Move Faster": [glass.Key.LEFT_SHIFT],
"Vote Yes": [glass.Key.F1],
"Vote No": [glass.Key.F2],
"Ignore Vote": [glass.Key.F3],
"Chat (All)": [ord("T")],
"Chat (Team)": [ord("Y")],
"Voice Chat": [ord("V")],
"Show Chat History": [ord("B")],
"Show Scoreboard": [glass.Key.TAB],
"Show Research" : [ord("R")],
"Show Graphics Options": [glass.Key.DELETE],
"Toggle Minimap": [ord("M")],
"Toggle Minimap Size": [ord("N")],
"Screenshot": [glass.Key.F9],
"Return to Lobby": [glass.Key.ESCAPE],
"Unit Selection": [ord("L")]
};
maps = [player, comm, spec];
#this is what actually gets used. note the order!
| biggeruniverse/srdata | client/game/settings/default_bindactions.py | default_bindactions.py | py | 3,814 | python | en | code | 1 | github-code | 13 |
35165339951 | from Person import *
from Car import *
class Employee(Person):
employeesNum = 0
def __init__(self, id, car, email, salary, distanceToWork):
self.id = id
self.car = car
self.email = email
self.salary = salary
self.distanceToWork = distanceToWork
Employee.employeesNum += 1
    def reful(self, gazAmount):
        # assumes self.car is a Car instance exposing a fuelRate attribute
        self.car.fuelRate = gazAmount
        print("reful assign value success \n Done in reful() ")
def drive(self, distance):
print("drive call")
self.drive_obj = Car.run(self, velocity=3, distance=3)
print("Done")
def work(self, hours):
if hours == 8:
print("Happy!")
elif hours > 8:
print("Tired!")
elif hours < 8:
print("Lazy")
else:
print("else")
def sendMail(self, to, receiverName, sub, msg):
f = open("mail.txt", "a")
f.write("|".join([to, receiverName, sub, msg]))
f.write('\n')
f.close()
@classmethod
def CalcNumsEmp(cls):
cls.employeesNum += 1
print(f" EmpsNum are {cls.employeesNum}")
employee = Employee(1, "f16", "medo@medo.com", 1000, 20)
employee2 = Employee(2, "f16", "medo@medo.com", 1000, 20)
# employee.drive(20)
employee.reful(0)
| Mohamadmahgoub910/Lab-4-Py | Employee.py | Employee.py | py | 1,290 | python | en | code | 0 | github-code | 13 |
20612544772 | import unittest
from work_file import reverse
class TestReverse(unittest.TestCase):
def test_if_empty_string_will_return_empty_output(self):
# Arrange
my_string = ''
        # Act
result = reverse(my_string)
# Assert
self.assertEqual(result, "")
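    def test_if_multicharacter_string_is_reversed(self):
        # Extra check (a sketch: assumes reverse() returns the characters in
        # reverse order, which the two tests around it do not cover)
        # Arrange
        my_string = 'abc'
        # Act
        result = reverse(my_string)
        # Assert
        self.assertEqual(result, 'cba')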
    def test_if_single_character_string_will_return_same_output(self):
# Arrange
my_string = 'a'
        # Act
result = reverse(my_string)
# Assert
self.assertEqual(result, "a") | cholards/training2023Jan | test_work_file.py | test_work_file.py | py | 513 | python | en | code | 0 | github-code | 13 |
38148380288 | # imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import f_regression
from math import sqrt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from statsmodels.formula.api import ols
def plot_residuals(target, yhat):
'''
plot_residuals will take in a target series and prediction series
and plot the residuals as a scatterplot.
'''
residual = target - yhat
plt.scatter(target, residual)
plt.axhline(y = 0, ls = ':')
plt.xlabel("target")
plt.ylabel("residual")
plt.title('Residual Plot')
plt.show
def regression_errors(target, yhat):
'''
regression_errors takes in a target and prediction series
and prints out the regression error metrics.
'''
residual = target - yhat
mse = mean_squared_error(target, yhat)
sse = (residual **2).sum()
rmse = sqrt(mse)
    tss = ((target - target.mean()) ** 2).sum()
ess = ((yhat - target.mean()) ** 2).sum()
print(f"""
MSE: {round(mse,2)}
SSE: {round(sse,2)}
RMSE: {round(rmse,2)}
TSS: {round(tss,2)}
ESS: {round(ess,2)}
""")
def baseline_mean_errors(target):
    '''
    baseline_mean_errors takes in a target
    and prints out the regression error metrics for the baseline.
    '''
    baseline = target.mean()
    residual = target - baseline
    sse_baseline = (residual ** 2).sum()
    mse_baseline = sse_baseline / len(target)
    rmse_baseline = sqrt(mse_baseline)

    print(f"""
    MSE_baseline: {round(mse_baseline,2)}
    SSE_baseline: {round(sse_baseline,2)}
    RMSE_baseline: {round(rmse_baseline,2)}
    """)
def better_than_baseline(target, yhat):
    '''
    better_than_baseline takes in a target and prediction
    and returns boolean answering if the model is better than the baseline.
    '''
    rmse_baseline = sqrt((((target - target.mean()) ** 2).sum()) / len(target))
    rmse_model = sqrt((((target - yhat) ** 2).sum()) / len(target))
    return rmse_model < rmse_baseline
def model_significance(ols_model):
return {
'r^2 -- variance explained': ols_model.rsquared,
'p-value -- P(data|model == baseline)': ols_model.f_pvalue,
}
def residuals(actual, predicted):
return actual - predicted
def sse(actual, predicted):
return (residuals(actual, predicted) **2).sum()
def mse(actual, predicted):
n = actual.shape[0]
return sse(actual, predicted) / n
def rmse(actual, predicted):
return sqrt(mse(actual, predicted))
def ess(actual, predicted):
return ((predicted - actual.mean()) ** 2).sum()
def tss(actual):
return ((actual - actual.mean()) ** 2).sum()
def reg_error_metrics(target, yhat):
'''
reg_error_metrics takes in target and prediction series
and returns a dataframe that contains the SSE/MSE/RMSE metrics
    for both the model and the baseline,
and answers if the model is better than the baseline.
'''
df = pd.DataFrame(np.array(['SSE', 'MSE','RMSE']), columns=['metric'])
df['model_error'] = np.array([sse(target, yhat), mse(target, yhat), rmse(target, yhat)])
df['baseline_error'] = np.array([sse(target, target.mean()), mse(target, target.mean()), rmse(target, target.mean())])
df['better_than_baseline'] = df.baseline_error > df.model_error
df = df.set_index("metric")
return df
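# Minimal usage sketch (illustrative; assumes `y` and `yhat` are aligned
# pandas Series of actuals and predictions):
#   plot_residuals(y, yhat)
#   regression_errors(y, yhat)
#   reg_error_metrics(y, yhat)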
| o0amandagomez0o/regression-exercises | evaluate.py | evaluate.py | py | 3,661 | python | en | code | 0 | github-code | 13 |
74638008656 | if __name__ == '__main__':
    records = []  # holds every [name, score] record
    scores = []  # holds the scores only
    for _ in range(int(input())):
        name = input()
        score = float(input())
        records.append([name, score])  # read a name and score and store them
        scores.append(score)  # store the score only
    scores = list(set(scores))  # deduplicate; back to a list so it can be sorted
    scores.sort()  # sort ascending - Python's list.sort() uses Timsort
    scoreAnswer = scores[1]  # index 1 of the ascending sort is the second-lowest score
    names = []  # names of the students with that score
    for i in records:  # walk every record
        if (i[1] == scoreAnswer):  # when a record matches the second-lowest score
            names.append(i[0])  # collect that name
    names.sort()  # alphabetical order
    for i in names:
        print(i)  # print them
| Coding-Test-Study-Group/Coding-Test-Study | Cobluesky/hackerrank - nested_list.py | hackerrank - nested_list.py | py | 1,109 | python | ko | code | 4 | github-code | 13 |
5591423461 | import tkinter as tk
class BudgetYearMonthMenu:
def __init__(self, root, budzety):
self.root = root
self.root.title("Budget Year Month Menu")
self.budzety = budzety
self.budget_label = tk.Label(root, text="Select Budget:")
self.budget_label.pack()
self.budget_var = tk.StringVar(root)
self.budget_var.set(self.budzety[0].name)
self.budget_var.trace("w", self.update_years)
self.budget_dropdown = tk.OptionMenu(root, self.budget_var, *[b.name for b in self.budzety])
self.budget_dropdown.pack()
self.year_label = tk.Label(root, text="Select Year:")
self.year_label.pack()
self.year_var = tk.StringVar(root)
self.year_var.set("Select Year")
self.year_dropdown = tk.OptionMenu(root, self.year_var, [])
self.year_dropdown.pack()
self.month_label = tk.Label(root, text="Select Month:")
self.month_label.pack()
self.month_var = tk.StringVar(root)
self.month_var.set("Select Month")
self.month_dropdown = tk.OptionMenu(root, self.month_var, "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December")
self.month_dropdown.pack()
self.submit_button = tk.Button(root, text="Submit", command=self.submit)
self.submit_button.pack()
def update_years(self, *args):
selected_budget = next((b for b in self.budzety if b.name == self.budget_var.get()), None)
if selected_budget is not None:
self.year_var.set("Select Year")
self.year_dropdown["menu"].delete(0, "end")
self.year_options = selected_budget.list_years()
for rok in self.year_options:
self.year_dropdown["menu"].add_command(label=rok, command=tk._setit(self.year_var, rok))
def submit(self):
selected_budget = self.budget_var.get()
selected_year = self.year_var.get()
selected_month = self.month_var.get()
print(f"Selected Budget: {selected_budget}")
print(f"Selected Year: {selected_year}")
print(f"Selected Month: {selected_month}") | Melkorn/MoneyTracker | gui.py | gui.py | py | 2,184 | python | en | code | 0 | github-code | 13 |
30739832655 | def is_diagonal(matrix):
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            if i != j and matrix[i][j] != 0:
                return False
    return True
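# Example: is_diagonal([[1, 0, 0], [0, 5, 0], [0, 0, 9]]) -> True,
#          is_diagonal([[1, 2, 0], [0, 5, 0], [0, 0, 9]]) -> False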
def make_matrix(row, column):
    # renamed from list() to avoid shadowing the built-in
    matrix = []
    for i in range(1, row+1):
        ilist = []
        for j in range(1, column+1):
            ilist.append(1)
        matrix.append(ilist)
    return matrix
row = int(input("Enter the number of rows: "))
column = int(input("Enter the number of columns: "))
matrix = make_matrix(row, column)
for k in range(1, row+1):
for l in range(1, column+1):
ask = int(input(f"Enter the value for a{k}{l}: "))
matrix[k-1][l-1] = ask
for m in range(row):
print(matrix[m])
if is_diagonal(matrix):
print("matrix is a diagonal matrix")
else:
print(" matrix is not a diagonal matrix") | rnlifts/semester2 | New folder/diagonal_matrix.py | diagonal_matrix.py | py | 823 | python | en | code | 0 | github-code | 13 |
34785880288 | from rct229.rulesets.ashrae9012019.ruleset_functions.get_hvac_zone_list_w_area_dict import (
get_hvac_zone_list_w_area_dict,
)
from rct229.schema.config import ureg
from rct229.schema.schema_utils import quantify_rmr
from rct229.schema.validate import schema_validate_rmr
TEST_RMR = {
"id": "test_rmr",
"buildings": [
{
"id": "bldg_1",
"building_open_schedule": "bldg_open_sched_1",
"building_segments": [
{
"id": "bldg_seg_1",
"heating_ventilating_air_conditioning_systems": [
{"id": "hvac_1_1"}
],
"zones": [
{
"id": "zone_1_1",
"thermostat_cooling_setpoint_schedule": "tcs_sched_1",
"thermostat_heating_setpoint_schedule": "ths_sched_1",
"spaces": [
{
"id": "space_1_1_1",
"floor_area": 1000, # m2
"occupant_multiplier_schedule": "om_sched_1",
},
{
"id": "space_1_1_2",
"floor_area": 500, # m2
"occupant_multiplier_schedule": "om_sched_1",
},
],
"terminals": [
{
"id": "terminal_1_1_1",
"served_by_heating_ventilating_air_conditioning_system": "hvac_1_1",
},
{
"id": "terminal_1_1_2",
"served_by_heating_ventilating_air_conditioning_system": "hvac_1_1",
},
],
},
{
"id": "zone_1_2",
"thermostat_cooling_setpoint_schedule": "tcs_sched_1",
"thermostat_heating_setpoint_schedule": "ths_sched_1",
},
],
}
],
}
],
}
TEST_RMR_12 = {"id": "229_01", "ruleset_model_descriptions": [TEST_RMR]}
TEST_BUILDING = quantify_rmr(TEST_RMR_12)["ruleset_model_descriptions"][0]["buildings"][
0
]
def test__TEST_RMD__is_valid():
schema_validation_result = schema_validate_rmr(TEST_RMR_12)
assert schema_validation_result[
"passed"
], f"Schema error: {schema_validation_result['error']}"
def test__get_hvac_zone_list_w_area_dict():
assert get_hvac_zone_list_w_area_dict(TEST_BUILDING) == {
"hvac_1_1": {"zone_list": ["zone_1_1"], "total_area": 1500 * ureg.m2}
}
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/get_hvac_zone_list_w_area_dict_test.py | get_hvac_zone_list_w_area_dict_test.py | py | 3,020 | python | en | code | 6 | github-code | 13 |
23029063132 | from sklearn import preprocessing
import numpy as np
class ScalerToolkit(object):
def __init__(self, data_train):
self.__data_train = data_train
def __show_result(func):
def inner(*args, **kwargs):
result = func(*args, **kwargs)
print(f"结果:\n{result}")
print(f"平均值:\n{result.mean(0)}")
print(f"标准差:\n{result.std(0)}\n")
return inner
@__show_result
def scale(self):
        # standardize the data (zero mean, unit variance)
return preprocessing.scale(self.__data_train)
@__show_result
def standard_scale(self):
        # fit the scaling parameters on the training data
        scaler = preprocessing.StandardScaler().fit(self.__data_train)
        # apply the fitted scaler; the same scaler can transform multiple datasets
        return scaler.transform(self.__data_train)
@__show_result
def min_max_scale(self):
# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# X_scaled = X_std * (max - min) + min
scaler = preprocessing.MinMaxScaler()
return scaler.fit_transform(self.__data_train)
@__show_result
def max_abs_scale(self):
scaler = preprocessing.MaxAbsScaler()
return scaler.fit_transform(self.__data_train)
@__show_result
def robust_scale(self):
        # works well for data containing outliers
scaler = preprocessing.RobustScaler().fit(self.__data_train)
return scaler.transform(self.__data_train)
@__show_result
    def normalize_scale(self):
        # can be used to normalize vectors to unit length (row vectors)
        # fit() has no real effect here (Normalizer is stateless)
scaler = preprocessing.Normalizer().fit(self.__data_train)
return scaler.transform(self.__data_train)
def main():
data_train = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
print(f"原始数据:\n{data_train}")
scaler_toolkit = ScalerToolkit(data_train)
print("scaler_toolkit.scale()")
scaler_toolkit.scale()
print("scaler_toolkit.standard_scale()")
scaler_toolkit.standard_scale()
print("scaler_toolkit.min_max_scale()")
scaler_toolkit.min_max_scale()
print("scaler_toolkit.max_abs_scale()")
scaler_toolkit.max_abs_scale()
print("scaler_toolkit.robust_scale()")
scaler_toolkit.robust_scale()
print("scaler_toolkit.nomalize_scale()")
scaler_toolkit.nomalize_scale()
if __name__ == '__main__':
main() | IBNBlank/toy_code | ai_tool_lesson/sklearn/scaler_toolkit.py | scaler_toolkit.py | py | 2,393 | python | en | code | 0 | github-code | 13 |
4568620818 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
def invertTree2(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
nodeList = [root]
while nodeList:
node = nodeList.pop(0)
if node:
nodeList.append(node.left)
nodeList.append(node.right)
node.left, node.right = node.right, node.left
return root
| Weikoi/OJ_Python | leetcode/easy/easy 201-400/226_翻转二叉树.py | 226_翻转二叉树.py | py | 912 | python | en | code | 0 | github-code | 13 |
7223508335 | import PySimpleGUI as sg
import numpy as np
import xlsxwriter
from tabulate import tabulate
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)  # removes blurriness on high-DPI displays
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
def fill_xml(arr_rows_xml):
with xlsxwriter.Workbook('credit_calc_option.xlsx') as workbook:
sheet = workbook.add_worksheet()
for row_num, data in enumerate(arr_rows_xml):
sheet.write_row(row_num, 0, data)
def calc_differnt_payment(s, month, rate):
s_rest = s
mpay_no_perc = s / month
arr_mpays_real = []
arr_mpays_perc = []
while month != 0:
arr_mpays_real.append(mpay_no_perc)
arr_mpays_perc.append(s_rest * rate / 1200)
s_rest -= mpay_no_perc
month -= 1
arr_mpays_perc = np.around(np.array(arr_mpays_perc), decimals=2)
arr_mpays_real = np.around(np.array(arr_mpays_real), decimals=2)
arr_mpays = np.around(arr_mpays_real + arr_mpays_perc, decimals=2)
return arr_mpays_real, arr_mpays_perc, arr_mpays, round(sum(arr_mpays) - s, 2)
def calc_annuit_payment(s, month, rate):
month_rate = rate / 1200
ak = (month_rate * (1 + month_rate) ** month) \
/ (((1 + month_rate) ** month) - 1)
mpay = s * ak
return round(mpay, 2), round((mpay * month) - s, 2)
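# Annuity factor: ak = r*(1+r)**n / ((1+r)**n - 1) with monthly rate
# r = rate/1200; the fixed monthly payment is s * ak.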
if __name__ == '__main__':
sg.theme('Reddit')
font_window = 'Arial 12 bold'
font_input = 'Arial 12'
layout = [
[sg.Text('Вид платежа', font=font_window),
sg.Radio('Дифференцированный', "Pay", font=font_window, default=True, key='-payd-'),
sg.Radio('Аннуитетный', "Pay", font=font_window, key='-paya-')],
[sg.Text('Сумма (в руб.)', font=font_window),
sg.InputText(size=(13, 2), font=font_input, key='-sum-')],
[sg.Text('Срок (в мес.)', font=font_window),
sg.InputText(size=(7, 2), font=font_input, key='-time-'), ],
[sg.Text('Процентная ставка ', font=font_window),
sg.InputText(size=(7, 2), font=font_input, key='-rate-'),
sg.Text('% годовых', font=font_window)],
[sg.Button('Рассчитать', button_color='PaleGreen4', font=font_window)],
[sg.Text('\n\n')],
[sg.Text('Общая переплата ... руб.', font=font_input, key='-total-')],
[sg.Text('Ежемесячная выплата ... руб.', font=font_input, key='-mpay-'),
sg.Button('Показать таблицу', button_color='PaleGreen4',
font=font_input, key='-show_btn-', visible=False)],
[sg.Text('Все данные сохраняются в файл credit_calc_option.xlsx в директории с файлом программы', font='Arial 8')]
]
window = sg.Window('Simple credit calculator ©KVA', layout)
arr_rows_xml = []
diff_for_popup = None
while True:
event, values = window.read()
if event in (None, 'Exit'):
break
if event == 'Рассчитать':
sum_ = values['-sum-'].replace(' ', '')
time_ = values['-time-'].replace(' ', '')
rate_ = values['-rate-'].replace(' ', '')
if sum_ == '' or time_ == '' or rate_ == '':
sg.PopupOK('Пожалуйста, заполните все поля.', title='Ошибка')
continue
if not all([sym in numbers or sym == '.' or sym == ',' for sym in sum_]):
sg.PopupOK('При заполнении поля "Сумма" можно использовать '
'только цифры и десятичные разделители.', title='Ошибка')
continue
if not all([sym in numbers for sym in time_]):
sg.PopupOK('При заполнении поля "Срок" можно использовать '
'только цифры.', title='Ошибка')
continue
if not all([sym in numbers or sym == '.' or sym == ',' for sym in rate_]):
sg.PopupOK('При заполнении поля "Процентная ставка" можно использовать '
'только цифры и десятичные разделители.', title='Ошибка')
continue
sum_ = sum_.replace(',', '.')
if sum_[0] == '.' or sum_[-1] == '.':
sg.PopupOK('Поле "Сумма" заполнено некорректно.', title='Ошибка')
continue
rate_ = rate_.replace(',', '.')
if rate_[0] == '.' or rate_[-1] == '.':
sg.PopupOK('Поле "Процентная ставка" заполнено некорректно.')
continue
if values['-paya-']:
window['-show_btn-'].update(visible=False)
monthpay, total = calc_annuit_payment(float(sum_), int(time_), float(rate_))
window['-total-'].update(f'Общая переплата {total} руб.', background_color='gray85')
window['-mpay-'].update(f'Ежемесячная выплата {monthpay} руб.', background_color='gray85')
arr_rows_xml.append(['Аннуитетный', float(sum_), int(time_), f'{float(rate_)}%',
'', total, 'Ежемесячный платеж:', monthpay])
try:
fill_xml(arr_rows_xml)
except xlsxwriter.exceptions.FileCreateError as e:
sg.PopupOK('Данные текущих вычислений не были сохранены.\n'
'Пожалуйста, закройте файл credit_calc_option.xlsx '
'и нажмите на кнопку "Рассчитать" еще раз.', title='Ошибка')
arr_rows_xml.pop()
if values['-payd-']:
window['-show_btn-'].update(visible=True)
monthpay_real_arr, monthpay_perc_arr, monthpay_arr, total = \
calc_differnt_payment(float(sum_), int(time_), float(rate_))
window['-total-'].update(f'Общая переплата {total} руб.', background_color='gray85')
window['-mpay-'].update(f'Помесячный график платежей: ', background_color='gray85')
            diff_for_popup = \
                [[i + 1, monthpay_real_arr[i], monthpay_perc_arr[i], monthpay_arr[i]] for i in range(int(time_))]
list_for_save = ['Дифференцированный', float(sum_), int(time_), f'{float(rate_)}%',
'', total, 'Помесячные платежи:']
list_for_save.extend(monthpay_arr)
arr_rows_xml.append(list_for_save)
try:
fill_xml(arr_rows_xml)
except xlsxwriter.exceptions.FileCreateError as e:
sg.PopupOK('Данные текущих вычислений не были сохранены.\n'
'Пожалуйста, закройте файл credit_calc_option.xlsx '
'и нажмите на кнопку "Рассчитать" еще раз.', title='Ошибка')
arr_rows_xml.pop()
if event == '-show_btn-':
column_names = ["Месяц |", "Погашение основного долга |", "Погашение процентов |", "Общая сумма платежа |"]
diff_for_popup.insert(0, column_names)
table_str = tabulate(diff_for_popup, headers="firstrow",
numalign="right", floatfmt=".2f")
table_str = table_str.replace('-', '')
sg.PopupScrolled(table_str, title='Помесячный график платежей',
background_color='white', button_color='PaleGreen4',
font=font_input, size=(80,30), non_blocking=True, modal=False)
diff_for_popup.pop(0)
# sg.Print(table_str, background_color='white', no_button=True, size=(90,30))
| IgelSchnauze/bank-informatics | credit_calculator_v3.py | credit_calculator_v3.py | py | 8,428 | python | ru | code | 0 | github-code | 13 |
23439446949 | # 86. Create a program that builds a 3x3 matrix and fills it with
# values read from the keyboard.
'''
_____
0 |_|_|_|
1 |_|_|_|
2 |_|_|_|
0 1 2
'''
# At the end, display the matrix on screen with the proper formatting.
# Define the skeleton with zeros so we don't have to use append
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# Input loop
for linha in range(0, 3):
    # The inner loop must cover 0 through 2 before the outer loop moves
    # to its next iteration (from 0 to 1, and so on)
for coluna in range(0, 3):
        # Store the value at the correct position
matriz[linha][coluna] = int(
input(f'Digite um valor para [{linha}, {coluna}]: ')
)
print('-=' * 30)
# Display loop
for linha in range(0, 3):
for coluna in range(0, 3):
        # Center the value in a fixed width so the output lines up
print(f'[{matriz[linha][coluna]:^5}]', end='')
print()
| rafaelribeiroo/scripts_py | Mundo 03: Estruturas Compostas/26. Exercícios: Listas II/86. Matriz em python.py | 86. Matriz em python.py | py | 918 | python | pt | code | 2 | github-code | 13 |
42090513609 | from glob import glob
import cv2
import os
import io
import random
import zipfile
import requests
import numpy as np
# set random seed for reproducibility
random.seed(10)
cv2.setRNGSeed(10)
# path to the downloaded zip file
noisy_office_zip_path = "NoisyOffice.zip"
with zipfile.ZipFile(noisy_office_zip_path, 'r') as zip_file:
zip_file.extractall("")
# get paths of all clean and noisy images
all_paths_noisy = glob("NoisyOffice/SimulatedNoisyOffice/simulated_noisy_images_grayscale/*")
all_paths_clean = glob("NoisyOffice/SimulatedNoisyOffice/clean_images_grayscale/*")
# randomly sort images
total_images = len(all_paths_noisy)
random_indices = random.sample(range(0, total_images), total_images)
all_paths_noisy = np.array(all_paths_noisy)
all_paths_noisy = list(all_paths_noisy[random_indices])
# get all name only
noisy_names = [ os.path.basename(cpath) for cpath in all_paths_noisy]
clean_names = [ os.path.basename(cpath) for cpath in all_paths_clean]
# write path for train, validate and test data
write_path_train= "train/train/"
write_path_validate= "validate/validate/"
write_path_test = "test/test/"
# create folders
os.makedirs(write_path_train+"train_noisy")
os.makedirs(write_path_train+"train_cleaned")
os.makedirs(write_path_validate+"validate_noisy")
os.makedirs(write_path_validate+"validate_cleaned")
os.makedirs(write_path_test+"test_noisy")
os.makedirs(write_path_test+"test_cleaned")
# train, validate and test image number
train_number = 144
validate_number = 16
test_number = 56
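# After shuffling, the image pairs are split positionally: the first 144 go to
# train, the next 16 to validate, and the remaining 56 to test.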
for i, noisy_name in enumerate(noisy_names):
    # get characters from the noisy image name for matching purposes
    check_first_7 = noisy_name[:7]
    check_last_6 = noisy_name[-6:]
for j, clean_name in enumerate(clean_names):
        # get characters from the clean image name for matching purposes
        check_first_7c = clean_name[:7]
        check_last_6c = clean_name[-6:]
# matching noisy and clean image, save them into each folder
        if check_first_7 == check_first_7c and check_last_6 == check_last_6c:
img_noisy = cv2.imread(all_paths_noisy[i])
img_clean = cv2.imread(all_paths_clean[j])
if i < train_number:
write_noisy_path = write_path_train + "train_noisy/"
write_clean_path = write_path_train + "train_cleaned/"
            elif i >= train_number and i < train_number + validate_number:
write_noisy_path = write_path_validate + "validate_noisy/"
write_clean_path = write_path_validate + "validate_cleaned/"
else:
write_noisy_path = write_path_test + "test_noisy/"
write_clean_path = write_path_test + "test_cleaned/"
# save images
cv2.imwrite(write_noisy_path+noisy_name, img_noisy)
cv2.imwrite(write_clean_path+noisy_name, img_clean)
| kwcckw/Converted_noisy_office | conversion_code.py | conversion_code.py | py | 2,994 | python | en | code | 0 | github-code | 13 |
13238455935 | """
Concrete CollectorStrategy classes for the GitHub built-in module
"""
import re
from anchorhub.collector import CollectorStrategy
import anchorhub.builtin.regex.markdown as mdrx
class MarkdownATXCollectorStrategy(CollectorStrategy):
"""
Concrete collector strategy used to parse ATX style headers that have
AnchorHub tags specified
    ATX style headers begin with 1-6 hash '#' characters, and then use
the rest of the line to specify the text of the header. For example:
# This is an ATX header!
### So is this!
"""
def __init__(self, opts):
"""
Initializes the object to utilize the AnchorHub tag wrapper
as specified.
:param opts: Namespace with the attribute 'wrapper_pattern',
typically obtained through command-line argument parsing
"""
self._open = opts.open
self._close = opts.close
self._header_pattern = r"^#+ .+" + opts.wrapper_regex + r"\s*$"
self._regex = re.compile(self._header_pattern, re.UNICODE)
def test(self, file_lines, index):
"""
Is this line an ATX header with an AnchorHub tag specified? Return
True if it is.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: True if the line in file_lines at index is an ATX header
with an AnchorHub tag declared. False otherwise
"""
return self._regex.match(file_lines[index])
def get(self, file_lines, index):
"""
Extract the specified AnchorHub tag, as well as the portion of the
line that should be converted from the ATX style Markdown header.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: [tag, string] - tag: the extracted AnchorHub tag. string -
the portion of the header that should be converted into an anchor
"""
line = file_lines[index]
start_index = line.find('# ') + 2 # Start index for header text
start_tag = line.rfind(self._open) # Start index of AnchorHub tag
end_tag = line.rfind(self._close) # End index of AnchorHub tag
# The magic '+1' below knocks out the hash '#' character from extraction
tag = line[start_tag + len(self._open) + 1:end_tag]
string = line[start_index:start_tag]
return [tag, string]
class MarkdownSetextCollectorStrategy(CollectorStrategy):
"""
Concrete collector strategy used to parse Setext style headers that have
AnchorHub tags specified
Setext style headers are 'underlined' with a line comprised entirely of
equals-signs or hyphens. These are valid Setext headers:
This is an H1 header
====================
This is an H2 header
-
Note that the underlining characters are mutually exclusive. This is
_not_ a valid header:
This is not a header
====----
"""
def __init__(self, opts):
"""
Initializes the object to utilize the AnchorHub tag wrapper
as specified.
:param opts: Namespace with the attribute 'wrapper_pattern',
typically obtained through command-line argument parsing
"""
self._open = opts.open
self._close = opts.close
self._header_pattern = opts.wrapper_regex + r"\s*$"
self._header_regex = re.compile(self._header_pattern, re.UNICODE)
self._underline_regex = re.compile(mdrx.setext_underline)
def test(self, file_lines, index):
"""
        Is this line a Setext header with an AnchorHub tag specified? Return
True if it is.
file_lines and index _must_ be provided to this function, or it will
throw a ValueError
:param file_lines: list of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
        :return: True if the line in file_lines at index is a Setext header
with an AnchorHub tag declared. False otherwise
"""
if file_lines is None: raise ValueError("file_lines list must be "
"provided to test() method in "
"MarkdownSetextCollectorStrategy")
if index is None: raise ValueError("index must be provided to test() "
"method in "
"MarkdownSetextCollectorStrategy")
# If index is at len(file_lines) - 1, it's the last line in file
# Since it needs an underline, cannot be a header
index_in_bounds = index < len(file_lines) - 1
if (index_in_bounds and self._header_regex.search(file_lines[index]) and
self._underline_regex.match(file_lines[index+1])):
return True
else:
return False
def get(self, file_lines, index):
"""
Extract the specified AnchorHub tag, as well as the portion of the
        line that should be converted from the Setext style Markdown header.
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: [tag, string] - tag: the extracted AnchorHub tag. string -
the portion of the header that should be converted into an anchor
"""
line = file_lines[index]
start_tag = line.rfind(self._open) # Start index of AnchorHub tag
end_tag = line.rfind(self._close) # End index of AnchorHub tag
# The magic '+1' below knocks out the hash '#' character from extraction
tag = line[start_tag + len(self._open) + 1:end_tag]
string = line[:start_tag]
return [tag, string]
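# Illustrative usage (an added sketch, not part of anchorhub itself; the
# Namespace fields mirror how `opts` is accessed above, and the wrapper
# regex assumes the common "{#tag}" AnchorHub syntax):
if __name__ == "__main__":
    from argparse import Namespace
    _opts = Namespace(open="{", close="}", wrapper_regex=r"\{#\S+\}")
    _atx = MarkdownATXCollectorStrategy(_opts)
    _lines = ["# My Header {#my-tag}"]
    if _atx.test(_lines, 0):
        print(_atx.get(_lines, 0))  # expected: ['my-tag', 'My Header ']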
| samjabrahams/anchorhub | anchorhub/builtin/github/cstrategies.py | cstrategies.py | py | 5,978 | python | en | code | 6 | github-code | 13 |
28124975030 | import sys
sys.path.append('/workspace/classification/code/') # zjl
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
from configs import config
def load_images(data_type):
assert data_type in ['train', 'test']
if data_type == 'train':
transform_train = transforms.Compose([
transforms.RandomCrop(28, padding=2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.286,), (0.353,))
])
data_set = torchvision.datasets.SVHN(root=config.data_svhn,
split=data_type,
download=True,
transform=transform_train)
else:
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.286,), (0.353,))
])
data_set = torchvision.datasets.SVHN(root=config.data_svhn,
split=data_type,
download=True,
transform=transform_test)
data_loader = DataLoader(
dataset=data_set,
shuffle=True,
num_workers=4,
batch_size=128)
return data_loader, len(data_set)
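# Side note (a sketch, not from the original repo): the (0.286,)/(0.353,)
# normalization constants can be re-derived from the raw training split
# roughly like this (downloads the dataset, so it is left commented out):
#   raw = torchvision.datasets.SVHN(root=config.data_svhn, split='train',
#                                   transform=transforms.ToTensor())
#   import torch
#   stacked = torch.stack([img for img, _ in raw])
#   print(stacked.mean(), stacked.std())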
if __name__ == '__main__':
print('==')
from utils import image_util
data_loader, data_size = load_images('train')
print(data_size)
for i, samples in enumerate(data_loader):
inputs, targets = samples
print(inputs.shape)
image_util.show_torch_images(inputs, mode='gray')
if i == 1:
break
| LIRUIJIE0330/model-doctor6 | loaders/svhn_loader.py | svhn_loader.py | py | 1,732 | python | en | code | 0 | github-code | 13 |
38587939933 | from sys import argv
if len(argv) != 2:
print('Usage: ' + argv[0] + ' <VCF file>')
exit(1)
vcffilename = argv[1]
def hwe_chi_squared(gt0, gt1, gt2):
total = gt0 + gt1 + gt2
pfreq = (gt0 + 0.5 * gt1) / total
qfreq = (gt2 + 0.5 * gt1) / total
exp_gt0 = max(pfreq * pfreq * total, 0.0000000000001)
exp_gt1 = max(2 * pfreq * qfreq * total, 0.0000000000001)
exp_gt2 = max(qfreq * qfreq * total, 0.0000000000001)
chi0 = ((gt0 - exp_gt0) * (gt0 - exp_gt0)) / exp_gt0
chi1 = ((gt1 - exp_gt1) * (gt1 - exp_gt1)) / exp_gt1
chi2 = ((gt2 - exp_gt2) * (gt2 - exp_gt2)) / exp_gt2
return chi0 + chi1 + chi2
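# Worked sketch (added illustration; the counts are made up, not from the
# cohort): for genotype counts 50/30/20, p = (50 + 15) / 100 = 0.65 and
# q = 0.35, giving expected counts 42.25 / 45.5 / 12.25 and a statistic of
# about 11.6 -- above the df=1, alpha=0.01 cutoff of 6.635 used below.
assert hwe_chi_squared(50, 30, 20) > 6.635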
with open(vcffilename, 'r') as vcffile:
for line in vcffile:
if line[0] == '#':
print(line.strip())
continue
fields = line.split()
gt0, gt1, gt2 = [0, 0, 0]
for sample in fields[10:]:
if sample[0:3] == '0/0':
gt0 += 1
if sample[0:3] == '0/1':
gt1 += 1
if sample[0:3] == '1/1':
gt2 += 1
chi_squared = hwe_chi_squared(gt0, gt1, gt2)
        # 6.635 is the chi-squared critical value for df = 1 at alpha = 0.01
        if chi_squared < 6.635:
print('\t'.join(fields[0:7] + [fields[7] + ';HWE=' + str(chi_squared)] + fields[8:]))
| kehrlab/PopDel-scripts | polaris_kids_cohort/plots/hwe.py | hwe.py | py | 1,301 | python | en | code | 2 | github-code | 13 |
30784575425 | from fail2ban.server.actions import ActionBase
import requests, json
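# Expected shape of telegram_config.json, inferred from the keys read in
# this module (values below are placeholders, not real credentials):
# {
#     "telegram_api_token": "<bot token>",
#     "telegram_chat_id": "",
#     "receive_banned": true,
#     "receive_unbanned": true
# }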
class telegramAction(ActionBase):
def __init__(self, jail, name):
self.installpath = '/etc/fail2ban/action.d/'
        try:
            with open(self.installpath + 'telegram_config.json', 'r') as f:
                self.config = json.loads(f.read())
        except Exception as e:
            print(f'Config file not found: {e}')
self.token = self.config['telegram_api_token']
self.get_chat_id()
def get_chat_id(self):
self.chat_id = self.config['telegram_chat_id']
        if self.chat_id == '' or self.chat_id is None:
try:
self.chat_id = requests.get(f'https://api.telegram.org/bot{self.token}/getUpdates').json()['result'][0]['message']['chat']['id']
self.config['telegram_chat_id'] = self.chat_id
                with open(self.installpath + 'telegram_config.json', 'w') as f:
                    f.write(json.dumps(self.config))
except Exception as e:
self.get_chat_id()
print(f'Logged: {e}')
print(f'Got chat ID: {self.chat_id}')
def send_message(self, message):
try: requests.post(f'https://api.telegram.org/bot{self.token}/sendMessage', params={"chat_id": self.chat_id, "text": message})
except Exception as e:
self.send_message(message)
print(f'Logged: {e}')
def start(self):
self.send_message("Fail2ban started.")
def stop(self):
self.send_message("Fail2ban stopped.")
def ban(self, aInfo):
if self.config['receive_banned']:
location = self.get_location(aInfo['ip'])
self.send_message(f"{aInfo['ip']} from {location} banned")
def unban(self, aInfo):
if self.config['receive_unbanned']:
location = self.get_location(aInfo['ip'])
self.send_message(f"{aInfo['ip']} from {location} unbanned")
def get_location(self, ip):
try: response = requests.get(f'https://ipapi.co/{ip}/json/').json()
except: return 'Unknown'
return response['country_name']
Action = telegramAction | Pyenb/fail2telegram | telegram.py | telegram.py | py | 2,122 | python | en | code | 1 | github-code | 13 |
27110575919 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cryptapp', '0003_auto_20151014_1244'),
]
operations = [
migrations.AddField(
model_name='fback',
name='email',
field=models.CharField(default=b'', max_length=100),
),
]
| aditi73/cryptic-mining | cryptapp/migrations/0004_fback_email.py | 0004_fback_email.py | py | 413 | python | en | code | 2 | github-code | 13 |
10923971676 | import pandas as pd
import pipit.trace
from pipit.graph import Graph, Node
class MetaReader:
# adds new context id and return new nid
def _add_context_id(self, context_id) -> int:
self.nid_to_ctx[self.current_nid] = context_id
self.current_nid += 1
return self.current_nid - 1
def __init__(self, file_location):
        # open the file to read in binary mode (rb)
self.file = open(file_location, "rb")
# setting necessary read options
self.byte_order = "little"
self.signed = False
self.encoding = "ASCII"
self.current_nid = 0
self.nid_to_ctx = {}
self.node_map = {}
# The meta.db header consists of the common .db header and n sections.
        # We're going to do a little setup work, so that it's easy to change
        # if any revision changes the ordering.
        # We are going to specify 2 maps:
# - One dictionary maps the section name to an index
# (which follows the order that the sections are seen
# in the meta.db header)
# - The second dictionary maps the section name to a
# function that reads the section. Each function is defined
# as __read_<section_name>_section(self, section_pointer: int,
# section_size: int) -> None
        # Here I'm mapping the section name to its order in the meta.db header
header_map = {
"General Properties": 0,
"Identifier Names": 1,
"Performance Metrics": 2,
"Context Tree": 3,
"Common String Table": 4,
"Load Modules": 5,
"Source Files": 6,
"Functions": 7,
}
# Now let's create a function to section map
reader_map = {
"General Properties": self.__read_general_properties_section,
"Common String Table": self.__read_common_string_table_section,
"Source Files": self.__read_source_files_section,
"Functions": self.__read_functions_section,
"Load Modules": self.__read_load_modules_section,
"Context Tree": self.__read_context_tree_section,
"Identifier Names": self.__read_identifier_names_section,
"Performance Metrics": self.__read_performance_metrics_section,
}
        # Another thing that we should consider is the order in which to read the sections.
# Here is a list of section references (x -> y means x references y)
# - "Source Files" -> "Common String Table"
# - "Functions" -> "Common String Table"
# - "Context Tree" -> "Common String Table"
# - "Load Modules" -> "Common String Table"
# - "Functions" -> "Source Files"
# - "Context Tree" -> "Functions"
# - "Context Tree" -> "Source Files"
# - "Functions" -> "Load Modules"
#
# Thus we want to keep this order when reading sections:
# "Common String Table" -> "Source Files" -> "Functions" -> "Context Tree", and
# "Common String Table -> "Load Modules" -> "Context Tree"
# Here I'm specifying the order of reading the file
self.read_order = [
"Common String Table",
"General Properties",
"Source Files",
"Load Modules",
"Functions",
"Context Tree",
"Identifier Names",
"Performance Metrics",
]
# Let's make sure that we include every section in the read order and reader_map
assert set(self.read_order) == set(header_map) and set(header_map) == set(
reader_map
)
# Now to the actual reading of the meta.db file
# reading the meta.db header
self.__read_common_header()
# now let's read all the sections
for section_name in self.read_order:
section_index = header_map[section_name]
section_pointer = self.section_pointer[section_index]
section_size = self.section_size[section_index]
section_reader = reader_map[section_name]
section_reader(section_pointer, section_size)
def get_information_from_context_id(self, context_id: int):
context: dict = self.context_map[context_id]
if "string_index" in context:
return {
"module": "",
"file": "",
"function": self.common_strings[context["string_index"]],
"relation": -1,
"lexical_type": -1,
"line": -1,
"loop_type": False,
}
# return self.common_strings[context["string_index"]]
# context = {"relation": relation, "lexical_type": lexical_type, \
# "function_index": function_index, \
# "source_file_index": source_file_index, \
# "source_file_line": source_file_line, \
# "load_module_index":load_module_index, \
# "load_module_offset": load_module_offset}
load_module_index = context["load_module_index"]
source_file_index = context["source_file_index"]
source_file_line = context["source_file_line"]
function_index = context["function_index"]
lexical_type = context["lexical_type"]
source_file_string = ""
module_string = ""
function_string = ""
file_line = -1
loop_type = False
if lexical_type == 1:
# loop construct
function_string = "loop"
loop_type = True
elif function_index is not None:
# The function map
function = self.functions_list[function_index]
# current_function_map = {"string_index": function_name_index, \
# "source_line": source_line,
# "load_modules_index": load_module_index, \
# "source_file_index": source_file_index}
# getting function name
function_string = self.common_strings[function["string_index"]]
else:
            # function is unknown
            function_string = "<unknown function>"
if load_module_index is not None:
load_module = self.load_modules_list[load_module_index]
module_string = self.common_strings[load_module["string_index"]]
if source_file_index is not None:
source_file = self.source_files_list[source_file_index]
source_file_string = self.common_strings[source_file["string_index"]]
# if function_index is not None:
# function = self.functions_list[function_index]
# function_string = self.common_strings[function["string_index"]]
if source_file_line is not None:
file_line = str(source_file_line)
return {
"module": module_string,
"file": source_file_string,
"function": function_string,
"line": file_line,
"lexical_type": context["lexical_type"],
"relation": context["relation"],
"loop_type": loop_type,
}
def __read_common_header(self) -> None:
"""
Reads common .db file header version 4.0
"""
        # read the magic identifier
        # first ten bytes are "HPCTOOLKIT" in ASCII
identifier = str(self.file.read(10), encoding=self.encoding)
assert identifier == "HPCTOOLKIT"
# next 4 bytes (u8) are the "Specific format identifier"
format_identifier = str(self.file.read(4), encoding=self.encoding)
assert format_identifier == "meta"
# next byte (u8) contains the "Common major version, currently 4"
self.major_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
# next byte (u8) contains the "Specific minor version"
self.minor_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
self.section_pointer = []
self.section_size = []
# In the header each section is given 16 bytes:
# - First 8 bytes specify the total size of the section (in bytes)
        # - Last 8 bytes specify a pointer to the beginning of the section
for i in range(len(self.read_order)):
self.section_size.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
self.section_pointer.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
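    # Byte layout consumed by __read_common_header above (as implemented):
    #   bytes [0:10)  -> "HPCTOOLKIT" magic
    #   bytes [10:14) -> "meta" format identifier
    #   byte 14       -> major version, byte 15 -> minor version
    #   then one (u64 size, u64 pointer) pair per section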
def __read_general_properties_section(
self, section_pointer: int, section_size: int
) -> None:
"""Reads the general properties of the trace.
Sets:
self.database_title: Title of the database. May be provided by the user.
self.database_description: Human-readable Markdown description of the database.
"""
# go to the right spot in the file
self.file.seek(section_pointer)
title_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
description_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
self.database_title = self.__read_string(title_pointer)
self.database_description = self.__read_string(description_pointer)
def __get_common_string(self, string_pointer: int) -> str:
"""Given the file pointer to find string, returns the string."""
if string_pointer in self.common_string_index_map:
return self.common_strings[self.common_string_index_map[string_pointer]]
else:
return None
def __read_common_string_table_section(
self, section_pointer: int, section_size: int
) -> None:
# Let's go to the section
self.file.seek(section_pointer)
# We know that this section is just a densely packed list of strings,
        # separated by the null character
# So to create a list of these strings, we'll read them all into one string then
# split them by the null character
# Reading entire section into a string
total_section: str = str(self.file.read(section_size), encoding="UTF-8")
# Splitting entire section into list of strings
self.common_strings: list[str] = total_section.split("\0")
# Now we are creating a map between the original location to the string
# to the index of the string in self.common_strings.
# This is because we are passed pointers to find the string in other sections
pointer_index = section_pointer
# pointer_index = 0
self.common_string_index_map: dict = {}
for i in range(len(self.common_strings)):
self.common_string_index_map[pointer_index] = i
pointer_index += len(self.common_strings[i]) + 1
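        # Small illustration of the mapping built above (assumed layout):
        # if the section starts at byte 1000 and holds b"foo\0bar\0", then
        # self.common_strings == ["foo", "bar", ""] and pointers 1000 and
        # 1004 map to string indices 0 and 1 respectively.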
def __get_load_modules_index(self, load_module_pointer: int) -> int:
"""
        Given the pointer to where the load module would exist in meta.db,
        returns the index of the module in self.load_modules_list.
"""
return (
load_module_pointer - self.load_modules_pointer
) // self.load_module_size
def __read_load_modules_section(
self, section_pointer: int, section_size: int
) -> None:
"""
Reads the "Load Modules" Section of meta.db.
"""
# go to the right spot in meta.db
self.file.seek(section_pointer)
# Load modules used in this database
self.load_modules_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Number of load modules listed in this section (u32)
num_load_modules = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# Size of a Load Module Specification, currently 16 (u16)
self.load_module_size = int.from_bytes(
self.file.read(2), byteorder=self.byte_order, signed=self.signed
)
        # We store each module's path in self.load_modules_list.
        # Each entry holds the index of the path string in
        # self.common_strings
self.load_modules_list: list[dict] = []
for i in range(num_load_modules):
current_index = self.load_modules_pointer + (i * self.load_module_size)
self.file.seek(current_index)
# Flags -- Reserved for future use (u32)
# flags = int.from_bytes(
# self.file.read(4), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(4)
# empty space that we need to skip
self.file.read(4)
# Full path to the associated application binary
path_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
module_map = {"string_index": self.common_string_index_map[path_pointer]}
self.load_modules_list.append(module_map)
def __read_string(self, file_pointer: int) -> str:
"""
Helper function to read a string from the file starting at the file_pointer
and ending at the first occurence of the null character
"""
self.file.seek(file_pointer)
name = ""
while True:
read = str(self.file.read(1), encoding="UTF-8")
if read == "\0":
break
name += read
return name
def get_identifier_name(self, kind: int):
"""
returns the identifier name, given the kind
"""
return self.identifier_names[kind]
def __read_identifier_names_section(
self, section_pointer: int, section_size: int
) -> None:
"""
Reads "Identifier Names" Section and Identifier Name strings in self.names_list
"""
# go to correct section of file
self.file.seek(section_pointer)
# Human-readable names for Identifier kinds
names_pointer_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Number of names listed in this section
num_names = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
self.identifier_names: list[str] = []
for i in range(num_names):
self.file.seek(names_pointer_pointer + (i * 8))
names_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
self.identifier_names.append(self.__read_string(names_pointer))
def __read_performance_metrics_section(
self, section_pointer: int, section_size: int
) -> None:
pass
def __get_function_index(self, function_pointer: int) -> int:
"""
        Given the pointer to where the function would exist in meta.db,
        returns the index of the function in self.functions_list.
"""
index = (function_pointer - self.functions_array_pointer) // self.function_size
assert index < len(self.functions_list)
return index
def __read_functions_section(self, section_pointer: int, section_size: int) -> None:
"""
Reads the "Functions" section of meta.db.
"""
# go to correct section in file
self.file.seek(section_pointer)
self.functions_array_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
num_functions = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
self.function_size = int.from_bytes(
self.file.read(2), byteorder=self.byte_order, signed=self.signed
)
self.functions_list: list[dict] = []
for i in range(num_functions):
current_index = self.functions_array_pointer + (i * self.function_size)
self.file.seek(current_index)
function_name_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
modules_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
modules_offset = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
file_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
source_line = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# flags = int.from_bytes(
# self.file.read(4), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(4)
source_file_index = None
load_module_index = None
function_name_index = None
if function_name_pointer != 0:
function_name_index = self.common_string_index_map[
function_name_pointer
]
if modules_pointer != 0:
load_module_index = self.__get_load_modules_index(modules_pointer)
# currently ignoring offset -- no idea how that's used
if file_pointer != 0:
source_file_index = self.__get_source_file_index(file_pointer)
current_function_map = {
"string_index": function_name_index,
"source_line": source_line,
"load_modules_index": load_module_index,
"source_file_index": source_file_index,
"load_modules_offset": modules_offset,
}
self.functions_list.append(current_function_map)
def __get_source_file_index(self, source_file_pointer: int) -> int:
"""
        Given the pointer to where the file would exist in meta.db,
returns the index of the file in self.source_files_list.
"""
index = (
source_file_pointer - self.source_files_pointer
) // self.source_file_size
assert index < len(self.source_files_list)
return index
def __read_source_files_section(
self, section_pointer: int, section_size: int
) -> None:
"""
Reads the "Source Files" Section of meta.db.
"""
self.file.seek(section_pointer)
# Source files used in this database
self.source_files_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Number of source files listed in this section (u32)
num_files = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# Size of a Source File Specification, currently 16 (u16)
self.source_file_size = int.from_bytes(
self.file.read(2), byteorder=self.byte_order, signed=self.signed
)
        # Looping through individual files to get their information now
self.file.seek(self.source_files_pointer)
        # We store each file's path in self.source_files_list.
        # Each entry holds the index of the path string in
        # self.common_strings
self.source_files_list: list[dict] = []
for i in range(num_files):
# Reading information about each individual source file
self.file.seek(self.source_files_pointer + (i * self.source_file_size))
# flag = int.from_bytes(
# self.file.read(4), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(4)
# empty space that we need to skip
self.file.read(4)
# Path to the source file. Absolute, or relative to the root database
# directory. The string pointed to by pPath is completely within the
# Common String Table section, including the terminating NUL byte.
file_path_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
string_index = self.common_string_index_map[file_path_pointer]
source_file_map = {"string_index": string_index}
self.source_files_list.append(source_file_map)
def __read_context_tree_section(
self, section_pointer: int, section_size: int
) -> None:
"""
Reads the "Context Tree" section of meta.db.
Loops and calls __read_single_entry_point with the correct pointer to read
the correct entry and add it to the CCT.
"""
self.cct = Graph()
self.context_map: dict[int, dict] = {}
# Reading "Context Tree" section header
# make sure we're in the right spot of the file
self.file.seek(section_pointer)
# ({Entry}[nEntryPoints]*)
entry_points_array_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# (u16)
num_entry_points = int.from_bytes(
self.file.read(2), byteorder=self.byte_order, signed=self.signed
)
# (u8)
entry_point_size = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
for i in range(num_entry_points):
current_pointer = entry_points_array_pointer + (i * entry_point_size)
self.__read_single_entry_point(current_pointer)
def __read_single_entry_point(self, entry_point_pointer: int) -> None:
"""
Reads single (root) context entry.
Reads the correct entry and adds it to the CCT.
"""
self.file.seek(entry_point_pointer)
# Reading information about child contexts
# Total size of *pChildren (I call pChildren children_pointer), in bytes (u64)
children_size = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Pointer to the array of child contexts
children_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Reading information about this context
# Unique identifier for this context (u32)
context_id = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# Type of entry point used here (u16)
# entry_point_type = int.from_bytes(
# self.file.read(2), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(2)
# next 2 bytes are blank
self.file.read(2)
# Human-readable name for the entry point
pretty_name_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# map context for this context
string_index = self.common_string_index_map[pretty_name_pointer]
context = {
"relation": -1,
"lexical_type": -1,
"function_index": -1,
"source_file_index": -1,
"source_file_line": -1,
"load_module_index": -1,
"load_module_offset": -1,
"string_index": string_index,
}
# context = {"string_index": string_index}
self.context_map[context_id] = context
# Create Node for this context
node: Node = Node(self._add_context_id(context_id), None)
# Adding the Node to the CCT
self.cct.add_root(node)
self.node_map[context_id] = node
# Reading the children contexts
self.__read_children_contexts(children_pointer, children_size, node, context_id)
def __read_children_contexts(
self,
context_array_pointer: int,
total_size: int,
parent_node: Node,
parent_context_id: int,
) -> None:
"""
Recursive function to read all child contexts and add it to the CCT
"""
if total_size <= 0 or context_array_pointer <= 0:
return
self.file.seek(context_array_pointer)
index = 0
while index < total_size:
# Reading information about child contexts (as in the children of
# this context)
# Total size of *pChildren (I call pChildren children_pointer),
# in bytes (u64)
children_size = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
index += 8
# Pointer to the array of child contexts
children_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
index += 8
# Reading information about this context
# Unique identifier for this context (u32)
context_id = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
index += 4
# Reading flags (u8)
flags = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
index += 1
# Relation this context has with its parent (u8)
relation = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
index += 1
# Type of lexical context represented (u8)
lexical_type = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
index += 1
# Size of flex, in u8[8] "words" (bytes / 8) (u8)
num_flex_words = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
index += 1
# Bitmask for defining propagation scopes (u16)
# propogation = int.from_bytes(
# self.file.read(2), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(2)
index += 2
# Empty space
self.file.read(6)
index += 6
# reading flex
flex = self.file.read(8 * num_flex_words)
index += 8 * num_flex_words
function_index: int = None
source_file_index: int = None
source_file_line: int = None
load_module_index: int = None
load_module_offset: int = None
# flex is u8[8][num_flex],
# meaning that one flex word is 8 bytes or u64
# Bit 0: hasFunction. If 1, the following sub-fields of flex are present:
# flex[0]: FS* pFunction: Function associated with this context
if flags & 1 != 0:
sub_flex = int.from_bytes(
flex[0:8], byteorder=self.byte_order, signed=self.signed
)
flex = flex[8:]
function_index = self.__get_function_index(sub_flex)
# Bit 1: hasSrcLoc. If 1, the following sub-fields of flex are present:
# flex[1]: SFS* pFile: Source file associated with this context
# flex[2]: u32 line: Associated source line in pFile
if flags & 2 != 0:
sub_flex_1 = int.from_bytes(
flex[0:8], byteorder=self.byte_order, signed=self.signed
)
sub_flex_2 = int.from_bytes(
flex[8:10], byteorder=self.byte_order, signed=self.signed
)
flex = flex[10:]
source_file_index = self.__get_source_file_index(sub_flex_1)
source_file_line = sub_flex_2
# Bit 2: hasPoint. If 1, the following sub-fields of flex are present:
# flex[3]: LMS* pModule: Load module associated with this context
# flex[4]: u64 offset: Associated byte offset in *pModule
if flags & 4 != 0:
sub_flex_1 = int.from_bytes(
flex[0:8], byteorder=self.byte_order, signed=self.signed
)
sub_flex_2 = int.from_bytes(
flex[8:16], byteorder=self.byte_order, signed=self.signed
)
flex = flex[16:]
load_module_index = self.__get_load_modules_index(sub_flex_1)
load_module_offset = sub_flex_2
# Now we take a look at the relationship and type of the context
if lexical_type == 2 or lexical_type == 3:
# source line type or single line instruction
# meaning we don't want to create a node for this
self.node_map[context_id] = parent_node
next_parent_node = parent_node
else:
# otherwise we do want to create a node
# Creating Node for this context
node = Node(self._add_context_id(context_id), parent_node)
# Connecting this node to the parent node
parent_node.add_child(node)
# Adding this node to the graph
self.node_map[context_id] = node
if lexical_type == 0:
# function call
# this means that information about the
# source file and module are with the parent
parent_information = self.context_map[parent_context_id]
if (
"string_index" in parent_information
and function_index is not None
):
# This means that the parent is the root, and it's
# information is useless
function = self.functions_list[function_index]
# Getting source file line
source_file_line = function["source_line"]
# Getting source file name
source_file_index = function["source_file_index"]
# Getting Load Module Name
load_module_index = function["load_modules_index"]
# Getting Load Module Offset
load_module_offset = function["load_modules_offset"]
else:
if source_file_index is None:
source_file_index = parent_information["source_file_index"]
if source_file_line is None:
source_file_line = parent_information["source_file_line"]
if load_module_index is None:
load_module_index = parent_information["load_module_index"]
load_module_offset = parent_information[
"load_module_offset"
]
next_parent_node = node
# creating a map for this context
context = {
"relation": relation,
"lexical_type": lexical_type,
"function_index": function_index,
"source_file_index": source_file_index,
"source_file_line": source_file_line,
"load_module_index": load_module_index,
"load_module_offset": load_module_offset,
}
self.context_map[context_id] = context
# recursively call this function to add more children
return_address = self.file.tell()
self.__read_children_contexts(
children_pointer, children_size, next_parent_node, context_id
)
self.file.seek(return_address)
class ProfileReader:
# class to read self.data from profile.db file
def __init__(self, file_location, meta_reader):
# gets the pi_ptr variable to be able to read the identifier tuples
self.meta_reader: MetaReader = meta_reader
        # open the file to read in binary mode (rb)
self.file = open(file_location, "rb")
# setting necessary read options
self.byte_order = "little"
self.signed = False
self.encoding = "ASCII"
# The profile.db header consists of the common .db header and n sections.
        # We're going to do a little setup work, so that it's easy to change
        # if any revision changes the ordering.
        # We are going to specify 2 maps:
# - One dictionary maps the section name to an index
# (which follows the order that the sections are seen
# in the profile.db header)
# - The second dictionary maps the section name to a
# function that reads the section. Each function is defined
# as __read_<section_name>_section(self, section_pointer: int,
# section_size: int) -> None
        # Here I'm mapping the section name to its order in the profile.db header
header_map = {"Profiles Information": 0, "Hierarchical Identifier Tuples": 1}
# Now let's create a function to section map
reader_map = {
"Profiles Information": self.__read_profiles_information_section,
"Hierarchical Identifier Tuples": self.__read_hit_section,
}
        # Another thing that we should consider is the order in which to read the sections.
# Here is a list of section references (x -> y means x references y)
# - "Profiles Information" -> "Hierarchical Identifier Tuples"
#
# Thus we want to keep this order when reading sections:
# "Profiles Information" -> "Hierarchical Identifier Tuples"
# Here I'm specifying the order of reading the file
self.read_order = ["Hierarchical Identifier Tuples", "Profiles Information"]
# Let's make sure that we include every section in the read order and reader_map
assert set(self.read_order) == set(header_map) and set(header_map) == set(
reader_map
)
# Now to the actual reading of the profile.db file
# reading the profile.db header
self.__read_common_header()
# now let's read all the sections
for section_name in self.read_order:
section_index = header_map[section_name]
section_pointer = self.section_pointer[section_index]
section_size = self.section_size[section_index]
section_reader = reader_map[section_name]
section_reader(section_pointer, section_size)
def __read_profiles_information_section(
self, section_pointer: int, section_size: int
) -> None:
"""
Reads Profile Information section.
"""
self.file.seek(section_pointer)
# Description for each profile (u64)
profiles_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Number of profiles listed in this section (u32)
num_profiles = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# Size of a {PI} structure, currently 40 (u8)
profile_size = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
self.profile_info_list = []
for i in range(num_profiles):
file_index = profiles_pointer + (i * profile_size)
self.file.seek(file_index)
# Header for the values for this profile
psvb = self.file.read(0x20)
# Identifier tuple for this profile
hit_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# (u32)
flags = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
profile_map = {"hit_pointer": hit_pointer, "flags": flags, "psvb": psvb}
self.profile_info_list.append(profile_map)
if hit_pointer == 0:
# this is a summary profile
self.summary_profile_index = i
def get_hit_from_profile(self, index: int) -> list:
profile = self.profile_info_list[index]
hit_pointer = profile["hit_pointer"]
if hit_pointer != 0:
return self.hit_map[hit_pointer]
else:
profile = self.profile_info_list[self.summary_profile_index]
hit_pointer = profile["hit_pointer"]
return self.hit_map[hit_pointer]
def __read_hit_section(self, section_pointer: int, section_size: int) -> None:
"""
Reads Hierarchical Identifier Tuples section of profile.db
"""
# let's get to the correct spot in the file
self.file.seek(section_pointer)
self.hit_map = {}
while (self.file.tell() - section_pointer) < section_size:
# hit pointer
hit_pointer = self.file.tell()
# print((self.file.tell() - section_pointer))
# Number of identifications in this tuple (u16)
num_tuples = int.from_bytes(
self.file.read(2), byteorder=self.byte_order, signed=self.signed
)
# empty space
self.file.read(6)
# Identifications for an application thread
# Read H.I.T.s
tuples_list = []
for i in range(num_tuples):
# One of the values listed in the profile.db
# Identifier Names section. (u8)
kind = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
# empty space
self.file.read(1)
# flag
# flags = int.from_bytes(
# self.file.read(2), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(2)
# Logical identifier value, may be arbitrary but dense towards 0. (u32)
# logical_id = int.from_bytes(
# self.file.read(4), byteorder=self.byte_order, signed=self.signed
# )
self.file.read(4)
# Physical identifier value, eg. hostid or PCI bus index. (u64)
physical_id = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
identifier_name = self.meta_reader.get_identifier_name(kind)
tuples_list.append((identifier_name, physical_id))
self.hit_map[hit_pointer] = tuples_list
def __read_common_header(self) -> None:
"""
Reads common .db file header version 4.0
"""
        # read the magic identifier
        # first ten bytes are "HPCTOOLKIT" in ASCII
identifier = str(self.file.read(10), encoding=self.encoding)
assert identifier == "HPCTOOLKIT"
# next 4 bytes (u8) are the "Specific format identifier"
format_identifier = str(self.file.read(4), encoding=self.encoding)
assert format_identifier == "prof"
# next byte (u8) contains the "Common major version, currently 4"
self.major_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
# next byte (u8) contains the "Specific minor version"
self.minor_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
self.section_pointer = []
self.section_size = []
# In the header each section is given 16 bytes:
# - First 8 bytes specify the total size of the section (in bytes)
        # - Last 8 bytes specify a pointer to the beginning of the section
for i in range(len(self.read_order)):
self.section_size.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
self.section_pointer.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
class TraceReader:
def __init__(
self, file_location: str, meta_reader: MetaReader, profile_reader: ProfileReader
) -> None:
# open file
self.file = open(file_location, "rb")
self.meta_reader = meta_reader
self.profile_reader = profile_reader
# setting necessary read options
self.byte_order = "little"
self.signed = False
self.encoding = "ASCII"
# The trace.db header consists of the common .db header and n sections.
        # We're going to do a little setup work, so that it's easy to change
        # if any revision changes the ordering.
        # We are going to specify 2 maps:
# - One dictionary maps the section name to an index
# (which follows the order that the sections are seen
# in the trace.db header)
# - The second dictionary maps the section name to a
# function that reads the section. Each function is defined
# as __read_<section_name>_section(self, section_pointer: int,
# section_size: int) -> None
        # Here I'm mapping the section name to its order in the trace.db header
header_map = {"Context Trace Headers": 0}
# Now let's create a function to section map
reader_map = {"Context Trace Headers": self.__read_trace_headers_section}
        # Another thing that we should consider is the order in which to read the sections.
# Here I'm specifying the order of reading the file
self.read_order = ["Context Trace Headers"]
# Let's make sure that we include every section in the read order and reader_map
assert set(self.read_order) == set(header_map) and set(header_map) == set(
reader_map
)
# Now to the actual reading of the trace.db file
# reading the trace.db header
self.__read_common_header()
# now let's read all the sections
for section_name in self.read_order:
section_index = header_map[section_name]
section_pointer = self.section_pointer[section_index]
section_size = self.section_size[section_index]
section_reader = reader_map[section_name]
section_reader(section_pointer, section_size)
def __read_common_header(self) -> None:
"""
Reads common .db file header version 4.0
"""
        # read the magic identifier
        # first ten bytes are "HPCTOOLKIT" in ASCII
identifier = str(self.file.read(10), encoding=self.encoding)
assert identifier == "HPCTOOLKIT"
# next 4 bytes (u8) are the "Specific format identifier"
format_identifier = str(self.file.read(4), encoding=self.encoding)
assert format_identifier == "trce"
# next byte (u8) contains the "Common major version, currently 4"
self.major_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
# next byte (u8) contains the "Specific minor version"
self.minor_version = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
self.section_pointer = []
self.section_size = []
# In the header each section is given 16 bytes:
# - First 8 bytes specify the total size of the section (in bytes)
        # - Last 8 bytes specify a pointer to the beginning of the section
for i in range(len(self.read_order)):
self.section_size.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
self.section_pointer.append(
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
)
def __read_trace_headers_section(
self, section_pointer: int, section_size: int
) -> None:
"""
        Reads the Context Trace Headers section of trace.db
"""
# get to the right place in the file
self.file.seek(section_pointer)
# Header for each trace (u64)
trace_headers_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Number of traces listed in this section (u32)
num_trace_headers = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
# Size of a {TH} structure, currently 24
trace_header_size = int.from_bytes(
self.file.read(1), byteorder=self.byte_order, signed=self.signed
)
# empty space
self.file.read(3)
# Smallest timestamp of the traces listed in *pTraces (u64)
self.min_time_stamp = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Largest timestamp of the traces listed in *pTraces (u64)
self.max_time_stamp = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
self.data = {
"Timestamp (ns)": [],
"Event Type": [],
"Name": [],
"Thread": [],
"Process": [],
"Host": [],
"Node": [],
"Source File Name": [],
"Source File Line Number": [],
"Calling Context ID": [],
}
for i in range(num_trace_headers):
header_pointer = trace_headers_pointer + (i * trace_header_size)
self.__read_single_trace_header(header_pointer)
def __read_single_trace_header(self, header_pointer: int) -> None:
"""
Reads a single trace header and all trace elements associated with it
"""
self.file.seek(header_pointer)
# Index of a profile listed in the profile.db (u32)
profile_index = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
hit = self.profile_reader.get_hit_from_profile(profile_index)
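        # Assumption documented for the hit[...] indexing further below:
        # the identifier tuple is taken to be ordered (host, rank/process,
        # thread), so hit[0][1] is the host id, hit[1][1] the process and
        # hit[2][1] the thread.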
# empty space
self.file.read(4)
# Pointer to the first element of the trace line (array)
start_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
# Pointer to the after-end element of the trace line (array)
end_pointer = int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
self.file.seek(start_pointer)
        # setting up some variables
        last_id = -1  # the previous context id
        last_node: Node = None  # node associated with the previous context
        context_id: int = -1  # the current context id
        # node associated with the current context id
        current_node: Node = None
# refers to the least common ancestor between common_node and last_node
# as in finding the node that is the parent of both common_node and last_node
common_node: Node = None
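        # Illustration of the enter/leave reconstruction below (call paths
        # are made up): if the previous sample sat at main->foo->bar and
        # the current one sits at main->foo->baz, the common node is foo,
        # so we emit a Leave for bar followed by an Enter for baz.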
while self.file.tell() < end_pointer:
# Timestamp (nanoseconds since epoch)
timestamp = (
int.from_bytes(
self.file.read(8), byteorder=self.byte_order, signed=self.signed
)
- self.min_time_stamp
)
# Sample calling context id (in meta.db)
# can use this to get name of function from meta.db
# Procedure tab
context_id = int.from_bytes(
self.file.read(4), byteorder=self.byte_order, signed=self.signed
)
if context_id == last_id:
# nothing changed between samples.
# means we don't have to do anything
continue
elif context_id == 0:
# process is idling
current_node = None
else:
# at a new non-idle context
current_node = self.meta_reader.node_map[context_id]
# First we want to close all the "enter" events from the last sample
# that aren't still running
if last_node is not None:
if current_node is None:
common_node: Node = None
else:
common_node: Node = current_node.get_intersection(last_node)
# closing each "enter" column until we reach the common_node
while last_node != common_node:
curr_ctx_id = self.meta_reader.nid_to_ctx[last_node._pipit_nid]
context_information = (
self.meta_reader.get_information_from_context_id(curr_ctx_id)
)
self.data["Name"].append(str(context_information["function"]))
if context_information["loop_type"]:
# HPCViewer only puts loops in CCT, but not trace view, so
# we use a special Loop Enter/Leave event type
self.data["Event Type"].append("Loop Leave")
else:
self.data["Event Type"].append("Leave")
self.data["Timestamp (ns)"].append(timestamp)
self.data["Process"].append(hit[1][1])
self.data["Thread"].append(hit[2][1])
self.data["Host"].append(hit[0][1])
self.data["Node"].append(last_node)
self.data["Source File Name"].append(context_information["file"])
self.data["Source File Line Number"].append(
context_information["line"]
)
self.data["Calling Context ID"].append(curr_ctx_id)
last_node = last_node.parent
# Now we want to add all the new "enter" events after
# the common_node event
if current_node is not None:
if common_node is None:
intersect_level = -1
else:
intersect_level = common_node.get_level()
entry_nodes = current_node.get_node_list(intersect_level)
for i in range(len(entry_nodes)):
entry_node = entry_nodes[-1 * i - 1]
curr_ctx_id = self.meta_reader.nid_to_ctx[entry_node._pipit_nid]
context_information = (
self.meta_reader.get_information_from_context_id(curr_ctx_id)
)
self.data["Name"].append(str(context_information["function"]))
if context_information["loop_type"]:
# HPCViewer only puts loops in CCT, but not trace view, so
# we use a special Loop Enter/Leave event type
self.data["Event Type"].append("Loop Enter")
else:
self.data["Event Type"].append("Enter")
self.data["Timestamp (ns)"].append(timestamp)
self.data["Process"].append(hit[1][1])
self.data["Thread"].append(hit[2][1])
self.data["Host"].append(hit[0][1])
self.data["Node"].append(entry_node)
self.data["Source File Name"].append(context_information["file"])
self.data["Source File Line Number"].append(
context_information["line"]
)
self.data["Calling Context ID"].append(curr_ctx_id)
last_node = current_node
last_id = context_id
# Now we want to close all the "enter" events from the last sample
current_node = None
timestamp = self.max_time_stamp - self.min_time_stamp
if last_node is not None:
if current_node is None:
common_node: Node = None
else:
common_node: Node = current_node.get_intersection(last_node)
# closing each "enter" column until we reach the common_node
while last_node != common_node:
curr_ctx_id = self.meta_reader.nid_to_ctx[last_node._pipit_nid]
context_information = self.meta_reader.get_information_from_context_id(
curr_ctx_id
)
self.data["Name"].append(str(context_information["function"]))
if context_information["loop_type"]:
self.data["Event Type"].append("Loop Leave")
else:
self.data["Event Type"].append("Leave")
self.data["Timestamp (ns)"].append(timestamp)
self.data["Process"].append(hit[1][1])
self.data["Thread"].append(hit[2][1])
self.data["Host"].append(hit[0][1])
self.data["Node"].append(last_node)
self.data["Source File Name"].append(context_information["file"])
self.data["Source File Line Number"].append(context_information["line"])
self.data["Calling Context ID"].append(curr_ctx_id)
last_node = last_node.parent
class HPCToolkitReader:
def __init__(self, directory: str) -> None:
self.meta_reader: MetaReader = MetaReader(directory + "/meta.db")
self.profile_reader = ProfileReader(directory + "/profile.db", self.meta_reader)
self.trace_reader = TraceReader(
directory + "/trace.db", self.meta_reader, self.profile_reader
)
def read(self) -> pipit.trace.Trace:
trace_df = pd.DataFrame(self.trace_reader.data)
# Need to sort df by timestamp then index
# (since many events occur at the same timestamp)
# rename the index axis, so we can sort with it
trace_df.rename_axis("index", inplace=True)
# sort by timestamp then index
trace_df.sort_values(
by=["Timestamp (ns)", "index"],
axis=0,
ascending=True,
inplace=True,
ignore_index=True,
)
trace_df = trace_df.astype(
{
"Event Type": "category",
"Name": "category",
"Thread": "category",
"Process": "category",
"Host": "category",
}
)
self.trace_df = trace_df
return pipit.trace.Trace(None, trace_df)
| hpcgroup/pipit | pipit/readers/hpctoolkit_reader.py | hpctoolkit_reader.py | py | 55,684 | python | en | code | 20 | github-code | 13 |
38256141882 | import numpy as np
from image_processing.image_processing import imageSumAlongY
from karabo.middlelayer import (
AccessMode, Assignment, Configurable, DaqDataType, DaqPolicy, Device,
Double, InputChannel, Node, OutputChannel, QuantityValue, Slot, State,
Unit, VectorDouble, VectorInt32, VectorString, get_timestamp)
from processing_utils.rate_calculator import RateCalculator
try:
from ._version import version as deviceVersion
from .common_mdl import ErrorNode
except ImportError:
from imageProcessor._version import version as deviceVersion
from imageProcessor.common_mdl import ErrorNode
class DataNode(Configurable):
daqDataType = DaqDataType.TRAIN
spectrum = VectorDouble(
displayedName="Spectrum",
accessMode=AccessMode.READONLY)
class ChannelNode(Configurable):
data = Node(DataNode)
class ImageNormRoi(Device):
# provide version for classVersion property
__version__ = deviceVersion
def __init__(self, configuration):
super().__init__(configuration)
self.output.noInputShared = "drop"
# TODO base class for MDL: interfaces, frameRate, errorCounter, input
interfaces = VectorString(
displayedName="Interfaces",
defaultValue=["Processor"],
accessMode=AccessMode.READONLY,
daqPolicy=DaqPolicy.OMIT
)
frameRate = Double(
displayedName="Input Frame Rate",
description="Rate of processed images.",
unitSymbol=Unit.HERTZ,
accessMode=AccessMode.READONLY,
defaultValue=0.
)
errorCounter = Node(ErrorNode)
@InputChannel(
raw=False,
displayedName="Input",
accessMode=AccessMode.INITONLY,
assignment=Assignment.MANDATORY)
async def input(self, data, meta):
if self.state != State.PROCESSING:
self.state = State.PROCESSING
try:
ts = get_timestamp(meta.timestamp.timestamp)
image = data.data.image.pixels.value
self.frame_rate.update()
fps = self.frame_rate.refresh()
if fps:
self.frameRate = fps
# Apply ROI and calculate integral
x_roi = self.dataRoiPosition[0]
y_roi = self.dataRoiPosition[1]
x_norm_roi = self.normRoiPosition[0]
y_norm_roi = self.normRoiPosition[1]
width_roi = self.roiSize[0]
height_roi = self.roiSize[1]
            if width_roi == 0 and height_roi == 0:
                # a [0, 0] ROI is considered invalid
                raise RuntimeError("ROI is [0, 0], please provide a valid one")
else:
# First the data_image
data_image = image[y_roi:y_roi + height_roi,
x_roi:x_roi + width_roi]
norm_image = image[y_norm_roi:y_norm_roi + height_roi,
x_norm_roi:x_norm_roi + width_roi]
                # Convert to double and subtract the normalization ROI
data = data_image.astype('double')
norm = norm_image.astype('double')
difference = data - norm
spectrum = imageSumAlongY(difference)
self.spectrumIntegral = QuantityValue(spectrum.sum(),
timestamp=ts)
self.errorCounter.update_count() # success
if self.status != "PROCESSING":
self.status = "PROCESSING"
except Exception as e:
spectrum = np.full((1,), np.nan)
self.spectrumIntegral = QuantityValue(np.NaN, timestamp=ts)
if self.errorCounter.warnCondition == 0:
# Only update if not yet in WARN
msg = f"Exception while processing input image: {e}"
self.status = msg
self.log.ERROR(msg)
self.errorCounter.update_count(True)
# Write spectrum to output channel
self.output.schema.data.spectrum = spectrum.tolist()
await self.output.writeData(timestamp=ts)
@input.endOfStream
def input(self, name):
self.frameRate = 0.
if self.state != State.ON:
self.state = State.ON
roi_default = [0, 0]
@VectorInt32(
displayedName="ROI Size",
description="The user-defined region of interest (ROI), "
"specified as [width_roi, height_roi]. ",
minSize=2,
maxSize=2,
defaultValue=roi_default)
def roiSize(self, value):
if value is None:
self.logger.error(f"Invalid initial ROI = {value.value}, reset to "
"default.")
self.roiSize = [0, 0]
return
self.roiSize = value
dataRoiPosition = VectorInt32(
displayedName="Data Roi Position",
description="The user-defined position of the data ROI of the "
"image [x, y]. Coordinates are taken top-left!",
minSize=2,
maxSize=2,
defaultValue=roi_default)
normRoiPosition = VectorInt32(
displayedName="Norm Roi Position",
description="The user-defined position of the ROI to normalize the "
"image [x, y]. Coordinates are taken top-left!",
minSize=2,
maxSize=2,
defaultValue=roi_default)
output = OutputChannel(
ChannelNode,
displayedName="Output")
spectrumIntegral = Double(
displayedName="Spectrum Integral",
description="Integral of the spectrum, after applying ROI.",
accessMode=AccessMode.READONLY)
@Slot(displayedName='Reset', description="Reset error count.")
async def resetError(self):
self.errorCounter.error_counter.clear()
self.errorCounter.evaluate_warn()
if self.state != State.ON:
self.state = State.ON
async def onInitialization(self):
""" This method will be called when the device starts.
"""
self.frame_rate = RateCalculator(refresh_interval=1.0)
self.state = State.ON
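
# A minimal numpy sketch (illustrative only, not part of the device) of the
# normalization performed in input(): crop equally sized data and norm ROIs,
# subtract them, and integrate the column sums (assuming imageSumAlongY sums
# the image along the Y axis):
#   import numpy as np
#   image = np.random.rand(100, 100)
#   data = image[10:20, 10:30].astype('double')   # assumed ROI coordinates
#   norm = image[50:60, 40:60].astype('double')
#   spectrum = (data - norm).sum(axis=0)
#   integral = spectrum.sum()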
| European-XFEL/imageProcessor | src/imageProcessor/ImageNormRoi.py | ImageNormRoi.py | py | 6,044 | python | en | code | 0 | github-code | 13 |
24000199053 | from collections import defaultdict, deque
from itertools import accumulate
from typing import IO, Deque, Dict, Generic, Hashable, Iterable, TypeVar
H = TypeVar("H", bound=Hashable)
class RollingWindow(Generic[H]):
def __init__(self, size: int):
self.size = size
self.q: Deque[H] = deque()
self.nunique = 0
self.counts: Dict[H, int] = defaultdict(int)
def add(self, item: H) -> "RollingWindow":
if len(self.q) == self.size:
self.pop()
self.q.append(item)
c = self.counts[item]
self.counts[item] = c + 1
if c == 0:
self.nunique += 1
return self
def pop(self) -> H:
item = self.q.popleft()
c = self.counts[item]
self.counts[item] = c - 1
if c == 1:
self.nunique -= 1
return item
def first_unique_substring_index(message: Iterable[H], size: int) -> int:
window: RollingWindow[H] = RollingWindow(size)
windows = accumulate(message, RollingWindow.add, initial=window)
return next(ix for ix, w in enumerate(windows) if w.nunique == size)
def run(input_: IO[str], n: int = 14) -> int:
chars = iter(lambda: input_.read(1), "")
return first_unique_substring_index(chars, n)
def test():
for s, n in [
("bvwbjplbgvbhsrlpgdmjqwftvncz", 5),
("nppdvjthqldpwncqszvftbrmjlhg", 6),
("nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg", 10),
("zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw", 11),
]:
assert first_unique_substring_index(s, 4) == n
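
# Hedged usage sketch (input file name assumed): stream puzzle input through run().
# if __name__ == "__main__":
#     with open("input/day06.txt") as fh:
#         print(run(fh))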
| mattHawthorn/advent_of_code_2022 | solutions/day06.py | day06.py | py | 1,547 | python | en | code | 1 | github-code | 13 |
17061317154 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class VehicleDashboardResult(object):
def __init__(self):
self._class_name = None
self._label = None
self._score = None
@property
def class_name(self):
return self._class_name
@class_name.setter
def class_name(self, value):
self._class_name = value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
def to_alipay_dict(self):
params = dict()
if self.class_name:
if hasattr(self.class_name, 'to_alipay_dict'):
params['class_name'] = self.class_name.to_alipay_dict()
else:
params['class_name'] = self.class_name
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.score:
if hasattr(self.score, 'to_alipay_dict'):
params['score'] = self.score.to_alipay_dict()
else:
params['score'] = self.score
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = VehicleDashboardResult()
if 'class_name' in d:
o.class_name = d['class_name']
if 'label' in d:
o.label = d['label']
if 'score' in d:
o.score = d['score']
return o
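
# Illustrative round trip through the dict helpers (field values are made up):
#   d = {'class_name': 'dashboard', 'label': 'speedometer', 'score': 0.98}
#   obj = VehicleDashboardResult.from_alipay_dict(d)
#   assert obj.to_alipay_dict() == d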
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/VehicleDashboardResult.py | VehicleDashboardResult.py | py | 1,749 | python | en | code | 241 | github-code | 13 |
16788282154 | """
This is a basic class to create a carla env.
"""
# ==================================================
# import carla module
from train.gym_carla.config.carla_config import version_config
carla_version = version_config['carla_version']
root_path = version_config['root_path']
import glob
import os
import sys
carla_root = os.path.join(root_path, 'CARLA_'+carla_version)
carla_path = os.path.join(carla_root, 'PythonAPI')
sys.path.append(carla_path)
sys.path.append(os.path.join(carla_root, 'PythonAPI/carla'))
sys.path.append(os.path.join(carla_root, 'PythonAPI/carla/agents'))
try:
sys.path.append(glob.glob(carla_path + '/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import argparse
import logging
import time
import heapq
from datetime import datetime
import random
import numpy as np
import math
import traceback
# common carla color
red = carla.Color(r=255, g=0, b=0)
green = carla.Color(r=0, g=255, b=0)
blue = carla.Color(r=0, g=0, b=255)
yellow = carla.Color(r=255, g=255, b=0)
magenta = carla.Color(r=255, g=0, b=255)
cyan = carla.Color(r=0, g=255, b=255)
orange = carla.Color(r=255, g=162, b=0)
white = carla.Color(r=255, g=255, b=255)
class BasicEnv:
"""
The basic class to generate a carla env for test.
"""
def __init__(self,
host='localhost',
port=int(2000),
tm_port=int(8000),
town='Town03',
client_timeout=100.0,
timestep=0.05,
frequency=None,
sync_mode=True,
):
# setup client
self.client = carla.Client(host, port)
self.client.set_timeout(client_timeout)
self.map_name = town # str, name of the map
self.world = self.client.load_world(town)
self.map = self.world.get_map()
self.debug_helper = self.world.debug # world debug for plot
self.blueprint_library = self.world.get_blueprint_library() # blueprint
# frequency has priority
if frequency:
self.timestep = 1 / frequency
else:
self.timestep = timestep
self.sync_mode = sync_mode
self.set_world(sync_mode=sync_mode) # world settings
self.set_weather() # weather
self.spectator = self.world.get_spectator()
self.traffic_manager = self.client.get_trafficmanager(tm_port) # check carla version before using
# vehicles information
self.ego_vehicle = None
# if there are multiple ego vehicles
self.ego_vehicles = []
# vehicles except ego vehicle
self.npc_vehicles = []
def reload_carla_world(self, reset_settings=False):
"""
        Reload the carla world.
        todo: carla.Client.reload_world() is fixed in carla 0.9.11;
        previous world settings can then be kept via the reset_settings arg.
"""
self.world = self.client.load_world(self.map_name)
# todo fix api with newer version of carla, add all setting params to args
# set world with previous settings
self.set_world(sync_mode=self.sync_mode)
# update world related attributes
self.map = self.world.get_map()
self.debug_helper = self.world.debug
self.blueprint_library = self.world.get_blueprint_library()
self.spectator = self.world.get_spectator()
def get_env_api(self):
"""
Get carla environment management API.
"""
carla_management = {
'client': self.client,
'world': self.world,
'map': self.map, # carla.Map
'debug_helper': self.debug_helper,
'blueprint_library': self.blueprint_library,
'spectator': self.spectator,
'traffic_manager': self.traffic_manager,
}
return carla_management
def set_world(self, sync_mode=True, no_render_mode=False):
"""
Setup carla world settings.
        Under synchronous mode (sync_mode=True), world.tick() must be called for the simulation to advance
"""
settings = self.world.get_settings()
# world settings parameters
settings.fixed_delta_seconds = self.timestep
settings.no_rendering_mode = no_render_mode
settings.synchronous_mode = sync_mode
self.world.apply_settings(settings)
self.world.tick() # refresh world
def set_weather(self, weather='ClearNoon'):
"""
Set weather for the world
Common weather in carla:
ClearNoon, CloudyNoon, WetNoon, WetCloudyNoon,
SoftRainNoon, MidRainyNoon, HardRainNoon, ClearSunset,
CloudySunset, WetSunset, WetCloudySunset, SoftRainSunset,
MidRainSunset, HardRainSunset.
"""
weather_dict = {
'ClearNoon': carla.WeatherParameters.ClearNoon,
'CloudyNoon': carla.WeatherParameters.CloudyNoon,
'WetNoon': carla.WeatherParameters.WetNoon,
'WetCloudyNoon': carla.WeatherParameters.WetCloudyNoon,
'SoftRainNoon': carla.WeatherParameters.SoftRainNoon,
'MidRainyNoon': carla.WeatherParameters.MidRainyNoon,
'HardRainNoon': carla.WeatherParameters.HardRainNoon,
'ClearSunset': carla.WeatherParameters.ClearSunset,
'CloudySunset': carla.WeatherParameters.CloudySunset,
'WetSunset': carla.WeatherParameters.WetSunset,
'WetCloudySunset': carla.WeatherParameters.WetCloudySunset,
'SoftRainSunset': carla.WeatherParameters.SoftRainSunset,
'MidRainSunset': carla.WeatherParameters.MidRainSunset,
'HardRainSunset': carla.WeatherParameters.HardRainSunset,
}
weather_selection = None
for key in weather_dict:
if key == weather:
weather_selection = weather_dict[key]
if not weather_selection:
print('Specified weather not found. ClearNoon is set.')
weather_selection = carla.WeatherParameters.ClearNoon
self.world.set_weather(weather_selection)
self.world.tick()
def set_spectator_actor(self, actor):
"""Set behind view on an actor"""
transform = actor.get_transform()
# location = transform.location
# rotation = transform.rotation
# behind distance - d, height - h
_d = 8
_h = 6
angle = transform.rotation.yaw
a = math.radians(180 + angle)
location = carla.Location(x=_d * math.cos(a),
y=_d * math.sin(a),
z=_h) + transform.location
rotation = carla.Rotation(yaw=angle, pitch=6)
self.spectator.set_transform(carla.Transform(location, rotation))
self.world.tick()
print("Spectator is set to behind view.")
def set_spectator_overhead(self, location, yaw=0, h=50):
"""
Set spectator from an overview.
param location: location of the spectator
param h(float): height of spectator when using the overhead view
"""
height = h
# height = 100
location = carla.Location(0, 0, height) + location
rotation = carla.Rotation(yaw=yaw, pitch=-90) # rotate to forward direction
self.spectator.set_transform(carla.Transform(location, rotation))
self.world.tick()
print("Spectator is set to overhead view.")
def draw_waypoint(self, transform, color=(red, green)):
"""
Draw a point determined by transform(or waypoint).
A spot to mark location
An arrow to x-axis direction vector
:param transform: carla.Transform or carla.Waypoint
:param color: color of arrow and spot
"""
if isinstance(transform, carla.Waypoint):
transform = transform.transform
scalar = 1.5
yaw = np.deg2rad(transform.rotation.yaw)
vector = scalar * np.array([np.cos(yaw), np.sin(yaw)])
start = transform.location
end = start + carla.Location(x=vector[0], y=vector[1], z=0)
# plot the waypoint
self.debug_helper.draw_point(start, size=0.05, color=color[0], life_time=99999)
self.debug_helper.draw_arrow(start, end, thickness=0.25, arrow_size=0.20, color=color[1], life_time=99999)
self.world.tick()
def update_vehicles(self):
"""
Update existing vehicles in current world.
"""
self.npc_vehicles = []
self.ego_vehicle = None
# update vehicle list
actor_list = self.world.get_actors()
vehicle_list = actor_list.filter('vehicle.*') # as a actorlist instance, iterable
if vehicle_list:
for veh in vehicle_list:
attr = veh.attributes # dict
# filter ego vehicle by role name
if attr['role_name'] == 'ego' or attr['role_name'] == 'hero':
self.ego_vehicle = veh
else:
self.npc_vehicles.append(veh)
# print('All vehicles are updated.')
# if not self.ego_vehicle:
# print('ego vehicle not found.')
@staticmethod
def coord2loc(coords):
"""
        Transform coordinates to a carla location.
        The z coordinate defaults to 0 (on the ground).
        :param coords: coordinates as np.array or list.
:return: carla.location
"""
location = carla.Location(x=float(coords[0]), y=float(coords[1]), z=0.0)
return location
@staticmethod
def loc2coord(location):
"""
Transform a carla location to coords.
:param location: carla.Location
:return: coordinate in carla world.
"""
coords = np.array([location.x, location.y, location.z])
return coords
def test():
env = BasicEnv()
env.set_world(sync_mode=False)
    # junction in Town03
junction_center = carla.Location(x=-1.32, y=132.69, z=0.00)
env.set_spectator_overhead(junction_center, h=75)
print('test env is created.')
if __name__ == '__main__':
test()
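
# Hedged extension sketch (blueprint id and attribute names assumed from the
# CARLA Python API): spawning an ego vehicle into the test environment.
# env = BasicEnv()
# bp = env.blueprint_library.find('vehicle.tesla.model3')
# bp.set_attribute('role_name', 'ego')
# spawn_point = env.map.get_spawn_points()[0]
# ego = env.world.spawn_actor(bp, spawn_point)
# env.update_vehicles()  # env.ego_vehicle should now be set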
| liuyuqi123/ComplexUrbanScenarios | train/gym_carla/envs/BasicEnv.py | BasicEnv.py | py | 10,264 | python | en | code | 37 | github-code | 13 |
27732617006 | """Module for generating test data and adding them to database"""
import random
import string
from typing import Optional, Union
from app.db import Student, Group, Course, db_session
# 20 first names.
FIRST_NAME = ['Monica', 'Rachel', 'Phoeby', 'Daniela', 'Rebecca', 'Eva',
'Alexandra', 'Katherine', 'Lisa', 'Hannah', 'Sonny', 'Pedro',
'Kasey', 'George', 'Jan', 'Devon', 'Alexander', 'Markus',
'Logan', 'Israel']
# 20 last names.
LAST_NAME = ['Mcneil', 'Fritz', 'Hansen', 'Hampton', 'Neal', 'Phelps',
'Mccormick', 'Sullivan', 'Bass', 'Madden', 'Brock', 'Jarvis',
'Vance', 'Delgado', 'Harrison', 'Hart', 'Wong', 'Wong', 'Newton',
'Hopkins']
# 10 courses.
LIST_OF_COURSES = ['Mathematics', 'History', 'Literature', 'Art', 'Computer Science',
'Philosophy', 'Biology', 'Chemistry', 'Physics', 'Sociology']
# Number of randomly generated groups.
NUMBER_OF_GROUPS = 10
# Number of choices.
NUMBER_OF_CHOICES = 2
# Number of randomly generated students.
NUMBER_OF_STUDENTS = 200
# Number of students in the group (from 10 to 30)
START_RANGE = 10
END_RANGE = 31
# Random course number (from 1 to 3)
COURSE_START_RANGE = 1
COURSE_END_RANGE = 4
def generate_groups() -> list[str]:
"""Generates random groups.
Generates 10 groups with randomly generated names.
    Each name contains 2 uppercase letters, a hyphen, and 2 digits.
Example:
Group name: AB-21.
Returns:
List of group names.
"""
groups = []
for _ in range(NUMBER_OF_GROUPS):
# Get 2 random characters.
letters = ''.join(random.choices(string.ascii_uppercase, k=NUMBER_OF_CHOICES))
# Get 2 random digits.
numbers = ''.join(random.choices(string.digits, k=NUMBER_OF_CHOICES))
# Generate group name.
group_name = '-'.join([letters, numbers])
groups.append(group_name)
return groups
def generate_students() -> list[str]:
"""Generates random students.
Generates 200 students randomly combining 20 first names and 20 last names.
Returns:
List of students.
"""
students = []
for _ in range(NUMBER_OF_STUDENTS):
# Generate student full name.
student = ' '.join([random.choice(FIRST_NAME), random.choice(LAST_NAME)])
students.append(student)
return students
def students_to_groups() -> dict[Optional[str], Union[list[str], list]]:
"""Randomly assigns students to groups.
Each group could contain from 10 to 30 students. It is possible
that some groups will be without students or students without groups.
Returns:
Dictionary of groups and students.
"""
# Get names of group.
groups = generate_groups()
# Get names of students.
students = generate_students()
student_groups = {}
while True:
# Get random number of students.
number = random.choice(range(START_RANGE, END_RANGE))
# Check if number of students left is not less than random number and
# empty group is available.
if len(students) >= number and groups:
# Shuffle list of students and groups.
random.shuffle(students)
random.shuffle(groups)
# get randomly generated number of students.
random_students = students[:number]
# Update list of students.
students = students[number:]
# Add students to the group, where group is key
# and list of students is value.
student_groups[groups.pop(-1)] = random_students
else:
break
while groups:
# Assign empty list to group.
student_groups[groups.pop(-1)] = []
if students:
# Assign student list with no group to None.
student_groups[None] = students
return student_groups
def get_courses_with_description() -> dict[str, str]:
"""Create courses with description.
Returns:
Dictionary where key is course and value is description.
"""
course_description = {course: f'Subject of {course}' for course in LIST_OF_COURSES}
return course_description
def add_test_data_to_groups_table(list_of_groups: list) -> None:
"""Adds test data to groups table.
Args:
list_of_groups: list of randomly generated groups.
"""
Group.create_multiple_groups(list_of_groups)
def add_test_data_to_students_table(
        students_groups: dict[Optional[str], Union[list[str], list]]) -> None:
"""Add test data to students table.
Args:
students_groups: dictionary of groups and students.
"""
for group, students in students_groups.items():
# Create students in single group.
Student.create_multiple_students(students, group)
def add_test_data_to_course_table() -> None:
"""Add test data to course table."""
# Get input data.
course_dict = get_courses_with_description()
Course.create_multiple_courses(course_dict)
def add_students_to_course() -> None:
"""Assign course to student.
Randomly assign from 1 to 3 courses for each student.
"""
with db_session() as session:
# Get all students.
students = session.query(Student).all()
for student in students:
# For each student get random number from 1 to 3 and get equivalent
# number of unique courses from list of courses.
number = random.choice(range(COURSE_START_RANGE, COURSE_END_RANGE))
course_list = random.sample(LIST_OF_COURSES, k=number)
for course_name in course_list:
# Assign each course to student.
course = session.query(Course).filter(Course.course_name == course_name).first()
student.courses.append(course)
session.commit()
def add_test_data_to_database() -> None:
"""Adds test data to database"""
    # Check if the DB already has test data in it.
with db_session() as session:
if not session.query(Student).first():
# Get randomly generated groups and students.
students_groups = students_to_groups()
# Remove all None value keys to create groups.
group_list = [group
for group in list(students_groups.keys())
if group is not None]
add_test_data_to_groups_table(group_list)
add_test_data_to_students_table(students_groups)
add_test_data_to_course_table()
add_students_to_course()
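

# Hedged usage sketch: running the module directly to populate the database once.
if __name__ == '__main__':
    add_test_data_to_database()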
| SvyatElkind/students-courses | app/db/test_data.py | test_data.py | py | 6,550 | python | en | code | 0 | github-code | 13 |
33391965432 | # This is the main script used to identify CPT URLs from NZGD.
# Make sure you get authorization to download data from NZGD; you need a username and password to log in.
# Charles Wang
# updated Aug 02, 2017
import requests
import numpy as np
from bs4 import BeautifulSoup as bs
import selenium
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
import wget
home = 'https://www.nzgd.org.nz/(S(scnf1sdhnfocsffjm5zfl12v))/ARCGISMapViewer/mapviewer.aspx'
link = 'https://www.nzgd.org.nz/(S(z0hp2wn5amri2plh1wj5sgpw))/GeotechnicalInvestigationDataEntry.aspx?popup=1#/Upload/Location/64/InvestigationLog/64'
# configure the browser
browser = webdriver.Chrome('/usr/local/bin/chromedriver') # Get local session of chrome
c = browser.get(link)
time.sleep(3)
elem_user = browser.find_element_by_id("UserName")
elem_user.send_keys("your login email") # email
elem_pwd = browser.find_element_by_id("Password")
elem_pwd.send_keys("your password") # password
login_url = browser.current_url
# click login
try:
login_btn = browser.find_element_by_id("LoginButton").click()
except:
print("Login failed!\n")
time.sleep(5)
# check login status
waittime = 5
while browser.current_url == login_url:
print('waiting another 1s to login...')
time.sleep(1)
waittime += 1
if waittime == 10:
print("too long to login, stoped.")
exit()
print('login maybe succeeded...\n')
# get cookies
cookies = browser.get_cookies()
cookie = [item["name"] + "=" + item["value"] for item in cookies]
cookiestr = ';'.join(item for item in cookie)
# go to the mapview
c = browser.get(home)
# now you can zoom the map to select your ROI and customize your data, then wait...
time.sleep(120)
# hide some elements in the mapview
b1 = browser.find_element_by_class_name('esriControlsBR')
b2 = browser.find_element_by_id('map_zoom_slider')
b3 = browser.find_element_by_id('mapLeftMenu')
b4 = browser.find_element_by_id('mapMenuContainer')
b5 = browser.find_element_by_class_name('esriScalebar')
browser.execute_script("arguments[0].parentNode.removeChild(arguments[0]);", b1)
browser.execute_script("arguments[0].parentNode.removeChild(arguments[0]);", b2)
browser.execute_script("arguments[0].parentNode.removeChild(arguments[0]);", b3)
browser.execute_script("arguments[0].parentNode.removeChild(arguments[0]);", b4)
browser.execute_script("arguments[0].parentNode.removeChild(arguments[0]);", b5)
# find the handles of symbols, they are too large
container = browser.find_element_by_id('Investigation_Log_Location_layer')
images = container.find_elements_by_css_selector("image[width*='20']")
# reduce the sizes of symbols
for point in images:
browser.execute_script("arguments[0].setAttribute('width','5');", point)
browser.execute_script("arguments[0].setAttribute('height','5');", point)
# read urls from each symbol; each one links to the page of a data point where an investigation was made.
pop = browser.find_element_by_class_name('esriPopup')
pagelinks=[]
count=0.0
all=len(images)
for point in images:
browser.execute_script("arguments[0].style.visibility='hidden';", pop)
try:
point.click()
time.sleep(4)
#browser.execute_script("arguments[0].style.visibility='visible';", pop)
frame = browser.find_element_by_tag_name('iframe')
pagelink = frame.get_attribute("src")
pagelinks.append(pagelink.strip()+"\n")
except:
print("not clicked...")
count += 1
print(count/all*100.0)
# output urls
with open("pagelinks.txt", "w+") as fo:
line = fo.writelines(pagelinks)
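
# Hedged follow-up sketch (not in the original script): fetch each collected page
# with the logged-in session cookie so the requests stay authorized.
# for pagelink in pagelinks:
#     resp = requests.get(pagelink.strip(), headers={'Cookie': cookiestr}, timeout=30)
#     # ... parse resp.text with BeautifulSoup to locate the CPT download links ...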
| charlesxwang/NZGD-DataHack | hackNZGDReally.py | hackNZGDReally.py | py | 3,695 | python | en | code | 0 | github-code | 13 |
74087814739 | # CRISTIAN ECHEVERRÍA RABÍ
import wx
from cer.widgets import cw
import cer.widgets.propeditor as pe
#-----------------------------------------------------------------------------------------
class Cuenta(object):
def __init__(self, banco, numero):
self.banco = banco
self.numero = numero
class Curso(object):
def __init__(self, nombre, alumnos):
self.nombre = nombre
self.alumnos = alumnos
class Persona(object):
def __init__(self, nombre, edad, peso, deporte):
self.nombre = nombre
self.edad = edad
self.peso = peso
self.deporte = deporte
self.casado = 1
self.cuenta = None
self.curso = None
yo = Persona("Cristian", 38, 85, "Futbol")
yo.cuenta = Cuenta("Santander", 100200)
yo.curso = Curso("Matematica", 35)
curso2 = Curso("Historia", 25)
curso3 = Curso("Lenguaje", 6)
def getSiNo(value):
if value == 1:
return "Si"
else:
return "No"
#-----------------------------------------------------------------------------------------
data1 = pe.EditorData(
pe.Group("Generales", msg="Datos Generales"),
pe.Text("nombre", msg="Nombre completo"),
pe.Float("edad", msg=u"Edad en años", unit="Años"),
pe.Float("peso", vmin=0, msg=u"Peso del empleado en kilos", unit="Kg"),
pe.Choice("deporte", ["Futbol","Tenis","Voleiball","Otras"], msg="Actividad Extra-programatica"),
pe.Switch("casado", [1,2], getText=getSiNo),
pe.Group("Cuenta Bancaria", msg="Datos cuenta banco"),
pe.Text("cuenta.banco", u"Cuenta", msg=u"Nombre del Banco"),
pe.Int("cuenta.numero", u"N°", msg=u"N° de cuenta"),
pe.Group("Curso", msg="Ejemplo de como seleccionar de una lista de objetos y mostrar detalles"),
pe.Choice("curso", [yo.curso, curso2, curso3], "Nombre", getText=cw.GetTextAttrFunc("nombre"), msg="Selecione curso"),
pe.Int("curso.alumnos", u"N° alumnos", msg=u"N° de alumnos", edit=False)
)
#data1.Edit = False
#-----------------------------------------------------------------------------------------
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,"PropertyEditor Test", size=(500,600))
p = wx.Panel(self,-1)
box = wx.BoxSizer(wx.VERTICAL)
txt = cw.TipBox(p, size=(-1,100), showValue=True)
plc1 = pe.Editor(p, data1, yo, colswidth=(150, 130, 100))#, style=cw.PREDIT_DEF_STYLE|wx.TR_TWIST_BUTTONS)
plc1.MsgBox = txt
box.Add(plc1, 1, wx.EXPAND|wx.ALL, 5)
#box.Add(plc2, 1, wx.EXPAND|wx.ALL, 5)
box.Add(txt, 0, wx.EXPAND|wx.ALL, 5)
p.SetAutoLayout(True)
p.SetSizer(box)
box.Fit(p)
#self.Fit()
plc1.Bind(pe.EVTC_PREDIT_VALCHANGE, self.Test)#, id=pe1.GetId())
self.Center(wx.BOTH)
def Test(self, event):
ctrl = event.Ctrl
ctrl.UpdateView()
name = event.Item.Name
print("Cambia valor de la propiedad %s" % name)
print(">>>>>", ctrl.Obj is yo)
obj = ctrl.Obj
print (obj.nombre, obj.edad, obj.peso, obj.deporte)
print (obj.cuenta.banco, obj.cuenta.numero, id(obj.cuenta))
print (obj.curso.nombre, obj.curso.alumnos, id(obj.curso))
print (obj.casado)
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
from wx.lib import colourdb
    app = wx.App(False) # True to capture stderr and stdout
app.SetAssertMode(wx.APP_ASSERT_DIALOG)
colourdb.updateColourDB()
MainFrame().Show(True)
app.MainLoop() | cer1969/py-cer-widgets | propeditor/test/pe_test.py | pe_test.py | py | 3,775 | python | es | code | 1 | github-code | 13 |
41632153560 | from django.db import models
from datetime import date
# Create your models here.
class EventType(models.Model):
"""
A list of event types that will be associated with
public documents. This will be used to create a picklist
for ChurchEvents.
"""
eventType = models.CharField(max_length=30)
def __unicode__(self):
"""
Event type
"""
return self.eventType
class Language(models.Model):
"""
A list of languages that will be associated with
public documents. This will be used to create a picklist
for ChurchEvents.
"""
language = models.CharField(max_length=20)
def __unicode__(self):
"""
Language name, in unicode so can use proper characterset.
"""
return self.language
class ChurchEvent(models.Model):
"""
Church events are associated with documents that will be
uploaded through a password-protected admin page to
MEDIA_ROOT and will then be available for public download.
"""
eventType = models.ForeignKey(EventType)
presenter = models.CharField(max_length=50)
date = models.DateTimeField()
class Meta:
ordering = ['date']
def __unicode__(self):
"""
Unicode string representation of the sermon,
which will be abbreviated at first.
"""
return '\n'.join([
'Presenter: ' + self.presenter,
            'Date: ' + self.date.date().isoformat()])
class EventDocument(models.Model):
"""
    A list of public documents associated with a ChurchEvent.
"""
churchEvent = models.ForeignKey('ChurchEvent')
language = models.ForeignKey(Language)
title = models.CharField(max_length=200)
filepath = models.FileField(upload_to='upload')
def __unicode__(self):
"""
Unicode string representation of the sermon,
which will be abbreviated at first.
"""
return '\n'.join([
'Title: ' + self.title])
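
# Hedged usage sketch (e.g. from a Django shell; field values are made up and a
# datetime import is assumed):
# lang = Language.objects.create(language='English')
# etype = EventType.objects.create(eventType='Sermon')
# event = ChurchEvent.objects.create(eventType=etype, presenter='J. Doe',
#                                    date=datetime.now())
# EventDocument.objects.create(churchEvent=event, language=lang,
#                              title='Sunday sermon', filepath='upload/sermon.pdf')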
| redmanr/whuc | whuc/church/models.py | models.py | py | 2,014 | python | en | code | 0 | github-code | 13 |
41768211071 | import discord
import requests
import asyncio
import configparser
import sqlite3
from time import sleep
def get_last_rate():
try:
nano = requests.get("https://nanex.co/api/public/ticker/grlcnano", timeout=10)
except requests.Timeout:
return None
else:
last_rate = float(nano.json()["last_trade"])
if last_rate:
return last_rate
else:
return None
def deactivate(to_deactivate):
with sqlite3.connect("nanexbot.sqlite3") as db:
cursor = db.cursor()
for table_id in to_deactivate[0]:
sql = 'UPDATE `sell` SET `active` = "0" WHERE `id` = "{}";'.format(table_id)
cursor.execute(sql)
for table_id in to_deactivate[1]:
sql = 'UPDATE `buy` SET `active` = "0" WHERE `id` = "{}";'.format(table_id)
cursor.execute(sql)
db.commit()
def get_active_alarms(table, price):
comp_operator = {"sell": "<=", "buy": ">="}
with sqlite3.connect("nanexbot.sqlite3") as db:
cursor = db.cursor()
sql = 'SELECT `id`, `user_id`, `price` FROM {0} WHERE `active` = 1 AND `price` {1} {2}'.format(table, comp_operator[table], price)
print(sql)
cursor.execute(sql)
alarms = cursor.fetchall()
return alarms
def get_warnings(price):
# Get all active sell alarms that are >= price (table id, user_id and value)
sell_alarms = get_active_alarms("sell", price)
# Get all active buy alarms that are <= price (table id, user_id and value)
buy_alarms = get_active_alarms("buy", price)
return sell_alarms, buy_alarms
def get_user(client, uid):
for server in client.servers:
user = server.get_member(str(uid))
if user:
return user
print("Can't find user! (uid = {})".format(uid))
return None
def main():
conf = configparser.RawConfigParser()
conf.read("config.txt")
BOT_TOKEN = conf.get('nanexbot_conf', 'BOT_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as {} <@{}>'.format(client.user.name, client.user.id))
print('------')
last_price = 0
while True:
current_price = get_last_rate()
# Check the database only if the price changed
if current_price != last_price:
sell_warnings, buy_warnings = get_warnings(current_price)
# Check if list is empty
if len(sell_warnings) > 0:
# Iterate through each users and send them a PM
for warning in sell_warnings:
                        # Load a User object using its id and the server to which the bot is connected
if warning[2] == current_price:
await client.send_message(get_user(client, warning[1]), "Your {} order might be sold! (last trade: {})".format(warning[2], current_price))
else:
await client.send_message(get_user(client, warning[1]), "Your {} order is sold! (last trade: {})".format(warning[2], current_price))
if len(buy_warnings) > 0:
# Iterate through each users and send them a PM
for warning in buy_warnings:
if warning[2] == current_price:
print(int(warning[1]))
await client.send_message(get_user(client, warning[1]), "Your {} order might be bought! (last trade: {})".format(warning[2], current_price))
else:
print(int(warning[1]))
await client.send_message(get_user(client, warning[1]), "Your {} order is bought! (last trade: {})".format(warning[2], current_price))
# Set active to 0 in the DB if the price != last trade (trade might be done, but not sure)
# to_deactivate = [[SELL], [BUY]]
to_deactivate = [[], []]
                for warnings in sell_warnings:
                    if warnings[2] != current_price:
                        to_deactivate[0].append(warnings[0])
                for warnings in buy_warnings:
                    if warnings[2] != current_price:
                        to_deactivate[1].append(warnings[0])
deactivate(to_deactivate)
# Set current_price to last_price
last_price = current_price
sleep(60)
client.run(BOT_TOKEN)
if __name__ == "__main__":
main()
| GarlicoinForum/NanexBot | price_watcher.py | price_watcher.py | py | 4,549 | python | en | code | 3 | github-code | 13 |
25155734092 | def set_permission(cfg, team, repo, permission):
cfg.increase_rate_counter()
team_handle = cfg.org_handle.get_team_by_slug(team)
cfg.increase_rate_counter()
repo_handle = cfg.org_handle.get_repo(repo)
# check if team is already added to repo
tmp_list = []
cfg.increase_rate_counter()
for tmp_handle in team_handle.get_repos():
tmp_list.append(tmp_handle.name)
# team has to be added to the repo
if repo not in tmp_list:
if not cfg.dry_run:
cfg.increase_rate_counter()
team_handle.update_team_repository(repo_handle, permission)
cfg.log_addition(f"{team} repo {repo} permission {permission}")
# team is already added
else:
# check for possible changes (yes, I have to do it this way)
cfg.increase_rate_counter()
old_permission = team_handle.get_repo_permission(repo_handle)
if not cfg.dry_run:
cfg.increase_rate_counter()
team_handle.update_team_repository(repo_handle, permission)
cfg.increase_rate_counter()
new_permission = team_handle.get_repo_permission(repo_handle)
permissions_are_equal = False
if old_permission.triage == new_permission.triage and \
old_permission.push == new_permission.push and \
old_permission.pull == new_permission.pull and \
old_permission.maintain == new_permission.maintain and \
old_permission.admin == new_permission.admin:
permissions_are_equal = True
if not permissions_are_equal:
cfg.log_change(f"{team} repo {repo} permission {permission}")
def set_teams_permissions(cfg, repo, teams):
for team in teams:
permission = teams[team]
set_permission(cfg, team, repo, permission)
def add_new_default_repo_teams(cfg):
if "default_teams" in cfg.config['repository_defaults']:
for repo in cfg.config['repositories']:
set_teams_permissions(cfg, repo, cfg.config['repository_defaults']['default_teams'])
def add_new_repo_teams(cfg):
for repo in cfg.config['repositories']:
repo_values = cfg.config['repositories'][repo]
# check if something repo specific needs to be done
if repo_values:
if "teams" in repo_values:
set_teams_permissions(cfg, repo, repo_values['teams'])
def delete_old_repo_teams(cfg):
# check each team in the org
cfg.increase_rate_counter()
for team in cfg.org_handle.get_teams():
# check if the team in defaults
if "default_teams" in cfg.config['repository_defaults']:
if team.slug in cfg.config['repository_defaults']['default_teams'].keys():
# team is in default, nothing has to be done
continue
# loop each repo to check for possible team settings
for repo in cfg.config['repositories']:
cfg.increase_rate_counter()
repo_handle = cfg.org_handle.get_repo(repo)
repo_values = cfg.config['repositories'][repo]
if repo_values:
if "teams" in repo_values:
if team.slug not in repo_values['teams'].keys():
                        # remove the team from this repo if it currently has access
                        for tmp_repo in team.get_repos():
                            if tmp_repo.name == repo:
                                if not cfg.dry_run:
                                    cfg.increase_rate_counter()
                                    team.remove_from_repos(repo_handle)
                                cfg.log_deletion(f"{team.slug} repo {repo}")
else:
# check if team has to be removed repo
for tmp_repo in team.get_repos():
if tmp_repo.name == repo:
if not cfg.dry_run:
cfg.increase_rate_counter()
team.remove_from_repos(repo_handle)
cfg.log_deletion(f"{team.slug} repo {repo}")
def set_team_repos(cfg):
cfg.set_log_compartment("team-repo")
add_new_default_repo_teams(cfg)
add_new_repo_teams(cfg)
if cfg.config['allow_team_repo_removal']:
delete_old_repo_teams(cfg)
| tibeer/ghom | ghom/team_repos.py | team_repos.py | py | 4,183 | python | en | code | 4 | github-code | 13 |
41011347486 | # Function that converts Korean won (₩) to US dollars ($)
def krw_to_usd(krw):
count = 0
while count < len(krw):
krw[count] = round(krw[count] / 1000, 1)
count += 1
return krw
# Function that converts US dollars ($) to Japanese yen (¥)
def usd_to_jpy(usd):
count = 0
while count < len(usd):
usd[count] = round(usd[count] / 8 * 1000, 1)
count += 1
return usd
# How much is each price in Korean won (₩)?
prices = [34000, 13000, 5000, 21000, 1000, 2000, 8000, 3000]
print("Korean currency: " + str(prices))
# Convert prices from won (₩) to dollars ($)
# Write your code here
prices = krw_to_usd(prices)
# How much is each price in dollars ($)?
print("US currency: " + str(prices))
# Convert prices from dollars ($) to yen (¥)
# Write your code here
prices = usd_to_jpy(prices)
# How much is each price in yen (¥)?
print("Japanese currency: " + str(prices))
72731801937 | from watson import text_to_trees
from knowledge import Noun, Verb
import unittest
from concurrencytest import ConcurrentTestSuite, fork_for_tests
def text_to_obj(text, constructor):
tree = text_to_trees(text)[0]
return constructor(tree)
def text_to_verb(text, subj_str, obj_str):
noun_subj = text_to_obj(subj_str, Noun)
noun_obj = None
if obj_str:
noun_obj = text_to_obj(obj_str, Noun)
verb_tree = text_to_trees(text)[0]
    return Verb(verb_tree, noun_subj, noun_obj)
def execute_test_class(test_class):
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(test_class))
runner = unittest.TextTestRunner()
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(50))
runner.run(concurrent_suite)
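
# Hedged usage sketch (the test case class is hypothetical):
# class VerbParsingTests(unittest.TestCase):
#     def test_noun(self):
#         self.assertIsNotNone(text_to_obj("the dog", Noun))
# execute_test_class(VerbParsingTests)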
| AxelUlmestig/chatterbot | test/test_util.py | test_util.py | py | 765 | python | en | code | 0 | github-code | 13 |
35183198780 | # Imports
import streamlit as st
# Data wrangling
import pandas as pd
import re
# Model
import lightgbm as lgb
# Persistence
import pickle
# Scraping
import requests
from bs4 import BeautifulSoup
# Misc
import os
import datetime
# サイドバー
date = st.sidebar.date_input("日付を選択", datetime.date.today())
formatted_date = date.strftime('%Y%m%d')
url = f"https://yoso.netkeiba.com/nar/?pid=race_list&kaisai_date={formatted_date}"
# Scraping code for netkeiba
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
race_lists = soup.find_all('div', class_='RaceList')
race_data = []
for race_list in race_lists:
venue = race_list.find('div', class_='Jyo').a.text
races = race_list.find('div', class_='RaceList_Main').find_all('a')
for race in races:
race_number = race.find('div', class_='RaceNum').span.text
race_id = race['href'].split('=')[-1]
race_data.append({
'会場': venue,
'レース番号': race_number,
'レースID': race_id
})
### Remove later ### Extract the domain and race_id from the user's input with a regular expression
def get_url(user_input):
domain_match = re.search(r'https://(.*?)/', user_input)
domain = domain_match.group(1) if domain_match else None
race_id_match = re.search(r'race_id=(\d+)', user_input)
race_id = race_id_match.group(1) if race_id_match else None
url = f'https://{domain}/race/shutuba.html?race_id={race_id}'
return url, race_id
# Scrape race and horse data from the given URL
def scraping(url, race_id):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
    # Race data
    race_list = [race_id]  # accumulates the race-level fields
    horse_list_list = []  # accumulates one row per horse
    title = soup.find('title').text  # race title
race_name, date = title.split(' | ')[0], title.split(' | ')[1]
try:
extracted_race_name = race_name.split(' ')[0]
race_list.append(extracted_race_name)
except:
race_list.append(race_name)
    match_date = re.search(r'(\d{4})年(\d{1,2})月(\d{1,2})日', date)  # date
year, month, day = match_date.groups()
race_list.append(int(year))
race_list.append(int(month))
race_list.append(int(day))
    match_place_race = re.search(r'(\w+)(\d{1,2})R', date)  # venue and race number
place, race_number = match_place_race.groups()
race_list.append(place)
race_list.append(int(race_number))
    try:  # number of starters
race_data_02_element = soup.select_one('.RaceData02')
if race_data_02_element:
race_data_02_text = race_data_02_element.get_text()
horse_count_match = re.search(r'(\d+)頭', race_data_02_text)
if horse_count_match:
race_list.append(int(horse_count_match.group(1)))
except:
race_list.append(None)
    try:  # track type (dirt or turf)
race_data_element = soup.select_one('.RaceData01')
if race_data_element:
race_data_text = race_data_element.get_text()
ground_match = re.search(r'(ダ|芝)\d+m', race_data_text)
if ground_match:
ground_type = ground_match.group(1)
race_list.append(ground_type)
except:
race_list.append(None)
    try:  # weather and track condition
race_data_element = soup.select_one('.RaceData01')
if race_data_element:
race_data_text = race_data_element.get_text()
weather_match = re.search(r'天候:(\w+)', race_data_text)
if weather_match:
race_list.append(weather_match.group(1))
track_condition_match = re.search(r'馬場:(\w+)', race_data_text)
if track_condition_match:
race_list.append(track_condition_match.group(1))
except:
race_list.extend([None, None])
    # Horse data
    cancel_count = 0  # counts horses withdrawn from the race
horse_tables = soup.findAll("table", class_="RaceTable01")
horse_table = horse_tables[0].findAll('tr', class_="HorseList")
    for i in range(len(horse_table)):  # loop over every starter
horse_list = [race_id]
result_row = horse_table[i].findAll("td")
        horse_list.append(result_row[3].get_text().strip())  # horse name
        if "Cancel" in horse_table[i].get("class"):  # entry status
horse_list.append("取消")
cancel_count += 1
else:
horse_list.append("出走")
        horse_list.append(int(result_row[0].get_text().strip()))  # post (frame) number
        horse_list.append(int(result_row[1].get_text().strip()))  # horse number
try:
            weight_match = re.match(r'(\d+)\(([-+]?\d+)\)', result_row[8].get_text().strip())  # body weight and change
body_weight, body_weight_diff = weight_match.groups()
horse_list.append(float(body_weight))
horse_list.append(float(body_weight_diff))
except:
horse_list.extend([None, None])
        odds = result_row[9].get_text().strip().split('\n')[0]  # odds
cleaned_odds = re.sub(r"[^0-9.]", "", odds)
if cleaned_odds:
horse_list.append(float(cleaned_odds))
else:
horse_list.append(None)
        popularity = result_row[10].get_text().strip().split('\n')[0]  # popularity rank
cleaned_popularity = re.sub(r"[^0-9]", "", popularity)
if cleaned_popularity:
horse_list.append(float(cleaned_popularity))
else:
horse_list.append(None)
horse_list_list.append(horse_list)
    # Build DataFrames from the collected lists
race_df = pd.DataFrame(columns = ['race_id','race_title','year', 'month', 'day', 'place', 'race_number', 'total', 'type', 'weather', 'condition'])
horse_df = pd.DataFrame(columns = ['race_id', 'horse_name', 'mark', 'frame_number','horse_number', 'weight', 'weight_diff', 'odds', 'popular'])
for horse_list in horse_list_list:
horse_se = pd.Series( horse_list, index=horse_df.columns)
horse_df = pd.concat([horse_df, horse_se.to_frame().T], ignore_index=True)
race_se = pd.Series(race_list, index=race_df.columns )
race_df = pd.concat([race_df, race_se.to_frame().T], ignore_index=True)
    race_df['total'] = race_df['total'] - cancel_count  # subtract withdrawn horses
return race_df, horse_df
def open_model():
current_dir = os.path.dirname(os.path.abspath(__file__))
model_file_path = os.path.join(current_dir, 'models', 'model.pkl')
with open(model_file_path, 'rb') as f:
model = pickle.load(f)
return model
def open_df():
current_dir = os.path.dirname(os.path.abspath(__file__))
df_file_path = os.path.join(current_dir, 'data', 'merged_df.csv')
df = pd.read_csv(df_file_path)
return df
def mapping(race_df, horse_df):
weather_mapping = {
'晴': '1',
'曇': '2',
'雨': '3',
'小雨': '4',
'雪': '5',
'小雪': '6',
}
race_df['tenko_code'] = race_df['weather'].map(weather_mapping)
condition_mapping = {
'良': '1',
'稍': '2',
'稍重': '2',
'重': '3',
'不良': '4',
}
race_df['babajotai_code_dirt'] = race_df['condition'].map(condition_mapping)
condition_mapping = {
'札幌': '01',
'函館': '02',
'福島': '03',
'新潟': '04',
'東京': '05',
'中山': '06',
'中京': '07',
'京都': '08',
'阪神': '09',
'小倉': '10',
'門別': '30',
'北見': '31',
'岩見沢': '32',
'帯広': '33',
'旭川': '34',
'盛岡': '35',
'水沢': '36',
'上山': '37',
'三条': '38',
'足利': '39',
'宇都宮': '40',
'高崎': '41',
'浦和': '42',
'船橋': '43',
'大井': '44',
'川崎': '45',
'金沢': '46',
'笠松': '47',
'名古屋': '48',
'紀三井寺': '49',
'園田': '50',
'姫路': '51',
'益田': '52',
'福山': '53',
'高知': '54',
'佐賀': '55',
'荒尾': '56',
'中津': '57',
'札幌(地方)': '58',
'函館(地方)': '59',
'新潟(地方)': '60',
'中京(地方)': '61'
}
race_df['keibajo_code'] = race_df['place'].map(condition_mapping)
return race_df, horse_df
def choose_race(df, race_df):
race_df['day'] = race_df['day'].astype(str).str.zfill(2)
race_df['monthday'] = race_df['month'].astype(str) + race_df['day'].astype(str)
race_df['group'] = race_df['year'].astype(int).astype(str) +"-"+ race_df['monthday'].astype(int).astype(str) +"-"+ race_df['keibajo_code'].astype(int).astype(str) +"-"+ race_df['race_number'].astype(int).astype(str)
# df = df[df['group'].isin(race_df['group'])]
return df, race_df
def merged_df(df, race_df, horse_df):
# df['tenko_code'] = race_df['tenko_code'].iloc[0]
# df['babajotai_code_dirt'] = race_df['babajotai_code_dirt'].iloc[0]
# df['shusso_tosu'] = race_df['total'].iloc[0]
horse_df = horse_df.rename(columns={
'horse_name': 'bamei',
'horse_number': 'umaban',
'weight': 'bataiju',
'weight_diff': 'zogen_ryou'
})
# horse_df['umaban'] = horse_df['umaban'].astype(int)
# df['umaban'] = df['umaban'].astype(int)
# df = df.merge(horse_df[['umaban', 'bataiju']], on='umaban', how='left', suffixes=('', '_new'))
# df = df.merge(horse_df[['umaban', 'zogen_ryou']], on='umaban', how='left', suffixes=('', '_new'))
# df['bataiju'] = df['bataiju_new'].combine_first(df['bataiju'])
# df['zogen_ryou'] = df['zogen_ryou_new'].combine_first(df['zogen_ryou'])
# df.drop(columns=['bataiju_new', 'zogen_ryou_new'], inplace=True)
# df['hutan_wariai'] = df['futan_juryo'].astype(int) / pd.to_numeric(df['bataiju'], errors='coerce')
df = horse_df.copy()
return df, race_df, horse_df
def convert_datatype(df):
columns_to_convert = [
'umaban',
# 'kyori',
# 'grade_code',
# 'seibetsu_code',
# 'moshoku_code',
# 'barei',
# 'chokyoshi_code',
# 'banushi_code',
# 'kishu_code',
# 'kishu_minarai_code',
# 'kyoso_shubetsu_code',
# 'juryo_shubetsu_code',
# 'shusso_tosu',
# 'tenko_code',
# 'babajotai_code_dirt',
# 'hutan_wariai',
'zogen_ryou',
# 'track_code',
# 'keibajo_code'
]
for column in columns_to_convert:
df[column].fillna(0, inplace=True)
try:
if df[column].astype(float).apply(lambda x: x.is_integer()).all():
df[column] = df[column].astype(int)
else:
df[column] = df[column].astype(float)
except ValueError:
df[column] = df[column].astype(float)
return df
def prediction(race_df, horse_df):
df = open_df()
model = open_model()
race_df, horse_df = mapping(race_df, horse_df)
df, race_df = choose_race(df, race_df)
df, race_df, horse_df = merged_df(df, race_df, horse_df)
df = convert_datatype(df)
    cancelled_umabans = horse_df[horse_df['mark'] == '取消']['umaban']  # drop withdrawn horses
df = df[~df['umaban'].isin(cancelled_umabans)]
features = [
'umaban',
# 'kyori',
# 'grade_code',
# 'seibetsu_code',
# 'moshoku_code',
# 'barei',
# 'chokyoshi_code',
# 'banushi_code',
# 'kishu_code',
# 'kishu_minarai_code',
# 'kyoso_shubetsu_code',
# 'juryo_shubetsu_code',
# 'shusso_tosu',
# 'tenko_code',
# 'babajotai_code_dirt',
# 'hutan_wariai',
'zogen_ryou',
# 'track_code',
# 'keibajo_code'
]
# target = 'kakutei_chakujun'
df['y_pred'] = model.predict(df[features], num_iteration=model.best_iteration)
# df['predicted_rank'] = df.groupby('group')['y_pred'].rank(method='min')
df['predicted_rank'] = df['y_pred'].rank(method='min')
    # Shape the output
# sorted_df = df.sort_values(by=['group', 'predicted_rank'])
sorted_df = df.sort_values(by=['predicted_rank'])
sorted_df = sorted_df[['predicted_rank',
'bamei',
'umaban',
# 'bataiju',
# 'zogen_ryou',
'y_pred']]
return sorted_df
# Application
st.markdown(
"""
<style>
.sidebar .selectbox {
font-family: Arial, sans-serif;
}
.reportview-container {
max-width: 100%;
}
</style>
""",
unsafe_allow_html=True,
)
# Sidebar
st.sidebar.divider()
# venues = list(set([data['会場'] for data in race_data]))
venues = [venue for venue in (set(data['会場'] for data in race_data)) if venue != '帯広(ば)']
selected_venue = st.sidebar.selectbox('会場を選択:', venues)
race_numbers = [data['レース番号'] for data in race_data if data['会場'] == selected_venue]
selected_race_number = st.sidebar.selectbox('レース番号を選択:', race_numbers)
# Main view
st.sidebar.divider()
if st.sidebar.button('予測を表示', type='primary' ,use_container_width=True):
for data in race_data:
if data['会場'] == selected_venue and data['レース番号'] == selected_race_number:
url = f"https://nar.netkeiba.com/race/shutuba.html?race_id={data['レースID']}"
race_id = data['レースID']
race_df, horse_df = scraping(url, race_id)
type = race_df['type'].replace({'ダ': 'ダート', '芝': '芝'})
weather = race_df['weather']
condition = race_df['condition']
col1, col2, col3 = st.columns(3)
col1.metric(label="レース種別", value=type.iloc[0])
col2.metric(label="天気", value=weather.iloc[0])
col3.metric(label="馬場状態", value=condition.iloc[0])
st.divider()
# st.dataframe(race_df)
# st.dataframe(horse_df)
with st.spinner("予測中..."):
sorted_df = prediction(race_df, horse_df)
sorted_df = pd.merge(sorted_df, horse_df[['horse_number','popular','odds']], left_on='umaban', right_on='horse_number', how='left')
sorted_df.drop('horse_number', axis=1, inplace=True)
sorted_df = sorted_df.rename(columns={'predicted_rank': '予想順位', 'bamei': '馬名', 'umaban': '馬番', 'y_pred': '予測値', 'popular': '人気順位', 'odds': 'オッズ'})
max_val = sorted_df['予測値'].max()
sorted_df['予測値'] = max_val - sorted_df['予測値']
st.dataframe(
sorted_df,
hide_index=True,
column_config={
"予測値": st.column_config.ProgressColumn(
"予測値",
format="%.2f",
min_value=sorted_df['予測値'].min(),
max_value=sorted_df['予測値'].max(),
),
},
) | kawamottyan/horse_racing | app/app.py | app.py | py | 15,908 | python | en | code | 0 | github-code | 13 |
2032636850 | # 1. From the starting position, keep adding the distance until a cheaper gas station appears.
# 2. When a cheaper one appears, add (cheapest price per liter so far * distance travelled) to the total, then repeat step 1 with the cheaper station's price.
# 3. On arrival, print the final total.
n = int(input())
_meter = list(map(int, input().split()))
_price = list(map(int, input().split()))
_min = _price[0]
_sum = 0
_now = 0
for i in range(0, n-1):
_now += _meter[i]
if _price[i+1] < _min:
_sum += (_now*_min)
_now = 0
_min = _price[i+1]
elif i+1 == n-1:
_sum += (_now*_min)
print(_sum)
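
# Worked example (BOJ 13305 sample): n=4, distances [2, 3, 1], prices [5, 2, 4, 1]
# -> travel 2 km at price 5, then 4 km at price 2 -> 2*5 + 4*2 = 18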
| YeonHoLee-dev/Python | BAEKJOON/[13305] 주유소.py | [13305] 주유소.py | py | 621 | python | ko | code | 0 | github-code | 13 |
72338023699 | import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
import numpy as np
import pandas as pd
from torch.utils.data.sampler import SequentialSampler, RandomSampler
# =============================================================================
# def get_dataloader(csv_path, iid=False):
# df = pd.read_csv(csv_path)
#
# # transform = transforms.Compose([
# # transforms.Resize((32,32)),
# # transforms.ToTensor(),
# # ])
#
# if iid:
# # i.i.d condition
# trn, dev, tst = np.split(df.sample(frac=1), [int(.6*len(df)), int(.8*len(df))])
# else:
# # time series condition
# trn, dev, tst = np.split(df, [int(.8*len(df)), int(.9*len(df))])
#
# trn_dataset = BuildDataset(trn)
# dev_dataset = BuildDataset(dev)
# tst_dataset = BuildDataset(tst)
#
# rand_sampler_tr = RandomSampler(trn_dataset,replacement=True,num_samples= np.int(len(trn_dataset)*0.01))
# rand_sampler_val = RandomSampler(dev_dataset,replacement=True,num_samples= np.int(len(dev_dataset)*0.01))
#
#
# trn_dataloader = torch.utils.data.DataLoader(
# trn_dataset, batch_size=32, shuffle=False, num_workers=0,sampler= rand_sampler_tr
# )
# dev_dataloader = torch.utils.data.DataLoader(
# dev_dataset, batch_size=32, shuffle=False, num_workers=0,sampler= rand_sampler_val
# )
# tst_dataloader = torch.utils.data.DataLoader(
# tst_dataset, batch_size=32, shuffle=False, num_workers=0
# )
#
# return trn_dataloader, dev_dataloader, tst_dataloader
# =============================================================================
class BuildDataset(Dataset):
def __init__(self, df):
self.img_path = df['image'].values
self.labels = df['label'].values
#self.transforms = transforms
self.transforms = transforms.Compose([
transforms.Resize((32,32)),
transforms.ToTensor(),
])
self.img_path1 = [] # label high ratio
self.img_path2 = [] # label low ratio
self.label_path1 = []
self.label_path2 = []
        self.threshold = 2.5  # provisional value, revise later
for i,label_value in enumerate((self.labels)):
if label_value > self.threshold : # CHANGE FOR ENHANCING
self.label_path1.append(self.labels[i])
self.img_path1.append(self.img_path[i])
else :
self.label_path2.append(self.labels[i])
self.img_path2.append(self.img_path[i])
self.data_rate = (len(self.img_path1)/(len(self.img_path1)+len(self.img_path2)))
def __len__(self):
return len(self.img_path1) + len(self.img_path2)
def __getitem__(self, idx):
        # Probabilistically rebalance sampling between the high- and low-label groups
if np.random.choice(2, 1, p=[1-self.data_rate, self.data_rate]) == 0:
#idx = idx % len(self.img_path1)
idx = np.random.randint(len(self.img_path1))
img_path = self.img_path1[idx]
label_path = self.label_path1[idx]
else :
idx = np.random.randint(len(self.img_path2))
img_path = self.img_path2[idx]
label_path = self.label_path2[idx]
frame = Image.open(img_path)
label = torch.tensor(label_path, dtype=torch.float)
return self.transforms(frame), label
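
# Hedged usage sketch (CSV path and its 'image'/'label' columns are assumed):
# df = pd.read_csv('labels.csv')
# dataset = BuildDataset(df)
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
# frames, labels = next(iter(loader))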
| lepoeme20/daewoo | utils/build_dataset_imbalanced.py | build_dataset_imbalanced.py | py | 3,577 | python | en | code | 0 | github-code | 13 |
8571488855 | from enum import Enum
from queue import PriorityQueue
import numpy as np
import time
def create_grid(data, drone_altitude, safety_distance):
"""
Returns a grid representation of a 2D configuration space
based on given obstacle data, drone altitude and safety distance
arguments.
"""
# minimum and maximum north coordinates
north_min = np.floor(np.min(data[:, 0] - data[:, 3]))
north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))
# minimum and maximum east coordinates
east_min = np.floor(np.min(data[:, 1] - data[:, 4]))
east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))
# given the minimum and maximum coordinates we can
# calculate the size of the grid.
north_size = int(np.ceil(north_max - north_min))
east_size = int(np.ceil(east_max - east_min))
# Initialize an empty grid
grid = np.zeros((north_size, east_size))
# Populate the grid with obstacles
for i in range(data.shape[0]):
north, east, alt, d_north, d_east, d_alt = data[i, :]
if alt + d_alt + safety_distance > drone_altitude:
bottom = north - d_north - safety_distance - north_min
top = north + d_north + safety_distance - north_min
left = east - d_east - safety_distance - east_min
right = east + d_east + safety_distance - east_min
obstacle = [
int(np.clip(np.floor(bottom), 0, north_size - 1)),
int(np.clip(np.ceil(top), 0, north_size - 1)),
int(np.clip(np.floor(left), 0, east_size - 1)),
int(np.clip(np.ceil(right), 0, east_size - 1)),
]
grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1
return grid, int(north_min), int(east_min)
# Assume all actions cost the same.
class Action(Enum):
"""
An action is represented by a 3 element tuple.
The first 2 values are the delta of the action relative
to the current grid position. The third and final value
is the cost of performing the action.
"""
WEST = (0, -1, 1)
EAST = (0, 1, 1)
NORTH = (-1, 0, 1)
SOUTH = (1, 0, 1)
NORTH_WEST = (-1, -1, np.sqrt(2))
NORTH_EAST = (-1, 1, np.sqrt(2))
SOUTH_WEST = (1, -1, np.sqrt(2))
SOUTH_EAST = (1, 1, np.sqrt(2))
@property
def cost(self):
return self.value[2]
@property
def delta(self):
return (self.value[0], self.value[1])
def valid_actions(grid, current_node, current_action=None, move=1):
"""
Returns a list of valid actions given a grid and current node.
"""
all_actions = list(Action)
valid_actions_nodes = []
n, m = grid.shape[0] - 1, grid.shape[1] - 1
# To prevent zigzags add a cost to changing action
# Move previous action first
if (current_action is not None and
current_action in all_actions):
all_actions.remove(current_action)
all_actions = [current_action] + all_actions
for new_action in all_actions:
new_x = current_node[0] + new_action.delta[0] * move
new_y = current_node[1] + new_action.delta[1] * move
if (new_x < 0 or new_x > n or
new_y < 0 or new_y > m or
grid[new_x, new_y]):
pass
else:
valid_actions_nodes.append((new_action, (new_x, new_y)))
return valid_actions_nodes
def a_star(grid, h, start, goal, max_move=1):
path = []
path_cost = 0
queue = PriorityQueue()
queue.put((0, start))
    visited = {start}
branch = {}
found = False
# To give information about the planning process
depth = 0
depth_act = 0
report_int = 1024
t0 = time.time()
while not queue.empty():
item = queue.get()
current_node = item[1]
current_q_cost = item[0]
move = max_move
if current_node in visited:
continue
visited.add(current_node)
depth += 1
if current_node == start:
current_cost = 0.0
current_action = None
else:
current_cost = branch[current_node][0]
current_action = branch[current_node][2]
if depth % report_int == 0:
print("#Nodes:%s, #Actions:%s, Cost:%.2f, Currenct Node:%s,"
" Time:%.2f" % (depth, depth_act, current_cost,
current_node, time.time() - t0))
report_int *= 2
current_h_cost = current_q_cost - current_cost
if current_h_cost < np.sqrt(2) * float(max_move):
move = 1
else:
move = max_move
if current_node == goal:
print('Found a path.')
found = True
print("#Nodes:%s, #Actions:%s, Cost:%.2f, Currenct Node:%s,"
" Time:%.2f" % (depth, depth_act, current_cost,
current_node, time.time() - t0))
break
else:
val_act_nod = valid_actions(
grid, current_node, current_action, move)
for action, next_node in val_act_nod:
depth_act += 1
action_cost = action.cost * move
branch_cost = current_cost + action_cost
h_cost = h(next_node, goal)
queue_cost = branch_cost + h_cost
if next_node in branch:
cost_in_branch = branch[next_node][0]
if branch_cost < cost_in_branch:
branch[next_node] = (branch_cost, current_node, action)
queue.put((queue_cost, next_node))
else:
branch[next_node] = (branch_cost, current_node, action)
queue.put((queue_cost, next_node))
path = []
path_cost = 0
if found:
# retrace steps
path = []
n = goal
path_cost = branch[n][0]
while branch[n][1] != start:
path.append(branch[n][1])
n = branch[n][1]
path.append(branch[n][1])
else:
print('**********************')
print('Failed to find a path!')
print('**********************')
return path[::-1], path_cost
def heuristic(position, goal_position):
return np.linalg.norm(np.array(position) - np.array(goal_position))
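
# Hedged usage sketch (obstacle file name and start/goal cells are assumed):
# data = np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2)
# grid, north_offset, east_offset = create_grid(data, drone_altitude=5, safety_distance=3)
# path, cost = a_star(grid, heuristic, start=(25, 100), goal=(750, 370), max_move=5)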
| seyfig/3DMotionPlanning | planning_utils.py | planning_utils.py | py | 6,335 | python | en | code | 3 | github-code | 13 |
18019446647 | # Define a list of floating-point numbers.
# Write a program that finds the difference between the
# maximum and minimum fractional parts of the elements.
# Example:
# - [1.1, 1.2, 3.1, 5, 10.01] => 0.19
numbers = [1.1, 1.2, 3.1, 5, 10.01]
print(numbers)
def dif(values):
    dif_max_min = []
    for i in range(len(values)):
        dif_max_min.append(values[i] % 1)
    return max(dif_max_min) - min(dif_max_min)
print(round(dif(numbers), 2))
| Boris-1980/Python_homework | 013.py | 013.py | py | 561 | python | ru | code | 3 | github-code | 13 |
20669887400 | #!/usr/bin/env python3.6
import numpy as np
from Point import Point
from Ride import Ride
from Problem import Problem
from Vehicle import Vehicle
import time
import sys
"""
Main project for hashcode
"""
def read_file(f):
all_data = np.loadtxt(f, dtype=int, delimiter = " ", skiprows = 0)
first_row = all_data[0,:]
all_data = all_data[1:all_data.shape[0],:]
return first_row, all_data
def parse_ride(r, id):
return Ride(id, Point(r[0], r[1]), Point(r[2], r[3]), r[4], r[5])
def main():
# all_examples = [
# "a_example.in",
# "b_should_be_easy.in",
# "c_no_hurry.in",
# "d_metropolis.in",
# "e_high_bonus.in"
# ]
curTime = time.time()
i = sys.argv[1]
print(i)
run_one(i)
newTime = time.time()
print("Took ", newTime - curTime, " seconds")
print("Done with " + i)
def rideSort(rides):
return sorted(rides, key=lambda a: a.rating())
def run_one(filename):
# Setup
rProbl, rRides = read_file("example/" + filename)
rides = []
p = Problem(rProbl[0], rProbl[1], rProbl[2], rProbl[3], rProbl[4], rProbl[5])
vehicles = []
i = 0
while i < p.numRides:
rides.append(parse_ride(rRides[i,:], i))
i += 1
for i in range(0,p.numVehicles):
vehicles.append(Vehicle(i))
for t in range(0,p.timeSlots):
rides = rideSort(rides)
for v in vehicles:
if v.busy:
if v.unavailableUntil == 1:
v.busy = False
v.unavailableUntil = 0
v.unavailableUntil -= 1
continue
for r in rides[:]:
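                # A ride is still feasible if we can drive to its start and then
                # to its finish before the ride's deadline.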
isReachable = r.finishTime - r.sp.dist(r.ep) - v.pos.dist(r.sp) > t
if isReachable:
# Ride is reachable
v.handleRide(r, t)
rides.remove(r)
break
# Print all vehicle actions
s = ""
for v in vehicles:
s += v.printVehicle() + "\n"
with open("out/" + filename, "w") as f:
f.write(s)
if __name__ == "__main__":
main() | Recognition2/HashCode2018 | main.py | main.py | py | 2,153 | python | en | code | 0 | github-code | 13 |
27551133556 | #!/usr/bin/python3
"""Module is an introduction to networking with requests in Python."""
import sys
import requests
def url_fetch():
"""Displays id for given GitHub credentials using the GitHub API."""
if len(sys.argv) < 2:
return
url = 'https://api.github.com/users/{}'.format(sys.argv[1])
with requests.get(url) as html:
try:
return html.json()['id']
except KeyError:
return
if __name__ == '__main__':
print(url_fetch())
| adobki/alx-higher_level_programming | 0x11-python-network_1/10-my_github.py | 10-my_github.py | py | 498 | python | en | code | 0 | github-code | 13 |
25093447143 | import itertools
import operator
from ast import literal_eval
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
SQLITE_FILE = '../benchmarks.db'
GENERAL_PLOTS_PATH = './graphs/memory/general/'
BAR_PLOTS_PATH = './graphs/memory/'
PS_PLOTS_PATH = './graphs/memory/processsystem/'
Y_AXIS_LABEL = "Max GC memory (MB)"
BENCHNAMES = [
("chameneos", "Chameneos: number of chameneos", Y_AXIS_LABEL),
("counting", "Counting actors: numbers to add", Y_AXIS_LABEL),
("forkjoin_creation", "FJC: number of processes", Y_AXIS_LABEL),
("forkjoin_throughput", "FJT: number of processes", Y_AXIS_LABEL),
("pingpong", "Ping-pong: number of pairs", Y_AXIS_LABEL),
("ring", "Thread ring: number of ring members", Y_AXIS_LABEL),
("ringstream", "Thread ring stream: number of ring members", Y_AXIS_LABEL)
]
PSNAMES = [
('akka', '-.'),
('statemachinemultistep', '--'),
('runnerimproved', '-')
]
def gc_usage_vs_size():
for bn, xl, yl in BENCHNAMES:
print('Generating size vs. GC usage for benchmark: ' + bn)
gc_usage_vs_size_per_benchmark(bn, xl, yl)
def gc_usage_vs_size_per_benchmark(benchname, xl, yl):
(fig_w, fig_h) = (4, 4)
f = plt.figure(figsize=(fig_w, fig_h))
gs = plt.GridSpec(2, 1)
ax1 = plt.subplot(gs[0, :])
ax2 = plt.subplot(gs[1, :], sharex=ax1)
points = assemble_data(benchname, PSNAMES)
for psName, sizes, records, _e, avg_calls, sty in points:
records = [r / 1000000 for r in records]
ax1.loglog(sizes, records, marker='o', markersize=6, linestyle=sty)
for psName, sizes, records, _e, avg_calls, sty in points:
ax2.loglog(sizes, avg_calls, marker='o', markersize=6, linestyle=sty)
ax2.set_xscale("log")
ax2.set_xlabel(xl)
ax1.get_xaxis().set_visible(False)
# ax1.set_ylabel(yl, rotation=0)
# ax2.set_ylabel('Number of GC calls', rotation=0)
ax1.text(-0.1, 1.15, yl, fontsize=12, transform=ax1.transAxes,
verticalalignment='top')
ax2.text(
-0.1, 1.15, 'Number of GC calls',
fontsize=12, transform=ax2.transAxes,
verticalalignment='top'
)
f.savefig('{}{}.pdf'.format(BAR_PLOTS_PATH, benchname), bbox_inches='tight')
plt.close(f)
def gc_calls_vs_size_barchart():
for bn, xl, yl in BENCHNAMES:
print('Generating size vs. GC calls bar chart for benchmark: ' + bn)
gc_calls_vs_size_barchart_per_benchmark(bn, xl, yl)
def gc_calls_vs_size_barchart_per_benchmark(benchname, xl, yl):
f, ax = plt.subplots()
points = assemble_data(benchname, PSNAMES)
# for psName, sizes, records, _e, avg_calls, sty in points:
# print(avg_calls)
points = assemble_data(benchname, PSNAMES)
for psName, sizes, records, _e, avg_calls, sty in points:
ax.loglog(sizes, records, marker='o', markersize=3, linestyle=sty)
    # Draw one bar group per process system, offset left/centre/right of each size.
    for offset, color, (psName, sizes, records, _e, avg_calls, sty) in zip(
            (-1, 0, 1), ('orange', 'green', 'blue'), points):
        w = np.array(sizes) / 4
        ax.bar(sizes + offset * w, avg_calls, width=w, align="edge", color=color, label=psName)
plt.xlabel(xl)
plt.ylabel(yl)
ax.set_xscale("log")
# plt.xlabel(xl)
# plt.ylabel(yl)
# plt.legend(loc=" left")
f.savefig('{}{}.pdf'.format(BAR_PLOTS_PATH, benchname), bbox_inches='tight')
plt.close(f)
def plot_memory_vs_size_general():
for bn, xl, yl in BENCHNAMES:
print('Generating size vs. GC memory plot for benchmark: ' + bn)
plot_memory_vs_size_general_per_benchmark(bn, xl, yl)
def plot_memory_vs_size_general_per_benchmark(benchname, xl, yl):
f, ax = plt.subplots()
points = assemble_data(benchname, PSNAMES)
for psName, sizes, records, _e, avg_calls, sty in points:
ax.loglog(sizes, records, marker='o', markersize=3, linestyle=sty)
plt.xlabel(xl)
plt.ylabel(yl)
# plt.legend(loc="upper left")
f.savefig('{}{}.pdf'.format(GENERAL_PLOTS_PATH, benchname), bbox_inches='tight')
plt.close(f)
def assemble_data(benchname, PSNAMES, gid = None):
points = []
for psName, sty in PSNAMES:
        sizes, avg_records, e, _raw_records, avg_calls = fetch_data(benchname, psName, gid)
points.append((psName, sizes, avg_records, e, avg_calls, sty))
return points
def fetch_data(bench_name, system, gid = None):
import sqlite3
with sqlite3.connect('file:{}?mode=ro'.format(SQLITE_FILE), uri=True) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
if gid is None:
# Select the latest benchmark group id
c.execute("SELECT `id` FROM benchmark_group "
"WHERE `end` IS NOT NULL "
"ORDER BY `end` DESC LIMIT 1")
gid = c.fetchone()['id']
# Which DB field is the "size", for the x-axis of the plot?
size_fields = {
'chameneos' : 'size',
'counting' : 'count',
'pingpong' : 'pairs',
'forkjoin_creation' : 'size',
'forkjoin_throughput' : 'size',
'ring' : 'size',
'ringstream' : 'size'
}
size_field = size_fields[bench_name]
# Select id,size pairs for all benchmkars with given name and group
c.execute("SELECT benchmark.`id`, %(b)s.`%(f)s` "
"FROM benchmark "
"INNER JOIN %(b)s "
"ON (benchmark.`id` = %(b)s.`id`) "
"WHERE benchmark.`group` = ? AND benchmark.`name` = ? "
"AND benchmark.`system` = ? "
"AND benchmark.`type` = 'size_vs_memory'" % {
'b' : 'benchmark_' + bench_name,
'f' : size_field
},
(gid, bench_name, system))
id_sizes = c.fetchall()
bench_ids = [r['id'] for r in id_sizes]
sizes = [r[size_field] for r in id_sizes]
records = [c.execute("SELECT `max_bytes` "
"FROM benchmark_memory "
"WHERE `benchmark_id` = ?",
(bid,)).fetchall()
for bid in bench_ids]
calls = [c.execute("SELECT `calls` "
"FROM benchmark_memory "
"WHERE `benchmark_id` = ?",
(bid,)).fetchall()
for bid in bench_ids]
# avg_records = [np.average(r) for r in records]
# errors = [np.std(r) for r in records]
# avg_calls = [int(np.average(c)) for c in calls]
avg_records = [empty_safe_avg(r) for r in records]
avg_calls = [empty_safe_avg(c) for c in calls]
# errors = empty_safe_std(records)
# avg_calls = empty_safe_avg(calls)
# avg_records = [1 for r in records]
        errors = [empty_safe_std(r) for r in records]
# avg_calls =[1 for r in records]
return filter_out_empty_records(
sizes, avg_records, errors, records, avg_calls)
def empty_safe_avg(ls):
return np.average(ls) if ls else -1
def empty_safe_std(ls):
return np.std(ls) if ls else -1
def filter_out_empty_records(sizes, avg_rs, errs, rs, avg_calls):
all_info = list(zip(sizes, avg_rs, errs, rs, avg_calls))
filtered_info = [info for info in all_info if info[1] != -1]
unziped_info = list(zip(*filtered_info))
return tuple([list(ui) for ui in unziped_info])
def main():
# plot_memory_vs_size_general()
# gc_calls_vs_size_barchart()
gc_usage_vs_size()
# plot_memory_vs_size_error_bar()
if __name__ == "__main__":
main()
| alcestes/effpi | scripts/gc_memory_vs_size.py | gc_memory_vs_size.py | py | 7,981 | python | en | code | 47 | github-code | 13 |
70505870099 | import torch
import torch.nn as nn
from spec_unet import get_model as get_spec_unet
from audio_unet import get_model as get_audio_unet
class HybridUnet(nn.Module):
def __init__(self, spec_unet, wav_unet):
super().__init__()
self.spec_unet = spec_unet
self.wav_unet = wav_unet
def forward(self, input):
return self.wav_unet(self.spec_unet(input))
def get_model(width=16, device='cpu'):
return HybridUnet(get_spec_unet(width=width, device=device),
get_audio_unet(width=width))
if __name__ == '__main__':
model = get_model()
print(model)
print(sum(p.numel() for p in model.parameters() if p.requires_grad))
a = torch.zeros(1, 1, 16384)
with torch.no_grad():
y = model(a)
print(y.shape)
| bob80333/audio_bandwidth_extension | hybrid_unet.py | hybrid_unet.py | py | 794 | python | en | code | 1 | github-code | 13 |
# At first this kept failing: every generated row came out the same length, until I finally found the cause..
# [[]] is a list containing a single empty-list element, so [[]]*3 creates 3 references to that
# same empty list, and modifying any one of them changes the whole list.
# Multi-dimensional arrays should instead be created as: lists = [[] for i in range(3)]
# After the fix it worked; even this plain double loop beats 88% of submissions (36ms)..
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
        ans = [[] for i in range(numRows)]  # the key step!
        for i in range(0, numRows):
            for j in range(0, i+1):
if j==0 or j==i:
ans[i].append(1)
else:
ans[i].append(ans[i-1][j-1]+ans[i-1][j])
return ans
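

# A minimal standalone demo of the aliasing pitfall described in the comments
# above (illustrative only; not part of the LeetCode submission):
if __name__ == '__main__':
    shared = [[]] * 3                     # three references to ONE inner list
    shared[0].append(1)
    print(shared)                         # [[1], [1], [1]] - every "row" changed
    independent = [[] for _ in range(3)]  # three separate inner lists
    independent[0].append(1)
    print(independent)                    # [[1], [], []]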
| fire717/Algorithms | LeetCode/python/_118.Pascal'sTriangle.py | _118.Pascal'sTriangle.py | py | 834 | python | zh | code | 6 | github-code | 13 |
73958536016 | import numpy as np
def get_angular_letter(total_l_string):
    #translates L into the corresponding symbol.
total_l = int(total_l_string)
angular_dictionary = ['S','P','D','F','G','H']
num = len(angular_dictionary)-1
if total_l > num:
return str(total_l)
else:
return angular_dictionary[total_l]
def convert_a_value_string_to_float(a_value_string):
array = [*a_value_string]
array.remove('.')
#this converts the hilariously outdated float format in adf04 files into an actual number
#print(array)
a_value_float = float(array[0]) + float(array[1])*0.1 + float(array[2])*0.01
exponent = int(float(array[-2])*10 + float(array[-1]))
#print(exponent)
#it could be made more efficient if i could be bothered.
if array[-3] == '+':
a_value_float*= (10**exponent)
elif array[-3] == '-':
a_value_float *=(10**(-exponent))
else:
print('failure in a value conversion')
return a_value_float
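# Illustrative examples of the format handled above (assumed strings in the
# adf04 'd.dd+ee' / 'd.dd-ee' style, not values read from a real file):
#   convert_a_value_string_to_float('1.23+05')  ->  1.23e+05
#   convert_a_value_string_to_float('9.87-03')  ->  9.87e-03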
def convert_many_a_values(avalue_string_array):
num = len(avalue_string_array)
a_values = np.zeros(num)
for jj in range(0,num):
a_values[jj] = convert_a_value_string_to_float(a_value_string=avalue_string_array[jj])
return a_values
def get_transition_data(num_levels,path):
print("Attempting to find transition data")
num_transitions = int(num_levels * (num_levels-1)/2)
transition_data = np.loadtxt(path,skiprows=num_levels+3,dtype=str,usecols=[0,1,2],max_rows=num_transitions)
    #it is possible i may need to add an exception here if it doesn't find the expected number of transitions
print("Expecting ",num_transitions," transitions")
upper_levels = transition_data[:,0].astype(int)
lower_levels = transition_data[:,1].astype(int)
a_values_string_array = transition_data[:,2]
print("found ",len(a_values_string_array),' transitions')
num_transitions = len(a_values_string_array)
a_values_float = convert_many_a_values(avalue_string_array=a_values_string_array)
print("transition data found successfully")
print("-------------------------")
return a_values_float,upper_levels,lower_levels,num_transitions
def process_term_strings_and_j_values(term_L_S,term_J,num_levels):
term_strings = []
jvalues = np.zeros(num_levels)
for ii in range(0,num_levels):
current_first_string = term_L_S[ii]
m = current_first_string[1]
total_l = current_first_string[3]
jtot = term_J[ii].strip(')')
jvalues[ii] = float(jtot)
L_string = get_angular_letter(total_l)
new_term_string = m + L_string + jtot
term_strings.append(new_term_string)
return term_strings,jvalues
def get_level_and_term_data(path,num_levels):
print("Attempting to find term data")
level_data = np.loadtxt(path,skiprows=1,max_rows=num_levels,dtype = str)
    #the adf04 format seems to split these in two, so i parse them separately.
term_L_S = level_data[:,2]
term_J = level_data[:,3]
csfs_strings = level_data[:,1]
level_numbers = level_data[:,0].astype(int)
#needs to be 4 as the adf04 puts 3D4 to 3D 4 - maybe by changing delimiter we can avoid this
term_strings,jvalues = process_term_strings_and_j_values(term_L_S,term_J,num_levels)
energy_levels_cm_minus_one = level_data[:,4].astype(float)
print("term data found, probably")
print("-------------------------")
return csfs_strings,term_strings,jvalues,energy_levels_cm_minus_one
def read_in_initial(path):
f = open(path,'r')
first_line = f.readline()
print(first_line)
new_stuff = first_line[0:5].replace("+"," ").split()
other_stuff = first_line[5:].split()
print(other_stuff)
atomic_symbol = new_stuff[0]
effective_charge = new_stuff[1]
atomic_number = other_stuff[0]
effective_charge_int = int(effective_charge[0])
#effective_charge_for_element_code = str(effective_charge_int)
#if effective_charge_int < 10:
# effective_charge_for_element_code = '0'+effective_charge_for_element_code
#will fail for charges higher than 10 with current string manipulation
print("Found atomic symbol ",atomic_symbol)
print("Found atomic number",atomic_number)
print("Found ionisation ",effective_charge)
elementcode = int(atomic_number) + effective_charge_int/100
print("Element code for TARDIS ",elementcode)
print("-------------------------")
checker = False
level_counter = 0
while checker == False:
current_line = f.readline()
string_to_be_checked = current_line.strip()[0]
target_string = '-'
if string_to_be_checked == target_string:
checker = True
else:
level_counter += 1
print("found ",level_counter," levels")
f.close()
return elementcode,level_counter
| LeoMul/adf04_to_kurucz | parsing_adf04.py | parsing_adf04.py | py | 4,889 | python | en | code | 0 | github-code | 13 |
32294916083 | import random
max_integer_number = 2*10**5
def random_integer_numbers__iterator(steps, min_number, max_number, number_count=2) -> tuple:
for i in range(steps):
numbers = []
for _ in range(number_count):
numbers.append(get_random_integer_number(min_number=min_number, max_number=max_number))
yield tuple(numbers)
def random_integer_numbers__iterator2(steps, random_number_limiters) -> tuple:
for i in range(steps):
numbers = []
for limit in random_number_limiters:
numbers.append(get_random_integer_number(min_number=limit['min_number'], max_number=limit['max_number']))
yield tuple(numbers)
def random_integer_number__iterator(steps, min_number, max_number) -> tuple:
for i in range(steps):
yield (get_random_integer_number(min_number=min_number, max_number=max_number),)
def get_random_integer_number(min_number=0, max_number=max_integer_number):
return random.randint(min_number, max_number)
def create_integer_dataset_list(filename, lines=20, steps=100, min_number=1, max_number=max_integer_number):
with open(filename, "a") as f:
for i in range(0, lines):
integer_list = []
for random_number in random_integer_number__iterator(
steps=steps,
min_number=min_number,
max_number=max_number
):
integer_list.append(random_number)
print(f'write {i} {integer_list}')
f.write(f"{integer_list}\n")
if __name__ == '__main__':
create_integer_dataset_list("dataset_list.txt", lines=20, steps=100)
create_integer_dataset_list("big_dataset_list.txt", lines=20, steps=max_integer_number)
| boloninanajulia/challanges | tests/code_quality_score/generate_datasets.py | generate_datasets.py | py | 1,744 | python | en | code | 0 | github-code | 13 |
39594766394 | spendings = [140, 30, 999, 145, 538, 878, 901, 613, 471, 286, 147, 90]
income = [300, 40, 0, 4000, 8911, 73, 85, 0, 9000, 941, 658, 190]
def func(list_1, list_2):
coeff_year = 0
new_list = []
for month in range(12):
try:
coeff = list_1[month] / list_2[month]
new_list.append(coeff)
except ZeroDivisionError:
coeff = 0
new_list.append(coeff)
finally:
coeff_year += coeff
monthly_coefficient = {f'month {i + 1}': new_list[i] for i in range(12)}
print(monthly_coefficient)
print(f'Coefficient for the year: {coeff_year / 12}')
func(spendings, income)
| Sultan1488/homework_2_5 | code_2_5_3.py | code_2_5_3.py | py | 658 | python | en | code | 0 | github-code | 13 |
42617409293 | from vidar.utils.types import is_seq
def invert_intrinsics(K):
"""Invert camera intrinsics"""
Kinv = K.clone()
Kinv[:, 0, 0] = 1. / K[:, 0, 0]
Kinv[:, 1, 1] = 1. / K[:, 1, 1]
Kinv[:, 0, 2] = -1. * K[:, 0, 2] / K[:, 0, 0]
Kinv[:, 1, 2] = -1. * K[:, 1, 2] / K[:, 1, 1]
return Kinv
def scale_intrinsics(K, ratio):
"""Scale intrinsics given a ratio (tuple for individual hw ratios, float for the same ratio)"""
if is_seq(ratio):
ratio_h, ratio_w = ratio
else:
ratio_h = ratio_w = ratio
K = K.clone()
K[..., 0, 0] *= ratio_w
K[..., 1, 1] *= ratio_h
# K[..., 0, 2] = (K[..., 0, 2] + 0.5) * x_scale - 0.5
# K[..., 1, 2] = (K[..., 1, 2] + 0.5) * y_scale - 0.5
K[..., 0, 2] = K[..., 0, 2] * ratio_w
K[..., 1, 2] = K[..., 1, 2] * ratio_h
return K
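

# Hypothetical usage sketch (assumes K is a (..., 3, 3) intrinsics tensor):
#   K_half = scale_intrinsics(K, 0.5)          # same ratio for height and width
#   K_rect = scale_intrinsics(K, (0.5, 0.25))  # (ratio_h, ratio_w) tuple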
| bingai/vidar | vidar/geometry/camera_utils.py | camera_utils.py | py | 831 | python | en | code | 1 | github-code | 13 |
10000847163 | from notion.client import NotionClient
from notion.block import *
from progress.bar import Bar
# Insert the URL of the page you want to edit (Open Notion is browser)
page_url = "Insert url"
# Obtain the `token_v2` value by inspecting your browser cookies on a logged-in (non-guest) session on Notion.so
tok_v2 = "Insert token v2"
asset_folder = "images"
# Export deck as 'Cards as Plain Text' with 'Include HTML and media references' ticked
textfile = "test.txt"
print("Preparing data")
#Split the input file into questions and answers
list_ = open(textfile).read().split('\n')
questions = []
answers = []
for complete in list_:
qans = complete.split('\t')
if len(qans) == 2:
questions.append(qans[0])
answers.append(qans[1])
def remove_leading_quotes(list_):
for i in range(0, len(list_)):
l = list_[i]
if l[0] == "\"":
l = l[1: len(l) - 1]
list_[i] = l
def remove_trailing_space(list_):
for i in range(0, len(list_)):
list_[i] = list_[i].strip()
def remove_multiple_quotes(list_):
for i in range(0, len(list_)):
l = list_[i]
list_[i] = l.replace("\"\"", "\"")
def remove_subclass_info(list_):
for i in range(0, len(list_)):
l = list_[i]
subclassinfo, quest = l.split(":</br> </div> ")
list_[i] = quest
def split_into_mult_on_image(list_):
for i in range(0, len(list_)):
l = list_[i]
parts = l.split("<img ")
totlist = []
for j in range(0, len(parts)):
ll = parts[j]
subparts = ll.split("\">")
for p in subparts:
totlist.append(p)
list_[i] = totlist
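# Illustrative walk-through of the two splits above (assumed card text, not taken
# from a real export):
#   'Front text <img src="pic.jpg"> more text'
#   -> ['Front text ', 'src="pic.jpg', ' more text']
# Entries that start with 'src="' are later uploaded as image files.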
def remove_br_and_div(list_):
for i in range(0, len(list_)):
sublist = list_[i]
acc = []
for j in range(0, len(sublist)):
subpart = sublist[j]
subpart = subpart.replace("<div>", "")
subpart = subpart.replace("</div>", "")
subpart = subpart.replace("<br>", "")
acc.append(subpart)
list_[i] = acc
def remove_empty_sublist(list_):
for i in range(0, len(list_)):
sublist = list_[i]
acc = []
for j in range(0, len(sublist)):
if sublist[j] != "":
acc.append(sublist[j])
list_[i] = acc
remove_leading_quotes(questions)
remove_trailing_space(questions)
remove_multiple_quotes(questions)
remove_subclass_info(questions)
split_into_mult_on_image(questions)
remove_br_and_div(questions)
remove_empty_sublist(questions)
remove_leading_quotes(answers)
remove_trailing_space(answers)
remove_multiple_quotes(answers)
split_into_mult_on_image(answers)
remove_br_and_div(answers)
remove_empty_sublist(answers)
print("Connecting to Notion")
# Obtain the `token_v2` value by inspecting your browser cookies on a logged-in (non-guest) session on Notion.so
client = NotionClient(token_v2=tok_v2)
# Replace this URL with the URL of the page you want to edit
page = client.get_block(page_url)
# Note: You can use Markdown! We convert on-the-fly to Notion's internal formatted text data structure.
page.title = "Imported From Anki"
bar = Bar('Importing to Notion', max=len(questions))
for i in range(len(questions)):
current_q = questions[i]
current_a = answers[i]
image_question = False
image_pos = {}
question_string = ""
image_missing_shown = False
#Insert question
for i in range(0, len(current_q)):
if current_q[i].startswith("src=\""):
image_pos[i] = True
image_question = True
else:
image_pos[i] = False
question_string += current_q[i]
togglechild = page.children.add_new(ToggleBlock, title = question_string)
if image_question:
for i in range(0, len(image_pos)):
if image_pos[i]:
try:
imagechild = togglechild.children.add_new(EmbedOrUploadBlock)
imagechild.upload_file(asset_folder + "/" + current_q[i][5 : len(current_q[i])])
except:
if not image_missing_shown:
togglechild.title = togglechild.title + " (Missing image)"
image_missing_shown = True
togglechild = togglechild.children.add_new(ToggleBlock, title = "Answer")
#Insert answer
for i in range(0, len(current_a)):
if current_a[i].startswith("src=\""):
try:
imagechild = togglechild.children.add_new(EmbedOrUploadBlock)
imagechild.upload_file(asset_folder + "/" + current_a[i][5 : len(current_a[i])])
except:
if not image_missing_shown:
togglechild.title = togglechild.title + " (Missing image)"
image_missing_shown = True
else:
textchild = togglechild.children.add_new(TextBlock, title = current_a[i])
bar.next()
bar.finish()
| moscars/anki2notion | main.py | main.py | py | 4,963 | python | en | code | 0 | github-code | 13 |
26057758534 | # Imports
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget, QPushButton, QStyle, QHBoxLayout, \
QSlider, QSizePolicy, QSpinBox, QLineEdit, QLabel, QMenu, QInputDialog
from PyQt5.QtMultimedia import QMediaPlayer
from PyQt5.QtGui import QIntValidator
# Constants
ICON_SIZE = QSize(16, 16)
INITIAL_NUM_CHARACTERS = 4
# TODO: Decide on font
# Classes
class ControlBar(QWidget):
# This is another view
def __init__(self):
super().__init__()
self._controller = None
# Also based off: https://stackoverflow.com/a/57842233
# TODO: Improve tooltips and make dynamic
# Play/pause button
self._play_pause_button = QPushButton()
self._play_pause_button.setToolTip('Play/pause the video')
self._play_pause_button.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
# Scrubber
self._scrubber = QSlider(Qt.Horizontal)
self._scrubber.setRange(0, 0)
# TODO: Add helpful tooltips
# Frame decrement button
self._frame_decrement_button = QPushButton()
self._frame_decrement_button.setToolTip('Decrement the current frame '
'by the set amount')
self._frame_decrement_button.setIcon(
self.style().standardIcon(QStyle.SP_MediaSeekBackward))
# Frame increment button
self._frame_increment_button = QPushButton()
self._frame_increment_button.setToolTip('Increment the current frame '
'by the set amount')
self._frame_increment_button.setIcon(
self.style().standardIcon(QStyle.SP_MediaSeekForward))
# Frame display button
self._current_position_box = QLineEdit()
self._current_position_box.setToolTip('The current frame number')
self._current_position_box.setValidator(QIntValidator())
self._current_position_box.setMaxLength(INITIAL_NUM_CHARACTERS)
self._current_position_box.setMaximumWidth(10 * (INITIAL_NUM_CHARACTERS + 1))
self._divider = QLabel('/')
self._total_length_button = QPushButton()
self._total_length_button.setToolTip('The duration of the video in '
'frames')
self._total_length_button.setMaximumWidth(10 * (INITIAL_NUM_CHARACTERS + 1))
self._total_length_button.setSizePolicy(QSizePolicy.Fixed,
QSizePolicy.MinimumExpanding)
# Frame skip amount chooser
self._frame_skip_amount_button = QSpinBox()
self._frame_skip_amount_button.setToolTip('The amount to increment or '
'decrement the video by')
self._frame_skip_amount_button.setMinimum(1)
self._frame_skip_amount_button.setMaximum(10**INITIAL_NUM_CHARACTERS)
self._layout = QHBoxLayout()
self._layout.addWidget(self._play_pause_button)
self._layout.addWidget(self._frame_decrement_button)
self._layout.addWidget(self._frame_skip_amount_button)
self._layout.addWidget(self._frame_increment_button)
self._layout.addWidget(self._scrubber)
self._layout.addWidget(self._current_position_box)
self._layout.addWidget(self._divider)
self._layout.addWidget(self._total_length_button)
self._layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self._layout)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
self._controls = (
self._play_pause_button,
self._frame_decrement_button,
self._frame_increment_button,
self._scrubber,
self._total_length_button,
self._current_position_box,
self._frame_skip_amount_button,
)
self.set_enabled_controls(False)
# Disable the buttons until a video is opened
def register_controller(self, controller):
"""
Registers a controller to the control bar.
Expects the following methods to be defined:
play_pause_toggle()
position_changed(new_position)
increment_position()
decrement_position()
increment_changed(new_increment)
:param controller: The controller to connect the signals to
"""
self._controller = controller
self._play_pause_button.clicked.connect(controller.play_pause_toggle)
self._scrubber.sliderMoved.connect(controller.position_changed)
self._frame_increment_button.clicked.connect(
controller.increment_position)
self._frame_decrement_button.clicked.connect(
controller.decrement_position)
self._frame_skip_amount_button.valueChanged.connect(
controller.increment_changed)
self._current_position_box.textEdited.connect(
self._position_changed)
self._total_length_button.clicked.connect(
self._total_length_clicked)
def _position_changed(self, new_position):
"""
Slot for when the user has changed the current position.
Updates the current position in the control bar, etc.
:param new_position: The new position (units)
"""
try:
new_position = int(new_position)
except ValueError:
new_position = 0
self._controller.position_changed(
self._controller.position_to_ms(new_position))
def set_enabled_controls(self, are_enabled):
"""
Changes whether the controls (buttons, scrubber, etc) are enabled.
:param are_enabled: The new enabled status of the controls.
"""
for control in self._controls:
control.setEnabled(are_enabled)
def set_duration(self, new_duration):
"""
Sets the duration of the scrubber bar.
:param new_duration: The new duration (ms)
"""
self._scrubber.setRange(0, new_duration)
duration_unit = self._controller.ms_to_position(new_duration)
unit_characters = len(str(duration_unit))
self._set_position_sizes(unit_characters)
self._total_length_button.setText(str(duration_unit))
self._frame_skip_amount_button.setMaximum(duration_unit)
def _set_position_sizes(self, num_characters):
"""
Sets the reserved length of the position and duration buttons/labels.
:param num_characters: The number of characters wide to make the items.
"""
self._current_position_box.setMaxLength(num_characters)
self._current_position_box.setMaximumWidth(10 * (num_characters + 1))
self._total_length_button.setMaximumWidth(10 * (num_characters + 1))
def set_position(self, new_position):
"""
Sets the position of the scrubber bar.
:param new_position: The new position (ms)
"""
self._scrubber.setValue(new_position)
self._current_position_box.setText(str(
self._controller.get_current_position()))
def set_media_state(self, new_state):
"""
Sets the media state of the control bar (for the play/pause button)
:param new_state: The new state to use
"""
if new_state == QMediaPlayer.PlayingState:
self._play_pause_button.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
else:
self._play_pause_button.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
def _set_time(self):
frame_number = self._controller.get_current_position('frames')
value, ok = QInputDialog().getDouble(
self, 'Set Time at Frame %d' % frame_number,
'Time (in seconds) at %d:' % frame_number, 0.0)
if ok:
self._controller.set_time(frame_number, value)
def _set_framerate(self):
value, ok = QInputDialog().getDouble(
self, 'Set Framerate',
'New Framerate (fps):', self._controller.get_fps(), 0.0)
if ok and value > 0:
self._controller.set_fps(value)
def _total_length_clicked(self):
menu = QMenu()
actions = {
'Set time': self._set_time,
'Set framerate': self._set_framerate,
}
for text in actions.keys():
menu.addAction(text)
        # Determine the position to place the menu, so that its lower-right
        # corner aligns with the top-right of the button
menu_pos = self.mapToGlobal(self._total_length_button.pos())
menu_pos.setX(menu_pos.x() - menu.sizeHint().width()
+ self._total_length_button.width())
menu_pos.setY(menu_pos.y() - menu.sizeHint().height())
action = menu.exec_(menu_pos)
if action is None:
return
actions[action.text()]()
| Benjymack/video-tracker | video_tracker/video_display/control_bar.py | control_bar.py | py | 8,964 | python | en | code | 2 | github-code | 13 |
31662645034 | dic= {
"album_name": "The Dark Side of the Moon",
"band": "Pink Floyd",
"year": 1973,
"songs": (
"Speak to Me",
"Breathe",
"On the Run",
"Time",
"The Great Gig in the Sky",
"Money",
"Us and Them",
"Any Colour You Like",
"Brain Damage",
"Eclipse"
)
}
for key, value in dic.items():
print(f"{key}: {value}")
del dic["songs"]
del dic["year"]
dic["release_date"] = "March 1st, 1973" | kungfumanda/30DaysOfPython | exercises/day10.py | day10.py | py | 427 | python | en | code | 0 | github-code | 13 |
42231792222 | import sys
file_name = "text.txt"
def temperature(file_name):
try:
with open(file_name, 'r') as f:
lines = f.readlines()
    except IOError:
        print("Error occurred opening the file")
        return None  # without this, `lines` below would be undefined
dict_france = {}
dict_sweden = {}
dict_germany = {}
for i, k in enumerate(lines):
num = k.split(" ")
if i != 0:
dict_france[num[3][0:4]] = int(num[0])
dict_sweden[num[3][0:4]] = int(num[1])
            dict_germany[num[3][0:4]] = int(num[2])
def temp_max_min(args):
        max_temp = float('-inf')
        min_temp = float('inf')
        new_max = ""
        new_min = ""
for k, v in args.items():
if v > max_temp:
max_temp = v
new_max = k
for k, v in args.items():
if v < min_temp:
min_temp = v
new_min = k
return new_min, new_max
france_temp = temp_max_min(dict_france)
sweden_temp = temp_max_min(dict_sweden)
german_temp = temp_max_min(dict_germany)
n1 = "\n"
return f"France => {france_temp} {n1}Sweden = > {sweden_temp} {n1}Germany = > {german_temp}"
print(temperature(file_name)) | tszabad/ibs-2020-10-coding-fundamentals-normal-exam | avgtemp/avgtemp.py | avgtemp.py | py | 1,212 | python | en | code | 0 | github-code | 13 |
35691584566 | #!/usr/bin/env python3
from gi.repository import Gtk, WebKit
import os, re
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
class WebWindow(Gtk.Window):
def __init__(self, html="", tpl={}):
Gtk.Window.__init__(self, title='Progress bar')
self.view = WebKit.WebView()
self.add(self.view)
settings = self.view.get_settings()
settings.set_property('enable-default-context-menu', False)
self.view.set_settings(settings)
self.progress = 0
if html:
self.load_html(html, tpl)
def load_html(self, html, tpl):
fd = open(html, "r")
self.tmp_page = fd.read()
fd.close()
output_page = self.tmp_page
for key, value in tpl.items():
output_page = output_page.replace("{%s}" % key, value)
self.view.load_html_string(output_page, ROOT_DIR + '/html/')
def dispatch(self,view, frame, req, data=None):
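        # WebKit invokes this for every navigation request; returning True
        # swallows the request, which lets the page drive Python through
        # custom URIs such as admin://progress.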
uri = req.get_uri()
if uri.find("://") > 0:
scheme, path = uri.split("://", 1)
else:
return False
if scheme == 'file':
return False
elif scheme == 'admin' and path == "progress":
self.progress += 1
output_page = self.tmp_page.replace("{progress}", "%d" % self.progress)
            self.view.load_html_string(output_page, ROOT_DIR + '/html/')
return True
if __name__ == '__main__':
tpl = { "progress" : "0" }
html = ROOT_DIR + "/html/progress.html"
win = WebWindow(html, tpl)
win.set_default_size(800, 600)
win.set_position(Gtk.WindowPosition.CENTER)
win.connect("delete-event", Gtk.main_quit)
win.view.connect("navigation-requested", win.dispatch)
win.show_all()
Gtk.main()
| daneshih1125/pygtk | webkit/gtkprogress.py | gtkprogress.py | py | 1,761 | python | en | code | 0 | github-code | 13 |
31713510244 | __author__ = 'guang'
from bst import TreeNode
class Codec:
def is_leaf(self, node):
return node and node.left is None and node.right is None
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
>>> codec = Codec()
>>> root = TreeNode(3)
>>> one = TreeNode(1)
>>> root.right = one
>>> string = codec.serialize(root)
>>> string
'3,#,1'
>>> one, two, three, four, five, six = TreeNode(1), TreeNode(2), TreeNode(3), TreeNode(4), TreeNode(5), TreeNode(6)
>>> one.left, one.right = two, three
>>> two.left, three.right = four, five
>>> four.right = six
>>> codec.serialize(one)
'1,2,3,4,#,#,5,#,6'
>>> codec.serialize(six)
'6'
>>> codec.serialize(None)
'#'
>>> one, negative_one, two, three = TreeNode(1), TreeNode(-1), TreeNode(2), TreeNode(3)
>>> one.left, one.right = negative_one, two
>>> two.left = three
>>> codec.serialize(one)
'1,-1,2,#,#,3'
"""
if root is None:
return "#"
nodes = [root]
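        # BFS: appending children while iterating grows `nodes` into a full
        # level-order listing; None children become '#' below.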
for node in nodes:
if node:
nodes.append(node.left)
nodes.append(node.right)
values = [str(node.val) if node else "#" for node in nodes]
result = ','.join(values)
result = result.rstrip(',#')
return result
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
>>> codec = Codec()
>>> codec.deserialize("")
>>> tree = codec.deserialize("1")
>>> tree.val
1
>>> tree = codec.deserialize("1,#,3")
>>> tree.val
1
>>> tree.left is None
True
>>> tree.right.val
3
>>> tree = codec.deserialize("#")
>>> tree is None
True
>>> tree = codec.deserialize("1,2,#,3,#,4,#,5")
>>> tree.show()
>>> tree = codec.deserialize("5,2,3,#,#,2,4,3,1")
>>> tree.show()
>>> tree = codec.deserialize("1,2,3,4,5")
>>> tree.show()
"""
if not data:
return None
values = data.split(',')
nodes = [TreeNode(int(value)) if value != '#' else None for value in values]
i, j, length = 0, 1, len(nodes)
while j < length:
root = nodes[i]
if root is None:
i += 1
continue
else:
left = nodes[j]
right = nodes[j + 1] if j + 1 < length else None
root.left = left
root.right = right
i += 1
j += 2
return nodes[0]
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| gsy/leetcode | serialize_and_deserialize_tree.py | serialize_and_deserialize_tree.py | py | 2,954 | python | en | code | 1 | github-code | 13 |
43656671877 | from typing import List
from interface0718.api.base_api import BaseApi
class ContactApi(BaseApi):
def add(self, userid, name, department: List[int], **kwargs):
path = f"/cgi-bin/user/create?access_token={self.token}"
data = {
"userid": userid,
"name": name,
"department": department,
"access_token": self.token,
**kwargs
}
re = self.http_post(path, json=data)
return re
def update(self, userid, **kwargs):
path = f"/cgi-bin/user/update?access_token={self.token}"
data = {
"userid": userid,
**kwargs
}
re = self.http_post(path, json=data)
return re
def delete(self, userid):
path = "/cgi-bin/user/delete"
data = {
"userid": userid,
"access_token": self.token
}
re = self.http_get(path, params=data)
return re
def get(self, userid):
path = "/cgi-bin/user/get"
data = {
"userid": userid,
"access_token": self.token
}
re = self.http_get(path, params=data)
return re
# def get_all_member(self):
# pass
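

# Hypothetical usage sketch (assumes BaseApi supplies `token`, `http_get` and
# `http_post`; field names follow the WeCom contact API):
#   api = ContactApi()
#   api.add("zhangsan", "Zhang San", department=[1])
#   api.get("zhangsan")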
| dg961111/homework | interface0718/api/contact.py | contact.py | py | 1,224 | python | en | code | 0 | github-code | 13 |
15448392963 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.db import connection
from .forms import PollingUnitResultForm
from django.utils import timezone
import os
from .utils import get_client_ip
from django.contrib import messages
def polling_unit_result(request, unique_id):
with connection.cursor() as cursor:
sql = "SELECT * FROM announced_pu_results WHERE polling_unit_uniqueid = %s"
cursor.execute(sql, (unique_id,))
results = cursor.fetchall()
# Render a template to display the results
context = {'results': results}
return render(request, 'polling_unit_result.html', context)
def local_government_result(request):
# Fetch the list of local governments from the database
with connection.cursor() as cursor:
cursor.execute("SELECT uniqueid, polling_unit_name FROM polling_unit")
local_governments = [{'id': row[0], 'name': row[1]} for row in cursor.fetchall()]
total_results = {}
if request.method == 'POST':
selected_lga_id = request.POST['selected_lga']
# Calculate the summed total result for the selected local government
with connection.cursor() as cursor:
cursor.execute(
"SELECT party_abbreviation, SUM(party_score) "
"FROM announced_pu_results AS pu "
"JOIN polling_unit AS pu_unit ON pu.polling_unit_uniqueid = pu_unit.uniqueid "
"WHERE pu_unit.uniqueid = %s "
"GROUP BY party_abbreviation",
[selected_lga_id]
)
total_results = dict(cursor.fetchall())
context = {
'local_governments': local_governments,
'total_results': total_results
}
return render(request, 'local_government_result.html', context)
def add_polling_unit_result(request):
if request.method == 'POST':
form = PollingUnitResultForm(request.POST)
if form.is_valid():
# Get the cleaned data from the form
cleaned_data = form.cleaned_data
# Extract party scores from cleaned_data
party_scores = {
'PDP': cleaned_data['pdp_score'],
'DPP': cleaned_data['dpp_score'],
'ACN': cleaned_data['acn_score'],
'PPA': cleaned_data['ppa_score'],
'CDC': cleaned_data['cdc_score'],
'JP': cleaned_data['jp_score'],
# Add more party scores as needed
}
# Save the results in the database using raw SQL queries or Django models
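            # Parameterised placeholders (%s plus an argument list) let the DB
            # driver handle escaping, so user-supplied form data cannot inject SQL.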
with connection.cursor() as cursor:
for party, score in party_scores.items():
cursor.execute(
"INSERT INTO announced_pu_results "
"(polling_unit_uniqueid, party_abbreviation, party_score, entered_by_user, date_entered, user_ip_address) "
"VALUES (%s, %s, %s, %s, %s, %s)",
[
cleaned_data['polling_unit_unique_id'],
party,
score,
'Ifeanyi Onyekwelu',
timezone.now(),
get_client_ip(request),
]
)
messages.success(request, 'Polling unit result saved successfully')
return redirect('election_results_app:polling_unit_result', unique_id=cleaned_data['polling_unit_unique_id'])
else:
form = PollingUnitResultForm()
context = {
'form': form,
}
return render(request, 'polling_unit_result_form.html', context)
| ifekel/Election-Polling | election_results_app/views.py | views.py | py | 3,739 | python | en | code | 0 | github-code | 13 |
27341641648 | import argparse
import torch
import numpy as np
import torch.nn as nn
import tensorflow as tf
from resnet import get_resnet, name_to_params
parser = argparse.ArgumentParser(description='SimCLR converter')
parser.add_argument('tf_path', type=str, help='path of the input tensorflow file (ex: model.ckpt-250228)')
parser.add_argument('--ema', action='store_true')
parser.add_argument('--supervised', action='store_true')
args = parser.parse_args()
def main():
use_ema_model = args.ema
prefix = ('ema_model/' if use_ema_model else '') + 'base_model/'
head_prefix = ('ema_model/' if use_ema_model else '') + 'head_contrastive/'
# 1. read tensorflow weight into a python dict
vars_list = []
contrastive_vars = []
for v in tf.train.list_variables(args.tf_path):
if v[0].startswith(prefix) and not v[0].endswith('/Momentum'):
vars_list.append(v[0])
elif v[0] in {'head_supervised/linear_layer/dense/bias', 'head_supervised/linear_layer/dense/kernel'}:
vars_list.append(v[0])
elif v[0].startswith(head_prefix) and not v[0].endswith('/Momentum'):
contrastive_vars.append(v[0])
sd = {}
ckpt_reader = tf.train.load_checkpoint(args.tf_path)
for v in vars_list:
sd[v] = ckpt_reader.get_tensor(v)
split_idx = 2 if use_ema_model else 1
# 2. convert the state_dict to PyTorch format
conv_keys = [k for k in sd.keys() if k.split('/')[split_idx].split('_')[0] == 'conv2d']
conv_idx = []
for k in conv_keys:
mid = k.split('/')[split_idx]
if len(mid) == 6:
conv_idx.append(0)
else:
conv_idx.append(int(mid[7:]))
arg_idx = np.argsort(conv_idx)
conv_keys = [conv_keys[idx] for idx in arg_idx]
bn_keys = list(set([k.split('/')[split_idx] for k in sd.keys()
if k.split('/')[split_idx].split('_')[0] == 'batch']))
bn_idx = []
for k in bn_keys:
if len(k.split('_')) == 2:
bn_idx.append(0)
else:
bn_idx.append(int(k.split('_')[2]))
arg_idx = np.argsort(bn_idx)
bn_keys = [bn_keys[idx] for idx in arg_idx]
depth, width, sk_ratio = name_to_params(args.tf_path)
model, head = get_resnet(depth, width, sk_ratio)
conv_op = []
bn_op = []
for m in model.modules():
if isinstance(m, nn.Conv2d):
conv_op.append(m)
elif isinstance(m, nn.BatchNorm2d):
bn_op.append(m)
assert len(vars_list) == (len(conv_op) + len(bn_op) * 4 + 2) # 2 for fc
for i_conv in range(len(conv_keys)):
m = conv_op[i_conv]
w = torch.from_numpy(sd[conv_keys[i_conv]]).permute(3, 2, 0, 1)
assert w.shape == m.weight.shape, f'size mismatch {w.shape} <> {m.weight.shape}'
m.weight.data = w
for i_bn in range(len(bn_keys)):
m = bn_op[i_bn]
gamma = torch.from_numpy(sd[prefix + bn_keys[i_bn] + '/gamma'])
assert m.weight.shape == gamma.shape, f'size mismatch {gamma.shape} <> {m.weight.shape}'
m.weight.data = gamma
m.bias.data = torch.from_numpy(sd[prefix + bn_keys[i_bn] + '/beta'])
m.running_mean = torch.from_numpy(sd[prefix + bn_keys[i_bn] + '/moving_mean'])
m.running_var = torch.from_numpy(sd[prefix + bn_keys[i_bn] + '/moving_variance'])
w = torch.from_numpy(sd['head_supervised/linear_layer/dense/kernel']).t()
assert model.fc.weight.shape == w.shape
model.fc.weight.data = w
b = torch.from_numpy(sd['head_supervised/linear_layer/dense/bias'])
assert model.fc.bias.shape == b.shape
model.fc.bias.data = b
if args.supervised:
save_location = f'r{depth}_{width}x_sk{1 if sk_ratio != 0 else 0}{"_ema" if use_ema_model else ""}.pth'
torch.save({'resnet': model.state_dict(), 'head': head.state_dict()}, save_location)
return
sd = {}
for v in contrastive_vars:
sd[v] = ckpt_reader.get_tensor(v)
linear_op = []
bn_op = []
for m in head.modules():
if isinstance(m, nn.Linear):
linear_op.append(m)
elif isinstance(m, nn.BatchNorm1d):
bn_op.append(m)
for i, (l, m) in enumerate(zip(linear_op, bn_op)):
l.weight.data = torch.from_numpy(sd[f'{head_prefix}nl_{i}/dense/kernel']).t()
common_prefix = f'{head_prefix}nl_{i}/batch_normalization/'
m.weight.data = torch.from_numpy(sd[f'{common_prefix}gamma'])
if i != 2:
m.bias.data = torch.from_numpy(sd[f'{common_prefix}beta'])
m.running_mean = torch.from_numpy(sd[f'{common_prefix}moving_mean'])
m.running_var = torch.from_numpy(sd[f'{common_prefix}moving_variance'])
# 3. dump the PyTorch weights.
save_location = f'r{depth}_{width}x_sk{1 if sk_ratio != 0 else 0}{"_ema" if use_ema_model else ""}.pth'
torch.save({'resnet': model.state_dict(), 'head': head.state_dict()}, save_location)
if __name__ == '__main__':
main()
| Separius/SimCLRv2-Pytorch | convert.py | convert.py | py | 4,943 | python | en | code | 96 | github-code | 13 |
2251925569 | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
import logging
from django.conf import settings
import sys
import boto3
import json
from client.consumers import WsConsumer, emit_message_to_user
from client.serializers import UserSerializer
logger = logging.getLogger(__name__)
class Actions:
INSERT = 0
UPDATE = 1
DELETE = 2
def insert_user(data, timestamp):
username = data['username']
serialized_user = UserSerializer(data=data)
serialized_user.create(validated_data=data)
logging.info(f"user: {username} created at {timestamp}")
def update_user(data, timestamp):
username = data['username']
try:
user = User.objects.get(username=data['username'])
serialized_user = UserSerializer(user)
serialized_user.update(user, data)
logging.info(f"user: {username} updated at {timestamp}")
except User.DoesNotExist:
logging.info(f"user: {username} don't exits. Creating ...")
insert_user(data, timestamp)
def delete_user(data, timestamp):
username = data['username']
try:
user = User.objects.get(username=username)
user.delete()
logging.info(f"user: {username} deleted at {timestamp}")
except User.DoesNotExist:
logging.info(f"user: {username} don't exits. Don't deleted")
switch_actions = {
Actions.INSERT: insert_user,
Actions.UPDATE: update_user,
Actions.DELETE: delete_user,
}
def notify_to_user(user):
username = user['username']
serialized_user = UserSerializer(user)
emit_message_to_user(
message=serialized_user.data,
username=username, )
class Command(BaseCommand):
help = 'sqs listener'
def handle(self, *args, **options):
self.stdout.write(self.style.WARNING("starting listener"))
sqs = boto3.client('sqs')
queue_url = settings.SQS_REACTIVE_TABLES
def process_message(message):
decoded_body = json.loads(message['Body'])
data = json.loads(decoded_body['Message'])
switch_actions.get(data['action'])(
data=data['user'],
timestamp=message['Attributes']['SentTimestamp']
)
notify_to_user(data['user'])
sqs.delete_message(
QueueUrl=queue_url,
ReceiptHandle=message['ReceiptHandle'])
def loop():
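            # Long-poll SQS (WaitTimeSeconds=20): the call blocks for up to 20 s
            # waiting for up to 10 messages instead of returning immediately.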
response = sqs.receive_message(
QueueUrl=queue_url,
AttributeNames=[
'SentTimestamp'
],
MaxNumberOfMessages=10,
MessageAttributeNames=[
'All'
],
WaitTimeSeconds=20
)
if 'Messages' in response:
messages = [message for message in response['Messages'] if 'Body' in message]
[process_message(message) for message in messages]
try:
while True:
loop()
except KeyboardInterrupt:
sys.exit(0)
| gonzalo123/django_reactive_users | client/client/management/commands/listener.py | listener.py | py | 3,167 | python | en | code | 1 | github-code | 13 |
4827778194 | from django.http import HttpResponse
from django.contrib.auth.models import User, Group
from .models import Student, Faculty
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect, render
from .forms import StudentForm, FacultyForm, UserRegistration
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .decorators import *
from classes.models import Classroom,Post, student_classroom
@unauthenticated_user
def FacultyRegistration(request):
userform = UserRegistration()
facultyform = FacultyForm()
if request.method == 'POST':
userform = UserRegistration(request.POST)
facultyform = FacultyForm(request.POST, request.FILES)
if userform.is_valid() and facultyform.is_valid():
user = userform.save()
group = Group.objects.get(name ='Faculty')
user.groups.add(group)
user.save()
faculty = facultyform.save(commit=False)
faculty.user = user
faculty.save()
messages.success(request,'Account was created for ' + faculty.user.username)
return redirect ('loginPage')
else:
print (userform.errors, facultyform.errors)
context = {'facultyform': facultyform, 'userform': userform}
return render(request, 'faculty.html', context)
@unauthenticated_user
def StudentRegistration(request):
userform = UserRegistration()
studentform = StudentForm()
if request.method == 'POST':
userform = UserRegistration(request.POST)
studentform = StudentForm(request.POST, request.FILES)
if userform.is_valid() and studentform.is_valid():
user = userform.save()
group = Group.objects.get(name ='Student')
user.groups.add(group)
user.save()
student = studentform.save(commit=False)
student.user = user
student.save()
messages.success(request,'Account was created for ' + student.user.username)
return redirect ('loginPage')
else:
print (userform.errors, studentform.errors)
context = {'userform': userform, 'studentform': studentform}
return render(request, 'student.html', context)
@unauthenticated_user
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username = username, password = password)
if user is not None:
login(request, user)
return redirect ('homePage')
else:
messages.info (request, 'Email address or Password is incorrect')
#return render(request, 'sign_in.html')
context = {}
return render(request, 'sign_in.html', context)
@login_required(login_url='loginPage')
@student_only
def homePage(request):
classroom = student_classroom.objects.all()
context = {'classroom':classroom}
return render (request,'index.html',context)
@login_required(login_url='loginPage')
@student_only
def studentProfile(request,pk):
previousStudentInfo = Student.objects.get(id = pk)
newStudentForm = StudentForm(instance=previousStudentInfo)
newUserForm = UserRegistration(instance=request.user)
if request.method =='POST':
newUserForm = UserRegistration(request.POST, instance= request.user)
newStudentForm = StudentForm(request.POST, request.FILES, instance= previousStudentInfo)
if newUserForm.is_valid() and newStudentForm.is_valid():
newUserForm.save()
newStudentForm.save()
return redirect ('/')
else:
context = {'newUserForm':newUserForm,'newStudentForm':newStudentForm}
return render(request,'student_profile_view.html',context)
@login_required(login_url='loginPage')
def facultyDashboard(request):
classroom = Classroom.objects.all()
class_count=Classroom.objects.filter(faculty=request.user).count()
context={
'classroom' : classroom,'class_count':class_count
}
return render(request, 'instructor_dashboard.html',context)
@login_required(login_url='loginPage')
def facultyProfile(request,pk):
previousFacultyInfo = Faculty.objects.get(id = pk)
newFacultyForm = FacultyForm(instance=previousFacultyInfo)
newUserForm = UserRegistration(instance=request.user)
if request.method =='POST':
newUserForm = UserRegistration(request.POST, instance= request.user)
newFacultyForm = FacultyForm(request.POST, request.FILES, instance= previousFacultyInfo)
if newUserForm.is_valid() and newFacultyForm.is_valid():
newUserForm.save()
newFacultyForm.save()
return redirect ('/')
else:
context = {'newUserForm':newUserForm,'newFacultyForm':newFacultyForm}
return render(request,'faculty_profile_view.html',context)
def logoutUser(request):
logout(request)
return redirect('loginPage') | nayeemsweb/CSE499-Spring21-Project | Code/Backend/classroom/accounts/views.py | views.py | py | 5,100 | python | en | code | 0 | github-code | 13 |
7033712506 | from django.shortcuts import render
from AppAirsoft.models import *
from AppAirsoft.forms import *
# Create our views
def vista_inicio(request):
    return render(request, "Airsoft/index.html")
def vista_registro(request):
if request.method == "POST":
formulario = UsuarioForm(request.POST)
if formulario.is_valid():
data = formulario.cleaned_data
user = Usuario(nombre = data["nombre"], apellido = data["apellido"], edad = data["edad"], email = data["email"], nombre_usuario = data["nombre_usuario"], contrasenia = data["contrasenia"])
user.save()
formulario = UsuarioForm()
return render(request, "Airsoft/registro.html", {"formulario": formulario})
def vista_equipamiento(request):
if request.method == "POST":
formulario = EquipamientoForm(request.POST)
if formulario.is_valid():
data = formulario.cleaned_data
equipamiento = Equipamiento(tipo_de_equipamiento = data["tipo_de_equipamiento"], precio = data["precio"], accesorios = data["accesorios"])
equipamiento.save()
formulario = EquipamientoForm()
return render(request, "Airsoft/equipamiento.html", {"formulario": formulario})
def vista_replica(request):
if request.method == "POST":
formulario = ReplicaForm(request.POST)
if formulario.is_valid():
data = formulario.cleaned_data
replica = Replica(nombre_replica = data["nombre_replica"], precio = data["precio"])
replica.save()
formulario = ReplicaForm()
return render(request, "Airsoft/replica.html", {"formulario": formulario})
def vista_inicio_sesion(request):
return render(request, "Airsoft/iniciar_sesion.html")
def vista_busqueda(request):
return render(request, "Airsoft/busqueda.html")
| AngeloPettinari/Prueba | AppAirsoft/views.py | views.py | py | 1,820 | python | es | code | 0 | github-code | 13 |
7397890083 | class Node:
def __init__(self, val, next):
self.val = val
self.next = next
class Queue:
def __init__(self):
self.head=None
self.tail=None
def enqueue(self,v):
if self.head==None:
self.head=Node(v,None)
self.tail=self.head
else:
self.tail.next=Node(v,None)
self.tail=self.tail.next
def dequeue(self):
if self.head==None:
print("Queue is empty")
else:
val=self.head.val
self.head=self.head.next
if self.head==None:
                self.tail = None
return val
def isempty(self):
return self.head==None
n=Queue()
n.enqueue(2)
n.enqueue(4)
n.enqueue(4)
a=n.dequeue()
print(a)
a=n.dequeue()
print(a)
b=n.isempty()
print(b)
a=n.dequeue()
print(a)
a=n.dequeue()
print(a)
b=n.isempty()
print(b) | NandhniV25/Data-Structures | 03_Queue/03_queue_optimize.py | 03_queue_optimize.py | py | 883 | python | en | code | 0 | github-code | 13 |
13143905125 | """
Scrapes data from a CSV file of marriage certificates mined from the Royal BC Museum's
genealogy database.
"""
import csv
from datetime import datetime
from db.db_models import MarriageCert, Person
from scraping.utils import extract_name_fields
from utils.bcmuseum_miner import FIELDS
def scrape_marriagecerts_csv(csv_file):
with open(csv_file, 'r', encoding="latin-1") as file:
reader = csv.DictReader(file, FIELDS)
next(reader) # skip header
for row in reader:
# Extract bride and groom first, middle and last names
groom_first, groom_middle, groom_last = extract_name_fields(row["Groom"])
bride_first, bride_middle, bride_last = extract_name_fields(row["Bride"])
date_str = row["Event Date (YYYY-MM-DD)"]
if date_str:
event_date = datetime.strptime(row["Event Date (YYYY-MM-DD)"], "%Y-%m-%d")
else:
event_date = None
event_place = row["Event Place"].upper()
yield MarriageCert(
Bride_FirstName=bride_first,
Bride_MiddleName=bride_middle,
Bride_LastName=bride_last,
Groom_FirstName=groom_first,
Groom_MiddleName=groom_middle,
Groom_LastName=groom_last,
Event_Place=event_place,
Event_Date=event_date
)
| ajdeziel/your-name-here | scraping/marriagecerts.py | marriagecerts.py | py | 1,414 | python | en | code | 0 | github-code | 13 |
26963933265 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 13:14:01 2022
@author: elect
"""
i=int(input('enter the limit:'))
sum=0
k=1
while k<=i:
sum+=k
k+=1
print('sum is =',sum)
| Syamkrishna123/MyPythonProgramming_practice | while.py | while.py | py | 186 | python | en | code | 1 | github-code | 13 |
9533082967 | # -*- coding: utf-8 -*-
import json
import scrapy
from lianjia.items import LianjiaItem
from scrapy import Request
from scrapy_redis.spiders import RedisSpider
class LianjiacrawlSpider(RedisSpider):
name = 'lianjiacrawl'
allowed_domains = ['cd.lianjia.com']
    # New developments (new-home listings)
# start_urls = 'https://cd.fang.lianjia.com/loupan/pg{page}'
domains_url = 'https://cd.lianjia.com'
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:60.0) Gecko/20100101 Firefox/60.0'
headers = {'User-Agent': user_agent}
# def start_requests(self):
    # Fetch 100 pages of new-home data
# for i in range(1, 101):
# yield Request(self.start_urls.format(page=i), headers=self.headers,
# callback=self.parse)
# def parse(self, response):
#
# return item
    # Second-hand (resale) homes
start_urls = 'https://cd.lianjia.com/ershoufang'
def start_requests(self):
yield Request(self.start_urls, headers=self.headers, callback=self.get_area_url)
def get_area_url(self, response):
areas_link = response.xpath('/html/body/div[3]/div/div[1]/dl[2]/dd/div[1]/div/a')
for area in areas_link:
area_href = area.xpath('./@href').extract()[0]
area_name = area.xpath('./text()').extract()[0]
yield Request(self.domains_url + area_href, headers=self.headers,
callback=self.get_page, meta={'area': area_name, 'href': area_href})
def get_page(self, response):
page_box = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract()
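        # page-data is a JSON attribute such as '{"totalPage": 100, "curPage": 1}'
        # (assumed shape, inferred from the totalPage key read below).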
page_num = json.loads(page_box[0]).get('totalPage')
for i in range(1, page_num + 1):
yield Request(self.domains_url + response.meta.get('href') + 'pg' + str(i),
headers=self.headers, meta={'name': response.meta.get('area')},
callback=self.get_house_info)
def get_house_info(self, response):
house_list = response.xpath('/html/body/div[4]/div[1]/ul/li')
for house in house_list:
try:
items = LianjiaItem()
                # Total number of listings
                items['house_amount'] = response.xpath('/html/body/div[4]/div[1]/div[2]/h2/span/text()').extract()[0]
                # Listing title, e.g. "Xiyayuan standard two-bedroom, middle floor, quiet, not street-facing"
                items['title'] = house.xpath('div[1]/div[1]/a/text()').extract()[0]
                # Street / location, e.g. "No. 67 Jiaochangba East Street"
                items['street'] = house.xpath('div[1]/div[2]/div/a/text()').extract()[0]
                # Total price
                items['price'] = house.xpath('div[1]/div[6]/div[1]/span/text()').extract()[0]
                # Unit price (per square metre)
                items['unit_price'] = house.xpath('div[1]/div[6]/div[2]/span/text()').extract()[0]
                # House info, e.g. "| 2 bed 1 living | 54.25 sqm | N NE | refined decoration | no elevator"
                house_info = house.xpath('div[1]/div[2]/div/text()').extract()[0]
                # Floor info, e.g. "high floor (7 floors in total), slab building"
                items['floor'] = house.xpath('div[1]/div[3]/div/text()').extract()[0]
                # Business district, e.g. "Hejiangting"
                items['local'] = house.xpath('div[1]/div[3]/div/a/text()').extract()[0]
                # Follow info, e.g. "237 followers / 37 viewings in total / published 1 month ago"
                follow_info = house.xpath('div[1]/div[4]/text()').extract()[0]
                # Listing type ('二手房' means second-hand home)
                items['type'] = '二手房'
items['area'] = response.meta.get('name')
huxing = house_info.split('|')[1]
mianji = house_info.split('|')[2]
info = house_info.split('|')[3:]
house_infos = {'huxing': huxing, 'mianji': mianji, 'info': info}
follow_num = follow_info.split('/')[0]
publish_time = follow_info.split('/')[-1]
follow_infos = {'follow_num': follow_num, 'publish_time': publish_time}
items['house_info'] = house_infos
items['follow_info'] = follow_infos
# infos = LianjiaInfoItem()
# infos['title'] = items['title']
# infos['house_info'] = house_infos
# infos['follow_info'] = follow_infos
yield items
# yield infos
except Exception:
pass
| simonzhao88/practice | scrapy_pro/lianjia/lianjia/spiders/lianjiacrawl.py | lianjiacrawl.py | py | 4,432 | python | en | code | 0 | github-code | 13 |