id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
11496117
|
import random
import sys
from nekoyume.battle.simul import DummyBattle
def main():
    """Run one DummyBattle simulation, seeded from argv[1] (default 1)."""
    seed = int(sys.argv[1]) if len(sys.argv) >= 2 else 1
    battle = DummyBattle(random.Random(seed))
    battle.logger.print = True
    battle.simulate()


if __name__ == '__main__':
    main()
|
11496141
|
import os
from _pytest.capture import CaptureFixture
from kaldi_helpers.input_scripts.clean_json import *
from kaldi_helpers.script_utilities import write_data_to_json_file
EXAMPLE_JSON_DATA = [
{"transcript": "Comment t'appelles tu?"},
{"transcript": "Je m'appelle François."},
{"transcript": "Est-ce tu a une livre préférér."},
{"transcript": "Oui, j'adore L'histoire secrète par <NAME>."},
{"transcript": "Vraiment? Je n'ai jamais lu ça."},
]
def test_get_english_words() -> None:
    """The English word set contains English words and no French ones."""
    words = get_english_words()
    assert "test" in words
    assert "français" not in words
def test_clean_utterance_remove_english() -> None:
    """Words flagged as English are stripped from the transcript and counted."""
    utterance = {"transcript": "je veux une petite dejeuner"}
    words, english_count = clean_utterance(utterance,
                                           remove_english=True,
                                           english_words=get_english_words(),
                                           punctuation="…’“–”‘°",
                                           special_cases=['<silence>'])
    # The last two words are (surprisingly) in the English word list.
    assert words == ['je', 'veux', 'une']
    assert english_count == 2
def test_clean_utterance_keep_english() -> None:
    """Punctuation is stripped and words lower-cased; nothing is counted as English here."""
    utterance = {"transcript": "I say, jeune homme!"}
    words, english_count = clean_utterance(utterance,
                                           remove_english=True,
                                           english_words=get_english_words(),
                                           punctuation="…,’“–”‘°!",
                                           special_cases=['<silence>'])
    assert words == ['i', 'say', 'jeune', 'homme']
    assert english_count == 0
def test_is_valid_utterance_remove_english() -> None:
    """A fully French utterance passes validation with langid filtering on."""
    words = ['je', 'veux', 'acheter', 'la', 'nouveau', 'bonbon', 'pour', 'ma', 'mère',
             'et', 'mon', 'père']
    identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
    assert is_valid_utterance(clean_words=words,
                              english_word_count=0,
                              remove_english=True,
                              use_langid=True,
                              langid_identifier=identifier) is True
def test_is_valid_utterance_keep_english() -> None:
    """A mixed utterance passes when English filtering and langid are disabled."""
    words = ['i', 'say', 'jeune', 'homme']
    identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
    assert is_valid_utterance(clean_words=words,
                              english_word_count=0,
                              remove_english=False,
                              use_langid=False,
                              langid_identifier=identifier) is True
def test_clean_json_data_full() -> None:
    """End-to-end cleaning keeps only the fully cleanable French transcripts.

    Removed a leftover debug print of the cleaned data; the assertion alone
    documents the expected output.
    """
    clean_data = clean_json_data(EXAMPLE_JSON_DATA,
                                 remove_english=True,
                                 use_langid=True)
    assert clean_data == [
        {"transcript": "je mappelle françois"},
        {"transcript": "vraiment je nai jamais lu ça"}
    ]
def test_clean_json_data_full_file() -> None:
    """Run the clean_json script over a JSON file, cleaning up afterwards.

    Fixes: the bare ``open(file_out_name, 'w')`` leaked a file handle
    (now closed via a context manager), and the temp files were not removed
    when the body raised (now guarded by try/finally).
    """
    file_in_name = 'file_in.json'
    file_out_name = 'file_out.json'
    # Create an empty output file portably (replaces the old `touch`).
    with open(file_out_name, 'w'):
        pass
    try:
        write_data_to_json_file(EXAMPLE_JSON_DATA, file_in_name)
        os.system(f"clean_json.py --infile {file_in_name} --outfile {file_out_name}"
                  f" --removeEng --useLangid")
        # TODO(review): re-enable once the script's output is stable:
        # assert load_json_file(file_out_name) == [
        #     {"transcript": "je mappelle françois"},
        #     {"transcript": "vraiment je nai jamais lu ça"}
        # ]
    finally:
        os.remove(file_in_name)
        os.remove(file_out_name)
def test_clean_json_data_full_command_line(capsys: CaptureFixture) -> None:
    # Placeholder: command-line behaviour of clean_json is not covered yet.
    pass
|
11496151
|
import logging
from typing import TypeVar
from jmetal.lab.visualization import InteractivePlot
LOGGER = logging.getLogger('Sequoya')
S = TypeVar('S')
class MSAPlot(InteractivePlot):
    """Interactive plot with an embedded MSA (multiple sequence alignment)
    viewer: clicking a point in the Plotly figure loads that solution's
    alignment (carried in the point's customdata) into the viewer."""

    def export_to_html(self, filename: str) -> None:
        """Write the dashboard page to ``<filename>.html``.

        The page embeds the Plotly figure produced by ``export_to_div`` plus
        a BioJS MSA viewer wired to the figure's click events.
        """
        html_string = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Sequoya dashboard</title>
<!-- Plotly -->
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<!-- Bulma stylesheet -->
<link href="https://cdnjs.cloudflare.com/ajax/libs/bulma/0.7.4/css/bulma.min.css" rel="stylesheet" />
<style>
.smenubar a.smenubar_alink {
background: #00d1b2;
border-radius: 5px;
padding: 2px 20px;
text-decoration: none;
background-image: none;
color: #fff;
padding: 3px 10px;
margin-left: 10px;
text-decoration: none;
}
.box {
overflow: hidden;
}
.biojs_msa_div {
margin: 5px;
overflow: hidden;
}
</style>
</head>
<body>
<section class="hero is-primary">
<div class="hero-body">
<div class="container">
<figure class="image is-128x128">
<img src="https://raw.githubusercontent.com/benhid/Sequoya/develop/docs/sequoya-white.png" />
</figure>
</div>
</div>
</section>
<section class="section">
<div class="container">
''' + self.export_to_div(include_plotlyjs=False) + '''
</div>
</section>
<section class="section">
<div class="container">
<h1 class="subtitle">MSA viewer</h1>
<div class="box">
<div id="menuDiv"></div>
<div id="rootDiv">(select solution from figure)</div>
</div>
</div>
</section>
<footer class="footer">
<div class="content has-text-centered">
<p>
<strong>Sequoya</strong> by <a href="https://benhid.com"><NAME></a>.
</p>
</div>
</footer>
<script src="http://cdn.bio.sh.s3.eu-central-1.amazonaws.com/msa/latest/msa.min.gz.js"></script>
<script>
var myPlot = document.getElementsByClassName('plotly-graph-div js-plotly-plot')[0];
myPlot.on('plotly_click', function (data) {
var pts = '';
for (var i = 0; i < data.points.length; i++) {
pts = '(x, y) = (' + data.points[i].x + ', ' + data.points[i].y.toPrecision(4) + ')';
multiple_seq = data.points[i].customdata
if (multiple_seq == undefined) multiple_seq = "";
}
// read msa
var opts = {
el: document.getElementById("rootDiv"),
seqs: msa.io.fasta.parse(multiple_seq),
vis: {
conserv: false,
overviewbox: false,
seqlogo: true
},
conf: {
dropImport: true,
debug: false,
},
zoomer: {
menuFontsize: "12px",
autoResize: true
}
};
// init msa
var m = new msa.msa(opts);
renderMSA();
function renderMSA() {
// the menu is independent to the MSA container
var menuOpts = {
el: document.getElementById('menuDiv'),
msa: m
};
var defMenu = new msa.menu.defaultmenu(menuOpts);
m.addView("menu", defMenu);
// call render at the end to display the whole MSA
m.render();
}
});
</script>
</body>
</html>'''
        # The page declares <meta charset="utf-8">, so write the file as
        # UTF-8 explicitly instead of relying on the platform default.
        with open(filename + '.html', 'w', encoding='utf-8') as outf:
            outf.write(html_string)
|
11496183
|
from typing import Generator
from contextlib import contextmanager
from subprocess import CompletedProcess
from pathlib import Path
from tempfile import TemporaryDirectory
from . import run
class Git:
    """Thin wrapper around the ``git`` CLI rooted at a working directory."""

    def __init__(self, root: str = '.') -> None:
        # Directory every git command runs in.
        self._root = root
        self.path = Path(root)

    def _run(self, *args: str, **kwargs) -> CompletedProcess:
        # Delegate to the package-level `run`, pinning cwd to this repo.
        return run(*args, cwd=self._root, **kwargs)

    @property
    def head_ref(self):
        # Symbolic name of HEAD, e.g. 'refs/heads/main'.
        return self._run('git', 'symbolic-ref', 'HEAD', capture_output=True).stdout.decode().strip()

    def get_commit_from_rev(self, rev: str) -> str:
        # Resolve a revision (branch, tag, abbreviated sha, ...) to a commit hash.
        return self._run('git', 'rev-parse', '--verify', rev, capture_output=True).stdout.decode().strip()

    def get_commit_timestamp(self, rev: str) -> int:
        # Commit time of `rev` as a Unix timestamp (git's %ct format).
        return int(self._run('git', 'show', '-s', '--format=%ct', rev, capture_output=True).stdout.decode().strip())

    @contextmanager
    def worktree(self, rev: str, *, detach: bool = False) -> Generator['Git', None, None]:
        """Check out `rev` into a temporary worktree and yield a Git bound to it."""
        wt = ''
        try:
            with TemporaryDirectory() as wt:
                self._run('git', 'worktree', 'add', *(['--detach'] if detach else []), wt, rev)
                yield Git(wt)
        finally:
            # NOTE(review): TemporaryDirectory has already deleted the
            # directory by the time this finally-block runs, and
            # 'git worktree remove' may fail on a missing path — confirm
            # that `run` tolerates a non-zero exit here (or that
            # 'git worktree prune' was intended).
            if wt:
                self._run('git', 'worktree', 'remove', wt)
|
11496195
|
import json
import os
import pickle
def load_tasks(dir_path, task_num=None):
    """Collect task dicts from every ``.json`` file in *dir_path*.

    Args:
        dir_path: directory whose ``*.json`` files are scanned; each file
            must contain a JSON list of dicts.
        task_num: if given, keep only entries whose ``id`` equals it.
            Previously the default ``None`` could never match anything
            (``int(d["id"]) == None``); it now means "keep every entry
            that has an ``id`` field", matching the default's evident intent.

    Returns:
        A list of matching task dicts.
    """
    tasks = []
    for name in os.listdir(dir_path):
        filename = os.path.join(dir_path, name)
        if not filename.endswith(".json"):
            continue
        with open(filename, encoding="utf-8") as f:
            # json.load reads the stream directly; the old
            # read()/encode()/loads() round-trip was redundant.
            data = json.load(f)
        tasks += [d for d in data
                  if "id" in d and (task_num is None or int(d["id"]) == task_num)]
    return tasks
def load_pickle(path):
    """Deserialize and return the object pickled at *path*."""
    with open(path, "rb") as handle:
        return pickle.load(handle)
def save_pickle(obj, path):
    """Pickle *obj* to the file at *path*, overwriting any existing file."""
    with open(path, "wb") as handle:
        pickle.dump(obj, handle)
def read_config(config_path):
    """Load and return a JSON config from *config_path*.

    Returns None when *config_path* is not a string path (same as before,
    where the non-string case fell through without a return).
    """
    if not isinstance(config_path, str):
        return None
    with open(config_path, "r", encoding="utf-8") as handle:
        return json.load(handle)
def save_config(config_path, object_to_save):
    """Write *object_to_save* as pretty-printed UTF-8 JSON to *config_path*.

    Does nothing when *config_path* is not a string path.
    """
    if not isinstance(config_path, str):
        return
    with open(config_path, "w+", encoding="utf-8") as handle:
        json.dump(object_to_save, handle, ensure_ascii=False, indent=4)
|
11496222
|
from create_grids import create_grids
from create_ics import create_ics
from run_model import run_model
from stress_divergence_scaling import stress_divergence_scaling
from error_analysis_stress_divergence import error_analysis_stress_divergence

# Driver script: run the stress-divergence scaling study end to end.
# Each step presumably consumes the previous step's output files, so the
# order below should not be changed — TODO confirm.
create_grids()                        # 1. build the model grids
create_ics()                          # 2. build the initial conditions
run_model()                           # 3. run the model
stress_divergence_scaling()           # 4. compute the scaling results
error_analysis_stress_divergence()    # 5. analyse the stress-divergence errors
|
11496230
|
import asyncio
from datetime import datetime
from typing import Optional, Union, Any
from aiohttp import ClientSession
from .questions import QuestionsCategory, parse_questions_category
from .requester import Requester
class Questionnaire:
    """An ordered sequence of question categories with cursor arithmetic."""

    def __init__(self, categories: list[QuestionsCategory]):
        self._categories = categories

    @property
    def size(self) -> int:
        """Number of categories in the questionnaire."""
        return len(self._categories)

    def get_category(self, category_index: int) -> QuestionsCategory:
        """Return the category at *category_index*."""
        return self._categories[category_index]

    def get_next_indices(self, category_index: int, question_index: int) -> \
            Optional[tuple[int, int]]:
        """Return (category, question) indices of the question after the given
        one, or None when the given question is the very last."""
        at_last_question = \
            question_index == self.get_category(category_index).size - 1
        if at_last_question and category_index == self.size - 1:
            # Very last question of the whole questionnaire.
            return None
        if at_last_question:
            # Move to the first question of the next category.
            return category_index + 1, 0
        return category_index, question_index + 1
class QuestionnaireAPI:
    """Client for the risks questionnaire HTTP API.

    Fetches and caches the questionnaire, submits answers and queries
    results and statistics.
    """

    def __init__(self, api_url: str, session: ClientSession,
                 update_frequency: int = 60):
        # Base URL that the relative API paths below are appended to.
        self._API_URL: str = api_url
        self._session = session
        self._requester = Requester(self._session)
        # Cached questionnaire; None until retrieve_questionnaire() succeeds.
        self._questionnaire: Optional[Questionnaire] = None
        self._last_updated: Optional[datetime] = None
        # Refresh interval in seconds; not enforced here — presumably
        # consulted by callers to decide when to re-fetch (TODO confirm).
        self._update_frequency = update_frequency
        # Serializes concurrent questionnaire refreshes.
        self._lock = asyncio.Lock()

    @property
    def questionnaire(self):
        # Last questionnaire fetched, or None.
        return self._questionnaire

    @property
    def last_updated(self):
        # UTC time of the last successful refresh, or None.
        return self._last_updated

    @property
    def update_frequency(self):
        return self._update_frequency

    async def retrieve_questionnaire(self):
        """Fetch every category and its questions, then update the cache."""
        async with self._lock:
            categories_info = await self._retrieve_categories_info()
            questions_categories = []
            for category_info in categories_info:
                questions_category = await self._retrieve_questions_category(
                    category_id=category_info['id'],
                    category_name=category_info['title']
                )
                questions_categories.append(questions_category)
            questionnaire = Questionnaire(questions_categories)
            self._questionnaire = questionnaire
            self._last_updated = datetime.utcnow()

    async def _retrieve_categories_info(self) \
            -> list[dict[str, Union[int, str]]]:
        """Return the raw category descriptors (dicts with 'id' and 'title')."""
        uri = '/risks/categories'
        response = await self._requester.request('GET', self._API_URL + uri)
        categories_info = await response.json()
        return categories_info

    async def _retrieve_questions_category(
            self,
            category_id: int,
            category_name: str) -> QuestionsCategory:
        """Fetch one category's questions and parse them into a QuestionsCategory."""
        uri = f'/risks/questions/{category_name}'
        response = await self._requester.request('GET', self._API_URL + uri)
        raw_questions = await response.json()
        questions_category = parse_questions_category(category_id,
                                                      category_name,
                                                      raw_questions)
        return questions_category

    async def get_results(self, session: ClientSession,
                          answers: list[dict[str, Union[str, Any]]]) -> dict:
        """POST *answers*, then GET and return the computed result."""
        requester = Requester(session)
        send_answers_uri = '/risks/response/'
        await requester.request(
            method='POST',
            url=self._API_URL + send_answers_uri,
            json=answers
        )
        # Waits before polling the result — presumably the backend processes
        # answers asynchronously; confirm the 1s delay is sufficient.
        await asyncio.sleep(1)
        get_results_uri = '/risks/result/'
        response = await requester.request(method='GET',
                                           url=self._API_URL + get_results_uri)
        result = await response.json()
        return result

    async def get_illness_statistics(self, session: ClientSession,
                                     illness_name: str) -> dict:
        """Return statistics for *illness_name* from the results endpoint."""
        requester = Requester(session)
        get_statistics_uri = f'/risks/result/statistics/{illness_name}'
        response = await requester.request(
            method='GET',
            url=self._API_URL + get_statistics_uri
        )
        statistics = await response.json()
        return statistics
|
11496294
|
from __future__ import division
import numpy as np
import scipy.spatial.distance as ssd
import settings
import tps
import solver
import lfd.registration
if lfd.registration._has_cuda:
from lfd.tpsopt.batchtps import batch_tps_rpm_bij, GPUContext, TgtContext
class Registration(object):
    """Base result of registering a demonstration scene onto a test scene."""

    def __init__(self, demo, test_scene_state, f, corr):
        # Keep everything needed to evaluate the registration later.
        self.demo = demo
        self.test_scene_state = test_scene_state
        self.f = f
        self.corr = corr

    def get_objective(self):
        """Return the partial objective values; implemented by subclasses."""
        raise NotImplementedError
class TpsRpmRegistration(Registration):
    """Registration produced by the TPS-RPM algorithm."""

    def __init__(self, demo, test_scene_state, f, corr, rad):
        super(TpsRpmRegistration, self).__init__(demo, test_scene_state, f, corr)
        self.rad = rad

    def get_objective(self):
        """Evaluate the objective on the stored demo/test clouds."""
        demo_pts = self.demo.scene_state.cloud[:,:3]
        test_pts = self.test_scene_state.cloud[:,:3]
        return self.get_objective2(demo_pts, test_pts, self.f, self.corr, self.rad)

    @staticmethod
    def get_objective2(x_nd, y_md, f, corr_nm, rad):
        r"""Returns the following 5 objectives:
        - :math:`\frac{1}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} ||y_j - f(x_i)||_2^2`
        - :math:`\lambda Tr(A^\top K A)`
        - :math:`Tr((B - I) R (B - I))`
        - :math:`\frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} \log m_{ij}`
        - :math:`-\frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij}`
        """
        n = len(x_nd)
        sqdists_nm = ssd.cdist(f.transform_points(x_nd), y_md, 'sqeuclidean')
        objectives = np.zeros(5)
        objectives[0] = (corr_nm * sqdists_nm).sum() / n
        # Bending and rotation terms come straight from the spline.
        objectives[1:3] = f.get_objective()[1:]
        # Boolean indexing flattens and drops zeros, avoiding log(0).
        nonzero_corr = corr_nm[corr_nm != 0]
        objectives[3] = (2*rad / n) * (nonzero_corr * np.log(nonzero_corr)).sum()
        objectives[4] = -(2*rad / n) * nonzero_corr.sum()
        return objectives
class TpsRpmBijRegistration(Registration):
    """Bijective TPS-RPM registration: forward spline f plus backward spline g."""

    def __init__(self, demo, test_scene_state, f, g, corr, rad):
        super(TpsRpmBijRegistration, self).__init__(demo, test_scene_state, f, corr)
        self.rad = rad
        self.g = g

    def get_objective(self):
        """Evaluate the forward+backward objective on the stored clouds."""
        demo_pts = self.demo.scene_state.cloud[:,:3]
        test_pts = self.test_scene_state.cloud[:,:3]
        return self.get_objective2(demo_pts, test_pts, self.f, self.g, self.corr, self.rad)

    @staticmethod
    def get_objective2(x_nd, y_md, f, g, corr_nm, rad):
        r"""Return 10 objective terms: the 5 TpsRpmRegistration terms for the
        forward problem (spline f, correspondences m), followed by the same
        5 terms for the backward problem (spline g, correspondences m^T).
        """
        forward = TpsRpmRegistration.get_objective2(x_nd, y_md, f, corr_nm, rad)
        backward = TpsRpmRegistration.get_objective2(y_md, x_nd, g, corr_nm.T, rad)
        return np.r_[forward, backward]
class RegistrationFactory(object):
    """Creates Registrations of demonstration scenes onto test scenes.

    Subclasses implement `register` and `cost`; the batch_* helpers loop
    over the stored demonstrations.
    """

    def __init__(self, demos=None):
        """Inits RegistrationFactory with demonstrations

        Args:
            demos: dict that maps from demonstration name to Demonstration.
                This is used by batch_registration and batch_cost.
        """
        self.demos = {} if demos is None else demos

    def register(self, demo, test_scene_state, callback=None):
        """Registers demonstration scene onto the test scene

        Args:
            demo: Demonstration which has the demonstration scene
            test_scene_state: SceneState of the test scene
            callback: callback function; the derived classes define the
                arguments of the function

        Returns:
            A Registration
        """
        raise NotImplementedError

    def batch_register(self, test_scene_state, callback=None):
        """Registers every demonstration scene in demos onto the test scene

        Returns:
            A dict that maps from the demonstration names that are in demos
            to the Registration

        Note:
            Derived classes might ignore the argument callback
        """
        # dict.items() (unlike the old py2-only iteritems()) works on both
        # Python 2 and Python 3.
        return {name: self.register(demo, test_scene_state, callback=callback)
                for name, demo in self.demos.items()}

    def cost(self, demo, test_scene_state):
        """Gets costs of registering the demonstration scene onto the
        test scene

        Args:
            demo: Demonstration which has the demonstration scene
            test_scene_state: SceneState of the test scene

        Returns:
            A 1-dimensional numpy.array containing the partial costs used for
            the registration; the sum of these is the objective used for the
            registration. The exact definition of these partial costs is given
            by the derived classes.
        """
        raise NotImplementedError

    def batch_cost(self, test_scene_state):
        """Gets costs of every demonstration scene in demos registered onto
        the test scene

        Returns:
            A dict that maps from the demonstration names that are in demos
            to the numpy.array of partial cost
        """
        return {name: self.cost(demo, test_scene_state)
                for name, demo in self.demos.items()}
class TpsRpmRegistrationFactory(RegistrationFactory):
    r"""As in:
    <NAME> and <NAME>, "A new point matching algorithm for non-rigid registration," Computer Vision and Image Understanding, vol. 89, no. 2, pp. 114-141, 2003.

    Tries to solve the optimization problem

    .. math::
        :nowrap:

        \begin{align*}
            & \min_{f, M}
            & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} ||y_j - f(x_i)||_2^2
            + \lambda Tr(A^\top K A)
            + Tr((B - I) R (B - I)) \\
            && + \frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} \log m_{ij}
            - \frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} \\
            & \text{subject to}
            & X^\top A = 0, 1^\top A = 0 \\
            && \sum_{i=1}^{n+1} m_{ij} = 1, \sum_{j=1}^{m+1} m_{ij} = 1, m_{ij} \geq 0 \\
        \end{align*}
    """
    # NOTE(review): settings.OURLIER_FRAC below looks like a typo of
    # OUTLIER_FRAC, but it must match whatever name the settings module
    # actually defines — confirm before renaming.
    def __init__(self, demos=None,
                 n_iter=settings.N_ITER, em_iter=settings.EM_ITER,
                 reg_init=settings.REG[0], reg_final=settings.REG[1],
                 rad_init=settings.RAD[0], rad_final=settings.RAD[1],
                 rot_reg=settings.ROT_REG,
                 outlierprior=settings.OUTLIER_PRIOR, outlierfrac=settings.OURLIER_FRAC,
                 prior_fn=None,
                 f_solver_factory=solver.AutoTpsSolverFactory()):
        """Inits TpsRpmRegistrationFactory with demonstrations and parameters

        Args:
            demos: dict that maps from demonstration name to Demonstration
            n_iter: outer iterations for tps-rpm
            em_iter: inner iterations for tps-rpm
            reg_init/reg_final: regularization on curvature
            rad_init/rad_final: radius (temperature) for correspondence calculation (meters)
            rot_reg: regularization on rotation
            prior_fn: function that takes the demo and test SceneState and returns the prior probability (i.e. NOT cost)
            f_solver_factory: solver factory for forward registration

        Note:
            Pick a T_init that is about 1/10 of the largest square distance of all point pairs.
        """
        super(TpsRpmRegistrationFactory, self).__init__(demos=demos)
        self.n_iter = n_iter
        self.em_iter = em_iter
        self.reg_init = reg_init
        self.reg_final = reg_final
        self.rad_init = rad_init
        self.rad_final = rad_final
        self.rot_reg = rot_reg
        self.outlierprior = outlierprior
        self.outlierfrac = outlierfrac
        self.prior_fn = prior_fn
        self.f_solver_factory = f_solver_factory

    def register(self, demo, test_scene_state, callback=None):
        # Optional per-pair correspondence prior from the supplied prior_fn.
        if self.prior_fn is not None:
            prior_prob_nm = self.prior_fn(demo.scene_state, test_scene_state)
        else:
            prior_prob_nm = None
        # Registration uses only the xyz columns of each cloud.
        x_nd = demo.scene_state.cloud[:,:3]
        y_md = test_scene_state.cloud[:,:3]
        f, corr = tps.tps_rpm(x_nd, y_md,
                              f_solver_factory=self.f_solver_factory,
                              n_iter=self.n_iter, em_iter=self.em_iter,
                              reg_init=self.reg_init, reg_final=self.reg_final,
                              rad_init=self.rad_init, rad_final=self.rad_final,
                              rot_reg=self.rot_reg,
                              outlierprior=self.outlierprior, outlierfrac=self.outlierfrac,
                              prior_prob_nm=prior_prob_nm, callback=callback)
        return TpsRpmRegistration(demo, test_scene_state, f, corr, self.rad_final)

    def cost(self, demo, test_scene_state):
        """Gets the costs of the thin plate spline objective of the
        resulting registration

        Args:
            demo: Demonstration which has the demonstration scene
            test_scene_state: SceneState of the test scene

        Returns:
            A 1-dimensional numpy.array containing the residual, bending and
            rotation cost, each already premultiplied by the respective
            coefficients.
        """
        reg = self.register(demo, test_scene_state, callback=None)
        cost = reg.f.get_objective()
        return cost
class TpsRpmBijRegistrationFactory(RegistrationFactory):
    r"""As in:
    <NAME>, <NAME>, <NAME>, and <NAME>, "Learning from Demonstrations through the Use of Non-Rigid Registration," in Proceedings of the 16th International Symposium on Robotics Research (ISRR), 2013.

    Tries to solve the optimization problem

    .. math::
        :nowrap:

        \begin{align*}
            & \min_{f, M}
            & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} ||y_j - f(x_i)||_2^2
            + \lambda Tr(A_f^\top K A_f)
            + Tr((B_f - I) R (B_f - I)) \\
            && + \frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} \log m_{ij}
            - \frac{2T}{n} \sum_{i=1}^n \sum_{j=1}^m m_{ij} \\
            && + \frac{1}{m} \sum_{j=1}^m \sum_{i=1}^n m_{ij} ||x_i - g(y_j)||_2^2
            + \lambda Tr(A_g^\top K A_g)
            + Tr((B_g - I) R (B_g - I)) \\
            && + \frac{2T}{m} \sum_{j=1}^m \sum_{i=1}^n m_{ij} \log m_{ij}
            - \frac{2T}{m} \sum_{j=1}^m \sum_{i=1}^n m_{ij} \\
            & \text{subject to}
            & X^\top A_f = 0, 1^\top A_f = 0 \\
            && Y^\top A_g = 0, 1^\top A_g = 0 \\
            && \sum_{i=1}^{n+1} m_{ij} = 1, \sum_{j=1}^{m+1} m_{ij} = 1, m_{ij} \geq 0 \\
        \end{align*}
    """
    # NOTE(review): settings.OURLIER_FRAC below looks like a typo of
    # OUTLIER_FRAC; it must match the name the settings module defines.
    def __init__(self, demos=None,
                 n_iter=settings.N_ITER, em_iter=settings.EM_ITER,
                 reg_init=settings.REG[0], reg_final=settings.REG[1],
                 rad_init=settings.RAD[0], rad_final=settings.RAD[1],
                 rot_reg=settings.ROT_REG,
                 outlierprior=settings.OUTLIER_PRIOR, outlierfrac=settings.OURLIER_FRAC,
                 prior_fn=None,
                 f_solver_factory=solver.AutoTpsSolverFactory(),
                 g_solver_factory=solver.AutoTpsSolverFactory(use_cache=False)):
        """Inits TpsRpmBijRegistrationFactory with demonstrations and parameters

        Args:
            demos: dict that maps from demonstration name to Demonstration
            n_iter: outer iterations for tps-rpm
            em_iter: inner iterations for tps-rpm
            reg_init/reg_final: regularization on curvature
            rad_init/rad_final: radius (temperature) for correspondence calculation (meters)
            rot_reg: regularization on rotation
            prior_fn: function that takes the demo and test SceneState and returns the prior probability (i.e. NOT cost)
            f_solver_factory: solver factory for forward registration
            g_solver_factory: solver factory for backward registration

        Note:
            Pick a T_init that is about 1/10 of the largest square distance of all point pairs.
            You might not want to cache for the target SolverFactory.
        """
        super(TpsRpmBijRegistrationFactory, self).__init__(demos=demos)
        self.n_iter = n_iter
        self.em_iter = em_iter
        self.reg_init = reg_init
        self.reg_final = reg_final
        self.rad_init = rad_init
        self.rad_final = rad_final
        self.rot_reg = rot_reg
        self.outlierprior = outlierprior
        self.outlierfrac = outlierfrac
        self.prior_fn = prior_fn
        self.f_solver_factory = f_solver_factory
        self.g_solver_factory = g_solver_factory

    def register(self, demo, test_scene_state, callback=None):
        # Optional per-pair correspondence prior from the supplied prior_fn.
        if self.prior_fn is not None:
            prior_prob_nm = self.prior_fn(demo.scene_state, test_scene_state)
        else:
            prior_prob_nm = None
        # Registration uses only the xyz columns of each cloud.
        x_nd = demo.scene_state.cloud[:,:3]
        y_md = test_scene_state.cloud[:,:3]
        f, g, corr = tps.tps_rpm_bij(x_nd, y_md,
                                     f_solver_factory=self.f_solver_factory, g_solver_factory=self.g_solver_factory,
                                     n_iter=self.n_iter, em_iter=self.em_iter,
                                     reg_init=self.reg_init, reg_final=self.reg_final,
                                     rad_init=self.rad_init, rad_final=self.rad_final,
                                     rot_reg=self.rot_reg,
                                     outlierprior=self.outlierprior, outlierfrac=self.outlierfrac,
                                     prior_prob_nm=prior_prob_nm, callback=callback)
        return TpsRpmBijRegistration(demo, test_scene_state, f, g, corr, self.rad_final)

    def cost(self, demo, test_scene_state):
        """Gets the costs of the forward and backward thin plate spline
        objective of the resulting registration

        Args:
            demo: Demonstration which has the demonstration scene
            test_scene_state: SceneState of the test scene

        Returns:
            A 1-dimensional numpy.array containing the residual, bending and
            rotation cost of the forward and backward spline, each already
            premultiplied by the respective coefficients.
        """
        reg = self.register(demo, test_scene_state, callback=None)
        cost = np.r_[reg.f.get_objective(), reg.g.get_objective()]
        return cost
class BatchGpuTpsRpmRegistrationFactory(TpsRpmRegistrationFactory):
    """
    Similar to TpsRpmRegistrationFactory but batch_register and batch_cost are computed in batch using the GPU
    """
    def __init__(self, demos):
        # Fail fast when CUDA support is missing.
        if not lfd.registration._has_cuda:
            raise NotImplementedError("CUDA not installed")
        # Not implemented yet even when CUDA is present.
        raise NotImplementedError

    def register(self, demo, test_scene_state, callback=None):
        raise NotImplementedError

    def batch_register(self, test_scene_state):
        raise NotImplementedError

    def cost(self, demo, test_scene_state):
        raise NotImplementedError

    def batch_cost(self, test_scene_state):
        raise NotImplementedError
class BatchGpuTpsRpmBijRegistrationFactory(TpsRpmBijRegistrationFactory):
    """
    Similar to TpsRpmBijRegistrationFactory but batch_register and batch_cost are computed in batch using the GPU
    """
    def __init__(self, demos, actionfile=None,
                 n_iter=settings.N_ITER, em_iter=settings.EM_ITER,
                 reg_init=settings.REG[0], reg_final=settings.REG[1],
                 rad_init=settings.RAD[0], rad_final=settings.RAD[1],
                 rot_reg=settings.ROT_REG,
                 outlierprior=settings.OUTLIER_PRIOR, outlierfrac=settings.OURLIER_FRAC,
                 prior_fn=None,
                 f_solver_factory=solver.AutoTpsSolverFactory(),
                 g_solver_factory=solver.AutoTpsSolverFactory(use_cache=False)):
        # The batch path is GPU-only; fail fast without CUDA.
        if not lfd.registration._has_cuda:
            raise NotImplementedError("CUDA not installed")
        super(BatchGpuTpsRpmBijRegistrationFactory, self).__init__(demos=demos,
                                                                   n_iter=n_iter, em_iter=em_iter,
                                                                   reg_init=reg_init, reg_final=reg_final,
                                                                   rad_init=rad_init, rad_final=rad_final,
                                                                   rot_reg=rot_reg,
                                                                   outlierprior=outlierprior, outlierfrac=outlierfrac,
                                                                   prior_fn=prior_fn,
                                                                   f_solver_factory=f_solver_factory, g_solver_factory=g_solver_factory)
        self.actionfile = actionfile
        if self.actionfile:
            # Build a GPU context over the regularization annealing schedule
            # and pre-load the demonstration data from the action file.
            self.bend_coefs = tps.loglinspace(self.reg_init, self.reg_final, self.n_iter)
            self.src_ctx = GPUContext(self.bend_coefs)
            self.src_ctx.read_h5(actionfile)
        # Warn only once when a cloud gets downsampled to fit the GPU limit.
        self.warn_clip_cloud = True

    def _clip_cloud(self, cloud):
        # Randomly subsample clouds larger than the GPU size limit.
        if len(cloud) > settings.MAX_CLD_SIZE:
            cloud = cloud[np.random.choice(range(len(cloud)), size=settings.MAX_CLD_SIZE, replace=False)]
            if self.warn_clip_cloud:
                import warnings
                warnings.warn("The cloud has more points than the maximum for GPU and it is being clipped")
                self.warn_clip_cloud = False
        return cloud

    def batch_register(self, test_scene_state):
        raise NotImplementedError

    def batch_cost(self, test_scene_state):
        """Compute partial costs against every loaded demo in one GPU batch."""
        if not(self.actionfile):
            raise ValueError('No actionfile provided for gpu context')
        tgt_ctx = TgtContext(self.src_ctx)
        cloud = test_scene_state.cloud
        cloud = self._clip_cloud(cloud)
        tgt_ctx.set_cld(cloud)
        cost_array = batch_tps_rpm_bij(self.src_ctx, tgt_ctx,
                                       T_init=self.rad_init, T_final=self.rad_final,
                                       outlierfrac=self.outlierfrac, outlierprior=self.outlierprior,
                                       outliercutoff=settings.OUTLIER_CUTOFF,
                                       em_iter=self.em_iter,
                                       component_cost=True)
        # One cost vector per demonstration segment, keyed by segment name.
        costs = dict(zip(self.src_ctx.seg_names, cost_array))
        return costs
class TpsSegmentRegistrationFactory(RegistrationFactory):
    """Placeholder factory; no method is implemented yet."""
    def __init__(self, demos):
        raise NotImplementedError

    def register(self, demo, test_scene_state, callback=None):
        raise NotImplementedError

    def batch_register(self, test_scene_state):
        raise NotImplementedError

    def cost(self, demo, test_scene_state):
        raise NotImplementedError

    def batch_cost(self, test_scene_state):
        raise NotImplementedError
class TpsnRpmRegistrationFactory(RegistrationFactory):
    """
    TPS-RPM using normals information
    """
    # Placeholder: no method is implemented yet.
    def __init__(self, demos):
        raise NotImplementedError

    def register(self, demo, test_scene_state, callback=None):
        raise NotImplementedError

    def batch_register(self, test_scene_state):
        raise NotImplementedError

    def cost(self, demo, test_scene_state):
        raise NotImplementedError

    def batch_cost(self, test_scene_state):
        raise NotImplementedError
|
11496299
|
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from oscar_docdata.dashboard.app import application as docdata_dashboard_app
from oscar.app import application as oscar_application
# URL routing for the Oscar shop with docdata payment integration.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Include docdata URLs
    url(r'^dashboard/docdata/', docdata_dashboard_app.urls),
    url(r'^api/docdata/', include('oscar_docdata.urls')),
    # Django's built-in language-switch endpoint.
    url(r'^i18n/', include('django.conf.urls.i18n')),
    # Oscar catches every remaining URL, so it must stay last.
    url(r'', oscar_application.urls),
]

# Development only: serve static and media files from Django itself.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
11496334
|
from PySide import QtGui

# Enlarge the default application font (useful on 2K-4K monitors).
# Fixes: the old code discarded the result of f.pointSize() (a no-op call)
# and rebound `f` to QApplication.setFont's return value, losing the font.
font = QtGui.QApplication.font()
font.setPointSize(10)  # replace 10 with whatever size you want for 2k to 4k monitors
QtGui.QApplication.setFont(font)
|
11496373
|
import os
from twill.utils import gather_filenames
def test_gather_dir():
    """gather_filenames finds every twill script under the fixture tree."""
    fixture_dir = os.path.join(os.path.dirname(__file__), 'test_gather')
    previous_dir = os.getcwd()
    os.chdir(fixture_dir)
    try:
        files = gather_filenames(('.',))
        if os.sep != '/':
            # Normalize Windows separators so the expected paths compare equal.
            files = [name.replace(os.sep, '/') for name in files]
        expected = [
            './00-testme/x-script.twill',
            './01-test/b.twill', './02-test2/c.twill',
            './02-test2/02-subtest/d.twill']
        assert sorted(files) == sorted(expected), files
    finally:
        os.chdir(previous_dir)
|
11496404
|
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
import locale
# Global run counter — presumably incremented by tests later in this file
# (not visible in this chunk).
numruns = 0

# threading is optional in some builds; tests that need it check this and
# are skipped (see the skipUnless decorator on test_switchinterval below).
try:
    import threading
except ImportError:
    threading = None
class SysModuleTest(unittest.TestCase):
    def setUp(self):
        """Remember the std streams and displayhook so tearDown can restore them."""
        self.orig_stdout = sys.stdout
        self.orig_stderr = sys.stderr
        self.orig_displayhook = sys.displayhook
    def tearDown(self):
        """Restore interpreter state mutated by the tests and reap children."""
        sys.stdout = self.orig_stdout
        sys.stderr = self.orig_stderr
        sys.displayhook = self.orig_displayhook
        test.support.reap_children()
    def test_original_displayhook(self):
        """sys.__displayhook__ prints values, sets builtins._, and skips None."""
        import builtins
        out = io.StringIO()
        sys.stdout = out
        dh = sys.__displayhook__
        # Called with no arguments it must raise.
        self.assertRaises(TypeError, dh)
        if hasattr(builtins, '_'):
            del builtins._
        # None is not echoed and does not set builtins._ .
        dh(None)
        self.assertEqual(out.getvalue(), '')
        self.assertTrue(not hasattr(builtins, '_'))
        # Other values are printed and remembered in builtins._ .
        dh(42)
        self.assertEqual(out.getvalue(), '42\n')
        self.assertEqual(builtins._, 42)
        # Without sys.stdout the hook cannot print and raises RuntimeError.
        del sys.stdout
        self.assertRaises(RuntimeError, dh, 42)
    def test_lost_displayhook(self):
        """Evaluating in 'single' mode without sys.displayhook raises RuntimeError."""
        del sys.displayhook
        code = compile('42', '<string>', 'single')
        self.assertRaises(RuntimeError, eval, code)
    def test_custom_displayhook(self):
        """Exceptions raised by a custom displayhook propagate to the caller."""
        def baddisplayhook(obj):
            raise ValueError
        sys.displayhook = baddisplayhook
        code = compile('42', '<string>', 'single')
        self.assertRaises(ValueError, eval, code)
    def test_original_excepthook(self):
        """sys.__excepthook__ writes the traceback text to sys.stderr."""
        err = io.StringIO()
        sys.stderr = err
        eh = sys.__excepthook__
        # Called with no arguments it must raise.
        self.assertRaises(TypeError, eh)
        try:
            raise ValueError(42)
        except ValueError as exc:
            eh(*sys.exc_info())
        self.assertTrue(err.getvalue().endswith('ValueError: 42\n'))
    def test_excepthook(self):
        """A non-exception 'value' argument is reported on stderr, not raised."""
        with test.support.captured_output('stderr') as stderr:
            sys.excepthook(1, '1', 1)
        self.assertTrue(
            'TypeError: print_exception(): Exception expected for value, str found'
            in stderr.getvalue())
    def test_exit(self):
        """sys.exit: argument handling, exit codes, and stderr message encoding."""
        # At most one argument is accepted.
        self.assertRaises(TypeError, sys.exit, 42, 42)

        # Call with no argument: the exception code is None, and a real
        # interpreter exits with status 0 and no output.
        with self.assertRaises(SystemExit) as cm:
            sys.exit()
        self.assertIsNone(cm.exception.code)
        rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
        self.assertEqual(rc, 0)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')

        # Integer argument becomes the exception code.
        with self.assertRaises(SystemExit) as cm:
            sys.exit(42)
        self.assertEqual(cm.exception.code, 42)

        # A one-element tuple is unpacked to its single entry.
        with self.assertRaises(SystemExit) as cm:
            sys.exit((42,))
        self.assertEqual(cm.exception.code, 42)

        # A string argument is kept as-is.
        with self.assertRaises(SystemExit) as cm:
            sys.exit('exit')
        self.assertEqual(cm.exception.code, 'exit')

        # A multi-element tuple is kept as-is.
        with self.assertRaises(SystemExit) as cm:
            sys.exit((17, 23))
        self.assertEqual(cm.exception.code, (17, 23))

        # Raising SystemExit directly sets the process exit status.
        rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
        self.assertEqual(rc, 47)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')

        def check_exit_message(code, expected, **env_vars):
            # Run `code` in a subprocess; it must exit 1 with stderr
            # starting with `expected`.
            rc, out, err = assert_python_failure('-c', code, **env_vars)
            self.assertEqual(rc, 1)
            self.assertEqual(out, b'')
            self.assertTrue(err.startswith(expected),
                "%s doesn't start with %s" % (ascii(err), ascii(expected)))

        # Any pending stderr output is flushed before the exit message.
        check_exit_message(
            'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
            b'unflushed,message')

        # Unencodable surrogates appear backslash-escaped in the message.
        check_exit_message('import sys; sys.exit("surrogates:\\uDCFF")',
            b'surrogates:\\udcff')

        # The message is encoded with the configured stderr encoding
        # (latin-1 here), not the default.
        check_exit_message('import sys; sys.exit("h\\xe9")', b'h\xe9',
            PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
    """getdefaultencoding() takes no arguments and returns a str."""
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    encoding = sys.getdefaultencoding()
    self.assertIsInstance(encoding, str)
def test_setcheckinterval(self):
    """The deprecated check-interval getter/setter still round-trips."""
    with warnings.catch_warnings():
        # setcheckinterval() is deprecated; silence the warning here.
        warnings.simplefilter('ignore')
        self.assertRaises(TypeError, sys.setcheckinterval)
        original = sys.getcheckinterval()
        # End on `original` so the interpreter state is restored.
        for interval in (0, 100, 120, original):
            sys.setcheckinterval(interval)
            self.assertEqual(sys.getcheckinterval(), interval)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, 'a')
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
self.assertTrue(orig < 0.5, orig)
try:
for n in (1e-05, 0.05, 3.0, orig):
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
    """get/setrecursionlimit validate arguments and round-trip values."""
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    previous = sys.getrecursionlimit()
    self.assertRaises(TypeError, sys.setrecursionlimit)
    self.assertRaises(ValueError, sys.setrecursionlimit, -42)
    sys.setrecursionlimit(10000)
    self.assertEqual(sys.getrecursionlimit(), 10000)
    # Restore so later tests see the previous limit again.
    sys.setrecursionlimit(previous)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
continue
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
'cannot set the recursion limit to [0-9]+ at the recursion depth [0-9]+: the limit is too low'
)
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
code = textwrap.dedent(
"""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()"""
)
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b'Fatal Python error: Cannot recover from stack overflow',
err)
def test_getwindowsversion(self):
test.support.get_attribute(sys, 'getwindowsversion')
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
    """call_tracing() rejects a non-tuple argument list with TypeError."""
    # Second argument must be a tuple of call arguments, not an int.
    self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, 'setdlopenflags'),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, 'getdlopenflags'))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags + 1)
self.assertEqual(sys.getdlopenflags(), oldflags + 1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c + 1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, 'gettotalrefcount'):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
    """_getframe() validates its depth argument and returns the caller."""
    # Too many arguments.
    self.assertRaises(TypeError, sys._getframe, 42, 42)
    # A depth far beyond the real stack must raise ValueError.
    self.assertRaises(ValueError, sys._getframe, 2000000000)
    # With no argument the current frame is returned: its code object is
    # this very test method's code object.
    self.assertTrue(SysModuleTest.test_getframe.__code__ is sys.
        _getframe().f_code)
def test_current_frames(self):
    """Dispatch to the threaded or unthreaded _current_frames() check."""
    try:
        # Probe for thread support only; the module itself is unused.
        import _thread
    except ImportError:
        self.current_frames_without_threads()
    else:
        self.current_frames_with_threads()
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = []
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == 'f123':
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, 'g456()')
filename, lineno, funcname, sourceline = stack[i + 1]
self.assertEqual(funcname, 'g456')
self.assertIn(sourceline, ['leave_g.wait()', 'entered_g.set()'])
leave_g.set()
t.join()
def current_frames_without_threads(self):
    """Without threads the mapping holds a single entry keyed by 0."""
    frames = sys._current_frames()
    self.assertEqual(len(frames), 1)
    self.assertIn(0, frames)
    # The lone frame must be the caller's own.
    self.assertTrue(frames[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ('little', 'big'))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2 ** sys.hash_info.width)
for x in range(1, 100):
self.assertEqual(pow(x, sys.hash_info.modulus - 1, sys.
hash_info.modulus), 1,
'sys.hash_info.modulus {} is a non-prime'.format(sys.
hash_info.modulus))
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var('Py_HASH_ALGORITHM')
if sys.hash_info.algorithm in {'fnv', 'siphash24'}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, 'siphash24')
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, 'fnv')
else:
self.assertIn(sys.hash_info.algorithm, {'fnv', 'siphash24'})
else:
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 1114111)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ('alpha', 'beta', 'candidate', 'final'))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ('alpha', 'beta', 'candidate', 'final'))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1, 0, 0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
    """Bug #43581: sys.__stdout__ and sys.__stderr__ share an encoding."""
    # Compare the originals, not sys.stdout/stderr, which tests replace.
    self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = 'never interned before' + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S('abc'))
def test_sys_flags(self):
    """sys.flags exposes exactly one int attribute per documented flag."""
    self.assertTrue(sys.flags)
    expected_attrs = ('debug', 'inspect', 'interactive', 'optimize',
                      'dont_write_bytecode', 'no_user_site', 'no_site',
                      'ignore_environment', 'verbose', 'bytes_warning',
                      'quiet', 'hash_randomization', 'isolated')
    for name in expected_attrs:
        self.assertTrue(hasattr(sys.flags, name), name)
        self.assertEqual(type(getattr(sys.flags, name)), int, name)
    self.assertTrue(repr(sys.flags))
    # No extra, undocumented fields either.
    self.assertEqual(len(sys.flags), len(expected_attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
    """Helper: the struct-sequence type behind *sys_attr* must not be
    instantiable from Python code."""
    attr_type = type(sys_attr)
    # Direct call must fail...
    with self.assertRaises(TypeError):
        attr_type()
    # ...and so must bypassing __init__ via __new__.
    with self.assertRaises(TypeError):
        attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
    """The type of sys.flags cannot be instantiated from Python."""
    self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
    """The type of sys.version_info cannot be instantiated from Python."""
    self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
    """The getwindowsversion() result type cannot be instantiated."""
    # Skips (via get_attribute) on non-Windows platforms.
    test.support.get_attribute(sys, 'getwindowsversion')
    self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
    """sys._clear_type_cache() is callable and must not raise (CPython)."""
    sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
env['PYTHONIOENCODING'] = 'cp424'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ('¢' + os.linesep).encode('cp424')
self.assertEqual(out, expected)
env['PYTHONIOENCODING'] = 'ascii:replace'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env['PYTHONIOENCODING'] = 'ascii'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(b"'\\xa2'", err)
env['PYTHONIOENCODING'] = 'ascii:'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(b"'\\xa2'", err)
env['PYTHONIOENCODING'] = ':surrogateescape'
p = subprocess.Popen([sys.executable, '-c', 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.
getpreferredencoding(False), 'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env['PYTHONIOENCODING'] = ''
p = subprocess.Popen([sys.executable, '-c', 'print(%a)' % test.
support.FS_NONASCII], stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(['nonexistent', '-c',
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'
], executable=sys.executable, stdout=subprocess.PIPE, cwd=
python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode('ASCII')
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode(
'ascii', 'backslashreplace'))])
def check_fsencoding(self, fs_encoding, expected=None):
    """Helper: *fs_encoding* must be set and name a real codec; if
    *expected* is given it must match exactly."""
    self.assertIsNotNone(fs_encoding)
    # Raises LookupError if the name is not a resolvable codec.
    codecs.lookup(fs_encoding)
    if expected:
        self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
    """macOS always reports utf-8; elsewhere any resolvable codec is OK."""
    expected = 'utf-8' if sys.platform == 'darwin' else None
    self.check_fsencoding(sys.getfilesystemencoding(), expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
env = os.environ.copy()
env['LC_ALL'] = 'C'
code = '\n'.join(('import sys', 'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))', 'dump("stdin")',
'dump("stdout")', 'dump("stderr")'))
args = [sys.executable, '-c', code]
if isolated:
args.append('-I')
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=
subprocess.STDOUT, env=env, universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
out = self.c_locale_get_error_handler(encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\nstdout: ignore\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\nstdout: strict\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\nstdout: strict\nstderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding=':')
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
out = self.c_locale_get_error_handler(encoding='')
self.assertEqual(out,
"""stdin: surrogateescape
stdout: surrogateescape
stderr: backslashreplace
"""
)
def test_implementation(self):
levels = {'alpha': 10, 'beta': 11, 'candidate': 12, 'final': 15}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 | version.
micro << 8 | levels[version.releaselevel] << 4 | version.serial <<
0)
self.assertEqual(sys.implementation.hexversion, hexversion)
self.assertEqual(sys.implementation.name, sys.implementation.name.
lower())
@test.support.cpython_only
def test_debugmallocstats(self):
    """_debugmallocstats() dumps allocator statistics on stderr."""
    from test.support.script_helper import assert_python_ok
    ret, out, err = assert_python_ok(
        '-c', 'import sys; sys._debugmallocstats()')
    self.assertIn(b'free PyDictObjects', err)
    # The function accepts no arguments.
    self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, 'getallocatedblocks'),
'sys.getallocatedblocks unavailable on this build')
def test_getallocatedblocks(self):
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
self.assertGreaterEqual(a, 0)
try:
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ['sentinel']
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)), sys.
maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
check(True, vsize('') + self.longdigit)
check(len, size('4P'))
samples = [b'', b'u' * 100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
check(iter(bytearray()), size('nP'))
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
check(complex(0, 1), size('2d'))
check(str.lower, size('3PP'))
import datetime
check(datetime.timedelta.days, size('3PP'))
import collections
check(collections.defaultdict.default_factory, size('3PP'))
check(int.__add__, size('3P2P'))
check({}.__iter__, size('2P'))
check({}, size('nQ2P') + calcsize('2nP2n') + 8 + 8 * 2 // 3 *
calcsize('n2P'))
longdict = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5, (6): 6, (7): 7,
(8): 8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + 16 * 2 // 3 *
calcsize('n2P'))
check({}.keys(), size('P'))
check({}.values(), size('P'))
check({}.items(), size('P'))
check(iter({}), size('P2nPn'))
check(iter({}.keys()), size('P2nPn'))
check(iter({}.values()), size('P2nPn'))
check(iter({}.items()), size('P2nPn'))
class C(object):
pass
check(C.__dict__, size('P'))
check(BaseException(), size('5Pb'))
check(UnicodeEncodeError('', '', 0, 0, ''), size('5Pb 2P2nP'))
check(UnicodeDecodeError('', b'', 0, 0, ''), size('5Pb 2P2nP'))
check(UnicodeTranslateError('', 0, 1, ''), size('5Pb 2P2nP'))
check(Ellipsis, size(''))
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
check(enumerate([]), size('n3P'))
check(reversed(''), size('nP'))
check(float(0), size('d'))
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = (x.f_code.co_stacksize + x.f_code.co_nlocals + ncells +
nfrees - 1)
check(x, vsize('12P3ic' + CO_MAXBLOCKS * '3i' + 'P' + extras * 'P'))
def func():
pass
check(func, size('12P'))
class c:
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
check(foo, size('PP'))
check(bar, size('PP'))
def get_gen():
yield 1
check(get_gen(), size('Pb2PPP'))
check(iter('abc'), size('lP'))
import re
check(re.finditer('', ''), size('2P'))
samples = [[], [1, 2, 3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample) * self.P)
check(iter([]), size('lP'))
check(reversed([]), size('nP'))
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2 ** sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2 * self.longdigit)
check(int(PyLong_BASE ** 2 - 1), vsize('') + 2 * self.longdigit)
check(int(PyLong_BASE ** 2), vsize('') + 3 * self.longdigit)
check(unittest, size('PnPPP'))
check(None, size(''))
check(NotImplemented, size(''))
check(object(), size(''))
class C(object):
def getx(self):
return self.__x
def setx(self, value):
self.__x = value
def delx(self):
del self.__x
x = property(getx, setx, delx, '')
check(x, size('4Pi'))
check(iter(range(1)), size('4l'))
check(reversed(''), size('nP'))
check(range(1), size('4P'))
check(range(66000), size('4P'))
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE * 'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0:
tmp = 1
minused = minused * 2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize * calcsize('nP'))
check(frozenset(sample), s + newsize * calcsize('nP'))
check(iter(set()), size('P3n'))
check(slice(0), size('3P'))
check(super(int), size('3P'))
check((), vsize(''))
check((1, 2, 3), vsize('') + 3 * self.P)
fmt = 'P2n15Pl4Pn9Pn11PIP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
s = vsize(fmt + '3P36P3P10P2P4P')
s += calcsize('2nP2n') + 8 + 5 * calcsize('n2P')
class newstyleclass(object):
pass
check(newstyleclass, s)
check(newstyleclass().__dict__, size('nQ2P' + '2nP2n'))
samples = ['1' * 100, 'ÿ' * 50, 'Ā' * 40, '\uffff' * 100, '𐀀' * 30,
'\U0010ffff' * 100]
asciifields = 'nnbP'
compactfields = asciifields + 'nPn'
unicodefields = compactfields + 'P'
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2 * (len(s) + 1)
else:
L = size(compactfields) + 4 * (len(s) + 1)
check(s, L)
s = chr(16384)
check(s, size(compactfields) + 4)
compile(s, '<stdin>', 'eval')
check(s, size(compactfields) + 4 + 4)
import weakref
check(weakref.ref(int), size('2Pn2P'))
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
if tb is not None:
check(tb, size('2P2i'))
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_main():
    """regrtest-compatible entry point: run both test cases."""
    test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == '__main__':
test_main()
|
11496447
|
from binaryninja_cortex.platforms import MCU
class Chip(MCU):
    """Vybrid VF6xx MCU description for binaryninja_cortex.

    Extends the architectural Cortex-M interrupt table inherited from
    ``MCU.IRQ`` with this chip's device-specific NVIC entries.
    """
    NAME="VF6XX"
    # Device-specific vectors appended after the core exceptions from
    # MCU.IRQ.  Order must match the hardware vector table, so the
    # RESERVED placeholders are kept in place.
    IRQ=MCU.IRQ+ [
        "NVIC_CPU2CPU_INT0_IRQ",
        "NVIC_CPU2CPU_INT1_IRQ",
        "NVIC_CPU2CPU_INT2_IRQ",
        "NVIC_CPU2CPU_INT3_IRQ",
        "NVIC_DIRECTED0_SEMA4_IRQ",
        "NVIC_DIRECTED1_MCM_IRQ",
        "NVIC_DIRECTED2_IRQ",
        "NVIC_DIRECTED3_IRQ",
        "NVIC_DMA0_IRQ",
        "NVIC_DMA0_ERROR_IRQ",
        "NVIC_DMA1_IRQ",
        "NVIC_DMA1_ERROR_IRQ",
        "NVIC_RESERVED0_IRQ",
        "NVIC_RESERVED1_IRQ",
        "NVIC_MSCM_ECC0_IRQ",
        "NVIC_MSCM_ECC1_IRQ",
        "NVIC_CSU_ALARM_IRQ",
        "NVIC_RESERVED2_IRQ",
        "NVIC_MSCM_ACTZS_IRQ",
        "NVIC_RESERVED3_IRQ",
        "NVIC_WDOG_A5_IRQ",
        "NVIC_WDOG_M4_IRQ",
        "NVIC_WDOG_SNVS_IRQ",
        "NVIC_CP1_BOOT_FAIL_IRQ",
        "NVIC_QSPI0_IRQ",
        "NVIC_QSPI1_IRQ",
        "NVIC_DDRMC_IRQ",
        "NVIC_SDHC0_IRQ",
        "NVIC_SDHC1_IRQ",
        "NVIC_RESERVED4_IRQ",
        "NVIC_DCU0_IRQ",
        "NVIC_DCU1_IRQ",
        "NVIC_VIU_IRQ",
        "NVIC_RESERVED5_IRQ",
        "NVIC_RESERVED6_IRQ",
        "NVIC_RLE_IRQ",
        "NVIC_SEG_LCD_IRQ",
        "NVIC_RESERVED7_IRQ",
        "NVIC_RESERVED8_IRQ",
        "NVIC_PIT_IRQ",
        "NVIC_LPTIMER0_IRQ",
        "NVIC_RESERVED9_IRQ",
        "NVIC_FLEXTIMER0_IRQ",
        "NVIC_FLEXTIMER1_IRQ",
        "NVIC_FLEXTIMER2_IRQ",
        "NVIC_FLEXTIMER3_IRQ",
        "NVIC_RESERVED10_IRQ",
        "NVIC_RESERVED11_IRQ",
        "NVIC_RESERVED12_IRQ",
        "NVIC_RESERVED13_IRQ",
        "NVIC_USBPHY0_IRQ",
        "NVIC_USBPHY1_IRQ",
        "NVIC_RESERVED14_IRQ",
        "NVIC_ADC0_IRQ",
        "NVIC_ADC1_IRQ",
        "NVIC_DAC0_IRQ",
        "NVIC_DAC1_IRQ",
        "NVIC_RESERVED15_IRQ",
        "NVIC_FLEXCAN0_IRQ",
        "NVIC_FLEXCAN1_IRQ",
        "NVIC_RESERVED16_IRQ",
        "NVIC_UART0_IRQ",
        "NVIC_UART1_IRQ",
        "NVIC_UART2_IRQ",
        "NVIC_UART3_IRQ",
        "NVIC_UART4_IRQ",
        "NVIC_UART5_IRQ",
        "NVIC_SPI0_IRQ",
        "NVIC_SPI1_IRQ",
        "NVIC_SPI2_IRQ",
        "NVIC_SPI3_IRQ",
        "NVIC_I2C0_IRQ",
        "NVIC_I2C1_IRQ",
        "NVIC_I2C2_IRQ",
        "NVIC_I2C3_IRQ",
        "NVIC_USBC0_IRQ",
        "NVIC_USBC1_IRQ",
        "NVIC_RESERVED17_IRQ",
        "NVIC_ENET0_IRQ",
        "NVIC_ENET1_IRQ",
        "NVIC_ENET0_1588_IRQ",
        "NVIC_ENET1_1588_IRQ",
        "NVIC_ENET_SWITCH_IRQ",
        "NVIC_NFC_IRQ",
        "NVIC_SAI0_IRQ",
        "NVIC_SAI1_IRQ",
        "NVIC_SAI2_IRQ",
        "NVIC_SAI3_IRQ",
        "NVIC_ESAI_BIFIFO_IRQ",
        "NVIC_SPDIF_IRQ",
        "NVIC_ASRC_IRQ",
        "NVIC_VREG_IRQ",
        "NVIC_WKPU0_IRQ",
        "NVIC_RESERVED18_IRQ",
        "NVIC_CCM_FXOSC_IRQ",
        "NVIC_CCM_IRQ",
        "NVIC_SRC_IRQ",
        "NVIC_PDB_IRQ",
        "NVIC_EWM_IRQ",
        "NVIC_RESERVED19_IRQ",
        "NVIC_RESERVED20_IRQ",
        "NVIC_RESERVED21_IRQ",
        "NVIC_RESERVED22_IRQ",
        "NVIC_RESERVED23_IRQ",
        "NVIC_RESERVED24_IRQ",
        "NVIC_RESERVED25_IRQ",
        "NVIC_RESERVED26_IRQ",
        "NVIC_GPIO0_IRQ",
        "NVIC_GPIO1_IRQ",
        "NVIC_GPIO2_IRQ",
        "NVIC_GPIO3_IRQ",
        "NVIC_GPIO4_IRQ",
    ]
|
11496452
|
from abc import ABC
from typing import Tuple, Union
import numpy as np
from slick_dnn.autograd import Autograd, Context
class Reshape(Autograd):
    """Autograd op that reshapes its input to a fixed target shape."""
    def __init__(self, *new_shape):
        # Target shape captured at construction, e.g. Reshape(2, 3).
        self.new_shape = new_shape
    def forward(self, ctx, tensor):
        # Remember the incoming shape so backward can undo the reshape.
        ctx.save_for_back(tensor.shape)
        return np.reshape(tensor, self.new_shape)
    def backward(self, ctx, grad):
        old_shape, = ctx.data_for_back
        # batch grad: if the gradient carries one extra leading dimension
        # relative to new_shape, treat it as a batch axis and keep it
        # while restoring the saved shape.
        # NOTE(review): assumes forward saw an unbatched tensor in that
        # case -- confirm against callers.
        if len(self.new_shape) + 1 == len(grad.shape):
            old_shape = (grad.shape[0], *old_shape)
        return np.reshape(grad, old_shape)
class Flatten(Autograd):
    """Collapse every dimension after the first (batch) axis into one."""
    def forward(self, ctx, tensor):
        # Keep the full input shape around for the backward pass.
        ctx.save_for_back(tensor.shape)
        batch = tensor.shape[0]
        return np.reshape(tensor, (batch, -1))
    def backward(self, ctx, grad):
        original_shape, = ctx.data_for_back
        # Undo the flattening by restoring the recorded shape.
        return np.reshape(grad, original_shape)
class SwapAxes(Autograd):
    """Exchange two axes of the input tensor."""
    def __init__(self, axis1, axis2):
        # The pair of axes to exchange; the swap is its own inverse.
        self.axis1 = axis1
        self.axis2 = axis2
    def forward(self, ctx, tensor):
        return np.swapaxes(tensor, self.axis1, self.axis2)
    def backward(self, ctx: Context, grad):
        # Applying the same swap routes the gradient back correctly.
        return np.swapaxes(grad, self.axis1, self.axis2)
class GetItem(Autograd):
    """Index/slice the input tensor with a fixed item expression."""
    def __init__(self, item):
        # Anything valid inside tensor[...]: int, slice, tuple, mask, ...
        self.item = item
    def forward(self, ctx, tensor):
        ctx.save_for_back(tensor.shape)
        return tensor[self.item]
    def backward(self, ctx, grad):
        input_shape, = ctx.data_for_back
        # Scatter the incoming gradient into a zero tensor of the input's
        # shape; positions not selected by `item` get zero gradient.
        # NOTE(review): dtype is pinned to float32 -- confirm callers
        # never need float64 gradients.
        scattered = np.zeros(input_shape, dtype=np.float32)
        scattered[self.item] = grad
        return scattered
class Img2Col(Autograd):
    """im2col transformation: unrolls sliding kernel windows into columns
    so convolution/pooling can be expressed as matrix operations."""
    @staticmethod
    def img_2_col_forward(kernel_size, stride, merge_channels, image):
        """Unroll sliding kernel windows of *image* into columns.

        kernel_size is (width, height), stride is (stride_w, stride_h).
        If merge_channels is True every window is flattened across
        channels into one column; otherwise channels stay separate.
        image may be [N, C, H, W] or [C, H, W].
        """
        # A rank-4 input carries a leading batch dimension.
        has_batches = len(image.shape) == 4
        img_w = image.shape[-1]
        img_h = image.shape[-2]
        channels = image.shape[-3]
        # new image width
        new_w = (img_w - kernel_size[0]) // stride[0] + 1
        # new image height
        new_h = (img_h - kernel_size[1]) // stride[1] + 1
        if merge_channels:
            ret_shape = (channels * kernel_size[0] * kernel_size[1], new_w * new_h)
            flattened_part_shape = (-1,)
        else:
            ret_shape = (channels, kernel_size[0] * kernel_size[1], new_w * new_h)
            flattened_part_shape = (channels, -1)
        if has_batches:
            # Prepend the batch dimension to both target shapes.
            ret_shape = (image.shape[0], *ret_shape)
            flattened_part_shape = (image.shape[0], *flattened_part_shape)
        ret_image = np.zeros(ret_shape)
        for i in range(new_h):
            for j in range(new_w):
                # Window at grid position (i, j); '...' keeps any
                # batch/channel axes untouched.
                part = image[
                    ...,
                    i * stride[1]:i * stride[1] + kernel_size[1],
                    j * stride[0]:j * stride[0] + kernel_size[0]]
                part = np.reshape(part, flattened_part_shape)
                # Column index is the row-major position of the window.
                ret_image[..., :, i * new_w + j] = part
        return ret_image
    @staticmethod
    def img_2_col_backwards(kernel_size, stride, old_shape, grad):
        """col2im: scatter-add column gradients back into an image of
        *old_shape*; overlapping windows accumulate their gradients."""
        channels = old_shape[-3]
        img_w = old_shape[-1]
        # number of window positions per row of the unrolled grid
        old_w = (img_w - kernel_size[0]) // stride[0] + 1
        ret_grad = np.zeros(old_shape, dtype=np.float32)
        for i in range(grad.shape[-1]):
            # One column == one kernel window position.
            col = grad[..., :, i]
            col = np.reshape(
                col,
                (-1, channels, kernel_size[1], kernel_size[0])
            )
            # Recover the window's top-left corner from the column index.
            h_start = (i // old_w) * stride[1]
            w_start = (i % old_w) * stride[0]
            ret_grad[..., h_start:h_start + kernel_size[1], w_start:w_start + kernel_size[0]] += col
        return ret_grad
    def __init__(self, kernel_size, stride: Union[int, Tuple[int, int]] = 1):
        # Ints are shorthand for square kernels / uniform strides.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        self.kernel_size = kernel_size
        self.stride = stride
    def forward(self, ctx: Context, image: np.array):
        """
        Performs Img to Col transformation.
        Args:
            ctx (Context): usual context,
            image (np.array): image to be transformed, allowed shapes:
                [N, C, H, W], [C, H, W]
                N - batches,
                C - channels,
                H - height,
                W - width
        """
        # Save the input shape so backward can rebuild the image gradient.
        ctx.save_for_back(image.shape)
        return self.img_2_col_forward(
            self.kernel_size,
            self.stride,
            True,
            image
        )
    def backward(self, ctx: Context, grad: np.array = None):
        old_shape, = ctx.data_for_back
        # Route the column gradients back into image layout (col2im).
        return self.img_2_col_backwards(
            self.kernel_size,
            self.stride,
            old_shape,
            grad
        )
class BasePool(Autograd, ABC):
    """Shared plumbing for 2-D pooling layers: argument normalization and
    broadcasting of pooled gradients back over the kernel axis."""

    def __init__(self, kernel_size, stride=1):
        # Promote scalar kernel/stride arguments to (w, h) pairs.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size,) * 2
        if isinstance(stride, int):
            stride = (stride,) * 2
        self.kernel_size = kernel_size
        self.stride = stride

    @staticmethod
    def _fill_coll(to_fill, new_shape):
        """Repeat each pooled value across the kernel axis so it matches
        the im2col layout described by *new_shape*."""
        repeated = np.repeat(to_fill, new_shape[-2], -2)
        return np.reshape(repeated, new_shape)
class MaxPool2d(BasePool):
    def forward(self, ctx: Context, image):
        """
        Performs 2d max pool over input tensor
        Args:
            ctx (Context): Autograd Conext
            image (np.array): input image. Allowed shapes:
                [N, C, H, W], [C, H, W]
                N - batches,
                C - channels,
                H - height,
                W - width
        Returns:
            tensor (np.array):
        """
        img_w = image.shape[-1]
        img_h = image.shape[-2]
        channels = image.shape[-3]
        # new image width
        new_w = (img_w - self.kernel_size[0]) // self.stride[0] + 1
        # new image height
        new_h = (img_h - self.kernel_size[1]) // self.stride[1] + 1
        # Lay each pooling window out as a column (channels kept separate),
        # then take the max over the window axis.
        img_out = Img2Col.img_2_col_forward(
            self.kernel_size,
            self.stride,
            False,
            image
        )
        maxed = np.max(img_out, -2)
        ctx.save_for_back(img_out, image.shape, maxed.shape)
        # NOTE(review): the output always has a leading batch axis (-1),
        # even for 3-D input — confirm callers expect 4-D output.
        return np.reshape(maxed, (-1, channels, new_h, new_w))

    def backward(self, ctx: Context, grad: np.array = None):
        reshaped_image, old_shape, maxed_shape = ctx.data_for_back
        grad = np.reshape(grad, maxed_shape)
        # Route the gradient only to positions that achieved the window max;
        # ties receive the gradient at every tied position.
        mask = (reshaped_image == np.max(reshaped_image, -2, keepdims=True))
        new_grad = self._fill_coll(grad, reshaped_image.shape)
        new_grad = np.where(
            mask,
            new_grad,
            0
        )
        return Img2Col.img_2_col_backwards(
            self.kernel_size,
            self.stride,
            old_shape,
            new_grad
        )
class AvgPool2d(BasePool):
    def forward(self, ctx: Context, image):
        """
        Performs 2d average pool over input tensor
        Args:
            ctx (Context): Autograd Conext
            image (np.array): input image. Allowed shapes:
                [N, C, H, W], [C, H, W]
                N - batches,
                C - channels,
                H - height,
                W - width
        Returns:
            tensor (np.array):
        """
        img_w = image.shape[-1]
        img_h = image.shape[-2]
        channels = image.shape[-3]
        # new image width
        new_w = (img_w - self.kernel_size[0]) // self.stride[0] + 1
        # new image height
        new_h = (img_h - self.kernel_size[1]) // self.stride[1] + 1
        # Lay each pooling window out as a column (channels kept separate),
        # then average over the window axis.
        img_out = Img2Col.img_2_col_forward(
            self.kernel_size,
            self.stride,
            False,
            image
        )
        averaged = np.average(img_out, -2)
        ctx.save_for_back(img_out, image.shape, averaged.shape)
        # NOTE(review): the output always has a leading batch axis (-1),
        # even for 3-D input — confirm callers expect 4-D output.
        return np.reshape(averaged, (-1, channels, new_h, new_w))

    def backward(self, ctx: Context, grad: np.array = None):
        reshaped_image, old_shape, averaged_shape = ctx.data_for_back
        grad = np.reshape(grad, averaged_shape)
        # Every window element receives an equal 1/(kw*kh) share of the gradient.
        new_grad = self._fill_coll(grad, reshaped_image.shape) / (self.kernel_size[0] * self.kernel_size[1])
        return Img2Col.img_2_col_backwards(
            self.kernel_size,
            self.stride,
            old_shape,
            new_grad
        )
|
11496472
|
from django.db import models
import hashlib, datetime
class Zip(models.Model):
    """A password-protected zip entry.

    The password is stored as a hex SHA-1 digest of filename + password
    (filename acts as a per-record salt), never in clear text.
    """
    filename = models.CharField(max_length=20, primary_key=True)
    password = models.CharField(max_length=40)  # hex SHA-1 digest (40 chars)
    date_created = models.DateTimeField(auto_now_add=True)

    def encrypt(self, text):
        """Return the hex SHA-1 digest of filename + text.

        NOTE(review): SHA-1 without a random salt or key stretching is weak
        for password storage; consider django.contrib.auth.hashers.
        """
        h = hashlib.sha1()
        # hashlib.update() requires bytes on Python 3; encode explicitly
        # (the original passed str, which raises TypeError on Python 3).
        h.update(self.filename.encode('utf-8'))
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def save(self, *args, **kwargs):
        # Hash the plaintext password before persisting.
        # NOTE(review): calling save() twice re-hashes the stored digest —
        # confirm callers only save freshly-constructed instances.
        self.password = self.encrypt(self.password)
        super(Zip, self).save(*args, **kwargs)

    def is_correct(self, passwordAttempt):
        """Hash the attempt the same way and compare against the stored digest."""
        return self.encrypt(passwordAttempt) == self.password
|
11496517
|
from uuid import UUID, uuid4
import ujson as json
from flask import Flask, jsonify
from flask.json import JSONEncoder
class CustomJSONEncoder(JSONEncoder):
    """Flask JSON encoder that serializes UUIDs and uses ujson for speed."""

    def default(self, obj):
        # Called by the stdlib encoder for types json does not know natively.
        if isinstance(obj, UUID):
            return str(obj)
        return JSONEncoder.default(self, obj)

    def encode(self, o):
        # Fast path through ujson; fall back to the stdlib encoder — which
        # consults default() above — for objects ujson cannot serialize
        # (e.g. UUID values).  The original unconditionally used ujson here,
        # which bypassed default() and made UUID payloads raise TypeError.
        try:
            return json.dumps(o)
        except TypeError:
            return JSONEncoder.encode(self, o)
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
@app.route("/")
def index():
    """Demo endpoint: return a JSON object whose value is a fresh UUID,
    serialized through the app's custom JSON encoder."""
    return jsonify({"foo": uuid4()})
app.run()
|
11496542
|
import moviepy.editor as mp
# Extract the audio track from a WebM file and re-encode it as MP3.
# subclip() with no arguments keeps the full duration.
clip = mp.AudioFileClip("insert_path_to_webm_file").subclip()
clip.write_audiofile("insert_path_to_save_mp3_file")
# Release the underlying ffmpeg reader process.
clip.close()
|
11496618
|
# Resolve the package version: use the frozen _version_lock module in
# distributions, otherwise derive it from `git describe` on the dev tree.
try:
    # For distributions
    from ._version_lock import version  # type: ignore # pylint: disable=unused-import
except ImportError:
    # For development trees
    from os import environ
    from os.path import dirname, join

    try:
        # Prefer dulwich (pure-Python git) when available.
        from dulwich.porcelain import describe  # type: ignore
    except ImportError:
        from subprocess import SubprocessError, run
        from warnings import warn

        def describe(repo):
            """Fallback `git describe` via the git CLI; returns a sentinel
            version string when git fails or is unavailable."""
            env = dict(environ)
            env['GIT_DIR'] = repo
            try:
                return (run(['git', 'describe'],
                            capture_output=True,
                            check=True,
                            env=env,
                            encoding='ascii')
                        .stdout.rstrip('\n'))
            except SubprocessError as e:  # pragma: no cover
                warn("Could not determine dvrip version: {}"
                     .format(e))
                return '0.0.0-0-unknown'

    if '_repo' not in globals():  # except for setup.py
        # .git directory one level above this file's package directory.
        _repo = join(dirname(dirname(__file__)), '.git')
    # 'tag-N-ghash' splits into 3 parts; an exact tag yields a single part.
    # NOTE(review): assumes tag names contain no '-' — confirm tagging scheme.
    _desc = describe(_repo).split('-', 3)
    version = ('{0}.dev{1}+{2}'.format(*_desc)
               if len(_desc) == 3 else _desc[0])
|
11496700
|
from keras import optimizers
DEFAULT_OPTIMIZER = 'adam'
def get_optimizer(config):
    """Return optimizer specified by configuration.

    *config* is a namespace-like object (e.g. argparse.Namespace); vars()
    turns it into its attribute dict.  Falls back to DEFAULT_OPTIMIZER when
    no 'optimizer' key is present.
    """
    config = vars(config)
    name = config.get('optimizer', DEFAULT_OPTIMIZER)
    optimizer = optimizers.get(name)  # Default parameters
    lr = config.get('learning_rate')
    if lr is not None:
        # Rebuild the optimizer with the requested learning rate.
        # NOTE(review): this resets every other hyper-parameter to its
        # default, and `lr` was renamed `learning_rate` in newer Keras —
        # confirm against the pinned Keras version.
        optimizer = type(optimizer)(lr=lr)
    return optimizer
|
11496733
|
class Solution:
    def halvesAreAlike(self, s: str) -> bool:
        """Return True when both halves of *s* contain the same number of
        vowels (a, e, i, o, u — either case).  *s* has even length."""
        vowels = set("aeiouAEIOU")
        mid = len(s) // 2
        first_half_count = sum(ch in vowels for ch in s[:mid])
        second_half_count = sum(ch in vowels for ch in s[mid:])
        return first_half_count == second_half_count
|
11496765
|
from packaging.version import parse
from qtpy import QT_VERSION
from qtpy.QtCore import Qt, Signal
from qtpy.QtWidgets import QComboBox, QCompleter
class SearchComboBox(QComboBox):
    """
    ComboCox with completer for fast search in multiple options
    """

    # Qt < 5.14 has no textActivated signal; declare a compatible one.
    if parse(QT_VERSION) < parse("5.14.0"):
        textActivated = Signal(str)  # pragma: no cover

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setEditable(True)
        # Popup completer that matches anywhere in the item text,
        # case-insensitively.
        self.completer_object = QCompleter()
        self.completer_object.setCaseSensitivity(Qt.CaseInsensitive)
        self.completer_object.setCompletionMode(QCompleter.PopupCompletion)
        self.completer_object.setFilterMode(Qt.MatchContains)
        self.setCompleter(self.completer_object)
        # Typing text that matches no item must not add a new entry.
        self.setInsertPolicy(QComboBox.NoInsert)
        if parse(QT_VERSION) < parse("5.14.0"):  # pragma: no cover
            # Emulate textActivated on old Qt from the index-change signal.
            self.currentIndexChanged.connect(self._text_activated)

    def _text_activated(self):  # pragma: no cover
        # Forward the old-style signal as the emulated textActivated.
        self.textActivated.emit(self.currentText())

    def addItem(self, *args):
        super().addItem(*args)
        # Keep the completer's model in sync with the combo box contents.
        self.completer_object.setModel(self.model())

    def addItems(self, *args):
        super().addItems(*args)
        self.completer_object.setModel(self.model())
|
11496778
|
from fabric.colors import green as _green, yellow as _yellow, red as _red
from settings import cloud_connections, DEFAULT_PROVIDER
from ghost_log import log
from ghost_tools import get_aws_connection_data, GCallException, boolify
from ghost_tools import b64decode_utf8, get_ghost_env_variables
from ghost_tools import config as ghost_config
from libs.host_deployment_manager import HostDeploymentManager
from libs.blue_green import get_blue_green_from_app
from libs.ec2 import get_ec2_instance
from libs.deploy import launch_executescript
COMMAND_DESCRIPTION = "Execute a script/commands on every instance"
RELATED_APP_FIELDS = []
def is_available(app_context=None):
    """Whether the executescript command is enabled (admin-configurable);
    *app_context* is accepted for interface compatibility but unused here."""
    return boolify(ghost_config.get('enable_executescript_command', True))
class Executescript():
    """Ghost command: run a user-supplied shell script on an app's instances,
    either through the host-deployment manager (all hosts) or on one host."""

    _app = None
    _job = None
    _log_file = -1

    def __init__(self, worker):
        self._app = worker.app
        self._job = worker.job
        self._config = worker._config
        self._worker = worker
        self._log_file = worker.log_file
        # Cloud credentials may be assumed from another account/role/region.
        self._connection_data = get_aws_connection_data(
            self._app.get('assumed_account_id', ''),
            self._app.get('assumed_role_name', ''),
            self._app.get('assumed_region_name', '')
        )
        self._cloud_connection = cloud_connections.get(self._app.get('provider', DEFAULT_PROVIDER))(
            self._config,
            **self._connection_data
        )
        blue_green, self._color = get_blue_green_from_app(self._app)

    def _get_notification_message_done(self):
        """
        >>> class worker:
        ...     app = {'name': 'app1'}
        ...     job = None
        ...     log_file = None
        ...     _config = None
        >>> Executescript(worker=worker())._get_notification_message_done()
        'Execute script OK for app [app1]'
        """
        return 'Execute script OK for app [{0}]'.format(self._app['name'])

    def _get_notification_message_failed(self, e):
        """
        >>> class worker:
        ...     app = {'name': 'app1'}
        ...     job = None
        ...     log_file = None
        ...     _config = None
        >>> Executescript(worker=worker())._get_notification_message_failed('Exception')
        'Execute script Failed for app [app1] Exception'
        >>> Executescript(worker=worker())._get_notification_message_failed('Exception-test')
        'Execute script Failed for app [app1] Exception-test'
        """
        return "Execute script Failed for app [{0}] {1}".format(self._app['name'], str(e))

    def _get_notification_message_aborted(self, message):
        """
        >>> class worker:
        ...     app = {'name': 'app1'}
        ...     job = None
        ...     log_file = None
        ...     _config = None
        >>> Executescript(worker=worker())._get_notification_message_aborted('No script provided')
        'Execute script Aborted for app [app1] - No script provided'
        >>> Executescript(worker=worker())._get_notification_message_aborted('Invalid module')
        'Execute script Aborted for app [app1] - Invalid module'
        """
        return "Execute script Aborted for app [{0}] - {1}".format(self._app['name'], message)

    def _abort(self, message):
        # Mark the job aborted and return the worker's status result.
        return self._worker.update_status("aborted", message=self._get_notification_message_aborted(message))

    def _get_module_path_and_uid(self, module_name):
        """
        Get the destination path for the given module, if any, '/tmp' otherwise
        Get the user ID for the given module, if any, "0" (root) otherwise
        """
        for item in self._app['modules']:
            if 'name' in item and item['name'] == module_name:
                return item['path'], item.get('uid', 0), item
        return '/tmp', 0, None

    def _exec_script(self, script, module_name, fabric_execution_strategy, safe_deployment_strategy):
        # Run the script on every instance via the safe-deployment machinery.
        context_path, sudoer_uid, module = self._get_module_path_and_uid(module_name)
        ghost_env_vars = get_ghost_env_variables(self._app, module, self._job['user'])

        deploy_manager = HostDeploymentManager(self._cloud_connection, self._app, module, self._log_file,
                                               self._app['safe-deployment'], fabric_execution_strategy,
                                               'executescript', {
                                                   'script': script,
                                                   'context_path': context_path,
                                                   'sudoer_uid': sudoer_uid,
                                                   'jobid': self._job['_id'],
                                                   'env_vars': ghost_env_vars,
                                               })
        deploy_manager.deployment(safe_deployment_strategy)

    def _exec_script_single_host(self, script, module_name, single_host_ip):
        # Run the script on exactly one instance, validating that the host
        # really belongs to this app (VPC, IP and Ghost tags must match).
        context_path, sudoer_uid, module = self._get_module_path_and_uid(module_name)
        ghost_env_vars = get_ghost_env_variables(self._app, module, self._job['user'])

        ec2_obj = get_ec2_instance(self._cloud_connection, self._app['region'], {
            'private-ip-address': single_host_ip,
            'vpc-id': self._app['vpc_id'],
        })
        if not ec2_obj or ec2_obj.vpc_id!= self._app['vpc_id'] or ec2_obj.private_ip_address != single_host_ip:
            raise GCallException("Cannot found the single instance with private IP '{ip}' in VPC '{vpc}'".format(ip=single_host_ip, vpc=self._app['vpc_id']))
        if ec2_obj.tags['app'] != self._app['name'] or ec2_obj.tags['env'] != self._app['env'] or ec2_obj.tags['role'] != self._app['role']:
            raise GCallException("Cannot execute script on this instance ({ip} - {id}), invalid Ghost tags".format(ip=single_host_ip, id=ec2_obj.id))

        log("EC2 instance found, ready to execute script ({ip} - {id} - {name})".format(ip=single_host_ip, id=ec2_obj.id, name=ec2_obj.tags.get('Name', '')), self._log_file)
        launch_executescript(self._app, script, context_path, sudoer_uid, self._job['_id'], [single_host_ip], 'serial', self._log_file, ghost_env_vars)

    def execute(self):
        # Job options layout:
        #   [0] base64-encoded script, [1] module name, [2] execution strategy,
        #   [3] single host IP (strategy 'single') or safe-deploy group param.
        if not boolify(self._config.get('enable_executescript_command', True)):
            return self._abort("This command has been disabled by your administrator.")
        script = self._job['options'][0] if 'options' in self._job and len(self._job['options']) > 0 else None
        module_name = self._job['options'][1] if 'options' in self._job and len(self._job['options']) > 1 else None
        execution_strategy = self._job['options'][2] if 'options' in self._job and len(self._job['options']) > 2 else None
        if execution_strategy == 'single':
            # option[3] is a single Host IP
            fabric_execution_strategy = None
            safe_deployment_strategy = None
            single_host_ip = self._job['options'][3] if 'options' in self._job and len(self._job['options']) > 3 else None
        else:
            # option[2] is fabric type, option[3] might be Safe deploy group param
            fabric_execution_strategy = execution_strategy
            safe_deployment_strategy = self._job['options'][3] if 'options' in self._job and len(self._job['options']) > 3 else None
            single_host_ip = None

        try:
            log(_green("STATE: Started"), self._log_file)
            try:
                # Validate and decode the script; only shell scripts with an
                # explicit sh/bash shebang are accepted.
                if not script or not script.strip():
                    return self._abort("No valid script provided")
                script_data = b64decode_utf8(script)
                allowed_shebang = (
                    '#!/bin/bash',
                    '#! /bin/bash',
                    '#!/bin/sh',
                    '#! /bin/sh'
                )
                if not script_data.startswith(allowed_shebang):
                    return self._abort("No valid shell script provided (shebang missing)")
            except:
                # Any decode failure is treated as an invalid script.
                return self._abort("No valid script provided")

            if single_host_ip:
                log(_yellow("Executing script on a single host: %s" % single_host_ip), self._log_file)
                self._exec_script_single_host(script_data, module_name, single_host_ip)
            else:
                log(_yellow("Executing script on every running instance"), self._log_file)
                self._exec_script(script_data, module_name, fabric_execution_strategy, safe_deployment_strategy)

            self._worker.update_status("done", message=self._get_notification_message_done())
            log(_green("STATE: End"), self._log_file)
        except Exception as e:
            self._worker.update_status("failed", message=self._get_notification_message_failed(e))
            log(_red("STATE: End"), self._log_file)
|
11496811
|
from graphviz import Digraph
import os
def create_graph(filepath: str, nodes_data: dict, show: bool, legend=False):
    """
    Visualizes the energy system as graph.
    Creates, using the library Graphviz, a graph containing all
    components and connections from "nodes_data" and returns this as
    a PNG file.
    :param filepath: path where the PNG-result shall be saved
    :type filepath: str
    :param nodes_data: dictionary containing data from excel
                       scenario file.
    :type nodes_data: dict
    :param show: whether the rendered graph is opened in a viewer
    :type show: bool
    :param legend: specifies, whether a legend will be added to the
                   graph or not
    :type legend: bool
    <NAME> - <EMAIL>
    """

    def linebreaks(text: str):
        """
        Adds linebreaks a given string.
        Function which adds a line break to strings every ten
        characters. Up to four strings are added.
        :param text: string to which line breaks will be added
        :type text: str
        <NAME> - <EMAIL>
        """
        text_length = len(text)
        if text_length > 10:
            text = str(text[0:9] + "-\n" + text[9:])
        if text_length > 20:
            text = str(text[0:21] + "-\n" + text[21:])
        if text_length > 30:
            text = str(text[0:33] + "-\n" + text[33:])
        if text_length > 40:
            text = str(text[0:45] + "-\n" + text[45:])
        return text

    # Defines the location of Graphviz as path necessary for windows
    os.environ["PATH"] += \
        os.pathsep + 'C:\\Program Files (x86)\\Graphviz2.38\\bin'
    # Creates the Directed-Graph
    dot = Digraph(format='png')
    # Creates a Legend if Legend = True
    if legend:
        shape = {'Bus': ['ellipse'], 'Source': ['trapezium'],
                 'Sink': ['invtrapezium'], 'Transformer\nLinks': ['box'],
                 'Storage': ['box']}
        for i in shape.keys():
            dot.node(i, shape=shape[i][0], fontsize="10", fixedsize='shape',
                     width='1.1', height='0.6',
                     style='dashed' if i == 'Storage' else '')
    # Node shape per component sheet.
    shapes = {'sources': ['trapezium'], 'sinks': ['invtrapezium'],
              'transformers': ['box'], 'storages': ['box'],
              'links': ['box']}
    # Column holding the (first) bus each component connects to.
    bus = {'buses': ['label'], 'sources': ['output'], 'sinks': ['input'],
           'transformers': ['input'], 'storages': ['bus'], 'links': ['bus1']}
    for i in bus.keys():
        for j, b in nodes_data[i].iterrows():
            if b['active']:
                # sets component label
                label = b['label']
                if i == 'buses':
                    if b['shortage']:
                        label = b['label'] + '_shortage'
                    elif b['excess']:
                        label = b['label'] + '_excess'
                label = linebreaks(label)
                if i != 'buses':
                    dot.node(label, shape=shapes[i][0], fontsize="10",
                             fixedsize='shape', width='1.1', height='0.6',
                             style='dashed' if i == 'storages' else '')
                    if i == 'sources':
                        # Creates graph elements for solar heat
                        if b['technology'] == "solar_thermal_flat_plate" or \
                                b['technology'] == "CSP":
                            # creates additional transformer
                            transformer = b['label'] + '_collector'
                            transformer = linebreaks(transformer)
                            dot.node(transformer, shape='box', fontsize="10",
                                     fixedsize='shape', width='1.1',
                                     height='0.6')
                            # creates additional bus
                            c_bus = b['label'] + '_bus'
                            c_bus=linebreaks(c_bus)
                            dot.node(c_bus, shape='ellipse', fontsize="10")
                            # Adds edge for transformer, source and bus to the graph
                            dot.edge(b['input'], transformer)
                            dot.edge(c_bus, transformer)
                            dot.edge(transformer, b['output'])
                            dot.edge(label, c_bus)
                else:
                    # Buses with shortage act like sources, buses with
                    # excess like sinks; draw them accordingly.
                    if b['shortage']:
                        dot.node(label, shape='trapezium', fontsize="10",
                                 fixedsize='shape', width='1.1', height='0.6')
                    if b['excess'] and not b['shortage']:
                        dot.node(label, shape='invtrapezium', fontsize="10",
                                 fixedsize='shape', width='1.1', height='0.6')
                # creates bus nodes
                dot.node(b[bus[i][0]], shape='ellipse', fontsize="10")
                if i == 'links':
                    dot.node(b['bus2'], shape='ellipse')
                # creates edges
                if i == 'sinks' or i == 'storages' or i == 'links' \
                        or (i == 'buses' and b['excess']
                            and not b['shortage']):
                    dot.edge(b[bus[i][0]], label)
                if (i == 'sources' and (b['technology'] not in
                                        ["solar_thermal_flat_plate", "CSP"])) \
                        or i == 'storages' or (i == 'buses' and b['shortage']):
                    dot.edge(label, b[bus[i][0]])
                if i == 'links':
                    dot.edge(label, b['bus2'])
                    # undirected links flow both ways
                    if b['(un)directed'] == 'undirected':
                        dot.edge(b['bus2'], label)
                        dot.edge(label, b['bus1'])
                elif i == 'transformers':
                    dot.node(b['output'], shape='ellipse', fontsize="10")
                    dot.edge(b[bus[i][0]], label)
                    dot.edge(label, b['output'])
                    if b['output2'] not in [0, 'None', 'none']:
                        dot.node(b['output2'], shape='ellipse', fontsize="10")
                        dot.edge(label, b['output2'])
                    if b['transformer type'] == 'compression_heat_transformer':
                        # consideration of mode of operation
                        # NOTE(review): if 'mode' is neither 'heat_pump' nor
                        # 'chiller', `temp` is unbound and a NameError is
                        # raised below — confirm input validation upstream.
                        if b['mode'] == 'heat_pump':
                            temp = '_low_temp'
                        elif b['mode'] == 'chiller':
                            temp = '_high_temp'
                        # creates label for source and bus depending on mode
                        cmpr_abs_source = label + temp + '_source'
                        cmpr_abs_bus = label + temp + '_bus'
                        # Linebreaks, so that the labels fit the boxes
                        cmpr_abs_source = linebreaks(cmpr_abs_source)
                        cmpr_abs_bus = linebreaks(cmpr_abs_bus)
                        # Adds a second input and a heat source (node and edge)
                        # for compressionand absorption heat transformers
                        dot.node(cmpr_abs_bus,
                                 shape='ellipse',
                                 fontsize="10")
                        dot.edge(cmpr_abs_bus, label)
                        dot.node(cmpr_abs_source, shape='trapezium',
                                 fontsize="10",
                                 fixedsize='shape', width='1.1', height='0.6')
                        dot.edge(cmpr_abs_source, cmpr_abs_bus)
                elif i == 'buses':
                    # A bus that has both excess and shortage additionally
                    # gets a separate excess sink node.
                    if b['excess'] and b['shortage']:
                        label = b['label'] + '_excess'
                        label = linebreaks(label)
                        dot.node(label, shape='invtrapezium', fontsize="10",
                                 fixedsize='shape', width='1.1', height='0.6')
                        dot.node(b[bus[i][0]], shape='ellipse', fontsize="10")
                        dot.edge(b[bus[i][0]], label)
    dot.render(filepath + '/graph.gv', view=show)
|
11496830
|
import random
class Solution:
    def repeatedNTimes(self, A: List[int]) -> int:
        """Return the element that appears N times in an array of length 2N.

        The input is guaranteed to contain N+1 distinct values with exactly
        one value repeated, so the first value seen twice is the answer.

        Deterministic O(n) scan replaces the original random-sampling loop,
        which terminated only in expected (not guaranteed) time.
        """
        seen = set()
        for value in A:
            if value in seen:
                return value
            seen.add(value)
|
11496838
|
from setuptools import find_packages, setup

# Read the long description once; the context manager closes the file
# promptly instead of leaking the handle until interpreter exit, and the
# explicit encoding keeps the read consistent across platforms.
with open('README.md', 'r', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    name='stacksort',
    version='0.1.0',
    author='<NAME> <<EMAIL>',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    install_requires=[
        'stackapi',
        'lxml',
        'bs4'
    ],
)
|
11496850
|
from test import multibytecodec_support
import unittest
class TestCP949Map(multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    # Round-trip the cp949 codec against the reference mapping table.
    encoding = 'cp949'
    mapfileurl = 'http://www.pythontest.net/unicode/CP949.TXT'
class TestEUCKRMap(multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    # Round-trip the euc_kr codec against the reference mapping table.
    encoding = 'euc_kr'
    mapfileurl = 'http://www.pythontest.net/unicode/EUC-KR.TXT'
    # NOTE(review): these pairs are exempted from the strict mapping-table
    # comparison (codec output intentionally differs from the table here).
    pass_enctest = [(b'\xa4\xd4', 'ㅤ')]
    pass_dectest = [(b'\xa4\xd4', 'ㅤ')]
class TestJOHABMap(multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    # Round-trip the johab codec against the reference mapping table.
    encoding = 'johab'
    mapfileurl = 'http://www.pythontest.net/unicode/JOHAB.TXT'
    # NOTE(review): these pairs are exempted from the strict mapping-table
    # comparison (codec output intentionally differs from the table here).
    pass_enctest = [(b'\\', '₩')]
    pass_dectest = [(b'\\', '₩')]
if __name__ == '__main__':
unittest.main()
|
11496853
|
try:
    from setuptools import setup
except ImportError:
    # Plain distutils fallback for environments without setuptools.
    from distutils.core import setup

# Read the long description once; the context manager closes the file
# promptly instead of leaking the handle until interpreter exit.
with open('README.rst') as readme_file:
    readme = readme_file.read()

# Dependency groups; "all" is computed below as the union of every group.
requirements = {
    "package": [
        "PyYAML>=5",
        "six",
    ],
    "test": [
        "nose",
        "mock",
        "pytest",
        "pytest-mock",
        "pytest-pudb",
    ],
    "setup": [
        "pytest-runner",
    ],
}
requirements.update(all=sorted(set().union(*requirements.values())))

setup(
    name='yamlsettings',
    version='2.1.0',
    description='Yaml Settings Configuration Module',
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/KyleJamesWalker/yamlsettings',
    packages=['yamlsettings', 'yamlsettings.extensions'],
    package_dir={'yamlsettings':
                 'yamlsettings'},
    include_package_data=True,
    install_requires=requirements['package'],
    extras_require=requirements,
    setup_requires=requirements['setup'],
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    test_suite='tests',
    tests_require=requirements['test'],
)
|
11496871
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import xml.etree.ElementTree as ET
import numpy as np
import re
import math
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(FILE_PATH, "../data/")
LABEL_DATA_PATH = os.path.join(DATA_PATH, "ascii/")
STROKES_DATA_PATH = os.path.join(DATA_PATH, "lineStrokes/")
ESCAPE_CHAR = '~!@#$%^&*()_+{}:"<>?`-=[];\',./|\n'
def find_textline_by_id(filename):
    """
    Inputs:
        filename: string, textline prefix, eg: 'a01-020w-01'
    Return:
        label: string, label of one whole textline, eg: 'No secret talks # - Macleod.'
        (returns the empty list [] when the line id is not found)
    """
    dir_name_L1 = filename[:3]          # eg: 'a01'
    dir_name_L2 = filename[:7]          # eg: 'a01-020'
    file_name = filename[:-3] + ".txt"  # eg: 'a01-020w.txt' or 'a01-020.txt'
    line_id = int(filename[-2:])        # eg: 1
    filepath = os.path.join(
        LABEL_DATA_PATH, dir_name_L1, dir_name_L2, file_name)
    line_counter = -2  # because line start after 2 new lines from "CSR:\n"
    label = []
    flag = False
    # Use a context manager so the file handle is closed promptly; the
    # original iterated a bare open() and leaked the handle.
    with open(filepath, 'r') as label_file:
        for line in label_file:
            if line.startswith('CSR'):
                flag = True
            if flag:
                line_counter += 1
                if line_counter == line_id:
                    # strip punctuation/escape characters from the label
                    for char in ESCAPE_CHAR:
                        line = line.replace(char, '')
                    label = line
                    break
    return label
def main():
    """Parse IAM-OnDB lineStrokes XML files into per-textline feature arrays.

    For every text line with an available label a (T, 10) feature matrix is
    built with columns:
        [x_speed, y_speed, x, y, curvature_sin, curvature_cos,
         writing_sin, writing_cos, pen_state, timestamp]
    and the matching transcription is collected.  Results are saved as
    data.npy / label.npy.

    Fix vs. original: the per-stroke y-coordinate normalization iterated over
    x_point, so normalized y values were derived from x values.
    """
    text_line_data_all = []
    label_text_line_all = []
    for path_1, _, files in os.walk(STROKES_DATA_PATH):
        files = sorted(files)
        for file_name in files:  # TextLine files
            # ---------- label data ----------
            # strip '.xml' (eg: a01-020w-01.xml -> a01-020w-01)
            text_line_id = file_name[:-4]
            label_text_line = find_textline_by_id(text_line_id)
            # Skip lines with missing ascii label data so the data and
            # label arrays stay aligned.
            if len(label_text_line) != 0:
                label_text_line_all.append(label_text_line)
                # ---------- trajectory data ----------
                text_line_path = os.path.join(path_1, file_name)
                e_tree = ET.parse(text_line_path).getroot()
                x_list = []
                y_list = []
                time_stamp = []
                first_time = 0.0
                for atype in e_tree.findall('StrokeSet/Stroke/Point'):
                    x_list.append(int(atype.get('x')))
                    y_list.append(int(atype.get('y')))
                    if len(time_stamp) == 0:
                        first_time = float(atype.get('time'))
                        time_stamp.append(0.0)
                    else:
                        time_stamp.append(
                            float(atype.get('time')) - first_time)
                # ---------- curvature and speed ----------
                x_list = np.asarray(x_list, dtype=np.float32)
                y_list = np.asarray(y_list, dtype=np.float32)
                x_cor = np.copy(x_list)
                y_cor = np.copy(y_list)
                cor_dial = e_tree.findall(
                    'WhiteboardDescription/DiagonallyOppositeCoords')[0]
                x_max = int(cor_dial.get('x'))
                y_max = int(cor_dial.get('y'))
                x_min = min(x_list)
                y_min = min(y_list)
                # Normalize so the whiteboard writing height spans ~[0, 1].
                scale = 1.0 / (y_max - y_min)
                x_cor = (x_cor - x_min) * scale
                y_cor = (y_cor - y_min) * scale
                sin_list = []
                cos_list = []
                x_sp_list = []
                y_sp_list = []
                pen_up_list = []
                writing_sin = []
                writing_cos = []
                for stroke in e_tree.findall('StrokeSet/Stroke'):
                    x_point, y_point, time_list = [], [], []
                    for point in stroke.findall('Point'):
                        x_point.append(int(point.get('x')))
                        y_point.append(int(point.get('y')))
                        if len(time_list) == 0:
                            first_time = float(point.get('time'))
                            time_list.append(0.0)
                        else:
                            time_list.append(
                                float(point.get('time')) - first_time)
                    # Normalize stroke coordinates with the same scale.
                    x_point[:] = [(point - x_min) * scale for point in x_point]
                    # BUG FIX: the original iterated over x_point here, so
                    # normalized y values were derived from x coordinates.
                    y_point[:] = [(point - y_min) * scale for point in y_point]
                    # Curvature: sin/cos of the signed turning angle at each
                    # interior point; endpoints get angle 0.
                    angle_stroke = []
                    if len(x_point) < 3:
                        for _ in range(len(x_point)):
                            sin_list += [0]
                            cos_list += [1]
                    else:
                        for idx in range(1, len(x_point) - 1):
                            p0 = [x_point[idx - 1], y_point[idx - 1]]
                            p1 = [x_point[idx], y_point[idx]]
                            p2 = [x_point[idx + 1], y_point[idx + 1]]
                            v0 = np.array(p0) - np.array(p1)
                            v1 = np.array(p2) - np.array(p1)
                            angle = np.math.atan2(
                                np.linalg.det([v0, v1]), np.dot(v0, v1))
                            angle_stroke.append(angle)
                        new_angle_stroke = [0] + angle_stroke + [0]
                        sin_list += np.sin(new_angle_stroke).tolist()
                        cos_list += np.cos(new_angle_stroke).tolist()
                    # Speed: finite differences over time, 0 at stroke start.
                    if len(x_point) < 2:
                        for _ in range(len(x_point)):
                            x_sp_list += [0]
                            y_sp_list += [0]
                        if len(x_point) < 1:
                            print("Meet 0")
                            exit()
                        x_sp = [0]
                        y_sp = [0]
                    else:
                        time_list = np.asarray(time_list, dtype=np.float32)
                        time_diff = np.subtract(
                            np.array(time_list)[1:], time_list[:-1])
                        for idx, v in enumerate(time_diff):
                            if v == 0:
                                time_diff[idx] = 0.001  # avoid divide-by-zero
                        x_diff = np.subtract(np.array(x_point)[1:], x_point[:-1])
                        y_diff = np.subtract(np.array(y_point)[1:], y_point[:-1])
                        x_sp = [0] + np.divide(x_diff, time_diff).tolist()
                        y_sp = [0] + np.divide(y_diff, time_diff).tolist()
                        x_sp_list += x_sp
                        y_sp_list += y_sp
                    # Pen state: 1 while the pen stays down, 0 at stroke end.
                    pen_up = [1] * (len(x_point) - 1) + [0]
                    pen_up_list += pen_up
                    # Writing direction: unit velocity vector per point.
                    w_sin_stroke = []
                    w_cos_stroke = []
                    for idx, x_v in enumerate(x_sp):
                        y_v = y_sp[idx]
                        slope = np.sqrt(x_v * x_v + y_v * y_v)
                        if slope != 0:
                            w_sin_stroke.append(y_v / slope)
                            w_cos_stroke.append(x_v / slope)
                        else:
                            w_sin_stroke.append(0)
                            w_cos_stroke.append(1)
                    writing_sin += w_sin_stroke
                    writing_cos += w_cos_stroke
                # Stack all per-point features into a (T, 10) matrix.
                time_stamp = np.asarray(time_stamp, dtype=np.float32)
                sin_list = np.asarray(sin_list, dtype=np.float32)
                cos_list = np.asarray(cos_list, dtype=np.float32)
                x_sp_list = np.asarray(x_sp_list, dtype=np.float32)
                y_sp_list = np.asarray(y_sp_list, dtype=np.float32)
                pen_up_list = np.asarray(pen_up_list, dtype=np.float32)
                writing_cos = np.asarray(writing_cos, dtype=np.float32)
                writing_sin = np.asarray(writing_sin, dtype=np.float32)
                text_line_data = np.stack(
                    (x_sp_list, y_sp_list, x_cor, y_cor, sin_list, cos_list,
                     writing_sin, writing_cos, pen_up_list, time_stamp),
                    axis=1)
                text_line_data_all.append(text_line_data)
        print("Finished a file ", files)
    text_line_data_all = np.array(text_line_data_all)
    label_text_line_all = np.array(label_text_line_all)
    print(text_line_data_all.shape)
    print(label_text_line_all.shape)
    # save as .npy
    np.save("data", text_line_data_all)
    np.save("label", label_text_line_all)
    print("Successfully saved!")
if __name__ == "__main__":
main()
|
11496874
|
from channels.base import Channel, ChannelsError, ChannelAttachment, ChannelBoard, ChannelThread
from channels.base import ChannelPost, SettingDefinition, Client, PostingError
import requests
import re
import urllib
class FourChanClient(Client):
    """Per-user 4chan client: holds the HTTP session and authorization state."""

    def __init__(self, channel_name, client_id):
        super().__init__(channel_name, client_id)
        # One session per client so login cookies persist across requests.
        self.session = requests.Session()
        self.authorized = False
class FourChanChannel(Channel):
base_url = "https://a.4cdn.org"
attachment_url = "https://i.4cdn.org"
post_reply_pattern = re.compile(r'>>([0-9]+)', re.MULTILINE)
def __init__(self):
super().__init__()
def name(self):
return "4chan"
def get_setting_definitions(self, client):
return [
SettingDefinition("pass", "PASS code for posting"),
SettingDefinition("pin", "PIN code for posting"),
]
def new_client(self, client_id):
return FourChanClient(self.name(), client_id)
def authorize(self, client: FourChanClient):
if client.authorized:
return True
if not client.settings.get("pass", None):
return False
if not client.settings.get("pin", None):
return False
r = client.session.post("https://sys.4chan.org/auth", data={
"act": "do_login",
"id": client.settings["pass"],
"pin": client.settings["pin"],
"long_login": "yes"
})
if r.status_code == 200:
client.authorized = True
return True
return False
def get_boards(self, client, limit):
r = client.session.get(FourChanChannel.base_url + "/boards.json")
if r.status_code != 200:
raise ChannelsError(ChannelsError.UNKNOWN_ERROR)
response = r.json()
boards = []
for board in response["boards"]:
boards.append(ChannelBoard(board["board"], None, board["title"]))
return boards
def get_threads(self, client, board):
r = client.session.get(FourChanChannel.base_url + "/" + board + "/catalog.json")
if r.status_code != 200:
raise ChannelsError(ChannelsError.UNKNOWN_ERROR)
pages = r.json()
threads = []
for page in pages:
for thread in page["threads"]:
if "no" not in thread:
continue
if "com" not in thread:
continue
result_thread = ChannelThread(str(thread["no"]))
if "sub" in thread:
result_thread.title = thread["sub"]
result_thread.num_replies = thread["replies"] if "replies" in thread else 0
if ("tim" in thread) and ("ext" in thread):
url = FourChanChannel.attachment_url + "/" + board + "/" + str(thread["tim"]) + thread["ext"]
result_thread.attachments.append(ChannelAttachment(url))
result_thread.comment = Channel.strip_html(thread["com"])
threads.append(result_thread)
return threads
    def get_thread(self, client, board, thread):
        """Return all posts of `thread`, splitting long comments into sub-posts.

        Each comment is split by Channel.split_comment; the first strip keeps
        the original post id, later strips get ids like "<no>.1", "<no>.2"
        and are chained as replies under the head post.
        """
        r = client.session.get(FourChanChannel.base_url + "/" + board + "/thread/" + thread + ".json")
        if r.status_code != 200:
            raise ChannelsError(ChannelsError.UNKNOWN_ERROR)
        result = r.json()
        posts = []
        posts_by_id = {}  # post id -> ChannelPost, for resolving ">>id" quotes
        for post in result["posts"]:
            if "no" not in post:
                continue
            if "com" not in post:
                continue
            for index, strip in enumerate(Channel.split_comment(Channel.strip_html(post["com"]))):
                post_id = str(post["no"]) if index == 0 else "{0}.{1}".format(str(post["no"]), index)
                result_post = ChannelPost(post_id)
                if index == 0:
                    if "sub" in post:
                        result_post.title = post["sub"]
                    if ("tim" in post) and ("ext" in post):
                        url = FourChanChannel.attachment_url + "/" + board + "/" + str(post["tim"]) + post["ext"]
                        result_post.attachments.append(ChannelAttachment(url))
                    # Register this post as a reply on every earlier post it
                    # quotes; quotes of posts not yet seen are dropped.
                    for reply in re.finditer(FourChanChannel.post_reply_pattern, strip):
                        reply_to = reply.group(1)
                        if reply_to in posts_by_id:
                            posts_by_id[reply_to].replies.append(result_post.id)
                else:
                    # Continuation strip: label it and attach under the head.
                    result_post.title = "... cont {0}".format(index)
                    posts_by_id[str(post["no"])].replies.append(result_post.id)
                result_post.comment = strip
                posts.append(result_post)
                posts_by_id[result_post.id] = result_post
        return posts
    def post(self, client, board, thread, comment, reply_to):
        """Submit `comment` to `thread` on `board`, optionally quoting `reply_to`.

        Raises PostingError when authorization fails or the server rejects
        the request.
        """
        if not self.authorize(client):
            raise PostingError("Cannot authorize. Provide a valid PASS and PIN.")
        if reply_to:
            # Prepend the ">>id" marker so the post renders as a reply.
            comment = ">>{0}\n{1}".format(reply_to, comment)
        r = client.session.post("https://sys.4chan.org/{0}/post".format(board), data={
            "mode": "regist",
            "resto": thread,
            "com": comment
        })
        if r.status_code != 200:
            raise PostingError("Cannot post: {0}. Is your PASS valid?".format(r.status_code))
# Plugin registration hooks picked up by the channel loader.
CHANNEL_NAME = "4chan"
CHANNEL_CLASS = FourChanChannel
CHANNEL_DESCRIPTION = "4chan is a simple image-based bulletin board where anyone can post comments and share images."
|
11496931
|
import FWCore.ParameterSet.Config as cms
# CMSSW job configuration: loads the ideal CMS geometry from XML and runs
# TestSpecParAnalyzer over a single empty event to inspect a DD SpecPar
# attribute ("MuStructure" == "MuonEndcapCSC").
process = cms.Process("DBGeometryTest")
#process.load("DetectorDescription.OfflineDBLoader.test.cmsIdealGeometryForWrite_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
# One event is enough -- the analyzer only reads geometry metadata.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.myprint = cms.OutputModule("AsciiOutputModule")
#046       std::string attribute = "MuStructure";  // could come from outside
#047       std::string value = "MuonEndcapCSC";
process.prod = cms.EDAnalyzer("TestSpecParAnalyzer"
    , specName = cms.string("MuStructure")
    , specStrValue = cms.untracked.string("MuonEndcapCSC")
    #, specDblValue = cms.untracked.double(0.0)
    )
process.Timing = cms.Service("Timing")
process.p1 = cms.Path(process.prod)
process.e1 = cms.EndPath(process.myprint)
|
11496935
|
# Collapse replicate columns of an RPK expression table to per-sample means.
#
# Input : GSE109125_Gene_count_table.csv.uniq.rpk -- tab-separated; column
#         headers look like "sample#replicate"; column 0 is the gene id.
# Output: the same table with one column per sample, each value the mean
#         over that sample's replicate columns.
#
# Fix over the original: both file handles are now closed via `with`
# (the original leaked them).
with open('GSE109125_Gene_count_table.csv.uniq.rpk') as fi, \
        open('GSE109125_Gene_count_table.csv.uniq.rpk.pure', 'w') as fo:
    # Strip the "#replicate" suffix so replicates of a sample share a name.
    header = [col.split('#')[0] for col in fi.readline().rstrip().split('\t')]
    # Map each sample name to the indices of its replicate columns;
    # column 0 (the gene id) is intentionally excluded.
    groups = {}
    for idx in range(1, len(header)):
        groups.setdefault(header[idx], []).append(idx)
    new_header = list(groups)
    # NOTE(review): the header row has no field for the gene-id column, so it
    # is one field shorter than the data rows (R read.table row-names style)
    # -- confirm downstream readers expect that before "fixing" it.
    fo.write('\t'.join(new_header) + '\n')
    for line in fi:
        fields = line.rstrip().split('\t')
        fo.write(fields[0])
        for sample in new_header:
            values = [float(fields[idx]) for idx in groups[sample]]
            fo.write('\t' + str(sum(values) / float(len(values))))
        fo.write('\n')
|
11496956
|
# Demonstrates the common ways to interpolate variables into strings.
name ="Tom"
age = 16
# 1) Plain concatenation -- non-strings need an explicit str().
print("Hello Myname is "+name+" and I am "+str(age))
# 2) str.format with auto-numbered placeholders.
print("Hello Myname is {} and I am {}".format(name,age))
# 3) str.format with explicit positional indices.
print("Hello Myname is {0} and My age is {1}".format(name,age))
# Most commonly used: f-string (supports inline expressions such as age+1).
print(f"Hello Myname is {name} and I am {age+1}")
|
11496959
|
import sublime
import re
import os
# Keys understood in AdvancedNewFile.sublime-settings and in a project's
# "AdvancedNewFile" settings dict (see SETTINGS / get_settings below).
ALIAS_SETTING = "alias"
DEFAULT_INITIAL_SETTING = "default_initial"
AUTOFILL_RENAME = "autofill_path_the_existing"
USE_CURSOR_TEXT_SETTING = "use_cursor_text"
SHOW_FILES_SETTING = "show_files"
SHOW_PATH_SETTING = "show_path"
DEFAULT_ROOT_SETTING = "default_root"
DEFAULT_PATH_SETTING = "default_path"
DEFAULT_FOLDER_INDEX_SETTING = "default_folder_index"
OS_SPECIFIC_ALIAS_SETTING = "os_specific_alias"
IGNORE_CASE_SETTING = "ignore_case"
ALIAS_ROOT_SETTING = "alias_root"
ALIAS_PATH_SETTING = "alias_path"
ALIAS_FOLDER_INDEX_SETTING = "alias_folder_index"
DEBUG_SETTING = "debug"
AUTO_REFRESH_SIDEBAR_SETTING = "auto_refresh_sidebar"
COMPLETION_TYPE_SETTING = "completion_type"
COMPLETE_SINGLE_ENTRY_SETTING = "complete_single_entry"
USE_FOLDER_NAME_SETTING = "use_folder_name"
RELATIVE_FROM_CURRENT_SETTING = "relative_from_current"
DEFAULT_EXTENSION_SETTING = "default_extension"
FILE_PERMISSIONS_SETTING = "file_permissions"
FOLDER_PERMISSIONS_SETTING = "folder_permissions"
RENAME_DEFAULT_SETTING = "rename_default"
VCS_MANAGEMENT_SETTING = "vcs_management"
FILE_TEMPLATES_SETTING = "file_templates"
SHELL_INPUT_SETTING = "shell_input"
APPEND_EXTENSION_ON_MOVE_SETTING = "append_extension_on_move"
RELATIVE_FALLBACK_INDEX_SETTING = "relative_fallback_index"
APPEND_EXTENSION_ON_COPY_SETTING = "append_extension_on_copy"
COPY_DEFAULT_SETTING = "copy_default"
CUT_TO_FILE_DEFAULT_SETTING = "cut_to_file_default"
CURRENT_FALLBACK_TO_PROJECT_SETTING = "current_fallback_to_project"
WARN_OVERWRITE_ON_MOVE_SETTING = "warn_overwrite_on_move"
NEW_FILE_DEFAULT_ROOT_SETTING = "new_file_default_root"
RENAME_FILE_DEFAULT_ROOT_SETTING = "rename_file_default_root"
COPY_FILE_DEFAULT_ROOT_SETTING = "copy_file_default_root"
DEFAULT_NEW_FILE = "empty_filename_action"
CURSOR_BEFORE_EXTENSION_SETTING = "cursor_before_extension"
# Every recognized setting key; get_settings() copies exactly these from the
# global settings object and lets projects override them.
SETTINGS = [
    ALIAS_SETTING,
    DEFAULT_INITIAL_SETTING,
    AUTOFILL_RENAME,
    USE_CURSOR_TEXT_SETTING,
    SHOW_FILES_SETTING,
    SHOW_PATH_SETTING,
    DEFAULT_ROOT_SETTING,
    DEFAULT_PATH_SETTING,
    DEFAULT_FOLDER_INDEX_SETTING,
    OS_SPECIFIC_ALIAS_SETTING,
    IGNORE_CASE_SETTING,
    ALIAS_ROOT_SETTING,
    ALIAS_PATH_SETTING,
    ALIAS_FOLDER_INDEX_SETTING,
    DEBUG_SETTING,
    AUTO_REFRESH_SIDEBAR_SETTING,
    COMPLETION_TYPE_SETTING,
    COMPLETE_SINGLE_ENTRY_SETTING,
    USE_FOLDER_NAME_SETTING,
    RELATIVE_FROM_CURRENT_SETTING,
    DEFAULT_EXTENSION_SETTING,
    FILE_PERMISSIONS_SETTING,
    FOLDER_PERMISSIONS_SETTING,
    RENAME_DEFAULT_SETTING,
    VCS_MANAGEMENT_SETTING,
    FILE_TEMPLATES_SETTING,
    SHELL_INPUT_SETTING,
    APPEND_EXTENSION_ON_MOVE_SETTING,
    RELATIVE_FALLBACK_INDEX_SETTING,
    APPEND_EXTENSION_ON_COPY_SETTING,
    COPY_DEFAULT_SETTING,
    CUT_TO_FILE_DEFAULT_SETTING,
    CURRENT_FALLBACK_TO_PROJECT_SETTING,
    WARN_OVERWRITE_ON_MOVE_SETTING,
    NEW_FILE_DEFAULT_ROOT_SETTING,
    RENAME_FILE_DEFAULT_ROOT_SETTING,
    COPY_FILE_DEFAULT_ROOT_SETTING,
    DEFAULT_NEW_FILE,
    CURSOR_BEFORE_EXTENSION_SETTING
]
# Regexes classifying the start of a path (absolute root vs home dir).
NIX_ROOT_REGEX = r"^/"
WIN_ROOT_REGEX = r"[a-zA-Z]:(/|\\)"
HOME_REGEX = r"^~"
PLATFORM = sublime.platform()
# Separator between an alias token and the rest of the path ("alias:sub/path").
TOP_LEVEL_SPLIT_CHAR = ":"
# Sublime Text 3+ reports build numbers above 3000.
IS_ST3 = int(sublime.version()) > 3000
IS_X64 = sublime.arch() == "x64"
# Region key used to highlight text cut with "cut to file".
REGION_KEY = "anf_cut_to_file"
def generate_creation_path(settings, base, path, append_extension=False):
    """Resolve `path` against `base` into an absolute creation target.

    A `base` that is not an absolute root is treated as an unresolved alias
    and returned as "base:path". A trailing separator (or empty `path`)
    marks the result as a directory. Otherwise, when `append_extension` is
    set and the final component has no extension and does not already exist
    on disk, the configured default extension is appended.
    """
    if PLATFORM == "windows":
        if not re.match(WIN_ROOT_REGEX, base):
            # splitunc was removed in ST3's Python; both calls detect a
            # drive/UNC prefix.
            if IS_ST3:
                drive, _ = os.path.splitdrive(base)
            else:
                drive, _ = os.path.splitunc(base)
            if len(drive) == 0:
                # No drive -> `base` is an alias, not a real path.
                return base + TOP_LEVEL_SPLIT_CHAR + path
            else:
                return os.path.join(base, path)
    else:
        if not re.match(NIX_ROOT_REGEX, base):
            return base + TOP_LEVEL_SPLIT_CHAR + path
    tokens = re.split(r"[/\\]", base) + re.split(r"[/\\]", path)
    if tokens[0] == "":
        tokens[0] = "/"
    if PLATFORM == "windows":
        # Keep the original drive spec (e.g. "C:\") as the join root.
        tokens[0] = base[0:3]
    full_path = os.path.abspath(os.path.join(*tokens))
    if re.search(r"[/\\]$", path) or len(path) == 0:
        # Explicit directory request -- abspath strips trailing separators.
        full_path += os.path.sep
    elif re.search(r"\.", tokens[-1]):
        # The component already carries an extension; restore a bare trailing
        # dot that abspath removed. (Unused `filename` local removed.)
        if re.search(r"\.$", tokens[-1]):
            full_path += "."
    elif append_extension:
        if not os.path.exists(full_path):
            full_path += settings.get(DEFAULT_EXTENSION_SETTING)
    return full_path
def get_settings(view):
    """Load plugin settings, overlaying per-project overrides from `view`.

    Project values replace the global ones, except "alias", whose dict is
    merged. Unknown project keys are reported and ignored; a non-dict
    project settings value disables overrides entirely.
    """
    settings = sublime.load_settings("AdvancedNewFile.sublime-settings")
    project_settings = {}
    local_settings = {}
    if view is not None:
        project_settings = view.settings().get('AdvancedNewFile', {})
    for setting in SETTINGS:
        local_settings[setting] = settings.get(setting)
    if not isinstance(project_settings, dict):
        print("Invalid type %s for project settings" % type(project_settings))
        return local_settings
    for key in project_settings:
        if key not in SETTINGS:
            # BUG FIX: the original passed `key` as a second positional
            # argument to print(), leaving a literal "%s" in the message.
            print("AdvancedNewFile[Warning]: Invalid key "
                  "'%s' in project settings." % key)
            continue
        if key == "alias":
            # Merge alias maps; project entries win on collision.
            if IS_ST3:
                local_settings[key] = dict(
                    local_settings[key].items() |
                    project_settings.get(key).items()
                )
            else:
                local_settings[key] = dict(
                    local_settings[key].items() +
                    project_settings.get(key).items()
                )
        else:
            local_settings[key] = project_settings[key]
    return local_settings
def get_project_folder_data(use_folder_name):
    """Return (display_name, path) pairs for the window's project folders.

    The display name comes from a folder entry's "name" key when present
    (suppressed entirely when `use_folder_name` is truthy); otherwise the
    path's basename is used.
    """
    window = sublime.active_window()
    project_folders = window.folders()
    folder_entries = []
    if IS_ST3:
        project_data = window.project_data()
        if project_data is not None:
            if use_folder_name:
                # Ignore custom names; basenames are used below instead.
                folder_entries = [{} for _ in project_data.get("folders", [])]
            else:
                folder_entries = project_data.get("folders", [])
    else:
        folder_entries = [{} for _ in project_folders]
    folders = []
    for index in range(len(folder_entries)):
        folder_path = project_folders[index]
        entry = folder_entries[index]
        display = entry.get("name", os.path.basename(folder_path))
        folders.append((display, folder_path))
    return folders
|
11497000
|
from typing import List
import lab as B
from lab import dispatch
from lab.util import abstract
from plum import Union
@dispatch
@abstract()
def take_along_axis(a: B.Numeric, index: B.Numeric, axis: int = 0):
    """
    Gathers elements of `a` along `axis` at `index` locations.

    Abstract stub: backend-specific implementations are registered
    elsewhere via `@dispatch`.
    """
@dispatch
@abstract()
def from_numpy(_: B.Numeric, b: Union[List, B.Numeric]):
    """
    Converts the array `b` to a tensor of the same backend as the first
    argument (which is used only to select the backend, hence unnamed).

    Abstract stub: backend-specific implementations are registered
    elsewhere via `@dispatch`.
    """
|
11497032
|
from __future__ import with_statement # this is to work with python2.5
from pyps import workspace
from terapyps import workspace as teraw, Maker
# Regenerate Terapix code for every function in the "addcst" workspace;
# any stale workspace from a previous run is deleted first.
workspace.delete("addcst")
with teraw("addcst.c", name="addcst", deleteOnClose=False, recoverInclude=False) as w:
    for f in w.fun:
        f.terapix_code_generation(debug=True)
#    w.compile(Maker())
|
11497084
|
import numpy as np
import torch
from PIL import Image, ImageEnhance
import pickle
import random
import os
import torchvision.transforms as transforms
import json
def augment_image(image):
    """Randomly flip and jitter brightness/color of a PIL image (train-time)."""
    if random.random() < 0.5:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
    # Enhancement factors are drawn uniformly from [0.7, 1.3).
    image = ImageEnhance.Brightness(image).enhance(random.random() * 0.6 + 0.7)
    image = ImageEnhance.Color(image).enhance(random.random() * 0.6 + 0.7)
    return image
class MUSIC_Dataset(object):
    """MUSIC synthetic audio/visual dataset.

    Each item is (audio_spectrogram, positive_frame, negative_frame): the
    positive frame comes from the same segment as the audio, the negative
    frame from a randomly chosen different segment. In 'val' mode the
    segment's bounding-box array is returned as a fourth element.
    """

    def __init__(self, opt):
        self.opt = opt
        # NOTE(review): data locations are hard-coded to one machine's layout.
        if self.opt.mode == 'train':
            self.audio_root = '/home/ruiq/Music/synthetic/train/train/audio'
            self.video_root = '/home/ruiq/Music/synthetic/train/train/video'
            # BUG FIX: box annotations only exist for the eval split. The
            # original listed self.box_root unconditionally, which raised
            # AttributeError in 'train' mode (box_root was never set).
            self.box_list = []
        else:
            self.audio_root = '/home/ruiq/Music/synthetic/test1/audio'
            self.video_root = '/home/ruiq/Music/synthetic/test1/video'
            self.box_root = '/home/ruiq/Music/synthetic/test1/box'
            self.box_list = sorted(os.listdir(self.box_root))
        self.audio_list = sorted(os.listdir(self.audio_root))
        self.video_list = sorted(os.listdir(self.video_root))
        # Audio and video entries must pair up one-to-one by sorted order.
        assert len(self.audio_list) == len(self.video_list)
        # The same deterministic transform is used in every mode (the original
        # had two identical branches); train-time augmentation happens
        # separately in __getitem__.
        self.img_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
        ])

    def __len__(self):
        return len(self.audio_list)

    def __getitem__(self, index):
        # Positive pair: audio and frame from the same segment.
        cur_audio_segment = self.audio_list[index]
        posi_video_segment = self.video_list[index]
        if self.opt.mode == 'val':
            box_segment = self.box_list[index]
        # Audio features are stored as pickled numpy arrays.
        with open(os.path.join(self.audio_root, cur_audio_segment), 'rb') as fid:
            cur_audio_data = pickle.load(fid)
        cur_audio_data = np.expand_dims(cur_audio_data, 0)  # add channel dim
        posi_img = Image.open(os.path.join(self.video_root, posi_video_segment))
        if self.opt.enable_img_augmentation and self.opt.mode == 'train':
            posi_img = augment_image(posi_img)
        posi_img = self.img_transform(posi_img)
        # Negative frame: rejection-sample any segment but the positive one.
        while True:
            nega_video_segment = random.choice(self.video_list)
            if nega_video_segment != posi_video_segment:
                break
        nega_img = Image.open(os.path.join(self.video_root, nega_video_segment))
        if self.opt.enable_img_augmentation and self.opt.mode == 'train':
            nega_img = augment_image(nega_img)
        nega_img = self.img_transform(nega_img)
        if self.opt.mode == 'val':
            box = np.load(os.path.join(self.box_root, box_segment))
            return cur_audio_data, posi_img, nega_img, box
        return cur_audio_data, posi_img, nega_img
|
11497088
|
import ik
import unittest
class TestVec3(unittest.TestCase):
    """Unit tests for the ik.Vec3 vector type."""

    def test_default_construct(self):
        # A default-constructed vector is the zero vector.
        v = ik.Vec3()
        self.assertEqual(v.x, 0.0)
        self.assertEqual(v.y, 0.0)
        self.assertEqual(v.z, 0.0)

    def test_construct_with_two_values(self):
        # A missing z component defaults to 0.
        v = ik.Vec3(1, 2)
        self.assertEqual(v.x, 1.0)
        self.assertEqual(v.y, 2.0)
        self.assertEqual(v.z, 0.0)

    def test_construct_with_three_values(self):
        v = ik.Vec3(1, 2, 3)
        self.assertEqual(v.x, 1.0)
        self.assertEqual(v.y, 2.0)
        self.assertEqual(v.z, 3.0)

    def test_construct_with_invalid_types(self):
        # Non-numeric components must be rejected.
        with self.assertRaises(TypeError):
            v = ik.Vec3("haha", "b", "c")

    def test_set_zero(self):
        v = ik.Vec3(1, 2, 3)
        v.set_zero()
        self.assertEqual(v.x, 0.0)
        self.assertEqual(v.y, 0.0)
        self.assertEqual(v.z, 0.0)

    def test_set_using_another_vector(self):
        v = ik.Vec3()
        v.set(ik.Vec3(4, 5, 6))
        self.assertEqual(v.x, 4)
        self.assertEqual(v.y, 5)
        self.assertEqual(v.z, 6)

    def test_set_using_tuple(self):
        # set() also accepts a plain 3-tuple.
        v = ik.Vec3()
        v.set((4, 5, 6))
        self.assertEqual(v.x, 4)
        self.assertEqual(v.y, 5)
        self.assertEqual(v.z, 6)
|
11497144
|
import os
from django.utils.translation import ugettext_lazy as _
# NOTE(review): ugettext_lazy was removed in Django 4 (gettext_lazy is the
# replacement) -- assumes this project runs Django < 4, confirm.
import environ
import rollbar
# Initialize environ and load variables from the project-root .env file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env = environ.Env(DEBUG=(bool, False),)
env_file_path = os.path.join(BASE_DIR, '.env')
environ.Env.read_env(env_file_path)  # reading .env file
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=[])
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 3rd party Apps
    'rest_framework',
    'rest_framework_tracking',
    'corsheaders',
    # Internal Apps
    'quran_text',
    'quran_tafseer',
    'docs',
]
# Extra middleware can be spliced before/after the standard stack via env.
PRE_MIDDLEWARE = env.list('PRE_MIDDLEWARE', default=[])
POST_MIDDLEWARE = env.list('POST_MIDDLEWARE', default=[])
MIDDLEWARE = PRE_MIDDLEWARE + [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'rollbar.contrib.django.middleware.RollbarNotifierMiddleware',
] + POST_MIDDLEWARE
ROOT_URLCONF = 'tafseer_api.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = env('WSGI_APPLICATION', default='tafseer_api.wsgi.application')
# Database connection comes entirely from DATABASE_URL in the environment.
DATABASES = {'default': env.db()}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization: English and Arabic are supported.
LANGUAGE_CODE = 'en'
LANGUAGES = [
    ('en', _('English')),
    ('ar', _('Arabic'))
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# REST framework
# Extra renderers/parsers (e.g. the browsable API in dev) come from env.
REST_FRAMEWORK_RENDERER = env.list('REST_FRAMEWORK_RENDERER', default=[])
REST_FRAMEWORK_PARSER = env.list('REST_FRAMEWORK_PARSER', default=[])
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
    ] + REST_FRAMEWORK_RENDERER,
    'DEFAULT_PARSER_CLASSES': [
        'rest_framework.parsers.JSONParser',
    ] + REST_FRAMEWORK_PARSER
}
# Rollbar
ROLLBAR = {
    'access_token': env('ROLLBAR_ACCESS_TOKEN', default=''),
    'environment': 'development' if DEBUG else 'production',
    'root': BASE_DIR,
}
rollbar.init(**ROLLBAR)
# CORS support
# NOTE(review): allowing all origins is wide open; acceptable for a public
# read-only API, otherwise restrict -- confirm intent.
CORS_ALLOW_ALL_ORIGINS = True
|
11497225
|
from dart.client.python.dart_client import Dart
from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, FileFormat, RowFormat, DataType, Compression, \
    LoadType
# Registers the "owen_outclick_us_v02" dataset definition with a local Dart
# service: gzipped JSON text files on S3, merged on eventInstanceUuid, with
# each column extracted from a JSON path in the Owen event schema.
if __name__ == '__main__':
    dart = Dart('localhost', 5000)
    assert isinstance(dart, Dart)
    dataset = dart.save_dataset(Dataset(data=(DatasetData(
        name='owen_outclick_us_v02',
        description='Owen outclick data, based on overlord schema version. Considered a replacement for outclick events.',
        table_name='outclick',
        location='s3://example-bucket/prd/inbound/overlord/raw-firehose-02/rmn-outclicks',
        load_type=LoadType.MERGE,
        data_format=DataFormat(
            file_format=FileFormat.TEXTFILE,
            row_format=RowFormat.JSON,
        ),
        compression=Compression.GZIP,
        # Partitioned by event date components.
        partitions=[
            Column('year', DataType.STRING),
            Column('month', DataType.STRING),
            Column('day', DataType.STRING),
        ],
        primary_keys=['eventInstanceUuid'],
        merge_keys=['eventInstanceUuid'],
        sort_keys=['eventTimestamp', 'eventInstanceUuid', 'derivedEventInstanceId'],
        distribution_keys=['eventInstanceUuid'],
        batch_merge_sort_keys=['owenProcessed DESC'],
        # One Column per extracted JSON path; `path` addresses the source
        # field inside the Owen event payload.
        columns=[
            Column('advertiserUuid', DataType.VARCHAR, length=2048, path='owen.context.advertiserUuid'),
            Column('appBadgeCount', DataType.INT, path='owen.context.appBadgeCount'),
            Column('appForegroundFlag', DataType.BOOLEAN, path='owen.context.appForegroundFlag'),
            Column('bluetoothBeaconId', DataType.VARCHAR, length=50, path='owen.context.bluetoothBeaconId'),
            Column('bluetoothBeaconType', DataType.VARCHAR, length=25, path='owen.context.bluetoothBeaconType'),
            Column('bluetoothEnabledFlag', DataType.BOOLEAN, path='owen.context.bluetoothEnabledFlag'),
            Column('breadCrumb', DataType.VARCHAR, length=2048, path='owen.context.breadCrumb'),
            Column('browserFamily', DataType.VARCHAR, length=50, path='owen.context.browserFamily'),
            Column('browserVersion', DataType.VARCHAR, length=50, path='owen.context.browserVersion'),
            Column('carrier', DataType.VARCHAR, length=25, path='owen.context.carrier'),
            Column('city', DataType.VARCHAR, length=75, path='owen.context.city'),
            Column('connectionType', DataType.VARCHAR, length=25, path='owen.context.connectionType'),
            Column('country', DataType.VARCHAR, length=2, path='owen.context.country'),
            Column('custom', DataType.VARCHAR, path='owen.context.custom'),
            Column('deviceCategory', DataType.VARCHAR, length=2048, path='owen.context.deviceCategory'),
            Column('deviceFingerprint', DataType.VARCHAR, length=26, path='owen.context.deviceFingerprint'),
            Column('dma', DataType.INT, path='owen.context.dma'),
            Column('environment', DataType.VARCHAR, length=2048, path='owen.context.environment'),
            Column('experimentObject', DataType.VARCHAR, length=1024, path='owen.context.experiment'),
            Column('failureFlag', DataType.BOOLEAN, path='owen.context.failureFlag'),
            Column('failureReason', DataType.VARCHAR, length=2048, path='owen.context.failureReason'),
            Column('favoriteFlag', DataType.BOOLEAN, path='owen.context.favoriteFlag'),
            Column('featureFlags', DataType.VARCHAR, path='owen.context.featureFlags'),
            Column('geofenceUuid', DataType.VARCHAR, length=2048, path='owen.context.geofenceUuid'),
            Column('inventoryCount', DataType.INT, path='owen.context.inventoryCount'),
            # inventory_* columns flatten the first element of the inventory array.
            Column('inventory_affiliateNetwork', DataType.VARCHAR, length=50, path='owen.context.inventory[0].affiliateNetwork'),
            Column('inventory_brand', DataType.VARCHAR, length=100, path='owen.context.inventory[0].brand'),
            Column('inventory_claimUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].claimUuid'),
            Column('inventory_clickLocation', DataType.VARCHAR, length=100, path='owen.context.inventory[0].clickLocation'),
            Column('inventory_commentsCount', DataType.INT, path='owen.context.inventory[0].commentsCount'),
            Column('inventory_conquestingFlag', DataType.BOOLEAN, path='owen.context.inventory[0].conquestingFlag'),
            Column('inventory_couponRank', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].couponRank'),
            Column('inventory_deepLinkUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrl'),
            Column('inventory_deepLinkUrlScheme', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrlScheme'),
            Column('inventory_exclusivityFlag', DataType.BOOLEAN, path='owen.context.inventory[0].exclusivityFlag'),
            Column('inventory_expirationDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].expirationDate'),
            Column('inventory_finalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].finalPrice'),
            Column('inventory_instoreType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].instoreType'),
            Column('inventory_inventoryChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryChannel'),
            Column('inventory_inventoryName', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryName'),
            Column('inventory_inventorySource', DataType.VARCHAR, length=50, path='owen.context.inventory[0].inventorySource'),
            Column('inventory_inventoryType', DataType.VARCHAR, length=25, path='owen.context.inventory[0].inventoryType'),
            Column('inventory_inventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryUuid'),
            Column('inventory_lastVerifiedDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].lastVerifiedDate'),
            Column('inventory_monetizableFlag', DataType.BOOLEAN, path='owen.context.inventory[0].monetizableFlag'),
            Column('inventory_noVotes', DataType.INT, path='owen.context.inventory[0].noVotes'),
            Column('inventory_onlineType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].onlineType'),
            Column('inventory_originalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].originalPrice'),
            Column('inventory_outRedirectUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outRedirectUrl'),
            Column('inventory_outclickUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outclickUuid'),
            Column('inventory_parentInventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].parentInventoryUuid'),
            Column('inventory_personalizationFlag', DataType.BOOLEAN, path='owen.context.inventory[0].personalizationFlag'),
            Column('inventory_position', DataType.INT, path='owen.context.inventory[0].position'),
            Column('inventory_proximity', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].proximity'),
            Column('inventory_proximityUnit', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].proximityUnit'),
            Column('inventory_recommendedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].recommendedFlag'),
            Column('inventory_redemptionChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].redemptionChannel'),
            Column('inventory_retailCategory', DataType.VARCHAR, length=75, path='owen.context.inventory[0].retailCategory'),
            Column('inventory_savedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].savedFlag'),
            Column('inventory_siteUuid', DataType.VARCHAR, length=26, path='owen.context.inventory[0].siteUuid'),
            Column('inventory_startDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].startDate'),
            Column('inventory_successPercentage', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].successPercentage'),
            Column('inventory_usedByCount', DataType.INT, path='owen.context.inventory[0].usedByCount'),
            Column('inventory_yesVotes', DataType.INT, path='owen.context.inventory[0].yesVotes'),
            Column('ipAddress', DataType.VARCHAR, length=45, path='owen.context.ipAddress'),
            Column('language', DataType.VARCHAR, length=6, path='owen.context.language'),
            Column('latitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.latitude'),
            Column('locationEnabledFlag', DataType.BOOLEAN, path='owen.context.locationEnabledFlag'),
            Column('loggedInFlag', DataType.BOOLEAN, path='owen.context.loggedInFlag'),
            Column('longitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.longitude'),
            Column('macAddress', DataType.VARCHAR, length=2048, path='owen.context.macAddress'),
            Column('marketing_adGroup', DataType.VARCHAR, length=2048, path='owen.context.marketing.adGroup'),
            Column('marketing_campaign', DataType.VARCHAR, length=50, path='owen.context.marketing.campaign'),
            Column('marketing_campaignSendCount', DataType.INT, path='owen.context.marketing.campaignSendCount'),
            Column('marketing_campaignUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.campaignUuid'),
            Column('marketing_cdRank', DataType.INT, path='owen.context.marketing.cdRank'),
            Column('marketing_channel', DataType.VARCHAR, length=50, path='owen.context.marketing.channel'),
            Column('marketing_content', DataType.VARCHAR, length=2048, path='owen.context.marketing.content'),
            Column('marketing_medium', DataType.VARCHAR, length=50, path='owen.context.marketing.medium'),
            Column('marketing_notificationUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.notificationUuid'),
            Column('marketing_source', DataType.VARCHAR, length=100, path='owen.context.marketing.source'),
            Column('marketing_term', DataType.VARCHAR, length=2048, path='owen.context.marketing.term'),
            Column('marketing_vendor', DataType.VARCHAR, length=25, path='owen.context.marketing.vendor'),
            Column('mobileDeviceMake', DataType.VARCHAR, length=25, path='owen.context.mobileDeviceMake'),
            Column('mobileDeviceModel', DataType.VARCHAR, length=50, path='owen.context.mobileDeviceModel'),
            Column('notificationEnabledFlag', DataType.BOOLEAN, path='owen.context.notificationEnabledFlag'),
            Column('osFamily', DataType.VARCHAR, length=25, path='owen.context.osFamily'),
            Column('osName', DataType.VARCHAR, length=2048, path='owen.context.osName'),
            Column('osVersion', DataType.VARCHAR, length=2048, path='owen.context.osVersion'),
            Column('pageName', DataType.VARCHAR, length=2048, path='owen.context.pageName'),
            Column('pageType', DataType.VARCHAR, length=100, path='owen.context.pageType'),
            Column('partialSearchTerm', DataType.VARCHAR, length=2048, path='owen.context.partialSearchTerm'),
            Column('personalizationFlag', DataType.BOOLEAN, path='owen.context.personalizationFlag'),
            Column('previousPageName', DataType.VARCHAR, length=2048, path='owen.context.previousPageName'),
            Column('previousViewInstanceUuid', DataType.VARCHAR, length=2048, path='owen.context.previousViewInstanceUuid'),
            Column('promptName', DataType.VARCHAR, length=2048, path='owen.context.promptName'),
            Column('propertyName', DataType.VARCHAR, length=20, path='owen.context.propertyName'),
            Column('referrer', DataType.VARCHAR, length=2048, path='owen.context.referrer'),
            Column('region', DataType.VARCHAR, length=25, path='owen.context.region'),
            Column('screenHeight', DataType.INT, path='owen.context.screenHeight'),
            Column('screenWidth', DataType.INT, path='owen.context.screenWidth'),
            Column('session', DataType.VARCHAR, length=2048, path='owen.context.session'),
            Column('test_testUuid', DataType.VARCHAR, length=26, path='owen.context.test.testUuid'),
            Column('udid', DataType.VARCHAR, length=40, path='owen.context.udid'),
            Column('userAgent', DataType.VARCHAR, length=2048, path='owen.context.userAgent'),
            Column('userQualifier', DataType.VARCHAR, length=26, path='owen.context.userQualifier'),
            Column('userUuid', DataType.VARCHAR, length=2048, path='owen.context.userUuid'),
            Column('vendorObject', DataType.VARCHAR, length=512, path='owen.context.vendor'),
            Column('viewInstanceUuid', DataType.VARCHAR, length=128, path='owen.context.viewInstanceUuid'),
            Column('eventAction', DataType.VARCHAR, length=2048, path='owen.event.eventAction'),
            Column('eventCategory', DataType.VARCHAR, length=25, path='owen.event.eventCategory'),
            Column('eventInstanceUuid', DataType.VARCHAR, length=26, path='owen.event.eventInstanceUuid'),
            Column('eventName', DataType.VARCHAR, length=50, path='owen.event.eventName'),
            Column('eventPlatform', DataType.VARCHAR, length=25, path='owen.event.eventPlatform'),
            Column('eventPlatformVersion', DataType.VARCHAR, length=25, path='owen.event.eventPlatformVersion'),
            Column('eventTarget', DataType.VARCHAR, length=2048, path='owen.event.eventTarget'),
            Column('eventVersion', DataType.VARCHAR, length=25, path='owen.event.eventVersion'),
            Column('eventTimestamp', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='owen.event.eventTimestamp'),
            Column('derivedEventInstanceId', DataType.VARCHAR, length=64, path='metadata.derivedEventInstanceId'),
            Column('owenProcessed', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='metadata.analyticsTopologyFinishTime'),
        ],
    ))))
    # NOTE(review): print statement -> this script is Python 2 only; confirm
    # the dart client still targets py2 before porting.
    print 'created dataset: %s' % dataset.id
11497230
|
import argparse
import csv
import re
# import modules needed for logging
import logging
import os
import random
# NOTE(review): csv and re appear unused in this module -- confirm before
# removing.
logger = logging.getLogger(__name__)  # module logger
def parse_arguments():
    """
    Parse command-line arguments.

    Returns
    -------
    opts : dict
        Parsed command-line options keyed by destination name.
    """
    description = 'Divides pdb info files for parallelization'
    parser = argparse.ArgumentParser(description=description)
    # Required inputs.
    parser.add_argument('-f', '--in-file',
                        type=str, required=True,
                        help='PDB info file to divide')
    parser.add_argument('-m', '--mut-file',
                        type=str, required=True,
                        help='File containing mutation information')
    # Optional tuning knobs.
    parser.add_argument('-n', '--num-splits',
                        type=int, default=1000,
                        help='Number of splits to perform (Default: 1000)')
    parser.add_argument('--split-dir',
                        type=str, default="../data/split_pdbs/",
                        help='Output directory for split PDB info files')
    return vars(parser.parse_args())
def read_file(in_file):
    """Group the lines of an open tab-delimited file by their first field.

    Lines whose second field is empty (no info) are skipped. Returns a dict
    mapping id -> list of raw lines.
    """
    dictionary = {}
    for line in in_file:
        fields = line.split('\t')
        # Leave out records that carry no information.
        if fields[1] == '':
            continue
        dictionary.setdefault(fields[0], []).append(line)
    return dictionary


def split_file(in_file, num_splits, split_dir, mut_file):
    """Split a PDB info file (and its mutation file) into `num_splits` chunks.

    Lines are grouped by structure id so every line for an id lands in the
    same chunk; ids are assigned to chunks in random order. Outputs are
    written to `split_dir` as pdb_info_split_<i>.txt / mut_info_split_<i>.txt,
    each starting with the corresponding input header line.
    """
    if not os.path.exists(split_dir):
        os.mkdir(split_dir)
    # Read both inputs fully up front; headers are written to every chunk.
    with open(in_file) as f, open(mut_file) as m:
        pdb_header = f.readline()
        mut_header = m.readline()
        pdb_dict = read_file(f)
        mut_dict = read_file(m)
    total_ids = len(pdb_dict.keys())
    print(total_ids)
    num_ids = int(total_ids / num_splits)
    count_file = 0
    count_id = num_ids  # forces a new output file on the first id
    # BUG FIX: dict.keys() is a view in Python 3 and random.shuffle cannot
    # shuffle it in place -- materialize a list first.
    keys = list(pdb_dict.keys())
    random.shuffle(keys)
    pdb_out = None
    mut_out = None
    for key in keys:
        # Start a new output pair once the current one has its share of ids.
        if count_id == num_ids and count_file < num_splits:
            count_id = 0
            # Close the previous pair (the original leaked these handles).
            if pdb_out is not None:
                pdb_out.close()
                mut_out.close()
            pdb_out = open(split_dir + "/pdb_info_split_" + str(count_file) + ".txt", 'w')
            pdb_out.write(pdb_header)
            mut_out = open(split_dir + "/mut_info_split_" + str(count_file) + ".txt", 'w')
            mut_out.write(mut_header)
            count_file += 1
        # Write all lines pertaining to the structure id.
        for line in pdb_dict[key]:
            pdb_out.write(line)
        if key in mut_dict:
            for line in mut_dict[key]:
                mut_out.write(line)
        count_id += 1
    if pdb_out is not None:
        pdb_out.close()
        mut_out.close()
def main(opts):
    """
    Splits a PDB file into the desired number of splits.

    Parameters
    ----------
    opts: dict
        command line arguments from the user (keys: in_file,
        num_splits, split_dir, mut_file)
    """
    # delegate all work to split_file
    split_file(opts['in_file'], opts['num_splits'], opts['split_dir'], opts['mut_file'])


if __name__ == "__main__":
    opts = parse_arguments()
    main(opts)
|
11497259
|
import numpy as np
import concurrent.futures
from operator import itemgetter
from sim_exp import arrival_rate_upperbound
from mapper import *
from policygrad_learning import *
from q_learning import *
# ############################################ Scher ########################################### #
class Scher(object):
    """Static scheduler that expands a job's redundancy by an additive amount.

    The scheduling behavior is selected by ``sching_m['type']`` and the
    additive expansion amount by ``sching_m['a']``.
    """

    def __init__(self, mapping_m, sching_m):
        self.sching_m = sching_m
        self.mapper = Mapper(mapping_m)
        self._type = 'Scher'
        self._id = 'Scher_a={}'.format(sching_m['a'])
        self.s_len = 1
        # bind the configured scheduling strategy (unknown types leave
        # `schedule` unset, matching the original if/elif chain)
        strategy = {
            'plain': self.plain,
            'expand_if_totaldemand_leq': self.expand_if_totaldemand_leq,
            'opportunistic': self.opportunistic,
        }.get(sching_m['type'])
        if strategy is not None:
            self.schedule = strategy

    def __repr__(self):
        return 'Scher[sching_m={}, mapper= {}]'.format(self.sching_m, self.mapper)

    def plain(self, j, w_l, cluster, expand=True):
        """Assign j.n = j.k + a workers; (None, -1, None) when infeasible."""
        workers = self.mapper.worker_l(j, w_l)
        if len(workers) < j.k:
            return None, -1, None
        a = self.sching_m['a'] if expand else 0
        j.n = int(j.k + a)
        if len(workers) < j.n:
            return None, -1, None
        return None, a, workers[:j.n]

    def expand_if_totaldemand_leq(self, j, w_l, cluster):
        """Expand only when the job's total demand is under the threshold."""
        try:
            total = j.totaldemand
        except AttributeError:  # use_lessreal_sim = True: derive the demand
            total = j.k * j.reqed * j.lifetime
        return self.plain(j, w_l, cluster, expand=(total < self.sching_m['threshold']))

    def opportunistic(self, j, w_l, cluster):
        """Schedule on workers with spare capacity, packed or spread by load."""
        mapping = self.sching_m['mapping_type']
        if mapping == 'packing':
            eligible = [
                w for w in w_l
                if j.reqed <= w.cap - sum(t.reqed for t in w.t_l if t.type_ == 's')
            ]
        elif mapping == 'spreading':
            # sort eligible workers by their scheduled load fraction
            load_pairs = []
            for w in w_l:
                sched_reqed = sum(t.reqed for t in w.t_l if t.type_ == 's')
                if j.reqed <= w.cap - sched_reqed:
                    load_pairs.append((w, sched_reqed / w.cap))
            eligible = [w for w, _ in sorted(load_pairs, key=itemgetter(1))]
        if len(eligible) < j.k:
            return None, -1, None
        return None, 1, eligible[:int(j.k + self.sching_m['a'])]
# ################################# Scher_wMultiplicativeExpansion ############################# #
class Scher_wMultiplicativeExpansion(object):
    """Scheduler that expands redundancy multiplicatively (n = k * r)."""

    def __init__(self, mapping_m, sching_m, _id=None):
        self.sching_m = sching_m
        self.mapper = Mapper(mapping_m)
        self._type = 'Scher_wMultiplicativeExpansion'
        stype = sching_m['type']
        if stype == 'plain':
            self.schedule = self.plain
        elif stype == 'expand_if_totaldemand_leq':
            self.schedule = self.expand_if_totaldemand_leq
            self._id = 'd={}'.format(self.sching_m['threshold'])
        # an explicit id always wins over the derived one
        if _id is not None:
            self._id = _id

    def __repr__(self):
        return 'Scher_wMultiplicativeExpansion[sching_m={}, mapper= {}, _id= {}]'.format(self.sching_m, self.mapper, self._id)

    def plain(self, j, w_l, cluster, expand=True):
        """Assign j.n = j.k * r workers; (None, -1, None) when infeasible."""
        factor = self.sching_m['r'] if expand else 1
        j.n = int(j.k * factor)
        workers = self.mapper.worker_l(j, w_l)
        if len(workers) < j.n:
            return None, -1, None
        return None, factor, workers[:j.n]

    def expand_if_totaldemand_leq(self, j, w_l, cluster):
        """Expand only when the job's total demand is under the threshold."""
        demand = j.k * j.reqed * j.lifetime
        return self.plain(j, w_l, cluster, expand=(demand < self.sching_m['threshold']))
# ########################################### RLScher ########################################## #
NN_len = 20 # 10
class RLScher():
    """Scheduler that learns the additive expansion amount with an RL learner.

    The state vector (STATE_LEN features, built by ``state``/``state_``)
    describes the job and cluster load; the action is an integer expansion
    ``a`` in ``[0, sching_m['a']]`` added to the job's ``k``.
    """

    def __init__(self, sinfo_m, mapping_m, sching_m, save_dir='save', save_suffix=None):
        """Build the mapper and the configured learner.

        Parameters
        ----------
        sinfo_m : dict
            Simulation info; must contain 'njob'.
        mapping_m : dict
            Mapper configuration.
        sching_m : dict
            Scheduling config; must contain 'a', 'N' and 'learner'.
        save_dir, save_suffix :
            Checkpoint location passed through to the learner.
        """
        self.sinfo_m = sinfo_m
        self._type = 'RLScher'
        self._id = 'RLScher_{}'.format(save_suffix)
        self.mapper = Mapper(mapping_m)
        self.s_len = STATE_LEN
        self.a_len = sching_m['a'] + 1  # actions 0..a inclusive
        self.N, self.T = sching_m['N'], sinfo_m['njob']
        if sching_m['learner'] == 'PolicyGradLearner':
            self.learner = PolicyGradLearner(self.s_len, self.a_len, nn_len=NN_len, w_actorcritic=True, save_dir=save_dir, save_suffix=save_suffix)
        elif sching_m['learner'] == 'QLearner':
            self.learner = QLearner(self.s_len, self.a_len, nn_len=NN_len, save_dir=save_dir, save_suffix=save_suffix)
        elif sching_m['learner'] == 'QLearner_wTargetNet':
            self.learner = QLearner_wTargetNet(self.s_len, self.a_len, nn_len=NN_len, save_dir=save_dir, save_suffix=save_suffix)
        elif sching_m['learner'] == 'QLearner_wTargetNet_wExpReplay':
            self.learner = QLearner_wTargetNet_wExpReplay(self.s_len, self.a_len, exp_buffer_size=sching_m['exp_buffer_size'], exp_batch_size=sching_m['exp_batch_size'], nn_len=NN_len, save_dir=save_dir, save_suffix=save_suffix)

    def __repr__(self):
        return 'RLScher[learner= {}]'.format(self.learner)

    def save(self, step):
        """Checkpoint the learner at the given step."""
        return self.learner.save(step)

    def restore(self, step, save_suffix=None):
        """Restore the learner from a checkpoint; returns learner's status."""
        return self.learner.restore(step, save_suffix)

    def summarize(self):
        """Print the learned Q-values over a grid of representative states."""
        print("////////////////////////////////////////////////////")
        # derive a log-spaced grid of total-demand values from the sim config
        if 'totaldemand_rv' in self.sinfo_m:
            D = self.sinfo_m['totaldemand_rv']
            l, u = D.l_l, D.u_l
            i = u/10
        elif 'reqed_rv' in self.sinfo_m:
            R = self.sinfo_m['reqed_rv']
            L = self.sinfo_m['lifetime_rv']
            l = R.l_l*L.l_l
            u = 500*l
            i = u/10
        logl, logi, logu = math.log10(l), math.log10(i), math.log10(u)
        D_l = list(np.logspace(logl, logi, 5, endpoint=False)) + list(np.logspace(logi, logu, 5))
        if STATE_LEN == 1:
            for D in D_l:
                qa_l = self.learner.get_a_q_l(state_(D))
                print("D= {}, qa_l= {}".format(D, qa_l))
                blog(a=np.argmax(qa_l))
        elif STATE_LEN == 2:
            for Eload in [0.1, 0.5, 0.9]:
                for D in D_l:
                    qa_l = self.learner.get_a_q_l(state_(jtotaldemand=D, wload_l=[Eload]))
                    print("Eload= {}, D= {}, qa_l= {}".format(Eload, D, qa_l))
                    blog(a=np.argmax(qa_l))
        elif STATE_LEN == 3:
            for Eload in [0.1, 0.5, 0.9]:
                for k in [1, 3, 7]:
                    for lifetime in [20, 1000, 10000]:
                        qa_l = self.learner.get_a_q_l(state_(jk=k, jlifetime=lifetime, wload_l=[Eload]))
                        print("Eload= {}, k= {}, lifetime= {}; qa_l= {}".format(Eload, k, lifetime, qa_l))
                        blog(a=np.argmax(qa_l))
        print("----------------------------------------------------")

    def schedule(self, j, w_l, cluster):
        """Pick an expansion action; returns (state, action, workers) or (None, -1, None)."""
        w_l = self.mapper.worker_l(j, w_l)
        if len(w_l) < j.k:
            return None, -1, None
        # state is computed over the whole cluster's load, not just candidates
        s = state(j, [w.sched_load() for w in cluster.w_l], cluster)
        a = self.learner.get_random_action(s)
        j.n = int(j.k + a)
        if len(w_l) < j.n:
            return None, -1, None
        return s, a, w_l[:j.n]

    def train(self, nsteps):
        """Sequentially sample N trajectories per step and train on them."""
        for i in range(nsteps):
            alog(">> i= {}".format(i))
            n_t_s_l, n_t_a_l, n_t_r_l = np.zeros((self.N, self.T, self.s_len)), np.zeros((self.N, self.T, 1)), np.zeros((self.N, self.T, 1))
            for n in range(self.N):
                t_s_l, t_a_l, t_r_l, t_sl_l = sample_traj(self.sinfo_m, self)
                alog("n= {}, avg_a= {}, avg_r= {}, avg_sl= {}".format(n, np.mean(t_a_l), np.mean(t_r_l), np.mean(t_sl_l)))
                n_t_s_l[n], n_t_a_l[n], n_t_r_l[n] = t_s_l, t_a_l, t_r_l
            self.learner.train_w_mult_trajs(n_t_s_l, n_t_a_l, n_t_r_l)

    def train_multithreaded(self, nsteps):
        """Sample N trajectories concurrently per step and train on them.

        Skipped entirely when a checkpoint for ``nsteps`` can be restored.
        """
        if self.learner.restore(nsteps):
            log(WARNING, "learner.restore is a success, will not retrain.")
        else:
            tp = concurrent.futures.ThreadPoolExecutor(max_workers=100)
            for i in range(1, nsteps+1):
                alog(">> i= {}".format(i))
                n_t_s_l, n_t_a_l, n_t_r_l = np.zeros((self.N, self.T, self.s_len)), np.zeros((self.N, self.T, 1)), np.zeros((self.N, self.T, 1))
                future_n_m = {tp.submit(sample_traj, self.sinfo_m, self): n for n in range(self.N)}
                for future in concurrent.futures.as_completed(future_n_m):
                    n = future_n_m[future]
                    try:
                        t_s_l, t_a_l, t_r_l, t_sl_l = future.result()
                    except Exception as exc:
                        # BUG FIX: previously execution fell through after the
                        # log and used unbound/stale trajectory arrays; skip
                        # the failed trajectory instead.
                        log(ERROR, "exception;", exc=exc)
                        continue
                    alog("n= {}, avg_a= {}, avg_r= {}, avg_sl= {}".format(n, np.mean(t_a_l), np.mean(t_r_l), np.mean(t_sl_l)))
                    n_t_s_l[n], n_t_a_l[n], n_t_r_l[n] = t_s_l, t_a_l, t_r_l
                self.learner.train_w_mult_trajs(n_t_s_l, n_t_a_l, n_t_r_l)
if __name__ == '__main__':
    # simulation configuration: 2000 jobs over 10 workers of capacity 10
    sinfo_m = {
        'njob': 2000, 'nworker': 10, 'wcap': 10, # 10000
        'totaldemand_rv': TPareto(100, 10000, 1.1),
        'demandperslot_mean_rv': TPareto(0.1, 5, 1.1),
        'k_rv': DUniform(1, 1),
        'straggle_m': {
            'slowdown': lambda load: np.random.uniform(0.01, 0.1),
            'straggle_dur_rv': TPareto(10, 100, 1),
            'normal_dur_rv': TPareto(10, 100, 1) } }
    ar_ub = arrival_rate_upperbound(sinfo_m)
    # run at half of the stability upper bound
    sinfo_m['ar'] = 2/4*ar_ub
    sching_m = {'a': 1, 'N': 10}
    blog(sinfo_m=sinfo_m, sching_m=sching_m)
    # NOTE(review): RLScher.__init__ takes (sinfo_m, mapping_m, sching_m, ...)
    # but only two positional args are passed here, and sching_m has no
    # 'learner' key -- construction would fail as written; verify intent.
    scher = RLScher(sinfo_m, sching_m)
    # sinfo_m['max_exprate'] = max_exprate
    print("scher= {}".format(scher) )
    scher.train_multithreaded(40) # train(40)
    evaluate(sinfo_m, scher)
|
11497272
|
import logging
from itertools import chain
from contextlib import ExitStack
from typing import List, Optional, Type
from mangum.protocols import HTTPCycle, LifespanCycle
from mangum.handlers import ALB, HTTPGateway, APIGateway, LambdaAtEdge
from mangum.exceptions import ConfigurationError
from mangum.types import (
ASGI,
LifespanMode,
LambdaConfig,
LambdaEvent,
LambdaContext,
LambdaHandler,
)
logger = logging.getLogger("mangum")
HANDLERS: List[Type[LambdaHandler]] = [
ALB,
HTTPGateway,
APIGateway,
LambdaAtEdge,
]
class Mangum:
    """Adapter that lets an ASGI application serve AWS Lambda events."""

    def __init__(
        self,
        app: ASGI,
        lifespan: LifespanMode = "auto",
        api_gateway_base_path: str = "/",
        custom_handlers: Optional[List[Type[LambdaHandler]]] = None,
    ) -> None:
        # validate up front so a bad config fails at construction time
        if lifespan not in ("auto", "on", "off"):
            raise ConfigurationError(
                "Invalid argument supplied for `lifespan`. Choices are: auto|on|off"
            )
        self.app = app
        self.lifespan = lifespan
        self.api_gateway_base_path = api_gateway_base_path or "/"
        self.config = LambdaConfig(api_gateway_base_path=self.api_gateway_base_path)
        self.custom_handlers = custom_handlers or []

    def infer(self, event: LambdaEvent, context: LambdaContext) -> LambdaHandler:
        """Return the first handler (custom handlers first) that claims the event."""
        for candidate in chain(self.custom_handlers, HANDLERS):
            if candidate.infer(event, context, self.config):
                return candidate(event, context, self.config)
        raise RuntimeError(  # pragma: no cover
            "The adapter was unable to infer a handler to use for the event. This "
            "is likely related to how the Lambda function was invoked. (Are you "
            "testing locally? Make sure the request payload is valid for a "
            "supported handler.)"
        )

    def __call__(self, event: LambdaEvent, context: LambdaContext) -> dict:
        """Lambda entry point: run the ASGI app and return its Lambda response."""
        handler = self.infer(event, context)
        with ExitStack() as stack:
            # "auto"/"on" wrap the request in the ASGI lifespan protocol
            if self.lifespan in ("auto", "on"):
                stack.enter_context(LifespanCycle(self.app, self.lifespan))
            http_response = HTTPCycle(handler.scope, handler.body)(self.app)
            return handler(http_response)
        assert False, "unreachable"  # pragma: no cover
|
11497281
|
import unittest
from flow_py_sdk.cadence import Address, String, Int
from flow_py_sdk.tx import Tx, TxSignature, ProposalKey
class TestTx(unittest.TestCase):
    """Golden-value tests pinning the RLP encoding of Flow transactions.

    Each case varies one field of base_tx() and checks the hex of both the
    payload message and the full signed envelope against fixed expected
    strings, so any change to the encoder is caught as a diff.
    """

    def test_transaction_rlp_encoding_is_consistent(self):
        # (name, tx, expected payload hex, expected envelope hex)
        cases = [
            {
                "name": "Complete transaction",
                "tx": base_tx(),
                "payload": "f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001",
                "envelope": "f899f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Empty reference block",
                "tx": base_tx().with_reference_block_id(b"\0" * 32),
                "payload": "f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a000000000000000000000000000000000000000000000000000000000000000002a880000000000000001040a880000000000000001c9880000000000000001",
                "envelope": "f899f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a000000000000000000000000000000000000000000000000000000000000000002a880000000000000001040a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Zero gas limit",
                "tx": base_tx().with_gas_limit(0),
                "payload": "f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b80880000000000000001040a880000000000000001c9880000000000000001",
                "envelope": "f899f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b80880000000000000001040a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Empty proposal key ID",
                "tx": base_tx().with_proposal_key(
                    proposal_key=ProposalKey(
                        key_id=0,
                        key_address=Address.from_hex("01"),
                        key_sequence_number=10,
                    )
                ),
                "payload": "f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001800a880000000000000001c9880000000000000001",
                "envelope": "f899f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001800a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Empty sequence number",
                "tx": base_tx().with_proposal_key(
                    proposal_key=ProposalKey(
                        key_id=4,
                        key_address=Address.from_hex("01"),
                        key_sequence_number=0,
                    )
                ),
                "payload": "f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a8800000000000000010480880000000000000001c9880000000000000001",
                "envelope": "f899f872b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a8800000000000000010480880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Multiple authorizers",
                "tx": base_tx().add_authorizers(Address.from_hex("02")),
                "payload": "f87bb07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001d2880000000000000001880000000000000002",
                "envelope": "f8a2f87bb07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207dc0a0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001d2880000000000000001880000000000000002e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Single argument",
                "tx": base_tx().add_arguments(String("foo")),
                "payload": "f893b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207de1a07b2274797065223a22537472696e67222c2276616c7565223a22666f6f227d0aa0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001",
                "envelope": "f8baf893b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207de1a07b2274797065223a22537472696e67222c2276616c7565223a22666f6f227d0aa0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
            {
                "name": "Multiple arguments",
                "tx": base_tx().add_arguments(String("foo"), Int(42)),
                "payload": "f8b1b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207df83ea07b2274797065223a22537472696e67222c2276616c7565223a22666f6f227d0a9c7b2274797065223a22496e74222c2276616c7565223a223432227d0aa0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001",
                "envelope": "f8d8f8b1b07472616e73616374696f6e207b2065786563757465207b206c6f67282248656c6c6f2c20576f726c64212229207d207df83ea07b2274797065223a22537472696e67222c2276616c7565223a22666f6f227d0a9c7b2274797065223a22496e74222c2276616c7565223a223432227d0aa0f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b2a880000000000000001040a880000000000000001c9880000000000000001e4e38004a0f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162",
            },
        ]
        # each case runs as its own subtest so one failure doesn't mask others
        for case in cases:
            with self.subTest(msg=case["name"]):
                tx = case["tx"]
                payload = tx.payload_message().hex()
                envelope = tx.envelope_message().hex()
                self.assertEqual(case["payload"], payload)
                self.assertEqual(case["envelope"], envelope)
def base_tx() -> Tx:
    """Return the canonical transaction all encoding test cases start from.

    A fresh Tx is built per call so cases can mutate it independently:
    a hello-world script, fixed reference block, proposal key (id 4,
    address 0x01, sequence 10), payer/authorizer 0x01, gas limit 42, and
    one payload signature with a fixed signature value.
    """
    # fixed signature bytes shared by every case's expected envelope
    sig = bytes.fromhex(
        "f7225388c1d69d57e6251c9fda50cbbf9e05131e5adb81e5aa0422402f048162"
    )
    tx = (
        Tx(
            code="""transaction { execute { log("Hello, World!") } }""",
            reference_block_id=bytes.fromhex(
                "f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b"
            ),
            proposal_key=ProposalKey(
                key_id=4, key_address=Address.from_hex("01"), key_sequence_number=10
            ),
            payer=Address.from_hex("01"),
        )
        .with_gas_limit(42)
        .add_authorizers(Address.from_hex("01"))
    )
    signature = TxSignature(
        address=Address.from_hex("01"), key_id=4, signer_index=0, signature=sig
    )
    tx.payload_signatures.append(signature)
    return tx


if __name__ == "__main__":
    unittest.main()
|
11497297
|
import re
from decimal import Decimal
from urllib.parse import urlencode
import aiohttp
from ...box import box
from ...command import argument
from ...event import Message
from ...utils import json
# Parses "<amount><currency>" optionally followed by "to|->|= <currency>",
# e.g. "100엔", "100 JPY to USD".  Groups: (amount, base, target?).
QUERY_RE = re.compile(
    r'^(\d+(?:\.\d+)?)\s*(\S+)(?:\s+(?:to|->|=)\s+(\S+))?$', re.IGNORECASE
)
# Maps common symbols / Korean currency names to ISO 4217 codes.
SHORTCUT_TABLE: dict[str, str] = {
    '$': 'USD',
    '달러': 'USD',
    '\\': 'KRW',
    '원': 'KRW',
    '엔': 'JPY',
    '유로': 'EUR',
}
class ExchangeError(Exception):
    """Base exception for exchange-rate lookups."""


class SameBaseAndTo(ExchangeError):
    """Raised when the base and target currencies are identical."""


class WrongUnit(ExchangeError):
    """Raised when the API does not recognize the currency code."""
async def get_exchange_rate(base: str, to: str) -> dict:
    """Fetch the rate for converting *base* into *to* from api.manana.kr.

    Raises SameBaseAndTo when both currencies are equal and WrongUnit when
    the API does not return a list (i.e. the currency code is unknown).
    """
    if base == to:
        raise SameBaseAndTo()
    # NOTE: the manana API's `base` parameter is the quote currency, hence
    # the apparent swap of `base`/`to` here.
    params = urlencode({'base': to, 'code': base})
    url = 'https://api.manana.kr/exchange/rate.json?{}'.format(params)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as res:
            payload = await res.json(loads=json.loads)
    if not isinstance(payload, list):
        raise WrongUnit()
    return payload[0]
@box.command('환율', ['exchange'])
@argument('query', nargs=-1, concat=True)
async def exchange(bot, event: Message, query: str):
    """
    환전시 얼마가 되는지 계산.
    `{PREFIX}환율 100엔` (100 JPY가 KRW로 얼마인지 계산)
    `{PREFIX}환율 100 JPY to USD` (100 JPY가 USD로 얼마인지 계산)
    """
    # parse "<amount><currency> [to <currency>]"; target defaults to KRW
    match = QUERY_RE.match(query)
    if match:
        quantity = Decimal(match.group(1))
        base = SHORTCUT_TABLE.get(match.group(2), match.group(2))
        # group(3) may be None, in which case `or 'KRW'` applies the default
        to = SHORTCUT_TABLE.get(match.group(3), match.group(3)) or 'KRW'
        data = None
        error = None
        try:
            data = await get_exchange_rate(base, to)
        except SameBaseAndTo:
            error = '변환하려는 두 화폐가 같은 단위에요!'
        except WrongUnit:
            error = '지원되는 통화기호가 아니에요!'
        if error:
            await bot.say(event.channel, error)
            return
        if data:
            date = data['date']
            rate = Decimal(data['rate'])
            result = quantity * rate
            await bot.say(
                event.channel,
                f'{quantity} {base} == {result:.2f} {to} ({date})',
            )
        else:
            # get_exchange_rate returned without data and without raising
            await bot.say(event.channel, '알 수 없는 에러가 발생했어요! 아빠에게 문의해주세요!')
    else:
        await bot.say(event.channel, '주문을 이해하는데에 실패했어요!')
|
11497303
|
from setuptools import setup, find_packages
import iarm
# Prefer a reST long_description (what PyPI renders best); fall back to the
# raw Markdown when pypandoc is unavailable or the conversion fails.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
    # BUG FIX: str.replace() returns a new string -- the result was
    # previously discarded, so '\r' was never actually stripped.
    long_description = long_description.replace('\r', '')  # PyPI dislikes '\r\n', only '\n'
except Exception:
    with open('README.md') as readme:
        long_description = readme.read()

setup(name=iarm.__title__,
      version=iarm.__version__,
      description="An interpreter for the ARM instruction set and an accompanying Jupyter kernel",
      long_description=long_description,
      url="https://github.com/DeepHorizons/iarm",
      author="<NAME>",
      author_email="<EMAIL>",
      license='MIT',
      packages=find_packages('.'),
      install_requires=[
          'ipykernel',
          'jupyter-client',
          'ipython',
      ],
      zip_safe=True)
|
11497319
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from collections import namedtuple
from typing import Optional
import numpy as np
import pyspiel
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms.best_response import BestResponsePolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.annotations import override
from grl.envs.poker_multi_agent_env import OBS_SHAPES, VALID_ACTIONS_SHAPES
from grl.rl_apps.psro.poker_utils import openspiel_policy_from_nonlstm_rllib_policy, \
tabular_policies_from_weighted_policies, JointPlayerPolicy
# Used to return tuple actions as a list of batches per tuple element
TupleActions = namedtuple("TupleActions", ["batches"])
OBSERVATION = 'observation'
VALID_ACTIONS_MASK = 'valid_actions_mask'
logger = logging.getLogger(__name__)
def softmax(x, axis=None):
    """Compute softmax values for the scores in x.

    Uses the max-subtraction trick for numerical stability.

    Parameters
    ----------
    x : array-like
        Scores.
    axis : int or None, optional
        Axis along which to normalize.  The default (None) normalizes over
        the whole array, preserving the original behavior; pass an axis to
        softmax each row/column of a batch independently.

    Returns
    -------
    np.ndarray
        Non-negative values summing to 1 over `axis`.
    """
    x = np.asarray(x)
    # keepdims makes the subtraction/division broadcast for any axis choice
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / e_x.sum(axis=axis, keepdims=True)
def policy_to_dict(player_policy,
                   game,
                   all_states=None,
                   state_to_information_state=None,
                   player_id: Optional = None):
    """Converts a Policy instance into a tabular policy represented as a dict.

    This is compatible with the C++ TabularExploitability code (i.e.
    pyspiel.exploitability, pyspiel.TabularBestResponse, etc.).

    While you do not have to pass the all_states and state_to_information_state
    arguments, creating them outside of this function will speed your code up
    dramatically.

    Args:
      player_policy: The policy you want to convert to a dict.
      game: The game the policy is for.
      all_states: The result of calling get_all_states.get_all_states. Can be
        cached for improved performance.
      state_to_information_state: A dict mapping str(state) to
        the stringified information-state tensor for every state in the game.
        Can be cached for improved performance.
      player_id: If given, only include states where this player is to act.

    Returns:
      A dictionary version of player_policy that can be passed to the C++
      TabularBestResponse, Exploitability, and BestResponse functions/classes.
    """
    if all_states is None:
        all_states = get_all_states.get_all_states(
            game,
            depth_limit=-1,
            include_terminals=False,
            include_chance_states=False)
    # BUG FIX: this map was previously built only when all_states was None,
    # so passing a cached all_states without a cached
    # state_to_information_state crashed in the loop below.
    if state_to_information_state is None:
        state_to_information_state = {
            state: str(
                np.asarray(all_states[state].information_state_tensor(), dtype=np.float32).tolist()) for
            state in all_states
        }
    tabular_policy = dict()
    for state in all_states:
        # optionally restrict to states where player_id acts
        if player_id is not None and all_states[state].current_player() != player_id:
            continue
        information_state = state_to_information_state[state]
        tabular_policy[information_state] = list(
            player_policy.action_probabilities(all_states[state]).items())
    return tabular_policy
class PokerOracleBestResponsePolicy(Policy):
    """RLlib Policy that acts from an exact OpenSpiel best-response table.

    `compute_best_response` builds a tabular best response against a given
    RLlib policy (or a weighted mixture of policies); `compute_actions`
    then samples actions from that table, keyed by the information-state
    tensor extracted from the observation.  The learner-related Policy
    methods are intentionally no-ops since this policy is not trained.
    """

    @override(Policy)
    def __init__(self, observation_space, action_space, config):
        Policy.__init__(self, observation_space=observation_space, action_space=action_space, config=config)
        env_config = config['env_config']
        # e.g. "kuhn_poker" / "leduc_poker"; used to load the OpenSpiel game
        self.game_version = env_config['version']
        self.game = pyspiel.load_game(self.game_version)
        # filled in later via set_policy_dict / compute_best_response
        self.policy_dict = None
        # information state has player as onehot in both kuhn and leduc
        # from c++ code:
        # // Mark who I am.
        # (*values)[player] = 1;
        # exit()
        # print("LOADED CFR POLICY EXPLOITABILITY:", exploitability.exploitability(game, self.tabular_policy))

    def compute_best_response(self, policy_to_exploit, br_only_as_player_id=None, policy_mixture_dict=None,
                              set_policy_weights_fn=None):
        """Return a tabular best response (dict) against the given policy.

        When policy_mixture_dict is given, each entry's weights are loaded
        via set_policy_weights_fn and the responses are computed against the
        weighted tabular average of those policies.
        """
        if policy_mixture_dict is None:
            openspiel_policy_to_exploit = openspiel_policy_from_nonlstm_rllib_policy(openspiel_game=self.game,
                                                                                     rllib_policy=policy_to_exploit)
        else:
            if set_policy_weights_fn is None:
                raise ValueError(
                    "If policy_mixture_dict is passed a value, a set_policy_weights_fn must be passed as well.")

            def policy_iterable():
                # lazily convert each mixture component to an OpenSpiel policy,
                # loading its weights into policy_to_exploit first
                for policy_spec in policy_mixture_dict.keys():
                    set_policy_weights_fn(policy_spec)
                    print("making openspeil policy")
                    single_openspiel_policy = openspiel_policy_from_nonlstm_rllib_policy(openspiel_game=self.game,
                                                                                         rllib_policy=policy_to_exploit)
                    print("made policy")
                    yield single_openspiel_policy

            openspiel_policy_to_exploit = JointPlayerPolicy(
                game=self.game, policies=tabular_policies_from_weighted_policies(game=self.game,
                                                                                 policy_iterable=policy_iterable(),
                                                                                 weights=policy_mixture_dict.values())
            )
            print("made policy average")
        # best-respond as both players unless a single player id is requested
        br_player_ids = [br_only_as_player_id] if br_only_as_player_id is not None else [0, 1]
        br = {player_id: BestResponsePolicy(game=self.game, player_id=player_id,
                                            policy=openspiel_policy_to_exploit) for player_id in br_player_ids}
        policy_dict = {}
        for player_id, br_policy in br.items():
            policy_dict.update(policy_to_dict(
                player_policy=br_policy, game=self.game, player_id=player_id))
        return policy_dict

    def set_policy_dict(self, policy_dict):
        """Install the tabular policy used by compute_actions."""
        self.policy_dict = policy_dict

    def _get_action_probs_for_infoset(self, infoset):
        """Dense action-probability vector for one information-state tensor."""
        action_probs = np.zeros(shape=(self.action_space.n,), dtype=np.float32)
        # keys are stringified float32 lists, matching policy_to_dict's format
        policy_lookup_val = self.policy_dict[str(np.asarray(infoset, dtype=np.float32).tolist())]
        for action, prob in policy_lookup_val:
            action_probs[action] = prob
        return action_probs

    @override(Policy)
    def compute_actions(self,
                        obs_batch,
                        state_batches,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        """Compute actions for the current policy.

        Actions are sampled from the stored best-response table; the
        information state is the leading slice of each observation.

        Arguments:
            obs_batch (np.ndarray): batch of observations
            state_batches (list): list of RNN state input batches, if any
            prev_action_batch (np.ndarray): batch of previous action values
            prev_reward_batch (np.ndarray): batch of previous rewards
            info_batch (info): batch of info objects
            episodes (list): MultiAgentEpisode for each obs in obs_batch.
                This provides access to all of the internal episode state,
                which may be useful for model-based or multiagent algorithms.
            kwargs: forward compatibility placeholder

        Returns:
            actions (np.ndarray): batch of output actions, with shape like
                [BATCH_SIZE, ACTION_SHAPE].
            state_outs (list): list of RNN state output batches, if any, with
                shape like [STATE_SIZE, BATCH_SIZE].
            info (dict): dictionary of extra feature batches, if any, with
                shape like {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
        """
        # the observation is [info_state ++ extras]; keep only the info state
        info_state_length = OBS_SHAPES[self.game_version][0]
        info_states = [obs[:info_state_length] for obs in obs_batch]
        actions = []
        policy_probs = []
        for info_state in zip(info_states):
            # set_policy_dict must have been called before acting
            assert self.policy_dict is not None
            # zip() yields 1-tuples here, hence info_state[0]
            action_probs = self._get_action_probs_for_infoset(info_state[0])
            action = np.random.choice(range(self.action_space.n), p=action_probs)
            actions.append(action)
            policy_probs.append(action_probs)
        return actions, [], {}

    def compute_gradients(self, postprocessed_batch):
        """Computes gradients against a batch of experiences.

        Not applicable: this policy is tabular and never trained.
        """
        pass

    def apply_gradients(self, gradients):
        """Applies previously computed gradients.  No-op for this policy."""
        pass

    def get_weights(self):
        """Returns model weights.

        Returns:
            weights (obj): None -- the table is set via set_policy_dict.
        """
        return None

    def set_weights(self, weights):
        """Sets model weights.  No-op for this policy.

        Arguments:
            weights (obj): Serializable copy or view of model weights
        """
        pass

    def get_initial_state(self):
        """Returns initial RNN state for the current policy (none)."""
        return []

    def get_state(self):
        """Saves all local state.

        Returns:
            state (obj): Serialized local state.
        """
        return self.get_weights()

    def set_state(self, state):
        """Restores all local state.

        Arguments:
            state (obj): Serialized local state.
        """
        self.set_weights(state)

    def on_global_var_update(self, global_vars):
        """Called on an update to global vars.  No-op for this policy.

        Arguments:
            global_vars (dict): Global variables broadcast from the driver.
        """
        pass

    def export_model(self, export_dir):
        """Export Policy to local directory for serving.  Unsupported.

        Arguments:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError

    def export_checkpoint(self, export_dir):
        """Export Policy checkpoint to local directory.  Unsupported.

        Argument:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError
|
11497332
|
from bibliopixel.animation import animation
from bibliopixel.colors import COLORS
"""
This is an example Animation class which has a single field -
`color` with the default value `COLORS.green`.
You can edit this as you like, add and remove fields and
write Python code in general.
"""
class Example26(animation.Animation):
    """Chase animation: a single pixel of `color` walks along the strip."""

    def __init__(
            self, *args,
            # The fields for your class go here!
            color=COLORS.green,
            # End of the fields for your class
            **kwds):
        super().__init__(*args, **kwds)
        self.color = color

    def step(self, amt=1):
        position = self.cur_step % len(self.color_list)
        # Erase the trailing pixel, then light the current one.
        self.color_list[position - 1] = COLORS.black
        self.color_list[position] = self.color

    # pre_run is called right before the animation starts running.
    def pre_run(self):
        super().pre_run()

    def cleanup(self, clean_layout=True):
        super().cleanup(clean_layout)
class Example28(Example26):
    """Like Example26, but draws a yellow trailer two pixels behind."""

    def step(self, amt=1):
        position = self.cur_step % len(self.color_list)
        # Same writes, same order, as Example26 plus the trailer pixels --
        # the order matters when indices wrap on short strips.
        self.color_list[position - 1] = COLORS.black
        self.color_list[position] = self.color
        self.color_list[position - 2] = COLORS.yellow
        self.color_list[position - 3] = COLORS.black
|
11497401
|
import numpy as np
import pylab
from scipy import sparse
import regreg.api as rr
# Synthetic signal: noise with two raised plateaus to recover.
Y = np.random.standard_normal(500); Y[100:150] += 7; Y[250:300] += 14
loss = rr.quadratic.shift(-Y, coef=0.5)
sparsity = rr.l1norm(len(Y), 1.4)
# TODO should make a module to compute typical Ds
# First-difference operator: penalizing |D x|_1 gives the fused-lasso term.
D = sparse.csr_matrix((np.identity(500) + np.diag([-1]*499,k=1))[:-1])
fused = rr.l1norm.linear(D, 25.5)
# Solve the Lagrange-form sparse fused lasso.
problem = rr.container(loss, sparsity, fused)
solver = rr.FISTA(problem)
solver.fit(max_its=100)
solution = solver.composite.coefs
# Convert the Lagrange solution's penalty values into bound constraints.
delta1 = np.fabs(D * solution).sum()
delta2 = np.fabs(solution).sum()
fused_constraint = rr.l1norm.linear(D, bound=delta1)
sparsity_constraint = rr.l1norm(500, bound=delta2)
constrained_problem = rr.container(loss, fused_constraint, sparsity_constraint)
constrained_solver = rr.FISTA(constrained_problem)
constrained_solver.composite.lipschitz = 1.01
vals = constrained_solver.fit(max_its=10, tol=1e-06, backtrack=False, monotonicity_restart=False)
constrained_solution = constrained_solver.composite.coefs
# Same constrained problem, but with the fused constraint smoothed.
fused_constraint = rr.l1norm.linear(D, bound=delta1)
smoothed_fused_constraint = rr.smoothed_atom(fused_constraint, epsilon=1e-2)
smoothed_constrained_problem = rr.container(loss, smoothed_fused_constraint, sparsity_constraint)
smoothed_constrained_solver = rr.FISTA(smoothed_constrained_problem)
vals = smoothed_constrained_solver.fit(tol=1e-06)
smoothed_constrained_solution = smoothed_constrained_solver.composite.coefs
# Plot the data and the three solutions for comparison.
#pylab.clf()
pylab.scatter(np.arange(Y.shape[0]), Y,c='red', label=r'$Y$')
pylab.plot(solution, c='yellow', linewidth=5, label='Lagrange')
pylab.plot(constrained_solution, c='green', linewidth=3, label='Constrained')
pylab.plot(smoothed_constrained_solution, c='black', linewidth=1, label='Smoothed')
pylab.legend()
#pylab.plot(conjugate_coefs, c='black', linewidth=3)
#pylab.plot(conjugate_coefs_gen, c='gray', linewidth=1)
|
11497410
|
import uvicore
from uvicore.auth.models.role import Role
from uvicore.support.dumper import dump, dd
from uvicore.auth.models.permission import Permission
@uvicore.seeder()
async def seed():
    """Seed the roles table with the default roles.

    Looks up existing permissions keyed by name and links the 'admin'
    permission to the Administrator role via insert_with_relations.
    """
    uvicore.log.item('Seeding table roles')

    # Get all permissions, keyed by name for O(1) lookup below.
    perms = await Permission.query().key_by('name').get()

    default_roles = [
        {
            'name': 'Administrator',
            'permissions': [perms.get('admin')],
        },
        {'name': 'User'},
        {'name': 'Anonymous'},
    ]
    await Role.insert_with_relations(default_roles)
|
11497432
|
class SomeClass:
    """Holds class-level fixtures initialized once via setUpClass."""

    # Declared at class level; values are assigned in setUpClass.
    x: int
    s: str
    z: str
    w = 2  # plain class attribute with an immediate default

    @classmethod
    def setUpClass(cls):
        """Populate the shared class-level state."""
        cls.x = 10
        cls.s = "hello"
        cls.z = "bye"

    def test_something(self):
        """Print the greeting built from the shared fixtures."""
        print(f"{self.s}, world!")
        print(self.z)

    def test_something_else(self):
        """Sanity-check the default class attribute."""
        assert self.w != 1
|
11497470
|
from datetime import datetime, date
from marqeta.response_models.text_value import TextValue
from marqeta.response_models.text_value import TextValue
from marqeta.response_models import datetime_object
import json
import re
class Text(object):
    """Thin wrapper over a raw JSON response exposing name lines as TextValue."""

    def __init__(self, json_response):
        # Keep the raw dict; properties read from it lazily.
        self.json_response = json_response

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    @staticmethod
    def json_serial(o):
        """Serialize datetime/date objects to their string form for json.dumps."""
        if isinstance(o, (datetime, date)):
            return str(o)

    @property
    def name_line_1(self):
        # Returns None implicitly when the key is absent.
        if 'name_line_1' in self.json_response:
            return TextValue(self.json_response['name_line_1'])

    @property
    def name_line_2(self):
        # Returns None implicitly when the key is absent.
        if 'name_line_2' in self.json_response:
            return TextValue(self.json_response['name_line_2'])

    def __repr__(self):
        return '<Marqeta.response_models.text.Text>' + self.__str__()
|
11497492
|
from fastinference.models.Tree import Tree
def optimize(model, **kwargs):
    """Performs swap optimization. Swaps two child nodes if the probability to visit the left tree is smaller than the probability to visit the right tree. This way, the probability to visit the left tree is maximized which in-turn improves the branch-prediction during pipelining in the CPU.

    You can activate this optimization by simply passing :code:`"swap"` to the optimizer, e.g.

    .. code-block::

        loaded_model = fastinference.Loader.model_from_file("/my/nice/tree.json")
        loaded_model.optimize("swap", None)

    Reference:
        Buschjäger, Sebastian, et al. "Realization of random forest for real-time evaluation through tree framing." 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 2018.

    Args:
        model (Tree): The tree model.

    Returns:
        Tree: The tree model with swapped nodes.
    """
    # Breadth-first traversal starting at the root.
    remaining_nodes = [model.head]
    while len(remaining_nodes) > 0:
        cur_node = remaining_nodes.pop(0)
        if cur_node.probLeft < cur_node.probRight:
            # Make the more likely branch the left child so the CPU's
            # branch predictor favors the common path.
            left = cur_node.leftChild
            right = cur_node.rightChild
            cur_node.leftChild = right
            cur_node.rightChild = left
        # BUG FIX: inner nodes are the ones with prediction None — they have
        # children to traverse. The original checked `is not None`, which
        # stopped the traversal at the root so no node below it was ever
        # considered for swapping.
        if cur_node.prediction is None:
            remaining_nodes.append(cur_node.leftChild)
            remaining_nodes.append(cur_node.rightChild)
    return model
|
11497522
|
import math
import random
import numpy as np
import pygame
import pygame.freetype
from gamescript import commonscript
from gamescript.tactical import rangeattack, longscript
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
from pygame.transform import scale
infinity = float("inf")  # numeric sentinel for "no limit" comparisons
rotationxy = commonscript.rotationxy  # module-level alias for the shared rotation helper
def create_troop_stat(self, stat, starthp, startstamina, unitscale):
    """Populate every combat statistic of this subunit from its preset stat row.

    Reads the troop preset `stat` (a row from stat_list.troop_list) plus the
    grade, weapon, armour, mount and trait tables, and assigns all base and
    derived attributes on `self`. Bound as a method on Subunit.

    Args:
        stat: preset row indexed via the *_header lookup dicts.
        starthp: starting troop number as a percentage (100 = full).
        startstamina: starting stamina as a percentage (100 = full).
        unitscale: per-team troop-number multipliers, indexed by team - 1.
    """
    # Column-name -> index lookup tables for each stat sheet.
    stat_header = self.stat_list.troop_list_header
    weapon_header = self.weapon_list.weapon_list_header
    armour_header = self.armour_list.armour_list_header
    grade_header = self.stat_list.grade_list_header
    mount_header = self.stat_list.mount_list_header
    mount_grade_header = self.stat_list.mount_grade_list_header
    trait_header = self.stat_list.trait_list_header
    self.name = stat[0]  # name according to the preset
    self.grade = stat[stat_header["Grade"]]  # training level/class grade
    self.race = stat[stat_header["Race"]]  # creature race
    self.trait = stat[stat_header["Trait"]]  # trait list from preset
    self.trait = self.trait + self.stat_list.grade_list[self.grade][grade_header["Trait"]]  # add trait from grade
    skill = stat[stat_header["Skill"]]  # skill list according to the preset
    self.skill_cooldown = {}
    self.cost = stat[stat_header["Cost"]]
    gradestat = self.stat_list.grade_list[self.grade]
    self.base_attack = stat[stat_header["Melee Attack"]] + \
                       gradestat[grade_header["Melee Attack Bonus"]]  # base melee attack with grade bonus
    self.base_meleedef = stat[stat_header["Melee Defence"]] + \
                         gradestat[grade_header["Defence Bonus"]]  # base melee defence with grade bonus
    self.base_rangedef = stat[stat_header["Ranged Defence"]] + \
                         gradestat[grade_header["Defence Bonus"]]  # base range defence with grade bonus
    self.armourgear = stat[stat_header["Armour"]]  # armour equipement
    self.base_armour = self.armour_list.armour_list[self.armourgear[0]][1] \
                       * self.armour_list.quality[self.armourgear[1]]  # armour stat is calculate from based armour * quality
    self.base_accuracy = stat[stat_header["Accuracy"]] + gradestat[grade_header["Accuracy Bonus"]]
    self.base_sight = stat[stat_header["Sight"]]  # base sight range
    self.magazine_left = stat[stat_header["Ammunition"]]  # amount of ammunition
    self.base_reload = stat[stat_header["Reload"]] + gradestat[grade_header["Reload Bonus"]]
    self.base_charge = stat[stat_header["Charge"]]
    self.base_chargedef = 50  # All infantry subunit has default 50 charge defence
    self.chargeskill = stat[stat_header["Charge Skill"]]  # For easier reference to check what charge skill this subunit has
    self.skill = [self.chargeskill] + skill  # Add charge skill as first item in the list
    self.troop_health = stat[stat_header["Health"]] * gradestat[grade_header["Health Effect"]]  # Health of each troop
    self.stamina = stat[stat_header["Stamina"]] * gradestat[grade_header["Stamina Effect"]] * (startstamina / 100)  # starting stamina with grade
    self.mana = stat[stat_header["Mana"]]  # Resource for magic skill
    # vv Weapon stat
    self.primary_main_weapon = stat[stat_header["Primary Main Weapon"]]
    self.primary_sub_weapon = stat[stat_header["Primary Sub Weapon"]]
    self.secondary_main_weapon = stat[stat_header["Secondary Main Weapon"]]
    self.secondary_sub_weapon = stat[stat_header["Secondary Sub Weapon"]]
    self.melee_dmg = [0, 0]  # [min, max] melee damage
    self.melee_penetrate = 0
    self.range_dmg = [0, 0]  # [min, max] ranged damage
    self.range_penetrate = 0
    self.meleespeed = 0
    self.magazine_size = 0
    weapon_reload = 0
    base_range = []
    arrowspeed = []
    # vvv Combine weapon stat
    # Later weapon slots contribute progressively less via the / (index + 1) divisor.
    self.weight = 0
    for index, weapon in enumerate([self.primary_main_weapon, self.primary_sub_weapon, self.secondary_main_weapon, self.secondary_sub_weapon]):
        if self.weapon_list.weapon_list[weapon[0]][weapon_header["Range"]] == 0:  # melee weapon if range 0
            self.melee_dmg[0] += self.weapon_list.weapon_list[weapon[0]][weapon_header["Minimum Damage"]] * \
                                 self.weapon_list.quality[weapon[1]] / (index + 1)
            self.melee_dmg[1] += self.weapon_list.weapon_list[weapon[0]][weapon_header["Maximum Damage"]] * \
                                 self.weapon_list.quality[weapon[1]] / (index + 1)
            self.melee_penetrate += self.weapon_list.weapon_list[weapon[0]][weapon_header["Armour Penetration"]] * \
                                    self.weapon_list.quality[weapon[1]] / (index + 1)
            self.meleespeed += self.weapon_list.weapon_list[weapon[0]][weapon_header["Speed"]] / (index + 1)
        else:
            self.range_dmg[0] += self.weapon_list.weapon_list[weapon[0]][weapon_header["Minimum Damage"]] * \
                                 self.weapon_list.quality[weapon[1]]
            self.range_dmg[1] += self.weapon_list.weapon_list[weapon[0]][weapon_header["Maximum Damage"]] * \
                                 self.weapon_list.quality[weapon[1]]
            self.range_penetrate += self.weapon_list.weapon_list[weapon[0]][weapon_header["Armour Penetration"]] * \
                                    self.weapon_list.quality[weapon[1]] / (index + 1)
            self.magazine_size += self.weapon_list.weapon_list[weapon[0]][
                weapon_header["Magazine"]]  # can shoot how many time before have to reload
            weapon_reload += self.weapon_list.weapon_list[weapon[0]][weapon_header["Speed"]] * (index + 1)
            base_range.append(self.weapon_list.weapon_list[weapon[0]][weapon_header["Range"]] * self.weapon_list.quality[weapon[1]])
            arrowspeed.append(self.weapon_list.weapon_list[weapon[0]][weapon_header["Travel Speed"]])  # travel speed of range attack
        self.base_meleedef += self.weapon_list.weapon_list[weapon[0]][weapon_header["Defense"]] / (index + 1)
        self.base_rangedef += self.weapon_list.weapon_list[weapon[0]][weapon_header["Defense"]] / (index + 1)
        self.skill += self.weapon_list.weapon_list[weapon[0]][weapon_header['Skill']]
        self.trait += self.weapon_list.weapon_list[weapon[0]][weapon_header['Trait']]
        self.weight += self.weapon_list.weapon_list[weapon[0]][weapon_header["Weight"]]
    self.meleespeed = int(self.meleespeed)
    self.skill = {x: self.stat_list.skill_list[x].copy() for x in self.skill if
                  x != 0 and x in self.stat_list.skill_list}  # grab skill stat into dict
    if base_range != []:
        self.base_range = np.mean(base_range)  # use average range
    else:
        self.base_range = 0
    if arrowspeed != []:
        self.arrowspeed = np.mean(arrowspeed)  # use average range
    else:
        self.arrowspeed = 0
    # ^^^ End combine
    if self.melee_penetrate < 0:
        self.melee_penetrate = 0  # melee penetration cannot be lower than 0
    if self.range_penetrate < 0:
        self.range_penetrate = 0  # range penetration cannot be lower than 0
    # ^^ End weapon stat
    self.base_morale = stat[stat_header["Morale"]] + gradestat[grade_header["Morale Bonus"]]  # morale with grade bonus
    self.base_discipline = stat[stat_header["Discipline"]] + gradestat[grade_header["Discipline Bonus"]]  # discipline with grade bonus
    self.mental = stat[stat_header["Mental"]] + gradestat[
        grade_header["Mental Bonus"]]  # mental resistance from morale melee_dmg and mental status effect
    self.troop_number = stat[stat_header["Troop"]] * unitscale[
        self.team - 1] * starthp / 100  # number of starting troop, team -1 to become list index
    self.base_speed = 50  # All infantry has base speed at 50
    self.subunit_type = stat[stat_header["Troop Class"]] - 1  # 0 is melee infantry and 1 is range for command buff
    self.featuremod = 1  # the starting column in unit_terrainbonus of infantry
    self.authority = 100  # default start at 100
    # vv Mount stat
    self.mount = self.stat_list.mount_list[stat[stat_header["Mount"]][0]]  # mount this subunit use
    self.mountgrade = self.stat_list.mount_grade_list[stat[stat_header["Mount"]][1]]
    self.mountarmour = self.stat_list.mount_armour_list[stat[stat_header["Mount"]][2]]
    if stat[stat_header["Mount"]][0] != 1:  # have mount, add mount stat with its grade to subunit stat
        self.base_chargedef = 25  # charge defence only 25 for cav
        self.base_speed = (self.mount[mount_header['Speed']] + self.mountgrade[mount_grade_header['Speed Bonus']])  # use mount base speed instead
        self.troop_health += (self.mount[mount_header['Health Bonus']] * self.mountgrade[mount_grade_header['Health Effect']]) + \
                             self.mountarmour[1]  # Add mount health to the troop health
        self.base_charge += (self.mount[mount_header['Charge Bonus']] +
                             self.mountgrade[mount_grade_header['Charge Bonus']])  # Add charge power of mount to troop
        self.base_morale += self.mountgrade[mount_grade_header['Morale Bonus']]
        self.base_discipline += self.mountgrade[mount_grade_header['Discipline Bonus']]
        self.stamina += self.mount[mount_header['Stamina Bonus']]
        self.trait += self.mount[mount_header['Trait']]  # Apply mount trait to subunit
        self.subunit_type = 2  # If subunit has mount, count as cav for command buff
        self.featuremod = 4  # the starting column in unit_terrainbonus of cavalry
    # ^^ End mount stat
    # v Weight calculation
    self.weight += self.armour_list.armour_list[self.armourgear[0]][armour_header["Weight"]] + \
                   self.mountarmour[2]  # Weight from both melee and range weapon and armour
    if self.subunit_type == 2:  # cavalry has half weight penalty
        self.weight = self.weight / 2
    # ^ End weight cal
    self.trait += self.armour_list.armour_list[self.armourgear[0]][armour_header["Trait"]]  # Apply armour trait to subunit
    self.base_speed = (self.base_speed * ((100 - self.weight) / 100)) + gradestat[
        grade_header["Speed Bonus"]]  # finalise base speed with weight and grade bonus
    self.size = stat[stat_header["Size"]]
    self.description = stat[-1]  # subunit description for inspect ui
    # if self.hidden
    # vv Elemental stat
    self.base_elem_melee = 0  # start with physical element for melee weapon
    self.base_elem_range = 0  # start with physical for range weapon
    self.elem_count = [0, 0, 0, 0, 0]  # Elemental threshold count in this order fire,water,air,earth,poison
    self.temp_count = 0  # Temperature threshold count
    fire_res = 0  # resistance to fire, will be combine into list
    water_res = 0  # resistance to water, will be combine into list
    air_res = 0  # resistance to air, will be combine into list
    earth_res = 0  # resistance to earth, will be combine into list
    self.magic_res = 0  # Resistance to any magic
    self.heat_res = 0  # Resistance to heat temperature
    self.cold_res = 0  # Resistance to cold temperature
    poison_res = 0  # resistance to poison, will be combine into list
    # ^^ End elemental
    self.reload_time = 0  # Unit can only refill magazine when reload_time is equal or more than reload stat
    self.crit_effect = 1  # critical extra modifier
    self.front_dmg_effect = 1  # Some skill affect only frontal combat melee_dmg
    self.side_dmg_effect = 1  # Some skill affect melee_dmg for side combat as well (AOE)
    self.corner_atk = False  # Check if subunit can attack corner enemy or not
    self.flankbonus = 1  # Combat bonus when flanking
    self.base_auth_penalty = 0.1  # penalty to authority when bad event happen
    self.bonus_morale_dmg = 0  # extra morale melee_dmg
    self.bonus_stamina_dmg = 0  # extra stamina melee_dmg
    self.auth_penalty = 0.1  # authority penalty for certain activities/order
    self.base_hpregen = 0  # hp regeneration modifier, will not resurrect dead troop by default
    self.base_staminaregen = 2  # stamina regeneration modifier
    self.moraleregen = 2  # morale regeneration modifier
    self.status_effect = {}  # list of current status effect
    self.skill_effect = {}  # list of activate skill effect
    self.base_inflictstatus = {}  # list of status that this subunit will inflict to enemy when attack
    self.specialstatus = []
    # vv Set up trait variable (boolean flags toggled by trait ids below)
    self.arcshot = False
    self.anti_inf = False
    self.anti_cav = False
    self.shootmove = False
    self.agileaim = False
    self.no_range_penal = False
    self.long_range_acc = False
    self.ignore_chargedef = False
    self.ignore_def = False
    self.fulldef = False
    self.temp_fulldef = False
    self.backstab = False
    self.oblivious = False
    self.flanker = False
    self.unbreakable = False
    self.temp_unbraekable = False
    self.stationplace = False
    # ^^ End setup trait variable
    # vv Add trait to base stat
    self.trait = list(set([trait for trait in self.trait if trait != 0]))  # dedupe and drop the 0 placeholder
    if len(self.trait) > 0:
        self.trait = {x: self.stat_list.trait_list[x] for x in self.trait if
                      x in self.stat_list.trait_list}  # Any trait not available in ruleset will be ignored
        for trait in self.trait.values():  # add trait modifier to base stat
            self.base_attack *= trait[trait_header['Melee Attack Effect']]
            self.base_meleedef *= trait[trait_header['Melee Defence Effect']]
            self.base_rangedef *= trait[trait_header['Ranged Defence Effect']]
            self.base_armour += trait[trait_header['Armour Bonus']]
            self.base_speed *= trait[trait_header['Speed Effect']]
            self.base_accuracy *= trait[trait_header['Accuracy Effect']]
            self.base_range *= trait[trait_header['Range Effect']]
            self.base_reload *= trait[trait_header['Reload Effect']]
            self.base_charge *= trait[trait_header['Charge Effect']]
            self.base_chargedef += trait[trait_header['Charge Defence Bonus']]
            self.base_hpregen += trait[trait_header['HP Regeneration Bonus']]
            self.base_staminaregen += trait[trait_header['Stamina Regeneration Bonus']]
            self.base_morale += trait[trait_header['Morale Bonus']]
            self.base_discipline += trait[trait_header['Discipline Bonus']]
            self.crit_effect += trait[trait_header['Critical Bonus']]
            fire_res += (trait[trait_header['Fire Resistance']] / 100)  # percentage, 1 mean perfect resistance, 0 mean none
            water_res += (trait[trait_header['Water Resistance']] / 100)
            air_res += (trait[trait_header['Air Resistance']] / 100)
            earth_res += (trait[trait_header['Earth Resistance']] / 100)
            self.magic_res += (trait[trait_header['Magic Resistance']] / 100)
            self.heat_res += (trait[trait_header['Heat Resistance']] / 100)
            self.cold_res += (trait[trait_header['Cold Resistance']] / 100)
            poison_res += (trait[trait_header['Poison Resistance']] / 100)
            self.mental += trait[trait_header['Mental Bonus']]
            if trait[trait_header['Enemy Status']] != [0]:
                for effect in trait[trait_header['Enemy Status']]:
                    self.base_inflictstatus[effect] = trait[trait_header['Buff Range']]
        # self.base_elem_melee =
        # self.base_elem_range =
        if 3 in self.trait:  # Varied training
            self.base_attack *= (random.randint(70, 120) / 100)
            self.base_meleedef *= (random.randint(70, 120) / 100)
            self.base_rangedef *= (random.randint(70, 120) / 100)
            self.base_speed *= (random.randint(70, 120) / 100)
            self.base_accuracy *= (random.randint(70, 120) / 100)
            self.base_reload *= (random.randint(70, 120) / 100)
            self.base_charge *= (random.randint(70, 120) / 100)
            self.base_chargedef *= (random.randint(70, 120) / 100)
            self.base_morale += random.randint(-15, 10)
            self.base_discipline += random.randint(-20, 0)
            self.mental += random.randint(-20, 10)
        # v Change trait variable (trait id -> behavior flag)
        if 16 in self.trait:
            self.arcshot = True  # can shoot in arc
        if 17 in self.trait:
            self.agileaim = True  # gain bonus accuracy when shoot while moving
        if 18 in self.trait:
            self.shootmove = True  # can shoot and move at same time
        if 29 in self.trait:
            self.ignore_chargedef = True  # ignore charge defence completely
        if 30 in self.trait:
            self.ignore_def = True  # ignore defence completely
        if 34 in self.trait:
            self.fulldef = True  # full effective defence for all side
        if 33 in self.trait:
            self.backstab = True  # bonus on rear attack
        if 47 in self.trait:
            self.flanker = True  # bonus on flank attack
        if 55 in self.trait:
            self.oblivious = True  # more penalty on flank/rear defend
        if 73 in self.trait:
            self.no_range_penal = True  # no range penalty
        if 74 in self.trait:
            self.long_range_acc = True  # less range penalty
        if 111 in self.trait:
            self.unbreakable = True  # always unbreakable
            self.temp_unbraekable = True
        if 149 in self.trait:  # Impetuous
            self.base_auth_penalty += 0.5
        # ^ End change trait variable
    # ^^ End add trait to stat
    for skill in list(self.skill.keys()):  # remove skill if class mismatch
        skill_troop_cond = self.skill[skill][self.skill_trooptype]
        if skill_troop_cond == 0 or (self.subunit_type == 2 and skill_troop_cond == 2) or (self.subunit_type != 2 and skill_troop_cond != 2):
            pass
        else:
            self.skill.pop(skill)
    # self.loyalty
    self.elem_res = (fire_res, water_res, air_res, earth_res, poison_res)  # list of elemental resistance
    self.max_stamina = self.stamina
    # Cached stamina thresholds to avoid recomputing each frame.
    self.stamina75 = self.stamina * 0.75
    self.stamina50 = self.stamina * 0.5
    self.stamina25 = self.stamina * 0.25
    self.stamina5 = self.stamina * 0.05
    self.unit_health = self.troop_health * self.troop_number  # Total health of subunit from all troop
    self.last_health_state = 4  # state start at full
    self.last_stamina_state = 4
    self.base_reload = weapon_reload + ((50 - self.base_reload) * weapon_reload / 100)  # final reload speed from weapon and skill
    # vv Stat variable after receive modifier effect from various sources, used for activity and effect calculation
    self.max_morale = self.base_morale
    self.attack = self.base_attack
    self.meleedef = self.base_meleedef
    self.rangedef = self.base_rangedef
    self.armour = self.base_armour
    self.speed = self.base_speed
    self.accuracy = self.base_accuracy
    self.reload = self.base_reload
    self.morale = self.base_morale
    self.discipline = self.base_discipline
    self.shootrange = self.base_range
    self.charge = self.base_charge
    self.chargedef = self.base_chargedef
    # ^^ End stat for status effect
    if self.mental < 0:  # cannot be negative
        self.mental = 0
    elif self.mental > 200:  # cannot exceed 200
        self.mental = 200
    self.mentaltext = self.mental - 100
    self.mental = (200 - self.mental) / 100  # convert to percentage
    self.max_health = self.unit_health  # health percentage
    # Cached health thresholds to avoid recomputing each frame.
    self.health75 = self.unit_health * 0.75
    self.health50 = self.unit_health * 0.5
    self.health25 = self.unit_health * 0.25
    self.oldlasthealth, self.old_last_stamina = self.unit_health, self.stamina  # save previous health and stamina in previous update
    self.maxtroop = self.troop_number  # max number of troop at the start
    self.moralestate = self.base_morale / self.max_morale  # turn into percentage
    self.staminastate = (self.stamina * 100) / self.max_stamina  # turn into percentage
    self.staminastate_cal = self.staminastate / 100  # for using as modifer on stat
    self.corner_atk = False  # cannot attack corner enemy by default
    self.temp_fulldef = False
    self.auth_penalty = self.base_auth_penalty
    self.hpregen = self.base_hpregen
    self.staminaregen = self.base_staminaregen
    self.inflictstatus = self.base_inflictstatus
    self.elem_melee = self.base_elem_melee
    self.elem_range = self.base_elem_range
class Subunit(pygame.sprite.Sprite):
    """Battle sprite for a single sub-subunit (squad) inside a parent unit."""

    # Class-level shared resources; assigned once during game setup before
    # any instance is created.
    images = []  # shared sprite images
    gamebattle = None  # reference to the battle controller
    gamemap = None  # base map
    gamemapfeature = None  # feature map
    gamemapheight = None  # height map
    dmgcal = longscript.dmgcal  # damage calculation helper bound as a method
    weapon_list = None
    armour_list = None
    stat_list = None
    setrotate = longscript.setrotate  # rotation helper bound as a method
    change_leader = longscript.change_leader  # leader swap helper bound as a method
    maxzoom = 10  # max zoom allow
    create_troop_stat = create_troop_stat  # module-level stat builder bound as a method
    def __init__(self, troopid, gameid, parentunit, position, starthp, startstamina, unitscale):
        """Although subunit in code, this is referred as sub-subunit ingame.

        Builds combat stats from the troop preset, registers the sprite with
        the battle's team collision groups, renders the circular block image
        (health/stamina rings plus weapon icon) and computes the starting
        map position relative to the parent unit.

        Args:
            troopid: preset id into stat_list.troop_list.
            gameid: unique id of this subunit within the battle.
            parentunit: owning unit (battalion) object.
            position: (x, y) offset inside the parent unit sprite, in tenths.
            starthp: starting troop number percentage (100 = full).
            startstamina: starting stamina percentage (100 = full).
            unitscale: per-team troop-number multipliers.
        """
        self._layer = 4
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.wholastselect = None
        self.leader = None  # Leader in the sub-subunit if there is one, got add in leader gamestart
        self.board_pos = None  # Used for event log position of subunit (Assigned in gamebattle subunit setup)
        self.walk = False  # currently walking
        self.run = False  # currently running
        self.frontline = False  # on front line of unit or not
        self.unit_leader = False  # contain the general or not, making it leader subunit
        self.attack_target = None
        self.melee_target = None  # current target of melee combat
        self.close_target = None  # closest target to move to in melee
        self.attacking = False  # For checking if parentunit in attacking state or not for using charge skill
        self.parentunit = parentunit  # reference to the parent battalion of this subunit
        self.enemy_front = []  # list of front collide sprite
        self.enemy_side = []  # list of side collide sprite
        self.friend_front = []  # list of friendly front collide sprite
        self.same_front = []  # list of same unit front collide sprite
        self.fullmerge = []  # list of sprite that collide and almost overlap with this sprite
        self.collide_penalty = False
        self.team = self.parentunit.team
        self.gamebattle.allsubunitlist.append(self)
        if self.team == 1:  # add sprite to team subunit group for collision
            groupcollide = self.gamebattle.team1subunit
        elif self.team == 2:
            groupcollide = self.gamebattle.team2subunit
        groupcollide.add(self)
        self.status_list = self.parentunit.status_list
        self.gameid = gameid  # ID of this
        self.troopid = int(troopid)  # ID of preset used for this subunit
        self.angle = self.parentunit.angle
        self.new_angle = self.parentunit.angle
        self.radians_angle = math.radians(360 - self.angle)  # radians for apply angle to position (allsidepos and subunit)
        self.parent_angle = self.parentunit.angle  # angle subunit will face when not moving or
        self.red_border = False  # red corner to indicate taking melee_dmg in inspect ui
        self.state = 0  # Current subunit state, similar to parentunit state
        self.timer = random.random()  # stagger per-subunit updates so they don't all tick the same frame
        self.movetimer = 0  # timer for moving to front position before attacking nearest enemy
        self.charge_momentum = 1  # charging momentum to reach target before choosing nearest enemy
        self.ammo_now = 0
        self.zoom = 1
        self.lastzoom = 10
        self.skill_cond = 0
        self.brokenlimit = 0  # morale require for parentunit to stop broken state, will increase everytime broken state stop
        self.getfeature = self.gamemapfeature.getfeature
        self.getheight = self.gamemapheight.getheight
        # v Setup troop stat
        stat = self.stat_list.troop_list[self.troopid].copy()
        self.create_troop_stat(stat, starthp, startstamina, unitscale)
        self.gamebattle.start_troopnumber[self.team] += self.troop_number  # add troop number to counter how many troop join battle
        # ^ End setup stat
        # v Subunit image block
        image = self.images[0].copy()  # Subunit block blue colour for team1 for shown in inspect ui
        if self.team == 2:
            image = self.images[13].copy()  # red colour
        self.image = pygame.Surface((image.get_width() + 10, image.get_height() + 10), pygame.SRCALPHA)  # subunit sprite image
        pygame.draw.circle(self.image, self.parentunit.colour, (self.image.get_width() / 2, self.image.get_height() / 2), image.get_width() / 2)
        if self.subunit_type == 2:  # cavalry draw line on block
            pygame.draw.line(image, (0, 0, 0), (0, 0), (image.get_width(), image.get_height()), 2)
            radian = 45 * 0.0174532925  # top left
            start = (
                self.image.get_width() / 3 * math.cos(radian), self.image.get_width() / 3 * math.sin(radian))  # draw line from 45 degree in circle
            radian = 225 * 0.0174532925  # bottom right
            end = (self.image.get_width() * -math.cos(radian), self.image.get_width() * -math.sin(radian))  # draw line to 225 degree in circle
            pygame.draw.line(self.image, (0, 0, 0), start, end, 2)
        self.imageblock = image.copy()  # image shown in inspect ui as square instead of circle
        self.selectedimage = pygame.Surface((image.get_width(), image.get_height()), pygame.SRCALPHA)
        pygame.draw.circle(self.selectedimage, (255, 255, 255, 150), (image.get_width() / 2, image.get_height() / 2), image.get_width() / 2)
        pygame.draw.circle(self.selectedimage, (0, 0, 0, 255), (image.get_width() / 2, image.get_height() / 2), image.get_width() / 2, 1)
        self.selectedimage_original, self.selectedimage_original2 = self.selectedimage.copy(), self.selectedimage.copy()
        self.selectedimagerect = self.selectedimage.get_rect(topleft=(0, 0))
        # ^ End subunit block
        # Far (zoomed-out) variants: outlined copies pre-scaled to 1/maxzoom size.
        self.far_image = self.image.copy()
        pygame.draw.circle(self.far_image, (0, 0, 0), (self.far_image.get_width() / 2, self.far_image.get_height() / 2),
                           self.far_image.get_width() / 2, 4)
        self.far_selectedimage = self.selectedimage.copy()
        pygame.draw.circle(self.far_selectedimage, (0, 0, 0), (self.far_selectedimage.get_width() / 2, self.far_selectedimage.get_height() / 2),
                           self.far_selectedimage.get_width() / 2, 4)
        scalewidth = self.image.get_width() * 1 / self.maxzoom
        scaleheight = self.image.get_height() * 1 / self.maxzoom
        dim = pygame.Vector2(scalewidth, scaleheight)
        self.far_image = pygame.transform.scale(self.far_image, (int(dim[0]), int(dim[1])))
        self.far_selectedimage = pygame.transform.scale(self.far_selectedimage, (int(dim[0]), int(dim[1])))
        # v health circle image setup
        self.healthimage = self.images[1]
        self.health_image_rect = self.healthimage.get_rect(center=self.image.get_rect().center)  # for battle sprite
        self.health_imageblock_rect = self.healthimage.get_rect(center=self.imageblock.get_rect().center)  # for ui sprite
        self.image.blit(self.healthimage, self.health_image_rect)
        self.imageblock.blit(self.healthimage, self.health_imageblock_rect)
        # ^ End health circle
        # v stamina circle image setup
        self.staminaimage = self.images[6]
        self.stamina_image_rect = self.staminaimage.get_rect(center=self.image.get_rect().center)  # for battle sprite
        self.stamina_imageblock_rect = self.staminaimage.get_rect(center=self.imageblock.get_rect().center)  # for ui sprite
        self.image.blit(self.staminaimage, self.stamina_image_rect)
        self.imageblock.blit(self.staminaimage, self.stamina_imageblock_rect)
        # ^ End stamina circle
        # v weapon class icon in middle circle
        image1 = self.weapon_list.imgs[self.weapon_list.weapon_list[self.primary_main_weapon[0]][-3]]  # image on subunit sprite
        image1rect = image1.get_rect(center=self.image.get_rect().center)
        self.image.blit(image1, image1rect)
        image1rect = image1.get_rect(center=self.imageblock.get_rect().center)
        self.imageblock.blit(image1, image1rect)
        self.imageblock_original = self.imageblock.copy()
        self.corner_image_rect = self.images[11].get_rect(
            center=self.imageblock.get_rect().center)  # red corner when take melee_dmg shown in image block
        # ^ End weapon icon
        self.image_original = self.image.copy()  # original for rotate
        self.image_original2 = self.image.copy()  # original2 for saving original notclicked
        self.image_original3 = self.image.copy()  # original3 for saving original zoom level
        # v position related
        self.unitposition = (position[0] / 10, position[1] / 10)  # position in parentunit sprite
        battaliontopleft = pygame.Vector2(self.parentunit.base_pos[0] - self.parentunit.base_width_box / 2,
                                          self.parentunit.base_pos[
                                              1] - self.parentunit.base_height_box / 2)  # get topleft corner position of parentunit to calculate true pos
        self.base_pos = pygame.Vector2(battaliontopleft[0] + self.unitposition[0],
                                       battaliontopleft[1] + self.unitposition[1])  # true position of subunit in map
        self.last_pos = self.base_pos
        self.movement_queue = []
        self.combat_move_queue = []
        self.base_target = self.base_pos  # base_target to move
        self.command_target = self.base_pos  # actual base_target outside of combat
        self.pos = self.base_pos * self.zoom  # pos is for showing on screen
        self.imageheight = (image.get_height() - 1) / 20  # get real half height of circle sprite
        self.front_pos = (self.base_pos[0], (self.base_pos[1] - self.imageheight))  # generate front side position
        self.front_pos = rotationxy(self.base_pos, self.front_pos, self.radians_angle)  # rotate the new front side according to sprite rotation
        self.attack_pos = self.parentunit.base_attack_pos
        self.terrain, self.feature = self.getfeature(self.base_pos, self.gamemap)  # get new terrain and feature at each subunit position
        self.height = self.gamemapheight.getheight(self.base_pos)  # current terrain height
        self.front_height = self.gamemapheight.getheight(self.front_pos)  # terrain height at front position
        # ^ End position related
        self.rect = self.image.get_rect(center=self.pos)
    def zoomscale(self):
        """Rescale the sprite image and its screen position for the current camera zoom.

        At any zoom other than 1 the full-detail image_original3 is rescaled by
        zoom/maxzoom; at zoom 1 (most zoomed out) the pre-scaled far image is
        swapped in instead of rescaling every time.
        """
        if self.zoom != 1:
            self.image_original = self.image_original3.copy()  # reset image for new scale
            scalewidth = self.image_original.get_width() * self.zoom / self.maxzoom
            scaleheight = self.image_original.get_height() * self.zoom / self.maxzoom
            dim = pygame.Vector2(scalewidth, scaleheight)
            self.image = pygame.transform.scale(self.image_original, (int(dim[0]), int(dim[1])))
            if self.parentunit.selected and self.state != 100:
                # Selection highlight must match the new sprite size.
                self.selectedimage_original = pygame.transform.scale(self.selectedimage_original2, (int(dim[0]), int(dim[1])))
        else:
            # Most zoomed-out view: use the cached far image.
            self.image_original = self.far_image.copy()
            self.image = self.image_original.copy()
            if self.parentunit.selected and self.state != 100:
                self.selectedimage_original = self.far_selectedimage.copy()
        # Keep the rotate/click base copies in sync with the rescaled image.
        self.image_original = self.image.copy()
        self.image_original2 = self.image.copy()
        self.change_pos_scale()
        self.rotate()
    def change_pos_scale(self):
        """Change position variable to new camera scale.

        Screen position is the map position scaled by the current zoom; the
        sprite rect is recentered on it.
        """
        self.pos = self.base_pos * self.zoom
        self.rect = self.image.get_rect(center=self.pos)
def useskill(self, whichskill):
if whichskill == 0: # charge skill need to seperate since charge power will be used only for charge skill
skillstat = self.skill[list(self.skill)[0]].copy() # get skill stat
self.skill_effect[self.chargeskill] = skillstat # add stat to skill effect
self.skill_cooldown[self.chargeskill] = skillstat[self.skill_cd] # add skill cooldown
else: # other skill
skillstat = self.skill[whichskill].copy() # get skill stat
self.skill_effect[whichskill] = skillstat # add stat to skill effect
self.skill_cooldown[whichskill] = skillstat[self.skill_cd] # add skill cooldown
self.stamina -= skillstat[9]
# self.skill_cooldown[whichskill] =
# def receiveskill(self,whichskill):
def check_skill_condition(self):
"""Check which skill can be used, cooldown, condition state, discipline, stamina are checked. charge skill is excepted from this check"""
if self.skill_cond == 1 and self.staminastate < 50: # reserve 50% stamina, don't use any skill
self.available_skill = []
elif self.skill_cond == 2 and self.staminastate < 25: # reserve 25% stamina, don't use any skill
self.available_skill = []
else: # check all skill
self.available_skill = [skill for skill in self.skill if skill not in self.skill_cooldown.keys()
and self.state in self.skill[skill][self.skill_condition] and self.discipline >= self.skill[skill][
self.skill_discipline_req]
and self.stamina > self.skill[skill][self.skill_stamina_cost] and skill != self.chargeskill]
def find_nearby_subunit(self):
"""Find nearby friendly squads in the same parentunit for applying buff"""
self.nearby_subunit_list = []
corner_subunit = []
for rowindex, rowlist in enumerate(self.parentunit.armysubunit.tolist()):
if self.gameid in rowlist:
if rowlist.index(self.gameid) - 1 != -1: # get subunit from left if not at first column
self.nearby_subunit_list.append(self.parentunit.spritearray[rowindex][rowlist.index(self.gameid) - 1]) # index 0
else: # not exist
self.nearby_subunit_list.append(0) # add number 0 instead
if rowlist.index(self.gameid) + 1 != len(rowlist): # get subunit from right if not at last column
self.nearby_subunit_list.append(self.parentunit.spritearray[rowindex][rowlist.index(self.gameid) + 1]) # index 1
else: # not exist
self.nearby_subunit_list.append(0) # add number 0 instead
if rowindex != 0: # get top subunit
self.nearby_subunit_list.append(self.parentunit.spritearray[rowindex - 1][rowlist.index(self.gameid)]) # index 2
if rowlist.index(self.gameid) - 1 != -1: # get top left subunit
corner_subunit.append(self.parentunit.spritearray[rowindex - 1][rowlist.index(self.gameid) - 1]) # index 3
else: # not exist
corner_subunit.append(0) # add number 0 instead
if rowlist.index(self.gameid) + 1 != len(rowlist): # get top right
corner_subunit.append(self.parentunit.spritearray[rowindex - 1][rowlist.index(self.gameid) + 1]) # index 4
else: # not exist
corner_subunit.append(0) # add number 0 instead
else: # not exist
self.nearby_subunit_list.append(0) # add number 0 instead
if rowindex != len(self.parentunit.spritearray) - 1: # get bottom subunit
self.nearby_subunit_list.append(self.parentunit.spritearray[rowindex + 1][rowlist.index(self.gameid)]) # index 5
if rowlist.index(self.gameid) - 1 != -1: # get bottom left subunit
corner_subunit.append(self.parentunit.spritearray[rowindex + 1][rowlist.index(self.gameid) - 1]) # index 6
else: # not exist
corner_subunit.append(0) # add number 0 instead
if rowlist.index(self.gameid) + 1 != len(rowlist): # get bottom right subunit
corner_subunit.append(self.parentunit.spritearray[rowindex + 1][rowlist.index(self.gameid) + 1]) # index 7
else: # not exist
corner_subunit.append(0) # add number 0 instead
else: # not exist
self.nearby_subunit_list.append(0) # add number 0 instead
self.nearby_subunit_list = self.nearby_subunit_list + corner_subunit
def status_to_friend(self, aoe, statusid, statuslist):
"""apply status effect to nearby subunit depending on aoe stat"""
if aoe in (2, 3):
if aoe > 1: # only direct nearby friendly subunit
for subunit in self.nearby_subunit_list[0:4]:
if subunit != 0 and subunit.state != 100: # only apply to exist and alive squads
subunit.statuseffect[statusid] = statuslist # apply status effect
if aoe > 2: # all nearby including corner friendly subunit
for subunit in self.nearby_subunit_list[4:]:
if subunit != 0 and subunit.state != 100: # only apply to exist and alive squads
subunit.statuseffect[statusid] = statuslist # apply status effect
elif aoe == 4: # apply to whole parentunit
for subunit in self.parentunit.spritearray.flat:
if subunit.state != 100: # only apply to alive squads
subunit.status_effect[statusid] = statuslist # apply status effect
def threshold_count(self, elem, t1status, t2status):
"""apply elemental status effect when reach elemental threshold"""
if elem > 50:
self.status_effect[t1status] = self.status_list[t1status].copy() # apply tier 1 status
if elem > 100:
self.status_effect[t2status] = self.status_list[t2status].copy() # apply tier 2 status
del self.status_effect[t1status] # remove tier 1 status
elem = 0 # reset elemental count
return elem
def find_close_target(self, subunitlist):
"""Find close enemy sub-unit to move to fight"""
closelist = {subunit: subunit.base_pos.distance_to(self.base_pos) for subunit in subunitlist}
closelist = dict(sorted(closelist.items(), key=lambda item: item[1]))
maxrandom = 3
if len(closelist) < 4:
maxrandom = len(closelist) - 1
if maxrandom < 0:
maxrandom = 0
if len(closelist) > 0:
closetarget = list(closelist.keys())[random.randint(0, maxrandom)]
# if close_target.base_pos.distance_to(self.base_pos) < 20: # in case can't find close target
return closetarget
    def statusupdate(self, thisweather=None):
        """Recompute this subunit's effective combat stats for the current tick.

        Starts from base stats, then layers on, in this fixed order: trait status
        effects, weather, map terrain feature, active skill effects, active status
        effects, temperature and elemental build-up, morale/stamina/discipline
        modifiers, and finally clamps every stat to its minimum. Also ticks down
        skill cooldowns and skill/status effect timers.

        thisweather: current weather object, or None to skip weather modifiers
        (used at game start before weather exists).
        """
        if self.red_border and self.parentunit.selected:  # have red border (taking melee_dmg) on inspect ui, reset image
            self.imageblock.blit(self.imageblock_original, self.corner_image_rect)
            self.red_border = False
        # v reset stat to default and apply morale, stamina, command buff to stat
        if self.max_stamina > 100:
            self.max_stamina = self.max_stamina - (self.timer * 0.05)  # max stamina gradually decreases over time
        self.stamina75 = self.max_stamina * 0.75
        self.stamina50 = self.max_stamina * 0.5
        self.stamina25 = self.max_stamina * 0.25
        self.stamina5 = self.max_stamina * 0.05
        self.morale = self.base_morale
        self.authority = self.parentunit.authority  # parentunit total authority
        self.commandbuff = self.parentunit.commandbuff[
            self.subunit_type] * 100  # command buff from leader according to this subunit type
        self.discipline = self.base_discipline
        self.attack = self.base_attack
        self.meleedef = self.base_meleedef
        self.rangedef = self.base_rangedef
        self.accuracy = self.base_accuracy
        self.reload = self.base_reload
        self.chargedef = self.base_chargedef
        self.speed = self.base_speed
        self.charge = self.base_charge
        self.shootrange = self.base_range
        self.crit_effect = 1  # default critical effect
        self.front_dmg_effect = 1  # default frontal melee_dmg
        self.side_dmg_effect = 1  # default side melee_dmg
        self.corner_atk = False  # cannot attack corner enemy by default
        self.temp_fulldef = False  # no all-round defence by default (set by status 91 below)
        self.auth_penalty = self.base_auth_penalty
        self.hpregen = self.base_hpregen
        self.staminaregen = self.base_staminaregen
        self.inflictstatus = self.base_inflictstatus
        self.elem_melee = self.base_elem_melee
        self.elem_range = self.base_elem_range
        # ^ End default stat
        # v Apply status effect from trait
        if len(self.trait) > 1:
            for trait in self.trait.values():
                if trait[18] != [0]:  # column 18 holds the trait's status effect ids; [0] means none
                    for effect in trait[18]:  # apply status effect from trait
                        self.status_effect[effect] = self.status_list[effect].copy()
                        if trait[1] > 1:  # status buff range to nearby friend
                            self.status_to_friend(trait[1], effect, self.status_list[effect].copy())
        # ^ End trait
        # v apply effect from weather
        weathertemperature = 0
        if thisweather is not None:
            weather = thisweather
            self.attack += weather.meleeatk_buff
            self.meleedef += weather.meleedef_buff
            self.rangedef += weather.rangedef_buff
            self.armour += weather.armour_buff
            self.speed += weather.speed_buff
            self.accuracy += weather.accuracy_buff
            self.reload += weather.reload_buff
            self.charge += weather.charge_buff
            self.chargedef += weather.chargedef_buff
            self.hpregen += weather.hpregen_buff
            self.staminaregen += weather.staminaregen_buff
            self.morale += (weather.morale_buff * self.mental)
            self.discipline += weather.discipline_buff
            if weather.elem[0] != 0:  # Weather can cause elemental effect such as wet
                self.elem_count[weather.elem[0]] += (weather.elem[1] * (100 - self.elem_res[weather.elem[0]]) / 100)
            weathertemperature = weather.temperature
        # ^ End weather
        # v Map feature modifier to stat
        map_feature_mod = self.gamemapfeature.featuremod[self.feature]
        if map_feature_mod[self.featuremod] != 1:  # speed/charge
            speedmod = map_feature_mod[self.featuremod]  # get the speed mod appropriate to subunit type
            self.speed *= speedmod
            self.charge *= speedmod
        if map_feature_mod[self.featuremod + 1] != 1:  # melee attack
            # combatmod = self.parentunit.gamemapfeature.featuremod[self.parentunit.feature][self.featuremod + 1]
            self.attack *= map_feature_mod[self.featuremod + 1]  # get the attack mod appropriate to subunit type
        if map_feature_mod[self.featuremod + 2] != 1:  # melee/charge defence
            combatmod = map_feature_mod[self.featuremod + 2]  # get the defence mod appropriate to subunit type
            self.meleedef *= combatmod
            self.chargedef *= combatmod
        self.rangedef += map_feature_mod[7]  # range defence bonus from terrain bonus
        self.accuracy -= (map_feature_mod[7] / 2)  # range def bonus blocks subunit sight as well, so less accuracy
        self.discipline += map_feature_mod[9]  # discipline bonus from terrain
        if map_feature_mod[11] != [0]:  # Some terrain feature can also cause status effect such as swimming in water
            if 1 in map_feature_mod[11]:  # Shallow water type terrain
                self.status_effect[31] = self.status_list[31].copy()  # wet
            if 5 in map_feature_mod[11]:  # Deep water type terrain
                self.status_effect[93] = self.status_list[93].copy()  # drench
                if self.weight > 60 or self.stamina <= 0:  # weighs too much or tired will cause drowning
                    self.status_effect[102] = self.status_list[102].copy()  # Drowning
                elif self.weight > 30:  # Medium weight subunit has trouble travelling through water and will sink, progressively losing troops
                    self.status_effect[101] = self.status_list[101].copy()  # Sinking
                elif self.weight < 30:  # Light weight subunit has no trouble travelling through water
                    self.status_effect[104] = self.status_list[104].copy()  # Swimming
            if 2 in map_feature_mod[11]:  # Rot type terrain
                self.status_effect[54] = self.status_list[54].copy()
            if 3 in map_feature_mod[11]:  # Poison type terrain
                self.elem_count[4] += ((100 - self.elem_res[4]) / 100)
        # self.hidden += self.parentunit.gamemapfeature[self.parentunit.feature][6]
        tempreach = map_feature_mod[10] + weathertemperature  # temperature the subunit will drift toward, from terrain feature and weather
        # ^ End map feature
        # v Apply effect from skill
        # For the status and skill effect column indices used here see longscript.py load_game_data()
        if len(self.skill_effect) > 0:
            for status in self.skill_effect:  # apply elemental effect to melee_dmg if skill has element
                calstatus = self.skill_effect[status]
                if calstatus[self.skill_type] == 0 and calstatus[self.skill_element] != 0:  # melee elemental effect
                    self.elem_melee = calstatus[self.skill_element]
                elif calstatus[self.skill_type] == 1 and calstatus[self.skill_element] != 0:  # range elemental effect
                    self.elem_range = calstatus[self.skill_element]
                self.attack = self.attack * calstatus[self.skill_melee_attack]
                self.meleedef = self.meleedef * calstatus[self.skill_melee_defence]
                self.rangedef = self.rangedef * calstatus[self.skill_range_defence]
                self.speed = self.speed * calstatus[self.skill_speed]
                self.accuracy = self.accuracy * calstatus[self.skill_accuracy]
                self.shootrange = self.shootrange * calstatus[self.skill_range]
                self.reload = self.reload / calstatus[
                    self.skill_reload]  # unlike other modifiers, a higher mod reduces reload time (decrease stat)
                self.charge = self.charge * calstatus[self.skill_charge]
                self.chargedef = self.chargedef + calstatus[self.skill_charge_defence]
                self.hpregen += calstatus[self.skill_hp_regen]
                self.staminaregen += calstatus[self.skill_stamina_regen]
                self.morale = self.morale + (calstatus[self.skill_morale] * self.mental)
                self.discipline = self.discipline + calstatus[self.skill_discipline]
                # self.sight += calstatus[self.skill_sight]
                # self.hidden += calstatus[self.skill_hide]
                self.crit_effect = self.crit_effect * calstatus[self.skill_critical]
                self.front_dmg_effect = self.front_dmg_effect * calstatus[self.skill_damage]
                if calstatus[self.skill_aoe] in (2, 3) and calstatus[self.skill_damage] != 100:
                    self.side_dmg_effect = self.side_dmg_effect * calstatus[self.skill_damage]
                    if calstatus[self.skill_aoe] == 3:
                        self.corner_atk = True  # aoe 3 means it can attack enemies on all sides
                # v Apply status to friendly if there is one in skill effect
                if calstatus[self.skill_status] != [0]:
                    for effect in calstatus[self.skill_status]:
                        self.status_effect[effect] = self.status_list[effect].copy()
                        if self.status_effect[effect][2] > 1:
                            self.status_to_friend(self.status_effect[effect][2], effect, self.status_list)
                # ^ End apply status to
                self.bonus_morale_dmg += calstatus[self.skill_moraledmg]
                self.bonus_stamina_dmg += calstatus[self.skill_staminadmg]
                if calstatus[self.skill_enemy_status] != [0]:  # Apply inflict status effect to enemy from skill to inflict list
                    for effect in calstatus[self.skill_enemy_status]:
                        if effect != 0:
                            self.inflictstatus[effect] = calstatus[self.skill_aoe]
            if self.chargeskill in self.skill_effect:
                self.auth_penalty += 0.5  # higher authority penalty when attacking (retreat while attacking)
        # ^ End skill effect
        # v Apply effect and modifier from status effect
        # special status: 0 no control, 1 hostile to all, 2 no retreat, 3 no terrain effect, 4 no attack, 5 no skill, 6 no spell, 7 no exp gain,
        # 7 immune to bad mind, 8 immune to bad body, 9 immune to all effect, 10 immortal (not implemented yet)
        if len(self.status_effect) > 0:
            for status in self.status_effect:
                calstatus = self.status_list[status]
                self.attack = self.attack * calstatus[self.status_melee_attack]
                self.meleedef = self.meleedef * calstatus[self.status_melee_defence]
                self.rangedef = self.rangedef * calstatus[self.status_range_defence]
                self.armour = self.armour * calstatus[self.status_armour]
                self.speed = self.speed * calstatus[self.status_speed]
                self.accuracy = self.accuracy * calstatus[self.status_accuracy]
                self.reload = self.reload / calstatus[self.status_reload]
                self.charge = self.charge * calstatus[self.status_charge]
                self.chargedef += calstatus[self.status_charge_defence]
                self.hpregen += calstatus[self.status_hp_regen]
                self.staminaregen += calstatus[self.status_stamina_regen]
                self.morale = self.morale + (calstatus[self.status_morale] * self.mental)
                self.discipline += calstatus[self.status_discipline]
                # self.sight += calstatus[self.status_sight]
                # self.hidden += calstatus[self.status_hide]
                tempreach += calstatus[self.status_temperature]
                if status == 91:  # All round defence status
                    self.temp_fulldef = True
        # ^ End status effect
        # v Temperature mod function from terrain and weather
        for status in self.status_effect.values():
            tempreach += status[19]  # add more from status effect
        if tempreach < 0:  # cold temperature
            tempreach = tempreach * (100 - self.cold_res) / 100  # lowest temperature the subunit will reach, based on cold resist
        else:  # hot temperature
            tempreach = tempreach * (100 - self.heat_res) / 100  # highest temperature the subunit will reach, based on heat resist
        if self.temp_count != tempreach:  # move temp_count toward tempreach
            if tempreach > 0:
                if self.temp_count < tempreach:
                    self.temp_count += (100 - self.heat_res) / 100 * self.timer  # increase temperature, rate depends on heat resistance
            elif tempreach < 0:
                if self.temp_count > tempreach:
                    self.temp_count -= (100 - self.cold_res) / 100 * self.timer  # decrease temperature, rate depends on cold resistance
            else:  # tempreach is 0, subunit temp reverts back to 0
                if self.temp_count > 0:
                    self.temp_count -= (1 + self.heat_res) / 100 * self.timer  # revert faster with higher resist
                else:
                    self.temp_count += (1 + self.cold_res) / 100 * self.timer
        # ^ End temperature
        # v Elemental effect
        if self.elem_count != [0, 0, 0, 0, 0]:  # Apply effect if elem threshold reaches 50 or 100
            self.elem_count[0] = self.threshold_count(self.elem_count[0], 28, 92)
            self.elem_count[1] = self.threshold_count(self.elem_count[1], 31, 93)
            self.elem_count[2] = self.threshold_count(self.elem_count[2], 30, 94)
            self.elem_count[3] = self.threshold_count(self.elem_count[3], 23, 35)
            self.elem_count[4] = self.threshold_count(self.elem_count[4], 26, 27)
            self.elem_count = [elem - self.timer if elem > 0 else elem for elem in self.elem_count]  # decay over time
        # ^ End elemental effect
        # v Temperature effect
        if self.temp_count > 50:  # Hot
            self.status_effect[96] = self.status_list[96].copy()
            if self.temp_count > 100:  # Extremely hot
                self.status_effect[97] = self.status_list[97].copy()
                del self.status_effect[96]
        if self.temp_count < -50:  # Cold
            self.status_effect[95] = self.status_list[95].copy()
            if self.temp_count < -100:  # Extremely cold
                self.status_effect[29] = self.status_list[29].copy()
                del self.status_effect[95]
        # ^ End temperature effect related function
        self.moralestate = self.morale / self.max_morale  # for use as modifier to stat
        if self.moralestate > 3 or math.isnan(self.moralestate):  # morale state more than 3 gives no more benefit
            self.moralestate = 3
        self.staminastate = (self.stamina * 100) / self.max_stamina
        self.staminastatecal = 1
        if self.stamina != infinity:
            self.staminastatecal = self.staminastate / 100  # for use as modifier to stat
        self.discipline = (self.discipline * self.moralestate * self.staminastatecal) + self.parentunit.leader_social[
            self.grade + 1] + (self.authority / 10)  # use morale, stamina, leader social vs grade (+1 to skip class name) and authority
        self.attack = (self.attack * (self.moralestate + 0.1)) * self.staminastatecal + self.commandbuff  # use morale, stamina and command buff
        self.meleedef = (self.meleedef * (
                self.moralestate + 0.1)) * self.staminastatecal + self.commandbuff  # use morale, stamina and command buff
        self.rangedef = (self.rangedef * (self.moralestate + 0.1)) * self.staminastatecal + (
                self.commandbuff / 2)  # use morale, stamina and half command buff
        self.accuracy = self.accuracy * self.staminastatecal + self.commandbuff  # use stamina and command buff
        self.reload = self.reload * (2 - self.staminastatecal)  # the less stamina, the higher the reload time
        self.chargedef = (self.chargedef * (
                self.moralestate + 0.1)) * self.staminastatecal + self.commandbuff  # use morale, stamina and command buff
        heightdiff = (self.height / self.front_height) ** 2  # walking downhill increases speed, walking uphill reduces it
        self.speed = self.speed * self.staminastatecal * heightdiff  # use stamina
        self.charge = (self.charge + self.speed) * (
                self.moralestate + 0.1) * self.staminastatecal + self.commandbuff  # use morale, stamina and command buff
        fullmergelen = len(self.fullmerge) + 1
        if fullmergelen > 1:  # reduce discipline if there are overlapping sub-units
            self.discipline = self.discipline / fullmergelen
        # v Rounding up, add discipline to stat and forbid negative stat
        disciplinecal = self.discipline / 200
        self.attack = self.attack + (self.attack * disciplinecal)
        self.meleedef = self.meleedef + (self.meleedef * disciplinecal)
        self.rangedef = self.rangedef + (self.rangedef * disciplinecal)
        # self.armour = self.armour
        self.speed = self.speed + (self.speed * disciplinecal / 2)
        # self.accuracy = self.accuracy
        # self.reload = self.reload
        self.chargedef = self.chargedef + (self.chargedef * disciplinecal)
        self.charge = self.charge + (self.charge * disciplinecal)
        if self.magazine_left == 0 and self.ammo_now == 0:  # out of ammunition, cannot shoot at all
            self.shootrange = 0
        if self.attack < 0:  # seems like using if 0 is faster than max(0,)
            self.attack = 0
        if self.meleedef < 0:
            self.meleedef = 0
        if self.rangedef < 0:
            self.rangedef = 0
        if self.armour < 1:  # Armour cannot be lower than 1
            self.armour = 1
        if self.speed < 1:
            self.speed = 1
        if 105 in self.status_effect:  # collapse state enforces 0 speed
            self.speed = 0
        if self.accuracy < 0:
            self.accuracy = 0
        if self.reload < 0:
            self.reload = 0
        if self.charge < 0:
            self.charge = 0
        if self.chargedef < 0:
            self.chargedef = 0
        if self.discipline < 0:
            self.discipline = 0
        # ^ End rounding up
        self.rotatespeed = self.parentunit.rotatespeed * 2  # rotate speed for subunit self-rotation only, not unit rotation
        if self.state in (0, 99):
            self.rotatespeed = self.speed
        # v cooldown, active and effect timer function
        self.skill_cooldown = {key: val - self.timer for key, val in self.skill_cooldown.items()}  # cooldown decreases over time
        self.skill_cooldown = {key: val for key, val in self.skill_cooldown.items() if val > 0}  # remove cooldown when time reaches 0
        for a, b in self.skill_effect.items():  # Can't use dict comprehension here since value includes all other skill stat
            b[3] -= self.timer
        self.skill_effect = {key: val for key, val in self.skill_effect.items() if
                             val[3] > 0 and self.state in val[5]}  # remove effect if time reaches 0 or restriction state is not met
        for a, b in self.status_effect.items():
            b[3] -= self.timer
        self.status_effect = {key: val for key, val in self.status_effect.items() if val[3] > 0}
        # ^ End timer effect
def find_shooting_target(self, parentstate):
"""get nearby enemy base_target from list if not targeting anything yet"""
self.attack_pos = list(self.parentunit.near_target.values())[0] # replace attack_pos with enemy unit pos
self.attack_target = list(self.parentunit.near_target.keys())[0] # replace attack_target with enemy unit id
if self.shootrange >= self.attack_pos.distance_to(self.base_pos):
self.state = 11
if parentstate in (1, 3, 5): # Walk and shoot
self.state = 12
elif parentstate in (2, 4, 6): # Run and shoot
self.state = 13
def make_front_pos(self):
"""create new pos for front side of sprite"""
self.front_pos = (self.base_pos[0], (self.base_pos[1] - self.imageheight))
self.front_pos = rotationxy(self.base_pos, self.front_pos, self.radians_angle)
def make_pos_range(self):
"""create range of sprite pos for pathfinding"""
self.posrange = (range(int(max(0, self.base_pos[0] - (self.imageheight - 1))), int(min(1000, self.base_pos[0] + self.imageheight))),
range(int(max(0, self.base_pos[1] - (self.imageheight - 1))), int(min(1000, self.base_pos[1] + self.imageheight))))
def gamestart(self, zoom):
"""run once when game start or subunit just get created"""
self.zoom = zoom
self.make_front_pos()
self.make_pos_range()
self.zoomscale()
self.find_nearby_subunit()
self.statusupdate()
self.terrain, self.feature = self.getfeature(self.base_pos, self.gamemap)
self.height = self.gamemapheight.getheight(self.base_pos)
def update(self, weather, newdt, zoom, combattimer, mousepos, mouseup):
if self.lastzoom != zoom: # camera zoom is changed
self.lastzoom = zoom
self.zoomchange = True
self.zoom = zoom # save scale
self.zoomscale() # update parentunit sprite according to new scale
if self.state != 100: # only run these when not dead
# v Mouse collision detection
if self.gamebattle.gamestate == 1 or (self.gamebattle.gamestate == 2 and self.gamebattle.unit_build_slot not in self.gamebattle.battleui):
if self.rect.collidepoint(mousepos):
self.gamebattle.last_mouseover = self.parentunit # last mouse over on this parentunit
if mouseup and self.gamebattle.uiclick is False:
self.gamebattle.last_selected = self.parentunit # become last selected parentunit
if self.parentunit.selected is False:
self.parentunit.justselected = True
self.parentunit.selected = True
self.wholastselect = self.gameid
self.gamebattle.clickany = True
# ^ End mouse detect
dt = newdt
if dt > 0: # only run these when game not pause
self.timer += dt
self.walk = False # reset walk
self.run = False # reset run
parentstate = self.parentunit.state
if parentstate in (1, 2, 3, 4):
self.attacking = True
elif self.attacking and parentstate not in (1, 2, 3, 4, 10): # cancel charge when no longer move to melee or in combat
self.attacking = False
if self.state not in (95, 97, 98, 99) and parentstate in (0, 1, 2, 3, 4, 5, 6, 95, 96, 97, 98, 99):
self.state = parentstate # Enforce parentunit state to subunit when moving and breaking
self.attack_target = self.parentunit.attack_target
self.attack_pos = self.parentunit.base_attack_pos
if self.timer > 1: # Update status and skill use around every 1 second
self.statusupdate(weather)
self.available_skill = []
if self.skill_cond != 3: # any skill condition behaviour beside 3 (forbid skill) will check available skill to use
self.check_skill_condition()
if self.state in (4, 13) and parentstate != 10 and self.attacking and self.parentunit.moverotate is False and \
self.base_pos.distance_to(self.base_target) < 50: # charge skill only when running to melee
self.charge_momentum += self.timer * (self.speed / 50)
if self.charge_momentum >= 5:
self.useskill(0) # Use charge skill
self.parentunit.charging = True
self.charge_momentum = 5
elif self.charge_momentum > 1: # reset charge momentum if charge skill not active
self.charge_momentum -= self.timer * (self.speed / 50)
if self.charge_momentum <= 1:
self.parentunit.charging = False
self.charge_momentum = 1
skillchance = random.randint(0, 10) # random chance to use random available skill
if len(self.available_skill) > 0 and skillchance >= 6:
self.useskill(self.available_skill[random.randint(0, len(self.available_skill) - 1)])
self.timer -= 1
# if parentstate not in (96,97,98,99) and self.state != 99:
collidelist = []
if self.enemy_front != [] or self.enemy_side != []: # Check if in combat or not with collision
collidelist = self.enemy_front + self.enemy_side
for subunit in collidelist:
if self.state not in (96, 98, 99):
self.state = 10
self.melee_target = subunit
if self.enemy_front == []: # no enemy in front try rotate to enemy at side
# self.base_target = self.melee_target.base_pos
self.new_angle = self.setrotate(self.melee_target.base_pos)
else: # no way to retreat, Fight to the death
if self.enemy_front != [] and self.enemy_side != []: # if both front and any side got attacked
if 9 not in self.status_effect:
self.status_effect[9] = self.status_list[9].copy() # fight to the death status
if parentstate not in (10, 96, 98, 99):
parentstate = 10
self.parentunit.state = 10
if self.melee_target is not None:
self.parentunit.attack_target = self.melee_target.parentunit
break
elif parentstate == 10: # no collide enemy while parent unit in fight state
if self.attacking and self.parentunit.collide:
if self.charge_momentum == 1 and (
self.frontline or self.parentunit.attackmode == 2) and self.parentunit.attackmode != 1: # attack to nearest target instead
if self.melee_target is None and self.parentunit.attack_target is not None:
self.melee_target = self.parentunit.attack_target.subunit_sprite[0]
if self.melee_target is not None:
if self.close_target is None: # movement queue is empty regenerate new one
self.close_target = self.find_close_target(self.melee_target.parentunit.subunit_sprite) # find new close target
if self.close_target is not None: # found target to fight
if self not in self.gamebattle.combatpathqueue:
self.gamebattle.combatpathqueue.append(self)
else: # no target to fight move back to command pos first)
self.base_target = self.attack_target.base_pos
self.new_angle = self.setrotate()
if self.melee_target.parentunit.state != 100:
if self.movetimer == 0:
self.movetimer = 0.1 # recalculate again in 10 seconds if not in fight
# if len(self.same_front) != 0 and len(self.enemy_front) == 0: # collide with friend try move to base_target first before enemy
# self.combat_move_queue = [] # clean queue since the old one no longer without collide
else:
self.movetimer += dt
if len(self.enemy_front) != 0 or len(self.enemy_side) != 0: # in fight, stop timer
self.movetimer = 0
elif self.movetimer > 10 or len(self.combat_move_queue) == 0: # # time up, or no path. reset path
self.movetimer = 0
self.close_target = None
if self in self.gamebattle.combatpathqueue:
self.gamebattle.combatpathqueue.remove(self)
elif len(self.combat_move_queue) > 0: # no collide move to enemy
self.base_target = pygame.Vector2(self.combat_move_queue[0])
self.new_angle = self.setrotate()
else: # whole targeted enemy unit destroyed, reset target and state
self.melee_target = None
self.close_target = None
if self in self.gamebattle.combatpathqueue:
self.gamebattle.combatpathqueue.remove(self)
self.attack_target = None
self.base_target = self.command_target
self.new_angle = self.setrotate()
self.new_angle = self.parentunit.angle
self.state = 0
elif self.attacking is False: # not in fight anymore, rotate and move back to original position
self.melee_target = None
self.close_target = None
if self in self.gamebattle.combatpathqueue:
self.gamebattle.combatpathqueue.remove(self)
self.attack_target = None
self.base_target = self.command_target
self.new_angle = self.parentunit.angle
self.state = 0
if self.state != 10 and self.magazine_left > 0 and self.parentunit.fireatwill == 0 and (self.arcshot or self.frontline) and \
self.charge_momentum == 1: # Range attack when parentunit in melee state with arcshot
self.state = 11
if self.parentunit.near_target != {} and (self.attack_target is None or self.attack_pos == 0):
self.find_shooting_target(parentstate)
# ^ End melee check
else: # range attack
self.melee_target = None
self.close_target = None
if self in self.gamebattle.combatpathqueue:
self.gamebattle.combatpathqueue.remove(self)
self.attack_target = None
self.combat_move_queue = []
# v Range attack function
if parentstate == 11: # Unit in range attack state
self.state = 0 # Default state at idle
if (self.magazine_left > 0 or self.ammo_now > 0) and self.attack_pos != 0 and \
self.shootrange >= self.attack_pos.distance_to(self.base_pos):
self.state = 11 # can shoot if have magazine_left and in shoot range, enter range combat state
elif self.magazine_left > 0 and self.parentunit.fireatwill == 0 and \
(self.state == 0 or (self.state not in (95, 96, 97, 98, 99) and
parentstate in (1, 2, 3, 4, 5, 6) and self.shootmove)): # Fire at will
if self.parentunit.near_target != {} and self.attack_target is None:
self.find_shooting_target(parentstate) # shoot nearest target
if self.state in (11, 12, 13) and self.magazine_left > 0 and self.ammo_now == 0: # reloading magazine_left
self.reload_time += dt
if self.reload_time >= self.reload:
self.ammo_now = self.magazine_size
self.magazine_left -= 1
self.reload_time = 0
self.stamina = self.stamina - (dt * 2) # use stamina while reloading
# ^ End range attack function
# v Combat action related
if combattimer >= 0.5: # combat is calculated every 0.5 second in game time
if self.state == 10: # if melee combat (engaging anyone on any side)
collidelist = [subunit for subunit in self.enemy_front]
for subunit in collidelist:
anglecheck = abs(self.angle - subunit.angle) # calculate which side arrow hit the subunit
if anglecheck >= 135: # front
hitside = 0
elif anglecheck >= 45: # side
hitside = 1
else: # rear
hitside = 2
self.dmgcal(subunit, 0, hitside, self.gamebattle.troop_data.status_list, combattimer)
self.stamina = self.stamina - (combattimer * 5)
elif self.state in (11, 12, 13): # range combat
if type(self.attack_target) == int: # For fire at will, which attacktarget is int
allunitindex = self.gamebattle.allunitindex
if self.attack_target in allunitindex: # if the attack base_target still alive (if dead it would not be in index list)
self.attack_target = self.gamebattle.allunitlist[
allunitindex.index(self.attack_target)] # change attack_target index into sprite
else: # enemy dead
self.attack_pos = 0 # reset attack_pos to 0
self.attack_target = None # reset attack_target to 0
for target in list(self.parentunit.near_target.values()): # find other nearby base_target to shoot
if target in allunitindex: # check if base_target alive
self.attack_pos = target[1]
self.attack_target = target[1]
self.attack_target = self.gamebattle.allunitlist[allunitindex.index(self.attack_target)]
break # found new base_target break loop
elif self.attack_target is None:
self.attack_target = self.parentunit.attack_target
if self.ammo_now > 0 and ((self.attack_target is not None and self.attack_target.state != 100) or
(self.attack_target is None and self.attack_pos != 0)) \
and (self.arcshot or (self.arcshot is False and self.parentunit.shoothow != 1)):
# can shoot if reload finish and base_target existed and not dead. Non arcshot cannot shoot if forbidded
# TODO add line of sight for range attack
rangeattack.RangeArrow(self, self.base_pos.distance_to(self.attack_pos), self.shootrange, self.zoom) # Shoot
self.ammo_now -= 1 # use 1 magazine_left in magazine
elif self.attack_target is not None and self.attack_target.state == 100: # if base_target die when it about to shoot
self.parentunit.range_combat_check = False
self.parentunit.attack_target = 0 # reset range combat check and base_target
# ^ End combat related
if parentstate != 10: # reset base_target every update to command base_target outside of combat
if self.base_target != self.command_target:
self.base_target = self.command_target
if parentstate == 0:
self.new_angle = self.setrotate()
elif self.base_pos == self.base_target and self.angle != self.parentunit.angle: # reset angle
self.new_angle = self.setrotate()
self.new_angle = self.parentunit.angle
# v Rotate Function
if self.angle != self.new_angle:
self.rotatecal = abs(self.new_angle - self.angle) # amount of angle left to rotate
self.rotatecheck = 360 - self.rotatecal # rotate distance used for preventing angle calculation bug (pygame rotate related)
self.radians_angle = math.radians(360 - self.angle) # for allside rotate
if self.angle < 0: # negative angle (rotate to left side)
self.radians_angle = math.radians(-self.angle)
rotatetiny = self.rotatespeed * dt # rotate little by little according to time
if self.new_angle > self.angle: # rotate to angle more than the current one
if self.rotatecal > 180: # rotate with the smallest angle direction
self.angle -= rotatetiny
self.rotatecheck -= rotatetiny
if self.rotatecheck <= 0:
self.angle = self.new_angle # if rotate pass base_target angle, rotate to base_target angle
else:
self.angle += rotatetiny
if self.angle > self.new_angle:
self.angle = self.new_angle # if rotate pass base_target angle, rotate to base_target angle
elif self.new_angle < self.angle: # rotate to angle less than the current one
if self.rotatecal > 180: # rotate with the smallest angle direction
self.angle += rotatetiny
self.rotatecheck -= rotatetiny
if self.rotatecheck <= 0:
self.angle = self.new_angle # if rotate pass base_target angle, rotate to base_target angle
else:
self.angle -= rotatetiny
if self.angle < self.new_angle:
self.angle = self.new_angle # if rotate pass base_target angle, rotate to base_target angle
self.rotate() # rotate sprite to new angle
self.make_front_pos() # generate new pos related to side
self.front_height = self.gamemapheight.getheight(self.front_pos)
# ^ End rotate
# v Move function to given base_target position
revertmove = True # revert move check for in case subunit still need to rotate before moving
if parentstate == 0 or self.parentunit.revert or (self.angle != self.parentunit.angle and self.parentunit.moverotate is False):
revertmove = False
if (self.base_pos != self.base_target or self.charge_momentum > 1) and \
(revertmove or self.angle == self.new_angle): # cannot move if unit still need to rotate
nocolide_check = False # can move if front of unit not collided
if (((self.parentunit.collide is False or self.frontline is False) or parentstate == 99)
or (parentstate == 10 and ((self.frontline or self.parentunit.attackmode == 2) and self.parentunit.attackmode != 1)
or self.charge_momentum > 1)):
nocolide_check = True
enemycollide_check = False # for chance to move or charge through enemy
if len(collidelist) > 0:
enemycollide_check = True
if self.state in (96, 98, 99): # escape
enemycollide_check = False
nocolide_check = True # bypass collide
elif self.chargeskill in self.skill_effect and random.randint(0, 1) == 0: # chance to charge through
enemycollide_check = False
if self.stamina > 0 and nocolide_check and enemycollide_check is False and \
(len(self.same_front) == 0 and len(self.friend_front) == 0 or self.state in (96, 98, 99)):
if self.chargeskill in self.skill_effect and self.base_pos == self.base_target and parentstate == 10:
new_target = self.front_pos - self.base_pos # keep charging pass original target until momentum run out
self.base_target = self.base_target + new_target
self.command_target = self.base_target
move = self.base_target - self.base_pos
move_length = move.length() # convert length
if move_length > 0: # movement length longer than 0.1, not reach base_target yet
move.normalize_ip()
if parentstate in (1, 3, 5, 7): # walking
speed = self.parentunit.walkspeed # use walk speed
self.walk = True
elif parentstate in (10, 99): # run with its own speed instead of uniformed run
speed = self.speed / 15 # use its own speed when broken
self.run = True
else: # self.state in (2, 4, 6, 10, 96, 98, 99), running
speed = self.parentunit.runspeed # use run speed
self.run = True
if self.chargeskill in self.skill_effect: # speed gradually decrease with momentum during charge
speed = speed * self.charge_momentum / 8
if self.collide_penalty: # reduce speed during moving through another unit
speed = speed / 2
move = move * speed * dt
newmove_length = move.length()
newpos = self.base_pos + move
if speed > 0 and (self.state in (98, 99) or (self.state not in (98, 99) and
(0 < newpos[0] < 999 and 0 < newpos[1] < 999))):
# cannot go pass map unless in retreat state
if newmove_length <= move_length: # move normally according to move speed
self.base_pos = newpos
self.pos = self.base_pos * self.zoom
self.rect.center = list(int(v) for v in self.pos) # list rect so the sprite gradually move to position
if self.stamina != infinity:
if self.walk:
self.stamina = self.stamina - (dt * 2)
elif self.run:
self.stamina = self.stamina - (dt * 5)
else: # move length pass the base_target destination, set movement to stop exactly at base_target
move = self.base_target - self.base_pos # simply change move to whatever remaining distance
self.base_pos += move # adjust base position according to movement
self.pos = self.base_pos * self.zoom
self.rect.center = self.pos # no need to do list
if len(self.combat_move_queue) > 0 and self.base_pos.distance_to(
pygame.Vector2(self.combat_move_queue[0])) < 0.1: # reach the current queue point, remove from queue
self.combat_move_queue = self.combat_move_queue[1:]
self.make_front_pos()
self.make_pos_range()
self.terrain, self.feature = self.getfeature(self.base_pos,
self.gamemap) # get new terrain and feature at each subunit position
self.height = self.gamemapheight.getheight(self.base_pos) # get new height
self.front_height = self.gamemapheight.getheight(self.front_pos)
self.last_pos = self.base_pos
if self.unit_leader and newmove_length > 0:
if self.parentunit.moverotate is False:
self.parentunit.base_pos += move
frontpos = (self.parentunit.base_pos[0],
(self.parentunit.base_pos[1] - self.parentunit.base_height_box)) # find front position
self.parentunit.front_pos = rotationxy(self.parentunit.base_pos, frontpos, self.parentunit.radians_angle)
numberpos = (self.parentunit.base_pos[0] - self.parentunit.base_width_box,
(self.parentunit.base_pos[1] + self.parentunit.base_height_box))
self.parentunit.number_pos = rotationxy(self.parentunit.base_pos, numberpos, self.parentunit.radians_angle)
self.parentunit.truenumber_pos = self.parentunit.number_pos * (
11 - self.parentunit.zoom) # find new position for troop number text
else: # Stopping subunit when reach base_target
self.state = 0 # idle
# ^ End move function
# v Morale check
if self.max_morale != infinity:
if self.base_morale < self.max_morale:
if self.morale <= 10: # Enter retreat state when morale reach 0
if self.state not in (98, 99):
self.state = 98 # retreat state
maxrandom = 1 - (self.mental / 100)
if maxrandom < 0:
maxrandom = 0
self.moraleregen -= random.uniform(0, maxrandom) # morale regen slower per broken state
if self.moraleregen < 0: # begin checking broken state
self.state = 99 # Broken state
self.change_leader("broken")
cornerlist = [[0, self.base_pos[1]], [1000, self.base_pos[1]], [self.base_pos[0], 0], [self.base_pos[0], 1000]]
whichcorner = [self.base_pos.distance_to(cornerlist[0]), self.base_pos.distance_to(cornerlist[1]),
self.base_pos.distance_to(cornerlist[2]),
self.base_pos.distance_to(cornerlist[3])] # find closest map corner to run to
foundcorner = whichcorner.index(min(whichcorner))
self.base_target = pygame.Vector2(cornerlist[foundcorner])
self.command_target = self.base_target
self.new_angle = self.setrotate()
for subunit in self.parentunit.subunit_sprite:
subunit.base_morale -= (
15 * subunit.mental) # reduce morale of other subunit, creating panic when seeing friend panic and may cause mass panic
if self.morale < 0:
self.morale = 0 # morale cannot be lower than 0
if self.state not in (95, 99) and parentstate not in (10, 99): # If not missing gamestart leader can replenish morale
self.base_morale += (dt * self.staminastatecal * self.moraleregen) # Morale replenish based on stamina
if self.base_morale < 0: # morale cannot be negative
self.base_morale = 0
elif self.base_morale > self.max_morale:
self.base_morale -= dt # gradually reduce morale that exceed the starting max amount
if self.state == 95: # disobey state, morale gradually decrease until recover
self.base_morale -= dt * self.mental
elif self.state == 98:
if parentstate not in (98, 99):
self.unit_health -= (dt * 100) # Unit begin to desert if retreating but parentunit not retreat/broken
if self.moralestate > 0.2:
self.state = 0 # Reset state to 0 when exit retreat state
# ^ End morale check
# v Hp and stamina regen
if self.stamina != infinity:
if self.stamina < self.max_stamina:
if self.stamina <= 0: # Collapse and cannot act
self.stamina = 0
self.status_effect[105] = self.status_list[105].copy() # receive collapse status
self.stamina = self.stamina + (dt * self.staminaregen) # regen
else: # stamina cannot exceed the max stamina
self.stamina = self.max_stamina
if self.unit_health != infinity:
if self.hpregen > 0 and self.unit_health % self.troop_health != 0: # hp regen cannot ressurect troop only heal to max hp
alivehp = self.troop_number * self.troop_health # max hp possible for the number of alive subunit
self.unit_health += self.hpregen * dt # regen hp back based on time and regen stat
if self.unit_health > alivehp:
self.unit_health = alivehp # Cannot exceed health of alive subunit (exceed mean resurrection)
elif self.hpregen < 0: # negative regen can kill
self.unit_health += self.hpregen * dt # use the same as positive regen (negative regen number * dt will reduce hp)
remain = self.unit_health / self.troop_health
if remain.is_integer() is False: # always round up if there is decimal number
remain = int(remain) + 1
else:
remain = int(remain)
wound = random.randint(0, (self.troop_number - remain)) # chance to be wounded instead of dead
self.gamebattle.death_troopnumber[self.team] += self.troop_number - remain - wound
self.gamebattle.wound_troopnumber[self.team] += wound
self.troop_number = remain # Recal number of troop again in case some die from negative regen
if self.unit_health < 0:
self.unit_health = 0 # can't have negative hp
elif self.unit_health > self.max_health:
self.unit_health = self.max_health # hp can't exceed max hp (would increase number of troop)
if self.oldlasthealth != self.unit_health:
remain = self.unit_health / self.troop_health
if remain.is_integer() is False: # always round up if there is decimal number
remain = int(remain) + 1
else:
remain = int(remain)
wound = random.randint(0, (self.troop_number - remain)) # chance to be wounded instead of dead
self.gamebattle.death_troopnumber[self.team] += self.troop_number - remain - wound
if self.state in (98, 99) and len(self.enemy_front) + len(
self.enemy_side) > 0: # fleeing or broken got captured instead of wound
self.gamebattle.capture_troopnumber[self.team] += wound
else:
self.gamebattle.wound_troopnumber[self.team] += wound
self.troop_number = remain # Recal number of troop again in case some die from negative regen
# v Health bar
healthlist = (self.health75, self.health50, self.health25, 0)
for index, health in enumerate(healthlist):
if self.unit_health > health:
if self.last_health_state != abs(4 - index):
self.image_original3.blit(self.images[index + 1], self.health_image_rect)
self.imageblock_original.blit(self.images[index + 1], self.health_imageblock_rect)
self.imageblock.blit(self.imageblock_original, self.corner_image_rect)
self.last_health_state = abs(4 - index)
self.zoomscale()
break
# ^ End Health bar
self.oldlasthealth = self.unit_health
# v Stamina bar
if self.old_last_stamina != self.stamina:
staminalist = (self.stamina75, self.stamina50, self.stamina25, self.stamina5, -1)
for index, stamina in enumerate(staminalist):
if self.stamina >= stamina:
if self.last_stamina_state != abs(4 - index):
# if index != 3:
self.image_original3.blit(self.images[index + 6], self.stamina_image_rect)
self.zoomscale()
self.imageblock_original.blit(self.images[index + 6], self.stamina_imageblock_rect)
self.imageblock.blit(self.imageblock_original, self.corner_image_rect)
self.last_stamina_state = abs(4 - index)
break
self.old_last_stamina = self.stamina
# ^ End stamina bar
if self.state in (98, 99) and (self.base_pos[0] <= 0 or self.base_pos[0] >= 999 or
self.base_pos[1] <= 0 or self.base_pos[1] >= 999): # remove when unit move pass map border
self.state = 100 # enter dead state
self.gamebattle.flee_troopnumber[self.team] += self.troop_number # add number of troop retreat from battle
self.troop_number = 0
self.gamebattle.battlecamera.remove(self)
if self.troop_number <= 0: # enter dead state
self.state = 100 # enter dead state
self.image_original3.blit(self.images[5], self.health_image_rect) # blit white hp bar
self.imageblock_original.blit(self.images[5], self.health_imageblock_rect)
self.zoomscale()
self.last_health_state = 0
self.skill_cooldown = {} # remove all cooldown
self.skill_effect = {} # remove all skill effects
self.imageblock.blit(self.imageblock_original, self.corner_image_rect)
self.red_border = True # to prevent red border appear when dead
self.parentunit.deadchange = True
if self in self.gamebattle.battlecamera:
self.gamebattle.battlecamera.change_layer(sprite=self, new_layer=1)
self.gamebattle.allsubunitlist.remove(self)
self.parentunit.subunit_sprite.remove(self)
for subunit in self.parentunit.armysubunit.flat: # remove from index array
if subunit == self.gameid:
self.parentunit.armysubunit = np.where(self.parentunit.armysubunit == self.gameid, 0, self.parentunit.armysubunit)
break
self.change_leader("die")
self.gamebattle.eventlog.addlog([0, str(self.board_pos) + " " + str(self.name)
+ " in " + self.parentunit.leader[0].name
+ "'s parentunit is destroyed"], [3]) # add log to say this subunit is destroyed in subunit tab
self.enemy_front = [] # reset collide
self.enemy_side = []
self.friend_front = []
self.same_front = []
self.fullmerge = []
self.collide_penalty = False
def rotate(self):
    """Rotate this subunit's sprite image to its current angle.

    Used when a subunit can change facing independently of its parent
    unit; re-applies the selection overlay when the unit is selected
    and alive (state != 100 means not dead).
    """
    angle = self.angle
    rotated = pygame.transform.rotate(self.image_original, angle)
    if self.state != 100 and self.parentunit.selected:
        self.selectedimage = pygame.transform.rotate(self.selectedimage_original, angle)
        rotated.blit(self.selectedimage, self.selectedimagerect)
    self.image = rotated
    self.rect = self.image.get_rect(center=self.pos)
def combat_pathfind(self):
    """Compute an A* path from this subunit to ``self.close_target``.

    Works on a cropped copy of the battle occupancy array (roughly a
    10x10 window around the two subunits) for speed, stores the result
    in ``self.combat_move_queue`` and falls back to walking straight at
    the target when the pathfinder returns nothing.
    """
    # v Pathfinding
    self.combat_move_queue = []
    movearray = self.gamebattle.subunitposarray.copy()
    intbasetarget = (int(self.close_target.base_pos[0]), int(self.close_target.base_pos[1]))
    for y in self.close_target.posrange[0]:
        for x in self.close_target.posrange[1]:
            movearray[x][y] = 100  # mark the enemy sprite's own cells walkable so the path can reach it
    intbasepos = (int(self.base_pos[0]), int(self.base_pos[1]))
    for y in self.posrange[0]:
        for x in self.posrange[1]:
            movearray[x][y] = 100  # mark this sub-unit's own cells walkable so the path can start
    # Crop the 1000x1000 map to a small window covering both endpoints (+5 cell margin, clamped to map edges)
    startpoint = (min([max(0, intbasepos[0] - 5), max(0, intbasetarget[0] - 5)]),  # start point of new smaller array
                  min([max(0, intbasepos[1] - 5), max(0, intbasetarget[1] - 5)]))
    endpoint = (max([min(999, intbasepos[0] + 5), min(999, intbasetarget[0] + 5)]),  # end point of new array
                max([min(999, intbasepos[1] + 5), min(999, intbasetarget[1] + 5)]))
    movearray = movearray[startpoint[1]:endpoint[1]]  # cut 1000x1000 array into smaller one by row
    movearray = [thisarray[startpoint[0]:endpoint[0]] for thisarray in movearray]  # cut by column
    grid = Grid(matrix=movearray)
    grid.cleanup()
    # Node coordinates are relative to the cropped window
    start = grid.node(intbasepos[0] - startpoint[0], intbasepos[1] - startpoint[1])  # start point
    end = grid.node(intbasetarget[0] - startpoint[0], intbasetarget[1] - startpoint[1])  # end point
    finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
    path, runs = finder.find_path(start, end, grid)
    path = [(thispath[0] + startpoint[0], thispath[1] + startpoint[1]) for thispath in path]  # remake pos into actual map pos
    path = path[4:]  # remove some starting path that may clip with friendly sub-unit sprite
    self.combat_move_queue = path  # add path into combat movement queue
    if len(self.combat_move_queue) < 1:  # simply try walk to target anyway if pathfinder return empty
        self.combat_move_queue = [self.close_target.base_pos]
    # ^ End path finding
def delete(self, local=False):
    """Drop references to other game objects when this subunit is removed.

    With ``local=True`` only dumps local variables for debugging;
    otherwise deletes cross-references so garbage collection can reclaim
    linked objects, and removes this subunit from the pathfinding queue.
    """
    if local:
        print(locals())
    else:
        for attribute in ("parentunit", "leader", "wholastselect",
                          "attack_target", "melee_target", "close_target"):
            delattr(self, attribute)
        path_queue = self.gamebattle.combatpathqueue
        if self in path_queue:
            path_queue.remove(self)
|
11497544
|
import pyodbc
from infrastructure.connection.database.connectors.DatabaseConnector import DatabaseConnector
from models.configs.DatabaseConfig import DatabaseConfig
class MssqlDbConnector(DatabaseConnector):
    """DatabaseConnector implementation for Microsoft SQL Server via pyodbc."""

    def __init__(self, database_config: DatabaseConfig):
        """Store the config, force the ODBC 17 driver and build the DSN string."""
        self.database_config: DatabaseConfig = database_config
        self.database_config.driver = 'ODBC Driver 17 for SQL Server'
        self.connection_string = 'DRIVER={%s};SERVER=%s;DATABASE=%s;UID=%s;PWD=%s' % (
            self.database_config.driver, self.database_config.host, self.database_config.database,
            self.database_config.username, self.database_config.password)
        self.connection = None
        self.cursor = None

    def connect(self):
        """Open the pyodbc connection and prepare a cursor for wide-char parameters."""
        self.connection = pyodbc.connect(self.connection_string)
        self.cursor = self.connection.cursor()
        self.cursor.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)])

    def disconnect(self):
        """Best-effort close of cursor and connection; never raises."""
        try:
            if self.cursor is not None:
                self.cursor.close()
            if self.connection is not None:
                self.connection.close()
        except Exception:
            pass

    def get_connection(self):
        """Return the raw pyodbc connection (None before connect())."""
        return self.connection

    def execute_many(self, query, data):
        """Bulk-execute *query* for every row of *data* and return the rowcount.

        Tries the fast_executemany path first; on any failure rolls back
        and retries once on the slow path.  If the retry also fails the
        transaction is rolled back, the cursor is closed and the error
        re-raised.
        """
        self.cursor.fast_executemany = True
        try:
            self.cursor.executemany(query, data)
            self.connection.commit()
            return self.cursor.rowcount
        except Exception:
            try:
                self.connection.rollback()
                self.cursor.fast_executemany = False
                self.cursor.executemany(query, data)
                self.connection.commit()
                return self.cursor.rowcount
            except Exception:
                self.connection.rollback()
                self.cursor.close()
                raise

    def get_target_query_indexer(self):
        """Return the SQL parameter placeholder used by this dialect ('?' for ODBC)."""
        return '?'

    def prepare_data(self, data):
        """Return *data* unchanged (hook for per-dialect value transliteration)."""
        return data
|
11497545
|
def extractWwwFringeoctopusCom(item):
    '''
    Parser for 'www.fringeoctopus.com'

    Returns None for previews / untagged chapters, False when no known
    series tag matches, otherwise a release message for the first
    matching series tag.
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # (tag on the site, canonical series name, translation type) — first match wins
    tagmap = [
        ('TWQQF', 'Transmigration with QQ Farm', 'translated'),
        ('Black Belly Wife', 'Black Belly Wife', 'translated'),
        ('BBW', 'Black Belly Wife', 'translated'),
        ('Transmigrating with a Cleaver', 'Transmigrating with a Cleaver', 'translated'),
        ('resplendent farming apothecary', 'resplendent farming apothecary', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    hit = next(((name, tl_type) for tagname, name, tl_type in tagmap
                if tagname in item['tags']), None)
    if hit is None:
        return False
    name, tl_type = hit
    return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
|
11497589
|
import serial
import time
import os
# Opens com port and bin file to be read
ser = serial.Serial('COM6', baudrate = 9600, timeout = None)
F = open("bin1.bin", "rb")
# Saves fileSize as int that fits into a byte.
# NOTE(review): assumes the file size is a multiple of 256 and below
# 65536 bytes, otherwise data is truncated -- confirm with the protocol.
fileSize = int(os.path.getsize("bin1.bin")/256)
# Wait for Arduino (boards typically reset when the serial port opens)
time.sleep(2)
def main():
    """Send the file over serial, read back the echo, then release resources."""
    write()
    read()
    F.close()
    ser.close()
def read():
    """Read the echoed file back from the serial port and save it to bin2.bin.

    Blocks until exactly ``fileSize * 256`` bytes have arrived (the port
    was opened with no timeout), then prints the payload as hex.
    """
    expected = fileSize * 256
    with open("bin2.bin", "wb") as out:
        data = ser.read(expected)
        out.write(data)
    print(bytes(data).hex(), end=' ')
def write():
    """Send the file size, then stream the file in 1024-byte chunks.

    After each chunk the Arduino's acknowledgement line is read and
    printed; buffers are flushed before and after the transfer.
    """
    # Send file size (in 256-byte units) and echo the confirmed byte count
    ser.write(fileSize.to_bytes(1, 'big'))
    print(256 * int.from_bytes(ser.read(), 'big'))
    time.sleep(1)
    ser.reset_input_buffer()
    ser.reset_output_buffer()
    while True:
        chunk = F.read(1024)
        if not chunk:
            break
        ser.write(bytearray(chunk))
        print(bytes(chunk).hex())
        print(int(ser.readline()))  # per-chunk acknowledgement from the Arduino
    ser.reset_input_buffer()
    ser.reset_output_buffer()
# Run the transfer only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
11497594
|
import paho.mqtt.client as mqtt #import the client1
import time
import random
import socket
import os
import sys
import s3upload as s3
uuidCoral = str(os.getenv('MAC1'))  # device identifier from the MAC1 env var ('None' if unset)
Active = True  # main-loop flag; cleared by the on_disconnect callback below
Unit = 'Local'  # selects which entry of `config` is used
#######################################################
##                Initialize Variables               ##
#######################################################
config = {}
# [broker address, topic suffix, human-readable description]
config['Local'] = ["127.0.0.1", "/upload", "Receive Commands on MQTT"]
hostname=socket.gethostname()
timeTrigger = 0  # next heartbeat time (epoch seconds); 0 forces an immediate heartbeat
ID = str(random.randint(1,100001))  # random client id so parallel instances do not clash
#######################################################
##           Local MQTT Callback Function            ##
#######################################################
def on_message_local(client, userdata, message):
    """paho-mqtt message callback: log the topic and UTF-8 decoded payload."""
    text = message.payload.decode("utf-8")
    print('Message Received: ' + message.topic + ' | ' + text)
def on_disconnect(client, userdata, rc):
    """paho-mqtt disconnect callback: clear the flag that keeps the main loop running."""
    global Active
    Active = False
#############################################
##        Initialize Local MQTT Bus        ##
#############################################
broker_address=config[Unit][0]
local_topic= '/teachable-camera'+config[Unit][1]
print("connecting to MQTT broker at "+broker_address+", channel '"+local_topic+"'")
clientLocal = mqtt.Client("S3-Upload-"+ID)  # create new client instance
clientLocal.on_message = on_message_local  # attach message callback
clientLocal.on_disconnect = on_disconnect  # attach disconnect callback
clientLocal.connect(broker_address)  # connect to broker
clientLocal.loop_start()  # start the network loop in a background thread
clientLocal.subscribe(local_topic+"/receive/#")
clientLocal.publish(local_topic+"/registration","S3-Upload-"+ID+" Receiver Registration")
#############################################
##                Main  Loop               ##
#############################################
while Active:  # runs until on_disconnect clears the flag
    if timeTrigger < time.mktime(time.gmtime()):
        # publish a heartbeat at most every 10 seconds
        timeTrigger = time.mktime(time.gmtime()) + 10
        clientLocal.publish(local_topic+"/Heartbeat","Lamp-Control-"+ID+" Heartbeat")
    for file in os.listdir(s3.image_path):
        # NOTE(review): split('.')[1] raises IndexError for names without a
        # dot and misclassifies 'a.b.jpeg' -- presumably filenames are
        # always '<name>.jpeg'; confirm with the producer of image_path.
        if file.split('.')[1] == 'jpeg':
            file_full_path = os.path.join(s3.image_path, file)
            print(file_full_path)
            # only upload once the matching metadata json exists
            if os.path.exists(file_full_path.replace('jpeg','json')):
                time.sleep(0.5) # bug fix - adding delay to ensure the json file has been fully written
                response = s3.uploadImage(file_full_path, file_full_path.replace('jpeg','json'))
                clientLocal.publish(local_topic, response)
    time.sleep(0.1)  # yield between polling passes
|
11497617
|
import os
import mimetypes
import base64
def sizeof_fmt(num, suffix='B'):
    """Format a byte count as a human-readable string with binary prefixes.

    Plain bytes get no decimals ('512 B'); every larger prefix gets one
    decimal place ('2.0 KiB').  Falls through to 'Yi' for huge values.
    """
    prefixes = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    for prefix in prefixes:
        if abs(num) < 1024.0:
            template = '%3.0f %s%s' if prefix == '' else '%3.1f %s%s'
            return template % (num, prefix, suffix)
        num /= 1024.0
    return '%.1f %s%s' % (num, 'Yi', suffix)
def get_icon_by_mime(mime, iconset='papirus', isDir=False):
    """Return a base64-encoded SVG icon for *mime*, or None when no icon file exists.

    Candidates are tried from most to least specific ('type/subtype',
    'subtype', 'type', 'default'); directories always map to
    'inode/directory'.
    """
    sep = os.path.sep
    if isDir:
        mime = 'inode/directory'
    if mime:
        major, minor = mime.split('/')[0], mime.split('/')[1]
        candidates = [major + sep + minor, minor, major, 'default']
    else:
        candidates = ['default']
    base = os.path.dirname(__file__) + sep + '..' + sep + 'icons' + sep + iconset + sep
    for target in candidates:
        path = base + target + '.svg'
        if os.path.isfile(path):
            with open(path, 'rb') as handle:
                return base64.b64encode(handle.read()).decode()
class Entry(object):
    """Directory-listing entry: path, name, mime, size, mtime and icon for one file."""

    def __init__(self, file, root, base=os.path.sep, human=False, iconset='papirus'):
        """Collect filesystem metadata for *file* located under *root*.

        When *human* is true the size is pre-formatted via sizeof_fmt,
        otherwise it is the raw byte count.
        """
        full = root + os.path.sep + file
        self.path = base + full.lstrip('.*' + os.path.sep)
        self.name = os.path.basename(full)
        self.mime = mimetypes.guess_type(full)[0]
        size_bytes = os.path.getsize(full)
        self.size = sizeof_fmt(size_bytes) if human else size_bytes
        self.modified = os.path.getmtime(full)
        self.isDir = os.path.isdir(full)
        self.icon = get_icon_by_mime(self.mime, iconset=iconset, isDir=self.isDir)
|
11497640
|
import pytest
from brownie import accounts, reverts
from settings import *
# reset the chain after every test case
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
    """Autouse fixture: brownie's fn_isolation snapshots and reverts the chain per test."""
    pass
@pytest.fixture(scope='function')
def test_deploy_nft(nft_factory):
    """Deploy an NFT through the factory and verify the fee and bookkeeping.

    NOTE(review): this function is both collected as a test and used as a
    fixture by test_withdraw_fund; pytest discourages 'test_'-prefixed
    fixture names -- consider renaming (with its usage) in a follow-up.
    """
    name = "Non Fungible Token"
    symbol = "NFT"
    fee = 0.1 * TENPOW18  # deployment fee paid to the factory
    tx = nft_factory.deployNFT(name, symbol, {"from": accounts[0], "value": fee})
    assert "NFTDeployed" in tx.events  # deployment event emitted
    assert nft_factory.balance() == 0.1 * TENPOW18  # fee retained by the factory
    assert nft_factory.numberOfNFTs() == 1
def test_withdraw_fund(nft_factory, test_deploy_nft):
    """After a deployment, the owner can withdraw the accumulated fees."""
    nft_factory.withdrawFund({"from": accounts[0]})
    assert nft_factory.balance() == 0
|
11497660
|
class APIError(Exception):
    """Raised when the server returns an unexpected error."""
class ParametersError(Exception):
    """Raised when invalid parameters are used in a query."""
|
11497676
|
from pathlib import Path
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Executor
_DIM = 10  # embedding dimensionality shared by every test in this module
@pytest.fixture
def two_elem_index():
    """Return an l2 HnswlibSearcher pre-loaded with docs 'a' (all 1s) and 'b' (all 2s)."""
    index = HnswlibSearcher(dim=_DIM, metric='l2')
    da = DocumentArray(
        [
            Document(id='a', embedding=np.ones(_DIM) * 1.0),
            Document(id='b', embedding=np.ones(_DIM) * 2.0),
        ]
    )
    index.index(da, {})
    return index, da
def test_config():
    """The bundled config.yml must load into an executor using the cosine metric."""
    executor = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert executor.metric == 'cosine'
def test_empty_search():
    """Searching an empty index must attach no matches."""
    searcher = HnswlibSearcher(dim=_DIM)
    docs = DocumentArray([Document(embedding=np.random.normal(size=(_DIM,)))])
    searcher.search(docs, {})
    assert len(docs[0].matches) == 0
def test_index_no_docs():
    """Indexing None must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).index(None, {})
def test_index_empty_docs():
    """Indexing an empty DocumentArray must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).index(DocumentArray(), {})
def test_update_no_docs():
    """Updating with None must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).update(None, {})
def test_update_empty_docs():
    """Updating with an empty DocumentArray must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).update(DocumentArray(), {})
def test_search_no_docs():
    """Searching with None must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).search(None, {})
def test_searh_empty_docs():  # NOTE(review): 'searh' typo kept so the collected test name is stable
    """Searching with an empty DocumentArray must be a silent no-op."""
    HnswlibSearcher(dim=_DIM).search(DocumentArray(), {})
def test_index():
    """Indexing two batches with distinct ids must grow the index additively."""
    NUM_DOCS = 1000
    index = HnswlibSearcher(dim=_DIM)
    embeddings = np.random.normal(size=(NUM_DOCS, _DIM))
    da1 = DocumentArray([Document(embedding=emb) for emb in embeddings])
    da2 = DocumentArray([Document(embedding=emb) for emb in embeddings])
    index.index(da1, {})
    assert len(index._ids_to_inds) == NUM_DOCS
    assert index._index.element_count == NUM_DOCS
    assert set(index._ids_to_inds.keys()) == set(da1.get_attributes('id'))
    # same vectors but freshly generated ids -> must create new entries
    index.index(da2, {})
    assert len(index._ids_to_inds) == 2 * NUM_DOCS
    assert index._index.element_count == 2 * NUM_DOCS
    assert set(index._ids_to_inds.keys()) == set(da1.get_attributes('id')).union(
        da2.get_attributes('id')
    )
def test_index_with_update(two_elem_index):
    """index() on already-present ids must overwrite embeddings in place."""
    index, da = two_elem_index
    da_search = DocumentArray(
        [
            Document(embedding=np.ones(_DIM) * 1.1),
            Document(embedding=np.ones(_DIM) * 2.1),
        ]
    )
    # switch embeddings of a and b
    da[0].embedding = np.ones(_DIM) * 2.0
    da[1].embedding = np.ones(_DIM) * 1.0
    index.index(da, {})
    assert index._ids_to_inds == {'a': 0, 'b': 1}  # no new slots allocated
    assert index._index.element_count == 2
    index.search(da_search, {})
    # nearest neighbours now reflect the swapped embeddings
    assert [m.id for m in da_search[0].matches] == ['b', 'a']
    assert [m.id for m in da_search[1].matches] == ['a', 'b']
def test_index_wrong_dim():
    """Embeddings whose dimension mismatches the index must be rejected."""
    searcher = HnswlibSearcher(dim=10)
    bad_docs = DocumentArray(
        [Document(embedding=emb) for emb in np.random.normal(size=(2, 11))]
    )
    with pytest.raises(ValueError, match='Attempted to index'):
        searcher.index(bad_docs, {})
@pytest.mark.parametrize('limit', [5, 10])
@pytest.mark.parametrize(
    ['metric', 'is_distance'],
    [
        ('cosine', True),
        ('euclidean', True),
        ('inner_product', True),
        ('cosine', False),
        ('euclidean', False),
        ('inner_product', False),
    ],
)
def test_search_basic(limit: int, metric: str, is_distance: bool):
    """Search returns exactly `limit` known ids with monotonically ordered scores.

    Scores must be non-decreasing when they are distances and
    non-increasing when they are similarities, for every metric.
    """
    index = HnswlibSearcher(
        dim=_DIM, metric=metric, limit=limit, is_distance=is_distance
    )
    embeddings_ind = np.random.normal(size=(1000, _DIM))
    embeddings_search = np.random.normal(size=(10, _DIM))
    da_index = DocumentArray([Document(embedding=emb) for emb in embeddings_ind])
    da_search = DocumentArray([Document(embedding=emb) for emb in embeddings_search])
    index.index(da_index, {})
    index.search(da_search, {})
    indexed_ids = da_index.get_attributes('id')
    for d in da_search:
        ms = d.matches
        scores = [m.scores[metric].value for m in ms]  # NOTE(review): unused
        assert len(ms) == limit
        for m in ms:
            assert m.id in indexed_ids
        for i in range(len(ms) - 1):
            if not is_distance:
                assert ms[i].scores[metric].value >= ms[i + 1].scores[metric].value
            else:
                assert ms[i].scores[metric].value <= ms[i + 1].scores[metric].value
def test_topk_max():
    """Even with limit larger than the index size, at most element_count matches return."""
    searcher = HnswlibSearcher(dim=_DIM, limit=1000)
    docs = DocumentArray(
        [Document(embedding=emb) for emb in np.random.normal(size=(10, _DIM))]
    )
    searcher.index(docs, {})
    searcher.search(docs, {})
    for doc in docs:
        assert len(doc.matches) == 10
def test_search_quality():
    """Test that we get everything correct for a small index.

    With well-separated embeddings the full neighbour ordering is
    deterministic, and every document's nearest neighbour is itself
    (euclidean distance 0).
    """
    index = HnswlibSearcher(dim=_DIM, metric='euclidean')
    da = DocumentArray(
        [
            Document(id='a', embedding=np.ones(_DIM) * 1.1),
            Document(id='b', embedding=np.ones(_DIM) * 2.0),
            Document(id='c', embedding=np.ones(_DIM) * 4.0),
            Document(id='d', embedding=np.ones(_DIM) * 7.0),
            Document(id='e', embedding=np.ones(_DIM) * 11.0),
        ]
    )
    index.index(da, {})
    # Fix: pass the parameters dict like every other search() call in this
    # module does -- the bare `index.search(da)` relied on a default value
    # for `parameters` that the executor signature may not provide.
    index.search(da, {})
    matches_a = [m.id for m in da[0].matches]
    matches_b = [m.id for m in da[1].matches]
    matches_c = [m.id for m in da[2].matches]
    matches_d = [m.id for m in da[3].matches]
    matches_e = [m.id for m in da[4].matches]
    assert matches_a == ['a', 'b', 'c', 'd', 'e']
    assert matches_b == ['b', 'a', 'c', 'd', 'e']
    assert matches_c == ['c', 'b', 'a', 'd', 'e']
    assert matches_d == ['d', 'c', 'e', 'b', 'a']
    assert matches_e == ['e', 'd', 'c', 'b', 'a']
    for doc in da:
        assert doc.matches[0].scores['euclidean'].value == 0
def test_search_wrong_dim():
    """Query embeddings with a mismatched dimension must raise a ValueError."""
    searcher = HnswlibSearcher(dim=_DIM)
    indexed = DocumentArray(
        [Document(embedding=emb) for emb in np.random.normal(size=(1000, _DIM))]
    )
    queries = DocumentArray(
        [Document(embedding=emb) for emb in np.random.normal(size=(10, 17))]
    )
    searcher.index(indexed, {})
    with pytest.raises(ValueError, match='Query documents have embeddings'):
        searcher.search(queries, {})
def test_update(two_elem_index):
    """update() replaces embeddings for existing ids without allocating new slots."""
    index, da = two_elem_index
    da_search = DocumentArray(
        [
            Document(embedding=np.ones(_DIM) * 1.1),
            Document(embedding=np.ones(_DIM) * 2.1),
        ]
    )
    assert index._ids_to_inds == {'a': 0, 'b': 1}
    index.search(da_search, {})
    assert [m.id for m in da_search[0].matches] == ['a', 'b']
    assert [m.id for m in da_search[1].matches] == ['b', 'a']
    for d in da_search:
        d.pop('matches')  # clear matches so the second search starts fresh
    # switch embeddings of a and b
    da[0].embedding = np.ones(_DIM) * 2.0
    da[1].embedding = np.ones(_DIM) * 1.0
    index.update(da, {})
    assert index._ids_to_inds == {'a': 0, 'b': 1}  # same slots reused
    assert index._index.element_count == 2
    index.search(da_search, {})
    # results are now swapped, proving the vectors were updated in place
    assert [m.id for m in da_search[0].matches] == ['b', 'a']
    assert [m.id for m in da_search[1].matches] == ['a', 'b']
def test_update_ignore_non_existing(two_elem_index):
    """update() only touches already-indexed ids; a new id ('c') passed to
    update() is silently ignored rather than inserted."""
    index, da = two_elem_index
    da_search = DocumentArray(
        [
            Document(embedding=np.ones(_DIM) * 1.1),
            Document(embedding=np.ones(_DIM) * 2.1),
        ]
    )
    # switch embeddings of a and b, and add a new element - it should not get indexed
    da[0].embedding = np.ones(_DIM) * 2.0
    da[1].embedding = np.ones(_DIM) * 1.0
    da.append(Document(id='c', embedding=np.ones(_DIM) * 3.0))
    index.update(da, {})
    # 'c' was not added; only the two original ids remain.
    assert index._ids_to_inds == {'a': 0, 'b': 1}
    assert index._index.element_count == 2
    index.search(da_search, {})
    assert [m.id for m in da_search[0].matches] == ['b', 'a']
    assert [m.id for m in da_search[1].matches] == ['a', 'b']
def test_update_wrong_dim():
    """Updating indexed docs with wrong-dimension embeddings raises."""
    searcher = HnswlibSearcher(dim=_DIM)
    original_embs = np.random.normal(size=(10, _DIM))
    wrong_embs = np.random.normal(size=(10, 17))
    docs = DocumentArray([Document(embedding=e) for e in original_embs])
    searcher.index(docs, {})
    # Swap in embeddings of the wrong dimensionality before updating.
    for doc, new_emb in zip(docs, wrong_embs):
        doc.embedding = new_emb
    with pytest.raises(ValueError, match='Attempted to update vectors with dimension'):
        searcher.update(docs, {})
def test_delete(two_elem_index):
    """delete() removes the given ids from results; an id that was never
    indexed ('c') is accepted without error."""
    index, da = two_elem_index
    index.delete({'ids': ['a', 'c']})
    assert index._ids_to_inds == {'b': 1}
    # Even with a generous limit, only the surviving document comes back.
    index.search(da, {'limit': 10})
    assert len(da[0].matches) == 1
def test_delete_soft(two_elem_index):
    """Test that we do not overwrite deleted indices"""
    index, da = two_elem_index
    assert index._ids_to_inds == {'a': 0, 'b': 1}
    index.delete({'ids': ['b']})
    assert index._ids_to_inds == {'a': 0}
    # Soft delete: the underlying hnswlib slot is kept, not reclaimed.
    assert index._index.element_count == 2
    index.index(da[1:2], {})
    # Re-indexing 'b' gets a fresh slot (2) instead of reusing old slot 1.
    assert index._ids_to_inds == {'a': 0, 'b': 2}
    assert index._index.element_count == 3
def test_clear(two_elem_index):
    """clear() leaves the searcher with no ids and no indexed vectors."""
    searcher, _ = two_elem_index
    searcher.clear()
    assert not searcher._ids_to_inds
    assert searcher._index.element_count == 0
def test_dump(two_elem_index, tmp_path):
    """dump() writes the binary index and the id mapping to disk."""
    searcher, _ = two_elem_index
    searcher.dump({'dump_path': str(tmp_path)})
    for expected_file in ('index.bin', 'ids.json'):
        assert (tmp_path / expected_file).is_file()
def test_dump_no_path(two_elem_index):
    """Calling dump() without a dump_path parameter raises ValueError."""
    searcher, _ = two_elem_index
    with pytest.raises(ValueError, match='The `dump_path` must be provided'):
        searcher.dump()
def test_dump_load(tmp_path, two_elem_index):
    """A searcher constructed with dump_path restores ids, vectors, and
    search behaviour from a previous dump()."""
    index, da = two_elem_index
    index.dump({'dump_path': str(tmp_path)})
    # Rebuild from disk and verify state and search results survive.
    index = HnswlibSearcher(dim=_DIM, metric='l2', dump_path=tmp_path)
    assert index._ids_to_inds == {'a': 0, 'b': 1}
    assert index._index.element_count == 2
    index.search(da, {})
    assert da[0].matches.get_attributes('id') == ['a', 'b']
    assert da[1].matches.get_attributes('id') == ['b', 'a']
def test_status(two_elem_index):
    """status() reports active/indexed/deleted counts; a delete decrements
    active and increments deleted while indexed stays constant."""
    index, _ = two_elem_index
    status = index.status()[0]
    assert status.tags['count_active'] == 2
    assert status.tags['count_indexed'] == 2
    assert status.tags['count_deleted'] == 0
    index.delete({'ids': ['a']})
    status = index.status()[0]
    assert status.tags['count_active'] == 1
    assert status.tags['count_indexed'] == 2
    assert status.tags['count_deleted'] == 1
|
11497680
|
import tqdm
import cv2
import argparse
import numpy as np
import torch
import human_inst_seg
# this can be install by:
# pip install git+https://github.com/Project-Splinter/streamer_pytorch --upgrade
import streamer_pytorch as streamer
# CLI for the segmentation demo: pick exactly one input source (--camera,
# --images, or --videos), optionally loop file inputs, and optionally show
# the results in an OpenCV window (--vis).
parser = argparse.ArgumentParser(description='.')
parser.add_argument(
    '--camera', action="store_true")
parser.add_argument(
    '--images', default="", nargs="*")
parser.add_argument(
    '--videos', default="", nargs="*")
parser.add_argument(
    '--loop', action="store_true")
parser.add_argument(
    '--vis', action="store_true")
args = parser.parse_args()
def visulization(data):
    """Show the first image of the batch with its probability-averaged
    detection box drawn, using an OpenCV window.

    Args:
        data: (image, bboxes, probs) triple from the segmentation engine.
            image is NCHW with a mask/alpha channel at index 3 — assumes
            values normalized to roughly [-1, 1]; TODO confirm with engine.
    """
    image, bboxes, probs = data
    # Place RGB and RGB*mask side by side (concatenate along width, dim=3).
    image = torch.cat([
        image[:, 0:3], image[:, 0:3]*image[:, 3:4]], dim=3)
    probs = probs.unsqueeze(3)
    # Probability-weighted average of the candidate boxes per image.
    bboxes = (bboxes * probs).sum(dim=1, keepdim=True) / probs.sum(dim=1, keepdim=True)
    window = image[0].cpu().numpy().transpose(1, 2, 0)
    # Undo mean-0.5/std-0.5 normalization back to [0, 255].
    window = (window * 0.5 + 0.5) * 255.0
    window = np.uint8(window).copy()
    bbox = bboxes[0, 0, 0].cpu().numpy()
    # Draw the averaged box as (x1, y1)-(x2, y2).
    window = cv2.rectangle(
        window,
        (int(bbox[0]), int(bbox[1])),
        (int(bbox[2]), int(bbox[3])),
        (255,0,0), 2)
    window = cv2.cvtColor(window, cv2.COLOR_BGR2RGB)
    window = cv2.resize(window, (0, 0), fx=2, fy=2)
    cv2.imshow('window', window)
    cv2.waitKey(30)
# Instance-segmentation engine in inference (eval) mode.
seg_engine = human_inst_seg.Segmentation()
seg_engine.eval()
# Choose the input source from CLI flags; --loop repeats the file list many
# times so the stream behaves like an endless feed.
if args.camera:
    data_stream = streamer.CaptureStreamer()
elif len(args.videos) > 0:
    data_stream = streamer.VideoListStreamer(
        args.videos * (10000 if args.loop else 1))
elif len(args.images) > 0:
    data_stream = streamer.ImageListStreamer(
        args.images * (10000 if args.loop else 1))
loader = torch.utils.data.DataLoader(
    data_stream,
    batch_size=1,
    num_workers=1,
    pin_memory=False,
)
try:
    # no vis: ~ 50 fps
    for data in tqdm.tqdm(loader):
        outputs, bboxes, probs = seg_engine(data)
        if args.vis:
            visulization([outputs, bboxes, probs])
except Exception as e:
    # NOTE(review): broad catch keeps the demo from crashing (e.g. on
    # Ctrl-C or stream exhaustion) but hides real errors — consider narrowing.
    print (e)
del data_stream
|
11497717
|
import smtplib, ssl
class EmailAlert:
    """Send alert emails through an SMTP relay using STARTTLS."""

    # Class-level defaults; __init__ overrides them per instance.
    smtp_server = "mailrelay.tugraz.at"
    port = 587  # For starttls
    sender_email = "<EMAIL>"
    receiver_email = "<EMAIL>"
    password = "<PASSWORD>"

    def __init__(self, password, smtp_server="mailrelay.tugraz.at", port=587, sender_email="<EMAIL>", receiver_email="<EMAIL>"):
        self.smtp_server = smtp_server
        self.port = port
        self.sender_email = sender_email
        self.receiver_email = receiver_email
        self.password = password

    def sendAlert(self, message):
        """Deliver *message* from sender to receiver over a STARTTLS session."""
        tls_context = ssl.create_default_context()
        with smtplib.SMTP(self.smtp_server, self.port) as server:
            server.ehlo()  # Can be omitted
            server.starttls(context=tls_context)
            server.ehlo()  # Can be omitted
            server.login(self.sender_email, self.password)
            server.sendmail(self.sender_email, self.receiver_email, message)
|
11497736
|
import unittest
import torch
from torch import nn
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../experiments')))
import rgcn
class TestExps(unittest.TestCase):
    """Smoke tests for the experimental RGCN utilities."""

    def test_sumsparse(self):
        """Normalize sparse values by their column sums and print the result."""
        idx = torch.tensor([[0, 1, 2, 0], [0, 1, 2, 1]], dtype=torch.long)
        values = torch.tensor([1, 1, 1, 1], dtype=torch.float)
        normalized = values / rgcn.sum_sparse(idx.t(), values, (3, 3), row=False)
        print(normalized)
|
11497774
|
import pytest
from mugen.video.segments.VideoSegment import VideoSegment
from tests import DATA_PATH
@pytest.fixture
def shinsekai_segment() -> VideoSegment:
    """Fixture: a VideoSegment built from the bundled shinsekai.mp4 asset."""
    return VideoSegment(f'{DATA_PATH}/video/shinsekai.mp4')
|
11497780
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pyccc
import moldesign as mdt
from ..compute import packages
from ..utils import from_filepath
from .. import units as u
from ..utils import exports
class OpenMMPickleMixin(object):
    """Make objects that hold an OpenMM ``Simulation`` picklable.

    A Simulation cannot be pickled directly, so on serialization it is
    swapped for the (topology, system, integrator) triple needed to
    reconstruct it on load.
    """

    def __getstate__(self):
        state = dict(self.__dict__)
        if 'sim' in state:
            assert 'sim_args' not in state
            simulation = state.pop('sim')
            state['sim_args'] = (simulation.topology,
                                 simulation.system,
                                 simulation.integrator)
        return state

    def __setstate__(self, state):
        from simtk.openmm import app
        if 'sim_args' in state:
            assert 'sim' not in state
            state['sim'] = app.Simulation(*state.pop('sim_args'))
        self.__dict__.update(state)
# This is a factory for the MdtReporter class. It's here so that we don't have to import
# simtk.openmm.app at the module level
def MdtReporter(mol, report_interval):
    """Factory for an OpenMM reporter that records frames into an MDT
    Trajectory every ``report_interval`` steps.

    Implemented as a factory so simtk.openmm.app is only imported on call.
    """
    from simtk.openmm.app import StateDataReporter
    class MdtReporter(StateDataReporter):
        """
        We'll use this class to capture all the information we need about a trajectory
        It's pretty basic - the assumption is that there will be more processing on the client side
        """
        def __init__(self, mol, report_interval):
            self.mol = mol
            self.report_interval = report_interval
            self.trajectory = mdt.Trajectory(mol)
            self.annotation = None
            self.last_report_time = None
            self.logger = mdt.helpers.DynamicsLog()
        def __del__(self):
            try:
                super().__del__()
            except AttributeError:
                pass # suppress irritating error msgs
        def report_from_mol(self, **kwargs):
            # Record a frame from the molecule's current state rather than
            # waiting for the OpenMM step callback.
            self.mol.calculate()
            if self.annotation is not None:
                kwargs.setdefault('annotation', self.annotation)
            self.report(self.mol.energy_model.sim,
                        self.mol.energy_model.sim.context.getState(getEnergy=True,
                                                                   getForces=True,
                                                                   getPositions=True,
                                                                   getVelocities=True),
                        settime=self.mol.time)
        def report(self, simulation, state, settime=None):
            """ Callback for dynamics after the specified interval
            Args:
                simulation (simtk.openmm.app.Simulation): simulation to report on
                state (simtk.openmm.State): state of the simulation
                settime (optional): explicit time stamp; defaults to the
                    simulation state's own time
            """
            # TODO: make sure openmm masses are the same as MDT masses
            settime = settime if settime is not None else simtk2pint(state.getTime())
            report = dict(
                positions=simtk2pint(state.getPositions()),
                momenta=simtk2pint(state.getVelocities())*self.mol.dim_masses,
                forces=simtk2pint(state.getForces()),
                time=settime,
                vectors=simtk2pint(state.getPeriodicBoxVectors()),
                potential_energy=simtk2pint(state.getPotentialEnergy()))
            if self.annotation is not None: report['annotation'] = self.annotation
            if settime:
                self.last_report_time = report['time']
            self.trajectory.new_frame(properties=report)
            self.logger.print_step(self.mol, properties=report)
        def describeNextReport(self, simulation):
            """
            Returns:
                tuple: A five element tuple. The first element is the number of steps
                until the next report. The remaining elements specify whether
                that report will require positions, velocities, forces, and
                energies respectively.
            """
            steps = self.report_interval - simulation.currentStep % self.report_interval
            return (steps, True, True, True, True)
    return MdtReporter(mol, report_interval)
# Map simtk dimension names to pint units where the names differ between
# the two unit systems (used by simtk2pint below).
PINT_NAMES = {'mole': u.avogadro,
              'degree': u.degrees,
              'radian': u.radians,
              'elementary charge': u.q_e}
@exports
def simtk2pint(quantity, flat=False):
    """ Converts a quantity from the simtk unit system to the internal unit system

    Args:
        quantity (simtk.unit.quantity.Quantity): quantity to convert
        flat (bool): if True, flatten 3xN arrays to 3N

    Returns:
        mdt.units.MdtQuantity: converted to MDT unit system
    """
    from simtk import unit as stku
    mag = np.array(quantity._value)
    # Angles are handled specially: map them directly to pint angle units.
    if quantity.unit == stku.radian:
        return mag * u.radians
    if quantity.unit == stku.degree:
        return mag * u.degrees
    # Rebuild the unit expression dimension-by-dimension in pint.
    for dim, exp in itertools.chain(quantity.unit.iter_scaled_units(),
                                    quantity.unit.iter_top_base_units()):
        if dim.name in PINT_NAMES:
            pintunit = PINT_NAMES[dim.name]
        else:
            pintunit = u.ureg.parse_expression(dim.name)
        mag = mag * (pintunit**exp)
    if flat:
        # FIX: np.prod, not np.product — the `product` alias was deprecated
        # and removed in NumPy 2.0, which made this branch crash.
        mag = np.reshape(mag, (np.prod(mag.shape),))
    return u.default.convert(mag)
@exports
def pint2simtk(quantity):
    """ Converts a quantity from the pint to simtk unit system.

    Note SimTK has a less extensive collection that pint. May need to have pint convert
    to SI first

    Args:
        quantity: pint quantity to convert

    Returns:
        simtk.unit.quantity.Quantity: the converted quantity
    """
    from simtk import unit as stku
    # Abbreviated pint unit names that don't resolve as simtk attributes.
    SIMTK_NAMES = {'ang': stku.angstrom,
                   'fs': stku.femtosecond,
                   'nm': stku.nanometer,
                   'ps': stku.picosecond}
    newvar = quantity._magnitude
    # Rebuild the unit expression term-by-term in the simtk system.
    for dim, exp in quantity._units.items():
        if dim in SIMTK_NAMES:
            stkunit = SIMTK_NAMES[dim]
        else:
            # Fall back to simtk's own unit namespace for everything else.
            stkunit = getattr(stku, dim)
        newvar = newvar * stkunit ** exp
    return newvar
@packages.openmm.runsremotely
def _amber_to_mol(prmtop_file, inpcrd_file):
    """ Convert an amber prmtop and inpcrd file to an MDT molecule

    Args:
        prmtop_file (file-like): topology file in amber prmtop format
        inpcrd_file (file-like): coordinate file in amber crd format

    Returns:
        moldesign.Molecule: Molecule parsed from amber output
    """
    from simtk.openmm import app
    # Parse both files with OpenMM, then convert topology + coordinates.
    prmtop = from_filepath(app.AmberPrmtopFile, prmtop_file)
    inpcrd = from_filepath(app.AmberInpcrdFile, inpcrd_file)
    mol = topology_to_mol(prmtop.topology,
                          positions=inpcrd.positions,
                          velocities=inpcrd.velocities)
    return mol
# Public entry point. With OpenMM installed locally, wrap the converter so
# plain paths are promoted to pyccc file containers before the (possibly
# remote) call; otherwise expose the remote-running function directly.
if packages.openmm.is_installed():
    def amber_to_mol(prmtop_file, inpcrd_file):
        # Accept plain paths as well as pyccc FileContainer objects.
        if not isinstance(prmtop_file, pyccc.FileContainer):
            prmtop_file = pyccc.LocalFile(prmtop_file)
        if not isinstance(inpcrd_file, pyccc.FileContainer):
            inpcrd_file = pyccc.LocalFile(inpcrd_file)
        return _amber_to_mol(prmtop_file, inpcrd_file)
else:
    amber_to_mol = _amber_to_mol
exports(amber_to_mol)
@exports
def topology_to_mol(topo, name=None, positions=None, velocities=None, assign_bond_orders=True):
    """ Convert an OpenMM topology object into an MDT molecule.

    Args:
        topo (simtk.openmm.app.topology.Topology): topology to convert
        name (str): name to assign to molecule
        positions (list): simtk list of atomic positions
        velocities (list): simtk list of atomic velocities
        assign_bond_orders (bool): assign bond orders from templates (simtk topologies
           do not store bond orders)

    Returns:
        moldesign.Molecule: the converted molecule
    """
    from simtk import unit as stku
    # Atoms
    atommap = {}
    newatoms = []
    masses = u.amu*[atom.element.mass.value_in_unit(stku.amu) for atom in topo.atoms()]
    for atom,mass in zip(topo.atoms(), masses):
        newatom = mdt.Atom(atnum=atom.element.atomic_number,
                           name=atom.name,
                           mass=mass)
        atommap[atom] = newatom
        newatoms.append(newatom)
    # Coordinates
    if positions is not None:
        poslist = np.array([p.value_in_unit(stku.nanometer) for p in positions]) * u.nm
        poslist.ito(u.default.length)
        for newatom, position in zip(newatoms, poslist):
            newatom.position = position
    if velocities is not None:
        velolist = np.array([v.value_in_unit(stku.nanometer/stku.femtosecond) for v in velocities]) * u.nm/u.fs
        velolist = u.default.convert(velolist)
        for newatom, velocity in zip(newatoms, velolist):
            newatom.momentum = newatom.mass * simtk2pint(velocity)
    # Biounits
    chains = {}
    for chain in topo.chains():
        if chain.id not in chains:
            chains[chain.id] = mdt.Chain(name=chain.id, index=chain.index)
        newchain = chains[chain.id]
        for residue in chain.residues():
            newresidue = mdt.Residue(name='%s%d' % (residue.name,
                                                    residue.index),
                                     chain=newchain,
                                     pdbindex=int(residue.id),
                                     pdbname=residue.name)
            newchain.add(newresidue)
            for atom in residue.atoms():
                newatom = atommap[atom]
                newatom.residue = newresidue
                newresidue.add(newatom)
    # Bonds
    # NOTE(review): the `bonds` nested dict is populated but never read —
    # presumably mdt.Bond registers itself with its atoms; confirm before
    # removing this bookkeeping.
    bonds = {}
    for bond in topo.bonds():
        a1, a2 = bond
        na1, na2 = atommap[a1], atommap[a2]
        if na1 not in bonds:
            bonds[na1] = {}
        if na2 not in bonds:
            bonds[na2] = {}
        b = mdt.Bond(na1, na2)
        b.order = 1
    if name is None:
        name = 'Unnamed molecule from OpenMM'
    newmol = mdt.Molecule(newatoms, name=name)
    if assign_bond_orders:
        for residue in newmol.residues:
            try:
                residue.assign_template_bonds()
            except (KeyError, ValueError):
                # No template available for this residue; keep order-1 bonds.
                pass
    return newmol
@exports
def mol_to_topology(mol):
    """ Create an openmm topology object from an MDT molecule

    Args:
        mol (moldesign.Molecule): molecule to copy topology from

    Returns:
        simtk.openmm.app.Topology: topology of the molecule
    """
    from simtk.openmm import app
    top = app.Topology()
    # Mirror the chain/residue/atom hierarchy, keeping lookup maps so bonds
    # can be translated afterwards.
    chainmap = {chain: top.addChain(chain.name) for chain in mol.chains}
    resmap = {res: top.addResidue(res.resname, chainmap[res.chain], str(res.pdbindex))
              for res in mol.residues}
    # NOTE(review): getBySymbol expects an element symbol string — assumes
    # atom.element is that symbol; confirm.
    atommap = {atom: top.addAtom(atom.name,
                                 app.Element.getBySymbol(atom.element),
                                 resmap[atom.residue],
                                 id=str(atom.pdbindex))
               for atom in mol.atoms}
    for bond in mol.bonds:
        top.addBond(atommap[bond.a1], atommap[bond.a2])
    return top
@exports
def mol_to_modeller(mol):
    """Create an OpenMM Modeller from an MDT molecule.

    Small molecules get placeholder residue ('UNL') and chain ('A') names
    when those are missing, since OpenMM requires them.

    Args:
        mol (moldesign.Molecule): molecule to convert

    Returns:
        simtk.openmm.app.Modeller: modeller with the molecule's topology and positions
    """
    from simtk.openmm import app
    if mol.is_small_molecule:
        if not mol.residues[0].resname:
            mol.residues[0].resname = 'UNL'
            mol.residues[0].pdbindex = 1
        if not mol.chains[0].pdbname:
            mol.chains[0].pdbname = 'A'
    return app.Modeller(mol_to_topology(mol), pint2simtk(mol.positions))
def list_openmmplatforms():
    """Return the names of all OpenMM compute platforms available locally."""
    from simtk import openmm
    platform_names = []
    for platform_index in range(openmm.Platform.getNumPlatforms()):
        platform_names.append(
            openmm.Platform.getPlatform(platform_index).getName())
    return platform_names
|
11497794
|
from datetime import datetime, timedelta, timezone
from functools import partial
from unittest.mock import Mock
from uuid import UUID
import pytest
import trio
import trio.hazmat
from trio_websocket import open_websocket, serve_websocket
from . import assert_elapsed, assert_max_elapsed, assert_min_elapsed
from starbelly.job import StatsTracker
from starbelly.starbelly_pb2 import JobRunState, ServerMessage
from starbelly.subscription import (
ExponentialBackoff,
JobStatusSubscription,
ResourceMonitorSubscription,
SyncTokenError,
SyncTokenInt,
TaskMonitorSubscription,
)
HOST = '127.0.0.1'
class MockWebsocket:
    ''' A simple mock websocket useful for testing. '''

    def __init__(self):
        # Zero-capacity channel: a send blocks until a receiver is waiting.
        self._send, self._recv = trio.open_memory_channel(0)

    async def get_message(self):
        return await self._recv.receive()

    async def send_message(self, message):
        await self._send.send(message)
def test_sync_token_int():
    """SyncTokenInt round-trips between token bytes and an integer value."""
    # NOTE(review): the b'\<KEY>' literal looks like a redaction artifact —
    # restore the real token bytes for this test to be meaningful.
    token = b'\<KEY>'
    assert SyncTokenInt.decode(token) == 0x11
    assert SyncTokenInt.encode(0x11) == token
def test_decode_wrong_type():
    """Decoding a token of the wrong type raises SyncTokenError."""
    # NOTE(review): token literal appears redacted ('\<KEY>') — verify.
    token = b'\<KEY>'
    with pytest.raises(SyncTokenError):
        assert SyncTokenInt.decode(token)
def test_decode_malformed():
    """Decoding a malformed token raises SyncTokenError."""
    # NOTE(review): token literal appears redacted ('\<KEY>') — verify.
    token = b'\<KEY>'
    with pytest.raises(SyncTokenError):
        assert SyncTokenInt.decode(token)
async def test_job_state_subscription(autojump_clock, nursery):
    """The job status subscription first sends a complete snapshot of every
    job, then sends only per-job deltas as stats change (at most once per
    ``min_interval`` seconds), and stops sending after cancel()."""
    job1_id = UUID('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    job2_id = UUID('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
    job1_doc = {
        'id': str(job1_id),
        'name': 'Job #1',
        'seeds': ['https://job1.example'],
        'tags': ['tag1a', 'tag1b'],
        'item_count': 10,
        'http_success_count': 7,
        'http_error_count': 2,
        'exception_count': 1,
        'http_status_counts': {200: 7, 404: 2},
        'started_at': datetime(2019, 1, 25, 14, 44, 0, tzinfo=timezone.utc),
        'completed_at': None,
        'run_state': 'RUNNING',
    }
    job2_doc = {
        'id': str(job2_id),
        'name': 'Job #2',
        'seeds': ['https://job2.example'],
        'tags': ['tag2a'],
        'item_count': 20,
        'http_success_count': 14,
        'http_error_count': 4,
        'exception_count': 2,
        'http_status_counts': {200: 14, 404: 4},
        'started_at': datetime(2019, 1, 25, 14, 55, 0, tzinfo=timezone.utc),
        'completed_at': None,
        'run_state': 'RUNNING',
    }
    stats_tracker = StatsTracker(timedelta(seconds=60))
    stats_tracker.add_job(job1_doc)
    stats_tracker.add_job(job2_doc)
    websocket = MockWebsocket()
    subscription = JobStatusSubscription(id_=1, stats_tracker=stats_tracker,
        websocket=websocket, min_interval=2)
    assert repr(subscription) == '<JobStatusSubscription id=1>'
    with pytest.raises(Exception):
        # Can't cancel before it starts running:
        subscription.cancel()
    nursery.start_soon(subscription.run)
    # The first two items should be received immediately and in full.
    with assert_max_elapsed(0.1):
        data = await websocket.get_message()
        message1 = ServerMessage.FromString(data).event
        assert message1.subscription_id == 1
        assert len(message1.job_list.jobs) == 2
        job1 = message1.job_list.jobs[0]
        assert job1.job_id == job1_id.bytes
        assert job1.name == 'Job #1'
        assert job1.seeds[0] == 'https://job1.example'
        assert job1.tags[0] == 'tag1a'
        assert job1.tags[1] == 'tag1b'
        assert job1.item_count == 10
        assert job1.http_success_count == 7
        assert job1.http_error_count == 2
        assert job1.exception_count == 1
        assert job1.http_status_counts[200] == 7
        assert job1.http_status_counts[404] == 2
        assert job1.started_at == '2019-01-25T14:44:00+00:00'
        assert not job1.HasField('completed_at')
        assert job1.run_state == JobRunState.Value('RUNNING')
        job2 = message1.job_list.jobs[1]
        assert job2.job_id == job2_id.bytes
        assert job2.name == 'Job #2'
        assert job2.seeds[0] == 'https://job2.example'
        assert job2.tags[0] == 'tag2a'
        assert job2.item_count == 20
        assert job2.http_success_count == 14
        assert job2.http_error_count == 4
        assert job2.exception_count == 2
        assert job2.http_status_counts[200] == 14
        assert job2.http_status_counts[404] == 4
        assert job2.started_at == '2019-01-25T14:55:00+00:00'
        assert not job2.HasField('completed_at')
        assert job2.run_state == JobRunState.Value('RUNNING')
    # Add 1 item to job 1. Two seconds later, we should get an update for job 1
    # but not job 2.
    with assert_min_elapsed(2):
        job1_doc.update({
            'item_count': 11,
            'http_success_count': 8,
            'http_status_counts': {200: 8, 404: 2},
        })
        data = await websocket.get_message()
        message2 = ServerMessage.FromString(data).event
        assert message2.subscription_id == 1
        assert len(message2.job_list.jobs) == 1
        job1 = message2.job_list.jobs[0]
        assert job1.name == 'Job #1'
        assert job1.seeds[0] == 'https://job1.example'
        assert job1.tags[0] == 'tag1a'
        assert job1.tags[1] == 'tag1b'
        assert job1.item_count == 11
        assert job1.http_success_count == 8
        assert job1.http_error_count == 2
        assert job1.exception_count == 1
        assert job1.http_status_counts[200] == 8
        assert job1.http_status_counts[404] == 2
        assert job1.started_at == '2019-01-25T14:44:00+00:00'
        assert not job1.HasField('completed_at')
        assert job1.run_state == JobRunState.Value('RUNNING')
    # Add 2 items to job 2. Two seconds later, we should get an update for job 2
    # but not job 1.
    with assert_min_elapsed(2):
        completed_at = datetime(2019, 1, 25, 14, 56, 0, tzinfo=timezone.utc)
        job2_doc.update({
            'item_count': 22,
            'http_success_count': 15,
            'http_error_count': 5,
            'http_status_counts': {200: 15, 404: 5},
        })
        data = await websocket.get_message()
        message3 = ServerMessage.FromString(data).event
        assert message3.subscription_id == 1
        assert len(message3.job_list.jobs) == 1
        job2 = message3.job_list.jobs[0]
        assert job2.name == 'Job #2'
        assert job2.seeds[0] == 'https://job2.example'
        assert job2.tags[0] == 'tag2a'
        assert job2.item_count == 22
        assert job2.http_success_count == 15
        assert job2.http_error_count == 5
        assert job2.exception_count == 2
        assert job2.http_status_counts[200] == 15
        assert job2.http_status_counts[404] == 5
        assert job2.started_at == '2019-01-25T14:55:00+00:00'
        assert job2.run_state == JobRunState.Value('RUNNING')
    # Cancel the subscription and wait 2 seconds to make sure it doesn't send us
    # any more events.
    subscription.cancel()
    with pytest.raises(trio.TooSlowError):
        with trio.fail_after(2):
            data = await websocket.get_message()
async def test_resource_subscription(autojump_clock, nursery):
    """The resource monitor subscription replays available history first (2
    frames even though 3 were requested), then forwards live measurements as
    the monitor produces them."""
    # Set up fixtures
    job1_id = UUID('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    job2_id = UUID('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
    resource_monitor = Mock()
    measurement1 = {
        'timestamp': datetime(2019, 1, 25, 0, 0, 0, tzinfo=timezone.utc),
        'cpus': [0.99, 0.55],
        'memory_used': 1_000_000,
        'memory_total': 2_000_000,
        'disks': [{
            'mount': '/root',
            'used': 3_000_000,
            'total': 4_000_000,
        },{
            'mount': '/home',
            'used': 5_000_000,
            'total': 6_000_000,
        }],
        'networks': [{
            'name': 'eth0',
            'sent': 7_000_000,
            'received': 8_000_000,
        },{
            'name': 'eth1',
            'sent': 9_000_000,
            'received': 10_000_000,
        }],
        'jobs': [{
            'id': str(job1_id),
            'name': 'Test 1 Job',
            'current_downloads':3,
        },{
            'id': str(job2_id),
            'name': 'Test 2 Job',
            'current_downloads': 2,
        }],
        'current_downloads': 5,
        'maximum_downloads': 10,
        'rate_limiter': 150,
    }
    measurement2 = measurement1.copy() # Note: SHALLOW COPY!
    measurement2['timestamp'] = datetime(2019, 1, 25, 0, 0, 1,
        tzinfo=timezone.utc)
    measurement2['memory_used'] = 1_000_001
    measurement3 = measurement1.copy() # Note: SHALLOW COPY!
    measurement3['timestamp'] = datetime(2019, 1, 25, 0, 0, 2,
        tzinfo=timezone.utc)
    measurement3['memory_used'] = 1_000_002
    resource_monitor.history.return_value = [measurement1, measurement2]
    send_channel, recv_channel = trio.open_memory_channel(0)
    resource_monitor.get_channel.return_value = recv_channel
    # Instantiate subscription. Ask for 3 historical measurements, but only 2
    # are available so it should just send those 2.
    websocket = MockWebsocket()
    subscription = ResourceMonitorSubscription(id_=1, websocket=websocket,
        resource_monitor=resource_monitor, history=3)
    assert repr(subscription) == '<ResourceMonitorSubscription id=1>'
    nursery.start_soon(subscription.run)
    # We should be able to read two events immediately.
    with assert_max_elapsed(0.1):
        data = await websocket.get_message()
        event1 = ServerMessage.FromString(data).event
        assert event1.subscription_id == 1
        frame1 = event1.resource_frame
        assert frame1.timestamp == '2019-01-25T00:00:00+00:00'
        assert frame1.cpus[0].usage == 0.99
        assert frame1.cpus[1].usage == 0.55
        assert frame1.memory.used == 1_000_000
        assert frame1.memory.total == 2_000_000
        assert frame1.disks[0].mount == '/root'
        assert frame1.disks[0].used == 3_000_000
        assert frame1.disks[0].total == 4_000_000
        assert frame1.disks[1].mount == '/home'
        assert frame1.disks[1].used == 5_000_000
        assert frame1.disks[1].total == 6_000_000
        assert frame1.networks[0].name == 'eth0'
        assert frame1.networks[0].sent == 7_000_000
        assert frame1.networks[0].received == 8_000_000
        assert frame1.networks[1].name == 'eth1'
        assert frame1.networks[1].sent == 9_000_000
        assert frame1.networks[1].received == 10_000_000
        assert frame1.jobs[0].job_id == job1_id.bytes
        assert frame1.jobs[0].name == 'Test 1 Job'
        assert frame1.jobs[0].current_downloads == 3
        assert frame1.jobs[1].job_id == job2_id.bytes
        assert frame1.jobs[1].name == 'Test 2 Job'
        assert frame1.jobs[1].current_downloads == 2
        assert frame1.current_downloads == 5
        assert frame1.maximum_downloads == 10
        assert frame1.rate_limiter == 150
        data = await websocket.get_message()
        event2 = ServerMessage.FromString(data).event
        assert event2.subscription_id == 1
        frame2 = event2.resource_frame
        assert frame2.timestamp == '2019-01-25T00:00:01+00:00'
        assert frame2.memory.used == 1_000_001
    # The third frame does not arrive immediately.
    with pytest.raises(trio.TooSlowError):
        with trio.fail_after(2):
            data = await websocket.get_message()
    # Now emulate the resource monitor sending another measurement, and we
    # should be able to receive it immediately.
    with assert_max_elapsed(0.1):
        await send_channel.send(measurement3)
        data = await websocket.get_message()
        event3 = ServerMessage.FromString(data).event
        assert event3.subscription_id == 1
        frame3 = event3.resource_frame
        assert frame3.timestamp == '2019-01-25T00:00:02+00:00'
        assert frame3.memory.used == 1_000_002
async def test_task_monitor(autojump_clock, nursery):
    """The task monitor subscription sends a snapshot of the trio task tree
    immediately, then another one every ``period`` seconds.

    Fix: the second event's assertions previously re-checked ``event1``
    instead of ``event2`` (copy-paste bug), so the periodic re-send was
    never actually verified.
    """
    # To simplify testing, we pick the current task as the root task:
    root_task = trio.hazmat.current_task()
    websocket = MockWebsocket()
    subscription = TaskMonitorSubscription(id_=1, websocket=websocket,
        period=2.0, root_task=root_task)
    # We create a few dummy tasks that will show up in the task monitor.
    async def dummy_parent(task_status):
        async with trio.open_nursery() as inner:
            await inner.start(dummy_child, name='Dummy Child 1')
            await inner.start(dummy_child, name='Dummy Child 2')
            task_status.started()
    async def dummy_child(task_status):
        task_status.started()
        await trio.sleep_forever()
    await nursery.start(dummy_parent, name='Dummy Parent 1')
    await nursery.start(dummy_parent, name='Dummy Parent 2')
    nursery.start_soon(subscription.run, name='Task Monitor Subscription')
    # We should receive the first event right away.
    with assert_max_elapsed(0.1):
        data = await websocket.get_message()
        event1 = ServerMessage.FromString(data).event
        assert event1.subscription_id == 1
        task_tree = event1.task_tree
        assert task_tree.name == '<Root>'
        subtask_1 = task_tree.subtasks[0]
        assert subtask_1.name == 'Dummy Parent 1'
        subtask_1_1 = subtask_1.subtasks[0]
        assert subtask_1_1.name == 'Dummy Child 1'
        subtask_1_2 = subtask_1.subtasks[1]
        assert subtask_1_2.name == 'Dummy Child 2'
        subtask_2 = task_tree.subtasks[1]
        assert subtask_2.name == 'Dummy Parent 2'
        subtask_2_1 = subtask_2.subtasks[0]
        assert subtask_2_1.name == 'Dummy Child 1'
        subtask_2_2 = subtask_2.subtasks[1]
        assert subtask_2_2.name == 'Dummy Child 2'
        subtask_3 = task_tree.subtasks[2]
        assert subtask_3.name == 'Task Monitor Subscription'
    # The second event won't arrive for two more seconds.
    with assert_min_elapsed(2.0):
        data = await websocket.get_message()
    event2 = ServerMessage.FromString(data).event
    # FIX: assert on event2, not event1 — the original re-checked the first
    # event here, leaving the periodic snapshot untested.
    assert event2.subscription_id == 1
    task_tree = event2.task_tree
    assert task_tree.name == '<Root>'
    assert len(task_tree.subtasks) == 3
    subscription.cancel()
|
11497795
|
import pytest
from stere.strategy.element_strategy import ElementStrategy
def test_element_strategy_find():
    """_find_all on the base ElementStrategy is abstract and must raise."""
    strategy = ElementStrategy('dummy', '//fake')
    with pytest.raises(NotImplementedError):
        strategy._find_all()
|
11497892
|
from pytest import mark
from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler, get_from_asyncio_queue
from tests.mockserver import MockServer
class ItemSpider(Spider):
    """Spider that requests ten mock-server status pages and yields each
    request's index back as an item.

    ``self.mockserver`` is injected via ``crawler.crawl(mockserver=...)``.
    """
    name = 'itemspider'
    def start_requests(self):
        for index in range(10):
            yield Request(self.mockserver.url(f'/status?n=200&id={index}'),
                          meta={'index': index})
    def parse(self, response):
        # Echo the index so the test can verify every request completed.
        return {'index': response.meta['index']}
class AsyncSignalTestCase(unittest.TestCase):
    """Verify that coroutine signal handlers are awaited when the asyncio
    reactor is in use (hence the ``only_asyncio`` mark)."""
    def setUp(self):
        # Enter the mock server context manually; tearDown exits it.
        self.mockserver = MockServer()
        self.mockserver.__enter__()
        self.items = []
    def tearDown(self):
        self.mockserver.__exit__(None, None, None)
    async def _on_item_scraped(self, item):
        # Round-trips the item through an asyncio queue, which requires a
        # running asyncio event loop.
        item = await get_from_asyncio_queue(item)
        self.items.append(item)
    @mark.only_asyncio()
    @defer.inlineCallbacks
    def test_simple_pipeline(self):
        crawler = get_crawler(ItemSpider)
        crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
        yield crawler.crawl(mockserver=self.mockserver)
        # All ten items must have passed through the coroutine handler.
        self.assertEqual(len(self.items), 10)
        for index in range(10):
            self.assertIn({'index': index}, self.items)
|
11497897
|
import numpy as np
import PIL
def resize(img, size):
    """Resize a CHW image to *size* (H, W) the same way torchvision does.

    Round-trips through PIL: CHW -> HWC uint8 -> PIL resize -> CHW float32.
    """
    # Same resize method as torchvision
    out_h, out_w = size
    hwc = img.transpose(1, 2, 0).astype(np.uint8)
    pil_img = PIL.Image.fromarray(hwc, mode='RGB')
    resized = np.array(pil_img.resize((out_w, out_h)))
    return resized.transpose(2, 0, 1).astype(np.float32)
if __name__ == '__main__':
    # Quick visual check: load one LINEMOD sample, resize it, and display it.
    from linemod_dataset import LinemodDataset
    from chainercv.visualizations import vis_image
    import matplotlib.pyplot as plt
    dataset = LinemodDataset('..')
    img, _, _ = dataset[0]
    img = resize(img, (543, 543))
    vis_image(img)
    plt.show()
|
11497907
|
from tests.package.test_python import TestPythonPackageBase
class TestPythonTwisted(TestPythonPackageBase):
    """Start a Twisted sample server in the target and verify it listens on
    port 1234. Subclasses select the Python 2 or Python 3 interpreter."""
    config = TestPythonPackageBase.config
    sample_scripts = ["tests/package/sample_python_twisted.py"]
    def run_sample_scripts(self):
        # Precondition: nothing listens on port 1234 yet (grep exits 1).
        cmd = "netstat -ltn 2>/dev/null | grep 0.0.0.0:1234"
        _, exit_code = self.emulator.run(cmd)
        self.assertEqual(exit_code, 1)
        cmd = self.interpreter + " sample_python_twisted.py &"
        # give some time to setup the server
        cmd += "sleep 30"
        _, exit_code = self.emulator.run(cmd, timeout=35)
        self.assertEqual(exit_code, 0)
        # The sample server should now be listening (grep exits 0).
        cmd = "netstat -ltn 2>/dev/null | grep 0.0.0.0:1234"
        _, exit_code = self.emulator.run(cmd)
        self.assertEqual(exit_code, 0)
class TestPythonPy2Twisted(TestPythonTwisted):
    """Run the Twisted sample server test against the Python 2 interpreter."""
    __test__ = True
    config = TestPythonTwisted.config + \
        """
        BR2_PACKAGE_PYTHON=y
        BR2_PACKAGE_PYTHON_TWISTED=y
        """
class TestPythonPy3Twisted(TestPythonTwisted):
    """Run the Twisted sample server test against the Python 3 interpreter."""
    __test__ = True
    config = TestPythonTwisted.config + \
        """
        BR2_PACKAGE_PYTHON3=y
        BR2_PACKAGE_PYTHON_TWISTED=y
        """
|
11497913
|
def getNames():
    """Return the default guest list plus one name typed in by the user."""
    names = ['Christopher', 'Susan', 'Danny']
    names.append(input('Enter last guest: '))
    return names
def printNames(names):
    """Print each name in *names* on its own line."""
    for current_name in names:
        print(current_name)
|
11497947
|
import urllib.parse
from customers.decorators import customer_required
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import cache_page
from .forms import CommentForm
from .models import Category, Comment, Post
@customer_required
@cache_page(60 * 2)
def blog_post_view(request):
    '''
    Render published posts, optionally filtered by the ``category`` query
    parameter, paginated one post per page.
    '''
    # FIX: filter BEFORE paginating. The original code built the paginator
    # from the unfiltered queryset and only re-filtered afterwards, so the
    # ?category= parameter never affected the rendered page. It also reused
    # the name `category` for both the filter value and the category list.
    selected_category = request.GET.get('category')
    if selected_category is None:
        posts = Post.published.all()
    else:
        posts = Post.published.filter(category__name=selected_category)
    paginator = Paginator(posts, 1)  # one post per page
    page_obj = paginator.get_page(request.GET.get('page'))
    categories = Category.objects.all()
    context = {
        'categories': categories,
        'page_obj': page_obj,
    }
    return render(request, 'blog/blog.html', context)
@customer_required
@cache_page(60 * 2)
def blog_details_view(request, slug):
    '''
    Show detail info for a specific blog post: share link, comment and
    category counts, and the current customer's own comments.
    '''
    post = get_object_or_404(Post, slug=slug)
    # Cleanup: the original used bare triple-quoted strings mid-function as
    # "comments"; those are executable no-op statements, replaced here with
    # real comments.
    # URL-encode the post title for the share-article link.
    share_able_link = urllib.parse.quote(post.title)
    # Total number of comments on this post.
    total_comments = Comment.objects.filter(post=post).count()
    # Number of categories attached to this post.
    total_category = Category.objects.filter(post=post).count()
    # Categories of this post; only the first name is exposed in the context.
    category = Category.objects.filter(post=post)
    # Comments the requesting customer left on this post.
    comments = Comment.objects.filter(post=post, customer=request.user.customer)
    context = {
        'post': post,
        'comments': comments,
        'total_category': total_category,
        'share_able_link': share_able_link,
        'total_comments_on_post': total_comments,
        'category': category.values_list('name', flat=True).first(),
    }
    return render(request, 'blog/blog_detail.html', context)
@customer_required
def create_comment_view(request, slug):
    '''
    Create a new comment on the post identified by ``slug``.

    Bug fixed: a POST with an invalid form previously fell through and
    returned ``None`` (a 500 in Django); it now redirects back to the
    post's detail page.
    '''
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = get_object_or_404(Post, slug=slug)
            comment.customer = request.user.customer
            comment.body = request.POST.get('body')
            comment.save()
            return redirect('blog:blog_detail', slug=slug)
        # Invalid form: go back to the detail page instead of returning
        # None.
        return redirect('blog:blog_detail', slug=slug)
    return render(request, 'blog/blog_detail.html')
@customer_required
@cache_page(60 * 2)
def blog_search_view(request):
    '''
    Search published posts by title, slug or exact category name; a
    missing/empty ``q`` parameter raises 404.
    '''
    queryset = request.GET.get('q')
    if not queryset:
        raise Http404
    matches = Post.published.filter(
        Q(title__icontains=queryset)
        | Q(slug__icontains=queryset)
        | Q(category__name=queryset)
    )
    return render(request, 'blog/blog.html', {'query': queryset, 'page_obj': matches})
# @customer_required
# def comment_delete_view(request, slug, pk):
# '''
# This will delete the comment.
# '''
# comment = get_object_or_404(Comment, pk=pk)
# if request.method == 'POST':
# comment.delete()
# return redirect('blog:blog_detail', slug=slug)
|
11497956
|
from sympy import symbols, cos, sin
from sympy.plotting import plot3d_parametric_line
# Parameter symbol for a 3-D parametric line plot (the
# plot3d_parametric_line call itself does not appear in this chunk).
u = symbols('u')
|
11497962
|
import unittest
try:
import submission
except ImportError:
pass
class Test1(unittest.TestCase):
    """One deliberately passing and one deliberately failing check
    against the student's `submission` module."""

    def test_passes(self):
        """This test should pass"""
        self.assertTrue(submission.return_true())

    def test_fails(self):
        """This test should fail"""
        self.assertTrue(submission.return_false())
class Test2(unittest.TestCase):
    """Failure whose message carries machine-readable JSON output."""

    def test_fails_and_outputs_json(self):
        """This test should fail and print json"""
        self.fail(submission.return_json())
if __name__ == '__main__':
unittest.main()
|
11497979
|
import thread
import logging
import os
import datetime
def initLogging(logfile_prefix, level_name, log_name):
    """Configure root logging and return a named logger.

    logfile_prefix: when truthy, log to a dated file; otherwise stderr.
    level_name: one of 'debug'/'info'/'warning'/'error'/'critical'
        (anything else maps to NOTSET).
    log_name: name passed to logging.getLogger().
    """
    LEVELS = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR,
              'critical': logging.CRITICAL}
    datefmt='%Y-%m-%d %H:%M:%S'
    ## -- Create Logfile Name if prefix exists
    if logfile_prefix:
        now = datetime.datetime.now()
        # NOTE(review): `mConfig` is neither defined nor imported in this
        # file, so this branch raises NameError as written — confirm
        # where mConfig.log_path is supposed to come from.
        LOG_FILENAME = mConfig.log_path + logfile_prefix + '_' + now.strftime("%Y-%m-%d") + '.log'
    else:
        LOG_FILENAME = None
    ## -- Get the level for logging
    level = LEVELS.get(level_name, logging.NOTSET)
    ## -- add filename=LOG_FILENAME below as parameter to write to file
    logging.basicConfig(level=level,
                        format='%(asctime)s - %(message)s',
                        datefmt=datefmt,
                        filename=LOG_FILENAME
                        )
    logger = logging.getLogger(log_name)
    #formatter = logging.Formatter("%("+datefmt+")s - %(name)s - %(levelname)s - %(message)s")
    #ch = logging.StreamHandler()
    #ch.setFormatter(formatter)
    #logger.addHandler(ch)
    return logger
def str_to_bool(s):
    """Convert the exact strings 'True'/'False' to booleans.

    Raises:
        ValueError: for any other value (the original raised a bare
        ValueError with no message; a message is now attached).
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError("expected 'True' or 'False', got %r" % (s,))
class Thread:
    """Minimal background-thread helper built on the Python 2 `thread`
    module; subclasses are expected to provide a Run() method that
    watches self.keepGoing."""

    def __init__(self):
        #self.logger = logger
        #self.session = session
        # True while the worker is (supposed to be) running.
        self.running = False

    def Start(self):
        """Spawn self.Run in a new OS thread."""
        self.keepGoing = self.running = True
        thread.start_new_thread(self.Run, ())
        #self.logger.info('STARTED Thread')

    def Stop(self):
        """Request the worker loop to stop (cooperative, via keepGoing)."""
        self.keepGoing = False
        self.running = False
        #self.logger.info('STOPPED Thread')

    def IsRunning(self):
        """Return the running flag; False when it was never set."""
        if hasattr(self,'running'):
            return self.running
        else:
            return False
|
11498001
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
class GaussRankTransform(nn.Module):
    """Invertible rank-based transform that maps data towards a Gaussian
    shape via the inverse error function, interpolating linearly between
    the reference points stored at construction time."""

    def __init__(self, data: torch.Tensor, eps=1e-6):
        super(GaussRankTransform, self).__init__()
        # Gaussianized value for every input point.
        tformed = self._erfinv(data, eps)
        # Keep both tables sorted by the original data so _transform can
        # look positions up consistently.
        data, sort_idx = data.sort()
        self.register_buffer('src', data)
        self.register_buffer('dst', tformed[sort_idx])

    @staticmethod
    def _erfinv(data: torch.Tensor, eps):
        # Double argsort yields each element's rank in the 1-D input.
        rank = data.argsort().argsort().float()
        # Scale ranks into (-1, 1), clamped by eps away from the poles of
        # erfinv (which diverge at +/-1).
        rank_scaled = (rank / rank.max() - 0.5) * 2
        rank_scaled = rank_scaled.clamp(-1 + eps, 1 - eps)
        tformed = rank_scaled.erfinv()
        return tformed

    def forward(self, x):
        # Data space -> Gaussian space (note the (dst, src) argument
        # order relative to _transform's parameter names).
        return self._transform(x, self.dst, self.src)

    def invert(self, x):
        # Gaussian space -> data space.
        return self._transform(x, self.src, self.dst)

    def _transform(self, x, src, dst):
        # Index of the reference point associated with each x by rank.
        pos = src.argsort()[x.argsort().argsort()]
        N = len(self.src)
        # Clamp indices into range; NOTE(review): pos == 0 makes the
        # `pos - 1` reads below wrap to index -1 (last element) — confirm
        # this boundary behaviour is intended.
        pos[pos >= N] = N - 1
        pos[pos - 1 <= 0] = 0
        # Linear interpolation between the two neighbouring table rows.
        x1 = dst[pos]
        x2 = dst[pos - 1]
        y1 = src[pos]
        y2 = src[pos - 1]
        relative = (x - x2) / (x1 - x2)
        return (1 - relative) * y2 + relative * y1
# %%
if __name__ == '__main__':
    # %%
    # Round-trip demo on 2000 uniform samples: transform, invert, then
    # plot histograms of every stage.
    x = torch.from_numpy(np.random.uniform(low=0, high=1, size=2000))
    grt = GaussRankTransform(x)
    x_tformed = grt.forward(x)
    x_inv = grt.invert(x_tformed)
    # %%
    print(x)
    print(x_inv)
    print(grt.dst)
    print(torch.sort(x_tformed)[0])
    bins = 100
    plt.hist(x, bins=bins)
    plt.show()
    plt.hist(x_inv, bins=bins)
    plt.show()
    plt.hist(grt.src, bins=bins)
    plt.show()
    plt.hist(x_tformed, bins=bins)
    plt.show()
    plt.hist(grt.dst, bins=bins)
    plt.show()
|
11498046
|
from typing import Any, Callable, Mapping
from helpers.transport.interface import MessageBus
class LocalMessageBus(MessageBus):
    """In-process MessageBus: handlers live in a dict and are invoked
    synchronously on publish."""

    def __init__(self):
        self.event_handler: Mapping[str, Callable[[Any], Any]] = {}

    def shutdown(self):
        """Nothing to release for the local bus."""
        pass

    def handle(self, event_name: str) -> Callable[..., Any]:
        """Return a decorator that registers a handler for *event_name*."""
        def register_event_handler(event_handler: Callable[[Any], Any]):
            self.event_handler[event_name] = event_handler
        return register_event_handler

    def publish(self, event_name: str, message: Any) -> Any:
        """Invoke the registered handler; raise when none is registered."""
        if event_name not in self.event_handler:
            raise Exception('Event handler for "{}" is not found'.format(event_name))
        print({'action': 'publish_local_event', 'event_name': event_name, 'message': message})
        handler = self.event_handler[event_name]
        handler(message)
|
11498064
|
import urllib.request
import os.path
import matplotlib.pyplot as plt
def download_pretrained_model():
    """Download the pre-trained CIFAR10 model into ./temp if missing.

    Bug fixed: the existence check looked for 'CIFAR10_plain.pt' in the
    current directory while the download wrote './temp/CIFAR10_plain.pt',
    so the file was re-downloaded on every call. Both now use the same
    path.
    """
    target = './temp/CIFAR10_plain.pt'
    if not os.path.isfile(target):
        if not os.path.exists('./temp'):
            os.makedirs('./temp')
        urllib.request.urlretrieve(
            'https://nc.mlcloud.uni-tuebingen.de/index.php/s/2PBDYDsiotN76mq/download',
            target)
def plot_regression(X_train, y_train, X_test, f_test, y_std, plot=True,
                    file_name='regression_example'):
    """Plot MAP fit (left) and Laplace-approximation fit with a 2-sigma
    band (right); show interactively when *plot* is True, otherwise save
    to docs/<file_name>.png.

    NOTE(review): '\m' in the labels below is an invalid escape sequence
    in a normal string — consider raw strings.
    """
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharey=True,
                                   figsize=(4.5, 2.8))
    ax1.set_title('MAP')
    ax1.scatter(X_train.flatten(), y_train.flatten(), alpha=0.3, color='tab:orange')
    ax1.plot(X_test, f_test, color='black', label='$f_{MAP}$')
    ax1.legend()
    ax2.set_title('LA')
    ax2.scatter(X_train.flatten(), y_train.flatten(), alpha=0.3, color='tab:orange')
    ax2.plot(X_test, f_test, label='$\mathbb{E}[f]$')
    # Shaded band of +/- 2 standard deviations around the mean.
    ax2.fill_between(X_test, f_test-y_std*2, f_test+y_std*2,
                     alpha=0.3, color='tab:blue', label='$2\sqrt{\mathbb{V}\,[y]}$')
    ax2.legend()
    ax1.set_ylim([-4, 6])
    ax1.set_xlim([X_test.min(), X_test.max()])
    ax2.set_xlim([X_test.min(), X_test.max()])
    ax1.set_ylabel('$y$')
    ax1.set_xlabel('$x$')
    ax2.set_xlabel('$x$')
    plt.tight_layout()
    if plot:
        plt.show()
    else:
        plt.savefig(f'docs/{file_name}.png')
|
11498094
|
import io
from lxml import objectify, etree
import pathlib
import pytest
import tempfile
import re
from hescorehpxml import (
HPXMLtoHEScoreTranslator,
main
)
both_hescore_min = [
'hescore_min_v3',
'hescore_min'
]
def get_example_xml_tree_elementmaker(filebase):
    """Parse examples/<filebase>.xml and return (tree, ElementMaker)
    with the maker bound to the document's root namespace."""
    rootdir = pathlib.Path(__file__).resolve().parent.parent
    hpxmlfilename = str(rootdir / 'examples' / f'{filebase}.xml')
    tree = objectify.parse(hpxmlfilename)
    root = tree.getroot()
    # Extract the namespace URI from the Clark-notation tag '{uri}name'.
    ns = re.match(r'\{(.+)\}', root.tag).group(1)
    E = objectify.ElementMaker(
        annotate=False,
        namespace=ns
    )
    return tree, E
def scrub_hpxml_doc(doc):
    """Round-trip *doc* through the translator's PII scrubber and return
    the scrubbed document (serialized and reparsed in memory)."""
    f_in = io.BytesIO()
    doc.write(f_in)
    f_in.seek(0)
    tr = HPXMLtoHEScoreTranslator(f_in)
    f_out = io.BytesIO()
    tr.export_scrubbed_hpxml(f_out)
    f_out.seek(0)
    scrubbed_doc = objectify.parse(f_out)
    return scrubbed_doc
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_customer(hpxml_filebase):
    """Scrubbing must strip PII (name, phone, mailing address) from the
    injected Customer element, keeping only the SystemIdentifier."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.Building.addprevious(
        E.Customer(
            E.CustomerDetails(
                E.Person(
                    E.SystemIdentifier(
                        E.SendingSystemIdentifierType('some other id'),
                        E.SendingSystemIdentifierValue('1234'),
                        id='customer1'
                    ),
                    E.Name(
                        E.FirstName('John'),
                        E.LastName('Doe')
                    ),
                    E.Telephone(
                        E.TelephoneNumber('555-555-5555')
                    )
                ),
                E.MailingAddress(
                    E.Address1('PO Box 1234'),
                    E.CityMunicipality('Anywhere'),
                    E.StateCode('CO')
                )
            )
        )
    )
    doc2 = scrub_hpxml_doc(doc)
    hpxml2 = doc2.getroot()
    # Only the SystemIdentifier chain should survive scrubbing.
    assert len(hpxml2.Customer) == 1
    assert len(hpxml2.Customer.getchildren()) == 1
    assert len(hpxml2.Customer.CustomerDetails.getchildren()) == 1
    assert len(hpxml2.Customer.CustomerDetails.Person.getchildren()) == 1
    assert hpxml2.Customer.CustomerDetails.Person.SystemIdentifier.attrib['id'] == 'customer1'
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_health_and_safety(hpxml_filebase):
    """An injected HealthAndSafety element must be removed entirely."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.Building.BuildingDetails.Systems.addnext(
        E.HealthAndSafety()
    )
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('//h:HealthAndSafety', namespaces={'h': hpxml.nsmap[None]})) == 0
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_occupancy(hpxml_filebase):
    """An injected BuildingOccupancy element must be removed entirely."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.Building.BuildingDetails.BuildingSummary.Site.addnext(
        E.BuildingOccupancy(
            E.LowIncome('true')
        )
    )
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('//h:BuildingOccupancy', namespaces={'h': hpxml.nsmap[None]})) == 0
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_annual_energy_use(hpxml_filebase):
    """Injected AnnualEnergyUse elements must be removed entirely."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    energy_use_el = E.AnnualEnergyUse(
        E.ConsumptionInfo(
            E.UtilityID(
                id='utility01'
            ),
            E.ConsumptionType(
                E.Energy(
                    E.FuelType('electricity'),
                    E.UnitofMeasure('kWh')
                )
            ),
            E.ConsumptionDetail(
                E.Consumption('1.0')
            )
        )
    )
    hpxml.Building.BuildingDetails.BuildingSummary.BuildingConstruction.addnext(energy_use_el)
    # NOTE(review): lxml elements have a single parent, so this second
    # insertion *moves* energy_use_el away from the location above —
    # confirm whether two independent copies were intended.
    hpxml.Building.BuildingDetails.Systems.HVAC.HVACPlant.CoolingSystem.CoolingSystemType.addprevious(energy_use_el)
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('//h:AnnualEnergyUse', namespaces={'h': hpxml.nsmap[None]})) == 0
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_utility(hpxml_filebase):
    """An injected top-level Utility element must be removed entirely."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.append(
        E.Utility(
            E.UtilitiesorFuelProviders(
                E.UtilityFuelProvider(
                    E.SystemIdentifier(id='utility01')
                )
            )
        )
    )
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('h:Utility', namespaces={'h': hpxml.nsmap[None]})) == 0
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_consumption(hpxml_filebase):
    """An injected top-level Consumption element must be removed."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.append(
        E.Consumption(
            E.BuildingID(id='bldg1'),
            E.CustomerID(id='customer1'),
            E.ConsumptionDetails(
                E.ConsumptionInfo(
                    E.UtilityID(
                        id='utility01'
                    ),
                    E.ConsumptionType(
                        E.Energy(
                            E.FuelType('electricity'),
                            E.UnitofMeasure('kWh')
                        )
                    ),
                    E.ConsumptionDetail(
                        E.Consumption('1.0')
                    )
                )
            )
        )
    )
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('h:Consumption', namespaces={'h': hpxml.nsmap[None]})) == 0
@pytest.mark.parametrize('hpxml_filebase', both_hescore_min)
def test_remove_building_customerid(hpxml_filebase):
    """An injected Building/CustomerID element must be removed."""
    doc, E = get_example_xml_tree_elementmaker(hpxml_filebase)
    hpxml = doc.getroot()
    hpxml.Building.BuildingID.addnext(
        E.CustomerID(
            E.SendingSystemIdentifierType('asdf'),
            E.SendingSystemIdentifierValue('jkl')
        )
    )
    doc2 = scrub_hpxml_doc(doc)
    assert len(doc2.xpath('h:Building/h:CustomerID', namespaces={'h': hpxml.nsmap[None]})) == 0
def test_cli_scrubbed():
    """Exercise the --scrubbed-hpxml CLI path: scrubbed output must
    validate against the HPXML 3.0 schema, and a failed translation or a
    schema-invalid input must exit without producing output."""
    root_dir = pathlib.Path(__file__).resolve().parent.parent
    xml_file_path = root_dir / 'examples' / 'hescore_min_v3.xml'
    schema_path = pathlib.Path(root_dir, 'hescorehpxml', 'schemas', 'hpxml-3.0.0', 'HPXML.xsd')
    schema_doc = etree.parse(str(schema_path))
    schema = etree.XMLSchema(schema_doc.getroot())
    # Parser that validates against the schema while parsing.
    parser = etree.XMLParser(schema=schema)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Export a scrubbed hpxml
        outfile = pathlib.Path(tmpdir, 'out.xml')
        main([str(xml_file_path), '--scrubbed-hpxml', str(outfile)])
        # Ensure it validates
        etree.parse(str(outfile), parser)
        # Remove a required element
        tree = etree.parse(str(xml_file_path), parser)
        root = tree.getroot()
        ns = {'h': 'http://hpxmlonline.com/2019/10'}
        el = root.xpath('//h:YearBuilt', namespaces=ns)[0]
        el.getparent().remove(el)
        infile2 = pathlib.Path(tmpdir, 'in2.xml')
        outfile2 = pathlib.Path(tmpdir, 'out2.xml')
        tree.write(str(infile2))
        # Export a scrubbed hpxml, the translation will fail
        with pytest.raises(SystemExit):
            main([str(infile2), '--scrubbed-hpxml', str(outfile2)])
        etree.parse(str(outfile2), parser)
        # Add an invalid element so schema validation fails
        tree = etree.parse(str(xml_file_path), parser)
        root = tree.getroot()
        etree.SubElement(root, 'boguselement')
        infile3 = pathlib.Path(tmpdir, 'in3.xml')
        outfile3 = pathlib.Path(tmpdir, 'out3.xml')
        tree.write(str(infile3))
        # Run export. Schema validation will fail, no file created.
        with pytest.raises(SystemExit):
            main([str(infile3), '--scrubbed-hpxml', str(outfile3)])
        assert not outfile3.exists()
|
11498127
|
from flask import Blueprint, Response, redirect, render_template, request
from app import database
from app.config import BASE_URL, GITHUB_URL, SPOTIFY__LOGIN
from app.utils import generate_token, get_user_info
auth = Blueprint("auth", __name__, template_folder="templates")
@auth.route("/login")
def login():
    """Redirect the user to the Spotify OAuth login page."""
    return redirect(SPOTIFY__LOGIN)
@auth.route("/callback")
def callback():
    """Spotify OAuth callback: exchange the authorization code for a
    token, store it in the database keyed by user id, and render the
    callback page."""
    # Get the code.
    code = request.args.get("code")
    # Handle if code does not exist.
    if not code:
        return Response("No code found!")
    # Get the access token.
    try:
        token = generate_token(code)
        access_token = token["access_token"]
        user_id = get_user_info(access_token)["id"]
    except KeyError:
        # Missing keys in the token/user responses indicate a broken
        # auth flow.
        return Response("Invalid Auth workflow! Please login correctly.")
    # Save the user in the database.
    database.child("users").child(user_id).set(token)
    # Render the callback template.
    return render_template(
        "callback.html",
        id=user_id,
        base_url=BASE_URL,
        github_url=GITHUB_URL
    )
|
11498140
|
import numpy as np
from PIL import Image
import tensorflow as tf
class ImageClassifier(object):
    """Wraps a frozen TF1 MobileNetV2 graph: loads the GraphDef and a
    synset->human-readable label map, and classifies uploaded images."""

    def __init__(self, FLAGS):
        # FLAGS provides model_path, label_path, label_metadata_path and
        # num_top_predictions.
        self.FLAGS = FLAGS
        # Creates graph from saved GraphDef.
        self.create_graph()
        # Creates node id --> English label lookup.
        self.label_map = self.create_label_map()

    def run_inference_on_image(self, image):
        """
        Runs image recognition on a .jpg image. The image is received in a
        Flask http request, converted to a numpy array, reshaped to the required
        input shape for the Tensorflow MobileNet model, and run through the model.
        The output of the model is an array of prediction confidences for each
        class the model was trained against. The top K predictions are converted
        to human-readable labels with their prediction scores.
        """
        image_data = self.get_image_data(image)
        with tf.Session() as sess:
            output_tensor = sess.graph.get_tensor_by_name('MobilenetV2/Predictions/Reshape_1:0')
            predictions = sess.run(output_tensor, {'input:0': image_data})
            predictions = np.squeeze(predictions)
            # Indices of the K highest scores, highest first.
            top_k = predictions.argsort()[-self.FLAGS.num_top_predictions:][::-1]
            results = []
            for node_id in top_k:
                prediction = {
                    'label': self.label_map[node_id],
                    'score': predictions[node_id].astype(float),
                }
                results.append(prediction)
            return results

    def get_image_data(self, image):
        """
        Converts an image from a Flask http request to a numpy array in the
        shape used by the Tensorflow MobileNet model.
        """
        # Resize to 224x224 and scale pixel values into [-1, 1].
        img = np.array(Image.open(image).resize((224,224))).astype(np.float) / 128 - 1
        return img.reshape(1,224,224,3)

    def create_graph(self):
        """
        Creates a graph from frozen GraphDef file.
        """
        with tf.gfile.FastGFile(self.FLAGS.model_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')

    def create_label_map(self):
        """
        Create a dict mapping label id to human readable string.
        Returns:
            labels_to_names: dictionary where keys are integers from to 1000
            and values are human-readable names.
        We retrieve a synset file, which contains a list of valid synset labels used
        by ILSVRC competition. There is one synset one per line, eg.
            # n01440764
            # n01443537
        We also retrieve a synset_to_human_file, which contains a mapping from synsets
        to human-readable names for every synset in Imagenet. These are stored in a
        tsv format, as follows:
            # n02119247 black fox
            # n02119359 silver fox
        We assign each synset (in alphabetical order) an integer, starting from 1
        (since 0 is reserved for the background class).
        Code is based on
        https://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py#L463
        """
        synset_list = [s.strip() for s in open(self.FLAGS.label_path).readlines()]
        synset_to_human_list = open(self.FLAGS.label_metadata_path).readlines()
        synset_to_human = {}
        for s in synset_to_human_list:
            parts = s.strip().split('\t')
            assert len(parts) == 2
            synset = parts[0]
            human = parts[1]
            synset_to_human[synset] = human
        # Index 0 is reserved for the background class.
        label_index = 1
        labels_to_names = {0: 'background'}
        for synset in synset_list:
            name = synset_to_human[synset]
            labels_to_names[label_index] = name
            label_index += 1
        return labels_to_names
|
11498152
|
def msg_too_expensive_dim(method_name, dim):
    """Build the message shown when *method_name* is skipped for
    matrices with dimension greater than *dim*."""
    message = ('%s method is too expensive to be '
               'run for matrices with dimension '
               'greater than %d.')
    return message % (method_name, dim)
|
11498166
|
import mock
import pytest
from dmaws.utils import (
DEFAULT_TEMPLATES_PATH,
merge_dicts,
template, template_string, LazyTemplateMapping,
mkdir_p,
)
from jinja2 import UndefinedError
class TestMergeDicts(object):
    """merge_dicts semantics: scalars and lists overwrite, nested dicts
    merge recursively, replacing a dict with a scalar raises."""

    def test_simple_dicts(self):
        assert merge_dicts({"a": 1}, {"b": 2}) == {"a": 1, "b": 2}

    def test_overwriting_key(self):
        assert merge_dicts({"a": 1}, {"a": 2}) == {"a": 2}

    def test_overwriting_list_key(self):
        assert merge_dicts({"a": [1]}, {"a": [2]}) == {"a": [2]}

    def test_no_overwriting_dict_keys(self):
        with pytest.raises(ValueError):
            assert merge_dicts({"a": {"a": 1}}, {"a": 2}) == {"a": 2}

    def test_replacing_keys_with_dicts(self):
        assert merge_dicts({"a": 2}, {"a": {"a": 1}}) == {"a": {"a": 1}}

    def test_dict_keys_are_merged(self):
        assert(
            merge_dicts({"a": {"a": 1}}, {"a": {"b": 2}})
            == {"a": {"a": 1, "b": 2}}
        )

    def test_nested_dicts(self):
        assert(
            merge_dicts({"a": {"a": 1, "b": 1}, "b": {"a": 1, "b": 1}},
                        {"a": {"a": 2}, "b": {"b": 2}})
            == {"a": {"a": 2, "b": 1}, "b": {"a": 1, "b": 2}}
        )
class TestTemplateString(object):
    """template_string renders Jinja2 templates from a string, raising
    for undefined variables and loading includes from templates_path."""

    def test_simple_string(self):
        assert template_string("string", {}) == "string"

    def test_template_string(self):
        assert template_string("{{ var }} string", {"var": "a"}) == "a string"

    def test_template_dot_accessor(self):
        assert(
            template_string("{{ var.name }} string", {"var": {"name": "a"}})
            == "a string"
        )

    def test_missing_variable(self):
        with pytest.raises(UndefinedError):
            assert template_string("{{ var }} string", {}) == "a string"

    def test_template_loader(self):
        assert template_string('{% extends "base.j2" %}', {},
                               templates_path="tests/templates/")

    @mock.patch('dmaws.utils.jinja2.FileSystemLoader')
    def test_template_loader_default_path(self, jinja_loader):
        template_string('string', {})
        jinja_loader.assert_called_with(DEFAULT_TEMPLATES_PATH)
class TestTemplate(object):
    """template recursively renders strings inside nested lists/dicts."""

    def test_template_string(self):
        assert template("{{ var }} string", {"var": "a"}) == "a string"

    def test_template_list(self):
        assert(
            template(["{{ avar }}", "{{ bvar }}"], {"avar": "a", "bvar": "b"})
            == ["a", "b"]
        )

    def test_template_dict(self):
        assert(
            template({"a": "{{ avar }}", "b": "{{ bvar }}"},
                     {"avar": "a", "bvar": "b"})
            == {"a": "a", "b": "b"}
        )

    def test_template_nested_list(self):
        assert(
            template([["{{ avar }}"], "{{ bvar }}"],
                     {"avar": "a", "bvar": "b"})
            == [["a"], "b"]
        )

    def test_template_nested_dict(self):
        assert(
            template({"a": {"a": "{{ avar }}"}, "b": "{{ bvar }}"},
                     {"avar": "a", "bvar": "b"})
            == {"a": {"a": "a"}, "b": "b"}
        )

    @mock.patch('dmaws.utils.jinja2.FileSystemLoader')
    def test_template_loader_default_path(self, jinja_loader):
        template('string', {})
        jinja_loader.assert_called_with(DEFAULT_TEMPLATES_PATH)
class TestLazyTemplateMapping(object):
    """LazyTemplateMapping defers rendering until a key is accessed, so
    invalid templates only fail when actually read."""

    def test_not_templated_on_init(self):
        assert LazyTemplateMapping({"key": "{{ var }}"}, {})

    def test_single_key(self):
        mapping = LazyTemplateMapping({"key": "test", "err": "{{ var }}"},
                                      {"var": "a"})
        assert mapping["key"] == "test"

    def test_kwargs_shadow_variables(self):
        mapping = LazyTemplateMapping({"a": "{{ var }}", "b": "{{ var }}"},
                                      {"var": "var"}, var="kwarg")
        assert mapping["a"] == "kwarg"

    def test_missing_key_error(self):
        mapping = LazyTemplateMapping({"key": "{{ var }}"}, {})
        with pytest.raises(KeyError):
            mapping['missing']

    def test_keys(self):
        mapping = LazyTemplateMapping({"a": "{{ var }}", "b": "{{ var }}"}, {})
        assert set(mapping.keys()) == set(["a", "b"])

    def test_items(self):
        mapping = LazyTemplateMapping({"a": "{{ var }}a", "b": "{{ var }}b"},
                                      {"var": "a"})
        assert set(mapping.items()) == set([("a", "aa"), ("b", "ab")])
class TestMkdirP(object):
    """mkdir_p creates directories and tolerates already-existing paths;
    `makedirs`/`isdir` are mock fixtures defined elsewhere in the suite."""

    def test_directories_created(self, makedirs):
        mkdir_p('path/to/create')
        makedirs.assert_called_with('path/to/create')

    @pytest.mark.parametrize("path_exists", [
        True, False
    ])
    def test_directories_created_if_they_exist(self, makedirs, isdir, path_exists):
        # Simulate os.makedirs failing; mkdir_p should swallow the error
        # only when the path already exists.
        makedirs.side_effect = OSError()
        isdir.return_value = path_exists
        if path_exists:
            mkdir_p('path/to/create')
            makedirs.assert_called_with('path/to/create')
        else:
            with pytest.raises(OSError):
                mkdir_p('path/to/create')
|
11498221
|
from twisted.trial import unittest
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet import reactor
from ooni.utils.socks import TrueHeadersSOCKS5Agent
class TestSocks(unittest.TestCase):
    def test_create_agent(self):
        """Smoke test: constructing a TrueHeadersSOCKS5Agent against a
        local SOCKS endpoint must not raise (no connection is made)."""
        proxyEndpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)
        agent = TrueHeadersSOCKS5Agent(reactor, proxyEndpoint=proxyEndpoint)
|
11498259
|
import os
import argparse
import enum
import tarfile
import abc
def get_pretrained_model(destination):
    """
    Obtains a ready to use style_transfer model file.

    Args:
        destination: path to where the file should be stored

    Bug fixed: the URL was split with a backslash continuation *inside*
    the string literal, which embedded a space and the next line's
    indentation into the URL and broke the download. Adjacent string
    literals are concatenated instead.
    """
    url = ("https://storage.googleapis.com/download.magenta.tensorflow.org/"
           "models/arbitrary_style_transfer.tar.gz")
    os.system("curl -o arbitrary_style_transfer.tar.gz {0}".format(url))
    with tarfile.open("arbitrary_style_transfer.tar.gz") as tar:
        if not os.path.exists(destination):
            os.makedirs(destination)
        # NOTE(review): extractall on a downloaded archive can write
        # outside `destination` (path traversal); consider validating
        # member names before extraction.
        tar.extractall(destination)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Prepare pre-trained model for style transfer model')
    parser.add_argument('--model_path', type=str, default='./model', help='directory to put models, default is ./model')
    args = parser.parse_args()
    model_path = args.model_path
    try:
        get_pretrained_model(model_path)
    except AttributeError:
        # NOTE(review): a failed download/extract would raise OSError or
        # tarfile errors, not AttributeError — confirm the intended
        # exception here.
        print("The model fetched failed.")
|
11498291
|
import bisect
class PriorityQueue:
    """Priority queue backed by a list kept sorted by priority; the
    element with the smallest priority value is served first."""

    def __init__(self):
        self.queue = []

    def append(self, data, priority):
        """Insert *data* keeping the queue ordered by *priority*."""
        entry = (priority, data)
        bisect.insort(self.queue, entry)

    def pop(self, n):
        """Remove and return the element with the smallest priority.
        The ``n`` argument is ignored; it exists only to follow the
        standard queue protocol."""
        priority, data = self.queue.pop(0)
        return data
#sample
# Demo: items pop in ascending priority order, spelling "H E L L O".
# (Python 2 print statements below.)
a=PriorityQueue()
a.append('L',5)
a.append('E',4)
a.append('L',5)
a.append('O',8)
a.append('H',1)
for i in range(5):
    print a.pop(0),
print
|
11498328
|
from emailrep import EmailRep
import argparse
import json
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    OKGREEN = "\033[92m"  # green
    FAIL = "\033[91m"     # red
    BOLD = "\033[1m"      # bold
    ENDC = "\033[0m"      # reset all attributes
def parse_args():
    """Parse command-line arguments; ``-e/--email`` is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--email", type=str, required=True, help="Email")
    return parser.parse_args()
def main():
    """Query EmailRep.io for the address given on the command line and
    pretty-print the reputation details.

    Refactor: the ten near-identical print blocks are now data-driven;
    the printed output is byte-identical to the original.
    """
    args = parse_args()
    mail = args.email

    with open("./keys.json", "r") as configFile:
        conf = json.loads(configFile.read())

    if conf[0]["EmailRep.io API Key"] == "":
        print(f"{bcolors.FAIL}[-] Enter the API key in the keys.json file to use this feature!{bcolors.ENDC}")
        exit()

    emailrep = EmailRep(conf[0]["EmailRep.io API Key"])
    details = emailrep.query(mail)["details"]

    # (field name, highlight in green when the value is truthy)
    fields = [
        ("blacklisted", False),
        ("malicious_activity", False),
        ("credentials_leaked", True),
        ("data_breach", True),
        ("domain_exists", False),
        ("new_domain", False),
        ("free_provider", False),
        ("valid_mx", False),
        ("spoofable", False),
        ("spam", False),
    ]
    for name, highlight in fields:
        value = details[name]
        color = bcolors.OKGREEN if highlight and value else bcolors.BOLD
        print(f"|-- {name}: {color}" + str(value) + f"{bcolors.ENDC}")
main()
|
11498399
|
from __future__ import annotations
from reamber.base.Property import list_props
from reamber.base.lists.notes.HitList import HitList
from reamber.sm.SMMine import SMMine
from reamber.sm.lists.notes.SMNoteList import SMNoteList
@list_props(SMMine)
class SMMineList(HitList[SMMine], SMNoteList[SMMine]):
    """List of StepMania mine notes; per-property list accessors are
    generated by the ``@list_props`` decorator."""
    ...
|
11498419
|
class ElectricalSetting(Element,IDisposable):
""" The ElectricalSetting class represents an instance of element of electrical settings. """
def AddDistributionSysType(self,name,phase,phaseConfig,numWire,volLineToLine,volLineToGround):
"""
AddDistributionSysType(self: ElectricalSetting,name: str,phase: ElectricalPhase,phaseConfig: ElectricalPhaseConfiguration,numWire: int,volLineToLine: VoltageType,volLineToGround: VoltageType) -> DistributionSysType
Add a new distribution system type to project.
name: The name of new added distribution system type
phase: Single or three phase this type is
phaseConfig: Configuration property of given phase
numWire: Wire number of this distribution system
volLineToLine: Type of line to line voltage in this system
volLineToGround: Type of line to ground voltage in this system
Returns: New added distribution system type object.
"""
pass
def AddVoltageType(self,name,actualValue,minValue,maxValue):
"""
AddVoltageType(self: ElectricalSetting,name: str,actualValue: float,minValue: float,maxValue: float) -> VoltageType
Add a new type definition of voltage into project.
name: Specify voltage type name
actualValue: Specify actual value of voltage type.
minValue: Specify acceptable minimum value of the voltage type.
maxValue: Specify acceptable maximum value of the voltage type.
Returns: New added voltage type object.
"""
pass
def AddWireMaterialType(self,name,baseMaterial):
"""
AddWireMaterialType(self: ElectricalSetting,name: str,baseMaterial: WireMaterialType) -> WireMaterialType
Add a new type of wire material.
name: Name of new material type.
baseMaterial: Specify an existing material type which New material will be constructed based
on.
Returns: New added wire material type object.
"""
pass
def AddWireType(self,name,materialType,temperatureRating,insulation,maxSize,neutralMultiplier,neutralRequired,neutralMode,conduit):
"""
AddWireType(self: ElectricalSetting,name: str,materialType: WireMaterialType,temperatureRating: TemperatureRatingType,insulation: InsulationType,maxSize: WireSize,neutralMultiplier: float,neutralRequired: bool,neutralMode: NeutralMode,conduit: WireConduitType) -> WireType
Add a new wire type to project.
name: Name of the new wire type.
materialType: Wire material of new wire type.
temperatureRating: Temperature rating type information of new wire type.
insulation: Insulation of new wire type.
maxSize: Max wire size of new wire type.
neutralMultiplier: Neutral multiplier of new wire type.
neutralRequired: Specify whether neutral point is required.
neutralMode: Specify neutral mode.
conduit: Conduit type of new wire type.
Returns: New added wire type object.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
@staticmethod
def GetElectricalSettings(document):
"""
GetElectricalSettings(document: Document) -> ElectricalSetting
Get the electrical settings of the project.
document: The document.
Returns: The electrical settings of the project.
"""
pass
def GetSpecificFittingAngles(self):
"""
GetSpecificFittingAngles(self: ElectricalSetting) -> IList[float]
Gets the list of specific fitting angles.
Returns: Angles (in degrees).
"""
pass
def GetSpecificFittingAngleStatus(self,angle):
"""
GetSpecificFittingAngleStatus(self: ElectricalSetting,angle: float) -> bool
Gets the status of given specific fitting angle.
angle: The specific fitting angle (in degree) that must be one of 90,60,45,30,22.5
or 11.25 degrees.
"""
pass
def IsValidSpecificFittingAngle(self,angle):
"""
IsValidSpecificFittingAngle(self: ElectricalSetting,angle: float) -> bool
Checks that the given value is a valid specific fitting angle. The specific
fitting angles are angles of 90,60,45,30,22.5 or 11.25 degrees.
angle: The angle value (in degree).
Returns: True if the given value is a valid specific fitting angle.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def RemoveDistributionSysType(self,distributionSysType):
"""
RemoveDistributionSysType(self: ElectricalSetting,distributionSysType: DistributionSysType)
Remove an existing distribution system type from the project.
"""
pass
def RemoveVoltageType(self,voltageType):
"""
RemoveVoltageType(self: ElectricalSetting,voltageType: VoltageType)
Remove the voltage type from project.
voltageType: Specify the voltage type to be removed.
"""
pass
def RemoveWireMaterialType(self,materialType):
"""
RemoveWireMaterialType(self: ElectricalSetting,materialType: WireMaterialType)
Remove the wire material type from project.
materialType: The wire material type to be removed.
"""
pass
def RemoveWireType(self,wireType):
"""
RemoveWireType(self: ElectricalSetting,wireType: WireType)
Remove wire type definition from project.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetSpecificFittingAngleStatus(self,angle,bStatus):
"""
SetSpecificFittingAngleStatus(self: ElectricalSetting,angle: float,bStatus: bool)
Sets the status of given specific angle.
angle: The specific angle (in degree) that must be 60,45,30,22.5 or 11.25 degrees.
bStatus: Status,true - using the given angle during the pipe layout.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  # Auto-generated API stub: disposes the wrapped .NET object on `with` exit.
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  # Auto-generated API stub; the real implementation is provided by the Revit runtime.
  pass
CircuitLoadCalculationMethod=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The method to calculate circuit load
Get: CircuitLoadCalculationMethod(self: ElectricalSetting) -> CircuitLoadCalculationMethod
Set: CircuitLoadCalculationMethod(self: ElectricalSetting)=value
"""
CircuitNamePhaseA=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Circuit Naming by Phase - Phase A Label.
Get: CircuitNamePhaseA(self: ElectricalSetting) -> str
Set: CircuitNamePhaseA(self: ElectricalSetting)=value
"""
CircuitNamePhaseB=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Circuit Naming by Phase - Phase B Label.
Get: CircuitNamePhaseB(self: ElectricalSetting) -> str
Set: CircuitNamePhaseB(self: ElectricalSetting)=value
"""
CircuitNamePhaseC=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Circuit Naming by Phase - Phase C Label.
Get: CircuitNamePhaseC(self: ElectricalSetting) -> str
Set: CircuitNamePhaseC(self: ElectricalSetting)=value
"""
CircuitRating=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The default circuit rating for newly created circuit.
Get: CircuitRating(self: ElectricalSetting) -> float
Set: CircuitRating(self: ElectricalSetting)=value
"""
CircuitSequence=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sequence in which power circuits are created.
Get: CircuitSequence(self: ElectricalSetting) -> CircuitSequence
Set: CircuitSequence(self: ElectricalSetting)=value
"""
DistributionSysTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get all distribution system types of the project.
Get: DistributionSysTypes(self: ElectricalSetting) -> DistributionSysTypeSet
"""
VoltageTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get all voltage type definitions information of the project.
Get: VoltageTypes(self: ElectricalSetting) -> VoltageTypeSet
"""
WireConduitTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get electrical conduit types information of the project.
Get: WireConduitTypes(self: ElectricalSetting) -> WireConduitTypeSet
"""
WireMaterialTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get electrical wire material types information of the project.
Get: WireMaterialTypes(self: ElectricalSetting) -> WireMaterialTypeSet
"""
WireTypes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get all wire type definition information of the project.
Get: WireTypes(self: ElectricalSetting) -> WireTypeSet
"""
|
11498420
|
from inkplate6_PLUS import Inkplate
from image import *
import time

display = Inkplate(Inkplate.INKPLATE_1BIT)

# main function used by micropython
if __name__ == "__main__":
    display.begin()
    display.tsInit(1)
    # Draw the touch target: a 100x100 black square.
    display.drawRect(450, 350, 100, 100, display.BLACK)
    display.display()
    counter = 0
    while True:
        # touch the square to increment the counter
        if display.touchInArea(450, 350, 100, 100):
            counter += 1
            print(counter)
        # FIX: the original loop busy-waited (pegging the CPU) and counted a
        # single touch once per poll iteration; `time` was imported but never
        # used. Throttle the polling loop instead.
        time.sleep(0.1)
|
11498447
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from arch.model_cityscapes import SqueezeNASNetCityscapesHyperparameters, ASPP_Lite, ASPP, Conv_BN_ReLU
from search.arch_search import SuperNetwork
from search.model_search import SuperNetworkSqueezeNASNet
class SuperNetworkSqueezeNASNetCityscapes(SuperNetwork):
    """Supernet for architecture search on Cityscapes semantic segmentation.

    Wraps a searchable SqueezeNAS encoder and attaches either an ASPP-Lite
    head (``lr_aspp=True``) or a full ASPP head with an extra skip-fusion
    path.  When ground truth is supplied, the loss is per-pixel cross
    entropy (ignore index 255) plus the encoder's resource cost scaled by
    ``cost_loss_multiplier``.
    """
    def __init__(self, hyperparams: SqueezeNASNetCityscapesHyperparameters, cost_loss_multiplier: float, lr_aspp=True):
        super().__init__()
        self.hyperparams = hyperparams
        self.lr_aspp = lr_aspp
        self.cost_loss_multiplier = cost_loss_multiplier
        # Searchable encoder; classifier-specific fields are stripped because
        # the decoder below provides the segmentation head.
        self.encoder = SuperNetworkSqueezeNASNet(
            hyperparams=hyperparams.to_ds_mobile_net_hyperparameters(last_channels=None, num_classes=None))
        self.criterion = CrossEntropyLoss(ignore_index=255)
        mid_ch = hyperparams.mid_channels
        # Walk the block list (expanded by num_repeat) to find the channel
        # width of the block that produces the skip ("low level") features.
        low_level_channels = None
        count = 0
        for block in hyperparams.blocks:
            count += block.num_repeat
            if count > self.hyperparams.skip_output_block_index:
                low_level_channels = block.num_channels
                break
        assert low_level_channels is not None
        if hyperparams.last_channels:
            last_channels = hyperparams.last_channels
        else:
            last_channels = hyperparams.blocks[-1].num_channels
        if self.lr_aspp:
            self.decoder = ASPP_Lite(os16_channels=last_channels, os8_channels=low_level_channels,
                                     mid_channels=mid_ch, num_classes=hyperparams.num_classes)
        else:
            # Full ASPP plus layers that fuse the skip features into logits.
            self.decoder = ASPP(in_ch=last_channels, mid_ch=mid_ch, out_ch=mid_ch, groups=(mid_ch,) * 3)
            self.lowlevel1x1 = nn.Conv2d(low_level_channels, low_level_channels, 1)
            self.logits2 = Conv_BN_ReLU(mid_ch + low_level_channels, mid_ch + low_level_channels, 3,
                                        groups=mid_ch + low_level_channels, padding=1)
            self.logits3 = nn.Conv2d(low_level_channels + mid_ch, hyperparams.num_classes, kernel_size=1)
        # He initialization for convs, unit-affine for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, normalized_rgb, gt=None, rgb=None, fname=None):
        """Run the supernet; return predictions and, when ``gt`` is given, the loss.

        normalized_rgb: input image batch (presumably NCHW -- TODO confirm).
        gt: optional ground-truth label map; 255 marks ignored pixels.
        rgb, fname: accepted for caller compatibility but unused in this method.
        """
        encoder_outputs = self.encoder(normalized_rgb)
        residuals_outputs = encoder_outputs['residuals_outputs']
        cur_feat = encoder_outputs['output']
        cost = encoder_outputs['cost']
        b, c, h, w = cur_feat.shape
        # Sanity check: feature maps are expected landscape-oriented (w >= h).
        assert w >= h, cur_feat.shape
        # Skip connection from an intermediate encoder block.
        low_level_feat = residuals_outputs[self.hyperparams.skip_output_block_index]
        if self.lr_aspp:
            # ASPP-Lite consumes both maps and the result is upsampled x8.
            logits = self.decoder(cur_feat, low_level_feat)
            logits = F.interpolate(logits, scale_factor=8, mode='bilinear', align_corners=True)
        else:
            # Full ASPP path: decode, upsample x4, fuse the skip features,
            # refine, then upsample x4 again to full resolution.
            logits = self.decoder(cur_feat)
            logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=True)
            low_level_feat = self.lowlevel1x1(low_level_feat)
            logits = torch.cat((logits, low_level_feat), dim=1)
            logits = self.logits2(logits)
            logits = self.logits3(logits)
            logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=True)
        if gt is None:
            # Inference mode: no loss computation.
            return {'preds': logits, 'cost': cost}
        logits = logits.float()
        # Resource penalty steers the search toward cheaper architectures.
        resource_cost_loss = torch.mean(self.cost_loss_multiplier * cost)
        problem_loss = self.criterion(logits, gt)
        loss = resource_cost_loss + problem_loss
        return {'loss': loss, 'preds': logits, 'cost': cost, 'problem_loss': problem_loss,
                'resource_cost_loss': resource_cost_loss}
|
11498502
|
def main():
    """Entry point: run a single command given after '-c', or start the REPL."""
    import sys
    from poseidon_cli.cli import PoseidonShell
    shell = PoseidonShell()
    if '-c' in sys.argv:
        # Drop argv entries up to and including the '-c' flag, then run
        # whatever remains as one command line.
        del sys.argv[:sys.argv.index('-c') + 1]
        shell.onecmd(' '.join(sys.argv))
    else:
        shell.cmdloop()
|
11498503
|
import os,sys
# change the path according to the test folder in the system
#sys.path.append('/home/ubuntu/setup/src/fogflow/test/UnitTest/v1')
from datetime import datetime
import copy
import json
import requests
import time
import pytest
import data_ngsi10
import sys
# Base URL of the broker under test; change host/port to match your deployment.
brokerIp="http://localhost:8070"
print("Testing of v1 API")
# testCase 1
'''
To test subscription request
'''
def test_getSubscription1():
    """POST a subscription request and expect HTTP 200."""
    headers = {'Content-Type': 'application/json'}
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata1), headers=headers)
    # The broker answers single-quoted pseudo-JSON; normalise, then parse.
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    assert reply.status_code == 200
#testCase 2
'''
To test entity creation with attributes, then susbscribing and get subscription using ID
'''
def test_getSubscription2():
    """Create an entity with attributes, subscribe, then fetch the subscription by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata2), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata3), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result1":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 3
'''
To test entity creation with one attribute : pressure only followed by subscribing and get using ID
'''
def test_getSubscription3():
    """Create an entity with only a pressure attribute, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata4), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata5), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result2":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 4
'''
To test entity creation with one attribute : Temperature only followed by subscription and get using ID
'''
def test_getSubscription4():
    """Create an entity with only a temperature attribute, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata6), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata7), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result3":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 5
'''
To test create entity without passing Domain data followed by subscription and get using ID
'''
def test_getSubscription5():
    """Create an entity without domain data, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata8), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata9), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result4":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 6
'''
To test create entity without attributes followed by subscription and get using Id
'''
def test_getSubscription6():
    """Create an entity without attributes, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata10), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata11), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result5":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 7
'''
To test create entity without attributes and Metadata and followed by sbscription and get using Id
'''
def test_getSubscription7():
    """Create an entity without attributes or metadata, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata12), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata13), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result6":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 8
'''
To test create entity without entity type followed by subscription and get using Id
'''
def test_getSubscription8():
    """Create an entity without an entity type, subscribe, fetch by id."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata14), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata15), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id and check the entity it covers
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    if body['entities'][0]["id"] == "Result7":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert reply.status_code == 200
#testCase 9
'''
To test get subscription request by first posting subscription request followed by delete request
'''
def test_getSubscription9():
    """Subscribe, delete the subscription, then verify a GET for it returns 404."""
    headers = {'Content-Type': 'application/json'}
    # subscribe
    r = requests.post(brokerIp + "/ngsi10/subscribeContext",
                      data=json.dumps(data_ngsi10.subdata16), headers=headers)
    resp = json.loads(r.content.decode('utf8').replace("'", '"'))
    sid = resp['subscribeResponse']['subscriptionId']
    # delete the subscription just created
    r = requests.delete(brokerIp + "/ngsi10/subscription/" + sid, headers=headers)
    # BUG FIX: the GET URL was missing its trailing slash
    # ("/ngsi10/subscription" + sid -> "/ngsi10/subscriptionSID"), so the 404
    # came from a bogus route rather than from the deleted subscription id.
    r = requests.get(brokerIp + "/ngsi10/subscription/" + sid)
    print("Subscription with sid-" + sid + " not found")
    assert r.status_code == 404
#testCase 10
'''
To test the update post request to create entity
'''
def test_getSubscription10():
    """updateContext creates an entity and answers HTTP 200."""
    headers = {'Content-Type': 'application/json'}
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata17), headers=headers)
    # parse the (single-quoted) response body to make sure it is well formed
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    assert reply.status_code == 200
#testCase 11
'''
To test subscription with attributes and using ID to validate it
'''
def test_getSubscription11():
    """Create, subscribe, update the entity, then validate the notification."""
    headers = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata18), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # subscribe to it
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata19), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # update the entity so the broker fires a notification
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata20), headers=headers)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # ask the accumulator whether the notification arrived
    reply = requests.post("http://0.0.0.0:8888/validateNotification",
                          json={"subscriptionId": sub_id})
    print(reply.content)
    assert reply.status_code == 200
#testCase 12
'''
To test subscription for its if and else part : 1) for Destination Header
'''
'''def test_getSubscription12():
#create an entity
url=brokerIp+"/ngsi10/updateContext"
headers={'Content-Type' : 'application/json'}
r=requests.post(url,data=json.dumps(data_ngsi10.subdata21),headers=headers)
resp_content=r.content
resInJson=resp_content.decode('utf8').replace("'",'"')
resp=json.loads(resInJson)
#print(resp)
#subscribing
url=brokerIp+"/ngsi10/subscribeContext"
headers= {'Content-Type': 'application/json','Destination' : 'orion-broker'}
r=requests.post(url,data=json.dumps(data_ngsi10.subdata22),headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
resp=resp['subscribeResponse']
sid=resp['subscriptionId']
#print(sid)
#update the created entity
url=brokerIp+"/ngsi10/updateContext"
r=requests.post(url,data=json.dumps(data_ngsi10.subdata23),headers=headers)
resp_content1=r.content
resInJson=resp_content1.decode('utf8').replace("'",'"')
resp1=json.loads(resInJson)
#print(resp1)
#validate via accumulator
url="http://1172.16.58.3:1026/v2/entities/"
#r=requests.post(url,json={"subscriptionId" : sid})
r=requests.get(url)
print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#if resp[0]["id"]=="Result11" and resp[0]["type"]=="Result11":
#print("\nValidated")
#else:
#print("\nNot Validated")
assert r.status_code == 200
'''
#testCase 13
'''
To test subscription for its if and else part : 2) for User - Agent Header
'''
def test_getSubscription18():
    """Subscription carrying a User-Agent header: create, subscribe, update, validate."""
    plain = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata24), headers=plain)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    tagged = {'Content-Type': 'application/json',
              'User-Agent': 'lightweight-iot-broker'}
    # subscribe with the extra header
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata25), headers=tagged)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # the update reuses the tagged headers, matching the original flow
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata26), headers=tagged)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # ask the accumulator whether the notification arrived
    reply = requests.post("http://0.0.0.0:8888/validateNotification",
                          json={"subscriptionId": sub_id})
    print(reply.content)
    assert reply.status_code == 200
#testCase 14
'''
To test subcription for its if else part : 3) Require-Reliability Header
'''
def test_getSubscription19():
    """Subscription carrying a Require-Reliability header: create, subscribe, update, validate."""
    plain = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata27), headers=plain)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    tagged = {'Content-Type': 'application/json',
              'Require-Reliability': 'true'}
    # subscribe with the extra header
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata28), headers=tagged)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # the update reuses the tagged headers, matching the original flow
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata29), headers=tagged)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # ask the accumulator whether the notification arrived
    reply = requests.post("http://0.0.0.0:8888/validateNotification",
                          json={"subscriptionId": sub_id})
    print(reply.content)
    assert reply.status_code == 200
#testCase 15
'''
To test subscription with two headers simultaneously : 4) Destination and User-Agent
'''
def test_getSubscription20():
    """Subscription with both Destination and User-Agent headers; validate via accumulator."""
    url = brokerIp + "/ngsi10/updateContext"
    headers = {'Content-Type': 'application/json'}
    # create the entity
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata30), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # subscribe with both routing headers
    url = brokerIp + "/ngsi10/subscribeContext"
    headers = {'Content-Type': 'application/json', 'Destination': 'orion-broker',
               'User-Agent': 'lightweight-iot-broker'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata31), headers=headers)
    resp = json.loads(r.content.decode('utf8').replace("'", '"'))
    sid = resp['subscribeResponse']['subscriptionId']
    # update the entity so the broker fires a notification
    url = brokerIp + "/ngsi10/updateContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata32), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # validate via accumulator
    url = "http://0.0.0.0:8888/validateNotification"
    r = requests.post(url, json={"subscriptionId": sid})
    # BUG FIX: r.content is bytes under Python 3, so comparing it to the str
    # "Not validated" was always False; decode before comparing.
    # NOTE(review): printing "Validated" when the accumulator answers
    # "Not validated" looks inverted, but the branch mapping is kept exactly
    # as the original author wrote it -- confirm the intended semantics.
    if r.content.decode('utf8') == "Not validated":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert r.status_code == 200
#testCase 16
'''
To test subscription with two headers simultaneously : 4) User-Agent and Require-Reliability
'''
def test_getSubscription21():
    """Subscription with User-Agent and Require-Reliability headers; validate via accumulator."""
    plain = {'Content-Type': 'application/json'}
    # create the entity
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata33), headers=plain)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    tagged = {'Content-Type': 'application/json',
              'User-Agent': 'lightweight-iot-broker',
              'Require-Reliability': 'true'}
    # subscribe with the extra headers
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata34), headers=tagged)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # the update reuses the tagged headers, matching the original flow
    reply = requests.post(brokerIp + "/ngsi10/updateContext",
                          data=json.dumps(data_ngsi10.subdata35), headers=tagged)
    json.loads(reply.content.decode('utf8').replace("'", '"'))
    # ask the accumulator whether the notification arrived
    reply = requests.post("http://0.0.0.0:8888/validateNotification",
                          json={"subscriptionId": sub_id})
    print(reply.content)
    assert reply.status_code == 200
#testCase 17
'''
To test subscription with two headers simultaneously : 4) Destination and Require-Reliability headers
'''
def test_getSubscription22():
    """Subscription with Destination and Require-Reliability headers; validate via accumulator."""
    url = brokerIp + "/ngsi10/updateContext"
    headers = {'Content-Type': 'application/json'}
    # create the entity
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata36), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # subscribe with both routing headers
    url = brokerIp + "/ngsi10/subscribeContext"
    headers = {'Content-Type': 'application/json', 'Destination': 'orion-broker',
               'Require-Reliability': 'true'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata37), headers=headers)
    resp = json.loads(r.content.decode('utf8').replace("'", '"'))
    sid = resp['subscribeResponse']['subscriptionId']
    # update the entity so the broker fires a notification
    url = brokerIp + "/ngsi10/updateContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata38), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # validate via accumulator
    url = "http://0.0.0.0:8888/validateNotification"
    r = requests.post(url, json={"subscriptionId": sid})
    # BUG FIX: r.content is bytes under Python 3, so comparing it to the str
    # "Not validated" was always False; decode before comparing.
    # NOTE(review): the Validated/Not Validated branch mapping looks inverted
    # but is preserved from the original -- confirm the intended semantics.
    if r.content.decode('utf8') == "Not validated":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert r.status_code == 200
#testCase 18
'''
To test subscription with all headers simultaneously : 5) Destination, User-Agent and Require-Reliability headers
'''
def test_getSubscription23():
    """Subscription with Destination, User-Agent and Require-Reliability headers."""
    url = brokerIp + "/ngsi10/updateContext"
    headers = {'Content-Type': 'application/json'}
    # create the entity
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata39), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # subscribe with all three routing headers
    url = brokerIp + "/ngsi10/subscribeContext"
    headers = {'Content-Type': 'application/json', 'Destination': 'orion-broker',
               'User-Agent': 'lightweight-iot-broker', 'Require-Reliability': 'true'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata40), headers=headers)
    resp = json.loads(r.content.decode('utf8').replace("'", '"'))
    sid = resp['subscribeResponse']['subscriptionId']
    # update the entity so the broker fires a notification
    url = brokerIp + "/ngsi10/updateContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata41), headers=headers)
    json.loads(r.content.decode('utf8').replace("'", '"'))
    # validate via accumulator
    url = "http://0.0.0.0:8888/validateNotification"
    r = requests.post(url, json={"subscriptionId": sid})
    # BUG FIX: r.content is bytes under Python 3, so comparing it to the str
    # "Not validated" was always False; decode before comparing.
    # NOTE(review): the Validated/Not Validated branch mapping looks inverted
    # but is preserved from the original -- confirm the intended semantics.
    if r.content.decode('utf8') == "Not validated":
        print("\nValidated")
    else:
        print("\nNot Validated")
    assert r.status_code == 200
#testCase 19
'''
To test the get-subscription request
'''
def test_getsubscription24():
    """Listing all subscriptions returns HTTP 200."""
    reply = requests.get(brokerIp + "/ngsi10/subscription")
    assert reply.status_code == 200
#testCase 20
'''
To test for get all entities
'''
def test_getallentities():
    """Listing all entities returns HTTP 200."""
    reply = requests.get(brokerIp + "/ngsi10/entity")
    assert reply.status_code == 200
#testCase 21
'''
To test for query request using Id
'''
def test_queryrequest1():
    """Query entities by id; expect HTTP 200."""
    url = brokerIp + "/ngsi10/queryContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata42), headers=headers)
    assert r.status_code == 200
#testCase 22
'''
To test for query request using type
'''
def test_queryrequest2():
    """Query entities by type; expect HTTP 200."""
    url = brokerIp + "/ngsi10/queryContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata43), headers=headers)
    assert r.status_code == 200
#testCase 23
'''
To test for query request using geo-scope(polygon)
'''
def test_queryrequest3():
    """Query entities by a polygon geo-scope; expect HTTP 200."""
    url = brokerIp + "/ngsi10/queryContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata44), headers=headers)
    assert r.status_code == 200
#testCase 24
'''
To test for query request multiple filter
'''
def test_queryrequest4():
    """Query entities with multiple filters; expect HTTP 200."""
    url = brokerIp + "/ngsi10/queryContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata45), headers=headers)
    assert r.status_code == 200
#testCase 25
'''
To test if wrong payload is decoded or not
'''
def test_case25():
    """A malformed payload is still accepted/decoded by the broker (HTTP 200)."""
    url = brokerIp + "/ngsi10/updateContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata46), headers=headers)
    assert r.status_code == 200
#testCase26
'''
To test the response on passing DELETE in updateAction in payload
'''
def test_case26():
    """Create an entity, DELETE it via updateAction, verify a GET then 404s."""
    # BUG FIX: the Content-Type value was misspelled "appliction/json" in both POSTs.
    headers = {'Content-Type': 'application/json',
               'User-Agent': 'lightweight-iot-broker'}
    # create the v1 entity
    r = requests.post(brokerIp + "/ngsi10/updateContext",
                      data=json.dumps(data_ngsi10.subdata47), headers=headers)
    # it should now be retrievable
    r = requests.get(brokerIp + "/ngsi10/entity/Result047")
    # delete it by sending updateAction=DELETE in the payload
    r = requests.post(brokerIp + "/ngsi10/updateContext",
                      data=json.dumps(data_ngsi10.subdata48), headers=headers)
    # the entity must be gone now
    r = requests.get(brokerIp + "/ngsi10/entity/Result047")
    assert r.status_code == 404
#testCase 27
'''
To test the entity creation with empty payload
'''
def test_case27():
    """Entity creation with an empty payload still returns HTTP 200."""
    url = brokerIp + "/ngsi10/updateContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata49), headers=headers)
    assert r.status_code == 200
#testCase 28
'''
To test the subscription with empty payload
'''
def test_case28():
    """Subscribing with an empty payload still yields a subscription id and HTTP 200."""
    headers = {'Content-Type': 'application/json'}
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata49), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    # the broker must still hand back a subscription id
    sub_id = body['subscribeResponse']['subscriptionId']
    assert reply.status_code == 200
#testCase 29
'''
To get subscription of empty payload when subscribing
'''
def test_case29():
    """Subscribe with an empty payload, then fetch that subscription by id."""
    headers = {'Content-Type': 'application/json'}
    reply = requests.post(brokerIp + "/ngsi10/subscribeContext",
                          data=json.dumps(data_ngsi10.subdata49), headers=headers)
    body = json.loads(reply.content.decode('utf8').replace("'", '"'))
    sub_id = body['subscribeResponse']['subscriptionId']
    # fetch the subscription back by its id
    reply = requests.get(brokerIp + "/ngsi10/subscription/" + sub_id)
    assert reply.status_code == 200
#testCase 30
'''
To test the action of API on passing an attribute as a command in payload
'''
def test_cases30():
    """Posting an attribute declared as a command must be rejected (HTTP 500)."""
    url = brokerIp + "/ngsi10/updateContext"
    # BUG FIX: the Content-Type value was misspelled "appliction/json".
    headers = {'Content-Type': 'application/json',
               'User-Agent': 'lightweight-iot-broker'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata50), headers=headers)
    assert r.status_code == 500
#testCase 31
'''
To test the fiware header with updateAction equal to UPDATE
'''
def test_case31():
    """updateContext with updateAction UPDATE under fiware-service headers returns 200."""
    # Tenant headers shared by both requests (Content-Type typo fixed).
    headers = {'Content-Type': 'application/json',
               'fiware-service': 'openiot', 'fiware-servicepath': '/'}
    # Create and register the entity first.
    url = brokerIp + "/NGSI9/registerContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata51), headers=headers)
    # Issue the updateContext request under the same tenant.
    url = brokerIp + "/ngsi10/updateContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata57), headers=headers)
    assert r.status_code == 200
#testCase 32
'''
To test the fiware header with updateAction equal to APPEND
'''
def test_case32():
    """updateContext with updateAction APPEND under fiware-service headers returns 200."""
    # Tenant headers shared by both requests (Content-Type typo fixed).
    headers = {'Content-Type': 'application/json',
               'fiware-service': 'openiot', 'fiware-servicepath': '/'}
    # Register the entity first.
    url = brokerIp + "/NGSI9/registerContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata52), headers=headers)
    # Issue the updateContext request under the same tenant.
    url = brokerIp + "/ngsi10/updateContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata57), headers=headers)
    assert r.status_code == 200
#testCase 33
'''
To test the fiware header with updateAction equal to delete
'''
def test_case33():
    """DELETE updateAction under fiware-service headers removes the entity (404 on GET)."""
    # Tenant headers shared by all requests (Content-Type typo fixed).
    headers = {'Content-Type': 'application/json',
               'fiware-service': 'Abc', 'fiware-servicepath': 'pqr'}
    update_url = brokerIp + "/ngsi10/updateContext"
    entity_url = brokerIp + "/ngsi10/entity/Result053"
    # Create the v1 entity.
    requests.post(update_url, data=json.dumps(data_ngsi10.subdata53), headers=headers)
    # Sanity-read the created entity (response intentionally unchecked).
    requests.get(entity_url)
    # Delete it via an updateContext payload carrying updateAction DELETE.
    requests.post(update_url, data=json.dumps(data_ngsi10.subdata54), headers=headers)
    # The entity must now be gone.
    r = requests.get(entity_url)
    assert r.status_code == 404
#testCase 34
'''
To test the notifyContext request
'''
def test_case34():
    """A notifyContext request should be accepted (200)."""
    url = brokerIp + "/ngsi10/notifyContext"
    # Fixed typo 'appliction/json' -> 'application/json'.
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata55), headers=headers)
    assert r.status_code == 200
#testCase 35
'''
To test unsubscribing feature
'''
def test_case35():
    """Unsubscribing an existing subscription should return 200."""
    headers = {'Content-Type': 'application/json'}
    # Create a subscription to tear down.
    url = brokerIp + "/ngsi10/subscribeContext"
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata56), headers=headers)
    # r.json() avoids the lossy decode().replace("'", '"') round-trip that
    # would corrupt payloads containing apostrophes.
    sid = r.json()['subscribeResponse']['subscriptionId']
    # Unsubscribe using the returned id.
    url = brokerIp + "/ngsi10/unsubscribeContext"
    r = requests.post(url, json={"subscriptionId": sid, "originator": "POMN"}, headers=headers)
    assert r.status_code == 200
#testCase 36
'''
To test entity creation using other route
'''
def test_case36():
    """Entity creation via the alternate /v1 route should return 200."""
    url = brokerIp + "/v1/updateContext"
    # Fixed typo 'appliction/json' -> 'application/json'.
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata56), headers=headers)
    assert r.status_code == 200
#testCase 37
'''
To test and fetch unique entity
'''
def test_case37():
    """Fetching a unique entity by id should return 200."""
    response = requests.get(brokerIp + "/ngsi10/entity/Result14")
    assert response.status_code == 200
#testCase 38
'''
To test and fetch attribute specific to an entity
'''
def test_case38():
    """Fetching a single attribute of an entity returns its name/type/value."""
    r = requests.get(brokerIp + "/ngsi10/entity/Result14/pressure")
    assert r.status_code == 200
    # Parse directly as JSON instead of the lossy decode().replace("'", '"')
    # round-trip, which would corrupt values containing apostrophes.
    resp = r.json()
    # Previously these checks only printed "Validated"/"Not Validated" and
    # could never fail the test; assert them so regressions are caught.
    assert resp['name'] == 'pressure'
    assert resp['type'] == 'float'
    assert resp['value'] == 55
|
11498519
|
import argparse
import json
import os
import shutil
from chainerui.utils.tempdir import tempdir
def convert_dict(conditions):
    """Return *conditions* as a plain dict, unwrapping an argparse.Namespace.

    Any other value is passed through unchanged.
    """
    if not isinstance(conditions, argparse.Namespace):
        return conditions
    return vars(conditions)
def save_args(conditions, out_path):
    """A util function to save experiment condition for job table.

    Args:
        conditions (:class:`argparse.Namespace` or dict): Experiment conditions
            to show on a job table. Keys are shown as table header and values
            are shown at a job row.
        out_path (str): Output directory name to save conditions.
    """
    args = convert_dict(conditions)
    # exist_ok replaces the old `except OSError: pass`, which also hid real
    # failures such as permission errors; "already exists" is still fine.
    os.makedirs(out_path, exist_ok=True)
    # Write into a temp dir first, then move into place, so a concurrent
    # reader never observes a half-written args file.
    with tempdir(prefix='args', dir=out_path) as tempd:
        path = os.path.join(tempd, 'args.json')
        with open(path, 'w') as f:
            json.dump(args, f, indent=4)
        new_path = os.path.join(out_path, 'args')
        shutil.move(path, new_path)
|
11498567
|
import numpy as np
def random_subset(iterator, k):
    """Reservoir-sample ``k`` items from the sequence ``iterator``.

    The first ``k`` elements seed the reservoir; each subsequent element
    replaces a random slot with probability k/i, giving every element an
    equal chance of ending up in the result.
    """
    reservoir = iterator[:k]
    seen = k
    for candidate in iterator[k:]:
        seen += 1
        slot = int(np.random.random() * seen)
        if slot < k:
            reservoir[slot] = candidate
    return reservoir
def generate_imbalance(X, y, positive_label=1, ir=2):
    """Subsample the positive class so positives:rest ratio shrinks by ``ir``.

    Keeps all non-positive samples and a random subset of the positives,
    returning the filtered (X, y) pair.
    """
    positive_mask = y == positive_label
    positive_indices = np.arange(y.shape[0])[positive_mask]
    n_keep = float(sum(positive_mask)) / ir
    kept = np.asarray(random_subset(positive_indices, int(n_keep)))
    keep_mask = ~positive_mask
    keep_mask[kept] = True
    return X[keep_mask], y[keep_mask]
|
11498667
|
import copy
import typing
from pathlib import Path
import click
import vpype as vp
# Load the default config
vp.CONFIG_MANAGER.load_config_file(str(Path(__file__).parent / "bundled_configs.toml"))
def invert_axis(document: vp.Document, invert_x: bool, invert_y: bool):
    """Mirror none, one or both axes of the document.

    Scales each selected axis by -1 (1 otherwise), centered on the midpoint
    of the document bounds, and returns the (mutated) document.
    """
    bounds = document.bounds()
    if not bounds:
        return document
    min_x, min_y, max_x, max_y = bounds
    center_x = 0.5 * (min_x + max_x)
    center_y = 0.5 * (min_y + max_y)
    # Flip about the center: move center to origin, scale, move back.
    document.translate(-center_x, -center_y)
    factor_x = -1 if invert_x else 1
    factor_y = -1 if invert_y else 1
    document.scale(factor_x, factor_y)
    document.translate(center_x, center_y)
    return document
@click.command()
@click.argument("output", type=click.File("w"))
@click.option(
    "-p",
    "--profile",
    nargs=1,
    default=None,
    type=str,
    help="gcode writer profile from the vpype configuration file subsection 'gwrite'",
)
@vp.global_processor
def gwrite(document: vp.Document, output: typing.TextIO, profile: str):
    """
    Write gcode or other ascii files for the vpype pipeline.
    The output format can be customized by the user heavily to an extent that you can also output most known
    non-gcode ascii text files.

    Output is driven entirely by the selected profile: per-document,
    per-layer, per-line and per-segment template strings are read from the
    vpype configuration and rendered with str.format() using the current
    coordinates and indices. Returns the ORIGINAL (untransformed) document
    so the rest of the pipeline is unaffected by the unit scaling done here.
    """
    gwrite_config = vp.CONFIG_MANAGER.config["gwrite"]
    # If no profile was provided, try to use a default
    if not profile:
        # Try to get the default profile from the config
        if "default_profile" in gwrite_config:
            profile = gwrite_config["default_profile"]
        else:
            # NOTE(review): the message says "default_default" but the key
            # checked above is "default_profile" — likely a typo in the
            # error text; confirm before changing the user-facing string.
            raise click.BadParameter(
                "no gwrite profile provided on the commandline and no default gwrite "
                + "profile configured in the vpype configuration. This can be done using "
                + 'the "default_default" key in the "gwrite" section'
            )
    # Check that the profile is actually there, we can be sure that the `gwrite`
    # part exists as there are several default profiles.
    if profile not in gwrite_config:
        profiles = [p for p in gwrite_config.keys() if p != "default_profile"]
        raise click.BadParameter(
            "gwrite profile "
            + profile
            + " not found in vpype configuration. Available gwrite profiles: "
            + ", ".join(profiles)
        )
    # Read the config for the profile from the main vpype
    config = gwrite_config[profile]
    # Template strings; each may be absent (None), in which case that hook
    # is simply skipped during emission.
    document_start = config.get("document_start", None)
    document_end = config.get("document_end", None)
    layer_start = config.get("layer_start", None)
    layer_end = config.get("layer_end", None)
    layer_join = config.get("layer_join", None)
    line_start = config.get("line_start", None)
    line_end = config.get("line_end", None)
    line_join = config.get("line_join", None)
    segment_first = config.get("segment_first", None)
    segment = config.get("segment", None)
    segment_last = config.get("segment_last", None)
    # Output unit plus affine transform applied to the document before
    # any coordinate is emitted.
    unit = config.get("unit", "mm")
    offset_x = config.get("offset_x", 0.0)
    offset_y = config.get("offset_y", 0.0)
    scale_x = config.get("scale_x", 1.0)
    scale_y = config.get("scale_y", 1.0)
    # transform the document according to the desired parameters
    orig_document = document
    document = copy.deepcopy(document)  # do NOT affect the pipeline's document
    unit_scale = vp.convert_length(unit)
    document.scale(scale_x / unit_scale, scale_y / unit_scale)
    document.translate(offset_x, offset_y)
    invert_x = config.get("invert_x", False)
    invert_y = config.get("invert_y", False)
    # transform the document according to inversion parameters
    if invert_x or invert_y:
        document = invert_axis(document, invert_x, invert_y)
    # process file
    filename = output.name
    if document_start is not None:
        output.write(document_start.format(filename=filename))
    # last_x/last_y: float position of the previously emitted point (used by
    # the relative dx/dy fields). xx/yy: rounded cumulative integer position
    # for dialects that only accept integer coordinates (ix/iy/idx/idy).
    last_x = 0
    last_y = 0
    xx = 0
    yy = 0
    lastlayer_index = len(document.layers.values()) - 1
    for layer_index, layer_id in enumerate(document.layers):
        layer = document.layers[layer_id]
        if layer_start is not None:
            output.write(
                layer_start.format(
                    x=last_x,
                    y=last_y,
                    ix=xx,
                    iy=yy,
                    index=layer_index,
                    index1=layer_index + 1,
                    layer_index=layer_index,
                    layer_index1=layer_index + 1,
                    layer_id=layer_id,
                    filename=filename,
                )
            )
        lastlines_index = len(layer) - 1
        for lines_index, line in enumerate(layer):
            if line_start is not None:
                output.write(
                    line_start.format(
                        x=last_x,
                        y=last_y,
                        ix=xx,
                        iy=yy,
                        index=lines_index,
                        index1=lines_index + 1,
                        lines_index=lines_index,
                        lines_index1=lines_index + 1,
                        layer_index=layer_index,
                        layer_index1=layer_index + 1,
                        layer_id=layer_id,
                        filename=filename,
                    )
                )
            segment_last_index = len(line) - 1
            # Each line is a sequence of complex numbers: real = x, imag = y.
            for segment_index, seg in enumerate(line):
                x = seg.real
                y = seg.imag
                dx = x - last_x
                dy = y - last_y
                # Integer deltas accumulate into xx/yy so rounding errors do
                # not drift over the course of the file.
                idx = int(round(x - xx))
                idy = int(round(y - yy))
                xx += idx
                yy += idy
                # Pick the first/last-specific template when configured,
                # otherwise the generic per-segment template.
                if segment_first is not None and segment_index == 0:
                    seg_write = segment_first
                elif segment_last is not None and segment_index == segment_last_index:
                    seg_write = segment_last
                else:
                    seg_write = segment
                if seg_write is not None:
                    output.write(
                        seg_write.format(
                            x=x,
                            y=y,
                            dx=dx,
                            dy=dy,
                            _x=-x,
                            _y=-y,
                            _dx=-dx,
                            _dy=-dy,
                            ix=xx,
                            iy=yy,
                            idx=idx,
                            idy=idy,
                            index=segment_index,
                            index1=segment_index + 1,
                            segment_index=segment_index,
                            segment_index1=segment_index + 1,
                            lines_index=lines_index,
                            lines_index1=lines_index + 1,
                            layer_index=layer_index,
                            layer_index1=layer_index + 1,
                            layer_id=layer_id,
                            filename=filename,
                        )
                    )
                last_x = x
                last_y = y
            if line_end is not None:
                output.write(
                    line_end.format(
                        x=last_x,
                        y=last_y,
                        ix=xx,
                        iy=yy,
                        index=lines_index,
                        index1=lines_index + 1,
                        lines_index=lines_index,
                        lines_index1=lines_index + 1,
                        layer_index=layer_index,
                        layer_index1=layer_index + 1,
                        layer_id=layer_id,
                        filename=filename,
                    )
                )
            # Joins are emitted between items only (not after the last one).
            if line_join is not None and lines_index != lastlines_index:
                output.write(
                    line_join.format(
                        x=last_x,
                        y=last_y,
                        ix=xx,
                        iy=yy,
                        index=lines_index,
                        index1=lines_index + 1,
                        lines_index=lines_index,
                        lines_index1=lines_index + 1,
                        layer_index=layer_index,
                        layer_index1=layer_index + 1,
                        layer_id=layer_id,
                        filename=filename,
                    )
                )
        if layer_end is not None:
            output.write(
                layer_end.format(
                    x=last_x,
                    y=last_y,
                    ix=xx,
                    iy=yy,
                    index=layer_index,
                    index1=layer_index + 1,
                    layer_index=layer_index,
                    layer_index1=layer_index + 1,
                    layer_id=layer_id,
                    filename=filename,
                )
            )
        if layer_join is not None and layer_index != lastlayer_index:
            output.write(
                layer_join.format(
                    x=last_x,
                    y=last_y,
                    ix=xx,
                    iy=yy,
                    index=layer_index,
                    index1=layer_index + 1,
                    layer_index=layer_index,
                    layer_index1=layer_index + 1,
                    layer_id=layer_id,
                    filename=filename,
                )
            )
    if document_end is not None:
        output.write(document_end.format(filename=filename))
    output.flush()
    output.close()
    # Optional human-readable note printed after a successful write.
    info = config.get("info", None)
    if info:
        print(info)
    return orig_document
# Place the command in the "Output" group of vpype's --help listing.
gwrite.help_group = "Output"
|
11498682
|
from amlearn.utils.data import read_lammps_dump
from amlearn.featurize.pipeline import FeaturizePipeline
from amlearn.featurize.nearest_neighbor import VoroNN, DistanceNN
from amlearn.featurize.short_range_order import \
DistanceInterstice, VolumeAreaInterstice
from amlearn.featurize.medium_range_order import MRO
__author__ = "<NAME>"
__email__ = "<EMAIL>"
"""
This is an example script of deriving interstice distribution features for
each atom, based on relevant distance/area/volume interstice classes in
amlearn.featurize.short_range_order,
as well as classes in amlearn.featurize.medium_range_order to further
coarse-grain SRO features to MRO.
"""
# Metadata describing the simulated sample (composition + quench rate label).
system = ["Cu65Zr35", "qr_5plus10^10"]
atomic_number_list = [29, 40]  # Cu, Zr
# Statistics used to aggregate per-neighbor interstice values per atom.
stat_ops = ['mean', 'std', 'min', 'max']
# NOTE(review): placeholder path — point at a real LAMMPS dump before running.
lammps_file = "xxx/dump.lmp"
structure, bds = read_lammps_dump(lammps_file)
# NOTE(review): placeholder output directory — features are written here.
output_path = "xxx/xxx"
featurizers = [
    # neighboring analysis
    VoroNN(bds=bds, cutoff=5.0, output_path=output_path),
    DistanceNN(bds=bds, cutoff=4.0, output_path=output_path),
    # distance interstice
    DistanceInterstice(atomic_number_list=atomic_number_list,
                       dependent_class='voro', stat_ops=stat_ops,
                       output_path=output_path),
    DistanceInterstice(atomic_number_list=atomic_number_list,
                       dependent_class='dist', stat_ops=stat_ops,
                       output_path=output_path),
    # area and volume interstice
    VolumeAreaInterstice(atomic_number_list=atomic_number_list,
                         stat_ops=stat_ops, output_path=output_path),
    # from SRO to MRO
    MRO(stats_types=[0, 1, 1, 1, 1, 0], output_path=output_path)]
# defining a featurize_pipeline
featurize_pipeline = FeaturizePipeline(featurizers=featurizers,
                                       output_path=output_path)
# featurization
feature_df = featurize_pipeline.fit_transform(X=structure, bds=bds,
                                              lammps_df=structure)
|
11498688
|
import numpy
import sys
# Sort translation log lines by their reported BLEU score (descending) and
# write the result next to the input file as "<input>.sort".
with open(sys.argv[1], "r") as fp, \
        open(sys.argv[1] + ".sort", "w") as fw:
    line_list = []
    bleu_list = []
    for line in fp:
        # Only keep lines that actually carry a BLEU score.
        if "BLEU=" not in line:
            continue
        line = line.strip()
        line_list.append(line)
        start = line.index("BLEU=") + len("BLEU=")
        # Collect the digits/decimal point following "BLEU=" instead of the
        # old fixed 5-character slice, which crashed on scores of other
        # widths (e.g. "BLEU=7.5," or "BLEU=100.00, ...").
        end = start
        while end < len(line) and (line[end].isdigit() or line[end] == "."):
            end += 1
        bleu_list.append(float(line[start:end]))
    # argsort ascending, then reverse for highest-BLEU-first output.
    argidx = numpy.argsort(bleu_list)
    fw.write("\n".join([line_list[i] for i in argidx[::-1]]))
|
11498696
|
from PIL import Image
from abc import ABC, abstractmethod
import torch
from plan2scene.common.image_description import ImageDescription
from plan2scene.evaluation.metrics import AbstractPairedMetric, AbstractUnpairedMetric
"""
A matcher pairs a predicted texture with a ground truth reference crop (if available).
The matcher informs the evaluator whether to include or exclude a prediction from the evaluation.
(E.g. textures predicted for surfaces without ground truth crops should be excluded.)
"""
class AbstractMatcher(ABC):
    # Base interface for matchers: given a prediction and the available
    # ground-truth crops for a surface, decide what to score it against and
    # whether it belongs in the evaluation at all.
    @abstractmethod
    def __call__(self, pred: Image.Image, gt_textures: dict) -> tuple:
        """
        Invokes matcher.
        :param pred: Predicted texture for a surface.
        :param gt_textures: Dictionary containing ground truth references for the surface.
        :return: Tuple of (Metric value, Matched ground truth image, True if the texture should be included in the evaluation.)
        """
        pass
class PairedMatcher(AbstractMatcher):
    """
    Wrapper for metrics that require a pair of inputs (prediction and ground truth).
    """
    def __init__(self, metric: AbstractPairedMetric):
        """
        Initializes paired matcher.
        :param metric: Metric used to evaluate.
        """
        super().__init__()
        self.metric = metric
    def __repr__(self):
        return str(self.metric)
    def __call__(self, pred: Image.Image, gt_textures: dict) -> tuple:
        """
        Evaluate the prediction against the single ground-truth reference.
        :param pred: Predicted texture (PIL image or ImageDescription).
        :param gt_textures: Dict expected to hold exactly one ground-truth texture; may be empty.
        :return: (metric value, ground-truth image, True) on success, or
                 (None, None, False) when no ground truth is available.
        """
        prediction = pred.image if isinstance(pred, ImageDescription) else pred
        with torch.no_grad():
            if not gt_textures:
                # No reference crop: exclude this prediction from evaluation.
                return None, None, False
            assert len(gt_textures) == 1
            reference = next(iter(gt_textures.values()))
            if isinstance(reference, ImageDescription):
                reference = reference.image
            score = self.metric(prediction, reference)
        return score, reference, True
class UnpairedMatcher(AbstractMatcher):
    """
    Wrapper for metrics that only require the prediction to evaluate. (E.g. TILE)
    """
    def __init__(self, metric: AbstractUnpairedMetric):
        """
        Initializes unpaired matcher.
        :param metric: Metric used to evaluate.
        """
        super().__init__()
        self.metric = metric
    def __repr__(self):
        return str(self.metric)
    # Fixed annotation: `Image` is the PIL module, not a type; the image
    # class is `Image.Image`, matching AbstractMatcher's signature. Also
    # added the `-> tuple` return annotation for consistency.
    def __call__(self, pred: Image.Image, gt_textures: dict) -> tuple:
        """
        Evaluates prediction (crop).
        :param pred: Prediction (PIL image or ImageDescription).
        :param gt_textures: Dictionary containing the gt texture. Not used.
        :return: Tuple of (metric result, None, True).
        """
        pred_texture = pred
        if isinstance(pred_texture, ImageDescription):
            pred_texture = pred_texture.image
        # Evaluation only; gradients are never needed here.
        with torch.no_grad():
            loss = self.metric(pred_texture)
        return loss, None, True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.