id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12809458 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
from datetime import date
from datetime import timedelta
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np

# Analyse Hong Kong IPO ("new stock") subscription outcomes: load the
# scraped CSV, compute the expected profit per subscription, and plot the
# resulting distribution.

# Build the CSV path portably with os.path.join instead of hard-coding
# Windows '\\' separators (the original only worked on Windows).
path = os.path.join(os.path.abspath('.'), 'data', 'new_stock', 'hk_new_stock.csv')
hk_new_stock = pd.read_csv(path, index_col=0, encoding='gbk')

# Drop incomplete rows and the IPOs explicitly marked "not recommended".
hk_new_stock = hk_new_stock.dropna()
hk_new_stock = hk_new_stock[hk_new_stock['recommend'] != '不建议']

hk_new_stock['money'] = hk_new_stock['money'].astype(float)
print(hk_new_stock['money'].describe())

# 'luck' (allotment rate) and 'rise' (first-day change) arrive as
# percentage strings like '12.3%'; convert to fractions.
hk_new_stock['luck'] = hk_new_stock['luck'].str.strip("%").astype(float) / 100
hk_new_stock['rise'] = hk_new_stock['rise'].str.strip("%").astype(float) / 100

corr_pd = hk_new_stock[['money', 'luck', 'rise']]
print(corr_pd.corr())

# Expected profit = allotment probability * (price change - costs);
# 0.010077 is the proportional fee rate and 18 + 5 are flat fees.
hk_new_stock['earn'] = hk_new_stock['luck'] * (
    hk_new_stock['money'] * hk_new_stock['rise']
    - hk_new_stock['money'] * 0.010077
    - 18 - 5
)
print(hk_new_stock['earn'].describe())
print(hk_new_stock['earn'].sum())

all_data = hk_new_stock['earn'].values.tolist()
# Draw violin and box plots of the expected-profit distribution.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 6))
# plot violin plot
axes[0].violinplot(all_data,
                   showmeans=False,
                   showextrema=True,
                   showmedians=True)
axes[0].set_ylabel('expected profit')
axes[0].set_title('Violin plot')
# plot box plot
axes[1].boxplot(all_data)
axes[1].set_title('Box plot')
plt.show()
# path1 = os.path.abspath('.')
# path1 += '\\data\\new_stock\\result.csv'
# hk_new_stock['earn'].to_csv(path1)
# print(hk_new_stock[['money','luck']].describe()) | StarcoderdataPython |
15184 | <filename>projects/ide/sublime/src/Bolt/api/inspect/highlighting.py
import sublime
from ui.read import settings as read_settings
from ui.write import write, highlight as write_highlight
from lookup import file_type as lookup_file_type
from ui.read import x as ui_read
from ui.read import spots as read_spots
from ui.read import regions as ui_regions
from core.read import read as core_read
from structs.general_thread import *
from structs.thread_handler import *
from structs.highlight_list import *
from structs.flag_region import *
from core.analyse import analyse
def flags():
    """Return the FlagRegion descriptors used for Bolt highlighting.

    Order matters: callers zip this list against the region groups
    (incorrect, missing, unused, wrong_module).
    """
    specs = [
        ('bolt.incorrect', 'comment', 'light_x', 0),
        ('bolt.missing', 'string', 'arrow_right', 0),
        ('bolt.unused', 'comment', 'dot', sublime.DRAW_OUTLINED),
        ('bolt.wrong_module', 'comment', 'light_x', 0),
    ]
    return [FlagRegion(key, scope, icon, draw) for (key, scope, icon, draw) in specs]
def highlight_setting():
    """Name of the boolean setting that enables live highlighting."""
    return 'bolt.live.highlight'
def rate_setting():
    """Name of the setting that holds the highlight refresh rate."""
    return 'bolt.live.highlight.rate'
def is_enabled():
    """Report whether live highlighting is switched on (default: off)."""
    return read_settings.load_settings().get(highlight_setting(), False)
def get_rate():
    """Return the highlight refresh rate from settings (default 1000)."""
    return read_settings.load_settings().get(rate_setting(), 1000)
def set_enabled(state):
    """Persist the live-highlight enabled flag to the settings file."""
    read_settings.load_settings().set(highlight_setting(), state)
    write.save_settings()
def toggle(view):
    """Flip live highlighting on or off for *view* and persist the state."""
    def ignore(v):
        # Placeholder callback: the toggle does not care about thread events.
        return True
    handler = ThreadHandler(ignore, ignore, ignore)
    now_enabled = not is_enabled()
    if now_enabled:
        run(view, handler)
    else:
        clear(view)
    set_enabled(now_enabled)
def run(view, handler):
    # Kick off a background highlight pass for *view*.  Analysis happens on a
    # worker thread; the resulting region updates are marshalled back onto
    # Sublime's main thread via sublime.set_timeout.
    valid = lookup_file_type.is_bolt_module(view)
    if not valid:
        open_file = view.file_name() if view.file_name() != None else '-- no view'
        print 'View is not a bolt module: ' + open_file
        handler.cancel()
    else:
        # Snapshot everything the analyser needs while still on the UI thread.
        read_view = ui_read.all(view)
        spots = read_spots.spots(view)
        plasmas = core_read.plasmas(read_view.ptext)
        def update_ui(highlights, module_wrong):
            # Called from the worker thread with the analysis results.
            def run():
                regions = write_highlight.regions(view, highlights)
                module_region = [ui_regions.module_name(view)] if module_wrong else []
                # Pair each flag style with its region list; order must match
                # flags(): incorrect, missing, unused, wrong_module.
                flag_info = zip(flags(), [regions.incorrect, regions.missing, regions.unused, module_region])
                def highlight_flag(x):
                    if len(x[1]) > 0:
                        # NOTE(review): the trailing comma makes this a 1-tuple
                        # expression statement — harmless, but likely unintended.
                        write_highlight.highlight(view, x[1], x[0]),
                    else:
                        write_highlight.remove_highlight(view, x[0])
                # Python 2 map is eager, so this applies the side effects now.
                map(highlight_flag, flag_info)
            sublime.set_timeout(run, 0)
        thread = GeneralThread(_highlighter(read_view, spots, plasmas, update_ui), handler.success, handler.failure)
        sublime.set_timeout(thread.start, 0)
        handler.init(thread)
def clear(view):
    """Schedule removal of every Bolt highlight region from *view*."""
    def remove_all():
        write_highlight.remove_highlights(view, flags())
    sublime.set_timeout(remove_all, 0)
def _highlighter(read_view, spots, plasmas, callback):
def r():
try:
highlights = analyse.all(read_view.base, read_view.nests, plasmas, spots, read_view.external)
module_wrong = analyse.module_wrong(read_view)
callback(highlights, module_wrong)
except Exception as exc:
print "Error during identifying highlighted regions: " + str(exc)
traceback.print_exc(limit=10)
callback(HighlightLists([], [], []), False)
return r
| StarcoderdataPython |
12820834 | <gh_stars>0
from flask import render_template
from . import main
@main.app_errorhandler(404)
def fo_o_fo(error):
    """Render the custom 404 page for any unmatched route."""
    body = render_template('fo_o_fo.html')
    return body, 404
9697983 | from pact_testgen.generator import generate_tests
from pact_testgen.dialects.django import Dialect
def test_django_test_generator_output_is_parsable(testfile):
    # The generated Django test module must at least be syntactically
    # valid Python; compile() raises SyntaxError otherwise.
    test_file, _ = generate_tests(testfile, Dialect())
    compile(test_file, "<string>", "exec")
def test_output_includes_expected_test_cases(testfile):
    # Generate the test module and verify that every expected TestCase
    # class name and test method name appears in the emitted source.
    test_file, _ = generate_tests(testfile, Dialect())
    # Names of test cases we expect to see. This is driven directly
    # by test_app/client_tests.py
    print(f"\nTEST FILE\n------\n\n{test_file}\n")
    assert "TestAnAuthorId1" in test_file
    assert "TestAnAuthorId1ABookExistsWithAuthorId1" in test_file
    assert "TestNoInitialState" in test_file
    assert "test_an_author_creation_request" in test_file
    assert "test_a_book_search_request_for_a_non_existent_author" in test_file
    assert "test_a_request_for_author_id_1" in test_file
    assert "test_an_author_update_request" in test_file
    assert "test_an_author_deletion_request" in test_file
    assert "test_a_book_search_request_for_author_id_1" in test_file
def test_provider_state_file_has_expected_methods(testfile):
    # The provider-state scaffold should expose one setup method per named
    # provider state, and none for the empty ("nothing") state.
    _, provider_state_file = generate_tests(testfile, Dialect())
    print(f"\nPROVIDER STATE FILE\n-------------------\n\n{provider_state_file}\n")
    assert "setup_nothing" not in provider_state_file
    assert "setup_an_author_id_1" in provider_state_file
    assert "setup_an_author_id_1_a_book_exists_with_author_id_1" in provider_state_file
| StarcoderdataPython |
8148397 | from data.indicator.GT2.preprocess import config | StarcoderdataPython |
4929852 | <filename>PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/tslibs/test_normalize_date.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""Tests for functions from pandas._libs.tslibs"""
from datetime import date, datetime
import pytest
from pandas._libs import tslibs
@pytest.mark.parametrize("value,expected", [
    (date(2012, 9, 7), datetime(2012, 9, 7)),
    (datetime(2012, 9, 7, 12), datetime(2012, 9, 7)),
    (datetime(2007, 10, 1, 1, 12, 5, 10), datetime(2007, 10, 1))
])
def test_normalize_date(value, expected):
    # normalize_date should coerce a date to a midnight datetime and strip
    # any time-of-day component from a datetime.
    result = tslibs.normalize_date(value)
    assert result == expected
| StarcoderdataPython |
192059 | <reponame>fkmclane/MCP
import logging
import multiprocessing
import os
import os.path
import select
import subprocess
import sys
import time
import traceback
import mcp.config
import mcp.error
import mcp.common.env
import mcp.common.daemon
import mcp.model.server
log = logging.getLogger('mcp')
class Script(object):
    # Wraps the optional per-server helper script (script/script.py) as a
    # subprocess wired between the server's ladderlog output and its console.
    def __init__(self, server):
        self.server = server
        self.exe = os.path.join(self.server.prefix, 'script', 'script.py')
        # subprocess.Popen handle while the script is running, else None.
        self.proc = None
    def exists(self):
        # True when this server has a helper script installed.
        return os.path.isfile(self.exe)
    def start(self):
        # Launch the script: stdin <- ladderlog.txt, stdout -> server console
        # (the server process's stdin), stderr -> script-error.log.
        if not self.exists():
            raise mcp.error.ScriptNonexistentError()
        if mcp.config.container:
            self.proc = subprocess.Popen(['/usr/local/bin/mcp-container-helper', self.server.prefix, os.path.join('/', 'srv', 'script', 'bin', 'python'), os.path.join('/', 'srv', 'script', 'script.py')], stdin=open(os.path.join(self.server.prefix, 'var', 'ladderlog.txt'), 'r'), stdout=self.server.proc.stdin, stderr=open(os.path.join(self.server.prefix, 'script-error.log'), 'w'), env=mcp.common.env.get_script(), cwd=self.server.prefix)
        else:
            # NOTE(review): argv here is [venv python, sys.executable, script.py];
            # passing sys.executable as the first argument to the venv python
            # looks unintended (compare the container branch) — confirm.
            self.proc = subprocess.Popen([os.path.join(self.server.prefix, 'script', 'bin', 'python'), sys.executable, os.path.join(self.server.prefix, 'script', 'script.py')], stdin=open(os.path.join(self.server.prefix, 'var', 'ladderlog.txt'), 'r'), stdout=self.server.proc.stdin, stderr=open(os.path.join(self.server.prefix, 'script-error.log'), 'w'), env=mcp.common.env.get_script(), cwd=os.path.join(self.server.prefix, 'var'))
    def stop(self):
        # Terminate gracefully; outside a container, escalate to kill if the
        # process has not exited after 5 seconds.
        if self.is_running():
            self.proc.terminate()
            if mcp.config.container:
                self.proc.wait()
            else:
                try:
                    self.proc.wait(5)
                except subprocess.TimeoutExpired:
                    self.proc.kill()
                    self.proc.wait()
        self.proc = None
    def is_running(self):
        # Process exists and has not exited yet.
        return bool(self.proc and self.proc.poll() is None)
    def is_dead(self):
        # Process exited with a non-zero status.
        return bool(self.proc and self.proc.poll())
    def is_quit(self):
        # Process exited cleanly (status 0).
        return bool(self.proc and self.proc.poll() == 0)
class Server(object):
    # Wraps one armagetronad-dedicated game server instance, plus its
    # optional helper Script, as managed subprocesses.
    def __init__(self, metadata):
        self.name = metadata.server
        self.prefix = os.path.join(mcp.config.prefix, self.name)
        self.exe = os.path.join(self.prefix, 'bin', 'armagetronad-dedicated')
        self.port = metadata.port
        self.library = metadata.library
        # subprocess.Popen handle while the server is running, else None.
        self.proc = None
        self.script = Script(self)
    def exists(self):
        # True when the server binary is installed under this prefix.
        return os.path.isfile(self.exe)
    def start(self):
        # Launch the dedicated server (console commands go to its stdin,
        # output to server.log / error.log) and, if present, its script.
        if not self.exists():
            raise mcp.error.ServerNonexistentError()
        if mcp.config.container:
            self.proc = subprocess.Popen(['/usr/local/bin/mcp-container-helper', self.prefix, os.path.join('bin', 'armagetronad-dedicated'), '--vardir', os.path.join('/', 'srv', 'var'), '--userdatadir', os.path.join('/', 'srv', 'user'), '--configdir', os.path.join('/', 'srv', 'config'), '--datadir', os.path.join('/', 'srv', 'data')], stdin=subprocess.PIPE, stdout=open(os.path.join(self.prefix, 'server.log'), 'a'), stderr=open(os.path.join(self.prefix, 'error.log'), 'w'), env=mcp.common.env.get_server(), cwd=self.prefix)
        else:
            self.proc = subprocess.Popen([os.path.join(self.prefix, 'bin', 'armagetronad-dedicated'), '--vardir', os.path.join(self.prefix, 'var'), '--userdatadir', os.path.join(self.prefix, 'user'), '--configdir', os.path.join(self.prefix, 'config'), '--datadir', os.path.join(self.prefix, 'data')], stdin=subprocess.PIPE, stdout=open(os.path.join(self.prefix, 'server.log'), 'a'), stderr=open(os.path.join(self.prefix, 'error.log'), 'w'), env=mcp.common.env.get_server(), cwd=self.prefix)
        if self.script.exists():
            self.script.start()
    def stop(self):
        # Stop the script first (it pipes into our stdin), then the server;
        # outside a container, escalate to kill after a 5 second grace period.
        self.script.stop()
        if self.is_running():
            self.proc.terminate()
            if mcp.config.container:
                self.proc.wait()
            else:
                try:
                    self.proc.wait(5)
                except subprocess.TimeoutExpired:
                    self.proc.kill()
                    self.proc.wait()
        self.proc = None
    def reload(self):
        # Re-read the config files on the live server without restarting it.
        self.send_command('INCLUDE settings.cfg')
        self.send_command('INCLUDE server_info.cfg')
        self.send_command('INCLUDE settings_custom.cfg')
    def is_running(self):
        # Process exists and has not exited yet.
        return bool(self.proc and self.proc.poll() is None)
    def is_dead(self):
        # Process exited with a non-zero status.
        return bool(self.proc and self.proc.poll())
    def is_quit(self):
        # Process exited cleanly (status 0).
        return bool(self.proc and self.proc.poll() == 0)
    def send_command(self, command):
        # Write one console command to the server's stdin; the server reads
        # latin1-encoded lines.
        if not self.is_running():
            raise mcp.error.ServerStoppedError()
        self.proc.stdin.write(command.encode('latin1') + b'\n')
        self.proc.stdin.flush()
# Pipe used to signal the manager child process to stop, and the handle of
# that multiprocessing.Process (None while the manager is not running).
running_read, running_write = None, None
process = None
def run():
    # Manager loop (runs in the child process spawned by start()): keeps
    # every configured server and its helper script alive, forwards queued
    # console commands, and tears everything down on shutdown.
    os.close(running_write)  # the child only reads the stop pipe
    server_processes = {}
    for entry in mcp.model.server.items():
        server_processes[entry.server] = Server(entry)
        entry.running = entry.autostart
        entry.script_running = entry.autostart
        entry.command = ''
    try:
        # select() doubles as the poll timer and the stop signal: the read
        # list is non-empty only once stop() writes to the pipe.
        while not select.select([running_read], [], [], mcp.config.poll_interval)[0]:
            try:
                for entry in mcp.model.server.items():
                    # create process if necessary
                    if entry.server not in server_processes:
                        server_processes[entry.server] = Server(entry)
                        entry.running = entry.autostart
                        entry.script_running = entry.autostart
                        entry.command = ''
                    # get process
                    process = server_processes[entry.server]
                    # check if each server is supposed to be running and poll for problems
                    if entry.running:
                        if not process.proc:
                            process.start()
                        # Flush any console commands queued on the entry.
                        for command in entry.command.split('\n'):
                            if command:
                                process.send_command(command)
                        entry.command = ''
                        if process.is_quit():
                            # Clean exit: stop the script too and mark stopped.
                            process.script.stop()
                            entry.script_running = False
                            process.stop()
                            entry.running = False
                            log.warning(process.name + ' stopped by itself.')
                        elif process.is_dead():
                            # Crash: note it in both logs and restart.
                            with open(os.path.join(process.prefix, 'server.log'), 'a') as server_log:
                                server_log.write('WARNING: The server did not gracefully quit: now restarting.\n')
                            with open(os.path.join(process.prefix, 'error.log'), 'a') as error_log:
                                error_log.write('WARNING: The server did not gracefully quit: now restarting.\n')
                            log.warning(process.name + ' did not gracefully quit.')
                            process.stop()
                            process.start()
                            log.warning(process.name + ' restarted.')
                        if entry.script_running:
                            try:
                                if not process.script.proc:
                                    process.script.start()
                                elif process.script.is_quit():
                                    process.script.stop()
                                    entry.script_running = False
                                    log.warning(process.name + ' script stopped by itself.')
                                elif process.script.is_dead():
                                    process.script.stop()
                                    entry.script_running = False
                                    log.warning(process.name + ' script did not gracefully quit.')
                            except mcp.error.ScriptNonexistentError:
                                # Servers without a script.py simply run scriptless.
                                pass
                    else:
                        # Server disabled: make sure both processes are down.
                        entry.script_running = False
                        if process.script.is_running():
                            process.script.stop()
                        if process.is_running():
                            process.stop()
                    # unload and reload server if necessary
                    if entry.reload:
                        del server_processes[entry.server]
                        entry.reload = False
                        entry.waiting = False
                # Drop processes whose config entry has been removed.
                for name in list(server_processes.keys()):
                    if not mcp.model.server.get(name):
                        del server_processes[name]
            except:
                # Keep the supervisor alive no matter what one pass raises.
                traceback.print_exc()
    finally:
        for process in server_processes.values():
            process.stop()
def start():
    # Launch the manager child process; no-op if it is already running.
    global running_read, running_write, process
    if process:
        return
    running_read, running_write = os.pipe()
    process = multiprocessing.Process(target=run, name='mcp-manager')
    process.start()
    # The parent only writes to the stop pipe; close its copy of the read end.
    os.close(running_read)
def stop():
    # Signal the manager loop to exit and wait for the child to finish.
    global running_read, running_write, process
    if not process:
        return
    os.write(running_write, b'stop\n')
    process.join()
    # NOTE(review): running_write is dropped without os.close(), so repeated
    # start()/stop() cycles leak one fd per cycle — confirm and close here.
    running_read, running_write = None, None
    process = None
def is_running():
    """True while the manager process object exists and is still alive."""
    if process is None:
        return False
    return process.is_alive()
| StarcoderdataPython |
4990675 | ## You Can't Code Under Pressure #1
## 8 kyu
## https://www.codewars.com//kata/53ee5429ba190077850011d4
def double_integer(i):
    """Return twice the given number."""
    return i + i
5005977 | <filename>generator.py
import numpy as np
import trimesh
import random
import pyglet
import h5py
def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='uint8'):
    """Write point-cloud data and class labels to an HDF5 file.

    Creates two gzip-compressed datasets, 'data' and 'label', in a new
    file at *h5_filename* (any existing file is overwritten).
    """
    # Open explicitly in write mode: h5py >= 3 requires a mode argument,
    # and the old implicit default ('a') would fail on re-runs because the
    # datasets already exist.  The context manager guarantees the file is
    # closed even if a write raises.
    with h5py.File(h5_filename, 'w') as h5_fout:
        h5_fout.create_dataset(
            'data', data=data,
            compression='gzip', compression_opts=4,
            dtype=data_dtype)
        h5_fout.create_dataset(
            'label', data=label,
            compression='gzip', compression_opts=1,
            dtype=label_dtype)
# Generate 32 training batches.  Each batch holds 16 groups of four shapes
# (cylinder, cone, annulus, sphere), each sampled to 512 surface points,
# with matching class labels 0..3.
for i in range(32):
    data = 1  # placeholder until the first group initialises the array
    labels = np.array([0,1,2,3]*16)
    isFirst = True
    for j in range(16):
        # Random base radius in [1, 11) with a correlated random height.
        rand = random.random()*10+1
        distort = rand * (random.random()*2 + .2)
        cylinder = trimesh.creation.cylinder(rand, height=distort, sections=320, segment=None, transform=None)
        cone = trimesh.creation.cone(rand, height=distort, sections=640, transform=None)
        annulus = trimesh.creation.annulus(rand, rand*(1.1+random.random()), height=distort, sections=160, transform=None, segment=None)
        sphere = trimesh.creation.icosphere(subdivisions=3, radius=1.0, color=None)
        # NOTE(review): these .show() calls open a blocking viewer window for
        # every mesh (2048 windows across the whole run) — almost certainly
        # debug leftovers that should be removed for batch generation.
        cylinder.show()
        cone.show()
        annulus.show()
        sphere.show()
        if(isFirst):
            isFirst = False
            data = np.array([cylinder.sample(512), cone.sample(512), annulus.sample(512), sphere.sample(512)])
        else:
            data = np.concatenate((data,np.array([cylinder.sample(512), cone.sample(512), annulus.sample(512), sphere.sample(512)])),axis=0)
    # Writing of the batch is currently disabled.
    #save_h5('data/file'+str(i)+'.hdf5', data, labels)
# Generate 16 test batches with the same shape classes as the training
# loop above, but without opening viewer windows.
for i in range(16):
    data = 1  # placeholder until the first group initialises the array
    labels = np.array([0,1,2,3]*16)
    isFirst = True
    for j in range(16):
        # Random base radius in [1, 11) with a correlated random height.
        rand = random.random()*10+1
        distort = rand * (random.random()*2 + .2)
        cylinder = trimesh.creation.cylinder(rand, height=distort, sections=320, segment=None, transform=None)
        cone = trimesh.creation.cone(rand, height=distort, sections=640, transform=None)
        annulus = trimesh.creation.annulus(rand, rand*(1.1+random.random()), height=distort, sections=160, transform=None, segment=None)
        sphere = trimesh.creation.icosphere(subdivisions=3, radius=1.0, color=None)
        if(isFirst):
            isFirst = False
            data = np.array([cylinder.sample(512), cone.sample(512), annulus.sample(512), sphere.sample(512)])
        else:
            data = np.concatenate((data,[cylinder.sample(512), cone.sample(512), annulus.sample(512), sphere.sample(512)]),axis=0)
    # Writing of the batch is currently disabled.
    #save_h5('test/file'+str(i)+'.hdf5', data, labels)
| StarcoderdataPython |
5160584 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import glob
import torch
import itertools
import numpy as np
from fairseq import metrics, options, utils
from fairseq import search
from fairseq.data import (
encoders,
indexed_dataset,
AppendTokenDataset,
ConcatDataset,
StripTokenDataset,
TruncateDataset,
XDAEDenoisingDataset,
Dictionary,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
MultiLanguagePairDataset,
data_utils,
)
from .denoising import DenoisingTask
from .translation import TranslationTask
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
logger = logging.getLogger(__name__)
def load_multi_langpair_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    add_language_token=False,
    domain=None,
    common_eos=None,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
):
    """Load a binarized parallel corpus as a MultiLanguagePairDataset.

    Shards named ``<split>[k].<src>-<tgt>.<lang>`` are loaded (and, with
    *combine*, concatenated), the source side is optionally truncated and
    decorated with a domain tag and/or a target-language token, and
    alignments are attached when requested.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # True when the binarized shard for this direction exists on disk.
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    def replace_eos(dataset, dictionary, eos_token):
        # Swap the dictionary eos for a named eos tag (e.g. a common eos).
        # NOTE(review): currently unused inside this function.
        dataset = StripTokenDataset(dataset, dictionary.eos())
        eos_index = dictionary.index("[{}]".format(eos_token))
        return AppendTokenDataset(dataset, eos_index)

    src_datasets = []
    tgt_datasets = []
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )
        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # FIX: 'tag_num' was referenced here without ever being defined,
            # so truncate_source=True raised a NameError.  Reserve one
            # position per tag that is prepended below (domain tag and/or
            # target-language token), plus one for the re-appended eos.
            tag_num = int(domain is not None) + int(add_language_token)
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - tag_num - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )
        if not combine:
            break
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # The first (primary) shard may be upsampled relative to the rest.
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    # add tags (prepended last ends up first: [2tgt][domain]<tokens>)
    if domain is not None:
        src_dataset = PrependTokenDataset(src_dataset, src_dict.index("[{}]".format(domain)))
    if add_language_token:
        src_dataset = PrependTokenDataset(
            src_dataset, tgt_dict.index('[2{}]'.format(tgt))
        )
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return MultiLanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task('xdae_multilingual_translation')
class XDAEMultilingualTranslationTask(DenoisingTask):
@staticmethod
def add_args(parser):
    """Register task-specific CLI arguments (pretraining and generation)
    on top of the base DenoisingTask options."""
    DenoisingTask.add_args(parser)
    # for pretrain
    parser.add_argument(
        "--multilang-sampling-alpha",
        type=float,
        default=1.0,
        help="smoothing alpha for sample ratios across multiple datasets",
    )
    parser.add_argument("--downsample-by-min", default=False, action="store_true",
                        help="Downsample all large dataset by the length of smallest dataset")
    parser.add_argument("--add-lang-token", default=False, action="store_true")
    parser.add_argument("--with-len", default=False, action="store_true")
    parser.add_argument('--prepend-bos', default=False, action='store_true')
    parser.add_argument('--placeholder', type=int,
                        help="placeholder for more special ids such as language ids",
                        default=-1)
    parser.add_argument("--add-tgt-len-tags", type=int, default=0,
                        help="number of length tags to add")
    # Noising options (word-level corruption for denoising pretraining).
    parser.add_argument('--word-shuffle', type=float, default=0,
                        help="Randomly shuffle input words (0 to disable)")
    parser.add_argument("--word-dropout", type=float, default=0,
                        help="Randomly dropout input words (0 to disable)")
    parser.add_argument("--word-blank", type=float, default=0,
                        help="Randomly blank input words (0 to disable)")
    # Music/lyrics alignment options.
    parser.add_argument("--with-predefined-notes-group", default=False, action="store_true")
    parser.add_argument("--align-notes", default=False, action="store_true")
    parser.add_argument("--align-rests", default=False, action="store_true")
    parser.add_argument('--notes-level', type=int,
                        help="notes level",
                        default=5)
    parser.add_argument("--notes-weight", type=float, default=0.1,
                        help="weight lambda to the align notes")
    parser.add_argument("--shape-weight", type=float, default=0.1,
                        help="weight lambda to the align notes")
    parser.add_argument("--rests-weight", type=float, default=0.1,
                        help="weight lambda to the align rest notes (control rythm)")
    parser.add_argument("--durations-weight", type=float, default=0.1,
                        help="weight lambda to the align note durations (control rythm)")
    parser.add_argument('--distance-reward', default=False, action='store_true')
    parser.add_argument('--lyrics-dict', type=str, help="lyrics dictionary data path", default=None)
    parser.add_argument("--min-align-prob", type=float, default=0.1,
                        help="prob for unaccepted tokens")
    parser.add_argument('--sampled-data', default=False, action='store_true')
    parser.add_argument(
        "--langs", type=str, help="language ids we are considering", default=None
    )
    parser.add_argument(
        "--no-whole-word-mask-langs",
        type=str,
        default="",
        metavar="N",
        help="languages without spacing between words dont support whole word masking",
    )
    # Finetuning / multi-domain options.
    parser.add_argument('--finetune-langs', type=str,
                        help="language pairs to finetune',', for example, 'en-zh,zh-en'",
                        default=None)
    parser.add_argument('--finetune-data', type=str,
                        help="finetuning data path",
                        default=None)
    parser.add_argument('--common-eos', type=str,
                        help="common end of sentence tag for all tasks/langs",
                        default=None)
    parser.add_argument('--domains', type=str,
                        help="domains to pretrain ',', for example, 'LYRICS,WMT'",
                        default=None)
    parser.add_argument('--mono-langs', type=str,
                        help="monolingual languages used in pretraining, separated wiht ',', for example, 'en,fr,de'",
                        default=None)
    parser.add_argument('--para-langs', type=str,
                        help="parallel langagues, for example, 'en-zh,jp-zh'",
                        default=None)
    parser.add_argument('--mono-domain', type=str,
                        help="domain of monolingual data",
                        default=None)
    parser.add_argument('--para-domain', type=str,
                        help="domain of parallel data",
                        default=None)
    parser.add_argument('--ft-domain', type=str,
                        help="domain of fintuning data",
                        default=None)
    parser.add_argument("--use-domain-eos", action="store_true",
                        help="use domain tag as end of sentence",
                        default=False)
    parser.add_argument("--mono-ratio", type=float,
                        help="Percentage of monolingual data",
                        default=0.5)
    # for generation
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                        help='source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                        help='target language')
    parser.add_argument('--load-alignments', action='store_true',
                        help='load the binarized alignments')
    parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                        help='pad the source on the left')
    parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                        help='pad the target on the left')
    parser.add_argument('--upsample-primary', default=1, type=int,
                        help='amount to upsample primary dataset')
    parser.add_argument('--truncate-source', action='store_true', default=False,
                        help='truncate source to max-source-positions')
    ## options for reporting BLEU during validation
    #parser.add_argument('--eval-bleu', action='store_true',
    #help='evaluation with BLEU scores')
    #parser.add_argument('--eval-bleu-detok', type=str, default="space",
    #help='detokenizer before computing BLEU (e.g., "moses"); '
    #'required if using --eval-bleu; use "space" to '
    #'disable detokenization; see fairseq.data.encoders '
    #'for other options')
    #parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
    #help='args for building the tokenizer, if needed')
    #parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
    #help='if setting, we compute tokenized BLEU instead of sacrebleu')
    #parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
    #help='remove BPE before computing BLEU')
    #parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
    #help='generation args for BLUE scoring, '
    #'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
    #parser.add_argument('--eval-bleu-print-samples', action='store_true',
    # help='print sample generations during validation')
@classmethod
def setup_task(cls, args, **kwargs):
    """Setup the task: resolve the language list, load the shared
    dictionary, and register the special symbols (mask, domain/language
    tags, length tags, placeholders)."""
    paths = args.data.split(":")
    assert len(paths) > 0
    # Language list: explicit --langs, or inferred from the data directory
    # (file names when --sampled-data, sub-directories otherwise).
    if args.langs is None:
        if args.sampled_data:
            languages = list(cls.get_languages(cls, paths[0]))
        else:
            languages = sorted([
                name for name in os.listdir(paths[0])
                if os.path.isdir(os.path.join(paths[0], name))
            ])
    else:
        languages = args.langs.split(",")
    # The dictionary may live next to the data or in an optional second path.
    dict_path = paths[1] if len(paths) == 2 else paths[0]
    if os.path.exists(os.path.join(dict_path, "dict.txt")):
        dictionary = Dictionary.load(os.path.join(dict_path, "dict.txt"))
    else:
        dictionary = Dictionary.load(os.path.join(dict_path, f"dict.{languages[0]}.txt"))
    domains = args.domains.split(",") if args.domains is not None else None
    assert (args.mono_domain is None) or (args.mono_domain in domains)
    assert (args.para_domain is None) or (args.para_domain in domains)
    # Register the special symbols used by denoising / multilingual setups.
    dictionary.add_symbol('<mask>')
    if args.add_lang_token:
        if args.common_eos is not None:
            dictionary.add_symbol('[{}]'.format(args.common_eos))
        if domains is not None:
            for d in domains:
                dictionary.add_symbol(f"[{d}]")
        for lang in languages:
            dictionary.add_symbol('[2{}]'.format(lang))
    if args.add_tgt_len_tags > 0:
        for i in range(args.add_tgt_len_tags):
            dictionary.add_symbol('[LEN{}]'.format(i+1))
    if args.placeholder > 0:
        for i in range(args.placeholder):
            dictionary.add_symbol('[placeholder{}]'.format(i))
    logger.info("dictionary: {} types".format(len(dictionary)))
    if not hasattr(args, "shuffle_instance"):
        args.shuffle_instance = False
    args.left_pad_source = utils.eval_bool(args.left_pad_source)
    args.left_pad_target = utils.eval_bool(args.left_pad_target)
    return cls(args, dictionary)
def __init__(self, args, dictionary):
    super().__init__(args, dictionary)
    self.dictionary = dictionary
    # The same dictionary is shared between source and target sides.
    self.src_dict = dictionary
    self.tgt_dict = dictionary
    self.seed = args.seed
    # add mask token
    self.mask_idx = self.dictionary.index('<mask>')
    self.langs = args.langs
    self.args = args
    # Caches os.listdir() results per data folder (see get_dataset_path).
    self.path_cache = {}
    self.para_langs = None if args.para_langs is None else args.para_langs.split(",")
    self.ft_langs = None if args.finetune_langs is None else args.finetune_langs.split(",")
    self.mono_langs = args.mono_langs.split(",") if args.mono_langs is not None else None
def _get_sample_prob(self, dataset_lens):
    """Temperature-smoothed sampling distribution over datasets.

    Raising the empirical size distribution to the power of
    ``multilang_sampling_alpha`` flattens it (for alpha < 1), which
    upsamples low-resource languages.
    """
    empirical = dataset_lens / dataset_lens.sum()
    tempered = empirical ** self.args.multilang_sampling_alpha
    return tempered / tempered.sum()
def get_languages(self, data_folder):
    """Infer the set of language codes present in *data_folder*.

    File names are expected to look like ``<split>.<index>.<lang>.<ext>``;
    the language is the second-to-last dot-separated field.
    """
    return {name.split('.')[-2] for name in os.listdir(data_folder)}
def get_dataset_path(self, split, data_folder, epoch, lgs=None, is_pair=False):
    """Pick one shard path per language (or language pair) for this epoch.

    Shards are named ``<split>.<index>.<lg>``; the shard index rotates
    deterministically with *epoch* through a seeded permutation, so every
    worker selects the same shard for a given epoch.
    """
    # Directory listings are cached per folder to avoid re-scanning.
    if data_folder in self.path_cache:
        files = self.path_cache[data_folder]
    else:
        files = [path for path in os.listdir(data_folder)]
        # remove this to speed up
        # if os.path.isfile(os.path.join(data_folder, path))
        self.path_cache[data_folder] = files
    files = [path for path in files if(split in path) and (".bin" in path)]
    if lgs is None:
        # Infer languages from the shard file names when not given.
        lgs = set([x.split('.')[-2] for x in files])
    paths = {}
    for lg_index, lg in enumerate(lgs):
        if is_pair:
            # For a pair like 'en-zh', count shards of the source side.
            pair = lg.split('-')
            split_count = len([path for path in files if ".{0}.{1}.bin".format(lg, pair[0]) in path])
        else:
            split_count = len([path for path in files if ".{0}.bin".format(lg) in path])
        # After every shard has been visited once ("big step"), reshuffle the
        # visiting order with a new deterministic seed.
        big_step = epoch // split_count
        small_step = epoch % split_count
        with data_utils.numpy_seed((self.args.seed + big_step) * 100 + lg_index):
            shuffle = np.random.permutation(split_count)
        index = shuffle[small_step]
        path = os.path.join(data_folder, "{0}.{1}.{2}".format(split, index, lg))
        paths[lg] = path
    return paths
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(":")
assert len(paths) > 0
# pretrained dataset path
mono_ratio = self.args.mono_ratio
para_path = ""
para_dataset = None
lang_splits = [split]
if split == getattr(self.args, "train_subset", "train"):
if mono_ratio > 1e-5 :
data_path = paths[0]
split_path = os.path.join(data_path, split)
sampled = self.args.sampled_data
languages = self.mono_langs
if sampled:
all_lg_path = self.get_dataset_path(split, data_path, epoch, languages)
if languages is None:
languages = list(all_lg_path.keys())
else:
all_lg_path = None
if languages is None:
languages = sorted([
lang for lang in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, lang))
])
else:
for lang in languages:
assert os.path.exists(os.path.join(data_path, lang)), "all the languages must exist"
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info("Language to id mapping: {}".format({
lang: ids for ids, lang in enumerate(languages)
})
)
mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
lang_datasets = []
for language in languages:
tag_num = int(self.args.with_len)
split_path = os.path.join(data_path, language, split) if all_lg_path is None else all_lg_path[language]
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError("Dataset not found: {} ({})".format(split, split_path))
end_token = self.source_dictionary.eos()
# create continuous blocks of tokens
strip_length = 2 if self.args.mono_domain is None else 3
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - strip_length, # one less for <s>
pad=self.source_dictionary.pad(),
eos=end_token,
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
if self.args.prepend_bos:
bos_idx = self.source_dictionary.bos()
dataset = PrependTokenDataset(dataset, bos_idx)
if self.args.mono_domain is not None:
tag_num += 1
bos_idx = self.source_dictionary.index('[{}]'.format(self.args.mono_domain))
dataset = PrependTokenDataset(dataset, bos_idx)
if self.args.add_lang_token:
tag_num += 1
bos_idx = self.source_dictionary.index('[2{}]'.format(language))
dataset = PrependTokenDataset(dataset, bos_idx)
# replace end token
dataset = StripTokenDataset(dataset, self.source_dictionary.eos())
dataset = AppendTokenDataset(dataset, end_token)
lang_mask_whole_words = mask_whole_words if language not in language_without_segmentations else None
lang_dataset = XDAEDenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
lang_mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
tag_num=tag_num,
eos=end_token,
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'loaded total {} blocks for all languages'.format(
dataset_lengths.sum(),
)
)
if not self.args.sampled_data:
#For train subset, additionally up or down sample languages.
if self.args.downsample_by_min:
min_len = min(dataset_lengths)
size_ratio = min_len / dataset_lengths
else:
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language: {}".format({
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
})
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language: {}".format({
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
})
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
mono_dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
mono_dataset = ConcatDataset(
lang_datasets,
)
# start loading parallel dataset
para_path = paths[1] if self.args.mono_ratio > 1e-5 else paths[0]
para_datasets = []
for pair in self.para_langs:
src, tgt = pair.split("-")
lang_dataset = load_multi_langpair_dataset(
para_path,
split,
src,
self.source_dictionary,
tgt,
self.target_dictionary,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 512),
max_target_positions=getattr(self.args, 'max_target_positions', 512),
prepend_bos=getattr(self.args, 'preprend_bos', False),
add_language_token=self.args.add_lang_token,
domain=self.args.para_domain,
common_eos=self.args.common_eos,
)
para_datasets.append(lang_dataset)
if len(para_datasets) > 1:
dataset_lengths = np.array([len(d) for d in para_datasets], dtype=float)
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language pair: {}".format({
pair: "{0:.4f}".format(sample_probs[id])
for id, pair in enumerate(self.para_langs)
})
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language for finetuning: {}".format({
pair: "{0:.2f}".format(size_ratio[id])
for id, pair in enumerate(self.para_langs)
})
)
resampled_lang_datasets = [
ResamplingDataset(
para_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(para_datasets)
]
para_dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
para_dataset = para_datasets[0]
if mono_ratio > 1e-5:
mono_len, para_len = len(mono_dataset), len(para_dataset)
if mono_len > para_len:
ratio = float(para_len/mono_len)*mono_ratio/(1.0 - mono_ratio)
logger.info("Down sampling probability for monolingual data: {}".format(ratio))
mono_dataset = ResamplingDataset(
mono_dataset,
size_ratio=ratio,
seed=self.args.seed,
epoch=epoch,
replace=ratio >= 1.0,
)
else:
ratio = float(mono_len/para_len)*(1.0 - mono_ratio)/mono_ratio
logger.info("Down sampling probability for parallel data: {}".format(ratio))
para_dataset = ResamplingDataset(
para_dataset,
size_ratio=ratio,
seed=self.args.seed,
epoch=epoch,
replace=ratio >= 1.0,
)
para_dataset = ConcatDataset(
[para_dataset, mono_dataset]
)
ft_path = self.args.finetune_data
ft_datasets = []
if not (split == getattr(self.args, "train_subset", "train") and (ft_path == para_path)):
for pair in self.ft_langs:
src, tgt = pair.split("-")
lang_dataset = load_multi_langpair_dataset(
ft_path,
split,
src,
self.source_dictionary,
tgt,
self.target_dictionary,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 512),
max_target_positions=getattr(self.args, 'max_target_positions', 512),
prepend_bos=getattr(self.args, 'preprend_bos', False),
add_language_token=self.args.add_lang_token,
domain=self.args.ft_domain,
common_eos=self.args.common_eos,
)
ft_datasets.append(lang_dataset)
if split == getattr(self.args, "train_subset", "train"):
if len(ft_datasets) > 1:
dataset_lengths = np.array([len(d) for d in ft_datasets], dtype=float)
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language pair: {}".format({
pair: "{0:.4f}".format(sample_probs[id])
for id, pair in enumerate(self.ft_langs)
})
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language for finetuning: {}".format({
pair: "{0:.2f}".format(size_ratio[id])
for id, pair in enumerate(self.ft_langs)
})
)
resampled_lang_datasets = [
ResamplingDataset(
ft_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(ft_datasets)
]
ft_dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
ft_dataset = ft_datasets[0] if len(ft_datasets) > 0 else None
else:
ft_dataset = ConcatDataset(ft_datasets)
domain_name = "_{}".format(self.args.ft_domain) if self.args.ft_domain is not None else ""
for lang_id, lang_dataset in enumerate(ft_datasets):
split_name = split + "_" + self.ft_langs[lang_id] + domain_name
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
if hasattr(self.args, "valid_subset"):
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
if para_dataset is None:
assert ft_dataset is not None, "must have at least some dataset"
para_dataset = ft_dataset
elif ft_dataset is not None:
para_dataset = ConcatDataset(
[para_dataset, ft_dataset]
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(para_dataset))
self.datasets[split] = SortDataset(
para_dataset,
sort_order=[
shuffle,
para_dataset.sizes,
],
)
def load_notes(self, split="test"):
if self.args.with_predefined_notes_group:
path = os.path.join(self.args.finetune_data, f"{split}.notes.group.pitches_dur")
else:
path = os.path.join(self.args.finetune_data, f"{split}.notes.pitches_dur")
notes = []
durations = []
with open(path, 'r') as rf:
for l in rf:
if self.args.with_predefined_notes_group:
pairs = [[x.split(":") for x in group.split()] for group in l.strip().split("|")]
notes.append([[float(x[0]) for x in group] for group in pairs])
durations.append([[float(x[0]) for x in group] for group in pairs])
else:
pairs = [x.split(":") for x in l.strip().split()]
notes.append([[float(x[0])] for x in pairs])
durations.append([[float(x[1])] for x in pairs])
return notes, durations
def load_rests(self, split="test"):
if self.args.with_predefined_notes_group:
path = os.path.join(self.args.finetune_data, f"{split}.notes.group.rests_dur")
else:
path = os.path.join(self.args.finetune_data, f"{split}.notes.rests_dur")
rests = []
durations = []
with open(path, 'r') as rf:
for l in rf:
pairs = [x.split(":") for x in l.strip().split()]
rests.append([float(x[0]) for x in pairs])
durations.append([float(x[1]) for x in pairs])
return rests, durations
def load_zh_lyrics_dictionary(self):
path = os.path.join(self.args.lyrics_dict)
lyrics_dict = {}
with open(path, 'r') as rf:
for l in rf:
w, f = l.strip().split()
lyrics_dict[w] = float(f)
return lyrics_dict
def inference_step(self, generator, models, sample, prefix_tokens=None,
notes=None, durations=None, rests=None, rest_durations=None,
constraints=None):
with torch.no_grad():
bos_token = self.dictionary.bos()
return generator.generate(models, sample,
prefix_tokens=prefix_tokens, bos_token=bos_token,
notes=notes, durations=durations, rests=rests, rest_durations=rest_durations)
    def build_generator(
        self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
    ):
        """Build a sequence generator (or scorer) configured from *args*.

        Selects a search strategy (beam / sampling / diverse / constrained),
        picks the generator class, and wires in the music-specific scoring
        weights (notes, rests, durations) and the lyrics dictionary.
        """
        eos = self.source_dictionary.eos()
        # Scoring mode: return a scorer instead of a generator.
        if getattr(args, "score_reference", False):
            from fairseq.sequence_scorer import SequenceScorer
            return SequenceScorer(
                self.target_dictionary,
                compute_alignment=getattr(args, "print_alignment", False),
            )
        # Pick the generator implementation depending on whether note
        # alignment constraints are enabled.
        if self.args.align_notes:
            from fairseq.sequence_generator_with_constraints import (
                SequenceGenerator,
                SequenceGeneratorWithAlignment,
            )
        else:
            from fairseq.sequence_generator_with_prefix import (
                SequenceGenerator,
                SequenceGeneratorWithAlignment,
            )
        # Optional Facebook-internal generator; absence is tolerated.
        try:
            from fairseq.fb_sequence_generator import FBSequenceGenerator
        except ModuleNotFoundError:
            pass
        # Choose search strategy. Defaults to Beam Search.
        sampling = getattr(args, "sampling", False)
        sampling_topk = getattr(args, "sampling_topk", -1)
        sampling_topp = getattr(args, "sampling_topp", -1.0)
        diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
        diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
        match_source_len = getattr(args, "match_source_len", False)
        diversity_rate = getattr(args, "diversity_rate", -1)
        constrained = getattr(args, "constraints", False)
        prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
        # The search modes below are mutually exclusive.
        if (
            sum(
                int(cond)
                for cond in [
                    sampling,
                    diverse_beam_groups > 0,
                    match_source_len,
                    diversity_rate > 0,
                ]
            )
            > 1
        ):
            raise ValueError("Provided Search parameters are mutually exclusive.")
        assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
        assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
        if sampling:
            search_strategy = search.Sampling(
                self.target_dictionary, sampling_topk, sampling_topp
            )
        elif diverse_beam_groups > 0:
            search_strategy = search.DiverseBeamSearch(
                self.target_dictionary, diverse_beam_groups, diverse_beam_strength
            )
        elif match_source_len:
            # this is useful for tagging applications where the output
            # length should match the input length, so we hardcode the
            # length constraints for simplicity
            search_strategy = search.LengthConstrainedBeamSearch(
                self.target_dictionary,
                min_len_a=1,
                min_len_b=0,
                max_len_a=1,
                max_len_b=0,
            )
        elif diversity_rate > -1:
            search_strategy = search.DiverseSiblingsSearch(
                self.target_dictionary, diversity_rate
            )
        elif constrained:
            search_strategy = search.LexicallyConstrainedBeamSearch(
                self.target_dictionary, args.constraints
            )
        elif prefix_allowed_tokens_fn:
            search_strategy = search.PrefixConstrainedBeamSearch(
                self.target_dictionary, prefix_allowed_tokens_fn
            )
        else:
            search_strategy = search.BeamSearch(self.target_dictionary)
        extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
        # Choose the generator class unless the caller supplied one.
        if seq_gen_cls is None:
            if getattr(args, "print_alignment", False):
                seq_gen_cls = SequenceGeneratorWithAlignment
                extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
            elif getattr(args, "fb_seq_gen", False):
                seq_gen_cls = FBSequenceGenerator
            else:
                seq_gen_cls = SequenceGenerator
        # Optional lyrics frequency dictionary used for rescoring.
        lyrics_dict = None
        if self.args.lyrics_dict is not None:
            lyrics_dict = self.load_zh_lyrics_dictionary()
        return seq_gen_cls(
            models,
            self.target_dictionary,
            beam_size=getattr(args, "beam", 5),
            max_len_a=getattr(args, "max_len_a", 0),
            max_len_b=getattr(args, "max_len_b", 200),
            min_len=getattr(args, "min_len", 1),
            normalize_scores=(not getattr(args, "unnormalized", False)),
            len_penalty=getattr(args, "lenpen", 1),
            unk_penalty=getattr(args, "unkpen", 0),
            temperature=getattr(args, "temperature", 1.0),
            match_source_len=getattr(args, "match_source_len", False),
            no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
            min_align_prob=getattr(self.args, "min_align_prob", 0.1),
            lyrics_dict=lyrics_dict,
            notes_weight=getattr(self.args, "notes_weight", 0.0),
            shape_weight=getattr(self.args, "shape_weight", 0.0),
            rests_weight=getattr(self.args, "rests_weight", 0.0),
            durations_weight=getattr(self.args, "durations_weight", 0.0),
            distance_reward=getattr(self.args, "distance_reward", False),
            notes_level=getattr(self.args, "notes_level", 5),
            search_strategy=search_strategy,
            eos=eos,
            **extra_gen_cls_kwargs,
        )
| StarcoderdataPython |
11366442 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import unittest
from airflow.hooks.postgres_hook import PostgresHook
class TestPostgresHook(unittest.TestCase):
    """Unit tests for PostgresHook using a fully mocked connection."""
    def setUp(self):
        super(TestPostgresHook, self).setUp()
        self.cur = mock.MagicMock()
        # Bind the mock to a local name too, so the nested hook class
        # below can close over it.
        self.conn = conn = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        class UnitTestPostgresHook(PostgresHook):
            # Avoid resolving a real Airflow connection id.
            conn_name_attr = 'test_conn_id'
            def get_conn(self):
                return conn
        self.db_hook = UnitTestPostgresHook()
    def test_copy_expert(self):
        """copy_expert should delegate to the cursor and close resources."""
        m = mock.mock_open(read_data='{"some": "json"}')
        # Patch builtins ``open`` as seen from the hook module so no real
        # file is touched.
        with mock.patch('airflow.hooks.postgres_hook.open', m, create=True) as m:
            statement = "SQL"
            filename = "filename"
            self.cur.fetchall.return_value = None
            f = m(filename, 'w')
            # Stand-in for ``open`` that always returns the mock handle.
            def test_open(filename, mode):
                return f
            self.assertEqual(None, self.db_hook.copy_expert(statement, filename, open=test_open))
            # Connection and cursor must both be released exactly once.
            self.conn.close.assert_called_once()
            self.cur.close.assert_called_once()
            self.cur.copy_expert.assert_called_once_with(statement, f)
| StarcoderdataPython |
1729634 | from GridProperties import GridProperties
class DTGridProperties(GridProperties):
    """Hyper-parameter search grid for a decision-tree model."""

    def __init__(self):
        super().__init__()

    def init_grid(self):
        """Populate ``grid_dict`` with the decision-tree parameter ranges."""
        depths = [2, 4, 6, 8]
        self.grid_dict = {'max_depth': depths}
49472 | <filename>lifegame.py<gh_stars>0
# from machine import I2C,Pin
# from ssd1306 import SSD1306_I2C#I2C的oled选该方法
# i2c=I2C(0,sda=Pin(0), scl=Pin(1), freq=400000)
# oled = SSD1306_I2C(128, 64, i2c) #你的OLED分辨率,使用I2C
# import ujson as json
# oled.fill(1) #清空屏幕
# oled.show()
# oled.fill(0)
# oled.show()
import json
import time
import gc
def lifegame(past, xa=64, ya=32):
    """Advance Conway's Game of Life by one generation.

    Args:
        past: list of live cells as ``[x, y]`` pairs.
        xa, ya: grid width and height; cells outside ``0..xa-1`` /
            ``0..ya-1`` can never be born.

    Returns:
        The next generation as a list of ``[x, y]`` pairs: survivors first
        (in input order), then newborn cells (in first-touched order),
        matching the original implementation's ordering exactly.
    """
    # O(1) membership test instead of the original O(n) list scan per
    # neighbour (the original was quadratic in the population size).
    alive = {(x, y) for x, y in past}
    now = []
    # Dead neighbour -> number of live neighbours. Tuple keys replace the
    # original json.dumps string keys; insertion order is preserved so the
    # birth order is unchanged.
    birth_counts = {}
    for x, y in past:
        neighbours = [
            (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
            (x - 1, y), (x + 1, y),
            (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),
        ]
        live = 0
        for nb in neighbours:
            if nb in alive:
                live += 1
            elif 0 <= nb[0] < xa and 0 <= nb[1] < ya:
                birth_counts[nb] = birth_counts.get(nb, 0) + 1
        # A live cell survives with exactly 2 or 3 live neighbours.
        if live in (2, 3):
            now.append([x, y])
    # A dead cell with exactly 3 live neighbours is born.
    for cell, count in birth_counts.items():
        if count == 3:
            now.append([cell[0], cell[1]])
    return now
f=open("a.json",'r')
d=json.loads(f.read())
f.close()
for i in range(100):
t=time.time()
d=lifegame(d)
print(i,":",len(d),":",time.time()-t,gc.mem_free())
gc.collect()
| StarcoderdataPython |
557 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
test_dependencies = ["Blog Post"]
class TestWebsiteRouteMeta(unittest.TestCase):
    """Verify that Website Route Meta documents inject <meta> tags into
    the rendered page for their route."""
    def test_meta_tag_generation(self):
        # Use any published blog post with a non-empty route as the target.
        blogs = frappe.get_all(
            "Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
        )
        blog = blogs[0]
        # create meta tags for this route
        doc = frappe.new_doc("Website Route Meta")
        doc.append("meta_tags", {"key": "type", "value": "blog_post"})
        doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
        # The document name is the route it applies to.
        doc.name = blog.route
        doc.insert()
        # set request on this route
        set_request(path=blog.route)
        response = get_response()
        self.assertTrue(response.status_code, 200)
        html = response.get_data().decode()
        # "og:" keys render as ``property=``, plain keys as ``name=``.
        self.assertTrue("""<meta name="type" content="blog_post">""" in html)
        self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)
    def tearDown(self):
        # Discard the inserted Website Route Meta document.
        frappe.db.rollback()
| StarcoderdataPython |
3363756 | <filename>app/productdb/validators.py
import json
from django.core.exceptions import ValidationError
import app.productdb.models
def validate_json(value):
    """Validate that *value* parses as JSON.

    :param value: JSON text to check.
    :raises ValidationError: if the value cannot be parsed as JSON.
    :return: None
    """
    try:
        json.loads(value)
    # Narrowed from a bare ``except``: json.loads raises ValueError
    # (JSONDecodeError) for malformed text and TypeError for non-string
    # input; a bare except would also swallow KeyboardInterrupt/SystemExit.
    except (ValueError, TypeError):
        raise ValidationError("Invalid format of JSON data string")
def validate_product_list_string(value, vendor_id):
    """
    verifies that a product list string contains only valid Product IDs that are stored in the database for a given
    vendor

    :param value: newline- and/or semicolon-separated Product IDs
    :param vendor_id: primary key of the Vendor the products must belong to
    :raises ValidationError: if any listed Product ID is unknown for the vendor
    """
    values = []
    missing_products = []
    for line in value.splitlines():
        values += line.split(";")
    values = sorted(e.strip() for e in values)
    for product_id in values:
        try:
            app.productdb.models.Product.objects.get(product_id=product_id, vendor_id=vendor_id)
        # Narrowed from a bare ``except``: only a missing product should be
        # treated as "not found"; other database errors (including
        # MultipleObjectsReturned) now propagate instead of being swallowed.
        except app.productdb.models.Product.DoesNotExist:
            missing_products.append(product_id)
    if len(missing_products) != 0:
        v = app.productdb.models.Vendor.objects.filter(id=vendor_id).first()
        msg = "The following products are not found in the database for the vendor %s: %s" % (
            v.name,
            ",".join(missing_products)
        )
        # NOTE(review): error code "invaild" looks like a typo for
        # "invalid"; kept unchanged in case callers match on it.
        raise ValidationError(msg, code="invaild")
| StarcoderdataPython |
9604453 | '''Train CaPE with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import pickle
from sklearn.metrics import log_loss, brier_score_loss
from scipy.special import expit
from models import *
from calibration_tools.evaluate import evaluate
from calibration_tools.modified_training import BaselineTrainable, FocalLoss, EntropyRegularizedLoss, MMCELoss
from calibration_tools.ensemble import BaselineEnsemble
from calibration_tools.calibrate import calibrate_model, inference
from calibration_tools.cape import CaPE
from datasets import FaceDataset, CancerDataset
from tqdm import tqdm
# Command-line configuration for the CaPE calibration experiments.
parser = argparse.ArgumentParser(description='PyTorch CaPE Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--model-name', default='ckpt.pth', type=str)
parser.add_argument('--model-dir', default='crossentropy', type=str, required=True)
parser.add_argument('--label_type', default='equal', type=str)
parser.add_argument('--methods', default='ce', type=str)
parser.add_argument('--dataset', default='face', type=str)
parser.add_argument('--early_stopping_ckpt', default='.', type=str)
args = parser.parse_args()
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
def unpickling(file):
    """Load and return the pickled object stored at *file*.

    SECURITY NOTE: pickle.load can execute arbitrary code; only use on
    trusted files.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open for the garbage collector).
    with open(file, 'rb') as handle:
        return pickle.load(handle)
# Build train/val/test splits for the selected dataset. Only 'face' and
# 'cancer' are implemented; 'weather'/'traffic' are placeholders.
if args.dataset == 'face':
    assert args.label_type in {'linear', 'sig', 'skewed', 'centered', 'discrete'}
    trainset = FaceDataset(root='./Faces_detection/', prob_type=args.label_type, mode='train', std_classifier=False)
    valset = FaceDataset(root='./Faces_detection/', prob_type=args.label_type, mode='val', std_classifier=False)
    testset = FaceDataset(root='./Faces_detection/', prob_type=args.label_type, mode='test', std_classifier=False)
elif args.dataset == 'weather':
    raise NotImplementedError
elif args.dataset == 'traffic':
    raise NotImplementedError
elif args.dataset == 'cancer':
    bag_dir = './cancer_survival/'
    trainset = CancerDataset(bag_dir + "train")
    valset = CancerDataset(bag_dir + "val")
    testset = CancerDataset(bag_dir + "test")
else:
    raise NotImplementedError
def cancer_collate_func(data):
    """Collate (x, label, prob, index, ground-truth) tuples into a batch.

    The bags (``x``) stay a plain list because they may vary in size; the
    scalar fields are stacked into LongTensor / FloatTensor batches.
    """
    if data:
        xs, labels, probs, indices, gts = map(list, zip(*data))
    else:
        xs, labels, probs, indices, gts = [], [], [], [], []
    return (
        xs,
        torch.LongTensor(labels),
        torch.FloatTensor(probs),
        torch.LongTensor(indices),
        torch.FloatTensor(gts),
    )
# DataLoaders: cancer bags vary in size, so they need the custom collate
# function and a smaller batch size.
if args.dataset == 'cancer':
    batch_size = 16
    # baseline_method_v2.BATCH_SIZE = 16
    # baseline_trainable_v2.BATCH_SIZE = 16
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, collate_fn=cancer_collate_func, num_workers=4)
    valloader = torch.utils.data.DataLoader(
        valset, batch_size=batch_size, shuffle=False, collate_fn=cancer_collate_func, num_workers=4)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, shuffle=False, collate_fn=cancer_collate_func, num_workers=4)
else:
    batch_size = 256
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=4)
    valloader = torch.utils.data.DataLoader(
        valset, batch_size=batch_size, shuffle=False, num_workers=4)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, shuffle=False, num_workers=4)
# NOTE(review): CIFAR-10 class names — appears to be leftover from a
# template script; not referenced by the datasets above.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# All tasks are binary, hence num_classes=2 everywhere.
if args.dataset == 'face':
    net = torchvision.models.resnet18(num_classes=2)
elif args.dataset == 'cancer':
    net = MILAttention(batch=True)
else:
    net = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=2)
net = net.to(device)
# NOTE(review): ``device`` is set to 'cuda:0' above, so this comparison is
# never true and DataParallel is never applied — confirm whether intended.
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    # cudnn.benchmark = True
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/' + args.model_name)
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
# Halve the LR when validation loss plateaus for 5 epochs.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5, \
                                                       min_lr=1e-6, verbose=True)
# Training
def train(epoch):
    """Run one cross-entropy training epoch over the global ``trainloader``.

    Uses the module-level ``net``, ``optimizer``, ``criterion`` and
    ``device``; prints the mean batch loss at the end of the epoch.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    for step, (inputs, targets, _, _, _) in tqdm(enumerate(trainloader)):
        inputs = inputs.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        logits = net(inputs)
        loss = criterion(logits, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        predictions = logits.max(1)[1]
        n_seen += targets.size(0)
        n_correct += predictions.eq(targets).sum().item()
    print("Loss: %.3f" % (running_loss / (step + 1)))
def test(net, dataloader):
    """Evaluate *net* on *dataloader*.

    Returns (targets_probs, labels): the predicted probability of class 1
    and the (possibly noisy) label for each example. For WSI-style
    datasets the per-tile outputs are aggregated to slide level first.
    """
    # NOTE(review): ``best_acc`` is declared global but never read or
    # written in this function — appears to be leftover.
    global best_acc
    net.eval()
    targets_probs = np.zeros(len(dataloader.dataset))
    labels = np.zeros(len(dataloader.dataset))
    indices = np.zeros(len(dataloader.dataset))
    gt_labels = np.zeros(len(dataloader.dataset))
    net.eval()
    with torch.no_grad():
        # WSI datasets yield variable-size id lists per batch, so tile
        # results are written at a running offset instead of by id.
        pointer = 0
        for batch_idx, (inputs, label, _, ids, gt_label) in tqdm(enumerate(dataloader)):
            if "WSI" in str(type(dataloader.dataset)):
                idx = np.arange(pointer, pointer + len(ids))
                pointer += len(ids)
            else:
                idx = ids
            inputs = inputs.to(device)
            outputs = net(inputs)
            out_prob = F.softmax(outputs, dim=1)
            targets_probs[idx] = out_prob[:, 1].cpu().numpy()
            labels[idx] = label
            gt_labels[idx] = gt_label
            indices[idx] = ids
    if "WSI" in str(type(dataloader.dataset)):
        # Aggregate tile-level predictions into one entry per slide.
        tile_level = {
            "target_prob": targets_probs,
            "labels": labels,
            "gt": gt_labels,
            "slide_id": indices
        }
        slide_level = dataloader.dataset._aggregate_tile(tile_level)
        targets_probs, labels, indices, gt_labels = np.array(slide_level['target_prob']), \
            np.array(slide_level['labels']), np.array(
            slide_level['slide_id']), np.array(slide_level['gt'])
    return targets_probs, labels
print(os.path.join(args.model_dir, args.model_name))
# --- Training / calibration entry points, selected by --methods ---
if args.methods == 'ce':
    # Plain cross-entropy training with best-on-validation checkpointing.
    min_val_loss = 1e10
    for epoch in range(start_epoch, start_epoch + 200):
        train(epoch)
        val_targets_probs, labels = test(net, valloader)
        val_loss = log_loss(y_true=labels, y_pred=val_targets_probs)
        state = {
            'net': net.state_dict(),
            'val_loss': val_loss,
            'epoch': epoch,
        }
        if min_val_loss > val_loss:
            print('Saving..')
            print('val_loss: {:.3f}'.format(val_loss))
            if not os.path.isdir(args.model_dir):
                os.mkdir(args.model_dir)
            torch.save(state, os.path.join(args.model_dir, args.model_name))
            min_val_loss = val_loss
        # Also keep a per-epoch snapshot regardless of validation loss.
        torch.save(state, os.path.join(args.model_dir, "epoch_{}.pth".format(epoch)))
        scheduler.step(val_loss)
elif args.methods == 'deepensemble':
    # Deep ensemble baseline with adversarial-example smoothing.
    M = 5
    adversarial_epsilon = 0.01
    if not os.path.isdir(args.model_dir):
        os.mkdir(args.model_dir)
    baseline = BaselineEnsemble(net, M, optimizer, criterion, trainset, valset, adversarial_epsilon=adversarial_epsilon,
                                save_dir=os.path.join(args.model_dir, args.model_name), num_epoch=200)
    baseline.fit()
elif args.methods == 'ours_bin':
    # CaPE finetuning from an early-stopped checkpoint, binning variant.
    if not os.path.isdir(args.model_dir):
        os.mkdir(args.model_dir)
    # lr = 1e-3
    model_path = args.early_stopping_ckpt
    checkpoint = torch.load(model_path)['net']
    net.load_state_dict(checkpoint)
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    m_kwargs = {
        "net": net,  # early stopped network
        "optimizer": optimizer,  # optimizer for finetuning
        "train_dataset": trainset,  # train set
        "val_dataset": valset,  # validation set for finetuning stopping
        "num_epoch": 100,  # max number of epochs for finetuning
        "n_bins": 5,  # number of bins for updated probabilistic labels
        "calpertrain": 2,
        "finetune_type": "bin",
        "save_dir": os.path.join(args.model_dir, args.model_name)
    }
    calibrate_model(CaPE, m_kwargs=m_kwargs, test_dataset=testset)
elif args.methods == 'ours_kd':
    # CaPE finetuning, kernel-density-estimate variant.
    if not os.path.isdir(args.model_dir):
        os.mkdir(args.model_dir)
    model_path = args.early_stopping_ckpt
    checkpoint = torch.load(model_path)['net']
    net.load_state_dict(checkpoint)
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    m_kwargs = {
        "net": net,  # early stopped network
        "optimizer": optimizer,  # optimizer for finetuning
        "train_dataset": trainset,  # train set
        "val_dataset": valset,  # validation set for finetuning stopping
        "num_epoch": 100,  # max number of epochs for finetuning
        "calpertrain": 3,
        "finetune_type": "kde",
        "sigma": 0.05,
        "window": 100,
        "save_dir": os.path.join(args.model_dir, args.model_name)
    }
    calibrate_model(CaPE, m_kwargs=m_kwargs, test_dataset=testset)
else:
    # Trainable calibration-loss baselines (focal / entropy / MMCE).
    if args.methods == "focal":
        criterion = FocalLoss(alpha=None, gamma=2.0)
    elif args.methods == "entropy":
        criterion = EntropyRegularizedLoss(beta=1.0)
    elif args.methods == "MMCE":
        criterion = MMCELoss(beta=3.0)
    else:
        raise NotImplementedError
    if not os.path.isdir(args.model_dir):
        os.mkdir(args.model_dir)
    baseline = BaselineTrainable(net, optimizer, criterion, trainset, valset,
                                 save_dir=os.path.join(args.model_dir, args.model_name), num_epoch=200)
    baseline.fit()
| StarcoderdataPython |
9719744 | # Generated by Django 3.1.4 on 2021-02-24 19:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (makemigrations, Django 3.1.4):
    widens verbose_names on Match fields and tightens the LineUp
    uniqueness constraint. Avoid editing by hand."""
    dependencies = [
        ('rthl_site', '0046_auto_20210224_2100'),
    ]
    operations = [
        migrations.AlterField(
            model_name='match',
            name='date',
            field=models.DateTimeField(verbose_name='Date and time'),
        ),
        migrations.AlterField(
            model_name='match',
            name='name',
            field=models.CharField(max_length=50, verbose_name='Title'),
        ),
        migrations.AlterField(
            model_name='match',
            name='place',
            field=models.CharField(blank=True, max_length=50, verbose_name='Place'),
        ),
        migrations.AlterField(
            model_name='match',
            name='team_A',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='match_teamA', to='rthl_site.team', verbose_name='team А'),
        ),
        migrations.AlterField(
            model_name='match',
            name='team_B',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='match_teamB', to='rthl_site.team', verbose_name='team B'),
        ),
        migrations.AlterField(
            model_name='match',
            name='tournament',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='match_tournament', to='rthl_site.tournament', verbose_name='In tournament'),
        ),
        migrations.AlterUniqueTogether(
            name='lineup',
            unique_together={('match', 'team_side', 'team')},
        ),
    ]
| StarcoderdataPython |
import os
import shutil
from tempfile import SpooledTemporaryFile

from storages.backends.s3boto3 import S3Boto3Storage
class CustomS3Boto3Storage(S3Boto3Storage):
    """
    This is our custom version of S3Boto3Storage that fixes a bug in
    boto3 where the passed in file is closed upon upload.
    https://github.com/boto/boto3/issues/929
    https://github.com/matthewwithanm/django-imagekit/issues/391
    """

    def _save_content(self, obj, content, parameters):
        """
        We create a clone of the content file as when this is passed to boto3 it wrongly closes
        the file upon upload where as the storage backend expects it to still be open
        """
        # Seek our content back to the start
        content.seek(0, os.SEEK_SET)
        # Create a temporary file that will write to disk after a specified size
        content_autoclose = SpooledTemporaryFile()
        # Stream the copy in chunks (shutil.copyfileobj) instead of the
        # previous single content.read(), which buffered the entire file
        # in memory at once.
        shutil.copyfileobj(content, content_autoclose)
        # Upload the object which will auto close the content_autoclose instance
        super()._save_content(obj, content_autoclose, parameters)
        # Cleanup if this is fixed upstream our duplicate should always close
        if not content_autoclose.closed:
            content_autoclose.close()
| StarcoderdataPython |
6703114 | <reponame>andela/ah-backend-dojo<gh_stars>1-10
from django.contrib import admin
from .models import Article, FavoriteArticle, ReadingStats
# Register your models here.
# Expose the article models in the Django admin with default ModelAdmin.
admin.site.register(Article)
admin.site.register(FavoriteArticle)
admin.site.register(ReadingStats)
| StarcoderdataPython |
1843100 | <reponame>4mYHime/tls-sig-api-python
import base64
import zlib
import json
import time
# The python ecdsa library can be downloaded from https://github.com/warner/python-ecdsa
# or from the link shared by TLS technical support: http://share.weiyun.com/24b674bced4f84ecbbe6a7945738b9f4
# After downloading, enter its root directory and install it with:
# python setup.py install
# The following command converts the private key format:
# openssl ec -outform PEM -inform PEM -in private.pem -out private_ec.pem
# -in takes the downloaded private key; -out is the converted private key file
from ecdsa import SigningKey,util
import hashlib
# Fill in your application's own private key here (PEM format).
ecdsa_pri_key = """
your_private_key
"""
def base64_encode_url(data):
    """Base64-encode *data* (bytes) and map '+', '/', '=' to the URL-safe
    characters '*', '-', '_' used by the TLS sig format. Returns str."""
    encoded = bytes.decode(base64.b64encode(data))
    translation = str.maketrans({'+': '*', '/': '-', '=': '_'})
    return encoded.translate(translation)
def base64_decode_url(base64_data):
    """Inverse of base64_encode_url: restore the standard base64 alphabet
    from '*', '-', '_' and decode the result back to bytes."""
    restored = base64_data.translate(str.maketrans({'*': '+', '-': '/', '_': '='}))
    return base64.b64decode(restored)
class TLSSigAPI:
    """Generates Tencent TLS user signatures: a zlib-compressed,
    URL-safe-base64-encoded JSON blob carrying account fields and an
    ECDSA (SHA-256, DER) signature over a canonical field string."""
    __acctype = 0
    __identifier = ""
    __appid3rd = ""
    __sdkappid = 0
    __version = 20190114
    __expire = 3600*24*30 # default: one month; adjust here if needed
    __pri_key = ""
    __pub_key = ""
    _err_msg = "ok"
    def __get_pri_key(self):
        """Return the SigningKey loaded in __init__."""
        return self.__pri_key_loaded
    def __init__(self, sdkappid, pri_key):
        """Store the app id and parse the PEM-encoded EC private key."""
        self.__sdkappid = sdkappid
        self.__pri_key = pri_key
        self.__pri_key_loaded = SigningKey.from_pem(self.__pri_key)
    def __create_dict(self):
        """Assemble the TLS claim fields (all values as strings)."""
        m = {}
        m["TLS.account_type"] = "%d" % self.__acctype
        m["TLS.identifier"] = "%s" % self.__identifier
        m["TLS.appid_at_3rd"] = "%s" % self.__appid3rd
        m["TLS.sdk_appid"] = "%d" % self.__sdkappid
        m["TLS.expire_after"] = "%d" % self.__expire
        m["TLS.version"] = "%d" % self.__version
        m["TLS.time"] = "%d" % time.time()
        return m
    def __encode_to_fix_str(self, m):
        """Build the canonical newline-separated string that is signed.
        Field order is fixed by the TLS sig specification."""
        fix_str = "TLS.appid_at_3rd:" + m["TLS.appid_at_3rd"] + "\n" \
                  + "TLS.account_type:" + m["TLS.account_type"] + "\n" \
                  + "TLS.identifier:" + m["TLS.identifier"] + "\n" \
                  + "TLS.sdk_appid:" + m["TLS.sdk_appid"] + "\n" \
                  + "TLS.time:" + m["TLS.time"] + "\n" \
                  + "TLS.expire_after:" + m["TLS.expire_after"] + "\n"
        return fix_str
    def tls_gen_sig(self, identifier):
        """Generate and return the URL-safe signature string for *identifier*."""
        self.__identifier = identifier
        m = self.__create_dict()
        fix_str = self.__encode_to_fix_str(m)
        pk_loaded = self.__get_pri_key()
        # ECDSA over SHA-256, DER-encoded signature.
        sig_field = pk_loaded.sign(fix_str.encode(), hashfunc=hashlib.sha256, sigencode=util.sigencode_der)
        sig_field_base64 = base64.b64encode(sig_field)
        s2 = bytes.decode(sig_field_base64)
        m["TLS.sig"] = s2
        json_str = json.dumps(m)
        # type(json_str) -> str
        sig_cmpressed = zlib.compress(json_str.encode()) # json_str bytes-like -> bytes
        # type(sig_cmpressed) ->bytes
        base64_sig = base64_encode_url(sig_cmpressed) # sig_cmpressed bytes-like -> bytes
        return base64_sig
def main():
    """Smoke test: generate and print a usersig with the demo app id/key."""
    api = TLSSigAPI(1400001052, ecdsa_pri_key)
    sig = api.tls_gen_sig("xiaojun")
    # BUG FIX: `print sig` is Python 2 statement syntax and a SyntaxError
    # under Python 3, which the rest of this module targets (bytes.decode,
    # str.encode usage above).  Use the print() function instead.
    print(sig)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11228165 | <reponame>Tahmid04/ExplainaBoard
import sys
import argparse
import numpy
import sys
# sys.path.append("./src")
# from src.utils import *
# from src.errorAnalysis import *
from ..src.errorAnalysis import *
def get_chunk_type(tok):
    """Split a tag such as 'B-PER' into its chunk class and chunk type.

    Args:
        tok: tag string, e.g. "B-PER".

    Returns:
        tuple: (class, type), e.g. ("B", "PER").  A tag without '-'
        (e.g. "O") yields the same string twice.
    """
    parts = tok.split('-')
    # First component is the positional class, last is the entity type;
    # taking the last component tolerates types that themselves contain '-'.
    return parts[0], parts[-1]
def get_chunks(seq):
    """Extract word chunks from a BMES tag sequence (Chinese word segmentation).

    The previous docstring documented an unrelated BIO/tags-dict interface
    with a wrong example, and the body carried ~30 lines of dead commented
    code; both are removed here.  The extraction logic is unchanged.

    Args:
        seq: list of per-character tags drawn from {'B', 'M', 'E', 'S'}.

    Returns:
        list of (tag_string, start, end) tuples with *end* exclusive.
        A single-character word yields ('S', i, i + 1); a multi-character
        word yields its concatenated tags, e.g. ['B', 'M', 'E'] -> ('BME', 0, 3).

    Example:
        >>> get_chunks(['B', 'M', 'E', 'S'])
        [('BME', 0, 3), ('S', 3, 4)]
    """
    chunks = []
    w_start = 0  # index where the current multi-character word began
    tag = ""     # tags accumulated for the word in progress
    for i, tok in enumerate(seq):
        tag += tok
        if tok == "S":
            # Single-character word: flush immediately.
            chunks.append(("S", i, i + 1))
            tag = ""
        elif tok == "B":
            w_start = i
        elif tok == "E":
            chunks.append((tag, w_start, i + 1))
            tag = ""
    return chunks
def read_data(corpus_type, fn, column_no=-1, delimiter=' '):
    """Read a CoNLL-style column file into flat and per-sentence sequences.

    Each non-blank line is "<word><delimiter>...<tag>"; sentences are
    separated by blank lines or '-DOCSTART-' markers.  Every tag gets a
    "-W" suffix appended.

    Returns:
        (flat_words, flat_tags, sentence_words, sentence_tags)
    """
    print('corpus_type', corpus_type)
    total_words, total_tags = [], []
    sent_words, sent_tags = [], []
    cur_words, cur_tags = [], []
    with codecs.open(fn, 'r', 'utf-8') as handle:
        lines = handle.readlines()
    last = len(lines) - 1
    for k, raw in enumerate(lines):
        line = raw.strip()
        if not line or line.startswith('-DOCSTART-'):
            # Sentence boundary: flush the sentence collected so far.
            if cur_words:
                sent_words.append(cur_words)
                sent_tags.append(cur_tags)
                cur_words, cur_tags = [], []
            continue
        columns = line.split(delimiter)
        word = columns[0].strip()
        # By default the tag is taken from the last column.
        tag = columns[column_no].strip() + "-W"
        cur_words.append(word)
        cur_tags.append(tag)
        total_words.append(word)
        total_tags.append(tag)
        if k == last:
            # File ended without a trailing blank line: flush the last sentence.
            sent_words.append(cur_words)
            sent_tags.append(cur_tags)
    return total_words, total_tags, sent_words, sent_tags
# getAspectValue(test_word_sequences, test_trueTag_sequences, test_word_sequences_sent, dict_precomputed_path)
def getAspectValue(test_word_sequences, test_trueTag_sequences, test_word_sequences_sent,
                   test_trueTag_sequences_sent, dict_preComputed_path, dict_aspect_func):
    """Compute, for every gold word span, the value of each analysis aspect.

    Args:
        test_word_sequences: flat list of characters for the whole test set.
        test_trueTag_sequences: flat list of gold BMES tags (parallel to above).
        test_word_sequences_sent: characters grouped per sentence.
        test_trueTag_sequences_sent: gold tags grouped per sentence.
        dict_preComputed_path: aspect -> pickle path of precomputed dictionaries.
        dict_aspect_func: aspect name -> bucketing configuration tuple.

    Returns:
        (dict_span2aspectVal, dict_span2sid, dict_chunkid2span) where spans are
        keyed as "start|||end|||tags", dict_span2sid maps a span to its sentence
        id, and dict_chunkid2span maps a span to "surface|||sentence_text".
    """
    def getSententialValue(test_trueTag_sequences_sent, test_word_sequences_sent):
        # Per-sentence statistics: entity-token density and sentence length.
        eDen = []
        sentLen = []
        for i, test_sent in enumerate(test_trueTag_sequences_sent):
            pred_chunks = set(get_chunks(test_sent))
            num_entityToken = 0
            for pred_chunk in pred_chunks:
                idx_start = pred_chunk[1]
                idx_end = pred_chunk[2]
                num_entityToken += idx_end - idx_start
            # introduce the entity token density in sentence ...
            eDen.append(float(num_entityToken) / len(test_sent))
            # introduce the sentence length in sentence ...
            sentLen.append(len(test_sent))
        return eDen, sentLen
    # Load any precomputed per-aspect dictionaries from disk.
    # NOTE(review): file handle is never closed — consider a `with` block.
    dict_preComputed_model = {}
    for aspect, path in dict_preComputed_path.items():
        print("path:\t"+path)
        if os.path.exists(path):
            print('load the hard dictionary of entity span in test set...')
            fread = open(path, 'rb')
            dict_preComputed_model[aspect] = pickle.load(fread)
        else:
            raise ValueError("can not load hard dictionary" + aspect + "\t" + path)
    # One span->value map per configured aspect.
    dict_span2aspectVal = {}
    for aspect, fun in dict_aspect_func.items():
        dict_span2aspectVal[aspect] = {}
    eDen_list, sentLen_list = [], []
    eDen_list, sentLen_list = getSententialValue(test_trueTag_sequences_sent,
                                                 test_word_sequences_sent)
    # Global-token-position helpers (project helpers from errorAnalysis).
    dict_pos2sid = getPos2SentId(test_word_sequences_sent)
    dict_ap2rp = getTokenPosition(test_word_sequences_sent)  # NOTE(review): unused below
    all_chunks = get_chunks(test_trueTag_sequences)
    dict_span2sid = {}
    dict_chunkid2span = {}
    for span_info in all_chunks:
        idx_start = span_info[1]
        idx_end = span_info[2]
        # Surface form of the span; gbk round-trip drops characters that
        # cannot be represented — presumably a console-encoding workaround.
        span_cnt = ''.join(test_word_sequences[idx_start:idx_end]).lower()
        span_cnt = span_cnt.encode("gbk","ignore").decode("gbk","ignore")
        span_type = ''.join(test_trueTag_sequences[idx_start:idx_end])
        span_pos = str(idx_start) + "|||" + str(idx_end) + "|||" + span_type
        # Diagnostic only: tag string should be one tag per character.
        if len(span_type) != (idx_end - idx_start):
            print(idx_start, idx_end)
            print(span_info)
            print(span_type + "\t" + span_cnt)
            print("--------------")
        span_length = idx_end - idx_start
        span_sentid = dict_pos2sid[idx_start]
        sLen = float(sentLen_list[span_sentid])
        dict_span2sid[span_pos] = span_sentid
        text_sample = "".join(test_word_sequences_sent[span_sentid])
        text_sample = text_sample  # no-op; kept as in original
        dict_chunkid2span[span_pos] = span_cnt + "|||" + text_sample
        # Sentence Length: sLen
        aspect = "sLen"
        if aspect in dict_aspect_func.keys():
            dict_span2aspectVal[aspect][span_pos] = sLen
        # Entity (word) Length: eLen
        aspect = "eLen"
        if aspect in dict_aspect_func.keys():
            dict_span2aspectVal[aspect][span_pos] = float(span_length)
        # Tag: tag (the concatenated BMES string)
        aspect = "tag"
        if aspect in dict_aspect_func.keys():
            dict_span2aspectVal[aspect][span_pos] = span_type
    return dict_span2aspectVal, dict_span2sid, dict_chunkid2span
# def tuple2str(triplet):
# res = ""
# for v in triplet:
# res += str(v) + "_"
# return res.rstrip("_")
def evaluate(task_type = "ner", analysis_type = "single", systems = [], output = "./output.json", is_print_ci = False, is_print_case = False, is_print_ece = False):
    """Run fine-grained CWS evaluation on one system output file.

    Reads a three-column file (text / gold / predicted tags), computes
    holistic F1 plus per-aspect bucketed F1, dataset-bias statistics and
    error cases, and writes everything into a JSON report at *output*.

    NOTE(review): `systems=[]` is a mutable default argument; it is only
    read here, but binding it per-call would be safer.
    """
    path_text = ""
    if analysis_type == "single":
        path_text = systems[0]
    # Hard-coded report metadata and configuration paths.
    corpus_type = "dataset_name"
    model_name = "model_name"
    path_preComputed = ""
    path_aspect_conf = "./interpret_eval/tasks/cws/conf.aspects"
    path_json_input = "./interpret_eval/tasks/cws/template.json"
    fn_write_json = output
    # Initalization: aspect name -> (bucketing function, settings, precomputed?)
    dict_aspect_func = loadConf(path_aspect_conf)
    metric_names = list(dict_aspect_func.keys())
    print("dict_aspect_func: ", dict_aspect_func)
    print(dict_aspect_func)
    # NOTE(review): handle opened here is never used nor closed;
    # save_json() below writes the report independently.
    fwrite_json = open(fn_write_json, 'w')
    # get preComputed paths from conf file
    dict_preComputed_path = {}
    for aspect, func in dict_aspect_func.items():
        is_preComputed = func[2].lower()
        if is_preComputed == "yes":
            dict_preComputed_path[aspect] = path_preComputed + "_" + aspect + ".pkl"
            print("PreComputed directory:\t", dict_preComputed_path[aspect])
    # Columns: 0 = characters, 1 = gold tags, 2 = predicted tags.
    list_text_sent, list_text_token = read_single_column(path_text, 0)
    list_true_tags_sent, list_true_tags_token = read_single_column(path_text, 1)
    list_pred_tags_sent, list_pred_tags_token = read_single_column(path_text, 2)
    # Aspect values for gold spans and for predicted spans.
    dict_span2aspectVal, dict_span2sid, dict_chunkid2span = getAspectValue(list_text_token, list_true_tags_token, list_text_sent, list_true_tags_sent, dict_preComputed_path, dict_aspect_func)
    dict_span2aspectVal_pred, dict_span2sid_pred, dict_chunkid2span_pred = getAspectValue(list_text_token, list_pred_tags_token, list_text_sent, list_pred_tags_sent, dict_preComputed_path, dict_aspect_func)
    holistic_performance = f1(list_true_tags_sent, list_pred_tags_sent)["f1"]
    # Optional bootstrap confidence interval for the overall score.
    confidence_low_overall, confidence_up_overall = 0,0
    if is_print_ci:
        confidence_low_overall, confidence_up_overall = compute_confidence_interval_f1_cws(dict_span2sid.keys(), dict_span2sid_pred.keys(), dict_span2sid, dict_span2sid_pred, n_times=10)
    print("confidence_low_overall:\t", confidence_low_overall)
    print("confidence_up_overall:\t", confidence_up_overall)
    print("------------------ Holistic Result")
    print(holistic_performance)
    def __selectBucktingFunc(func_name, func_setting, dict_obj):
        # Dispatch to a bucketing helper named in the conf file.
        # NOTE(review): eval() on conf-file strings — only safe for
        # trusted configuration files.
        if func_name == "bucketAttribute_SpecifiedBucketInterval":
            return eval(func_name)(dict_obj, eval(func_setting))
        elif func_name == "bucketAttribute_SpecifiedBucketValue":
            if len(func_setting.split("\t")) != 2:
                raise ValueError("selectBucktingFunc Error!")
            n_buckets, specified_bucket_value_list = int(func_setting.split("\t")[0]), eval(func_setting.split("\t")[1])
            return eval(func_name)(dict_obj, n_buckets, specified_bucket_value_list)
        elif func_name == "bucketAttribute_DiscreteValue":  # now the discrete value is R-tag..
            if len(func_setting.split("\t")) != 2:
                raise ValueError("selectBucktingFunc Error!")
            tags_list = list(set(dict_obj.values()))
            topK_buckets, min_buckets = int(func_setting.split("\t")[0]), int(func_setting.split("\t")[1])
            return eval(func_name)(dict_obj, topK_buckets, min_buckets)
    # Bucket spans per aspect, then score each bucket.
    dict_bucket2span = {}
    dict_bucket2span_pred = {}
    dict_bucket2f1 = {}
    aspect_names = []
    errorCase_list = []
    for aspect, func in dict_aspect_func.items():
        dict_bucket2span[aspect] = __selectBucktingFunc(func[0], func[1], dict_span2aspectVal[aspect])
        # Predicted spans reuse the gold bucket boundaries.
        dict_bucket2span_pred[aspect] = bucketAttribute_SpecifiedBucketInterval(dict_span2aspectVal_pred[aspect],
                                                                                dict_bucket2span[aspect].keys())
        dict_bucket2f1[aspect], errorCase_list = getBucketF1_cws(dict_bucket2span[aspect], dict_bucket2span_pred[aspect], dict_span2sid, dict_span2sid_pred, dict_chunkid2span, dict_chunkid2span_pred, list_true_tags_token, list_pred_tags_token, is_print_ci, is_print_case)
        aspect_names.append(aspect)
    print("aspect_names: ", aspect_names)
    print("------------------ Breakdown Performance")
    for aspect in dict_aspect_func.keys():
        printDict(dict_bucket2f1[aspect], aspect)
        print("")
    # Calculate databias w.r.t numeric attributes (skip string-valued aspects).
    dict_aspect2bias={}
    for aspect, aspect2Val in dict_span2aspectVal.items():
        if type(list(aspect2Val.values())[0]) != type("string"):
            dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))
    print("------------------ Dataset Bias")
    for k,v in dict_aspect2bias.items():
        print(k+":\t"+str(v))
    print("")
    def beautifyInterval(interval):
        # Render a bucket key as a human-readable "(lo,hi)" label;
        # string-valued buckets (e.g. tags) are passed through unchanged.
        if type(interval[0]) == type("string"):
            return interval[0]
        else:
            if len(interval) == 1:
                bk_name = '(' + format(float(interval[0]), '.3g') + ',)'
                return bk_name
            else:
                range1_r = '(' + format(float(interval[0]), '.3g') + ','
                range1_l = format(float(interval[1]), '.3g') + ')'
                bk_name = range1_r + range1_l
                return bk_name
    # Flatten bucket scores into the JSON report structure.
    dict_fineGrained = {}
    for aspect, metadata in dict_bucket2f1.items():
        dict_fineGrained[aspect] = []
        for bucket_name, v in metadata.items():
            bucket_name = beautifyInterval(bucket_name)
            # v = (f1, n_samples, ci_low, ci_up, error_cases) — per getBucketF1_cws.
            bucket_value = format(float(v[0])*100, '.4g')
            n_sample = v[1]
            confidence_low = format(float(v[2])*100, '.4g')
            confidence_up = format(float(v[3])*100, '.4g')
            error_entity_list = v[4]
            # Keep only the first 10% of error cases to bound report size.
            dict_fineGrained[aspect].append({"bucket_name":bucket_name, "bucket_value":bucket_value, "num":n_sample, "confidence_low":confidence_low, "confidence_up":confidence_up, "bucket_error_case":error_entity_list[0:int(len(error_entity_list)/10)]})
    # Fill the report template and persist it.
    obj_json = load_json(path_json_input)
    obj_json["task"] = task_type
    obj_json["data"]["name"] = corpus_type
    obj_json["data"]["language"] = "Chinese"
    obj_json["data"]["bias"] = dict_aspect2bias
    obj_json["model"]["name"] = model_name
    obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
    obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low_overall
    obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up_overall
    obj_json["model"]["results"]["fine_grained"] = dict_fineGrained
    # Save error cases: overall (again truncated to 10%).
    obj_json["model"]["results"]["overall"]["error_case"] = errorCase_list[0:int(len(errorCase_list)/10)]
    save_json(obj_json, fn_write_json)
| StarcoderdataPython |
6619048 | <filename>mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from json import loads, dumps
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import call_hook_method, fmt_money
from frappe.integrations.utils import create_request_log, create_payment_gateway
from frappe.utils import get_request_site_address
from erpnext.erpnext_integrations.utils import create_mode_of_payment
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_connector import MpesaConnector
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_custom_fields import create_custom_pos_fields
class MpesaSettings(Document):
    """Frappe DocType controller for an M-Pesa payment gateway configuration.

    Validates supported currencies, wires the gateway into the POS payment
    flow on save, and issues STK-push / account-balance API requests.
    """
    supported_currencies = ["KES"]

    def validate_transaction_currency(self, currency):
        """Reject any transaction currency that M-Pesa cannot process."""
        if currency not in self.supported_currencies:
            frappe.throw(_("Please select another payment method. Mpesa does not support transactions in currency '{0}'").format(currency))

    def on_update(self):
        """Register gateway, payment-gateway account and mode of payment on save."""
        create_custom_pos_fields()
        create_payment_gateway('Mpesa-' + self.payment_gateway_name, settings='Mpesa Settings', controller=self.payment_gateway_name)
        call_hook_method('payment_gateway_enabled', gateway='Mpesa-' + self.payment_gateway_name, payment_channel="Phone")
        # required to fetch the bank account details from the payment gateway account
        frappe.db.commit()
        create_mode_of_payment('Mpesa-' + self.payment_gateway_name, payment_type="Phone")

    def request_for_payment(self, **kwargs):
        """Send one or more STK-push requests covering the requested amount.

        Amounts above the per-transaction limit are split into several
        sequential requests (see split_request_amount_according_to_transaction_limit).
        """
        args = frappe._dict(kwargs)
        request_amounts = self.split_request_amount_according_to_transaction_limit(args)
        for i, amount in enumerate(request_amounts):
            args.request_amount = amount
            if frappe.flags.in_test:
                # Tests stub out the remote API with a canned payload.
                from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_payment_request_response_payload
                response = frappe._dict(get_payment_request_response_payload(amount))
            else:
                response = frappe._dict(generate_stk_push(**args))
            self.handle_api_response("CheckoutRequestID", args, response)

    def split_request_amount_according_to_transaction_limit(self, args):
        """Return a list of amounts, each within the configured transaction limit."""
        request_amount = args.request_amount
        if request_amount > self.transaction_limit:
            # make multiple requests
            request_amounts = []
            requests_to_be_made = frappe.utils.ceil(request_amount / self.transaction_limit)  # 480/150 = ceil(3.2) = 4
            for i in range(requests_to_be_made):
                amount = self.transaction_limit
                if i == requests_to_be_made - 1:
                    amount = request_amount - (self.transaction_limit * i)  # for 4th request, 480 - (150 * 3) = 30
                request_amounts.append(amount)
        else:
            request_amounts = [request_amount]
        return request_amounts

    def get_account_balance_info(self):
        """Trigger an asynchronous account-balance query for this gateway."""
        payload = dict(
            reference_doctype="Mpesa Settings",
            reference_docname=self.name,
            doc_details=vars(self)
        )
        if frappe.flags.in_test:
            from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_test_account_balance_response
            response = frappe._dict(get_test_account_balance_response())
        else:
            response = frappe._dict(get_account_balance(payload))
        self.handle_api_response("ConversationID", payload, response)

    def handle_api_response(self, global_id, request_dict, response):
        """Response received from API calls returns a global identifier for each transaction, this code is returned during the callback."""
        # check error response
        # NOTE: response is a frappe._dict, so a missing "requestId"
        # yields None (falsy) rather than raising — TODO confirm.
        if getattr(response, "requestId"):
            req_name = getattr(response, "requestId")
            error = response
        else:
            # global checkout id used as request name
            req_name = getattr(response, global_id)
            error = None
        if not frappe.db.exists('Integration Request', req_name):
            create_request_log(request_dict, "Host", "Mpesa", req_name, error)
        if error:
            frappe.throw(_(getattr(response, "errorMessage")), title=_("Transaction Error"))
def generate_stk_push(**kwargs):
    """Generate stk push by making a API call to the stk push API.

    Expects kwargs to include `payment_gateway` ("Mpesa-<name>"),
    `sender` (customer phone number) and `request_amount`.
    Returns the raw API response dict; errors are logged and re-raised
    as a user-facing frappe.throw.
    """
    args = frappe._dict(kwargs)
    try:
        # Safaricom calls this URL back with the transaction result.
        callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.verify_transaction"
        # args.payment_gateway is "Mpesa-<gateway name>"; strip the prefix.
        mpesa_settings = frappe.get_doc("Mpesa Settings", args.payment_gateway[6:])
        env = "production" if not mpesa_settings.sandbox else "sandbox"
        # for sandbox, business shortcode is same as till number
        business_shortcode = mpesa_settings.business_shortcode if env == "production" else mpesa_settings.till_number
        connector = MpesaConnector(env=env,
                                   app_key=mpesa_settings.consumer_key,
                                   app_secret=mpesa_settings.get_password("consumer_secret"))
        mobile_number = sanitize_mobile_number(args.sender)
        response = connector.stk_push(
            business_shortcode=business_shortcode, amount=args.request_amount,
            passcode=mpesa_settings.get_password("<PASSWORD>"),
            callback_url=callback_url, reference_code=mpesa_settings.till_number,
            phone_number=mobile_number, description="POS Payment"
        )
        return response
    except Exception:
        frappe.log_error(title=_("Mpesa Express Transaction Error"))
        frappe.throw(_("Issue detected with Mpesa configuration, check the error logs for more details"), title=_("Mpesa Express Error"))
def sanitize_mobile_number(number):
    """Normalize a Kenyan mobile number to international format without '+'.

    Strips an optional leading '+' and any leading zeroes, then prefixes
    the "254" country code unless it is already present.

    BUG FIX: the previous implementation unconditionally prepended "254",
    so a number already given in international format ("254712...") was
    double-prefixed to "254254712...", and "+254..." kept its '+'.

    Args:
        number: phone number as str or int, e.g. "0712345678",
            "+254712345678" or 712345678.

    Returns:
        str: e.g. "254712345678".
    """
    digits = str(number).lstrip("+").lstrip("0")
    return digits if digits.startswith("254") else "254" + digits
@frappe.whitelist(allow_guest=True)
def verify_transaction(**kwargs):
    """Verify the transaction result received via callback from stk.

    Safaricom POSTs the STK result here; the matching Integration Request
    (keyed by CheckoutRequestID) is marked success/failure, the POS invoice
    receipt field is updated, and the POS UI is notified in realtime.
    """
    transaction_response = frappe._dict(kwargs["Body"]["stkCallback"])
    checkout_id = getattr(transaction_response, "CheckoutRequestID", "")
    integration_request = frappe.get_doc("Integration Request", checkout_id)
    transaction_data = frappe._dict(loads(integration_request.data))
    total_paid = 0  # for multiple integration request made against a pos invoice
    success = False  # for reporting successfull callback to point of sale ui
    # ResultCode 0 means the customer completed the payment.
    if transaction_response['ResultCode'] == 0:
        if integration_request.reference_doctype and integration_request.reference_docname:
            try:
                item_response = transaction_response["CallbackMetadata"]["Item"]
                amount = fetch_param_value(item_response, "Amount", "Name")
                mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
                pr = frappe.get_doc(integration_request.reference_doctype, integration_request.reference_docname)
                # Sum up earlier completed partial payments for the same invoice.
                mpesa_receipts, completed_payments = get_completed_integration_requests_info(
                    integration_request.reference_doctype,
                    integration_request.reference_docname,
                    checkout_id
                )
                total_paid = amount + sum(completed_payments)
                mpesa_receipts = ', '.join(mpesa_receipts + [mpesa_receipt])
                if total_paid >= pr.grand_total:
                    pr.run_method("on_payment_authorized", 'Completed')
                    success = True
                frappe.db.set_value("POS Invoice", pr.reference_name, "mpesa_receipt_number", mpesa_receipts)
                integration_request.handle_success(transaction_response)
            except Exception:
                integration_request.handle_failure(transaction_response)
                frappe.log_error(frappe.get_traceback())
    else:
        integration_request.handle_failure(transaction_response)
    # Notify the cashier's POS screen regardless of outcome.
    frappe.publish_realtime(
        event='process_phone_payment',
        doctype="POS Invoice",
        docname=transaction_data.payment_reference,
        user=integration_request.owner,
        message={
            'amount': total_paid,
            'success': success,
            'failure_message': transaction_response["ResultDesc"] if transaction_response['ResultCode'] != 0 else ''
        },
    )
def get_completed_integration_requests_info(reference_doctype, reference_docname, checkout_id):
    """Collect receipts/amounts of earlier completed requests for the same invoice.

    Excludes the request identified by *checkout_id* (the one currently
    being processed).  Returns (mpesa_receipts, completed_payments),
    two parallel lists extracted from each stored callback payload.
    """
    output_of_other_completed_requests = frappe.get_all("Integration Request", filters={
        'name': ['!=', checkout_id],
        'reference_doctype': reference_doctype,
        'reference_docname': reference_docname,
        'status': 'Completed'
    }, pluck="output")
    mpesa_receipts, completed_payments = [], []
    for out in output_of_other_completed_requests:
        # Each `output` is the JSON-serialized stk callback payload.
        out = frappe._dict(loads(out))
        item_response = out["CallbackMetadata"]["Item"]
        completed_amount = fetch_param_value(item_response, "Amount", "Name")
        completed_mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
        completed_payments.append(completed_amount)
        mpesa_receipts.append(completed_mpesa_receipt)
    return mpesa_receipts, completed_payments
def get_account_balance(request_payload):
    """Call account balance API to send the request to the Mpesa Servers.

    *request_payload* must carry `reference_docname` (the Mpesa Settings
    doc name).  The actual balance arrives asynchronously at
    process_balance_info via the callback URL registered here.
    """
    try:
        mpesa_settings = frappe.get_doc("Mpesa Settings", request_payload.get("reference_docname"))
        env = "production" if not mpesa_settings.sandbox else "sandbox"
        connector = MpesaConnector(env=env,
                                   app_key=mpesa_settings.consumer_key,
                                   app_secret=mpesa_settings.get_password("consumer_secret"))
        callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.process_balance_info"
        # 4 = identifier type for an organization shortcode — TODO confirm
        # against the Daraja AccountBalance API documentation.
        response = connector.get_balance(mpesa_settings.initiator_name, mpesa_settings.security_credential, mpesa_settings.till_number, 4, mpesa_settings.name, callback_url, callback_url)
        return response
    except Exception:
        frappe.log_error(title=_("Account Balance Processing Error"))
        frappe.throw(_("Please check your configuration and try again"), title=_("Error"))
@frappe.whitelist(allow_guest=True)
def process_balance_info(**kwargs):
    """Process and store account balance information received via callback from the account balance API call.

    Idempotent: a request already marked Completed is ignored.  On success
    the parsed balance JSON is written to the gateway document and the
    dashboard is refreshed in realtime.
    """
    account_balance_response = frappe._dict(kwargs["Result"])
    conversation_id = getattr(account_balance_response, "ConversationID", "")
    request = frappe.get_doc("Integration Request", conversation_id)
    if request.status == "Completed":
        # Duplicate callback for an already-processed request.
        return
    transaction_data = frappe._dict(loads(request.data))
    if account_balance_response["ResultCode"] == 0:
        try:
            result_params = account_balance_response["ResultParameters"]["ResultParameter"]
            balance_info = fetch_param_value(result_params, "AccountBalance", "Key")
            balance_info = format_string_to_json(balance_info)
            ref_doc = frappe.get_doc(transaction_data.reference_doctype, transaction_data.reference_docname)
            ref_doc.db_set("account_balance", balance_info)
            request.handle_success(account_balance_response)
            frappe.publish_realtime("refresh_mpesa_dashboard", doctype="Mpesa Settings",
                                    docname=transaction_data.reference_docname, user=transaction_data.owner)
        except Exception:
            request.handle_failure(account_balance_response)
            frappe.log_error(title=_("Mpesa Account Balance Processing Error"), message=account_balance_response)
    else:
        request.handle_failure(account_balance_response)
def format_string_to_json(balance_info):
    """Convert the API's pipe-delimited balance string into a JSON string.

    Input: '&'-separated accounts, each '|'-separated as
    name|currency|current|available|reserved|uncleared, e.g.
    '''Working Account|KES|481000.00|481000.00|0.00|0.00'''

    Output (values are fmt_money-formatted currency strings, not raw
    numbers as a previous version of this docstring suggested):
    {"Working Account": {"current_balance": "...",
                         "available_balance": "...",
                         "reserved_balance": "...",
                         "uncleared_balance": "..."}}
    """
    balance_dict = frappe._dict()
    for account_info in balance_info.split("&"):
        account_info = account_info.split('|')
        # account_info[1] (the currency column) is unused; amounts are
        # always rendered as KES.
        balance_dict[account_info[0]] = dict(
            current_balance=fmt_money(account_info[2], currency="KES"),
            available_balance=fmt_money(account_info[3], currency="KES"),
            reserved_balance=fmt_money(account_info[4], currency="KES"),
            uncleared_balance=fmt_money(account_info[5], currency="KES")
        )
    return dumps(balance_dict)
def fetch_param_value(response, key, key_field):
    """Fetch the 'Value' of the first entry whose *key_field* equals *key*.

    *response* is a list of dicts (API result parameters).  Returns None
    when no entry matches.
    """
    return next(
        (entry["Value"] for entry in response if entry[key_field] == key),
        None,
    )
12801525 | <gh_stars>1-10
from bot_manager import TelegramBot

# Entry point: construct the Telegram bot and start it only if its
# initialization succeeded (is_initialized presumably reflects whether
# token/config loading worked — TODO confirm in bot_manager).
bot_mgr = TelegramBot()

if bot_mgr.is_initialized:
    bot_mgr.run()
| StarcoderdataPython |
8060086 | <gh_stars>1-10
from .S2Location import S2Location
from .terms import SBOL2
class S2Range(S2Location):
    """SBOL2 Range location: a start/end span read from the RDF graph.

    Thin accessor wrapper over S2Location's typed property getters,
    using the SBOL2 term URIs.
    """
    def __init__(self, g, uri):
        # g: RDF graph holding the document; uri: this Range's identity.
        super(S2Range, self).__init__(g, uri)

    @property
    def start(self):
        # Start coordinate of the range (integer; presumably 1-based
        # inclusive per the SBOL2 specification — confirm before relying on it).
        return self.get_integer_property(SBOL2.start)

    @property
    def end(self):
        # End coordinate of the range (integer).
        return self.get_integer_property(SBOL2.end)

    @property
    def orientation(self):
        # Orientation term URI (e.g. inline / reverse-complement — TODO confirm).
        return self.get_uri_property(SBOL2.orientation)
11324892 | <reponame>biologioholic/sktime<filename>sktime/transformations/panel/signature_based/tests/test_method.py
# -*- coding: utf-8 -*-
"""Tests for signature method."""
import numpy as np
import pytest
from sktime.transformations.panel.signature_based import SignatureTransformer
def test_generalised_signature_method():
    """Check that dimension and dim of output are correct."""
    # Build an array X, note that this is [n_sample, n_channels, length] shape.
    import esig
    n_channels = 3
    depth = 4
    X = np.random.randn(5, n_channels, 10)
    # Check the global dimension comes out correctly.
    # esig.sigdim counts the signature terms for (n_channels + 1) dims
    # (presumably +1 for the added time channel — confirm against the
    # transformer's augmentation); -1 drops the constant leading 1 term.
    method = SignatureTransformer(depth=depth, window_name="global")
    assert method.fit_transform(X).shape[1] == esig.sigdim(n_channels + 1, depth) - 1
    # Check dyadic dim; factor 15 presumably = 1 + 2 + 4 + 8 windows
    # at dyadic depth 3 — confirm against the window implementation.
    method = SignatureTransformer(depth=depth, window_name="dyadic", window_depth=3)
    assert (
        method.fit_transform(X).shape[1]
        == (esig.sigdim(n_channels + 1, depth) - 1) * 15
    )
    # Ensure an example: fixed input with a hand-computed expected signature.
    X = np.array([[0, 1], [2, 3], [1, 1]]).reshape(-1, 2, 3)
    method = SignatureTransformer(depth=2, window_name="global")
    true_arr = np.array(
        [[1.0, 2.0, 1.0, 0.5, 1.33333333, -0.5, 0.66666667, 2.0, -1.0, 1.5, 3.0, 0.5]]
    )
    assert np.allclose(method.fit_transform(X), true_arr)
def test_window_error():
    """Test that wrong window parameters raise error."""
    # Series of length 3: every configuration below requests windows
    # larger/deeper than the data supports and must be rejected.
    X = np.random.randn(5, 2, 3)
    # Check dyadic gives a value error (window_depth=10 on length-3 series).
    method = SignatureTransformer(window_name="dyadic", window_depth=10)
    with pytest.raises(ValueError):
        method.fit_transform(X)
    # Expanding and sliding errors (window_length=10 exceeds series length).
    method = SignatureTransformer(
        window_name="expanding", window_length=10, window_step=5
    )
    with pytest.raises(ValueError):
        method.fit_transform(X)
    method = SignatureTransformer(
        window_name="sliding", window_length=10, window_step=5
    )
    with pytest.raises(ValueError):
        method.fit_transform(X)
9647070 | import logging
import os
from abc import ABCMeta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from pycsca.utils import print_dictionary
from .constants import LABEL_COL, MISSING_CCS_FIN
from .utils import str2bool
# Module-level plotting defaults: seaborn color codes, matplotlib default style.
sns.set(color_codes=True)
plt.style.use('default')
class CSVReader(metaclass=ABCMeta):
    """Loader for a padding-oracle classification dataset stored as CSVs.

    Reads "Features.csv" and "Feature Names.csv" from *folder*, encodes
    the class labels (the correctly formatted PKCS#1 message is class 0),
    handles missing values per *preprocessing*, and writes label-frequency
    statistics back into the dataset folder.
    """
    def __init__(self, folder: str, preprocessing='replace', **kwargs):
        # preprocessing: 'replace' fills NaNs with -1; 'remove' additionally
        # drops some msg1/msg5 columns before filling (see __load_dataset__).
        self.logger = logging.getLogger(CSVReader.__name__)
        self.dataset_folder = folder
        self.f_file = os.path.join(self.dataset_folder, "Feature Names.csv")
        self.df_file = os.path.join(self.dataset_folder, "Features.csv")
        self.preprocessing = preprocessing
        self.ccs_fin_array = [False]
        self.correct_class = "Correctly Formatted Pkcs#1 Pms Message"
        self.__load_dataset__()

    def __load_dataset__(self):
        """Load, validate, label-encode and preprocess the dataset."""
        if not os.path.exists(self.df_file):
            raise ValueError("No such file or directory: {}".format(self.df_file))
        self.data_frame = pd.read_csv(self.df_file, index_col=0)
        if LABEL_COL not in self.data_frame.columns:
            error_string = 'Dataframe does not contain label columns'
            # NOTE(review): only raises when the frame is ALSO empty; a
            # non-empty frame without a label column falls through and
            # crashes on the label access below — confirm intent.
            if self.data_frame.shape[0] == 0:
                raise ValueError('Dataframe is empty and {}'.format(error_string))
        else:
            # Validate that the correct class exists (on a title-cased copy).
            df = pd.DataFrame.copy(self.data_frame)
            df[LABEL_COL] = df[LABEL_COL].apply(lambda x: ' '.join(x.split('_')).title())
            if self.correct_class not in df[LABEL_COL].unique():
                raise ValueError('Dataframe is does not contain correct class {}'.format(self.correct_class))
        # Normalize labels like "some_label" -> "Some Label".
        self.data_frame[LABEL_COL] = self.data_frame[LABEL_COL].apply(lambda x: ' '.join(x.split('_')).title())
        # Build label <-> int mapping with the correct class pinned to 0.
        labels = list(self.data_frame[LABEL_COL].unique())
        labels.sort()
        labels.remove(self.correct_class)
        label_encoder = LabelEncoder()
        label_encoder.fit_transform(labels)
        self.label_mapping = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_) + 1))
        self.label_mapping = {**{self.correct_class: 0}, **self.label_mapping}
        self.inverse_label_mapping = dict((v, k) for k, v in self.label_mapping.items())
        self.n_labels = len(self.label_mapping)
        # Keep a pristine copy before labels are replaced by integers.
        self.data_raw = pd.DataFrame.copy(self.data_frame)
        self.data_frame[LABEL_COL].replace(self.label_mapping, inplace=True)
        self.logger.info("Label Mapping {}".format(print_dictionary(self.label_mapping)))
        self.logger.info("Inverse Label Mapping {}".format(print_dictionary(self.inverse_label_mapping)))
        # Missing-value handling.
        if self.preprocessing == 'replace':
            self.data_frame = self.data_frame.fillna(value=-1)
        elif self.preprocessing == 'remove':
            # NOTE(review): `or` makes this condition true for every column
            # unless it contains BOTH 'msg1' and 'msg5'; `and` (drop columns
            # containing either) may have been intended — confirm.
            cols = [c for c in self.data_frame.columns if 'msg1' not in c or 'msg5' not in c]
            self.data_frame = self.data_frame[cols]
            self.data_frame = self.data_frame.fillna(value=-1)
        self.features = pd.read_csv(self.f_file, index_col=0)
        self.feature_names = self.features['machine'].values.flatten()
        if MISSING_CCS_FIN in self.data_frame.columns:
            self.data_frame[MISSING_CCS_FIN] = self.data_frame[MISSING_CCS_FIN].apply(str2bool)
            self.ccs_fin_array = list(self.data_frame[MISSING_CCS_FIN].unique())
        # Build the label-frequency report written to label_frequency.csv.
        # NOTE(review): the next two statements are duplicated verbatim;
        # the second pair simply redoes the same copy/replace.
        df = pd.DataFrame.copy(self.data_frame)
        df[LABEL_COL].replace(self.inverse_label_mapping, inplace=True)
        df = pd.DataFrame.copy(self.data_frame)
        df[LABEL_COL].replace(self.inverse_label_mapping, inplace=True)
        df = pd.DataFrame(df[[LABEL_COL, MISSING_CCS_FIN]].value_counts().sort_index())
        df.reset_index(inplace=True)
        df.rename({0: 'Frequency'}, inplace=True, axis='columns')
        df.sort_values(by=[MISSING_CCS_FIN, LABEL_COL], inplace=True)
        # Frequency of the correct class per missing_ccs_fin value, used as
        # denominator for the per-label frequency ratio.
        f_vals = df.loc[df[LABEL_COL] == self.correct_class][[MISSING_CCS_FIN, 'Frequency']].values
        vals = dict(zip(f_vals[:, 0], f_vals[:, 1]))

        def div(row, val):
            return row['Frequency'] / val

        df['ratio_1_0'] = df.apply(
            lambda row: div(row, vals[True]) if str2bool(row.missing_ccs_fin) else div(row, vals[False]), axis=1)
        fname = os.path.join(self.dataset_folder, "label_frequency.csv")
        df.to_csv(fname)

    def plot_class_distribution(self):
        """Save a horizontal bar chart of label frequencies per CCS-FIN value."""
        fig_param = {'facecolor': 'w', 'edgecolor': 'w', 'transparent': False, 'dpi': 800, 'bbox_inches': 'tight',
                     'pad_inches': 0.05}
        dfs = []
        data_frame = pd.DataFrame.copy(self.data_frame)
        if MISSING_CCS_FIN in data_frame.columns:
            # One sub-plot per missing_ccs_fin value.
            for val in self.ccs_fin_array:
                df = data_frame[data_frame[MISSING_CCS_FIN] == val]
                dfs.append((df, val))
        else:
            dfs.append((data_frame, 'NA'))
        n_r = len(dfs)
        # NOTE(review): with n_r == 1, plt.subplots returns a single Axes
        # (not iterable), so zip(dfs, axs) below would fail — confirm.
        fig, axs = plt.subplots(nrows=n_r, ncols=1, figsize=(5, 3 * n_r + 2), frameon=True, edgecolor='k',
                                facecolor='white')
        title = ''
        for (df, val), ax in zip(dfs, axs):
            # Show human-readable labels on the axis.
            df[LABEL_COL].replace(self.inverse_label_mapping, inplace=True)
            if MISSING_CCS_FIN in data_frame.columns:
                title = ' Missing-CCS-FIN ' + str(val)
            d = dict(df[LABEL_COL].value_counts())
            ax.barh(list(d.keys()), list(d.values()), color="r", align="center")
            ax.set_yticks(range(len(d)))
            ax.set_yticklabels(list(d.keys()))
            ax.set_title(title, y=0.95, fontsize=10)
            ax.spines['right'].set_visible(False)
            ax.spines['top'].set_visible(False)
            ax.set_xlabel("Label Frequency")
        fname = os.path.join(self.dataset_folder, "plot_label_frequency.png")
        fig_param['fname'] = fname
        fig.savefig(**fig_param)

    def get_data_class_label(self, class_label=1, missing_ccs_fin=False):
        """Return (x, y) for one-vs-correct binary classification.

        class_label == 0 returns the full multiclass data unchanged;
        otherwise rows of the given class and class 0 are selected and the
        given class is re-labeled as 1.
        """
        if MISSING_CCS_FIN in self.data_frame.columns:
            df = self.data_frame[self.data_frame[MISSING_CCS_FIN] == missing_ccs_fin]
        else:
            df = self.data_frame
        if class_label == 0:
            x, y = self.get_data(df)
        else:
            p = [0, class_label]
            df = df[df.label.isin(p)]
            df[LABEL_COL].replace([class_label], 1, inplace=True)
            x, y = df[self.feature_names].values, df[LABEL_COL].values.flatten()
        return x, y

    def get_data(self, df):
        """Return (features, labels) arrays for the given dataframe."""
        x, y = df[self.feature_names].values, df[LABEL_COL].values.flatten()
        return x, y
| StarcoderdataPython |
3441859 | <reponame>RandomKiddo/ProjectEuler
# Advent of Code day 6, part 2: for every group (groups are separated by a
# blank line), count the questions answered "yes" by *every* member, and sum
# those counts over all groups.
sheets = open('daysixdata.txt').read().split('\n\n')

total = 0
for sheet in sheets:
    members = [list(line) for line in sheet.split('\n')]
    # Intersect the members' answers; a single-member group keeps its answer
    # list untouched (matching the original list-based semantics).
    common = members[0]
    for member in members[1:]:
        common = list(set(common) & set(member))
    total += len(common)
    print(total, len(common))

print(total)
| StarcoderdataPython |
5031346 | <gh_stars>10-100
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.access import access_pb2 as yandex_dot_cloud_dot_access_dot_access__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud.resourcemanager.v1 import cloud_pb2 as yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__pb2
from yandex.cloud.resourcemanager.v1 import cloud_service_pb2 as yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2
class CloudServiceStub(object):
"""A set of methods for managing Cloud resources.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/Get',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.GetCloudRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__pb2.Cloud.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/List',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/Create',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.CreateCloudRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/Update',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.UpdateCloudRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/Delete',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.DeleteCloudRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/ListOperations',
request_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsResponse.FromString,
)
self.ListAccessBindings = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/ListAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.FromString,
)
self.SetAccessBindings = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/SetAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateAccessBindings = channel.unary_unary(
'/yandex.cloud.resourcemanager.v1.CloudService/UpdateAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class CloudServiceServicer(object):
    """A set of methods for managing Cloud resources.

    NOTE: machine-generated gRPC servicer skeleton (see the "DO NOT EDIT"
    header of this module).  Every handler below is a placeholder that
    reports UNIMPLEMENTED; subclass this class and override the methods to
    provide a real implementation.
    """

    def Get(self, request, context):
        """Returns the specified Cloud resource.
        To get the list of available Cloud resources, make a [List] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Retrieves the list of Cloud resources.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a cloud in the specified organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Updates the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """Lists operations for the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListAccessBindings(self, request, context):
        """access
        Lists access bindings for the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetAccessBindings(self, request, context):
        """Sets access bindings for the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateAccessBindings(self, request, context):
        """Updates access bindings for the specified cloud.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_CloudServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.GetCloudRequest.FromString,
response_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__pb2.Cloud.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.CreateCloudRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.UpdateCloudRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.DeleteCloudRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsResponse.SerializeToString,
),
'ListAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.ListAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.SerializeToString,
),
'SetAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.SetAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.UpdateAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.resourcemanager.v1.CloudService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class CloudService(object):
"""A set of methods for managing Cloud resources.
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/Get',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.GetCloudRequest.SerializeToString,
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__pb2.Cloud.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/List',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsRequest.SerializeToString,
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/Create',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.CreateCloudRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/Update',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.UpdateCloudRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/Delete',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.DeleteCloudRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/ListOperations',
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsRequest.SerializeToString,
yandex_dot_cloud_dot_resourcemanager_dot_v1_dot_cloud__service__pb2.ListCloudOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListAccessBindings(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/ListAccessBindings',
yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.SerializeToString,
yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetAccessBindings(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/SetAccessBindings',
yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAccessBindings(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.resourcemanager.v1.CloudService/UpdateAccessBindings',
yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| StarcoderdataPython |
6431809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2017 <NAME> <<EMAIL>>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
from __future__ import absolute_import
from . import lib
from . import game
from . import gui
from . import main
from . import maps
from . import mouse
from . import picture
from . import pictures
from . import savegame
| StarcoderdataPython |
390306 | import numpy
import numpy.linalg
from naive_bayes import gaussian
def distance_matrix(data):
    """Return the symmetric matrix of pairwise Euclidean distances.

    data : 2-D array, one observation per row.
    Returns an (n, n) array D with D[i, j] = ||data[i] - data[j]||.
    """
    n = data.shape[0]
    D = numpy.zeros((n, n))
    for i in range(n):  # range (not py2-only xrange) works on both 2 and 3
        for j in range(i):
            D[i, j] = numpy.linalg.norm(data[i, :] - data[j, :])
            D[j, i] = D[i, j]  # mirror: the metric is symmetric
    return D
def mutual_proximity(distance_matrix):
    """Return the mutual proximity matrix for a given distance matrix.

    Please, see:
    USING MUTUAL PROXIMITY TO IMPROVE CONTENT-BASED AUDIO SIMILARITY
    Schnitzer, Flexer, Schedl, Widmer
    Proceedings of the 12th ISMIR (2011)
    """
    # Per-column variance and mean of the distances (one Gaussian per point).
    variances = numpy.var(distance_matrix, axis=0)
    means = numpy.mean(distance_matrix, axis=0)
    D = numpy.zeros(distance_matrix.shape)
    for i in range(distance_matrix.shape[0]):  # range: py2/py3 compatible
        for j in range(i):
            D[i, j] = \
                (1 - gaussian(distance_matrix[i, j], means[i], variances[i])) * \
                (1 - gaussian(distance_matrix[i, j], means[j], variances[j]))
            D[j, i] = D[i, j]  # symmetric by construction
    return D
def minimum_subset_distance(D, limits1, limits2):
    """Return, per element of the first subset, the minimum distance to the second subset.

    D       : pairwise distance matrix.
    limits1 : (start, end) bounds of the first subset (only end is used).
    limits2 : (start, end) bounds of the second subset.
    Scores start at 1.0, so distances greater than 1 are clipped to 1
    (suitable for mutual-proximity values, which lie in [0, 1]).
    NOTE(review): the ``- 1`` column offset implies 1-based limits — confirm
    against the callers.
    """
    score = numpy.ones((limits1[1]))
    for i in range(limits1[1]):  # range: py2/py3 compatible
        for j in range(limits2[1] - limits2[0]):
            score[i] = min(score[i], D[i, j + limits2[0] - 1])
    return score
def group_subset_distance(D, limits1, limits2):
    """Return, per element of the first subset, the product of its distances to the second subset.

    Same indexing conventions as :func:`minimum_subset_distance`; scores start
    at 1.0 (the multiplicative identity).
    NOTE(review): the ``- 1`` column offset implies 1-based limits — confirm
    against the callers.
    """
    score = numpy.ones((limits1[1]))
    for i in range(limits1[1]):  # range: py2/py3 compatible
        for j in range(limits2[1] - limits2[0]):
            score[i] = score[i] * D[i, j + limits2[0] - 1]
    return score
#A = numpy.array( [[1, 2, 3], [3, 2, 1], [0, 0, 0]] )
#D = distance_matrix(A)
#print D
#D0 = mutual_proximity(D)
#print D0
| StarcoderdataPython |
4838301 | name = 'EMBL2checklists'
__all__ = ['ChecklistOps', 'globalVariables', 'EMBL2checklistsMain', 'PrerequisiteOps']
| StarcoderdataPython |
396306 | <filename>so/labs/lab4/lock.py
from threading import Thread
from threading import RLock
import time
import random
g = 0
lock = RLock()
def incrementa():
global g
lock.acquire()
tmp = g # le valor
tmp += 1 # incrementa
lock.acquire()
time.sleep(random.randrange(0, 2))
lock.release()
g = tmp # escreve
lock.release()
if __name__=="__main__":
start = time.time()
thread1 = Thread(target=incrementa)
thread1.start()
thread2 = Thread(target=incrementa)
thread2.start()
thread1.join()
thread2.join()
print(g)
end = time.time()
print('Time taken in seconds -', end - start) | StarcoderdataPython |
3545350 | <reponame>damshenas/aws-cdk-examples
#!/usr/bin/env python3
from aws_cdk import App
from api_sqs_lambda.api_sqs_lambda_stack import ApiSqsLambdaStack
# Instantiate the CDK app, register the API->SQS->Lambda stack under the id
# "ApiSqsLambdaStack", and emit the CloudFormation template via synth().
app = App()
ApiSqsLambdaStack(app, "ApiSqsLambdaStack")
app.synth()
| StarcoderdataPython |
9625080 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals, absolute_import
from datetime import timedelta
"""
Django settings for test_project project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# from pymongo import MongoClient
from mongoengine import connect
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# If DEBUG = False, ALLOWED_HOSTS must be configured.
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djcelery',
'djkombu', # Add support for the django:// broker
'accounts',
'dashboard',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'accounts.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'fomalhaut.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fomalhaut.wsgi.application'
# Set this to True to wrap each HTTP request in a transaction on this database.
ATOMIC_REQUESTS = True
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'test_db',
# 'USER': 'root',
# 'PASSWORD': '<PASSWORD>',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# }
# }
# Session expires when the browser closes; no expire_date is set on the cookie.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
# TIME_ZONE = 'UTC'
# 使用机器的系统时间,不做UTC的转换
# TIME_ZONE = None
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False # 不使用时区的时间,默认使用机器的系统时间
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# 定位到哪个文件夹
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# TEMPLATE_DIRS = (
# os.path.join(BASE_DIR, 'templates'),
# )
LOGGING_LEVEL = 'DEBUG' if DEBUG else 'INFO'
LOGGING_HANDLERS = ['console'] if DEBUG else ['file']
# LOGGING_LEVEL = 'INFO'
# LOGGING_HANDLERS = ['file']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'file': {
'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# Without a concurrency-safe log handler, log records can be lost
# when multiple instances run at once.
'class': 'cloghandler.ConcurrentRotatingFileHandler',
# Rotate the log file when it reaches 10 MB.
'maxBytes': 1024 * 1024 * 10,
'backupCount': 12,
# If delay is true,
# then file opening is deferred until the first call to emit().
'delay': True,
'filename': 'logs/mysite.log',
'formatter': 'verbose'
}
},
'loggers': {
'django': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
'propagate': False,
},
'django.request': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
'propagate': False,
},
'accounts': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
},
'dashboard': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
},
'common': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
},
'fomalhaut': {
'handlers': LOGGING_HANDLERS,
'level': LOGGING_LEVEL,
},
}
}
# Keep requests to backend sites from tying up resources for too long.
# Connect timeout (seconds) for async HTTP requests —
# covers connection establishment only.
DEFAULT_ASYNC_HTTP_CONNECT_TIMEOUT = 20
# 异步HTTP请求时的 request 超时时间
# 整个请求的时间
DEFAULT_ASYNC_HTTP_REQUEST_TIMEOUT = 20
# ACCESS_TOKEN 的过期时间, 单位秒
DEFAULT_ACCESS_TOKEN_EXPIRE_SECONDS = 7 * 3600 * 24
# REFRESH_TOKEN 的过期时间, 单位秒
DEFAULT_REFRESH_TOKEN_EXPIRE_SECONDS = 20 * 3600 * 24
# 访问日志默认分页大小
DEFAULT_ACCESS_LOG_PAGE_SIZE = 200
# Maximum number of items shown in the ECharts pie chart.
ECHARTS_PIPE_PLOT_MAX_NUM = 20
# Because the body can be very long, Chrome may hang on byte (binary) files
# due to encoding issues; Safari does not.
# Maximum body length shown in the access-log detail view.
ACCESS_LOG_DETAIL_MAX_BODY_LENGTH = 1024 * 50
# 使用 Redis 存储 Session 数据
# 注意,密码要替换成服务器上的
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = '<PASSWORD>'
# MongoDB 配置
MONGO_HOST = '127.0.0.1'
MONGO_PORT = 27017
MONGO_USERNAME = 'fomalhaut_test'
MONGO_PASSWORD = '<PASSWORD>'
MONGO_DBNAME = 'fomalhaut_test'
connect(
db=MONGO_DBNAME,
username=MONGO_USERNAME,
password=<PASSWORD>,
host=MONGO_HOST,
port=MONGO_PORT
)
# 创建一个数据库连接池
# DB_CLIENT = MongoClient(MONGO_HOST, MONGO_PORT, max_pool_size=200)
# # 验证数据库用户名和密码
# DB_CLIENT[MONGO_DBNAME].authenticate(
# MONGO_USERNAME, MONGO_PASSWORD, mechanism='SCRAM-SHA-1')
# MONGO_DB = DB_CLIENT[MONGO_DBNAME]
#
# client 配置 redis 中 key 前缀
CLIENT_CONFIG_REDIS_PREFIX = 'config'
# redis 中统计分析日志列表的 key
ANALYTICS_LOG_REDIS_LIST_KEY = 'logs'
# 访问日志,数据库保存天数
ACCESS_LOG_KEEP_DAYS = 60
# 从 redis 迁移数据时,批量处理的最大数量
REDIS_ACCESS_LOG_TRANSFER_BATCH_COUNT = 100
# 网站发送出去的邮箱账号设定
EMAIL_SMTP_SERVER = 'localhost'
EMAIL_NOTIFY_NAME = 'Fomalhaut <<EMAIL>>'
DEFAULT_EMAIL_NOTIFY_SUBJECT = '来自 Fomalhaut 的邮件通知'
# 配置djcelery相关参数,ResultStore默认存储在数据库可不必重写
import djcelery
djcelery.setup_loader()
BROKER_URL = 'django://'
# 任务定义所在的模块
CELERY_IMPORTS = ('dashboard.tasks', 'common.tasks')
# 使用和Django一样的时区
CELERY_TIMEZONE = TIME_ZONE
# 不使用时区的时间,默认使用机器的系统时间
# 否则会使用UTC时间
CELERY_ENABLE_UTC = False
# 以上为基本配置,以下为周期性任务定义,以 celerybeat_开头的
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
# 可以配置的字段, 可以查看这里
# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html#available-fields
# 添加定期任务
CELERYBEAT_SCHEDULE = {
# 定期清除旧的访问日志
'clear_old_access_logs': {
'task': 'dashboard.tasks.clear_old_access_logs',
'schedule': timedelta(days=1),
'options': {
# 任务的过期时间, 可以用 int (单位秒) 或者 datetime
# 该时间后的任务将不会执行
'expires': 3600 * 24 * 5
}
},
# 从 mongodb 中读取和解析日志
'parse_access_logs': {
'task': 'dashboard.tasks.parse_access_logs',
# 每隔几分钟执行一次
'schedule': timedelta(seconds=50),
'options': {
'expires': 600
}
},
# 从 redis 中读取日志存储到 mongodb 中
'transfer_access_logs': {
'task': 'dashboard.tasks.transfer_access_logs',
# 每隔几分钟执行一次
'schedule': timedelta(seconds=30),
'options': {
'expires': 600
}
},
}
# 发送邮件的时候要使用该名称来拼验证URL地址
SITE_DOMAIN = '127.0.0.1' # 站点域名
SITE_NAME = 'Fomalhaut' # 站点名称
# Django upgrading to 1.9 error "AppRegistryNotReady: Apps aren't loaded yet."
# 添加如下代码解决
import django
django.setup()
| StarcoderdataPython |
def hello():
    """Return a friendly greeting string."""
    greeting = "Hello Python!"
    return greeting
| StarcoderdataPython |
6642985 | <filename>section2/oop_goral.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 10:52:55 2020
@author: tgoral
"""
# OBJECT ORIENTED PROGRAM
# Banner for the OOP demo section below.
print("OBJECT ORIENTED PROGRAMMING")
class Student:
    """A student with a name and a collection of numeric grades."""

    def __init__(self, name, grades):
        self.name = name
        self.grades = grades

    def average_grades(self):
        """Return the arithmetic mean of the grades."""
        total = sum(self.grades)
        count = len(self.grades)
        return total / count
# Instantiate two students and show attribute access plus the computed average.
student = Student('Bob',(100,100,93,78,90))
print(student.name)
print(student.grades)
print(student.average_grades())
print()
student2 = Student('Rolf',(100,80,73,88,90))
print(student2.name)
print(student2.grades)
print(student2.average_grades())
# MAGIC METHODS
print("\nMAGIC METHOD")
class Person:
    """A person with a name and an age, demonstrating the __repr__ magic method."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __repr__(self):
        """Unambiguous representation, e.g. ``<Bob, 35>``.

        Also used by print() since no __str__ is defined.
        """
        return f"<{self.name}, {self.age}>"
# print() falls back to __repr__ here because Person defines no __str__.
bob = Person('Bob',35)
print(bob)
class Store:
    """A named store holding items, each a {'name': ..., 'price': ...} dict."""

    def __init__(self, name):
        self.name = name
        self.items = []

    def add_item(self, name, price):
        """Record a new item with the given name and price."""
        self.items.append({'name': name, 'price': price})

    def stock_price(self):
        """Return the sum of all item prices (0 for an empty store)."""
        return sum(entry['price'] for entry in self.items)
6618354 | #!/usr/bin/env python3
from ncclient import manager
# Open a NETCONF-over-SSH session to the device using the Nexus device profile.
conn = manager.connect(
    host='192.168.178.142',
    port=22,
    username='admin',
    password='<PASSWORD>',
    hostkey_verify=False,  # skip SSH host-key checking (lab use only)
    device_params={'name': 'nexus'},
    look_for_keys=False  # password auth only; do not try SSH keys
)

# Print every NETCONF capability advertised by the server, then close cleanly.
for value in conn.server_capabilities:
    print(value)

conn.close_session()
#Here is the output
#root@server:/home/python_examples/ncclient# python3 first_ncc_example.py
#urn:ietf:params:netconf:capability:writable-running:1.0
#urn:ietf:params:netconf:capability:url:1.0?scheme=file
#urn:ietf:params:netconf:capability:candidate:1.0
#urn:ietf:params:netconf:capability:rollback-on-error:1.0
#urn:ietf:params:netconf:capability:confirmed-commit:1.0
#urn:ietf:params:netconf:base:1.0
#urn:ietf:params:netconf:capability:validate:1.0
#urn:ietf:params:xml:ns:netconf:base:1.0
#root@server:/home/python_examples/ncclient#
#INSTALL ncclient:
#(venv) $ git clone https://github.com/ncclient/ncclient
#(venv) $ cd ncclient/
#(venv) $ python setup.py install
| StarcoderdataPython |
8161701 | from .utils import get_asset_path, render_from_layout
import matplotlib.pyplot as plt
import numpy as np
# Nine cell states: the empty cell plus tiles 1-8 of the 3x3 sliding puzzle.
NUM_OBJECTS = 9
EMPTY, TILE1, TILE2, TILE3, TILE4, TILE5, TILE6, TILE7, TILE8 = range(NUM_OBJECTS)
def generate_tile_token(tile_num):
    """Read the sprite image for tile *tile_num*; None selects the empty-cell sprite."""
    if tile_num is None:
        asset_name = 'slidetile_empty.png'
    else:
        asset_name = 'slidetile_{}.png'.format(tile_num)
    return plt.imread(get_asset_path(asset_name))
# Sprite image for every cell state, loaded once at import time.
TOKEN_IMAGES = {
    EMPTY : generate_tile_token(None),
    TILE1 : generate_tile_token(1),
    TILE2 : generate_tile_token(2),
    TILE3 : generate_tile_token(3),
    TILE4 : generate_tile_token(4),
    TILE5 : generate_tile_token(5),
    TILE6 : generate_tile_token(6),
    TILE7 : generate_tile_token(7),
    TILE8 : generate_tile_token(8),
}
def build_layout(obs):
    """Translate the `at(tile, x, y)` literals in *obs* into a 3x3 grid of tile numbers."""
    layout = EMPTY * np.ones((3, 3), dtype=int)
    for lit in obs:
        if lit.predicate.name != 'at':
            continue
        tile, x, y = lit.variables
        assert tile.startswith("t")
        tile_num = int(tile[1:])
        assert x.startswith("x")
        col = int(x[1:]) - 1  # x1..x3 -> column 0..2
        assert y.startswith("y")
        row = int(y[1:]) - 1  # y1..y3 -> row 0..2
        layout[row, col] = tile_num
    return layout
def get_token_images(obs_cell):
    """Return the sprite for *obs_cell* wrapped in a single-element list."""
    sprite = TOKEN_IMAGES[obs_cell]
    return [sprite]
def render(obs, mode='human', close=False):
    """Render observation *obs* as an image of the 3x3 slide-tile board.

    `mode` and `close` are accepted for API compatibility but unused here.
    """
    layout = build_layout(obs)
    return render_from_layout(layout, get_token_images)
| StarcoderdataPython |
1602348 | <gh_stars>0
from blox.modules.module import module
from blox.exceptions import *
import urllib2,base64,logging
logger = logging.getLogger( 'blox.url' )
class url(module):
    """Input module that fetches the body of an HTTP URL (Python 2 / urllib2)."""

    def input(self, params):
        """Fetch params['url'] and return the stripped response body.

        Optional params: 'proxyname' (HTTP proxy), 'username'/'password'
        (HTTP Basic auth).  Raises ParseException on any URL error.
        """
        logger.debug('getting data from url: %s'%params['url'])
        request = urllib2.Request( params['url'] )
        if 'proxyname' in params:
            # NOTE: install_opener mutates process-global urllib2 state — the
            # proxy remains installed for subsequent requests.
            proxy = urllib2.ProxyHandler({'http': params['proxyname'] })
            opener = urllib2.build_opener( proxy )
            urllib2.install_opener( opener )
        if 'username' in params and 'password' in params:
            # Base64 of "user:password" for the HTTP Basic Authorization header;
            # encodestring inserts newlines, hence the replace.
            base64auth = base64.encodestring('%s:%s' % ( params['username'], params['password'] )).replace('\n','')
            request.add_header("Authorization", "Basic %s" % base64auth)
        try:
            result = urllib2.urlopen( request )
        except urllib2.URLError:
            raise ParseException( 'URL Error' )
        return result.read().strip()
| StarcoderdataPython |
4964907 | <gh_stars>1-10
"""Support for toggling Amcrest IP camera settings."""
import logging
from homeassistant.const import CONF_NAME, CONF_SWITCHES, STATE_OFF, STATE_ON
from homeassistant.helpers.entity import ToggleEntity
from . import DATA_AMCREST, SWITCHES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the IP Amcrest camera switch platform."""
    if discovery_info is None:
        return

    name = discovery_info[CONF_NAME]
    camera = hass.data[DATA_AMCREST][name].device
    entities = [
        AmcrestSwitch(setting, camera, name)
        for setting in discovery_info[CONF_SWITCHES]
    ]
    async_add_entities(entities, True)
class AmcrestSwitch(ToggleEntity):
    """Toggle entity backed by a single Amcrest camera setting."""

    def __init__(self, setting, camera, name):
        """Store the setting key and camera handle; build the display name."""
        self._setting = setting
        self._camera = camera
        meta = SWITCHES[setting]
        self._name = '{} {}'.format(meta[0], name)
        self._icon = meta[1]
        self._state = None

    @property
    def name(self):
        """Friendly name of this switch."""
        return self._name

    @property
    def state(self):
        """Current state string of the switch."""
        return self._state

    @property
    def is_on(self):
        """Whether the underlying camera setting is enabled."""
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Enable the camera setting."""
        self._apply('true')

    def turn_off(self, **kwargs):
        """Disable the camera setting."""
        self._apply('false')

    def _apply(self, value):
        """Write the 'true'/'false' string flag expected by the camera API."""
        if self._setting == 'motion_detection':
            self._camera.motion_detection = value
        elif self._setting == 'motion_recording':
            self._camera.motion_recording = value

    def update(self):
        """Poll the camera and refresh the cached on/off state."""
        _LOGGER.debug("Polling state for setting: %s ", self._name)
        if self._setting == 'motion_detection':
            detection = self._camera.is_motion_detector_on()
        elif self._setting == 'motion_recording':
            detection = self._camera.is_record_on_motion_detection()
        self._state = STATE_ON if detection else STATE_OFF

    @property
    def icon(self):
        """Icon shown for this switch."""
        return self._icon
| StarcoderdataPython |
8031199 | <reponame>lileiigithub/PetdogRecognition
# -*- coding: utf-8 -*-
from PIL import Image
class ProcessImg(object):
    """Center-crop an image to a square and scale it to ``size`` (224x224)."""

    # Target output dimensions (width, height).
    size = (224, 224)

    def __init__(self, _path):
        self.img_path = _path

    def resize(self):
        """Return the image center-cropped to a square and scaled to 224x224.

        The result is also written to ``resize.jpg`` in the working directory.
        Returns None (implicitly) when the image cannot be processed.
        """
        try:
            im = Image.open(self.img_path)
            x = im.size[0]
            y = im.size[1]
            # Compute a centered square crop box over the shorter edge.
            if x > y:
                box = (int((x - y) / 2), 0, int((x - y) / 2) + y, y)
            elif x < y:
                box = (0, int((y - x) / 2), x, int((y - x) / 2) + x)
            else:
                box = (0, 0, x, y)
            region = im.crop(box)  # clip a region
            region.thumbnail(ProcessImg.size)  # shrink in place, keeping aspect ratio
            # thumbnail() never enlarges, so explicitly upscale when the crop is
            # still smaller than the 224x224 target.
            # BUG FIX: the original compared region.size against the *original*
            # image size (x, y) instead of the target, so small square images
            # were saved without being scaled up to 224x224.
            if region.size[0] < ProcessImg.size[0] or region.size[1] < ProcessImg.size[1]:
                region = region.resize(ProcessImg.size, Image.ANTIALIAS)
            region.save("resize.jpg", "JPEG")
            return region
        except IOError:
            print("cannot create thumbnail for", self.img_path)
| StarcoderdataPython |
1927400 | from asl._utils import load_heroes_dataset
| StarcoderdataPython |
11367684 | <gh_stars>10-100
import pytest
from luracoin.pow import proof_of_work
@pytest.mark.skip(reason="WIP")
def test_proof_of_work(): # type: ignore
    # Intentionally-failing placeholder, skipped until proof_of_work gets real coverage.
    assert False
| StarcoderdataPython |
8165123 | <filename>rlexperiments/common/tf_util.py
import os
import sys
import multiprocessing
import numpy as np
import tensorflow as tf
def create_session(cuda_visible_devices='0', gpu_memory_fraction=0.5):
    """Create a TensorFlow session pinned to the given GPUs with bounded GPU memory.

    Thread pools are sized to the CPU count (halved on macOS, where cpu_count
    reports hyperthreads).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
    n_threads = multiprocessing.cpu_count()
    if sys.platform == 'darwin':
        n_threads //= 2
    config = tf.ConfigProto(
        allow_soft_placement=True,
        intra_op_parallelism_threads=n_threads,
        inter_op_parallelism_threads=n_threads,
    )
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
    return tf.Session(config=config)
def explained_variance(ypred, y):
    """Fraction of the variance of *y* that *ypred* explains.

    Computes 1 - Var[y - ypred] / Var[y].

    Interpretation:
      ev = 0  => might as well have predicted zero
      ev = 1  => perfect prediction
      ev < 0  => worse than just predicting zero

    Returns NaN when y has zero variance. Both inputs must be 1-D arrays.
    """
    assert ypred.ndim == 1 and y.ndim == 1
    variance = np.var(y)
    if variance == 0:
        return np.nan
    return 1 - np.var(y - ypred) / variance
| StarcoderdataPython |
286454 | <filename>steam/ext/dota2/protobufs/dota_client_enums.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: dota_client_enums.proto
# plugin: python-betterproto
import betterproto
class ETournamentTemplate(betterproto.Enum):
    """Tournament template ids. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    NONE = 0
    AutomatedWin3 = 1
class ETournamentGameState(betterproto.Enum):
    """Per-game tournament states. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    Unknown = 0
    Canceled = 1
    Scheduled = 2
    Active = 3
    RadVictory = 20
    DireVictory = 21
    RadVictoryByForfeit = 22
    DireVictoryByForfeit = 23
    ServerFailure = 40
    NotNeeded = 41
class ETournamentTeamState(betterproto.Enum):
    """Per-team tournament states. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    Unknown = 0
    Node1 = 1
    NodeMax = 1024
    Eliminated = 14003
    Forfeited = 14004
    Finished1st = 15001
    Finished2nd = 15002
    Finished3rd = 15003
    Finished4th = 15004
    Finished5th = 15005
    Finished6th = 15006
    Finished7th = 15007
    Finished8th = 15008
    Finished9th = 15009
    Finished10th = 15010
    Finished11th = 15011
    Finished12th = 15012
    Finished13th = 15013
    Finished14th = 15014
    Finished15th = 15015
    Finished16th = 15016
class ETournamentState(betterproto.Enum):
    """Overall tournament states. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    Unknown = 0
    CanceledByAdmin = 1
    Completed = 2
    Merged = 3
    ServerFailure = 4
    TeamAbandoned = 5
    TeamTimeoutForfeit = 6
    TeamTimeoutRefund = 7
    ServerFailureGrantedVictory = 8
    TeamTimeoutGrantedVictory = 9
    InProgress = 100
    WaitingToMerge = 101
class ETournamentNodeState(betterproto.Enum):
    """Bracket-node states. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    Unknown = 0
    Canceled = 1
    TeamsNotYetAssigned = 2
    InBetweenGames = 3
    GameInProgress = 4
    AWon = 5
    BWon = 6
    AWonByForfeit = 7
    BWonByForfeit = 8
    ABye = 9
    AAbandoned = 10
    ServerFailure = 11
    ATimeoutForfeit = 12
    ATimeoutRefund = 13
class EdotaGroupMergeResult(betterproto.Enum):
    """Party/group merge result codes. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    OK = 0
    FailedGeneric = 1
    NotLeader = 2
    TooManyPlayers = 3
    TooManyCoaches = 4
    EngineMismatch = 5
    NoSuchGroup = 6
    OtherGroupNotOpen = 7
    AlreadyInvited = 8
    NotInvited = 9
class EPartyBeaconType(betterproto.Enum):
    """Party beacon types. Generated from dota_client_enums.proto — regenerate, do not hand-edit."""
    Available = 0
    Joinable = 1
| StarcoderdataPython |
11343384 | from src.extractor.outlook_extractor import OutlookExtractor
def test_extract_data():
    """The most recently extracted message should come from the expected sender."""
    extracted = OutlookExtractor().extract_data()
    assert extracted[-1].SenderEmailAddress == "<EMAIL>"
| StarcoderdataPython |
# Read a sentence and print its words in reverse order.
# Fix: the original bound the word list to the name `str`, shadowing the builtin.
sentence = input("Enter the string: ")
words = sentence.split()
words.reverse()
print("Reversed String :", " ".join(words))
| StarcoderdataPython |
3252864 | <reponame>sharechanxd/Risk-Index-Covid19
dict_iso={'afghanistan': 'Afghanistan',
'albania': 'Albania',
'algeria': 'Algeria',
'andorra': 'Andorra',
'angola': 'Angola',
'antigua-and-barbuda': 'Antigua and Barbuda',
'argentina': 'Argentina',
'armenia': 'Armenia',
'aruba': 'Aruba',
'australia': 'Australia',
'austria': 'Austria',
'azerbaijan': 'Azerbaijan',
'bahamas': 'Bahamas',
'bahrain': 'Bahrain',
'bangladesh': 'Bangladesh',
'Barbados': 'Barbados',
'belarus': 'Belarus',
'belgium': 'Belgium',
'belize': 'Belize',
'benin': 'Benin',
'bermuda': 'Bermuda',
'bhutan': 'Bhutan',
'bolivia': 'Bolivia, Plurinational State of',
'bosnia-and-herzegovina': 'Bosnia and Herzegovina',
'botswana': 'Botswana',
'brazil': 'Brazil',
'bulgaria': 'Bulgaria',
'burkina-faso': 'Burkina Faso',
'burundi': 'Burundi',
'cabo-verde': 'Cape Verde',
'cambodia': 'Cambodia',
'cameroon': 'Cameroon',
'canada': 'Canada',
'cayman-islands': 'Cayman Islands',
'central-african-republic': 'Central African Republic',
'chad': 'Chad',
'chile': 'Chile',
'china': 'China',
'china-hong-kong-sar': 'Hong Kong,China',
'china-macao-sar': 'Macao,China',
'colombia': 'Colombia',
'comoros': 'Comoros',
'congo': 'Congo',
'costa-rica': 'Costa Rica',
'cote-d-ivoire': "Côte d'Ivoire",
'croatia': 'Croatia',
'cuba': 'Cuba',
'cyprus': 'Cyprus',
'czech-republic': 'Czech Republic',
'democratic-republic-of-the-congo': 'Congo, the Democratic Republic of the',
'denmark': 'Denmark',
'djibouti': 'Djibouti',
'dominican-republic': 'Dominican Republic',
'ecuador': 'Ecuador',
'egypt': 'Egypt',
'el-salvador': 'El Salvador',
'equatorial-guinea': 'Equatorial Guinea',
'eritrea': 'Eritrea',
'estonia': 'Estonia',
'ethiopia': 'Ethiopia',
'faeroe-islands': 'Faroe Islands',
'fiji': 'Fiji',
'finland': 'Finland',
'france': 'France',
'french-guiana': 'French Guiana',
'french-polynesia': 'French Polynesia',
'gabon': 'Gabon',
'gambia': 'Gambia',
'georgia': 'Georgia',
'germany': 'Germany',
'ghana': 'Ghana',
'gibraltar': 'Gibraltar',
'greece': 'Greece',
'grenada': 'Grenada',
'guadeloupe': 'Guadeloupe',
'guatemala': 'Guatemala',
'guinea': 'Guinea',
'guinea-bissau': 'Guinea-Bissau',
'guyana': 'Guyana',
'haiti': 'Haiti',
'honduras': 'Honduras',
'hungary': 'Hungary',
'iceland': 'Iceland',
'india': 'India',
'indonesia': 'Indonesia',
'iran': 'Iran, Islamic Republic of',
'iraq': 'Iraq',
'ireland': 'Ireland',
'israel': 'Israel',
'italy': 'Italy',
'jamaica': 'Jamaica',
'japan': 'Japan',
'jordan': 'Jordan',
'kazakhstan': 'Kazakhstan',
'kenya': 'Kenya',
'kuwait': 'Kuwait',
'kyrgyzstan': 'Kyrgyzstan',
'latvia': 'Latvia',
'lebanon': 'Lebanon',
'lesotho': 'Lesotho',
'liberia': 'Liberia',
'libya': 'Libya',
'liechtenstein': 'Liechtenstein',
'lithuania': 'Lithuania',
'luxembourg': 'Luxembourg',
'macedonia': 'North Macedonia',
'madagascar': 'Madagascar',
'malawi': 'Malawi',
'malaysia': 'Malaysia',
'maldives': 'Maldives',
'mali': 'Mali',
'malta': 'Malta',
'martinique': 'Martinique',
'mauritania': 'Mauritania',
'mauritius': 'Mauritius',
'mayotte': 'Mayotte',
'mexico': 'Mexico',
'moldova': 'Moldova, Republic of',
'monaco': 'Monaco',
'mongolia': 'Mongolia',
'montenegro': 'Montenegro',
'morocco': 'Morocco',
'mozambique': 'Mozambique',
'myanmar': 'Myanmar',
'namibia': 'Namibia',
'nepal': 'Nepal',
'netherlands': 'Netherlands',
'new-zealand': 'New Zealand',
'nicaragua': 'Nicaragua',
'niger': 'Niger',
'nigeria': 'Nigeria',
'norway': 'Norway',
'oman': 'Oman',
'pakistan': 'Pakistan',
'panama': 'Panama',
'papua-new-guinea': 'Papua New Guinea',
'paraguay': 'Paraguay',
'peru': 'Peru',
'philippines': 'Philippines',
'poland': 'Poland',
'portugal': 'Portugal',
'qatar': 'Qatar',
'reunion': 'Réunion',
'romania': 'Romania',
'russia': 'Russia',
'rwanda': 'Rwanda',
'saint-kitts-and-nevis': 'Saint Kitts and Nevis',
'saint-lucia': 'Saint Lucia',
'sao-tome-and-principe': 'Sao Tome and Principe',
'saudi-arabia': 'Saudi Arabia',
'senegal': 'Senegal',
'serbia': 'Serbia',
'seychelles': 'Seychelles',
'sierra-leone': 'Sierra Leone',
'singapore': 'Singapore',
'slovakia': 'Slovakia',
'slovenia': 'Slovenia',
'somalia': 'Somalia',
'south-africa': 'South Africa',
'south-korea': 'South Korea',
'spain': 'Spain',
'sri-lanka': 'Sri Lanka',
'state-of-palestine': 'Palestinian Territory, Occupied',
'sudan': 'Sudan',
'suriname': 'Suriname',
'swaziland': 'Swaziland',
'sweden': 'Sweden',
'switzerland': 'Switzerland',
'syria': 'Syrian Arab Republic',
'taiwan': 'Taiwan,China',
'tajikistan': 'Tajikistan',
'tanzania': 'Tanzania, United Republic of',
'thailand': 'Thailand',
'togo': 'Togo',
'trinidad-and-tobago': 'Trinidad and Tobago',
'tunisia': 'Tunisia',
'turkey': 'Turkey',
'turks-and-caicos-islands': 'Turks and Caicos Islands',
'uganda': 'Uganda',
'uk': 'United Kingdom',
'ukraine': 'Ukraine',
'united-arab-emirates': 'United Arab Emirates',
'uruguay': 'Uruguay',
'us': 'United States',
'uzbekistan': 'Uzbekistan',
'venezuela': 'Venezuela, Bolivarian Republic of',
'viet-nam': 'Viet Nam',
'western-sahara': 'Western Sahara',
'yemen': 'Yemen',
'zambia': 'Zambia',
'zimbabwe': 'Zimbabwe',
'faeroe-islands':'Faroe Islands',
'saint-vincent-and-the-grenadines':'Saint Vincent & the Grenadines',
'timor-leste':'Timor-Leste',
'grenada':'Grenada',
'new-caledonia':'New Caledonia',
'laos':'Lao People\'s Democratic Republic',
'dominica':'Dominica',
'falkland-islands-malvinas':'Falkland Islands',
'greenland':'Greenland',
'holy-see':'Holy See (Vatican City State)',
'anguilla':'Anguilla',
'south-sudan':'South Sudan'
}
cate={'china':'east asia',
'us':'north america',
'brazil':'south america',
'russia':'eastern europe',
'india':'south asia',
'uk':'western europe',
'spain':'western europe',
'peru':'south america',
'chile':'south america',
'italy':'western europe',
'iran':'west asia',
'mexico':'central america and mexico',
'pakistan':'west asia',
'turkey':'west asia',
'germany':'western europe',
'saudi-arabia':'west asia',
'france':'western europe',
'south-africa':'southern africa',
'bangladesh':'south asia',
'canada':'north america',
'qatar':'west asia',
'democratic-republic-of-the-congo':'central africa',
'colombia':'south america',
'egypt':'south-east mediterranean',
'sweden':'western europe',
'belarus':'eastern europe',
'belgium':'western europe',
'argentina':'south america',
'ecuador':'south america',
'indonesia':'southeast asia',
'netherlands':'western europe',
'united-arab-emirates':'west asia',
'iraq':'west asia',
'kuwait':'west asia',
'singapore':'southeast asia',
'ukraine':'eastern europe',
'portugal':'western europe',
'oman':'west asia',
'philippines':'southeast asia',
'poland':'eastern europe',
'panama':'central america and mexico',
'switzerland':'western europe',
'dominican-republic':'caribbean',
'afghanistan':'west asia',
'bolivia':'south america',
'romania':'eastern europe',
'bahrain':'west asia',
'ireland':'western europe',
'armenia':'eastern europe',
'nigeria':'west africa',
'israel':'south-east mediterranean',
'kazakhstan':'central asia',
'japan':'east asia',
'austria':'western europe',
'honduras':'central america and mexico',
'sao-tome-and-principe':'southeast asia',
'central-african-republic':'central africa',
'gabon':'central africa',
'ghana':'west africa',
'azerbaijan':'central asia',
'guatemala':'central america and mexico',
'moldova':'eastern europe',
'serbia':'eastern europe',
'algeria':'south-east mediterranean',
'nepal':'south asia',
'south-korea':'east asia',
'denmark':'western europe',
'cameroon':'central africa',
'morocco':'south-east mediterranean',
'czech-republic':'eastern europe',
'sudan':'east africa',
'cote-d-ivoire':'west africa',
'norway':'western europe',
'malaysia':'southeast asia',
'uzbekistan':'central asia',
'australia':'pacific region',
'finland':'western europe',
'saint-martin':'caribbean',
'senegal':'west africa',
'macedonia':'eastern europe',
'kenya':'east africa',
'el-salvador':'central america and mexico',
'guyana':'caribbean',
'tajikistan':'central asia',
'ethiopia':'east africa',
'guinea':'west africa',
'venezuela':'south america',
'jamaica':'caribbean',
'kyrgyzstan':'central asia',
'bulgaria':'eastern europe',
'djibouti':'east africa',
'luxembourg':'western europe',
'mauritania':'west africa',
'hungary':'eastern europe',
'bosnia-and-herzegovina':'eastern europe',
'french-guiana':'south america',
'grenada':'caribbean',
'greece':'western europe',
'thailand':'southeast asia',
'costa-rica':'central america and mexico',
'suriname':'caribbean',
'somalia':'east africa',
'croatia':'eastern europe',
'mayotte':'east africa',
'albania':'eastern europe',
'cuba':'caribbean',
'maldives':'south asia',
'nicaragua':'central america and mexico',
'equatorial-guinea':'central africa',
'mali':'west africa',
'paraguay':'south america',
'madagascar':'indian ocean islands',
'sri-lanka':'south asia',
'haiti':'caribbean',
'state-of-palestine':'missing',
'south-sudan':'east africa',
'estonia':'eastern europe',
'iceland':'western europe',
'lithuania':'eastern europe',
'lebanon':'south-east mediterranean',
'slovakia':'eastern europe',
'guinea-bissau':'west africa',
'slovenia':'eastern europe',
'zambia':'southern africa',
'new-zealand':'pacific region',
'sierra-leone':'west africa',
'china-hong-kong-sar':'east asia',
'tunisia':'south-east mediterranean',
'cabo-verde':'west africa',
'benin':'west africa',
'malawi':'southern africa',
'jordan':'south-east mediterranean',
'yemen':'west asia',
'latvia':'eastern europe',
'niger':'west africa',
'cyprus':'south-east mediterranean',
'burkina-faso':'west africa',
'uruguay':'south america',
'georgia':'eastern europe',
'rwanda':'east africa',
'chad':'west africa',
'mozambique':'southern africa',
'uganda':'east africa',
'andorra':'western europe',
'swaziland':'southern africa',
'liberia':'west africa',
'libya':'south-east mediterranean',
'malta':'south-east mediterranean',
'togo':'west africa',
'channel-islands':'western europe',
'zimbabwe':'southern africa',
'reunion':'indian ocean islands',
'tanzania':'southern africa',
'montenegro':'eastern europe',
'taiwan':'east asia',
'viet-nam':'southeast asia',
'mauritius':'west africa',
'myanmar':'southeast asia',
'comoros':'indian ocean islands',
'angola':'southern africa',
'syria':'south-east mediterranean',
'martinique':'eastern europe',
'mongolia':'east asia',
'cayman-islands':'north america',
'eritrea':'east africa',
'namibia':'southern africa',
'guadeloupe':'caribbean',
'gibraltar':'north africa',
'burundi':'east africa',
'bermuda':'north america',
'cambodia':'southeast asia',
'bahamas':'caribbean',
'monaco':'eastern europe',
'botswana':'southern africa',
'bhutan':'south asia',
'seychelles':'indian ocean islands',
'antigua-and-barbuda':'caribbean',
'french-polynesia':'pacific region',
'china-macao-sar':'east asia',
'gambia':'west africa',
'turks-and-caicos-islands':'southern africa',
'lesotho':'southern africa',
'belize':'caribbean',
'curacao':'north america',
'papua-new-guinea':'pacific region',
'western-sahara':'west africa',
'fiji':'pacific region',
'saint-kitts-and-nevis':'caribbean',
'saint-lucia':'caribbean',
'congo':'west africa',
'trinidad-and-tobago':'caribbean',
'faeroe-islands':'western europe',
'Barbados':'caribbean',
'liechtenstein':'western europe',
'aruba':'western europe',
'faeroe-islands':'western europe',
'saint-vincent-and-the-grenadines':'caribbean',
'timor-leste':'pacific region',
'grenada':'caribbean',
'new-caledonia':'pacific region',
'laos':'southeast asia',
'dominica':'caribbean',
'falkland-islands-malvinas':'south america',
'greenland':'north america',
'holy-see':'western europe',
'anguilla':'caribbean',
}
from tqdm import tqdm
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import json
import time
import random
import html5lib
import re
import scipy.stats as st
from pandas.core.frame import DataFrame
import copy
import math
import os
import datetime
#import datetime
# Dates for today and the two previous days, formatted YYYYMMDD (run_time stamps).
x0=datetime.date.today()
x1=datetime.date.today()-datetime.timedelta(days=1)
x2=datetime.date.today()-datetime.timedelta(days=2)
# run_time
ts=[]
ts.append(x0.__format__('%Y%m%d'))
ts.append(x1.__format__('%Y%m%d'))
ts.append(x2.__format__('%Y%m%d'))
print(ts)
headers = { 'Connection': 'close',}
# proxies={'http':'http://127.0.0.1:10080','https':'http://127.0.0.1:10080'}
url='https://www.worldometers.info/coronavirus/#countries'
# url='https://www.worldometers.info/coronavirus/country/us/'
# Fetch the worldometers country table; `x` holds all table rows whose inline
# style matches one of the three alternating-row styles used by the site.
a=requests.get(url,headers=headers)
soup = BeautifulSoup(a.content,'html5lib')
x=soup.body.find_all('tr', attrs={'style': ['','background-color:#F0F0F0','background-color:#EAF7D5']})
# 190 210
def find_start_yesterday(i, j):
    """Locate the first data row of a country table within rows x[i:j].

    Scans for the anchor pair: a row linking to 'china' immediately followed
    by a row linking to 'us'. Returns that row's index, or None if the pair
    is not found in the range.
    """
    for idx in range(i, j):
        links_a = x[idx].find_all('a', attrs={'class': 'mt_a'})
        links_b = x[idx + 1].find_all('a', attrs={'class': 'mt_a'})
        if not links_a or not links_b:
            continue
        slug_a = str(links_a[0]).split('/')
        slug_b = str(links_b[0]).split('/')
        if slug_a[1] == 'china' and slug_b[1] == 'us':
            return idx
#385 410
# def find_end_yesterday(i,j):
# for end in range(i,j):
# final_pre=x[end-1]
# final=x[end]
# l1=final_pre.find_all('a',attrs={'class':'mt_a'})
# l2=final.find_all('a',attrs={'class':'mt_a'})
# if l1==[] or l2==[]:
# continue
# s1=str(l1[0])
# s2=str(l2[0])
# coun1=s1.split('/')
# coun2=s2.split('/')
# if (coun1[1]=='anguilla' and coun2[1]=='saint-pierre-and-miquelon') or (coun2[1]=='anguilla' and coun1[1]=='saint-pierre-and-miquelon'):
# return end+1
def find_end_yesterday(i, j):
    """Locate one past the last data row of a country table within rows x[i:j].

    Scans for the anchor row linking to 'anguilla' (the table's final country)
    and returns its index + 1, or None if the anchor is not found in the range.
    """
    for idx in range(i, j):
        links = x[idx].find_all('a', attrs={'class': 'mt_a'})
        if not links:
            continue
        slug = str(links[0]).split('/')
        if slug[1] == 'anguilla':
            return idx + 1
# Row-index boundaries of the two country tables on the page. start/end bound
# the first table; start2/end2 presumably bound the previous-day table further
# down the page (bo1 below is read from it for day-over-day deltas) — TODO confirm.
end=find_end_yesterday(400,440)
end2=find_end_yesterday(630,700)
start=find_start_yesterday(190,250)
start2=find_start_yesterday(440,470)
print('start:{}\tend:{}\tstart2:{}\tend2:{}'.format(start,end,start2,end2))
# Column labels: positions 0-22 mirror the scraped worldometers row layout
# (some are filler like '0', '17', '22'); the rest label metrics appended in
# the scraping loop below. A finished row must have exactly 39 entries.
col_name=['0','#','Country,Other','TotalCases',
    'NewCases','TotalDeaths','NewDeaths','TotalRecovered',
    'NewRecovered','ActiveCases','Serious,Critical','Tot Cases/1M pop',
    'Deaths/1M pop','TotalTests','Tests/1M pop','Population',
    'Continent','17',' 1 Caseevery X', 'ppl1 Deathevery',' X ppl1 Testevery ','X ppl','22',
    'Cases Per 100K Population','Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio',
    'New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
    'New Test','NPI','Region','key-id','Country/District','7 days inc cases','7 days inc deaths']
# Main scraping loop: for every country row in the table, fetch that country's
# detail page, parse its time series out of the embedded chart JavaScript,
# derive growth metrics, and append a 39-element row to raw_data.
raw_data=[]
for i in tqdm(range(start,end)):
    text_source=x[i]
    l=text_source.find_all('a',attrs={'class':'mt_a'})
    if l==[]:
        continue
    s=str(l[0])
    coun=s.split('/')
    # Look up region/ISO name by URL slug; unknown slugs are tagged 'missing'
    # (NOTE(review): `iso` is left unset in that case, but the 'missing' guard
    # further down skips the row before iso is read).
    try:
        region=cate[coun[1]]
        iso=dict_iso[coun[1]]
    except:
        region='missing'
    url='https://www.worldometers.info/coronavirus/country/'+coun[1]+'/'
    print(coun[1])
    # Retry the per-country request until it succeeds (bare except: any failure retries forever).
    a=''
    while a=='':
        try:
            a=requests.get(url,headers=headers)
        except:
            a=''
    soup = BeautifulSoup(a.content,'html5lib')
    r=soup.body.find_all('script',attrs={'type':'text/javascript'})
    # Chart x-axis labels (dates) from the first inline script.
    p=re.compile(r'categories: \[(.*?)\]',re.S)
    rs=re.findall(p,r[0].text)
    d=rs[0]
    # d=re.sub(r'\"','',d)
    str_pat = re.compile(r'\"(.*?)\"')
    d = str_pat.findall(d)
    date=d
    # Cumulative case series: probe the first 10 scripts for a 'Cases' chart.
    p1=re.compile(r'name: \'Cases\'.*?\[(.*?)\]',re.S)
    for j in range(10):
        try:
            rs=re.findall(p1,r[j].text)
            d=rs[0]
            d=re.sub(r'\"','',d)
            case=d.split(',')
        except:
            # print('{} cases is not{}'.format(coun[1],j))
            continue
    # Cumulative death series, same probing strategy.
    p1=re.compile(r'name: \'Deaths\'.*?\[(.*?)\]',re.S)
    for j in range(10):
        try:
            rs=re.findall(p1,r[j].text)
            d=rs[0]
            d=re.sub(r'\"','',d)
            TD=d.split(',')
        except:
            continue
    j={'Date':date,'Total Cases':case,'Total Deaths':TD}
    # print(j)
    print("Date {} TC {} TD {}".format(len(date),len(case),len(TD)))
    # Skip the country unless all three series have the same length.
    if not len(set([len(date),len(case),len(TD)])) == 1:
        continue
    hist_data_of_coun_i=pd.DataFrame(j)
    # NOTE(review): chained assignment — may hit SettingWithCopyWarning and not
    # write through; .loc[0, 'Total Deaths'] = 0 is the reliable form.
    hist_data_of_coun_i['Total Deaths'][0]=0
    for k in range(len(hist_data_of_coun_i['Total Deaths'])):
        if hist_data_of_coun_i['Total Deaths'][k]=='null':
            # BUG: `data` is undefined here — raises NameError whenever a 'null'
            # death value appears; `hist_data_of_coun_i` was surely intended.
            data['Total Deaths'][k]=0
    hist_data_of_coun_i['Total Cases']=hist_data_of_coun_i['Total Cases'].astype(int)
    hist_data_of_coun_i['Total Deaths']=hist_data_of_coun_i['Total Deaths'].astype(int)
    hist_data_of_coun_i['case inc']=hist_data_of_coun_i['Total Cases'].diff()
    hist_data_of_coun_i['death inc']=hist_data_of_coun_i['Total Deaths'].diff()
    # 7-day sums of new deaths and new cases (the comprehension variable `i`
    # shadows the outer loop index but does not clobber it in Python 3).
    seven_cases=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])
    seven_deaths=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])
    # Latest daily increment vs. the average of a week earlier.
    inc1=hist_data_of_coun_i.loc[len(date)-1,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'case inc'])
    inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
    # Week-over-week ratios of new cases / new deaths.
    inc_1=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(8,15)])
    inc_2=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(8,15)])
    # Average daily new cases over the past week.
    adcp=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/7
    # Walk backwards past zero-increment days for large-outbreak countries.
    p=1
    while inc1 ==0 and hist_data_of_coun_i.loc[len(date)-1,'Total Cases']>=10000:
        p+=1
        inc1=hist_data_of_coun_i.loc[len(date)-p,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-1-p,'case inc'])
    # 5-day log growth rate of daily increments; drop NaN/inf rows, then fit a
    # line over the first 30 points — the slope becomes the NPI proxy metric.
    dd=hist_data_of_coun_i.shift(5)
    hist_data_of_coun_i['inc_p']=np.log(hist_data_of_coun_i['case inc']/dd['case inc'])/5
    hist_data_of_coun_i=hist_data_of_coun_i[~hist_data_of_coun_i.isin([np.nan, np.inf, -np.inf]).any(1)]
    da=hist_data_of_coun_i['inc_p'].values
    try:
        slope,intercept, r_value, p_value, std_err=st.linregress(list(range(30)), da[:30])
    except:
        slope=None
    # print(x[i])
    # Split the table row's text into cells; collapse duplicated blank cells.
    bo=x[i].text.split('\n')
    # print(bo)
    if bo[6]=='' and bo[7]=='':
        del bo[7]
    if bo[17]=='' and bo[18]=='':
        del bo[18]
    # Find the matching row in the second (previous-day) table for deltas.
    # NOTE(review): if no match is found, bo1/coun1 stay unbound and the code
    # below raises NameError (or reuses the previous iteration's values).
    for o in range(start2,end2):
        s1=x[o]
        l1=s1.find_all('a',attrs={'class':'mt_a'})
        if l1==[]:
            continue
        s1=str(l1[0])
        coun1=s1.split('/')
        if coun1[1]==coun[1]:
            bo1=x[o].text.split('\n')
            break
    # Strip thousands separators and '+' signs from all cells.
    for h in range(len(bo)):
        bo[h]=bo[h].replace(',','')
        bo[h]=bo[h].replace('+','')
    for h in range(len(bo1)):
        bo1[h]=bo1[h].replace(',','')
        bo1[h]=bo1[h].replace('+','')
    #Cases Per 100K Population
    try:
        bo.append(100000*int(bo[3])/int(bo[15]))
    except:
        print(coun[1])
        continue
        # bo.append(np.nan)
        # print('lack one')
    #Tests Per 100K Population
    try:
        bo.append(100000*int(bo[13])/int(bo[15]))
    except:
        print(coun[1])
        continue
        # bo.append(np.nan)
        # print('lack one')
    #'Active Cases Per 100k Population'
    try:
        bo.append(int(bo[9])*100000/int(bo[15]))
    except:
        bo.append(np.nan)
        # print('lack one')
    #Total Test:Positive Ratio
    bo.append(int(bo[3])/int(bo[13]))
    #New Positive
    try:
        bo.append((int(bo[3])-int(bo1[3]))/(int(bo[13])-int(bo1[13])))
    except:
        bo.append(np.nan)
        # print('lack one')
    #Case Fatality Rate%
    try:
        if bo[5]=='':
            bo.append(0)
        else:
            bo.append(int(bo[5])/int(bo[3]))
    except:
        bo.append(np.nan)
    #New Confirmed Case Growth Rate
    # try:
    #     q=2
    #     while (math.isnan(inc1) or inc1==np.inf) and q<=9:
    #         # print(inc1)
    #         inc1=hist_data_of_coun_i.loc[len(date)-q,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'case inc'])
    #         c=hist_data_of_coun_i.loc[len(date)-q,'case inc']
    #         q+=1
    #     # print(inc1)
    #     if math.isnan(inc1):
    #         bo.append(0)
    #     elif inc1==np.inf:
    #         bo.append(0.01)
    #     # elif c<=100:
    #     #     bo.append(0.03)
    #     else:
    #         bo.append(inc1)
    # except:
    #     bo.append(0)
    #     print('lack one')
    #New Sum Confirmed Case Growth Rate
    if math.isnan(inc_1) or inc_1=='':
        bo.append(0)
    elif inc_1==np.inf:
        bo.append(0.01)
    else:
        bo.append(inc_1)
    print(bo[-1])
    #New Death Case Growth Rate
    # try:
    #     q=2
    #     while (math.isnan(inc2) or inc2==np.inf) and q<=9:
    #         # print(inc2)
    #         inc2=hist_data_of_coun_i.loc[len(date)-q,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'death inc'])
    #         q+=1
    #     # print(inc2)
    #     if math.isnan(inc2):
    #         bo.append(0)
    #     elif inc2==np.inf:
    #         bo.append(0.1)
    #     else:
    #         bo.append(inc2)
    # except:
    #     bo.append(0)
    #     print('lack one')
    #New Sum Death Case Growth Rate
    if math.isnan(inc_2) or inc_2=='':
        bo.append(0)
    elif inc_2==np.inf:
        bo.append(0.1)
    else:
        bo.append(inc_2)
    print(bo[-1])
    #Average daily cases per 100,000 people in the past week
    bo.append(adcp*100000/int(bo[15]))
    # New Test
    try:
        bo.append(int(bo[13])-int(bo1[13]))
    except:
        bo.append(np.nan)
        # print('lack one')
    bo.append(slope)
    if region=='missing':
        continue
    else:
        bo.append(region)
    bo.append(coun1[1])
    bo.append(iso)
    bo.append(seven_cases)
    bo.append(seven_deaths)
    print(len(bo))
    print(bo)
    if len(bo)!=39:
        # BUG: os.exit does not exist (AttributeError); sys.exit(-1) or
        # os._exit(-1) was intended.
        os.exit(-1)
    raw_data.append(bo)
raw_data=DataFrame(raw_data,columns=col_name)
brief_raw_data=raw_data[['Country,Other','key-id','Region','Country/District','Population',
'TotalCases','ActiveCases','TotalDeaths','NewDeaths','TotalRecovered','NewRecovered','Serious,Critical','NewCases','New Test','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI','7 days inc cases','7 days inc deaths']]
brief_raw_data['week death rate']=brief_raw_data['7 days inc deaths']/brief_raw_data['7 days inc cases']
tf=copy.deepcopy(brief_raw_data)
uni_region=list(set(list(tf['Region'].values)))
uni_region.remove('western europe')
data_region=tf[tf['Region']=='western europe']
data_region=data_region.replace(np.nan,'shit')
data_region=data_region.replace(np.inf,'shit')
data_region=data_region.replace('N/A','shit')
data_region=data_region.replace('',0)
data_region=data_region.replace(' ',0)
data_region.loc[data_region['NPI']=='shit','NPI']=0
data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
dd=data_region[['NewCases','New Test']]
ac=dd[dd['New Test']!=0]
new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
final=copy.deepcopy(data_region)
for distri in uni_region:
data_region=tf[tf['Region']==distri]
data_region=data_region.replace(np.nan,'shit')
data_region=data_region.replace(np.inf,'shit')
data_region=data_region.replace('N/A','shit')
data_region=data_region.replace('',0)
data_region=data_region.replace(' ',0)
data_region.loc[data_region['NPI']=='shit','NPI']=0
data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
dd=data_region[['NewCases','New Test']]
ac=dd[dd['New Test']!=0]
try:
new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
except:
new_posi=0
data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
data_region.loc[data_region['New Test']=='shit','New Positive%']=new_posi
final=pd.concat([final,data_region])
final=final.reset_index(drop=True)
tf2=final[['Country,Other','key-id','Country/District','Region','TotalCases','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']]
#越高越好,即需要降序
# for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',]
x='Tests Per 100K Population'
df=copy.deepcopy(tf2[['Country,Other',x]])
df2=df.sort_values(x,ascending=False,inplace=False)
df2 = df2.reset_index(drop=True)
df2['cum']=df.index+1
df2['cum_prob']=100*df2['cum']/max(df2['cum'])
df3=pd.merge(df,df2,on=['Country,Other'])
# tf2['IND_'+x]=df3['cum_prob']
tf2['IND_'+x]=0
for h in list(tf2['Country,Other'].values):
tf2.loc[tf2['Country,Other']==h,'IND_'+x]=df3.loc[df3['Country,Other']==h,'cum_prob'].values[0]
for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']:
i=1
df=copy.deepcopy(tf2[['Country,Other',x]])
print(x)
df2=df.sort_values(x,inplace=False)
df2 = df2.reset_index(drop=True)
df2['cum']=df.index+1
df2['cum_prob']=100*df2['cum']/max(df2['cum'])
df3=pd.merge(df,df2,on=['Country,Other'])
tf2['IND_'+x]=0
for h in list(tf2['Country,Other'].values):
tf2.loc[tf2['Country,Other']==h,'IND_'+x]=df3.loc[df3['Country,Other']==h,'cum_prob'].values[0]
i+=1
tf2['Comprehensive Index']=0.15*tf2['IND_Cases Per 100K Population']+0.08*tf2['IND_Tests Per 100K Population']+0.2*tf2['IND_Active Cases Per 100k Population']+0.1*tf2['IND_Total Test:Positive Ratio']+0.13*tf2['IND_New Positive%']+0.05*tf2['IND_Case Fatality Rate%']+ 0.22*tf2['IND_New Confirmed Case Growth Rate']+0.07*tf2['IND_New Death Case Growth Rate']
today=datetime.datetime.now()
tf4=tf2[['Country/District','TotalCases','IND_Cases Per 100K Population','IND_Tests Per 100K Population','IND_Total Test:Positive Ratio',
'IND_New Positive%','IND_Case Fatality Rate%','IND_New Confirmed Case Growth Rate','IND_New Death Case Growth Rate','IND_Active Cases Per 100k Population',
'IND_NPI','IND_Average daily cases per 100,000 people in the past week','Comprehensive Index']]
tf_c=copy.deepcopy(tf4)
tf_c_rename=tf_c.rename({'TotalCases':'TOTAL CASE','IND_Cases Per 100K Population':'IND1_Cases Per 100K Population','IND_Tests Per 100K Population':'IND2_Tests Per 100K Population',
'IND_Active Cases Per 100k Population':'IND8_Active Cases Per 100k Population','IND_Total Test:Positive Ratio':'IND3_Total Test:Positive Ratio',
'IND_New Positive%':'IND4_New Positive%','IND_Case Fatality Rate%':'IND5_Case Fatality Rate%','IND_New Confirmed Case Growth Rate':'IND6_New Confirmed Case Growth Rate',
'IND_New Death Case Growth Rate':'IND7_New Death Case Growth Rate','IND_NPI':'NPI'},axis='columns')
tf_c_rename.to_excel('World_index_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
tf2.to_excel('World_raw_index_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
url='https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv'
a=requests.get(url,headers=headers)
with open("vacc.csv",'wb') as f:
f.write(a.content)
vacc = pd.read_csv('vacc.csv',keep_default_na=False)
ct = list(dict(vacc['location'].value_counts()).keys())
for x in ['total_vaccinations','people_vaccinated','people_fully_vaccinated','total_boosters']:
vacc[x]=vacc[x].replace('',0)
vacc[x]=vacc[x].astype(float)
vacc[x]=vacc[x].astype(int)
img = dict()
for i in ct:
dt = vacc[vacc['location']==i]
d=[]
for x in ['total_vaccinations','people_vaccinated','people_fully_vaccinated','total_boosters']:
d.append(max(dt[x]))
img[i]=d
brief_raw_data['total_vaccinations']=0
brief_raw_data['people_vaccinated']=0
brief_raw_data['people_fully_vaccinated']=0
brief_raw_data['total_boosters']=0
for i in img.keys():
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'total_vaccinations'] = int(img[i][0])
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'people_vaccinated'] = int(img[i][1])
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'people_fully_vaccinated'] = int(img[i][2])
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'total_boosters'] = int(img[i][3])
brief_raw_data['Population']=brief_raw_data['Population'].astype(int)
brief_raw_data['vacc_per_100']=brief_raw_data['total_vaccinations']*100/brief_raw_data['Population']
brief_raw_data['cases_per_100']=brief_raw_data['Cases Per 100K Population']/1000
brief_raw_data['total_immune']=brief_raw_data['cases_per_100']+brief_raw_data['vacc_per_100']*0.9
def infer_vaccinated(one_row):
    """Impute missing per-dose counts for one country row, in place.

    When a row reports a non-zero total shot count but a zero
    ``people_vaccinated`` / ``people_fully_vaccinated``, estimate the
    missing value from the total using fixed global ratios
    (0.7018 / 0.2936 — presumably derived from worldwide averages;
    TODO confirm source of these constants).  Returns the (mutated) row
    so it can be used with ``DataFrame.apply(axis=1)``.
    """
    total = one_row['total_vaccinations']
    if total != 0:
        if one_row['people_vaccinated'] == 0:
            one_row['people_vaccinated'] = int(0.7018 * total)
        if one_row['people_fully_vaccinated'] == 0:
            one_row['people_fully_vaccinated'] = int(0.2936 * total)
    return one_row
brief_raw_data = brief_raw_data.apply(infer_vaccinated,axis=1)
import datetime
from datetime import timedelta
import sys
vacc = pd.read_csv('vacc.csv',keep_default_na=False)
ct = list(dict(vacc['location'].value_counts()).keys())
vacc_daily=copy.deepcopy(vacc)
vacc_daily['daily_vaccinations']=vacc_daily['daily_vaccinations'].replace('',-1)
vacc_daily['daily_vaccinations']=vacc_daily['daily_vaccinations'].astype(int)
vacc_daily = vacc_daily.drop(vacc_daily[vacc_daily['daily_vaccinations']==-1].index)
vacc_daily['date']=pd.to_datetime(vacc_daily['date']).dt.date
daily=dict()
# vacc_daily.loc[vacc_daily['date']==,'daily_vaccinations']
for i in ct:
rdd=[]
dt = vacc_daily[vacc_daily['location']==i]
today=datetime.date.today()
try:
a=max(dt['date'])
# print(a)
# print(today)
# print(today-timedelta(14))
# if today-timedelta(14)<a:
x=dt.loc[(dt['date']<=today)&(dt['date']>=today-timedelta(14)),'daily_vaccinations']
note = sum(x)/len(x)
# else:
# note = -2
except:
note =-2
daily[i]=int(note)
for x in ['total_vaccinations','people_vaccinated','people_fully_vaccinated','total_boosters']:
vacc[x]=vacc[x].replace('',-1)
vacc[x]=vacc[x].astype(float)
vacc[x]=vacc[x].astype(int)
vacc = vacc.drop(vacc[vacc['total_vaccinations']==-1].index)
vacc['date']=pd.to_datetime(vacc['date'])
rd=dict()
for i in ct:
rdd=[]
dt = vacc[vacc['location']==i]
a=max(dt['date'])
b=min(dt['date'])
c=a-b
Vacc_Days=int(c.days)
if Vacc_Days==0:
Vacc_Days=1
Total_Vacc=int(max(dt['total_vaccinations']))
Avg_Vac_Daily=Total_Vacc/Vacc_Days
d=a-timedelta(14)
if len(list(dt['date']))==1:
Avg_Vac_Last14D=Total_Vacc
elif daily[i]==-2:
Avg_Vac_Last14D=Total_Vacc/Vacc_Days
else:
Avg_Vac_Last14D=daily[i]
rdd=[Vacc_Days,Total_Vacc,Avg_Vac_Daily,Avg_Vac_Last14D]
rd[i]=rdd
# lst14days=pd.DataFrame(rd,columns=['Country','Vac_Days','Total_Vac','Avg_Vac_Daily','Avg_Vac_Last14D'])
brief_raw_data['累计感染率(%)']=brief_raw_data['Cases Per 100K Population']/100000
brief_raw_data['接种率=(接种疫苗数/总人口)']=brief_raw_data['vacc_per_100']/100
brief_raw_data['VacSpeed_Last14D']=0.05
brief_raw_data['Population']=brief_raw_data['Population'].astype(int)
brief_raw_data['fully vaccinated %']=100*brief_raw_data['people_fully_vaccinated']/brief_raw_data['Population']
brief_raw_data['at least 1 dose vaccinated %']=100*brief_raw_data['people_vaccinated']/brief_raw_data['Population']
brief_raw_data['Fully Vac Ratio %']=100*brief_raw_data['fully vaccinated %']/brief_raw_data['at least 1 dose vaccinated %']
# brief_raw_data['加强针接种率']=brief_raw_data['total_boosters']/brief_raw_data['Population']
for i in rd.keys():
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'VacSpeed_Last14D'] = 100*int(rd[i][3])/brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'Population']
vaccine_efficacy = pd.read_excel('Vaccine-Country.xlsx',sheet_name='Efficacy')
vaccine_efficacy = vaccine_efficacy.set_index('Vaccine').T.to_dict('list')
vaccine_map=pd.read_excel('Vaccine-Country.xlsx',sheet_name='Vac-Mapping')
vaccine_map = dict(zip(vaccine_map['Location'],vaccine_map['首选疫苗']))
brief_raw_data['Vaccine1']='NA'
brief_raw_data['Vaccine_Efficacy']=0.8
for i in vaccine_map.keys():
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'Vaccine1'] = vaccine_map[i]
brief_raw_data.loc[((brief_raw_data['Country,Other']==i)|(brief_raw_data['Country/District']==i)),'Vaccine_Efficacy'] = vaccine_efficacy[vaccine_map[i]][1]
brief_raw_data['total_immune_adj']=brief_raw_data['累计感染率(%)']*100+brief_raw_data['vacc_per_100']*brief_raw_data['Vaccine_Efficacy']/2
brief_raw_data['DaystoReachHerdImmunity']=(0.7-brief_raw_data['累计感染率(%)']-brief_raw_data['接种率=(接种疫苗数/总人口)']*brief_raw_data['Vaccine_Efficacy']/2)/(brief_raw_data['VacSpeed_Last14D']*brief_raw_data['Vaccine_Efficacy']*0.01/2)
import datetime
def change_time(x):
    """Translate a days-from-reference count into a calendar date string.

    The reference date is read from the module-level ``ts[1]``
    (formatted ``YYYYMMDD``).  Returns:
      * ``'already'`` for negative *x* (the milestone is in the past),
      * ``'never'``   for infinite or unconvertible *x* (e.g. NaN),
      * otherwise the date *x* days after the reference, ``YYYY-MM-DD``.
    """
    in_date = ts[1]  # read eagerly, matching original evaluation order
    if x < 0:
        return 'already'
    if math.isinf(x):
        return 'never'
    base = datetime.datetime.strptime(in_date, "%Y%m%d")
    try:
        return (base + datetime.timedelta(days=int(x))).strftime("%Y-%m-%d")
    except:  # int(NaN) / overflow -> herd immunity unreachable
        return 'never'
# brief_raw_data.to_excel('World_rawdata_test.xlsx')
brief_raw_data['HerdImmunityDate']=brief_raw_data['DaystoReachHerdImmunity'].map(change_time)
brief_raw_data['Total Boosters']=brief_raw_data['total_boosters']
brief_raw_data['加强针接种率']=brief_raw_data['total_boosters']/brief_raw_data['Population']
brief_raw_data.to_excel('World_rawdata_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
uni_region=list(set(list(brief_raw_data['Region'].values)))
writer = pd.ExcelWriter('Region_World_Rawdata_{}.xlsx'.format(today))
for distri in uni_region:
data_region=brief_raw_data[brief_raw_data['Region']==distri]
data_region.to_excel(writer,sheet_name=distri,index=False)
writer.save()
# filename: epregressions/builds/makefile.py
import os
from epregressions.builds.base import BaseBuildDirectoryStructure
from epregressions.ep_platform import exe_extension
class CMakeCacheMakeFileBuildDirectory(BaseBuildDirectoryStructure):
    """Build-directory adapter for a Makefile-based EnergyPlus CMake build.

    Locates the source tree by parsing CMakeCache.txt in the build
    directory, then exposes paths to the built tools and source assets.
    """
    def __init__(self):
        super(CMakeCacheMakeFileBuildDirectory, self).__init__()
        # Filled in by set_build_directory() from the CMake cache.
        self.source_directory = None
    def set_build_directory(self, build_directory):
        """
        This method takes a build directory, and updates any dependent member variables, in this case the source dir.
        This method *does* allow an invalid build_directory, as could happen during program initialization

        Raises Exception if the directory exists but contains no
        CMakeCache.txt, or the cache has no CMAKE_HOME_DIRECTORY entry.
        :param build_directory: path to a configured CMake build tree
        :return: None
        """
        self.build_directory = build_directory
        if not os.path.exists(self.build_directory):
            # Tolerated during program initialization: mark source unknown.
            self.source_directory = 'unknown - invalid build directory?'
            return
        cmake_cache_file = os.path.join(self.build_directory, 'CMakeCache.txt')
        if not os.path.exists(cmake_cache_file):
            raise Exception('Could not find cache file in build directory')
        with open(cmake_cache_file, 'r') as f_cache:
            for this_line in f_cache.readlines():
                if 'CMAKE_HOME_DIRECTORY:INTERNAL=' in this_line:
                    # NOTE(review): split('=') truncates a source path that
                    # itself contains '=' — assumed not to occur in practice.
                    tokens = this_line.strip().split('=')
                    self.source_directory = tokens[1]
                    break
            else:
                # for/else: no cache line matched at all.
                raise Exception('Could not find source directory spec in the CMakeCache file')
    def get_idf_directory(self):
        """Return the source-tree testfiles directory (requires a prior set_build_directory())."""
        if not self.build_directory:
            raise Exception('Build directory has not been set with set_build_directory()')
        return os.path.join(self.source_directory, 'testfiles')
    def get_build_tree(self):
        """Return a dict of tool/data paths for this build + source tree.

        Executables live under the build's Products dir; EPMacro is taken
        prebuilt from the source tree (path hardcodes 'Linux').
        """
        if not self.build_directory:
            raise Exception('Build directory has not been set with set_build_directory()')
        this_exe_ext = exe_extension()
        return {
            'build_dir': self.build_directory,
            'source_dir': self.source_directory,
            'energyplus': os.path.join(self.build_directory, 'Products', 'energyplus' + this_exe_ext),
            'basement': os.path.join(self.build_directory, 'Products', 'Basement' + this_exe_ext),
            'idd_path': os.path.join(self.build_directory, 'Products', 'Energy+.idd'),
            'slab': os.path.join(self.build_directory, 'Products', 'Slab' + this_exe_ext),
            'basementidd': os.path.join(self.build_directory, 'Products', 'BasementGHT.idd'),
            'slabidd': os.path.join(self.build_directory, 'Products', 'SlabGHT.idd'),
            'expandobjects': os.path.join(self.build_directory, 'Products', 'ExpandObjects' + this_exe_ext),
            'epmacro': os.path.join(self.source_directory, 'bin', 'EPMacro', 'Linux', 'EPMacro' + this_exe_ext),
            'readvars': os.path.join(self.build_directory, 'Products', 'ReadVarsESO' + this_exe_ext),
            'parametric': os.path.join(self.build_directory, 'Products', 'ParametricPreprocessor' + this_exe_ext),
            'test_files_dir': os.path.join(self.source_directory, 'testfiles'),
            'weather_dir': os.path.join(self.source_directory, 'weather'),
            'data_sets_dir': os.path.join(self.source_directory, 'datasets')
        }
| StarcoderdataPython |
6404592 |
def Calculator():
    """Interactive console calculator (add/sub/mul/div/mod).

    Loops until the user declines to continue.  Fixes over the original:
      * the restart path recursed into Calculator() from inside a
        ``while True`` loop (unbounded stack growth) — now a plain loop;
      * non-numeric input crashed with ValueError — now re-prompts;
      * choices 4/5 with a zero divisor crashed with ZeroDivisionError.
    """
    while True:
        print("\t\t\t\t WELCOME TO JOE'S CALCULATOR")
        print("1. Addition")
        print("2. Subtraction")
        print("3. Multiplication")
        print("4. Division")
        print("5. Modulo")
        try:
            choice = int(input("Enter your choice: "))
            var1 = int(input("Enter variable one: "))
            var2 = int(input("Enter variable two: "))
        except ValueError:
            print("Inputs must be whole numbers.")
            continue
        if choice == 1:
            print("The sum is: " + str(var1 + var2))
        elif choice == 2:
            print("The difference is: " + str(var1 - var2))
        elif choice == 3:
            print("The product is: " + str(var1 * var2))
        elif choice == 4:
            if var2 == 0:
                print("Cannot divide by zero.")
            else:
                print("The dividend is: " + str(var1 / var2))
        elif choice == 5:
            if var2 == 0:
                print("Cannot take modulo by zero.")
            else:
                print("The remainder is: " + str(var1 % var2))
        else:
            print("You've entered incorrect choice.")
        print("Do you want to calculate again?")
        recalc = input("Y/N")
        if recalc not in ("Y", "y"):
            break  # replaces exit(); loop itself provides the restart


if __name__ == "__main__":
    Calculator()
| StarcoderdataPython |
from pylab import *
import postgkyl
style.use('postgkyl.mplstyle')
def calcMeanSpect(fn, lo, up):
    """Average spectra from postgkyl frames ``fn_00<lo>.bp`` .. ``fn_00<up>.bp``.

    Returns (cell-center coordinates, sqrt of the frame-averaged values).
    NOTE(review): the "%s_00%d" frame naming assumes 3-digit frame numbers
    (lo/up in 100..999) — confirm against the dataset.
    """
    d = postgkyl.GData("%s_00%d.bp" % (fn, lo))
    s = d.getValues()
    g = d.getGrid()
    # Cell width from the first node pair; build cell-center coordinates.
    dx = g[0][1]-g[0][0]
    gc = linspace(g[0][0]+0.5*dx, g[0][-1]-0.5*dx, g[0].shape[0]-1)
    # Accumulate the remaining frames onto the first one.
    for i in range(lo+1, up+1):
        d = postgkyl.GData("%s_00%d.bp" % (fn, i))
        s = s + d.getValues()
    # sqrt of the mean over (up-lo+1) frames.
    return gc, sqrt(s/(up-lo+1))
# Time-averaged chi spectrum over frames 190..200, normalized to its first bin.
X, s = calcMeanSpect('s5-euler-kh_chi-spect', 190, 200)
loglog(X, s/s[0])
grid()
# Reference k^-1 power-law guide line.
xx = linspace(13, 100, 20)
ee = 0.75*(xx/xx[0])**(-1.0)
loglog(xx, ee, '--k')
text(50,0.5,r'$k^{-1}$')
xlabel(r'$k/2\pi$')
ylabel(r'$\chi$')
savefig('chi-spect.png')
show()
| StarcoderdataPython |
from ocli import sub, Main, Base
from .cli import decrypt, encrypt, keygen, pick
# @sub(
# {
# "decrypt": decrypt.Decrypt,
# "encrypt": encrypt.Encrypt,
# "keygen": keygen.KeyGen,
# "pick": pick.Pick,
# },
# help="select sub command",
# required=True,
# )
# class Top(Main):
class Top(Base):
    """Top-level CLI command: dispatches to one required sub-command."""
    def options(self, opt):
        """Register the sub-command table on the option parser.

        Mirrors the commented-out @sub(...) decorator form above; the
        sub-command argument is mandatory.
        """
        super().options(
            opt.sub(
                {
                    "decrypt": decrypt.Decrypt,
                    "encrypt": encrypt.Encrypt,
                    "keygen": keygen.KeyGen,
                    "pick": pick.Pick,
                },
                help="select sub command",
                required=True,
            )
        )
def main():
    """Console entry point: run the Top command parser."""
    return Top().main()


# Standard script guard; replaces the `(__name__ == "__main__") and ...`
# boolean-and trick, which abused short-circuit evaluation for control flow.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# filename: IntegratedFireControl.py
'''
Created on 2022/01/23
@author: sa
'''
import numpy as np
from .MyUtil import *
from ASRCAISim1.libCore import *
# Fire-control tuning constants.  Prefixes suggest units (S_=seconds,
# MPS_=m/s, M_=meters) — inferred from naming and usage; TODO confirm.
S_TIMEOUT = 1200  # sentinel "never fired" value returned by getTimeFromLastLaunch*
MPS_MINIMUM_LIVE_MISSILE_VELOCITY = 500  # below this speed a flying missile is considered spent
S_MISSILE_ACL_TIME = 4.0  # grace period after launch while the missile is still accelerating
MPS_MAXSPEED = 500  # assumed max aircraft speed; used to bound target movement envelopes
M_RADAR_RANGE = 100000  # radar detection range used in kill confirmation
M_SPLASH_RANGE = 300  # miss distance below which a missile is treated as a hit
M_ABREAST_CRITERIA = 10000  # |delta-x| under which two targets count as flying abreast
class Track():
    """Fused-track record for one enemy aircraft.

    Fixes over the original:
      * the ``lastShotTime`` argument was accepted but silently discarded
        (the attribute was always reset to ``[0, 0]``) and used a shared
        mutable default list — now honored, with a fresh list per instance;
      * the ``track3D`` default ``Track3D().to_json()`` was evaluated at
        class-definition time (one shared dict) — now built lazily.
    Parameter order and omitted-argument behavior are unchanged.
    """
    def __init__(self, foundBy, lastFoundTime, lastFoundPos, lastFoundVel, isAlive,
                 lastShotTime=None, track3D=None, foundInThisStep=False):
        self.foundBy = foundBy              # sensor port (0/1) that last detected the target
        self.lastFoundTime = lastFoundTime  # sim time of the last detection
        self.lastFoundPos = lastFoundPos    # position at last detection
        self.lastFoundVel = lastFoundVel    # velocity at last detection
        self.isAlive = isAlive              # False once a kill is confirmed
        # Per-ally time of the last missile launch at this target.
        self.lastShotTime = [0, 0] if lastShotTime is None else lastShotTime
        # Latest raw Track3D observation (json form); fresh default per instance.
        self.track3D = Track3D().to_json() if track3D is None else track3D
        self.foundInThisStep = foundInThisStep  # True only during the step it was observed
class IntegratedFireControl():
    def __init__(self):
        """Shared fire-control / track-fusion state for a two-ship flight."""
        # truth id (str) -> Track record, for every enemy ever detected.
        self.trackFile = {}
        # Per ally (port 0/1): missile tube index awaiting launch, -1 if none.
        self.launchAwaitingId = [-1,-1]
        # Per ally, per missile tube (10 each): truth id assigned at the
        # launch request, None while unassigned.
        self.launchAwaitingTargets = [
            [None]*10,
            [None]*10
        ]
        # Per ally, per missile tube: sim time of the launch request, -1 if never.
        self.launchAwaitingTime = [
            [-1]*10,
            [-1]*10
        ]
def reset(self):
self.trackFile = {}
self.launchAwaitingId = [-1,-1]
    def _updateFoundTarget(self,time,port,track3D):
        """Merge one radar observation into the track file (keyed by str truth id)."""
        if(str(track3D["truth"]) in self.trackFile):
            # Known target: refresh the existing record in place.
            tmpTrack = self.trackFile[str(track3D["truth"])]
            tmpTrack.isAlive = True
            tmpTrack.foundBy = port
            tmpTrack.lastFoundTime = time
            tmpTrack.lastFoundPos = track3D["pos"]
            tmpTrack.lastFoundVel = track3D["vel"]
            tmpTrack.track3D = track3D
            tmpTrack.foundInThisStep = True
        else:
            # First sighting: create a fresh Track record.
            tmpTrack = Track(port,time,track3D["pos"],track3D["vel"],True,track3D=track3D,foundInThisStep=True)
            self.trackFile[str(track3D["truth"])] = tmpTrack
    def updateTrack(self,time,observableList): #observableList: array of observableList. if not available, None
        """Per-step track fusion: ingest radar tracks from both allies, then
        confirm kills by radar-coverage reasoning and by missile downlink.
        """
        # Invalidate per-step observation flags before ingesting new data.
        for key,track in self.trackFile.items():
            track.foundInThisStep = False
            track.track3D = None
        # Ingest every radar track reported by each surviving ally.
        for port in range(2):
            if(not observableList[port] is None):
                for track3D in observableList[port]["sensor"]["radar"]["track"]:
                    self._updateFoundTarget(time,port,track3D)
        # Kill confirmation: a target that SHOULD be inside some ally's radar
        # coverage (even in the worst-case movement envelope) but was not
        # observed this step is declared dead.
        for truth,target in self.trackFile.items():
            if(target.isAlive and target.lastFoundTime != time): #alive, but lost
                for port in range(2):
                    if(not observableList[port] is None):
                        myPos = observableList[port]["motion"]["pos"]
                        myVel = observableList[port]["motion"]["vel"]
                        targetPos = target.lastFoundPos
                        radAspectToTarget = calcRadAspectAngle3D(targetPos,myPos,myVel)
                        mDistanceToTarget = calcDistance3d(myPos,targetPos)
                        # Farthest the target could have moved since last seen.
                        mTargetMovableRange = MPS_MAXSPEED * (float(time)-float(target.lastFoundTime))
                        if(radAspectToTarget > np.pi/2 and #sensor is directed to target
                           mDistanceToTarget + mTargetMovableRange < M_RADAR_RANGE and #target is in radar range
                           mTargetMovableRange < mDistanceToTarget * np.sin(radAspectToTarget-np.pi/2)): #target is in radar coverage
                            target.isAlive = False
        # Missile downlink: did one of our flying missiles get close enough
        # to its target to count as a splash?
        for port in range(2):
            if(not observableList[port] is None):
                for missile in observableList[port]["weapon"]["missiles"]:
                    if(missile["isAlive"] and missile["hasLaunched"]):
                        npMissilePos = np.array([float(missile["motion"]["pos"][0]),float(missile["motion"]["pos"][1]),float(missile["motion"]["pos"][2])])
                        npTargetPos = np.array([float(missile["target"]["pos"][0]),float(missile["target"]["pos"][1]),float(missile["target"]["pos"][2])])
                        npMissileVel = np.array([float(missile["motion"]["vel"][0]),float(missile["motion"]["vel"][1]),float(missile["motion"]["vel"][2])])
                        npTargetVel = np.array([float(missile["target"]["vel"][0]),float(missile["target"]["vel"][1]),float(missile["target"]["vel"][2])])
                        # Extrapolate 0.1 s ahead and compare the miss distance.
                        missilePosInNextFrame = npMissilePos + npMissileVel*0.1
                        targetPosInNextFrame = npTargetPos + npTargetVel*0.1
                        mMissDistance = calcDistance3d(missilePosInNextFrame,targetPosInNextFrame)
                        if(mMissDistance < M_SPLASH_RANGE):
                            #print("Splash ",str(truth)," confirmed by downlink",mMissDistance)
                            self.trackFile[str(missile["target"]["truth"])].isAlive=False
    def _updateFireControl(self,time,port,track3D):
        """Record that ally *port* fired at *track3D*'s truth id at *time*."""
        if(str(track3D["truth"]) in self.trackFile):
            tmpTrack = self.trackFile[str(track3D["truth"])]
            tmpTrack.lastShotTime[port] = time
            # Re-assignment is redundant (tmpTrack is the same object) but harmless.
            self.trackFile[str(track3D["truth"])] = tmpTrack
        else:
            # Should not happen: firing at a target that was never tracked.
            print("!!!!!!!Debug launched before found!!!!!!")
            print("port:",port)
            print("track3D",track3D)
    def requestFire(self,time,observableList,port,targetTrack):
        """Request a missile launch from ally *port* at *targetTrack*.

        Returns (launch, target): launch=False with an empty Track3D json
        when the ally is dead, not launchable, or out of missiles; otherwise
        True with the requested target, after registering the request in the
        launch-awaiting bookkeeping.
        """
        if(observableList[port] is None):
            # The requesting ally has been shot down.
            launch = False
            target = Track3D().to_json()
            return launch,target
        nextMissileId = int(observableList[port]["weapon"]["nextMsl"])
        if(not observableList[port]["weapon"]["launchable"] or nextMissileId >= 10):
            # Not currently able to fire, or all 10 tubes expended.
            launch = False
            target = Track3D().to_json()
            return launch,target
        launch = True
        target = targetTrack
        # Record the shot time on the track, then mark this tube as awaiting.
        self._updateFireControl(time, port, target)
        self.launchAwaitingId[port] = nextMissileId
        self.launchAwaitingTargets[port][nextMissileId] = target["truth"]
        self.launchAwaitingTime[port][nextMissileId] = float(time)
        return launch,target
def getTimeFromLastLaunch(self,time,truth):#time from launch by any ally
truth = str(truth)
if(truth in self.trackFile):
t0 = float(self.trackFile[truth].lastShotTime[0])
t1 = float(self.trackFile[truth].lastShotTime[1])
lastShotTime = np.max(t0,t1)
return float(time) - lastShotTime
else:
return S_TIMEOUT
def getTimeFromLastLaunchBy(self,time,port,truth):
truth = str(truth)
if(truth in self.trackFile):
return float(time) - self.trackFile[truth].lastShotTime[port]
else:
return S_TIMEOUT
def isLaunchAwaiting(self,observableList,port):
if(self.launchAwaitingId[port] < 0):
return False
elif(self.launchAwaitingId[port] >= 10):
return False
else:
nextMissile = observableList[port]["weapon"]["missiles"][self.launchAwaitingId[port]]
if(nextMissile["isAlive"] and nextMissile["hasLaunched"]):
return False
elif(not nextMissile["isAlive"]):
return False
else:
return True
    def _isTargetShotBy_bak(self,time,port,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity = MPS_MINIMUM_LIVE_MISSILE_VELOCITY):
        """Backup copy of _isTargetShotBy (unused; kept for reference).

        Returns (shot, info, port, missileId, launchedT).  Identical logic to
        the live version below, minus its commented-out debug dump.
        """
        if(targetTrack3D is None): # invalid target
            return False,"invalid target",0,0,0.0
        if(observableList[port] is None): # this ally has been shot down
            return False,"element not alive",0,0,0.0
        for missileId in range(10):
            missiles = observableList[port]["weapon"]["missiles"]
            if(not self.launchAwaitingTargets[port][missileId] is None): # target assigned to this tube
                if(missiles[missileId]["isAlive"] and (not missiles[missileId]["hasLaunched"])):# assigned but not launched yet
                    # Treat as shot only within 3 s of the launch request; after
                    # that the launch is considered failed -> no longer assigned.
                    if((float(time) - self.launchAwaitingTime[port][missileId]) < 3.0 and (str(self.launchAwaitingTargets[port][missileId]) == str(targetTrack3D["truth"]))):
                        return True,"assigned, not launched",port,missileId,float(missiles[missileId]["launchedT"])
                elif(missiles[missileId]["isAlive"] and missiles[missileId]["hasLaunched"]):# launched and possibly still effective
                    if(str(missiles[missileId]["target"]["truth"]) == str(targetTrack3D["truth"])):
                        npVel = np.array([float(missiles[missileId]["motion"]["vel"][0]),float(missiles[missileId]["motion"]["vel"][1]),float(missiles[missileId]["motion"]["vel"][2])])
                        velMag = np.linalg.norm(npVel,ord=2)
                        if(velMag > mpsMinimumLiveMissileVelecity):
                            return True,"launched, has speed",port,missileId,float(missiles[missileId]["launchedT"])
                        if((float(time)-float(missiles[missileId]["launchedT"])) < S_MISSILE_ACL_TIME):
                            return True,"launched, accel",port,missileId,float(missiles[missileId]["launchedT"])
        return False,"no matching",0,0,0.0
    def _isTargetShotBy(self,time,port,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity = MPS_MINIMUM_LIVE_MISSILE_VELOCITY):
        """Does ally *port* currently have an effective missile on *targetTrack3D*?

        Returns (shot, info, port, missileId, launchedT).  "Effective" means
        either a launch request within the last 3 s that has not yet fired,
        or a flying missile at this target that still has speed above
        *mpsMinimumLiveMissileVelecity* (or is within its boost phase).
        """
        if(targetTrack3D is None): # invalid target
            return False,"invalid target",0,0,0.0
        if(observableList[port] is None): # this ally has been shot down
            return False,"element not alive",0,0,0.0
        for missileId in range(10):
            missiles = observableList[port]["weapon"]["missiles"]
            if(not self.launchAwaitingTargets[port][missileId] is None): # target assigned to this tube
                if(missiles[missileId]["isAlive"] and (not missiles[missileId]["hasLaunched"])):# assigned but not launched yet
                    # Counts only within 3 s of the launch request; beyond that
                    # the launch is considered failed -> no longer assigned.
                    if((float(time) - self.launchAwaitingTime[port][missileId]) < 3.0 and (str(self.launchAwaitingTargets[port][missileId]) == str(targetTrack3D["truth"]))):
                        return True,"assigned, not launched",port,missileId,float(missiles[missileId]["launchedT"])
                elif(missiles[missileId]["isAlive"] and missiles[missileId]["hasLaunched"]):# launched, check remaining speed
                    if(str(missiles[missileId]["target"]["truth"]) == str(targetTrack3D["truth"])):
                        npVel = np.array([float(missiles[missileId]["motion"]["vel"][0]),float(missiles[missileId]["motion"]["vel"][1]),float(missiles[missileId]["motion"]["vel"][2])])
                        velMag = np.linalg.norm(npVel,ord=2)
                        if(velMag > mpsMinimumLiveMissileVelecity):
                            return True,"launched, has speed",port,missileId,float(missiles[missileId]["launchedT"])
                        # Boost phase: slow but still accelerating counts too.
                        if((float(time)-float(missiles[missileId]["launchedT"])) < S_MISSILE_ACL_TIME):
                            return True,"launched, accel",port,missileId,float(missiles[missileId]["launchedT"])
        #DEBUG FOR UNNECESSARY LAUNCH
        # print("target {:}".format(str(targetTrack3D["truth"])))
        # print("missile alive criteria: {:.1f}".format(mpsMinimumLiveMissileVelecity))
        # for missileId in range(10):
        #     if(not missiles[missileId]["isAlive"]):
        #         print("{:}-{:} alive: {:}".format(port,missileId,missiles[missileId]["isAlive"]))
        #     else:
        #         npVel = np.array([float(missiles[missileId]["motion"]["vel"][0]),float(missiles[missileId]["motion"]["vel"][1]),float(missiles[missileId]["motion"]["vel"][2])])
        #         velMag = np.linalg.norm(npVel,ord=2)
        #         print("{:}-{:} alive: {:}, hasLaunched {:}, target {:}, match {:}, vel {:.1f},vel alive {:}, awaiting {:}".format(port,missileId,missiles[missileId]["isAlive"],missiles[missileId]["hasLaunched"],str(missiles[missileId]["target"]["truth"]),str(missiles[missileId]["target"]["truth"]) == str(targetTrack3D["truth"]),velMag,velMag > mpsMinimumLiveMissileVelecity, self.launchAwaitingTargets[port][missileId]))
        return False,"no matching",0,0,0.0
def _isTargetShot(self,time,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity = MPS_MINIMUM_LIVE_MISSILE_VELOCITY):
for port in [0,1]:
targetShot,shotInfo,shotPort,missileId,launchTime = self._isTargetShotBy(time,port,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity)
if(targetShot):
return True,shotInfo,shotPort,missileId,launchTime
return False,"no matching",0,0,0.0
def isTargetShotBy(self,time,port,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity = MPS_MINIMUM_LIVE_MISSILE_VELOCITY):
return self._isTargetShotBy(time, port, observableList, targetTrack3D, mpsMinimumLiveMissileVelecity)[0]
def isTargetShot(self,time,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity = MPS_MINIMUM_LIVE_MISSILE_VELOCITY):
for port in [0,1]:
if(self.isTargetShotBy(time,port,observableList,targetTrack3D,mpsMinimumLiveMissileVelecity)):
return True
return False
    def isPenetratable(self,time,observable,primaryAxis):
        """Can this aircraft dash through to the enemy line without being caught?

        Conservative worst-case check: both enemies must be tracked; for each
        living enemy, assumes the enemy moved as far as possible since last
        seen and that we fly at MPS_MAXSPEED along *primaryAxis*.
        """
        M_MARGIN = 27000          # extra stand-off added against missile reach
        PENETRATION_RANGE = 100000  # y-distance marking the enemy line
        if(len(self.trackFile) != 2): # cannot penetrate unless both enemies have been found
            return False
        for key,track in self.trackFile.items():
            if(track.isAlive):
                myPos = np.array([float(observable["motion"]["pos"][0]),float(observable["motion"]["pos"][1]),float(observable["motion"]["pos"][2])])
                npPrimaryAxis = np.array(primaryAxis)
                myVel = npPrimaryAxis * MPS_MAXSPEED # assume full speed toward the enemy line
                targetPos = np.array([float(track.lastFoundPos[0]),float(track.lastFoundPos[1]),float(track.lastFoundPos[2])])
                # Farthest the enemy could have moved since last observed.
                movableRange = MPS_MAXSPEED * (float(time) - float(track.lastFoundTime))
                # 1) Are we closer to the enemy line than the enemy could be?
                estimatedTargetPos = targetPos - movableRange * npPrimaryAxis # worst case: moved toward the penetration direction
                if(myPos[1]*primaryAxis[1] < estimatedTargetPos[1]*primaryAxis[1]*(-1)):
                    return False
                # 2) Can we outrun an intercept attempt?
                estimatedTargetPos = targetPos + movableRange * npPrimaryAxis # worst case: closed in on us
                myPosWithMargin = myPos - M_MARGIN * npPrimaryAxis
                hotAspect = isHot(calcRadAspectAngle2D(estimatedTargetPos,myPosWithMargin,myVel))
                if(not hotAspect): # cold even with the missile-reach margin applied -> this enemy is no threat
                    continue
                interceptPoint = calcInterceptPoint2D2(estimatedTargetPos,myPosWithMargin,myVel)
                if(interceptPoint[1]*primaryAxis[1] < PENETRATION_RANGE): # could be intercepted before reaching the enemy line
                    return False
        return True
def getTrack(self,truth):
if(truth is None):
return None
else:
return self.trackFile[str(truth)]
def getAnotherLiveTruth(self,truth):
if(truth is None):
return None
elif(len(self.trackFile) == 1):
return None
for key,val in self.trackFile.items():
if(str(key) != str(truth) and val.isAlive):
return key
return None
def getTargetsRemain(self):#未発見のターゲットは生存とみなす
count = 2
for key,val in self.trackFile.items():
if(not val.isAlive):
count -= 1
return count
    def getNumTargetsFound(self):
        """Count of distinct enemy truth ids ever entered into the track file."""
        return len(self.trackFile)
    def _assign2v2_northSouth(self,observableList): # assumes both sides still have 2 aircraft alive
        """Assign targets geographically: northern ally takes the northern enemy.

        Returns (target for port 0, target for port 1) as truth ids.
        Axis convention: index 0 of pos is treated as the north/south axis
        (larger value = north) — inferred from naming; TODO confirm frame.
        """
        truth0 = list(self.trackFile.keys())[0]
        truth1 = list(self.trackFile.keys())[1]
        if(float(self.trackFile[truth0].lastFoundPos[0]) > float(self.trackFile[truth1].lastFoundPos[0])):
            northTarget = truth0
            southTarget = truth1
        else:
            northTarget = truth1
            southTarget = truth0
        if(float(observableList[0]["motion"]["pos"][0]) > float(observableList[1]["motion"]["pos"][0])): #flight1 is north
            return northTarget,southTarget
        else:
            return southTarget,northTarget
    def _assign2v2_nearestFirst(self,observableList): # assumes both sides still have 2 aircraft alive
        """Assign targets by the single closest ally/enemy pair.

        Whichever ally-enemy pair is closest gets matched first; the other
        ally takes the remaining enemy.  Returns (target for port 0,
        target for port 1) as truth ids.
        """
        distance_blue1_red1 = calcDistance3d(observableList[0]["motion"]["pos"],list(self.trackFile.values())[0].lastFoundPos)
        distance_blue1_red2 = calcDistance3d(observableList[0]["motion"]["pos"],list(self.trackFile.values())[1].lastFoundPos)
        distance_blue2_red1 = calcDistance3d(observableList[1]["motion"]["pos"],list(self.trackFile.values())[0].lastFoundPos)
        distance_blue2_red2 = calcDistance3d(observableList[1]["motion"]["pos"],list(self.trackFile.values())[1].lastFoundPos)
        distanceList = np.array([distance_blue1_red1,distance_blue1_red2,distance_blue2_red1,distance_blue2_red2])
        minIndex = np.argmin(distanceList)
        truth0 = list(self.trackFile.keys())[0]
        truth1 = list(self.trackFile.keys())[1]
        if(minIndex == 0):
            # blue1-red1 closest -> straight matchup
            return truth0,truth1
        elif(minIndex == 1):
            # blue1-red2 closest -> cross matchup
            return truth1,truth0
        elif(minIndex == 2):
            # blue2-red1 closest -> cross matchup
            return truth1,truth0
        else:
            # blue2-red2 closest -> straight matchup
            return truth0,truth1
    def _assign2v2_LeadingTargetFirst(self,observableList,primaryAxis): # assumes both sides still have 2 aircraft alive
        """Give the enemy leading the advance to the better-positioned ally.

        The enemy furthest along the opposing team's advance direction is the
        "leading" target; the ally that is hot (nose-on) and/or closer to the
        first tracked target takes it.  Returns (target for port 0, target
        for port 1) as truth ids.
        """
        truth0 = list(self.trackFile.keys())[0]
        truth1 = list(self.trackFile.keys())[1]
        # Index 1 of pos treated as the east/west axis (larger = east) —
        # inferred from naming; TODO confirm coordinate frame.
        if(float(self.trackFile[truth0].lastFoundPos[1]) > float(self.trackFile[truth1].lastFoundPos[1])):
            eastTarget = truth0
            westTarget = truth1
        else:
            eastTarget = truth1
            westTarget = truth0
        if(primaryAxis[1] > 0): #RED
            leadingTarget = westTarget
            trailingTarget = eastTarget
        else: #BLUE
            leadingTarget = eastTarget
            trailingTarget = westTarget
        # NOTE(review): both allies' hot/distance checks use truth0's position
        # (not the leading target's) as the yardstick — looks intentional as a
        # tie-breaking heuristic, but confirm.
        targetPos = self.trackFile[truth0].lastFoundPos
        port0Pos = observableList[0]["motion"]["pos"]
        port0Vel = observableList[0]["motion"]["vel"]
        port0Distance = calcDistance3d(port0Pos,targetPos)
        port0Hot = isHot(calcRadAspectAngle3D(targetPos,port0Pos,port0Vel))
        port1Pos = observableList[1]["motion"]["pos"]
        port1Vel = observableList[1]["motion"]["vel"]
        port1Distance = calcDistance3d(port1Pos,targetPos)
        port1Hot = isHot(calcRadAspectAngle3D(targetPos,port1Pos,port1Vel))
        if(port0Hot and (not port1Hot)):
            return leadingTarget,trailingTarget
        elif((not port0Hot) and port1Hot):
            return trailingTarget,leadingTarget
        else: #both ally hot or both ally cold
            if(port0Distance < port1Distance):
                return leadingTarget,trailingTarget
            else:
                return trailingTarget,leadingTarget
    def _assign2v2_trail_abreast(self,observableList,primaryAxis):
        """Pick an assignment strategy from the enemy formation shape.

        Enemies flying roughly abreast (|delta north-south| below
        M_ABREAST_CRITERIA) -> leading-target-first; otherwise (trail
        formation) -> north/south geographic matching.
        """
        truth0 = list(self.trackFile.keys())[0]
        truth1 = list(self.trackFile.keys())[1]
        if(np.abs(float(self.trackFile[truth0].lastFoundPos[0]) - float(self.trackFile[truth1].lastFoundPos[0])) < M_ABREAST_CRITERIA):
            #return self._assign2v2_nearestFirst(observableList)
            return self._assign2v2_LeadingTargetFirst(observableList,primaryAxis)
        else:
            return self._assign2v2_northSouth(observableList)
    def _assign2v2_minimamTotalDistance(self,observableList):
        """Pick the pairing (straight vs cross) minimizing summed sqrt-distances.

        NOTE(review): despite the name, this sums np.sqrt of distances that
        are already distances (i.e. it minimizes sum of sqrt(d), not sum of
        d) — confirm whether the extra sqrt is intentional.
        Returns (target for port 0, target for port 1) as truth ids.
        """
        distance_blue1_red1 = calcDistance3d(observableList[0]["motion"]["pos"],list(self.trackFile.values())[0].lastFoundPos)
        distance_blue1_red2 = calcDistance3d(observableList[0]["motion"]["pos"],list(self.trackFile.values())[1].lastFoundPos)
        distance_blue2_red1 = calcDistance3d(observableList[1]["motion"]["pos"],list(self.trackFile.values())[0].lastFoundPos)
        distance_blue2_red2 = calcDistance3d(observableList[1]["motion"]["pos"],list(self.trackFile.values())[1].lastFoundPos)
        straightMatchupDistance = np.sqrt(distance_blue1_red1) + np.sqrt(distance_blue2_red2)
        crossMathupDistance = np.sqrt(distance_blue1_red2) + np.sqrt(distance_blue2_red1)
        truth0 = list(self.trackFile.keys())[0]
        truth1 = list(self.trackFile.keys())[1]
        if(straightMatchupDistance < crossMathupDistance):
            return truth0,truth1
        else:
            return truth1,truth0
    def assignPrimaryTarget(self,observableList,primaryAxis):
        """Pick a (primary target for ally 0, primary target for ally 1) pair.

        observableList[i] is ally i's observation dict, or None when that ally
        is down.  Returns track-file keys (or None) per ally; when only one
        alive target is known both allies are given the same key.
        NOTE(review): if BOTH entries of observableList are None, the first
        branch still indexes observableList[0] below — confirm callers never
        pass both as None.
        """
        if(observableList[1] is None): #ally is down
            if(len(self.trackFile) == 0): # no targets discovered yet
                return None,None
            elif(len(self.trackFile) == 1): # only one target discovered
                truth = list(self.trackFile.keys())[0]
                if(self.trackFile[truth].isAlive):
                    return truth,None
                else:
                    return None,None
            else: # both targets discovered
                truth0 = list(self.trackFile.keys())[0]
                truth1 = list(self.trackFile.keys())[1]
                if(not self.trackFile[truth0].isAlive): #target0 down
                    return truth1,None
                elif(not self.trackFile[truth1].isAlive): #target1 down
                    return truth0,None
                else: #both targets alive
                    # surviving ally (index 0) engages the nearer target
                    distanceToTarget0 = calcDistance3d(observableList[0]["motion"]["pos"],self.trackFile[truth0].lastFoundPos)
                    distanceToTarget1 = calcDistance3d(observableList[0]["motion"]["pos"],self.trackFile[truth1].lastFoundPos)
                    if(distanceToTarget0 < distanceToTarget1):
                        return truth0,None
                    else:
                        return truth1,None
        if(observableList[0] is None): #ally is down
            if(len(self.trackFile) == 0): # no targets discovered yet
                return None,None
            elif(len(self.trackFile) == 1): # only one target discovered
                truth = list(self.trackFile.keys())[0]
                if(self.trackFile[truth].isAlive):
                    return None,truth
                else:
                    return None,None
            else: # both targets discovered
                truth0 = list(self.trackFile.keys())[0]
                truth1 = list(self.trackFile.keys())[1]
                if(not self.trackFile[truth0].isAlive): #target0 down
                    return None,truth1
                elif(not self.trackFile[truth1].isAlive): #target1 down
                    return None,truth0
                else: #both targets alive
                    # surviving ally (index 1) engages the nearer target
                    distanceToTarget0 = calcDistance3d(observableList[1]["motion"]["pos"],self.trackFile[truth0].lastFoundPos)
                    distanceToTarget1 = calcDistance3d(observableList[1]["motion"]["pos"],self.trackFile[truth1].lastFoundPos)
                    if(distanceToTarget0 < distanceToTarget1):
                        return None,truth0
                    else:
                        return None,truth1
        else: #all ally are alive
            if(len(self.trackFile) == 0): # no targets discovered yet
                return None,None
            elif(len(self.trackFile) == 1): # only one target discovered
                truth = list(self.trackFile.keys())[0]
                if(self.trackFile[truth].isAlive):
                    return truth,truth
                else:
                    return None,None
            else: # both targets discovered
                truth0 = list(self.trackFile.keys())[0]
                truth1 = list(self.trackFile.keys())[1]
                if(not self.trackFile[truth0].isAlive): #target0 down
                    return truth1,truth1
                elif(not self.trackFile[truth1].isAlive): #target1 down
                    return truth0,truth0
                else: #both targets alive
                    # delegate 2v2 assignment to the trail/abreast heuristic
                    #return self._assign2v2_minimamTotalDistance(observableList)
                    return self._assign2v2_trail_abreast(observableList,primaryAxis)
                    #return self._assign2v2_northSouth(observableList)
| StarcoderdataPython |
1805907 | #!/usr/bin/env python
"""
Freight
=======
A deploy service.
:copyright: (c) 2015 Functional Software Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import os.path
from setuptools import setup, find_packages
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
# Pre-import so their atexit handlers are registered before setuptools runs
# (the workaround described in the comment above).
for m in ('multiprocessing', 'billiard'):
    try:
        __import__(m)
    except ImportError:
        pass
# NOTE(review): ROOT is computed but never used below — confirm before removing.
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
# Requirement files hold one requirement specifier per line.
with open('requirements-test.txt') as file:
    tests_require = file.read().splitlines()
with open('requirements.txt') as file:
    install_requires = file.read().splitlines()
setup(
    name='freight',
    version='0.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/getsentry/freight',
    description='A deployment service',
    long_description=open('README.rst').read(),
    packages=find_packages(exclude=['tests']),
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        'test': tests_require,
    },
    license='Apache 2.0',
    include_package_data=True,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
| StarcoderdataPython |
# OK autograder configuration for the "bluedog" SQL question: the student's
# lab13.sql is loaded into sqlite and its output must match this transcript.
test = {
  'name': 'bluedog',
  'points': 1,
  'suites': [
    {
      'cases': [
        {
          'code': r"""
          sqlite> SELECT * FROM bluedog;
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          blue|dog
          sqlite> SELECT * FROM bluedog_songs;
          blue|dog|Clair De Lune
          blue|dog|Formation
          blue|dog|Dancing Queen
          blue|dog|Dancing Queen
          blue|dog|Dancing Queen
          blue|dog|Dancing Queen
          blue|dog|Clair De Lune
          blue|dog|Formation
          blue|dog|Never Be Like You
          blue|dog|Formation
          blue|dog|Never Be Like You
          """,
          'hidden': False,
          'locked': False
        }
      ],
      'ordered': False,
      'scored': True,
      'setup': r"""
      sqlite> .read lab13.sql
      """,
      'teardown': '',
      'type': 'sqlite'
    }
  ]
}
| StarcoderdataPython |
6466879 | <filename>app_folder/blueprints/users/views.py
from crypt import methods
from os import abort
from flask import redirect, render_template, request, url_for, flash , current_app, jsonify
from app_folder.blueprints.users import user
from app_folder.blueprints.users.model import User
from flask_login import login_required, current_user
from app_folder.blueprints.users.forms import UpdateCredentials
from app_folder.extensions import db
from flask import abort
from app_folder.blueprints.pages.models import Post
from lib.permissions import Permission
from .forms import EditPostForm
from lib.verify_login import permission_required
@user.route('/profile/<string:username>')
def profile(username):
    """Render a user's public profile with their paginated posts.

    Bug fix: the original read ``user.profile_view_count`` before checking
    whether the user exists, raising AttributeError on unknown usernames
    instead of returning 404.
    """
    page = request.args.get('page', 1, int)
    user = User.query.filter_by(username=username).first()
    if user is None:
        abort(404)
    # Count a profile view only for visitors other than the owner.
    if user != current_user:
        if user.profile_view_count is None:
            user.profile_view_count = 1
        else:
            user.profile_view_count += 1
        db.session.commit()
    pagination = Post.query.filter_by(user=user).order_by(Post.date_posted.desc()).paginate(page, current_app.config['LEGIT_POST_PER_PAGE'], error_out=False)
    posts = pagination.items
    return render_template('profile.html', user=user, posts=posts, pagination=pagination)
@user.route('/settings', methods=['GET', 'POST'])
@login_required
def settings():
    """Edit the current user's profile and credentials.

    Bug fix: the password assignment contained a corrupted placeholder
    (``<PASSWORD>.password.data``), which is not valid Python; restored to
    ``form.password.data``.
    """
    form = UpdateCredentials()
    if form.validate_on_submit():
        current_user.password = form.password.data
        current_user.email = form.email.data
        current_user.course = form.course.data
        current_user.education = form.education.data
        current_user.about_me = form.about_me.data
        current_user.surname = form.surname.data
        current_user.first_name = form.first_name.data
        current_user.headline = form.headline.data
        db.session.commit()
        flash('Profile updated', 'success')
        return redirect(url_for('user.profile', username=current_user.username))
    # Pre-populate the form with current values on GET / failed validation.
    form.surname.data = current_user.surname
    form.username.data = current_user.username
    form.first_name.data = current_user.first_name
    form.email.data = current_user.email
    form.course.data = current_user.course
    form.education.data = current_user.education
    form.about_me.data = current_user.about_me
    form.headline.data = current_user.headline
    return render_template('settings.html', form=form)
@user.route('/post/<int:id>')
@login_required
def post(id):
    """Show a single post; viewing someone else's post updates the
    author's aggregated post-view tally."""
    selected = Post.query.get_or_404(id)
    if selected.user != current_user:
        selected.user.get_user_total_post_views()
    return render_template('post.html', posts=[selected])
@user.route('/edit_post/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_post(id):
    """Let a post's author (or a user with WRITE_ARTICLES) edit its content."""
    post = Post.query.get_or_404(id)
    # Same condition as before, rewritten via De Morgan: allowed when author
    # OR privileged writer.
    allowed = current_user == post.user or current_user.can(Permission.WRITE_ARTICLES)
    if not allowed:
        abort(404)
    form = EditPostForm()
    if form.validate_on_submit():
        post.content = form.content.data
        db.session.commit()
        flash('Your post has been updated', 'success')
        return redirect(url_for('user.post', id=post.id))
    form.content.data = post.content
    return render_template('edit_post.html', form=form, post=post)
@user.route('/follow/<string:username>', methods=['GET'])
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow ``username``.

    Bug fix: a leftover debug ``print`` called ``is_following(user)`` before
    the ``user is None`` check; the None case is now handled first and the
    debug print removed.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user', 'info')
        return redirect(url_for('page.home'))
    if current_user.is_following(user):
        flash('You\'re already following this user', 'info')
        return redirect(url_for('user.profile', username=user.username))
    current_user.follow(user)
    flash(f'You\'re now following {user.username}', 'success')
    return redirect(url_for('user.profile', username=user.username))
@user.route('/unfollow/<string:username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user unfollow ``username``.

    Bug fixes: ``url_for('/')`` is not a valid endpoint name (url_for takes
    endpoint names, not paths) — replaced with ``page.home`` as used by the
    ``follow`` view; and the "exists but not followed" case previously fell
    off the end of the function, making Flask raise because the view
    returned None.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user', 'warning')
        return redirect(url_for('page.home'))
    if not current_user.is_following(user):
        flash('You\'re not following this user', 'info')
        return redirect(url_for('user.profile', username=user.username))
    current_user.unfollow(user)
    flash(f'You\'ve unfollow {user.username}.', 'success')
    return redirect(url_for('user.profile', username=user.username))
@user.route('/followers/<string:username>')
def followers(username):
    """Show paginated follower and followed lists for ``username``.

    Bug fix: the "User not found" flash used the 'success' category; changed
    to 'warning' to match the unfollow handler's error styling.
    """
    user = User.query.filter_by(username=username).first()
    page = request.args.get('page', 1, int)
    if user is None:
        flash('User not found', 'warning')
        return redirect(url_for('page.home'))
    follower_pagination = user.followers.paginate(page, current_app.config['LEGIT_FOLLOWERS_PER_PAGE'], error_out=False)
    followed_pagination = user.followed.paginate(page, current_app.config['LEGIT_FOLLOWERS_PER_PAGE'], error_out=False)
    # Flatten the association rows into template-friendly dicts.
    follower = [{'user': item.follower, 'timestamp': item.timestamp} for item in follower_pagination.items]
    followed = [{'user': item.followed, 'timestamp': item.timestamp} for item in followed_pagination.items]
    return render_template('followers.html', follower=follower, follower_pagination=follower_pagination, followed_pagination=followed_pagination, followed=followed, user=user)
@user.route('/network/<string:username>')
@login_required
def network(username):
    """Show the logged-in user's own follower/followed network.

    Bug fixes: the template was handed ``user=user`` where ``user`` is the
    module-level blueprint object imported at the top of this file, not a
    User — replaced with ``current_user``.  The manual is_authenticated
    check was redundant under ``@login_required`` and was removed.
    """
    page = request.args.get('page', 1, int)
    follower_pagination = current_user.followers.paginate(page, current_app.config['LEGIT_FOLLOWERS_PER_PAGE'], error_out=False)
    followed_pagination = current_user.followed.paginate(page, current_app.config['LEGIT_FOLLOWERS_PER_PAGE'], error_out=False)
    follower = [{'user': item.follower, 'timestamp': item.timestamp} for item in follower_pagination.items]
    followed = [{'user': item.followed, 'timestamp': item.timestamp} for item in followed_pagination.items]
    return render_template('followers.html', follower=follower, follower_pagination=follower_pagination, followed_pagination=followed_pagination, followed=followed, user=current_user)
| StarcoderdataPython |
4954320 | # -*- coding: utf-8 -*-
# Time : 2021/12/16 16:10
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
from .core import EmailRelay, Chrome
def get_verification_code(
        link: str,
        chromedriver_path: str = None,
        driver: Chrome = None,
        silence: bool = True
) -> str:
    """
    Listen for incoming mail and extract the e-mail verification code.

    Call get_email_context() first to obtain the mailbox account.
    :param driver: optional pre-built Chrome driver.  NOTE(review): the
        ``finally`` below quits this driver even when the caller supplied
        it — confirm callers do not reuse it afterwards.
    :param chromedriver_path: path to the chromedriver binary
        (defaults to "chromedriver" on PATH when None)
    :param silence: presumably runs the browser headless — TODO confirm
        against EmailRelay.set_spider_option()
    :param link: mailbox fingerprint link, wrapped in the return value of
        get_email_context()
    :return: the verification code string
    """
    chromedriver_path = "chromedriver" if chromedriver_path is None else chromedriver_path
    er = EmailRelay(
        url=link,
        chromedriver_path=chromedriver_path,
        silence=silence
    )
    api = er.set_spider_option() if driver is None else driver
    try:
        # Navigate to the mapped registration site.
        er.get_html_handle(api, er.register_url)
        # Wait for a new mail to arrive.
        er.check_receive(api)
        # Switch to the mail body page.
        er.switch_to_mail(api)
        # Extract the numeric verification code.
        verification_code = er.get_number(api)
        return verification_code
    finally:
        api.quit()
def get_email_context(
        chromedriver_path: str = None,
        silence: bool = True
) -> dict:
    """
    Produce a temporary mailbox with fingerprint characteristics.

    :param chromedriver_path: path to the chromedriver binary
        (defaults to "chromedriver" on PATH when None)
    :param silence: presumably runs the browser headless — TODO confirm
        against EmailRelay.set_spider_option()
    :return: a context dict with ``email``, ``id`` and ``link`` entries;
        ``link`` is the mailbox fingerprint URL for get_verification_code()
    """
    chromedriver_path = "chromedriver" if chromedriver_path is None else chromedriver_path
    er = EmailRelay(
        chromedriver_path=chromedriver_path,
        silence=silence
    )
    api = er.set_spider_option()
    try:
        # Navigate to the mapped registration site.
        er.get_html_handle(api, er.register_url)
        # Obtain a random fingerprint mailbox.
        er.get_temp_email(api)
        # Wrap the runtime context in a dict.
        context = {
            "email": er.email_driver,
            "id": er.email_id,
            "link": er.mailbox_link.format(er.email_id),
        }
        return context
    finally:
        api.quit()
| StarcoderdataPython |
180391 | import os
import sys
import logging
import wyze_sdk
from wyze_sdk import Client
from wyze_sdk.errors import WyzeApiError
# Sweep the named rooms with a Wyze robot vacuum (model JA_RO2).
# Usage: script.py wyze_email wyze_password robovac_nickname roomname [roomname...]
if len(sys.argv) < 5:
    sys.stdout = sys.stderr
    print(f"USAGE: {sys.argv[0]} wyze_email wyze_password robovac_nickname roomname [roomname...]")
    quit(1)

# Bug fix: the original index loop started one argument too early and
# included the vacuum nickname (argv[3]) in the room list.
rooms2clean = sys.argv[4:]

# Bug fix: the password argument contained a corrupted placeholder
# (``<PASSWORD>[2]``); restored to ``sys.argv[2]``.
client = Client(email=sys.argv[1], password=sys.argv[2])
roboVacNickname = sys.argv[3]

# Find the MAC address of the robot vacuum with the requested nickname.
# Bug fix: device_mac was never initialized, so the "Not_Set" check below
# raised NameError when no matching vacuum existed.
device_mac = "Not_Set"
for device in client.devices_list():
    if device.product.model == "JA_RO2":
        if device.nickname == roboVacNickname:
            device_mac = device.mac

if device_mac == "Not_Set":
    sys.stdout = sys.stderr
    print(f"Vacuum not found in list of Wyze devices...")
    quit(1)

try:
    vacuum = client.vacuums.info(device_mac=device_mac)
    from wyze_sdk.models.devices import VacuumMode
    if vacuum.mode == VacuumMode.SWEEPING:
        sys.stdout = sys.stderr
        print(f"RoboVac is already sweeping. Stop the current sweep and try again...")
        quit(1)
    # Translate requested room names into the vacuum map's room ids.
    room_ids = []
    for room in vacuum.current_map.rooms:
        for roomname in rooms2clean:
            if room.name == roomname:
                room_ids.append(room.id)
    client.vacuums.sweep_rooms(device_mac=device_mac, room_ids=room_ids)
    print(f"Sweeping started successfully...")
    quit(0)
except WyzeApiError as e:
    # You will get a WyzeApiError if the request failed
    sys.stdout = sys.stderr
    print(f"Got an error: {e}")
| StarcoderdataPython |
8119259 | from typing import List
class Solution:
    """LeetCode 480 — Sliding Window Median, brute force: sort each window."""

    def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
        """Return the median of every length-k window of nums.

        Odd k yields the middle element as-is; even k yields the mean of the
        two middle elements as a float.  O((n-k+1) * k log k) overall.
        """
        half = k // 2
        medians = []
        for start in range(len(nums) - k + 1):
            window = sorted(nums[start:start + k])
            if k % 2:
                medians.append(window[half])
            else:
                medians.append((window[half - 1] + window[half]) / 2)
        return medians
if __name__ == '__main__':
    # Simple REPL driver: forever reads a comma-separated integer list and a
    # window size, printing the sliding-window medians (stop with EOF/Ctrl-C).
    s = Solution()
    while 1:
        nums = list(map(int, input().split(',')))
        k = int(input())
        print(s.medianSlidingWindow(nums, k))
''''测试用例
1,3,-1,-3,5,3,6,7
3
1,4,2,3
4
'''
| StarcoderdataPython |
3349569 | <filename>test/operations/test_crossover.py
"""
"Genyal" (c) by <NAME>.
"Genyal" is licensed under a
Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by/4.0/>.
"""
import random
import string
import sys
import unittest
from typing import Tuple
import pytest
from genyal.genotype import GeneFactory
from genyal.individuals import Individual
from genyal.operations.crossover import CrossoverError, single_point_crossover
@pytest.mark.repeat(16)
def test_not_matching_couples(ascii_gene_factory: GeneFactory[str],
                              random_generator: random.Random, seed: int):
    """Crossover of individuals with different gene counts must raise
    CrossoverError whose cause names both sizes."""
    individual = Individual(gene_factory=ascii_gene_factory, random_generator=random_generator)
    individual.set(number_of_genes=random_generator.randint(1, 32))
    # Pick a size guaranteed to differ from the first individual's.
    unmatching_size = random_generator.randint(1, 32)
    while len(individual) == unmatching_size:
        unmatching_size = random_generator.randint(1, 32)
    erroneous_individual = Individual(gene_factory=ascii_gene_factory,
                                      random_generator=random_generator)
    erroneous_individual.set(unmatching_size)
    with pytest.raises(CrossoverError) as error:
        single_point_crossover(individual, erroneous_individual)
    assert unmatching_error_msg(len(individual), len(
        erroneous_individual)) in error.value.cause, f"Test failed with seed: {seed}"
@pytest.mark.repeat(32)
def test_single_point_crossover(couple: Tuple[Individual, Individual], seed: int):
    """Offspring genes must be parent0's prefix up to the (seeded) cut point
    followed by parent1's suffix."""
    # Reproduce the cut point by seeding an identical generator.
    expected_cut_point = random.Random(seed).randrange(0, len(couple[0]))
    couple[0].random_generator = random.Random(seed)
    offspring = single_point_crossover(couple[0], couple[1])
    assert offspring.genes == (
        couple[0].genes[:expected_cut_point] + couple[1].genes[expected_cut_point:])
@pytest.fixture
def couple(ascii_gene_factory: GeneFactory[str], random_generator: random.Random) \
        -> Tuple[Individual, Individual]:
    """A pair of individuals sharing the same (random) number of genes."""
    couple = (Individual(gene_factory=ascii_gene_factory, random_generator=random_generator),
              Individual(gene_factory=ascii_gene_factory, random_generator=random_generator))
    number_of_genes = random_generator.randint(1, 64)
    couple[0].set(number_of_genes)
    couple[1].set(number_of_genes)
    return couple
@pytest.fixture
def ascii_gene_factory(random_generator: random.Random) -> GeneFactory[str]:
    """A gene factory producing random lowercase ASCII letters."""
    factory = GeneFactory[str]()
    factory.generator = lambda: random_generator.choice(string.ascii_lowercase)
    return factory
@pytest.fixture()
def random_generator(seed: int) -> random.Random:
    """The random number generator used in the tests (freshly seeded per test)."""
    return random.Random(seed)
@pytest.fixture
def seed() -> int:
    """The seed used by the tests (random per test; failures report it)."""
    return random.randint(-sys.maxsize, sys.maxsize)
def unmatching_error_msg(size_1: int, size_2) -> str:
    """Build the expected size-mismatch message for CrossoverError causes."""
    template = "Can't perform a crossover over individuals of different sizes. {} != {}."
    return template.format(size_1, size_2)
if __name__ == '__main__':
    # NOTE(review): these tests are pytest-style functions/fixtures;
    # unittest.main() will likely collect nothing — confirm this entry point
    # is intentional (running via `pytest` is the working path).
    unittest.main()
| StarcoderdataPython |
64626 | import os, sys
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# For each noise level, cluster the trained SOM weight vectors (784-d, i.e.
# flattened 28x28 MNIST patches) into 10 groups and display the centroids.
percentages = [0.01, 0.1, 0.2, 0.4, 0.5, 0.6]
for percentage in percentages:
    data = []  # NOTE(review): unused — confirm before removing
    save_path = '../logs/SOM_weights_MNIST_noise_{}.npy'.format(percentage)
    wts = np.load(save_path).reshape(-1, 784)
    print ("============{}============".format(wts.shape))
    kmeans = KMeans(n_clusters=10).fit(wts)
    centers = kmeans.cluster_centers_
    # Show the 10 cluster centers in a 2x5 grid, one figure per noise level.
    for i in range(2):
        for j in range(5):
            plt.subplot(2, 5, i*5 + j + 1)
            plt.imshow(centers[i*5+j].reshape(28, 28).T)
            if (i == 0) and (j == 0): plt.title("MNIST Noise {}".format(percentage))
    plt.show()
| StarcoderdataPython |
131580 | <filename>test/test_fillgaps_lowpass_2d.py
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
import gridtools.gapfilling as gtg
GAP = np.nan  # sentinel marking a missing grid cell
KERNEL = np.ones((3, 3), dtype=np.float64)  # uniform 3x3 averaging kernel
class FillgapsLowpass2d(unittest.TestCase):
    """Tests for gtg.fillgaps_lowpass_2d with the uniform 3x3 KERNEL.

    Each test feeds a small grid containing GAP (NaN) cells and checks both
    the filled output and the number of gaps filled; the ``_F*_`` locals spell
    out the expected neighbourhood averages (later ``F`` values build on
    earlier fills, reflecting the filter's iterative behaviour).
    """
    def _test_fillgaps(self, src, desired_out, desired_gaps_filled):
        """Run the filter on ``src`` and assert output grid and fill count."""
        src = np.array(src)
        gc1 = gtg.count_gaps(src)
        actual_out = gtg.fillgaps_lowpass_2d(src, kernel=KERNEL)
        gc2 = gtg.count_gaps(actual_out)
        actual_gaps_filled = gc1 - gc2
        assert_almost_equal(actual_out, np.array(desired_out))
        self.assertEqual(actual_gaps_filled, desired_gaps_filled)
    def test_0_missing(self):
        # A complete grid must pass through unchanged.
        self._test_fillgaps([[1.0, 2.0],
                             [3.0, 4.0]],
                            [[1.0, 2.0],
                             [3.0, 4.0]], 0)
    def test_1_missing(self):
        # An all-gap grid has no neighbours to average: nothing is filled.
        self._test_fillgaps([[GAP]],
                            [[GAP]], 0)
        _F_ = (2 + 3 + 4) / 3.
        self._test_fillgaps([[GAP, 2.0],
                             [3.0, 4.0]],
                            [[_F_, 2.0],
                             [3.0, 4.0]], 1)
        _F_ = (1 + 2 + 3) / 3.
        self._test_fillgaps([[1.0, 2.0],
                             [3.0, GAP]],
                            [[1.0, 2.0],
                             [3.0, _F_]], 1)
        _F_ = (1 + 2 + 3 + 4 + 6 + 7 + 8 + 9) / 8.
        self._test_fillgaps([[1.0, 2.0, 3.0],
                             [4.0, GAP, 6.0],
                             [7.0, 8.0, 9.0]],
                            [[1.0, 2.0, 3.0],
                             [4.0, _F_, 6.0],
                             [7.0, 8.0, 9.0]], 1)
    def test_2_missing(self):
        self._test_fillgaps([[GAP, GAP]],
                            [[GAP, GAP]], 0)
        F1_ = (2 + 3) / 2.
        F2_ = (2 + 3) / 2.
        self._test_fillgaps([[GAP, 2.0],
                             [3.0, GAP]],
                            [[F1_, 2.0],
                             [3.0, F2_]], 2)
        F1_ = (2 + 4) / 2.
        F2_ = (2 + 3 + 4 + 6 + 7 + 8 + 9) / 7.
        self._test_fillgaps([[GAP, 2.0, 3.0],
                             [4.0, GAP, 6.0],
                             [7.0, 8.0, 9.0]],
                            [[F1_, 2.0, 3.0],
                             [4.0, F2_, 6.0],
                             [7.0, 8.0, 9.0]], 2)
    def test_3_missing(self):
        self._test_fillgaps([[GAP, GAP],
                             [GAP, 4.0]],
                            [[4.0, 4.0],
                             [4.0, 4.0]], 3)
        F1_ = 2.
        F2_ = (2 + 7 + 8) / 3.
        F3_ = (2 + 3 + 6 + 7 + 8 + 9) / 6.
        self._test_fillgaps([[GAP, 2.0, 3.0],
                             [GAP, GAP, 6.0],
                             [7.0, 8.0, 9.0]],
                            [[F1_, 2.0, 3.0],
                             [F2_, F3_, 6.0],
                             [7.0, 8.0, 9.0]], 3)
    def test_4_missing(self):
        self._test_fillgaps([[GAP, GAP],
                             [GAP, GAP]],
                            [[GAP, GAP],
                             [GAP, GAP]], 0)
        # F4_ depends on previously filled cells: iterative filling.
        F1_ = (3 + 6) / 2.
        F2_ = (7 + 8) / 2.
        F3_ = (3 + 6 + 7 + 8 + 9) / 5.
        F4_ = (F1_ + F2_ + F3_) / 3.
        self._test_fillgaps([[GAP, GAP, 3.0],
                             [GAP, GAP, 6.0],
                             [7.0, 8.0, 9.0]],
                            [[F4_, F1_, 3.0],
                             [F2_, F3_, 6.0],
                             [7.0, 8.0, 9.0]], 4)
    def test_9_missing(self):
        # A 3x3 hole: fills propagate inward over several passes.
        F1_ = (1 + 2 + 3 + 4 + 5 + 9) / 6.
        F2_ = (2 + 3 + 4) / 3.
        F3_ = (3 + 4) / 2.
        F4_ = (5 + 9 + 13) / 3.
        F5_ = (9 + 13) / 2.
        F6_ = (F1_ + F2_ + F3_ + F4_ + F5_) / 5.
        F7_ = (F2_ + F3_) / 2.
        F8_ = (F4_ + F5_) / 2.
        F9_ = (F6_ + F7_ + F8_) / 3.
        self._test_fillgaps([[1.0, 2.0, 3.0, 4.0],
                             [5.0, GAP, GAP, GAP],
                             [9.0, GAP, GAP, GAP],
                             [13., GAP, GAP, GAP]],
                            [[1.0, 2.0, 3.0, 4.0],
                             [5.0, F1_, F2_, F3_],
                             [9.0, F4_, F6_, F7_],
                             [13., F5_, F8_, F9_]], 9)
| StarcoderdataPython |
267821 | import torch
import torch.nn as nn
from .utils.summary import summary as model_summary
from tensornet.engine.learner import Learner
class BaseModel(nn.Module):
    """``nn.Module`` base class that bundles a tensornet ``Learner`` for
    training plus summary, save and load helpers.  Subclasses implement
    ``forward``."""
    def __init__(self):
        """This function instantiates all the model layers."""
        super(BaseModel, self).__init__()
        self.learner = None  # assigned by create_learner()/set_learner()
    def forward(self, x):
        """This function defines the forward pass of the model.
        Args:
            x: Input.
        Returns:
            Model output.
        """
        raise NotImplementedError
    def summary(self, input_size):
        """Generates model summary.
        Args:
            input_size (tuple): Size of input to the model.
        """
        model_summary(self, input_size)
    def create_learner(
        self, train_loader, optimizer, criterion, device='cpu',
        epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None,
        activate_loss_logits=False, record_train=True
    ):
        """Create Learner object.
        Args:
            train_loader (torch.utils.data.DataLoader): Training data loader.
            optimizer (torch.optim): Optimizer for the model.
            criterion (torch.nn): Loss Function.
            device (str or torch.device): Device where the data
                will be loaded.
            epochs (int, optional): Numbers of epochs to train the model. (default: 1)
            l1_factor (float, optional): L1 regularization factor. (default: 0)
            val_loader (torch.utils.data.DataLoader, optional): Validation data
                loader. (default: None)
            callbacks (list, optional): List of callbacks to be used during training.
                (default: None)
            metrics (list of str, optional): List of names of the metrics for model
                evaluation. (default: None)
            activate_loss_logits (bool, optional): Passed through to Learner.
                (default: False)
            record_train (bool, optional): Passed through to Learner.
                (default: True)
        """
        self.learner = Learner(
            train_loader, optimizer, criterion, device=device, epochs=epochs,
            val_loader=val_loader, l1_factor=l1_factor, callbacks=callbacks, metrics=metrics,
            activate_loss_logits=activate_loss_logits, record_train=record_train
        )
        self.learner.set_model(self)
    def set_learner(self, learner):
        """Assign a learner object to the model.
        Args:
            learner (Learner): Learner object.
        """
        self.learner = learner
        self.learner.set_model(self)
    def fit(self, *args, start_epoch=1, **kwargs):
        """Train the model.
        Extra positional/keyword arguments are forwarded to create_learner()
        when no learner has been set yet.
        Args:
            start_epoch (int, optional): Start epoch for training.
                (default: 1)
        """
        # Check learner
        if self.learner is None:
            print('Creating a learner object.')
            self.create_learner(*args, **kwargs)
        # Train Model
        self.learner.fit(start_epoch=start_epoch)
    def save_learnable(self, filepath, **kwargs):
        """Save the model and optimizer state dicts (requires a learner).
        Args:
            filepath (str): File in which the model will be saved.
            **kwargs (optional): Additional parameters to save with the model.
        """
        if self.learner is None:
            raise ValueError('Cannot save un-trained model.')
        torch.save({
            'model_state_dict': self.state_dict(),
            'optimizer_state_dict': self.learner.optimizer.state_dict(),
            **kwargs
        }, filepath)
    def save(self, filepath):
        """Save the entire model object (not just the state dict).
        Args:
            filepath (str): File in which the model will be saved.
        """
        torch.save(self, filepath)
    def load(self, filepath):
        """Load model weights from a checkpoint saved by save_learnable().
        Args:
            filepath (str): File in which the model was saved.
        Returns:
            Parameters saved inside the checkpoint file (everything except
            the model state dict itself).
        """
        checkpoint = torch.load(filepath)
        self.load_state_dict(checkpoint['model_state_dict'])
        return {
            k: v for k, v in checkpoint.items() if k != 'model_state_dict'
        }
| StarcoderdataPython |
def luas_segitiga (a, t):
    """Return the area of a triangle with base ``a`` and height ``t``."""
    return (a * t) / 2
def luas_trapesium (a, b, t):
    """Return the area of a trapezoid with parallel sides ``a``, ``b`` and height ``t``."""
    return 0.5 * t * (a + b)
def mulai ():
    """Interactive menu: compute a triangle or trapezoid area from stdin.

    Bug fix: ``input()`` returns a string in Python 3, but the original
    compared it to the ints 1 and 2, so the "Inputan tidak valid!" branch
    always ran regardless of the choice.  Compare against "1"/"2" instead.
    """
    print( "Pilih menu berikut:" )
    print( "[1] <NAME>" )
    print( "[2] <NAME>")
    # NOTE(review): the prompt mentions option 3 but only 1 and 2 exist —
    # confirm the intended menu before changing the user-facing text.
    print( "inputkan pilihan (1, 2, atau 3):" )
    inp = input()
    if inp == "1":
        print( "Inputkan alas dan tinggi:" )
        a = int(input())
        t = int(input())
        print(luas_segitiga(a, t))
    elif inp == "2":
        print( "Inputkan dua sisi sejajar dan tinggi:" )
        a = int(input())
        b = int(input())
        t = int(input())
        print(luas_trapesium(a, b, t))
    else:
        print( "Inputan tidak valid!" )
# Script entry: run the interactive menu once.
print( "Memulai program..." )
mulai()
print( "Program selesai..." )
| StarcoderdataPython |
1830613 | from flask_restful import Resource, reqparse
from server.controllers.tickets import *
from server.controllers.users import *
from server.api.v1 import return_failure, return_success, require_login
from typing import cast
# Request parser for ticket creation: requires a 'data' payload field.
CREATE_PARSER = reqparse.RequestParser(bundle_errors=True)
CREATE_PARSER.add_argument('data',
                           help='Needs data',
                           required=True)
class TicketCreate(Resource):
    """Create a new ticket from the posted payload."""

    def get(self):
        # Creation mutates state, so only POST is accepted.
        return return_failure("Please use post requests")

    @require_login(CREATE_PARSER)
    def post(self, data, user):
        new_ticket = create_ticket(user, data['data'])
        if new_ticket is None:
            return return_failure("could not create ticket")
        return return_success({'ticket': new_ticket.json()})
# Request parser for ticket state changes: requires a 'ticket_id' field.
TICKET_PARSER = reqparse.RequestParser(bundle_errors=True)
TICKET_PARSER.add_argument('ticket_id',
                           help='Need ticket',
                           required=True)
class TicketClaim(Resource):
    """Claim an open ticket for the authenticated user."""

    def get(self):
        return return_failure("Please use post requests")

    @require_login(TICKET_PARSER)
    def post(self, data, user):
        target = get_ticket(data["ticket_id"])
        if target is None:
            return return_failure("ticket not found")
        if not claim_ticket(user, target):
            return return_failure("could not claim ticket")
        return return_success({'ticket': target.json()})
class TicketUnclaim(Resource):
    """Release a ticket previously claimed by the authenticated user."""

    def get(self):
        return return_failure("Please use post requests")

    @require_login(TICKET_PARSER)
    def post(self, data, user):
        target = get_ticket(data["ticket_id"])
        if target is None:
            return return_failure("ticket not found")
        if not unclaim_ticket(user, target):
            return return_failure("could not unclaim ticket")
        return return_success({'ticket': target.json()})
class TicketClose(Resource):
    """Close a ticket (e.g. once resolved)."""

    def get(self):
        return return_failure("Please use post requests")

    @require_login(TICKET_PARSER)
    def post(self, data, user):
        target = get_ticket(data["ticket_id"])
        if target is None:
            return return_failure("ticket not found")
        if not close_ticket(user, target):
            return return_failure("could not close ticket")
        return return_success({'ticket': target.json()})
class TicketCancel(Resource):
    """Cancel a ticket (withdraw it rather than resolve it)."""

    def get(self):
        return return_failure("Please use post requests")

    @require_login(TICKET_PARSER)
    def post(self, data, user):
        target = get_ticket(data["ticket_id"])
        if target is None:
            return return_failure("ticket not found")
        if not cancel_ticket(user, target):
            return return_failure("could not cancel ticket")
        return return_success({'ticket': target.json()})
# Request parser for ticket rating: requires 'ticket_id' and 'rating'.
TICKET_RATE_PARSER = reqparse.RequestParser(bundle_errors=True)
TICKET_RATE_PARSER.add_argument('ticket_id',
                                help='Need ticket',
                                required=True)
TICKET_RATE_PARSER.add_argument('rating',
                                help='Need to assign rating',
                                required=True)
class TicketRate(Resource):
    """Attach a rating to a ticket.

    Bug fix: the failure message said "could not cancel ticket" (copy-paste
    from TicketCancel); it now describes the rate operation.
    """

    def get(self):
        return return_failure("Please use post requests")

    @require_login(TICKET_RATE_PARSER)
    def post(self, data, user):
        ticket = get_ticket(data["ticket_id"])
        if ticket is None:
            return return_failure("ticket not found")
        if rate_ticket(user, ticket, data["rating"]):
            return return_success({'ticket': ticket.json()})
        return return_failure("could not rate ticket")
| StarcoderdataPython |
6438750 | <filename>housing_affordability_18/apps.py
from django.apps import AppConfig
class HousingAffordability18Config(AppConfig):
    """Django application configuration for the housing_affordability_18 app."""
    name = 'housing_affordability_18'
| StarcoderdataPython |
1840212 | import os
import re
import numpy
from ObjectDetectionParser import ObjectDetectionParser
# from ObjectDetectionParser import ImageRaw
from SupportClasses import PrecisionAndRecall
from SupportClasses import GoldContent
from SupportClasses import Rectangle
class FasterRcnnParser(ObjectDetectionParser):
# __iterations = None
__board = None
    def __init__(self, **kwargs):
        # Forward all options to the base parser, then set this parser's
        # detection confidence threshold.
        ObjectDetectionParser.__init__(self, **kwargs)
        self._detectionThreshold = 0.3
# parse PyFaster
def parseErrMethod(self, errString):
# print errString
ret = {}
if 'box' in errString:
dictBox,imgPath = self._processBoxes(errString)
if dictBox:
ret["boxes"] = dictBox
ret["img_path"] = imgPath
elif 'score' in errString:
dictScore,imgPath = self._processScores(errString)
if dictScore:
ret["scores"] = dictScore
ret["img_path"] = imgPath
return (ret if len(ret) > 0 else None)
    def _processScores(self, errString):
        """Parse a PyFaster score error line (Python 2 code: print statements).

        Two formats are recognised (examples in the comments below): a
        wrong-score-size line, or a per-position expected/read score line.
        Returns (dict-or-None, img_path).
        NOTE(review): in the else branch ``scoreErr`` may be None when the
        second regex also fails; the resulting AttributeError is caught by
        the bare except and re-raised after printing a generic message.
        """
        ret = {}
        # ERR img_name: /home/carol/radiation-benchmarks/data/VOC2012/2011_004360.jpg class: horse wrong_score_size: -17
        # ERR img_name: /home/carol/radiation-benchmarks/data/VOC2012/2011_004360.jpg class: horse score: [0] e: 0.0158654786646 r: 0.00468954769894
        scoreErr = re.match(".*img_name\: (\S+).*"
                            "class\: (\S+).*wrong_score_size\: (\S+).*", errString)
        imgPath = ''
        if scoreErr:
            try:
                ret["wrong_score_size"] = abs(int(scoreErr.group(3)))
            except:
                print "\nerror on parsing wrong_score_size"
                raise
        else:
            scoreErr = re.match(".*img_name\: (\S+).*"
                                "class\: (\S+).*score\: \[(\d+)\].*e\: (\S+).*r\: (\S+).*", errString)
            try:
                ret["score_pos"] = int(scoreErr.group(3))
            except:
                print "\nerror on parsing score pos"
                raise
            try:
                ret["score_e"] = float(scoreErr.group(4))
            except:
                print "\nerror on parsing score pos"
                raise
            try:
                ret["score_r"] = float(scoreErr.group(5))
            except:
                print "\nerror on parsing score read"
                raise
        if scoreErr:
            try:
                imgPath = scoreErr.group(1)
                ret["class"] = scoreErr.group(2)
            except:
                print "\nerror on parsing img_path and class"
                raise
        return (ret if len(ret) > 0 else None), imgPath
def _processBoxes(self, errString):
##ERR img_name: /home/carol/radiation-benchmarks/data/CALTECH/set10/V000/193.jpg
# class: sheep
# box: [8]
# x1_e: 435.740264893 x1_r: 435.782531738
# y1_e: 244.744735718 y1_r: 244.746307373
# x2_e: 610.136474609 x2_r: 610.124450684
# y2_e: 326.088867188 y2_r: 326.093597412
ret = {}
imageErr = re.match(".*img_name\: (\S+).*"
"class\: (\S+).*box\: \[(\d+)\].*"
"x1_e\: (\S+).*x1_r\: (\S+).*"
"y1_e\: (\S+).*y1_r\: (\S+).*"
"x2_e\: (\S+).*x2_r\: (\S+).*"
"y2_e\: (\S+).*y2_r\: (\S+).*", errString)
imgPath = ''
if imageErr:
# ret['generation'] = 1
imgPath = imageErr.group(1)
ret["class"] = imageErr.group(2)
ret["box"] = imageErr.group(3)
# x1
ret["x1_e"] = imageErr.group(4)
try:
long(float(ret["x1_e"]))
except:
ret["x1_e"] = 1e30
ret["x1_r"] = imageErr.group(5)
try:
long(float(ret["x1_r"]))
except:
ret["x1_r"] = 1e30
###########
# y1
ret["y1_e"] = imageErr.group(6)
try:
long(float(ret["y1_e"]))
except:
ret["y1_e"] = 1e30
ret["y1_r"] = imageErr.group(7)
try:
long(float(ret["y1_r"]))
except:
ret["y1_r"] = 1e30
###########
# x2
ret["x2_e"] = imageErr.group(8)
try:
long(float(ret["x2_e"]))
except:
ret["x2_e"] = 1e30
ret["x2_r"] = imageErr.group(9)
try:
long(float(ret["x2_r"]))
except:
ret["x2_r"] = 1e30
############
# y2
ret["y2_e"] = imageErr.group(10)
try:
long(float(ret["y2_e"]))
except:
ret["y2_e"] = 1e30
ret["y2_r"] = imageErr.group(11)
try:
long(float(ret["y2_r"]))
except:
ret["y2_r"] = 1e30
# ERR boxes: [98,12] e: 13.7840118408 r: 13.7840270996
imageErr = re.match(".*boxes\: \[(\d+),(\d+)\].*e\: (\S+).*r\: (\S+).*", errString)
# if imageErr:
# ret['generation'] = 2
# ret["i"] = int(imageErr.group(1))
# ret["j"] = int(imageErr.group(2))
#
# ret["e"] = imageErr.group(3)
# try:
# long(float(ret["e"]))
# except:
# ret["e"] = 1e30
#
# ret["r"] = imageErr.group(4)
# try:
# long(float(ret["r"]))
# except:
# ret["r"] = 1e30
return (ret if len(ret) > 0 else None), imgPath
    def _relativeErrorParser(self, errList):
        """Compare the faulty detections in errList against the gold run.

        Loads (and caches in self._goldDatasetArray) the gold detections for
        this machine/benchmark, patches a copy of the gold image entry with
        the corrupted boxes/scores reported in errList, then computes
        precision/recall between gold and corrupted detections and stores the
        results on self.  Python 2 code (print statements, xrange).
        """
        if len(errList) <= 0:
            return
        goldKey = self._machine + "_" + self._benchmark + "_" + self._goldFileName
        if self._machine in self._goldBaseDir:
            goldPath = self._goldBaseDir[self._machine] + "/py_faster_rcnn/" + self._goldFileName
            # txtPath = self._goldBaseDir[self._machine] + '/networks_img_list/' + os.path.basename(self._imgListPath)
        else:
            print "\n not indexed machine ", self._machine
            return
        # Gold files are parsed once and cached per machine/benchmark key.
        if goldKey not in self._goldDatasetArray:
            g = GoldContent.GoldContent(nn='pyfaster', filepath=goldPath)
            self._goldDatasetArray[goldKey] = g
        # else:
        #     print '\ndid not pass for ', goldKey
        gold = self._goldDatasetArray[goldKey].getPyFasterGold()
        imgPos = errList[0]['img_path']
        imgFilename = self.__setLocalFile(imgPos)
        # imgObj = ImageRaw(imgFilename)
        # to get from gold
        imgFilenameRaw = imgPos.rstrip() if 'radiation-benchmarks' in imgPos else '/home/carol/radiation-benchmarks/data/' + imgPos.rstrip()
        goldImg = gold[imgFilenameRaw]
        # print goldImg
        # Work on a deep copy so the cached gold data is never mutated.
        foundImg = self.__copyGoldImg(goldImg)
        self._wrongElements = 0
        # Apply every reported corruption on top of the gold copy.
        for y in errList:
            #boxes
            if 'boxes' in y:
                cl = str(y['boxes']['class'])
                box = int(y['boxes']['box'])
                x1R = float(y['boxes']['x1_r'])
                x2R = float(y['boxes']['x2_r'])
                y1R = float(y['boxes']['y1_r'])
                y2R = float(y['boxes']['y2_r'])
                r = [x1R, y1R, x2R, y2R]
                # e = [x1E, y1E, x2E, y2E]
                for i in xrange(0,4): foundImg[cl]['boxes'][box][i] = r[i]
            #scores
            if 'scores' in y:
                if 'score_pos' in y['scores']:
                    sR = float(y['scores']['score_r'])
                    sE = float(y['scores']['score_e'])
                    sP = int(y['scores']['score_pos'])
                    cl = str(y['scores']['class'])
                    foundImg[cl]['scores'][sP] = sR
                    # if 0.1 < math.fabs(goldImg[cl]['scores'][sP] - sE):
                    #     print "\nscore mismatch ", goldImg[cl]['scores'][sP], sE
                    #     sys.exit()
        gValidRects, gValidProbs, gValidClasses = self.__generatePyFasterDetection(goldImg)
        fValidRects, fValidProbs, fValidClasses = self.__generatePyFasterDetection(foundImg)
        # print gValidRects
        self._abftType = self._rowDetErrors = self._colDetErrors = 'pyfaster'
        precisionRecallObj = PrecisionAndRecall.PrecisionAndRecall(self._prThreshold)
        gValidSize = len(gValidRects)
        fValidSize = len(fValidRects)
        precisionRecallObj.precisionAndRecallParallel(gValidRects, fValidRects)
        # Both empty means nothing to detect and nothing detected: perfect.
        if len(gValidRects) == 0 and len(fValidRects) == 0:
            self._precision = 1
            self._recall = 1
        else:
            self._precision = precisionRecallObj.getPrecision()
            self._recall = precisionRecallObj.getRecall()
        # Only render a comparison image when something actually differed.
        if self._imgOutputDir and (self._precision != 1 or self._recall != 1):
            self.buildImageMethod(imgFilename.rstrip(), gValidRects, fValidRects, str(self._sdcIteration)
                                  + '_' + self._logFileName, self._imgOutputDir)
        self._falseNegative = precisionRecallObj.getFalseNegative()
        self._falsePositive = precisionRecallObj.getFalsePositive()
        self._truePositive = precisionRecallObj.getTruePositive()
        # set all
        self._goldLines = gValidSize
        self._detectedLines = fValidSize
        self._precisionAndRecallClasses(fValidClasses, gValidClasses)
def __generatePyFasterDetection(self, detection):
rects = []
probs = []
classes = []
for cls_ind, cls in enumerate(self._classes[1:]):
valueDet = detection[cls]
scores = valueDet['scores']
box = valueDet['boxes']
for pb, bbox in zip(scores, box):
if float(pb) >= self._detectionThreshold:
probs.append(float(pb))
l = int(float(bbox[0]))
b = int(float(bbox[1]))
w = int(float(bbox[2]) - l)
h = int(float(bbox[3]) - b)
rect = Rectangle.Rectangle(l, b, w, h)
rects.append(rect)
classes.append(cls)
return rects, probs, classes
def __copyGoldImg(self, goldImg):
ret = {}
for cls_ind, cls in enumerate(self._classes[1:]):
d = goldImg[cls]
scores = d['scores']
box = d['boxes']
ret[cls] = {'boxes': [], 'scores': []}
for pb, bbox in zip(scores, box):
newBbox = numpy.empty(4, dtype=float)
for i in xrange(0,4): newBbox[i] = bbox[i]
ret[cls]['boxes'].append(newBbox)
ret[cls]['scores'].append(float(pb))
return ret
    def setSize(self, header):
        """Parse a PyFaster log HEADER line and derive run metadata.

        Example header:
        HEADER iterations: 1000 img_list: .../caltech.pedestrians.1K.txt board: K40

        On a match this sets self._iterations (string), self._imgListPath,
        the board name, the gold file name looked up from self._datasets by
        the image-list basename, and a composite self._size identifier.
        A non-matching header leaves all attributes untouched.
        """
        # pyfaster
        # HEADER iterations: 1000 img_list: /home/carol/radiation-benchmarks/data/networks_img_list/caltech.pedestrians.1K.txt board: K40
        m = re.match(".*iterations\: (\d+).*img_list\: (\S+).*board\: (\S+).*", header)
        if m:
            self._iterations = m.group(1)
            self._imgListPath = m.group(2)
            self.__board = m.group(3)
            self._goldFileName = self._datasets[os.path.basename(self._imgListPath)]
            self._size = 'py_faster_' + os.path.basename(self._imgListPath) + '_' + str(self.__board)
def __setLocalFile(self, imgPath):
tmp = ''
if 'radiation-benchmarks' in imgPath:
splited = (imgPath.rstrip()).split('radiation-benchmarks/')[1]
else:
splited = 'data/' + imgPath
tmp = self._localRadiationBench + '/radiation-benchmarks/' + splited
tmp.replace('//', '/')
return tmp
| StarcoderdataPython |
1831370 | <gh_stars>0
from django.http import JsonResponse
from django.core import serializers
from django.shortcuts import render
from api.data.ioe_crawler import run_spider
import json
# runs spider - save first page of exam.ioe.edu.np to new_notices.json
# runs spider - save first page of exam.ioe.edu.np to new_notices.json
def get_new_notifications(request):
    """Scrape exam.ioe.edu.np for fresh notices and return the new ones.

    Runs the crawler (which rewrites api/data/new_notices.json), diffs the
    result against the stored history via get_notifications(), and returns
    the previously unseen notices as ``{"notices": [...]}`` JSON.

    Fixes: removed a leftover ``print('notifications')`` debug statement and
    the surrounding commented-out experiment code.
    """
    run_spider()
    notifications = get_notifications()
    return JsonResponse({'notices': notifications}, status=200)
# gets new notices from new_notices.json using notices.json
# return new notices; new notices are also prepended to notices.json
def get_notifications(what=None):
    """Diff the freshly scraped notices against the stored history.

    Reads api/data/new_notices.json (latest scrape) and
    api/data/notices.json (history).  Every scraped notice that is not
    already in the history is prepended to the history file and returned,
    newest first.

    The optional *what* argument is only echoed to stdout for debugging.

    Fixes: dropped the unused ``enumerate`` index and dead commented-out
    code, and replaced ``what != None`` with the idiomatic ``is not None``.
    """
    with open('api/data/notices.json', 'r') as file:
        old_notices = json.load(file)
    with open('api/data/new_notices.json', 'r') as file:
        newly_scrapped = json.load(file)

    new_notices = []
    # Iterate oldest-first so that insert(0, ...) leaves both lists
    # ordered newest-first.
    for notice in reversed(newly_scrapped):
        if notice not in old_notices:
            old_notices.insert(0, notice)
            new_notices.insert(0, notice)

    print('Adding new notices:\n\t {}'.format(new_notices))
    with open('api/data/notices.json', 'w') as file:
        json.dump(old_notices, file, indent=4)

    if what is not None:
        print(what)
    return new_notices
def get_saved_notifications(request, how_many: int):
    """Return the *how_many* most recent stored IOE notices as JSON."""
    with open('api/data/ioe_notices.json', 'r') as stored:
        saved = json.load(stored)
    return JsonResponse({"notices": saved[:how_many]}, status=200)
| StarcoderdataPython |
4927647 | # Miracle Cube Fragment
# Item IDs
FRAGMENT = 2430112        # Miracle Cube Fragment
POT_SCROLL = 2049401      # Potential Scroll
ADV_POT_SCROLL = 2049400  # Advanced Potential Scroll

# (fragment cost, reward item) tiers, best tier first.
EXCHANGE_TIERS = ((10, ADV_POT_SCROLL), (5, POT_SCROLL))

fragments = sm.getQuantityOfItem(FRAGMENT)
for cost, reward in EXCHANGE_TIERS:
    if fragments >= cost:
        # Hand out the reward only when the player can actually hold it;
        # fragments are consumed together with a successful trade.
        if sm.canHold(reward):
            sm.giveItem(reward)
            sm.consumeItem(FRAGMENT, cost)
        else:
            sm.systemMessage("Make sure you have enough space in your inventory..")
        break
else:
    # Fewer than 5 fragments: no tier applies.
    sm.systemMessage("One must have at least 5 fragments to unleash the magic powers..")
6463814 | from api import app
from api.database import db
from api.models import User, Group, App, Log, Note, ComponentUser, Component, Invite
from flask_restless import APIManager, ProcessingException
from flask_login import current_user, login_fresh
from flask import session
from flask_cors import CORS
# Register every CORS-enabled endpoint through a single extension instance.
# The original code created two CORS objects and bound both to the same
# ``cors`` name, leaving only a dangling reference to the second one; a
# single instance with both resource patterns is equivalent configuration.
cors = CORS(app, resources={r"/api/v2/*": {"origins": "*"},
                            r"/api/component": {"origins": "*"}})
def auth_func(*args, **kw):
    """Flask-Restless preprocessor: reject the request with 401 unless a
    user is logged in (fresh login, authenticated flask-login user, or a
    'user' key stored in the session)."""
    if not login_fresh() and not current_user.is_authenticated:
        # Idiom fix: ``'user' not in session`` instead of ``not 'user' in``.
        if 'user' not in session:
            raise ProcessingException(description='Not authenticated!', code=401)
def is_auth_to_app_edit(*args, **kw):
    """Allow the request only for groups holding the app_edit_all permission."""
    if not current_user.group.app_edit_all:
        raise ProcessingException(description='Not authenticated!', code=401)
def is_auth_to_app_drop(*args, **kw):
    """Allow the request only for groups holding the app_drop permission."""
    if not current_user.group.app_drop:
        raise ProcessingException(description='Not authenticated!', code=401)
def is_auth_to_user_add(*args, **kw):
    """Allow the request only for groups holding the user_add permission."""
    if not current_user.group.user_add:
        raise ProcessingException(description='Not authenticated!', code=401)
def is_auth_to_user_drop(*args, **kw):
    """Allow the request only for groups holding the user_drop permission."""
    if not current_user.group.user_drop:
        raise ProcessingException(description='Not authenticated!', code=401)
def get_logged_user(search_params=None, **kw):
    """Search preprocessor: restrict any query to the logged-in user's id."""
    if search_params is None:
        return
    own_id_filter = dict(name='id', op='eq', val=current_user.get_id())
    # setdefault creates the filter list on first use, then appends.
    search_params.setdefault('filters', []).append(own_id_filter)
def get_app_visible(search_params=None, **kw):
    """Search preprocessor: restrict any query to apps with status == True."""
    if search_params is None:
        return
    visible_filter = dict(name='status', op='eq', val=True)
    # setdefault creates the filter list on first use, then appends.
    search_params.setdefault('filters', []).append(visible_filter)
# Flask-Restless API manager: every endpoint below is generated from a
# SQLAlchemy model.  Global preprocessors require authentication for all
# DELETE/PUT of single resources.
manager = APIManager(app,
                     flask_sqlalchemy_db=db,
                     preprocessors=dict(DELETE_SINGLE=[auth_func],
                                        PUT_SINGLE=[auth_func]))

# /api/user , /api/user/<int>
manager.create_api(User,
                   exclude_columns=['password_hash'],
                   methods=['GET', 'POST', 'DELETE','PUT'],
                   preprocessors=dict(GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func],
                                      POST=[auth_func, is_auth_to_user_add],
                                      DELETE_SINGLE=[is_auth_to_user_drop]),
                   results_per_page=0)

# /api/me , /api/me/<int> -- same User model, but reads are filtered down
# to the currently logged-in user by get_logged_user.
manager.create_api(User,
                   url_prefix='/api/me',
                   methods=['GET', 'POST', 'PUT'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func, get_logged_user],
                                      GET_MANY=[auth_func, get_logged_user]),
                   results_per_page=0)

# /api/group , /api/group/<int>
manager.create_api(Group,
                   exclude_columns=['users.password_hash', 'users.group_id'],
                   methods=['GET', 'POST', 'DELETE','PUT'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)

# /api/app , /api/app/<int>
manager.create_api(App,
                   exclude_columns=['creator.password_hash'],
                   methods=['GET', 'POST', 'DELETE','PUT'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func],
                                      PUT_SINGLE=[is_auth_to_app_edit],
                                      DELETE_SINGLE=[is_auth_to_app_drop]),
                   results_per_page=0)

# /api/v2/app , /api/v2/app/<int> -- public, read-only view of visible apps.
manager.create_api(App,
                   include_columns=['id','name','link','desc', 'img_link', 'order_id', 'beta'],
                   url_prefix='/api/v2',
                   methods=['GET'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[get_app_visible],
                                      GET_MANY=[get_app_visible]),
                   results_per_page=0)

# /api/log , /api/log/<int>
manager.create_api(Log,
                   exclude_columns=['author.password_hash'],
                   methods=['GET', 'POST'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)

# /api/note , /api/note/<int>
manager.create_api(Note,
                   exclude_columns=['owner.password_hash'],
                   methods=['GET', 'POST', 'DELETE'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)

# /api/invite , /api/invite/<int>
manager.create_api(Invite,
                   exclude_columns=['author.password_hash'],
                   methods=['GET', 'POST', 'DELETE'],
                   preprocessors=dict(POST=[auth_func],
                                      GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)

# /api/componentuser , /api/componentuser/<int> (read-only)
manager.create_api(ComponentUser,
                   methods=['GET'],
                   preprocessors=dict(GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)

# /api/component , /api/component/<int>
# NOTE(review): unlike the other writable endpoints, POST here has no
# auth preprocessor -- confirm whether that is intentional (the CORS rule
# at the top of the file suggests it is a public endpoint).
manager.create_api(Component,
                   methods=['GET', 'POST'],
                   preprocessors=dict(GET_SINGLE=[auth_func],
                                      GET_MANY=[auth_func]),
                   results_per_page=0)
# URL routes for the tv app.
# Fix: ``from django.contrib import admin`` was imported twice; the
# duplicate (and a dataset-extraction artifact fused into the first line)
# has been removed.
from django.contrib import admin
from django.urls import path, include
from tv import views

urlpatterns = [
    path('postComment', views.postComment, name="postComment"),
    path("", views.tvHome, name='tvHome'),
    # Catch-all slug route must stay last so fixed routes match first.
    path("<str:slug>", views.tvPost, name='tvPost'),
]
4985032 | import tensorflow as tf
# You'll generate plots of attention in order to see which parts of an image
# our model focuses on during captioning
#import matplotlib.pyplot as plt
# Scikit-learn includes many helpful utilities
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
from tqdm import tqdm, trange
# Batch sizes: one for the InceptionV3 feature-extraction pass, one for
# the caption-training pipeline further below.
BATCH_SIZE_INCEPTION = 64
print("BATCH_SIZE_INCEPTION: ", BATCH_SIZE_INCEPTION) ##
BATCH_SIZE = 1024
print("BATCH_SIZE: ", BATCH_SIZE)
print("Download caption annotation files")
# Download/extract the MS-COCO 2014 caption annotations on first run only.
annotation_folder = '/annotations/'
if not os.path.exists(os.path.abspath('.') + annotation_folder):
    annotation_zip = tf.keras.utils.get_file('captions.zip',
                                             cache_subdir=os.path.abspath('.'),
                                             origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
                                             extract = True)
    annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
    os.remove(annotation_zip)
else:
    annotation_file = os.path.abspath('.') +'/annotations/captions_train2014.json'
print("Download image files")
# Same first-run download for the (large) train2014 image set.
image_folder = '/train2014/'
if not os.path.exists(os.path.abspath('.') + image_folder):
    image_zip = tf.keras.utils.get_file('train2014.zip',
                                        cache_subdir=os.path.abspath('.'),
                                        origin = 'http://images.cocodataset.org/zips/train2014.zip',
                                        extract = True)
    PATH = os.path.dirname(image_zip) + image_folder
    os.remove(image_zip)
else:
    PATH = os.path.abspath('.') + image_folder
print("Read the json file")
with open(annotation_file, 'r') as f:
    annotations = json.load(f)
print("Store captions and image names in vectors")
# Build parallel lists: wrapped caption text and the matching image path.
all_captions = []
all_img_name_vector = []
for annot in annotations['annotations']:
    # <start>/<end> markers delimit each caption for the decoder.
    caption = '<start> ' + annot['caption'] + ' <end>'
    image_id = annot['image_id']
    full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)
    all_img_name_vector.append(full_coco_image_path)
    all_captions.append(caption)
print("Shuffle captions and image_names together")
# Set a random state so the shuffle (and later split) is reproducible.
train_captions, img_name_vector = shuffle(all_captions,
                                          all_img_name_vector,
                                          random_state=1)
def load_image(image_path):
    """Read a JPEG from *image_path*, resize it to InceptionV3's expected
    299x299 input and apply the InceptionV3 preprocessing.

    Returns (preprocessed_image, image_path); the path is carried along so
    the feature-caching loop below knows where to save each result.
    """
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (299, 299))
    img = tf.keras.applications.inception_v3.preprocess_input(img)
    return img, image_path
print("Initialize InceptionV3 and load the pretrained Imagenet weights")
image_model = tf.keras.applications.InceptionV3(include_top=False,
                                                weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
# Feature extractor: image -> last convolutional feature map.
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
print("Caching the features extracted from InceptionV3")
# Get unique images
encode_train = sorted(set(img_name_vector))
# Feel free to change batch_size according to your system configuration
# NOTE(review): the batch size here is hard-coded to 64 even though
# BATCH_SIZE_INCEPTION is defined at the top of the file -- confirm which
# one is meant to drive this pass.
image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
image_dataset = image_dataset.map(
    load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
# Extract each image's feature map once, flatten the spatial dims, and
# cache it next to the image as <image_path>.npy.
for img, path in tqdm(image_dataset):
    batch_features = image_features_extract_model(img)
    batch_features = tf.reshape(batch_features,
                                (batch_features.shape[0], -1, batch_features.shape[3]))
    for bf, p in zip(batch_features, path):
        path_of_feature = p.numpy().decode("utf-8")
        np.save(path_of_feature, bf.numpy())
print("Preprocess and tokenize the captions")
# Find the maximum length of any caption in our dataset
def calc_max_length(tensor):
    """Return the length of the longest sequence in *tensor*."""
    return len(max(tensor, key=len))
print("Choose the top 5000 words from the vocabulary")
# Out-of-vocabulary words map to <unk>; punctuation is stripped by `filters`.
top_k = 5000
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k,
                                                  oov_token="<unk>",
                                                  filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ')
tokenizer.fit_on_texts(train_captions)
train_seqs = tokenizer.texts_to_sequences(train_captions)
# Reserve index 0 for the padding token in both lookup directions.
tokenizer.word_index['<pad>'] = 0
tokenizer.index_word[0] = '<pad>'
print("Create the tokenized vectors")
# NOTE(review): this recomputes the same sequences produced a few lines
# above -- redundant but harmless.
train_seqs = tokenizer.texts_to_sequences(train_captions)
print("Pad each vector to the max_length of the captions")
# If you do not provide a max_length value, pad_sequences calculates it automatically
cap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')
print("Calculates the max_length, which is used to store the attention weights")
max_length = calc_max_length(train_seqs)
print("Create training and validation sets using an 80-20 split")
img_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector,
                                                                    cap_vector,
                                                                    test_size=0.2,
                                                                    random_state=0)
print("Length Image train",len(img_name_train))
print("Length Caption train",len(cap_train))
print("Length Image validation",len(img_name_val))
print("Length Caption validation", len(cap_val))
# Feel free to change these parameters according to your system's configuration
# BUG FIX: the original line ``image_features_extract_model = BATCH_SIZE``
# (annotated "For Titan RTX") rebound the InceptionV3 feature-extractor
# model, built earlier in this file, to a plain int -- destroying the model
# reference for any later use.  It almost certainly meant to tune
# BATCH_SIZE itself and has been removed.
print("BATCH SIZE:", BATCH_SIZE)
BUFFER_SIZE = 1000      # shuffle buffer for the tf.data pipeline
embedding_dim = 256     # word-embedding size
units = 512             # GRU / attention hidden size
vocab_size = top_k + 1  # +1 for the reserved <pad> index 0
num_steps = len(img_name_train) // BATCH_SIZE
# Shape of the vector extracted from InceptionV3 is (64, 2048)
# These two variables represent that vector shape
features_shape = 2048
attention_features_shape = 64
def map_func(img_name, cap):
    """Load the cached InceptionV3 feature tensor for *img_name* and pass
    the caption through unchanged.

    *img_name* is a UTF-8 encoded byte string (as delivered by
    tf.numpy_function) naming the image whose features were saved as
    ``<img_name>.npy`` during the caching pass.
    """
    feature_path = '{}.npy'.format(img_name.decode('utf-8'))
    return np.load(feature_path), cap
# Training pipeline: (image name, caption vector) pairs -> cached feature
# tensors, shuffled, batched and prefetched.
dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
print("Use map to load the numpy files in parallel")
# tf.numpy_function bridges into map_func, which does the actual np.load.
dataset = dataset.map(lambda item1, item2: tf.numpy_function(
    map_func, [item1, item2], [tf.float32, tf.int32]),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
print("Shuffle and batch")
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
print("Model")
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau) attention over the 64 image feature locations.

    Scores each spatial feature against the decoder's hidden state and
    returns the attention-weighted context vector plus the weights.
    """
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)  # projects image features
        self.W2 = tf.keras.layers.Dense(units)  # projects decoder state
        self.V = tf.keras.layers.Dense(1)       # collapses to one score/location
    def call(self, features, hidden):
        """Return (context_vector, attention_weights) for one decode step."""
        # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)
        # hidden shape == (batch_size, hidden_size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden_size)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # score shape == (batch_size, 64, hidden_size)
        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
        # attention_weights shape == (batch_size, 64, 1)
        # you get 1 at the last axis because you are applying score to self.V
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
class CNN_Encoder(tf.keras.Model):
    """Projects pre-extracted InceptionV3 features through one dense layer.

    The image features were already extracted and cached to disk, so this
    "encoder" is just a fully connected layer followed by ReLU; its output
    shape is (batch_size, 64, embedding_dim).
    """

    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        return tf.nn.relu(self.fc(x))
class RNN_Decoder(tf.keras.Model):
    """GRU caption decoder with Bahdanau attention over the image features.

    One call performs a single decode step: it attends over the encoded
    image, embeds the previous token, runs the GRU and projects the output
    to vocabulary logits.
    """
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)  # vocabulary logits
        self.attention = BahdanauAttention(self.units)
    def call(self, x, features, hidden):
        """Return (logits, new_hidden_state, attention_weights) for one step."""
        # defining attention as a separate model
        context_vector, attention_weights = self.attention(features, hidden)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)
        # x shape == (batch_size * max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))
        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)
        return x, state, attention_weights
    def reset_state(self, batch_size):
        """Return a zeroed GRU hidden state for a fresh batch."""
        return tf.zeros((batch_size, self.units))
# Instantiate the model pair, the optimizer and the per-token loss.
# reduction='none' keeps one loss value per token so loss_function can
# mask out <pad> positions before averaging.
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')
def loss_function(real, pred):
    """Masked sparse categorical cross-entropy.

    Positions where the target token is 0 (the <pad> index) contribute
    nothing to the loss; the masked per-token losses are then averaged.
    """
    per_token_loss = loss_object(real, pred)
    pad_mask = tf.math.logical_not(tf.math.equal(real, 0))
    per_token_loss *= tf.cast(pad_mask, dtype=per_token_loss.dtype)
    return tf.reduce_mean(per_token_loss)
print("Checkpoint")
# Checkpoints capture encoder, decoder and optimizer state; training can
# resume from the latest one, whose epoch number is encoded in its name.
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(encoder=encoder,
                           decoder=decoder,
                           optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
start_epoch = 0
if ckpt_manager.latest_checkpoint:
    # Checkpoint names end in "-<epoch>"; resume from there.
    start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
    print("restoring the latest checkpoint in checkpoint_path")
    ckpt.restore(ckpt_manager.latest_checkpoint)
print("Training")
# adding this in a separate cell because if you run the training cell
# many times, the loss_plot array will be reset
loss_plot = []
@tf.function
def train_step(img_tensor, target):
    """Run one teacher-forced training step on a batch.

    Decodes the target caption token by token, feeding the ground-truth
    token (not the prediction) back in at each step, accumulates the
    masked loss, and applies one optimizer update to both models.
    Returns (summed loss, loss averaged over caption length).
    """
    loss = 0
    # initializing the hidden state for each batch
    # because the captions are not related from image to image
    hidden = decoder.reset_state(batch_size=target.shape[0])
    # Every caption starts with the <start> token.
    dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * target.shape[0], 1)
    with tf.GradientTape() as tape:
        features = encoder(img_tensor)
        for i in range(1, target.shape[1]):
            # passing the features through the decoder
            predictions, hidden, _ = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # using teacher forcing
            dec_input = tf.expand_dims(target[:, i], 1)
    total_loss = (loss / int(target.shape[1]))
    trainable_variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    return loss, total_loss
EPOCHS = 20
print("Number of Epochs:", EPOCHS)
# Main training loop; resumes at start_epoch when a checkpoint was restored.
for epoch in trange(start_epoch, EPOCHS):
    start = time.time()
    total_loss = 0
    for (batch, (img_tensor, target)) in enumerate(dataset):
        batch_loss, t_loss = train_step(img_tensor, target)
        total_loss += t_loss
        # Progress report every 100 batches.
        if batch % 100 == 0:
            print ('Epoch {} Batch {} Loss {:.4f}'.format(
                epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
    # storing the epoch end loss value to plot later
    loss_plot.append(total_loss / num_steps)
    #if epoch % 5 == 0:
    # A checkpoint is saved after every epoch (the every-5-epochs guard
    # above was deliberately commented out).
    ckpt_manager.save()
    print ('Epoch {} Loss {:.6f}'.format(epoch + 1,
                                         total_loss/num_steps))
    print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
4974 | from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
    """Kill the service(s) behind the given token (or service id).

    Returns 0 on success and 1 on failure, for use as a CLI exit code.
    The two trailing parameters are unused hooks required by the
    sub-command dispatch signature.
    """
    guard_no_cluster(clusters)
    killed = process_kill_request(clusters,
                                  args.get('token-or-service-id'),
                                  args.get('is-service-id', False),
                                  args.get('force', False),
                                  args['timeout'])
    return 0 if killed else 1
def register(add_parser):
    """Register the 'kill' sub-command's argument parser and return the
    action function the CLI should dispatch to."""
    kill_parser = add_parser('kill', help='kill services')
    kill_parser.add_argument('token-or-service-id')
    kill_parser.add_argument('--force', '-f',
                             help='kill all services, never prompt',
                             dest='force', action='store_true')
    kill_parser.add_argument('--service-id', '-s',
                             help='kill by service id instead of token',
                             dest='is-service-id', action='store_true')
    kill_parser.add_argument('--timeout', '-t',
                             help='timeout (in seconds) for kill to complete',
                             type=check_positive, default=30)
    return kill
| StarcoderdataPython |
3294096 | <reponame>alexmgr/pdbfile<gh_stars>1-10
#!/usr/bin/python
# coding: utf-8
# Copyright (c) 2016 Mountainstorm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals, print_function
from pefile import PE, DEBUG_TYPE, DIRECTORY_ENTRY
import struct
import ntpath
import uuid
import os
class PEUnknownDebugDataError(Exception):
    """Raised when the CodeView debug blob has an unrecognised magic value."""
    pass
class PEMissingDebugDataError(Exception):
    """Raised when the PE file carries no CodeView/Misc debug directory data."""
    pass
class CodeViewRSDS(object):
    """Parser for a CodeView 'RSDS' (PDB 7.0) debug record.

    Layout: 4-byte magic b'RSDS', 16-byte little-endian GUID, 32-bit age,
    then a NUL-terminated PDB file name.  ``symbol_id`` is the Microsoft
    symbol-server key: ``<pdb basename>/<GUID hex, upper><age hex>``.
    """
    def __init__(self, data):
        fmt = '<4s16sI'
        sz = struct.calcsize(fmt)
        self.magic, guid, self.age = struct.unpack(fmt, data[:sz])
        self.guid = uuid.UUID(bytes_le=guid)
        # BUG FIX: search for a *bytes* NUL terminator.  ``bytes.find(str)``
        # raises TypeError on Python 3; on Python 2, b'\x00' == '\x00', so
        # behaviour there is unchanged.
        i = data[sz:].find(b'\x00')
        self.filename = data[sz:sz+i].decode('utf-8', 'ignore')
        # generate symbol_id
        guid = str(self.guid).replace('-', '').upper()
        fn = ntpath.basename(self.filename).lower()
        self.symbol_id = '%s/%s%X' % (fn, guid, self.age)
class CodeViewNB10(object):
    """Parser for a CodeView 'NB10' (PDB 2.0) debug record.

    Layout: 4-byte magic b'NB10', 32-bit offset, 32-bit timestamp, 32-bit
    age, then a NUL-terminated PDB file name.  ``symbol_id`` is the
    symbol-server key: ``<pdb basename>/<timestamp hex><age hex>``.
    """
    def __init__(self, data):
        fmt = '<4sIII'
        sz = struct.calcsize(fmt)
        (self.magic,
         self.offset,
         self.timestamp,
         self.age) = struct.unpack(fmt, data[:sz])
        # BUG FIX: bytes NUL terminator -- same Python 3 str/bytes issue as
        # in CodeViewRSDS.
        i = data[sz:].find(b'\x00')
        self.filename = data[sz:sz+i].decode('utf-8', 'ignore')
        # generate symbol_id
        fn = ntpath.basename(self.filename).lower()
        self.symbol_id = '%s/%X%X' % (
            fn, self.timestamp, self.age
        )
class PEDebugData(object):
    """Extracts debug (CodeView) identifiers from a PE file via pefile."""
    def __init__(self, path, filename=None):
        """Open *path* with a fast (headers-only) parse; *filename* defaults
        to the basename of *path* and feeds executable_id."""
        self.pe = PE(path, fast_load=True)
        self.path = path
        self.filename = filename
        if filename is None:
            self.filename = os.path.basename(path)
    @property
    def symbol_id(self):
        """Symbol-server key of the matching PDB (raises if no debug data)."""
        return self.codeview_info().symbol_id
    @property
    def executable_id(self):
        """Symbol-server key of the executable itself:
        <name>/<TimeDateStamp hex><SizeOfImage hex>."""
        retval = None
        if self.filename is not None:
            retval = '%s/%X%X' % (self.filename.lower(),
                                  self.pe.FILE_HEADER.TimeDateStamp,
                                  self.pe.OPTIONAL_HEADER.SizeOfImage)
        return retval
    def codeview_info(self):
        """Parse the debug blob into a CodeViewRSDS/CodeViewNB10 object.

        Raises PEMissingDebugDataError when there is no debug data and
        PEUnknownDebugDataError on an unrecognised magic.
        """
        info = None
        data = self.debug_data()
        if data is not None:
            # BUG FIX: compare against *bytes* magics.  ``self.pe.__data__``
            # slices are bytes on Python 3, so the original str comparisons
            # ('RSDS'/'NB10') could never match there; b'...' literals also
            # compare equal to str on Python 2, keeping behaviour identical.
            if data[:4] == b'RSDS':
                info = CodeViewRSDS(data)
            elif data[:4] == b'NB10':
                info = CodeViewNB10(data)
            else:
                raise PEUnknownDebugDataError('Unknown CodeView type: %s' % data[:4])
        else:
            raise PEMissingDebugDataError()
        return info
    def debug_data(self):
        """Return the raw bytes of the first CODEVIEW/MISC debug entry,
        parsing the debug directory on demand; None when absent."""
        data = None
        if not hasattr(self.pe, 'DIRECTORY_ENTRY_DEBUG'):
            # fast_load skipped the data directories; parse just DEBUG now.
            self.pe.parse_data_directories(
                DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_DEBUG']
            )
        if hasattr(self.pe, 'DIRECTORY_ENTRY_DEBUG'):
            for entry in self.pe.DIRECTORY_ENTRY_DEBUG:
                off = entry.struct.PointerToRawData
                if (entry.struct.Type == DEBUG_TYPE['IMAGE_DEBUG_TYPE_CODEVIEW'] or
                        entry.struct.Type == DEBUG_TYPE['IMAGE_DEBUG_TYPE_MISC']):
                    data = self.pe.__data__[off:off+entry.struct.SizeOfData]
                    if data is not None:
                        break
        return data
| StarcoderdataPython |
3346434 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
All necessary for feature
"""
import tensorflow as tf
# Maps a model-family name to its Keras input-preprocessing function, so
# callers can look up the normalisation matching the backbone they use.
model_preprocess = {
    "DenseNet": tf.keras.applications.densenet.preprocess_input,
    "EfficientNet": tf.keras.applications.efficientnet.preprocess_input,
    "NasNet": tf.keras.applications.nasnet.preprocess_input,
    "ResNet": tf.keras.applications.resnet_v2.preprocess_input,
}
| StarcoderdataPython |
12824567 | # Space Invaders
# Created by <NAME>
# Adapted by <NAME>
# !/usr/bin/env python
from pygame import *
import pygame.draw
from nave import Ship
from tiro import Bullet
from inimigo import Enemy
from barreira import Blocker
from ufo import Mystery
from explosao import Explosion
from vida import Life
from texto import Text
import sys
from random import shuffle, choice
import numpy as np
import peewee
from pontos_orm import ScoreOrm
from estados_orm import StateOrm
from estados import State
from pontos import Score
# RGB Constants
WHITE = (255, 255, 255)
GREEN = (153, 255, 187)
YELLOW = (241, 255, 0)
BLUE = (80, 255, 239)
PURPLE = (203, 0, 255)
RED = (237, 28, 36)
SCREEN = display.set_mode((800, 600))
FONT = "fonts/space_invaders.ttf"
IMG_NAMES = ["ship", "ship", "mystery", "enemy1_1", "enemy1_2", "enemy2_1", "enemy2_2",
"enemy3_1", "enemy3_2", "explosionblue", "explosiongreen", "explosionpurple", "laser", "enemylaser"]
IMAGE = {name: image.load("images/{}.png".format(name)).convert_alpha() for name in IMG_NAMES}
class SpaceInvaders(object):
    def __init__(self):
        """Initialise pygame/mixer, load the background and set the
        menu/game-over state flags for a fresh session."""
        # Low-latency audio settings must be applied before init().
        mixer.pre_init(44100, -16, 1, 512)
        init()
        self.caption = display.set_caption('Space Invaders')
        self.screen = SCREEN
        self.background = image.load('images/background.jpg').convert()
        # Screen-state flags: main menu first, game not yet started.
        self.startGame = False
        self.mainScreen = True
        self.gameOver = False
        self.scoreBoard = False
        # Initial value for a new game
        self.enemyPositionDefault = 65
        # Counter for enemy starting position (increased each new round)
        self.enemyPositionStart = self.enemyPositionDefault
        # Current enemy starting position
        self.enemyPosition = self.enemyPositionStart
    def reset(self, score, lives, newGame=False):
        """Rebuild all sprites and timers for a new round.

        *score* and *lives* carry over between rounds; *newGame* also
        rebuilds the four shield-blocker groups (they persist across
        rounds of the same game).
        NOTE(review): ``game`` used below is a module-level global defined
        outside this excerpt -- confirm it refers to this instance.
        """
        self.player = Ship(IMAGE, game)
        self.playerGroup = sprite.Group(self.player)
        self.explosionsGroup = sprite.Group()
        self.bullets = sprite.Group()
        self.mysteryShip = Mystery(IMAGE, game)
        self.mysteryGroup = sprite.Group(self.mysteryShip)
        self.enemyBullets = sprite.Group()
        self.reset_lives(lives)
        self.enemyPosition = self.enemyPositionStart
        self.make_enemies()
        # Only create blockers on a new game, not a new round
        if newGame:
            self.allBlockers = sprite.Group(self.make_blockers(0), self.make_blockers(1), self.make_blockers(2),
                                            self.make_blockers(3))
        self.keys = key.get_pressed()
        self.clock = time.Clock()
        # All timers restart from "now" for the new round.
        self.timer = time.get_ticks()
        self.noteTimer = time.get_ticks()
        self.shipTimer = time.get_ticks()
        self.score = score
        self.lives = lives
        self.create_audio()
        self.create_text()
        # No enemy has been killed yet this round.
        self.killedRow = -1
        self.killedColumn = -1
        self.makeNewShip = False
        self.shipAlive = True
        # 5x10 grid mirroring the enemy formation; 1 marks a killed enemy.
        self.killedArray = [[0] * 10 for x in range(5)]
def make_blockers(self, number):
blockerGroup = sprite.Group()
for row in range(4):
for column in range(9):
blocker = Blocker(10, GREEN, row, column, game)
blocker.rect.x = 50 + (200 * number) + (column * blocker.width)
blocker.rect.y = 450 + (row * blocker.height)
blockerGroup.add(blocker)
return blockerGroup
def reset_lives_sprites(self):
self.life1 = Life(657, 3, IMAGE, game)
self.life2 = Life(685, 3, IMAGE, game)
self.life3 = Life(713, 3, IMAGE, game)
self.life4 = Life(741, 3, IMAGE, game)
self.life5 = Life(769, 3, IMAGE, game)
if self.lives == 5:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3, self.life4, self.life5)
elif self.lives == 4:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3, self.life4)
elif self.lives == 3:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3)
elif self.lives == 2:
self.livesGroup = sprite.Group(self.life1, self.life2)
elif self.lives == 1:
self.livesGroup = sprite.Group(self.life1)
    def reset_lives(self, lives):
        """Set the current life count and rebuild the matching icon sprites."""
        self.lives = lives
        self.reset_lives_sprites()
    def create_audio(self):
        """Load all sound effects and the four-note invader march loop."""
        self.sounds = {}
        for sound_name in ["shoot", "shoot2", "invaderkilled", "mysterykilled", "shipexplosion"]:
            self.sounds[sound_name] = mixer.Sound("sounds/{}.wav".format(sound_name))
            self.sounds[sound_name].set_volume(0.2)
        # sounds/0.wav .. sounds/3.wav form the classic descending march.
        self.musicNotes = [mixer.Sound("sounds/{}.wav".format(i)) for i in range(4)]
        for sound in self.musicNotes:
            sound.set_volume(0.5)
        self.noteIndex = 0
    def play_main_music(self, currentTime):
        """Play the next note of the invader march, paced by the enemies'
        current movement interval (so the music speeds up as they do)."""
        moveTime = self.enemies.sprites()[0].moveTime
        if currentTime - self.noteTimer > moveTime:
            self.note = self.musicNotes[self.noteIndex]
            # Cycle through notes 0..3.
            if self.noteIndex < 3:
                self.noteIndex += 1
            else:
                self.noteIndex = 0
            self.note.play()
            self.noteTimer += moveTime
    def background_stars(self, game):
        """Draw the random starfield onto ``game.screen``.

        NOTE(review): this is called every frame, and it *re-randomizes*
        self.stars_x / self.stars_y on every call, so the stars twinkle at
        new positions each frame rather than scrolling -- confirm this is
        intended.  Also note ``game`` is (presumably) the same object as
        ``self``, so the self.* writes and game.* reads alias each other.
        """
        # The background stars:
        # Set the position:
        self.stars_x = np.random.rand(5) * 800
        self.stars_y = np.random.rand(5) * 600
        # Set the velocity:
        # NOTE(review): int(0.5 + u * 0.1) with u in [0, 1) always truncates
        # to 0, so every star velocity ends up 0 -- confirm intent.
        self.stars_v = np.zeros(5)
        for i in np.arange(5):
            self.stars_v[i] = int(0.5 + np.random.uniform() * 0.1)
        game.stars_y = (game.stars_y + game.stars_v * 0.2) % 600
        for i in range(5):
            # Stars with non-zero velocity get a tiny random horizontal jitter.
            game.stars_x[i] = game.stars_x[i] if not game.stars_v[i] else \
                game.stars_x[i] + 0.1 * int((np.random.rand() - 0.5) * 2.1)
            # Start point == end point: aaline draws a single white pixel.
            pygame.draw.aaline(game.screen, WHITE,
                               (int(game.stars_x[i]), int(game.stars_y[i])),
                               (int(game.stars_x[i]), int(game.stars_y[i])))
    def create_text(self):
        """Pre-build every static Text sprite used by the menu, HUD,
        game-over and scoreboard screens (position coordinates are fixed
        for the 800x600 window)."""
        self.titleText = Text(FONT, 50, "Space Invaders", WHITE, 164, 155)
        self.titleText2 = Text(FONT, 25, "Press any key to continue ...", WHITE, 201, 225)
        self.gameOverText = Text(FONT, 50, "Game Over! ", WHITE, 250, 270)
        self.nextRoundText = Text(FONT, 50, "Next Round! ", WHITE, 240, 270)
        self.enemy1Text = Text(FONT, 25, " = 10 pts", GREEN, 368, 270)
        self.enemy2Text = Text(FONT, 25, " = 20 pts", BLUE, 368, 320)
        self.enemy3Text = Text(FONT, 25, " = 30 pts", PURPLE, 368, 370)
        self.enemy4Text = Text(FONT, 25, " = ?????", RED, 368, 420)
        self.scoreText = Text(FONT, 20, "Score:", WHITE, 4, 5)
        self.livesText = Text(FONT, 20, "Lives: ", WHITE, 580, 5)
        self.leaderboardText = Text(FONT, 50, "Scoreboard: ", WHITE, 150, 100)
    def check_input(self):
        """Poll the keyboard and fire on SPACE.

        Stores the full pressed-key snapshot in ``self.keys`` (consumed by
        sprite ``update`` methods) and handles QUIT.  Only one player bullet
        may be on screen at a time; above 1000 points the ship fires a
        double shot instead.
        """
        self.keys = key.get_pressed()
        for e in event.get():
            if e.type == QUIT:
                sys.exit()
            if e.type == KEYDOWN:
                if e.key == K_SPACE:
                    # No new shot while a previous bullet is still alive or
                    # while the ship is respawning.
                    if len(self.bullets) == 0 and self.shipAlive:
                        if self.score < 1000:
                            # Single centered shot.
                            bullet = Bullet(self.player.rect.x + 23, self.player.rect.y + 5, -1, 15, "laser", "center",
                                            game, IMAGE)
                            self.bullets.add(bullet)
                            self.allSprites.add(self.bullets)
                            self.sounds["shoot"].play()
                        else:
                            # Double shot reward past 1000 points.
                            leftbullet = Bullet(self.player.rect.x + 8, self.player.rect.y + 5, -1, 15, "laser", "left",
                                                game, IMAGE)
                            rightbullet = Bullet(self.player.rect.x + 38, self.player.rect.y + 5, -1, 15, "laser",
                                                 "right", game, IMAGE)
                            self.bullets.add(leftbullet)
                            self.bullets.add(rightbullet)
                            self.allSprites.add(self.bullets)
                            self.sounds["shoot2"].play()
def make_enemies(self):
enemies = sprite.Group()
for row in range(5):
for column in range(10):
enemy = Enemy(row, column, IMAGE, game)
enemy.rect.x = 157 + (column * 50)
enemy.rect.y = self.enemyPosition + (row * 45)
enemies.add(enemy)
self.enemies = enemies
self.allSprites = sprite.Group(self.player, self.enemies, self.livesGroup, self.mysteryShip)
def make_enemies_shoot(self):
columnList = []
for enemy in self.enemies:
columnList.append(enemy.column)
columnSet = set(columnList)
columnList = list(columnSet)
shuffle(columnList)
column = columnList[0]
enemyList = []
rowList = []
for enemy in self.enemies:
if enemy.column == column:
rowList.append(enemy.row)
row = max(rowList)
for enemy in self.enemies:
if enemy.column == column and enemy.row == row:
if (time.get_ticks() - self.timer) > 700:
self.enemyBullets.add(
Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5, "enemylaser", "center", game, IMAGE))
self.allSprites.add(self.enemyBullets)
self.timer = time.get_ticks()
def calculate_score(self, row):
scores = {0: 30,
1: 20,
2: 20,
3: 10,
4: 10,
5: choice([50, 100, 150, 300])
}
score = scores[row]
self.score += score
return score
    def create_main_menu(self):
        """Blit the scaled enemy icons next to their point values on the
        title screen and start the game on any key release."""
        # Icons are rescaled every frame; positions line up with the
        # enemy*Text labels created in create_text().
        self.enemy1 = IMAGE["enemy3_1"]
        self.enemy1 = transform.scale(self.enemy1, (40, 40))
        self.enemy2 = IMAGE["enemy2_2"]
        self.enemy2 = transform.scale(self.enemy2, (40, 40))
        self.enemy3 = IMAGE["enemy1_2"]
        self.enemy3 = transform.scale(self.enemy3, (40, 40))
        self.enemy4 = IMAGE["mystery"]
        self.enemy4 = transform.scale(self.enemy4, (80, 40))
        self.screen.blit(self.enemy1, (318, 270))
        self.screen.blit(self.enemy2, (318, 320))
        self.screen.blit(self.enemy3, (318, 370))
        self.screen.blit(self.enemy4, (299, 420))
        for e in event.get():
            if e.type == QUIT:
                sys.exit()
            if e.type == KEYUP:
                # Any key release leaves the menu and starts a game.
                self.startGame = True
                self.mainScreen = False
def update_enemy_speed(self):
if len(self.enemies) <= 10:
for enemy in self.enemies:
enemy.moveTime = 400
if len(self.enemies) == 5:
for enemy in self.enemies:
enemy.moveTime = 200
    def check_collisions(self):
        """Resolve every collision for the current frame.

        Processing order matters: player bullets vs enemy bullets, player
        bullets vs invaders, player bullets vs mystery ship, enemy bullets
        vs the player, invaders reaching the player, and finally bullets
        and invaders eroding the blockers.
        """
        # Player bullets cancel enemy bullets on contact (player bullet is
        # removed by groupcollide; the enemy bullet is removed manually).
        collidedict = sprite.groupcollide(self.bullets, self.enemyBullets, True, False)
        if collidedict:
            for value in collidedict.values():
                for currentSprite in value:
                    self.enemyBullets.remove(currentSprite)
                    self.allSprites.remove(currentSprite)
        # Player bullet hits an invader: play sound, persist state, record
        # which grid cell died (used by Enemy.update), score it, and spawn
        # a score explosion.  Only one kill is processed per frame (break).
        enemiesdict = sprite.groupcollide(self.bullets, self.enemies, True, False)
        if enemiesdict:
            for value in enemiesdict.values():
                for currentSprite in value:
                    self.sounds["invaderkilled"].play()
                    player_state = State()
                    player_state.save_state(self.player.rect.x, self.lives, "invader")
                    self.killedRow = currentSprite.row
                    self.killedColumn = currentSprite.column
                    score = self.calculate_score(currentSprite.row)
                    explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, False,
                                          score, FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(currentSprite)
                    self.enemies.remove(currentSprite)
                    self.gameTimer = time.get_ticks()
                    break
        # Player bullet hits the mystery ship: score it and immediately
        # queue a replacement mystery ship.
        mysterydict = sprite.groupcollide(self.bullets, self.mysteryGroup, True, True)
        if mysterydict:
            for value in mysterydict.values():
                for currentSprite in value:
                    currentSprite.mysteryEntered.stop()
                    self.sounds["mysterykilled"].play()
                    player_state = State()
                    player_state.save_state(self.player.rect.x, self.lives, "mystery")
                    score = self.calculate_score(currentSprite.row)
                    explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, True,
                                          score, FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(currentSprite)
                    self.mysteryGroup.remove(currentSprite)
                    newShip = Mystery(IMAGE, game)
                    self.allSprites.add(newShip)
                    self.mysteryGroup.add(newShip)
                    break
        # Enemy bullet hits the player: drop one life icon, blow the ship
        # up and schedule a respawn; at zero lives the game ends.
        bulletsdict = sprite.groupcollide(self.enemyBullets, self.playerGroup, True, False)
        if bulletsdict:
            for value in bulletsdict.values():
                for playerShip in value:
                    if self.lives == 5:
                        self.lives -= 1
                        self.livesGroup.remove(self.life5)
                        self.allSprites.remove(self.life5)
                    elif self.lives == 4:
                        self.lives -= 1
                        self.livesGroup.remove(self.life4)
                        self.allSprites.remove(self.life4)
                    elif self.lives == 3:
                        self.lives -= 1
                        self.livesGroup.remove(self.life3)
                        self.allSprites.remove(self.life3)
                    elif self.lives == 2:
                        self.lives -= 1
                        self.livesGroup.remove(self.life2)
                        self.allSprites.remove(self.life2)
                    elif self.lives == 1:
                        self.lives -= 1
                        self.livesGroup.remove(self.life1)
                        self.allSprites.remove(self.life1)
                    elif self.lives == 0:
                        self.gameOver = True
                        self.startGame = False
                    self.sounds["shipexplosion"].play()
                    explosion = Explosion(playerShip.rect.x, playerShip.rect.y, 0, True, False, 0,
                                          FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(playerShip)
                    self.playerGroup.remove(playerShip)
                    self.makeNewShip = True
                    self.shipTimer = time.get_ticks()
                    self.shipAlive = False
        # An invader physically reaching the player ends the game instantly.
        if sprite.groupcollide(self.enemies, self.playerGroup, True, True):
            self.gameOver = True
            self.startGame = False
        # Bullets from either side chip away blocker pieces; invaders
        # marching through a blocker destroy the pieces they touch.
        sprite.groupcollide(self.bullets, self.allBlockers, True, True)
        sprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)
        sprite.groupcollide(self.enemies, self.allBlockers, False, True)
def create_new_ship(self, createShip, currentTime):
if createShip and (currentTime - self.shipTimer > 900):
self.player = Ship(IMAGE, game)
self.allSprites.add(self.player)
self.playerGroup.add(self.player)
self.makeNewShip = False
self.shipAlive = True
    def create_game_over(self, current_time):
        """Blink the "Game Over!" text for ~3 seconds, then persist the
        score and switch to the scoreboard screen.

        The blink is driven by comparing ``current_time`` against the
        ``self.timer`` snapshot taken when the game ended.
        """
        self.screen.blit(self.background, (0, 0))
        self.background_stars(game)
        # Alternating visible/hidden windows: 0-750 on, 750-1500 off,
        # 1500-2250 on, 2250-2750 off.
        if current_time - self.timer < 750:
            self.gameOverText.draw(self.screen)
        if current_time - self.timer > 750 and current_time - self.timer < 1500:
            self.screen.blit(self.background, (0, 0))
        if current_time - self.timer > 1500 and current_time - self.timer < 2250:
            self.gameOverText.draw(self.screen)
        if current_time - self.timer > 2250 and current_time - self.timer < 2750:
            self.screen.blit(self.background, (0, 0))
        if current_time - self.timer > 3000:
            # Blink finished: save the final score and move on.
            scoreboard = Score()
            scoreboard.save_score(self.score)
            self.startGame = False
            self.gameOver = False
            self.mainScreen = False
            self.scoreBoard = True
        for e in event.get():
            if e.type == QUIT:
                sys.exit()
def create_scoreboard(self, scores):
self.screen.blit(self.background, (0, 0))
self.background_stars(game)
self.scoreboardText = Text(FONT, 20, "1- " + str(scores[0]), WHITE, 235, 201)
self.scoreboardText2 = Text(FONT, 20, "2- " + str(scores[1]), WHITE, 235, 221)
self.scoreboardText3 = Text(FONT, 20, "3- " + str(scores[2]), WHITE, 235, 241)
self.scoreboardText4 = Text(FONT, 20, "4- " + str(scores[3]), WHITE, 235, 261)
self.scoreboardText5 = Text(FONT, 20, "5- " + str(scores[4]), WHITE, 235, 281)
self.scoreboardText6 = Text(FONT, 20, "6- " + str(scores[5]), WHITE, 435, 201)
self.scoreboardText7 = Text(FONT, 20, "7- " + str(scores[6]), WHITE, 435, 221)
self.scoreboardText8 = Text(FONT, 20, "8- " + str(scores[7]), WHITE, 435, 241)
self.scoreboardText9 = Text(FONT, 20, "9- " + str(scores[8]), WHITE, 435, 261)
self.scoreboardText10 = Text(FONT, 20, "10- " + str(scores[9]), WHITE, 435, 281)
self.leaderboardText.draw(self.screen)
self.scoreboardText.draw(self.screen)
self.scoreboardText2.draw(self.screen)
self.scoreboardText3.draw(self.screen)
self.scoreboardText4.draw(self.screen)
self.scoreboardText5.draw(self.screen)
self.scoreboardText6.draw(self.screen)
self.scoreboardText7.draw(self.screen)
self.scoreboardText8.draw(self.screen)
self.scoreboardText9.draw(self.screen)
self.scoreboardText10.draw(self.screen)
for e in event.get():
if e.type == QUIT:
sys.exit()
if e.type == KEYUP:
self.startGame = False
self.gameOver = False
self.scoreBoard = False
self.mainScreen = True
    def main(self):
        """Master game loop: dispatch to menu / gameplay / game-over /
        scoreboard state each frame at 60 FPS."""
        while True:
            if self.mainScreen:
                # Title screen; reset() re-arms a fresh game every frame
                # while waiting for a key press.
                self.reset(0, 5, True)
                self.screen.blit(self.background, (0, 0))
                self.background_stars(game)
                self.titleText.draw(self.screen)
                self.titleText2.draw(self.screen)
                self.enemy1Text.draw(self.screen)
                self.enemy2Text.draw(self.screen)
                self.enemy3Text.draw(self.screen)
                self.enemy4Text.draw(self.screen)
                self.create_main_menu()
            elif self.startGame:
                if len(self.enemies) == 0:
                    # Wave cleared: show "Next Round!" for 3 s, then respawn
                    # the formation one step closer to the bottom.
                    current_time = time.get_ticks()
                    if current_time - self.gameTimer < 3000:
                        self.screen.blit(self.background, (0, 0))
                        self.background_stars(game)
                        self.scoreText2 = Text(FONT, 20, str(self.score), GREEN, 85, 5)
                        self.scoreText.draw(self.screen)
                        self.scoreText2.draw(self.screen)
                        self.nextRoundText.draw(self.screen)
                        self.livesText.draw(self.screen)
                        self.livesGroup.update(self.keys)
                        self.check_input()
                    if current_time - self.gameTimer > 3000:
                        # Move enemies closer to bottom
                        self.enemyPositionStart += 35
                        self.reset(self.score, self.lives)
                        self.make_enemies()
                        self.gameTimer += 3000
                else:
                    # Normal gameplay frame: music, drawing, input,
                    # sprite updates, collisions, respawn and enemy fire.
                    current_time = time.get_ticks()
                    self.play_main_music(current_time)
                    self.screen.blit(self.background, (0, 0))
                    self.background_stars(game)
                    self.allBlockers.update(self.screen)
                    self.scoreText2 = Text(FONT, 20, str(self.score), GREEN, 85, 5)
                    self.scoreText.draw(self.screen)
                    self.scoreText2.draw(self.screen)
                    self.livesText.draw(self.screen)
                    self.check_input()
                    self.allSprites.update(self.keys, current_time, self.killedRow, self.killedColumn, self.killedArray)
                    self.explosionsGroup.update(self.keys, current_time)
                    self.check_collisions()
                    self.create_new_ship(self.makeNewShip, current_time)
                    self.update_enemy_speed()
                    if len(self.enemies) > 0:
                        self.make_enemies_shoot()
            elif self.gameOver:
                current_time = time.get_ticks()
                # Reset enemy starting position
                self.enemyPositionStart = self.enemyPositionDefault
                self.create_game_over(current_time)
            elif self.scoreBoard:
                scoreboard = Score()
                self.scores = scoreboard.order_scores()
                self.create_scoreboard(self.scores)
            display.update()
            self.clock.tick(60)
# Entry point: make sure the ORM tables exist, then start the game loop.
if __name__ == '__main__':
    try:
        ScoreOrm.create_table()
        StateOrm.create_table()
    except peewee.OperationalError:
        # Raised when the tables already exist -- safe to continue.
        print('Tabela ja existe!')
    game = SpaceInvaders()
    game.main()
| StarcoderdataPython |
1836071 | from conans import ConanFile, CMake, tools
import os
# Minimum Conan client version this recipe supports.
required_conan_version = ">=1.43.0"
class FlatbuffersConan(ConanFile):
    """Conan (1.x) recipe for Google FlatBuffers.

    Builds the flatbuffers library (static, shared or header-only) and,
    where supported, the ``flatc`` schema compiler, via the upstream CMake
    project.
    """
    name = "flatbuffers"
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "http://google.github.io/flatbuffers"
    topics = ("flatbuffers", "serialization", "rpc", "json-parser")
    description = "Memory Efficient Serialization Library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "header_only": [True, False],
        # The last three options are kept only for backwards compatibility
        # and are ignored (see configure()).
        "flatc": [True, False, "deprecated"],
        "flatbuffers": [True, False, "deprecated"],
        "options_from_context": [True, False, "deprecated"],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "header_only": False,
        "flatc": "deprecated",
        "flatbuffers": "deprecated",
        "options_from_context": "deprecated",
    }
    generators = "cmake"

    # Cached CMake helper built by _configure_cmake().
    _cmake = None

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _has_flatc(self):
        # don't build flatc when it makes little sense or not supported
        return self.settings.os not in ["Android", "iOS", "watchOS", "tvOS", "Neutrino"]

    def export_sources(self):
        """Ship the wrapper CMakeLists, the flatc target helper and all
        patches alongside the recipe."""
        self.copy("CMakeLists.txt")
        self.copy(os.path.join("cmake", "FlatcTargets.cmake"))
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # fPIC is irrelevant for shared or header-only packages; shared is
        # irrelevant for header-only ones.
        if self.options.shared or self.options.header_only:
            del self.options.fPIC
        if self.options.header_only:
            del self.options.shared
        # deprecated options
        for deprecated_option in ["flatc", "flatbuffers", "options_from_context"]:
            if self.options.get_safe(deprecated_option) != "deprecated":
                self.output.warn("{} option is deprecated, do not use".format(deprecated_option))

    def validate(self):
        # FlatBuffers requires C++11.
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, 11)

    def package_id(self):
        # A header-only package without flatc is settings-independent.
        if self.options.header_only and not self._has_flatc:
            self.info.header_only()
        # deprecated options
        del self.info.options.flatc
        del self.info.options.flatbuffers
        del self.info.options.options_from_context

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def _patch_sources(self):
        """Apply conandata patches plus in-place tweaks of the upstream
        CMakeLists (no git version lookup, no -Werror, dll into bin/)."""
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmakelists = os.path.join(self._source_subfolder, "CMakeLists.txt")
        # Prefer manual injection of current version in build(), otherwise it tries to call git
        tools.replace_in_file(cmakelists, "include(CMake/Version.cmake)", "")
        # No warnings as errors
        tools.replace_in_file(cmakelists, "/WX", "")
        tools.replace_in_file(cmakelists, "-Werror ", "")
        # Install dll to bin folder
        tools.replace_in_file(cmakelists,
                              "RUNTIME DESTINATION ${CMAKE_INSTALL_LIBDIR}",
                              "RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}")

    def _configure_cmake(self):
        """Build (once) the CMake helper with all FlatBuffers cache options."""
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False
        self._cmake.definitions["FLATBUFFERS_INSTALL"] = True
        self._cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.header_only and not self.options.shared
        self._cmake.definitions["FLATBUFFERS_BUILD_FLATC"] = self._has_flatc
        self._cmake.definitions["FLATBUFFERS_STATIC_FLATC"] = False
        self._cmake.definitions["FLATBUFFERS_BUILD_FLATHASH"] = False
        self._cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = not self.options.header_only and self.options.shared
        # Honor conan profile
        self._cmake.definitions["FLATBUFFERS_LIBCXX_WITH_CLANG"] = False
        # Mimic upstream CMake/Version.cmake removed in _patch_sources()
        version = tools.Version(self.version)
        self._cmake.definitions["VERSION_MAJOR"] = version.major
        self._cmake.definitions["VERSION_MINOR"] = version.minor
        self._cmake.definitions["VERSION_PATCH"] = version.patch
        # To install relocatable shared libs on Macos
        self._cmake.definitions["CMAKE_POLICY_DEFAULT_CMP0042"] = "NEW"
        # Fix iOS/tvOS/watchOS
        if tools.is_apple_os(self.settings.os):
            self._cmake.definitions["CMAKE_MACOSX_BUNDLE"] = False
        self._cmake.configure()
        return self._cmake

    def build(self):
        self._patch_sources()
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        """Install via CMake, drop upstream cmake/pkgconfig files and ship
        our own CMake helper modules instead."""
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        self.copy(pattern="FlatcTargets.cmake",
                  dst=self._module_path,
                  src="cmake")
        self.copy(pattern="BuildFlatBuffers.cmake",
                  dst=self._module_path,
                  src=os.path.join(self._source_subfolder, "CMake"))

    @property
    def _module_path(self):
        # Where the exported CMake helper modules are installed in the package.
        return os.path.join("lib", "cmake")

    def package_info(self):
        """Describe the package for consumers (CMake/pkg-config metadata,
        libs, build modules, flatc PATH entry)."""
        self.cpp_info.set_property("cmake_find_mode", "both")
        self.cpp_info.set_property("cmake_module_file_name", "FlatBuffers")
        self.cpp_info.set_property("cmake_file_name", "Flatbuffers")
        # Upstream exposes flatbuffers::flatbuffers (static/header-only) or
        # flatbuffers::flatbuffers_shared.
        cmake_target = "flatbuffers"
        if not self.options.header_only and self.options.shared:
            cmake_target += "_shared"
        self.cpp_info.set_property("cmake_target_name", "flatbuffers::{}".format(cmake_target))
        self.cpp_info.set_property("pkg_config_name", "flatbuffers")
        # TODO: back to global scope in conan v2 once cmake_find_package* generators removed
        if not self.options.header_only:
            self.cpp_info.components["libflatbuffers"].libs = tools.collect_libs(self)
            if self.settings.os in ["Linux", "FreeBSD"]:
                self.cpp_info.components["libflatbuffers"].system_libs.append("m")
        # Provide flatbuffers::flatc target and CMake functions from BuildFlatBuffers.cmake
        build_modules = [
            os.path.join(self._module_path, "FlatcTargets.cmake"),
            os.path.join(self._module_path, "BuildFlatBuffers.cmake"),
        ]
        self.cpp_info.set_property("cmake_build_modules", build_modules)
        if self._has_flatc:
            bindir = os.path.join(self.package_folder, "bin")
            self.output.info("Appending PATH environment variable: {}".format(bindir))
            self.env_info.PATH.append(bindir)

        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "FlatBuffers"
        self.cpp_info.filenames["cmake_find_package_multi"] = "Flatbuffers"
        self.cpp_info.names["cmake_find_package"] = "flatbuffers"
        self.cpp_info.names["cmake_find_package_multi"] = "flatbuffers"
        self.cpp_info.components["libflatbuffers"].names["cmake_find_package"] = cmake_target
        self.cpp_info.components["libflatbuffers"].names["cmake_find_package_multi"] = cmake_target
        self.cpp_info.components["libflatbuffers"].build_modules["cmake_find_package"] = build_modules
        self.cpp_info.components["libflatbuffers"].build_modules["cmake_find_package_multi"] = build_modules
        self.cpp_info.components["libflatbuffers"].set_property("cmake_file_name", "flatbuffers::{}".format(cmake_target))
        self.cpp_info.components["libflatbuffers"].set_property("pkg_config_name", "flatbuffers")
| StarcoderdataPython |
6415946 | <filename>timm/models/layers/__init__.py
from .activations import *
from .adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
from .inplace_abn import InplaceAbn
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, LayerNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .padding import get_padding, get_same_padding, pad_same
from .patch_embed import PatchEmbed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvBnAct
from .space_to_depth import SpaceToDepthModule
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
| StarcoderdataPython |
11298309 | # Title: dataset.py - Create dataset for face recognition
# Author: <NAME>
# Copyright (c) 2017 Linaro Limited
#
################################################################
# Import OpenCV library
import cv2,os
import numpy as np
from PIL import Image
import os
# Find an attached camera by listing /dev/video* (Python 2 script).
# NOTE(review): indentation was lost in this file; the loop nesting below
# is reconstructed from the code's logic -- verify against the original.
f = os.popen("ls -lrt /dev/video*")
for line in f.readlines():
    print line
    # line[-2] is the digit before the trailing newline, so this assumes a
    # single-digit device number; the last listed device wins.
    camNO = int(line[-2])
print "Camera detected:"
print camNO
cap = cv2.VideoCapture(camNO)
# Read / Write from a text file to keep track of ID's and Names.
# File format is CSV
CurrentId = 0
CurrentName = "None"
file = open("file.txt","rw")
data = file.readlines()
if len(data) > 0:
for line in data:
words = line.split(",")
if len(words) > 0:
CurrentId = words[0]
CurrentName = words[1]
else:
print ""
file.close
# Load classifier
detector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Prompt for person name
id = int(CurrentId) + 1
print "Make sure there is only one face in front of the Camera!!!"
print "Adding ID Number"
print id
Newname = raw_input('Enter Name: ')
file = open("file.txt", "a")
file.write(str(id))
file.write(",")
file.write(Newname)
file.write("\n")
file.close
print "."
print "."
print "."
print "."
print "Launching the Video Stream."
count = 0
# Maximum count for the dataset
max_count = 30
# Capture up to max_count face samples, then initialise the recognizer.
# NOTE(review): indentation was lost in this file; the nesting below is
# reconstructed (imshow/count inside the per-face loop) -- verify against
# the original.
while(True):
    # Capture and return video frame
    ret, frame = cap.read()
    # Convert from BGR to GRAY color-space
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect objects of different sizes
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        # Draw rectangle around the face
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        # Save the captured face in dataset folder
        cv2.imwrite("dataset/User." + str(id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
        # Show the captured frame
        cv2.imshow('frame',frame)
        print "Captured frame %d" % count
        # Increment count value
        count += 1
    # Wait for 100 miliseconds
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    # Break if count value exceeds 20
    elif count > max_count:
        break
# Do cleanup
cap.release()
cv2.destroyAllWindows()
print "Wait for the program to finish Training..."
# Create Local Binary Patterns Histograms (LBPH) recognizer
recognizer = cv2.face.createLBPHFaceRecognizer()
# Load the classifier
detector = cv2.CascadeClassifier("lbpcascade_frontalface.xml");
def getImagesAndLabels(path):
    """Collect (face crop, id) training pairs from every image under *path*.

    File names follow the "User.<id>.<count>.jpg" convention created by the
    capture loop: the numeric id is the second dot-separated field.
    Returns (faceSamples, Ids) as two parallel lists.
    """
    face_samples = []
    labels = []
    for entry in os.listdir(path):
        image_path = os.path.join(path, entry)
        # Load grayscale ('L') pixels as a uint8 numpy array.
        grayscale = Image.open(image_path).convert('L')
        pixels = np.array(grayscale, 'uint8')
        label = int(os.path.split(image_path)[-1].split(".")[1])
        # One image may contain several detected faces; each gets the label.
        for (x, y, w, h) in detector.detectMultiScale(pixels):
            face_samples.append(pixels[y:y + h, x:x + w])
            labels.append(label)
    return face_samples, labels
# Train the LBPH recognizer on the captured dataset and persist the model.
# Get faces and Ids from the dataset folder
faces,Ids = getImagesAndLabels('dataset')
# Train the recognizer for detected faces
recognizer.train(faces, np.array(Ids))
# Save the recognizer out in trainer.yml
recognizer.save('trainer/trainer.yml')
| StarcoderdataPython |
11377432 | #Copyright (c) 2020 Ocado. All Rights Reserved.
import numpy as np
class Cost:
    """A numeric cost that can additionally be marked as blocked.

    A blocked cost behaves like +infinity against plain numbers.  Between
    two Cost objects, blocked always outranks unblocked; ties on the
    blocked flag fall back to comparing the numeric values (even when both
    are blocked).
    """

    def __init__(self, value, blocked=False):
        self.value = value
        self.blocked = blocked

    def _key(self):
        # Sort key: False < True, so unblocked sorts before blocked and
        # equal flags compare by value -- exactly the original semantics.
        return (self.blocked, self.value)

    def to_float(self):
        """Collapse to a plain float: +inf when blocked, else the value."""
        return np.inf if self.blocked else self.value

    def __add__(self, other):
        # Only scalar offsets are supported; Cost + Cost is an error.
        if isinstance(other, Cost):
            raise ValueError
        return Cost(self.value + other, self.blocked)

    def __sub__(self, other):
        if isinstance(other, Cost):
            raise ValueError
        return Cost(self.value - other, self.blocked)

    def __lt__(self, other):
        if isinstance(other, Cost):
            return self._key() < other._key()
        return self.to_float() < other

    def __le__(self, other):
        if isinstance(other, Cost):
            return self._key() <= other._key()
        return self.to_float() <= other

    def __eq__(self, other):
        if isinstance(other, Cost):
            return self._key() == other._key()
        return self.to_float() == other

    def __ne__(self, other):
        if isinstance(other, Cost):
            return self._key() != other._key()
        return self.to_float() != other

    def __gt__(self, other):
        if isinstance(other, Cost):
            return self._key() > other._key()
        return self.to_float() > other

    def __ge__(self, other):
        if isinstance(other, Cost):
            return self._key() >= other._key()
        return self.to_float() >= other
| StarcoderdataPython |
6532722 | import json
import phantom.app
import py42.sdk
from py42.response import Py42Response
from py42.services.users import UserService
from pytest import fixture
from requests import Response
from code42_connector import Code42Connector
# Placeholder user UID shared across the test suite.
TEST_USER_UID = "TEST_USER_UID"

# Canned Code42 alert-details API payload: one FED_COMPOSITE alert with
# three observations (endpoint exfiltration, a cloud-share exposure, and a
# cloud-share observation with an unrecognized exposure type).
MOCK_ALERT_DETAIL_RESPONSE = {
    "alerts": [
        {
            "type$": "ALERT_SUMMARY",
            "tenantId": "11111111-abcd-4231-99ab-df6434da4663",
            "type": "FED_COMPOSITE",
            "name": "Test Alert",
            "description": "it's a test",
            "actor": "<EMAIL>",
            "actorId": "987210998131391466",
            "target": "N/A",
            "severity": "LOW",
            "ruleId": "cab2d5ee-a512-45b1-8848-809327033048",
            "ruleSource": "Alerting",
            "id": "11111111-9724-4005-b848-76af488cf5e2",
            "createdAt": "2021-05-13T16:51:35.4259080Z",
            "state": "OPEN",
            "observations": [
                {
                    "type$": "OBSERVATION",
                    "id": "240526fc-3a32-4755-85ab-c6ee6e7f31ce",
                    "observedAt": "2020-05-28T12:50:00.0000000Z",
                    "type": "FedEndpointExfiltration",
                    "data": {
                        "type$": "OBSERVED_ENDPOINT_ACTIVITY",
                        "id": "240526fc-3a32-4755-85ab-c6ee6e7f31ce",
                        "sources": ["Endpoint"],
                        "exposureTypes": ["ApplicationRead"],
                        "firstActivityAt": "2020-05-28T12:50:00.0000000Z",
                        "lastActivityAt": "2020-05-28T12:50:00.0000000Z",
                        "fileCount": 3,
                        "totalFileSize": 533846,
                        "fileCategories": [
                            {
                                "type$": "OBSERVED_FILE_CATEGORY",
                                "category": "SourceCode",
                                "fileCount": 3,
                                "totalFileSize": 533846,
                                "isSignificant": True,
                            },
                            {
                                "type$": "OBSERVED_FILE_CATEGORY",
                                "category": "Pdf",
                                "fileCount": 3,
                                "totalFileSize": 533846,
                                "isSignificant": True,
                            },
                        ],
                        "files": [
                            {
                                "type$": "OBSERVED_FILE",
                                "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_5",
                                "path": "C:/Users/QA/Downloads/",
                                "name": "Customers.jpg",
                                "category": "Image",
                                "size": 265122,
                            },
                            {
                                "type$": "OBSERVED_FILE",
                                "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_6",
                                "path": "C:/Users/QA/Downloads/",
                                "name": "data.png",
                                "category": "Image",
                                "size": 129129,
                            },
                            {
                                "type$": "OBSERVED_FILE",
                                "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_7",
                                "path": "C:/Users/QA/Downloads/",
                                "name": "company_secrets.ps",
                                "category": "Image",
                                "size": 139595,
                            },
                        ],
                        "syncToServices": [],
                        "sendingIpAddresses": ["127.0.0.1"],
                    },
                },
                {
                    "type$": "OBSERVATION",
                    "id": "7f4d125d-c7ca-4264-83fe-fa442bf270b6",
                    "observedAt": "2020-06-11T20:20:00.0000000Z",
                    "type": "FedCloudSharePermissions",
                    "data": {
                        "type$": "OBSERVED_CLOUD_SHARE_ACTIVITY",
                        "id": "7f4d125d-c7ca-4264-83fe-fa442bf270b6",
                        "sources": ["GoogleDrive"],
                        "exposureTypes": ["SharedOutsideTrustedDomain"],
                        "firstActivityAt": "2020-06-11T20:20:00.0000000Z",
                        "lastActivityAt": "2020-06-11T20:25:00.0000000Z",
                        "fileCount": 1,
                        "totalFileSize": 182554405,
                        "fileCategories": [
                            {
                                "type$": "OBSERVED_FILE_CATEGORY",
                                "category": "Archive",
                                "fileCount": 1,
                                "totalFileSize": 182554405,
                                "isSignificant": False,
                            }
                        ],
                        "files": [
                            {
                                "type$": "OBSERVED_FILE",
                                "eventId": "14FnN9-YOhVUO_Tv8Mu-hEgevc2K4l07l_5_9e633ffd-9329-4cf4-8645-27a23b83ebc0",
                                "name": "Code42CrashPlan_8.0.0_1525200006800_778_Mac.dmg",
                                "category": "Archive",
                                "size": 182554405,
                            }
                        ],
                        "outsideTrustedDomainsEmails": ["<EMAIL>"],
                        "outsideTrustedDomainsEmailsCount": 1,
                        "outsideTrustedDomainsCounts": [
                            {
                                "type$": "OBSERVED_DOMAIN_INFO",
                                "domain": "gmail.com",
                                "count": 1,
                            }
                        ],
                        "outsideTrustedDomainsTotalDomainCount": 1,
                        "outsideTrustedDomainsTotalDomainCountTruncated": False,
                    },
                },
                {
                    "type$": "OBSERVATION",
                    "id": "7f4d125d-c7ca-4264-83fe-fa442bf270b6",
                    "observedAt": "2020-06-11T20:20:00.0000000Z",
                    "type": "FedCloudSharePermissions",
                    "data": {
                        "type$": "OBSERVED_CLOUD_SHARE_ACTIVITY",
                        "id": "7f4d125d-c7ca-4264-83fe-fa442bf270b6",
                        "sources": ["GoogleDrive"],
                        "exposureTypes": ["UnknownExposureTypeThatWeDontSupportYet"],
                        "firstActivityAt": "2020-06-11T20:20:00.0000000Z",
                        "lastActivityAt": "2020-06-11T20:25:00.0000000Z",
                        "fileCount": 1,
                        "totalFileSize": 182554405,
                        "fileCategories": [
                            {
                                "type$": "OBSERVED_FILE_CATEGORY",
                                "category": "Archive",
                                "fileCount": 1,
                                "totalFileSize": 182554405,
                                "isSignificant": False,
                            }
                        ],
                        "files": [
                            {
                                "type$": "OBSERVED_FILE",
                                "eventId": "14FnN9-YOhVUO_Tv8Mu-hEgevc2K4l07l_5_9e633ffd-9329-4cf4-8645-27a23b83ebc0",
                                "name": "Code42CrashPlan_8.0.0_1525200006800_778_Mac.dmg",
                                "category": "Archive",
                                "size": 182554405,
                            }
                        ],
                        "outsideTrustedDomainsEmails": ["<EMAIL>"],
                        "outsideTrustedDomainsEmailsCount": 1,
                        "outsideTrustedDomainsCounts": [
                            {
                                "type$": "OBSERVED_DOMAIN_INFO",
                                "domain": "gmail.com",
                                "count": 1,
                            }
                        ],
                        "outsideTrustedDomainsTotalDomainCount": 1,
                        "outsideTrustedDomainsTotalDomainCountTruncated": False,
                    },
                },
            ],
        }
    ]
}
# Canned Code42 alert-search API payload: two open alert summaries.
MOCK_SEARCH_ALERTS_LIST_RESPONSE = {
    "type$": "ALERT_QUERY_RESPONSE",
    "alerts": [
        {
            "type$": "ALERT_SUMMARY",
            "tenantId": "11111111-af5b-4231-9d8e-df6434da4663",
            "type": "FED_COMPOSITE",
            "name": "Alert 1",
            "description": "Its a test :)",
            "actor": "<EMAIL>",
            "actorId": "987210998131391466",
            "target": "N/A",
            "severity": "LOW",
            "ruleId": "cab2d5ee-a512-45b1-8848-809327033048",
            "ruleSource": "Alerting",
            "id": "11111111-9724-4005-b848-76af488cf5e2",
            "createdAt": "2021-05-13T16:51:35.4259080Z",
            "state": "OPEN",
        },
        {
            "type$": "ALERT_SUMMARY",
            "tenantId": "11111111-af5b-4231-9d8e-df6434da4663",
            "type": "FED_COMPOSITE",
            "name": "File Upload Alert",
            "description": "Alert on any file upload events",
            "actor": "<EMAIL>",
            "actorId": "987210998131391466",
            "target": "N/A",
            "severity": "MEDIUM",
            "ruleId": "962a6a1c-54f6-4477-90bd-a08cc74cbf71",
            "ruleSource": "Alerting",
            "id": "1111111-555f-4880-8909-f5679448e67c",
            "createdAt": "2021-05-13T16:51:35.3465540Z",
            "state": "OPEN",
        },
    ],
    "totalCount": 2,
    "problems": [],
}
# Canned file-event search payload: three READ_BY_APP events that differ only
# in fileName, fileOwner, osHostName, privateIpAddresses and tabUrl. Used by
# the security-event (file event query) action tests.
MOCK_SECURITY_EVENT_RESPONSE = {
    "totalCount": 3,
    "fileEvents": [
        # Event 1: company_secrets.txt read from HOSTNAME.
        {
            "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_5",
            "eventType": "READ_BY_APP",
            "eventTimestamp": "2020-05-28T12:46:39.838Z",
            "insertionTimestamp": "2020-05-28T12:51:50.040Z",
            "fieldErrors": [],
            "filePath": "C:/Users/QA/Downloads/",
            "fileName": "company_secrets.txt",
            "fileType": "FILE",
            "fileCategory": "IMAGE",
            "fileCategoryByBytes": "Image",
            "fileCategoryByExtension": "Image",
            "fileSize": 265122,
            "fileOwner": "Test",
            "md5Checksum": "9cea266b4e07974df1982ae3b9de92ce",
            "sha256Checksum": "34d0c9fc9c907ec374cf7e8ca1ff8a172e36eccee687f0a9b69dd169debb81e1",
            "createTimestamp": "2020-05-28T12:43:34.902Z",
            "modifyTimestamp": "2020-05-28T12:43:35.105Z",
            "deviceUserName": "<EMAIL>",
            "osHostName": "HOSTNAME",
            "domainName": "host.docker.internal",
            "publicIpAddress": "255.255.255.255",
            "privateIpAddresses": ["255.255.255.255", "127.0.0.1"],
            "deviceUid": "935873453596901068",
            "userUid": "912098363086307495",
            "actor": None,
            "directoryId": [],
            "source": "Endpoint",
            "url": None,
            "shared": None,
            "sharedWith": [],
            "sharingTypeAdded": [],
            "cloudDriveId": None,
            "detectionSourceAlias": None,
            "fileId": None,
            "exposure": ["ApplicationRead"],
            "processOwner": "QA",
            "processName": "chrome.exe",
            "windowTitle": ["Jira"],
            "tabUrl": "example.com",
            "removableMediaVendor": None,
            "removableMediaName": None,
            "removableMediaSerialNumber": None,
            "removableMediaCapacity": None,
            "removableMediaBusType": None,
            "removableMediaMediaName": None,
            "removableMediaVolumeName": [],
            "removableMediaPartitionId": [],
            "syncDestination": None,
            "emailDlpPolicyNames": None,
            "emailSubject": None,
            "emailSender": None,
            "emailFrom": None,
            "emailRecipients": None,
            "outsideActiveHours": False,
            "mimeTypeByBytes": "image/png",
            "mimeTypeByExtension": "image/png",
            "mimeTypeMismatch": False,
            "printJobName": None,
            "printerName": None,
            "printedFilesBackupPath": None,
            "remoteActivity": "UNKNOWN",
            "trusted": False,
            "operatingSystemUser": "IEUser",
            "destinationCategory": "Cloud Storage",
            "destinationName": "Google Drive",
            "riskScore": 5,
            "riskSeverity": "HIGH",
            "riskIndicators": [
                {"name": "Google Drive upload", "weight": 5},
                {"name": "Spreadsheet", "weight": 0},
            ],
        },
        # Event 2: data.jpg read from TEST'S MAC.
        {
            "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_5",
            "eventType": "READ_BY_APP",
            "eventTimestamp": "2020-05-28T12:46:39.838Z",
            "insertionTimestamp": "2020-05-28T12:51:50.040Z",
            "fieldErrors": [],
            "filePath": "C:/Users/QA/Downloads/",
            "fileName": "data.jpg",
            "fileType": "FILE",
            "fileCategory": "IMAGE",
            "fileCategoryByBytes": "Image",
            "fileCategoryByExtension": "Image",
            "fileSize": 265122,
            "fileOwner": "QA",
            "md5Checksum": "9cea266b4e07974df1982ae3b9de92ce",
            "sha256Checksum": "34d0c9fc9c907ec374cf7e8ca1ff8a172e36eccee687f0a9b69dd169debb81e1",
            "createTimestamp": "2020-05-28T12:43:34.902Z",
            "modifyTimestamp": "2020-05-28T12:43:35.105Z",
            "deviceUserName": "<EMAIL>",
            "osHostName": "TEST'S MAC",
            "domainName": "host.docker.internal",
            "publicIpAddress": "255.255.255.255",
            "privateIpAddresses": ["127.0.0.1"],
            "deviceUid": "935873453596901068",
            "userUid": "912098363086307495",
            "actor": None,
            "directoryId": [],
            "source": "Endpoint",
            "url": None,
            "shared": None,
            "sharedWith": [],
            "sharingTypeAdded": [],
            "cloudDriveId": None,
            "detectionSourceAlias": None,
            "fileId": None,
            "exposure": ["ApplicationRead"],
            "processOwner": "QA",
            "processName": "chrome.exe",
            "windowTitle": ["Jira"],
            "tabUrl": "example.com/test",
            "removableMediaVendor": None,
            "removableMediaName": None,
            "removableMediaSerialNumber": None,
            "removableMediaCapacity": None,
            "removableMediaBusType": None,
            "removableMediaMediaName": None,
            "removableMediaVolumeName": [],
            "removableMediaPartitionId": [],
            "syncDestination": None,
            "emailDlpPolicyNames": None,
            "emailSubject": None,
            "emailSender": None,
            "emailFrom": None,
            "emailRecipients": None,
            "outsideActiveHours": False,
            "mimeTypeByBytes": "image/png",
            "mimeTypeByExtension": "image/png",
            "mimeTypeMismatch": False,
            "printJobName": None,
            "printerName": None,
            "printedFilesBackupPath": None,
            "remoteActivity": "UNKNOWN",
            "trusted": False,
            "operatingSystemUser": "IEUser",
            "destinationCategory": "Cloud Storage",
            "destinationName": "Google Drive",
            "riskScore": 5,
            "riskSeverity": "HIGH",
            "riskIndicators": [
                {"name": "Google Drive upload", "weight": 5},
                {"name": "Spreadsheet", "weight": 0},
            ],
        },
        # Event 3: confidential.pdf read from Test's Windows (IPv6 private address).
        {
            "eventId": "0_1d71796f-af5b-4231-9d8e-df6434da4663_935873453596901068_956171635867906205_5",
            "eventType": "READ_BY_APP",
            "eventTimestamp": "2020-05-28T12:46:39.838Z",
            "insertionTimestamp": "2020-05-28T12:51:50.040Z",
            "fieldErrors": [],
            "filePath": "C:/Users/QA/Downloads/",
            "fileName": "confidential.pdf",
            "fileType": "FILE",
            "fileCategory": "IMAGE",
            "fileCategoryByBytes": "Image",
            "fileCategoryByExtension": "Image",
            "fileSize": 265122,
            "fileOwner": "Mock",
            "md5Checksum": "9cea266b4e07974df1982ae3b9de92ce",
            "sha256Checksum": "34d0c9fc9c907ec374cf7e8ca1ff8a172e36eccee687f0a9b69dd169debb81e1",
            "createTimestamp": "2020-05-28T12:43:34.902Z",
            "modifyTimestamp": "2020-05-28T12:43:35.105Z",
            "deviceUserName": "<EMAIL>",
            "osHostName": "Test's Windows",
            "domainName": "host.docker.internal",
            "publicIpAddress": "255.255.255.255",
            "privateIpAddresses": ["0:0:0:0:0:0:0:1", "127.0.0.1"],
            "deviceUid": "935873453596901068",
            "userUid": "912098363086307495",
            "actor": None,
            "directoryId": [],
            "source": "Endpoint",
            "url": None,
            "shared": None,
            "sharedWith": [],
            "sharingTypeAdded": [],
            "cloudDriveId": None,
            "detectionSourceAlias": None,
            "fileId": None,
            "exposure": ["ApplicationRead"],
            "processOwner": "QA",
            "processName": "chrome.exe",
            "windowTitle": ["Jira"],
            "tabUrl": "example.com/foo",
            "removableMediaVendor": None,
            "removableMediaName": None,
            "removableMediaSerialNumber": None,
            "removableMediaCapacity": None,
            "removableMediaBusType": None,
            "removableMediaMediaName": None,
            "removableMediaVolumeName": [],
            "removableMediaPartitionId": [],
            "syncDestination": None,
            "emailDlpPolicyNames": None,
            "emailSubject": None,
            "emailSender": None,
            "emailFrom": None,
            "emailRecipients": None,
            "outsideActiveHours": False,
            "mimeTypeByBytes": "image/png",
            "mimeTypeByExtension": "image/png",
            "mimeTypeMismatch": False,
            "printJobName": None,
            "printerName": None,
            "printedFilesBackupPath": None,
            "remoteActivity": "UNKNOWN",
            "trusted": False,
            "operatingSystemUser": "IEUser",
            "destinationCategory": "Cloud Storage",
            "destinationName": "Google Drive",
            "riskScore": 5,
            "riskSeverity": "HIGH",
            "riskIndicators": [
                {"name": "Google Drive upload", "weight": 5},
                {"name": "Spreadsheet", "weight": 0},
            ],
        },
    ],
}
@fixture
def mock_py42_client(mocker):
    """Return a MagicMock py42 SDK client.

    ``py42.sdk.from_local_account`` is patched to return this mock, so any
    code that builds a client through that factory receives it.
    """
    client = mocker.MagicMock(spec=py42.sdk.SDKClient)
    client.users = mocker.MagicMock(spec=UserService)
    mocker.patch("py42.sdk.from_local_account", return_value=client)
    return client
@fixture(autouse=True)
def mock_create_attachment(mocker):
    """Auto-used: patch ``phantom.vault.Vault.create_attachment``.

    Prevents every test from touching the real Phantom vault; returns the
    patch mock so tests can assert on attachment calls.
    """
    mock_vault = mocker.patch("phantom.vault.Vault.create_attachment")
    return mock_vault
@fixture
def mock_py42_with_user(mocker, mock_py42_client):
    """py42 client whose user lookup returns one user with TEST_USER_UID."""
    response_data = {"users": [{"userUid": TEST_USER_UID}]}
    return _set_py42_users(mocker, mock_py42_client, response_data)
@fixture
def mock_py42_without_user(mocker, mock_py42_client):
    """py42 client whose user lookup returns an empty user list."""
    return _set_py42_users(mocker, mock_py42_client, {"users": []})
def _set_py42_users(mocker, mock_py42_client, response_data):
    """Stub ``users.get_by_username`` to return *response_data*; return the client."""
    stubbed_response = create_mock_response(mocker, response_data)
    mock_py42_client.users.get_by_username.return_value = stubbed_response
    return mock_py42_client
@fixture
def connector():
    """A bare Code42Connector instance with no stubbing applied."""
    connector = Code42Connector()
    return connector
def create_fake_connector(action_identifier, client=None):
    """Build a Code42Connector stubbed for unit tests.

    The action identifier and container id (always 42) are replaced with
    canned callables, and the optional py42 *client* is attached directly.
    """
    fake = Code42Connector()
    fake.get_action_identifier = lambda: action_identifier
    fake.get_container_id = lambda: 42
    fake._client = client
    return fake
def attach_client(connector, client):
    """Attach *client* as the connector's py42 client; return the connector for chaining."""
    setattr(connector, "_client", client)
    return connector
def create_mock_response(mocker, response_data):
    """Wrap *response_data* (a dict) in a Py42Response backed by a mocked HTTP response."""
    raw_response = mocker.MagicMock(spec=Response)
    raw_response.text = json.dumps(response_data)
    return Py42Response(raw_response)
def assert_success(connector):
    """Assert exactly one action result with APP_SUCCESS status."""
    results = connector.get_action_results()
    assert len(results) == 1
    assert results[0].get_status() == phantom.app.APP_SUCCESS
def assert_fail(connector):
    """Assert exactly one action result with APP_ERROR status."""
    results = connector.get_action_results()
    assert len(results) == 1
    assert results[0].get_status() == phantom.app.APP_ERROR
def assert_fail_message(connector, expected_message):
    """Assert a single failed action result carrying *expected_message*."""
    results = connector.get_action_results()
    assert len(results) == 1
    only_result = results[0]
    assert only_result.get_message() == expected_message
    assert only_result.get_status() == phantom.app.APP_ERROR
def assert_successful_single_data(connector, expected_data):
    """Assert one successful action result whose first data entry equals *expected_data*."""
    results = connector.get_action_results()
    assert len(results) == 1
    only_result = results[0]
    assert only_result.get_data()[0] == expected_data
    assert only_result.get_status() == phantom.app.APP_SUCCESS
def assert_successful_summary(connector, expected_summary):
    """Assert one successful action result whose summary equals *expected_summary*."""
    results = connector.get_action_results()
    assert len(results) == 1
    only_result = results[0]
    assert only_result.get_summary() == expected_summary
    assert only_result.get_status() == phantom.app.APP_SUCCESS
def assert_successful_message(connector, expected_message):
    """Assert one successful action result whose message equals *expected_message*."""
    results = connector.get_action_results()
    assert len(results) == 1
    only_result = results[0]
    assert only_result.get_message() == expected_message
    assert only_result.get_status() == phantom.app.APP_SUCCESS
def assert_successful_params(connector, expected_params):
    """Assert one successful action result whose parameters equal *expected_params*."""
    results = connector.get_action_results()
    assert len(results) == 1
    only_result = results[0]
    assert only_result.get_param() == expected_params
    assert only_result.get_status() == phantom.app.APP_SUCCESS
def assert_container_added(connector, expected_containers):
    """Assert one successful result and that the connector stored *expected_containers*."""
    results = connector.get_action_results()
    assert len(results) == 1
    assert connector._containers == expected_containers
    assert results[0].get_status() == phantom.app.APP_SUCCESS
def assert_artifacts_added(connector, expected_artifacts):
    """Assert one successful result and that the connector stored *expected_artifacts*."""
    results = connector.get_action_results()
    assert len(results) == 1
    assert connector._artifacts == expected_artifacts
    assert results[0].get_status() == phantom.app.APP_SUCCESS
def assert_state_saved(connector, expected_state):
    """Assert one successful result and that the connector saved *expected_state*."""
    results = connector.get_action_results()
    assert len(results) == 1
    assert connector._state == expected_state
    assert results[0].get_status() == phantom.app.APP_SUCCESS
# ---- StarcoderdataPython file boundary ----
#!/usr/bin/env python3
from bottle import Bottle, route, request, app
import keras
from DeepSPADE import data_helpers
# WSGI application object; the route handlers below attach to it.
app = Bottle()
@app.route('/')
def index():
    """Landing page pointing callers at the classification endpoint."""
    # NOTE(review): /spam below actually reads the "body" query parameter on a
    # GET request, not a JSON POST body — confirm which contract is intended.
    usage = 'The main HTTP API is a POST request on /spam with JSON {"body": "text"}'
    return usage
@app.route('/spam')
def spam():
    """Classify the ``body`` query parameter; return verdict, raw score and echoed text."""
    text = request.query.body
    score = float(app.model.predict(data_helpers.filterinput(text)))
    is_spam = round(score) == 1.0
    return {'spam': is_spam, 'score': score, 'text': text}
def main():
    """Load the Keras ensemble model and serve the HTTP API on port 8080."""
    from sys import argv
    # First CLI argument overrides the bind address.
    host = argv[1] if len(argv) > 1 else '0.0.0.0'
    app.model = keras.models.load_model('save_ensemble3_1.h5')
    app.run(host=host, port=8080)


if __name__ == '__main__':
    main()
# ---- StarcoderdataPython file boundary ----
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from django import forms
from django import shortcuts
from django.conf import settings
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from openstack_dashboard import fiware_api
from openstack_dashboard import api
from openstack_dashboard.dashboards.idm import forms as idm_forms
LOG = logging.getLogger('idm_logger')

# Filesystem locations of the three stored application-avatar sizes.
AVATAR_SMALL = settings.MEDIA_ROOT+"/ApplicationAvatar/small/"
AVATAR_MEDIUM = settings.MEDIA_ROOT+"/ApplicationAvatar/medium/"
AVATAR_ORIGINAL = settings.MEDIA_ROOT+"/ApplicationAvatar/original/"
class CreateApplicationForm(forms.SelfHandlingForm):
    """Register a new OAuth application or update an existing one.

    The hidden ``redirect_to`` field selects the flow: the literal string
    "create" registers a new application (and grants the provider role),
    any other value updates the application identified by ``appID``.
    """
    appID = forms.CharField(widget=forms.HiddenInput(), required=False)
    redirect_to = forms.CharField(widget=forms.HiddenInput(), required=False)
    name = forms.CharField(label=("Name"), required=True)
    description = forms.CharField(
        label=("Description"),
        widget=forms.Textarea(attrs={'rows':4, 'cols':40}),
        required=True)
    url = forms.CharField(
        label=mark_safe('URL <a href="#" class="contextual-help" data-toggle="popover" data-placement="left" data-title="Application URL" data-content="For security purposes, only OAuth requests coming from this URL will be accepted by KeyRock."><i class="fa fa-question-circle" ></i></a>'),
        required=True)
    callbackurl = forms.CharField(
        label=mark_safe('Callback URL <a href="#" class="contextual-help" data-toggle="popover" data-placement="left" data-title="Application Callback URL" data-content="The user agent will be redirected to this URL when OAuth flow is finished."><i class="fa fa-question-circle" ></i></a>'),
        required=True)
    title = 'Application Information'

    def handle(self, request, data):
        """Create or update the application, then redirect to the next view.

        Returns False when creation fails; on a failed update the exception is
        handled and the method falls through (implicitly returning None).
        """
        if data['redirect_to'] == "create":
            try:
                application = fiware_api.keystone.application_create(request,
                    name=data['name'],
                    description=data['description'],
                    redirect_uris=[data['callbackurl']],
                    url=data['url'])
                provider = fiware_api.keystone.get_provider_role(request)
                user = request.user
                organization = request.organization
                # Acting as the user's own default organization grants the
                # provider role directly to the user; otherwise the role goes
                # to the acting organization.
                if request.organization.id == request.user.default_project_id:
                    fiware_api.keystone.add_role_to_user(
                        request,
                        role=provider,
                        user=user,
                        organization=organization,
                        application=application.id,
                        use_idm_account=True)
                else:
                    fiware_api.keystone.add_role_to_organization(
                        request,
                        role=provider,
                        organization=organization,
                        application=application.id,
                        use_idm_account=True)
                LOG.debug('Application %s created', application.name)
            except Exception:
                exceptions.handle(
                    request, ('Unable to register the application.'))
                return False
            response = shortcuts.redirect(
                'horizon:idm:myApplications:avatar_step', application.id)
            return response
        else:
            try:
                LOG.debug('updating application %s', data['appID'])
                redirect_uris = [data['callbackurl'],]
                fiware_api.keystone.application_update(request,
                    data['appID'],
                    name=data['name'],
                    description=data['description'],
                    redirect_uris=redirect_uris,
                    url=data['url'])
                msg = 'Application updated successfully.'
                messages.success(request, (msg))
                LOG.debug(msg)
                response = shortcuts.redirect(
                    'horizon:idm:myApplications:detail', data['appID'])
                return response
            except Exception as e:
                LOG.error(e)
                exceptions.handle(request, ('Unable to update the application.'))
class AvatarForm(forms.SelfHandlingForm, idm_forms.ImageCropMixin):
    """Upload, crop and store an application avatar in three sizes.

    Redirects back to the application detail page when ``redirect_to`` is
    "update", otherwise on to the roles step of the creation wizard.
    """
    appID = forms.CharField(widget=forms.HiddenInput())
    image = forms.ImageField(label=(""), required=False)
    redirect_to = forms.CharField(widget=forms.HiddenInput(), required=False)
    title = 'Application Avatar'

    # Thumbnail specs (width, height, size name), largest first so the same
    # image object can be shrunk in place by successive Image.thumbnail calls.
    _AVATAR_SPECS = (
        (100, 100, 'original'),
        (36, 36, 'medium'),
        (25, 25, 'small'),
    )

    def handle(self, request, data):
        """Crop the uploaded image, persist the three sizes, and redirect."""
        application_id = data['appID']
        if request.FILES:
            output_img = self.crop(request.FILES['image'])
            # BUG FIX: the loop variable previously shadowed the spec list
            # (``for meta in meta``); iterate over a distinct name instead.
            for width, height, img_type in self._AVATAR_SPECS:
                # thumbnail() only ever shrinks, preserving aspect ratio.
                output_img.thumbnail((width, height))
                img = (settings.MEDIA_ROOT + '/ApplicationAvatar/'
                       + img_type + "/" + application_id)
                output_img.save(img, 'JPEG')
                image_root = ('ApplicationAvatar/' + img_type
                              + "/" + application_id)
                # Record the relative path of each size on the application.
                if img_type == 'small':
                    fiware_api.keystone.application_update(
                        request, application_id, img_small=image_root)
                elif img_type == 'medium':
                    fiware_api.keystone.application_update(
                        request, application_id, img_medium=image_root)
                else:
                    fiware_api.keystone.application_update(
                        request, application_id, img_original=image_root)
        if data['redirect_to'] == "update":
            response = shortcuts.redirect(
                'horizon:idm:myApplications:detail', application_id)
            LOG.debug('Avatar for application {0} updated'.format(application_id))
        else:
            response = shortcuts.redirect(
                'horizon:idm:myApplications:roles_step', application_id)
            LOG.debug('Avatar for application {0} saved'.format(application_id))
        return response
class DeleteImageForm(forms.SelfHandlingForm):
    """Remove an application's avatar files and clear the stored references."""
    description = 'Delete uploaded image'
    template = 'idm/myApplications/_delete_image.html'

    def __init__(self, *args, **kwargs):
        self.application_id = kwargs.pop('application_id')
        super(DeleteImageForm, self).__init__(*args, **kwargs)

    def handle(self, request, data):
        """Blank the avatar fields in Keystone, delete the files, return to edit."""
        application_id = self.application_id
        fiware_api.keystone.application_update(request, application_id,
                                               img_small='',
                                               img_medium='',
                                               img_original='')
        # Remove the three stored sizes (small, medium, original) from disk.
        for avatar_dir in (AVATAR_SMALL, AVATAR_MEDIUM, AVATAR_ORIGINAL):
            os.remove(avatar_dir + application_id)
        return shortcuts.redirect('horizon:idm:myApplications:edit', application_id)
class CreateRoleForm(forms.SelfHandlingForm):
    """Create a new role scoped to a single application."""
    name = forms.CharField(max_length=255, label=("Role Name"))
    application_id = forms.CharField(required=True,
                                     widget=forms.HiddenInput())
    no_autocomplete = True

    def handle(self, request, data):
        """Create the role via the FIWARE API; return it on success."""
        role_name = data['name']
        try:
            LOG.debug('Creating role with name "%s"' % role_name)
            created_role = fiware_api.keystone.role_create(
                request, name=role_name, application=data['application_id'])
            messages.success(request,
                             ('Role "%s" was successfully created.')
                             % role_name)
            return created_role
        except Exception:
            exceptions.handle(request, ('Unable to create role.'))
class EditRoleForm(forms.SelfHandlingForm):
    """Rename an existing application role."""
    role_id = forms.CharField(required=True,
                              widget=forms.HiddenInput())
    name = forms.CharField(max_length=60, label='')
    no_autocomplete = True

    def handle(self, request, data):
        """Update the role name; respond with the new name for the AJAX caller."""
        try:
            LOG.debug('Updating role with id {0}'.format(data['role_id']))
            role = fiware_api.keystone.role_update(request,
                                                   role=data['role_id'],
                                                   name=data['name'])
            messages.success(request,
                             ('Role "%s" was successfully updated.')
                             % data['role_id'])
            response = HttpResponse(role.name)
            return response
        except Exception:
            # BUG FIX: previously reported 'Unable to delete role.' on a
            # failed *update* (copy-paste from DeleteRoleForm).
            exceptions.handle(request, ('Unable to update role.'))
class DeleteRoleForm(forms.SelfHandlingForm):
    """Delete an application role by id."""
    role_id = forms.CharField(required=True,
                              widget=forms.HiddenInput())

    def handle(self, request, data):
        """Delete the role; return True on success."""
        role_id = data['role_id']
        try:
            LOG.debug('Deleting role with id {0}'.format(role_id))
            fiware_api.keystone.role_delete(request,
                                            role_id=role_id)
            messages.success(request,
                             ('Role "%s" was successfully deleted.') % role_id)
            return True
        except Exception:
            exceptions.handle(request, ('Unable to delete role.'))
class CreatePermissionForm(forms.SelfHandlingForm):
    """Create a permission for an application.

    A permission is defined either by an HTTP ``action`` + ``resource`` pair
    or by an advanced XACML ``xml`` rule — never both at once.
    """
    application_id = forms.CharField(required=True,
                                     widget=forms.HiddenInput())
    name = forms.CharField(max_length=255, label=("Permission Name"))
    description = forms.CharField(max_length=255, label=("Description"))
    action = forms.CharField(required=False, max_length=255, label=("HTTP action"))
    resource = forms.CharField(required=False, max_length=255, label=("Resource"))
    xml = forms.CharField(
        required=False,
        label='Use XACML to define a more complex authorization policy.',
        widget=forms.Textarea())
    no_autocomplete = True

    def clean(self):
        """Check that either action and resource or xml are set but not both."""
        cleaned_data = super(CreatePermissionForm, self).clean()
        action = cleaned_data.get('action')
        resource = cleaned_data.get('resource')
        xml = cleaned_data.get('xml')
        has_simple_rule = bool(action and resource)
        if xml and (action or resource):
            raise forms.ValidationError(
                'If you use the advanced rule you cannot set also action and resource',
                code='invalid')
        if not xml and not has_simple_rule:
            raise forms.ValidationError(
                'You need to define both action and resource.',
                code='invalid')
        return cleaned_data

    def handle(self, request, data):
        """Create the permission via the FIWARE API; return it on success."""
        try:
            LOG.debug('Creating permission %s', data['name'])
            create_kwargs = {
                'name': data['name'],
                'application': data['application_id'],
                'resource': data.get('resource', None),
                'action': data.get('action', None),
                'xml': data.get('xml', None),
            }
            new_permission = fiware_api.keystone.permission_create(
                request, **create_kwargs)
            messages.success(request,
                             ('Permission "%s" was successfully created.')
                             % data['name'])
            return new_permission
        except Exception:
            exceptions.handle(request, ('Unable to create permission.'))
class CancelForm(forms.SelfHandlingForm):
    """Delete an application, removing any uploaded avatar files first."""
    appID = forms.CharField(label=("ID"), widget=forms.HiddenInput())
    title = 'Cancel'

    def handle(self, request, data, application):
        """Remove avatar files if present, delete the application, go to index."""
        image = getattr(application, 'img_original', '')
        LOG.debug(image)
        # Only uploaded avatars live under ApplicationAvatar/; anything else
        # (e.g. a default image) must not be deleted from disk.
        if "ApplicationAvatar" in image:
            for avatar_dir in (AVATAR_SMALL, AVATAR_MEDIUM, AVATAR_ORIGINAL):
                os.remove(avatar_dir + application.id)
            LOG.debug('Avatar deleted from server')
        fiware_api.keystone.application_delete(request, application.id)
        LOG.debug('Application %s deleted', application.id)
        messages.success(request, ("Application deleted successfully."))
        return shortcuts.redirect('horizon:idm:myApplications:index')
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython import display
# Implemented solution methods.
methods = ['DynProg', 'ValIter'];

# Colour palette (hex RGB) used by the drawing/animation helpers below.
LIGHT_RED = '#FFC4CC';
LIGHT_GREEN = '#95FD99';
BLACK = '#000000';
WHITE = '#FFFFFF';
LIGHT_PURPLE = '#E8D0FF';
LIGHT_ORANGE = '#FAE0C3';
SEB_GREEN = '#52B92C';
BUSTED_BLUE = '#5993B5'
class RobbingBanks:
    """MDP environment for the "robbing banks" problem on a grid town map.

    A state is the 4-vector (robber_row, robber_col, police_row, police_col).
    The robber picks one of five actions; the police moves one step, chosen
    among moves that do not increase its distance to the robber.
    """

    # Actions available to the robber.
    STAY = 0
    MOVE_LEFT = 1
    MOVE_RIGHT = 2
    MOVE_UP = 3
    MOVE_DOWN = 4

    # Human-readable names for the actions.
    actions_names = {
        STAY: "stay",
        MOVE_LEFT: "move left",
        MOVE_RIGHT: "move right",
        MOVE_UP: "move up",
        MOVE_DOWN: "move down"
    }

    def __init__(self, town_map):
        """Build the state/action spaces, transition tensor and reward table
        for the given 2-D ``town_map`` array (cell value 1 marks a bank).
        """
        self.STEP_REWARD = 0        # reward for an ordinary move
        self.BANK_REWARD = 10       # reward when standing in a bank cell
        self.CAUGHT_REWARD = -50    # reward when caught by the police
        self.town_map = town_map;
        # Reset state: robber at (0,0), police at (1,2).
        self.initial_state = np.array([0,0,1,2])
        self.actions = self.__actions();
        self.states, self.map = self.__states();
        self.n_actions = len(self.actions);
        self.n_states = len(self.states);
        self.transition_probabilities = self.__transitions();
        self.rewards = self.__rewards();

    def __actions(self):
        """Map each action id to its (row, col) displacement."""
        actions = dict();
        actions[self.STAY] = np.array([0, 0]);
        actions[self.MOVE_LEFT] = np.array([0,-1]);
        actions[self.MOVE_RIGHT] = np.array([0, 1]);
        actions[self.MOVE_UP] = np.array([-1,0]);
        actions[self.MOVE_DOWN] = np.array([1,0]);
        return actions;

    def __states(self):
        """Enumerate all (robber pos) x (police pos) states.

        Returns (id -> state vector) and the inverse (tuple -> id) mapping.
        """
        states = dict();
        states_vec = dict();
        s = 0;
        for i in range(self.town_map.shape[0]):
            for j in range(self.town_map.shape[1]):
                for k in range(self.town_map.shape[0]):
                    for l in range(self.town_map.shape[1]):
                        states[s] = np.array([i,j,k,l]);
                        states_vec[(i,j,k,l)] = s;
                        s += 1;
        return states, states_vec

    def __move(self, state, action):
        """Make a step from *state* under *action*; the police moves randomly
        among its admissible moves. Inadmissible robber moves (walls) leave
        the state unchanged; being caught resets to the initial state.

        :return integer id of the next state.
        """
        # Candidate robber position under the chosen action.
        row = self.states[state][0] + self.actions[action][0];
        col = self.states[state][1] + self.actions[action][1];
        # Would the robber leave the map?
        hitting_town_walls = (row == -1) or (row == self.town_map.shape[0]) or \
            (col == -1) or (col == self.town_map.shape[1])
        # Sample the police's next position uniformly from its options.
        list_police_pos = self.__police_positions(state)
        new_police_pos = list_police_pos[np.random.randint(len(list_police_pos))]
        # Caught when robber and police currently share a cell.
        caught = all(self.states[state][0:2] == self.states[state][2:])
        if caught:
            return self.map[tuple(self.initial_state)];
        # If the move hits a wall, robber and police both stay in place.
        elif hitting_town_walls:
            return state
        else:
            return self.map[(row, col, new_police_pos[0], new_police_pos[1])];

    def __police_positions(self, state):
        """Return the list of admissible next police positions for *state*.

        Offsets are chosen so the police never moves away from the robber
        along an axis where they are not aligned.
        """
        agent_pos = self.states[state][0:2]
        police_pos = self.states[state][2:]
        # Sign of the robber-police displacement per axis.
        diff_pos = np.sign(agent_pos - police_pos)
        list_pos = [[1,0], [-1,0], [0, diff_pos[1]]] if diff_pos[0] == 0 else [[0,1], [0,-1], [diff_pos[0],0]] if diff_pos[1] == 0 else [[0,diff_pos[1]], [diff_pos[0],0]]
        # NOTE(review): ``list_pos += police_pos`` extends the Python list with
        # the two scalar coordinates of ``police_pos`` instead of translating
        # each offset by the police position; the scalars will then break
        # ``tuple(pos)`` below. Likely intended:
        # ``list_pos = [np.array(p) + police_pos for p in list_pos]`` — TODO confirm.
        list_pos += police_pos
        # Keep only in-bounds positions (out-of-bounds entries collapse to ()
        # via the boolean multiplication and are dropped by filter(None, ...)).
        list_pos = list(filter(None,[tuple(pos)*(0<=pos[0]<self.town_map.shape[0] and 0<=pos[1]<self.town_map.shape[1]) for pos in list_pos]))
        return list_pos

    def __transitions(self):
        """Compute the transition-probability tensor, dimension S x S x A,
        indexed as p[next_state, state, action].
        """
        dimensions = (self.n_states,self.n_states,self.n_actions);
        transition_probabilities = np.zeros(dimensions);
        for s in range(self.n_states):
            # Caught state: reset to the initial state.
            if (self.states[s][0],self.states[s][1])==(self.states[s][2],self.states[s][3]):
                # NOTE(review): ``self.initial_state`` is np.array([0,0,1,2]),
                # so this fancy-indexes rows 0, 0, 1 and 2 rather than the row
                # of the initial state; likely intended
                # ``self.map[tuple(self.initial_state)]``. The hard-coded 1/3
                # also ignores that the police may have only 2 moves. TODO confirm.
                transition_probabilities[self.initial_state, s, :] = 1/3
            else:
                for a in range(self.n_actions):
                    list_pos = self.__police_positions(s)
                    # One equally likely successor per admissible police move.
                    for police_pos in list_pos:
                        next_s = self.__move(s,a);
                        new_pos = np.copy(self.states[next_s])
                        new_pos[2:] = police_pos
                        next_s = self.map[tuple(new_pos)]
                        transition_probabilities[next_s, s, a] = 1/len(list_pos);
        return transition_probabilities;

    def __rewards(self):
        """Compute the expected reward table r(s, a), dimension S x A."""
        rewards = np.zeros((self.n_states, self.n_actions));
        for s in range(self.n_states):
            list_pos = self.__police_positions(s)
            for a in range(self.n_actions):
                next_s = self.__move(s,a);
                # The robber can be caught if the next cell is a possible
                # police position; average over the police's moves.
                if (tuple(self.states[next_s][0:2]) in list_pos):
                    if self.town_map[tuple(self.states[next_s][0:2])] != 1:
                        rewards[s,a] = self.CAUGHT_REWARD/len(list_pos)
                    if self.town_map[tuple(self.states[next_s][0:2])] == 1:
                        # Caught with prob 1/len, otherwise collect the bank reward.
                        rewards[s,a] = self.CAUGHT_REWARD/len(list_pos) + (len(list_pos)-1)*self.BANK_REWARD/len(list_pos)
                else:
                    # Safe move: reward only if the robber stands in a bank.
                    if self.town_map[tuple(self.states[next_s][0:2])] == 1:
                        rewards[s,a] = self.BANK_REWARD
        return rewards;

    def simulate(self,policy):
        """Roll out *policy* for a fixed horizon of 40 steps.

        :return list of state vectors visited (including the start state).
        """
        path = list();
        # Initialize current state, next state and time.
        t = 1;
        s = self.map[tuple(self.initial_state)];
        # Record the starting position.
        path.append(self.initial_state);
        # First transition under the policy.
        next_s = self.__move(s,policy[s]);
        path.append(self.states[next_s]);
        # Roll forward until the horizon is reached.
        T = 40
        while t<T:
            s = next_s;
            next_s = self.__move(s,policy[s]);
            path.append(self.states[next_s])
            t +=1;
        return path

    def show(self):
        """Print the state/action spaces, state mapping and reward table."""
        print('The states are :')
        print(self.states)
        print('The actions are:')
        print(self.actions)
        print('The mapping of the states:')
        print(self.map)
        print('The rewards:')
        print(self.rewards)
def value_iteration(env, gamma, epsilon):
    """Solve the discounted MDP in *env* by value iteration.

    :param env: environment exposing ``transition_probabilities`` (indexed
        p[next_state, state, action]), ``rewards``, ``n_states``, ``n_actions``.
    :param gamma: discount factor in (0, 1).
    :param epsilon: accuracy target of the value-iteration procedure.
    :return: (V, policy) — epsilon-optimal values and greedy policy.
    """
    p = env.transition_probabilities
    r = env.rewards
    n_states = env.n_states
    n_actions = env.n_actions

    def bellman_backup(values):
        """One Bellman optimality backup; return the Q-table for *values*."""
        q = np.zeros((n_states, n_actions))
        for s in range(n_states):
            for a in range(n_actions):
                q[s, a] = r[s, a] + gamma * np.dot(p[:, s, a], values)
        return q

    # Stopping threshold guaranteeing an epsilon-optimal value function.
    tol = (1 - gamma) * epsilon / gamma

    V = np.zeros(n_states)
    Q = bellman_backup(V)
    BV = np.max(Q, 1)
    n = 0
    # Iterate to convergence, capped at 2600 sweeps as a safety limit.
    while np.linalg.norm(V - BV) >= tol and n < 2600:
        n += 1
        V = np.copy(BV)
        Q = bellman_backup(V)
        BV = np.max(Q, 1)
    # Greedy policy with respect to the final Q-table.
    policy = np.argmax(Q, 1)
    return V, policy
def draw_town_map(town_map):
    """Render *town_map* as a coloured matplotlib table.

    Each cell value is mapped to a display colour; the function only draws,
    callers are expected to invoke ``plt.show()`` themselves.
    """
    # Map each cell value of the town map to a colour.
    col_map = {0: WHITE, 1: BLACK, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED}
    rows, cols = town_map.shape
    colored_town_map = [[col_map[town_map[j, i]] for i in range(cols)]
                        for j in range(rows)]
    # NOTE: the colour grid and figure were previously computed twice
    # (copy-paste duplication); a single pass is sufficient.
    plt.figure(1, figsize=(cols, rows))
    # Remove the axis ticks and add the title.
    ax = plt.gca()
    ax.set_title('The town_map')
    ax.set_xticks([])
    ax.set_yticks([])
    # Draw the map as a table whose cells carry the mapped colours.
    grid = plt.table(cellText=None,
                     cellColours=colored_town_map,
                     cellLoc='center',
                     loc=(0, 0),
                     edges='closed')
    # Make the table cells fill the figure evenly.
    for cell in grid.properties()['children']:
        cell.set_height(1.0 / rows)
        cell.set_width(1.0 / cols)
def animate_solution(town_map, path, save_anim = False, until_caught = False, gamma = 0):
    """Animate a robber/police trajectory over the town map.

    Each element of `path` is a state; tuple(p)[0:2] is taken as the robber's
    (row, col) cell and tuple(p)[2:] as the police's cell.

    Parameters:
        town_map: 2-D array of cell codes (see `col_map` below).
        path: sequence of states to animate, one frame per state.
        save_anim: if True, save each frame as a PNG.
        until_caught: if True, stop as soon as robber and police meet.
        gamma: discount factor, shown in the plot title and used in filenames.

    NOTE(review): the 'SEB' labels below hard-code the bank corner cells of a
    3x6 map -- confirm if used with a different layout.
    """
    # Map a color to each cell value of the town map.
    col_map = {0: WHITE, 1: SEB_GREEN, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED};
    # Size of the town map.
    rows, cols = town_map.shape;
    # Create a figure sized like the town map.
    fig = plt.figure(1, figsize=(cols, rows));
    # Remove the axis ticks and add a title.
    ax = plt.gca();
    ax.set_title('Policy simulation: $\lambda$ = %0.1f' %gamma);
    ax.set_xticks([]);
    ax.set_yticks([]);
    # Give a color to each cell.
    colored_town_map = [[col_map[town_map[j, i]] for i in range(cols)] for j in range(rows)];
    # Re-use figure 1 (same figure number as above, so no new window opens).
    fig = plt.figure(1, figsize=(cols, rows))
    # Render the map as a colored table spanning the whole axes.
    grid = plt.table(cellText=None,
                     cellColours=colored_town_map,
                     cellLoc='center',
                     loc=(0, 0),
                     edges='closed');
    # Normalize the height and width of every cell in the table.
    tc = grid.properties()['children']
    for cell in tc:
        cell.set_height(1.0 / rows);
        cell.set_width(1.0 / cols);
    # Split each state into the robber's and the police's positions.
    path_robber = [tuple(p)[0:2] for p in path]
    path_police = [tuple(p)[2:] for p in path]
    # Update cell colors/labels for each frame.
    for i in range(len(path_robber)):
        if i == 0:
            # First frame: just place both agents.
            grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)
            grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')
            grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)
            grid.get_celld()[(path_police[i])].get_text().set_text('Police')
            if save_anim:
                plt.savefig('optimal_policy_'+str(i))
        else:
            if until_caught and path_robber[i] == path_police[i]:
                # Robber and police share a cell: clear the previous frame,
                # mark the cell as BUSTED and stop the animation.
                grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])
                grid.get_celld()[(path_robber[i-1])].get_text().set_text('')
                grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])
                grid.get_celld()[(path_police[i-1])].get_text().set_text('')
                grid.get_celld()[(path_police[i])].set_facecolor(BUSTED_BLUE)
                grid.get_celld()[(path_police[i])].get_text().set_text('BUSTED')
                print("BUSTED!!!", gamma)
                if save_anim:
                    plt.savefig(str(gamma)+'_'+str(i)+'.png')
                break
            if save_anim:
                plt.savefig(str(gamma)+'_'+str(i)+'.png')
            # Erase the previous positions, then draw the new ones.
            grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])
            grid.get_celld()[(path_robber[i-1])].get_text().set_text('')
            grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])
            grid.get_celld()[(path_police[i-1])].get_text().set_text('')
            grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)
            grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')
            grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)
            grid.get_celld()[(path_police[i])].get_text().set_text('Police')
        # Re-label the bank ('SEB') corner cells every frame, since the
        # erase step above may have cleared their text.
        grid.get_celld()[0,0].get_text().set_text('SEB')
        grid.get_celld()[0,0].get_text().set_color('white')
        grid.get_celld()[0,5].get_text().set_text('SEB')
        grid.get_celld()[0,5].get_text().set_color('white')
        grid.get_celld()[2,0].get_text().set_text('SEB')
        grid.get_celld()[2,0].get_text().set_color('white')
        grid.get_celld()[2,5].get_text().set_text('SEB')
        grid.get_celld()[2,5].get_text().set_color('white')
        plt.pause(0.7)
    plt.show()
# Town layout: per col_map above, 1 marks the bank ('SEB') corner cells and
# 0 marks free cells.
town_map= np.array([
    [ 1, 0, 0, 0, 0, 1],
    [ 0, 0, 0, 0, 0, 0],
    [ 1, 0, 0, 0, 0, 1]
])
rb = RobbingBanks(town_map)
p = rb.transition_probabilities
n = rb.n_states
# Sanity check: for action index 3, the outgoing transition probabilities of
# each state should sum to at most 1; print any state that violates this.
for s in range(n):
    summ = np.sum(p[:, s, 3])
    if summ > 1:
        print(rb.states[s])
# PLOTTING VALUE_FUNC(INIT_STATE) AS A FUNCTION OF LAMBDA/GAMMA
"""
gammas = np.linspace(0.01,1,100,endpoint=False)
values = []
for gamma in gammas:
V, policy = value_iteration(rb, gamma, epsilon = 1e-6)
values.append(V[rb.map[(0,0,1,2)]])
plt.semilogy(gammas,values,'--')
plt.xlabel('Discount rate $\lambda$')
plt.ylabel('Value function V')
plt.title('Effect of $\lambda$ on V')
plt.plot()
#plt.show()
plt.savefig('Value_2b.png')
"""
# PLOTTING OPTIMAL POLICY FOR DIFFERENT LAMBDAS
"""
gammas = [0.1,0.5,0.8]
for gamma in gammas:
V, policy = value_iteration(rb, gamma, 1e-6)
path = rb.simulate(policy)
animate_solution(town_map, path, save_anim = False, until_caught = True,gamma=gamma)
""" | StarcoderdataPython |
4892562 | # Generated by Django 2.2.6 on 2019-12-07 14:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduce a `Person` base model and turn
    `Nino` into a multi-table-inheritance child of it.

    `Nino` loses its own `id`/`nombre`/`edad` columns (now inherited from
    `Person`) and gains the parent-link `person_ptr` primary key.
    """

    # Must be applied after this app's initial migration.
    dependencies = [
        ('personas', '0001_initial'),
    ]

    operations = [
        # New base model holding the shared fields.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.TextField()),
                ('edad', models.IntegerField()),
            ],
        ),
        # Drop Nino's own columns; they now live on Person.
        migrations.RemoveField(
            model_name='nino',
            name='edad',
        ),
        migrations.RemoveField(
            model_name='nino',
            name='id',
        ),
        migrations.RemoveField(
            model_name='nino',
            name='nombre',
        ),
        # Parent link making Nino a child of Person.  default=0 is a one-off
        # value for existing rows (preserve_default=False drops it afterwards).
        migrations.AddField(
            model_name='nino',
            name='person_ptr',
            field=models.OneToOneField(auto_created=True, default=0, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='personas.Person'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
2702 | <filename>ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
"""This module contains the general information for AdaptorMenloQStats ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorMenloQStatsConsts:
    """Allowed string values for AdaptorMenloQStats properties.

    Auto-generated constants: the MENLO_QUEUE_COMPONENT_* values are valid
    `menlo_queue_component` names, MENLO_QUEUE_INDEX_* are valid
    `menlo_queue_index` names, and SUSPECT_* are valid `suspect` flags.
    """
    MENLO_QUEUE_COMPONENT_N = "N"
    MENLO_QUEUE_COMPONENT_CPU = "cpu"
    MENLO_QUEUE_COMPONENT_ETH = "eth"
    MENLO_QUEUE_COMPONENT_FC = "fc"
    MENLO_QUEUE_COMPONENT_UNKNOWN = "unknown"
    MENLO_QUEUE_INDEX_0 = "0"
    MENLO_QUEUE_INDEX_0_A = "0_A"
    MENLO_QUEUE_INDEX_0_B = "0_B"
    MENLO_QUEUE_INDEX_1 = "1"
    MENLO_QUEUE_INDEX_1_A = "1_A"
    MENLO_QUEUE_INDEX_1_B = "1_B"
    MENLO_QUEUE_INDEX_UNKNOWN = "unknown"
    SUSPECT_FALSE = "false"
    SUSPECT_NO = "no"
    SUSPECT_TRUE = "true"
    SUSPECT_YES = "yes"
class AdaptorMenloQStats(ManagedObject):
    """Managed object for Menlo adapter queue statistics (adaptorMenloQStats).

    Auto-generated UCS SDK class: `mo_meta` describes the managed object
    itself (rn format, version, access, parent/child classes), `prop_meta`
    maps each Python attribute to its XML property constraints, and
    `prop_map` maps XML attribute names back to Python attribute names.
    """

    consts = AdaptorMenloQStatsConsts()
    # Properties that participate in the rn (relative name) of this MO.
    naming_props = set([u'menloQueueComponent', u'menloQueueIndex'])

    mo_meta = MoMeta("AdaptorMenloQStats", "adaptorMenloQStats", "menlo-q-stats-comp-[menlo_queue_component]index-[menlo_queue_index]", VersionMeta.Version111j, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], [u'adaptorUnit'], [u'adaptorMenloQStatsHist'], ["Get"])

    # Per-property metadata: (xml name, type, first version, access, masks,
    # length/range limits, regex, enum values).
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "drop_overrun_n0": MoPropertyMeta("drop_overrun_n0", "dropOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n0_delta": MoPropertyMeta("drop_overrun_n0_delta", "dropOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n0_delta_avg": MoPropertyMeta("drop_overrun_n0_delta_avg", "dropOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n0_delta_max": MoPropertyMeta("drop_overrun_n0_delta_max", "dropOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n0_delta_min": MoPropertyMeta("drop_overrun_n0_delta_min", "dropOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n1": MoPropertyMeta("drop_overrun_n1", "dropOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n1_delta": MoPropertyMeta("drop_overrun_n1_delta", "dropOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n1_delta_avg": MoPropertyMeta("drop_overrun_n1_delta_avg", "dropOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n1_delta_max": MoPropertyMeta("drop_overrun_n1_delta_max", "dropOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "drop_overrun_n1_delta_min": MoPropertyMeta("drop_overrun_n1_delta_min", "dropOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "menlo_queue_component": MoPropertyMeta("menlo_queue_component", "menloQueueComponent", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["N", "cpu", "eth", "fc", "unknown"], []),
        "menlo_queue_index": MoPropertyMeta("menlo_queue_index", "menloQueueIndex", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["0", "0_A", "0_B", "1", "1_A", "1_B", "unknown"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
        "thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "truncate_overrun_n0": MoPropertyMeta("truncate_overrun_n0", "truncateOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n0_delta": MoPropertyMeta("truncate_overrun_n0_delta", "truncateOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n0_delta_avg": MoPropertyMeta("truncate_overrun_n0_delta_avg", "truncateOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n0_delta_max": MoPropertyMeta("truncate_overrun_n0_delta_max", "truncateOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n0_delta_min": MoPropertyMeta("truncate_overrun_n0_delta_min", "truncateOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n1": MoPropertyMeta("truncate_overrun_n1", "truncateOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n1_delta": MoPropertyMeta("truncate_overrun_n1_delta", "truncateOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n1_delta_avg": MoPropertyMeta("truncate_overrun_n1_delta_avg", "truncateOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n1_delta_max": MoPropertyMeta("truncate_overrun_n1_delta_max", "truncateOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "truncate_overrun_n1_delta_min": MoPropertyMeta("truncate_overrun_n1_delta_min", "truncateOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "dropOverrunN0": "drop_overrun_n0",
        "dropOverrunN0Delta": "drop_overrun_n0_delta",
        "dropOverrunN0DeltaAvg": "drop_overrun_n0_delta_avg",
        "dropOverrunN0DeltaMax": "drop_overrun_n0_delta_max",
        "dropOverrunN0DeltaMin": "drop_overrun_n0_delta_min",
        "dropOverrunN1": "drop_overrun_n1",
        "dropOverrunN1Delta": "drop_overrun_n1_delta",
        "dropOverrunN1DeltaAvg": "drop_overrun_n1_delta_avg",
        "dropOverrunN1DeltaMax": "drop_overrun_n1_delta_max",
        "dropOverrunN1DeltaMin": "drop_overrun_n1_delta_min",
        "intervals": "intervals",
        "menloQueueComponent": "menlo_queue_component",
        "menloQueueIndex": "menlo_queue_index",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "suspect": "suspect",
        "thresholded": "thresholded",
        "timeCollected": "time_collected",
        "truncateOverrunN0": "truncate_overrun_n0",
        "truncateOverrunN0Delta": "truncate_overrun_n0_delta",
        "truncateOverrunN0DeltaAvg": "truncate_overrun_n0_delta_avg",
        "truncateOverrunN0DeltaMax": "truncate_overrun_n0_delta_max",
        "truncateOverrunN0DeltaMin": "truncate_overrun_n0_delta_min",
        "truncateOverrunN1": "truncate_overrun_n1",
        "truncateOverrunN1Delta": "truncate_overrun_n1_delta",
        "truncateOverrunN1DeltaAvg": "truncate_overrun_n1_delta_avg",
        "truncateOverrunN1DeltaMax": "truncate_overrun_n1_delta_max",
        "truncateOverrunN1DeltaMin": "truncate_overrun_n1_delta_min",
        "update": "update",
    }

    def __init__(self, parent_mo_or_dn, menlo_queue_component, menlo_queue_index, **kwargs):
        """Create the stats MO under `parent_mo_or_dn` with the two naming
        properties; all other properties default to None until populated."""
        self._dirty_mask = 0
        self.menlo_queue_component = menlo_queue_component
        self.menlo_queue_index = menlo_queue_index
        self.child_action = None
        self.drop_overrun_n0 = None
        self.drop_overrun_n0_delta = None
        self.drop_overrun_n0_delta_avg = None
        self.drop_overrun_n0_delta_max = None
        self.drop_overrun_n0_delta_min = None
        self.drop_overrun_n1 = None
        self.drop_overrun_n1_delta = None
        self.drop_overrun_n1_delta_avg = None
        self.drop_overrun_n1_delta_max = None
        self.drop_overrun_n1_delta_min = None
        self.intervals = None
        self.sacl = None
        self.status = None
        self.suspect = None
        self.thresholded = None
        self.time_collected = None
        self.truncate_overrun_n0 = None
        self.truncate_overrun_n0_delta = None
        self.truncate_overrun_n0_delta_avg = None
        self.truncate_overrun_n0_delta_max = None
        self.truncate_overrun_n0_delta_min = None
        self.truncate_overrun_n1 = None
        self.truncate_overrun_n1_delta = None
        self.truncate_overrun_n1_delta_avg = None
        self.truncate_overrun_n1_delta_max = None
        self.truncate_overrun_n1_delta_min = None
        self.update = None

        ManagedObject.__init__(self, "AdaptorMenloQStats", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
277919 | from mock import patch
from pretend import stub
from .util import V1TestCase, writeable_settings_stub
# Tests for both tests and tasks as they are the same code path
def make_thing(name, num, status_name, status_order):
    """Build a fake workitem stub: a Name/url pair with a nested Status stub."""
    status = stub(
        Name=status_name,
        Order=status_order,
    )
    return stub(
        Name='{0}-{1}'.format(name, num),
        url='http://example.com/{0}'.format(num),
        Status=status,
    )
class TestThingCommand(V1TestCase):
    """Tests for the read-only `tasks`/`tests` subcommands.

    Both subcommands share the same code path, so listing behaviour is
    exercised through `tasks` and the empty/error cases through `tests`.
    NOTE(review): the class-level `patch` attribute is presumably activated
    by V1TestCase's fixture machinery -- confirm against .util.
    """

    get_workitem = patch('helga_versionone.get_workitem')

    def setUp(self):
        super(TestThingCommand, self).setUp()
        # Three fake tasks with shuffled status orders (Done=99, None=0,
        # In Progress=50).
        self.results = [
            make_thing('thing', 1, 'Done', 99),
            make_thing('thing', 2, 'None', 0),
            make_thing('thing', 3, 'In Progress', 50),
        ]
        # Expected display order: ascending Status.Order (None, In Progress, Done).
        self.results_ordered = [self.results[i] for i in [1, 2, 0]]
        self.v1.Task.where().select.return_value = self.results

    def test_list_tasks(self):
        # Tasks are listed one per line as "[Status] Name url", sorted by status order.
        return self._test_command(
            'tasks whatever',
            '\n'.join([
                '[{0}] {1} {2}'.format(t.Status.Name, t.Name, t.url)
                for t in self.results_ordered
            ]),
        )

    def test_list_tests_none(self):
        # v1.Test is not stubbed, so no Tests are found.
        return self._test_command(
            'tests whatever',
            'Didn\'t find any Tests for whatever',
        )

    def test_bad_action(self):
        # Unknown sub-action is rejected with a personalized message.
        return self._test_command(
            'tests whatever fhqwhgads',
            'I can\'t just "fhqwhgads" that, {0}'.format(self.nick),
        )

    def test_add_failed_for_title(self):
        # `add` without a title is rejected.
        return self._test_command(
            'tests whatever add',
            'I\'m going to need a title for that, {0}'.format(self.nick),
        )

    def test_add_failed_no_write(self):
        # Write access is disabled by default settings, so `add` is refused.
        return self._test_command(
            'tasks whatever add Do a little dance',
            'I\'m sorry {0}, write access is disabled'.format(self.nick),
        )
class TestThingCommandWithWrite(V1TestCase):
    """Tests for the `tests ... add` subcommand with write access enabled
    (settings patched to the writeable stub)."""

    settings = patch('helga_versionone.settings', writeable_settings_stub)
    get_workitem = patch('helga_versionone.get_workitem')

    def test_tests_add_ok(self):
        # The parent workitem resolves to idref 3.
        self.get_workitem().idref = 3
        name = '<NAME>'
        url = 'http://example.com'
        self.v1.Test.create.return_value = stub(
            Name=name,
            url=url,
        )
        d = self._test_command(
            'tests whatever add {0}'.format(name),
            'I created {0} {1} for you, {2}'.format(name, url, self.nick)
        )

        def check(res):
            # Check data and commit called: the Test must be created under
            # the resolved parent idref.
            self.v1.Test.create.assert_called_once_with(
                Name=name,
                Parent=3,
            )

        d.addCallback(check)
        return d
| StarcoderdataPython |
6452196 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 11:16:04 2017
@author: <NAME> (<EMAIL>)
@brief: MSTD is a generic and efficient method to identify multi-scale topological domains (MSTD)
from symmetric Hi-C and other high resolution asymmetric promoter capture Hi-C datasets
@version 0.0.1
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from colormap import Color, Colormap
def _domain_only_diagonal(Data,win_n,distance):
    """Detect topological domains along the diagonal of a symmetric Hi-C matrix.

    Parameters
    ----------
    Data : square 2-D numpy array (symmetric contact matrix).
    win_n : half-width of the diagonal window used for each bin's density.
    distance : minimum separation (in bins) from a denser bin for a bin to
        become a domain center; also the minimum plateau width for a
        candidate boundary.

    Returns
    -------
    (DF, centers, corrbound, class_num):
        DF -- per-bin DataFrame with columns density/distance/ass_point/
        point_assign; centers -- list of domain-center bins; corrbound --
        candidate boundary bins; class_num -- number of clusters + 1
        (kept as returned by the original code).
    """
    Dsize = Data.shape[0]
    # step 1.1: per-bin density = mean of the win_n-sized square hanging off
    # the diagonal at bin ip (rows begin_i..ip x cols ip..end_i).
    pdensity = np.zeros(Dsize)
    DEN_Dict = {}
    for ip in range(Dsize):
        begin_i = ip - win_n + 1
        if begin_i < 0:
            begin_i = 0
        end_i = ip + win_n - 1
        if end_i > Dsize - 1:
            end_i = Dsize - 1
        pdensity[ip] = np.mean(Data[begin_i:ip+1, :][:, ip:end_i+1])
        DEN_Dict[ip] = pdensity[ip]
    # step 1.2: for each bin walk outwards until a denser bin is found;
    # NDP_Dict keeps that distance, ASS_Dict the denser bin (its "attractor").
    Max_step = 100
    NDP_Dict = {}
    ASS_Dict = {}
    for ip in np.arange(0, Dsize):
        for step in np.arange(0, max(ip, Dsize - ip)):
            if ip - step >= 0:
                up_point = pdensity[ip-step]
                if up_point > pdensity[ip]:
                    ASS_Dict[ip] = ip - step
                    break
            if ip + step <= Dsize - 1:
                down_point = pdensity[ip+step]
                if down_point > pdensity[ip]:
                    ASS_Dict[ip] = ip + step
                    break
            if step > Max_step:
                # Give up after Max_step bins: the bin is its own attractor.
                ASS_Dict[ip] = ip
                break
        NDP_Dict[ip] = step
    # Bins that are both dense (above the 30th percentile) and far from any
    # denser bin become cluster centers.
    Thr_den = np.percentile(pdensity, 30)
    point_assign = {}
    for temp in DEN_Dict:
        point_assign[temp] = 0
    class_num = 1
    join_num = 0
    centers = []
    for item in DEN_Dict:
        den = DEN_Dict[item]
        dist = NDP_Dict[item]
        if (den > Thr_den and dist > distance):
            point_assign[item] = class_num
            class_num = class_num + 1
            join_num = join_num + 1
            centers.append(item)
            ASS_Dict[item] = item
    # Propagate each center's label through the attractor chains, iterating
    # until no new bin joins a cluster.
    old_join_num = 0
    new_join_num = join_num
    while old_join_num != new_join_num:
        old_join_num = join_num
        for item in DEN_Dict:
            #if ((NDP_Dict[item]<distance)or(DEN_Dict[item]>Thr_den)):
            if ASS_Dict[item] == item:
                continue
            if point_assign[ASS_Dict[item]] != 0:
                if point_assign[item] == 0:
                    point_assign[item] = point_assign[ASS_Dict[item]]
                    join_num = join_num + 1
        new_join_num = join_num
    # Drop members whose density is below a third of their center's density.
    for item in DEN_Dict:
        if point_assign[item] != 0:
            temp = centers[int(point_assign[item])-1]
            if DEN_Dict[item] < DEN_Dict[temp]/3 :
                #print item
                point_assign[item] = 0
    # NLP_Dict: radius over which an assigned bin is a local density minimum
    # (walk stops as soon as a *less* dense neighbour is seen).
    NLP_Dict = {}
    for ip in np.arange(0, Dsize):
        if point_assign[ip] == 0:
            NLP_Dict[ip] = 0
            continue
        for step in np.arange(0, max(ip, Dsize - ip)):
            if ip - step >= 0:
                up_point = pdensity[ip-step]
                if up_point < pdensity[ip]:
                    break
            if ip + step <= Dsize - 1:
                down_point = pdensity[ip+step]
                if down_point < pdensity[ip]:
                    break
            if step > Max_step:
                break
        NLP_Dict[ip] = step
    # Candidate boundaries: sparse bins (below the 70th percentile) that are
    # local minima over at least `distance` bins.
    Thr_den1 = np.percentile(pdensity, 70)
    corrbound = []
    for item in DEN_Dict:
        den = DEN_Dict[item]
        dist = NLP_Dict[item]
        if (den < Thr_den1 and dist >= distance):
            corrbound.append(item)
    Dict = {'density':DEN_Dict,'distance':NDP_Dict,'ass_point':ASS_Dict,'point_assign':point_assign}
    DF = pd.DataFrame(Dict)
    return DF, centers, corrbound, class_num
def _generate_density_con(Data,win,thr):
Dsize=Data.shape
if Dsize[0]==Dsize[1]:
Dsize=Data.shape
M_density=np.zeros(Dsize)
DEN_Dict={}
for i in range(Dsize[0]):
for j in range(Dsize[1]):
if Data[i,j]>thr or i==j:
begin_i=i-win[0]
if begin_i<0:
begin_i=0
begin_j=j-win[1]
if begin_j<0:
begin_j=0
end_i=i+win[0]
if end_i>Dsize[0]-1:
end_i=Dsize[0]-1
end_j=j+win[1]
if end_j>Dsize[1]-1:
end_j=Dsize[1]-1
M_density[i,j]=np.mean(Data[begin_i:end_i,begin_j:end_j])+np.random.random(1)/1000.0
DEN_Dict[(i,j)]=M_density[i,j]
else:
M_density=np.zeros(Dsize)
DEN_Dict={}
for i in range(Dsize[0]):
for j in range(Dsize[1]):
if Data[i,j]>thr:
begin_i=i-win[0]
if begin_i<0:
begin_i=0
begin_j=j-win[1]
if begin_j<0:
begin_j=0
end_i=i+win[0]
if end_i>Dsize[0]-1:
end_i=Dsize[0]-1
end_j=j+win[1]
if end_j>Dsize[1]-1:
end_j=Dsize[1]-1
M_density[i,j]=np.mean(Data[begin_i:end_i,begin_j:end_j])+np.random.random(1)/1000.0
DEN_Dict[(i,j)]=M_density[i,j]
#print M_density[i,j]
return M_density, DEN_Dict
def _find_highpoints_v2(DEN_Dict,ratio=1):
    """For every element, find the nearest element of higher local density.

    Searches in growing rectangular "rings" around each element, with the
    second axis stretched by ``ratio``; the search stops at the first ring
    that contains any denser element, and radius is capped at Dis = 50.

    Parameters
    ----------
    DEN_Dict : {(i, j): density} mapping from _generate_density_con.
    ratio : column/row aspect ratio of the search neighbourhood and of the
        anisotropic distance metric.

    Returns
    -------
    (NDP_Dict, ASS_Dict): per-element distance to, and coordinates of, the
    nearest higher-density element.  Elements with no denser neighbour in
    range keep the default distance norm((Dis, Dis*ratio)) and point to
    themselves.
    """
    Dis = 50
    NDP_Dict = {}
    ASS_Dict = {}
    for item in DEN_Dict:
        #item=ASS_Dict[item]; item
        # Defaults: maximal distance, self as attractor.
        NDP_Dict[item] = np.linalg.norm((Dis, Dis*ratio))
        ASS_Dict[item] = item
        for step in np.arange(1, Dis+1, 1):
            # All offsets on the ring at radius `step` (stretched by `ratio`).
            step_point = [(item[0]+st, item[1]+ra) for st in np.arange(-step, step+1) for ra in np.arange(-step*ratio, step*ratio+1) if (abs(st)==step or ratio*(step-1)<abs(ra)<=ratio*step) ]
            step_point = [point for point in step_point if point in DEN_Dict]
            # Denser candidates on this ring, sorted by anisotropic distance.
            distance_index = [(np.linalg.norm(((item[0]-temp[0])*ratio, item[1]-temp[1])), temp) for temp in step_point if DEN_Dict[temp] > DEN_Dict[item]]
            distance_index.sort()
            for ind in distance_index:
                if DEN_Dict[ind[1]] > DEN_Dict[item]:
                    NDP_Dict[item] = ind[0]
                    ASS_Dict[item] = ind[1]
                    break
            if len(distance_index) > 0:
                break
    return NDP_Dict, ASS_Dict
def _assign_class(DEN_Dict,NDP_Dict,ASS_Dict,Thr_den,Thr_dis):
point_assign={}
for temp in DEN_Dict:
point_assign[temp]=0
class_num=1
join_num=0
centers=[]
for item in DEN_Dict:
den=DEN_Dict[item]
dist=NDP_Dict[item]
#value=den*dist
if (den>Thr_den) and (dist>Thr_dis):
point_assign[item]=class_num
class_num=class_num+1
join_num=join_num+1
centers.append(item)
ASS_Dict[item]=item
Al=len(DEN_Dict)
old_join_num=0
new_join_num=join_num
while old_join_num!=new_join_num:
old_join_num=join_num
for item in DEN_Dict:
if ((NDP_Dict[item]<Thr_dis)or(DEN_Dict[item]>Thr_den)) :
if ASS_Dict[item]==item:
continue
if point_assign[ASS_Dict[item]]!=0:
if point_assign[item]==0:
point_assign[item]=point_assign[ASS_Dict[item]]
join_num=join_num+1
new_join_num=join_num
return point_assign,class_num-1,centers
def _get_region_den2(DEN_Dict,NDP_Dict,ASS_Dict,point_assign,win,centers):
Thr=np.percentile(pd.Series(DEN_Dict),10)
for item in DEN_Dict:
if point_assign[item]!=0:
temp=centers[point_assign[item]-1]
#if DEN_Dict[item]<np.min(DEN_Dict[temp]/3,Thr):
if DEN_Dict[item]<Thr:
point_assign[item]=0
Dict={'density':DEN_Dict,'distance':NDP_Dict,'ass_point':ASS_Dict,'point_assign':point_assign}
DF=pd.DataFrame(Dict)
return DF
def _get_region_piont2(matrix_data,DF,win,centers):
    """Shrink each cluster's bounding box to the smallest rectangle holding
    ~99% of its masked signal.

    For every center, the members' bounding box is tightened from each of the
    four sides: scanning outwards from the center, the box edge is placed
    where the cumulative signal exceeds ``corr`` = 0.99 of that side's total.

    NOTE(review): ``win`` is unused here -- kept for interface stability.

    Returns a DataFrame with columns upper/bottom/left/right/cen_x/cen_y, one
    row per center; degenerate (zero-width or zero-height) boxes are dropped.
    """
    bound = np.zeros((len(centers), 4))
    for i, cen in enumerate(centers):
        # Members of cluster i+1 (cluster ids are 1-based).
        cen_index = DF[DF["point_assign"]==i+1].index
        # Mask the matrix to this cluster's members only.
        indictor = np.zeros(matrix_data.shape)
        row = []
        col = []
        for item in cen_index:
            row.append(item[0])
            col.append(item[1])
            indictor[item[0], item[1]] = 1
        M_temp = matrix_data*indictor
        # Initial bounding box of the members.
        left = np.min(np.array(row))
        right = np.max(np.array(row))
        bottom = np.min(np.array(col))
        upper = np.max(np.array(col))
        #print left,right,bottom,upper
        #MM=matrix_data[left:right+1,bottom:upper+1]
        #plt.subplots(figsize=(8,8))
        #sns.heatmap(MM[:,::-1])
        # Initialization: fraction of side signal to retain.
        corr = 0.99
        # Quadrant slices, each oriented so index 0 is nearest the center.
        # left
        M_left = M_temp[left:cen[0]+1, bottom:upper+1]
        M_left = M_left[::-1, :]
        sum_left = np.sum(M_left)
        # right
        M_right = M_temp[cen[0]:right+1, bottom:upper+1]
        sum_right = np.sum(M_right)
        # bottom
        M_bottom = M_temp[left:right+1, bottom:cen[1]+1]
        M_bottom = M_bottom[:, ::-1]
        sum_bottom = np.sum(M_bottom)
        # upper
        M_upper = M_temp[left:right+1, cen[1]:upper+1]
        sum_upper = np.sum(M_upper)
        # Tighten the left edge.
        sum_temp = 0
        for step in range(cen[0]-left+1):
            sum_temp = sum_temp + np.sum(M_left[step, :])
            if sum_left == 0:
                left = cen[0]-step
                break
            #print sum_temp, sum_left
            #print sum_temp/sum_left
            if sum_temp/sum_left > corr:
                left = cen[0]-step+1
                #print left
                break
        # Tighten the right edge.
        sum_temp = 0
        for step in range(right+1-cen[0]):
            sum_temp = sum_temp + np.sum(M_right[step, :])
            if sum_right == 0:
                right = cen[0]+step
                break
            if sum_temp/sum_right > corr:
                right = cen[0]+step+1
                #print sum_temp/sum_right,right
                break
        # Tighten the bottom edge.
        sum_temp = 0
        for step in range(cen[1]-bottom+1):
            sum_temp = sum_temp + np.sum(M_bottom[:, step])
            if sum_bottom == 0:
                bottom = cen[1]-step
                break
            if sum_temp/sum_bottom > corr:
                bottom = cen[1]-step+1
                #print sum_temp/sum_bottom,bottom
                break
        # Tighten the upper edge.
        sum_temp = 0
        for step in range(upper+1-cen[1]):
            sum_temp = sum_temp + np.sum(M_upper[:, step])
            if sum_upper == 0:
                upper = cen[1]+step
                break
            if sum_temp/sum_upper > corr:
                upper = cen[1]+step+1
                break
        bound[i, :] = np.array([upper, bottom, left, right])
    Bound = pd.DataFrame(bound, columns=['upper','bottom','left','right'])
    Centers = pd.DataFrame(centers, columns=['cen_x','cen_y'])
    Results = pd.concat([Bound, Centers], axis=1)
    # Drop degenerate boxes (zero height or zero width).
    Results = Results.loc[(Results['upper']!=Results['bottom'])*(Results['left']!=Results['right'])]
    return Results
def _def_strmouOnCHiC(matrix_data,win_n,thr_dis=10):
    """Run the full MSTD domain-detection pipeline on a (capture) Hi-C matrix.

    Pipeline: density estimation (_generate_density_con) -> nearest
    higher-density search (_find_highpoints_v2) -> cluster assignment
    (_assign_class) -> low-density pruning (_get_region_den2) -> rectangular
    boundary extraction (_get_region_piont2).

    Parameters
    ----------
    matrix_data : 2-D numpy array (N x M; square for symmetric Hi-C).
    win_n : density-window half-width along the first axis; the second axis
        is scaled by the matrix aspect ratio.
    thr_dis : minimum distance to a denser element for cluster centers.

    Returns
    -------
    DataFrame with one detected domain per row
    (upper/bottom/left/right/cen_x/cen_y).
    """
    # matrix size
    Mat_size = matrix_data.shape
    print ("Matrix size:"+str(Mat_size[0])+'*'+str(Mat_size[1]) )
    ratio = matrix_data.shape[1]//matrix_data.shape[0]
    # Density threshold: keep roughly thr_dis*N/6 of the strongest entries.
    point_num = thr_dis*matrix_data.shape[0]/6
    #print "Effective points:"+str(point_num)
    percent = 1-point_num/float(matrix_data.shape[0]*matrix_data.shape[1])
    thr = np.percentile(matrix_data, percent*100)
    win = (win_n, win_n*ratio)
    # if np.max(matrix_data.shape)<3000:
    #     thr=0
    # elif np.max(matrix_data.shape)<5000:
    #     thr=np.percentile(matrix_data,95)
    # else:
    #     thr=np.percentile(matrix_data,99)
    # step 2.1: local densities of the supra-threshold entries.
    M_density, DEN_Dict = _generate_density_con(matrix_data, win, thr)
    # step 2.2: nearest higher-density element for each entry.
    NDP_Dict, ASS_Dict = _find_highpoints_v2(DEN_Dict, ratio)
    Thr_den = np.percentile(pd.Series(DEN_Dict), 20)
    # step 2.3: pick centers and propagate cluster labels.
    point_assign, class_num, centers = _assign_class(DEN_Dict, NDP_Dict, ASS_Dict, Thr_den, thr_dis)
    # step 2.4: prune low-density members.
    DF = _get_region_den2(DEN_Dict, NDP_Dict, ASS_Dict, point_assign, win, centers)
    # step 2.5: extract the rectangular domain boundaries.
    Results = _get_region_piont2(matrix_data, DF, win, centers)
    return Results
def _return_clures(DF,centers,corrbound,num_clu):
    """Convert per-bin cluster labels into [Start, End) domain intervals.

    Scans DF['point_assign'] (in bin order) for label changes to obtain each
    cluster's start and end bin, then uses the candidate boundary bins in
    ``corrbound`` to snap pairs of adjacent domains onto a shared boundary.

    NOTE(review): ``num_clu`` is unused here -- kept for interface stability.

    Returns a DataFrame indexed by 1-based cluster id with Start/End columns;
    also prints the maximum domain length.
    """
    start = {}
    end = {}
    # flag: label seen on the previous bin (0 = unassigned).
    old_item = 0
    for i, item in enumerate(DF['point_assign']):
        if item != old_item:
            if old_item != 0:
                end[old_item] = i
            if item != 0 and (item not in start):
                start[item] = i
            old_item = item
    # Close the last open cluster, if any.
    if old_item != 0:
        end[item] = i
    #print np.max(pd.Series(end)-pd.Series(start))
    # Walk the sorted candidate boundaries against consecutive center pairs;
    # a boundary close enough to the next cluster's start (and with no gap
    # > 1 bin between the two clusters) becomes their shared edge.
    clu_count = 1
    i = 0
    #item=corrbound[i]
    while (i < len(corrbound) and clu_count < len(centers)):
        item = corrbound[i]
        if (item > centers[clu_count-1] and item < centers[clu_count]):
            if (start[clu_count+1]-item <= 10) and start[clu_count+1]-end[clu_count] < 2:
                end[clu_count] = item
                start[clu_count+1] = item
            clu_count = clu_count+1
            i = i+1
        elif item > centers[clu_count]:
            clu_count = clu_count+1
        else:
            i = i+1
        #print i,clu_count
    clures = pd.DataFrame({'Start':start,'End':end}, columns=['Start','End'])
    print (np.max(clures['End']-clures['Start']))
    return clures
def _plot_HiC(matrix_data,vmax,colors=['white','red']):
    """Draw a heatmap of a Hi-C matrix with a custom linear colormap.

    Parameters
    ----------
    matrix_data : 2-D numpy array to display.
    vmax : upper clip value for the color scale.
    colors : named colors defining the colormap gradient.  NOTE(review):
        mutable default argument -- safe here because it is only read.
    """
    #vmax=thr
    # Split the requested color sequence into per-channel lists for Colormap.
    red_list = list()
    green_list = list()
    blue_list = list()
    #colors=['white','red']
    #colors=['darkblue','green','gold','darkred']
    for color in colors:
        col = Color(color).rgb
        red_list.append(col[0])
        green_list.append(col[1])
        blue_list.append(col[2])
    c = Colormap()
    d = { 'blue': blue_list,
          'green':green_list,
          'red':red_list}
    mycmap = c.cmap(d)
    fig, ax = plt.subplots(figsize=(8,8))
    #new_data=np.triu(matrix_data)
    #new_data=np.transpose(new_data[:,::-1])
    #mask = np.zeros_like(new_data)
    #mask[np.tril_indices_from(mask,-1)] = True
    #mask=np.transpose(mask[:,::-1])
    #with sns.axes_style("white"):
    #sns.heatmap(new_data,xticklabels=100,yticklabels=100,mask=mask,cmap=mycmap,cbar=False)
    #ax.set_facecolor('w')
    #fig.patch.set_facecolor('w')
    # White background, no grid lines.
    ax.set_facecolor('w')
    ax.grid(b=None)
    sns.heatmap(matrix_data,vmax=vmax,xticklabels=100,yticklabels=100,cmap=mycmap,cbar=False)
    #sns.heatmap(np.transpose(matrix_data[:,::-1]),vmax=vmax,xticklabels=100,yticklabels=100,cmap=mycmap,cbar=False)
def _show_diagonal_result(clures, matrix_data, thr, colors=None):
    """Overlay detected diagonal domains as black squares on a Hi-C heatmap.

    Fixes: the removed pandas ``.ix`` accessor is replaced by label-based
    ``.loc`` (cluster ids in `clures` are 1-based row labels), and the
    mutable default argument is replaced by the None-sentinel idiom.

    Parameters
    ----------
    clures : DataFrame with Start/End columns, indexed by 1-based cluster id.
    matrix_data : 2-D numpy array to display.
    thr : upper clip value for the color scale.
    colors : optional colormap gradient, defaults to white -> red.
    """
    if colors is None:
        colors = ['white', 'red']
    _plot_HiC(matrix_data, thr, colors)
    for i in range(len(clures)):
        # Cluster labels are 1-based in `clures`.
        start = clures.loc[i + 1, 'Start']
        end = clures.loc[i + 1, 'End']
        # Square outline from (start, start) to (end, end), offset by half a
        # cell to align with heatmap cell edges.
        x = [start + 0.5, start + 0.5, end + 0.5, end + 0.5, start + 0.5]
        y = [start + 0.5, end + 0.5, end + 0.5, start + 0.5, start + 0.5]
        plt.plot(x, y, '-', color='k', lw=3)
    plt.grid(b=None)
    #plt.text(800,200,str(len(clures))+' Clusters\nThr_value (distance)= '+str(Thr_value))
    plt.show()
def _show_chic_clusterresult2(Results,matrix_data):
    """Show detected domains as black rectangles on a transposed CHi-C heatmap.

    Fix: the removed pandas ``.ix`` accessor is replaced by label-based
    ``.loc`` (the loop iterates `Results.index`, so the labels are valid).
    Note that `matrix_data` is clipped in place at a high percentile so the
    color scale is not dominated by outliers.
    """
    # Clip extreme counts; the percentile depends on matrix size.
    if np.max(matrix_data.shape) < 3000:
        thr = np.percentile(matrix_data, 99.5)
    else:
        thr = np.percentile(matrix_data, 99.9)
    matrix_data[matrix_data > thr] = thr
    print (thr)
    # Build a white -> green -> blue -> red linear colormap.
    red_list = list()
    green_list = list()
    blue_list = list()
    for color in ['white', 'green', 'blue', 'red']:
        col = Color(color).rgb
        red_list.append(col[0])
        green_list.append(col[1])
        blue_list.append(col[2])
    c = Colormap()
    d = { 'blue': blue_list,
          'green': green_list,
          'red': red_list}
    mycmap = c.cmap(d)
    plt.subplots(figsize=(8, 8))
    # Display the transposed matrix, no colorbar.
    sns.heatmap(matrix_data.T, xticklabels=100, yticklabels=500, cmap=mycmap, cbar=False)
    for i in Results.index:
        # Half-cell offsets align box edges with heatmap cell borders.
        upper = Results.loc[i, 'upper'] - 0.5
        bottom = Results.loc[i, 'bottom'] - 0.5
        left = Results.loc[i, 'left'] - 0.5
        right = Results.loc[i, 'right'] - 0.5
        y_loc = [upper, upper, bottom, bottom, upper]
        x_loc = [left, right, right, left, left]
        plt.plot(x_loc, y_loc, '-', color='k', lw=2.5)
    plt.grid(b=None)
    plt.show()
def _show_chic_clusterresult1(Results,matrix_data):
    """Show detected domains as black rectangles on a CHi-C heatmap
    (non-transposed, dark multi-color scale).

    Fix: the removed pandas ``.ix`` accessor is replaced by label-based
    ``.loc``.  Note that `matrix_data` is clipped in place at a high
    percentile so the color scale is not dominated by outliers.
    """
    # Clip extreme counts; the percentile depends on matrix size.
    if np.max(matrix_data.shape) < 3000:
        thr = np.percentile(matrix_data, 99.5)
    else:
        thr = np.percentile(matrix_data, 99.9)
    matrix_data[matrix_data > thr] = thr
    print (thr)
    # Build a darkblue -> green -> yellow -> gold -> darkred linear colormap.
    red_list = list()
    green_list = list()
    blue_list = list()
    for color in ['darkblue', 'green', 'yellow', 'gold', 'darkred']:
        col = Color(color).rgb
        red_list.append(col[0])
        green_list.append(col[1])
        blue_list.append(col[2])
    c = Colormap()
    d = { 'blue': blue_list,
          'green': green_list,
          'red': red_list}
    mycmap = c.cmap(d)
    plt.subplots(figsize=(8, 8))
    sns.heatmap(matrix_data, xticklabels=100, yticklabels=500, cmap=mycmap, cbar=False)
    for i in Results.index:
        # Half-cell offsets align box edges with heatmap cell borders.
        upper = Results.loc[i, 'upper'] - 0.5
        bottom = Results.loc[i, 'bottom'] - 0.5
        left = Results.loc[i, 'left'] - 0.5
        right = Results.loc[i, 'right'] - 0.5
        y_loc = [upper, upper, bottom, bottom, upper]
        x_loc = [left, right, right, left, left]
        plt.plot(x_loc, y_loc, '-', color='k', lw=2.5)
    plt.grid(b=None)
    plt.show()
def _generate_input_data(Matrix_file):
    """Build the promoter x other-end interaction matrix for one chromosome.

    Reads the promoter/other-end (OE) location table for the chromosome
    encoded in the input file name, then parses the triple-column matrix
    file (promoter, OE, value) into a dense numpy array with one row per
    promoter (in file order) and one column per sorted OE location.

    Fixes: the removed pandas ``.ix`` accessor is replaced by ``.loc``, and
    the input file is now closed via a ``with`` block even on error.

    NOTE(review): the location path mixes '/' and a Windows-style '\\\\'
    separator -- kept byte-for-byte to preserve behaviour, but confirm it
    resolves on the target platform.
    """
    'generate input example data for chic'
    #loc_add='H:\Dataset\Capture Hi-C\Celltypes_blood_17_location'
    #loc_add='~/.MSTDlib_test_v2/examples/Celltypes_blood_17_location'
    #Dir=os.path.dirname(MSTD.MSTDlib_test_v2.__file__)
    Dir = './src/MSTDlib'
    loc_add = Dir + '/data/Celltypes_blood_17_location'
    # The chromosome name is the suffix after the last underscore.
    CHR = Matrix_file.split("_")[-1]
    # Promoter and other-end genomic locations for this chromosome.
    pro_oe_loc = pd.read_table(loc_add + '\\' + CHR, index_col=None)
    pro_list = pro_oe_loc.loc[pro_oe_loc['type'] == 'promoter', 'loc']
    p_orderlist = sorted(pro_list)
    #p_strlist=[str(item) for item in p_orderlist]
    oe_list = pro_oe_loc.loc[pro_oe_loc['type'] == 'OE', 'loc']
    oe_orderlist = sorted(oe_list)
    oe_strlist = [str(item) for item in oe_orderlist]
    #p_orderlist=np.array(pro_list)
    with open(Matrix_file, 'r') as fin:
        header = fin.readline()
        line = fin.readline()
        Templine = line.rstrip("\n").split("\t")
        matrix_data = np.zeros((len(p_orderlist), len(oe_orderlist)))
        # pi is the current promoter's row; rows are assigned in file order.
        pi = 0
        matrix_data[pi, oe_strlist.index(Templine[1])] = float(Templine[2])
        # Promoter currently being filled.
        promoter = Templine[0]
        for line in fin:
            Templine = line.rstrip("\n").split("\t")
            if promoter != Templine[0]:
                # New promoter: advance to the next row.
                pi = pi + 1
                promoter = Templine[0]
            matrix_data[pi, oe_strlist.index(Templine[1])] = float(Templine[2])
    matrix_data[np.isnan(matrix_data)] = 0
    return matrix_data
def MSTD(Matrix_file,Output_file,MDHD=7,symmetry=1,window=10,visualization=1):
    """Detect chromatin domains with MSTD and write them to Output_file.
    @parameters
    Matrix_file: input file path.  For Hi-C maps (symmetry=1) this is an N*N
        matrix file without row/column names; for promoter capture Hi-C maps
        (symmetry=2) an N*M matrix file without row/column names.
    Output_file: output file path; each line is a triple containing the
        boundaries and the center of a detected domain.
    MDHD: integer, threshold for the minimum distance of the elements that
        have higher density than the element k.
    symmetry: 1 detects TADs on symmetric Hi-C maps, 2 detects PADs on
        asymmetric capture Hi-C maps.  NOTE(review): any other value is a
        silent no-op (the original docstring said "1/0", which is wrong).
    window: sliding-window size used by the density computation.
    visualization: if 1, a plot of the detected domains is shown.
    """
    if symmetry==1:
        print("#########################################################################")
        print("Step 0 : File Read ")
        print("#########################################################################")
        matrix_data=np.loadtxt(Matrix_file)
        matrix_data[np.isnan(matrix_data)]=0
        # Clip extreme outliers at the 99.99th percentile.
        thr=np.percentile(matrix_data,99.99)
        matrix_data[matrix_data>thr]=thr
        print("Step 0 : Done !!")
        print("#########################################################################")
        print("Step 1: define domain Only diagonal line")
        print("#########################################################################")
        DF,centers,corrbound,num_clu=_domain_only_diagonal(matrix_data,window,MDHD)
        #output cluster result
        clures=_return_clures(DF,centers,corrbound,num_clu)
        #clures=pd.DataFrame({'Start':start,'End':end,'Cen':center}, columns=['Start','End','Cen'])
        centers=pd.DataFrame(centers, index=clures.index, columns=['Cen'])
        boundaries=pd.concat([clures,centers],axis=1)
        boundaries.to_csv(Output_file,sep='\t',index=False)
        #Output_file_center=Output_file+'_centers'
        #pd.Series(centers).to_csv(Output_file_center,index=False)
        #show results
        if visualization==1:
            #_show_diagonal_result(clures,matrix_data)
            # Lower clipping threshold for display purposes only.
            thr=np.percentile(matrix_data,99.5)
            #thr=10
            colors=['white','green','red']
            sns.set_style("ticks")
            _show_diagonal_result(clures,matrix_data,thr,colors)
    if symmetry==2:
        print("#########################################################################")
        print("Step 0 : File Read ")
        print("#########################################################################")
        matrix_data=np.loadtxt(Matrix_file)
        thr=np.percentile(matrix_data,99.9999)
        matrix_data[matrix_data>thr]=thr
        print("Step 0 : Done !!")
        print("#########################################################################")
        print("Step 2: define structure moudle on all points or Capture Hi-C")
        print("#########################################################################")
        Results=_def_strmouOnCHiC(matrix_data,window,MDHD)
        Results.to_csv(Output_file,index=False,sep='\t')
        #Results=pd.read_table(Output_file,index_col=None)
        if visualization==1:
            #show capture hic cluster result
            _show_chic_clusterresult2(Results,matrix_data)
            #show_chic_clusterresult(centers,bound,matrix_data)
def MSTD2(Matrix_file,Output_file,MDHD=7,symmetry=1,window=5,visualization=1):
    """Variant of MSTD that reads capture Hi-C input as a triples file.
    Differs from MSTD only in the default ``window`` (5 instead of 10) and in
    parsing the symmetry=2 input via _generate_input_data instead of loadtxt.
    @parameters
    Matrix_file: input file path.  For Hi-C maps (symmetry=1) this is an N*N
        matrix file without row/column names; for promoter capture Hi-C maps
        (symmetry=2) a tab-separated triples file parsed by
        _generate_input_data.
    Output_file: output file path; each line is a triple containing the
        boundaries and the center of a detected domain.
    MDHD: integer, threshold for the minimum distance of the elements that
        have higher density than the element k.
    symmetry: 1 detects TADs on symmetric Hi-C maps, 2 detects PADs on
        asymmetric capture Hi-C maps; any other value is a silent no-op.
    window: sliding-window size used by the density computation.
    visualization: if 1, a plot of the detected domains is shown.
    """
    if symmetry==1:
        print("#########################################################################")
        print("Step 0 : File Read ")
        print("#########################################################################")
        matrix_data=np.loadtxt(Matrix_file)
        matrix_data[np.isnan(matrix_data)]=0
        # Clip extreme outliers at the 99.99th percentile.
        thr=np.percentile(matrix_data,99.99)
        matrix_data[matrix_data>thr]=thr
        print("Step 0 : Done !!")
        print("#########################################################################")
        print("Step 1: define domain Only diagonal line")
        print("#########################################################################")
        DF,centers,corrbound,num_clu=_domain_only_diagonal(matrix_data,window,MDHD)
        #output cluster result
        clures=_return_clures(DF,centers,corrbound,num_clu)
        #clures=pd.DataFrame({'Start':start,'End':end,'Cen':center}, columns=['Start','End','Cen'])
        centers=pd.DataFrame(centers, index=clures.index, columns=['Cen'])
        boundaries=pd.concat([clures,centers],axis=1)
        boundaries.to_csv(Output_file,sep='\t',index=False)
        #show results
        if visualization==1:
            # Lower clipping threshold for display purposes only.
            thr=np.percentile(matrix_data,99.5)
            #thr=10
            colors=['white','green','red']
            sns.set_style("ticks")
            _show_diagonal_result(clures,matrix_data,thr,colors)
    if symmetry==2:
        print("#########################################################################")
        print("Step 0 : File Read ")
        print("#########################################################################")
        matrix_data=_generate_input_data(Matrix_file)
        #matrix_data=np.loadtxt(Matrix_file)
        thr=np.percentile(matrix_data,99.9999)
        matrix_data[matrix_data>thr]=thr
        print("Step 0 : Done !!")
        print("#########################################################################")
        print("Step 2: define structure moudle on all points or Capture Hi-C")
        print("#########################################################################")
        Results=_def_strmouOnCHiC(matrix_data,window,MDHD)
        Results.to_csv(Output_file,index=False,sep='\t')
        #Results=pd.read_table(Output_file,index_col=None)
        if visualization==1:
            #show capture hic cluster result
            _show_chic_clusterresult2(Results,matrix_data)
            #show_chic_clusterresult(centers,bound,matrix_data)
def example(symmetry=1):
    """Run MSTD on one of the bundled example data sets.

    symmetry=1 runs the symmetric Hi-C example (TAD detection);
    symmetry=2 runs the asymmetric capture Hi-C example (PAD detection).
    Returns 0 on completion.
    """
    #Dir=os.getcwd()
    Dir='./src/MSTDlib'
    print("# 1. symmetry Hi-C")
    print("# 2. asymmetry capture Hi-C")
    if symmetry==1:
        # Use '/' separators so the relative paths work on every OS (the
        # original mixed './' with Windows '\\', which breaks on Linux/macOS).
        Matrix_file=Dir+'/data/cortex_chr6_2350-2500_HiC'
        Output_file=Dir+'/data/cortex_chr6_output'
        MSTD(Matrix_file,Output_file,MDHD=10,symmetry=1,window=5,visualization=1)
    elif symmetry==2:
        #example two: capture Hi-C
        Matrix_file=Dir+'/data/nB_chr19_480-700_CHiC'
        Output_file=Dir+'/data/nB_chr19_480-700_CHiC_output'
        MSTD(Matrix_file,Output_file,MDHD=100,symmetry=2,window=5,visualization=1)
    return 0
| StarcoderdataPython |
12847479 | <gh_stars>1-10
#!/usr/bin/python
# Python 2 CGI endpoint: recalculates a brewing recipe and reports completion
# as a tiny XML document.
# NOTE(review): credentials are hard-coded below (values look like dataset
# placeholders) -- they should live in configuration outside the web root.
import re
import sys
import cgi
import _mysql
import mysql.connector
from thememetro import *
from cloudNG import *
# Application database via the mysql.connector API.
con=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
form=cgi.FieldStorage()
theme=webTheme()
theme.bgcolor="#ffffff"
# Only emit the CGI content-type header for local users.
if theme.localUser:
 sys.stdout.write("Content-Type:text/xml\n\n")
grid={}
# Second, legacy connection through the low-level _mysql API.
db=_mysql.connect(host="localhost",user="brewerslab",passwd='<PASSWORD>',db="brewerslab")
print "<xml><junk>"
bc=brewerslabCloudApi()
#bc.calculateRecipe("<EMAIL>", form['recipe'].value)
#bc.compile("<EMAIL>", form['recipe'].value,None)
# Recalculate the recipe named by the 'recipe' form field.
bc.calculateRecipeWrapper("<EMAIL>",form['recipe'].value)
print "</junk><complete>1</complete></xml>"
| StarcoderdataPython |
386541 | __doc__ = "DRF library to operate resource's properties as a dictionary"
# Distribution metadata for the drf-madprops package.
__version__ = '1.1.0'
# Project home page.
__url__ = 'https://github.com/yola/drf-madprops'
| StarcoderdataPython |
11319047 | # Add toor dir of the src tree to the syspath, so that we can use absolute import
import sys
from pathlib import Path
# Put the repository root on sys.path so absolute imports (bgwork.*) resolve
# even when this module is run from inside the package directory.
file = Path(__file__).resolve()
parent, root = file.parent, file.parents[1]  # this dir and the repo root
sys.path.append(str(root))
import threading
from bgwork.exceptions import NullHandlerError
class Job(threading.Thread):
    """A background worker thread.

    After :meth:`start`, the thread waits ``interval`` seconds, invokes
    ``handler(*args, **kargs)``, and repeats until :meth:`stop` is called.
    """
    def __init__(self, jobname, interval, handler, isdaemon, *args, **kargs):
        super().__init__(name=jobname, daemon=isdaemon)
        # Doubles as the shutdown flag and as an interruptible timer:
        # waiting on it with a timeout implements the periodic schedule.
        self._stop_requested = threading.Event()
        self._interval = interval
        self._handler = handler
        self._args = args
        self._kargs = kargs
    def run(self):
        """Thread body: periodically invoke the handler until stopped."""
        if not self._handler:
            print(f"ERROR: Handler not installed in Job {self.name}")
            raise NullHandlerError
        # Event.wait returns False on timeout (time to fire the handler)
        # and True once stop() has set the flag (time to leave the loop).
        while not self._stop_requested.wait(timeout=float(self._interval)):
            self._handler(*self._args, **self._kargs)
        print(f"Job {self.name} stops running.")
    def stop(self):
        """Request shutdown and block until the thread has exited."""
        self._stop_requested.set()
        print(f"Job {self.name} is going to terminate.")
        print("Cleaning up\n")
        # Join so callers never leave zombie threads behind.
        self.join()
| StarcoderdataPython |
9724055 | from .structs import (
OrderSide, OrderStatus, OrderType, Pair, Period, Candle,
MarketTrade, Coin, PrivateTrade
)
from .market import Exchange
from .private import Account
from .api import ApiError, ApiProvider
# Public API re-exported on ``from <package> import *``.
__all__ = [
    'OrderSide', 'OrderStatus', 'OrderType', 'Pair', 'Coin',
    'Period', 'Candle', 'MarketTrade', 'PrivateTrade',
    'Exchange', 'Account',
    'ApiError', 'ApiProvider'
]
# Package version string.
__version__ = '0.5'
| StarcoderdataPython |
6684447 | <gh_stars>1-10
#!/usr/bin/env python3
"""Attach to a running LXC container and execute a command inside it.

Usage: <script> CONTAINER_ID [COMMAND ...]

Exits with the return code of ``lxc-attach`` (or 2 on usage errors).
"""
import subprocess
import sys

if __name__ == '__main__':
    # Require at least the container id; the command may be empty, in which
    # case lxc-attach starts the container's default shell.  (The original
    # crashed with IndexError when called with no arguments.)
    if len(sys.argv) < 2:
        sys.stderr.write('usage: %s CONTAINER_ID [COMMAND ...]\n' % sys.argv[0])
        sys.exit(2)
    cid = sys.argv[1]
    command = sys.argv[2:]
    # List form (shell=False) passes the command through without shell parsing.
    retcode = subprocess.call(['lxc-attach', '-n', cid, '--'] + command)
    sys.exit(retcode)
| StarcoderdataPython |
6679782 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of inenv.
# https://github.com/pnegahdar/inenv
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, <NAME> <<EMAIL>>
from setuptools import setup, find_packages
from inenv.version import __version__
# Build configuration for the inenv distribution.  Values are unchanged from
# the original call; long list-valued arguments are hoisted into named
# constants for readability.
LONG_DESCRIPTION = '''
Simple multi virtualenv command runner
'''
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: Unix',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Operating System :: OS Independent',
]
setup(
    name='inenv',
    version=__version__,
    description='Simple multi virtualenv command runner',
    long_description=LONG_DESCRIPTION,
    keywords='venv virtualenv python multivenv',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/pnegahdar/inenv',
    license='MIT',
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    include_package_data=False,
    install_requires=[
        'atomicwrites>=1.1.5',
        'click>=4.0',
        'virtualenv>=13.0.3,<20.0.0',
    ],
    setup_requires=[
        'pytest-runner',
        'pytest-pylint',
    ],
    tests_require=[
        'pytest',
        'pytest-cov',
        'pylint',
        "mock; python_version < '3.4'",
    ],
    entry_points={
        'console_scripts': [
            # Both console names invoke the same CLI runner.
            'inenv=inenv.cli:run_cli',
            'inenv_helper=inenv.cli:run_cli',
        ],
    },
)
| StarcoderdataPython |
12804248 | <reponame>ccrndn/labelbox-python
from typing import Any, Dict, List, Union
import pytest
from labelbox import LabelingFrontend
from labelbox.exceptions import InconsistentOntologyException
from labelbox.schema.ontology import Tool, Classification, Option, \
Ontology, OntologyBuilder
# Golden fixture: a fully normalized ontology payload exactly as serialized by
# OntologyBuilder.asdict().  schemaNodeId/featureSchemaId are None because the
# ontology has not been uploaded; the odd formatting is auto-generated.
_SAMPLE_ONTOLOGY = {
    "tools": [{
        "schemaNodeId": None,
        "featureSchemaId": None,
        "required": False,
        "name": "poly",
        "color": "#FF0000",
        "tool": "polygon",
        "classifications": []
    }, {
        "schemaNodeId": None,
        "featureSchemaId": None,
        "required": False,
        "name": "segment",
        "color": "#FF0000",
        "tool": "superpixel",
        "classifications": []
    }, {
        "schemaNodeId":
            None,
        "featureSchemaId":
            None,
        "required":
            False,
        "name":
            "bbox",
        "color":
            "#FF0000",
        "tool":
            "rectangle",
        "classifications": [{
            "schemaNodeId":
                None,
            "featureSchemaId":
                None,
            "required":
                True,
            "instructions":
                "nested classification",
            "name":
                "nested classification",
            "type":
                "radio",
            "options": [{
                "schemaNodeId":
                    None,
                "featureSchemaId":
                    None,
                "label":
                    "first",
                "value":
                    "first",
                "options": [{
                    "schemaNodeId": None,
                    "featureSchemaId": None,
                    "required": False,
                    "instructions": "nested nested text",
                    "name": "nested nested text",
                    "type": "text",
                    "options": []
                }]
            }, {
                "schemaNodeId": None,
                "featureSchemaId": None,
                "label": "second",
                "value": "second",
                "options": []
            }]
        }, {
            "schemaNodeId": None,
            "featureSchemaId": None,
            "required": True,
            "instructions": "nested text",
            "name": "nested text",
            "type": "text",
            "options": []
        }]
    }, {
        "schemaNodeId": None,
        "featureSchemaId": None,
        "required": False,
        "name": "dot",
        "color": "#FF0000",
        "tool": "point",
        "classifications": []
    }, {
        "schemaNodeId": None,
        "featureSchemaId": None,
        "required": False,
        "name": "polyline",
        "color": "#FF0000",
        "tool": "line",
        "classifications": []
    }, {
        "schemaNodeId": None,
        "featureSchemaId": None,
        "required": False,
        "name": "ner",
        "color": "#FF0000",
        "tool": "named-entity",
        "classifications": []
    }],
    "classifications": [{
        "schemaNodeId":
            None,
        "featureSchemaId":
            None,
        "required":
            True,
        "instructions":
            "This is a question.",
        "name":
            "This is a question.",
        "type":
            "radio",
        "options": [{
            "schemaNodeId": None,
            "featureSchemaId": None,
            "label": "yes",
            "value": "yes",
            "options": []
        }, {
            "schemaNodeId": None,
            "featureSchemaId": None,
            "label": "no",
            "value": "no",
            "options": []
        }]
    }]
}
@pytest.mark.parametrize("tool_type", list(Tool.Type))
def test_create_tool(tool_type) -> None:
    """Every Tool.Type can construct a Tool that remembers its type."""
    created = Tool(tool=tool_type, name="tool")
    assert created.tool == tool_type
@pytest.mark.parametrize("class_type", list(Classification.Type))
def test_create_classification(class_type) -> None:
    """Every Classification.Type can construct a Classification."""
    created = Classification(class_type=class_type, instructions="classification")
    assert created.class_type == class_type
@pytest.mark.parametrize("value, expected_value, typing",
                         [(3, 3, int), ("string", "string", str)])
def test_create_option(value, expected_value, typing) -> None:
    """An Option stores its value unchanged and mirrors it as the label."""
    option = Option(value=value)
    assert option.value == expected_value
    assert option.value == option.label
def test_create_empty_ontology() -> None:
    """A fresh OntologyBuilder starts with no tools and no classifications."""
    builder = OntologyBuilder()
    assert builder.tools == []
    assert builder.classifications == []
def test_add_ontology_tool() -> None:
    """Tools accumulate on the builder; duplicate names are rejected."""
    builder = OntologyBuilder()
    builder.add_tool(Tool(tool=Tool.Type.BBOX, name="bounding box"))
    builder.add_tool(Tool(tool=Tool.Type.SEGMENTATION, name="segmentation"))
    assert len(builder.tools) == 2
    assert all(type(added) == Tool for added in builder.tools)
    # Re-adding a tool whose name already exists must raise.
    with pytest.raises(InconsistentOntologyException) as exc:
        builder.add_tool(Tool(tool=Tool.Type.BBOX, name="bounding box"))
    assert "Duplicate tool name" in str(exc.value)
def test_add_ontology_classification() -> None:
    """Classifications accumulate; duplicate instructions are rejected."""
    builder = OntologyBuilder()
    builder.add_classification(
        Classification(class_type=Classification.Type.TEXT,
                       instructions="text"))
    builder.add_classification(
        Classification(class_type=Classification.Type.CHECKLIST,
                       instructions="checklist"))
    assert len(builder.classifications) == 2
    assert all(type(item) == Classification
               for item in builder.classifications)
    # Adding a classification whose instructions already exist must raise.
    with pytest.raises(InconsistentOntologyException) as exc:
        builder.add_classification(
            Classification(class_type=Classification.Type.TEXT,
                           instructions="text"))
    assert "Duplicate classification instructions" in str(exc.value)
def test_tool_add_classification() -> None:
    """Classifications nest under a tool; duplicates are rejected.

    Fixes two inconsistencies with the sibling duplicate-tests: asserts the
    library's specific InconsistentOntologyException instead of a blanket
    Exception, and inspects the raised exception itself (``exc.value``)
    rather than pytest's ExceptionInfo wrapper.
    """
    tool = Tool(tool=Tool.Type.SEGMENTATION, name="segmentation")
    classification = Classification(class_type=Classification.Type.TEXT,
                                    instructions="text")
    tool.add_classification(classification)
    assert tool.classifications == [classification]
    with pytest.raises(InconsistentOntologyException) as exc:
        tool.add_classification(classification)
    assert "Duplicate nested classification" in str(exc.value)
def test_classification_add_option() -> None:
    """Options nest under a classification; duplicate values are rejected."""
    radio = Classification(class_type=Classification.Type.RADIO,
                           instructions="radio")
    first = Option(value="option")
    radio.add_option(first)
    assert radio.options == [first]
    with pytest.raises(InconsistentOntologyException) as exc:
        radio.add_option(Option(value="option"))
    assert "Duplicate option" in str(exc.value)
def test_option_add_option() -> None:
    """A classification can nest under an option exactly once."""
    option = Option(value="option")
    nested = Classification(class_type=Classification.Type.TEXT,
                            instructions="text")
    option.add_option(nested)
    assert option.options == [nested]
    with pytest.raises(InconsistentOntologyException) as exc:
        option.add_option(nested)
    assert "Duplicate nested classification" in str(exc.value)
def test_ontology_asdict(project) -> None:
    """Round-trip: from_dict followed by asdict reproduces the fixture."""
    rebuilt = OntologyBuilder.from_dict(_SAMPLE_ONTOLOGY)
    assert rebuilt.asdict() == _SAMPLE_ONTOLOGY
def test_from_project_ontology(client, project) -> None:
    """A builder created from a project matches its normalized ontology."""
    builder = OntologyBuilder.from_project(project)
    assert builder.asdict() == project.ontology().normalized
| StarcoderdataPython |
8071569 | <reponame>elick007/IBMRedirection<gh_stars>1-10
import os
from time import sleep
from getpass import getpass
from random import choice
from sys import exit as exit_cmd
from webbrowser import open_new_tab
from cloud189.api import Cloud189
from cloud189.api.models import FileList, PathList
from cloud189.api.token import get_token
from cloud189.api.utils import logger
from cloud189.cli import config
from cloud189.cli.downloader import Downloader, Uploader, UploadType
from cloud189.cli.manager import global_task_mgr
from cloud189.cli.recovery import Recovery
from cloud189.cli.utils import *
class Commander:
"""网盘命令行"""
def __init__(self):
self._prompt = '> '
self._disk = Cloud189()
self._task_mgr = global_task_mgr
self._dir_list = ''
self._file_list = FileList()
self._path_list = PathList()
self._parent_id = -11
self._parent_name = ''
self._work_name = ''
self._work_id = -11
self._last_work_id = -11
self._reader_mode = False
self._reader_mode = config.reader_mode
self._default_dir_pwd = ''
self._disk.set_captcha_handler(captcha_handler)
@staticmethod
def clear():
clear_screen()
@staticmethod
def help():
print_help()
@staticmethod
def update():
check_update()
def bye(self):
if self._task_mgr.has_alive_task():
info("有任务在后台运行, 退出请直接关闭窗口")
else:
config.work_id = self._work_id
exit_cmd(0)
def rmode(self):
"""适用于屏幕阅读器用户的显示方式"""
# TODO
choice = input("以适宜屏幕阅读器的方式显示(y): ")
if choice and choice.lower() == 'y':
config.reader_mode = True
self._reader_mode = True
info("已启用 Reader Mode")
else:
config.reader_mode = False
self._reader_mode = False
info("已关闭 Reader Mode")
def cdrec(self):
"""进入回收站模式"""
rec = Recovery(self._disk)
rec.run()
self.refresh()
def refresh(self, dir_id=None, auto=False):
"""刷新当前文件夹和路径信息"""
dir_id = self._work_id if dir_id is None else dir_id
self._file_list, self._path_list = self._disk.get_file_list(dir_id)
if not self._file_list and not self._path_list:
if auto:
error(f"文件夹 id={dir_id} 无效(被删除), 将切换到根目录!")
return self.refresh(-11)
else:
error(f"文件夹 id 无效 {dir_id=}, {self._work_id=}")
return None
self._prompt = '/'.join(self._path_list.all_name) + ' > '
self._last_work_id = self._work_id
self._work_name = self._path_list[-1].name
self._work_id = self._path_list[-1].id
if dir_id != -11: # 如果存在上级路径
self._parent_name = self._path_list[-2].name
self._parent_id = self._path_list[-2].id
def login(self, args):
"""登录网盘"""
if args:
if '--auto' in args:
if config.cookie and self._disk.login_by_cookie(config) == Cloud189.SUCCESS:
self.refresh(config.work_id, auto=True)
return None
username = "18948100719"
password = "<PASSWORD>"
if not username or not password:
error('没有用户名或密码 :(')
return None
code = self._disk.login(username, password)
if code == Cloud189.NETWORK_ERROR:
error("登录失败,网络连接异常")
return None
elif code == Cloud189.FAILED:
error('登录失败,用户名或密码错误 :(')
return None
# 登录成功保存用户 cookie
config.username = username
config.password = password
config.cookie = self._disk.get_cookie()
code, token = get_token(username, password)
if code == Cloud189.SUCCESS:
config.set_token(*token)
self._disk.set_session(*token)
self._work_id = -11
self.refresh(-11)
def clogin(self):
"""使用 cookie 登录"""
if platform() == 'Linux' and not os.environ.get('DISPLAY'):
info("请使用浏览器打开: https://cloud.189.cn 获取 cookie")
else:
open_new_tab('https://cloud.189.cn')
info("请设置 Cookie 内容:")
c_login_user = input("COOKIE_LOGIN_USER=")
if not c_login_user:
error("请输入正确的 Cookie 信息")
return None
cookie = {"COOKIE_LOGIN_USER": str(c_login_user)}
if self._disk.login_by_cookie(cookie) == Cloud189.SUCCESS:
user = self._disk.get_user_infos()
if not user:
error("发生未知错误!")
return None
user_infos = {
'name': user.account.replace('@189.cn', ''),
'pwd': '',
'cookie': cookie,
'key': '',
'secret': '',
'token': '',
'save_path': './downloads',
'work_id': -11
}
config.set_infos(user_infos)
self._work_id = config.work_id
self.refresh()
else:
error("登录失败, 请检查 Cookie 是否正确")
def logout(self, args):
"""注销/删除用户"""
if args: # 删除用户
for name in args:
result = config.del_user(name)
if result:
info(f"成功删除用户 {name}")
else:
error(f"删除用户 {name} 失败!")
return None
clear_screen()
self._prompt = '> '
# self._disk.logout() # TODO(rachpt@126.com): 还没有注销登录的方法
self._file_list.clear()
self._path_list = ''
self._parent_id = -11
self._work_id = -11
self._last_work_id = -11
self._parent_name = ''
self._work_name = ''
config.cookie = None
def su(self, args):
"""列出、切换用户"""
users = config.get_users_name()
def list_user():
for i, user in enumerate(users):
user_info = config.get_user_info(user)
methord = "用户名+密码 登录" if user_info[2] else "Cookie 登录"
print(f"[{i}] 用户名: {user}, {methord}")
if args:
if args[0] == '-l':
list_user()
return None
elif args[0] in users:
select_user = args[0]
else:
error(f"用户名 {args[0]} 无效")
return None
else:
list_user()
select = input("请输入用户序号, [0、1 ... ]: ")
if select.isnumeric():
select = int(select)
if select > len(users):
error(f"序号 {select} 无效!")
return None
select_user = users[select]
else:
error(f"序号 {select} 无效!")
return None
config.work_id = self._work_id # 保存旧的工作目录
result = config.change_user(select_user)
if result and self._disk.login_by_cookie(config) == Cloud189.SUCCESS:
info(f"成功切换至用户 {config.username}")
self.refresh(config.work_id)
else:
error("切换用户失败!")
def ls(self, args):
"""列出文件(夹)"""
fid = old_fid = self._work_id
flag_full = False
flag_arg_l = False
if args:
if len(args) >= 2:
if args[0] == '-l':
flag_full = True
fname = args[-1]
elif args[-1] == '-l':
flag_full = True
fname = args[0]
else:
info("暂不支持查看多个文件!")
fname = args[0]
else:
if args[0] == '-l':
flag_full = True
flag_arg_l = True
else:
fname = args[0]
if not flag_arg_l:
if file := self._file_list.find_by_name(fname):
if file.isFolder:
fid = file.id
else:
error(f"{fname} 非文件夹,显示当前目录文件")
else:
error(f"{fname} 不存在,显示当前目录文件")
if fid != old_fid:
self._file_list, _ = self._disk.get_file_list(fid)
if not flag_full: # 只罗列文件名
for file in self._file_list:
if file.isFolder:
print(f"\033[1;34m{handle_name(file.name)}\033[0m", end=' ')
else:
print(f"{handle_name(file.name)}", end=' ')
print()
else:
if self._reader_mode: # 方便屏幕阅读器阅读
for file in self._file_list:
print(
f"{handle_name(file.name)} 大小:{get_file_size_str(file.size)} 上传时间:{file.ctime} ID:{file.id}")
else: # 普通用户显示方式
for file in self._file_list:
star = '✦' if file.isStarred else '✧' # 好像 没什么卵用
file_name = f"\033[1;34m{handle_name(file.name)}\033[0m" if file.isFolder else handle_name(
file.name)
print("# {0:<17}{1:<4}{2:<20}{3:>8} {4}".format(
file.id, star, file.ctime, get_file_size_str(file.size), file_name))
if fid != old_fid:
self._file_list, _ = self._disk.get_file_list(old_fid)
def cd(self, args):
"""切换工作目录"""
dir_name = args[0]
if not dir_name:
info('cd .. 返回上级路径, cd - 返回上次路径, cd / 返回根目录')
elif dir_name in ["..", "../"]:
self.refresh(self._parent_id)
elif dir_name == '/':
self.refresh(-11)
elif dir_name == '-':
self.refresh(self._last_work_id)
elif dir_name == '.':
pass
elif folder := self._file_list.find_by_name(dir_name):
self.refresh(folder.id)
else:
error(f'文件夹不存在: {dir_name}')
def mkdir(self, args):
"""创建文件夹"""
if not args:
info('参数:新建文件夹名')
refresh_flag = False
for name in args:
if self._file_list.find_by_name(name):
error(f'文件夹已存在: {name}')
continue
r = self._disk.mkdir(self._work_id, name)
if r.code == Cloud189.SUCCESS:
print(f"{name} ID: ", r.id)
refresh_flag = True
else:
error(f'创建文件夹 {name} 失败!')
continue
if refresh_flag:
self.refresh()
def rm(self, args):
"""删除文件(夹)"""
if not args:
info('参数:删除文件夹(夹)名')
return None
for name in args:
if file := self._file_list.find_by_name(name):
self._disk.delete_by_id(file.id)
print(f"删除:{name} 成功!")
else:
error(f"无此文件:{name}")
self.refresh()
def rename(self, args):
"""重命名文件(夹)"""
name = args[0].strip(' ')
if not name:
info('参数:原文件名 [新文件名]')
elif file := self._file_list.find_by_name(name):
new = args[1].strip(' ') if len(args) == 2 else input("请输入新文件名:")
logger.debug(f"{new=}, {args=}")
code = self._disk.rename(file.id, new)
if code == Cloud189.SUCCESS:
self.refresh()
elif code == Cloud189.NETWORK_ERROR:
error('网络错误,请重试!')
else:
error('失败,未知错误!')
else:
error(f'没有找到文件(夹): {name}')
def mv(self, args):
"""移动文件或文件夹"""
name = args[0]
if not name:
info('参数:文件(夹)名 [新文件夹名/id]')
folder_name = ''
target_id = None
file_info = self._file_list.find_by_name(name)
if not file_info:
error(f"文件(夹)不存在: {name}")
return None
if len(args) > 1:
if args[-1].isnumeric():
target_id = args[-1]
else:
folder_name = args[-1]
if not target_id:
info("正在获取所有文件夹信息,请稍后...")
tree_list = self._disk.get_folder_nodes()
if not tree_list:
error("获取文件夹信息出错,请重试.")
return None
if folder_name:
if folder := tree_list.find_by_name(folder_name):
target_id = folder.id
else:
error(f"文件夹 {folder_name} 不存在!")
return None
else:
tree_dict = tree_list.get_path_id()
choice_list = list(tree_dict.keys())
def _condition(typed_str, choice_str):
path_depth = len(choice_str.split('/'))
# 没有输入时, 补全 Cloud189,深度 1
if not typed_str and path_depth == 1:
return True
# Cloud189/ 深度为 2,补全同深度的文件夹 Cloud189/test 、Cloud189/txt
# Cloud189/tx 应该补全 Cloud189/txt
if path_depth == len(typed_str.split('/')) and choice_str.startswith(typed_str):
return True
set_completer(choice_list, condition=_condition)
choice = input('请输入路径(TAB键补全) : ')
if not choice or choice not in choice_list:
error(f"目标路径不存在: {choice}")
return None
target_id = tree_dict.get(choice)
if self._disk.move_file(file_info, target_id) == Cloud189.SUCCESS:
self._file_list.pop_by_id(file_info.id)
else:
error(f"移动文件(夹)到 {choice} 失败")
def down(self, args):
"""自动选择下载方式"""
task_flag = False
follow = False
for arg in args:
if arg == '-f':
follow = True
args.remove(arg)
# TODO: 通过分享链接下载
i = 0
while i < len(args):
item = args[i]
if item.startswith("http"):
pwd = ''
if i < len(args) - 1 and (not args[i + 1].startswith("http")):
pwd = args[i + 1]
i += 1 # 额外加一
self._disk.get_file_info_by_url(item, pwd)
elif file := self._file_list.find_by_name(item):
downloader = Downloader(self._disk)
f_path = '/'.join(self._path_list.all_name) # 文件在网盘的父路径
if file.isFolder: # 使用 web 接口打包下载文件夹
downloader.set_fid(file.id, is_file=False, f_path=f_path, f_name=item)
task_flag = True
self._task_mgr.add_task(downloader) # 提交下载任务
else: # 下载文件
downloader.set_fid(file.id, is_file=True, f_path=f_path, f_name=item)
task_flag = True
self._task_mgr.add_task(downloader) # 提交下载任务
else:
error(f'文件(夹)不存在: {item}')
i += 1
if follow and task_flag:
self.jobs(['-f', ])
elif task_flag:
print("开始下载, 输入 jobs 查看下载进度...")
def jobs(self, args):
"""显示后台任务列表"""
follow = False
for arg in args:
if arg == '-f':
print()
follow = True
args.remove(arg)
if not args:
self._task_mgr.show_tasks(follow)
for arg in args:
if arg.isnumeric():
self._task_mgr.show_detail(int(arg), follow)
else:
self._task_mgr.show_tasks(follow)
def upload(self, args):
"""上传文件(夹)"""
if not args:
info('参数:文件路径')
task_flag = False
follow = False
force = False
mkdir = True
url = False
for arg in args:
follow, force, mkdir, url, match = parsing_up_params(arg, follow, force, mkdir, url)
if match:
args.remove(arg)
for path in args:
if url:
self.upload_by_url(path)
task_flag = True
else:
path = path.strip('\"\' ') # 去除直接拖文件到窗口产生的引号
if not os.path.exists(path):
error(f'该路径不存在哦: {path}')
continue
uploader = Uploader(self._disk)
if os.path.isfile(path):
uploader.set_upload_path(path, is_file=True, force=force)
else:
uploader.set_upload_path(path, is_file=False, force=force, mkdir=mkdir)
uploader.set_target(self._work_id, self._work_name)
self._task_mgr.add_task(uploader)
task_flag = True
if follow and task_flag:
self.jobs(['-f', ])
elif task_flag:
print("开始上传, 输入 jobs 查看上传进度...")
def upload_by_url(self, url):
"""远程下载上传"""
uploader = Uploader(self._disk)
uploader.set_upload_url(url)
uploader.set_target(self._work_id, self._work_name)
self._task_mgr.add_task(uploader)
def upload_by_MD5info(self, md5, size,name,url,session_key,session_secret,access_token):
self._disk.set_session(session_key,session_secret,access_token)
uploader = Uploader(self._disk)
uploader.set_md5_info(md5, size,name,url)
uploader.set_target(self._work_id, self._work_name)
self._task_mgr.add_task(uploader)
def share(self, args):
"""分享文件"""
name = args[0]
if not name:
info('参数:需要分享的文件 [1/2/3] [1/2]')
return None
if file := self._file_list.find_by_name(name):
et = args[1] if len(args) >= 2 else None
ac = args[2] if len(args) >= 3 else None
result = self._disk.share_file(file.id, et, ac)
if result.code == Cloud189.SUCCESS:
print("-" * 50)
print(f"{'文件夹名' if file.isFolder else '文件名 '} : {name}")
print(f"上传时间 : {file.ctime}")
if not file.isFolder:
print(f"文件大小 : {get_file_size_str(file.size)}")
print(f"分享链接 : {result.url}")
print(f"提取码 : {result.pwd or '无'}")
if result.et == '1':
time = '1天'
elif result.et == '2':
time = '7天'
else:
time = '永久'
print(f"有效期 : {time}")
print("-" * 50)
else:
error('获取文件(夹)信息出错!')
else:
error(f"文件(夹)不存在: {name}")
def shared(self, args):
"""显示分享文件"""
stype = 1 # 默认查看 发出的分享
if args and args[0] == '2':
stype = 2 # 收到的分享
all_file = self._disk.list_shared_url(stype)
if not all_file:
info("失败或者没有数据!")
return None
for item in all_file:
f_name = item.name if item.isFolder else f"\033[1;34m{item.name}\033[0m" # 给你点颜色..
print("https:{0:<30} 提取码: {1:>4} [转存/下载/浏览: {2}/{3}/{4}] 文件名: {5}".format(
item.url, item.pwd, item.copyC, item.downC, item.prevC, f_name))
def sign(self, args):
"""签到 + 抽奖"""
if '-a' in args or '--all' in args:
old_user = self.who()
for user in config.get_users_name():
self.su([user, ])
sleep(0.5)
self._disk.user_sign()
sleep(0.5)
self.su([old_user, ])
else:
self._disk.user_sign()
def who(self):
"""打印当前登录账户信息,没有错误则返回用户名"""
user = self._disk.get_user_infos()
if not user:
error("发生未知错误!")
return None
quota = ", 总空间: {:.3f} GB".format(user.quota / 1073741824) # GB
used = ", 已使用: {:.3f} GB".format(user.used / 1073741824) # GB
nickname = f", 昵称: {user.nickname}"
print(f"账号: {user.account}, UID: {user.id}{nickname}{quota}{used}")
# 99 家庭云黄金会员, 199 家庭云铂金会员 (可能不是这个的值)
if user.vip == 100:
vip = "黄金会员"
elif user.vip == 200:
vip = "铂金会员"
else: # 0
vip = "普通会员"
start_time = f", 开始时间: {user.beginTime}" if user.beginTime else ''
end_time = f", 到期时间: {user.endTime}" if user.endTime else ''
print(f"用户类别: {vip}{start_time}{end_time}")
if user.domain:
print(f"个人主页: https://cloud.189.cn/u/{user.domain}")
return user.account.replace('@189.cn', '')
def setpath(self):
"""设置下载路径"""
print(f"当前下载路径 : {config.save_path}")
path = input('修改为 -> ').strip("\"\' ")
if os.path.isdir(path):
config.save_path = path
else:
error('路径非法,取消修改')
def ll(self, args):
"""列出文件(夹),详细模式"""
if choice((0, 1, 0)): # 1/3 概率刷新
self.refresh()
self.ls(['-l', *args])
def quota(self):
self.who()
def exit(self):
self.bye()
def b(self):
self.bye()
def r(self):
self.refresh()
def c(self):
self.clear()
def j(self, args):
self.jobs(args)
def u(self, args):
self.upload(args)
def d(self, args):
self.down(args)
def run_one(self, cmd, args):
"""运行单任务入口"""
no_arg_cmd = ['help', 'update', 'who', 'quota']
cmd_with_arg = ['ls', 'll', 'down', 'mkdir', 'su', 'sign', 'logout',
'mv', 'rename', 'rm', 'share', 'upload']
if cmd in ("upload", "down"):
if "-f" not in args:
args.append("-f")
if cmd in no_arg_cmd:
getattr(self, cmd)()
elif cmd in cmd_with_arg:
getattr(self, cmd)(args)
else:
print(f"命令有误,或者不支持单任务运行 {cmd}")
    def run(self):
        """Handle one round of user input in interactive mode."""
        # commands that take no argument vs. commands accepting arguments
        no_arg_cmd = ['bye', 'exit', 'cdrec', 'clear', 'clogin', 'help', 'r', 'c', 'b',
                      'refresh', 'rmode', 'setpath', 'update', 'who', 'quota']
        cmd_with_arg = ['ls', 'll', 'cd', 'down', 'jobs', 'shared', 'su', 'login', 'logout',
                        'mkdir', 'mv', 'rename', 'rm', 'share', 'upload', 'sign', 'j', 'u', 'd']
        choice_list = [handle_name(i) for i in self._file_list.all_name]  # quote file names containing spaces
        cmd_list = no_arg_cmd + cmd_with_arg
        set_completer(choice_list, cmd_list=cmd_list)
        try:
            args = input(self._prompt).split(' ', 1)
            # NOTE(review): str.split always yields at least one element,
            # so this branch looks unreachable — confirm before removing.
            if len(args) == 0:
                return None
        except KeyboardInterrupt:
            print('')
            info('退出本程序请输入 bye 或 exit')
            return None
        # command, argument list (may contain spaces; empty when absent)
        cmd, args = (args[0], []) if len(args) == 1 else (
            args[0], handle_args(args[1]))
        if cmd in no_arg_cmd:
            getattr(self, cmd)()
        elif cmd in cmd_with_arg:
            getattr(self, cmd)(args)
| StarcoderdataPython |
8090327 | # -*- coding: utf-8 -*-
import re
import six
import warnings
from functools import wraps
from collections import Iterable
import pandas as pd
def get_forward_returns_columns(columns):
    """Select the forward-return columns (named ``period_<N>``) from *columns*."""
    pattern = re.compile(r"^period_\d+$")
    mask = columns.astype('str').str.contains(pattern, regex=True)
    return columns[mask]
def convert_to_forward_returns_columns(period):
    """Return the ``period_<N>`` column name for an integer *period*.

    Values that cannot be formatted as an integer (e.g. an existing
    column-name string) are returned unchanged.
    """
    try:
        column_name = 'period_{:d}'.format(period)
    except ValueError:
        return period
    return column_name
def ignore_warning(message='', category=Warning, module='', lineno=0, append=False):
    """Decorator factory: run the wrapped function with matching warnings
    filtered out.

    The filter is installed inside ``warnings.catch_warnings()``, so the
    global warning state is restored after each call.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    'ignore', message=message, category=category,
                    module=module, lineno=lineno, append=append)
                return func(*args, **kwargs)
        return wrapper
    return decorator
def ensure_tuple(x):
    """Return *x* wrapped in a tuple.

    Strings and non-iterables become a 1-tuple ``(x,)``; any other
    iterable is materialized as a tuple of its elements.
    """
    # `Iterable` was removed from the `collections` top-level namespace in
    # Python 3.10; import it from its canonical home (collections.abc)
    # here so this function keeps working on modern interpreters even
    # though the module-level import above uses the legacy spelling.
    from collections.abc import Iterable
    if isinstance(x, six.string_types) or not isinstance(x, Iterable):
        return (x,)
    else:
        return tuple(x)
| StarcoderdataPython |
6702489 | <reponame>flosincapite/HackingNeuralNetworks<filename>utils.py<gh_stars>0
import numpy as np
from skimage import io
def read_image(fname):
    """Load an image file and return it as a (1, 28, 28, 1) float32 batch
    with pixel values scaled from [0, 255] to [0.0, 1.0].

    NOTE(review): assumes *fname* is a single-channel image at least
    28x28 pixels (MNIST-style); only the top-left 28x28 region is used —
    confirm with callers.
    """
    image = io.imread(fname)
    result = np.zeros(shape=[1, 28, 28, 1], dtype=np.float32)
    # Vectorized equivalent of the original per-pixel double loop:
    # crop to 28x28 and normalize in a single array operation.
    result[0, :, :, 0] = np.asarray(image[:28, :28], dtype=np.float32) / 255
    return result
| StarcoderdataPython |
210183 | <filename>pytest_headlock/plugin_headlock_debug.py
"""
This plugin creates a CMakeLists file, that contains all testsetups of the
first (failed) test.
To avoid overwriting this File the "--keep-first-failed-pytest" command line
option can be set.
The content of this file is used by testlibs/debug_failed.py
to run only one test.
In contrast to the builtin cache plugin, this plugin provides the option
--keep-first-failed-pytest. This option makes it possible to avoid overwriting
'CMakeLists.txt' and thus rerun a test again and again with
'debug_failed.py' (even if it passed in the last run).
A test is also marked as failed if its execution
stops with a crash (=no teardown executed)
"""
import os
from pathlib import Path
from headlock.testsetup import TestSetup, ToolChainDriver
from .common import PYTEST_HEADLOCK_DIR
# Module-level state, populated by pytest_configure():
# path of the master CMakeLists file ('' while unconfigured)
master_cmakelist = ''
# True when --keep-first-failed-pytest was given (freeze the recorded test)
keep_failed = False
def cmakelists_read_state():
    """Return ``(nodeid, state)`` recorded in the master CMakeLists file.

    ``nodeid`` is taken from the third line ('# <nodeid>'); ``state`` is
    'OK', 'FAILED', or 'UNDEFINED' when the file is missing, too short,
    malformed, or carries no result marker yet.
    """
    try:
        # Close the handle deterministically; the original leaked it by
        # relying on refcount finalization of the open() result.
        with open(master_cmakelist, 'r') as cmfile:
            lines = cmfile.readlines()
    except IOError:
        lines = []
    if len(lines) < 4:
        return '', 'UNDEFINED'
    first_line = lines[2]
    last_line = lines[-1]
    if first_line[0] != '#':
        return '', 'UNDEFINED'
    nodeid = first_line[1:].strip()
    if last_line.strip() not in ('# OK', '# FAILED'):
        return nodeid, 'UNDEFINED'
    return nodeid, last_line[1:].strip()
def cmakelists_reset(nodeid):
    """Start a fresh master CMakeLists file recording *nodeid* as the
    currently running test (write errors are silently ignored)."""
    header_lines = (
        '# DO NOT MODIFY THIS FILE (CREATED BY '
        'pytest plugin headlock-cmake)\n',
        '#\n',
        f'# {nodeid}\n',
        'cmake_minimum_required(VERSION 3.6)\n',
    )
    try:
        with open(master_cmakelist, 'w') as cmfile:
            cmfile.writelines(header_lines)
    except OSError:
        pass
def cmakelists_write_result(result):
    """Append the final marker line ('# OK' / '# FAILED') to the master
    CMakeLists file (write errors are silently ignored)."""
    try:
        with open(master_cmakelist, 'a') as cmfile:
            cmfile.write(f'# {result}')
    except OSError:
        pass
def initialize():
    """Delete any stale master CMakeLists file left over from a previous run."""
    try:
        os.remove(master_cmakelist)
    except OSError:
        # file absent (or not removable) -- nothing to clean up
        pass
def start_test(nodeid):
    """Record *nodeid* as the current test unless a failure is already latched."""
    _, state = cmakelists_read_state()
    if state == 'FAILED':
        # keep the first failure's setup; do not overwrite it
        return
    cmakelists_reset(nodeid)
def finish_test(nodeid, failed):
    """Append the OK/FAILED marker if *nodeid* is still the recorded,
    not-yet-resolved test."""
    recorded_nodeid, recorded_state = cmakelists_read_state()
    if recorded_nodeid != nodeid or recorded_state != 'UNDEFINED':
        return
    cmakelists_write_result('FAILED' if failed else 'OK')
#--- PyTest specific interface: ---
def pytest_addoption(parser):
    """pytest hook: register --keep-first-failed-pytest (stored as KEEP_FAILED)."""
    parser.addoption('--keep-first-failed-pytest',
                     action='store_true', dest='KEEP_FAILED')
def pytest_configure(config):
    """pytest hook: read CLI options and prepare the headlock cache dir.

    Unless --keep-first-failed-pytest is set, (re)creates the headlock
    directory with a self-ignoring .gitignore and removes any stale
    master CMakeLists file.
    """
    global master_cmakelist, keep_failed
    # NOTE: the original also declared `abs_markerfile` global here; that
    # name is never defined or used anywhere in this module, so it was
    # dropped.
    keep_failed = config.option.KEEP_FAILED
    if not keep_failed:
        master_cmakelist = os.path.join(config.rootdir,
                                        PYTEST_HEADLOCK_DIR, 'CMakeLists.txt')
        master_cmakelist_dir = os.path.dirname(master_cmakelist)
        if not os.path.exists(master_cmakelist_dir):
            os.mkdir(master_cmakelist_dir)
        # keep the generated directory out of version control
        gitignore_path = os.path.join(master_cmakelist_dir, '.gitignore')
        with open(gitignore_path, 'wt') as gitignore:
            gitignore.write('# created by pytest-headlock automatically, '
                            'do not change\n*')
        initialize()
def pytest_runtest_setup(item):
    """pytest hook: before each test, snapshot its node id (unless frozen)."""
    if keep_failed:
        return
    start_test(item.nodeid)
def pytest_runtest_logreport(report):
    """pytest hook: after the call phase, latch the test's outcome."""
    if keep_failed or report.when != 'call':
        return
    finish_test(report.nodeid, report.failed)
class CMakeToolChain(ToolChainDriver):
    """Decorator around another ToolChainDriver that mirrors every build
    into a CMake project, so a failed test can be rebuilt and debugged
    from an IDE via the generated CMakeLists.txt files.

    All toolchain queries are forwarded to the wrapped *base_toolchain*;
    only ``build()`` adds the CMake-generation side effect.
    """
    # -Werror so the IDE build surfaces the same diagnostics as the test build
    ADDITIONAL_COMPILE_OPTIONS = ['-Werror']
    ADDITIONAL_LINK_OPTIONS = ['-Werror']

    def __init__(self, base_toolchain):
        self.base_toolchain = base_toolchain
        # prepend the base toolchain's options to this class' additions
        self.ADDITIONAL_COMPILE_OPTIONS = \
            base_toolchain.ADDITIONAL_COMPILE_OPTIONS + \
            self.ADDITIONAL_COMPILE_OPTIONS
        self.ADDITIONAL_LINK_OPTIONS = \
            base_toolchain.ADDITIONAL_LINK_OPTIONS + \
            self.ADDITIONAL_LINK_OPTIONS
        self.CLANG_TARGET = base_toolchain.CLANG_TARGET

    def sys_predef_macros(self):
        """Forward to the wrapped toolchain."""
        return self.base_toolchain.sys_predef_macros()

    def sys_incl_dirs(self):
        """Forward to the wrapped toolchain."""
        return self.base_toolchain.sys_incl_dirs()

    def exe_path(self, name, build_dir):
        """Forward to the wrapped toolchain."""
        return self.base_toolchain.exe_path(name, build_dir)

    @staticmethod
    def escape(text):
        """Quote *text* for a CMakeLists file if it contains characters
        CMake would misinterpret (double quotes or parentheses)."""
        # parameter renamed from `str`, which shadowed the builtin
        if '"' in text or '(' in text or ')' in text:
            return '"' + text.replace('"', '\\"') + '"'
        else:
            return text

    def generate_cmakelists(self, prj_name, build_dir, transunits,
                            req_libs, lib_dirs):
        """Yield the text chunks of a CMakeLists.txt that builds
        *transunits* into a shared library named ``__headlock_dbg__``,
        with one OBJECT library per subsystem."""
        yield f'# This file was generated by CMakeToolChain ' \
              f'automaticially.\n' \
              f'# Do not modify it manually!\n' \
              f'\n' \
              f'cmake_minimum_required(VERSION 3.6)\n' \
              f'project({prj_name} C)\n' \
              f'set(CMAKE_C_STANDARD 99)\n' \
              f'\n' \
              f'add_library(TS_{prj_name} SHARED'
        subsys_names = sorted({tu.subsys_name for tu in transunits})
        shortend_prj_name, *_ = prj_name.split('.')
        for subsys_name in subsys_names:
            yield f' $<TARGET_OBJECTS:CMOD_{subsys_name}_{shortend_prj_name}>'
        yield ')\n'
        compile_options = self.base_toolchain.ADDITIONAL_COMPILE_OPTIONS \
                          + self.ADDITIONAL_COMPILE_OPTIONS
        link_options = self.base_toolchain.ADDITIONAL_LINK_OPTIONS \
                       + self.ADDITIONAL_LINK_OPTIONS
        yield f"add_compile_options({' '.join(compile_options)})\n"
        yield f"set(CMAKE_EXE_LINKER_FLAGS \"{' '.join(link_options)}\")\n"
        yield '\n'
        if lib_dirs:
            yield f'link_directories({" ".join(lib_dirs)})\n'
        yield f'set_target_properties(TS_{prj_name} PROPERTIES\n' \
              f' RUNTIME_OUTPUT_DIRECTORY ${{CMAKE_CURRENT_SOURCE_DIR}}\n' \
              f' OUTPUT_NAME __headlock_dbg__\n' \
              f' PREFIX "")\n'
        if req_libs:
            yield f'target_link_libraries(TS_{prj_name} {" ".join(req_libs)})\n'
        yield '\n'
        for subsys_name in subsys_names:
            yield f'add_library(CMOD_{subsys_name}_{shortend_prj_name} OBJECT'
            abs_incl_dirs = []
            predef_macros = {}
            transunits_of_subsys = list(filter(
                lambda tu: tu.subsys_name == subsys_name, transunits))
            for transunit in sorted(transunits_of_subsys):
                rel_path = os.path.relpath(transunit.abs_src_filename,build_dir)
                yield ' ' + str(rel_path).replace('\\', '/')
                # NOTE(review): only the LAST transunit's include dirs and
                # macros are emitted per subsystem — presumably all
                # transunits of one subsystem share them; confirm upstream.
                abs_incl_dirs = transunit.abs_incl_dirs
                predef_macros = transunit.predef_macros
            yield ')\n'
            yield f'target_compile_definitions(CMOD_{subsys_name}_{shortend_prj_name} PUBLIC'
            for mname, mval in predef_macros.items():
                yield ' '
                yield self.escape(mname + ('' if mval is None else f'={mval}'))
            yield ')\n'
            yield f'target_include_directories(CMOD_{subsys_name}_{shortend_prj_name} PUBLIC'
            for incl_dir in abs_incl_dirs:
                relative_path = os.path.relpath(incl_dir, build_dir)
                yield ' ' + relative_path.replace('\\', '/')
            yield ')\n'
            yield '\n'

    def build(self, name, build_dir, transunits, req_libs, lib_dirs):
        """Write the per-test CMakeLists, register it in the master
        CMakeLists while a test is still unresolved, then delegate the
        real build to the wrapped toolchain."""
        cmakelists_path = build_dir / 'CMakeLists.txt'
        cmakelists_content = ''.join(
            self.generate_cmakelists(name, build_dir, transunits,
                                     req_libs, lib_dirs))
        cmakelists_path.write_text(cmakelists_content)
        if master_cmakelist:
            master_cmakelist_path = Path(master_cmakelist)
            master_cmakelist_dir = master_cmakelist_path.parent.resolve()
            rel_build_dir = os.path.relpath(build_dir,str(master_cmakelist_dir))
            rel_build_dir_str = str(rel_build_dir).replace('\\', '/')
            if master_cmakelist_path.exists():
                # fix: close the file handle deterministically (the
                # original left the open() result to the garbage collector)
                with master_cmakelist_path.open() as cmfile:
                    lines = cmfile.readlines()
                if len(lines) >= 4:
                    lastline = lines[-1]
                    # append only while the current test has no result
                    # marker yet (last line is not a '#' comment)
                    if len(lastline) > 0 and lastline[0] != '#':
                        with master_cmakelist_path.open('a') as cmfile:
                            cmfile.write(
                                f'add_subdirectory('
                                f'{rel_build_dir_str} {name})\n')
        self.base_toolchain.build(name, build_dir, transunits,
                                  req_libs, lib_dirs)
TestSetup.__TOOLCHAIN__ = CMakeToolChain(TestSetup.__TOOLCHAIN__)
| StarcoderdataPython |
1641787 | <gh_stars>1-10
import corpusTools
def Test1():
    """Exploratory smoke test: initialize the ConferenceCorpus wrapper and
    run sample acronym lookups against several of its event tables."""
    # stolen from ConferenceCorpusIntro
    cc = corpusTools.ConferenceCorpusIntro() # initializes the corpus
    cc.printCacheFile() # prints where the cached file is located
    # cc.printAvailableDatasourceIds() # does what it says on the tin
    # cc.printEventsOfDatasource(sourceId="wikidata") # prints the first ten entries of a data source
    # cc.printEventsOfSeries(sourceId="or", seriesAcronym="AAAI", limit=5) # prints 'limit' amount of entries found in the given source with the given acronym. Note: There has to be an eventAcronym that is matched to an entrie as we query for this. Seems to be only usable with open research
    # cc.printSqlQueryResult() # SQL query the Database (the cached file) via SQLite, I think
    # cc.printDatasourceStats() # does what it says on the tin
    # Test lines
    # cc.printEventsOfSeries(sourceId="wikidata", seriesAcronym="", limit=100)
    cc.printSqlQueryResultTest("SELECT * FROM event_dblp WHERE eventId LIKE '%HPCC%'")
    print('\n')
    cc.printSqlQueryResultTest("SELECT * FROM event_wikidata WHERE acronym LIKE '%HPCC%'")
    # Like does not mean exact match for that we need =; also it is case insensitive.
    # % is a wild card an can be 0 or more characters.
    # Also given that % is a wild card we have to make sure we don't extract events whose acronym contains our input.
    # we may still want to use % however as in cases like dblp searching in eventId for acronyms might be advisable
    # since the acronym column can contain the Title.
    # Important to note is that the "acronym" in eventId does not always match the actual acronym.
    # Also acronyms are often listed with a year or similar.
    # If we want to remove false positives, we maybe could operate like the acronymIsIn function from excelExtract.py.
    # Note that we have to be careful that columns are named different or not existing in other sources.
    # For example: dblp acronym column is useless in some cases and the eventId column is more useful tho less reliable
    # Also to note are eventseries_cdw and _wdc as they contain alternate spellings among other things of the same acronym. However it seems as if this would create more ambiguity especially _wdc
    # They are not mentioned on the wiki tho and I don't know how they were created or how reliable they are.
    # cc.printSqlQueryResultTest("SELECT * FROM eventseries_cdw WHERE (acronym+`acronym:1`+`acronym:2`) LIKE '%AI%'") # I don't know what is wrong with this multi column query
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_cdw WHERE acronym LIKE '%aiia%'")
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_cdw WHERE `acronym:1` LIKE '%aiia%'")
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_cdw WHERE `acronym:2` LIKE '%aiia%'")
    # cc.printSqlQueryResultTest("SELECT * FROM eventseries_wdc WHERE (acronym+`acronym:1`+`acronym:2`) LIKE '%WWW%'") # I don't know what is wrong with this multi column query
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_wdc WHERE acronym LIKE '%WWW%'")
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_wdc WHERE `acronym:1` LIKE '%WWW%'")
    cc.printSqlQueryResultTest("SELECT * FROM eventseries_wdc WHERE `acronym:2` LIKE '%WWW%'")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.