hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8577638bf4ccf5772ce85b6e85457d1e027d3afe | 1,507 | py | Python | parallel_esn/example/power_consumption_oneshot.py | zblanks/parallel_esn | 25a979d0863ce54a4a588f4216dc473d4e9c5e8a | [
"BSD-2-Clause"
] | 7 | 2019-05-06T00:32:24.000Z | 2021-06-03T14:49:23.000Z | parallel_esn/example/power_consumption_oneshot.py | zblanks/parallel_esn | 25a979d0863ce54a4a588f4216dc473d4e9c5e8a | [
"BSD-2-Clause"
] | 8 | 2019-04-20T04:51:38.000Z | 2020-02-25T22:25:34.000Z | parallel_esn/example/power_consumption_oneshot.py | zblanks/parallel_esn | 25a979d0863ce54a4a588f4216dc473d4e9c5e8a | [
"BSD-2-Clause"
] | 2 | 2019-04-19T11:05:51.000Z | 2020-10-15T20:40:26.000Z | from pkg_resources import resource_filename
import numpy as np
import matplotlib.pyplot as plt
from ..esn import ESN
from ..utils import chunk_data, standardize_traindata, scale_data
# Example using real data, one shot prediction
# Load data
fname = resource_filename('parallel_esn', 'data/PJM_Load_hourly.csv')
data = np.loadtxt(fname, delimiter=',', skiprows=1, usecols=[1])
tot_len = data.shape[0]
val_len = tot_len//10
train_len = tot_len-val_len
# Split up loaded data with 9/10ths going to training data
# and 1/10th going to validation data
train_dat = data[:train_len]
val_dat = data[train_len:]
# Standardize training data to make it more neural network-friendly
train_dat, mu, sigma = standardize_traindata(train_dat)
# Scale validatino data by mean and s.dev determined by training data
val_dat = scale_data(val_dat, mu, sigma)
windowsize = 160
trainU, trainY = chunk_data(train_dat, windowsize, 20)
valU, valY = chunk_data(val_dat, windowsize, 20)
# Create a new ESN
esn = ESN(1, windowsize, 1, 3)
loss = esn.train_validate(trainU, trainY, valU, valY)
print("validation loss = {}".format(loss))
time = np.arange(windowsize)
plt.plot(time, valU[0, 0, :], 'ob', label='input')
pred = esn.predict(valU[0, 0:1, :])
plt.plot(time+windowsize, pred[0, :], '-r', label='predicted')
plt.plot(time+windowsize, valY[0, 0, :], '^g', label='observed')
plt.title("PJM Standardized Power Consumption (One Shot)")
plt.ylabel("Arb. Units.")
plt.xlabel("Hours")
plt.legend(loc=2, numpoints=1)
plt.show()
| 32.06383 | 69 | 0.741208 | 241 | 1,507 | 4.510373 | 0.460581 | 0.033119 | 0.027599 | 0.027599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023538 | 0.126078 | 1,507 | 46 | 70 | 32.76087 | 0.801822 | 0.197744 | 0 | 0 | 0 | 0 | 0.121565 | 0.019983 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8577a2ae24790cf5340a14689ec14a5be4fe05c0 | 7,522 | py | Python | tests/st/explainer/test_runner.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 2 | 2021-07-08T13:10:42.000Z | 2021-11-08T02:48:57.000Z | tests/st/explainer/test_runner.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | tests/st/explainer/test_runner.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests on mindspore.explainer.ImageClassificationRunner."""
import os
import shutil
from random import random
from unittest.mock import patch
import numpy as np
import pytest
from PIL import Image
from mindspore import context
import mindspore as ms
import mindspore.nn as nn
from mindspore.dataset import GeneratorDataset
from mindspore.explainer import ImageClassificationRunner
from mindspore.explainer._image_classification_runner import _normalize
from mindspore.explainer.benchmark import Faithfulness
from mindspore.explainer.explanation import Gradient
from mindspore.train.summary import SummaryRecord
CONST = random()
NUMDATA = 2
context.set_context(mode=context.PYNATIVE_MODE)
def image_label_bbox_generator():
for i in range(NUMDATA):
image = np.arange(i, i + 16 * 3).reshape((3, 4, 4)) / 50
label = np.array(i)
bbox = np.array([1, 1, 2, 2])
yield (image, label, bbox)
class SimpleNet(nn.Cell):
"""
Simple model for the unit test.
"""
def __init__(self):
super(SimpleNet, self).__init__()
self.reshape = ms.ops.operations.Reshape()
def construct(self, x):
prob = ms.Tensor([0.1, 0.9], ms.float32)
prob = self.reshape(prob, (1, 2))
return prob
class ActivationFn(nn.Cell):
"""
Simple activation function for unit test.
"""
def __init__(self):
super(ActivationFn, self).__init__()
def construct(self, x):
return x
def mock_gradient_call(_, inputs, targets):
return inputs[:, 0:1, :, :]
def mock_faithfulness_evaluate(_, explainer, inputs, targets, saliency):
return CONST * targets
def mock_make_rgba(array):
return array.asnumpy()
class TestRunner:
"""Test on Runner."""
def setup_method(self):
self.dataset = GeneratorDataset(image_label_bbox_generator, ["image", "label", "bbox"])
self.labels = ["label_{}".format(i) for i in range(2)]
self.network = SimpleNet()
self.summary_dir = "summary_test_temp"
self.explainer = [Gradient(self.network)]
self.activation_fn = ActivationFn()
self.benchmarkers = [Faithfulness(num_labels=len(self.labels),
metric="NaiveFaithfulness",
activation_fn=self.activation_fn)]
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run_saliency_no_benchmark(self):
"""Test case when argument benchmarkers is not parsed."""
res = []
runner = ImageClassificationRunner(summary_dir=self.summary_dir, data=(self.dataset, self.labels),
network=self.network, activation_fn=self.activation_fn)
def mock_summary_add_value(_, plugin, name, value):
res.append((plugin, name, value))
with patch.object(SummaryRecord, "add_value", mock_summary_add_value), \
patch.object(Gradient, "__call__", mock_gradient_call):
runner.register_saliency(self.explainer)
runner.run()
# test on meta data
idx = 0
assert res[idx][0] == "explainer"
assert res[idx][1] == "metadata"
assert res[idx][2].metadata.label == self.labels
assert res[idx][2].metadata.explain_method == ["Gradient"]
# test on inference data
for i in range(NUMDATA):
idx += 1
data_np = np.arange(i, i + 3 * 16).reshape((3, 4, 4)) / 50
assert res[idx][0] == "explainer"
assert res[idx][1] == "sample"
assert res[idx][2].sample_id == i
original_path = os.path.join(self.summary_dir, res[idx][2].image_path)
with open(original_path, "rb") as f:
image_data = np.asarray(Image.open(f)) / 255.0
original_image = _normalize(np.transpose(data_np, [1, 2, 0]))
assert np.allclose(image_data, original_image, rtol=3e-2, atol=3e-2)
idx += 1
assert res[idx][0] == "explainer"
assert res[idx][1] == "inference"
assert res[idx][2].sample_id == i
assert res[idx][2].ground_truth_label == [i]
diff = np.array(res[idx][2].inference.ground_truth_prob) - np.array([[0.1, 0.9][i]])
assert np.max(np.abs(diff)) < 1e-6
assert res[idx][2].inference.predicted_label == [1]
diff = np.array(res[idx][2].inference.predicted_prob) - np.array([0.9])
assert np.max(np.abs(diff)) < 1e-6
# test on explanation data
for i in range(NUMDATA):
idx += 1
data_np = np.arange(i, i + 3 * 16).reshape((3, 4, 4)) / 50
saliency_np = data_np[0, :, :]
assert res[idx][0] == "explainer"
assert res[idx][1] == "explanation"
assert res[idx][2].sample_id == i
assert res[idx][2].explanation[0].explain_method == "Gradient"
assert res[idx][2].explanation[0].label in [i, 1]
heatmap_path = os.path.join(self.summary_dir, res[idx][2].explanation[0].heatmap_path)
assert os.path.exists(heatmap_path)
with open(heatmap_path, "rb") as f:
heatmap_data = np.asarray(Image.open(f)) / 255.0
heatmap_image = _normalize(saliency_np)
assert np.allclose(heatmap_data, heatmap_image, atol=3e-2, rtol=3e-2)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run_saliency_with_benchmark(self):
"""Test case when argument benchmarkers is parsed."""
res = []
def mock_summary_add_value(_, plugin, name, value):
res.append((plugin, name, value))
runner = ImageClassificationRunner(summary_dir=self.summary_dir, data=(self.dataset, self.labels),
network=self.network, activation_fn=self.activation_fn)
with patch.object(SummaryRecord, "add_value", mock_summary_add_value), \
patch.object(Gradient, "__call__", mock_gradient_call), \
patch.object(Faithfulness, "evaluate", mock_faithfulness_evaluate):
runner.register_saliency(self.explainer, self.benchmarkers)
runner.run()
idx = 3 * NUMDATA + 1 # start index of benchmark data
assert res[idx][0] == "explainer"
assert res[idx][1] == "benchmark"
assert abs(res[idx][2].benchmark[0].total_score - 2 / 3 * CONST) < 1e-6
diff = np.array(res[idx][2].benchmark[0].label_score) - np.array([i * CONST for i in range(NUMDATA)])
assert np.max(np.abs(diff)) < 1e-6
def teardown_method(self):
shutil.rmtree(self.summary_dir)
| 37.422886 | 109 | 0.626695 | 962 | 7,522 | 4.747401 | 0.230769 | 0.032844 | 0.049923 | 0.025619 | 0.41559 | 0.358441 | 0.335888 | 0.319247 | 0.271732 | 0.248741 | 0 | 0.022579 | 0.246344 | 7,522 | 200 | 110 | 37.61 | 0.783031 | 0.130417 | 0 | 0.338346 | 0 | 0 | 0.031825 | 0 | 0 | 0 | 0 | 0 | 0.195489 | 1 | 0.105263 | false | 0 | 0.120301 | 0.030075 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8578189186a9c269dc0edf7eade08b1bd089bcd5 | 28,956 | py | Python | GUI src/app.py | xngst/press-graphs | 0fdb1b402fc948cf66c1d0c66c726e7ecf6f15e5 | [
"MIT"
] | null | null | null | GUI src/app.py | xngst/press-graphs | 0fdb1b402fc948cf66c1d0c66c726e7ecf6f15e5 | [
"MIT"
] | null | null | null | GUI src/app.py | xngst/press-graphs | 0fdb1b402fc948cf66c1d0c66c726e7ecf6f15e5 | [
"MIT"
] | null | null | null | """
PRESSGRAPHS DASH CLIENT
WEB GUI interface for PressGraphs WebAPI
"""
###################################
# IMPORTS
###################################
#builtins
from datetime import datetime
from datetime import timedelta
#3rd party
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_table as dt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
from dash.dependencies import Input, Output, State
#oww
from md import md_txt
###################################
# DEFINITIONS
###################################
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CERULEAN])
app.title = 'Press Graphs'
app.config.suppress_callback_exceptions = True
server = app.server
startup_time = datetime.now().strftime("%Y %m %d %H:%M")
API_KEY = "" # register your own API key at http://pressgraphs.pythonanywhere.com/create/test_user
MAX_REQUEST_DAY = 90
def build_layout():
"""
def to serve app.layout every time the app loads
"""
layout = html.Div(style={"padding":"2vw"},
children=[dcc.Location(id='url', refresh=True),
dbc.Nav([
dbc.NavItem(dbc.NavLink("kezdőlap", active=True, href="/")),
dbc.NavItem(dbc.NavLink("dátum szerint", href="/all_date")),
dbc.NavItem(dbc.NavLink("újságok szerint", href="/all_org")),
dbc.NavItem(dbc.NavLink("újság szerint", href="/site_tab")),
dbc.NavItem(dbc.NavLink("két újság összevetése", href="/site_vs_tab")),
dbc.NavItem(dbc.NavLink("két szó összevetése", href="words_tab")),
dbc.DropdownMenu(
[dbc.DropdownMenuItem("újságok", href="mo"),
dbc.DropdownMenuItem("útmutató", href ="manual"),
dbc.DropdownMenuItem("elérhetőség", href="contact")],
label="további info",
nav=True)]),
html.Hr(),
html.Div(id='page-content'),
html.Hr()])
return layout
def md_linkler(url: str) ->str:
"""
transforms url to markdown type link
"""
md_link = f"[link]({url})"
return md_link
def update_dt_by_date(dataframe: pd.DataFrame()) -> dt.DataTable():
"""
updates dash_table with passed dataframe
returns dash_table
"""
dataframe["link"] = dataframe["url"].copy()
dataframe["link"] = dataframe["link"].apply(md_linkler)
columns = [{'name': 'dátum', 'id':'date'},
{'name': 'oldal', 'id':'site'},
{'name': 'cím', 'id':'title'},
{'name': 'link', 'id':'link', 'type':'text', 'presentation': 'markdown'},
{'name': 'url', 'id':'url'}]
data = dataframe.to_dict('records')
data_table = dt.DataTable(
style_table={"padding": "50px", "maxHeight": '350px',
"overflowY": "scroll"},
style_data={'whiteSpace': 'normal', 'height': 'auto'},
style_cell={'textAlign': 'left'},
style_cell_conditional=[
{'if': {'column_id': 'date'}, 'width': '30px'},
{'if': {'column_id': 'site'}, 'width': '30px'},
{'if': {'column_id': 'title'}, 'width': '250px'},
{'if': {'column_id': 'link'}, 'width': '30px'},
{'if': {'column_id': 'url'}, 'width': '100px'}],
data=data,
columns=columns,
page_size=50,
export_format="xlsx")
return data_table
def plot_all_by_date(*, dataframe: pd.DataFrame(), search_word: str) -> px.bar:
"""
:date_count:pd.DataFrame
returns: plotly.express.px.bar
"""
if len(dataframe) > 0:
dataframe.columns = ["találatok száma"]
fig = px.bar(dataframe,
height=500,
x=dataframe.index,
y="találatok száma",
color="találatok száma",
labels={"x": "dátum", "date": "cikkek száma"},
opacity=.75,
color_continuous_scale="Geyser"
)
fig.update_layout(
title={'text': f"""A '{search_word}' szó száma a cikkek címeiben
{dataframe.index.min()}--{dataframe.index.max()}.""",
'y': 0.900,
'x': 0.50},
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
yaxis_tickformat = 'd',
transition={'duration': 500},
plot_bgcolor="rgba(0,0,0,0)",
font={"family":"Courier New, monospace",
"size":11,
"color":"#000000"
})
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor = '#bdbdbd')
if len(dataframe) < 5:
fig.update_layout(xaxis_showticklabels = False, width=750)
fig.update_yaxes(showgrid=False, dtick=1)
return fig
return px.bar()
def plot_all_by_sites(*, dataframe: pd.DataFrame(), search_word: str):
"""
#Horizontal barchart with top n sites
"""
if len(dataframe) > 0:
df = dataframe
df.rename(columns={'title': 'darab'}, inplace=True)
fig = px.bar(df,
height=1500,
orientation='h',
x="darab",
y=df.index,
labels={"y": "orgánum", "x": "cikkek száma"},
opacity=.75,
)
fig.update_layout(
title={'text': "Találatok az elmúlt 90 napból"},
plot_bgcolor="rgba(0,0,0,0)",
yaxis_title="Újságok",
xaxis_title="Cikkek száma",
font={
"family":"Courier New, monospace",
"size":10,
"color":"#000000"
})
fig.update_traces(marker_color='black')
fig.update_xaxes(showgrid=True, gridcolor='#bdbdbd')
fig.update_yaxes(showgrid=False)
return fig
return px.bar()
def compare_two_sites(*,
search_word,
site1_df,
site2_df,
site_1,
site_2):
"""
#Comparison line chart
"""
if search_word:
search_word = str(search_word).lower()
site_corr = site1_df["count"].corr(site2_df["count"])
fig = go.Figure(
layout=go.Layout(
annotations=[go.layout.Annotation(
text=f'Korrelációs együttható (r): {site_corr:.2f}',
hovertext="""Tartomány: -1 és 1 között. Jelzi két tetszőleges érték közötti lineáris kapcsolat nagyságát és irányát.""",
borderpad=1,
bgcolor="#ffffcc",
align='left',
showarrow=False,
xref='paper',
yref='paper',
x=0,
y=1,
bordercolor='grey',
borderwidth=1)]))
fig.add_trace(go.Scatter(x=site1_df.index, y=site1_df["count"],
mode='lines',
line_shape='linear',
name=f'{site_1}'))
fig.add_trace(go.Scatter(x=site2_df.index, y=site2_df["count"],
mode='lines',
line_shape='linear',
name=f'{site_2}'))
fig.update_layout(
title=f"""'{site_1}' és '{site_2}': '{search_word}' szó száma a cikkek címeiben""",
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
plot_bgcolor="rgba(0,0,0,0)",
)
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor='#bdbdbd')
return fig
return px.bar()
def compare_two_search_words(*,
sw_df_1,
sw_df_2,
search_word_1,
search_word_2):
"""
#TODO
"""
if search_word_1:
sw1 = search_word_1.split()[0].strip()
sw2 = search_word_2.split()[0].strip()
corr = sw_df_1["count"].corr(sw_df_2["count"])
fig = go.Figure(
layout=go.Layout(
annotations=[go.layout.Annotation(
text=f'Korrelációs együttható (r): {corr:.2f}',
hovertext="""Tartomány: -1 és 1 között.""",
borderpad=1,
bgcolor="#ffffcc",
align='left',
showarrow=False,
xref='paper',
yref='paper',
x=0,
y=1,
bordercolor='grey',
borderwidth=1)]))
fig.add_trace(go.Scatter(x=sw_df_1.index, y=sw_df_1["count"],
mode='lines',
line_shape='linear',
name=f'{sw1}'))
fig.add_trace(go.Scatter(x=sw_df_2.index, y=sw_df_2["count"],
mode='lines',
line_shape='linear',
name=f'{sw2}'))
fig.update_layout(
height=600,
title={'text': f"'{sw1}' és '{sw2}' szavak száma a cikkek címeiben",
'y':0.90,
'x':0.5},
xaxis_title="Dátum",
yaxis_title="Cikkek száma",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(
family="Courier New, monospace",
size=11,
color="#000000"
))
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=True, gridcolor='#bdbdbd')
return fig
return px.bar()
###################################
# LAYOUT
###################################
print("loading layout")
app.layout = build_layout
@app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/all_date':
return page_1_layout
elif pathname == '/all_org':
return page_2_layout
elif pathname == '/site_tab':
return page_3_layout
elif pathname == '/site_vs_tab':
return page_4_layout
elif pathname == '/words_tab':
return page_5_layout
elif pathname == '/contact':
return page_6_layout
elif pathname == '/manual':
return page_7_layout
elif pathname == '/mo':
return page_8_layout
else:
return index_page
###################################
# INDEX
###################################
index_page = html.Div([
dcc.Markdown(children=md_txt.index_txt)])
###################################
# PAGE 1 LAYOUT
###################################
page_1_layout = html.Div([
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value="")), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
dcc.Graph(id='max_date_bargraph'),
html.Div(id="table1", style={'font-family': 'Impact'})])
###################################
# PAGE 1 CHART CALLBACK
###################################
@app.callback(Output('max_date_bargraph', 'figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def date_count_all_site(n_clicks, n_submit, switch_value, search_word):
"""
"""
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site="all"
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
if len(res_df) > 0:
res_df.set_index("date", inplace=True)
else:
res_df = pd.DataFrame()
fig = plot_all_by_date(dataframe=res_df, search_word=search_word)
return fig
###################################
# PAGE 1 DATA TABLE CALLBACK
###################################
@app.callback(Output('table1', 'children'),
[Input('max_date_bargraph', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def update_table(clickData, n_clicks, switch_value, search_word):
"""
#TODO
"""
if clickData:
search_word = search_word.strip()
date = list(clickData["points"])[0]["label"]
site = "all"
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{date}/{date}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 2 LAYOUT
###################################
page_2_layout = html.Div([
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value="")), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
html.Div(id='my-output'),
dcc.Graph(id='bargraph_2'),
html.Div(id="table2", style={'font-family': 'Impact'})])
###################################
# PAGE 2 CHART CALLBACK
###################################
@app.callback(Output('bargraph_2', 'figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def update_by_site(n_clicks, n_submit, switch_value, search_word):
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site="all"
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
df = res_df.groupby(by="site").count()["title"]
df = pd.DataFrame(df.sort_values(ascending=True)[:])
else:
df = pd.DataFrame()
fig = plot_all_by_sites(dataframe=df, search_word=search_word)
return fig
###################################
# PAGE 2 DATA TABLE CALLBACK
###################################
@app.callback(Output('table2', 'children'),
[Input('bargraph_2', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value')])
def display_clickData_2(clickData, n_clicks, switch_value, search_word):
if clickData:
search_word = search_word.strip()
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
site = list(clickData["points"])[0]["label"]
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 3 LAYOUT
###################################
api_url = f"""http://pressgraphs.pythonanywhere.com/{API_KEY}/info/sites/all"""
response = requests.get(api_url)
schema = response.json()[0]
st_options = pd.DataFrame(response.json()[1]["data"])
page_3_layout = html.Div([
html.H5("oldal szerinti keresés"),
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value='')), width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="sites",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="keresett oldal...",
value='')), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
dcc.Graph(id='bargraph_3'),
html.Div(id="table3")])
###################################
# PAGE 3 CHART CALLBACK
###################################
@app.callback(Output('bargraph_3','figure'),
[Input('submit-button', 'n_clicks'),
Input('search_input', 'n_submit'),
Input('switch-input', 'value')],
[State('search_input', 'value'),
State('sites', 'value')])
def update_site_graph(n_clicks, n_submit, switch_value, search_word, site):
"""
"""
if n_clicks or n_submit:
search_word = search_word.strip()
if switch_value:
switch_value = 1
else:
switch_value = 0
site=site
today = datetime.today().strftime("%Y-%m-%d")
from_date = (datetime.today() - \
timedelta(days = MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
api_url = f"http://pressgraphs.pythonanywhere.com/date/count/"\
f"{API_KEY}/{search_word}/{switch_value}/{from_date}/{today}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
res_df = pd.DataFrame(content)
if len(res_df) > 0:
res_df.set_index("date",inplace=True)
else:
res_df = pd.DataFrame()
fig = plot_all_by_date(dataframe=res_df,
search_word=search_word)
return fig
###################################
# PAGE 3 DATA TABLE CALLBACK
###################################
@app.callback(Output('table3', 'children'),
[Input('bargraph_3', 'clickData'),
Input('submit-button', 'n_clicks'),
Input('switch-input', 'value')],
[State('search_input', 'value'),
State('sites', 'value')])
def display_clickData_3(clickData, n_clicks, switch_value, search_word, site):
"""
#TODO
"""
if clickData:
search_word = search_word.strip()
date = list(clickData["points"])[0]["label"]
if switch_value:
switch_value = 1
else:
switch_value = 0
api_url = f"http://pressgraphs.pythonanywhere.com/date/list/"\
f"{API_KEY}/{search_word}/{switch_value}/{date}/{date}/{site}"
response = requests.get(api_url)
content = response.json()[1]["data"]
df = pd.DataFrame(content)
return update_dt_by_date(df)
else:
return
###################################
# PAGE 4 LAYOUT
###################################
api_url = f"""http://pressgraphs.pythonanywhere.com/{API_KEY}/info/sites/all"""
response = requests.get(api_url)
schema = response.json()[0]
st_options = pd.DataFrame(response.json()[1]["data"])
page_4_layout = html.Div([
html.H5("két oldal összevetése"),
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input",
placeholder="keresett szó...",
type="text",
value='')),width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="site_1",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="első oldal...",
value='')), width=3)),
html.Br(),
dbc.Row(dbc.Col(html.Div(dcc.Dropdown(
id="site_2",
options=[{
'label': i,
'value': i
} for i in st_options["site"]],
placeholder="második oldal...",
value='')), width=3)),
html.Br(),
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True,
),
dcc.Graph(id='graph_4'),
html.Div(id="table4")])
###################################
# PAGE 4 CAHRT CALLBACK
###################################
@app.callback(Output('graph_4','figure'),
              [Input('submit-button', 'n_clicks'),
               Input('search_input', 'n_submit'),
               Input('switch-input', 'value')],
              [State('search_input', 'value'),
               State('site_1', 'value'),
               State('site_2', 'value')])
def update_site_comparison(n_clicks, n_submit, switch_value, search_word, st1, st2):
    """Draw the page-4 figure comparing daily hit counts on two sites.

    Queries the PressGraphs "date/count" endpoint once per site over the
    last MAX_REQUEST_DAY days; before the first submit (button or Enter)
    an empty figure is rendered.
    """
    def fetch_counts(site):
        # One date-indexed frame of daily match counts for *site*.
        url = ("http://pressgraphs.pythonanywhere.com/date/count/"
               f"{API_KEY}/{search_word}/{in_word_flag}/{from_date}/{today}/{site}")
        frame = pd.DataFrame(requests.get(url).json()[1]["data"])
        frame.set_index("date", inplace=True)
        return frame

    if n_clicks or n_submit:
        search_word = search_word.strip()
        # The checklist value is truthy when "search inside words" is ticked.
        in_word_flag = 1 if switch_value else 0
        today = datetime.today().strftime("%Y-%m-%d")
        from_date = (datetime.today()
                     - timedelta(days=MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
        s1_df = fetch_counts(st1)
        s2_df = fetch_counts(st2)
    else:
        s1_df = pd.DataFrame()
        s2_df = pd.DataFrame()
    return compare_two_sites(search_word=search_word,
                             site1_df=s1_df,
                             site2_df=s2_df,
                             site_1=st1,
                             site_2=st2)
###################################
# PAGE 4 DATA TABLE CALLBACK
###################################
@app.callback(
    Output('table4', 'children'),
    [Input('graph_4', 'clickData'),
     Input('submit-button', 'n_clicks'),
     Input('switch-input', 'value')],
    [State('search_input', 'value'),
     State('site_1', 'value'),
     State('site_2', 'value')]
)
def display_clickData_4(clickData, n_clicks, switch_value, search_word, st1, st2):
    """Fill the page-4 table with the articles behind the clicked point.

    The clicked trace (curveNumber) selects which of the two compared
    sites to query; returns ``None`` until a point is clicked.
    """
    if not clickData:
        return None
    term = search_word.strip()
    point = clickData["points"][0]
    clicked_date = point["x"]
    # The checklist value is truthy when "search inside words" is ticked.
    in_word_flag = 1 if switch_value else 0
    # Trace 0 belongs to the first site, any other trace to the second.
    chosen_site = st1 if point['curveNumber'] == 0 else st2
    endpoint = ("http://pressgraphs.pythonanywhere.com/date/list/"
                f"{API_KEY}/{term}/{in_word_flag}/{clicked_date}/{clicked_date}/{chosen_site}")
    rows = requests.get(endpoint).json()[1]["data"]
    return update_dt_by_date(pd.DataFrame(rows))
###################################
# PAGE 5 LAYOUT
###################################
# Page 5: compare the daily hit counts of two search words across all sites.
page_5_layout = html.Div([
html.H5("két szó összevetése"),
# First search term.
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input_1",
placeholder="első keresett szó...",
type="text",
value='')), width=3)),
html.Br(),
# Second search term.
dbc.Row(dbc.Col(html.Div(
dbc.Input(id="search_input_2",
placeholder="második keresett szó...",
type="text",
value='')), width=3)),
html.Br(),
# Submit button; n_clicks drives the chart callback below.
dbc.Button("Keresés",
outline=True,
color="info",
className="mr-1",
id='submit-button',
n_clicks=0),
# Toggle: when checked, match inside words (substring search).
dbc.Checklist(options=[{"label": "keresés szavakon belül", "value": 1}],
value=[],
id="switch-input",
switch=True),
dcc.Graph(id='graph_5'),
html.Div(id="table5")])
###################################
# PAGE 5 CHART CALLBACK
###################################
@app.callback(
    Output('graph_5','figure'),
    [Input('submit-button', 'n_clicks'),
     Input('switch-input', 'value')],
    [State('search_input_1', 'value'),
     State('search_input_2', 'value')])
def update_word_comparison(n_clicks, switch_value, sw_1, sw_2):
    """Draw the page-5 figure comparing daily hit counts of two search words.

    Queries the PressGraphs "date/count" endpoint for each word across all
    sites over the last MAX_REQUEST_DAY days.  Before the first submit an
    empty figure is returned.

    Bug fixes vs. the previous revision:
    * the condition read ``n_clicks or n_submit`` although this callback has
      no ``n_submit`` argument, so the initial render (``n_clicks == 0``)
      raised ``NameError``;
    * the stripped text was stored in an unused ``search_word`` variable
      while the raw, unstripped words were interpolated into the API URLs --
      both words are now stripped, matching ``display_clickData_5``.
    """
    if n_clicks:
        sw_1 = sw_1.strip()
        sw_2 = sw_2.strip()
        # The checklist value is truthy when "search inside words" is ticked.
        switch_value = 1 if switch_value else 0
        site = "all"
        today = datetime.today().strftime("%Y-%m-%d")
        from_date = (datetime.today() -
                     timedelta(days=MAX_REQUEST_DAY)).strftime("%Y-%m-%d")
        # Daily counts for the first word, indexed by date.
        df_1 = pd.DataFrame(requests.get(
            f"http://pressgraphs.pythonanywhere.com/date/count/"
            f"{API_KEY}/{sw_1}/{switch_value}/{from_date}/{today}/{site}"
        ).json()[1]["data"])
        df_1.set_index("date", inplace=True)
        # Daily counts for the second word, indexed by date.
        df_2 = pd.DataFrame(requests.get(
            f"http://pressgraphs.pythonanywhere.com/date/count/"
            f"{API_KEY}/{sw_2}/{switch_value}/{from_date}/{today}/{site}"
        ).json()[1]["data"])
        df_2.set_index("date", inplace=True)
    else:
        # Nothing submitted yet: render an empty comparison figure.
        df_1 = pd.DataFrame()
        df_2 = pd.DataFrame()
        sw_1 = ""
        sw_2 = ""
    return compare_two_search_words(sw_df_1=df_1,
                                    sw_df_2=df_2,
                                    search_word_1=sw_1,
                                    search_word_2=sw_2)
###################################
# PAGE 5 DATA TABLE CALLBACK
###################################
@app.callback(
    Output('table5', 'children'),
    [Input('graph_5', 'clickData'),
     Input('switch-input', 'value')],
    [State('search_input_1', 'value'),
     State('search_input_2', 'value')])
def display_clickData_5(clickData, switch_value, sw_1, sw_2):
    """Fill the page-5 table with the articles behind the clicked point.

    Trace 0 corresponds to the first search word, any other trace to the
    second; returns ``None`` until a point is clicked.
    """
    if not clickData:
        return None
    point = clickData["points"][0]
    clicked_date = point["x"]
    site = "all"
    # The checklist value is truthy when "search inside words" is ticked.
    in_word_flag = 1 if switch_value else 0
    # Trace 0 belongs to the first word, any other trace to the second.
    term = sw_1.strip() if point['curveNumber'] == 0 else sw_2.strip()
    endpoint = ("http://pressgraphs.pythonanywhere.com/date/list/"
                f"{API_KEY}/{term}/{in_word_flag}/{clicked_date}/{clicked_date}/{site}")
    rows = requests.get(endpoint).json()[1]["data"]
    return update_dt_by_date(pd.DataFrame(rows))
###################################
# CONTACT
###################################
# Static markdown pages rendered from the md_txt module.
page_6_layout = html.Div([
html.H4("Elérhetőség"),
dcc.Markdown(children=md_txt.contact)])
###################################
# MANUAL
###################################
page_7_layout = html.Div([
html.H4("Használati útmutató"),
dcc.Markdown(children=md_txt.manual)])
###################################
# SITE LIST
###################################
# NOTE(review): this page renders md_txt.modus_operandi although the
# heading promises the monitored-site list -- confirm the intended text.
page_8_layout = html.Div([
html.H4("Monitorozott oldalak listája"),
dcc.Markdown(children=md_txt.modus_operandi)])
###################################
# RUN APP SERVER
###################################
# Local development entry point.
# NOTE(review): debug=True enables the reloader/debugger -- do not use in
# production deployments.
if __name__ == '__main__':
app.run_server(debug=True, port=8050)
#app.run_server()
| 29.486762 | 140 | 0.521239 | 3,242 | 28,956 | 4.467921 | 0.118754 | 0.040041 | 0.034035 | 0.037556 | 0.682775 | 0.630376 | 0.584743 | 0.553055 | 0.524888 | 0.524888 | 0 | 0.017098 | 0.2749 | 28,956 | 981 | 141 | 29.51682 | 0.672795 | 0.030598 | 0 | 0.570175 | 0 | 0 | 0.210388 | 0.033978 | 0 | 0 | 0 | 0.006116 | 0 | 1 | 0.026316 | false | 0 | 0.019006 | 0 | 0.096491 | 0.001462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
857b09fd9aab40fed40cd686483e3386e8a76e25 | 1,467 | py | Python | algorithms/bubblesort.py | KellyHwong/Algorithms | 50cfc37c9b3694bb5ae9f13bb1e923e4f2142bca | [
"MIT"
] | 3 | 2019-06-20T07:09:57.000Z | 2019-07-01T07:04:46.000Z | algorithms/bubblesort.py | KellyHwong/Algorithms | 50cfc37c9b3694bb5ae9f13bb1e923e4f2142bca | [
"MIT"
] | null | null | null | algorithms/bubblesort.py | KellyHwong/Algorithms | 50cfc37c9b3694bb5ae9f13bb1e923e4f2142bca | [
"MIT"
] | 1 | 2019-11-22T07:36:28.000Z | 2019-11-22T07:36:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Jul-13-19 18:02
# @Author : Your Name (you@example.org)
# @Link : http://example.org
import os
import random
import pysnooper
import time
import csv
from quicksort import quicksort
def bubblesort(l: list):
    """Sort *l* in place in ascending order and return the same list object.

    O(n^2) exchange sort: each outer pass fixes the element at position
    ``fill`` by swapping in any smaller element found later in the list.
    (The j == i self-comparison of the original loop is a no-op, so the
    inner scan starts at ``fill + 1``.)
    """
    n = len(l)
    for fill in range(n):
        for probe in range(fill + 1, n):
            if l[fill] > l[probe]:
                l[fill], l[probe] = l[probe], l[fill]
    return l
def main():
    """Benchmark a sort routine over growing input sizes and log to CSV.

    bubblesort runs in O(n^2) while quicksort runs in O(n log n); for each
    list size n in 10..9990 (step 10) the elapsed sorting time is averaged
    over ``nsample`` runs and appended as an (n, avg_elapsed) row to
    ``csv_path`` (a header row is written when the file is first created).
    """
    # csv_path = "./bubblesort.csv"
    csv_path = "./quicksort.csv"
    nsample = 1
    sizes = list(range(10, 10000, 10))
    for n in sizes:
        avg_elapsed = 0
        for _ in range(nsample):
            l = [random.randint(0, 10000) for _ in range(n)]
            # BUG FIX: time.clock() was removed in Python 3.8;
            # time.perf_counter() is the documented replacement for
            # interval timing.
            start = time.perf_counter()
            # bubblesort(l)
            quicksort(l)
            elapsed = time.perf_counter() - start
            # print("Time used:", elapsed)
            avg_elapsed += elapsed
        avg_elapsed /= nsample
        print("n:", n)
        print("Average time used:", avg_elapsed)
        # Open with newline='' as the csv module requires, and use a context
        # manager so the handle is closed (the original leaked open files).
        if not os.path.exists(csv_path):
            with open(csv_path, "w", newline="") as f:
                f_csv = csv.writer(f)
                f_csv.writerow(["N", "avg_elapsed"])
                f_csv.writerow((n, avg_elapsed))
        else:
            with open(csv_path, "a", newline="") as f:
                csv.writer(f).writerow((n, avg_elapsed))
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
main()
| 23.66129 | 60 | 0.524881 | 195 | 1,467 | 3.805128 | 0.358974 | 0.107817 | 0.012129 | 0.052561 | 0.142857 | 0.130728 | 0.09973 | 0.09973 | 0.09973 | 0.09973 | 0 | 0.029592 | 0.33197 | 1,467 | 61 | 61 | 24.04918 | 0.727551 | 0.177914 | 0 | 0.153846 | 0 | 0 | 0.048346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.153846 | 0 | 0.230769 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
857e81aeb8f91a043f2183d0e0e74e4b51951da7 | 890 | py | Python | codes/Others/Longest-Palindromic-Substring/script.py | kotori-y/LeetCode-Code | cf42265401d5fdedd8ba95974e93f5c005694e86 | [
"MIT"
] | 3 | 2021-04-23T02:02:23.000Z | 2021-05-15T01:01:24.000Z | codes/Others/Longest-Palindromic-Substring/script.py | kotori-y/LeetCode-Code | cf42265401d5fdedd8ba95974e93f5c005694e86 | [
"MIT"
] | null | null | null | codes/Others/Longest-Palindromic-Substring/script.py | kotori-y/LeetCode-Code | cf42265401d5fdedd8ba95974e93f5c005694e86 | [
"MIT"
] | null | null | null | '''
Description:
Author: Kotori Y
Date: 2021-04-22 09:14:19
LastEditors: Kotori Y
LastEditTime: 2021-04-22 09:14:20
FilePath: \LeetCode-Code\codes\Others\Longest-Palindromic-Substring\script.py
AuthorMail: kotori@cbdd.me
'''
class Solution:
    def boo(self, s, left, right):
        """Expand around the inclusive window [left, right] while the ends match.

        Returns ``[lo, hi]``, the inclusive bounds of the widest palindrome
        centred on the starting window (the last indices that still matched).
        """
        while left >= 0 and right < len(s) and s[left] == s[right]:
            left -= 1
            right += 1
        return [left + 1, right - 1]

    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s*.

        Expands around every centre -- odd-length palindromes centred at
        ``i`` and even-length ones straddling ``i, i + 1`` -- and keeps the
        widest window seen.  O(n^2) time, O(1) extra space.
        """
        best_lo, best_hi = 0, 0
        for center in range(len(s)):
            lo_odd, hi_odd = self.boo(s, center, center)
            lo_even, hi_even = self.boo(s, center, center + 1)
            if hi_odd - lo_odd > best_hi - best_lo:
                best_lo, best_hi = lo_odd, hi_odd
            if hi_even - lo_even > best_hi - best_lo:
                best_lo, best_hi = lo_even, hi_even
        return s[best_lo: best_hi + 1]
8580ed32aedebf1af35027d068875f6c4fc89997 | 6,319 | py | Python | 8-2.machine_learning_ra_pbmc_bulk.py | yxaxaxa/sle_and_hd_single_cell_analysis | 139a3f6bd9ee34bec77b7ab3e1ec81a8c716d992 | [
"MIT"
] | null | null | null | 8-2.machine_learning_ra_pbmc_bulk.py | yxaxaxa/sle_and_hd_single_cell_analysis | 139a3f6bd9ee34bec77b7ab3e1ec81a8c716d992 | [
"MIT"
] | null | null | null | 8-2.machine_learning_ra_pbmc_bulk.py | yxaxaxa/sle_and_hd_single_cell_analysis | 139a3f6bd9ee34bec77b7ab3e1ec81a8c716d992 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import pandas as pd
import numpy as np
import glob,os
from glob import iglob
#import scanpy as sc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import RocCurveDisplay
from sklearn.datasets import load_wine
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import joblib
import time
import random
import matplotlib as mpl
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# # RA PBMC data for machine learning
# In[6]:
### training data import
# Expression matrices with genes as rows and samples as columns:
# RA patients and healthy donors (GSE90081) plus an extra healthy-control
# FPKM set (GSE183204) used as additional negatives.
ra=pd.read_csv('../RNA_seq_for_autoimmune_disease/RA_bulk/GSE90081/GSE90081_ra_part.csv',index_col=0)
hd=pd.read_csv('../RNA_seq_for_autoimmune_disease/RA_bulk/GSE90081/GSE90081_hd_part.csv',index_col=0)
hd1=pd.read_csv('../RNA_seq_for_autoimmune_disease/health_bulk/GSE183204_HC_fpkm.csv',sep=',',index_col=0)
# In[7]:
### feature import
# Candidate gene list for the classifier.
features=pd.read_csv('../script4paper2/combined_gene_for_machine_learning.csv',index_col=1).index.values
# NOTE(review): 'patient' is appended here but is filtered out again below
# unless it happens to occur in the gene index; the label column is instead
# added explicitly in the labeling step.
features=np.append(features,'patient')
# Keep only genes present in both expression matrices.
features=[i for i in features if i in ra.index.values]
features=[i for i in features if i in hd1.index.values ]
# # remove unwanted gene
# In[8]:
### remove unwanted gene from validation data
# Subset each matrix to the shared feature genes and transpose so rows
# become samples and columns become genes.
hd1=hd1.loc[features,:].T
ra_part=ra.loc[features,:].T
hd_part=hd.loc[features,:].T
# # label data
# In[9]:
### label training data
# Binary target: 1 = RA patient, 0 = healthy donor.
ra_part['patient']=1
hd_part['patient']=0
hd1['patient']=0
# # machine learning data training
# In[39]:
### merge training data
df=pd.concat([ra_part,hd_part,hd1],axis=0)
### get data labels
label=df.patient.values
### split data with ratio 30% for test and 70% for training
Xtrain, Xtest, Ytrain, Ytest = train_test_split(df.drop(columns=['patient']),label,test_size=0.3)
### rf model initialization
# class_weight='balanced' compensates for unequal RA/healthy counts;
# oob_score=True adds an out-of-bag generalisation estimate.
rfc = RandomForestClassifier(random_state=43,class_weight='balanced',oob_score=True)
rfc = rfc.fit(Xtrain,Ytrain)
### document model score
score_r = rfc.score(Xtest,Ytest)
### save feature importance
ra_pbmc=pd.DataFrame(rfc.feature_importances_)
ra_pbmc['feature_importance']=features
ra_pbmc.to_csv('./model/ra_pbmc_feature_importance_bulk.csv')
### print F score and Out of bag score
# NOTE(review): RandomForestClassifier.score() reports mean accuracy,
# not an F score as the comment above suggests.
print("Random Forest:{}".format(score_r))
print("OOB score:",rfc.oob_score_)
# # Figure 7A
# In[40]:
### Generating ROC curve
fig = plt.figure(figsize=(8, 8))
ax = plt.gca()
rfc_disp = RocCurveDisplay.from_estimator(rfc, Xtest, Ytest, ax=ax, alpha=0.8)
plt.legend(loc=4,prop={'size': 10})
plt.xlabel('False Positive Rate', fontsize=18)
plt.ylabel('True Positive Rate', fontsize=16)
# Diagonal chance line for reference.
ax.plot([0, 1], [0, 1], ls="--", c=".3")
# Embed fonts as TrueType (42) so PDF/PS text stays editable.
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# NOTE(review): savefig() has no width/height parameters -- depending on
# the matplotlib version these kwargs are ignored or rejected; verify.
plt.savefig('./figure6_and_7/7a_ra_pbmc_bulk_auc.pdf',width=4,height=5)
# # save/load best performance model
# In[24]:
### save the best performance model
#joblib.dump(rfc, './model/ra_synovial_bulk_best.model')
### load model
#rfc=joblib.load('./model/sle_best.model')
# In[19]:
### 10-fold cross validation
# NOTE(review): cross_val_score runs twice (once for the mean, once for
# the variance), doubling the fitting cost.
print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).mean())
print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).var())
# # Figure 7D
# In[42]:
# Bar chart of the per-gene importances saved by the training step above.
ra_feature=pd.read_csv('./model/ra_pbmc_feature_importance_bulk.csv')
fig, ax = plt.subplots(figsize=(15, 5))
ax.bar(x=ra_feature['feature_importance'], height=ra_feature['0'])
ax.set_title("Feature importance for RA bulk RNA PBMC model", fontsize=15)
plt.xticks(rotation = 90)
# Embed fonts as TrueType (42) so PDF/PS text stays editable.
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# NOTE(review): savefig() has no width/height parameters -- verify.
plt.savefig('./figure6_and_7/7d_ra_pbmc_bulk.pdf',width=15,height=5)
# # Hyper-parameter adjust
# In[795]:
data=df.drop(columns=['patient'])
label=df.patient.values
# Coarse scan of n_estimators (1, 11, ..., 191) scored by 10-fold CV.
start=time.time()
scorel = []
for i in range(0,200,10): # loop for 0-200 decision trees
rfc = RandomForestClassifier(n_estimators=i+1,n_jobs=-1,random_state=0)
score = cross_val_score(rfc,data,label,cv=10).mean()
scorel.append(score)
print(max(scorel),(scorel.index(max(scorel))*10)+1)
end=time.time()
print('Running time: %s Seconds'%(end-start))
plt.figure(figsize=[20,5])
plt.plot(range(1,201,10),scorel)
plt.show()
# In[801]:
# Fine scan of n_estimators around the best coarse value (186..205).
scorel = []
for i in range(185,205):
rfc = RandomForestClassifier(n_estimators=i+1,n_jobs=-1,random_state=0)
score = cross_val_score(rfc,data,label,cv=10).mean()
scorel.append(score)
print(max(scorel),([*range(185,205)][scorel.index(max(scorel))]))
plt.figure(figsize=[20,5])
plt.plot(range(185,205),scorel)
plt.show()
# In[802]:
# Grid-search max_depth with n_estimators fixed at 190.
start=time.time()
param_grid = {'max_depth':np.arange(1, 90,2)}
alg = RandomForestClassifier(n_estimators=190,random_state=0)
GS = GridSearchCV(alg,param_grid,cv=10)
GS.fit(data,label)
print(GS.best_params_)
print(GS.best_score_)
end=time.time()
print('Running time: %s Seconds'%(end-start))
# In[803]:
# Grid-search max_features with n_estimators fixed at 190.
start=time.time()
param_grid = {'max_features':np.arange(5,80,1)}
rfc = RandomForestClassifier(n_estimators=190,random_state=0)
GS = GridSearchCV(rfc,param_grid,cv=10)
GS.fit(data,label)
print(GS.best_params_)
print(GS.best_score_)
end=time.time()
print('Running time: %s Seconds'%(end-start))
# # 100 loop of 10-fold cross validation
# In[35]:
df_n=df.drop(columns=['patient'])
rfc_l = []  # per-fold ROC-AUC scores
fpr_l=[]
tpr_l=[]
acc_l=[]  # per-fold accuracy scores
# NOTE(review): fpr_l and tpr_l are declared but never filled here.
skf =StratifiedKFold(n_splits=10)
# 100 repetitions of stratified 10-fold CV -> 1000 AUC/accuracy samples.
# NOTE(review): shuffle is off and random_state is fixed, so every
# repetition appears to use identical folds and models -- confirm whether
# the repetition adds information.
for i in range(100):
for train_index, test_index in skf.split(df_n,label):
rfc = RandomForestClassifier(random_state=0,class_weight="balanced",oob_score=True)
rfc = rfc.fit(df_n.iloc[train_index],label[train_index])
rfc_l.append(roc_auc_score(label[test_index], rfc.predict_proba(df_n.iloc[test_index])[:, 1]))
acc_l.append(accuracy_score(label[test_index], rfc.predict(df_n.iloc[test_index])))
# In[36]:
### average AUC and its standard deviation error
print(np.mean(rfc_l))
print(np.std(rfc_l))
| 24.303846 | 106 | 0.743789 | 1,022 | 6,319 | 4.440313 | 0.253425 | 0.03636 | 0.021155 | 0.033054 | 0.413398 | 0.342221 | 0.318422 | 0.29286 | 0.254738 | 0.197444 | 0 | 0.039181 | 0.103339 | 6,319 | 259 | 107 | 24.397683 | 0.761737 | 0.150815 | 0 | 0.309524 | 0 | 0 | 0.155202 | 0.080349 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.246032 | 0 | 0.246032 | 0.119048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8582f63940499d89e8d668d18b7810768499bc46 | 782 | py | Python | python/tests/test_statement.py | vlachvojta/Theatrical-Players-Refactoring-Kata | a26968f52e680ad041cfdd67572256a93e80f14a | [
"MIT"
] | 69 | 2019-08-07T07:48:29.000Z | 2022-03-25T13:51:27.000Z | python/tests/test_statement.py | vlachvojta/Theatrical-Players-Refactoring-Kata | a26968f52e680ad041cfdd67572256a93e80f14a | [
"MIT"
] | 16 | 2019-08-08T10:12:59.000Z | 2022-03-22T12:42:48.000Z | python/tests/test_statement.py | vlachvojta/Theatrical-Players-Refactoring-Kata | a26968f52e680ad041cfdd67572256a93e80f14a | [
"MIT"
] | 69 | 2019-08-07T13:21:38.000Z | 2022-03-31T17:38:21.000Z | import json
import pytest
from approvaltests import verify
from approvaltests.utils import get_adjacent_file
from statement import statement
def test_example_statement():
with open(get_adjacent_file("invoice.json")) as f:
invoice = json.loads(f.read())
with open(get_adjacent_file("plays.json")) as f:
plays = json.loads(f.read())
verify(statement(invoice, plays))
def test_statement_with_new_play_types():
with open(get_adjacent_file("invoice_new_plays.json")) as f:
invoice = json.loads(f.read())
with open(get_adjacent_file("new_plays.json")) as f:
plays = json.loads(f.read())
with pytest.raises(ValueError) as exception_info:
statement(invoice, plays)
assert "unknown type" in str(exception_info.value)
| 30.076923 | 64 | 0.719949 | 111 | 782 | 4.873874 | 0.315315 | 0.101664 | 0.138632 | 0.140481 | 0.441774 | 0.414048 | 0.303142 | 0.303142 | 0.303142 | 0.18854 | 0 | 0 | 0.172634 | 782 | 25 | 65 | 31.28 | 0.836167 | 0 | 0 | 0.210526 | 0 | 0 | 0.089514 | 0.028133 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
858551bc894c37253f0341f789dc0c08a3fb1059 | 2,702 | py | Python | Chapter06/stan_example.py | PacktPublishing/Mastering-Machine-Learning-Algorithms-Second-Edition | 706d76fdb91b8c59223879cb236ce2bb6cc7e768 | [
"MIT"
] | 40 | 2019-08-23T13:33:12.000Z | 2022-02-24T12:48:41.000Z | Chapter06/stan_example.py | PacktPublishing/Mastering-Machine-Learning-Algorithms-Second-Edition | 706d76fdb91b8c59223879cb236ce2bb6cc7e768 | [
"MIT"
] | null | null | null | Chapter06/stan_example.py | PacktPublishing/Mastering-Machine-Learning-Algorithms-Second-Edition | 706d76fdb91b8c59223879cb236ce2bb6cc7e768 | [
"MIT"
] | 33 | 2019-10-21T09:47:51.000Z | 2022-01-14T17:21:54.000Z | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Install using pip install pystan
# It requires a C/C++ compiler
import pystan
# Set random seed for reproducibility
np.random.seed(1000)
# Number of observations
nb_samples = 10
if __name__ == "__main__":
# Create the observations
departure_delay = np.random.exponential(0.5, size=nb_samples)
travel_time = np.random.normal(2.0, 0.2, size=nb_samples)
arrival_delay = np.random.exponential(0.1, size=nb_samples)
arrival_time = np.random.normal(departure_delay +
travel_time +
arrival_delay,
0.5, size=nb_samples)
# Define the Stan model
code = """
data {
int<lower=0> num;
vector[num] departure_delay;
vector[num] travel_time;
vector[num] arrival_delay;
vector[num] arrival_time;
}
parameters {
real beta_a;
real beta_b;
real mu_t;
real sigma_t;
real sigma_a;
}
model {
departure_delay ~ exponential(beta_a);
travel_time ~ normal(mu_t, sigma_t);
arrival_delay ~ exponential(beta_b);
arrival_time ~ normal(departure_delay +
travel_time +
arrival_delay,
sigma_a);
}
"""
# Compile the model
model = pystan.StanModel(model_code=code)
# Define the observation dataset
data = {
"num": nb_samples,
"departure_delay": departure_delay,
"arrival_time": arrival_time,
"travel_time": travel_time,
"arrival_delay": arrival_delay
}
# Fit the model
fit = model.sampling(data=data, iter=10000,
refresh=10000, warmup=1000,
chains=2, seed=1000)
# Show a fit summary
print(fit)
# Sample some parameters from the posterior distribution
ext = fit.extract()
beta_a = ext["beta_a"]
beta_b = ext["beta_b"]
mu_t = ext["mu_t"]
sigma_t = ext["sigma_t"]
# Show the density estimations
sns.set()
fig, ax = plt.subplots(2, 2, figsize=(22, 12))
sns.distplot(beta_a, kde_kws={"shade": True}, ax=ax[0, 0])
sns.distplot(beta_b, kde_kws={"shade": True}, ax=ax[0, 1])
sns.distplot(mu_t, kde_kws={"shade": True}, ax=ax[1, 0])
sns.distplot(sigma_t, kde_kws={"shade": True}, ax=ax[1, 1])
ax[0, 0].set_title(r"$\beta_0$", fontsize=22)
ax[0, 1].set_title(r"$\beta_1$", fontsize=22)
ax[1, 0].set_title(r"$\mu_t$", fontsize=22)
ax[1, 1].set_title(r"$\sigma_t$", fontsize=22)
plt.show()
| 27.571429 | 65 | 0.57883 | 358 | 2,702 | 4.159218 | 0.27933 | 0.065816 | 0.034923 | 0.040296 | 0.16454 | 0.111484 | 0.111484 | 0.028207 | 0 | 0 | 0 | 0.036209 | 0.304959 | 2,702 | 97 | 66 | 27.85567 | 0.756656 | 0.122872 | 0 | 0.059701 | 0 | 0 | 0.335314 | 0.009338 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.059701 | 0 | 0.059701 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
858760cd43df7a53576a57dc1a9f62fa326e32a2 | 7,377 | py | Python | api/imgur/imgur_api.py | CharlieCorner/pymage_downloader | d145d2fe8666d4dbbc104bb563fc43415bd8802c | [
"Apache-2.0"
] | null | null | null | api/imgur/imgur_api.py | CharlieCorner/pymage_downloader | d145d2fe8666d4dbbc104bb563fc43415bd8802c | [
"Apache-2.0"
] | 9 | 2018-11-04T23:20:22.000Z | 2020-04-30T05:19:07.000Z | api/imgur/imgur_api.py | CharlieCorner/pymage_downloader | d145d2fe8666d4dbbc104bb563fc43415bd8802c | [
"Apache-2.0"
] | null | null | null | import json
import logging
from datetime import datetime
import requests
from api.imgur import *
from exceptions.pymage_exceptions import NotAbleToDownloadException, ImgurAPICommunicationException
from utils.utils import extract_imgur_id_from_url
LOGGER = logging.getLogger(__name__)
class ImgurAPI:
    """Static helpers for resolving Imgur page URLs to direct image links.

    All methods are stateless; configuration (client id, remaining API
    credits) lives in the module-level IMGUR_PARAMS dict.
    """

    @staticmethod
    def get_image_urls(url: str) -> list:
        """Return the list of direct image URLs behind an Imgur page URL.

        Dispatches on the URL shape (gallery / album / plain image id) and
        raises NotAbleToDownloadException when the API query fails.
        """
        imgur_id = extract_imgur_id_from_url(url)

        try:
            if "/gallery/" in url:
                image_urls = ImgurAPI._get_gallery_urls(imgur_id)
            elif "/a/" in url:
                image_urls = ImgurAPI._get_album_urls(imgur_id)
            else:
                # This is a URL with no gallery, album or extension
                image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
        except ImgurAPICommunicationException:
            raise NotAbleToDownloadException(f"Couldn't process: {url}")

        return image_urls

    @staticmethod
    def _get_simple_imgur_url(imgur_id: str) -> list:
        """Resolve a single-image id to a one-element list with its link."""
        imgur_endpoint = ImgurAPI._get_endpoint_url(IMGUR_SIMPLE, imgur_id)
        response = ImgurAPI.get(imgur_endpoint)

        if not response.get("success"):
            raise ImgurAPICommunicationException(f"Unsuccessful query to Imgur API for ID: {imgur_id}")

        link = response.get("data").get("link")
        return [link]

    @staticmethod
    def _get_album_urls(imgur_id: str) -> list:
        """Resolve an album id to the links of every image it contains."""
        imgur_endpoint = ImgurAPI._get_endpoint_url(IMGUR_ALBUM, imgur_id)
        response = ImgurAPI.get(imgur_endpoint)

        if not response.get("success"):
            raise ImgurAPICommunicationException(f"Unsuccessful query to Imgur API for ID: {imgur_id}")

        album_urls = [image_data.get("link") for image_data in response.get("data")]
        return album_urls

    @staticmethod
    def _get_gallery_urls(imgur_id: str) -> list:
        """Resolve a gallery id to the links of every image it contains."""
        imgur_endpoint = ImgurAPI._get_endpoint_url(IMGUR_GALLERY, imgur_id)
        response = ImgurAPI.get(imgur_endpoint)

        if not response.get("success"):
            raise ImgurAPICommunicationException(f"Unsuccessful query to Imgur API for ID: {imgur_id}")

        gallery_urls = [image_data.get("link") for image_data in response.get("data").get("images")]
        return gallery_urls

    @staticmethod
    def _get_endpoint_url(endpoint: str, imgur_id: str) -> str:
        """Build the concrete endpoint URL by substituting the id placeholder."""
        return IMGUR_ENDPOINTS.get(endpoint).replace(IMGUR_ID_URL_PLACEHOLDER, imgur_id)

    @staticmethod
    def _update_api_limits(response: requests.models.Response):
        """Record the credit counters Imgur reports in its response headers."""
        reported_user_limit = int(response.headers[IMGUR_API_RESPONSE_HEADER_USER_LIMIT])
        reported_user_remaining = int(response.headers[IMGUR_API_RESPONSE_HEADER_USER_REMAINING])
        reported_user_reset_ts = int(response.headers[IMGUR_API_RESPONSE_HEADER_USER_RESET])

        LOGGER.debug(f"Imgur API Remaining calls: {reported_user_remaining}")
        LOGGER.debug(f"Imgur API Next Limit Reset Timestamp: {reported_user_reset_ts}")

        IMGUR_PARAMS[IMGUR_PARAMS_API_CALLS_LIMITS][IMGUR_PARAMS_API_CALLS_LIMITS_USER_LIMIT] \
            = reported_user_limit
        IMGUR_PARAMS[IMGUR_PARAMS_API_CALLS_LIMITS][IMGUR_PARAMS_API_CALLS_LIMITS_USER_REMAINING] \
            = reported_user_remaining
        IMGUR_PARAMS[IMGUR_PARAMS_API_CALLS_LIMITS][IMGUR_PARAMS_API_CALLS_LIMITS_USER_RESET_TIMESTAMP] \
            = reported_user_reset_ts

    @staticmethod
    def _check_api_limits():
        """Raise when the Imgur credit budget is exhausted; warn when it runs low.

        Credits come from the rate-limit headers documented at
        https://apidocs.imgur.com/:
        X-RateLimit-UserLimit      Total credits that can be allocated.
        X-RateLimit-UserRemaining  Total credits available.
        X-RateLimit-UserReset      Timestamp (unix epoch) for the credit reset.
        """
        remaining_calls = IMGUR_PARAMS[IMGUR_PARAMS_API_CALLS_LIMITS][IMGUR_PARAMS_API_CALLS_LIMITS_USER_REMAINING]
        reset_timestamp = IMGUR_PARAMS[IMGUR_PARAMS_API_CALLS_LIMITS][IMGUR_PARAMS_API_CALLS_LIMITS_USER_RESET_TIMESTAMP]

        # BUG FIX: the exhausted case must be checked *before* the low-credit
        # warning.  Previously `remaining <= IMGUR_LIMIT_WARNING_THRESHOLD`
        # matched first, making the `<= 0` branch unreachable, so the
        # exhaustion exception was never raised.
        if remaining_calls <= 0:
            readable_reset_time = datetime.utcfromtimestamp(reset_timestamp).strftime('%Y-%m-%d %H:%M:%S')
            raise ImgurAPICommunicationException(f"The limit of calls to the Imgur API has been reached, "
                                                 f"more call will be available at {readable_reset_time}")
        elif remaining_calls <= IMGUR_LIMIT_WARNING_THRESHOLD:
            LOGGER.warning(f"Approaching the limit of calls allowed for the Imgur API, remaining: {remaining_calls}")

    @staticmethod
    def get(endpoint: str, headers: dict = None) -> dict:
        """GET *endpoint* with the Imgur Client-ID header; return the parsed JSON.

        Raises ImgurAPICommunicationException when the client id is not set,
        the call budget is exhausted, the HTTP status indicates failure, or
        the connection cannot be established.
        """
        # BUG FIX: the default used to be a shared mutable dict ({}), which
        # this method then mutated -- the Authorization header leaked into
        # every later call that relied on the default argument.
        if headers is None:
            headers = {}

        # The Imgur Client ID must be set before we can do anything else
        if not IMGUR_PARAMS.get(IMGUR_PARAMS_CLIENT_ID):
            raise ImgurAPICommunicationException(f"The Client ID for the Imgur API is not set! Skipping {endpoint}")

        # The following will throw an Exception if the limits have been met and will prevent any further call to be made
        # to the Imgur API
        ImgurAPI._check_api_limits()

        # Add the Imgur API Client ID to the Authorization HTTP Header
        if HTTP_HEADER_AUTHORIZATION not in headers:
            headers[HTTP_HEADER_AUTHORIZATION] = f"Client-ID {IMGUR_PARAMS.get(IMGUR_PARAMS_CLIENT_ID)}"

        try:
            LOGGER.debug(f"Querying API Imgur on {endpoint}...")

            with requests.get(endpoint, headers=headers) as response:
                if response.ok:
                    LOGGER.info('Imgur API query successful!')
                    ImgurAPI._update_api_limits(response)
                    data = json.loads(response.text)
                    return data
                else:
                    raise ImgurAPICommunicationException(
                        f"Failed to download, we got an HTTP {response.status_code} error "
                        f"saying {response.text} for {endpoint}")

        except requests.exceptions.ConnectionError as ex:
            LOGGER.error(ex)
            raise ImgurAPICommunicationException(f"Couldn't connect to {endpoint}, because of {str(ex)}")
# Sample Imgur Response
# {
# "data": {
# "id": "7W1xjas",
# "title": null,
# "description": null,
# "datetime": 1541129695,
# "type": "image/jpeg",
# "animated": false,
# "width": 640,
# "height": 691,
# "size": 123980,
# "views": 29125,
# "bandwidth": 3610917500,
# "vote": null,
# "favorite": false,
# "nsfw": true,
# "section": "hentai",
# "account_url": null,
# "account_id": null,
# "is_ad": false,
# "in_most_viral": false,
# "has_sound": false,
# "tags": [],
# "ad_type": 0,
# "ad_url": "",
# "in_gallery": false,
# "link": "https://i.imgur.com/7W1xjas.jpg"
# },
# "success": true,
# "status": 200
# } | 41.212291 | 121 | 0.638471 | 854 | 7,377 | 5.251756 | 0.24356 | 0.0466 | 0.031215 | 0.042363 | 0.363657 | 0.333779 | 0.278261 | 0.263545 | 0.234114 | 0.234114 | 0 | 0.008626 | 0.277077 | 7,377 | 179 | 122 | 41.212291 | 0.832365 | 0.208893 | 0 | 0.223404 | 0 | 0 | 0.153488 | 0.023135 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.074468 | 0.010638 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85899347be22ba0685e74fcc5e8475db930f138a | 2,305 | py | Python | debexpo/tests/test_utils.py | jadonk/debexpo | a022160492e40cd02bafc413a3cb009551fd6f8d | [
"MIT"
] | null | null | null | debexpo/tests/test_utils.py | jadonk/debexpo | a022160492e40cd02bafc413a3cb009551fd6f8d | [
"MIT"
] | null | null | null | debexpo/tests/test_utils.py | jadonk/debexpo | a022160492e40cd02bafc413a3cb009551fd6f8d | [
"MIT"
] | 2 | 2017-01-20T23:08:40.000Z | 2019-08-13T20:30:00.000Z | # -*- coding: utf-8 -*-
#
# test_utils.py — Test cases for debexpo.lib.utils
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <jonny@debian.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Test cases for debexpo.lib.utils.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
from unittest import TestCase
from debexpo.lib.utils import *
from debexpo.lib.changes import Changes
class TestUtilsController(TestCase):
    """Unit tests for the helper functions in debexpo.lib.utils."""

    def testParseSection(self):
        """parse_section splits 'component/section', defaulting to 'main'."""
        self.assertEqual(parse_section('section'), ['main', 'section'])
        self.assertEqual(parse_section('component/section'),
                         ['component', 'section'])

    def testGetPackageDir(self):
        """get_package_dir builds the pool-style prefix directory."""
        self.assertEqual(get_package_dir('foo'), 'f/foo')
        self.assertEqual(get_package_dir('libfoo'), 'libf/libfoo')

    def testMd5sum(self):
        """md5sum returns the expected hex digest for a known changes file."""
        expected = 'fbb0b9c81f8a4fa9b8e3b789cf3b5220'
        self.assertEqual(
            md5sum('debexpo/tests/changes/synce-hal_0.1-1_source.changes'),
            expected)
| 33.897059 | 124 | 0.695879 | 304 | 2,305 | 5.217105 | 0.483553 | 0.055486 | 0.056747 | 0.035939 | 0.108449 | 0.034048 | 0 | 0 | 0 | 0 | 0 | 0.017496 | 0.206508 | 2,305 | 67 | 125 | 34.402985 | 0.848004 | 0.608243 | 0 | 0 | 0 | 0 | 0.250313 | 0.105131 | 0 | 0 | 0 | 0 | 0.294118 | 1 | 0.176471 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85998c2ca2be37ec4bb630ebbb2d3b94c9c6145d | 5,732 | py | Python | T3 2D - Tau Calculator.py | sohdesune/SUTD-projects | 1061bd7e2756dcf4ea6f7e1ed0411f9b761e9bc1 | [
"MIT"
] | null | null | null | T3 2D - Tau Calculator.py | sohdesune/SUTD-projects | 1061bd7e2756dcf4ea6f7e1ed0411f9b761e9bc1 | [
"MIT"
] | null | null | null | T3 2D - Tau Calculator.py | sohdesune/SUTD-projects | 1061bd7e2756dcf4ea6f7e1ed0411f9b761e9bc1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 00:58:08 2018
@author: sohdesune
"""
'''
ln|T_w - T| - ln|T_w - T_amb| = -(1/tau) * t
1. Extract raw data for first 20s from csv
2. Plot complicated ln function vs t
3. Compute tau
4. [Remove outliers]
5. Write tau vs T_w to txt '''
from math import log as ln
from numpy import polyfit
'''============================================== Reading raw data from csv '''
# Ambient temperatures (deg C) recorded for each trial; index-aligned with
# T_w below and with the per-trial blocks in the csv file.
T_amb = [26.375, 26.687, 28.312, 25.562, 28.312, 27.062, 31.125, 31.750,
         28.625, 29.687, 26.375, 28.062, 30.125, 25.625, 27.250, 29.687,
         31.125, 26.125, 33.437, 30.000, 27.000, 24.687, 31.000, 33.500,
         33.187, 32.937, 29.500, 29.062, 28.062, 30.375, 30.437, 26.687,
         32.312, 30.937, 23.937, 27.500, 32.125, 31.125, 32.250, 31.875,
         25.250, 29.375, 34.312, 24.250, 31.750, 30.875, 29.687, 31.812,
         30.875, 32.562, 30.812, 26.875, 33.187, 31.062, 25.062, 31.312]
# Water temperature (deg C) for each trial.
T_w = [11.8, 11.8, 12.6, 12.6, 12.6, 13.2, 13.3, 13.3, 14.3, 14.3, 14.3, 16.0,
       16.1, 17.4, 17.4, 18.8, 18.9, 20.0, 21.3, 21.3, 21.3, 22.5, 22.9, 29.5,
       29.6, 29.8, 35.0, 35.3, 35.7, 37.4, 37.9, 38.5, 40.8, 41.3, 41.9, 43.6,
       43.9, 44.3, 46.3, 46.6, 47.0, 48.5, 48.8, 49.1, 50.1, 50.4, 50.9, 51.1,
       51.4, 51.7, 51.9, 52.3, 56.2, 56.7, 56.9, 57.4]
# Path to the csv file; each trial contributes two ';'-separated rows
# (times, then temperatures) followed by a blank row.
data = 'directory to csv file with temp vs time data'
f = open(data, 'r')
print('\nReading data from csv file.')
print('Directory:\n{}\n'.format(data))
line = f.readline()
i = 0  # trial index into T_w / T_amb
all_results = []
#each entry: [T_w, T_amb, list of x values, list of y values]
#each while loop reads the time and temp data for one T_w set
while line != '':
    x_val = []
    y_val = []
    # First row of the pair: time stamps (x axis).
    time = line.strip().split(';')
    for elem in time:
        x_val.append(float(elem))
    # Second row of the pair: temperatures, converted to the logarithm
    # ln|T_w - T| - ln|T_w - T_amb| (y axis).
    line = f.readline()
    temp = line.strip().split(';')
    #compute ln values
    for elem in temp:
        try:
            value = ln(abs(T_w[i] - float(elem))) - ln(abs(T_w[i] - T_amb[i]))
        except ValueError:
            # ln() of zero (temperature exactly equal to T_w or T_amb);
            # substitute a large negative value instead of aborting.
            print('ValueError at T = {}'.format(elem))
            print('Occurred for T_w = {}, T_amb = {}'.format(T_w[i], T_amb[i]))
            value = ln(0.001)
        y_val.append(value)
    dataset = [x_val, y_val]
    all_results.append([T_w[i], T_amb[i], dataset])
    line = f.readline() #skip blank row
    line = f.readline()
    i += 1
f.close()
print('\nData compiled and modified into the complicated logarithm.')
'''====================================================== Performing linreg '''
linreg_results = []
#each entry: [T_w, gradient, y-intercept]
for result in all_results:
    # Fit a straight line to (time, logarithm) for this trial; the slope
    # is -(1/tau) per the model in the header comment.
    grad, y_int = polyfit(result[2][0], result[2][1], 1)
    #print('T_w = {}: gradient {:+.3f}, y-intercept {:+.3f}'.format(result[0], grad, y_int))
    linreg_results.append([result[0], grad, y_int])
print('\nLinear regression performed for abovementioned logarithm vs time.')
'''========================================================= Determining tau'''
# tau = -1/slope for each trial; twater is the matching water temperature.
tau = [(-1/item[1]) for item in linreg_results]
twater = [item[0] for item in linreg_results]
print('\nTau values computed.')
'''=================================================== Plot regression line '''
# Regression of tau against water temperature over the full data set.
grad, y_int = polyfit(twater, tau, 1)
print('\nRegression line calculated for full data set of tau against T_water.')
print('Gradient: {:.3f} y-intercept: {:.3f}'.format(grad, y_int))
'''=========================== Remove anomalies and re-plot regression line '''
''' Perpendicular distance from a point to the regression line y = g*x + c:
shift the point down by the intercept, b = [x, y - c]^T, then project b onto
the line's direction (1, g) using the projection matrix
    P = [1 g]^T [1 g] / ([1 g] [1 g]^T)
      = [ 1  g ]
        [ g  g^2 ] / (g^2 + 1)
so  Pb = [x + g(y - c),  g(x + g(y - c))]^T / (g^2 + 1)
and the distance is |b - Pb|  (equivalently sqrt(|b|^2 - |Pb|^2)). '''
def dist_from_regr(g, c, x, y):
    """Return the perpendicular distance from point (x, y) to y = g*x + c.

    The point is shifted down by the intercept so the line passes through
    the origin, projected onto the line's direction vector (1, g), and the
    distance between the point and its projection is returned.  The previous
    version projected (x, y) instead of (x, y - c), omitting the intercept
    shift and overestimating the distance whenever both g and c are
    non-zero.
    """
    x_proj = (x + g * (y - c)) / (1 + g**2)
    y_proj = g * x_proj + c
    distance = ((x - x_proj)**2 + (y - y_proj)**2)**0.5
    return distance
num_outliers = 0 #number of outliers you wish to remove
removed = 0
# Repeatedly drop the point farthest from the current regression line and
# refit.  With num_outliers == 0 this loop is skipped entirely.
while removed < num_outliers:
    dist_list = []
    for i in range(len(twater)):
        dist_list.append(dist_from_regr(grad, y_int, twater[i], tau[i]))
    # Index of the worst-fitting point; pop it from both parallel lists.
    m = dist_list.index(max(dist_list))
    m_twater = twater.pop(m)
    m_tau = tau.pop(m)
    print('\n{:.1f},{:.1f} removed for being {:.1f} away from regression line.'.format(
        m_twater, m_tau, dist_list[m]))
    grad, y_int = polyfit(twater, tau, 1)
    print('New regression line plotted after removing outlier.')
    removed += 1
print('\n========================================================\n\nRESULT\n')
print('{} outliers removed from original data.'.format(num_outliers))
# Final fit over the cleaned data.
grad, y_int = polyfit(twater, tau, 1)
print('Final regression line plotted from {} pairs of values.'.format(len(twater)))
print('Gradient: {:.3f} y-intercept: {:.3f}'.format(grad, y_int))
'''============================================= Write cleaned data to file '''
#send data to txt file to settle the remaining manipulations in Excel
def send_data():
    """Append the cleaned (twater, tau) pairs, one CSV line each, to a text file."""
    # Destination text file (hard-coded placeholder; update before running).
    sendto = 'txt for writing to'
    f2 = open(sendto, 'a')
    for i in range(len(twater)):
        f2.write('{},{}\n'.format(twater[i], tau[i]))
    f2.close()
    print('\nCleaned data set written to text file for further processing.')
    print('Destination:\n{}'.format(sendto))
#checkpoint to ensure intentional writing
answer = input('Are you sure you want to write the results to txt? Y/N: ')
if answer == 'Y' or answer == 'y':
    send_data()
else:
    print('Data not written.')
859c6ae60388a87f3a8c1e4b27fc07a9168d7730 | 1,656 | py | Python | boidfunc/endpoint_func.py | travelingnight/boidload | 55df24c3f22104fdf67219d2f7286f71df80c2e7 | [
"MIT"
] | null | null | null | boidfunc/endpoint_func.py | travelingnight/boidload | 55df24c3f22104fdf67219d2f7286f71df80c2e7 | [
"MIT"
] | null | null | null | boidfunc/endpoint_func.py | travelingnight/boidload | 55df24c3f22104fdf67219d2f7286f71df80c2e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Allan Millar
Various functions related to sockets, ip's, port's etc.
"""
import sys, random, socket
from contextlib import closing
def find_port():
    """Pick a random high TCP port on localhost that nothing is listening on.

    Only ever run from the already-captured machine itself.  Ports are
    probed at random so that processes already present on the machine are
    disturbed as little as possible; a closed port is guaranteed not to
    interfere with anything running.
    """
    host = "localhost"
    while True:
        candidate = random.randint(10000, 65535)
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with closing(probe):
            # connect_ex returns 0 when something accepted the connection,
            # i.e. the port is in use; keep drawing until one is free.
            if probe.connect_ex((host, candidate)) != 0:
                return candidate
def get_ip():
    """Return this machine's primary outbound IPv4 address as a string.

    Opens a UDP socket and "connects" it to an arbitrary external address;
    no packet is actually sent, but the OS selects the interface that would
    be used, whose address is then read back.  Falls back to the loopback
    address when no route is available.

    Adapted from:
    https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Destination doesn't even have to be reachable -- connect() on a
        # UDP socket only picks a route.
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except OSError:
        # Was a bare "except:", which also swallowed KeyboardInterrupt and
        # SystemExit; only socket/route errors belong here.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
859f202a2f2f1138c22ae3526261447b406a220f | 3,339 | py | Python | yt_playlist_downloader.py | lautisilber/youtube_playlist_downloader | 95f24f3a059a6efb02a81f352b08186651fd6aae | [
"MIT"
] | null | null | null | yt_playlist_downloader.py | lautisilber/youtube_playlist_downloader | 95f24f3a059a6efb02a81f352b08186651fd6aae | [
"MIT"
] | null | null | null | yt_playlist_downloader.py | lautisilber/youtube_playlist_downloader | 95f24f3a059a6efb02a81f352b08186651fd6aae | [
"MIT"
] | null | null | null | '''
created by Lautaro Silbergleit on 2021
'''
import re
from pytube import Playlist, YouTube
from tqdm import tqdm
from os import makedirs, listdir, remove
from os.path import join, exists, isfile
import json
from time import sleep
# Characters stripped from video titles before they are used as filenames.
SENSITIVE_CHARACTERS = ['%', ':']


def main():
    """Download every YouTube playlist listed in playlist_urls.json.

    Workflow per playlist:
      1. cache each video's url and sanitized title in a hidden json file
         (re-gathered only when the playlist length changed);
      2. delete the most recently downloaded .mp4, since an interrupted run
         may have left it incomplete;
      3. download every video that is not already present.
    """
    PLAYLIST_URL_PATH = 'playlist_urls.json'
    PLAYLIST_VIDEOS_URLS_PATH = '.playlist_videos_urls.json'
    PLAYLIST_DOWNLOAD_PATH = 'playlists'

    # Bootstrap a template url file on first run, then exit.
    if not exists(PLAYLIST_URL_PATH):
        create = input(f"There's no file named {PLAYLIST_URL_PATH} in this directory\nDo you want to create one [y/n]")
        create = True if create in ['y', 'Y', 'yes', 'Yes'] else False
        if create:
            with open(PLAYLIST_URL_PATH, 'w') as f:
                json.dump(['playlist_url_1', 'playlist_url_2', 'playlist_url_3', '...'], f)
        return

    with open(PLAYLIST_URL_PATH, 'r') as f:
        playlist_urls = json.load(f)

    # create file with all video's urls
    if not exists(PLAYLIST_VIDEOS_URLS_PATH):
        with open(PLAYLIST_VIDEOS_URLS_PATH, 'w') as f:
            json.dump({}, f)

    assert isinstance(playlist_urls, list)
    for playlist_url in playlist_urls:  # for each playlist
        playlist = Playlist(playlist_url)
        # Work around pytube's stale playlist-page regex.
        playlist._video_regex = re.compile(r"\"url\":\"(/watch\?v=[\w-]*)")
        playlist_name = playlist.title
        print(f"\n Downloading playlist: '{playlist_name}'")

        # create playlist download directory
        path = join(PLAYLIST_DOWNLOAD_PATH, playlist.title)
        if not exists(path):
            makedirs(path)

        playlist_length = len(list(playlist.video_urls))
        with open(PLAYLIST_VIDEOS_URLS_PATH, 'r') as f:
            saved_urls = json.load(f)
        if playlist_name not in saved_urls:
            saved_urls[playlist_name] = []
        # (Re)build the cached url/title list when the playlist changed size.
        if len(saved_urls[playlist_name]) != playlist_length:
            saved_urls[playlist_name] = []
            print('Gathering video info...')
            for url in tqdm(list(playlist.video_urls)):
                youtube = YouTube(url)
                title = youtube.title
                for c in SENSITIVE_CHARACTERS:
                    title = title.replace(c, '')
                saved_urls[playlist_name].append({'url': url, 'title': title})
            with open(PLAYLIST_VIDEOS_URLS_PATH, 'w') as f:
                json.dump(saved_urls, f)
            print('done')

        # check downloads
        all_files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
        all_videos = [v for v in all_files if v.endswith('.mp4')]
        if len(all_videos) == len(saved_urls[playlist_name]):  # if target video count matches video count, return
            print('All files were allready downloaded')
            continue

        removed_last = False
        if all_videos:  # if at least one video was downloaded, delete last
            for obj in reversed(saved_urls[playlist_name]):
                if removed_last:
                    break
                title = obj['title']
                # if any video matches the title, remove it since it was the
                # last one and the download may not have been complete
                for video_path in all_videos:
                    if title in video_path:
                        remove(video_path)
                        removed_last = True
                        print(f"Removed last incomplete download '{title}.mp4'")
                        break

        # download videos that weren't already downloaded
        print('Downloading...')
        for obj in tqdm(saved_urls[playlist_name]):
            url = obj['url']
            title = obj['title']
            p = join(path, f'{title}.mp4')
            if not exists(p):
                youtube = YouTube(url)
                video = youtube.streams.get_highest_resolution()
                video.download(path)
            else:
                sleep(.1)
        print('done')


if __name__ == '__main__':
    main()
| 31.8 | 126 | 0.698712 | 500 | 3,339 | 4.486 | 0.266 | 0.049041 | 0.053054 | 0.065537 | 0.119483 | 0.057958 | 0.03745 | 0.03745 | 0.03745 | 0.03745 | 0 | 0.00548 | 0.180294 | 3,339 | 104 | 127 | 32.105769 | 0.81403 | 0.116502 | 0 | 0.121951 | 0 | 0.02439 | 0.144853 | 0.008862 | 0 | 0 | 0 | 0 | 0.012195 | 1 | 0.012195 | false | 0 | 0.085366 | 0 | 0.109756 | 0.097561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85a07ddbba59bb16c26817858612ab97a63c481d | 1,344 | py | Python | 2019/day13.py | kyz/adventofcode | b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d | [
"MIT"
] | null | null | null | 2019/day13.py | kyz/adventofcode | b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d | [
"MIT"
] | null | null | null | 2019/day13.py | kyz/adventofcode | b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d | [
"MIT"
] | null | null | null | import intcode
def breakout_demo(p):
    """Run the game program once, with no input, and count block tiles drawn."""
    vm = intcode.computer(p)
    tiles = dict()
    while True:
        try:
            # Output comes in triples: column, row, tile id.
            col = next(vm)
            row = next(vm)
            tiles[col, row] = next(vm)
        except StopIteration:
            return bricks_remaining(tiles)
def breakout(p):
    """Play the game on program *p* in free-play mode and return the final score."""
    p[0] = 2  # memory address 0 set to 2 enables free play (no quarters)
    cpu = intcode.computer(p)
    screen = dict()
    joystick, paddle, ball = 0, None, None
    #print("\033[2J")
    while True:
        x = next(cpu)
        # The generator yields None when it is waiting for input; resume it
        # by sending the current joystick position.
        if x is None: x = cpu.send(joystick)
        y = next(cpu)
        tile = next(cpu)
        screen[x,y] = tile
        # Output at (-1, 0) is the score; it is the final score once no
        # block tiles remain on screen.
        if x == -1 and y == 0 and bricks_remaining(screen) == 0:
            return tile # final score
        elif tile == 3:
            paddle = x
        elif tile == 4:
            ball = x
        # Simple AI: steer the paddle toward the ball's column.
        if paddle is not None and ball is not None:
            joystick = -1 if ball < paddle else 1 if ball > paddle else 0
        # Optional terminal rendering, left disabled:
        #print("\033[H Score: %d" % screen.get((-1,0), 0))
        #for y in range(20):
        #    print("".join([" #.=O"[screen.get((x,y), 0)] for x in range(40)]))
def bricks_remaining(screen):
    """Count the tiles on the screen that are still blocks (tile id 2)."""
    return sum(1 for tile in screen.values() if tile == 2)
# Read the comma-separated intcode program and run both puzzle parts.
with open("day13.txt") as fh:
    p = [int(c) for c in fh.readline().split(",")]
print("2019 day 13 part 1: %d" % breakout_demo(p))
print("2019 day 13 part 2: %d" % breakout(p))
| 30.545455 | 79 | 0.530506 | 202 | 1,344 | 3.504951 | 0.336634 | 0.059322 | 0.025424 | 0.053672 | 0.234463 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0.048458 | 0.324405 | 1,344 | 43 | 80 | 31.255814 | 0.731278 | 0.122768 | 0 | 0.228571 | 0 | 0 | 0.046036 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.028571 | 0.028571 | 0.2 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85a0e2d9282c759866463e075085469b476b17b8 | 1,650 | py | Python | syncany/calculaters/__init__.py | snower/syncany | 32d32907a155618678d5b2335cd8a70192ed1e6f | [
"MIT"
] | 5 | 2018-08-15T13:45:30.000Z | 2021-03-18T01:51:47.000Z | syncany/calculaters/__init__.py | snower/syncany | 32d32907a155618678d5b2335cd8a70192ed1e6f | [
"MIT"
] | null | null | null | syncany/calculaters/__init__.py | snower/syncany | 32d32907a155618678d5b2335cd8a70192ed1e6f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# 18/8/15
# create by: snower
from .calculater import Calculater
from .builtin import *
from .conversion_calculater import ConvCalculater
from ..errors import CalculaterUnknownException
# Registry mapping calculater names (as used in configuration expressions)
# to their implementing classes; "" is the base/no-op calculater.  Extend
# at runtime via register_calculater(), resolve via find_calculater().
CALCULATERS = {
    "": Calculater,
    "type": TypeCalculater,
    'range': RangeCalculater,
    "add": AddCalculater,
    "sub": SubCalculater,
    "mul": MulCalculater,
    "div": DivCalculater,
    "mod": ModCalculater,
    "bit": BitCalculater,
    "substring": SubstringCalculater,
    "split": SplitCalculater,
    "join": JoinCalculater,
    "now": NowCalculater,
    "gt": GtCalculater,
    "gte": GteCalculater,
    "lt": LtCalculater,
    "lte": LteCalculater,
    "eq": EqCalculater,
    "neq": NeqCalculater,
    "and": AndCalculater,
    "or": OrCalculater,
    "in": InCalculater,
    "max": MaxCalculater,
    "min": MinCalculater,
    "len": LenCalculater,
    "abs": AbsCalculater,
    "index": IndexCalculater,
    "filter": FilterCalculater,
    "sum": SumCalculater,
    "sort": SortCalculater,
    "string": StringCalculater,
    "array": ArrayCalculater,
    "map": MapCalculater,
    "math": MathCalculater,
    "hash": HashCalculater,
    "json": JsonCalculater,
    "struct": StructCalculater,
    "conv": ConvCalculater,
}
def find_calculater(name):
    """Look up a calculater class by name; 'foo::bar' resolves by 'foo'."""
    key = name.split("::")[0]
    if key not in CALCULATERS:
        raise CalculaterUnknownException("%s is unknown calculater" % key)
    return CALCULATERS[key]
def register_calculater(name, calculater):
    """Register *calculater* under *name* and return it (decorator-friendly)."""
    if issubclass(calculater, Calculater):
        CALCULATERS[name] = calculater
        return calculater
    raise TypeError("is not Calculater")
85a25605545d4e4f832fe9a3447b0a972658df24 | 1,094 | py | Python | sweet/features/gabor.py | charlienewey/penumbra-python | a848adf5628a37339354f5ed5a747b03cc4df9bd | [
"BSD-3-Clause"
] | 1 | 2017-10-16T03:47:51.000Z | 2017-10-16T03:47:51.000Z | sweet/features/gabor.py | charlienewey/penumbra-python | a848adf5628a37339354f5ed5a747b03cc4df9bd | [
"BSD-3-Clause"
] | null | null | null | sweet/features/gabor.py | charlienewey/penumbra-python | a848adf5628a37339354f5ed5a747b03cc4df9bd | [
"BSD-3-Clause"
] | null | null | null | import math
import skimage.filters
def variance_difference(image_1, image_2):
    """Magnitude of the difference between the variances of two images,
    or the mean of the pairwise differences when given two lists of images."""
    def _pair_diff(a, b):
        # sqrt of the squared gap == absolute difference of the variances
        return math.sqrt((a.var() - b.var()) ** 2)
    if not isinstance(image_1, list):
        return _pair_diff(image_1, image_2)
    total = sum(_pair_diff(image_1[i], image_2[i]) for i in range(len(image_1)))
    return total / len(image_1)
def mean_squared_error(image_1, image_2):
    """Mean squared error between two images, or the average of the
    element-wise MSEs when given two lists of corresponding images."""
    def _pair_mse(a, b):
        return ((a - b) ** 2).mean(axis=None)
    if not isinstance(image_1, list):
        return _pair_mse(image_1, image_2)
    total = sum(_pair_mse(image_1[i], image_2[i]) for i in range(len(image_1)))
    return total / len(image_1)
def gabor_filter(image, frequency, theta):
    """Apply a Gabor filter and return the real-valued response; a list of
    images yields a list of responses."""
    if not isinstance(image, list):
        return skimage.filters.gabor_filter(image, frequency, theta)[0]
    # [0] keeps only the real part of the (real, imaginary) pair.
    return [skimage.filters.gabor_filter(img, frequency, theta)[0]
            for img in image]
| 27.35 | 87 | 0.610603 | 168 | 1,094 | 3.714286 | 0.220238 | 0.115385 | 0.070513 | 0.076923 | 0.605769 | 0.166667 | 0.166667 | 0.070513 | 0.070513 | 0 | 0 | 0.043424 | 0.263254 | 1,094 | 39 | 88 | 28.051282 | 0.730769 | 0 | 0 | 0.233333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.066667 | 0.066667 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85a26fa5e4287545bb2aafe69a4a95320c01eec9 | 3,530 | py | Python | log.py | mpagliaro98/multi-drive-backup-tool | f4f00f59c6fc3f2fb3786b76f807e160794f43c6 | [
"MIT"
] | null | null | null | log.py | mpagliaro98/multi-drive-backup-tool | f4f00f59c6fc3f2fb3786b76f807e160794f43c6 | [
"MIT"
] | null | null | null | log.py | mpagliaro98/multi-drive-backup-tool | f4f00f59c6fc3f2fb3786b76f807e160794f43c6 | [
"MIT"
] | null | null | null | """
log.py
Author: Michael Pagliaro
Utility functions specific to writing log files.
"""
import functools
import os
import sys
import traceback
from datetime import datetime

import util
# The log file to be written to whenever log() is called; opened by
# begin_log() and closed by end_log().
LOG_FILE = None
# Subdirectory (under the working directory) where log files are created.
LOGS_DIRECTORY = "logs"
def logger(func):
    """
    Creates a decorator function that when applied to a function, enables logging during the runtime
    of that function. When the function ends, the logfile is closed.
    :param func: The function to decorate.
    :return: A decorator function that wraps another function, controlling logging before and after it runs.
    """
    @functools.wraps(func)
    def wrapper_logger(*args, **kwargs):
        begin_log()
        try:
            return func(*args, **kwargs)
        finally:
            # Close the log even when func raises; previously an exception
            # skipped end_log() and left the log file open and unterminated.
            end_log()
    return wrapper_logger
def begin_log():
    """
    Open a fresh, timestamped log file and write its opening line.  This
    should be called before using log() or end_log().
    """
    global LOG_FILE
    logs_dir = os.path.join(util.working_directory(), LOGS_DIRECTORY)
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    log_path = os.path.join(logs_dir, "log_backup_" + timestamp + ".txt")
    LOG_FILE = open(log_path, "w")
    LOG_FILE.write("Beginning backup log: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n")
def end_log():
    """
    Write the closing line to the current log file and close it.  Only call
    after begin_log(); to log again afterwards, begin_log() must be called
    again, which starts a new file.
    """
    global LOG_FILE
    closing_line = "Ending backup log: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
    LOG_FILE.write(closing_line)
    LOG_FILE.close()
def log(log_str=""):
"""
Logging function, this will take in any given string and write it to a log file in
the running directory. This will automatically print a newline in the log file after
every time this function is called. The begin_log() function must be called before this
can be used.
:param log_str: The string to append to the log file.
"""
global LOG_FILE
LOG_FILE.write(str(log_str.encode('utf8')) + "\n")
def log_print(log_str=""):
"""
Logging function, this takes any string and writes it to the current log file as well as prints it
to standard output. This automatically puts a newline after the string in the file and in the console
output. The log file must be opened before using this function.
:param log_str:
:return:
"""
global LOG_FILE
LOG_FILE.write(str(log_str.encode('utf8')) + "\n")
print(log_str)
def log_exception(error_file_path, action="ACCESSING"):
    """
    Write the most recent exception, including the full traceback, to the log.
    :param error_file_path: The file or folder that caused the error.
    :param action: What was happening to that file, such as "creating" or "deleting".
    """
    divider = '=' * 60
    log("\n" + divider + "\nERROR {} {}".format(action, error_file_path))
    trace_lines = traceback.format_exception(*sys.exc_info())
    log("".join(trace_lines) + divider + "\n")
85a33483e90e1c90f85785e7d45cc06e684432cd | 589 | py | Python | CH04/comma_code.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | CH04/comma_code.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | CH04/comma_code.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | #! python3
def comma_string(_list):
    """Takes a list of items and formats it into a string, separated by
    commas like plain English, with "and" before the final item.

    Args:
        _list: The list of items.
    Returns:
        result: The comma-separated string.  An empty list gives "" and a
        single item is returned on its own (previously a lone item came
        back as "and item")."""
    items = [str(item) for item in _list]
    if not items:
        return ""
    if len(items) == 1:
        return items[0]
    # Multi-item output matches the original formatting, e.g.
    # "a, b, and c" and "a, and b".
    return ", ".join(items[:-1]) + ", and " + items[-1]
# Quick demo: print the crew as an English list.
crew = ["Holden", "Nagata", "Kamal", "Burton", "Miller"]
print(comma_string(crew))
| 22.653846 | 71 | 0.561969 | 73 | 589 | 4.438356 | 0.547945 | 0.067901 | 0.067901 | 0.061728 | 0.104938 | 0.104938 | 0 | 0 | 0 | 0 | 0 | 0.007371 | 0.308998 | 589 | 25 | 72 | 23.56 | 0.788698 | 0.349745 | 0 | 0 | 0 | 0 | 0.098039 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.090909 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85abdadf7f6975446f101ad9487de12633ee5082 | 564 | py | Python | settings.py | Zeebra38/Schedule_bot | 903f7cde755940f226cf8077c2c35550d0291d51 | [
"MIT"
] | null | null | null | settings.py | Zeebra38/Schedule_bot | 903f7cde755940f226cf8077c2c35550d0291d51 | [
"MIT"
] | null | null | null | settings.py | Zeebra38/Schedule_bot | 903f7cde755940f226cf8077c2c35550d0291d51 | [
"MIT"
] | null | null | null | from DataBase import Schedule
# English weekday name -> Russian translation.
weekdays_en = {'Monday': 'Понедельник',
               'Tuesday': 'Вторник',
               'Wednesday': 'Среда',
               'Thursday': 'Четверг',
               'Friday': 'Пятница',
               'Saturday': 'Суббота',
               'Sunday': 'Воскресенье'}

# Russian weekday name -> English.  Derived from weekdays_en (previously a
# hand-maintained duplicate) so the two mappings can never drift apart.
weekdays_ru = {ru: en for en, ru in weekdays_en.items()}
85b14c976895976f7613389ae5b2fd3070acf95a | 789 | py | Python | reporting-plugins/add-to-xml-note/edit-note.py | qbicsoftware/etl-scripts | e1cea11b5f55fb218e7d4c8d49bdd3c5fe6c62c6 | [
"MIT"
] | 2 | 2018-04-20T15:48:02.000Z | 2021-11-30T17:39:28.000Z | reporting-plugins/add-to-xml-note/edit-note.py | qbicsoftware/etl-scripts | e1cea11b5f55fb218e7d4c8d49bdd3c5fe6c62c6 | [
"MIT"
] | 41 | 2017-07-19T11:17:26.000Z | 2021-09-28T12:10:49.000Z | reporting-plugins/add-to-xml-note/edit-note.py | qbicsoftware/etl-scripts | e1cea11b5f55fb218e7d4c8d49bdd3c5fe6c62c6 | [
"MIT"
] | 2 | 2017-04-27T10:32:33.000Z | 2018-02-20T09:26:12.000Z | def wrap(element, input):
return "<"+element+">"+input+"</"+element+">\n"
def process(tr, parameters, tableBuilder):
    """Append a timestamped user note to the Q_NOTES XML property of the
    openBIS sample or experiment identified by parameters["id"]."""
    entity_id = parameters.get("id")
    # Sample identifiers have three slash-separated parts; anything else is
    # treated as an experiment identifier.
    if len(entity_id.split("/")) == 3:
        entity = tr.getSampleForUpdate(entity_id)
    else:
        entity = tr.getExperimentForUpdate(entity_id)
    user = parameters.get("user")
    comment = parameters.get("comment")
    time = str(parameters.get("time"))
    existing_xml = entity.getPropertyValue("Q_NOTES")
    notes = ""
    try:
        # Keep everything except the closing tag so the new note can be
        # appended inside the existing <notes> document.
        for line in existing_xml.split("\n"):
            if not "</notes>" in line:
                notes += line
    except:
        # Property empty/unset (None): start a fresh document.
        notes = "<notes>"
    new_note = "\n<note>\n"
    new_note += wrap("comment", comment) + wrap("time", time) + wrap("username", user)
    new_note += "</note>\n"
    notes += new_note
    notes += "</notes>"
    entity.setPropertyValue("Q_NOTES", notes)
85b1d6a22c0d0090a75d779c87cff9038cdd496d | 994 | py | Python | core/main.py | rafael-junio/JustAChip8PythonEmulator | ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7 | [
"MIT"
] | null | null | null | core/main.py | rafael-junio/JustAChip8PythonEmulator | ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7 | [
"MIT"
] | null | null | null | core/main.py | rafael-junio/JustAChip8PythonEmulator | ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7 | [
"MIT"
] | null | null | null | from core.cpu.instructions import Cpu
from core.cpu.config.memory_starter import MemoryStarter
from core.cpu.config.memory_config import Config
from core.reader.file_reader import FileReader
class Main:
    """Wires the CHIP-8 CPU to its memory setup and drives the fetch cycle."""

    def __init__(self):
        # CPU first, then the memory starter that is bound to it.
        self.chip8_cpu = Cpu()
        self.memory_management = MemoryStarter(self.chip8_cpu)

    def run(self):
        """Load the ROM and font set into memory, then start the fetch cycle."""
        rom = FileReader.file_reader()
        rom_buffer = FileReader.load_binary_to_buffer(rom)
        self.memory_management.load_into_memory(rom_buffer, Config.MEMORY_START_ADDRESS)
        self.memory_management.load_into_memory(Config.FONT_SET, Config.FONT_SET_START_ADDRESS)
        self.cycle()

    def cycle(self):
        """Fetch one two-byte opcode (big-endian), advance the PC, and print it."""
        cpu = self.chip8_cpu
        pc = cpu.pc
        cpu.current_opcode = (cpu.memory[pc] << 8) | cpu.memory[pc + 1]
        cpu.pc = pc + 2
        print(hex(cpu.current_opcode))
85b65c485dd500cc66bdbb6dadc0a74ee700639d | 2,179 | py | Python | Bot/src/unclassified/motivation.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 181 | 2021-05-26T17:37:40.000Z | 2022-02-26T08:36:07.000Z | Bot/src/unclassified/motivation.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 24 | 2021-05-14T19:47:34.000Z | 2021-09-06T17:16:17.000Z | Bot/src/unclassified/motivation.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 16 | 2021-07-02T09:40:56.000Z | 2022-01-21T10:07:08.000Z | # !/usr/bin/python
"""
Copyright ©️: 2020 Seniatical / _-*™#7519
License: Apache 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
FULL LICENSE CAN BE FOUND AT:
https://www.apache.org/licenses/LICENSE-2.0.html
Any violation to the license, will result in moderate action
You are legally required to mention (original author, license, source and any changes made)
"""
import discord
from discord.ext import commands
from datetime import timedelta
from discord.ext.commands import BucketType, cooldown
from discord import File
import random
import os
from utility.quotes import words, images
class Motivation(commands.Cog):
    """Cog serving motivational quotes, quote images and audio speeches."""

    def __init__(self, bot):
        self.bot = bot
        # Collect every speech file under ./storage/speeches, including one
        # level of sub-directories.  The previous scan tested
        # os.path.isdir() on the bare entry name (relative to the CWD, not
        # the speeches folder), concatenated directory and file names with
        # no separator, and used a wrong './speeches/' prefix, so the stored
        # paths could not actually be opened.
        self.speech_paths = []
        base_dir = './storage/speeches'
        for entry in os.listdir(base_dir):
            entry_path = os.path.join(base_dir, entry)
            if os.path.isdir(entry_path):
                for inner in os.listdir(entry_path):
                    self.speech_paths.append(os.path.join(entry_path, inner))
            else:
                self.speech_paths.append(entry_path)

    @commands.command(aliases=['Quotes'])
    @cooldown(1, 10, BucketType.user)
    async def quote(self, ctx):
        """Send a random motivational quote as a gold embed."""
        return await ctx.send(embed=discord.Embed(
            description=random.choice(words),
            colour=discord.Colour.gold()
        ))

    @commands.command(aliases=['VQ', 'ImgQ', 'IQuote'])
    @cooldown(1, 15, BucketType.user)
    async def imagequote(self, ctx):
        """Send a random quote image as a gold embed."""
        return await ctx.send(embed=discord.Embed(
            colour=discord.Colour.gold(),
        ).set_image(url=random.choice(images)))

    @commands.command(aliases=['Speeches'])
    @cooldown(1, 120, BucketType.user)
    async def speech(self, ctx):
        """Upload a random motivational speech as an mp3 attachment."""
        return await ctx.send(content='Enjoy this speech to listen to!',
                              file=discord.File(random.choice(self.speech_paths), filename='speech.mp3'))
def setup(bot):
    """Entry point used by discord.py's extension loader to register this cog."""
    cog = Motivation(bot)
    bot.add_cog(cog)
| 35.721311 | 114 | 0.658559 | 275 | 2,179 | 5.185455 | 0.501818 | 0.02805 | 0.042076 | 0.046283 | 0.101683 | 0.076438 | 0.058906 | 0.058906 | 0.058906 | 0 | 0 | 0.013789 | 0.234511 | 2,179 | 60 | 115 | 36.316667 | 0.838729 | 0.267554 | 0 | 0.052632 | 0 | 0 | 0.079188 | 0.016361 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85b702a0495ec20a4916f8eef85a4c0805c48829 | 1,882 | py | Python | curami/analysis/pair_matching_word_base.py | EBIBioSamples/curami-v2 | 671ec5f1d48b866c6ccb24fcddfb80610c377e07 | [
"Apache-2.0"
] | null | null | null | curami/analysis/pair_matching_word_base.py | EBIBioSamples/curami-v2 | 671ec5f1d48b866c6ccb24fcddfb80610c377e07 | [
"Apache-2.0"
] | 2 | 2020-07-02T13:56:03.000Z | 2021-06-01T23:51:49.000Z | curami/analysis/pair_matching_word_base.py | EBIBioSamples/curami-v2 | 671ec5f1d48b866c6ccb24fcddfb80610c377e07 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from curami.commons import file_utils
'''
Match pair of attributes for their base form similarity
Generates matched attribute file by measuring the syntactic similarity between the base form of the two attributes.
Outputs two attributes and similarity score
'''
match_ratio = 0.85  # NOTE(review): not referenced in this module -- confirm before removing


def analyze():
    """Match attribute pairs whose lemmatized or stemmed base forms coincide.

    Reads the matched-attributes CSV, compares the lemmatized (ratio 1) and,
    failing that, the stemmed (ratio 0.8) base forms of each attribute pair,
    then writes the matches sorted by ratio (descending) to the word-base
    output CSV.
    """
    attributes = pd.read_csv(file_utils.matched_attributes_file, encoding=file_utils.encoding)
    stemmer = LancasterStemmer()
    lemmatizer = WordNetLemmatizer()
    matched_attributes = []
    for index, row in attributes.iterrows():
        # First try the milder normalisation: lemmatization.
        attribute1 = ' '.join(lemmatizer.lemmatize(w) for w in row["ATTRIBUTE_1"].split())
        attribute2 = ' '.join(lemmatizer.lemmatize(w) for w in row["ATTRIBUTE_2"].split())
        if attribute1 == attribute2:
            matched_attributes.append({"ATTRIBUTE_1": row["ATTRIBUTE_1"],
                                       "ATTRIBUTE_2": row["ATTRIBUTE_2"],
                                       "RATIO": 1})
            continue
        # Fall back to the more aggressive stemming; score such matches lower.
        attribute1 = ' '.join(stemmer.stem(w) for w in row["ATTRIBUTE_1"].split())
        attribute2 = ' '.join(stemmer.stem(w) for w in row["ATTRIBUTE_2"].split())
        if attribute1 == attribute2:
            matched_attributes.append({"ATTRIBUTE_1": row["ATTRIBUTE_1"],
                                       "ATTRIBUTE_2": row["ATTRIBUTE_2"],
                                       "RATIO": 0.8})
    # Bug fix: passing explicit columns keeps sort_values from raising
    # KeyError when no pair matched (an empty DataFrame built from an empty
    # list would otherwise have no "RATIO" column).
    pd_matched_attributes = pd.DataFrame(matched_attributes,
                                         columns=["ATTRIBUTE_1", "ATTRIBUTE_2", "RATIO"])
    pd_matched_attributes = pd_matched_attributes.sort_values(by="RATIO", ascending=False)
    pd_matched_attributes.to_csv(
        file_utils.word_base_matched_attribute_file, index=False, encoding=file_utils.encoding)
# Allow running this analysis directly as a script.
if __name__ == "__main__":
    analyze()
| 37.64 | 115 | 0.655154 | 216 | 1,882 | 5.481481 | 0.328704 | 0.129223 | 0.016892 | 0.023649 | 0.374155 | 0.327703 | 0.327703 | 0.327703 | 0.327703 | 0.27027 | 0 | 0.018284 | 0.244421 | 1,882 | 49 | 116 | 38.408163 | 0.814346 | 0.007439 | 0 | 0.2 | 0 | 0 | 0.096833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85b977a716a0ab42b9f5ac5d7528fcff4dd7ef93 | 7,154 | py | Python | knee/zmethod.py | mariolpantunes/knee | fa4678a55f1f2d161f982b8214541cf8f392444d | [
"MIT"
] | 2 | 2021-09-03T02:59:10.000Z | 2021-12-28T16:32:28.000Z | knee/zmethod.py | mariolpantunes/knee | fa4678a55f1f2d161f982b8214541cf8f392444d | [
"MIT"
] | 9 | 2021-06-05T08:10:30.000Z | 2022-01-05T20:50:32.000Z | knee/zmethod.py | mariolpantunes/knee | fa4678a55f1f2d161f982b8214541cf8f392444d | [
"MIT"
] | 4 | 2020-12-04T07:04:34.000Z | 2021-09-03T02:59:19.000Z | # coding: utf-8
__author__ = 'Tyler Estro'
__version__ = '0.1'
__email__ = 'testro@cs.stonybrook.edu'
__status__ = 'Development'
import numpy as np
import logging
import uts.gradient as grad
from uts.zscore import zscore_array
logger = logging.getLogger(__name__)
def map_index(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """
    Map knee point values to their indexes in the original array.

    Args:
        a (np.ndarray): numpy array with the points (x)
        b (np.ndarray): numpy array with the knee points (x)

    Returns:
        np.ndarray: the indexes in ``a`` where each value of ``b`` occurs
    """
    order = np.argsort(a)
    positions = np.searchsorted(a, b, sorter=order)
    return order[positions]
def knees(points:np.ndarray, dx:float=0.05, dy:float=0.05, dz:float=0.05, x_max:int=None, y_range:list=None) -> np.ndarray:
    """
    Compute the knee indexes of a curve using the z-method outlier selection.

    Args:
        points (np.ndarray): numpy array with the points (x, y)
        dx (float): % of max cache size between points (default 0.05)
        dy (float): % of max - min miss ratio between points (default 0.05)
        dz (float): amount we decrease outlier_z every iteration (default 0.05)
        x_max (int): max cache size of original (pre-RDP) MRC (default None)
        y_range (list): [max, min] miss ratio of original (pre-RDP) MRC (default None)

    Returns:
        np.ndarray: the indexes of the knee points on the curve
    """
    knee_xs = getPoints(points, dx, dy, dz, False, x_max, y_range)
    # getPoints returns x coordinates; translate them back to indexes.
    return map_index(points[:, 0], np.array(knee_xs))
def getPoints(points: np.ndarray, dx:float=0.05, dy:float=0.05, dz:float=0.05, plot:bool=False, x_max:int=None, y_range:list=None) -> np.ndarray:
    """
    Use our outlier method to find interesting points in an MRC.

    Args:
        points (np.ndarray): numpy array with the points (x, y)
        dx (float): % of max cache size between points (default 0.05)
        dy (float): % of max - min miss ratio between points (default 0.05)
        dz (float): amount we decrease outlier_z every iteration (default 0.05)
        plot (bool): set True if you want to return data useful for plotting
        x_max (int): max cache size of original (pre-RDP) MRC (default None)
        y_range (list): [max, min] miss ratio of original (pre-RDP) MRC (default None)

    Returns:
        list: selected x coordinates sorted ascending (empty list for
        degenerate workloads); when ``plot`` is True, a tuple of
        (selected {x: miss_ratio} dict, z-scores of the 2nd derivative)
    """
    # in case we use RDP, we need the original MRC x/y ranges: x_max,y_range vars
    x_max = x_max if x_max else len(points)
    if y_range:
        y_max,y_min = y_range
    else:
        y_max,y_min = (points[:,1].max(),points[:,1].min())
    # Degenerate workloads: too few points or a flat miss ratio of 1.
    if len(points) < 4:
        logger.debug('pointSelector: < 4 unique requests in workload')
        return []
    if y_min == 1:
        logger.debug('pointSelector: workload completely random (dont bother caching)')
        return []
    # get absolute x and y distances (minimum spacing between selected points)
    x_width = max(1, int(x_max * dx))
    y_height = (y_max - y_min) * dy
    # get z-score of the curve's second derivative
    x = points[:, 0]
    y = points[:, 1]
    yd2 = grad.csd(x, y)
    z_yd2 = zscore_array(x, yd2)
    min_zscore = min(z_yd2)
    # stack the 2nd derivative zscore with the points
    points = np.column_stack((points, z_yd2))
    # outlier_points holds our final selected points
    outlier_points = np.empty((0,2))
    # main loop. start with outliers >= 3 z-score and relax the threshold by
    # dz each iteration until no points remain (or the threshold is exhausted)
    outlier_z = 3
    while True:
        points_added = 0
        # candidate points have a zscore >= outlier_z
        candidates = points[points[:,2] >= outlier_z]
        #print('Candidates: ' + str(len(candidates)) + ' Points: ' + str(len(points)) + ' Outlier_Points: ' +
        #	 str(len(outlier_points)) + ' Outlier_Z: ' + str(round(outlier_z,3)))
        if len(candidates) > 0:
            # split candidates into clusters separated by at least x_width
            x_diff = np.argwhere(np.diff(candidates, axis=0)[:,0] >= x_width).flatten()
            if len(x_diff) == 0:
                # single cluster: keep its best point if it is far enough
                # (in y) from every point already selected
                outlier_best = candidates[np.argmin(candidates[:,1])] # best miss ratio in range
                if all(abs(outlier_best[1]-i) >= y_height for i in outlier_points[:,1]):
                    outlier_points = np.append(outlier_points, [[outlier_best[0], outlier_best[1]]], axis=0)
                    # drop every remaining point inside the exclusion window
                    # around the chosen point
                    points = points[np.where(((points[:,0] <= (outlier_best[0] - x_width)) | (points[:,0] >= (outlier_best[0] + x_width))) & \
                        ((points[:,1] <= (outlier_best[1] - y_height)) | (points[:,1] >= (outlier_best[1] + y_height))))]
                    points_added += 1
            else:
                candidate_outliers = np.empty((0,3))
                x_diff = np.hstack(([0],x_diff,[len(candidates)-1]))
                # first create an array of candidate outliers
                for i in range(0, len(x_diff)-1):
                    # points in this form (0, 1) [1,2) ... [n,End)
                    if i == 0:
                        x_range = candidates[candidates[:,0] <= candidates[x_diff[i+1]][0]]
                    else:
                        x_range = candidates[(candidates[:,0] > candidates[x_diff[i]][0]) & (candidates[:,0] <= candidates[x_diff[i+1]][0])]
                    outlier_best = x_range[np.argmin(x_range[:,1])] # point with best miss ratio in range
                    outlier_best_z = x_range[np.argmin(x_range[:,2])][2] # best z-score in range
                    outlier_best[2] = outlier_best_z
                    candidate_outliers = np.append(candidate_outliers, [outlier_best], axis=0)
                # sort all the candidate outliers by z-score in descending order
                candidate_outliers = candidate_outliers[np.argsort(candidate_outliers[:,2])][::-1]
                for outlier_best in candidate_outliers:
                    if all(abs(outlier_best[1]-i) >= y_height for i in outlier_points[:,1]):
                        outlier_points = np.append(outlier_points, [[outlier_best[0], outlier_best[1]]], axis=0)
                        points = points[np.where(((points[:,0] <= (outlier_best[0] - x_width)) | (points[:,0] >= (outlier_best[0] + x_width))) & \
                            ((points[:,1] <= (outlier_best[1] - y_height)) | (points[:,1] >= (outlier_best[1] + y_height))))]
                        points_added += 1
        # terminating conditions (i think len(points) == 0 is all we need now)
        if len(points) == 0 or ((outlier_z <= min_zscore) and points_added == 0):
            break
        outlier_z -= dz
    # sweep through and points to avoid picking concavity issues: keep only
    # points whose miss ratio is monotonically non-increasing with cache size
    outlier_min_mr = 1.0
    # convert to a dict so we can delete in-place
    outlier_points = {int(x[0]):x[1] for x in outlier_points}
    outlier_keys = list(sorted(outlier_points.keys()))
    for k in outlier_keys:
        if outlier_points[k] > outlier_min_mr:
            del outlier_points[k]
        else:
            outlier_min_mr = outlier_points[k]
    # returns sorted list of cache sizes
    if not plot:
        #return map_index(points, outlier_points)
        return np.array(list(sorted(outlier_points.keys())))
    else:
        return (outlier_points, z_yd2)
| 41.836257 | 146 | 0.588063 | 1,014 | 7,154 | 3.994083 | 0.205128 | 0.057037 | 0.023704 | 0.018765 | 0.393333 | 0.360494 | 0.344198 | 0.344198 | 0.328395 | 0.30716 | 0 | 0.025069 | 0.286273 | 7,154 | 170 | 147 | 42.082353 | 0.768116 | 0.331004 | 0 | 0.223529 | 0 | 0 | 0.034207 | 0.005196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0 | 0.047059 | 0 | 0.152941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85b9a2ded21bb7e26efb4522b699d759a2fe4d28 | 1,496 | py | Python | services/logo_finder_service.py | fedsp/site2data | 5e049c3b96875283bf854ece6796abfd44690954 | [
"MIT"
] | null | null | null | services/logo_finder_service.py | fedsp/site2data | 5e049c3b96875283bf854ece6796abfd44690954 | [
"MIT"
] | null | null | null | services/logo_finder_service.py | fedsp/site2data | 5e049c3b96875283bf854ece6796abfd44690954 | [
"MIT"
] | null | null | null | from config import settings
import re
class LogoFinderService():
    """Scans a parsed page (BeautifulSoup object) for the site's logo image."""

    def __init__(self, soup_obj, website_url):
        self.soup_obj = soup_obj
        self.website_url = website_url
        self.scrapping_settings = settings['ScrappingSettings']

    def find_logo(self) -> str:
        """Return the scrapped logo full path (or a marker string).

        Returns "NO LOGO FOUND" when no image matches, and prefixes the
        result with "AMBIGUOUS LOGO: " when several candidates were found.
        """
        identifier = self.scrapping_settings['LogoTextIdentifier']
        logos = []
        for image in self.soup_obj.find_all('img'):
            image_address = image.get('src')
            # Idiom fix: `is None` instead of `!= None`.
            if image_address is None:
                continue
            if identifier in image_address.lower():
                logos.append(image_address)
                # Bug fix: skip the class check so the same image is not
                # appended twice (which faked an "ambiguous" result).
                continue
            if 'class' in image.attrs:
                for classname in image.attrs['class']:
                    if identifier in classname.lower():
                        logos.append(image_address)
                        break
        if len(logos) == 0:
            return "NO LOGO FOUND"
        logo = logos[0]
        # Decide whether the first candidate is an absolute or relative URL.
        regex_item = settings['ScrappingSettings']['AbsoluteVsRelativeRegexChecker']
        logo_relative_or_absolute = re.findall(regex_item, logo)
        if len(logo_relative_or_absolute) == 0:
            logo = self.website_url+logo
        if len(logos) > 1:
            print(f'More than one logo found for:{self.website_url}. The first one was chosen arbitrary.')
            logo = f"AMBIGUOUS LOGO: {logo}"
        return logo
85bbef976d587c77a873695fe486aefcf8beff7e | 796 | py | Python | spinbot/gh/util.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | null | null | null | spinbot/gh/util.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | null | null | null | spinbot/gh/util.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | 1 | 2018-05-27T01:49:01.000Z | 2018-05-27T01:49:01.000Z | import github
def IssueRepo(issue):
    """Extract 'owner/repo' from a GitHub issue's API url."""
    url_parts = issue.url.split('/')
    return '/'.join(url_parts[-4:-2])
def HasLabel(issue, name):
    """Return True iff the issue already carries a label called ``name``."""
    return any(label.name == name for label in issue.get_labels())
def AddLabel(gh, issue, name, create=True):
    """Attach the label ``name`` to ``issue``, optionally creating it.

    When the label cannot be obtained (not created), a comment explaining
    why is posted instead.
    """
    if HasLabel(issue, name):
        return
    repo = IssueRepo(issue)
    label = gh.get_label(repo, name, create=create)
    if label is not None:
        issue.add_to_labels(label)
        return
    message = ('Sorry! "{}" is not a label yet, and I don\'t create '.format(name)
               + 'labels to avoid spam.')
    issue.create_comment(message)
def ObjectType(o):
    """Classify a PyGithub object as 'issue', 'repository', or None."""
    if isinstance(o, github.Issue.Issue):
        return 'issue'
    if isinstance(o, github.Repository.Repository):
        return 'repository'
    return None
| 26.533333 | 79 | 0.614322 | 108 | 796 | 4.481481 | 0.435185 | 0.07438 | 0.070248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00339 | 0.258794 | 796 | 29 | 80 | 27.448276 | 0.816949 | 0 | 0 | 0.083333 | 0 | 0 | 0.100503 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.041667 | 0.041667 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85bfbd13cd57659c834b764ccdc661565f83c01a | 3,820 | py | Python | qp/composite.py | meshch/qp | 4f19841769c644ffff3eff297cacf6aeb2ac2cbc | [
"MIT"
] | 4 | 2016-12-06T17:51:45.000Z | 2019-11-15T12:27:24.000Z | qp/composite.py | meshch/qp | 4f19841769c644ffff3eff297cacf6aeb2ac2cbc | [
"MIT"
] | 74 | 2016-11-15T22:11:56.000Z | 2022-03-30T15:38:03.000Z | qp/composite.py | meshch/qp | 4f19841769c644ffff3eff297cacf6aeb2ac2cbc | [
"MIT"
] | 7 | 2017-04-04T19:46:21.000Z | 2021-05-19T06:02:07.000Z | import numpy as np
import scipy as sp
from scipy import stats as sps
import scipy.optimize as op
import qp
class composite(object):
    def __init__(self, components, vb=True):
        """
        A probability distribution that is a linear combination of
        scipy.stats.rv_continuous objects.

        Parameters
        ----------
        components: list or tuple, dicts
            aggregation of dicts defining component functions and their
            coefficients
        vb: boolean
            report on progress to stdout? (currently unused)

        Notes
        -----
        TO DO: change x --> z
        """
        self.components = components
        self.n_components = len(self.components)
        self.component_range = range(self.n_components)
        # Normalise the raw weights so they sum to one.
        raw_weights = np.array([comp['coefficient'] for comp in self.components])
        self.coefficients = raw_weights / np.sum(raw_weights)
        self.functions = np.array([comp['function'] for comp in self.components])

    def pdf(self, xs):
        """
        Evaluates the composite PDF at locations.

        Parameters
        ----------
        xs: float or numpy.ndarray, float
            value(s) at which to evaluate the PDF

        Returns
        -------
        densities: float or numpy.ndarray, float
            value(s) of the PDF at xs
        """
        densities = np.zeros(np.shape(xs))
        for weight, component in zip(self.coefficients, self.functions):
            densities += weight * component.pdf(xs)
        return densities

    def cdf(self, xs):
        """
        Evaluates the composite CDF at locations.

        Parameters
        ----------
        xs: float or numpy.ndarray, float
            value(s) at which to evaluate the CDF

        Returns
        -------
        cumulatives: float or numpy.ndarray, float
            value(s) of the CDF at xs
        """
        cumulatives = np.zeros(np.shape(xs))
        for weight, component in zip(self.coefficients, self.functions):
            cumulatives += weight * component.cdf(xs)
        return cumulatives

    def rvs(self, size):
        """
        Samples the composite probability distribution.

        Parameters
        ----------
        size: int
            number of samples to take

        Returns
        -------
        samples: numpy.ndarray, float
            samples from the PDF
        """
        # Draw which component each sample comes from, then draw that many
        # values from each chosen component.
        groups = np.random.choice(self.component_range, size, p=self.coefficients)
        chosen, counts = np.unique(groups, return_counts=True)
        samples = np.empty(0)
        for comp_idx, n_draws in zip(chosen, counts):
            samples = np.append(samples, self.functions[comp_idx].rvs(n_draws))
        return np.array(samples).flatten()

    def ppf(self, cdfs, ivals=None):
        """
        Evaluates the composite PPF at locations.

        Parameters
        ----------
        cdfs: float or numpy.ndarray, float
            value(s) at which to find quantiles
        ivals: float or numpy.ndarray, float
            initial guesses for quantiles

        Returns
        -------
        xs: float or numpy.ndarray, float
            quantiles
        """
        N = np.shape(cdfs)[0]
        xs = np.zeros(N)
        if ivals is not None:
            xs0 = ivals
        else:
            # Default starting points: mean of the components' own quantiles.
            accumulated = np.zeros(N)
            for component in self.functions:
                accumulated += component.ppf(cdfs)
            xs0 = accumulated / self.n_components
        for n in range(N):
            # Invert the CDF numerically for each requested quantile.
            def _distance_to_target(x):
                return np.absolute(cdfs[n] - self.cdf(x))
            res = op.minimize(_distance_to_target, xs0[n], method="Nelder-Mead", options={"maxfev": 1e5, "maxiter":1e5}, tol=1e-8)
            xs[n] += res.x
        return xs
| 29.612403 | 127 | 0.548953 | 458 | 3,820 | 4.534935 | 0.28821 | 0.046221 | 0.065479 | 0.064035 | 0.341358 | 0.276842 | 0.222918 | 0.222918 | 0.222918 | 0.222918 | 0 | 0.006342 | 0.339529 | 3,820 | 128 | 128 | 29.84375 | 0.816885 | 0.353403 | 0 | 0.065217 | 0 | 0 | 0.021425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.108696 | 0.021739 | 0.369565 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85bfeb01c7c84ab97b4d582a2ab7af8b0450f27b | 1,628 | py | Python | test/PR_test/unit_test/layers/tensorflow/test_reflection_padding_2d.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | test/PR_test/unit_test/layers/tensorflow/test_reflection_padding_2d.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | test/PR_test/unit_test/layers/tensorflow/test_reflection_padding_2d.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import tensorflow as tf
import fastestimator as fe
import fastestimator.test.unittest_util as fet
class TestReflectionPadding2D(unittest.TestCase):
    """Unit tests for the TensorFlow ReflectionPadding2D layer."""

    def setUp(self):
        # 1x3x3x1 tensor holding the values 0..8, row-major.
        values = tf.convert_to_tensor(list(range(9)))
        self.x = tf.reshape(values, (1, 3, 3, 1))

    def test_reflection_padding_2d_double_side(self):
        """Padding (1, 1) reflects one row/column on every border."""
        expected = tf.constant([[[[4], [3], [4], [5], [4]],
                                 [[1], [0], [1], [2], [1]],
                                 [[4], [3], [4], [5], [4]],
                                 [[7], [6], [7], [8], [7]],
                                 [[4], [3], [4], [5], [4]]]])
        layer = fe.layers.tensorflow.ReflectionPadding2D((1, 1))
        padded = layer(self.x)
        self.assertTrue(fet.is_equal(padded, expected))

    def test_reflection_padding_2d_single_side(self):
        """Padding (1, 0) reflects along a single spatial axis only."""
        expected = tf.constant([[[[1], [0], [1], [2], [1]],
                                 [[4], [3], [4], [5], [4]],
                                 [[7], [6], [7], [8], [7]]]])
        layer = fe.layers.tensorflow.ReflectionPadding2D((1, 0))
        padded = layer(self.x)
        self.assertTrue(fet.is_equal(padded, expected))
| 41.74359 | 109 | 0.59828 | 231 | 1,628 | 4.151515 | 0.467532 | 0.062565 | 0.012513 | 0.016684 | 0.289885 | 0.183525 | 0.10219 | 0.10219 | 0.10219 | 0.10219 | 0 | 0.047148 | 0.19226 | 1,628 | 38 | 110 | 42.842105 | 0.682129 | 0.407248 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.166667 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85c403c65750bffb34aefa6e46026cbba51be791 | 533 | py | Python | Aulas python downloads/ex064.py | Osmair-riamso/AulasPython | 647f51182a46f34af6d9b5cff8511182c6cad4a4 | [
"MIT"
] | null | null | null | Aulas python downloads/ex064.py | Osmair-riamso/AulasPython | 647f51182a46f34af6d9b5cff8511182c6cad4a4 | [
"MIT"
] | null | null | null | Aulas python downloads/ex064.py | Osmair-riamso/AulasPython | 647f51182a46f34af6d9b5cff8511182c6cad4a4 | [
"MIT"
] | null | null | null | '''Crie um programa que leia vários números inteiros pelo teclado.
O programa só vai parar quando o usuário digitar o valor 999,
que é a condição de parada. No final, mostre quantos números
foram digitados e qual foi a soma entre eles (desconsiderando o flag). '''
print("Descubra a senha!")
n = cont = soma = 0
n = int(input("Digite um numero: " ))
while n != 999:
cont += 1
soma += n
n = int(input("Digite um numero: " ))
print("Voce digitou {} e a soma total é {} dos numeros digitados.".format(cont,soma))
| 29.611111 | 85 | 0.677298 | 86 | 533 | 4.197674 | 0.651163 | 0.027701 | 0.049862 | 0.083102 | 0.127424 | 0.127424 | 0 | 0 | 0 | 0 | 0 | 0.019139 | 0.21576 | 533 | 17 | 86 | 31.352941 | 0.844498 | 0.482176 | 0 | 0.25 | 0 | 0 | 0.41573 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a40f6ef49b65b50190c5c179cd273379d51e94de | 1,285 | py | Python | test/conftest.py | starcraftman/new-awesome | 59f779e2aa0b1aab2eca2aaf351f789f2833c4a9 | [
"BSD-3-Clause"
] | 6 | 2016-03-09T04:17:42.000Z | 2020-03-02T18:46:28.000Z | test/conftest.py | starcraftman/new-awesome | 59f779e2aa0b1aab2eca2aaf351f789f2833c4a9 | [
"BSD-3-Clause"
] | null | null | null | test/conftest.py | starcraftman/new-awesome | 59f779e2aa0b1aab2eca2aaf351f789f2833c4a9 | [
"BSD-3-Clause"
] | null | null | null | """
Global fixtures to be reused.
"""
from __future__ import absolute_import
import sys
import mock
import pytest
import test.common as tc
@pytest.fixture(scope='session', autouse=True)
def setup_test_bed(request):
    """
    Fixture sets up the testing environment for this web application.

    Session scope, executes before all tests.
    """
    # Register the finalizer before running setup so teardown still happens
    # even if env_setup raises part-way through.
    request.addfinalizer(tc.env_teardown)
    tc.env_setup()
@pytest.fixture()
def mock_print():
    """
    Fixture that replaces the builtin ``print`` with a mock for one test.

    Yields:
        The mock object standing in for ``print``.
    """
    # Fix: ``pytest.yield_fixture`` is deprecated and removed in modern pytest;
    # plain ``pytest.fixture`` has supported yield-style fixtures since 3.0.
    if sys.version_info < (3, 0):
        print_mod = '__builtin__.print'
    else:
        print_mod = 'builtins.print'
    with mock.patch(print_mod) as mock_obj:
        yield mock_obj
@pytest.fixture()
def mock_input():
    """
    Fixture that replaces the builtin ``input``/``raw_input`` with a mock.

    Yields:
        The mock object standing in for the input function.
    """
    # Fix: ``pytest.yield_fixture`` is deprecated and removed in modern pytest;
    # plain ``pytest.fixture`` has supported yield-style fixtures since 3.0.
    if sys.version_info < (3, 0):
        input_mod = '__builtin__.raw_input'
    else:
        input_mod = 'builtins.input'
    with mock.patch(input_mod) as mock_obj:
        yield mock_obj
# @pytest.yield_fixture(scope='function', autouse=True)
# def around_all_tests():
# """
# Executes before and after EVERY test.
# Can be helpful for tracking bugs impacting test bed.
# """
# # before
# yield
# # after
| 21.779661 | 69 | 0.659144 | 170 | 1,285 | 4.764706 | 0.429412 | 0.034568 | 0.066667 | 0.051852 | 0.303704 | 0.264198 | 0.264198 | 0.264198 | 0.264198 | 0.264198 | 0 | 0.004073 | 0.235798 | 1,285 | 58 | 70 | 22.155172 | 0.820774 | 0.352529 | 0 | 0.32 | 0 | 0 | 0.095425 | 0.027451 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.2 | 0 | 0.32 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4109a9f5fcfaf5a3730a40e38d6e3b2c4713fea | 1,167 | py | Python | vizsgaremek/test_con_feed_list.py | boat83/conduit | eaef1c9f34dc7909f42022237815b37405e1885c | [
"MIT"
] | 1 | 2021-08-16T15:37:15.000Z | 2021-08-16T15:37:15.000Z | vizsgaremek/test_con_feed_list.py | boat83/conduit | eaef1c9f34dc7909f42022237815b37405e1885c | [
"MIT"
] | null | null | null | vizsgaremek/test_con_feed_list.py | boat83/conduit | eaef1c9f34dc7909f42022237815b37405e1885c | [
"MIT"
] | null | null | null | def test_con_feed_list():
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
opt = Options()
opt.headless = True
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
driver.get('http://localhost:1667/#/login')
# bejelentkezes
driver.find_element_by_xpath('//fieldset//input[@placeholder="Email"]').send_keys('testuser1@example.com')
driver.find_element_by_xpath('//fieldset//input[@placeholder="Password"]').send_keys('Abcd123$')
driver.find_element_by_xpath('//form/button').click()
time.sleep(4)
driver.find_element_by_xpath('//div[@class="container"]//ul/li[4]/a').click()
time.sleep(3)
# cikkek cimeinek kigyujtese
my_articles = driver.find_elements_by_xpath('//div[@class="article-preview"]//h1')
list_of_feed = []
for row in my_articles:
list_of_feed.append(row.text + '\n')
print(list_of_feed)
# talalatok fileba mentese
with open('list_of_feed.txt', 'a') as x:
for i in list_of_feed:
x.write(i)
driver.close()
| 33.342857 | 110 | 0.689803 | 153 | 1,167 | 5.045752 | 0.522876 | 0.064767 | 0.064767 | 0.098446 | 0.186529 | 0.124352 | 0.124352 | 0.124352 | 0 | 0 | 0 | 0.012346 | 0.167095 | 1,167 | 34 | 111 | 34.323529 | 0.781893 | 0.055698 | 0 | 0 | 0 | 0 | 0.221311 | 0.15847 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.041667 | 0.166667 | 0 | 0.208333 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a41ba8d2cbc5f6d76cae7b7cd03a312bdb6d6150 | 1,046 | py | Python | payment/migrations/0010_auto__del_field_paymentpackage_name.py | rif/conference-registration | 2e83e98d68eff5c8ab6ae3a79db910e2e81c58ae | [
"BSD-3-Clause"
] | null | null | null | payment/migrations/0010_auto__del_field_paymentpackage_name.py | rif/conference-registration | 2e83e98d68eff5c8ab6ae3a79db910e2e81c58ae | [
"BSD-3-Clause"
] | null | null | null | payment/migrations/0010_auto__del_field_paymentpackage_name.py | rif/conference-registration | 2e83e98d68eff5c8ab6ae3a79db910e2e81c58ae | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration: drops the PaymentPackage.name
    # column. Do not hand-edit the frozen model state below.

    def forwards(self, orm):
        # Deleting field 'PaymentPackage.name'
        db.delete_column('payment_paymentpackage', 'name')

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'PaymentPackage.name'
        raise RuntimeError("Cannot reverse this migration. 'PaymentPackage.name' and its values cannot be restored.")

    # Frozen model state used by South's ORM while this migration runs.
    models = {
        'payment.paymentpackage': {
            'Meta': {'object_name': 'PaymentPackage'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'conference_price': ('django.db.models.fields.IntegerField', [], {}),
            'early': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['payment']
a420b5f8c2547bd492b541121354ca50d503baab | 10,273 | py | Python | code/palmnet/core/layer_replacer.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 1 | 2021-07-15T07:05:18.000Z | 2021-07-15T07:05:18.000Z | code/palmnet/core/layer_replacer.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 2 | 2021-07-15T06:12:47.000Z | 2021-07-16T10:05:36.000Z | code/palmnet/core/layer_replacer.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | null | null | null | from abc import abstractmethod, ABCMeta
import pickle
import keras
# from self.keras_module.models import Model
# from self.keras_module.layers import InputLayer
# from self.keras_module.layers import Dense, Conv2D
from palmnet.core.palminizable import Palminizable
from palmnet.utils import get_idx_last_layer_of_class, get_idx_first_layer_of_class
from skluc.utils import log_memory_usage, logger
from collections import defaultdict
import pathlib
class LayerReplacer(metaclass=ABCMeta):
    def __init__(self, keep_last_layer=False, keep_first_layer=False, dct_name_compression=None, path_checkpoint_file=None, only_dense=False, keras_module=keras, multi_step=False):
        """Base driver for replacing keras layers with compressed versions.

        Args:
            keep_last_layer: when True, the last Dense layer is left untouched.
            keep_first_layer: when True, the first Conv2D layer is left untouched.
            dct_name_compression: mapping layer name -> replacement description;
                a fresh dict is used when None.
            path_checkpoint_file: pickle file used to persist/restore the
                compression dict (None disables checkpointing).
            only_dense: when True, only Dense layers are replaced.
            keras_module: keras implementation whose layer classes are matched.
            multi_step: changes how compression entries are looked up during
                transform (name-prefix match instead of exact match).
        """
        self.keras_module = keras_module
        self.keep_last_layer = keep_last_layer
        self.keep_first_layer = keep_first_layer
        self.only_dense = only_dense
        self.dct_name_compression = dct_name_compression if dct_name_compression is not None else dict()
        self.path_checkpoint_file = path_checkpoint_file  # type: pathlib.Path
        # Bookkeeping: which layers were replaced and the old<->new name maps.
        self.dct_bool_replaced_layers = defaultdict(lambda: False)
        self.dct_old_name_new_name = defaultdict(lambda: None)
        self.dct_new_name_old_name = defaultdict(lambda: None)
        self._init_layer_classes()  # set the classes to be recognised to replace
        self.multi_step = multi_step
def __refresh_and_apply_layer_to_input(self, layer, layer_inputs):
new_fresh_layer = layer.__class__(**layer.get_config())
old_layer_weights = layer.get_weights()
x = new_fresh_layer(layer_inputs)
new_fresh_layer.set_weights(old_layer_weights)
return x, new_fresh_layer
    @abstractmethod
    def _apply_replacement(self, layer):
        """Compute the compressed replacement for ``layer``.

        Subclasses return a description of the replacement (a dict in most
        cases -- see fit_one_layer) or None when the layer is left untouched;
        the result is cached in ``self.dct_name_compression`` keyed by the
        layer name.
        """
        pass
    def load_dct_name_compression(self):
        """Restore ``self.dct_name_compression`` from the checkpoint pickle.

        NOTE(review): ``pickle.load`` can execute arbitrary code -- only load
        checkpoint files produced by this tool.
        """
        with open(str(self.path_checkpoint_file), 'rb') as rb_file:
            self.dct_name_compression = pickle.load(rb_file)
        # Backward compatibility: older checkpoints stored a whole Palminizable
        # object instead of the bare layer-name -> factorization mapping.
        if type(self.dct_name_compression) == Palminizable:
            self.dct_name_compression = self.dct_name_compression.sparsely_factorized_layers
def save_dct_name_compression(self):
if self.path_checkpoint_file is None:
return
with open(str(self.path_checkpoint_file), 'wb') as wb_file:
pickle.dump(self.dct_name_compression, wb_file)
def fit_transform(self, model):
self.fit(model)
return self.transform(model)
def fit_one_layer(self, layer):
if layer.name not in self.dct_name_compression:
dct_replacement = self._apply_replacement(layer)
# should return dict in most case but need to be backward compatible with older implementation of PALM
self.dct_name_compression[layer.name] = dct_replacement
self.save_dct_name_compression()
else:
logger.warning("skip layer {} because already in dict".format(layer.name))
def fit(self, model):
for layer in model.layers:
self.fit_one_layer(layer)
def _init_layer_classes(self):
self.dense_layer_class = self.keras_module.layers.Dense
self.conv_layer_class = self.keras_module.layers.Conv2D
def transform_one_layer(self, layer, idx_layer, layer_inputs):
if not self.multi_step:
sparse_factorization = self.dct_name_compression[layer.name]
else:
try:
sparse_factorization = self.dct_name_compression[layer.name]
except KeyError:
# print(layer.name)
# print(self.dct_name_compression.keys())
# print([k for k, v in self.dct_name_compression.items() if k.startswith(layer.name + "_-_")])
sparse_factorization = next(v for k, v in self.dct_name_compression.items() if k.startswith(layer.name + "_-_"))
# exit()
# adapted to the palminized case... not very clean but OK
bool_find_modif = (sparse_factorization != None and sparse_factorization != (None, None))
logger.info('Prepare layer {}'.format(layer.name))
bool_only_dense = not isinstance(layer, self.keras_module.layers.Dense) and self.only_dense
bool_last_layer = idx_layer == self.idx_last_dense_layer and self.keep_last_layer
bool_first_layer = idx_layer == self.idx_first_conv_layer and self.keep_first_layer
keep_this_layer = bool_only_dense or bool_last_layer or bool_first_layer
if bool_find_modif and not keep_this_layer:
# if there is a replacement available and not (it is the last layer and we want to keep it as is)
# create new layer
if isinstance(layer, self.dense_layer_class):
logger.debug("Dense layer treatment")
replacing_layer, replacing_weights, bool_modified = self._replace_dense(layer, sparse_factorization)
elif isinstance(layer, self.conv_layer_class):
logger.debug("Conv2D layer treatment")
replacing_layer, replacing_weights, bool_modified = self._replace_conv2D(layer, sparse_factorization)
else:
raise ValueError("Unsupported layer class")
if bool_modified: # then replace layer with compressed layer
try:
replacing_layer.name = '{}_-_{}'.format(layer.name, replacing_layer.name)
except AttributeError:
logger.warning("Found layer with property name unsettable. try _name instead.")
replacing_layer._name = '{}_-_{}'.format(layer.name, replacing_layer.name)
x = replacing_layer(layer_inputs)
self.dct_old_name_new_name[layer.name] = replacing_layer.name
self.dct_new_name_old_name[replacing_layer.name] = layer.name
self.dct_bool_replaced_layers[layer.name] = True
self._set_weights_to_layer(replacing_layer, replacing_weights)
logger.info('Layer {} modified into {}'.format(layer.name, replacing_layer.name))
else:
x, new_fresh_layer = self.__refresh_and_apply_layer_to_input(layer, layer_inputs)
logger.info('Layer {} unmodified'.format(new_fresh_layer.name))
else:
x, new_fresh_layer = self.__refresh_and_apply_layer_to_input(layer, layer_inputs)
# x = layer(layer_inputs)
logger.info('Layer {} unmodified'.format(new_fresh_layer.name))
return x
def prepare_transform(self, model):
if not isinstance(model.layers[0], self.keras_module.layers.InputLayer):
model = self.keras_module.models.Model(inputs=model.input, outputs=model.output)
# else:
# # this is important because we also want the InputLayer to be reinitialized
# input_shape = model.input_shape
# model.layers.pop(0)
# newInput = self.keras_module.layers.Input(shape=input_shape[1:])
# newOutput = model(newInput)
# model2 = self.keras_module.models.Model(input=newInput, output=newOutput)
network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}
input_shape = model.input_shape
newInput = self.keras_module.layers.Input(shape=input_shape[1:])
# Set the output tensor of the input layer
network_dict['new_output_tensor_of'].update(
{model.layers[0].name: newInput})
for i, layer in enumerate(model.layers):
# each layer is set as `input` layer of all its outbound layers
for node in layer._outbound_nodes:
outbound_layer_name = node.outbound_layer.name
network_dict['input_layers_of'][outbound_layer_name].append(layer.name)
self.idx_last_dense_layer = get_idx_last_layer_of_class(model, self.keras_module.layers.Dense) if self.keep_last_layer else -1
self.idx_last_dense_layer -= 1
self.idx_first_conv_layer = get_idx_first_layer_of_class(model, self.keras_module.layers.Conv2D) if self.keep_first_layer else -1
self.idx_first_conv_layer -= 1
return model, network_dict
def transform(self, model):
model, network_dict = self.prepare_transform(model)
for i, layer in enumerate(model.layers[1:]):
log_memory_usage("Before layer {}".format(layer.name))
# get all layers input
layer_inputs = [network_dict['new_output_tensor_of'][curr_layer_input] for curr_layer_input in network_dict['input_layers_of'][layer.name]]
if len(layer_inputs) == 1:
layer_inputs = layer_inputs[0]
x = self.transform_one_layer(layer, i, layer_inputs)
network_dict['new_output_tensor_of'].update({layer.name: x})
# model = self.keras_module.models.Model(inputs=model.inputs, outputs=x)
model = self.keras_module.models.Model(inputs=network_dict['new_output_tensor_of'][model.layers[0].name], outputs=x)
return model
def have_been_replaced(self, layer_name):
return self.dct_bool_replaced_layers[layer_name]
def get_replaced_layer_name(self, new_layer_name):
return self.dct_new_name_old_name[new_layer_name]
def get_replacing_layer_name(self, old_layer_name):
return self.dct_old_name_new_name[old_layer_name]
@abstractmethod
def _replace_conv2D(self, layer, dct_compression):
"""
Implementation of this method should return the triplet:
replacing_weights: list of np.ndarray
replacing_layer: self.keras_module.layers.Layer
bool_replaced: tells if the layer should be replaced
:param layer:
:param dct_compression:
:return:
"""
pass
@abstractmethod
def _replace_dense(self, layer, dct_compression):
"""
Implementation of this method should return the triplet:
replacing_weights: list of np.ndarray
replacing_layer: self.keras_module.layers.Layer
bool_replaced: tells if the layer should be replaced
:param layer:
:param dct_compression:
:return:
"""
pass
@abstractmethod
def _set_weights_to_layer(self, replacing_layer, replacing_weights):
pass
| 44.280172 | 180 | 0.682566 | 1,332 | 10,273 | 4.943694 | 0.148649 | 0.053303 | 0.051936 | 0.043432 | 0.439028 | 0.35262 | 0.276689 | 0.236598 | 0.179347 | 0.163857 | 0 | 0.00255 | 0.23664 | 10,273 | 231 | 181 | 44.471861 | 0.837159 | 0.166164 | 0 | 0.148936 | 0 | 0 | 0.050494 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0.028369 | 0.056738 | 0.021277 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a422fb3416879c75a86139388d6af9e719ed842b | 1,469 | py | Python | kivyoav/emotion_feedback.py | yglazner/guess_the_bless | 99176759b3f3fb6a4fe0b4d32f70be582a0640af | [
"MIT"
] | 1 | 2017-07-18T10:25:43.000Z | 2017-07-18T10:25:43.000Z | kivyoav/emotion_feedback.py | yglazner/guess_the_bless | 99176759b3f3fb6a4fe0b4d32f70be582a0640af | [
"MIT"
] | null | null | null | kivyoav/emotion_feedback.py | yglazner/guess_the_bless | 99176759b3f3fb6a4fe0b4d32f70be582a0640af | [
"MIT"
] | null | null | null | '''
Created on Jun 29, 2017
@author: yglazner
'''
from kivy.uix.widget import Widget
from kivy.properties import *
from kivy.uix.boxlayout import BoxLayout
from kivy.base import runTouchApp
from kivy.uix.slider import Slider
Slider  # no-op reference; presumably kept so linters do not flag the Slider import as unused -- TODO confirm
class EmotionFeedBack(Widget):
    '''
    EmotionFeedBack - a widget that lets the user express its emotion by swiping up or down.

    The current emotion is exposed through ``level`` (clamped to [0, 1]); each swipe
    across the full widget extent changes the level by up to 20x the swiped fraction.

    :param sources: optional list of image sources backing the feedback levels
    '''
    # Current emotion level in [0, 1]; starts at the neutral midpoint.
    level = NumericProperty(0.5)
    # Axis along which swipes are interpreted.
    orientation = OptionProperty('horizontal', options=(
        'horizontal', 'vertical'))
    sources = ListProperty([])

    def __init__(self, sources=None, **kw):
        '''
        :param sources: list of image sources; defaults to an empty list.
        '''
        # `None` sentinel instead of a mutable default argument, so instances
        # never share (and mutate) the same default list.
        self.sources = [] if sources is None else sources
        super(EmotionFeedBack, self).__init__(**kw)

    @property
    def vertical(self):
        """True when swipes are interpreted along the vertical axis."""
        return self.orientation == 'vertical'

    def on_touch_down(self, touch):
        # Remember where the touch started so on_touch_move can compute deltas.
        touch.ud['pos'] = touch.pos
        return super(EmotionFeedBack, self).on_touch_down(touch)

    def on_touch_move(self, touch):
        sx, sy = touch.ud['pos']
        # Store the new position so the next move event is measured incrementally.
        touch.ud['pos'] = x, y = touch.pos
        ts, t = (sy, y) if self.vertical else (sx, x)
        size = self.height if self.vertical else self.width
        # Scale the swiped fraction by 20 so a short swipe spans the whole range.
        change = (t - ts)*20.0 / size
        self.level += change
        # Clamp to the valid [0, 1] range.
        self.level = max(min(self.level, 1.0), 0)
        print (self.level)  # NOTE(review): debug leftover; consider removing or using logging
        return super(EmotionFeedBack, self).on_touch_move(touch)
# Manual smoke test: run this module directly to try the widget interactively.
if __name__ == '__main__':
    runTouchApp(EmotionFeedBack())
a4233cffbf5beccc2cc2ebc7c2720114c2cce820 | 1,793 | py | Python | solutions/day12/test_lib.py | benjaminarjun/AdventOfCode2020 | b9ca2f5c6121c401eb79911dbbbd0d3188f38034 | [
"MIT"
] | 1 | 2020-12-04T17:57:24.000Z | 2020-12-04T17:57:24.000Z | solutions/day12/test_lib.py | benjaminarjun/AdventOfCode2020 | b9ca2f5c6121c401eb79911dbbbd0d3188f38034 | [
"MIT"
] | null | null | null | solutions/day12/test_lib.py | benjaminarjun/AdventOfCode2020 | b9ca2f5c6121c401eb79911dbbbd0d3188f38034 | [
"MIT"
] | null | null | null | import unittest
from .results import NavInstruction, Position, Ship
class TestNavInstruction(unittest.TestCase):
    """Tests for parsing navigation instructions from their string form."""

    def test_nav_instruction_from_str(self):
        parsed = NavInstruction.from_str('F10')
        self.assertEqual('F', parsed.direction)
        self.assertEqual(10, parsed.magnitude)
class TestPosition(unittest.TestCase):
    """Tests for the Position value object."""

    def _get_position(self):
        # Shared fixture used by several tests below.
        return Position(3, 5)

    def test_init(self):
        self.assertEqual(Position(3, 5), self._get_position())

    def test_init_default(self):
        origin = Position()
        self.assertEqual(origin.x, 0)
        self.assertEqual(origin.y, 0)

    def test_move_sequence(self):
        pos = self._get_position()
        for step in (NavInstruction('N', 3), NavInstruction('W', 8)):
            pos.move(step)
        self.assertEqual(Position(-5, 8), pos)

    def test_position_addition(self):
        total = Position(1, 2) + Position(3, 4)
        self.assertEqual(Position(4, 6), total)
class TestShipNavigation(unittest.TestCase):
    """End-to-end navigation tests using the example instruction sequence."""

    @property
    def nav_instructions(self):
        # The sample course from the puzzle description.
        raw = ('F10', 'N3', 'F7', 'R90', 'F11')
        return [NavInstruction.from_str(item) for item in raw]

    def test_ship_navigation(self):
        ship = Ship()
        ship.navigate(self.nav_instructions)
        self.assertEqual(Position(17, -8), ship.position)

    def test_ship_waypoint_navigation(self):
        ship = Ship(wpt_nav=True, init_wpt=Position(10, 1))
        ship.navigate(self.nav_instructions)
        self.assertEqual(Position(214, -72), ship.position)
| 24.902778 | 53 | 0.597323 | 207 | 1,793 | 5.014493 | 0.318841 | 0.130058 | 0.066474 | 0.023121 | 0.196532 | 0.090559 | 0.090559 | 0 | 0 | 0 | 0 | 0.033858 | 0.29169 | 1,793 | 71 | 54 | 25.253521 | 0.783465 | 0 | 0 | 0.115385 | 0 | 0 | 0.010597 | 0 | 0 | 0 | 0 | 0 | 0.173077 | 1 | 0.173077 | false | 0 | 0.038462 | 0.038462 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4239e95f4d189e6430c6fd1dbf406378ad3126c | 4,507 | py | Python | scripts used in article/clustering_evaluation.py | BoyanZhou/starstr | 03280b0b280ef5be351b7ff285e90541baed3d63 | [
"Apache-2.0"
] | null | null | null | scripts used in article/clustering_evaluation.py | BoyanZhou/starstr | 03280b0b280ef5be351b7ff285e90541baed3d63 | [
"Apache-2.0"
] | 1 | 2018-09-06T16:50:46.000Z | 2018-09-06T16:50:46.000Z | scripts used in article/clustering_evaluation.py | BoyanZhou/starstr | 03280b0b280ef5be351b7ff285e90541baed3d63 | [
"Apache-2.0"
] | null | null | null | import sys
import numpy as np
# Evaluate star-clustering results against groups derived from a nwk tree, over a
# range of branch-length thresholds.
# Usage: python clustering_evaluation.py <N> <A> <B>
#   N: simulation repeat number, A/B: start/end (exclusive) of threshold range in 1/1000 units.
# Reads:  simu_<t>_grouped_by_nwk, simu<N>_clustering.star_cluster_result.txt
# Writes: simu<N>_clustering_<t>_evaluate.txt, simu<N>_method_evaluate_final.txt
A = int(sys.argv[2])  # start of the tree threshold
B = int(sys.argv[3])  # end of the tree threshold
N = int(sys.argv[1])  # repeat number now
threshold_list = [i/1000.0 for i in range(A, B)]

# compare cluster result with the nwk_tree
for i in threshold_list:
    branch_length_threshold = float(i)
    # read the file grouped by nwk
    group_divide_name = "simu_%s_grouped_by_nwk" % str(branch_length_threshold)
    group_divide = open(group_divide_name, "r")
    indi_divided = []
    line_num = 0
    indi_num = 0
    for line in group_divide:
        line_num += 1
        # Every second line holds a ";"-separated list of individuals
        # (with a trailing empty field produced by the final ";").
        if (line_num % 2) == 0:
            indi_list = line.strip().split(";")
            indi_list.pop()  # drop the empty field after the trailing ";"
            indi_num += len(indi_list)
            indi_divided.append(indi_list)
    group_divide.close()
    # read the file of clustering result
    group_cluster_name = "simu%s_clustering.star_cluster_result.txt" % str(N)
    group_cluster = open(group_cluster_name, "r")  # cluster result by my method
    divide_evaluate_name = "simu%s_clustering_%s_evaluate.txt" % (str(N), str(branch_length_threshold))
    divide_evaluate = open(divide_evaluate_name, "w")
    n = 0
    for line2 in group_cluster:
        n += 1
        output = []
        col = line2.strip().split("\t")
        output.append(col[0])  # group identifier
        output.append(col[1])  # number of individuals in the cluster
        indi_list2 = col[2].split(";")
        if n == 1:
            # Header line: one column per nwk-derived group.
            output1 = "group\tindi_no\t"
            for j in range(1, (int(line_num/2))):
                output1 += str(j)
                output1 += "\t"
            output1 += str(line_num/2)
            divide_evaluate.write(output1 + "\n")
        # NOTE(review): this inner loop reuses `i`, shadowing the outer threshold
        # loop variable; harmless here (the for-loop rebinds it) but confusing.
        for i in indi_divided:
            # Overlap between this cluster and each nwk group.
            a = len(list(set(indi_list2).intersection(set(i))))
            output.append(str(a))
        divide_evaluate.write("\t".join(output) + "\n")
    group_cluster.close()
    divide_evaluate.close()

# summary results of comparison
method_evaluate_final_name = "simu%s_method_evaluate_final.txt" % str(N)
method_evaluate_final = open(method_evaluate_final_name, "w")
col_name_write = False
for i in threshold_list:
    branch_length_threshold = float(i)
    divide_evaluate_name = "simu%s_clustering_%s_evaluate.txt" % (str(N), str(branch_length_threshold))
    divide_evaluate = open(divide_evaluate_name, "r")
    group_divided_evaluation = []
    group_serial_number = []
    group_indi_number = []
    for line in divide_evaluate:
        col = line.strip().split("\t")
        group_divided_evaluation.append(col)
    group_divided_evaluation = np.array(group_divided_evaluation)  # the array of group divided
    nrow = int(group_divided_evaluation.shape[0])  # get the total number of row
    ncol = int(group_divided_evaluation.shape[1])  # get the total number of col
    # output the title line
    if not col_name_write:
        # Each group contributes two summary columns (row rate and column rate),
        # so every header entry is written twice.
        group_serial_number = group_divided_evaluation[1:, 0]
        group_serial_number_2 = []
        for q in group_serial_number:
            group_serial_number_2.append(q)
            group_serial_number_2.append(q)
        group_indi_number = group_divided_evaluation[1:, 1]
        group_indi_number_2 = []
        for q in group_indi_number:
            group_indi_number_2.append(q)
            group_indi_number_2.append(q)
        method_evaluate_final.write("\t".join(group_serial_number_2) + "\n")
        method_evaluate_final.write("\t".join(group_indi_number_2) + "\n")
        col_name_write = True
    line_rate = []
    for j in range(1, nrow):
        row_data = group_divided_evaluation[j, 2:]
        row_data = [float(k) for k in row_data]
        row_data_max = max(row_data)  # get the max value in the row (clustering result by my method)
        row_data_pos = row_data.index(row_data_max)  # get the position of max value
        row_data_rate = "%.4f" % (row_data_max/sum(row_data))  # inclusion rate in the row
        col_data = group_divided_evaluation[1:, row_data_pos + 2]
        col_data = [float(k) for k in col_data]
        col_data_max = max(col_data)  # get the max value in the col (clustering result by my method)
        col_data_pos = col_data.index(col_data_max)  # get the position of max value
        col_data_rate = "%.4f" % (col_data_max/sum(col_data))  # inclusion rate in the col
        line_rate.append(row_data_rate)
        line_rate.append(col_data_rate)
    method_evaluate_final.write("\t".join(line_rate) + "\n")
    divide_evaluate.close()
method_evaluate_final.close()
| 38.194915 | 103 | 0.652319 | 658 | 4,507 | 4.170213 | 0.165654 | 0.033163 | 0.080175 | 0.026239 | 0.351312 | 0.252915 | 0.21137 | 0.135569 | 0.112974 | 0.112974 | 0 | 0.014943 | 0.242734 | 4,507 | 117 | 104 | 38.521368 | 0.789042 | 0.127579 | 0 | 0.129032 | 0 | 0 | 0.055201 | 0.041145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021505 | 0 | 0.021505 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a423ebdcfa4aac7d6ec8a3988d72013b56326556 | 236 | py | Python | celery/worker_a/__init__.py | tim-barnes/lang-python | 3dbbc7f38cec598e32bd1a06827246dcab3a0ced | [
"MIT"
] | 1 | 2021-06-16T23:43:27.000Z | 2021-06-16T23:43:27.000Z | celery/worker_a/__init__.py | tim-barnes/lang-python | 3dbbc7f38cec598e32bd1a06827246dcab3a0ced | [
"MIT"
] | null | null | null | celery/worker_a/__init__.py | tim-barnes/lang-python | 3dbbc7f38cec598e32bd1a06827246dcab3a0ced | [
"MIT"
] | null | null | null | from celery import Celery
app = Celery(__name__, broker="redis://redis//")
# app.conf.task_routes = {
# 'worker_a.pulse': {'queue': 'worker_a'}
# }
@app.task
def pulse(i):
print(f"Pulse: {i} ({__name__})")
return i + 1000
| 19.666667 | 48 | 0.618644 | 33 | 236 | 4.090909 | 0.606061 | 0.103704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020619 | 0.177966 | 236 | 11 | 49 | 21.454545 | 0.675258 | 0.29661 | 0 | 0 | 0 | 0 | 0.234568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4254aa2b6ed63ccd6175aec14c799cb09949454 | 960 | py | Python | main.py | MobinNesari81/Syquation_Solver | 9bfa5963d6ecdf0a529603daaee56900dc4b60a9 | [
"MIT"
] | 1 | 2022-01-11T13:39:26.000Z | 2022-01-11T13:39:26.000Z | main.py | MobinNesari81/Syquation_Solver | 9bfa5963d6ecdf0a529603daaee56900dc4b60a9 | [
"MIT"
] | null | null | null | main.py | MobinNesari81/Syquation_Solver | 9bfa5963d6ecdf0a529603daaee56900dc4b60a9 | [
"MIT"
] | null | null | null | # Main file which solve equation
import processor
coefficient_rows = int(input("Please enter coefficient matrix row numbers: "))
coefficient_columns = int(input("Please enter coefficient matrix column numbers: "))
coefficient_matrix = [[] for _ in range(coefficient_rows)]
print("Please enter coefficients in one row then another one: ")
for i in range(coefficient_rows):
coefficient_matrix[i] = [int(k) for k in input().split()]
variables_numbers = int(input("Please enter variables numbers: "))
variables_matrix = []
for i in range(variables_numbers):
variables_matrix.append(input(f"Please enter {i+1} variable's symbol: "))
constants_matrix = [[0] for _ in range(coefficient_rows)]
for i in range(coefficient_rows):
constants_matrix[i][0] = int(input(f"Please enter {i+1} constant: "))
print('-' * 10)
print("Answer: ")
answer = processor.equation_solver(coefficient_matrix, constants_matrix)
processor.answer_printer(variables_matrix, answer)
| 48 | 84 | 0.760417 | 132 | 960 | 5.371212 | 0.325758 | 0.093089 | 0.101551 | 0.124118 | 0.299013 | 0.228491 | 0 | 0 | 0 | 0 | 0 | 0.007101 | 0.119792 | 960 | 19 | 85 | 50.526316 | 0.831953 | 0.03125 | 0 | 0.111111 | 0 | 0 | 0.275862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42671651e3b71dc6f8347faea546f685c85f61f | 9,000 | py | Python | falcon_heavy/core/types/formats.py | NotJustAToy/falcon-heavy | 2e96f649daafc2707a01e38f403f1ce4268f4629 | [
"Apache-2.0"
] | 21 | 2020-01-02T10:44:42.000Z | 2022-02-11T14:27:05.000Z | falcon_heavy/core/types/formats.py | NotJustAToy/falcon-heavy | 2e96f649daafc2707a01e38f403f1ce4268f4629 | [
"Apache-2.0"
] | 2 | 2020-02-13T21:06:56.000Z | 2020-09-27T16:47:25.000Z | falcon_heavy/core/types/formats.py | NotJustAToy/falcon-heavy | 2e96f649daafc2707a01e38f403f1ce4268f4629 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2020 Not Just A Toy Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import uuid
import base64
import binascii
import datetime
import typing as ty
import rfc3987
import rfc3339
from strict_rfc3339 import rfc3339_to_timestamp, InvalidRFC3339Error
from falcon_heavy.utils import force_str, force_bytes
from .base import AbstractConvertible, BaseType, ValidationResult, Messages
from .primitive import StringType, IntegerType
from .enums import ConvertibleEntity
from .exceptions import SchemaError
from .errors import Error
from .path import Path
from .utils import is_file_like
__all__ = (
'DateType',
'DateTimeType',
'RegexType',
'URIType',
'EmailType',
'Int32Type',
'Int64Type',
'UUIDType',
'ByteType',
'BinaryType',
)
class DateType(AbstractConvertible[ty.Union[str, datetime.date]]):
"""Date type
Converts RFC3339 full-date string into python date object and vice versa
:param subtype: basic converter
"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid RFC3339 full-date"
}
__slots__ = ('subtype', )
def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
super(DateType, self).__init__(**kwargs)
self.subtype = subtype
def convert(
self,
value: ty.Any,
path: Path,
*args: ty.Any,
entity: ty.Optional[ConvertibleEntity] = None,
**context: ty.Any
) -> ty.Optional[ty.Union[str, datetime.date]]:
if isinstance(value, datetime.date) and entity == ConvertibleEntity.RESPONSE and value is not None:
value = value.isoformat()
result = self.subtype.convert(value, path, *args, entity=entity, **context)
if entity == ConvertibleEntity.RESPONSE:
return result
if result is None:
return None
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise SchemaError(Error(path, self.messages['format']))
class DateTimeType(AbstractConvertible[ty.Union[str, datetime.datetime]]):
"""Datetime type
Converts RFC3339 date-time string into python datetime object and vice versa
:param subtype: basic converter
"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid RFC3339 date-time"
}
__slots__ = ('subtype', )
def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
super(DateTimeType, self).__init__(**kwargs)
self.subtype = subtype
def convert(
self,
value: ty.Any,
path: Path,
*args: ty.Any,
entity: ty.Optional[ConvertibleEntity] = None,
**context: ty.Any
) -> ty.Optional[ty.Union[str, datetime.datetime]]:
if isinstance(value, datetime.datetime) and entity == ConvertibleEntity.RESPONSE and value is not None:
value = rfc3339.rfc3339(value)
result = self.subtype.convert(value, path, *args, entity=entity, **context)
if entity == ConvertibleEntity.RESPONSE:
return result
if result is None:
return None
try:
return datetime.datetime.fromtimestamp(rfc3339_to_timestamp(value))
except InvalidRFC3339Error:
raise SchemaError(Error(path, self.messages['format']))
class RegexType(AbstractConvertible[ty.Union[str, ty.Pattern]]):
"""Regex type
:param subtype: basic converter
"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid regular expression"
}
__slots__ = ('subtype', )
def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
super(RegexType, self).__init__(**kwargs)
self.subtype = subtype
def convert(
self,
value: ty.Any,
path: Path,
*args: ty.Any,
entity: ty.Optional[ConvertibleEntity] = None,
**context: ty.Any
) -> ty.Optional[ty.Union[str, ty.Pattern]]:
result = self.subtype.convert(value, path, *args, entity=entity, **context)
if entity == ConvertibleEntity.RESPONSE:
return result
if result is None:
return None
try:
return re.compile(result)
except (TypeError, re.error):
raise SchemaError(Error(path, self.messages['format']))
class URIType(StringType):
"""URI type"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid URI according to RFC3987"
}
__slots__ = ()
def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
try:
rfc3987.parse(value, rule='URI')
except ValueError:
return self.messages['format']
return None
EMAIL_PATTERN = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$')
class EmailType(StringType):
"""Email type"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid email address according to RFC5322"
}
__slots__ = ()
def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if not EMAIL_PATTERN.match(value):
return self.messages['format']
return None
class Int32Type(IntegerType):
"""Int32 type"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid Int32"
}
__slots__ = ()
def validate_format(self, value: int, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if value < -2147483648 or value > 2147483647:
return self.messages['format']
return None
class Int64Type(IntegerType):
"""Int64 type"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid Int64"
}
__slots__ = ()
def validate_format(self, value: int, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if value < -9223372036854775808 or value > 9223372036854775807:
return self.messages['format']
return None
UUID_PATTERN = re.compile(
'^'
'[a-f0-9]{8}-'
'[a-f0-9]{4}-'
'[1345][a-f0-9]{3}-'
'[a-f0-9]{4}'
'-[a-f0-9]{12}'
'$'
)
class UUIDType(StringType):
"""UUID type"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not a valid UUID"
}
__slots__ = ()
def _cast(
self,
value: ty.Any,
path: Path,
*args: ty.Any,
entity: ty.Optional[ConvertibleEntity] = None,
**context: ty.Any
) -> ty.Any:
if entity == ConvertibleEntity.RESPONSE and isinstance(value, uuid.UUID):
return str(value)
return value
def validate_format(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if not UUID_PATTERN.match(value):
return self.messages['format']
return None
class ByteType(AbstractConvertible[ty.Union[str, ty.BinaryIO]]):
"""Byte type
:param subtype: basic converter
"""
MESSAGES: ty.ClassVar[Messages] = {
'format': "Is not base64 encoded"
}
__slots__ = ('subtype', )
def __init__(self, subtype: StringType, **kwargs: ty.Any) -> None:
super(ByteType, self).__init__(**kwargs)
self.subtype = subtype
def convert(
self,
value: ty.Any,
path: Path,
*args: ty.Any,
entity: ty.Optional[ConvertibleEntity] = None,
**context: ty.Any
) -> ty.Optional[ty.Union[str, ty.BinaryIO]]:
if entity == ConvertibleEntity.RESPONSE and value is not None:
value = force_str(base64.b64encode(force_bytes(value)), encoding='ascii')
result = self.subtype.convert(value, path, *args, entity=entity, **context)
if entity == ConvertibleEntity.REQUEST and result is not None:
try:
return io.BytesIO(base64.b64decode(result, validate=True))
except binascii.Error:
raise SchemaError(Error(path, self.messages['format']))
return result
class BinaryType(BaseType[ty.IO]):
"""Binary type"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a file-like object"
}
__slots__ = ()
def _check_type(self, value: ty.Any, path: Path, *args: ty.Any, **context: ty.Any) -> bool:
return is_file_like(value)
| 26.162791 | 111 | 0.613333 | 1,024 | 9,000 | 5.290039 | 0.203125 | 0.03046 | 0.018276 | 0.047997 | 0.602178 | 0.55455 | 0.539782 | 0.532583 | 0.492893 | 0.477386 | 0 | 0.028502 | 0.267111 | 9,000 | 343 | 112 | 26.239067 | 0.792753 | 0.106111 | 0 | 0.502392 | 0 | 0.004785 | 0.082137 | 0.006047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07177 | false | 0 | 0.086124 | 0.004785 | 0.416268 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4292f81184da60467cc60d71bcf08055c6aed74 | 1,008 | py | Python | Desafio086 & 87 - Matriz 3x3.py | tmoura1981/Python_Exercicios | c873e2758dfd9058d2c2d83b5b38b522c6264029 | [
"MIT"
] | 1 | 2021-11-25T11:19:59.000Z | 2021-11-25T11:19:59.000Z | Desafio086 & 87 - Matriz 3x3.py | tmoura1981/Python_Exercicios | c873e2758dfd9058d2c2d83b5b38b522c6264029 | [
"MIT"
] | null | null | null | Desafio086 & 87 - Matriz 3x3.py | tmoura1981/Python_Exercicios | c873e2758dfd9058d2c2d83b5b38b522c6264029 | [
"MIT"
] | null | null | null | valores = [[], [], [],
[], [], [],
[], [], []]
num = linha = coluna = pos = soma = soma_ter_col = maior = 0
titulo = 'Matriz 3x3'
print(titulo.center(50, '='))
for v in range(9): # 9 valores da matriz
num = int(input(f'Linha[{linha}] Coluna[{coluna}]: '))
valores[pos].append(num)
pos += 1
coluna += 1
if coluna == 3: # após 3ª coluna
coluna = 0 # volta à 1ª coluna
linha += 1 # e desce à próxima linha de baixo
if num % 2 == 0:
soma += num
if coluna == 0:
soma_ter_col = valores[2] + valores[5] + valores[8]
print('_'*30)
print(f' { valores[0] } { valores[1] } { valores[2] } ')
print(f' { valores[3] } { valores[4] } { valores[5] } ')
print(f' { valores[6] } { valores[7] } { valores[8] } ')
print('_'*30)
print(f'-> Soma de todos os pares: {soma}')
print(f'-> Soma da 3ª coluna: {sum(soma_ter_col)}')
print(f'-> Maior nº da 2ª linha: {max(valores[3:6])}') # pega-se o maior valor da 2ª linha
| 37.333333 | 91 | 0.529762 | 147 | 1,008 | 3.578231 | 0.387755 | 0.068441 | 0.057034 | 0.057034 | 0.079848 | 0.079848 | 0 | 0 | 0 | 0 | 0 | 0.051913 | 0.27381 | 1,008 | 26 | 92 | 38.769231 | 0.666667 | 0.118056 | 0 | 0.076923 | 0 | 0 | 0.371461 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.346154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42c0be3c130f39da6d363c285104e88b9933080 | 911 | py | Python | dags/word_count_dag.py | firasesbai/airflow-spark-jobs | 1925a20c212185843fcfb4c9349419bf8c418662 | [
"Apache-2.0"
] | null | null | null | dags/word_count_dag.py | firasesbai/airflow-spark-jobs | 1925a20c212185843fcfb4c9349419bf8c418662 | [
"Apache-2.0"
] | null | null | null | dags/word_count_dag.py | firasesbai/airflow-spark-jobs | 1925a20c212185843fcfb4c9349419bf8c418662 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
spark_master = "spark://spark-master:7077"
input_path = "/usr/local/spark/data/ebook"
now = datetime.now()
with DAG(
dag_id='spark_word_count',
schedule_interval=None,
start_date=datetime(now.year, now.month, now.day),
catchup=False,
tags=['spark']
) as dag:
start = DummyOperator(task_id="start")
spark_job = SparkSubmitOperator(
application='/usr/local/spark/jobs/word_count_job.py',
conn_id='spark_local',
task_id='word_count',
verbose=1,
conf={"spark.master":spark_master, "job.local.dir":"/usr/local/spark/data/"},
application_args=[input_path]
)
end = DummyOperator(task_id="end")
start >> spark_job >> end
| 27.606061 | 85 | 0.706915 | 119 | 911 | 5.226891 | 0.436975 | 0.07074 | 0.062701 | 0.054662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.165752 | 911 | 32 | 86 | 28.46875 | 0.811842 | 0 | 0 | 0 | 0 | 0 | 0.206367 | 0.12404 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42c53b95de25e5c3963b4ce490f26df00cbe658 | 1,446 | py | Python | faker/providers/phone_number/en_NG/__init__.py | djunehor/faker-1 | e478c437fe3c05b02b7deffa43252f622ea45732 | [
"MIT"
] | null | null | null | faker/providers/phone_number/en_NG/__init__.py | djunehor/faker-1 | e478c437fe3c05b02b7deffa43252f622ea45732 | [
"MIT"
] | null | null | null | faker/providers/phone_number/en_NG/__init__.py | djunehor/faker-1 | e478c437fe3c05b02b7deffa43252f622ea45732 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
    """Phone number provider for the ``en_NG`` (Nigeria) locale."""

    formats = (
        # National dialing (trunk prefix 0).
        '0{{area_code}}#######',
        '0{{area_code}} ### ####',
        '0{{area_code}}-###-####',
        # International dialing (country code 234, with and without '+').
        '234{{area_code}}#######',
        '234 {{area_code}} ### ####',
        '234-{{area_code}}-###-####',
        '+234{{area_code}}#######',
        '+234 {{area_code}} ### ####',
        '+234-{{area_code}}-###-####',
    )

    # Mobile network prefixes by operator.
    # https://en.wikipedia.org/wiki/Telephone_numbers_in_Nigeria
    # Each code must appear exactly once so random_element samples uniformly.
    mobile_codes = [
        # MTN
        '803',
        '703',
        '903',
        '806',
        '706',
        '813',
        '814',
        '816',
        '810',
        '906',
        '704',
        # Airtel
        '802', '902', '701', '808', '708', '812', '901', '907',
        # Glo (duplicate '905' removed — it doubled that code's probability)
        '805', '705', '905', '807', '815', '811',
        # 9Mobile
        '809', '909', '817', '818', '908',
        # Ntel
        '804',
        # Smile
        '702',
        # Multilinks
        '709',
        # Starcomms
        '819',
        # Zoom
        '707',
    ]

    def area_code(self):
        """Return a random Nigerian mobile network code, e.g. ``'803'``."""
        return self.numerify(self.random_element(self.mobile_codes))

    def phone_number(self):
        """Return a Nigerian phone number in one of the supported formats."""
        pattern = self.random_element(self.formats)
        return self.numerify(self.generator.parse(pattern))
| 24.508475 | 68 | 0.45574 | 135 | 1,446 | 4.711111 | 0.644444 | 0.125786 | 0.103774 | 0.110063 | 0.146226 | 0.146226 | 0.103774 | 0.103774 | 0.103774 | 0.103774 | 0 | 0.136033 | 0.334025 | 1,446 | 58 | 69 | 24.931034 | 0.524403 | 0.126556 | 0 | 0.15 | 0 | 0 | 0.26261 | 0.115292 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0.025 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42cb12c2f9525fc1e01261772b1444db88cd2dc | 3,794 | py | Python | EnemyStats.py | rohwid/rpg-stats-generator | 5a1fcdf713ec5d0af3cbf6dce3bcb21e6e363519 | [
"MIT"
] | null | null | null | EnemyStats.py | rohwid/rpg-stats-generator | 5a1fcdf713ec5d0af3cbf6dce3bcb21e6e363519 | [
"MIT"
] | null | null | null | EnemyStats.py | rohwid/rpg-stats-generator | 5a1fcdf713ec5d0af3cbf6dce3bcb21e6e363519 | [
"MIT"
] | null | null | null | import datetime
"""
STATS GENERATOR FOR TURN BASED OR ACTION RPG (ROLE PLAYING GAMES)
By: ROHMAN WIDIYANTO
GitHub: http://github.com/rohwid/
All component or object defined separately, here's the reason:
- Levels: Because sometimes the characters won't start from 1st level.
- Magic Point: Because sometimes the games doesn't need it (ex: action RPG).
- Number of Weaknesses: Same reason with Magic Point.
- Generate data container: Generate data container dynamically.
Notes:
- Anything which contain "show" in the function was used for debug or
check the values.
"""
from EnemyDataContainer import Enemy
def all_enemies():
    """Generate stats for every enemy and parse them into the data tables.

    Configures the name, level, HP, MP, type, elemental-weakness and base-stat
    distributions on an ``Enemy`` container, then calls ``generate_enemy()``
    to build the final tables.
    """
    # Initialize with ENEMIES NUMBER and MAX LEVELS
    numbers_enemy = 400
    max_level = 80

    enemies = Enemy(numbers_enemy, max_level)

    """
    [RANGE ENEMIES NAME]
    Set the "enemy_name" variable to string to automatically generate names
    Example:
        enemy_name = 'Enemy'
    Set the "enemy_name" variable to list or array to manually generate name
    Example:
        enemy_name = ['alpha', 'beta', 'charlie', 'delta']
    """
    enemy_name = 'Enemy'
    enemies.range_enemy_name(enemy_name, 'Name', auto='yes')

    # [RANGE ENEMIES LEVELS]
    min_level = 1
    levels_class = ['Easy', 'Medium', 'High']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Level Distribution'
    title = True

    enemies.range_levels(min_level, levels_class, 'Levels', debug, scale=len(levels_class))
    enemies.show_range_levels(graph_title, graph, title, debug)

    # [RANGE ENEMIES HP]
    min_hp = 40
    max_hp = 520
    enemies.range_health_points(min_hp, max_hp, 'HP')

    # [RANGE ENEMIES MP]
    min_mp = 20
    max_mp = 490
    enemies.range_magic_points(min_mp, max_mp, 'MP')

    # [RANGE ENEMIES TYPE]
    enemy_type = ['Mixed', 'Hard Magic', 'Soft Magic', 'Hard Strength', 'Soft Strength']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title (this plot shows the TYPE distribution, not the levels)
    graph_title = 'Enemy Type Distribution'
    title = True

    # Distribution percentage (distribute_percent) example:
    # distribute_percent = [40, 10, 20, 10, 20]
    distribute_percent = [34, 13, 20, 13, 20]

    enemies.range_enemy_type(enemy_type, distribute_percent, 'Type', debug)
    enemies.show_range_enemy_type(graph_title, graph, title, debug)

    """
    [RANGE ENEMIES WEAKNESSES]
    CHARACTER ELEMENT DAMAGE IMPACT.
    0: Normal damage.
    1: Repel against (no damage).
    2: The damage weaknesses.
    """
    element_name = ['Phys', 'Water', 'Wind', 'Earth', 'Fire']
    damage_name = ['Normal', 'Repel', 'Weak']
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Element Distribution'
    title = True

    # Override this function when have different weaknesses concept!
    enemies.range_element_weak(element_name, damage_name)
    enemies.show_element_weak(graph_title, graph, title, debug)

    # [RANGE ENEMIES STATS]
    stats_name = ['Strength', 'Magic', 'Endurance', 'Speed', 'Luck']
    basic_max_stats = [50, 60, 40, 55, 45]
    basic_min_stats = [2, 2, 2, 2, 2]
    # Show Graph and Debug
    graph = True
    debug = False
    # Show Title
    graph_title = 'Enemy Stats Distribution'
    title = True

    enemies.range_stats(stats_name, basic_min_stats, basic_max_stats, debug)
    enemies.show_range_stats(graph_title, graph, title, debug)

    # Parse All Data to The Tables
    enemies.generate_enemy()
if __name__ == '__main__':
    # Time the full generation run and report the elapsed wall-clock time.
    started_at = datetime.datetime.now()
    all_enemies()
    elapsed = datetime.datetime.now() - started_at
    print('\nTime to run this program: ', elapsed)
| 28.961832 | 92 | 0.655245 | 485 | 3,794 | 4.950515 | 0.34433 | 0.049979 | 0.049979 | 0.028322 | 0.217826 | 0.188671 | 0.167847 | 0.121616 | 0.121616 | 0.121616 | 0 | 0.019271 | 0.24776 | 3,794 | 130 | 93 | 29.184615 | 0.822004 | 0.122562 | 0 | 0.269231 | 0 | 0 | 0.136089 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.038462 | 0 | 0.057692 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42e5d68ea357f4b16c9eced0c8eecd76665797a | 590 | py | Python | experiments/spanmaskhistogram/report_writer.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 19 | 2018-04-20T06:51:42.000Z | 2022-02-24T02:12:00.000Z | experiments/spanmaskhistogram/report_writer.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 2 | 2018-04-20T09:53:37.000Z | 2018-04-27T19:01:16.000Z | experiments/spanmaskhistogram/report_writer.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 3 | 2019-02-25T19:26:51.000Z | 2020-11-04T00:50:42.000Z | class RestWriter(object):
def __init__(self, file, report):
self.file = file
self.report = report
def write(self, restsection):
assert len(restsection) >= 1
for title, table in self.report:
self.write_header(title, restsection[0], 80)
self.file.write('\n')
self.file.write(str(table))
def write_header(self, title, char, width = 80):
f = self.file
f.write('\n')
f.write('\n')
f.write("%s\n" % title)
f.write(char * max(len(title), width))
f.write('\n')
| 23.6 | 56 | 0.537288 | 76 | 590 | 4.092105 | 0.355263 | 0.128617 | 0.067524 | 0.051447 | 0.064309 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014851 | 0.315254 | 590 | 24 | 57 | 24.583333 | 0.754951 | 0 | 0 | 0.176471 | 0 | 0 | 0.020374 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.176471 | false | 0 | 0 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42e667bd6ebb350e9c0d59466ded20c39b8c4ae | 9,005 | py | Python | pywgrib2_xr/dataset.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | 11 | 2021-01-05T03:26:51.000Z | 2022-02-15T02:44:39.000Z | pywgrib2_xr/dataset.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | 2 | 2020-12-18T02:35:08.000Z | 2021-07-11T13:01:53.000Z | pywgrib2_xr/dataset.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | null | null | null | from collections import defaultdict
from functools import partial
import logging
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
NamedTuple,
Sequence,
Tuple,
Union,
cast,
)
try:
from numpy.typing import ArrayLike
except ImportError:
ArrayLike = Any
import numpy as np
from xarray.backends.locks import SerializableLock
from . import UNDEFINED, _Variable, WgribError
from .wgrib2 import MemoryBuffer, wgrib, free_files
from .inventory import MetaData
from .template import Template
logger = logging.getLogger(__name__)

# wgrib2 returns C float arrays
DTYPE = np.dtype("float32")

# Header (non-grid) index tuple of a GRIB message, e.g. (time_idx, level_idx).
HeaderIndices = Tuple[int, ...]
# file path -> {header indices -> message offset (string, as stored on items)}
FileIndex = DefaultDict[str, Dict[HeaderIndices, str]]  # file -> Dict
# variable name -> FileIndex
FileIndices = DefaultDict[str, FileIndex]  # variable name -> FileIndex

# Serializes access to the wgrib2 library across xarray backend workers.
WGRIB2_LOCK = SerializableLock()
class Dataset(NamedTuple):
    """Container returned by :func:`open_dataset`.

    Holds dimension sizes, the (possibly lazy) variables and the global
    attributes needed to assemble a dataset.
    """

    dims: Dict[str, int]        # dimension name -> size
    vars: Dict[str, _Variable]  # variable name -> variable (data may be lazy)
    attrs: Dict[str, Any]       # dataset-level attributes
# FIXME: might use https://github.com/roebel/py_find_1st
def find_1st(array, value):
    """Return the index of the first element of *array* equal to *value*.

    Raises IndexError when the value is absent (callers catch this).
    """
    hits = np.nonzero(array == value)[0]
    return hits[0]
def build_file_index(
    items: Sequence[MetaData],
    template: Template,
) -> FileIndices:
    """Group inventory items by variable and file.

    For every item matching *template*, locate its forecast time and level
    in the template coordinates and record the item's message offset under
    ``result[varname][file][(time_idx, level_idx)]``.  Items whose variable,
    forecast time or level is not in the template are skipped with a log
    message.
    """
    # Nested defaultdict: varname -> file -> {header indices -> offset}.
    file_indices: FileIndices = defaultdict(cast(Callable, partial(defaultdict, dict)))
    for item in (i for i in items if template.item_match(i)):
        varname = template.item_to_varname(item)
        try:
            specs = template.var_specs[varname]
        except KeyError:
            logger.info("Variable {!s} not found in template, skipping".format(varname))
            continue
        time_coord = specs.time_coord
        level_coord = specs.level_coord
        # Lead time relative to the reference (analysis) time.
        fcst_time = item.end_ft - item.reftime
        header_indices: Tuple[int, ...] = ()
        found = True
        if time_coord in specs.dims:
            # Time is a real dimension: look up the position of this lead time.
            try:
                i = find_1st(template.coords[time_coord].data, fcst_time)
                header_indices = (i,)
            except IndexError:
                found = False
        else:
            # Scalar time coordinate: the lead time must match exactly.
            if template.coords[time_coord].data != fcst_time:
                found = False
        if not found:
            logger.info(
                "Variable {:s} forecast time {!r} not found in template, "
                "skipping".format(varname, fcst_time)
            )
            continue
        if level_coord in specs.dims:
            # Level is a dimension: append its index to the header tuple.
            try:
                i = find_1st(template.coords[level_coord].data, item.level_value)
                header_indices += (i,)
            except IndexError:
                logger.info(
                    "Variable {:s} level {!r} not found in template, "
                    "skipping".format(varname, item.level_value)
                )
                continue
        file_indices[varname][item.file][header_indices] = item.offset
    return file_indices
def expand_item(item: Sequence[Any], shape: Tuple[int, ...]) -> Tuple[List[Any], ...]:
    """Normalise every index in *item* to an explicit list of positions.

    Lists pass through, arrays become lists, slices are expanded against the
    matching dimension size, and ints become one-element lists.  Raises
    TypeError for any other index type.
    """
    def _as_list(index: Any, size: int) -> List[Any]:
        # Convert a single index to the explicit list of selected positions.
        if isinstance(index, list):
            return index
        if isinstance(index, np.ndarray):
            return index.tolist()
        if isinstance(index, slice):
            return list(range(index.start or 0, index.stop or size, index.step or 1))
        if isinstance(index, int):
            return [index]
        raise TypeError("Unsupported index type {!r}".format(type(index)))

    return tuple(_as_list(index, size) for index, size in zip(item, shape))
class OnDiskArray:
    """Lazily indexed array whose values are decoded from GRIB2 files by wgrib2.

    The last ``geo_ndim`` axes of ``shape`` are the grid dimensions; the
    leading (header) axes select individual GRIB messages via ``file_index``.
    ``__getitem__`` decodes only the selected messages.
    """

    def __init__(
        self,
        varname: str,
        file_index: FileIndex,
        shape: Sequence[int],
        template: Template,
    ) -> None:
        """Store indexing metadata; no file access happens here.

        varname    : variable name.
        file_index : file path -> {header indices -> message offset}.
        shape      : full variable shape, header dims first, grid dims last.
        template   : supplies the grid, whose dims define ``geo_ndim``.
        """
        self.varname = varname
        self.file_index = file_index
        self.shape = tuple(shape)
        self.geo_ndim = len(template.grid.dims)
        # Number of grid points in one decoded field.
        self.npts = np.prod(shape[-self.geo_ndim :])
        self.missing_value = UNDEFINED  # wgrib2 missing value
        self.dtype = DTYPE

    def __getitem__(self, item: Tuple[Any, ...]) -> ArrayLike:
        """Decode the messages selected by *item* and return the sliced array.

        *item* must be a full-length index tuple (one entry per axis of
        ``self.shape``).  Header axes select messages; the trailing geo axes
        are applied in memory after decoding.
        """
        assert isinstance(item, tuple), "Item type must be tuple not {!r}".format(
            type(item)
        )
        assert len(item) == len(self.shape), "Item len must be {!r} not {!r}".format(
            len(self.shape), len(item)
        )
        # Explicit per-axis position lists for the header part of the index.
        header_item = expand_item(item[: -self.geo_ndim], self.shape)
        array_field_shape = (
            tuple(len(i) for i in header_item) + self.shape[-self.geo_ndim :]
        )
        # Buffer for all selected fields; unfilled entries stay NaN.
        array_field = np.full(array_field_shape, fill_value=np.nan, dtype=DTYPE)
        # Byte size of one decoded field in wgrib2's binary output.
        datasize = self.npts * array_field.dtype.itemsize
        for file, index in self.file_index.items():
            # Faster, longer code: batch all selected messages of this file
            # into a single wgrib2 call.
            def _get_array_indexes():
                # Yield, for each message of this file that the request
                # selects, its position in array_field plus its offset.
                for header_indices, offset in index.items():
                    try:
                        afi = [
                            it.index(ix) for it, ix in zip(header_item, header_indices)
                        ]
                        yield afi, offset
                    except ValueError:
                        continue

            try:
                seq_of_array_field_indexes, offsets = zip(*_get_array_indexes())
            except ValueError:
                # zip(*) raised: no message of this file is selected.
                continue
            # Feed the selected offsets to wgrib2 as an in-memory inventory
            # and collect the decoded fields as raw binary ("-bin").
            inventory = MemoryBuffer()
            inventory.set("\n".join(offsets))
            output = MemoryBuffer()
            args = [
                file,
                "-rewind_init",
                file,
                "-i_file",
                inventory,
                "-rewind_init",
                inventory,
                "-inv",
                "/dev/null",
                "-no_header",
                "-bin",
                output,
            ]
            try:
                wgrib(*args)
                values = output.get("b")
            except WgribError as e:
                logger.error("wgrib2 error: {:s}".format(str(e)))
                output.close()
                continue
            finally:
                inventory.close()
                output.close()
                free_files(file)
            # Copy each decoded field (datasize bytes apiece) into its slot.
            for pos, array_field_indexes in zip(
                range(0, len(values), datasize), seq_of_array_field_indexes
            ):
                chunk = np.frombuffer(values[pos : pos + datasize], dtype=DTYPE)
                array_field.__getitem__(tuple(array_field_indexes)).flat[:] = chunk
        # Slow, shorter code
        #    for header_indices, offset in index.items():
        #        try:
        #            array_field_indexes = [
        #                it.index(ix) for it, ix in zip(header_item, header_indices)
        #            ]
        #        except ValueError:
        #            continue
        #        output = MemoryBuffer()
        #        args = [
        #            path,
        #            "-rewind_init",
        #            path,
        #            "-d",
        #            offset,
        #            "-inv",
        #            "/dev/null",
        #            "-no_header",
        #            "-bin",
        #            output,
        #        ]
        #        #print('=========== calling wgrib', path, header_indices, offset)
        #        try:
        #            wgrib(*args)
        #            values = output.get("a")
        #            array_field.__getitem__(tuple(array_field_indexes)).flat[:] = values
        #        except WgribError as e:
        #            logger.error("wgrib2 error: {!r}".format(e))
        #            output.close()
        #            continue
        #        finally:
        #            output.close()
        #            free_files(path)
        # Apply the geographic part of the index in memory.
        array = array_field[(Ellipsis,) + item[-self.geo_ndim :]]
        # Map wgrib2's missing value onto NaN.
        array[array == self.missing_value] = np.nan
        # Drop the axes that were indexed with a plain int (numpy semantics).
        for i, it in reversed(list(enumerate(item[: -self.geo_ndim]))):
            if isinstance(it, int):
                array = array[(slice(None, None, None),) * i + (0,)]
        return array
def open_dataset(
    items: Sequence[MetaData],
    template: Template,
) -> Union[Dataset, None]:
    """Assemble a :class:`Dataset` from inventory *items* using *template*.

    Each matched variable is backed by a lazy :class:`OnDiskArray`.  The
    template's coordinates, a scalar ``reftime`` variable and the projection
    (grid-mapping) variable are added alongside the data variables.  When no
    item matches, an empty Dataset is returned after a warning.
    """
    dims: Dict[str, int] = {}
    data_vars: Dict[str, _Variable] = {}
    indices = build_file_index(items, template)
    if not indices:
        logger.warning("No matching data found")
        return Dataset(dims, data_vars, {})
    for varname, per_file_index in indices.items():
        specs = template.var_specs[varname]
        lazy_data = OnDiskArray(varname, per_file_index, specs.shape, template)
        data_vars[varname] = _Variable(specs.dims, lazy_data, specs.attrs)
        dims.update(dict(zip(specs.dims, specs.shape)))
    data_vars.update(template.coords)
    # reftime is the same for all items, so take it from the first one.
    data_vars["reftime"] = _Variable(
        (),
        np.array(items[0].reftime),
        {"standard_name": "reference_time"},
    )
    # Projection (grid-mapping) variable.
    data_vars[template.grid.cfname] = _Variable((), np.array(0), template.grid.attrs)
    attrs = template.attrs.copy()
    coordinate_names = tuple(template.coords.keys()) + ("reftime", template.grid.cfname)
    attrs["coordinates"] = " ".join(coordinate_names)
    return Dataset(dims, data_vars, attrs)
| 33.726592 | 88 | 0.549139 | 963 | 9,005 | 4.981308 | 0.228453 | 0.0271 | 0.013759 | 0.011882 | 0.193246 | 0.144882 | 0.133625 | 0.102564 | 0.035439 | 0.035439 | 0 | 0.003363 | 0.339589 | 9,005 | 266 | 89 | 33.853383 | 0.803262 | 0.113937 | 0 | 0.17734 | 0 | 0 | 0.052293 | 0 | 0 | 0 | 0 | 0.003759 | 0.009852 | 1 | 0.034483 | false | 0 | 0.059113 | 0.004926 | 0.147783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a42fe444580c5e09c79668e45d2c77ef6d352594 | 1,473 | py | Python | stog/simple_predict.py | mg9/stog | 49d0d1ebc1ae666f79e43466fbdc33b1b12f1adf | [
"MIT"
] | null | null | null | stog/simple_predict.py | mg9/stog | 49d0d1ebc1ae666f79e43466fbdc33b1b12f1adf | [
"MIT"
] | null | null | null | stog/simple_predict.py | mg9/stog | 49d0d1ebc1ae666f79e43466fbdc33b1b12f1adf | [
"MIT"
] | null | null | null | import h5py,os
from transformers import T5Tokenizer, T5Model, T5ForConditionalGeneration
if __name__ == "__main__":
snt_0 = "amrgraphize: establish model in Industrial Innovation </s>"
snt_1 = "amrgraphize: raise standard to in excess of CITY_1 's 1 magnitude could leave authority with some breathing space for explanation , and alleviate public anger . </s>"
snt_2 = "amrgraphize: 1 . from among they , pick-out 10 for submission to a assessment committee to assess . </s>"
amr_1 = "possible and leave-13 raise standard in-excess-of seismic-quantity 1 earthquake CITY_1 have authority space breathe explain authority some alleviate raise anger public"
amr_2 = "pick-out 1 thing 10 submit committee assess assess committee thing they"
## Load finetuned t5 model
finetuned_t5 = "../t5-small-amrtrained_4"
t5 = T5ForConditionalGeneration.from_pretrained(finetuned_t5)
## Load t5 tokenizer
t5_tokenizer = T5Tokenizer.from_pretrained("../t5-vocab")
snt = snt_2
amr = amr_2
input_ids = t5_tokenizer.encode(snt, return_tensors="pt")
outputs = t5.generate(input_ids=input_ids, max_length=1000)
pred = [
t5_tokenizer.decode(
output#, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
for output in outputs
]
print("snt: ", snt)
print("amr: ", amr)
print("pred: ", pred)
print("outputs: ", outputs)
#print("t5: ", t5.config)
| 32.021739 | 181 | 0.699932 | 195 | 1,473 | 5.107692 | 0.492308 | 0.044177 | 0.02008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036942 | 0.209776 | 1,473 | 45 | 182 | 32.733333 | 0.818729 | 0.086897 | 0 | 0 | 0 | 0.12 | 0.477085 | 0.018032 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a431ecb8dce6d4bde6d87983adda3ad4f20a538e | 23,627 | py | Python | Experiments/evaluate_CAPe.py | Lorenzo-Perini/Active_PU_Learning | 83b608993586420bb84d1b4e6fc6c7cb561a382f | [
"Apache-2.0"
] | 10 | 2020-07-22T09:16:55.000Z | 2022-01-16T12:23:44.000Z | Experiments/evaluate_CAPe.py | Lorenzo-Perini/Active_PU_Learning | 83b608993586420bb84d1b4e6fc6c7cb561a382f | [
"Apache-2.0"
] | null | null | null | Experiments/evaluate_CAPe.py | Lorenzo-Perini/Active_PU_Learning | 83b608993586420bb84d1b4e6fc6c7cb561a382f | [
"Apache-2.0"
] | 1 | 2021-06-28T06:37:54.000Z | 2021-06-28T06:37:54.000Z | import numpy as np
import pandas as pd
from multiprocessing import Pool, freeze_support, cpu_count
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import StratifiedKFold
from anomatools.models import SSDO
from sklearn.ensemble import IsolationForest
import collections, functools
from anomatools.models import SSDO
import pyximport #import cython such that CAPe can work!
pyximport.install()
from CAPe import * #this imports CAPe functions
from TIcE import * #this imports TIcE functions
from Kernel_MPE_grad_threshold import * #this imports km1 and km2 functions
def get_f1scores_wdiff_priors(data, y, real_cont, tmp_cont = 0.1, k = 5, ntimes = 10, name_ds = '', case = 2,
                              n_splits = 5, n_iter = 3, n_jobs = cpu_count()):
    """ This function gets the F1 scores of SSDO when using different contamination factors according to all the methods used in
    the paper (CAPe). This function builds a dataframe with the final results and saves it on a csv file (remark: change the
    path at the bottom or remove the line). The 5 methods for estimating the class prior in a PU dataset compared are: CAPe,
    TIcE, km1, km2 and the naive baseline with the real contamination factor.

    Parameters
    ----------
    data : np.array of shape (n_samples, n_features). It is the entire dataset.
    y : np.array of shape (n_samples,) containing all the labels.
    real_cont : float regarding the REAL expected percentage of anomalies in the training set.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    k : int regarding the number of new labels required. Default = 5.
    ntimes : int regarding the number of iterations for getting new k labels. Default = 10 (at least 50 examples).
    name_ds : string containing the name of the dataset to output a meaningful csv file. Default empty string.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    n_splits : int regarding the number of splits inside the crossvalidation. Default = 5.
    n_iter : int regarding the number of iterations of the whole method. Default = 3.
    n_jobs : int regarding the number of jobs to run in parallel. Default = maximum number of jobs.

    Returns
    ----------
    F1_results : dataframe containing the F1 results. The first columns is about the labels acquired (k) while the others
                 contain the F1 score for each method and each number of labels.
    prior_results : dataframe containing the prior results. The first columns is about the labels acquired (k) while the
                    others contain the prior estimates for each method and each number of labels.
    """
    # One row per labeling budget k, 2k, ..., ntimes*k; one column per method.
    F1_results = pd.DataFrame(data = k*np.arange(1, ntimes+1,1), columns = ['k'])
    F1_results['F1_CAPe'] = np.zeros(ntimes, dtype = float)
    F1_results['F1_TIcE'] = np.zeros(ntimes, dtype = float)
    F1_results['F1_km1'] = np.zeros(ntimes, dtype = float)
    F1_results['F1_km2'] = np.zeros(ntimes, dtype = float)
    F1_results['F1_real'] = np.zeros(ntimes, dtype = float)
    # Accumulators for the prior estimates, summed over all folds/iterations.
    CAPe_prior = np.zeros(ntimes, dtype = float)
    TIcE_prior = np.zeros(ntimes, dtype = float)
    km1_prior = np.zeros(ntimes, dtype = float)
    km2_prior = np.zeros(ntimes, dtype = float)
    real_prior = np.zeros(ntimes, dtype = float)
    for num in np.arange(1,n_iter+1):
        skf = StratifiedKFold(n_splits=n_splits, random_state=331, shuffle=True)
        # Per-fold dicts {labeling budget -> [tn, fp, fn, tp]} for each method.
        F1_CAPe = []
        F1_TIcE = []
        F1_km1 = []
        F1_km2 = []
        F1_real = []
        for train_index, test_index in skf.split(data, y):
            X_train, X_test = data[train_index], data[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # Indices of the true anomalies, used to simulate the oracle.
            real_anomalies = np.where(y_train == 1)[0]
            f1cape, prior_cape = evaluate_CAPe(X_train, X_test, y_test, real_anomalies, k, ntimes, tmp_cont, case)
            F1_CAPe.append(f1cape)
            CAPe_prior += prior_cape
            f1tice, prior_tice = evaluate_TIcE(X_train, X_test, y_test, real_anomalies, k, ntimes, tmp_cont, case)
            F1_TIcE.append(f1tice)
            TIcE_prior += prior_tice
            f1km1, f1km2, prior_km1, prior_km2 = evaluate_km1km2(X_train, X_test, y_test, real_anomalies, k, ntimes,
                                                                 tmp_cont, case)
            F1_km1.append(f1km1)
            F1_km2.append(f1km2)
            km1_prior += prior_km1
            km2_prior += prior_km2
            f1real, prior_real = evaluate_realF1(X_train, X_test, y_test, real_cont, real_anomalies, k, ntimes, case)
            F1_real.append(f1real)
            real_prior += prior_real
            print('Done crossval for iter num:', num)
        # Sum the per-fold confusion-matrix cells per labeling budget
        # (Counter.update adds values key-wise across the fold dicts).
        FinalF1CAPe = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_CAPe, collections.Counter()))
        FinalF1TIcE = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_TIcE, collections.Counter()))
        FinalF1km1 = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_km1, collections.Counter()))
        FinalF1km2 = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_km2, collections.Counter()))
        FinalF1real = dict(functools.reduce(lambda x, y: x.update(y) or x, F1_real, collections.Counter()))
        # Turn the summed cells into F1 = 2*tp / (2*tp + fp + fn);
        # cells are ordered [tn, fp, fn, tp].
        for j in range(ntimes):
            tnfpfntp = FinalF1CAPe[int(k*(j+1))]
            FinalF1CAPe[int(k*(j+1))] = (2 * tnfpfntp[3]) / (2 * tnfpfntp[3] + tnfpfntp[1] + tnfpfntp[2])
            tnfpfntp = FinalF1TIcE[int(k*(j+1))]
            FinalF1TIcE[int(k*(j+1))] = (2 * tnfpfntp[3]) / (2 * tnfpfntp[3] + tnfpfntp[1] + tnfpfntp[2])
            tnfpfntp = FinalF1km1[int(k*(j+1))]
            FinalF1km1[int(k*(j+1))] = (2 * tnfpfntp[3]) / (2 * tnfpfntp[3] + tnfpfntp[1] + tnfpfntp[2])
            tnfpfntp = FinalF1km2[int(k*(j+1))]
            FinalF1km2[int(k*(j+1))] = (2 * tnfpfntp[3]) / (2 * tnfpfntp[3] + tnfpfntp[1] + tnfpfntp[2])
            tnfpfntp = FinalF1real[int(k*(j+1))]
            FinalF1real[int(k*(j+1))] = (2 * tnfpfntp[3]) / (2 * tnfpfntp[3] + tnfpfntp[1] + tnfpfntp[2])
        # Accumulate this iteration's F1 per labeling budget (averaged below).
        F1_results['F1_CAPe'] += list(FinalF1CAPe.values())
        F1_results['F1_TIcE'] += list(FinalF1TIcE.values())
        F1_results['F1_km1'] += list(FinalF1km1.values())
        F1_results['F1_km2'] += list(FinalF1km2.values())
        F1_results['F1_real'] += list(FinalF1real.values())
        print('Finished Iteration number', num,'out of', n_iter)
    prior_results = pd.DataFrame(data = k*np.arange(1, ntimes+1,1), columns = ['k'])
    # NOTE(review): the priors are accumulated over n_iter*n_splits folds but
    # divided by ntimes*n_splits — confirm the intended divisor.
    prior_results['CAPe_prior'] = CAPe_prior/(ntimes*n_splits)
    prior_results['TIcE_prior'] = TIcE_prior/(ntimes*n_splits)
    prior_results['km1_prior'] = km1_prior/(ntimes*n_splits)
    prior_results['km2_prior'] = km2_prior/(ntimes*n_splits)
    prior_results['real_prior'] = real_prior/(ntimes*n_splits)
    F1_columns = ['F1_CAPe', 'F1_TIcE', 'F1_km1', 'F1_km2', 'F1_real']
    # Average the F1 scores over the n_iter repetitions.
    F1_results[F1_columns] = F1_results[F1_columns]/n_iter
    F1_results.to_csv('F1score_case_'+str(case)+name_ds+'.csv')
    prior_results.to_csv('prior_case_'+str(case)+name_ds+'.csv')
    return F1_results, prior_results
def evaluate_CAPe(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1,
                  case = 0, n_jobs = cpu_count()):
    """ Evaluating CAPe as provided in the paper. This function 1) gets CAPe's estimation of the class prior and 2) saves the
    confusion matrix's cell values (tn, fp, fn, tp) in order to compute after all the interations a unique F1 score.

    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    n_jobs : int regarding the number of jobs to run in parallel. Default = maximum number of jobs.

    Returns
    ----------
    F1_CAPe : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
              with the estimate of the prior in such case.
    class_priors : array of shape (ntimes,) containing every k new labels the estimate of the class prior.
    """
    n = np.shape(X_train)[0]
    query_list = []
    # dtype=int: the np.int alias was removed from NumPy (>= 1.24).
    labeled_ex = np.zeros(n, dtype=int)
    # Mean kernel-density estimate over the training set, on a log10 scale.
    ker = KernelDensity().fit(X_train)
    dmu = [np.exp(ker.score(X_train[i:i+1])) for i in range(n)]
    mean_prob_term = math.log(np.mean(dmu), 10)
    F1_CAPe = {}
    class_priors = np.zeros(ntimes, dtype=float)
    for j in range(ntimes):
        # The caller's tmp_cont is used for the first round (the former
        # hard-coded reset to 0.1 discarded the argument) and is then
        # refreshed from CAPe's prior estimate at each round.
        prior, labeled_ex, query_list = CAPe(X_train, labeled_ex, query_list, k, real_anomalies, tmp_cont, mean_prob_term,
                                             case, n_jobs)
        class_priors[j] = prior
        tmp_cont = 1 - min(prior, 0.9999)  # update the contamination factor
        # Confusion-matrix cells of the classifier at this labeling budget.
        F1_CAPe[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, tmp_cont)
    return F1_CAPe, class_priors
def evaluate_TIcE(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1, case = 0):
    """ This function for evaluating TIcE does 1) query examples until k new labels are acquired, 2) get
    TIcE's estimation of the class prior and 3) save the confusion matrix's cell values (tn, fp, fn, tp) in order to
    compute after all the interations a unique F1 score.

    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.

    Returns
    ----------
    F1_TIcE : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
              with the estimate of the prior in such case.
    class_priors : array of shape (ntimes,) containing every k new labels the estimate of the class prior.
    """
    n = np.shape(X_train)[0]
    query_list = []
    # dtype=int: the np.int alias was removed from NumPy (>= 1.24).
    labeled_ex = np.zeros(n, dtype=int)
    F1_TIcE = {}
    class_priors = np.zeros(ntimes, dtype=float)
    scaler = MinMaxScaler()
    for j in range(ntimes):
        # The caller's tmp_cont is used for the first round (the former
        # hard-coded reset to 0.1 discarded the argument) and is updated
        # from TIcE's prior estimate afterwards.
        labeled_ex, query_list = query_at_least_k_points(X_train, labeled_ex, real_anomalies, query_list, k, tmp_cont,
                                                         case)
        # Run the TIcE algorithm on the scaled data to estimate the prior.
        _, prior = run_from_elsewhere(data = scaler.fit_transform(X_train), labels = labeled_ex)
        class_priors[j] = prior
        tmp_cont = 1 - min(prior, 0.9999)
        # Confusion-matrix cells of the classifier at this labeling budget.
        F1_TIcE[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, tmp_cont)
    return F1_TIcE, class_priors
def evaluate_km1km2(X_train, X_test, y_test, real_anomalies = [], k = 5, ntimes = 10, tmp_cont = 0.1, case = 0):
    """ This function for evaluating km1 and km2 does 1) query examples until k new labels are acquired, 2) get
    km1's (km2's) estimation of the class prior and 3) save the confusion matrix's cell values (tn, fp, fn, tp) in order to
    compute after all the interations a unique F1 score. It repeats the whole process twice, once for km1 and once for km2.

    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.

    Returns
    ----------
    F1_km1 : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
             with the estimate of the prior in such case (km1).
    F1_km2 : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
             with the estimate of the prior in such case (km2).
    class_priors_km1: array of shape (ntimes,) containing every k new labels the estimate of the class prior for km1.
    class_priors_km2: array of shape (ntimes,) containing every k new labels the estimate of the class prior for km2.
    """
    n = np.shape(X_train)[0]
    query_list_km1 = []
    query_list_km2 = []
    # dtype=int: the np.int alias was removed from NumPy (>= 1.24).
    labeled_ex_km1 = np.zeros(n, dtype=int)
    labeled_ex_km2 = np.zeros(n, dtype=int)
    F1_km1 = {}
    F1_km2 = {}
    class_priors_km1 = np.zeros(ntimes, dtype=float)
    class_priors_km2 = np.zeros(ntimes, dtype=float)
    km1_tmp_cont = 0.1
    km2_tmp_cont = 0.1
    for j in range(ntimes):
        # ---- km1: query, split labeled/unlabeled, estimate the prior ----
        labeled_ex_km1, query_list_km1 = query_at_least_k_points(X_train, labeled_ex_km1, real_anomalies, query_list_km1, k,
                                                                 tmp_cont, case)
        X_component = X_train[np.where(labeled_ex_km1 == 1)[0]]
        X_mixture = X_train[np.where(labeled_ex_km1 == 0)[0]]
        prior_km1, _ = wrapper(X_mixture, X_component)
        class_priors_km1[j] = prior_km1
        km1_tmp_cont = 1 - prior_km1
        F1_km1[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex_km1, X_test, y_test, km1_tmp_cont)
        # ---- km2: same pipeline on its own labeled set ----
        labeled_ex_km2, query_list_km2 = query_at_least_k_points(X_train, labeled_ex_km2, real_anomalies, query_list_km2, k,
                                                                 tmp_cont, case)
        X_component = X_train[np.where(labeled_ex_km2 == 1)[0]]
        X_mixture = X_train[np.where(labeled_ex_km2 == 0)[0]]
        _, prior_km2 = wrapper(X_mixture, X_component)
        class_priors_km2[j] = prior_km2
        # Update km2's contamination with km2's own prior (previously this
        # line assigned km1_tmp_cont, leaving km2_tmp_cont stuck at 0.1).
        km2_tmp_cont = 1 - prior_km2
        F1_km2[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex_km2, X_test, y_test, km2_tmp_cont)
    return F1_km1, F1_km2, class_priors_km1, class_priors_km2
def evaluate_realF1(X_train, X_test, y_test, real_cont, real_anomalies = [], k = 5, ntimes = 10, case = 0):
    """ This function for evaluating the performance of SSDO with real contamination factor does 1) query examples until k new
    labels are acquired and 2) save the confusion matrix's cell values (tn, fp, fn, tp) in order to compute after all the
    iterations a unique F1 score. It never updates the contamination factor, as it is the true one.
    Parameters
    ----------
    X_train : np.array of shape (n_samples, n_features). It is the training set.
    X_test : np.array of shape (m_samples, n_features). It is the test set.
    y_test : np.array of shape (m_samples,). It contains the test labels.
    real_cont : float regarding the REAL expected percentage of anomalies in the training set.
    real_anomalies : list of shape (n_samples,) containing the index of the real training anomalies. Only needed if case=2.
    k : int regarding the number of new labels required.
    ntimes : int regarding the number of iterations for getting new k labels.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    F1_real : dict containing for each key multiple of k (k, 2*k, 3*k,...,ntimes*k) the array [tn,fp,fn,tp] obtained
        with the real prior.
    class_priors : array of shape (ntimes,) containing every k new labels the real class prior.
    """
    # NOTE: real_anomalies keeps its mutable [] default for interface compatibility;
    # it is only passed through, never mutated here.
    n = np.shape(X_train)[0]
    query_list = []
    # np.int was removed in NumPy 1.24; the builtin int is the documented replacement.
    labeled_ex = np.zeros(n, dtype=int)
    F1_real = {}
    class_priors = np.zeros(ntimes, dtype=float)
    for j in range(ntimes):
        # Acquire (at least) k more labels, then record the confusion-matrix cells
        # for the classifier trained with the true contamination factor.
        labeled_ex, query_list = query_at_least_k_points(X_train, labeled_ex, real_anomalies, query_list, k, real_cont, case)
        class_priors[j] = 1 - real_cont  # the real prior never changes
        F1_real[int(k*(j+1))] = get_tnfpfntp(X_train, labeled_ex, X_test, y_test, real_cont)
    return F1_real, class_priors
def get_tnfpfntp(X_train, y_train, X_test, y_test, contamination):
    """Train SSDO with the current labels and contamination, score the test set.

    An IsolationForest is first fit on X_train to produce an unsupervised prior:
    its decision scores are flipped (so higher = more anomalous) and shifted to
    be non-negative. SSDO is then trained with that prior plus the partial
    labels y_train, and its predictions on X_test are reduced to the confusion
    matrix cells.

    Returns a np.array([tn, fp, fn, tp], dtype=int).
    """
    iforest = IsolationForest(contamination = contamination, behaviour='new').fit(X_train)
    train_prior = iforest.decision_function(X_train) * -1
    train_prior = train_prior + abs(min(train_prior))  # shift scores so the prior is >= 0
    ssdo = SSDO(k=3, alpha=2.3, unsupervised_prior='other', contamination = contamination)
    ssdo.fit(X_train, y_train, prior = train_prior)
    prediction = ssdo.predict(X_test, prior = train_prior)
    # Map SSDO output to binary labels: 1 stays 1, everything else becomes 0.
    y_pred = [1 if p == 1 else 0 for p in prediction]
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
    return np.array([tn, fp, fn, tp], dtype = int)
def query_at_least_k_points(data, labeled_ex, real_anomalies, query_list, k, tmp_cont, case):
    """ This function queries at least k examples in order to acquire new k labels. The new labels depend on the case and on the
    user's uncertainty. This function does 1) compute the user's uncertainty with respect to the case, 2) as long as k new
    labels are not acquired, train SSDO and query the most informative example to get labeled by the oracle (PO or IO).
    Parameters
    ----------
    data : np.array of shape (n_samples, n_features). It is the data set.
    labeled_ex : list of shape (n_samples,) assuming 1 if the example is labeled, 0 otherwise.
    real_anomalies : list of shape (n_samples,) containing the index of the real anomalies. Only needed if case=2.
    query_list : list of shape (n_samples,) assuming 1 if the example has been queried, 0 otherwise.
    k : int regarding the number of new labels required.
    tmp_cont : float regarding the starting expected percentage of anomalies in the training set. Default=0.1.
    case : int getting 0 when the user's uncertainty is not present (PO), 2 when dealing with it (IO). Default=0.
    Returns
    ----------
    labeled_ex : list of shape (n_samples,) containing both the already labeled examples and the new ones.
    query_list : list of shape (n_samples,) containing both the already queried examples and the new ones.
    """
    n = np.shape(data)[0]
    # Per-example probability that the (possibly imperfect) oracle answers.
    user_uncertainty = compute_user_uncertainty(data, real_anomalies, tmp_cont, case)
    # Unsupervised prior: IsolationForest scores, flipped and shifted to be non-negative.
    prior_detector = IsolationForest(contamination = tmp_cont, behaviour='new', random_state = 331).fit(data)
    train_prior = prior_detector.decision_function(data) * -1
    train_prior = train_prior + abs(min(train_prior))
    detector = SSDO(k=3, alpha=2.3, unsupervised_prior='other', contamination = tmp_cont)
    # Keep querying until k labels are collected or every example has been queried.
    # NOTE(review): the condition counts ALL labels in labeled_ex, not only the new
    # ones acquired during this call -- confirm this matches the intended semantics.
    while int(sum(labeled_ex)) < k and len(query_list) < n:
        detector.fit(data, np.negative(labeled_ex), prior = train_prior)
        score = detector.predict_proba(data, prior = train_prior, method='squash')[:, 0]
        # Distance from 0.5 = model confidence; small values = most uncertain.
        score = [abs(x-0.5) for x in score]
        # Sort the not-yet-queried examples by ascending uncertainty score.
        index = sorted([[x,i] for i,x in enumerate(score) if i not in query_list], reverse = False)
        idx_query_point = index[0][1] #choose the first most uncertain example and query it
        query_list.append(idx_query_point)
        uncertainty_score = user_uncertainty[idx_query_point]
        # Simulate the oracle's (possibly noisy) reply with a Bernoulli draw.
        reply = np.random.binomial(1, uncertainty_score)
        if reply:
            labeled_ex[idx_query_point] = +1 #if the user says that it's normal, then update the label
            #otherwise keep it in the queried ranking; the model is re-trained on the same labels
    return labeled_ex, query_list
| 54.946512 | 129 | 0.629365 | 3,409 | 23,627 | 4.198005 | 0.105603 | 0.016351 | 0.020124 | 0.018587 | 0.632031 | 0.602124 | 0.562784 | 0.54252 | 0.518552 | 0.498009 | 0 | 0.02498 | 0.274813 | 23,627 | 429 | 130 | 55.074592 | 0.81026 | 0.450629 | 0 | 0.175879 | 0 | 0 | 0.022092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035176 | false | 0 | 0.080402 | 0 | 0.150754 | 0.01005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4331a72af0d0109165afb03b5f32236c6ec4e77 | 28,470 | py | Python | __main__.py | wbrxcorp/genpack | 12cbbbf8306cb825c65d76ef55e85d24f1db0f90 | [
"MIT"
] | null | null | null | __main__.py | wbrxcorp/genpack | 12cbbbf8306cb825c65d76ef55e85d24f1db0f90 | [
"MIT"
] | null | null | null | __main__.py | wbrxcorp/genpack | 12cbbbf8306cb825c65d76ef55e85d24f1db0f90 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Copyright (c) 2021 Walbrix Corporation
# https://github.com/wbrxcorp/genpack/blob/main/LICENSE
import os,re,argparse,subprocess,glob,json,uuid
import importlib.resources
import urllib.request
import initlib,init,util
import qemu
from sudo import sudo,Tee
BASE_URL="http://ftp.iij.ad.jp/pub/linux/gentoo/"
CONTAINER_NAME="genpack-%d" % os.getpid()
def decode_utf8(bin):
    """Decode a UTF-8 byte string into str (helper suitable for map())."""
    return str(bin, "utf-8")
def encode_utf8(str):
    """Encode a text string to UTF-8 bytes (helper suitable for map())."""
    return bytes(str, "utf-8")
def url_readlines(url):
    """Fetch *url* and return its body as an iterator of decoded text lines.

    The response is fully read (readlines) before the connection is closed,
    so the returned lazy map is safe to consume afterwards.
    """
    request = urllib.request.Request(url)
    with urllib.request.urlopen(request) as response:
        raw_lines = response.readlines()
    return map(decode_utf8, raw_lines)
def get_latest_stage3_tarball_url(base,arch):
    """Resolve the URL of the newest stage3 systemd tarball for *arch*.

    Reads Gentoo's latest-stage3-<arch>-systemd.txt index and returns the
    absolute URL of the first usable entry, or None when none is found.
    """
    if not base.endswith('/'):
        base += '/'
    # uname reports "x86_64"/"aarch64" but Gentoo mirrors use "amd64"/"arm64".
    gentoo_arch = {"x86_64": "amd64", "aarch64": "arm64"}.get(arch, arch)
    autobuilds = base + "releases/" + gentoo_arch + "/autobuilds/"
    for raw in url_readlines(autobuilds + "latest-stage3-" + gentoo_arch + "-systemd.txt"):
        entry = re.sub(r'#.*$', "", raw.strip())  # drop comments
        if entry == "":
            continue
        fields = entry.split(" ")
        if len(fields) < 2:  # index rows are "<path> <size>"
            continue
        return autobuilds + fields[0]
    return None  # no usable entry in the index
def get_content_length(url):
    """Return the Content-Length of *url* via a HEAD request, or None if absent."""
    request = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(request) as response:
        value = response.info().get("Content-Length")
    return int(value) if value is not None else None
def lower_exec(lower_dir, cache_dir, portage_dir, cmdline, nspawn_opts=[]):
    """Run *cmdline* inside the profile root via systemd-nspawn (as root).

    cache_dir is bind-mounted read-write at /var/cache and portage_dir
    read-only at /var/db/repos/gentoo; nspawn_opts are extra nspawn flags.
    """
    nspawn = ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", lower_dir,
              "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
              "--capability=CAP_MKNOD,CAP_SYS_ADMIN",
              "--bind-ro=%s:/var/db/repos/gentoo" % os.path.abspath(portage_dir)]
    subprocess.check_call(sudo(nspawn + nspawn_opts + cmdline))
def scan_files(dir):
    """Recursively list the files under *dir* and find the newest mtime.

    Parameters
    ----------
    dir : str -- root directory to walk (symlinks are followed); expected
        without a trailing slash, since the relative path is computed by
        slicing len(dir)+1 characters off each walk root.

    Returns
    -------
    (files, newest) : tuple
        files  -- list of file paths relative to *dir*.
        newest -- most recent st_mtime among them (0 when there are no files).
    """
    files_found = []
    newest_file = 0
    for root, _dirs, files in os.walk(dir, followlinks=True):
        # (The previous "skip when files is empty" guard was redundant:
        # the inner loop simply doesn't run.)
        for f in files:
            mtime = os.stat(os.path.join(root, f)).st_mtime
            newest_file = max(newest_file, mtime)
            # Strip "dir/" off the front to store a dir-relative path.
            files_found.append(os.path.join(root[len(dir) + 1:], f))
    return (files_found, newest_file)
def link_files(srcdir, dstdir):
    """Hard-link every file under srcdir into the same relative path in dstdir.

    Existing destination files are removed first so the link always succeeds.
    Returns the newest st_mtime among the scanned source files.
    """
    to_link, newest_file = scan_files(srcdir)
    for rel_path in to_link:
        src = os.path.join(srcdir, rel_path)
        dst = os.path.join(dstdir, rel_path)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        if os.path.isfile(dst):
            os.unlink(dst)  # replace any stale copy with a fresh hard link
        os.link(src, dst)
    return newest_file
def sync_files(srcdir, dstdir, exclude=None):
    """Copy every file under srcdir into dstdir via rsync, run as root.

    Parameters
    ----------
    srcdir : str -- source tree.
    dstdir : str -- destination tree; ownership is forced to root:root.
    exclude : optional regex (string or compiled); relative paths matching
        at the start (re.match) are skipped.

    Returns the newest st_mtime among the scanned source files.
    """
    files_to_sync, newest_file = scan_files(srcdir)
    for f in files_to_sync:
        if exclude is not None and re.match(exclude, f):
            continue
        # The "/./" component marks where rsync -R starts reproducing the path.
        # (Removed the previously-computed but unused src/dst locals.)
        subprocess.check_call(sudo(["rsync", "-k", "-R", "--chown=root:root", os.path.join(srcdir, ".", f), dstdir]))
    return newest_file
def get_newest_mtime(srcdir):
    """Return the most recent modification time of any file under srcdir."""
    _files, newest = scan_files(srcdir)
    return newest
def put_resource_file(gentoo_dir, module, filename, dst_filename=None, make_executable=False):
    """Copy a resource bundled with *module* into the chroot at gentoo_dir.

    The file lands at dst_filename (defaults to filename) relative to
    gentoo_dir; when make_executable is True, chmod +x is applied via sudo.
    """
    target = filename if dst_filename is None else dst_filename
    dst_path = os.path.join(gentoo_dir, target)
    with Tee(dst_path) as f:
        f.write(importlib.resources.read_binary(module, filename))
    if make_executable:
        subprocess.check_output(sudo(["chmod", "+x", dst_path]))
def load_json_file(path):
    """Parse *path* as JSON; return None when the file does not exist."""
    if os.path.isfile(path):
        with open(path) as f:
            return json.load(f)
    return None
def set_gitignore(workdir):
    """Create workdir/.gitignore containing "/*" unless one already exists."""
    gitignore_path = os.path.join(workdir, ".gitignore")
    if os.path.exists(gitignore_path):
        return
    with open(gitignore_path, "w") as f:
        f.write("/*")
def extract_portage(base, workdir):
    """Download (if stale) and extract the latest portage snapshot into workdir/portage.

    A ".done" marker inside the extracted tree records a completed extraction;
    re-downloading the tarball invalidates the marker, and a tree without the
    marker is moved into workdir/trash before a fresh extraction.
    """
    portage_tarball_url = base + "snapshots/portage-latest.tar.xz"
    portage_tarball = os.path.join(workdir, "portage.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    trash_dir = os.path.join(workdir, "trash")
    done_file = os.path.join(portage_dir, ".done")
    os.makedirs(workdir, exist_ok=True)
    set_gitignore(workdir)
    # Re-download when the local tarball is missing or its size differs from
    # the server's Content-Length (cheap staleness check).
    if not os.path.isfile(portage_tarball) or os.path.getsize(portage_tarball) != get_content_length(portage_tarball_url):
        subprocess.check_call(["wget", "-O", portage_tarball, portage_tarball_url])
        if os.path.exists(done_file): os.remove(done_file)  # new tarball invalidates previous extraction
    # A tree without the marker is stale/partial: move it aside rather than delete.
    if os.path.isdir(portage_dir) and not os.path.exists(done_file):
        os.makedirs(trash_dir, exist_ok=True)
        os.rename(portage_dir, os.path.join(trash_dir, str(uuid.uuid4())))
    if not os.path.isdir(portage_dir):
        os.makedirs(portage_dir, exist_ok=True)
        print("Extracting portage...")
        subprocess.check_call(sudo(["tar", "xpf", portage_tarball, "--strip-components=1", "-C", portage_dir]))
        with open(done_file, "w") as f:
            pass  # empty marker file: extraction completed
def main(base, workdir, arch, sync, bash, artifact, outfile=None, profile=None):
    """Build (or refresh) the Gentoo profile root for *arch*, then build *artifact*.

    Pipeline: download/extract stage3 -> overlay profile files -> emerge the
    world inside the chroot -> build the artifact tree -> pack it to squashfs.
    Timestamp marker files (.stage3-done, .done, .genpack/packages) are used to
    skip stages whose inputs have not changed.

    Parameters: base is the mirror URL; sync forces emerge --sync; bash drops
    into an interactive shell in the chroot; artifact "none" builds only the
    profile, "bash" opens a shell instead of building; outfile "-" boots the
    artifact tree with nspawn instead of packing.

    Returns the packed output path, or None when nothing was packed.
    """
    artifact_dir = os.path.join(".", "artifacts", artifact)
    build_json = load_json_file(os.path.join(artifact_dir, "build.json"))
    if profile is None:
        # No --profile given: fall back to "default", letting build.json override.
        profile = "default"
        if build_json and "profile" in build_json: profile = build_json["profile"]
    stage3_tarball_url = get_latest_stage3_tarball_url(base,arch)
    arch_workdir = os.path.join(workdir, arch)
    os.makedirs(arch_workdir, exist_ok=True)
    set_gitignore(workdir)
    stage3_tarball = os.path.join(arch_workdir, "stage3.tar.xz")
    portage_dir = os.path.join(workdir, "portage")
    profile_workdir = os.path.join(arch_workdir, "profiles", profile)
    cache_dir = os.path.join(profile_workdir, "cache")
    gentoo_dir = os.path.join(profile_workdir, "root")
    repos_dir = os.path.join(gentoo_dir, "var/db/repos/gentoo")
    usr_local_dir = os.path.join(gentoo_dir, "usr/local")
    trash_dir = os.path.join(workdir, "trash")
    # Re-download stage3 when missing or size-mismatched against the server.
    if not os.path.isfile(stage3_tarball) or os.path.getsize(stage3_tarball) != get_content_length(stage3_tarball_url):
        subprocess.check_call(["wget", "-O", stage3_tarball, stage3_tarball_url])
    stage3_done_file = os.path.join(gentoo_dir, ".stage3-done")
    stage3_done_file_time = os.stat(stage3_done_file).st_mtime if os.path.isfile(stage3_done_file) else None
    # Rebuild the root when it was never extracted or a newer stage3 arrived.
    if not stage3_done_file_time or stage3_done_file_time < os.stat(stage3_tarball).st_mtime:
        if os.path.isdir(gentoo_dir):
            # Move the stale root aside (deleting is slow and riskier).
            os.makedirs(trash_dir, exist_ok=True)
            os.rename(gentoo_dir, os.path.join(trash_dir, str(uuid.uuid4())))
        os.makedirs(repos_dir, exist_ok=True)
        print("Extracting stage3...")
        subprocess.check_call(sudo(["tar", "xpf", stage3_tarball, "--strip-components=1", "-C", gentoo_dir]))
        kernel_config_dir = os.path.join(gentoo_dir, "etc/kernels")
        subprocess.check_call(sudo(["mkdir", "-p", kernel_config_dir]))
        # Open up the dirs this (non-root) script needs to write into later.
        subprocess.check_call(sudo(["chmod", "-R", "o+rw",
            os.path.join(gentoo_dir, "etc/portage"), os.path.join(gentoo_dir, "usr/src"),
            os.path.join(gentoo_dir, "var/db/repos"), os.path.join(gentoo_dir, "var/cache"),
            kernel_config_dir, usr_local_dir]))
        with open(os.path.join(gentoo_dir, "etc/portage/make.conf"), "a") as f:
            # Portage sandboxing does not work inside the nspawn container.
            f.write('FEATURES="-sandbox -usersandbox -network-sandbox"\n')
        with open(stage3_done_file, "w") as f:
            pass  # marker: stage3 extraction completed
    # Overlay the profile's files onto the root (hard links, so cheap).
    newest_file = link_files(os.path.join(".", "profiles", profile), gentoo_dir)
    # remove irrelevant arch dependent settings
    for i in glob.glob(os.path.join(gentoo_dir, "etc/portage/package.*/arch-*")):
        if not i.endswith("-" + arch): os.unlink(i)
    # move files under /var/cache out to the shared cache dir (bind-mounted back in)
    os.makedirs(cache_dir, exist_ok=True)
    subprocess.check_call(sudo(["rsync", "-a", "--remove-source-files", os.path.join(gentoo_dir,"var/cache/"), cache_dir]))
    # Install the bundled init sources and helper scripts into the root.
    put_resource_file(gentoo_dir, initlib, "initlib.cpp")
    put_resource_file(gentoo_dir, initlib, "initlib.h")
    put_resource_file(gentoo_dir, initlib, "fat.cpp")
    put_resource_file(gentoo_dir, initlib, "fat.h")
    put_resource_file(gentoo_dir, init, "init.cpp")
    put_resource_file(gentoo_dir, init, "init.h")
    put_resource_file(gentoo_dir, util, "build-kernel.py", "usr/local/sbin/build-kernel", True)
    put_resource_file(gentoo_dir, util, "with-mysql.py", "usr/local/sbin/with-mysql", True)
    put_resource_file(gentoo_dir, util, "download.py", "usr/local/bin/download", True)
    put_resource_file(gentoo_dir, util, "install-system-image", "usr/sbin/install-system-image", True)
    put_resource_file(gentoo_dir, util, "expand-rw-layer", "usr/sbin/expand-rw-layer", True)
    put_resource_file(gentoo_dir, util, "do-with-lvm-snapshot", "usr/sbin/do-with-lvm-snapshot", True)
    put_resource_file(gentoo_dir, util, "rpmbootstrap.py", "usr/sbin/rpmbootstrap", True)
    put_resource_file(gentoo_dir, util, "genbootstrap.py", "usr/sbin/genbootstrap", True)
    put_resource_file(gentoo_dir, util, "genpack-install.cpp", "usr/src/genpack-install.cpp", True)
    if sync: lower_exec(gentoo_dir, cache_dir, portage_dir, ["emerge", "--sync"])
    if bash:
        print("Entering shell... 'exit 1' to abort the process.")
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
    done_file = os.path.join(gentoo_dir, ".done")
    done_file_time = os.stat(done_file).st_mtime if os.path.isfile(done_file) else None
    portage_time = os.stat(os.path.join(portage_dir, "metadata/timestamp")).st_mtime
    newest_file = max(newest_file, portage_time)
    # Re-emerge when any input (profile files or portage tree) is newer than
    # the last completed build, when syncing, or for the profile-only target.
    if (not done_file_time or newest_file > done_file_time or sync or artifact == "none"):
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["emerge", "-uDN", "-bk", "--binpkg-respect-use=y",
            "system", "nano", "gentoolkit", "repoman",
            "strace", "vim", "tcpdump", "netkit-telnetd"])
        # Optional profile-provided post-emerge hook.
        if os.path.isfile(os.path.join(gentoo_dir, "build.sh")):
            lower_exec(gentoo_dir, cache_dir, portage_dir, ["/build.sh"])
        # Housekeeping: rebuild preserved libs, depclean, merge config, clean caches.
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["sh", "-c", "emerge -bk --binpkg-respect-use=y @preserved-rebuild && emerge --depclean && etc-update --automode -5 && eclean-dist -d && eclean-pkg -d"])
        with open(done_file, "w") as f:
            pass  # marker: profile build completed
    if artifact == "none": return None # no build artifact
    elif artifact == "bash":
        lower_exec(gentoo_dir, cache_dir, portage_dir, ["bash"])
        return None
    #else
    ##### building profile done
    ##### build artifact if necessary
    upper_dir = os.path.join(arch_workdir, "artifacts", artifact)
    genpack_packages_file = os.path.join(upper_dir, ".genpack", "packages") # use its timestamp as build date
    # Rebuild the artifact when missing or older than any of its inputs.
    if not os.path.exists(genpack_packages_file) or os.stat(genpack_packages_file).st_mtime < max(os.stat(done_file).st_mtime, get_newest_mtime(artifact_dir), get_newest_mtime(os.path.join(".", "packages"))):
        if os.path.isdir(upper_dir):
            os.makedirs(trash_dir, exist_ok=True)
            subprocess.check_call(sudo(["mv", upper_dir, os.path.join(trash_dir, str(uuid.uuid4()))]))
        build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json)
    # final output
    if outfile is None:
        if build_json and "outfile" in build_json: outfile = build_json["outfile"]
        else: outfile = "%s-%s.squashfs" % (artifact, arch)
    if outfile == "-":
        # "-" means: boot the artifact tree interactively instead of packing it.
        subprocess.check_call(sudo(["systemd-nspawn", "-M", CONTAINER_NAME, "-q", "-D", upper_dir, "--network-veth", "-b"]))
        return None
    #else
    # Pack only when the output is missing or older than the artifact build.
    if not os.path.isfile(outfile) or os.stat(genpack_packages_file).st_mtime > os.stat(outfile).st_mtime:
        compression = build_json["compression"] if build_json and "compression" in build_json else "gzip"
        pack(upper_dir, outfile, compression)
    return outfile
def build_artifact(profile, artifact, gentoo_dir, cache_dir, upper_dir, build_json):
    """Assemble the artifact root filesystem in upper_dir from the built profile root.

    Steps: resolve the package closure (base set + build.json "packages"),
    copy the owned files plus a baseline file list, apply per-package and
    per-artifact overlays (running their pkgbuild/build hooks in nspawn),
    enable systemd services, and write .genpack metadata.
    """
    # Baseline packages every artifact gets, before build.json additions.
    artifact_pkgs = ["gentoo-systemd-integration", "util-linux","timezone-data","bash","openssh", "coreutils", "procps", "net-tools",
        "iproute2", "iputils", "dbus", "python", "rsync", "tcpdump", "ca-certificates","e2fsprogs"]
    if build_json and "packages" in build_json:
        if not isinstance(build_json["packages"], list): raise Exception("packages must be list")
        #else
        artifact_pkgs += build_json["packages"]
    pkg_map = collect_packages(gentoo_dir)
    # Transitive RDEPEND closure of the requested packages.
    pkgs = scan_pkg_dep(gentoo_dir, pkg_map, artifact_pkgs)
    packages_dir = os.path.join(".", "packages")
    files = process_pkgs(gentoo_dir, packages_dir, pkgs)
    # Kernel/initramfs/modules are not package-owned; add them explicitly.
    if os.path.isfile(os.path.join(gentoo_dir, "boot/kernel")): files.append("/boot/kernel")
    if os.path.isfile(os.path.join(gentoo_dir, "boot/initramfs")): files.append("/boot/initramfs")
    if os.path.isdir(os.path.join(gentoo_dir, "lib/modules")): files.append("/lib/modules/.")
    # Baseline dirs and files every image needs ("/x/." = copy recursively).
    files += ["/dev/.", "/proc", "/sys", "/root", "/home", "/tmp", "/var/tmp", "/var/run", "/run", "/mnt"]
    files += ["/etc/passwd", "/etc/group", "/etc/shadow", "/etc/profile.env"]
    files += ["/etc/ld.so.conf", "/etc/ld.so.conf.d/."]
    files += ["/usr/lib/locale/locale-archive"]
    files += ["/bin/sh", "/bin/sed", "/usr/bin/awk", "/usr/bin/python", "/usr/bin/vi", "/usr/bin/nano",
        "/bin/tar", "/usr/bin/unzip",
        "/usr/bin/wget", "/usr/bin/curl", "/usr/bin/telnet",
        "/usr/bin/make", "/usr/bin/diff", "/usr/bin/strings", "/usr/bin/strace",
        "/usr/bin/find", "/usr/bin/xargs", "/usr/bin/less"]
    files += ["/sbin/iptables", "/sbin/ip6tables", "/sbin/iptables-restore", "/sbin/ip6tables-restore", "/sbin/iptables-save", "/sbin/ip6tables-save"]
    if build_json and "files" in build_json:
        if not isinstance(build_json["files"], list): raise Exception("files must be list")
        #else
        files += build_json["files"]
    os.makedirs(os.path.dirname(upper_dir), exist_ok=True)
    subprocess.check_call(sudo(["mkdir", upper_dir]))
    print("Copying files to artifact dir...")
    copy(gentoo_dir, upper_dir, files)
    copyup_gcc_libs(gentoo_dir, upper_dir)
    remove_root_password(upper_dir)
    make_ld_so_conf_latest(upper_dir)
    create_default_iptables_rules(upper_dir)
    set_locale_to_envvar(upper_dir)
    # per-package setup: overlay ./packages/<pkg>/ files and run its pkgbuild hook
    newest_pkg_file = 0
    for pkg in pkgs:
        pkg_wo_ver = strip_ver(pkg)
        package_dir = os.path.join(packages_dir, pkg_wo_ver)
        if not os.path.isdir(package_dir): continue
        #else
        print("Processing package %s..." % pkg_wo_ver)
        # CONTENTS / CONTENTS.* are overlay metadata, not files to install.
        newest_pkg_file = max(newest_pkg_file, sync_files(package_dir, upper_dir, r"^CONTENTS(\.|$)"))
        if os.path.isfile(os.path.join(upper_dir, "pkgbuild")):
            # Run the hook with the artifact tree overlaid on the profile root;
            # the hook removes itself afterwards.
            subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir, "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
                "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
                "-E", "PROFILE=%s" % profile, "-E", "ARTIFACT=%s" % artifact,
                "--capability=CAP_MKNOD",
                "sh", "-c", "/pkgbuild && rm -f /pkgbuild" ]))
    # enable services (defaults plus build.json "services")
    services = ["sshd","systemd-networkd", "systemd-resolved"]
    if build_json and "services" in build_json:
        if not isinstance(build_json["services"], list): raise Exception("services must be list")
        #else
        services += build_json["services"]
    enable_services(upper_dir, services)
    # artifact specific setup: overlay artifact files and run its build hook
    artifact_dir = os.path.join(".", "artifacts", artifact)
    # NOTE(review): newest_artifact_file is computed but never used afterwards.
    newest_artifact_file = max(newest_pkg_file, sync_files(artifact_dir, upper_dir))
    if os.path.isfile(os.path.join(upper_dir, "build")):
        print("Building artifact...")
        subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir,
            "--overlay=+/:%s:/" % os.path.abspath(upper_dir),
            "--bind=%s:/var/cache" % os.path.abspath(cache_dir),
            "/build" ]))
    else:
        print("Artifact build script not found.")
    # Strip build-time-only files from the final image.
    subprocess.check_call(sudo(["rm", "-rf", os.path.join(upper_dir, "build"), os.path.join(upper_dir,"build.json"), os.path.join(upper_dir,"usr/src")]))
    # generate metadata under /.genpack (profile, artifact, package list)
    genpack_metadata_dir = os.path.join(upper_dir, ".genpack")
    subprocess.check_call(sudo(["mkdir", "-p", genpack_metadata_dir]))
    subprocess.check_call(sudo(["chmod", "o+rwx", genpack_metadata_dir]))
    with open(os.path.join(genpack_metadata_dir, "profile"), "w") as f:
        f.write(profile)
    with open(os.path.join(genpack_metadata_dir, "artifact"), "w") as f:
        f.write(artifact)
    with open(os.path.join(genpack_metadata_dir, "packages"), "w") as f:
        for pkg in pkgs:
            f.write(pkg + '\n')
    subprocess.check_call(sudo(["chown", "-R", "root.root", genpack_metadata_dir]))
    subprocess.check_call(sudo(["chmod", "755", genpack_metadata_dir]))
def strip_ver(pkgname):
    """Strip the trailing version (and -rN revision) from a package name.

    Handles both "pn-1.2.3" and "category/pn-1.2.3" forms; names without a
    version component are returned unchanged.

    >>> strip_ver("app-misc/foo-1.2.3-r1")
    'app-misc/foo'
    """
    pkgname = re.sub(r'-r[0-9]+?$', "", pkgname)  # drop the revision suffix first
    last_dash = pkgname.rfind('-')
    # No dash, or a trailing dash with nothing after it (the latter used to
    # raise IndexError): nothing to strip.
    if last_dash < 0 or last_dash + 1 >= len(pkgname):
        return pkgname
    next_to_dash = pkgname[last_dash + 1]
    # Only treat the tail as a version when the dash sits after any category
    # separator and is immediately followed by a digit.
    if pkgname.find('/') < last_dash and '0' <= next_to_dash <= '9':
        return pkgname[:last_dash]
    return pkgname
def collect_packages(gentoo_dir):
    """Index the packages installed under gentoo_dir's var/db/pkg.

    Returns a dict mapping both the bare name ("pn") and the qualified name
    ("category/pn"), with versions stripped, to the list of full
    "category/pn-version" entries found.
    """
    pkg_map = {}
    db_dir = os.path.join(gentoo_dir, "var/db/pkg")
    for category in os.listdir(db_dir):
        cat_dir = os.path.join(db_dir, category)
        if not os.path.isdir(cat_dir):
            continue
        for pn in os.listdir(cat_dir):
            if not os.path.isdir(os.path.join(cat_dir, pn)):
                continue
            cat_pn = "%s/%s" % (category, pn)
            pn_wo_ver = strip_ver(pn)
            # Register the entry under both lookup keys.
            pkg_map.setdefault(pn_wo_ver, []).append(cat_pn)
            pkg_map.setdefault("%s/%s" % (category, pn_wo_ver), []).append(cat_pn)
    return pkg_map
def get_package_set(gentoo_dir, set_name):
    """Read a portage package-set file and return its package atoms.

    Comments (# to end of line) and blank lines are skipped.
    """
    set_path = os.path.join(gentoo_dir, "etc/portage/sets", set_name)
    pkgs = []
    with open(set_path) as f:
        for raw in f:
            entry = re.sub(r'#.*', "", raw).strip()
            if entry != "":
                pkgs.append(entry)
    return pkgs
def split_rdepend(line):
    """Split the first dependency token off an RDEPEND string.

    Returns (token, remainder), where remainder is None when nothing is left.
    An any-of group "|| ( ... )" is kept together as one token, honoring
    nested parentheses.
    """
    if line.startswith("|| ( "):
        depth = 0
        pos = 5  # scanning starts right after the "|| ( " prefix
        while pos < len(line):
            c = line[pos]
            if c == '(':
                depth += 1
            elif c == ')':
                if depth == 0:
                    pos += 1  # include the closing paren in the token
                    break
                depth -= 1
            pos += 1
        rest = line[pos:].strip()
        return (line[:pos], rest if rest != "" else None)
    head, sep, tail = line.partition(' ')
    return (head, tail if sep else None)
def parse_rdepend_line(line, make_optional=False):
    """Parse an RDEPEND string into a set of version-stripped package names.

    Blockers ("!pkg") are dropped; "~pkg" loses its prefix; version operators,
    USE brackets and slot suffixes are removed. Members of "|| ( ... )"
    any-of groups are recursed into and marked optional with a leading '?'.
    """
    tokens = []
    remaining = line
    while remaining is not None and remaining.strip() != "":
        token, remaining = split_rdepend(remaining)
        tokens.append(token)
    pkgs = set()
    for token in tokens:
        group = re.match(r"\|\| \( (.+) \)", token)
        if group:
            # Any-of group: each alternative is merely optional.
            pkgs |= parse_rdepend_line(group.group(1), True)
            continue
        if token[0] == '!':
            continue  # blockers are not dependencies
        if token[0] == '~':
            token = token[1:]
        # Strip version operator prefix, "[use]" suffix, then ":slot" suffix.
        bare = re.sub(r'^(<=|>=|=|<|>)', "", token)
        bare = re.sub(r'\[.+\]$', "", bare)
        bare = re.sub(r':.+$', "", bare)
        stripped = strip_ver(bare)
        pkgs.add('?' + stripped if make_optional else stripped)
    return pkgs
def scan_pkg_dep(gentoo_dir, pkg_map, pkgnames, pkgs = None):
    """Recursively resolve *pkgnames* and their runtime deps into *pkgs*.

    Parameters
    ----------
    gentoo_dir : str -- root containing var/db/pkg and etc/portage/sets.
    pkg_map : dict from collect_packages(); maps (qualified) names to
        installed "category/pn-version" entries.
    pkgnames : iterable of names; "@set" expands a portage package set,
        a leading '?' marks the name optional (missing is not an error).
    pkgs : accumulator set, shared across recursive calls.

    Returns the set of full "category/pn-version" entries in the closure.

    Raises BaseException when a non-optional package is not installed.
    """
    if pkgs is None: pkgs = set()
    for pkgname in pkgnames:
        if pkgname[0] == '@':
            # Package set: expand and recurse into its members.
            scan_pkg_dep(gentoo_dir, pkg_map, get_package_set(gentoo_dir, pkgname[1:]), pkgs)
            continue
        optional = False
        if pkgname[0] == '?':
            optional = True
            pkgname = pkgname[1:]
        if pkgname not in pkg_map:
            if optional: continue
            else: raise BaseException("Package %s not found" % pkgname)
        for cat_pn in pkg_map[pkgname]:
            # (Removed the unused local that stripped cat_pn's version here.)
            if cat_pn in pkgs: continue # already visited; stops dependency cycles
            pkgs.add(cat_pn) # add self before recursing
            rdepend_file = os.path.join(gentoo_dir, "var/db/pkg", cat_pn, "RDEPEND")
            if os.path.isfile(rdepend_file):
                with open(rdepend_file) as f:
                    line = f.read().strip()
                    if len(line) > 0:
                        rdepend_pkgnames = parse_rdepend_line(line)
                        if len(rdepend_pkgnames) > 0: scan_pkg_dep(gentoo_dir, pkg_map, rdepend_pkgnames, pkgs)
    return pkgs
# Exclusion table for artifact packing, built once at import time (the original
# rebuilt the list -- and recompiled the regexes -- on every call, once per file).
_EXCLUDED_PREFIXES = (
    "/run/", "/var/run/", "/var/lock/", "/usr/share/man/", "/usr/share/doc/",
    "/usr/share/gtk-doc/", "/usr/share/info/", "/usr/include/", "/var/cache/",
)
_EXCLUDED_PATTERNS = (
    re.compile(r'^/usr/lib/python[0-9\.]+?/test/'),
    re.compile(r'\.a$'),
    re.compile(r"\/gschemas.compiled$"),
    re.compile(r"\/giomodule.cache$"),
)

def is_path_excluded(path):
    """Return True when *path* should be omitted from the packed artifact.

    Excluded: runtime/volatile dirs, docs/man/info, headers, caches, static
    libraries, python test suites, and regenerable gio/gsettings caches.
    """
    # str.startswith accepts a tuple of prefixes: one C-level call.
    if path.startswith(_EXCLUDED_PREFIXES):
        return True
    return any(p.search(path) for p in _EXCLUDED_PATTERNS)
def process_pkgs(gentoo_dir, packages_dir, pkgs):
    """Collect the installed file list (CONTENTS) for every package in *pkgs*.

    A hand-written CONTENTS file under packages_dir/<pkg>/ overrides the
    portage-recorded one. Only "obj" (regular file) and "sym" (symlink)
    entries are kept, with their hash/mtime or link-target suffix stripped,
    and paths matching the exclusion rules are dropped.
    """
    files = []
    for pkg in pkgs:
        contents_file = os.path.join(gentoo_dir, "var/db/pkg", pkg, "CONTENTS")
        override = os.path.join(packages_dir, strip_ver(pkg), "CONTENTS")
        if os.path.isfile(override):
            contents_file = override
        if not os.path.isfile(contents_file):
            continue
        with open(contents_file) as f:
            for raw in f:
                entry = re.sub(r'#.*$', "", raw).strip()
                if entry == "":
                    continue
                path = None
                if entry.startswith("obj "):
                    # "obj <path> <md5> <mtime>" -> <path>
                    path = re.sub(r' [0-9a-f]+ [0-9]+$', "", entry[4:])
                elif entry.startswith("sym "):
                    # "sym <path> -> <target> ..." -> <path>
                    path = re.sub(r' -> .+$', "", entry[4:])
                if path is not None and not is_path_excluded(path):
                    files.append(path)
    return files
def copy(gentoo_dir, upper_dir, files):
    """Copy the listed paths from gentoo_dir into upper_dir using rsync (as root).

    Entries ending in "/." are copied recursively; everything else is copied
    shallowly (-lptgoD keeps symlinks, perms, ownership, devices). For shallow
    symlinks whose target exists inside gentoo_dir, the target is queued as
    well so the link does not dangle in the artifact.

    Raises BaseException when either rsync invocation fails.
    """
    if not gentoo_dir.endswith('/'):
        gentoo_dir += '/'
    # Pass 1: shallow copy of individual files / symlinks, fed via --files-from=-.
    rsync = subprocess.Popen(sudo(["rsync", "-lptgoD", "--keep-dirlinks", "--files-from=-", gentoo_dir, upper_dir]), stdin=subprocess.PIPE)
    for f in files:
        if f.endswith("/."): continue
        f_wo_leading_slash = re.sub(r'^/', "", f)
        rsync.stdin.write(encode_utf8(f_wo_leading_slash + '\n'))
        src_path = os.path.join(gentoo_dir, f_wo_leading_slash)
        if os.path.islink(src_path):
            link = os.readlink(src_path)
            # Resolve the target relative to the link's own directory.
            target = link[1:] if link[0] == '/' else os.path.join(os.path.dirname(f_wo_leading_slash), link)
            if os.path.exists(os.path.join(gentoo_dir, target)):
                rsync.stdin.write(encode_utf8(target + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0: raise BaseException("rsync returned error code.")
    # Pass 2: deep (recursive) copy of the "dir/." entries.
    # (Removed the dead src_path computation this loop used to carry.)
    rsync = subprocess.Popen(sudo(["rsync", "-ar", "--keep-dirlinks", "--files-from=-", gentoo_dir, upper_dir]), stdin=subprocess.PIPE)
    for f in files:
        if not f.endswith("/."): continue
        rsync.stdin.write(encode_utf8(re.sub(r'^/', "", f) + '\n'))
    rsync.stdin.close()
    if rsync.wait() != 0: raise BaseException("rsync returned error code.")
def copyup_gcc_libs(gentoo_dir, upper_dir):
    # Inside an overlay of gentoo_dir + upper_dir, touch gcc's runtime shared
    # libraries (libstdc++ etc.) so the overlay copies them up into upper_dir,
    # then run ldconfig so the copied libraries are resolvable in the artifact.
    subprocess.check_call(sudo(["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", gentoo_dir, "--overlay=+/:%s:/" % os.path.abspath(upper_dir), "sh", "-c", "touch -h `gcc --print-file-name=`/*.so.* && ldconfig" ]))
def remove_root_password(root_dir):
    # Replace root's locked password field ("*") with an empty one in etc/shadow,
    # allowing password-less root login on the generated image.
    subprocess.check_call(sudo(["sed", "-i", r"s/^root:\*:/root::/", os.path.join(root_dir, "etc/shadow") ]))
def make_ld_so_conf_latest(root_dir):
    # Bump etc/ld.so.conf's mtime so the runtime regenerates the ld.so cache
    # on first boot instead of trusting a stale one.
    subprocess.check_call(sudo(["touch", os.path.join(root_dir, "etc/ld.so.conf") ]))
def create_default_iptables_rules(root_dir):
    # Create empty iptables/ip6tables saved-rules files so the iptables
    # restore services start cleanly on an image with no custom rules.
    subprocess.check_call(sudo(["touch", os.path.join(root_dir, "var/lib/iptables/rules-save"), os.path.join(root_dir, "var/lib/ip6tables/rules-save")]))
def set_locale_to_envvar(root_dir):
    # Rewrite the hard-coded "export LANG=..." line in etc/profile.env so LANG
    # is instead sourced from /etc/locale.conf at login, making the image's
    # locale runtime-configurable.
    subprocess.check_call(sudo(["sed", "-i", r"s/^export LANG=.\+$/\[ -f \/etc\/locale\.conf \] \&\& . \/etc\/locale.conf \&\& export LANG/", os.path.join(root_dir, "etc/profile.env") ]))
def enable_services(root_dir, services):
    """Enable the given systemd service(s) inside root_dir via nspawn.

    *services* may be a single service name or a list of names.
    """
    service_list = services if isinstance(services, list) else [services]
    cmd = ["systemd-nspawn", "-q", "-M", CONTAINER_NAME, "-D", root_dir, "systemctl", "enable"] + service_list
    subprocess.check_call(sudo(cmd))
def pack(upper_dir, outfile, compression="gzip"):
    """Pack upper_dir into a squashfs image at outfile and chown it to the caller.

    Supported compression values: "gzip" (fast level 1), "xz" (x86 BCJ filter,
    1M blocks) and "lzo". Raises BaseException for anything else.
    """
    extra_opts = {
        "xz": ["-comp", "xz", "-b", "1M", "-Xbcj", "x86"],
        "gzip": ["-Xcompression-level", "1"],
        "lzo": ["-comp", "lzo"],
    }
    if compression not in extra_opts:
        raise BaseException("Unknown compression type %s" % compression)
    cmdline = ["mksquashfs", upper_dir, outfile, "-noappend", "-no-exports"] + extra_opts[compression]
    subprocess.check_call(sudo(cmdline))
    # mksquashfs ran as root; give the image back to the invoking user.
    subprocess.check_call(sudo(["chown", "%d:%d" % (os.getuid(), os.getgid()), outfile]))
def clean(workdir, arch, profile=None):
    """Remove downloaded tarballs and the per-arch build trees under workdir.

    *profile* is accepted for interface compatibility but currently unused:
    the whole profiles/ tree for the arch is removed regardless.
    """
    archdir = os.path.join(workdir, arch)
    targets = [
        os.path.join(workdir, "portage.tar.xz"),
        os.path.join(archdir, "stage3.tar.xz"),
        os.path.join(archdir, "profiles"),
        os.path.join(archdir, "artifacts"),
    ]
    subprocess.check_call(sudo(["rm", "-rf"] + targets))
if __name__ == "__main__":
    # CLI entry point: parse options, decide the artifact list, build each one.
    arch = os.uname().machine
    parser = argparse.ArgumentParser()
    parser.add_argument("--base", default=BASE_URL, help="Base URL contains dirs 'releases' 'snapshots'")
    parser.add_argument("--workdir", default="./work", help="Working directory to use")
    parser.add_argument("-o", "--outfile", default=None, help="Output file")
    parser.add_argument("--sync", action="store_true", default=False, help="Run emerge --sync before build gentoo")
    parser.add_argument("--bash", action="store_true", default=False, help="Enter bash before anything")
    parser.add_argument("--qemu", action="store_true", default=False, help="Run generated rootfs using qemu")
    parser.add_argument("--drm", action="store_true", default=False, help="Enable DRM(virgl) when running qemu")
    parser.add_argument("--data-volume", action="store_true", default=False, help="Create data partition when running qemu")
    parser.add_argument("--system-ini", default=None, help="system.ini file when running qemu")
    parser.add_argument("--profile", default=None, help="Override profile")
    parser.add_argument("artifact", default=[], nargs='*', help="Artifacts to build")
    args = parser.parse_args()
    # No artifacts named on the command line: build everything under ./artifacts,
    # or fall back to "default" when the directory is absent/empty.
    artifacts = []
    if len(args.artifact) == 0 and os.path.isdir("./artifacts"):
        for i in os.listdir("./artifacts"):
            if os.path.isdir(os.path.join("./artifacts", i)): artifacts.append(i)
    else:
        artifacts += args.artifact
    if len(artifacts) == 0: artifacts.append("default")
    extract_portage(args.base, args.workdir)
    for artifact in artifacts:
        # "clean" is a pseudo-artifact: wipe the cached downloads/build trees.
        # BUGFIX: the original compared args.artifact (a list) against "clean",
        # which is never true, so the clean path was unreachable and "clean"
        # fell through to the no-such-artifact error below.
        if artifact == "clean":
            clean(args.workdir, arch, args.profile)
            continue
        if artifact != "default" and not os.path.isdir(os.path.join("./artifacts", artifact)):
            raise BaseException("No such artifact: %s" % artifact)
        print("Processing artifact %s..." % artifact)
        outfile = main(args.base, args.workdir, arch, args.sync, args.bash, artifact, args.outfile, args.profile)
        if outfile is not None and args.qemu:
            qemu.run(outfile, os.path.join(args.workdir, "qemu.img"), args.drm, args.data_volume, args.system_ini)
    print("Done.")
    # Anything moved aside during the run is deleted in one sweep at the end.
    trash_dir = os.path.join(args.workdir, "trash")
    if os.path.isdir(trash_dir):
        print("Cleaning up...")
        subprocess.check_call(sudo(["rm", "-rf", trash_dir]))
| 47.292359 | 216 | 0.638778 | 3,967 | 28,470 | 4.398286 | 0.128309 | 0.04436 | 0.048143 | 0.035591 | 0.414718 | 0.344968 | 0.276192 | 0.185809 | 0.136463 | 0.092503 | 0 | 0.005224 | 0.19993 | 28,470 | 601 | 217 | 47.371048 | 0.760778 | 0.018792 | 0 | 0.127049 | 0 | 0.004098 | 0.171925 | 0.025099 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067623 | false | 0.012295 | 0.014344 | 0.006148 | 0.131148 | 0.022541 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a439fafd82d5aa8cd5f33e7668e9b80a3b2590de | 740 | py | Python | Arduino.py | dvcsciencealliance/vertical-farming-raspberry-pi | 58ca9d9677b9eb9251ff5b07cef3bd34bd11a178 | [
"MIT"
] | 5 | 2019-05-13T21:46:01.000Z | 2021-11-15T10:30:55.000Z | Arduino.py | dvcsciencealliance/vertical-farming-raspberry-pi | 58ca9d9677b9eb9251ff5b07cef3bd34bd11a178 | [
"MIT"
] | null | null | null | Arduino.py | dvcsciencealliance/vertical-farming-raspberry-pi | 58ca9d9677b9eb9251ff5b07cef3bd34bd11a178 | [
"MIT"
] | 3 | 2017-05-04T21:17:43.000Z | 2018-01-29T20:34:57.000Z | import time
import serial
from Sensor import *
class Arduino:
    """Wrapper around a serial-connected Arduino board carrying a set of sensors."""

    # Nominal baud rate. NOTE(review): never passed to serial.Serial below —
    # confirm the board really runs at the pyserial default.
    BaudRate = 9600

    def __init__(self, specs):
        """Open the serial port named in *specs* and build its Sensor objects.

        specs is a dict with keys 'name', 'port' and 'sensors'.
        """
        self.name = specs['name']
        self.port = specs['port']
        self.ser = serial.Serial(self.port, timeout=2)
        self.sensors = [Sensor.makeSensor(spec) for spec in specs['sensors']]
        self.on = 0

    def read(self):
        """Poll the board once and return {sensor name: parsed reading}.

        Returns an empty dict when the board sends back an empty line.
        """
        self.ser.write(self.getCommand())
        time.sleep(0.2)  # give the board time to answer before reading
        raw = self.ser.readline().rstrip()
        readings = {}
        if len(raw) > 0:
            # One space-separated field per pin; each sensor picks its own field.
            fields = raw.split(b' ')
            for sensor in self.sensors:
                readings[sensor.name] = sensor.read(fields[sensor.pin])
        return readings

    def getCommand(self):
        """Command value sent to the board: 1 when on, else 0."""
        return 1 if self.on else 0
| 24.666667 | 70 | 0.563514 | 95 | 740 | 4.347368 | 0.442105 | 0.050847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021654 | 0.313514 | 740 | 29 | 71 | 25.517241 | 0.791339 | 0 | 0 | 0 | 0 | 0 | 0.021622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.130435 | 0.043478 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a43a8a2442407e60f6d58636889003043e11690c | 2,716 | py | Python | ymir/backend/src/ymir-controller/controller/invoker/invoker_task_copy.py | elliotmessi/ymir | 3ec8145a1f894778116eb5218de223f6dd805b70 | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/ymir-controller/controller/invoker/invoker_task_copy.py | elliotmessi/ymir | 3ec8145a1f894778116eb5218de223f6dd805b70 | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/ymir-controller/controller/invoker/invoker_task_copy.py | elliotmessi/ymir | 3ec8145a1f894778116eb5218de223f6dd805b70 | [
"Apache-2.0"
] | null | null | null | import logging
import os
from typing import Dict
from controller.invoker.invoker_task_base import TaskBaseInvoker
from controller.utils import utils
from id_definition.error_codes import CTLResponseCode
from proto import backend_pb2
class TaskCopyInvoker(TaskBaseInvoker):
    """Invoker that copies a dataset from another user's repo via ``mir copy``."""

    def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
        """Validate that the copy source (user id, repo id, repo dir) exists."""
        copy_req = request.req_create_task.copy
        logging.info(f"copy_request: {copy_req}")
        if not (copy_req.src_user_id and copy_req.src_repo_id):
            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
                                               message="Invalid src user and/or repo id")
        source_root = os.path.join(sandbox_root, copy_req.src_user_id, copy_req.src_repo_id)
        if not os.path.isdir(source_root):
            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
                                               message=f"Invalid src root: {source_root}")
        return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="")

    @classmethod
    def subtask_count(cls) -> int:
        """A copy task consists of exactly one subtask."""
        return 1

    @classmethod
    def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
                         subtask_id_dict: Dict[int, str]) -> backend_pb2.GeneralResp:
        """Run the actual ``mir copy`` for the single subtask."""
        copy_req = request.req_create_task.copy
        source_root = os.path.join(sandbox_root, copy_req.src_user_id, copy_req.src_repo_id)
        return cls.copying_cmd(repo_root=repo_root,
                               task_id=subtask_id,
                               src_root=source_root,
                               src_dataset_id=copy_req.src_dataset_id,
                               work_dir=subtask_workdir,
                               name_strategy_ignore=copy_req.name_strategy_ignore)

    @staticmethod
    def copying_cmd(repo_root: str, task_id: str, src_root: str, src_dataset_id: str, work_dir: str,
                    name_strategy_ignore: bool) -> backend_pb2.GeneralResp:
        """Build and run the ``mir copy`` command line; returns its GeneralResp."""
        cmd = [utils.mir_executable(), 'copy']
        cmd.extend(['--root', repo_root])
        cmd.extend(['--src-root', src_root])
        cmd.extend(['--dst-rev', f"{task_id}@{task_id}"])
        cmd.extend(['--src-revs', f"{src_dataset_id}@{src_dataset_id}"])
        cmd.extend(['-w', work_dir])
        if name_strategy_ignore:
            cmd.append('--ignore-unknown-types')
        return utils.run_command(cmd)
| 48.5 | 109 | 0.641753 | 333 | 2,716 | 4.888889 | 0.27027 | 0.081081 | 0.060197 | 0.03317 | 0.344595 | 0.28317 | 0.28317 | 0.28317 | 0.28317 | 0.244472 | 0 | 0.004059 | 0.2743 | 2,716 | 55 | 110 | 49.381818 | 0.821918 | 0 | 0 | 0.173913 | 0 | 0 | 0.074374 | 0.02025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.152174 | 0.021739 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a43dd40cc07d8072ff6bb89e915bbbbd9aedea90 | 1,124 | py | Python | web/ask/ask/urls.py | artemsprygin/nginx-conf | f4f5fe0486d309619c5157a3ce690064b9850fd0 | [
"MIT"
] | null | null | null | web/ask/ask/urls.py | artemsprygin/nginx-conf | f4f5fe0486d309619c5157a3ce690064b9850fd0 | [
"MIT"
] | null | null | null | web/ask/ask/urls.py | artemsprygin/nginx-conf | f4f5fe0486d309619c5157a3ce690064b9850fd0 | [
"MIT"
] | 1 | 2021-07-27T17:35:56.000Z | 2021-07-27T17:35:56.000Z | """ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from qa.urls import *
from qa.views import test ###only for now
# Route table for the site. NOTE(review): `url` and `include` are not imported
# explicitly in this module — presumably they arrive via `from qa.urls import *`;
# verify, since only `path` is imported from django.urls and never used.
urlpatterns = [
    url(r'^admin/', admin.site.urls, name='admin'),
    url(r'^login/', test, name='login'),
    url(r'^signup/', test, name='signup'),
    url(r'^question/', include('qa.urls')),  # good practice: tree-structured URL includes
    url(r'^ask/', test, name='ask'),
    url(r'^popular/', test, name='popular'),
    url(r'^new/', test, name='new'),
    url(r'^$', test),
]
a43f90a0bae76d7994937a72ad1940d58309ef25 | 7,443 | py | Python | src/cascade/executor/dismod_runner.py | skspoon/cascade | 00534bd7e2558b880dfeb2e8bb2248a104ba6083 | [
"MIT"
] | null | null | null | src/cascade/executor/dismod_runner.py | skspoon/cascade | 00534bd7e2558b880dfeb2e8bb2248a104ba6083 | [
"MIT"
] | null | null | null | src/cascade/executor/dismod_runner.py | skspoon/cascade | 00534bd7e2558b880dfeb2e8bb2248a104ba6083 | [
"MIT"
] | null | null | null | """
This stage runs Dismod-AT. Dismod gets called in very similar ways.
Let's look at them in order to narrow down configuration of this
stage.::
dismod_at database init
dismod_at database fit <variables>
dismod_at database fit <variables> <simulate_index>
dismod_at database set option <name> <value>
dismod_at database set <table_out> <source>
dismod_at database set <table_out> <source> <sample_index>
dismod_at database depend
dismod_at database simulate <number_simulate>
dismod_at database sample <method> <number_sample>
dismod_at database predict <source>
So how does the cascade know what the input database is?
We decided it would use the name of the stage as the name of
the database. Can a user call dismod_at through the cascade?
Would they want to? I see no reason for it when you can just
call Dismod. You'd call it within the Cascade when it's a known step,
in which case the variables and sources are decided beforehand.
Therefore, the ``command_list`` below will include the entries
that come after the database.::
command_list = [
["init"],
["fit", "both"],
["predict"]
]
That gives enough freedom to specify the command list when
defining the :class:`cascade_at.sequential_batch.Batch`.
"""
import functools
import logging
import os
import asyncio
CODELOG = logging.getLogger(__name__)
MATHLOG = logging.getLogger(__name__)
def dismod_run(command_list):
"""
Returns a batch stage that runs DismodAT on these commands. This
is a builder. The idea is to use this in ``Batch``. These function
names are made up, but it shows how to use ``dismod_run``::
batch = Batch([
("settings", import_settings),
("init", dismod_run([["init"]])),
("country", import_ccov),
("fitpredict", dismod_run([
["fit", "fixed"],
["predict"]
]))
("posteriors", posterior_to_priors)
])
There are two command-line options that affect how DismodAT
runs.
* ``single_use_machine=False`` This is True or False where True
means that we nice the Dismod process in order for it not
to interfere with interactive work on the same machine.
This makes the machine much more responsive with little
loss of efficiency.
* ``subprocess_poll_time=0.5`` This decides how often to check
whether DismodAT is done. It is the time in seconds, as
a floating-point number, to wait between checks. There's
nothing unreasonable about using a tenth of a second.
Args:
command_list (List[List[str]]): A list of commands for DismodAT
to run. It's the part of the command after the database.
Returns:
A callable stage that runs DismodAT on these commands.
"""
return functools.partial(dismod_recipe, command_list)
def dismod_recipe(command_list, context):
"""
Runs Dismod-AT. We generally run Dismod-AT more than once with
a sequence of commands, so we call these a recipe.
Args:
command_list (List[List[str]]): A list of commands for Dismod AT.
context: A context object from which we do I/O.
"""
dismod_executable = context.dismod_executable()
# These are checks we can do before trying to run Dismod. They
# don't need to be exhaustive because we'll see if it doesn't run.
if len(dismod_executable) < 1:
raise ValueError("There is no dismod executable in context")
if not dismod_executable[0].exists():
raise FileNotFoundError(f"Could not find file {dismod_executable}")
using_singularity = len(dismod_executable) > 3 and dismod_executable[0].name == "singularity"
if using_singularity and not dismod_executable[2].exists():
raise FileNotFoundError(f"Could not find singularity image {dismod_executable[2]}")
db_file = context.dismod_file()
if not db_file.exists():
raise FileNotFoundError(f"Could not find file {db_file}")
for command in command_list:
MATHLOG.info("Running dismod_at {} {}".format(db_file, command))
run_and_watch(
dismod_executable + [db_file] + command,
context.params("single_use_machine"),
context.params("subprocess_poll_time"),
)
def reduce_process_priority():
"""
It seems counter-intuitive to ask the process to be slower,
but reducing the priority of the process makes it livable to run
in the background on your laptop, and it won't go appreciably
slower.
"""
os.nice(19)
@asyncio.coroutine
def _read_pipe(pipe, result, callback=lambda text: None):
"""Read from a pipe until it closes.
Args:
pipe: The pipe to read from
result: a list to accumulate the output into
callback: a callable which will be invoked each time data is read from the pipe
"""
while not pipe.at_eof():
text = yield from pipe.read(2 ** 16)
text = text.decode("utf-8")
result.append(text)
callback(text)
def run_and_watch(command, single_use_machine, poll_time):
"""
Runs a command and logs its stdout and stderr while that command
runs. The point is two-fold, to gather stdout from the running
program and to turn any faults into exceptions.
Args:
command (Path|str): The command to run as a rooted path.
single_use_machine (bool): Whether this is running on a machine
where someone is doing interactive work at the same time.
If so, we reduce process priority.
poll_time (int): How many seconds to wait between checking
whether the program is done. This isn't an expensive
operation.
Returns:
str: The output stream.
str: The error stream.
"""
command = [str(a) for a in command]
loop = asyncio.get_event_loop()
return loop.run_until_complete(_async_run_and_watch(command, single_use_machine, poll_time))
@asyncio.coroutine
def _async_run_and_watch(command, single_use_machine, poll_time):
if single_use_machine:
pre_execution_function = reduce_process_priority
else:
pre_execution_function = None
try:
CODELOG.info(f"Forking to {command}")
sub_process = yield from asyncio.subprocess.create_subprocess_exec(
*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, preexec_fn=pre_execution_function
)
except ValueError as ve:
raise Exception(f"Dismod called with invalid arguments {ve}")
except OSError as ose:
raise Exception(f"Dismod couldn't run due to OS error {ose}")
out_list = []
err_list = []
loop = asyncio.get_event_loop()
std_out_task = loop.create_task(_read_pipe(sub_process.stdout, out_list, lambda text: MATHLOG.debug(text)))
std_err_task = loop.create_task(_read_pipe(sub_process.stderr, err_list, lambda text: MATHLOG.error(text)))
yield from sub_process.wait()
yield from std_out_task
yield from std_err_task
if sub_process.returncode != 0:
msg = (
f"return code {sub_process.returncode}\n"
f"stdout {os.linesep.join(out_list)}\n"
f"stderr {os.linesep.join(err_list)}\n"
)
raise Exception("dismod_at failed.\n{}".format(msg))
else:
pass # Return code is 0. Success.
return "".join(out_list), "".join(err_list)
| 35.783654 | 119 | 0.680908 | 1,060 | 7,443 | 4.640566 | 0.309434 | 0.027648 | 0.032527 | 0.011588 | 0.135597 | 0.114861 | 0.114861 | 0.078471 | 0.045538 | 0.036999 | 0 | 0.002821 | 0.238076 | 7,443 | 207 | 120 | 35.956522 | 0.864574 | 0.530834 | 0 | 0.082192 | 0 | 0 | 0.147628 | 0.033084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0.013699 | 0.054795 | 0 | 0.178082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a43fdd0da98368699ded07907d0510b59fd1edbd | 617 | py | Python | rpython/translator/platform/arch/test/test_s390x.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | rpython/translator/platform/arch/test/test_s390x.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | rpython/translator/platform/arch/test/test_s390x.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | import py
import platform
from rpython.translator.platform.arch.s390x import (s390x_cpu_revision,
extract_s390x_cpu_ids)
if platform.machine() != 's390x':
py.test.skip("s390x tests only")
def test_cpuid_s390x():
revision = s390x_cpu_revision()
assert revision != 'unknown', 'the model you are running on might be too old'
def test_read_processor_info():
ids = extract_s390x_cpu_ids("""
processor 0: machine = 12345
processor 1: version = FF, identification = AF
""".splitlines())
assert ids == [(0, None, None, 0x12345),
(1, 'FF', 'AF', 0),
]
| 26.826087 | 81 | 0.65154 | 80 | 617 | 4.8375 | 0.5625 | 0.082687 | 0.082687 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083857 | 0.226904 | 617 | 22 | 82 | 28.045455 | 0.727463 | 0 | 0 | 0 | 0 | 0 | 0.256911 | 0 | 0 | 0 | 0.011382 | 0 | 0.117647 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4459b0fbafee95c54aac59a2ffccbbf009eddf7 | 502 | py | Python | assignments/02_sum/sum.py | michaelandrewblum/be434-fall-2021 | 5c2281a99ece283e7ee7d1873708efbef473f3d3 | [
"MIT"
] | null | null | null | assignments/02_sum/sum.py | michaelandrewblum/be434-fall-2021 | 5c2281a99ece283e7ee7d1873708efbef473f3d3 | [
"MIT"
] | null | null | null | assignments/02_sum/sum.py | michaelandrewblum/be434-fall-2021 | 5c2281a99ece283e7ee7d1873708efbef473f3d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Purpose: Sum any number of inputted integers together.
import argparse
def get_args():
    """Define and parse the command-line arguments: one or more integers to sum."""
    arg_parser = argparse.ArgumentParser(description='Add numbers')
    arg_parser.add_argument(
        'integers',
        metavar='INT',
        type=int,
        nargs='+',  # at least one integer is required
        help='Numbers to add',
    )
    return arg_parser.parse_args()
def main():
    """Print the summands joined by ' + ' followed by their total."""
    nums = get_args().integers
    lhs = ' + '.join(str(n) for n in nums)
    print(lhs + ' = ' + str(sum(nums)))


if __name__ == '__main__':
    main()
a44700a1f4cbe5d05ab638e44bb80fe2126da8f7 | 940 | py | Python | batch3/outputs/Alens_degeneracies.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | batch3/outputs/Alens_degeneracies.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | batch3/outputs/Alens_degeneracies.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | import planckStyle as s
# Rectangle plot of A_L degeneracies: LCDM+A_L chains vs. the base LCDM chain.
g = s.getSubplotPlotter()

roots = ['base_Alens_plikHM_TT_lowl_lowE', 'base_Alens_plikHM_TTTEEE_lowl_lowE', 'base_plikHM_TTTEEE_lowl_lowE',
         'base_Alens_CamSpecHM_TTTEEE_lowl_lowE']

# Replace each chain name with its loaded samples object, after adding a derived
# parameter for the mean-squared deflection angle (rmsdeflect squared).
for idx, chain_name in enumerate(roots):
    samp = g.getSamples(chain_name)
    params = samp.getParams()
    samp.addDerived(params.rmsdeflect ** 2, 'vardeflect', label=r'$\langle |\nabla\phi|^2\rangle\,[{\rm arcmin}^2]$')
    roots[idx] = samp

yparams = [u'Alens', u'vardeflect']
xparams = [u'omegabh2', u'omegach2', 'ns', u'H0', u'omegam', u'sigma8']

# First three datasets filled (solid colors), the CamSpec one as a black dashed outline.
g.rectangle_plot(xparams, yparams, roots=roots, ymarkers=[1, None], filled=[True] * 3 + [False],
                 colors=g.settings.solid_colors[:3] + ['k'], ls=['-'] * 3 + ['--'],
                 legend_labels=[s.planckTT + r' ($\Lambda{\rm CDM}+A_L$)', s.planckall + r' ($\Lambda{\rm CDM}+A_L$)',
                                s.planckall + r' ($\Lambda{\rm CDM}$)'])

g.export()
| 49.473684 | 118 | 0.620213 | 129 | 940 | 4.333333 | 0.527132 | 0.057245 | 0.064401 | 0.064401 | 0.193202 | 0.107335 | 0.107335 | 0.107335 | 0.107335 | 0.107335 | 0 | 0.014342 | 0.184043 | 940 | 18 | 119 | 52.222222 | 0.714472 | 0 | 0 | 0 | 0 | 0 | 0.329787 | 0.167021 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a448be34e5e9ec9f19e29113bb213defc64c3da8 | 17,495 | py | Python | pymatgen/analysis/path_finder.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/analysis/path_finder.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/analysis/path_finder.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | 1 | 2018-10-28T01:41:38.000Z | 2018-10-28T01:41:38.000Z |
"""
This module finds diffusion paths through a structure based on a given potential field.
If you use PathFinder algorithm for your research, please consider citing the following work:
Ziqin Rong, Daniil Kitchaev, Pieremanuele Canepa, Wenxuan Huang, Gerbrand Ceder,
The Journal of Chemical Physics 145 (7), 074112
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import numpy.linalg as la
import scipy.signal
import scipy.stats
from scipy.interpolate import interp1d
import math
import six
from abc import ABCMeta, abstractmethod
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import *
from pymatgen.core.periodic_table import *
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import VolumetricData, Chgcar
__author__ = "Daniil Kitchaev"
__version__ = "1.0"
__maintainer__ = "Daniil Kitchaev, Ziqin Rong"
__email__ = "dkitch@mit.edu, rongzq08@mit.edu"
__status__ = "Development"
__date__ = "March 17, 2015"
class NEBPathfinder:
    """Interpolates a diffusion path between two endpoint structures, relaxing
    selected sites with an elastic-band method in a static potential field."""

    def __init__(self, start_struct, end_struct, relax_sites, v, n_images=20):
        """
        General pathfinder for interpolating between two structures, where the interpolating path is calculated with
        the elastic band method with respect to the given static potential for sites whose indices are given in
        relax_sites, and is linear otherwise.

        :param start_struct, end_struct: Endpoint structures to interpolate between
        :param relax_sites: List of site indices whose interpolation paths should be relaxed
        :param v: Static potential field (3-D grid) to use for the elastic band relaxation
        :param n_images: Number of interpolation images to generate
        """
        self.__s1 = start_struct
        self.__s2 = end_struct
        self.__relax_sites = relax_sites
        self.__v = v
        self.__n_images = n_images
        self.__images = None
        # Path is computed eagerly at construction time.
        self.interpolate()

    def interpolate(self):
        """
        Finds a set of n_images from self.s1 to self.s2, where for all sites except the ones given in relax_sites
        the interpolation is linear (as in pymatgen.core.structure.interpolate), and for the site indices given
        in relax_sites, the path is relaxed by the elastic band method within the static potential V.
        """
        # Start from a plain linear interpolation, then relax only the mobile sites.
        images = self.__s1.interpolate(self.__s2, nimages=self.__n_images, interpolate_lattices=False)
        for site_i in self.__relax_sites:
            start_f = images[0].sites[site_i].frac_coords
            end_f = images[-1].sites[site_i].frac_coords
            # string_relax works in discrete grid coordinates; dr converts one grid
            # step to real (Angstrom) lengths along each lattice vector.
            path = NEBPathfinder.string_relax(NEBPathfinder.__f2d(start_f, self.__v),
                                              NEBPathfinder.__f2d(end_f, self.__v),
                                              self.__v, n_images=(self.__n_images+1),
                                              dr=[self.__s1.lattice.a/self.__v.shape[0],
                                                  self.__s1.lattice.b/self.__v.shape[1],
                                                  self.__s1.lattice.c/self.__v.shape[2]])
            # Move the relaxed site in each image from its linear position onto the path.
            for image_i, image in enumerate(images):
                image.translate_sites(site_i,
                                      NEBPathfinder.__d2f(path[image_i], self.__v) - image.sites[site_i].frac_coords,
                                      frac_coords=True, to_unit_cell=True)
        self.__images = images

    @property
    def images(self):
        """
        Returns a list of structures interpolating between the start and endpoint structures.
        """
        return self.__images

    def plot_images(self, outfile):
        """
        Generates a POSCAR with the calculated diffusion path with respect to the first endpoint.

        :param outfile: Output file for the POSCAR
        """
        # Overlay the relaxed-site positions from every image onto the first image.
        sum_struct = self.__images[0].sites
        for image in self.__images:
            for site_i in self.__relax_sites:
                sum_struct.append(PeriodicSite(image.sites[site_i].specie, image.sites[site_i].frac_coords,
                                               self.__images[0].lattice, to_unit_cell=True, coords_are_cartesian=False))
        # validate_proximity=False because overlaid path points are intentionally close together.
        sum_struct = Structure.from_sites(sum_struct, validate_proximity=False)
        p = Poscar(sum_struct)
        p.write_file(outfile)

    @staticmethod
    def string_relax(start, end, V, n_images=25, dr=None, h=3.0, k=0.17, min_iter=100, max_iter=10000, max_tol=5e-6):
        """
        Implements path relaxation via the elastic band method. In general, the method is to define a path by a set of
        points (images) connected with bands with some elasticity constant k. The images then relax along the forces
        found in the potential field V, counterbalanced by the elastic response of the elastic band. In general the
        endpoints of the band can be allowed to relax also to their local minima, but in this calculation they are kept
        fixed.

        :param start, end: Endpoints of the path in discrete coordinates with respect to the grid in V
        :param V: potential field (3-D array) through which to calculate the path
        :param n_images: number of images used to define the path. In general anywhere from 20 to 40 seems to be good.
        :param dr: Conversion ratio from discrete coordinates to real coordinates for each of the three coordinate vectors
        :param h: Step size for the relaxation. h = 0.1 works reliably, but is slow. h=10 diverges with large gradients
                  but for the types of gradients seen in CHGCARs, works pretty reliably
        :param k: Elastic constant for the band (in real units, not discrete)
        :param min_iter, max_iter: Number of optimization steps the string will take before exiting (even if unconverged)
        :param max_tol: Convergence threshold such that if the string moves by less than max_tol in a step, and at least
                        min_iter steps have passed, the algorithm will terminate. Depends strongly on the size of the
                        gradients in V, but 5e-6 works reasonably well for CHGCARs.
        :return: ndarray of n_images points (in discrete grid coordinates) along the relaxed path
        """
        #
        # This code is based on the MATLAB example provided by
        # Prof. Eric Vanden-Eijnden of NYU
        # (http://www.cims.nyu.edu/~eve2/main.htm)
        #
        print("Getting path from {} to {} (coords wrt V grid)".format(start, end))

        # Set parameters
        if not dr:
            dr = np.array([1.0/V.shape[0], 1.0/V.shape[1], 1.0/V.shape[2]])
        else:
            dr = np.array(dr, dtype=float)
        keff = k * dr * n_images
        h0 = h

        # Initialize string as a straight line from start to end, then
        # reparametrize it to equal arc-length spacing.
        g1 = np.linspace(0, 1, n_images)
        s0 = start
        s1 = end
        s = np.array([g * (s1-s0) for g in g1]) + s0
        ds = s - np.roll(s, 1, axis=0)
        ds[0] = (ds[0] - ds[0])  # zero out the wrap-around segment
        ls = np.cumsum(la.norm(ds, axis=1))
        ls = ls/ls[-1]
        fi = interp1d(ls, s, axis=0)
        s = fi(g1)

        # Evaluate initial distances (for elastic equilibrium)
        ds0_plus = s - np.roll(s, 1, axis=0)
        ds0_minus = s - np.roll(s, -1, axis=0)
        ds0_plus[0] = (ds0_plus[0] - ds0_plus[0])
        ds0_minus[-1] = (ds0_minus[-1] - ds0_minus[-1])

        # Evolve string
        for step in range(0, max_iter):
            if step > min_iter:
                # Gradually decay step size to prevent oscillations
                h = h0 * np.exp(-2.0 * (step - min_iter)/max_iter)
            else:
                h = h0
            # Calculate forces acting on string: gradient of V sampled at each
            # image (with periodic wrapping of grid indices via the modulo).
            dV = np.gradient(V)
            d = V.shape
            s0 = s
            # NOTE(review): all three components divide by dr[0] — presumably
            # dr[1]/dr[2] were intended for the b/c components; confirm upstream.
            edV = np.array([[dV[0][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[0],
                             dV[1][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[0],
                             dV[2][int(pt[0])%d[0]][int(pt[1])%d[1]][int(pt[2])%d[2]] / dr[0]] for pt in s])
            #if(step % 100 == 0):
            #    print(edV)

            # Update according to force due to potential and string elasticity
            ds_plus = s - np.roll(s, 1, axis=0)
            ds_minus = s - np.roll(s, -1, axis=0)
            ds_plus[0] = (ds_plus[0] - ds_plus[0])
            ds_minus[-1] = (ds_minus[-1] - ds_minus[-1])
            Fpot = edV
            Fel = keff * (la.norm(ds_plus) - la.norm(ds0_plus)) * (ds_plus / la.norm(ds_plus))
            Fel += keff * (la.norm(ds_minus) - la.norm(ds0_minus)) * (ds_minus / la.norm(ds_minus))
            s = s - h * (Fpot + Fel)

            # Fix endpoints
            s[0] = s0[0]
            s[-1] = s0[-1]

            # Reparametrize string so the images stay evenly spaced along it
            ds = s - np.roll(s, 1, axis=0)
            ds[0] = (ds[0] - ds[0])
            ls = np.cumsum(la.norm(ds, axis=1))
            ls = ls/ls[-1]
            fi = interp1d(ls, s, axis=0)
            s = fi(g1)

            # Per-step displacement (in real units) as the convergence measure
            tol = la.norm((s-s0) * dr) / n_images / h

            if (tol > 1e10):
                raise ValueError("Pathfinding failed, path diverged! Consider reducing h to avoid divergence.")

            if (step > min_iter and tol < max_tol):
                print("Converged at step {}".format(step))
                break

            if (step % 100 == 0):
                print ("Step {} - ds = {}".format(step, tol))
        return s

    @staticmethod
    def __f2d(frac_coords, v):
        """
        Converts fractional coordinates to discrete coordinates with respect to the grid size of v.
        """
        #frac_coords = frac_coords % 1
        return np.array([int(frac_coords[0]*v.shape[0]),
                         int(frac_coords[1]*v.shape[1]),
                         int(frac_coords[2]*v.shape[2])])

    @staticmethod
    def __d2f(disc_coords, v):
        """
        Converts a point given in discrete coordinates with respect to the grid in v to fractional coordinates.
        """
        return np.array([disc_coords[0]/v.shape[0],
                         disc_coords[1]/v.shape[1],
                         disc_coords[2]/v.shape[2]])
class StaticPotential(six.with_metaclass(ABCMeta)):
    """
    Defines a general static potential for diffusion calculations. Implements grid-rescaling and smearing for the
    potential grid. Also provides a function to normalize the potential from 0 to 1 (recommended).
    """

    def __init__(self, struct, pot):
        """
        :param struct: Structure the potential is defined on
        :param pot: 3-D grid (ndarray) of potential values
        """
        self.__v = pot
        self.__s = struct

    def get_v(self):
        """
        Returns the potential grid.
        """
        return self.__v

    def normalize(self):
        """
        Sets the potential range 0 to 1 (min-max normalization).
        """
        self.__v = self.__v - np.amin(self.__v)
        self.__v = self.__v / np.amax(self.__v)

    def rescale_field(self, new_dim):
        """
        Changes the discretization of the potential field by linear interpolation. This is necessary if the potential field
        obtained from DFT is strangely skewed, or is too fine or coarse. Obeys periodic boundary conditions at the edges of
        the cell. Alternatively useful for mixing potentials that originally are on different grids.

        :param new_dim: tuple giving the numpy shape of the new grid
        """
        v_dim = self.__v.shape
        # Pad one layer on each trailing edge with wrapped values so the
        # interpolation is periodic across the cell boundary.
        padded_v = np.lib.pad(self.__v, ((0,1), (0,1), (0,1)), mode='wrap')
        ogrid_list = np.array([list(c) for c in list(np.ndindex(v_dim[0]+1, v_dim[1]+1, v_dim[2]+1))])
        v_ogrid = padded_v.reshape(((v_dim[0]+1) * (v_dim[1]+1) * (v_dim[2]+1), -1))
        # Target sample points expressed in the old grid's index coordinates.
        ngrid_a, ngrid_b, ngrid_c = np.mgrid[0 : v_dim[0] : v_dim[0]/new_dim[0],
                                             0 : v_dim[1] : v_dim[1]/new_dim[1],
                                             0 : v_dim[2] : v_dim[2]/new_dim[2]]
        v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid, (ngrid_a, ngrid_b, ngrid_c), method='linear').reshape((new_dim[0], new_dim[1], new_dim[2]))
        self.__v = v_ngrid

    def gaussian_smear(self, r):
        """
        Applies an isotropic Gaussian smear of width (standard deviation) r to the potential field. This is necessary to
        avoid finding paths through narrow minima or nodes that may exist in the field (although any potential or
        charge distribution generated from GGA should be relatively smooth anyway). The smearing obeys periodic
        boundary conditions at the edges of the cell.

        :param r: Smearing width in cartesian coordinates, in the same units as the structure lattice vectors
        """
        # Since scaling factor in fractional coords is not isotropic, have to have different radii in 3 directions
        a_lat = self.__s.lattice.a
        b_lat = self.__s.lattice.b
        c_lat = self.__s.lattice.c

        # Conversion factors for discretization of v: smearing radius in grid steps per axis
        v_dim = self.__v.shape
        r_frac = (r / a_lat, r / b_lat, r / c_lat)
        r_disc = (int(math.ceil(r_frac[0] * v_dim[0])), int(math.ceil(r_frac[1] * v_dim[1])),
                  int(math.ceil(r_frac[2] * v_dim[2])))

        # Apply smearing
        # Build the Gaussian filter kernel: distances (in units of r) from the
        # kernel center, measured in cartesian space via the lattice matrix.
        gauss_dist = np.zeros((r_disc[0] * 4 + 1, r_disc[1] * 4 + 1, r_disc[2] * 4 + 1))
        for g_a in np.arange(-2.0 * r_disc[0], 2.0 * r_disc[0] + 1, 1.0):
            for g_b in np.arange(-2.0 * r_disc[1], 2.0 * r_disc[1] + 1, 1.0):
                for g_c in np.arange(-2.0 * r_disc[2], 2.0 * r_disc[2] + 1, 1.0):
                    g = np.array([g_a / v_dim[0], g_b / v_dim[1], g_c / v_dim[2]]).T
                    gauss_dist[int(g_a + r_disc[0])][int(g_b + r_disc[1])][int(g_c + r_disc[2])] = la.norm(np.dot(self.__s.lattice.matrix, g))/r
        gauss = scipy.stats.norm.pdf(gauss_dist)
        gauss = gauss/np.sum(gauss, dtype=float)  # normalize the kernel so total weight is 1
        # Wrap-pad so the convolution is periodic, then keep only the valid region.
        padded_v = np.pad(self.__v, ((r_disc[0], r_disc[0]), (r_disc[1], r_disc[1]), (r_disc[2], r_disc[2])), mode='wrap')
        smeared_v = scipy.signal.convolve(padded_v, gauss, mode='valid')
        self.__v = smeared_v
class ChgcarPotential(StaticPotential):
    """Potential field built from the charge density in a VASP CHGCAR."""

    def __init__(self, chgcar, smear=False, normalize=True):
        """
        :param chgcar: Chgcar object based on a VASP run of the structure of interest (Chgcar.from_file("CHGCAR"))
        :param smear: Whether or not to apply a Gaussian smearing to the potential
        :param normalize: Whether or not to normalize the potential to range from 0 to 1
        """
        density = chgcar.data['total']
        # Divide the total charge by the number of grid points.
        n_points = density.shape[0] * density.shape[1] * density.shape[2]
        density = density / n_points
        StaticPotential.__init__(self, chgcar.structure, density)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
class FreeVolumePotential(StaticPotential):
    """Geometry-based potential: value at each grid point grows with proximity to the
    atoms of the structure, so relaxed paths prefer regions far from any atom."""

    def __init__(self, struct, dim, smear=False, normalize=True):
        """
        :param struct: Unit cell on which to base the potential
        :param dim: Grid size for the potential
        :param smear: Whether or not to apply a Gaussian smearing to the potential
        :param normalize: Whether or not to normalize the potential to range from 0 to 1
        """
        # NOTE(review): this private attribute is distinct from the name-mangled
        # one set by StaticPotential.__init__ and appears unused — confirm.
        self.__s = struct
        field = FreeVolumePotential.__add_gaussians(struct, dim)
        StaticPotential.__init__(self, struct, field)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()

    @staticmethod
    def __add_gaussians(s, dim, r=1.5):
        """Grid of normal-pdf values of (distance to nearest site) / r at each point."""
        dist_grid = np.zeros(dim)
        for ia, ib, ic in np.ndindex(dim[0], dim[1], dim[2]):
            frac = np.array([ia / dim[0], ib / dim[1], ic / dim[2]])
            # Distance to the closest site within a sphere of radius a.
            hits = s.get_sites_in_sphere(frac, s.lattice.a)
            nearest = min(hits, key=lambda hit: hit[1])[1]
            dist_grid[ia][ib][ic] = nearest / r
        return scipy.stats.norm.pdf(dist_grid)
class MixedPotential(StaticPotential):
'''
Implements a potential that is a weighted sum of some other potentials
'''
def __init__(self, potentials, coefficients, smear=False, normalize=True):
"""
:param potentials: List of objects extending the StaticPotential superclass
:param coefficients: Mixing weights for the elements of the potentials list
:param smear: Whether or not to apply a Gaussian smearing to the potential
:param normalize: Whether or not to normalize the potential to range from 0 to 1
"""
v = potentials[0].get_v() * coefficients[0]
s = potentials[0].__s
for i in range(1, len(potentials)):
v += potentials[i].get_v() * coefficients[i]
StaticPotential.__init__(self, s, v)
if smear:
self.gaussian_smear(2.0)
if normalize:
self.normalize()
| 46.405836 | 157 | 0.603658 | 2,527 | 17,495 | 4.012663 | 0.199842 | 0.010848 | 0.004734 | 0.004734 | 0.220907 | 0.171795 | 0.145562 | 0.101874 | 0.089152 | 0.089152 | 0 | 0.027406 | 0.295056 | 17,495 | 376 | 158 | 46.529255 | 0.794778 | 0.359417 | 0 | 0.173077 | 0 | 0 | 0.02718 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.086538 | 0 | 0.216346 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a44b0dc8fca4c0b10f05e699512cb5275897d550 | 1,388 | py | Python | doc/kubernetes/example.py | nlnjnj/ray | 8a829fbdcb09b30af27d09d372b53ed86fdacfaf | [
"Apache-2.0"
] | null | null | null | doc/kubernetes/example.py | nlnjnj/ray | 8a829fbdcb09b30af27d09d372b53ed86fdacfaf | [
"Apache-2.0"
] | null | null | null | doc/kubernetes/example.py | nlnjnj/ray | 8a829fbdcb09b30af27d09d372b53ed86fdacfaf | [
"Apache-2.0"
] | null | null | null | import os
import sys
import time
from collections import Counter
import ray
@ray.remote
def get_hostname(x):
import platform
import time
time.sleep(0.01)
return x + (platform.node(),)
def wait_for_nodes(expected):
# Wait for all nodes to join the cluster.
while True:
num_nodes = len(ray.nodes())
if num_nodes < expected:
print("{} nodes have joined so far, waiting for {} more.".format(
num_nodes, expected - num_nodes))
sys.stdout.flush()
time.sleep(1)
else:
break
def main():
wait_for_nodes(3)
# Check that objects can be transferred from each node to each other node.
for i in range(10):
print("Iteration {}".format(i))
results = [
get_hostname.remote(get_hostname.remote(())) for _ in range(100)
]
print(Counter(ray.get(results)))
sys.stdout.flush()
print("Success!")
sys.stdout.flush()
if __name__ == "__main__":
# NOTE: If you know you're running this on the head node, you can just use "localhost" here.
if "RAY_HEAD_HOST" not in os.environ or os.environ["RAY_HEAD_HOST"] == "":
raise ValueError("RAY_HEAD_HOST environment variable empty. Is there a ray cluster running?")
ray_head = os.environ["RAY_HEAD_HOST"]
ray.init(address=f"{ray_head}:6379")
main()
| 25.703704 | 101 | 0.623919 | 193 | 1,388 | 4.331606 | 0.481865 | 0.050239 | 0.052632 | 0.038278 | 0.047847 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013739 | 0.26585 | 1,388 | 53 | 102 | 26.188679 | 0.806673 | 0.146254 | 0 | 0.131579 | 0 | 0 | 0.172735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.184211 | 0 | 0.289474 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a44bdb80ae36c700d7a3190615830ddbcb5d0287 | 2,563 | py | Python | dataset_builder/domain/dataset_builder_script_service.py | statisticsnorway/microdata-dataset-builder | c58fe5804f146290e1d523536729f1a5b1ac2c73 | [
"Apache-2.0"
] | null | null | null | dataset_builder/domain/dataset_builder_script_service.py | statisticsnorway/microdata-dataset-builder | c58fe5804f146290e1d523536729f1a5b1ac2c73 | [
"Apache-2.0"
] | 3 | 2022-01-18T15:21:49.000Z | 2022-03-07T13:49:03.000Z | dataset_builder/domain/dataset_builder_script_service.py | statisticsnorway/microdata-dataset-builder | c58fe5804f146290e1d523536729f1a5b1ac2c73 | [
"Apache-2.0"
] | null | null | null | import logging
from dataset_builder.exceptions.exceptions import (
BuilderStepError
)
from dataset_builder.adapter import dataset_adapter
from dataset_builder.steps import (
dataset_validator,
dataset_converter,
dataset_transformer,
dataset_enricher,
dataset_mover,
directory_cleaner
)
from dataset_builder.config import log_config
logger = logging.getLogger()
def run_builder(dataset_name: str):
try:
log_config.setup_logger_for_import_pipeline()
logger.info(f'Starting to build dataset {dataset_name}')
data_file_path, metadata_file_path = dataset_validator.run_for_dataset(
dataset_name
)
transformed_metadata_path = dataset_transformer.run(metadata_file_path)
transformed_metadata = dataset_adapter.get_metadata(
transformed_metadata_path
)
temporality_type = transformed_metadata['temporality']
temporal_coverage = transformed_metadata['temporalCoverage']
data_type = transformed_metadata['measureVariable']['dataType']
enriched_data_path = dataset_enricher.run(
data_file_path, temporal_coverage, data_type
)
parquet_file_path = dataset_converter.run(
enriched_data_path, temporality_type, data_type
)
dataset_mover.run(
dataset_name, transformed_metadata_path,
parquet_path=parquet_file_path
)
logger.info('Dataset built sucessfully')
except BuilderStepError as e:
logger.error(str(e))
except Exception as e:
logger.error('Unexpected exception when building dataset')
logger.error(str(e))
finally:
directory_cleaner.run(dataset_name)
def run_metadata_builder(dataset_name: str):
try:
log_config.setup_logger_for_import_pipeline()
logger.info(f'Starting to build metadata {dataset_name}')
metadata_file_path = dataset_validator.run_for_metadata(dataset_name)
transformed_metadata_path = dataset_transformer.run(metadata_file_path)
dataset_mover.run(dataset_name, transformed_metadata_path)
logger.info('Dataset built sucessfully')
except BuilderStepError as e:
logger.error(str(e))
except Exception as e:
logger.error('Unexpected exception when building dataset')
logger.error(str(e))
finally:
directory_cleaner.run(dataset_name)
if __name__ == '__main__':
DATASET_NAME = 'INNTEKT'
# run_builder(DATASET_NAME)
# run_metadata_builder(DATASET_NAME)
| 30.152941 | 79 | 0.712446 | 286 | 2,563 | 6.01049 | 0.216783 | 0.083188 | 0.066899 | 0.069808 | 0.534031 | 0.510762 | 0.510762 | 0.46655 | 0.411867 | 0.411867 | 0 | 0 | 0.218884 | 2,563 | 84 | 80 | 30.511905 | 0.858641 | 0.02341 | 0 | 0.34375 | 0 | 0 | 0.112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.109375 | 0 | 0.140625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4513608264b5ef5fadbdb3f5d4972dad1c038cb | 8,235 | py | Python | chap05/ConvolutionalLayer.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | 3 | 2017-05-23T08:11:44.000Z | 2017-09-25T11:17:57.000Z | chap05/ConvolutionalLayer.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | null | null | null | chap05/ConvolutionalLayer.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | 1 | 2017-06-19T03:36:40.000Z | 2017-06-19T03:36:40.000Z | #!/usr/bin/env python
# -*- coding:utf8 -*-
# Power by viekie. 2017-05-27 08:35:06
import numpy as np
import Filter
class ConvolutionalLayer(object):
def __init__(self, input_width, input_height, channel_number,
filter_width, filter_height, filter_number, zero_padding,
stride, activator, learning_rate):
# 初始化长、宽、深度、filter的长、宽和深度、激活函数和学习速率
self.input_width = input_width
self.input_height = input_height
self.channel_number = channel_number
self.filter_width = filter_width
self.filer_height = filter_height
self.filter_number = filter_number
self.filters = []
self.zero_padding = zero_padding
self.stride = stride
self.activator = activator
self.learning_rate = learning_rate
# 计算输出结果的宽度
self.output_width = \
ConvolutionalLayer.calc_output_size(self.input_width,
self.filter_width,
self.zero_padding,
self.stride)
# 计算输出结果的高度
self.output_height = \
ConvolutionalLayer.calc_output_size(self.input_height,
self.filter_height,
self.zero_padding,
self.stride)
# 定义生成矩阵
self.output_array = \
np.zeros((self.filter_number, self.output_height,
self.output_width))
# 定义filter
for i in range(channel_number):
self.filters.append(Filter(self.filter_width, self.filer_height,
self.channel_number))
@staticmethod
def calc_output_size(input_size, filter_size, padding_size, stride):
'''
计算输出大小
'''
return (input_size + 2 * padding_size - filter_size) / stride + 1
def forward(self, input_array):
'''
向前计算输出值
'''
self.input_array = input_array
# 先进行padding
self.padding_input_array = self.padding(input_array, self.zero_padding)
# 逐个进行卷积计算
for f in self.filters:
self.convolution(self.padding_input_array, f.get_weights(),
f.get_bais(), self.stride, self.output_array[f])
# 对卷积的结果进行Relu函数操作
self.element_wise_op(self.output_array, self.activator.forward)
def elements_wise_op(self, input_array, f):
for i in np.nditer(input_array, op_flags=['readwrite']):
i[...] = f(input_array[i])
def convolution(self, input_array, kernel_array, bais,
stride, output_array):
# 获取输出和卷积核的大小
output_width = output_array[1]
output_height = output_array[0]
kernel_width = kernel_array[-1]
kernel_height = kernel_array[-2]
# 逐个计算卷积结果
for i in range(output_height):
for j in range(output_width):
output_array[i][j] = \
(self.get_patch(input, i, j, stride,
kernel_height, kernel_width) *
kernel_array).sum() + bais
def get_patch(self, input_array, i, j, stride, height, width):
'''
获取需要被卷积的单元, 针对2D和3D分别进行获取
'''
ret_array = []
nd = input_array.ndim
if nd == 3:
sd = input_array.shape[0]
for d in range(sd):
ret_array.append(self.get_sub_array(input_array[d], i, j,
stride, height, width))
else:
ret_array = self.get_sub_array(input_array,
i, j, stride, height, width)
return ret_array
def get_sub_array(self, input_array, i, j, stride, height, width, nd):
'''
获取子矩阵
'''
row_start = i * stride
col_start = j * stride
return input_array[row_start: row_start + height, col_start: col_start]
def padding(self, input_array, zp):
'''
对input_array进行padding
'''
if zp == 0:
return input_array
else:
if input_array.ndim == 3:
input_width = input_array.shape[2]
input_height = input_array.shape[1]
input_depth = input_array.shape[0]
# 初始化要被返回的padding结果
padding_array = np.zeros((input_depth, input_height + 2 * zp,
input_width + 2 * zp))
# 对padding中原本已经存在的元素进行copy
padding_array[:,
zp: zp + input_height,
zp + input_width] = input_array
return padding_array
elif input_array.ndim == 2:
input_width = input_array.shape[1]
input_height = input_array.shape[0]
padding_array = np.zeros((input_height, input_width))
padding_array[zp: zp + input_height,
zp: zp + input_width] = input_array
return padding_array
def bp_sensitivity_map(self, sensitivity_array, activator):
# 扩展为步长为1
expanded_array = \
self.expand_sensitivity_map(sensitivity_array)
expanded_width = expanded_array.shape[2]
# 计算需要padding 的大小
zp = (self.input_width + self.filter_width - 1 - expanded_width) / 2
# 执行padding
padded_array = self.padding(expanded_array, zp)
# 创建存放梯度的数组
self.delta_array = self.create_delta_array()
# 每一个filter都作用于sensitivity map,然后对相应的
# filter对应的结果进行求和
for f in range(self.filter_number):
filter = self.filters[f]
# 权重矩阵180度旋转
rotate_weights = np.array(map(lambda i: np.rot90(i, 2),
filter.get_weights()))
# 创建临时梯度矩阵
delta_array = self.create_delta_array()
for d in range(delta_array.shape[0]):
# 更新每一个channel对应的梯度矩阵
self.convolution(padded_array[f], rotate_weights,
delta_array[d], 1, 0)
# 将每个filter求出的梯度矩阵进行各自求和
self.delta_array += delta_array
# 生成输入向量转换为np的array
derivative_array = np.array(self.input_array)
# 将输入向量求导数矩阵
self.elements_wise_op(derivative_array, self.activator.backward)
# 求出梯度
self.delta_array *= derivative_array
def expand_sensitivity_map(self, sensitivity_array):
depth = sensitivity_array.shape[0]
# 按照步长为1,计算卷积结果行、列数
expand_width = (self.input_width -
self.filter_width + 2 * self.zero_padding + 1)
expand_height = (self.input_height -
self.filter_width + 2 * self.zero_padding + 1)
# 生成扩展后矩阵大小
expand_array = np.zeros((depth, expand_height, expand_width))
# 对相应位置赋值, 其他扩展的位置为0
for i in range(self.output_height):
for j in range(self.output_width):
step_i = i * self.stride
step_j = j * self.stride
expand_array[:, step_i, step_j] = sensitivity_array[:, i, j]
return expand_array
def create_delta_array(self):
return np.zeros((self.channel_number, self.input_height,
self.input_width))
def bp_gradient(self, sensitivity_array):
# 按照步长为1 扩展sensitivity_array
expand_array = self.expand_sensitivity_map(sensitivity_array)
for f in range(self.filter_number):
filter = filter[f]
for d in range(filter.weights.shape[0]):
# 将扩展后的sensitivity_array和input进行卷积,实际就是为了求梯度
self.convolution(self.padding_input_array[d],
expand_array,
filter.weights_gradient[d], 1, 0)
filter.bias_grad = expand_array[f].sum()
def update(self):
'''
filter进行梯度下降更新
'''
for filter in self.filters:
filter.update(self.learning_rate)
| 39.401914 | 79 | 0.548877 | 867 | 8,235 | 4.94579 | 0.178777 | 0.069963 | 0.026119 | 0.01959 | 0.288479 | 0.189832 | 0.105177 | 0.064366 | 0 | 0 | 0 | 0.011355 | 0.369035 | 8,235 | 208 | 80 | 39.591346 | 0.813895 | 0.07796 | 0 | 0.084507 | 0 | 0 | 0.001213 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091549 | false | 0 | 0.014085 | 0.007042 | 0.169014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a451da894d989b6d53bdd9a5869aaa0646748dae | 4,521 | py | Python | src/thunder/text_processing/transform.py | rbracco/thunder-speech | 2b16abf1a14438b1174c168ad8252ad869f31139 | [
"MIT"
] | 8 | 2021-01-26T23:19:51.000Z | 2022-03-02T23:18:46.000Z | src/thunder/text_processing/transform.py | rbracco/thunder-speech | 2b16abf1a14438b1174c168ad8252ad869f31139 | [
"MIT"
] | 27 | 2021-01-28T06:50:11.000Z | 2022-02-27T08:21:12.000Z | src/thunder/text_processing/transform.py | rbracco/thunder-speech | 2b16abf1a14438b1174c168ad8252ad869f31139 | [
"MIT"
] | 3 | 2021-05-06T21:04:23.000Z | 2021-08-09T13:24:50.000Z | # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2021 scart97
__all__ = ["BatchTextTransformer", "TextTransformConfig"]
from dataclasses import dataclass
from typing import List, Optional
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from thunder.text_processing.tokenizer import BPETokenizer, char_tokenizer
from thunder.text_processing.vocab import SimpleVocab, Vocab
@dataclass
class TextTransformConfig:
"""Configuration to create [`BatchTextTransformer`][thunder.text_processing.transform.BatchTextTransformer]
Attributes:
initial_vocab_tokens: List of tokens to create the vocabulary, special tokens should not be included here. required.
simple_vocab: Controls if the used vocabulary will only have the blank token or more additional special tokens. defaults to `False`.
sentencepiece_model: Path to sentencepiece .model file, if applicable.
"""
initial_vocab_tokens: List[str]
simple_vocab: bool = False
sentencepiece_model: Optional[str] = None
@classmethod
def from_sentencepiece(cls, output_dir: str) -> "TextTransformConfig":
"""Load the data from a folder that contains the `tokenizer.vocab`
and `tokenizer.model` outputs from sentencepiece.
Args:
output_dir : Output directory of the sentencepiece training, that contains the required files.
Returns:
Instance of `TextTransformConfig` with the corresponding data loaded.
"""
special_tokens = ["<s>", "</s>", "<pad>", "<unk>"]
vocab = []
with open(f"{output_dir}/tokenizer.vocab", "r") as f:
# Read tokens from each line and parse for vocab
for line in f:
piece = line.split("\t")[0]
if piece in special_tokens:
# skip special tokens
continue
vocab.append(piece)
return cls(
initial_vocab_tokens=vocab,
sentencepiece_model=f"{output_dir}/tokenizer.model",
)
class BatchTextTransformer(nn.Module):
def __init__(self, cfg: TextTransformConfig):
"""That class is the glue code that uses all of the text processing
functions to encode/decode an entire batch of text at once.
Args:
cfg: required config to create instance
"""
super().__init__()
self.vocab = (
SimpleVocab(cfg.initial_vocab_tokens)
if cfg.simple_vocab
else Vocab(cfg.initial_vocab_tokens)
)
self.tokenizer = (
BPETokenizer(cfg.sentencepiece_model)
if cfg.sentencepiece_model
else char_tokenizer
)
def encode(self, items: List[str], return_length: bool = True, device=None):
tokenized = [self.tokenizer(x) for x in items]
expanded_tokenized = [self.vocab.add_special_tokens(x) for x in tokenized]
encoded = [
self.vocab.numericalize(x).to(device=device) for x in expanded_tokenized
]
encoded_batched = pad_sequence(
encoded, batch_first=True, padding_value=self.vocab.pad_idx
)
if return_length:
lengths = torch.LongTensor([len(it) for it in encoded]).to(device=device)
return encoded_batched, lengths
else:
return encoded_batched
@torch.jit.export
def decode_prediction(
self, predictions: torch.Tensor, remove_repeated: bool = True
) -> List[str]:
"""
Args:
predictions : Tensor of shape (batch, time)
remove_repeated: controls if repeated elements without a blank between them will be removed while decoding
Returns:
A list of decoded strings, one for each element in the batch.
"""
out_list: List[str] = []
for element in predictions:
# Remove consecutive repeated elements
if remove_repeated:
element = torch.unique_consecutive(element)
# Map back to string
out = self.vocab.decode_into_text(element)
# Join prediction into one string
out = "".join(out)
# _ is a special char only present on sentencepiece
out = out.replace("▁", " ")
out = self.vocab.remove_special_tokens(out)
out_list.append(out)
return out_list
| 35.880952 | 140 | 0.639681 | 524 | 4,521 | 5.389313 | 0.360687 | 0.032224 | 0.03187 | 0.017705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002166 | 0.285114 | 4,521 | 125 | 141 | 36.168 | 0.871287 | 0.337094 | 0 | 0 | 0 | 0 | 0.048313 | 0.019893 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.102941 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a454242a5d2a6fda9bc26f402c83e116c412e094 | 19,801 | py | Python | experiments/book_comparison.py | LasLitz/ma-doc-embeddings | e6edbb64a766b7906179b0cb767606c6f65cddb9 | [
"MIT"
] | 1 | 2022-01-10T20:29:42.000Z | 2022-01-10T20:29:42.000Z | experiments/book_comparison.py | LasLitz/ma-doc-embeddings | e6edbb64a766b7906179b0cb767606c6f65cddb9 | [
"MIT"
] | null | null | null | experiments/book_comparison.py | LasLitz/ma-doc-embeddings | e6edbb64a766b7906179b0cb767606c6f65cddb9 | [
"MIT"
] | null | null | null | import os
from collections import defaultdict
import random
from typing import Dict, List
import pandas as pd
from scipy.stats import stats
from lib2vec.corpus_structure import Corpus
from experiments.predicting_high_rated_books import mcnemar_sig_text, chi_square_test
from lib2vec.vectorization import Vectorizer
from lib2vec.vectorization_utils import Vectorization
import numpy as np
def get_percentage_of_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
                                        facet_mapping: Dict[str, str], use_sum: bool):
    """Compute the fraction of survey triples where the model agrees with the human choice.

    For each assessed row, the pairwise facet similarities of the three books
    are computed; the model is counted as correct when the pair picked by the
    annotator is strictly more similar than both other pairs.

    :param vectors: loaded document vectors (as produced by Vectorization)
    :param human_assessment_df: survey rows with book ids, facet, and selection
    :param doc_id_mapping: maps survey book numbers to corpus document ids
    :param facet_mapping: maps survey facet names to vector facet suffixes
    :param use_sum: if True, ignore the row's facet and score on the "total" facet
    :return: (per-facet accuracy dict incl. "all_facets", flat 0/1 outcome list,
              per-facet 0/1 outcome lists)
    """
    correctly_assessed = []
    facet_wise = defaultdict(list)
    random_baseline = False  # flip manually to score a random-guess baseline instead
    skip_count = 0
    # Collects the inter-annotator agreement of rows the model got right/wrong.
    agreement_store = defaultdict(list)
    for _, row in human_assessment_df.iterrows():
        doc_a = doc_id_mapping[row["Book 1"]]
        doc_b = doc_id_mapping[row["Book 2"]]
        doc_c = doc_id_mapping[row["Book 3"]]
        facet = facet_mapping["total"] if use_sum else facet_mapping[row["Facet"]]
        if row["Selection"] in ("skip", "unsure"):
            skip_count += 1
            continue
        answer_nr = int(row["Selected Answer Nr."])
        if random_baseline:
            hit = 1 if answer_nr == random.randint(1, 3) else 0
            correctly_assessed.append(hit)
            facet_wise[row["Facet"]].append(hit)
        else:
            # Pair order matches the survey's answer numbering:
            # 1 = (book1, book2), 2 = (book1, book3), 3 = (book2, book3).
            sims = [
                Vectorization.facet_sim(model_vectors=vectors, doc_id_a=doc_a, doc_id_b=doc_b, facet_name=facet),
                Vectorization.facet_sim(model_vectors=vectors, doc_id_a=doc_a, doc_id_b=doc_c, facet_name=facet),
                Vectorization.facet_sim(model_vectors=vectors, doc_id_a=doc_b, doc_id_b=doc_c, facet_name=facet),
            ]
            chosen = answer_nr - 1
            # Correct only if the human-chosen pair is STRICTLY the most similar.
            hit = 0 <= chosen < 3 and all(sims[chosen] > sims[other]
                                          for other in range(3) if other != chosen)
            if hit:
                correctly_assessed.append(1)
                facet_wise[row["Facet"]].append(1)
                agreement_store["True"].append(row["Agreement"])
            else:
                correctly_assessed.append(0)
                agreement_store["False"].append(row["Agreement"])
                facet_wise[row["Facet"]].append(0)
    print("False:", np.mean(agreement_store["False"]))
    print("True:", np.mean(agreement_store["True"]))
    result_scores = {facet_name: sum(outcomes) / len(outcomes)
                     for facet_name, outcomes in facet_wise.items()}
    result_scores["all_facets"] = sum(correctly_assessed) / len(correctly_assessed)
    return result_scores, correctly_assessed, facet_wise
def correlation_for_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
                                      facet_mapping: Dict[str, str], use_sum: bool):
    """Correlate the model's preferred pair with the human-selected pair per facet.

    For each assessed row, the model "predicts" the answer number (1-3) of the
    strictly most similar book pair; the Spearman rank correlation between
    predicted and human labels is then computed per facet and over all facets.

    :param vectors: loaded document vectors (as produced by Vectorization)
    :param human_assessment_df: survey rows with book ids, facet, and selection
    :param doc_id_mapping: maps survey book numbers to corpus document ids
    :param facet_mapping: maps survey facet names to vector facet suffixes
    :param use_sum: if True, ignore the row's facet and score on the "total" facet
    :return: dict facet -> formatted |spearman| string, prefixed with "*" when p < 0.05
    """
    ground_truth = defaultdict(list)
    predicted = defaultdict(list)
    skip_count = 0
    for _, row in human_assessment_df.iterrows():
        # Check the skip condition before resolving ids so skipped rows can
        # never trigger a KeyError in the doc-id mapping.
        if row["Selection"] in ("skip", "unsure"):
            skip_count += 1
            continue
        book1 = doc_id_mapping[row["Book 1"]]
        book2 = doc_id_mapping[row["Book 2"]]
        book3 = doc_id_mapping[row["Book 3"]]
        facet = facet_mapping["total"] if use_sum else facet_mapping[row["Facet"]]
        # Pair order matches the survey's answer numbering:
        # 1 = (book1, book2), 2 = (book1, book3), 3 = (book2, book3).
        sim_1 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book2, facet_name=facet)
        sim_2 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book3, facet_name=facet)
        sim_3 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book2, doc_id_b=book3, facet_name=facet)
        if sim_1 > sim_2 and sim_1 > sim_3:
            pred_label = 1
        elif sim_2 > sim_1 and sim_2 > sim_3:
            pred_label = 2
        elif sim_3 > sim_1 and sim_3 > sim_2:
            pred_label = 3
        else:
            # Tie between pairs: no strict winner, mark with a sentinel.
            print("warning")
            pred_label = -1
        human_label = int(row["Selected Answer Nr."])
        ground_truth[row["Facet"]].append(human_label)
        ground_truth["all_facets"].append(human_label)
        predicted[row["Facet"]].append(pred_label)
        predicted["all_facets"].append(pred_label)
    print(f"{skip_count} times skipped!")
    result_scores = {}
    for facet_name, ground_truth_labels in ground_truth.items():
        # spearmanr returns (correlation, pvalue); unpack instead of indexing.
        correlation, p_value = stats.spearmanr(ground_truth_labels, predicted[facet_name])
        spearman = f'{abs(correlation):.3f}'
        if p_value < 0.05:
            spearman = f"*{spearman}"
        result_scores[facet_name] = spearman
    return result_scores
def load_vectors_from_properties(number_of_subparts, corpus_size, data_set,
                                 filter_mode, vectorization_algorithm):
    """Resolve the vector file for the given corpus properties and load it.

    :return: (loaded vectors, whether the algorithm name carries a "_sum"
              suffix, i.e. the summed/total facet should be used for scoring)
    """
    use_sum = "_sum" in vectorization_algorithm
    vec_path = Vectorization.build_vec_file_name(
        number_of_subparts,
        corpus_size,
        data_set,
        filter_mode,
        vectorization_algorithm,
        "real",
        allow_combination=True,
    )
    vectors, _ = Vectorization.my_load_doc2vec_format(vec_path)
    return vectors, use_sum
def calculate_vectors(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
    """Ensure a vector file exists for every (filter mode, algorithm) pair.

    Loads the corpus once per filter mode and computes any missing vector
    file with the corresponding vectorization algorithm.
    """
    for filter_mode in filters:
        corpus = Corpus.fast_load("all",
                                  "no_limit",
                                  data_set_name,
                                  filter_mode,
                                  "real",
                                  load_entities=False)
        for algorithm_name in vec_algorithms:
            # "_sum" is only an aggregation suffix, not a separate model:
            # strip it to resolve the underlying vectorization algorithm.
            if "_sum" in algorithm_name:
                algorithm_name = algorithm_name.replace("_sum", "")
            vec_file_name = Vectorization.build_vec_file_name('all',
                                                              'no_limit',
                                                              data_set_name,
                                                              filter_mode,
                                                              algorithm_name,
                                                              'real')
            if not os.path.isfile(vec_file_name):
                Vectorizer.algorithm(input_str=algorithm_name,
                                     corpus=corpus,
                                     save_path=vec_file_name,
                                     return_vecs=False)
def _contingency_table(outcomes_a: List[int], outcomes_b: List[int]) -> List[List[int]]:
    """Build the 2x2 contingency table for McNemar's test.

    ``outcomes_a`` / ``outcomes_b`` are parallel per-question correctness
    indicators (1 = algorithm agreed with the human choice, 0 = it did not)
    for two algorithms.
    """
    true_true = true_false = false_true = false_false = 0
    for e1, e2 in zip(outcomes_a, outcomes_b):
        if e1 and e2:
            true_true += 1
        elif e1:
            true_false += 1
        elif e2:
            false_true += 1
        else:
            false_false += 1
    return [[true_true, true_false],
            [false_true, false_false]]


def evaluate(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
    """Score vectorization algorithms against the human triple-similarity survey.

    For every (filter mode, algorithm) pair the stored vectors are loaded and
    evaluated in two ways: per-facet accuracy (share of triples where the
    model's most similar pair matches the human choice) and Spearman
    correlation between predicted and selected answers. Results are written to
    CSV and printed as LaTeX tables; a McNemar / chi-square significance
    analysis is run for one fixed pair of algorithms.
    """
    human_assessment_df = pd.read_csv("../results/human_assessment/gutenberg_classic_20/human_assessed_complete.csv")
    print(len(human_assessment_df.index))
    # Drop rows where the annotator was unsure; "skip" selections are filtered
    # again inside the scoring functions.
    human_assessment_df = human_assessment_df.loc[(human_assessment_df['Selection'] != "unsure")]
    print(len(human_assessment_df.index))
    # Maps the survey's running book numbers to corpus document ids.
    survey_id2doc_id = {1: "cb_17",
                        2: "cb_2",
                        3: "cb_0",
                        4: "cb_1",
                        5: "cb_3",
                        6: "cb_4",
                        7: "cb_5",
                        8: "cb_6",
                        9: "cb_9",
                        10: "cb_11",
                        11: "cb_12",
                        12: "cb_13",
                        13: "cb_14",
                        14: "cb_15",
                        15: "cb_8",
                        16: "cb_7",
                        17: "cb_10",
                        18: "cb_18",
                        19: "cb_19",
                        20: "cb_16",
                        }
    # Survey facet name -> facet suffix used by the vectorization.
    facets = {"location": "loc", "time": "time", "atmosphere": "atm", "content": "cont", "plot": "plot", "total": ""}
    tuples = []
    correlation_tuples = []
    correctness_table = {}        # algorithm -> flat list of 0/1 correctness outcomes
    correctness_table_facet = {}  # algorithm -> facet -> list of 0/1 outcomes
    for filter_mode in filters:
        for vec_algorithm in vec_algorithms:
            filtered_dataset = f'{data_set_name}_{filter_mode}'
            vecs, use_sum = load_vectors_from_properties(number_of_subparts="all",
                                                         corpus_size="no_limit",
                                                         data_set=data_set_name,
                                                         filter_mode=filter_mode,
                                                         vectorization_algorithm=vec_algorithm)
            corr_scores = correlation_for_correctly_labeled(vectors=vecs, human_assessment_df=human_assessment_df,
                                                            doc_id_mapping=survey_id2doc_id, facet_mapping=facets,
                                                            use_sum=use_sum)
            correlation_tuples.append((filtered_dataset, vec_algorithm, corr_scores["total"],
                                       corr_scores["time"], corr_scores["location"],
                                       corr_scores["plot"], corr_scores["atmosphere"], corr_scores["content"],
                                       corr_scores["all_facets"]))
            scores, cor_ass, facet_wise = get_percentage_of_correctly_labeled(vectors=vecs,
                                                                              human_assessment_df=human_assessment_df,
                                                                              doc_id_mapping=survey_id2doc_id,
                                                                              facet_mapping=facets,
                                                                              use_sum=use_sum)
            correctness_table[vec_algorithm] = cor_ass
            correctness_table_facet[vec_algorithm] = facet_wise
            record = (filtered_dataset, vec_algorithm, scores["total"], scores["time"],
                      scores["location"], scores["plot"],
                      scores["atmosphere"], scores["content"], scores["all_facets"])
            tuples.append(record)
            print(record)

    try:
        # Significance analysis for one fixed pair of algorithms; guarded by
        # KeyError because the pair may not be part of this run.
        algo1 = "bert_pt"
        algo2 = "book2vec_adv_dbow_pca"
        table = _contingency_table(correctness_table[algo1], correctness_table[algo2])
        print(table)
        print()
        print("Overall")
        mcnemar_sig_text(table)
        for facet in facets:
            table = _contingency_table(correctness_table_facet[algo1][facet],
                                       correctness_table_facet[algo2][facet])
            print()
            print(table)
            print(facet)
            mcnemar_sig_text(table)
        # Bug fix: previously algo1 was compared against itself, which made
        # the chi-square test meaningless.
        chi_square_test(correctness_table[algo1], correctness_table[algo2])
    except KeyError:
        # One of the two reference algorithms was not evaluated in this run.
        pass

    result_df = pd.DataFrame(tuples, columns=["Data set", "Algorithm", "Total", "Time",
                                              "Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
    result_df = result_df.round(3)
    result_df.to_csv("results/human_assessment/performance.csv", index=False)
    print(result_df.to_latex(index=False))

    corr_df = pd.DataFrame(correlation_tuples, columns=["Data set", "Algorithm", "Total", "Time",
                                                        "Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
    corr_df.to_csv("results/human_assessment/correlation_results.csv", index=False)
    print(corr_df.to_latex(index=False))
if __name__ == '__main__':
    data_set = "classic_gutenberg"
    # Embedding algorithms to evaluate. The many commented-out historical
    # variants (doc2vec / bert / book2vec ablations) were removed as dead
    # code; they remain recoverable from version-control history.
    algorithms = [
        "bert_pt",
        "book2vec_dbow_avg",
        "book2vec_dbow_wo_raw_avg",
        "book2vec_dbow_net_only_avg",
        "book2vec_dbow_net_avg",
        "book2vec_advn_dbow_pca",
        "book2vec_advn_dbow_avg",
        "book2vec_advn_window_pca",
        "book2vec_advn_dbow_window_avg",
    ]
    # Corpus pre-filter configurations to evaluate against.
    filters = [
        "specific_words_strict",
    ]
    calculate_vectors(data_set, algorithms, filters)
    evaluate(data_set, algorithms, filters)
| 44.697517 | 121 | 0.55477 | 2,129 | 19,801 | 4.757163 | 0.121184 | 0.049763 | 0.029621 | 0.01876 | 0.56872 | 0.498321 | 0.43049 | 0.411532 | 0.374803 | 0.340344 | 0 | 0.030798 | 0.340791 | 19,801 | 442 | 122 | 44.798643 | 0.745116 | 0.248321 | 0 | 0.410714 | 0 | 0 | 0.092655 | 0.027016 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0.010714 | 0.039286 | 0 | 0.067857 | 0.053571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a45592f450830cd65a28e51d131d98bb598da933 | 12,729 | py | Python | armi/physics/neutronics/parameters.py | crisobg1/armi | 38d9febdbec7ab8a67dd9b8e50780e11ea127022 | [
"Apache-2.0"
] | 1 | 2020-10-14T15:18:11.000Z | 2020-10-14T15:18:11.000Z | armi/physics/neutronics/parameters.py | crisobg1/Framework | 87b56c2cf286b75e7cc2c02a1e2886d6ce3037b8 | [
"Apache-2.0"
] | null | null | null | armi/physics/neutronics/parameters.py | crisobg1/Framework | 87b56c2cf286b75e7cc2c02a1e2886d6ce3037b8 | [
"Apache-2.0"
] | 1 | 2020-08-26T09:02:06.000Z | 2020-08-26T09:02:06.000Z | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parameter definitions for the Neutronics Plugin.
We hope neutronics plugins that compute flux will use ``mgFlux``, etc.,
which will enable modular construction of apps.
"""
import numpy
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor.blocks import Block
from armi.reactor.reactors import Core
def getNeutronicsParameterDefinitions():
    """Return ParameterDefinitionCollections for each appropriate ArmiObject."""
    definitions = {}
    definitions[Block] = _getNeutronicsBlockParams()
    definitions[Core] = _getNeutronicsCoreParams()
    return definitions
def _getNeutronicsBlockParams():
    """Build and return the neutronics parameter definitions for Blocks."""
    pDefs = parameters.ParameterDefinitionCollection()

    with pDefs.createBuilder() as pb:

        pb.defParam(
            "axMesh",
            units="",
            description="number of neutronics axial mesh points in this block",
            default=None,
            categories=[parameters.Category.retainOnReplacement],
        )

        def mgFlux(self, value):
            # Custom setter: coerce list/tuple input into a numpy array while
            # letting None and existing ndarrays pass through unchanged.
            self._p_mgFlux = (
                value
                if value is None or isinstance(value, numpy.ndarray)
                else numpy.array(value)
            )

        pb.defParam(
            "mgFlux",
            setter=mgFlux,
            units="n-cm/s",
            description="multigroup volume-integrated flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "adjMgFlux",
            units="n-cm/s",
            description="multigroup adjoint neutron flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "lastMgFlux",
            units="n-cm/s",
            description="multigroup volume-integrated flux used for averaging the latest and previous depletion step",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=False,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "mgFluxGamma",
            units="g-cm/s",
            description="multigroup gamma flux",
            location=ParamLocation.VOLUME_INTEGRATED,
            saveToDB=True,
            categories=[
                parameters.Category.fluxQuantities,
                parameters.Category.multiGroupQuantities,
            ],
            default=None,
        )

        pb.defParam(
            "mgNeutronVelocity",
            units="cm/s",
            description="multigroup neutron velocity",
            location=ParamLocation.AVERAGE,
            saveToDB=True,
            categories=[parameters.Category.multiGroupQuantities],
            default=None,
        )

    with pDefs.createBuilder(
        default=0.0,
        location=ParamLocation.AVERAGE,
        categories=[parameters.Category.detailedAxialExpansion],
    ) as pb:
        # Neutronics reaction rate params that are not re-derived in mesh conversion
        pb.defParam(
            "rateBalance",
            units="1/cm^3/s",
            description="Numerical balance between particle production and destruction (should be small)",
        )

        pb.defParam(
            "rateExtSrc",
            units="1/cm^3/s",
            description="Rate of production of neutrons from an external source.",
        )

        pb.defParam(
            "rateFisAbs",
            units="1/cm^3/s",
            description="Neutron abs. rate in fissile material",
        )

        pb.defParam(
            "rateFisSrc",
            units="1/cm^3/s",
            description="Fission source rate. This is related to production rate in fissile by a factor of keff",
        )

        pb.defParam(
            "rateLeak",
            units="1/cm^3/s",
            description="Rate that neutrons leak out of this block.",
        )

        pb.defParam(
            "rateParasAbs",
            units="1/cm^3/s",
            description="Rate of parasitic absorption (absorption in non-fertile/fissionable material)",
        )

        pb.defParam(
            "rateProdNet",
            units="1/cm^3/s",
            description="Net production rate of neutrons",
        )

        pb.defParam(
            "rateScatIn",
            units="1/cm^3/s",
            description="Rate neutrons in-scatter in this block",
        )

        pb.defParam(
            "rateScatOut",
            units="1/cm^3/s",
            description="Rate that neutrons out-scatter in this block (removal - absorption)",
        )

        pb.defParam(
            "capturePowerFrac",
            units=None,
            description="Fraction of the power produced through capture in a block.",
            saveToDB=True,  # fixed: was the string "True"; every other param passes a boolean
        )

        pb.defParam(
            "fastFluence",
            units="#/cm^2",
            description="Fast spectrum fluence",
            categories=["cumulative"],
        )

        pb.defParam(
            "fastFluencePeak",
            units="#/cm^2",
            description="Fast spectrum fluence with a peaking factor",
        )

        pb.defParam(
            "fluence", units="#/cm^2", description="Fluence", categories=["cumulative"]
        )

        pb.defParam(
            "flux",
            units="n/cm^2/s",
            description="neutron flux",
            categories=[
                parameters.Category.retainOnReplacement,
                parameters.Category.fluxQuantities,
            ],
        )

        pb.defParam(
            "fluxAdj", units="", description="Adjoint flux"  # adjoint flux is unitless
        )

        pb.defParam(
            "pdens", units="W/cm$^3$", description="Average volumetric power density"
        )

        pb.defParam(
            "pdensDecay",
            units="W/cm$^3$",
            description="Decay power density from decaying radionuclides",
        )

        pb.defParam("arealPd", units="MW/m^2", description="Power divided by XY area")

        pb.defParam(
            "arealPdGamma", units="MW/m^2", description="Areal gamma power density"
        )

        pb.defParam("fertileBonus", units=None, description="The fertile bonus")

        pb.defParam(
            "fisDens",
            units="fissions/cm^3/s",
            description="Fission density in a pin (scaled up from homogeneous)",
        )

        pb.defParam(
            "fisDensHom", units="1/cm^3/s", description="Homogenized fissile density"
        )

        pb.defParam(
            "fluxDeltaFromRef",
            units="None",
            description="Relative difference between the current flux and the directly-computed perturbed flux.",
        )

        pb.defParam(
            "fluxDirect",
            units="n/cm^2/s",
            description="Flux is computed with a direct method",
        )

        pb.defParam(
            "fluxGamma",
            units="g/cm^2/s",
            description="Gamma scalar flux",
            categories=[
                parameters.Category.retainOnReplacement,
                parameters.Category.fluxQuantities,
            ],
        )

        pb.defParam(
            "fluxPeak",
            units="n/cm^2/s",
            description="Peak neutron flux calculated within the mesh",
        )

        pb.defParam(
            "fluxPertDeltaFromDirect",
            units="None",
            description="Relative difference between the perturbed flux and the directly-computed perturbed flux",
        )

        pb.defParam(
            "fluxPertDeltaFromDirectfluxRefWeighted", units="None", description=""
        )

        pb.defParam(
            "fluxPerturbed", units="1/cm^2/s", description="Flux is computed by MEPT"
        )

        pb.defParam("fluxRef", units="1/cm^2/s", description="Reference flux")

        pb.defParam(
            "kInf",
            units="None",
            description="Neutron production rate in this block/neutron absorption rate in this block. Not truly kinf but a reasonable approximation of reactivity.",
        )

        pb.defParam(
            "medAbsE", units="eV", description="Median neutron absorption energy"
        )

        pb.defParam(
            "medFisE",
            units="eV",
            description="Median energy of neutron causing fission",
        )

        pb.defParam("medFlxE", units="eV", description="Median neutron flux energy")

        pb.defParam(
            "pdensGamma",
            units="W/cm^3",
            description="Average volumetric gamma power density",
        )

        pb.defParam(
            "pdensNeutron",
            units="W/cm^3",
            description="Average volumetric neutron power density",
        )

        pb.defParam("ppdens", units="W/cm^3", description="Peak power density")

        pb.defParam("ppdensGamma", units="W/cm^3", description="Peak gamma density")

    # rx rate params that are derived during mesh conversion.
    # We'd like all things that can be derived from flux and XS to be
    # in this category to minimize numerical diffusion but it is a WIP.
    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb:
        pb.defParam(
            "rateAbs",
            units="1/cm^3/s",
            description="Total absorption rate in this block (fisson + capture).",
        )

        pb.defParam(
            "rateCap",
            units="1/cm^3/s",
            description="Parasitic capture rate in this block.",
        )

        pb.defParam(
            "rateFis", units="1/cm^3/s", description="Fission rate in this block."
        )

        pb.defParam(
            "rateProdFis",
            units="1/cm^3/s",
            description="Production rate of neutrons from fission reactions (nu * fission source / k-eff)",
        )

        pb.defParam(
            "rateProdN2n",
            units="1/cm^3/s",
            description="Production rate of neutrons from n2n reactions.",
        )

    with pDefs.createBuilder(
        default=0.0,
        location=ParamLocation.VOLUME_INTEGRATED,
        categories=[parameters.Category.detailedAxialExpansion],
    ) as pb:
        pb.defParam(
            "powerGenerated",
            units="W",  # fixed: had a stray leading space (" W")
            description="Generated power. Different than b.p.power only when gamma transport is activated.",
        )

        pb.defParam("power", units="W", description="Total power")

        pb.defParam("powerDecay", units="W", description="Total decay power")

        pb.defParam("powerGamma", units="W", description="Total gamma power")

        pb.defParam("powerNeutron", units="W", description="Total neutron power")

    return pDefs
def _getNeutronicsCoreParams():
    """Build and return the neutronics parameter definitions for the Core."""
    pDefs = parameters.ParameterDefinitionCollection()

    with pDefs.createBuilder(categories=[parameters.Category.neutronics]) as pb:
        pb.defParam(
            "eigenvalues",
            location=ParamLocation.AVERAGE,
            units=None,
            description="All available lambda-eigenvalues of reactor.",
            # Will hold a list; the default cannot be a mutable type.
            default=None,
        )

        pb.defParam(
            "axialMesh",
            location=ParamLocation.TOP,
            units="cm",
            description="Global axial mesh from bottom to top used in structured-mesh neutronics simulations.",
            default=None,
        )

        pb.defParam(
            "kInf",
            location=ParamLocation.AVERAGE,
            units=None,
            description="k-infinity",
            default=0.0,
        )

        pb.defParam(
            "refKeff",
            location=ParamLocation.AVERAGE,
            units=None,
            description="Reference unperturbed keff",
            default=0.0,
        )

    return pDefs
| 30.895631 | 164 | 0.569723 | 1,229 | 12,729 | 5.891782 | 0.274207 | 0.080099 | 0.018782 | 0.033145 | 0.410441 | 0.341389 | 0.243198 | 0.184919 | 0.17028 | 0.116973 | 0 | 0.008225 | 0.331369 | 12,729 | 411 | 165 | 30.970803 | 0.842557 | 0.089402 | 0 | 0.44164 | 0 | 0.003155 | 0.283824 | 0.007266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012618 | false | 0 | 0.015773 | 0 | 0.037855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a456f12f0c4a167d3c083c3079f270b436ec245d | 1,768 | py | Python | Software/Backend/app/controllers/Persona.py | davidsgv/Sistemas-Transaccionales | a26904742bd163461aca7e8039448441b4a98fb9 | [
"MIT"
] | null | null | null | Software/Backend/app/controllers/Persona.py | davidsgv/Sistemas-Transaccionales | a26904742bd163461aca7e8039448441b4a98fb9 | [
"MIT"
] | null | null | null | Software/Backend/app/controllers/Persona.py | davidsgv/Sistemas-Transaccionales | a26904742bd163461aca7e8039448441b4a98fb9 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
#Model
from app.model.Persona import Persona
from app.model.Usuario import ManejoUsuarios
persona = Blueprint('persona', __name__, url_prefix="/persona/")
# List every Persona.
@persona.route("list", methods=["POST"])
@cross_origin()
def getPersona():
    # Reject the request unless the session is active (valid token).
    if not ManejoUsuarios.VerificarSesion():
        return jsonify({"message":"invalid token"})
    # Fetch and return the full list of people.
    return jsonify({"response":Persona.listaPersonas(),"message":"ok"})
# Fetch a single Persona by its id.
@persona.route("", methods=["POST"])
@cross_origin()
def getPersonaById():
    # Reject the request unless the session is active (valid token).
    if not ManejoUsuarios.VerificarSesion():
        return jsonify({"message":"invalid token"})
    try:
        persona_id = request.json["idPersona"]
    except:
        # NOTE(review): a missing "idPersona" field also reports
        # "invalid token" — kept as-is to preserve the API contract.
        return jsonify({"message":"invalid token"})
    try:
        data = Persona.GetPersonaById(persona_id)
        return jsonify({"response":data,"message":"ok"})
    except:
        return jsonify({"message":"error"})
# Create a new Persona.
@persona.route("create", methods=["POST"])
@cross_origin()
def createPersona():
    # Reject the request unless the session is active (valid token).
    if not ManejoUsuarios.VerificarSesion():
        return jsonify({"message":"invalid token"})
    try:
        nombre = request.json["nombre"]
    except:
        # NOTE(review): a missing "nombre" field also reports
        # "invalid token" — kept as-is to preserve the API contract.
        return jsonify({"message":"invalid token"})
    try:
        Persona.createPersona(nombre)
        return jsonify({"message":"ok"})
    except:
        return jsonify({"message":"error"})
a458419f6584ac4036e6240f20d87f28078e862b | 947 | py | Python | sieve.py | rigewo02/rsa | 10736f695931ca835600410cd3f89b4f93b1e8e3 | [
"MIT"
] | null | null | null | sieve.py | rigewo02/rsa | 10736f695931ca835600410cd3f89b4f93b1e8e3 | [
"MIT"
] | null | null | null | sieve.py | rigewo02/rsa | 10736f695931ca835600410cd3f89b4f93b1e8e3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import random
import math
def sieve(n):
    """Return the list of all primes strictly below ``n``.

    Classic Sieve of Eratosthenes; prints progress messages because the
    surrounding script uses it interactively on large bounds.
    """
    # Every number is assumed prime except 0 and 1.
    numbers = [False, False] + [True] * (n - 2)
    print("Numbers appended")
    # Only candidate factors up to sqrt(n) need to be checked.
    for i in range(int(math.sqrt(n)) + 1):
        if not numbers[i]:
            continue  # i is composite; its multiples were already marked
        for j in range(i ** 2, n, i):
            numbers[j] = False  # mark every multiple of i as composite
    print("Filter finished")
    # Collect the indices still marked prime.
    return [i for i in range(n) if numbers[i]]
def main(length):
    """Print one random prime with exactly ``length + 1`` digits.

    Sieves all primes below 10**(length + 1), then picks uniformly among
    those that are at least 10**length.
    """
    primes = sieve(10**(length + 1))
    print("Finished generating")
    # Filter once instead of the old reject-and-retry loop; the resulting
    # distribution (uniform over the (length + 1)-digit primes) is identical.
    candidates = [p for p in primes if p >= 10**length]
    num = random.choice(candidates)
    print(num)
if __name__ == "__main__":
    # Previously `main(7)` ran unconditionally, which executed the (slow)
    # sieve as a side effect of merely importing this module. Direct runs
    # still default to 7; an optional argv[1] overrides the digit count.
    length = int(sys.argv[1]) if len(sys.argv) > 1 else 7
    main(length)
| 23.097561 | 78 | 0.548046 | 132 | 947 | 3.871212 | 0.44697 | 0.041096 | 0.023483 | 0.043053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 0.324182 | 947 | 40 | 79 | 23.675 | 0.775 | 0.279831 | 0 | 0.08 | 0 | 0 | 0.074405 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.24 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4591e8c5e0a7f2482e3823831464176b3c6732c | 1,136 | py | Python | tests/api_tests/search/test_fetch_platsannons.py | JobtechSwe/castaway | e0917511b20152f0bd7e2802b73a0beae30a96f5 | [
"Apache-2.0"
] | null | null | null | tests/api_tests/search/test_fetch_platsannons.py | JobtechSwe/castaway | e0917511b20152f0bd7e2802b73a0beae30a96f5 | [
"Apache-2.0"
] | null | null | null | tests/api_tests/search/test_fetch_platsannons.py | JobtechSwe/castaway | e0917511b20152f0bd7e2802b73a0beae30a96f5 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import requests
import pytest
from tests.test_resources.helper import get_with_path_return_json
from tests.test_resources.settings import SEARCH_URL
@pytest.mark.smoke
@pytest.mark.integration
def test_fetch_ad_by_id(session):
    """
    Fetch one ad via /search (no query, limit 1), then request the /ad path
    with that ad's id and verify the returned ad carries the same id.
    """
    search_result = get_with_path_return_json(session, '/search', params={'limit': '1'})
    expected_id = search_result['hits'][0]['id']
    ad = get_with_path_return_json(session, path=f"/ad/{expected_id}", params={})
    assert ad['id'] == expected_id
    assert len(ad) == 33
@pytest.mark.integration
def test_fetch_not_found_ad_by_id(session):
    """Requesting an ad id that cannot exist must return 404."""
    bogus_id = '823069282306928230692823069282306928230692'
    response = session.get(f"{SEARCH_URL}/ad/{bogus_id}", params={})
    assert response.status_code == requests.codes.not_found
if __name__ == '__main__':
    # Allow running this module directly: execute only integration-marked
    # tests in this file, verbosely (-svv) with a summary of results (-ra).
    pytest.main([os.path.realpath(__file__), '-svv', '-ra', '-m integration'])
| 31.555556 | 89 | 0.724472 | 178 | 1,136 | 4.342697 | 0.410112 | 0.025873 | 0.042691 | 0.065977 | 0.283312 | 0.178525 | 0.093144 | 0 | 0 | 0 | 0 | 0.048269 | 0.161092 | 1,136 | 35 | 90 | 32.457143 | 0.762854 | 0.1875 | 0 | 0.095238 | 0 | 0 | 0.140156 | 0.072303 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.095238 | false | 0 | 0.285714 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a45c6a40589988b06217c130fc291b75cbb96b4e | 2,835 | py | Python | dmaap/tests/test_consulif.py | onap/dcaegen2-platform-plugins | 64131311ba1d01ff7d20bca0c14d30a006b2e712 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2020-07-14T14:22:04.000Z | 2020-07-14T14:22:04.000Z | dmaap/tests/test_consulif.py | alex-sh2020/dcaegen2-platform-plugins | c5abb9b34468400bdcdd3ce23595af41ac03cd80 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | dmaap/tests/test_consulif.py | alex-sh2020/dcaegen2-platform-plugins | c5abb9b34468400bdcdd3ce23595af41ac03cd80 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2020-07-14T19:02:05.000Z | 2020-07-14T19:02:05.000Z | # ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
import pytest
from cloudify.exceptions import NonRecoverableError
import os
from consulif.consulif import ConsulHandle
# No connections are actually made to this host
CONSUL_HOST = "consul"      # Should always be a local consul agent on Cloudify Manager
#CONSUL_PORT = '8510'
CONSUL_PORT = '8500'        # Standard consul HTTP API port
DBCL_KEY_NAME = "dmaap_dbcl_info"       # Consul key containing DMaaP data bus credentials
DBC_SERVICE_NAME= "dmaap_bus_controller"        # Name under which the DMaaP bus controller is registered
def test_get_config_service(mockconsul):
    """Read the DMaaP bus-controller config and service entry from (mock) Consul.

    The test passes when every expected key is present and the service can be
    resolved into a URL; each lookup raises (failing the test) if the schema
    is violated. (Removed `err_msg`, which was assigned but never used.)
    """
    _ch = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)

    config = _ch.get_config(DBCL_KEY_NAME)

    # Accessing these keys asserts the expected credential schema.
    DMAAP_USER = config['dmaap']['username']
    DMAAP_PASS = config['dmaap']['password']
    DMAAP_OWNER = config['dmaap']['owner']

    if 'protocol' in config['dmaap']:
        DMAAP_PROTOCOL = config['dmaap']['protocol']
    else:
        DMAAP_PROTOCOL = 'https'    # Default to https (service discovery should give us this but doesn't

    if 'path' in config['dmaap']:
        DMAAP_PATH = config['dmaap']['path']
    else:
        DMAAP_PATH = 'webapi'       # Should come from service discovery but Consul doesn't support it

    service_address, service_port = _ch.get_service(DBC_SERVICE_NAME)

    DMAAP_API_URL = '{0}://{1}:{2}/{3}'.format(DMAAP_PROTOCOL, service_address, service_port, DMAAP_PATH)
def test_add_entry(mockconsul):
    """Write two name/value pairs under one key, then delete the whole key."""
    handle = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)

    key = 'DMAAP_TEST'
    entries = (
        ('dmaap_test_name', 'dmaap_test_value'),
        ('dmaap_test_name_2', 'dmaap_test_value_2'),
    )
    for name, value in entries:
        handle.add_to_entry(key, name, value)

    handle.delete_entry(key)
| 38.835616 | 105 | 0.639153 | 358 | 2,835 | 4.874302 | 0.435754 | 0.044126 | 0.027507 | 0.018338 | 0.093983 | 0.093983 | 0.093983 | 0.066476 | 0.066476 | 0.066476 | 0 | 0.013119 | 0.16649 | 2,835 | 72 | 106 | 39.375 | 0.725349 | 0.45679 | 0 | 0.171429 | 0 | 0 | 0.21004 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0.028571 | 0.114286 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a463318d48a23200b05fe5c478703327af0250c6 | 5,411 | py | Python | utils/transforms.py | zdaiot/NAIC-Person-Re-identification | 762be875b68e85fbaab8b7730b5a857bfcc9e218 | [
"MIT"
] | null | null | null | utils/transforms.py | zdaiot/NAIC-Person-Re-identification | 762be875b68e85fbaab8b7730b5a857bfcc9e218 | [
"MIT"
] | null | null | null | utils/transforms.py | zdaiot/NAIC-Person-Re-identification | 762be875b68e85fbaab8b7730b5a857bfcc9e218 | [
"MIT"
] | null | null | null | import math
import random
import numpy as np
import torchvision.transforms as T
from albumentations import (
Compose, HorizontalFlip, VerticalFlip, CLAHE, RandomRotate90, HueSaturationValue,
RandomBrightness, RandomContrast, RandomGamma, OneOf,
ToFloat, ShiftScaleRotate, GridDistortion, ElasticTransform, JpegCompression, HueSaturationValue,
RGBShift, RandomBrightnessContrast, RandomContrast, Blur, MotionBlur, MedianBlur, GaussNoise,CenterCrop,
IAAAdditiveGaussianNoise,GaussNoise,Cutout,Rotate, Normalize, Crop, RandomCrop, Resize, RGBShift
)
from PIL import Image
import cv2
import os
import matplotlib.pyplot as plt
import copy
class RandomErasing(object):
    """Randomly selects a rectangle region in an image and erases its pixels.

    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf

    Note: the erased region is filled with the region's own per-channel mean,
    except the first (R) channel, which is zeroed. The ``mean`` argument is
    kept for interface compatibility but is not used when filling.

    Args:
        probability: The probability that the Random Erasing operation will be performed.
        sl: Minimum proportion of erased area against input image.
        sh: Maximum proportion of erased area against input image.
        r1: Minimum aspect ratio of erased area.
        mean: Erasing value (currently unused; see note above).
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.15, r1=0.3, mean=(0.485, 0.456, 0.406)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        # Work on a copy so the caller's array is never modified in place.
        img = copy.deepcopy(img)
        if random.uniform(0, 1) > self.probability:
            return img

        # Retry until a sampled rectangle fits inside the image (100 tries max).
        for _ in range(100):
            area = img.shape[0] * img.shape[1]

            # Sample the target area and aspect ratio of the erased box.
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.shape[1] and h < img.shape[0]:
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                image_roi = img[x1:x1 + h, y1:y1 + w, :]
                image_mean = np.mean(image_roi, axis=(0, 1))
                # Zero out the R channel of the fill value.
                image_mean[0] = 0
                if img.shape[2] == 3:
                    img[x1:x1 + h, y1:y1 + w, 0] = image_mean[0]
                    img[x1:x1 + h, y1:y1 + w, 1] = image_mean[1]
                    img[x1:x1 + h, y1:y1 + w, 2] = image_mean[2]
                else:
                    img[x1:x1 + h, y1:y1 + w] = image_mean[0]
                return img

        return img
class RGB2GRAY(object):
    """Convert an RGB image to grayscale with probability ``p``.

    The gray plane is replicated into three channels so the output keeps
    the 3-channel shape the downstream network expects.
    """

    def __init__(self, p=0.5):
        self.probability = p

    def __call__(self, image):
        # Skip the conversion with probability (1 - p).
        if random.uniform(0, 1) > self.probability:
            return image
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Stack the single gray plane into three identical channels.
        return cv2.merge([gray, gray, gray])
class DataAugmentation(object):
    def __init__(self, erase_prob=None, full_aug=True, gray_prob=None):
        """
        Args:
            erase_prob: probability of random erasing; None disables it.
            full_aug: whether to apply the full random augmentation
                pipeline to the whole image.
            gray_prob: probability of grayscale conversion; None disables it.
        """
        self.full_aug = full_aug
        self.erase_prob = erase_prob
        self.gray_prob = gray_prob
        if erase_prob is not None:
            self.random_erase = RandomErasing(probability=erase_prob)
        if gray_prob is not None:
            self.rgb2gray = RGB2GRAY(p=gray_prob)

    def __call__(self, image):
        """
        Args:
            image: the input image.
        Returns:
            the augmented image.
        """
        # Random erasing first.
        if self.erase_prob:
            image = self.random_erase(image)
        # Then (maybe) convert to grayscale.
        if self.gray_prob:
            image = self.rgb2gray(image)
        # Finally the full albumentations pipeline.
        if self.full_aug:
            image = self.data_augmentation(image)
        return image

    def data_augmentation(self, original_image):
        """Apply the random albumentations pipeline to one image.
        Args:
            original_image: the original image.
        Return:
            image_aug: the augmented image.
        """
        augmentations = Compose([
            HorizontalFlip(p=0.4),
            ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),
            # Brightness / contrast.
            RandomGamma(gamma_limit=(80, 120), p=0.1),
            RandomBrightnessContrast(p=0.1),
            # Blur.
            OneOf([
                MotionBlur(p=0.1),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ], p=0.3),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2)
        ])
        augmented = augmentations(image=original_image)
        image_aug = augmented['image']
        return image_aug
if __name__ == "__main__":
    # Visual smoke test: augment every training image and display the result.
    image_path = 'data/Uaic/初赛训练集/初赛训练集/train_set'
    # augment = DataAugmentation(erase_flag=True, full_aug=True, gray=True)
    augment = DataAugmentation(erase_prob=0.3, gray_prob=0.3)
    plt.figure()
    for image_name in os.listdir(image_path):
        image = Image.open(os.path.join(image_path, image_name)).convert('RGB')
        image = np.asarray(image)
        augmented = augment(image=image)
        plt.imshow(augmented)
        plt.show()
| 32.993902 | 108 | 0.577342 | 655 | 5,411 | 4.615267 | 0.30229 | 0.007278 | 0.011578 | 0.013232 | 0.137281 | 0.111479 | 0.102878 | 0.064175 | 0.014555 | 0 | 0 | 0.038661 | 0.321198 | 5,411 | 163 | 109 | 33.196319 | 0.784372 | 0.135834 | 0 | 0.106796 | 0 | 0 | 0.01041 | 0.006866 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067961 | false | 0 | 0.097087 | 0 | 0.262136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46426c4091f69e2bafaff6462b464224cf7d8e8 | 382 | py | Python | Day-017-MoreOOPS/main.py | codefather91/100DaysOfPython | 7c27e0b1af7b73c8fefdd8e3bd73f092ea665868 | [
"MIT"
] | null | null | null | Day-017-MoreOOPS/main.py | codefather91/100DaysOfPython | 7c27e0b1af7b73c8fefdd8e3bd73f092ea665868 | [
"MIT"
] | null | null | null | Day-017-MoreOOPS/main.py | codefather91/100DaysOfPython | 7c27e0b1af7b73c8fefdd8e3bd73f092ea665868 | [
"MIT"
] | null | null | null | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
question_bank = []
for question in question_data:
question_bank.append(Question(question['text'], question['answer']))
quiz = QuizBrain(question_bank)
while quiz.still_has_question():
quiz.next_question()
print(f"\nYour final score : {quiz.score}\nThanks for playing!") | 25.466667 | 72 | 0.777487 | 52 | 382 | 5.519231 | 0.480769 | 0.125436 | 0.146341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120419 | 382 | 15 | 73 | 25.466667 | 0.854167 | 0 | 0 | 0 | 0 | 0 | 0.167102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a464c2d34843871e0b098c7eded073e73e59a58c | 1,655 | py | Python | using_result.py | takat0m0/pix2pix | f6b0277fdd4bc98581db8cfd6dd6a039baf5e349 | [
"MIT"
] | 3 | 2017-02-09T14:26:12.000Z | 2017-02-20T03:21:26.000Z | using_result.py | takat0m0/pix2pix | f6b0277fdd4bc98581db8cfd6dd6a039baf5e349 | [
"MIT"
] | null | null | null | using_result.py | takat0m0/pix2pix | f6b0277fdd4bc98581db8cfd6dd6a039baf5e349 | [
"MIT"
] | null | null | null | #! -*- coding:utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cv2
from Model import Model
from util import get_figs, dump_figs
class FigGenerator(object):
def __init__(self, file_name, z_dim, batch_size):
self.batch_size = batch_size
self.z_dim = z_dim
self.model = Model(z_dim, batch_size)
self.model.set_model()
saver = tf.train.Saver()
self.sess = tf.Session()
saver.restore(self.sess, file_name)
def __call__(self, inputs):
assert(len(inputs) == self.batch_size)
#z = np.zeros([self.batch_size, self.z_dim])
z = np.random.normal(0.0, 1.0, [batch_size, z_dim]).astype(np.float32)
return self.model.gen_fig(self.sess, inputs, z)
if __name__ == u'__main__':
# dump file
dump_file = u'./model.dump'
# dir
input_dir = u'train_split/inputs'
target_dir = u'train_split/targets'
# parameter
batch_size = 10
z_dim = 100
# figure generator
fig_gen = FigGenerator(dump_file, z_dim, batch_size)
# get fig
print('-- get figs--')
input_figs, target_figs = get_figs(input_dir, target_dir, False, False)
assert(len(input_figs) == len(target_figs))
print('num figs = {}'.format(len(input_figs)))
# make figure
inputs = input_figs[10: 10 + batch_size]
input_imgs = cv2.hconcat((inputs + 1.0) * 127.5)
cv2.imwrite('inputs.jpg', input_imgs)
outputs = np.asarray(fig_gen(input_figs[10: 10 +batch_size]))
output_imgs = cv2.hconcat((outputs + 1.0) * 127.5)
cv2.imwrite('outputs.jpg', output_imgs)
| 27.583333 | 79 | 0.64713 | 246 | 1,655 | 4.097561 | 0.325203 | 0.098214 | 0.051587 | 0.03869 | 0.144841 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0.028906 | 0.226586 | 1,655 | 59 | 80 | 28.050847 | 0.758594 | 0.076133 | 0 | 0 | 0 | 0 | 0.068376 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a465cded6b282420fb15a7a55e99a0790dcca303 | 1,300 | py | Python | prob_calculator.py | ZaatarX/probability-calculator | ae077db1eb435864ac5070c38d5794bccd0c92b8 | [
"MIT"
] | null | null | null | prob_calculator.py | ZaatarX/probability-calculator | ae077db1eb435864ac5070c38d5794bccd0c92b8 | [
"MIT"
] | null | null | null | prob_calculator.py | ZaatarX/probability-calculator | ae077db1eb435864ac5070c38d5794bccd0c92b8 | [
"MIT"
] | null | null | null | import copy
import random
# Consider using the modules imported above.
class Hat:
    """A hat of colored balls supporting random draws without replacement."""

    def __init__(self, **kwargs) -> None:
        self.set_contents(**kwargs)

    def set_contents(self, **kwargs):
        """Fill the hat: each keyword is a color, each value its ball count."""
        balls = []
        for color, count in kwargs.items():
            balls.extend([color] * count)
        self.contents = balls

    def draw(self, quantity):
        """Remove and return `quantity` random balls.

        If more balls are requested than the hat holds, the full contents
        are returned and the hat is left untouched.
        """
        if quantity >= len(self.contents):
            return self.contents
        remaining = list(self.contents)
        drawn = []
        for _ in range(quantity):
            pick = random.randrange(len(remaining))
            drawn.append(remaining.pop(pick))
        self.contents = remaining
        return drawn
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
    """Estimate the probability that drawing `num_balls_drawn` balls from
    `hat` yields at least the per-color counts in `expected_balls`.

    Each trial draws from a deep copy so the original hat is untouched.

    Returns:
        Fraction of trials that met the expectation, as 1 - failures/N.
    """
    failures = 0
    for _ in range(num_experiments):
        # Fresh copy per trial so previous draws do not deplete the hat.
        drawn = copy.deepcopy(hat).draw(num_balls_drawn)
        tally = {}
        for ball in drawn:
            tally[ball] = tally.get(ball, 0) + 1
        if any(tally.get(color, 0) < needed
               for color, needed in expected_balls.items()):
            failures += 1
    return 1 - failures / num_experiments
| 26 | 70 | 0.539231 | 150 | 1,300 | 4.566667 | 0.34 | 0.040876 | 0.026277 | 0.048175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009756 | 0.369231 | 1,300 | 49 | 71 | 26.530612 | 0.82561 | 0.05 | 0 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.054054 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46624d14c9284368d486f5789d1189343a38de8 | 7,309 | py | Python | naivenmt/layers/transformer.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 7 | 2018-09-10T03:49:06.000Z | 2020-06-15T06:10:28.000Z | naivenmt/layers/transformer.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 1 | 2019-02-18T10:01:44.000Z | 2019-02-18T10:01:44.000Z | naivenmt/layers/transformer.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 1 | 2018-09-15T05:49:31.000Z | 2018-09-15T05:49:31.000Z | # Copyright 2018 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
# This function is modified from https://github.com/Kyubyong/transformer/blob/master/modules.py
# with Apache License V2
def positional_encoding(inputs,
                        num_units,
                        scope="positional_encoding"):
    """Positional encoding as described in https://arxiv.org/abs/1706.03762.

    Args:
      inputs: A 2-d tensor with shape [B, L]. B->Batch size, L->Time steps
      num_units: The model's dimension
      scope: Variable scope

    Returns:
      A tensor with shape [B,L,D]. D->Model's dimension
    """
    # Static shapes are required: B and L must be known at graph-construction
    # time because the sinusoid table below is built with NumPy.
    batch_size, time_steps = inputs.get_shape().as_list()
    with tf.variable_scope(scope):
        # [B, L]: every row holds the positions 0..L-1.
        position_index = tf.tile(
            tf.expand_dims(tf.range(time_steps), 0), [batch_size, 1])
        # angle(pos, i) = pos / 10000^(2i/D); note the exponent uses the raw
        # dimension index i as written here.
        position_encoding = np.array([
            [pos / np.power(10000, 2. * i / num_units) for i in range(num_units)]
            for pos in range(time_steps)])
        position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])  # dim 2i
        position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2])  # dim 2i+1
        # Convert to a tensor
        # NOTE(review): the NumPy table is float64, so the returned tensor is
        # float64 -- confirm callers cast it to match float32 embeddings.
        lookup_table = tf.convert_to_tensor(position_encoding)
        outputs = tf.nn.embedding_lookup(lookup_table, position_index)
    return outputs
def layer_norm(inputs, epsilon=1e-8, scope="layer_norm"):
    """Layer normalization.

    norm = gamma * (inputs - mean) / sqrt(variance + epsilon)

    Args:
      inputs: Input tensor, shape is [B,L,D]. B->Batch size, L->Time steps, D->Model's dim
      epsilon: A very small float number to avoid zero division error
      scope: Variable scope or name

    Returns:
      The normalized tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope):
        inputs_shape = inputs.get_shape()
        # Normalization statistics are taken over the last (feature) axis.
        params_shape = inputs_shape[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift/scale, one scalar per feature dimension.
        # NOTE(review): tf.Variable (not tf.get_variable) is used, so these
        # variables are never shared on scope reuse -- confirm that is intended.
        beta = tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)
        outputs = gamma * normalized + beta
    return outputs
def scaled_dot_product_attention(q, k, v, scale=None, mask=None, dropout=0.2):
    """Scaled dot-product attention.

    Args:
      q: Query tensor, with shape [h*B, L, D/h]. h->num_heads
      k: Key tensor, with shape [h*B, L, D/h]
      v: Value tensor, with shape [h*B, L, D/h]
      scale: A scalar, scale factor, sqrt(D)
      mask: Attention mask, with shape [h*B, L, L]; 0 marks masked slots
      dropout: A scalar, dropout rate

    Returns:
      An output tensor and an attention tensor
    """
    dot = tf.matmul(q, k, transpose_b=True)  # [h*B,L,L]
    if scale:
        dot = dot * scale
    # BUG FIX: `mask` is a Tensor; `if mask:` raises "Using a tf.Tensor as a
    # Python bool is not allowed" in graph mode. Compare against None instead.
    if mask is not None:
        # Push masked-out positions to the dtype minimum so softmax -> ~0.
        padding = tf.ones_like(dot) * dot.dtype.min
        dot = tf.where(tf.equal(mask, 0), padding, dot)
    attention = tf.nn.softmax(dot)
    # BUG FIX: tf.nn.dropout's second positional argument is keep_prob in
    # TF 1.x, but `dropout` is documented as a drop rate -- pass 1 - rate.
    attention = tf.nn.dropout(attention, 1.0 - dropout)
    output = tf.matmul(attention, v)
    return output, attention
def multihead_attention(queries,
                        keys,
                        values,
                        num_heads=8,
                        dropout=0.2,
                        mask=None,
                        scope="multihead_attention"):
    """Multi-head attention mechanism.

    Args:
      queries: Query tensor, with shape [B, L, D] (split into heads inside)
      keys: Key tensor, with shape [B, L, D]
      values: Value tensor, with shape [B, L, D]
      num_heads: A scalar, number of heads to split
      dropout: A scalar, dropout rate.
      mask: Masking tensor, with shape [h*B, L, L]
      scope: A string, variable scope name.

    Returns:
      An output tensor and an attention tensor
    """
    with tf.variable_scope(scope) as scope:
        model_dim = queries.get_shape()[-1]
        # Linear projections (with ReLU, as in the original implementation).
        q = tf.layers.dense(
            queries, model_dim, activation=tf.nn.relu)  # (B, L_q, D]
        k = tf.layers.dense(
            keys, model_dim, activation=tf.nn.relu)
        v = tf.layers.dense(
            values, model_dim, activation=tf.nn.relu)
        # split and concat: [B, L, D] -> [h*B, L, D/h]
        q = tf.concat(tf.split(q, num_heads, axis=2), 0)
        k = tf.concat(tf.split(k, num_heads, axis=2), 0)
        v = tf.concat(tf.split(v, num_heads, axis=2), 0)
        scale = (model_dim // num_heads) ** -0.5
        output, attention = scaled_dot_product_attention(
            q, k, v, scale, mask, dropout)
        # merge heads back: [h*B, L, D/h] -> [B, L, D]
        output = tf.concat(tf.split(output, num_heads, axis=0), 2)
        output = tf.layers.dense(output, model_dim)
        # BUG FIX: tf.nn.dropout's second positional argument is keep_prob in
        # TF 1.x, but `dropout` is a drop rate -- pass the keep probability.
        output = tf.nn.dropout(output, 1.0 - dropout)
        # residual connection
        output += queries
        # layer norm
        output = layer_norm(output)
    return output, attention
def positional_wise_feed_forward_network(inputs,
                                         model_dim=512,
                                         ffn_dim=2048,
                                         dropout=0.2,
                                         scope="ffn"):
    """Positional-wise feed forward network.

    Expands to `ffn_dim` with ReLU, projects back to `model_dim`, then
    applies dropout, a residual connection and layer norm.

    Args:
      inputs: Input tensor with shape [B,L,D] where D == model_dim
      model_dim: Model's dimension
      ffn_dim: FFN's inner dimension
      dropout: A scalar, dropout rate
      scope: Variable's scope or name

    Returns:
      An output tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope) as scope:
        # BUG FIX: the two filter widths were swapped. The first layer must
        # expand to ffn_dim and the readout must project back to model_dim,
        # otherwise `outputs += inputs` shape-mismatches whenever
        # ffn_dim != model_dim (e.g. the 2048 vs 512 defaults).
        params = {"inputs": inputs, "filters": ffn_dim, "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Readout layer
        params = {"inputs": outputs, "filters": model_dim, "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.layers.dropout(outputs, dropout)
        # residual and layer norm
        outputs += inputs
        outputs = layer_norm(outputs)
    return outputs
def padding_mask(seq_k, seq_q, num_heads):
    """Build a key-padding mask for multi-head attention.

    A key position counts as padding when its feature vector sums to zero.

    Args:
      seq_k: Keys tensor with shape [B,L,D]
      seq_q: Queries tensor with shape [B,L,D]
      num_heads: A scalar, number of heads

    Returns:
      A masking tensor with shape [h*B, L_q, L] (1 = keep, 0 = padding)
    """
    nonzero = tf.sign(tf.abs(tf.reduce_sum(seq_k, axis=-1)))  # [B,L]
    per_head = tf.tile(nonzero, [num_heads, 1])  # [h*B,L]
    q_len = tf.shape(seq_q)[1]
    return tf.tile(tf.expand_dims(per_head, 1), [1, q_len, 1])
def sequence_mask(seq, num_heads, dtype=tf.float32):
    """Lower-triangular (causal) mask hiding future time steps.

    Args:
      seq: Input tensor with shape [B,L,D]
      num_heads: A scalar, number of heads
      dtype: Data type

    Returns:
      A masking tensor with shape [h*B,L,L]
    """
    seq_len = tf.shape(seq)[1]
    ones = tf.ones(shape=[seq_len, seq_len], dtype=dtype)  # [L,L]
    causal = tf.linalg.LinearOperatorLowerTriangular(ones).to_dense()  # [L,L]
    copies = num_heads * tf.shape(seq)[0]
    return tf.tile(tf.expand_dims(causal, 0), [copies, 1, 1])  # [h*B,L,L]
| 32.198238 | 95 | 0.629224 | 1,077 | 7,309 | 4.17363 | 0.220984 | 0.011568 | 0.05673 | 0.035595 | 0.283426 | 0.205562 | 0.156174 | 0.125028 | 0.07564 | 0.054283 | 0 | 0.015423 | 0.237105 | 7,309 | 226 | 96 | 32.340708 | 0.79071 | 0.405117 | 0 | 0.16129 | 0 | 0 | 0.032491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075269 | false | 0 | 0.021505 | 0 | 0.172043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4696c2f89e7bc7a526efb0d4e80f4d3b7b63062 | 5,700 | py | Python | sudoku/models.py | mpilkou/django_code | 08e42ef3cdbbcdd9050e591fd97b0d8be060df6b | [
"Apache-2.0"
] | null | null | null | sudoku/models.py | mpilkou/django_code | 08e42ef3cdbbcdd9050e591fd97b0d8be060df6b | [
"Apache-2.0"
] | null | null | null | sudoku/models.py | mpilkou/django_code | 08e42ef3cdbbcdd9050e591fd97b0d8be060df6b | [
"Apache-2.0"
] | null | null | null | from django.db import models
from typing import List, Tuple
from django.core.exceptions import ValidationError
# Create your models here.
class Sudoku(models.Model):
    """One sudoku instance; its puzzle/solution grids live in related models."""

    puzzle_creation_date = models.DateField(verbose_name='creation date', help_text='puzzle creation date', auto_now=True, auto_now_add=False)

    @staticmethod
    def create_from_puzzle_and_solution_lists(sudoku_puzzle_list: List[List[int]], sudoku_solution_list: List[List[int]]) -> None:
        """Create and persist a Sudoku with its puzzle and solution grids.

        Each argument is a list of 9-element lists, one per 3x3 square.
        All rows are saved inside a single transaction.
        """
        def sudoku_squere_list_to_model(sudoku_field_model: SudokuField, sudoku_squere_list: List[int]) -> SudokuSquereField:
            # Map the nine cell values of one 3x3 square onto a model row.
            return SudokuSquereField(
                f_0=sudoku_squere_list[0],
                f_1=sudoku_squere_list[1],
                f_2=sudoku_squere_list[2],
                f_3=sudoku_squere_list[3],
                f_4=sudoku_squere_list[4],
                f_5=sudoku_squere_list[5],
                f_6=sudoku_squere_list[6],
                f_7=sudoku_squere_list[7],
                f_8=sudoku_squere_list[8],
                sudoku_field_fk=sudoku_field_model
            )

        def sudoku_list_to_model(sudoku_model: Sudoku, sudoku_list: List[List[int]]) -> Tuple[SudokuField, Tuple[SudokuSquereField]]:
            sudoku_field_model = SudokuField(sudoku_fk=sudoku_model)
            # BUG FIX: this used to be a generator expression; the FK
            # assignment loop below exhausted it, so the save loop inside
            # the transaction iterated nothing and persisted no squares.
            # Materialize it as a tuple instead.
            sudoku_squere_model_tuple = tuple(
                sudoku_squere_list_to_model(sudoku_field_model, squere) for squere in sudoku_list
            )
            return (sudoku_field_model, sudoku_squere_model_tuple)

        sudoku_model = Sudoku()

        sudoku_field_puzzle, sudoku_3x3_puzzle_tuple = sudoku_list_to_model(sudoku_model, sudoku_puzzle_list)
        sudoku_field_puzzle.type = 'p'
        sudoku_field_puzzle.sudoku_fk = sudoku_model
        for sudoku_3x3_field in sudoku_3x3_puzzle_tuple:
            sudoku_3x3_field.sudoku_field_fk = sudoku_field_puzzle

        sudoku_field_solution, sudoku_3x3_solution_tuple = sudoku_list_to_model(sudoku_model, sudoku_solution_list)
        sudoku_field_solution.type = 's'
        sudoku_field_solution.sudoku_fk = sudoku_model
        for sudoku_3x3_field in sudoku_3x3_solution_tuple:
            sudoku_3x3_field.sudoku_field_fk = sudoku_field_solution

        from django.db import transaction
        with transaction.atomic():
            sudoku_model.save()
            sudoku_field_puzzle.save()
            for sudoku_field in sudoku_3x3_puzzle_tuple:
                sudoku_field.save()
            sudoku_field_solution.save()
            for sudoku_field in sudoku_3x3_solution_tuple:
                sudoku_field.save()

    class Meta():
        verbose_name_plural = 'Sudoku'
        verbose_name = 'Sudoku'
        ordering = ['-puzzle_creation_date']
class SudokuField(models.Model):
    """One 9x9 grid (puzzle or solution) belonging to a Sudoku."""

    SUDOKU_TYPES_FIELD_CHOISES = (
        ('p', 'Puzzle'),
        ('s', 'Solution'),
    )
    type = models.CharField(verbose_name='puzzle type', max_length=1, choices=SUDOKU_TYPES_FIELD_CHOISES, default=None)
    sudoku_fk = models.ForeignKey(Sudoku, on_delete=models.CASCADE, verbose_name='sudoku_id')

    def save(self, *args, **kwargs) -> None:
        # Validate before every save; Django does not call clean() on save.
        self.clean()
        return super().save(*args, **kwargs)

    def clean(self) -> None:
        """Validate the type flag and enforce at most one puzzle per Sudoku.

        Raises:
            ValidationError: on an unknown type, a missing/invalid FK, or a
                second puzzle for the same Sudoku.
        """
        allowed_types = [field_type[0] for field_type in self.SUDOKU_TYPES_FIELD_CHOISES]
        # isinstance instead of exact type() comparison so proxy/subclass
        # models are accepted as well.
        if not (self.type in allowed_types and isinstance(self.sudoku_fk, Sudoku)):
            raise ValidationError(' validation error in SudokuField fields ')
        if self.type == 'p' and SudokuField.objects.filter(type='p', sudoku_fk=self.sudoku_fk).exists():
            raise ValidationError(' validation error in SudokuField (puzzle already created) ')
        return super().clean()

    class Meta():
        verbose_name_plural = 'Puzzle'
        verbose_name = 'Puzzle'
        ordering = ['-sudoku_fk']
class SudokuSquereField(models.Model):
    """One 3x3 square of a grid; f_0..f_8 are its cells (0 = empty)."""

    SUDOKU_PUZZLE_FIELD_CHOISES = (
        (0, ' '),
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
        (5, '5'),
        (6, '6'),
        (7, '7'),
        (8, '8'),
        (9, '9'),
    )
    f_0 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='1', )
    f_1 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='2', )
    f_2 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='3', )
    f_3 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='4', )
    f_4 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='5', )
    f_5 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='6', )
    f_6 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='7', )
    f_7 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='8', )
    f_8 = models.PositiveSmallIntegerField(choices=SUDOKU_PUZZLE_FIELD_CHOISES, verbose_name='9', )
    sudoku_field_fk = models.ForeignKey(SudokuField, on_delete=models.CASCADE, verbose_name='puzzle_id')

    def save(self, *args, **kwargs) -> None:
        # Validate before every save; Django does not call clean() on save.
        self.clean()
        return super().save(*args, **kwargs)

    def clean(self) -> None:
        """Validate all nine cell values and the parent-grid FK.

        Raises:
            ValidationError: if any cell is outside 0..9 or the FK is invalid.
        """
        # BUG FIX: this previously checked a nonexistent `self.type`
        # attribute (copy-pasted from SudokuField), raising AttributeError
        # on every save. Validate the nine cell values instead.
        allowed_values = [choice[0] for choice in self.SUDOKU_PUZZLE_FIELD_CHOISES]
        cell_values = [self.f_0, self.f_1, self.f_2, self.f_3, self.f_4,
                       self.f_5, self.f_6, self.f_7, self.f_8]
        if not (all(value in allowed_values for value in cell_values)
                and isinstance(self.sudoku_field_fk, SudokuField)):
            raise ValidationError(' validation error in SudokuSquereField fields ')
        return super().clean()

    class Meta():
        verbose_name_plural = 'Field 3x3 '
        verbose_name = 'Field 3x3'
        ordering = ['-sudoku_field_fk']
| 41.605839 | 146 | 0.666842 | 692 | 5,700 | 5.138728 | 0.140173 | 0.077334 | 0.053993 | 0.074241 | 0.544994 | 0.497469 | 0.435602 | 0.395669 | 0.328178 | 0.119235 | 0 | 0.018799 | 0.234737 | 5,700 | 136 | 147 | 41.911765 | 0.796424 | 0.004211 | 0 | 0.141509 | 0 | 0 | 0.059041 | 0.003701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066038 | false | 0 | 0.037736 | 0.009434 | 0.358491 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46a6612483e49119e42e59a73001ccaee6e6e4f | 3,999 | py | Python | src/dbspro/cli/correctfastq.py | FrickTobias/iSeq | 3732de7716e2d379e9a4d7060dd4797fd1955ac4 | [
"MIT"
] | 1 | 2021-01-18T13:04:04.000Z | 2021-01-18T13:04:04.000Z | src/dbspro/cli/correctfastq.py | FrickTobias/iSeq | 3732de7716e2d379e9a4d7060dd4797fd1955ac4 | [
"MIT"
] | 27 | 2019-06-19T16:38:48.000Z | 2021-11-16T09:50:50.000Z | src/dbspro/cli/correctfastq.py | FrickTobias/DBSpro | 3732de7716e2d379e9a4d7060dd4797fd1955ac4 | [
"MIT"
] | 1 | 2020-02-06T10:23:00.000Z | 2020-02-06T10:23:00.000Z | """
Correct FASTQ/FASTA with the corrected sequences from starcode clustering
"""
from collections import defaultdict
import logging
import os
import statistics
from pathlib import Path
from typing import Iterator, Tuple, List, Set, Dict
import dnaio
from tqdm import tqdm
from xopen import xopen
from dbspro.utils import Summary, IUPAC_MAP
logger = logging.getLogger(__name__)
def add_arguments(parser):
parser.add_argument(
"input", type=Path,
help="FASTQ/FASTA with uncorrected sequences."
)
parser.add_argument(
"corrections", type=Path,
help="Starcode output in format, tab-separate entries: <corrected sequnence>, <read count>, <comma-separated"
"uncorrected sequences>."
)
parser.add_argument(
"-o", "--output-fasta", type=Path,
help="Output FASTA with corrected sequences."
)
parser.add_argument(
"-b", "--barcode-pattern", required=True,
help="IUPAC string with bases forming pattern to match each corrected sequence too."
)
def main(args):
run_correctfastq(
uncorrected_file=args.input,
corrections_file=args.corrections,
corrected_fasta=args.output_fasta,
barcode_pattern=args.barcode_pattern,
)
def run_correctfastq(
uncorrected_file: str,
corrections_file: str,
corrected_fasta: str,
barcode_pattern: str,
):
logger.info("Starting analysis")
logger.info(f"Processing file: {corrections_file}")
summary = Summary()
if os.stat(corrections_file).st_size == 0:
logging.warning(f"File {corrections_file} is empty.")
pattern = [IUPAC_MAP[base] for base in barcode_pattern]
corr_map = get_corrections(corrections_file, pattern, summary)
logger.info("Correcting sequences and writing to output file.")
with dnaio.open(uncorrected_file, mode="r") as reader, \
dnaio.open(corrected_fasta, mode="w") as writer:
for read in tqdm(reader, desc="Parsing reads"):
summary["Reads total"] += 1
if read.sequence in corr_map:
read.sequence = corr_map[read.sequence]
writer.write(read)
summary["Reads corrected"] += 1
else:
summary["Reads without corrected sequence"] += 1
summary.print_stats(name=__name__)
logger.info("Finished")
def parse_starcode_file(filename: Path) -> Iterator[Tuple[str, int, List[str]]]:
with xopen(filename, "r") as file:
for line in file:
try:
cluster_seq, num_reads, raw_seqs_list = line.split()
except ValueError:
logging.warning(f"Non-default starcode output line: {line}")
continue
raw_seqs = raw_seqs_list.split(",")
yield cluster_seq, int(num_reads), raw_seqs
def get_corrections(corrections_file: Path, pattern: List[Set[str]], summary: Summary) -> Dict[str, str]:
corr_map = {}
stats = defaultdict(list)
for cluster_seq, num_reads, raw_seqs in tqdm(parse_starcode_file(corrections_file), desc="Parsing clusters"):
summary["Clusters"] += 1
if match_pattern(cluster_seq, pattern):
summary["Clusters filtered"] += 1
stats["read"].append(num_reads)
stats["sequence"].append(len(raw_seqs))
corr_map.update({raw_seq: cluster_seq for raw_seq in raw_seqs})
# Add statistics
for stat, values in stats.items():
summary[f"Max {stat}s per cluster"] = max(values)
summary[f"Mean {stat}s per cluster"] = statistics.mean(values)
summary[f"Median {stat}s per cluster"] = statistics.median(values)
summary[f"Clusters with one {stat}"] = sum(1 for v in values if v == 1)
return corr_map
def match_pattern(sequence: str, pattern: List[Set[str]]) -> bool:
if len(sequence) != len(pattern):
return False
return all([base in allowed_bases for base, allowed_bases in zip(sequence, pattern)])
| 32.512195 | 117 | 0.656164 | 500 | 3,999 | 5.1 | 0.304 | 0.047059 | 0.026667 | 0.030588 | 0.068235 | 0.019608 | 0 | 0 | 0 | 0 | 0 | 0.002621 | 0.236809 | 3,999 | 122 | 118 | 32.778689 | 0.832896 | 0.022256 | 0 | 0.043478 | 0 | 0.01087 | 0.188621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.108696 | 0 | 0.206522 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46b76b7d31855db7ddb1a13d8f3e3d37eeece53 | 27,058 | py | Python | pythonFiles/ASCA_Functions.py | huangysh/ASCA_Cluster | 3f7ff5df514cbe48730ba0634abe7f9726d3b98e | [
"MIT"
] | null | null | null | pythonFiles/ASCA_Functions.py | huangysh/ASCA_Cluster | 3f7ff5df514cbe48730ba0634abe7f9726d3b98e | [
"MIT"
] | null | null | null | pythonFiles/ASCA_Functions.py | huangysh/ASCA_Cluster | 3f7ff5df514cbe48730ba0634abe7f9726d3b98e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# **********************************************************************************************************************
# MIT License
# Copyright (c) 2020 School of Environmental Science and Engineering, Shanghai Jiao Tong University
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# This file is part of the ASCA Algorithm, it is used for spatial point clustering analysis. This model contains mainly
# three parts, they are points trend analysis, point cluster analysis and spatial visualization.
#
# Author: Yuansheng Huang
# Date: 2020-06-18
# Version: V 1.2
# Literature
# ==========
# Yuansheng Huang, Peng Li, Yiliang He: To centralize or to decentralize? A systematic framework for optimizing
# rural wastewater treatment investment
# Clark and Evans, 1954; Gao, 2013
# **********************************************************************************************************************
# General import
import arcpy
import math
import functools
import numpy as np
from scipy.spatial import Delaunay
from functools import cmp_to_key
# --------------------------read out file--------------------------
def getFeatureName(folder):
    """Return the names (with extension) of all shapefile features in `folder`."""
    arcpy.env.workspace = folder
    return [feature for feature in arcpy.ListFeatureClasses()]
def readArea(areaShape):
    """Sum the "area"/"AREA" attribute over every feature of the layer.

    Args:
        areaShape: polygon shapefile of the study region.

    Returns:
        Total area of the study region (same units as the attribute, m^2).
    """
    values = []
    rows = arcpy.SearchCursor(areaShape)
    fields = arcpy.ListFields(areaShape)
    for row in rows:
        for field in fields:
            if field.name in ("area", "AREA"):
                values.append(row.getValue(field.name))
    return np.sum(values)
def readObstacle(obstacle):
    """Read obstacle segments (start/end coordinates) from a polyline layer.

    The obstacle layer merges every barrier to consider (roads, rivers,
    watersheds, ...) into one file, split at vertices so each feature is a
    single segment; the segments are used later to drop DT edges that cross
    an obstacle.

    Returns:
        List of segments [[start_x, start_y], [end_x, end_y]].

    Raises:
        Exception: if the shapefile yields no segments.
    """
    segments = []
    rows = arcpy.SearchCursor(obstacle)
    fields = arcpy.ListFields(obstacle)
    for row in rows:
        for field in fields:
            if field.name in ("START_X", "X_START"):
                start_x = row.getValue(field.name)
            elif field.name in ("START_Y", "Y_START"):
                start_y = row.getValue(field.name)
            elif field.name in ("END_X", "X_END"):
                end_x = row.getValue(field.name)
            elif field.name in ("END_Y", "Y_END"):
                end_y = row.getValue(field.name)
        segments.append([[start_x, start_y], [end_x, end_y]])
    if len(segments) == 0:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST!!! PLEASE CHECK INPUT FILE!")
    return segments
def readSpatialPoint(pointShape):
    """Read point coordinates and attributes from a point shapefile.

    Args:
        pointShape: path to the point shapefile.

    Returns:
        pointList: [[ID, X, Y, Q, H], ...] with a sequential ID, the
            coordinates, discharge Q and elevation H ("RASTERVALU" field).
        spatialRef: the shapefile's spatial reference.

    Raises:
        Exception: if no point is read.
    """
    points = []
    rows = arcpy.SearchCursor(pointShape)
    fields = arcpy.ListFields(pointShape)
    ident = 0
    spatialRef = arcpy.Describe(pointShape).spatialReference
    for row in rows:
        for field in fields:
            if field.name == "POINT_X":
                x = row.getValue(field.name)
            if field.name == "POINT_Y":
                y = row.getValue(field.name)
            if field.name == "Q":
                q = row.getValue(field.name)
            if field.name == "RASTERVALU":
                h = row.getValue(field.name)
        points.append([ident, x, y, q, h])
        ident += 1
    if len(points) < 1:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST, PLEASE CHECK YOUR INPUT FILE!!!")
    return points, spatialRef
def checkList(pointlist):
    """Remove points whose coordinates duplicate an earlier point.

    Two points are duplicates when every field after the leading ID is
    equal; the first occurrence (and its ID) is kept.

    Args:
        pointlist: [[ID, X, Y, ...], ...]

    Returns:
        The de-duplicated point list, original order preserved.
    """
    # BUG FIX: the original rebuilt the result with `output.append(
    # idList.extend(...))` -- list.extend returns None, so the function
    # returned a list of Nones, and removing from idList while indexing by
    # position corrupted the pairing of IDs and coordinates.
    seen_coords = []
    unique_points = []
    for entry in pointlist:
        coords = entry[1:]
        if coords not in seen_coords:
            seen_coords.append(coords)
            unique_points.append(entry)
    return unique_points
def getNearestDistance(pointList):
    """Return, for every point, the 3D distance to its nearest neighbour.

    Args:
        pointList: [[ID, X, Y, Q, H], ...]

    Returns:
        List of nearest-neighbour distances, one per input point.

    Raises:
        Exception: if the input list is empty.
    """
    nearest = []
    for i, p in enumerate(pointList):
        candidates = []
        for j, q in enumerate(pointList):
            if j == i:
                continue
            # planar distance first, then fold in the elevation difference
            planar = math.hypot(p[1] - q[1], p[2] - q[2])
            candidates.append(math.hypot(planar, p[4] - q[4]))
        nearest.append(min(candidates))
    if len(nearest) < 1:
        raise Exception("EMPTY LIST: YOU GOT AN EMPTY LIST, PLEASE CHECK YOUR INPUT FILE!!!")
    return nearest
def NNI(pointList, distanceList, areaValue):
    """Nearest-neighbour index of a point set (Clark and Evans, 1954).

    NNI > 1 indicates a dispersed pattern, NNI < 1 a clustered one.

    Args:
        pointList: the point list (only its length is used).
        distanceList: nearest-neighbour distance per point.
        areaValue: area of the study region (m^2).

    Returns:
        (indexValue, z_test): the index and its z-score.
    """
    n = len(pointList)
    expected = 0.5 * math.sqrt(areaValue / n)  # expected mean distance for CSR
    observed = np.sum(distanceList) / n
    std_err = 0.26236 / (math.sqrt(n ** 2) / areaValue)
    return observed / expected, (observed - expected) / std_err
# ----------------starting cluster------------------
def getDelaunayTriangle(pointList):
    """Triangulate the point set and return DT vertex indices and records.

    Args:
        pointList: [[ID, X, Y, Q, H], ...]

    Returns:
        triangleVertexIndex: per triangle, its three point indices.
        triangleVertexCoordinate: per triangle, its three full point records.
    """
    xy = np.array([p[1:3] for p in pointList])
    dt = Delaunay(xy)
    triangleVertexIndex = dt.simplices[:].tolist()
    triangleVertexCoordinate = [
        [pointList[v] for v in simplex] for simplex in triangleVertexIndex
    ]
    return triangleVertexIndex, triangleVertexCoordinate
def unfoldList(nestedList):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sub in nestedList:
        flattened.extend(sub)
    return flattened
def clusteredList(pointList, marker):
    """Group list elements by which marker they contain.

    Args:
        pointList: list of items (each itself a list/sequence).
        marker: markers used for classification, one group per marker.

    Returns:
        [[m, [items containing m]], ...] -- one entry per marker, in
        marker order.
    """
    # BUG FIX: the original appended a (marker, accumulator) pair for every
    # item/marker combination and shared one accumulator across markers,
    # contradicting the documented one-group-per-marker contract.
    grouped = []
    for m in marker:
        members = [item for item in pointList if m in item]
        grouped.append([m, members])
    return grouped
def uniqueListElement(listWithCopyElement):
    """Return the list with duplicate elements removed, order preserved.

    Works for unhashable elements (e.g. nested lists) because membership
    is tested with `in` rather than a set.
    """
    unique = []
    for element in listWithCopyElement:
        if element not in unique:
            unique.append(element)
    return unique
def getEdgeID(indexA, indexB):
    """Build the canonical edge ID "V_min_max" from two vertex indices.

    Raises:
        Exception: if both indices point to the same vertex.
    """
    if indexA == indexB:
        raise Exception("ERROR: Indexes point to the same point!!!")
    lo, hi = min(indexA, indexB), max(indexA, indexB)
    return "V" + "_" + str(lo) + "_" + str(hi)
def getLength(pointA, pointB):
    """Return the 3D Euclidean distance between two [ID, X, Y, Q, H] points."""
    dx = pointA[1] - pointB[1]
    dy = pointA[2] - pointB[2]
    dz = pointA[4] - pointB[4]
    return math.hypot(math.hypot(dx, dy), dz)
def getEdgeLength(triangleVertexIndex, triangleVertexCoordinate):
    """Compute the IDs and 3D lengths of every DT triangle edge.

    Args:
        triangleVertexIndex, triangleVertexCoordinate: outputs of
            getDelaunayTriangle, one entry per triangle.

    Returns:
        triangleEdgeList: per triangle, its three edges, each as
            [edgeID, minIdx, maxIdx, length].
        edgeList: triangleEdgeList flattened and de-duplicated.
    """
    triangleEdgeList = []
    for idx_triple, coord_triple in zip(triangleVertexIndex, triangleVertexCoordinate):
        edges = []
        # The three vertex pairs of a triangle, in the fixed order AB, AC, BC.
        for a, b in ((0, 1), (0, 2), (1, 2)):
            ia, ib = idx_triple[a], idx_triple[b]
            edges.append([getEdgeID(ia, ib), min(ia, ib), max(ia, ib),
                          getLength(coord_triple[a], coord_triple[b])])
        triangleEdgeList.append(edges)
    edgeList = uniqueListElement(unfoldList(triangleEdgeList))
    return triangleEdgeList, edgeList
# --------------------------delete global long edge--------------------------
def getGlobalEdgeStatistic(edgeList):
    """Global edge-length statistics of the DT.

    Args:
        edgeList: [[edgeID, minIdx, maxIdx, length], ...]

    Returns:
        (mean, sample standard deviation) of all edge lengths.

    Raises:
        ZeroDivisionError: if fewer than two edges are supplied.
    """
    lengths = [edge[-1] for edge in edgeList]
    if len(lengths) < 2:
        raise ZeroDivisionError
    return np.mean(lengths), np.std(lengths, ddof=1)
def getFirstOrderEdges(pointList, edgeList):
    """First-order neighbourhood of every point.

    Args:
        pointList: [[ID, ...], ...]
        edgeList: [[edgeID, minIdx, maxIdx, length], ...]

    Returns:
        firstOrderEdges: per point, [ID, incident edges...].
        firstOrderPoints: per point, [ID, neighbour IDs...].

    Note:
        Points with no incident edge are left out of both outputs.
    """
    firstOrderEdges, firstOrderPoints = [], []
    for point in pointList:
        pid = point[0]
        incident = [pid]
        endpoint_ids = []
        for edge in edgeList:
            if pid in edge[1:3]:
                incident.append(edge)
                endpoint_ids.extend(edge[1:3])
        neighbours = list(set(endpoint_ids))
        if pid not in neighbours:
            # isolated point: no incident edges, skip it entirely
            continue
        neighbours.remove(pid)
        firstOrderEdges.append(incident)
        firstOrderPoints.append([pid] + neighbours)
    return firstOrderEdges, firstOrderPoints
def getFirstOrderEdgesMean(firstOrderEdges):
    """Mean first-order edge length per point.

    Args:
        firstOrderEdges: output of getFirstOrderEdges, [[Pi, edges...], ...].

    Returns:
        [[Pi, mean length], ...]
    """
    means = []
    for entry in firstOrderEdges:
        lengths = [edge[3] for edge in entry[1:]]
        means.append([entry[0], np.mean(lengths)])
    return means
def getGlobalCutValue(globalEdgeMean, globalEdgeVariation, firstOrderEdgesMean):
    """Per-point global cut criterion used to delete global long edges.

    GCV_i = mean + 0.5 * (mean / firstOrderMean_i) * std

    Args:
        globalEdgeMean, globalEdgeVariation: global edge-length statistics.
        firstOrderEdgesMean: [[Pi, mean length], ...].

    Returns:
        [[Pi, GCV_i], ...]
    """
    cuts = []
    for entry in firstOrderEdgesMean:
        pid, local_mean = entry[0], entry[1]
        cuts.append([pid, globalEdgeMean + 0.5 * (globalEdgeMean / local_mean) * globalEdgeVariation])
    return cuts
def getGlobalOtherEdge(edgeList, globalCutValueList):
    """Drop every edge at least as long as an endpoint's global cut value.

    Args:
        edgeList: [[edgeID, minIdx, maxIdx, length], ...]
        globalCutValueList: [[Pi, GCV_i], ...]

    Returns:
        A new edge list without the global long edges.
    """
    remaining = edgeList[:]
    for cut_entry in globalCutValueList:
        pid, cut = cut_entry[0], cut_entry[1]
        for edge in edgeList:
            # guard against removing an edge twice (it touches two points)
            if pid in edge[1:3] and edge[3] >= cut and edge in remaining:
                remaining.remove(edge)
    return remaining
def aggregation(edgeList):
    """Merge edge endpoint pairs into connected point clusters.

    Sub-lists sharing any vertex are unioned in place: the lower-indexed
    slot keeps the union, the higher-indexed slot becomes [].

    Args:
        edgeList: [[edgeID, minIdx, maxIdx, length], ...]

    Returns:
        List of clusters, each a list of point indices (only len > 1 kept).
    """
    pairs = [edge[1:3] for edge in edgeList]  # endpoint index pairs
    for i in range(len(pairs)):
        for j in range(len(pairs)):
            union = list(set(pairs[i] + pairs[j]))
            combined_size = len(pairs[j]) + len(pairs[i])
            if i == j:
                break
            elif len(union) < combined_size:
                # overlap found: keep the union in slot i, empty slot j
                pairs[i] = union
                pairs[j] = []
    clusters = []
    for group in pairs:
        if len(group) > 1:
            clusters.append(group)
    return clusters
def cluster(pointList, indexList, marker):
    """Append a cluster label to every point in ``pointList`` (in place).

    Points whose positional index belongs to no cluster get the isolated
    label ``marker + "0"``; points whose id appears in the n-th sub-list of
    ``indexList`` get ``marker + str(n + 1)``.

    Args:
        pointList: point records shaped [point_id, x, y, ...]; mutated in
            place by appending the label at the end of each record.
        indexList: nested list of point indices, one sub-list per cluster
            (output of aggregation()).
        marker: cluster label prefix, e.g. "G".

    Returns:
        None. ``pointList`` is modified in place.
    """
    # BUGFIX: the original called pointList.index(i) / indexList.index(lst)
    # inside the loops, which is O(n) per call and returns the wrong position
    # when the list contains duplicate entries; enumerate() yields the true
    # position in O(1).
    clusterPointIndex = {i for cluster_ids in indexList for i in cluster_ids}
    isolated_label = marker + "0"
    for position, point in enumerate(pointList):
        if position not in clusterPointIndex:
            point.append(isolated_label)
    for n, cluster_ids in enumerate(indexList, start=1):  # label clustered points
        cluster_label = marker + str(n)
        ids = set(cluster_ids)
        for point in pointList:
            if point[0] in ids:
                point.append(cluster_label)
    return
# --------------------------删除局部长边--------------------------
def getSubgraphEdge(pointList, edgeList, indexList):
    """Collect the vertices and edges of every subgraph (cluster).

    Based on the aggregation() result: every cluster becomes one subgraph
    whose vertex records and incident edges are gathered here.

    Args:
        pointList: labelled point records.
        indexList: nested list of point indices, one sub-list per cluster.
        edgeList: edge records after global long edges were deleted.

    Returns:
        Tuple (subgraphVertexList, subgraphEdgeList); element n of each list
        holds the vertices / edges of the n-th subgraph.
    """
    subgraphVertexList = [
        [pointList[i] for i in cluster_idx] for cluster_idx in indexList
    ]
    subgraphEdgeList = []
    for cluster_idx in indexList:
        edges = []
        for vertex_idx in cluster_idx:
            for edge in edgeList:
                # keep each incident edge once, in first-seen order
                if vertex_idx in edge[1:3] and edge not in edges:
                    edges.append(edge)
        subgraphEdgeList.append(edges)
    return subgraphVertexList, subgraphEdgeList
def getSecondOrderEdges(subgraphVertexList, subgraphEdgeList):
    """Compute per-vertex second-order neighbourhood edge-length means.

    For every subgraph and every vertex in it, collects the edges incident to
    the vertex's first-order neighbours (its second-order neighbourhood) and
    averages their lengths.

    Args:
        subgraphVertexList, subgraphEdgeList: output of getSubgraphEdge().

    Returns:
        Nested list; element n holds [vertex_index, mean_length] pairs for
        subgraph n.
    """
    length, subgraphSecondOrderEdgeMean = len(subgraphVertexList), []
    for i in range(length): # iterate over subgraphs
        subgraphVertex, subgraphEdge = subgraphVertexList[i], subgraphEdgeList[i] # vertex and edge of subgraph.
        # getFirstOrderEdges is defined elsewhere in this module; its second
        # return value is assumed to be [[Pi, neighbour, neighbour, ...], ...]
        # — TODO confirm against its definition.
        _, firstOrderPoints = getFirstOrderEdges(subgraphVertex, subgraphEdge)
        firstOrderPointList = [i[1:] for i in firstOrderPoints]
        indexList = [i[0] for i in firstOrderPoints]
        secondOrderMean = []
        for n in range(len(firstOrderPointList)):
            subgraphSecondOrderEdgeC, index, = [], indexList[n]
            # gather every edge incident to any first-order neighbour
            for p in firstOrderPointList[n]:
                for e in subgraphEdge:
                    if p in e[1:3]:
                        subgraphSecondOrderEdgeC.append(e)
            # uniqueListElement (defined elsewhere) presumably de-duplicates
            # edges collected via both endpoints — verify.
            subgraphSecondOrderEdgeU = uniqueListElement(subgraphSecondOrderEdgeC)
            edgeLengthPi = [i[-1] for i in subgraphSecondOrderEdgeU]
            Pi_mean = np.mean(edgeLengthPi)
            secondOrderMean.append([index, Pi_mean])
        subgraphSecondOrderEdgeMean.append(secondOrderMean)
    return subgraphSecondOrderEdgeMean
def getSubgraphEdgeStatistic(subgraphVertexList, subgraphEdgeList): # updated by Ethan Huang in 20200618
    """Compute each subgraph's mean local edge-length variation.

    For every vertex the standard deviation of its first-order neighbourhood
    edge lengths is computed; the per-subgraph result is the mean of those
    deviations.

    Args:
        subgraphVertexList, subgraphEdgeList: output of getSubgraphEdge().

    Returns:
        List with one mean-variation value per subgraph.
    """
    length, subgraphMeanVariation = len(subgraphVertexList), []
    for p in range(length): # iterate over subgraphs
        subgraphVertex, subgraphEdge = subgraphVertexList[p], subgraphEdgeList[p] # vertex and edge of subgraph.
        firstOrderEdges, _ = getFirstOrderEdges(subgraphVertex, subgraphEdge)
        firstOrderEdgeList = [i[1:] for i in firstOrderEdges]
        firstOrderEdgeVariationList = []
        for edgeList in firstOrderEdgeList:
            firstOrderEdgeLength = [e[-1] for e in edgeList] # first-order neighbourhood edge lengths of one vertex
            # ddof=1 (sample std) needs at least two samples; fall back to
            # ddof=0 for a single edge so np.std does not return NaN.
            if len(firstOrderEdgeLength) >= 2:
                firstOrderEdgeVariation = np.std(firstOrderEdgeLength, ddof=1)
                firstOrderEdgeVariationList.append(firstOrderEdgeVariation)
            else:
                firstOrderEdgeVariation = np.std(firstOrderEdgeLength, ddof=0)
                firstOrderEdgeVariationList.append(firstOrderEdgeVariation)
        meanVariation = np.mean(firstOrderEdgeVariationList)
        subgraphMeanVariation.append(meanVariation)
    return subgraphMeanVariation
def getLocalCutValue(subgraphMeanVariation, subgraphSecondOrderEdgeMean):
    """Compute the local constraint criterion for every subgraph vertex.

    Args:
        subgraphMeanVariation: per-subgraph mean local edge-length variation.
        subgraphSecondOrderEdgeMean: per-subgraph [vertex_index, mean] pairs
            of second-order neighbourhood edge-length means.

    Returns:
        Nested list mirroring ``subgraphSecondOrderEdgeMean`` with each mean
        replaced by the local cut value.
    """
    subgraphLocalCutValueList = []
    # the two inputs are parallel lists, one entry per subgraph
    for mean_variation, second_order_means in zip(subgraphMeanVariation, subgraphSecondOrderEdgeMean):
        subgraphLocalCutValueList.append(
            [[entry[0], entry[1] + 0.5 * mean_variation] for entry in second_order_means]  # todo 0.5?
        )
    return subgraphLocalCutValueList
def getLocalOtherEdge(edgeList, subgraphLocalCutValueList):
    """Delete local long edges, keeping the remaining reachable edges.

    An edge is a "local long edge" when its length meets or exceeds the
    local cut value of either of its endpoints in any subgraph.

    Args:
        edgeList: edge records shaped [eid, pointA, pointB, ..., length]
            (the list after global long edges were deleted).
        subgraphLocalCutValueList: nested [[vertex_index, cut_value], ...]
            per subgraph, from getLocalCutValue().

    Returns:
        A new edge list with the local long edges removed.
    """
    # BUGFIX: the original removed elements from `localOtherEdge` while
    # iterating over it, which makes the iterator skip the element that
    # follows each removal, so some over-length edges survived.  Building a
    # filtered copy avoids the mutation entirely.
    localOtherEdge = []
    for edge in edgeList:
        is_long = any(
            pnt[0] in edge[1:3] and edge[-1] >= pnt[1]
            for sg in subgraphLocalCutValueList
            for pnt in sg
        )
        if not is_long:
            localOtherEdge.append(edge)
    return localOtherEdge
# --------------------------删除限制长边--------------------------
def deleteRestrictionEdge(edgeList, restritionNumber):
    """Remove DT edges whose length meets or exceeds a fixed limit.

    Args:
        edgeList: edge records shaped [eid, pointA, pointB, length]
            (the list after local long edges were deleted).
        restritionNumber: numeric length limit; edges with
            length >= restritionNumber are dropped.

    Returns:
        A new list containing only the edges shorter than the limit; the
        input list is not modified.
    """
    # BUGFIX: the original removed elements from the list it was iterating
    # (`for e in edges: ... edges.remove(e)`), which skips the element after
    # each removal, so consecutive over-length edges survived.  A filtered
    # copy is both correct and O(n).
    return [edge for edge in edgeList if edge[3] < restritionNumber]
# --------------------------删除不可达边--------------------------
# 以下函数用于空间叠置分析。基于向量旋转角的二维线段相交判定
# ......................................................................................................................
# This is a 2D line segment intersection decision algorithm, And refer to the following reference:
# https://blog.csdn.net/weixin_42736373/article/details/84587005
# ......................................................................................................................
class IntersectTest(object):
    """2D line segment intersection test based on vector cross products.

    Tests whether segment (p1, p2) intersects segment (q1, q2); the outcome
    is stored in ``self.result``: 1 when the segments intersect (including
    collinear overlap and shared endpoints), 0 otherwise.

    Reference: https://blog.csdn.net/weixin_42736373/article/details/84587005
    """
    def __init__(self, p1, p2, q1, q2):
        self.result = self.intersectTest(p1, p2, q1, q2)
    def coordiante(self, x1, x2, k):
        """Three-way comparison of two points on coordinate axis ``k`` (0=x, 1=y)."""
        if x1[k] < x2[k]:
            return -1
        elif x1[k] == x2[k]:
            return 0
        else:
            return 1
    def intersectTest(self, p1, p2, q1, q2):
        """Return 1 if segments (p1, p2) and (q1, q2) intersect, else 0."""
        p = self.subtraction(p2, p1)
        q = self.subtraction(q2, q1)
        denominator = self.crossProduct(p, q)
        t_molecule = self.crossProduct(self.subtraction(q1, p1), q)  # (q1 - p1) x q
        if denominator == 0:
            if t_molecule == 0:
                # Collinear segments: they intersect unless one lies entirely
                # before the other along the shared line.
                p_q = [p1, p2, q1, q2]
                if p1 != q1 and p1 != q2 and p2 != q1 and p2 != q2:
                    # Sort the endpoints along one axis.  BUGFIX: the original
                    # computed (p2[0]-p1[0]) / (p2[1]-p1[1]) to pick the axis
                    # and raised ZeroDivisionError for horizontal segments;
                    # sorting by y is only needed when the segment is vertical.
                    k = 1 if p2[0] == p1[0] else 0
                    p_q = sorted(p_q, key=cmp_to_key(functools.partial(self.coordiante, k=k)))
                    if p_q[0:2] == [p1, p2] or p_q[0:2] == [p2, p1] or p_q[0:2] == [q1, q2] or p_q[0:2] == [q2, q1]:
                        # One full segment sorts before the other: disjoint.
                        # BUGFIX: the original returned 1 here as well, which
                        # made the sort pointless and reported disjoint
                        # collinear segments as intersecting.
                        return 0
                    return 1  # overlapping
                return 1  # segments share an endpoint
            else:
                return 0  # parallel, non-collinear
        t = t_molecule / denominator
        if 0 <= t <= 1:
            u_molecule = self.crossProduct(self.subtraction(q1, p1), p)  # (q1 - p1) x p
            u = u_molecule / denominator
            if 0 <= u <= 1:  # proper intersection
                return 1
            else:
                return 0
        else:
            return 0
    def subtraction(self, a, b):
        """Element-wise a - b for two 2D points."""
        c = []
        for i, j in zip(a, b):
            c.append(i-j)
        return c
    def crossProduct(self, a, b):
        """2D cross product (z component) of vectors a and b."""
        return a[0]*b[1]-a[1]*b[0]
# ......................................................................................................................
def getReachableEdge(edgeList, obstacleList, pointList):
    """Delete edges that cross an obstacle and return the remaining edges.

    Args:
        edgeList: edge records shaped [eid, pointA, pointB, length]
            (the list after restricted long edges were deleted).
        obstacleList: obstacle segments [[[Sx1, Sy1], [Ex1, Ey1]], ...].
        pointList: point records shaped [point_id, x, y, ...] indexed by the
            edge endpoint indices.

    Returns:
        The edges of ``edgeList`` that do not intersect any obstacle.
    """
    edge_points = [[pointList[e[1]], pointList[e[2]]] for e in edgeList]
    unreach = []
    for obstacle in obstacleList:
        for seg in edge_points:
            if seg in unreach:
                continue
            hit = IntersectTest(obstacle[0], obstacle[1], seg[0][1:3], seg[1][1:3]).result
            if hit == 1:
                unreach.append(seg)
    reachable = [seg for seg in edge_points if seg not in unreach]
    reachableEdge = []
    for seg in reachable:
        index_a, index_b = seg[0][0], seg[1][0]
        for E in edgeList:
            if index_a in E[1:3] and index_b in E[1:3]:
                reachableEdge.append(E)
    return reachableEdge
# --------------------------ArcGIS界面的可视化与输出--------------------------
def createShapeFile(pointList, spatialRef, output):
    """Create a point shapefile from a coordinate list with a spatial reference.

    Args:
        pointList: point records shaped [point_id, x, y, ...]; positions 1
            and 2 are used as X and Y.
        spatialRef: arcpy spatial reference assigned to every geometry.
        output: output file path/name passed to CopyFeatures_management.
    """
    point = arcpy.Point()
    pointGeometryList = []
    for i in range(len(pointList)):
        # NOTE(review): the single Point object is mutated and reused —
        # presumably arcpy.PointGeometry copies the coordinates at
        # construction time; verify against the arcpy documentation.
        point.X = pointList[i][1]
        point.Y = pointList[i][2]
        pointGeometry = arcpy.PointGeometry(point, spatialRef)
        pointGeometryList.append(pointGeometry)
    arcpy.CopyFeatures_management(pointGeometryList, output)
    return
def addMarkerFields(fileName, pointList):
    """Add marker fields to the output shapefile and fill them from pointList.

    Args:
        fileName: path/name of the shapefile to extend.
        pointList: labelled point records; the last four elements of each
            record are expected to be the O/G/L/C cluster markers, in that
            order, with the point id at position 0.
    """
    arcpy.AddField_management(fileName, "ID_T", "FLOAT")
    arcpy.AddField_management(fileName, "markerO", "TEXT") # obstacle
    arcpy.AddField_management(fileName, "markerG", "TEXT") # global
    arcpy.AddField_management(fileName, "markerL", "TEXT") # local
    arcpy.AddField_management(fileName, "markerC", "TEXT") # Constraint
    # rows are assumed to be in the same order as pointList — the cursor is
    # walked in parallel with the list via `counter`.
    counter, rows = 0, arcpy.UpdateCursor(fileName)
    for row in rows:
        row.setValue("ID_T", pointList[counter][0])
        row.setValue("markerO", pointList[counter][-4])
        row.setValue("markerG", pointList[counter][-3])
        row.setValue("markerL", pointList[counter][-2])
        row.setValue("markerC", pointList[counter][-1])
        rows.updateRow(row)
        counter += 1
    return
def outputWriteToTxt(filePath, name, inList, pointList):
    """Write a summary of the clustering result to a .txt file.

    Args:
        filePath: folder path (expected to end with a separator) where the
            .txt file is stored.
        name: name of the .txt file (without extension).
        inList: summary values [number_of_points, NNI, number_of_clusters].
        pointList: labelled point records; the last element of each record
            is the cluster label.
    """
    from collections import Counter
    outfile = filePath + name + ".txt"
    # "with" guarantees the handle is closed even if a write fails — the
    # original left the file open on error.
    with open(outfile, 'w') as myDocument:
        myDocument.write("=========================================================================================" + "\n")
        myDocument.write("This file summarized the cluster result! " + "\n")
        myDocument.write("=========================================================================================" + "\n")
        myDocument.write("Please notice that 'O0, C0...' represents isolated points! " + "\n")
        myDocument.write("\n")
        # BUGFIX: the original wrote "Numbet of points".
        myDocument.write("Number of points: " + str(inList[0]) + "\n")
        myDocument.write("NNI: " + str(inList[1]) + "\n")
        myDocument.write("Number of cluster: " + str(inList[2]-1) + "C" + "\n" + "\n")  # isolated points not counted
        myDocument.write("-----------------------------------------------------------------------------------------" + "\n")
        myDocument.write("Details of the clustering results" + "\n")
        myDocument.write("-----------------------------------------------------------------------------------------" + "\n")
        # Count label occurrences in one pass (the original rescanned the
        # label list per distinct label) and emit them in first-appearance
        # order, which is deterministic (the original iterated an unordered
        # set).
        labelCounts = Counter(point[-1] for point in pointList)
        for label, count in labelCounts.items():
            myDocument.write(label + ": " + str(count) + "\n")
    return
| 30.888128 | 120 | 0.594131 | 2,636 | 27,058 | 6.078907 | 0.241654 | 0.007988 | 0.011608 | 0.011233 | 0.102659 | 0.055417 | 0.037194 | 0.026023 | 0.019346 | 0.019346 | 0 | 0.015076 | 0.259664 | 27,058 | 875 | 121 | 30.923429 | 0.784595 | 0.25874 | 0 | 0.177778 | 0 | 0 | 0.05089 | 0.018697 | 0.002222 | 0 | 0 | 0.002286 | 0 | 1 | 0.08 | false | 0 | 0.013333 | 0.002222 | 0.191111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46c23843edc73519673ebfe146fa5d2ec8aa8a4 | 7,000 | py | Python | legacy/notebooks/Language Model - BPE.py | ceshine/modern_chinese_nlp | e1d5941f381431ac114f440472d3e0f976437777 | [
"MIT"
] | 42 | 2018-08-21T05:31:18.000Z | 2021-08-30T02:00:05.000Z | legacy/notebooks/Language Model - BPE.py | ceshine/modern_chinese_nlp | e1d5941f381431ac114f440472d3e0f976437777 | [
"MIT"
] | null | null | null | legacy/notebooks/Language Model - BPE.py | ceshine/modern_chinese_nlp | e1d5941f381431ac114f440472d3e0f976437777 | [
"MIT"
] | 7 | 2018-08-21T09:04:17.000Z | 2021-03-28T06:25:28.000Z |
# coding: utf-8
# In[1]:
import sys
sys.path.append("../")
# In[2]:
from pathlib import Path
from functools import partial
import joblib
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from fastai.text import LanguageModelLoader, LanguageModelData
from fastai.core import T
from fastai.rnn_reg import EmbeddingDropout
from torch.optim import Adam
import torch.nn as nn
import torch
import torch.nn.functional as F
import sentencepiece as spm
# In[3]:
# Load the pre-tokenized BPE corpus (list of numpy id arrays, one per text).
tokens = joblib.load("../data/tokens_bpe.pkl")
# In[4]:
# Filter out empty texts
tokens = [x for x in tokens if x.shape[0] > 0]
# In[5]:
# Set shuffle = False to keep sentences from the same paragraph together
# 80% train, then the remaining 20% split evenly into validation and test.
trn_tokens, val_tokens = train_test_split(tokens, test_size=0.2, shuffle=False)
val_tokens, tst_tokens = train_test_split(val_tokens, test_size=0.5, shuffle=False)
# In[6]:
def get_voc_stats(tokens):
    """Print and return corpus token statistics.

    Args:
        tokens: list of 1-D numpy arrays of token ids; id 0 is the
            unknown token.

    Returns:
        Tuple (total_tokens, unknown_tokens).  The original returned None;
        returning the values makes the function usable programmatically
        while keeping the printed output unchanged.
    """
    total_tokens = np.sum([x.shape[0] for x in tokens])
    unks = np.sum([np.sum(x == 0) for x in tokens])
    print("Total tokens: %d\nUnknown Percentage: %.2f %%" % (total_tokens, unks * 100 / total_tokens))
    return total_tokens, unks
get_voc_stats(tokens)
# In[7]:
# Language-model loader setup: backprop-through-time window and batch size.
bptt = 75
batch_size = 64
# vocabulary size = highest token id in the corpus + 1
n_tok = int(np.max([np.max(x) for x in tokens]) + 1)
trn_loader = LanguageModelLoader(
    np.concatenate(trn_tokens), batch_size, bptt)
val_loader = LanguageModelLoader(
    np.concatenate(val_tokens), batch_size, bptt)
tst_loader = LanguageModelLoader(
    np.concatenate(tst_tokens), batch_size, bptt)
# In[8]:
# Load the SentencePiece BPE model used to produce the token ids.
sp = spm.SentencePieceProcessor()
sp.Load("../data/bpe_model.model")
# In[9]:
sp.EncodeAsIds(", 的*")
# In[10]:
np.sum([np.sum(x == 1) for x in tokens]) # <s>
# In[11]:
np.sum([np.sum(x == 2) for x in tokens]) # </s>
# In[12]:
sp.DecodeIds(trn_tokens[0].tolist())
# In[13]:
sp.DecodeIds(trn_tokens[1].tolist())
# In[14]:
# Probe: which tokens most often precede the unknown token (id 0)?
from collections import Counter
tmp = []
for i in range(10000):
    for j in range(1, trn_tokens[i].shape[0]):
        if trn_tokens[i][j] == 0:
            tmp.append(trn_tokens[i][j-1])
Counter(tmp).most_common(10)
# In[15]:
# Probe: which tokens most often follow token id 4?
from collections import Counter
tmp = []
for i in range(10000):
    for j in range(1, trn_tokens[i].shape[0]-1):
        if trn_tokens[i][j] == 4:
            tmp.append(trn_tokens[i][j+1])
Counter(tmp).most_common(10)
# In[19]:
sp.DecodeIds([4569])
# In[17]:
# Cache directory and fastai LanguageModelData wrapper for training.
path = Path("../data/cache/lm_bpe/")
path.mkdir(parents=True, exist_ok=True)
model_data = LanguageModelData(
    path, pad_idx=2, n_tok=n_tok, trn_dl=trn_loader, val_dl=val_loader, test_dl=tst_loader
)
# In[18]:
n_tok
# ### QRNN Model
# In[21]:
# dropout rates: [dropouti, dropout, wdrop, dropoute, dropouth]
drops = np.array([0.05, 0.1, 0.05, 0, 0.1])
learner = model_data.get_model(
    partial(Adam, betas=(0.8, 0.999)),
    emb_sz=300, n_hid=500, n_layers=4,
    dropouti=drops[0], dropout=drops[1], wdrop=drops[2],
    dropoute=drops[3], dropouth=drops[4], qrnn=True
)
# In[22]:
# LR range test to pick the learning rate.
learner.clip = 25.
learner.lr_find(start_lr=1e-5, end_lr=1, linear=False)
learner.sched.plot()
# In[22]:
lrs = 2e-3
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 3), cycle_len=10, use_wd_sched=True)
# In[23]:
learner.sched.plot_lr()
# In[43]:
# fine-tune at a lower learning rate
lrs = 5e-4
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 3), cycle_len=10, use_wd_sched=True)
# In[14]:
learner.sched.plot_loss()
# In[44]:
learner.save("lm_qrnn")
learner.save_encoder("lm_qrnn_enc")
# In[ ]:
learner.load("lm_qrnn")
# ### LSTM
# In[20]:
drops = np.array([0.1, 0.1, 0.05, 0, 0.1])
learner = model_data.get_model(
    partial(Adam, betas=(0.8, 0.999)),
    emb_sz=300, n_hid=500, n_layers=3,
    dropouti=drops[0], dropout=drops[1], wdrop=drops[2],
    dropoute=drops[3], dropouth=drops[4], qrnn=False
)
# In[21]:
learner.clip = 25.
learner.lr_find(start_lr=1e-5, end_lr=1, linear=False)
learner.sched.plot()
# In[22]:
lrs = 2e-3
learner.clip = 10.
learner.fit(lrs, 1, wds=1e-7, use_clr=(50, 5), cycle_len=20, use_wd_sched=True)
# In[23]:
learner.sched.plot_lr()
# In[24]:
learner.save("lm_lstm")
learner.save_encoder("lm_lstm_enc")
# In[25]:
tmp_iter = iter(trn_loader)
# In[26]:
next(tmp_iter)[0].shape
# In[27]:
learner.load("lm_lstm")
# ## Test the model
# In[28]:
# switch to evaluation mode (disables dropout) before inference
learner.model.eval()
# ### Next Character Inference
# In[29]:
tokens = sp.EncodeAsIds("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 计")
tokens
# In[30]:
# Run the LM over the prompt; logits has one row per input position.
logits, _, _ = learner.model(T(tokens).unsqueeze(1))
logits.shape
# In[32]:
# Top-3 next-token predictions per position, shifted by one so each
# prediction aligns with the token it follows.
sorted_idx = np.argsort(logits.data.cpu().numpy(), 1)
preds = []
for i in range(1, 4):
    preds.append([sp.IdToPiece(x) for x in sorted_idx[:, -i].tolist()])
# preds = list(map(lambda x: itos[x], np.argmax(logits.data.cpu().numpy(), 1)))
pd.DataFrame({"orig": sp.EncodeAsPieces("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 计") + [""],
              "pred_1": [""] + preds[0], "pred_2": [""] + preds[1], "pred_3": [""] + preds[2]})
# In[33]:
# NOTE(review): this shadows the builtin `eval` at module level — consider
# renaming (e.g. predict_next_pieces) if the module grows.
def eval(texts):
    """Return a DataFrame of the model's top-3 next-piece predictions.

    Args:
        texts: whitespace-pretokenized string to feed through the LM.

    Returns:
        pandas DataFrame with the original pieces and three prediction
        columns, offset by one row so predictions align with the token
        they follow.
    """
    # reset the RNN hidden state so earlier prompts do not leak in
    learner.model[0].reset()
    tokens =sp.EncodeAsIds(texts)
    logits, _, _ = learner.model(T(tokens).unsqueeze(1))
    sorted_idx = np.argsort(logits.data.cpu().numpy(), 1)
    preds = []
    for i in range(1, 4):
        preds.append([sp.IdToPiece(x) for x in sorted_idx[:, -i].tolist()])
    # preds = list(map(lambda x: itos[x], np.argmax(logits.data.cpu().numpy(), 1)))
    return pd.DataFrame({"orig": sp.EncodeAsPieces(texts) + [""],
                         "pred_1": [""] + preds[0], "pred_2": [""] + preds[1], "pred_3": [""] + preds[2]})
# In[34]:
eval("在 现代 印刷 媒体 , 卡通 是 一 种 通常 有 幽默 色")
# In[35]:
eval("对 中国 与 南洋 发动 全面 的 战争 。 1990 年代 , 中")
# ### Generate Sentence
# In[38]:
import random
def generate_text(tokens, N=25):
    """Sample N tokens from the LM, extending `tokens` in place, and print
    the decoded sentence.

    Args:
        tokens: list of seed token ids (mutated: sampled ids are appended).
        N: number of tokens to generate.
    """
    preds = []
    for i in range(N):
        # re-run the model over the whole (growing) sequence each step
        learner.model[0].reset()
        logits, _, _ = learner.model(T(tokens).unsqueeze(1))
        probs = F.softmax(logits).data.cpu().numpy()[-1, :]
        candidates = np.argsort(probs)[::-1]
        while True:
            # Sampling
            candidate = np.random.choice(candidates, p=probs[candidates])
            # Greedy
            # candidate = np.argmax(probs[2:]) + 2
            # skip special tokens (<unk>/<s>/</s> occupy ids 0-2)
            if candidate > 2:
                print(probs[candidates][:3], probs[candidate])
                preds.append(candidate)
                break
#         for candidate in candidates:
#             if candidate > 1 and ord(itos[candidate]) > 255 and (random.random() < probs[candidate] or probs[candidate] < 0.2):
#                 print(probs[candidate])
#                 preds.append(candidate)
#                 break
#         tokens = [preds[-1]]#
        tokens.append(int(preds[-1]))
#     tokens = [:1]
    print(sp.DecodeIds(tokens))
generate_text(sp.EncodeAsIds("德国 是 世界 大国 之 一 , 其 国内 生产 总 值 以 国际 汇率 为主 , "))
# In[66]:
generate_text(sp.EncodeAsIds("在 现代 印刷 媒体 , 卡通 是 一种 通常 有 幽默 色 "))
# In[86]:
generate_text(sp.EncodeAsIds("日本 后来 成为 第二次 世界大战 的 轴心国 之一 , 对 中国 与 南洋 发动 全面 的 战争"))
# In[87]:
generate_text(sp.EncodeAsIds("特朗普 政府 以为 加征 关税 会 令 中国 屈服 , 这种 策略 肯定 会 适得其反 , 如果 就业 和 财富"))
| 17.5 | 129 | 0.618286 | 1,140 | 7,000 | 3.688596 | 0.263158 | 0.021403 | 0.011415 | 0.017122 | 0.437812 | 0.384542 | 0.354102 | 0.329132 | 0.322949 | 0.322949 | 0 | 0.049648 | 0.208714 | 7,000 | 399 | 130 | 17.54386 | 0.709514 | 0.145429 | 0 | 0.293706 | 0 | 0 | 0.090046 | 0.011192 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020979 | false | 0 | 0.125874 | 0 | 0.153846 | 0.020979 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a46caa769bacecd220bd2803dcc01b740d0f7a7d | 2,576 | py | Python | controller/components/badger.py | cclauss/flight-lab | d2dfcc842391c287970b14e470f209665a233b59 | [
"Apache-2.0"
] | 15 | 2018-10-18T07:50:46.000Z | 2021-10-21T03:40:55.000Z | controller/components/badger.py | cclauss/flight-lab | d2dfcc842391c287970b14e470f209665a233b59 | [
"Apache-2.0"
] | 9 | 2018-09-17T23:00:02.000Z | 2019-01-22T21:08:04.000Z | controller/components/badger.py | cclauss/flight-lab | d2dfcc842391c287970b14e470f209665a233b59 | [
"Apache-2.0"
] | 12 | 2019-01-07T12:43:37.000Z | 2021-10-21T03:40:44.000Z | # Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for badge reader component."""
import threading
import time
from components import base
from protos import controller_pb2
from utils import badger
class BadgeReaderComponent(base.Component):
  """Component to authorize USB badges.

  Reads badge ids from a USB badge reader, validates each id against a
  remote validation service, and publishes the resulting authorization
  status.  A scan sets the status for _AUTH_TIMEOUT_SEC seconds, after
  which it reverts to UNKNOWN.

  Events:
    "status_changed": when badge scan is authorized or authorization has expired.
  """

  # Seconds an AUTHORIZED/UNAUTHORIZED status lasts before reverting to UNKNOWN.
  _AUTH_TIMEOUT_SEC = 10

  def __init__(self, proto, *args, **kwargs):
    """Create a BadgeReaderComponent instance.

    Args:
      proto: flightlab.BadgeReader protobuf.
    """
    super(BadgeReaderComponent, self).__init__(proto, *args, **kwargs)
    self._deauth = None  # pending deauthorization timer, if any
    self._validator = badger.BadgeValidator(self.settings.url, self.settings.key_param)
    self._reader = badger.BadgeReader(usb_vendor_id=self.settings.usb_vendor_id, usb_product_id=self.settings.usb_product_id)
    self._reader.on('read_success', self._on_read_success)
    self._reader.start()

  def _on_read_success(self, reader, badge_id):
    """Validate a scanned badge and publish AUTHORIZED/UNAUTHORIZED.

    Args:
      reader: the badger.BadgeReader that emitted the event (unused).
      badge_id: the badge id that was read.

    Emits:
      status_changed
    """
    self.logger.info("Badge %s Read Successfully", badge_id)
    if self._validator.validate(badge_id):
      self.logger.info("Badge Validated")
      self.settings.status = controller_pb2.Badger.AUTHORIZED
      self.emit('status_changed', self)
    else:
      self.logger.info("Invalid Badge")
      self.settings.status = controller_pb2.Badger.UNAUTHORIZED
      self.emit('status_changed', self)
    # Restart the countdown so the latest scan gets the full timeout window.
    if self._deauth:
      self._deauth.cancel()
    self._deauth = threading.Timer(self._AUTH_TIMEOUT_SEC, self._deauthorize)
    self._deauth.start()

  def _deauthorize(self):
    """Ensures status is changed to UNKNOWN, which is the default state.

    Emits:
      status_changed
    """
    self.logger.info("Deauthorizing")
    self.settings.status = controller_pb2.Badger.UNKNOWN
    self.emit('status_changed', self)

  def close(self):
    """Stops the badge reader and deauthorization thread."""
    if self._deauth:
      self._deauth.cancel()
    # NOTE(review): the reader itself is presumably stopped by
    # base.Component.close() — verify.
    super(BadgeReaderComponent, self).close()
| 33.025641 | 125 | 0.727873 | 336 | 2,576 | 5.41369 | 0.440476 | 0.038483 | 0.030786 | 0.028037 | 0.186916 | 0.120396 | 0 | 0 | 0 | 0 | 0 | 0.006585 | 0.174689 | 2,576 | 77 | 126 | 33.454545 | 0.849012 | 0.332298 | 0 | 0.194444 | 0 | 0 | 0.078115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.138889 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a470aba9fce332f6e5b9f8335122d8eef4080e8a | 1,332 | py | Python | adhoc.py | oudmane/excelcy | 25263d16db0cda24fe66ab3d52ff08a770117dc1 | [
"MIT"
] | 99 | 2018-07-19T17:32:26.000Z | 2022-02-01T18:10:57.000Z | adhoc.py | oudmane/excelcy | 25263d16db0cda24fe66ab3d52ff08a770117dc1 | [
"MIT"
] | 15 | 2018-07-20T01:34:32.000Z | 2020-08-25T09:14:28.000Z | adhoc.py | oudmane/excelcy | 25263d16db0cda24fe66ab3d52ff08a770117dc1 | [
"MIT"
] | 11 | 2018-07-20T03:30:29.000Z | 2021-12-14T22:38:23.000Z | from excelcy import ExcelCy
from excelcy.storage import Config
# test_string = 'Android Pay expands to Canada'
# excelcy = ExcelCy()
# excelcy.storage.config = Config(nlp_base='en_core_web_sm', train_iteration=50, train_drop=0.2)
# doc = excelcy.nlp(test_string)
# # showing no ORG
# print([(ent.label_, ent.text) for ent in doc.ents])
# excelcy.storage.source.add(kind='text', value=test_string)
# excelcy.discover()
# excelcy.storage.prepare.add(kind='phrase', value='Android Pay', entity='PRODUCT')
# excelcy.prepare()
# excelcy.train()
# doc = excelcy.nlp(test_string)
# print([(ent.label_, ent.text) for ent in doc.ents])
# FAILED tests/test_excelcy.py::ExcelCyTestCase::test_execute - AssertionError: assert ('$1', 'MONEY') in {('$1 million', 'MONEY'), ('Uber', 'ORG')}
# FAILED tests/test_pipe.py::PipeTestCase::test_execute - AssertionError: assert ('$1', 'MONEY') in {('$1 million', 'MONEY'), ('Uber', 'ORG')}
# FAILED tests/test_readme.py::ReadmeTestCase::test_readme_04 - AssertionError: assert ('China' == 'Himalayas'
# Entity recognition before training: with the base spaCy model,
# "Android Pay" is presumably not tagged as a PRODUCT (see the commented
# walkthrough above) — verify by running.
excelcy = ExcelCy()
doc = excelcy.nlp('Android Pay expands to Canada')
print([(ent.label_, ent.text) for ent in doc.ents])
# Train from the spreadsheet definition and re-run the same sentence.
excelcy = ExcelCy.execute(file_path='tests/data/test_data_03.xlsx')
doc = excelcy.nlp('Android Pay expands to Canada')
print([(ent.label_, ent.text) for ent in doc.ents])
| 40.363636 | 148 | 0.720721 | 192 | 1,332 | 4.875 | 0.348958 | 0.059829 | 0.055556 | 0.068376 | 0.479701 | 0.403846 | 0.403846 | 0.403846 | 0.403846 | 0.403846 | 0 | 0.010101 | 0.108108 | 1,332 | 32 | 149 | 41.625 | 0.777778 | 0.70045 | 0 | 0.5 | 0 | 0 | 0.226316 | 0.073684 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4756366de9ce838849093fa734c08d1d1fc9abf | 4,978 | py | Python | lambda/frequencyQueries.py | NickStrick/Code-Challenges | b6c13357783d3b556e90349ccc6f9bb568a3531d | [
"MIT"
] | null | null | null | lambda/frequencyQueries.py | NickStrick/Code-Challenges | b6c13357783d3b556e90349ccc6f9bb568a3531d | [
"MIT"
] | null | null | null | lambda/frequencyQueries.py | NickStrick/Code-Challenges | b6c13357783d3b556e90349ccc6f9bb568a3531d | [
"MIT"
] | null | null | null | # https://youtu.be/O3HBd0ICJ2M
# defaultdict is the same as normal dictionaries, except a defaultdict
# sets a default value if a key has not been set yet; this is mostly
# for convenience
from collections import defaultdict
def freqQuery(queries):
    """Answer frequency queries over a multiset of values.

    Operations, given as (op, val) pairs:
        1: insert val.
        2: delete one occurrence of val (no-op if absent).
        3: record 1 if some value currently occurs exactly val times,
           else 0.

    Args:
        queries: iterable of (op, val) pairs.

    Returns:
        List of 0/1 answers, one per type-3 query, in query order.

    All three operations run in O(1); the whole query list is O(n).
    """
    val_counts = defaultdict(int)   # value -> how many times it occurs
    freq_counts = defaultdict(int)  # frequency -> how many values have it
    answers = []
    for op, val in queries:
        if op == 1:
            freq = val_counts[val]
            # move one value from frequency `freq` to `freq + 1`
            if freq_counts[freq] > 0:
                freq_counts[freq] -= 1
            val_counts[val] = freq + 1
            freq_counts[freq + 1] += 1
        elif op == 2:
            freq = val_counts[val]
            # deleting a value that was never inserted is a no-op
            if freq > 0:
                freq_counts[freq] -= 1
                val_counts[val] = freq - 1
                freq_counts[freq - 1] += 1
        elif op == 3:
            # O(1): look the frequency up directly instead of scanning values
            answers.append(1 if freq_counts[val] > 0 else 0)
    return answers
# JS IMPLEMENTATION
# function frequencyQueries(queries) {
# const answers = [];
# // keeps track of the number of occurrences of each query value
# const occurrences = {};
# // keeps track of how many values have shown up a certain number of times
# // keys are integers representing frequency and values are the number
# // of values that showcase that frequency
# // for example, if a query specifies a new value, then that value has
# // only shown up once, so we'll increment the value associated with
# // the key of 1 to indicate that there is an additional value that
# // has shown up once
# const frequencies = {};
#
# for (const [op, val] of queries) {
# if (op === 1) {
# // subtract an occurrence of the value's prior frequency
# frequencies[occurrences[val]]--;
# // add the value to our occurrences map
# occurrences[val] = (occurrences[val] || 0) + 1;
# // increment an occurrence of the value's new frequency
# frequencies[occurrences[val]] = (frequencies[occurrences[val]] || 0) + 1;
# } else if (op === 2 && occurrences[val]) {
# // subtract an occurrence of the value's prior frequency
# frequencies[occurrences[val]]--;
# // remove the value from our occurrences map
# occurrences[val]--;
# // increment an occurrence of the value's new frequency
# frequencies[occurrences[val]]++;
# } else if (op === 3) {
# // all we have to do for operation 3 is check if the value
# // associated with the frequency > 0
# answers.push(frequencies[val] > 0 ? 1 : 0);
# }
# }
#
# return answers;
# }
# RUST IMPLEMENTATION
# use std::collections::HashMap;
#
# fn frequency_queries(queries: Vec<(i32, i32)>) -> Vec<i32> {
# let mut val_counts: HashMap<i32, i32> = HashMap::new();
# let mut freq_counts: HashMap<i32, i32> = HashMap::new();
# let mut answers = vec![];
#
# for (i, j) in queries {
# match i {
# 1 => {
# let f = val_counts.entry(j).or_insert(0);
# // decrement j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| if *v > 0 { *v -= 1 }).or_insert(0);
# // increment j's value in val_counts
# *f += 1;
# // increment j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| *v += 1).or_insert(1);
# },
# 2 => {
# let f = val_counts.entry(j).or_insert(0);
# // decrement j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| if *v > 0 { *v -= 1 }).or_insert(0);
# // decrement j's value in val_counts
# if *f > 0 { *f -= 1; }
# // increment j's value in freq_counts
# freq_counts.entry(*f).and_modify(|v| *v += 1).or_insert(1);
# },
# 3 => {
# let fc = freq_counts.entry(j).or_insert(0);
# if *fc > 0 { answers.push(1); } else { answers.push(0); }
# },
# _ => panic!("Got an unexpected query number"),
# }
# }
#
# answers
# }
| 37.712121 | 87 | 0.546203 | 647 | 4,978 | 4.117465 | 0.228748 | 0.082583 | 0.045045 | 0.037162 | 0.43994 | 0.375375 | 0.340841 | 0.322072 | 0.260511 | 0.260511 | 0 | 0.021535 | 0.337686 | 4,978 | 131 | 88 | 38 | 0.786473 | 0.724387 | 0 | 0.275862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.034483 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a475dd4e9e3743b42f2405054585911fbb5f8a22 | 1,663 | py | Python | char-rnn-name-classification/train.py | StanleyLsx/practical-pytorch | ccc9ebad47ca6763c04dbb8574769cfe3f1acdde | [
"MIT"
] | null | null | null | char-rnn-name-classification/train.py | StanleyLsx/practical-pytorch | ccc9ebad47ca6763c04dbb8574769cfe3f1acdde | [
"MIT"
] | 2 | 2021-06-08T22:12:46.000Z | 2022-01-13T03:11:10.000Z | char-rnn-name-classification/train.py | StanleyLsx/practical-pytorch | ccc9ebad47ca6763c04dbb8574769cfe3f1acdde | [
"MIT"
] | null | null | null | from data import *
from model import *
import time
import math
# Training hyperparameters.
n_hidden = 128
n_epochs = 100000
print_every = 5000
plot_every = 1000
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
# Model, optimizer and loss.  RNN, n_letters and n_categories come from the
# star-imported data/model modules.
rnn = RNN(n_letters, n_hidden, n_categories)
optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()
def train(category_tensor, line_tensor):
    """Run one training step on a single (name, category) example.

    Uses the module-level ``rnn``, ``optimizer`` and ``criterion``.

    Args:
        category_tensor: target category index tensor for NLLLoss.
        line_tensor: one-hot character tensors, one per character of the name.

    Returns:
        Tuple (output, loss): the final-step output and the scalar loss.
    """
    hidden = rnn.init_hidden()
    optimizer.zero_grad()
    # feed the name one character at a time, carrying the hidden state;
    # only the output after the last character is scored.
    # NOTE(review): an empty line_tensor would leave `output` unbound —
    # inputs are presumably non-empty; verify upstream.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()
    return output, loss.item()
# Keep track of losses for plotting; current_loss accumulates between plot
# points and is averaged into all_losses every plot_every epochs.
current_loss = 0
all_losses = []
def time_since(since):
    """Format the time elapsed since ``since`` (a time.time() stamp) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
# Main training loop: one randomly sampled (name, category) pair per epoch.
start = time.time()
for epoch in range(1, n_epochs + 1):
    category, line, category_tensor, line_tensor = random_training_pair()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss
    # Print epoch number, loss, name and guess
    if epoch % print_every == 0:
        guess, guess_i = category_from_output(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (
            epoch, epoch / n_epochs * 100, time_since(start), loss, line, guess, correct))
    # Add current loss avg to list of losses
    if epoch % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
# Persist the whole trained model.
torch.save(rnn, 'char-rnn-name-classification.pt')
a4774d5aae891207d351e4416da7e1e5f7ee2c3e | 8,623 | py | Python | flow_models/elephants/plot.py | piotrjurkiewicz/flow_stats | cc97a8381275cb9dd23ed0c3432abffaf4198431 | [
"MIT"
] | 9 | 2019-07-08T09:53:22.000Z | 2021-11-19T07:50:11.000Z | flow_models/elephants/plot.py | ElsevierSoftwareX/SOFTX-D-21-00003 | cc97a8381275cb9dd23ed0c3432abffaf4198431 | [
"MIT"
] | 1 | 2021-02-23T16:01:21.000Z | 2021-04-03T02:06:32.000Z | flow_models/elephants/plot.py | ElsevierSoftwareX/SOFTX-D-21-00003 | cc97a8381275cb9dd23ed0c3432abffaf4198431 | [
"MIT"
] | 5 | 2019-09-27T14:52:54.000Z | 2022-01-25T07:58:24.000Z | #!/usr/bin/python3
import argparse
import collections
import pathlib
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
from flow_models.elephants.calculate import calculate
from flow_models.lib.data import UNITS
from flow_models.lib.plot import save_figure, matplotlib_config
X_VALUES = ['length', 'size']
METHODS = {'first': '-',
'threshold': '--',
'sampling': ':'}
SIZE = 0.6
FIGSIZE = [SIZE * 11.2, SIZE * 5.66]
def plot_traffic(calculated):
    """Plot traffic coverage as a function of flow table occupancy.

    Args:
        calculated: nested mapping ``method -> x_val -> DataFrame`` with
            ``occupancy_mean`` and ``octets_mean`` columns.

    Saves one two-panel figure per reference ('absolute' plus one figure
    relative to each method in METHODS).
    """
    # Re-sample every method's curve onto a common inverse-occupancy grid
    # so curves can be divided point-wise in the relative plots.
    interpolated = {}
    for x_val in ['length', 'size']:
        nidx = 1 / pd.Float64Index(np.geomspace(1, 10000, 5000, endpoint=False))
        for method in METHODS:
            idx = 1 / calculated[method][x_val]['occupancy_mean']
            ddd = calculated[method][x_val].copy().set_index(idx)
            ddd = ddd[~ddd.index.duplicated()]
            ddd = ddd.reindex(ddd.index.union(nidx)).interpolate('slinear').reindex(nidx)
            interpolated.setdefault(method, {})[x_val] = ddd
    for to in ['absolute'] + list(METHODS):
        to_label = '%'
        fig, axes = plt.subplots(1, 2, sharex='all', sharey='all',
                                 figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]])
        for n, x_val in enumerate(['length', 'size']):
            ax = axes[n]
            for method in METHODS:
                d = interpolated[method][x_val]['octets_mean']
                if to == 'absolute':
                    r = 1
                else:
                    r = interpolated[to][x_val]['octets_mean']
                    to_label = f'relative to {to}'
                ax.plot(d.index, d / r, 'b' + METHODS[method], lw=2,
                        label=method)
            ax.set_ylabel(f'Traffic coverage [{to_label}]')
            ax.set_xlabel(f'Flow table occupancy (decision by {x_val})')
            ax.tick_params('y', labelleft=True)
            ax.set_xscale('log')
            ax.legend()
        fig.gca().invert_xaxis()
        out = f'traffic_{to}'
        # Bug fix: save_figure(fig, out) was called twice in a row;
        # the second, identical call was redundant.
        save_figure(fig, out)
        plt.close(fig)
def plot_usage(calculated, what):
    """Plot flow table usage (`what` = 'occupancy' or 'operations') vs traffic coverage.

    Also writes a LaTeX table ('selected.tex') of usage at selected
    coverage points.  `calculated` maps method -> x_val -> DataFrame.
    """
    # Re-sample every curve onto a common traffic-coverage grid [50, 100]%.
    interpolated = {}
    for n, x_val in enumerate(['length', 'size']):
        nidx = pd.Float64Index(np.linspace(50, 100, 5001, endpoint=True))
        for method in METHODS:
            idx = calculated[method][x_val]['octets_mean']
            ddd = calculated[method][x_val].copy().set_index(idx)
            ddd = ddd[~ddd.index.duplicated()]
            ddd = ddd.reindex(ddd.index.union(nidx)).interpolate('slinear').reindex(nidx)
            # NOTE: keyed x_val -> method here (the reverse of plot_traffic).
            interpolated.setdefault(x_val, {})[method] = ddd
    # Tabulate usage at selected coverage percentages into a LaTeX table.
    points = [99, 95, 90, 80, 75, 50]
    z = pd.concat({k: pd.concat(v) for k, v in interpolated.items()})
    z = z.unstack([0, 1]).swaplevel(1, 2, axis=1).sort_index(axis=1)[['occupancy_mean', 'operations_mean']]
    z = z.reindex(METHODS, axis=1, level=1)
    z.loc[points][['occupancy_mean', 'operations_mean']].to_latex(f'selected.tex',
                                                                  float_format='%.2f',
                                                                  multicolumn_format='c')
    # One figure per reference: absolute values, then ratios vs each method.
    for to in ['absolute'] + list(METHODS):
        to_label = ' reduction [x]'
        fig, axes = plt.subplots(1, 2, sharex='all', sharey='all', figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]])
        for n, x_val in enumerate(['length', 'size']):
            ax = axes[n]
            for method in METHODS:
                d = interpolated[x_val][method]
                if to == 'absolute':
                    # Divide by an all-ones frame of the same shape (no-op ratio).
                    r = d.copy()
                    for col in r.columns:
                        r[col].values[:] = 1
                    ax.plot(d.index, d[f'{what}_mean'] / r[f'{what}_mean'], 'k' + METHODS[method], lw=2,
                            label=method)
                else:
                    # Relative plot is inverted: reference / method = reduction factor.
                    r = interpolated[x_val][to]
                    to_label = f' [relative to {to}]'
                    ax.plot(d.index, r[f'{what}_mean'] / d[f'{what}_mean'], 'k' + METHODS[method], lw=2,
                            label=method)
            ax.set_xlabel(f'Traffic coverage [%] (decision by {x_val})')
            ax.set_ylabel(f'Flow table {what}{to_label}')
            ax.tick_params('y', labelleft=True)
            if to == 'absolute':
                ax.set_yscale('log')
            ax.legend()
        fig.gca().invert_xaxis()
        fig.gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        out = f'{what}_{to}'
        save_figure(fig, out)
        plt.close(fig)
def plot_calc_sim(ax, calculated, simulated, method, x_val, w):
    """Plot one quantity `w` as simulated points (with error bars) plus the calculated curve.

    `w` is one of 'octets', 'flows', 'fraction'; `ax` is the matplotlib
    axes to draw on ('octets' goes on the left y-axis, the rest on the
    right twin axis created by the caller).
    """
    # Marker style for simulation points, line style for calculated curves.
    sim_style = {
        'octets': 'bo',
        'flows': 'ro',
        'fraction': 'ko'
    }
    calc_style = {
        'octets': 'b-',
        'flows': 'r-',
        'fraction': 'k-'
    }
    # Human-readable series name for the legend.
    if w == 'fraction':
        name = 'Occupancy'
    elif w == 'octets':
        name = 'Traffic coverage'
    elif w == 'flows':
        name = 'Operations'
    else:
        name = w[:-1] + ' coverage'
    axis = 'left' if w == 'octets' else 'right'
    d = simulated[method][x_val][w + '_mean']
    # Confidence column is optional; plot without error bars if absent.
    try:
        e = simulated[method][x_val][w + '_conf']
    except KeyError:
        e = None
    ax.errorbar(d.index, d, e, None, sim_style[w], lw=1, capthick=1, ms=2,
                label=f'{name} (sim.) ({axis})')
    # Clip the calculated curve to the simulated x-range (sampling curves
    # run the other way, so clip from the minimum instead of the maximum).
    n = calculated[method][x_val][w + '_mean']
    d = n.loc[:d.index.max() if method != 'sampling' else d.index.min()]
    ax.plot(d.index, d, calc_style[w], lw=2,
            label=f'{name} (calc.) ({axis})')
def plot_all(calculated, simulated, one):
    """Plot calculated-vs-simulated results for every method.

    With `one` truthy, both x_vals of a method share one two-panel figure;
    otherwise each (method, x_val) pair gets its own figure.
    """
    for method in calculated:
        if one:
            # One figure, two panels (one per x_val), each with a twin y-axis.
            fig, axes = plt.subplots(1, 2, figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1]], sharey='row')
            txes = [ax.twinx() for ax in axes]
            txes[0].get_shared_y_axes().join(*txes)
        else:
            fig, ax = plt.subplots(figsize=FIGSIZE)
            tx = ax.twinx()
        # NOTE(review): in the not-`one` branch the figure is created once per
        # method but saved and closed inside the x_val loop below — a second
        # x_val would draw on a closed figure; presumably each method dir has
        # data for one x_val per call. Confirm against caller.
        for n, x_val in enumerate(simulated[method]):
            if one:
                ax = axes[n]
                tx = txes[n]
            # Traffic coverage on the left axis; flows/occupancy on the twin.
            plot_calc_sim(ax, calculated, simulated, method, x_val, 'octets')
            plot_calc_sim(tx, calculated, simulated, method, x_val, 'flows')
            plot_calc_sim(tx, calculated, simulated, method, x_val, 'fraction')
            ax.set_xscale('log')
            tx.set_yscale('log')
            ax.legend(loc=3)
            tx.legend()
            if method == 'sampling':
                # Sampling probability decreases left-to-right.
                ax.invert_xaxis()
                ax.set_xlabel(f'Sampling probability (sampling by {x_val})')
            else:
                ax.set_xlabel(f'Flow {x_val} threshold [{UNITS[x_val]}]')
            if not one:
                out = f'results_{method}_{x_val}'
                save_figure(fig, out)
                plt.close(fig)
        if one:
            out = f'results_{method}'
            save_figure(fig, out)
            plt.close(fig)
def plot_probability():
    """Plot the total probability of a flow entering the flow table vs its length.

    With per-packet sampling probability p, a flow of n packets is added
    with probability 1 - (1 - p)**n; curves are drawn for p=0.1 and p=0.01.
    """
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)
    idx = np.geomspace(1, 1000, 512)
    # Bug fix: labels were malformed/wrong ('p 0.1$' and 'p = 0.1$' on the
    # p=0.01 curve); each label now matches its curve's probability.
    ax.plot(idx, 1 - (1 - 0.1) ** idx, 'k-', lw=2,
            label='$p = 0.1$')
    ax.plot(idx, 1 - (1 - 0.01) ** idx, 'k-', lw=2,
            label='$p = 0.01$')
    # In-plot annotations next to each curve.
    ax.text(12, 0.6, '$p = 0.1$')
    ax.text(150, 0.6, '$p = 0.01$')
    ax.set_xlabel('Flow length [packets]')
    ax.set_ylabel('Total probability of being added to flow table')
    ax.set_xscale('log')
    save_figure(fig, 'probability')
    plt.close(fig)
def plot(dirs, one=False):
    """Load simulation CSVs from `dirs`, compute analytical curves, and plot everything.

    Args:
        dirs: directories whose last path component is the x_val
            ('length' or 'size') and which contain one <method>.csv per method.
        one: forwarded to plot_all() — combine panels into one figure.
    """
    simulated = collections.defaultdict(dict)
    calculated = collections.defaultdict(dict)
    methods = set()
    for d in dirs:
        d = pathlib.Path(d)
        # Directory name encodes the decision variable.
        x_val = d.parts[-1]
        assert x_val in X_VALUES
        for f in d.glob('*.csv'):
            # CSV file stem encodes the method ('first'/'threshold'/'sampling').
            method = f.stem
            assert method in METHODS
            methods.add(method)
            simulated[method][x_val] = pd.read_csv(str(f), index_col=0).dropna()
        # Analytical counterpart from the fitted mixture models.
        for method, df in calculate('../mixtures/all/' + x_val, 1024, x_val=x_val, methods=methods).items():
            calculated[method][x_val] = df.dropna()
    plot_all(calculated, simulated, one)
    plot_usage(calculated, 'occupancy')
    plot_usage(calculated, 'operations')
    plot_traffic(calculated)
def _build_parser():
    """Build the command-line parser for this tool."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--one', action='store_true', help='plot in one file')
    parser.add_argument('files', nargs='+', help='csv_hist files to plot')
    return parser

def main():
    """Entry point: parse arguments and render every figure."""
    options = _build_parser().parse_args()
    with matplotlib_config(latex=False):
        plot_probability()
        plot(options.files, options.one)

if __name__ == '__main__':
    main()
| 37.491304 | 108 | 0.542851 | 1,111 | 8,623 | 4.080108 | 0.212421 | 0.030885 | 0.035297 | 0.02934 | 0.404368 | 0.311493 | 0.286565 | 0.262299 | 0.239576 | 0.173395 | 0 | 0.022705 | 0.305346 | 8,623 | 229 | 109 | 37.655022 | 0.734057 | 0.001971 | 0 | 0.275862 | 0 | 0 | 0.129111 | 0.002789 | 0 | 0 | 0 | 0 | 0.009852 | 1 | 0.034483 | false | 0 | 0.049261 | 0 | 0.083744 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4782f2535bfc86b20dfc387ade673021d013c90 | 2,088 | py | Python | multiview/db/saxs_v2/db_config.py | bsmind/react-multisciview | 613dbe327542d0384e5d6b87697a05db17f42ca8 | [
"MIT"
] | null | null | null | multiview/db/saxs_v2/db_config.py | bsmind/react-multisciview | 613dbe327542d0384e5d6b87697a05db17f42ca8 | [
"MIT"
] | null | null | null | multiview/db/saxs_v2/db_config.py | bsmind/react-multisciview | 613dbe327542d0384e5d6b87697a05db17f42ca8 | [
"MIT"
] | 1 | 2020-08-28T16:27:15.000Z | 2020-08-28T16:27:15.000Z | MONGODB_CONFIG = {
'ROOT': '/Users/scott/Documents/Work/bnl/MultiView/pyServer/data/saxs/',
# mongo db set-up
'DB': {
#'HOST': 'visws.csi.bnl.gov',
'HOST': 'localhost',
'PORT': 27017,
'NAME': 'multiview_saxs_v2',
'COLLECTION': 'saxs_v2'
},
# parsing xml file
'XML': {
# root directory relative to ROOT
'DIR': 'analysis_proper/results/',
# sample name split
'SAMPLE_SPLIT': '_th0.',
# for same protocol, use COMPARE field
'TIMESTAMP': 'save_timestamp',
# rood id field
'ROOTID': 'name',
# protocol id field
'PID': 'name',
# result id field
'RID': 'name', # id
'RVAL': 'value', # value
# fields that will be ignored in a protocol
'P_EXCLUDE': [
'infile',
'outfile',
'output_dir',
'runtime',
],
# fields that will be excluded in a result
'R_EXCLUDE': [
'filebase',
'file_access_time',
'sample_name',
'file_ctime',
'file_size',
'infile',
'filepath',
'filename',
'fileext',
'file_modification_time'
],
# fields whose value will be considered as string
'R_STRING': [
],
'TIME_FIELD': [
'sequence_ID',
'start_timestamp',
'end_timestamp',
'save_timestamp'
]
},
'TIME': {
'XML': False,
'DB': False,
'FORMAT': '%Y-%m-%d %H:%M:%S %f',
},
# tiff (raw data) related
# CROP defines start row and col index (i.e. the first pixel at upper-left corner)
'TIFF': {
'SAVE': True,
'EXT': ['', '.tiff'],
'MODIFY': False,
'DIR': 'tiff/',
'CROP': {'ROW': 221, 'COL': 181},
'RESIZE': 0.5
},
'THUMBNAIL': {
'SAVE': True,
'DIR': 'analysis_proper/thumbnails/',
'EXT': ['', '.jpg', '.png']
}
}
| 22.212766 | 86 | 0.45546 | 202 | 2,088 | 4.584158 | 0.613861 | 0.022678 | 0.036717 | 0.034557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012598 | 0.391762 | 2,088 | 93 | 87 | 22.451613 | 0.716535 | 0.210728 | 0 | 0.112903 | 0 | 0 | 0.358063 | 0.082158 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a479239492621d9e94b74d01d6b7559292d39674 | 415 | py | Python | ch06/hw06_03/hw06_03.py | z2x3c4v5bz/pybook_wenlongtsai_etc | 0a3e90d9f53a1d33e31b27f40de8abdce56e7e2a | [
"MIT"
] | 4 | 2021-06-12T07:51:22.000Z | 2021-12-20T11:35:12.000Z | ch06/hw06_03/hw06_03.py | z2x3c4v5bz/pybook_wenlongtsai_etc | 0a3e90d9f53a1d33e31b27f40de8abdce56e7e2a | [
"MIT"
] | null | null | null | ch06/hw06_03/hw06_03.py | z2x3c4v5bz/pybook_wenlongtsai_etc | 0a3e90d9f53a1d33e31b27f40de8abdce56e7e2a | [
"MIT"
] | 1 | 2021-11-08T03:36:43.000Z | 2021-11-08T03:36:43.000Z | # hw06_03
import random
def makesentence():
    """Print one random 'Subject verb adverb.' sentence."""
    word_pools = (
        ['Dog', 'Cat', 'Monkey', 'Pig', 'Fox'],   # subjects
        ['walks', 'runs', 'jumps'],               # verbs
        ['slowly', 'quickly'],                    # adverbs
    )
    # One draw per pool, in subject/verb/adverb order.
    picked = tuple(random.choice(pool) for pool in word_pools)
    print('%s %s %s.' % picked)
for i in range(5):
makesentence()
'''
Cat walks quickly.
Fox jumps slowly.
Monkey jumps slowly.
Pig jumps slowly.
Monkey walks quickly.
'''
| 17.291667 | 93 | 0.624096 | 53 | 415 | 4.867925 | 0.509434 | 0.139535 | 0.131783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014837 | 0.187952 | 415 | 23 | 94 | 18.043478 | 0.750742 | 0.016867 | 0 | 0 | 0 | 0 | 0.180602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a47acd60891c08f70a65acabe0a7b03b7c1a3a1f | 1,482 | py | Python | dci/api/v1/audits.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 17 | 2016-09-02T09:21:29.000Z | 2021-09-27T11:33:58.000Z | dci/api/v1/audits.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 80 | 2015-12-09T09:29:26.000Z | 2021-01-06T08:24:22.000Z | dci/api/v1/audits.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 10 | 2015-09-29T21:34:53.000Z | 2021-09-27T11:34:01.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from dci.api.v1 import api
from dci import decorators
from dci.common import exceptions as dci_exc
from dci.db import models2
from dci.db import declarative
from dci.common.schemas import check_and_get_args
@api.route("/audits", methods=["GET"])
@decorators.login_required
def get_logs(user):
args = check_and_get_args(flask.request.args.to_dict())
query = flask.g.session.query(models2.Log)
if user.is_not_super_admin():
raise dci_exc.Unauthorized()
nb_logs = query.count()
query = declarative.handle_args(query, models2.Log, args)
audits = [
{
"id": audit.id,
"created_at": audit.created_at,
"user_id": audit.user_id,
"action": audit.action,
}
for audit in query.all()
]
return flask.jsonify({"audits": audits, "_meta": {"count": nb_logs}})
| 30.244898 | 75 | 0.695682 | 217 | 1,482 | 4.64977 | 0.529954 | 0.059465 | 0.025768 | 0.031715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014346 | 0.200405 | 1,482 | 48 | 76 | 30.875 | 0.837131 | 0.388664 | 0 | 0 | 0 | 0 | 0.057303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.269231 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a47ad8b5997995620bf43529f5fc03e2a2cb0078 | 1,017 | py | Python | app/app.py | jaswged/die-detector-api | 249ce50ac340e73a3ce05a2c7ed4f5874a002ab9 | [
"MIT"
] | null | null | null | app/app.py | jaswged/die-detector-api | 249ce50ac340e73a3ce05a2c7ed4f5874a002ab9 | [
"MIT"
] | 2 | 2020-01-07T04:17:58.000Z | 2020-01-08T01:21:30.000Z | app/app.py | jaswged/die-detector-api | 249ce50ac340e73a3ce05a2c7ed4f5874a002ab9 | [
"MIT"
] | null | null | null | # Common python package imports.
from flask import Flask, jsonify, request, render_template
from fastai.vision import *
# Initialize the app and set a secret_key.
app = Flask(__name__)
app.secret_key = 'something_secret'
# Load the pickled model.
defaults.device = torch.device('cpu')
path = '.'
learn = load_learner(path, file='dice.pkl')
@app.route('/')
def docs():
    """Serve the API documentation page."""
    return render_template('docs.html')
@app.route('/upload')
def upload():
    """Serve the image-upload form."""
    return render_template('image.html')
@app.route('/uploader', methods=['GET', 'POST'])
def uploader():
    """Classify an uploaded die image and return the predicted class as text.

    NOTE(review): `sys` and `BytesIO` are not imported explicitly in this
    file; they appear to rely on `from fastai.vision import *` — confirm.
    NOTE(review): a GET request falls through and returns None, which Flask
    treats as an error — presumably only POST is used in practice.
    """
    if request.method == 'POST':
        f = request.files['file']
        img_bytes = f.read()
        img = open_image(BytesIO(img_bytes))
        # `learn` is the module-level fastai Learner loaded at startup.
        pred_class, pred_idx, outputs = learn.predict(img)
        print('Returning: ' + str(pred_class), file=sys.stderr)
        print('Index: ' + str(pred_idx))
        print('Outputs: ' + str(outputs))
        return str(pred_class)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| 24.804878 | 63 | 0.654867 | 137 | 1,017 | 4.664234 | 0.547445 | 0.065728 | 0.062598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009685 | 0.187807 | 1,017 | 40 | 64 | 25.425 | 0.763923 | 0.093412 | 0 | 0 | 0 | 0 | 0.131808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0.076923 | 0.307692 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a47af5d98f82e7d880c1857ba14e7365fcae7341 | 651 | py | Python | junopy/entities/pix.py | robertons/junopy | 1acc64ab99d8ea49bb0dac979cd34da43541f243 | [
"MIT"
] | 3 | 2021-07-12T15:05:13.000Z | 2022-01-31T03:35:43.000Z | junopy/entities/pix.py | robertons/junopy | 1acc64ab99d8ea49bb0dac979cd34da43541f243 | [
"MIT"
] | 2 | 2022-01-29T20:14:51.000Z | 2022-02-07T16:16:24.000Z | junopy/entities/pix.py | robertons/junopy | 1acc64ab99d8ea49bb0dac979cd34da43541f243 | [
"MIT"
] | 1 | 2022-02-01T18:36:10.000Z | 2022-02-01T18:36:10.000Z | # -*- coding: utf-8 -*-
from .lib import *
class Pix(JunoEntity):
    """Juno Pix entity: declares the fields of a Pix QR-code resource.

    Fields are declared with the project's descriptor DSL (String/Boolean/
    Float/DateTime from .lib); JunoEntity consumes them via **kw.
    """
    def __init__(cls, **kw):
        # NOTE(review): the first parameter is named `cls` but receives the
        # instance (conventionally `self`); kept as-is to match the project.
        cls.__metadata__ = {}
        # FIELDS
        cls.id = String(max=80)
        cls.key = String(max=80)
        cls.type = String(max=80)
        cls.includeImage = Boolean()
        cls.payloadInBase64 = String()
        cls.imageInBase64 = String()
        cls.qrcodeInBase64 = String()
        cls.amount = Float()
        cls.reference = String(max=80)
        cls.additionalData = String(max=100)
        # Timestamps are serialized in ISO format.
        cls.creationDateTime = DateTime(format="iso")
        cls.ownershipDateTime = DateTime(format="iso")
        super().__init__(**kw)
| 25.038462 | 54 | 0.574501 | 69 | 651 | 5.246377 | 0.536232 | 0.124309 | 0.121547 | 0.154696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03871 | 0.285714 | 651 | 25 | 55 | 26.04 | 0.739785 | 0.043011 | 0 | 0 | 0 | 0 | 0.009677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a47e22c2a0cc7d55c5e439122397bc64272708be | 11,203 | py | Python | rstring/__init__.py | phantie/mutable-string | 3a944528e777aecdec0ff8d6cc09585a8543874b | [
"MIT"
] | null | null | null | rstring/__init__.py | phantie/mutable-string | 3a944528e777aecdec0ff8d6cc09585a8543874b | [
"MIT"
] | null | null | null | rstring/__init__.py | phantie/mutable-string | 3a944528e777aecdec0ff8d6cc09585a8543874b | [
"MIT"
] | null | null | null | from __future__ import annotations
from functools import partialmethod, wraps
from array import array
from typing import NewType, Union, Callable, Iterable, Generator, Type
from ruption import some, none
from take import take
__all__ = ('String',)
__version__ = '0.5.3'
def no_mut(f):
    """Decorator asserting that *f* does not mutate its first argument.

    In optimized mode (-O, __debug__ is False) the check is skipped and the
    function is returned untouched; otherwise the receiver is snapshotted
    with a slice copy before the call and compared afterwards.
    """
    if not __debug__:
        return f

    @wraps(f)
    def guard(self, *args, **kwargs):
        snapshot = self[:]
        result = f(self, *args, **kwargs)
        assert self[:] == snapshot
        return result
    return guard
class String(array):
    """Mutable, change-friendly, feature-rich String.

    Backed by ``array('u')`` (one unicode character per slot), so text can
    be edited in place.  Unknown attributes are delegated to ``str`` (see
    ``__getattr__``); several lookups return ``Option`` values
    (``some``/``none`` from ``ruption``) instead of raising.
    """
    @staticmethod
    def __new__(cls, o=None, encoding=None) -> Self:
        # Dispatch on the input: nothing -> empty, str, array('u'),
        # bytes (+ encoding), or any other iterable of characters.
        if not o:
            return super().__new__(cls, 'u')
        o_cls_inh = set(o.__class__.__mro__)
        if str in o_cls_inh:
            return cls.from_str(o)
        elif array in o_cls_inh:
            return cls.from_unicode_array(o)
        elif bytes in o_cls_inh:
            return cls.from_encoding(o, encoding)
        try:
            iterable = iter(o)
        except TypeError:
            raise TypeError(f'{cls.__qualname__} cannot be created from {o.__class__}')
        else:
            return cls.from_iterable(iterable)
    # Names of plain str methods transparently delegated via __getattr__
    # (plus removeprefix/removesuffix, which this class also overrides below).
    _str_attrs = set(_ for _ in dir(str) if not _.startswith('__')).union(set(('removeprefix', 'removesuffix')))
    def __getattr__(self, name):
        # Fall back to the equivalent str method, applied to a str copy;
        # the result is whatever str returns (not a String).
        if name in self._str_attrs:
            return lambda *args, **kwargs: getattr(str, name)(str(self), *args, **kwargs)
        raise AttributeError(name)
    @classmethod
    def new(cls) -> Self:
        """Create an empty String."""
        return super().__new__(cls, 'u')
    # --- comparisons: accept both String and plain str on the right ---
    @no_mut
    def __eq__(self, _) -> bool:
        if isinstance(_, self.__class__):
            return super().__eq__(_)
        elif isinstance(_, str):
            return self.as_str() == _
        return False
    @no_mut
    def __ne__(self, _) -> bool:
        if isinstance(_, self.__class__):
            return super().__ne__(_)
        elif isinstance(_, str):
            return self.as_str() != _
        return True
    @no_mut
    def __ge__(self, _):
        if isinstance(_, self.__class__):
            return super().__ge__(_)
        elif isinstance(_, str):
            return str(self) >= _
        else:
            raise TypeError("'>=' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __le__(self, _):
        if isinstance(_, self.__class__):
            return super().__le__(_)
        elif isinstance(_, str):
            return str(self) <= _
        else:
            raise TypeError("'<=' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __gt__(self, _):
        if isinstance(_, self.__class__):
            return super().__gt__(_)
        elif isinstance(_, str):
            return str(self) > _
        else:
            raise TypeError("'>' not supported between instances of 'String' and", repr(_.__class__.__name__))
    @no_mut
    def __lt__(self, _):
        if isinstance(_, self.__class__):
            return super().__lt__(_)
        elif isinstance(_, str):
            return str(self) < _
        else:
            raise TypeError("'<' not supported between instances of 'String' and", repr(_.__class__.__name__))
    # --- alternate constructors ---
    @classmethod
    def from_str(cls, string: str) -> Self:
        new = super().__new__(cls, 'u')
        new.push_str(string)
        return new
    @classmethod
    def from_iterable(cls, iterable: Iterable) -> Self:
        new = super().__new__(cls, 'u')
        new.extend(iterable)
        return new
    @classmethod
    def from_unicode_array(cls, uar: array[u]) -> Self:
        new = super().__new__(cls, 'u')
        new[:] = uar
        return new
    @classmethod
    def from_encoding(cls, bytes: bytes, encoding: str) -> Self:
        return cls.from_str(bytes.decode(encoding))
    from_utf8 = partialmethod(from_encoding, encoding='utf-8')
    # --- appending ---
    def push(self, _: u):
        """Append a single character."""
        self.append(_)
    def push_str(self, _: str):
        """Append every character of a str."""
        self.fromunicode(_)
    @no_mut
    def __str__(self) -> str:
        return self.tounicode()
    to_str = as_str = __str__
    @no_mut
    def __repr__(self) -> str:
        return f'String("{self}")'
    len = lambda self: self.__len__()
    length = property(len)
    @no_mut
    def as_bytes(self, encoding) -> [int]:
        """Return the encoded bytes as a list of ints."""
        return list(bytearray(str(self), encoding))
    def truncate(self, new_len: int):
        """Shorten in place to at most new_len characters."""
        self[:] = self[:new_len]
    def pop(self) -> Option[u]:
        """Remove and return the last character, or `none` if empty."""
        try:
            return some(super().pop())
        except IndexError:
            return none
    def remove(self, idx: int) -> Option[u]:
        """Remove and return the character at idx, or `none` if out of range."""
        try:
            _ = self[idx]
            del self[idx]
            return some(_)
        except IndexError:
            return none
    def retain(self, f: Callable[[u], bool]):
        """Keep only the characters for which f returns True."""
        self._set_store_from_iterable((_ for _ in self if f(_)))
    filter = retain
    def map(self, f: Callable[[u], u]):
        """Replace every character with f(character), in place."""
        self._set_store_from_iterable(map(f, self[:]))
    def _check_bounds(self, idx: int):
        # Index may equal len(self): this is used for insertion points too.
        if not (0 <= idx <= len(self)):
            raise IndexError
    def _check_range_bounds(self, rng: range):
        for _ in rng:
            self._check_bounds(_)
    def insert(self, idx: int, u: u):
        self._check_bounds(idx)
        super().insert(idx, u)
    def insert_str(self, idx: int, string: str):
        """Insert a whole str at idx, preserving its character order."""
        for i, s in enumerate(string):
            self.insert(idx + i, s)
    @no_mut
    def is_empty(self) -> bool:
        return not bool(self)
    def split_off(self, at: int) -> Self:
        """Split in place: keep [:at], return the tail [at:] as a new String."""
        _ = self.take_from(at)
        self.truncate(at)
        return _
    def take_from(self, idx: int) -> Self:
        """Return a copy of the tail starting at idx (self unchanged)."""
        self._check_bounds(idx)
        return self.from_unicode_array(self[idx:])
    def clear(self):
        self[:] = self[:0]
    def drain(self, rng: range) -> Self:
        """Remove the characters at the indices in rng and return them.

        NOTE(review): `r - i` compensates for the left shift caused by each
        removal; this looks correct only for ascending step-1 ranges —
        confirm before using other steps.
        """
        self._check_range_bounds(rng)
        _ = self.new()
        for i, r in enumerate(rng):
            _.push(self.remove(r-i).unwrap())
        return _
    def replace_range(self, rng: range, replace_with: str):
        """Replace the characters in rng (step must be 1) with replace_with."""
        self._check_range_bounds(rng)
        if rng.step != 1:
            raise TypeError(f"Step in {rng} must be 1. Period.")
        # NOTE(review): rng[0] raises IndexError for an empty range.
        self.drain(rng)
        self.insert_str(rng[0], replace_with)
    def _set_store_from_iterable(self, iterable: Iterable):
        # Materializes the iterable into a new String first, then swaps.
        self[:] = self.from_iterable(iterable)
    @no_mut
    def chars(self) -> Iterable[u]:
        return iter(self)
    @no_mut
    def char_indices(self) -> Iterable[(int, u)]:
        return enumerate(self)
    @no_mut
    def copy(self) -> Self:
        new = self.new()
        new[:] = self[:]
        return new
    # --- concatenation / repetition (always returns a new String) ---
    @no_mut
    def __add__(self, _) -> Self:
        if isinstance(_, self.__class__):
            return take(self.copy()).extend(_).unwrap()
        elif isinstance(_, str):
            return take(self.copy()).push_str(_).unwrap()
        else:
            raise NotImplementedError(_)
    @no_mut
    def __radd__(self, _) -> Self:
        if isinstance(_, str):
            return take(self.copy()).insert_str(0, _).unwrap()
        else:
            raise NotImplementedError(_)
    def strip_prefix(self, prefix: str, recurr: bool = False):
        """Remove prefix in place if present; with recurr, repeat while it matches."""
        if len(prefix) > len(self):
            return
        for this, opposite in zip(self, prefix):
            if this != opposite:
                break
        else:
            # for-else: runs only when the loop was not broken, i.e. the
            # whole prefix matched.
            self[:] = self[len(prefix):]
            if recurr:
                self.strip_prefix(prefix, True)
    removeprefix = strip_prefix
    def strip_suffix(self, suffix: str, recurr: bool = False):
        """Remove suffix in place if present; with recurr, repeat while it matches."""
        if len(suffix) > len(self):
            return
        for this, opposite in zip(self[-len(suffix):], suffix):
            if this != opposite:
                break
        else:
            self[:] = self[:len(self) - len(suffix)]
            if recurr:
                self.strip_suffix(suffix, True)
    removesuffix = strip_suffix
    @no_mut
    def __mul__(self, other: int) -> Self:
        if isinstance(other, int):
            return self.from_unicode_array(self[:]*other)
        else:
            raise NotImplementedError
    repeat = __rmul__ = __mul__
    @classmethod
    def has_custom_impl(cls, methodname: str) -> bool:
        """Whether a delegated str method has an override on this class."""
        if methodname in cls._str_attrs:
            return methodname in dir(cls)
        else:
            raise AttributeError(f'{str} has no method named "{methodname}" ')
    @no_mut
    def split_at(self, mid: int) -> (Self, Self):
        """Return ([:mid], [mid:]) as two new Strings (self unchanged)."""
        first = self.from_unicode_array(self[:mid])
        last = self.from_unicode_array(self[mid:])
        return first, last
    @no_mut
    def lines(self) -> [str]:
        return self.splitlines()
    @no_mut
    def __contains__(self, _: Union[array[u], str, Self]) -> bool:
        if isinstance(_, str):
            return _ in str(self)
        elif isinstance(_, self.__class__):
            return str(_) in str(self)
        raise TypeError(f"'in <String>' requires str/String/array[u] as left operand, not {type(_).__qualname__}")
    contains = __contains__
    @no_mut
    def split_inclusive(self, sep: u) -> Generator[str]:
        """Yield chunks split on sep, with sep kept at the end of each chunk."""
        assert len(sep) == 1
        def incapsulated_generator():
            prev = 0
            for i, _ in enumerate(self, 1):
                if _ == sep:
                    yield self[prev:i].tounicode()
                    prev = i
            # Trailing chunk without a separator, if any.
            if prev != len(self):
                yield self[prev:].tounicode()
        return incapsulated_generator()
    def collect(self, _: Type) -> Any:
        """Pass self to the given constructor (e.g. collect(list))."""
        return _(self)
    @no_mut
    def char_index(self, u: u) -> Option[int]:
        """Index of the first occurrence of u, or `none`."""
        try:
            return some(self.index(u))
        except ValueError:
            return none
    @no_mut
    def rchar_index(self, u: u) -> Option[int]:
        """Index of the last occurrence of u, or `none`."""
        try:
            return some(len(self) - 1 - self[::-1].index(u))
        except ValueError:
            return none
    @no_mut
    def split_once(self, u: u) -> Option[(str, str)]:
        """Split on the first occurrence of u; `none` if u is absent."""
        opt_idx = self.char_index(u)
        if opt_idx is none: return none
        first, last = self.split_at(opt_idx.unwrap())
        last.remove(0)
        return some((str(first), str(last)))
    @no_mut
    def rsplit_once(self, u: u) -> Option[(str, str)]:
        """Split on the last occurrence of u; `none` if u is absent."""
        opt_idx = self.rchar_index(u)
        if opt_idx is none: return none
        first, last = self.split_at(opt_idx.unwrap())
        last.remove(0)
        return some((str(first), str(last)))
    def reverse(self):
        self[:] = self[::-1]
    # --- whitespace trimming (in place; '\x20' is the ASCII space) ---
    def trim(self):
        self.trimr()
        self.triml()
    def trimr(self):
        self.removesuffix('\x20', recurr=True)
    def triml(self):
        self.removeprefix('\x20', recurr=True)
    def triml_num(self, num: int):
        """Drop num characters from the left."""
        assert num >= 0
        self[:] = self[num:]
    def trimr_num(self, num: int):
        """Drop num characters from the right (clamped at empty)."""
        assert num >= 0
        end = len(self)-num
        self[:] = self[:end if end > 0 else 0]
    def trim_num(self, num: int):
        """Drop num characters from both ends."""
        assert num >= 0
        self.trimr_num(num)
        self.triml_num(num)
Self = String
u = NewType('u', str) # unicode character | 27.060386 | 114 | 0.560743 | 1,352 | 11,203 | 4.323225 | 0.152367 | 0.021386 | 0.032849 | 0.034217 | 0.355346 | 0.296664 | 0.253379 | 0.202224 | 0.154662 | 0.117023 | 0 | 0.003652 | 0.31563 | 11,203 | 414 | 115 | 27.060386 | 0.758706 | 0.005802 | 0 | 0.331269 | 0 | 0.003096 | 0.0481 | 0.001967 | 0 | 0 | 0 | 0 | 0.01548 | 1 | 0.19195 | false | 0 | 0.018576 | 0.03096 | 0.439628 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a47ea1f3b9daa8db7fc8245fb0cffd99591ee6a0 | 2,160 | py | Python | portfolio/Python/scrapy/outillage/conrad.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/outillage/conrad.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/outillage/conrad.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class ConradSpider(BaseSpider):
    """Scrapy spider for conrad.fr tool/home-equipment category listings."""
    name = 'conrad.fr'
    allowed_domains = ['www.conrad.fr', 'conrad.fr']
    start_urls = ('http://www.conrad.fr/outillage_mesure_c_53207',
                  'http://www.conrad.fr/equipement_maison_c_52080')
    def parse(self, response):
        """Follow category and pagination links, then yield products on this page."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        # categories
        categories = hxs.select(u'//ul[@class="sousCat" or @class="categorie"]//a/@href').extract()
        for url in categories:
            url = urljoin_rfc(get_base_url(response), url)
            yield Request(url)
        # pagination
        next_page = hxs.select(u'//ul[@class="pages"]//a[@title="suivant"]/@href').extract()
        if next_page:
            next_page = urljoin_rfc(get_base_url(response), next_page[0])
            yield Request(next_page)
        # products
        for product in self.parse_product(response):
            yield product
    def parse_product(self, response):
        """Yield one Product item per row of the listing table (header row skipped)."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        products = hxs.select(u'//table[@class="list"]//tr')[1:]
        for product in products:
            product_loader = ProductLoader(item=Product(), selector=product)
            url = product.select(u'.//h3/a/@href').extract()
            url = urljoin_rfc(get_base_url(response), url[0])
            product_loader.add_value('url', url)
            product_loader.add_xpath('name', u'.//h3/a/text()')
            # Price: first numeric run inside the promo-price paragraph.
            product_loader.add_xpath('price', u'.//p[@class="prixPromo"]/text()',
                                     re=u'([\d\.]+)')
            yield product_loader.load_item()
a4872bfc5ea2ebaa9569179a301218bcadb5ad3d | 8,965 | py | Python | old/old_another_small_jobshop_dwave_another_example.py | MiRudnik/quantum_optimization | 9c63c9164d9a8620d7610cc0576a1e3ee7319d98 | [
"MIT"
] | null | null | null | old/old_another_small_jobshop_dwave_another_example.py | MiRudnik/quantum_optimization | 9c63c9164d9a8620d7610cc0576a1e3ee7319d98 | [
"MIT"
] | null | null | null | old/old_another_small_jobshop_dwave_another_example.py | MiRudnik/quantum_optimization | 9c63c9164d9a8620d7610cc0576a1e3ee7319d98 | [
"MIT"
] | 1 | 2021-07-13T21:50:53.000Z | 2021-07-13T21:50:53.000Z | import numpy as np
# Set Q for the problem QUBO
from utils.jobshop_helpers import get_machine_and_time_slot, get_operation_length, is_last_row, get_qubits_from_slot_and_machine, \
get_time_slot
def main():
    """Build a QUBO for a tiny 2-job / 2-machine job-shop instance.

    Each job is a list of operation lengths (time slots).  NOTE(review):
    `prepare_connections` is not defined in this fragment — presumably it
    builds the full penalty matrix from the helper constraints below;
    confirm before reuse.
    """
    # qubo_matrix = np.zeros((40,40))
    jobs = [[2, 1], [1,2]]
    # Flatten the per-job operation lists into one operation sequence.
    j_flat = []
    for job in jobs:
        j_flat.extend(job)
    time_limit = 5
    number_of_machines = 2
    # One binary variable per (machine, operation, time-slot) triple.
    qubits_number = number_of_machines * len(j_flat) * time_limit
    connections = prepare_connections(jobs, number_of_machines, time_limit)
    # Diagonal entries become the linear QUBO terms...
    linear = {}
    quadratic = {}
    for i in range(qubits_number):
        linear['x{}'.format(i), 'x{}'.format(i)] = int(connections[i,i])
    # ...and non-zero upper-triangle entries become quadratic couplings.
    for i in range(qubits_number):
        for j in range(i + 1, qubits_number):
            val = connections[i,j]
            if (val != 0):
                quadratic['x{}'.format(i), 'x{}'.format(j)] = int(val)
    # Hand-built example QUBOs kept for reference:
    # linear = {('x0', 'x0'): -1, ('x1', 'x1'): -1, ('x2', 'x2'): -1, ('x3', 'x3'): -1,
    #           ('x4', 'x4'): -1, ('x5', 'x5'): -1, ('x6', 'x6'): -1, ('x7', 'x7'): -1}
    # quadratic = {('x0', 'x2'): 2, ('x0', 'x4'): 2, ('x0', 'x6'): 2, ('x2', 'x4'): 2, ('x2', 'x6'): 2, ('x4', 'x6'): 2,
    #              ('x1', 'x3'): 2, ('x1', 'x5'): 2, ('x1', 'x7'): 2, ('x3', 'x5'): 2, ('x3', 'x7'): 2, ('x5', 'x7'): 2}
    # quadratic = {('x0', 'x2'): 2, ('x0', 'x4'): 2, ('x0', 'x6'): 2, ('x1', 'x3'): 2, ('x1', 'x5'): 2, ('x1', 'x7'): 2,
    #              ('x2', 'x4'): 2, ('x2', 'x6'): 2, ('x3', 'x5'): 2, ('x3', 'x7'): 2
    #              , ('x4', 'x6'): 2, ('x5', 'x7'): 2}
    print(linear)
    print(quadratic)
    Q = dict(linear)
    Q.update(quadratic)
    # Minor-embed and sample on a default D-Wave system (disabled):
    # response = EmbeddingComposite(DWaveSampler()).sample_qubo(Q, num_reads=100)
    # for s in list(response.data()):
    #     print(s.sample, "Energy: ", s.energy, "Occurrences: ", s.num_occurrences)
def add_starts_only_once_constraint(connections, row_length, number_of_qubits, number_of_operations, multiplier):
    """One-hot penalty: each operation must start in exactly one slot.

    For every group of qubits representing the same operation, writes
    -multiplier on the diagonal and 2*multiplier on every ordered pair
    (earlier, later) inside the group. Returns the mutated matrix.
    """
    groups = [list(range(start, number_of_qubits, number_of_operations))
              for start in range(row_length)]
    for group in groups:
        for qubit in group:
            connections[qubit, qubit] = -1 * multiplier
            print("-1 for [{}, {}]".format(qubit, qubit))
        for idx, first in enumerate(group):
            for second in group[idx + 1:]:
                connections[first, second] = 2 * multiplier
                print("2 for [{}, {}]".format(first, second))
    return connections
# Penalty: a machine may run at most one operation at a time.  For each
# qubit (machine/time-slot/operation assignment) it couples the qubit to
# every other qubit that would occupy the same machine during the
# operation's run, using helpers from utils.jobshop_helpers.
# Returns the mutated coefficient matrix.
def add_one_job_on_machine_constraint(connections, jobs, row_length, number_of_operations, number_of_qubits, time_limit, multiplier):
for qubit_number in range(number_of_qubits):
# Decode which machine and time slot this qubit represents.
machine_number, time_slot = get_machine_and_time_slot(qubit_number, row_length, number_of_operations)
operation_number = qubit_number % number_of_operations
operation_length = get_operation_length(jobs, operation_number)
if (is_last_row(time_slot, time_limit)):
# Last time slot: only couple within the slot itself (no look-ahead).
shift = 0
qubits = get_qubits_from_slot_and_machine(machine_number, time_slot + shift, number_of_operations, row_length)
for qubit in qubits:
connections[qubit_number, qubit] += multiplier
else:
# Couple against every slot the operation would still occupy while running.
for shift in range(operation_length):
qubits = get_qubits_from_slot_and_machine(machine_number, time_slot + shift, number_of_operations,
row_length)
print("For qubit {} qubits are {}".format(qubit_number, list(qubits)))
for qubit in qubits:
# Skip qubits in the same column (same operation at other times).
if (qubit - qubit_number) % row_length != 0:
connections[qubit_number, qubit] += multiplier
# Debug dump of the resulting matrix rows.
for (i,c) in enumerate(connections):
print(i, c)
return connections
def get_global_op_num(job_lens, job_number, checked_op_num):
    """Return the global (flattened) index of an operation.

    Operations are numbered consecutively across jobs, so the global index
    is the total operation count of all earlier jobs plus the operation's
    index inside its own job.

    :param job_lens: number of operations per job, in job order
    :param job_number: index of the job the operation belongs to
    :param checked_op_num: operation index within that job
    """
    # sum() over the slice replaces the original manual accumulation loop.
    return sum(job_lens[:job_number]) + checked_op_num
def get_qubits_for_operation(job_number, checked_op_num, job_lens, number_of_machines, time_limit, number_of_operations):
    """Return every qubit index that can represent the given operation,
    enumerated over all machines and all time slots."""
    base = get_global_op_num(job_lens, job_number, checked_op_num)
    row_len = number_of_machines * number_of_operations
    result = []
    for machine in range(1, number_of_machines + 1):
        # Column offset of this operation on this machine.
        offset = base + number_of_operations * (machine - 1)
        for slot in range(time_limit):
            result.append(offset + row_len * slot)
    return result
# Penalty: operations inside one job must run in order.  For every pair
# (earlier operation, later operation) of the same job it penalizes any
# slot assignment where the later operation would start before the
# earlier one has finished.  Returns the mutated coefficient matrix.
def add_order_constraint(connections, jobs, number_of_machines, time_limit, number_of_operations, multiplier):
job_lens = [len(job) for job in jobs]
row_len = number_of_machines * number_of_operations
# for every job
for (job_number, job) in enumerate(jobs):
# for every operation except first in job
for checked_op_num in range(1,len(job)):
qubits_for_checked_op = get_qubits_for_operation(job_number, checked_op_num, job_lens,
number_of_machines, time_limit, number_of_operations)
# for every operation, that is before operation with number op_num
for (tmp_op_num, tmp_op_len) in enumerate(job[:checked_op_num]):
qubits_for_tmp_op = get_qubits_for_operation(job_number, tmp_op_num, job_lens,
number_of_machines, time_limit, number_of_operations)
# print("Job: {}, Checked op_num: {}, tmp op num: {}, tmp op len: {}".format(job, checked_op_num, tmp_op_num, tmp_op_len))
for qubit_checked_op in qubits_for_checked_op:
for qubit_tmp_op in qubits_for_tmp_op:
checked_op_time_slot = get_time_slot(qubit_checked_op, row_len)
tmp_op_time_slot = get_time_slot(qubit_tmp_op, row_len)
# Penalize when the gap between the two starts is smaller than
# the earlier operation's duration (i.e. they would overlap).
if checked_op_time_slot - tmp_op_time_slot < tmp_op_len:
connections[qubit_tmp_op, qubit_checked_op] += multiplier
return connections
def prepare_connections(jobs, number_of_machines, time_limit):
    """Build the dense QUBO coefficient matrix for the jobshop instance.

    Currently only the one-job-per-machine constraint is applied; the
    start-once and operation-ordering constraints are available but
    switched off below.
    """
    number_of_operations = sum(len(job) for job in jobs)
    number_of_qubits = number_of_machines * number_of_operations * time_limit
    row_length = number_of_machines * number_of_operations
    # Penalty weights for the three constraint families.
    beta = 1
    eta = -1
    alpha = 1
    connections = np.zeros((number_of_qubits, number_of_qubits))
    # connections = add_starts_only_once_constraint(connections, row_length, number_of_qubits, number_of_operations, beta)
    connections = add_one_job_on_machine_constraint(connections, jobs, row_length, number_of_operations,
                                                    number_of_qubits, time_limit, alpha)
    # connections = add_order_constraint(connections, jobs, number_of_machines, time_limit, number_of_operations, eta)
    return connections
# Script entry point: build and print the QUBO for the sample instance.
if __name__=='__main__':
main()
| 46.21134 | 139 | 0.62164 | 1,298 | 8,965 | 4.026194 | 0.128659 | 0.065825 | 0.068886 | 0.026789 | 0.497321 | 0.368733 | 0.309606 | 0.250287 | 0.216992 | 0.216992 | 0 | 0.092547 | 0.232236 | 8,965 | 193 | 140 | 46.450777 | 0.666715 | 0.353374 | 0 | 0.14 | 0 | 0 | 0.013082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07 | false | 0 | 0.02 | 0 | 0.15 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a48740f19c411c12d63473995b985326be58c92a | 425 | py | Python | sensors/TemperaturaDHT11.py | tec-csf/reto-iot-en-supermercado-2019-nova-iot-supermarket | 0eb643132478a06477404dcd86c4359869ec7d81 | [
"MIT"
] | 1 | 2019-10-28T14:58:14.000Z | 2019-10-28T14:58:14.000Z | sensors/TemperaturaDHT11.py | tec-csf/reto-iot-en-supermercado-2019-nova-iot-supermarket | 0eb643132478a06477404dcd86c4359869ec7d81 | [
"MIT"
] | null | null | null | sensors/TemperaturaDHT11.py | tec-csf/reto-iot-en-supermercado-2019-nova-iot-supermarket | 0eb643132478a06477404dcd86c4359869ec7d81 | [
"MIT"
] | null | null | null | import Adafruit_DHT
sensor = Adafruit_DHT.DHT11
pin_temp = 3
# Read humidity/temperature from the DHT11 sensor on the given GPIO pin
# and print the reading (or a failure message on the following line).
def temperatura(pin_temp):
# NOTE(review): temperature is initialised to 0 right before the check,
# so the <=22 guard is always true on entry — presumably a leftover
# threshold check from an earlier version; confirm intent.
temperature = 0
if (temperature <=22):
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin_temp)
# read_retry returns (None, None) on failure, so both must be checked.
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!') | 38.636364 | 86 | 0.663529 | 59 | 425 | 4.661017 | 0.559322 | 0.12 | 0.065455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036585 | 0.228235 | 425 | 11 | 87 | 38.636364 | 0.801829 | 0 | 0 | 0 | 0 | 0 | 0.159624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a48881351fb66e53f1af28f4da440b46709db632 | 2,428 | py | Python | N-MOS_transistor_by_Python/I-V_Characteristics_n-MOSFET.py | yasser296/Python-Projects | eae3598e2d4faf08d9def92c8b417c2e7946c5f4 | [
"MIT"
] | null | null | null | N-MOS_transistor_by_Python/I-V_Characteristics_n-MOSFET.py | yasser296/Python-Projects | eae3598e2d4faf08d9def92c8b417c2e7946c5f4 | [
"MIT"
] | null | null | null | N-MOS_transistor_by_Python/I-V_Characteristics_n-MOSFET.py | yasser296/Python-Projects | eae3598e2d4faf08d9def92c8b417c2e7946c5f4 | [
"MIT"
] | null | null | null | from numpy import arange
from matplotlib import pyplot , figure
# Kn = Kn' * W/L 4
Kn=1e-3
# Vth is th threshold voltagee
Vth = 1.5
# Sweep drain to source voltge from 0 to 12V
Vds = arange(0, 12, 0.1).tolist()
Vgs = [4 , 6 , 8 , 10 ]
Id = list() # Drain Current Id (A)
for I in range(1,len(Vgs)+1) :
Id.append([])
print(Id)
print("\n\n\n\n\n")
# To draw the transition line
line_Id = list()
line_Vds = list()
# Estimate length of the Vds & Vgs lists
m = len( Vds )
n = len( Vgs )
# Initialize the I-V characteristic points
for i in range(0,n) :
for j in range(0,m) :
if (Vgs[i] < Vth) :
Id[i].append(0)
elif (Vds[j] >= ( Vgs[i] - Vth )) :
Id[i].append((0.5 * Kn * (Vgs[i] - Vth)**2) * 1000)
elif (Vds[j] < ( Vgs[i] - Vth )) :
Id[i].append((Kn *( (Vgs[i] - Vth)* Vds[j] - 0.5 * (Vds[j]**2) )) * 1000 )
# get the transition line points
if (Vds[j] == ( Vgs[i] - Vth )) :
line_Id.append((0.5 * Kn * (Vgs[i] - Vth)**2) * 1000 )
line_Vds.append(Vds[j])
print(Id)
# Plotting the I-V characteristic of n-MOSFET
figure, axis = pyplot.subplots()
print(axis)
print(figure)
figure.set_size_inches(12, 8)
print(figure)
curves = list()
for i in range(0,len(Vgs)) :
curve, = pyplot.plot(Vds, Id[i] , label="Vgs= %d" %Vgs[i] , linewidth=2)
pyplot.annotate("Vgs= %d" %Vgs[i], (10, max(Id[i])+0.0005) , fontsize=12 )
curves.append(curve)
# Plotting the transition line
line_Vds_2 = [0] + line_Vds
line_Id_2 = [0] + line_Id
tran, = pyplot.plot(line_Vds_2 , line_Id_2 , label="Transition line" , linestyle='--' , marker= 'x' , markersize = 12 )
curves.append(tran)
# Plotting the legends
pyplot.legend (curves, [curve.get_label() for curve in curves] , prop={"size":12})
# or
# #fontsize=16 == prop={"size":16} on legend only
axis.set_xlabel("Drain-source voltage Vds (Volt)" , fontsize=16 )
axis.set_ylabel("Drain Current Id (mA)" , fontsize=16 )
pyplot.grid(linestyle='--')
#pyplot.xaxis.grid (color="g")
#pyplot.yaxis.grid (color="r")
Vds_x_axis_numbers = arange(0,13,0.5).tolist()
axis.set_xticks (Vds_x_axis_numbers)
axis.tick_params ( axis='x' , colors='b')
Id_y_axis_numbers = arange(0,41,1).tolist()
axis.set_yticks (Id_y_axis_numbers)
axis.tick_params ( axis='y' , colors='g')
pyplot.title("I-V Characteristics of a n-MOSFET" ,fontsize=16 )
resolution = 500
pyplot.savefig('I-V_characteristic_dpi=%d' %resolution , dpi=resolution)
pyplot.show()
| 22.90566 | 119 | 0.639209 | 415 | 2,428 | 3.650602 | 0.293976 | 0.023762 | 0.032343 | 0.021782 | 0.129373 | 0.106271 | 0.067987 | 0.056766 | 0.056766 | 0 | 0 | 0.045158 | 0.17916 | 2,428 | 105 | 120 | 23.12381 | 0.715003 | 0.189456 | 0 | 0.072727 | 0 | 0 | 0.083419 | 0.012873 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.036364 | 0 | 0.036364 | 0.109091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a488bfa5aa832da083db4e6b51c66de316b8a1a6 | 7,652 | py | Python | mods/default/client/gui/game_overlays.py | mpbagot/hsc-major-project-code | eaa69bf566b5b34ae7d4aa78504f97576fa2bb1c | [
"MIT"
] | 4 | 2018-04-17T11:55:06.000Z | 2021-02-25T16:03:47.000Z | mods/default/client/gui/game_overlays.py | mpbagot/mata | eaa69bf566b5b34ae7d4aa78504f97576fa2bb1c | [
"MIT"
] | null | null | null | mods/default/client/gui/game_overlays.py | mpbagot/mata | eaa69bf566b5b34ae7d4aa78504f97576fa2bb1c | [
"MIT"
] | null | null | null | """
game_overlays.py
A module containing the GUI overlays of the default client game
"""
# Import the Modding API
from api.gui.gui import *
from api.gui.objects import *
from api.colour import *
from api.packets import SendCommandPacket
# Import stuff from the mod modules
from mods.default.client.gui.extras import *
from mods.default.client.gui.menus import *
# In-game heads-up display: health/experience bars, equipped item slots
# and the player's name/level, drawn in a panel at the bottom-right.
class HUD(Overlay):
def __init__(self, game):
super().__init__()
# NOTE(review): 'h' is computed but never used — confirm it can be dropped.
h = self.screen.get_height()
self.buttons = [P2PNoticeButton(game, [944, 540, 60, 60])]
self.game = game
self.bars = [
HorizBar([744, 698, 260, 20], (255, 0, 0), self.game.player.health/100, 'Health'),
HorizBar([744, 728, 260, 20], (0, 102, 255), self.game.player.exp, 'Experience')
]
equippedItems = self.game.player.inventory.getEquipped()
self.itemSlots = [
ItemSlot(game, equippedItems[0], [664, 630], 60),
ItemSlot(game, equippedItems[1], [664, 700], 60)
]
# Refresh the bar/slot state and draw the panel background.
def drawBackgroundLayer(self):
# Update the bar percentages
self.bars[0].percentage = self.game.player.health/100
# Experience bar shows progress within the current level; level appears
# to be derived as floor(sqrt(exp)) + 1 — see drawForegroundLayer.
self.bars[1].percentage = (self.game.player.exp-int(self.game.player.exp**0.5)**2)/(2*int(self.game.player.exp**0.5)+1)
for a in (0, 1):
self.itemSlots[a].setItem(self.game.player.inventory.getEquipped()[a])
# Draw the background rectangle
pygame.draw.rect(self.screen, (173, 144, 106), scaleRect([654, 620, 400, 150], self.screen))
pygame.draw.rect(self.screen, (65, 55, 40), scaleRect([654, 620, 400, 150], self.screen), 4)
# Draw the username and level text on top of the panel.
def drawForegroundLayer(self, mousePos):
super().drawForegroundLayer(mousePos)
# Generate a font object
font = pygame.font.Font('resources/font/main.ttf', 20)
text = font.render('Username: '+self.game.player.name, True, (255, 255, 255))
self.screen.blit(text, scaleRect([744, 640], self.screen))
# Generate a smaller font object
font = pygame.font.Font('resources/font/main.ttf', 12)
# Calculate and render the player level
playerLevel = int(self.game.player.exp**0.5)+1
text = font.render('Level: '+str(playerLevel), True, (255, 255, 255))
self.screen.blit(text, scaleRect([744, 670], self.screen))
# Pause-menu overlay: a centered panel with Resume/Options/Menu/Exit buttons.
class Pause(Overlay):
def __init__(self, game):
super().__init__()
self.game = game
self.buttons = [
ResumeButton([351, 179, 321, 90]),
OptionsButton([351, 286, 321, 90], "Options"),
MenuButton([351, 393, 321, 90], True),
ExitButton([351, 500, 321, 90], 'Exit to OS')
]
# Draw the menu panel background, sized relative to the screen.
def drawBackgroundLayer(self):
w = self.screen.get_width()
h = self.screen.get_height()
pygame.draw.rect(self.screen, (236, 196, 145), [w//3, h//7, w//3, h//1.55])
pygame.draw.rect(self.screen, (65, 55, 40), [w//3, h//7, w//3, h//1.55], 4)
# Draw the "Menu" title centered at the top of the panel.
def drawForegroundLayer(self, mousePos):
super().drawForegroundLayer(mousePos)
w, h = self.screen.get_size()
font = pygame.font.Font('resources/font/main.ttf', 30)
text = font.render('Menu', True, (0, 0, 0))
self.screen.blit(text, [(w-text.get_width())//2, h//7+5])
# Chat overlay: message history for the current tab, an input text area,
# and quick buttons for the most recently used chat tabs.
class Chat(Overlay):
def __init__(self, game, tab='global'):
super().__init__()
self.game = game
self.tab = tab
self.scrollScreen = Scrollbox([804, 438, 110, 90])
self.textarea = TextArea([100, 538, 618, 100], (255, 255, 255, 127))
# Track up to three recently opened non-default tabs on the mod instance.
latest = game.getModInstance('ClientMod').latestChatTabs
if self.tab not in ['local', 'global']+latest:
latest.insert(0, self.tab)
game.getModInstance('ClientMod').latestChatTabs = latest[:3]
self.buttons = [ChatTabButton([720, 540 + 32 * n, 202, 30], name) for n, name in enumerate(latest)]
def drawForegroundLayer(self, mousePos):
# Clear any pending notification for this tab on the HUD, if it is open.
hud = self.game.getModInstance('ClientMod').hudOverlay
if self.game.getGUIState() and self.game.getGUIState().isOverlayOpen(hud):
try:
# NOTE(review): bare except silently swallows all errors here —
# presumably guarding a missing 'notifications' attribute; confirm.
self.game.getGUIState().getOverlay(hud).notifications.delete(self.tab)
except:
pass
# Fetch the messages from the mod instance
messages = self.game.getModInstance('ClientMod').chatMessages.get(self.tab, [])
# Draw the background rectangle
overlayScreen = pygame.Surface(scaleRect([824, 558], self.screen))
overlayScreen.set_alpha(191)
pygame.draw.rect(overlayScreen, (140, 140, 140), scaleRect([0, 0, 824, 558], self.screen))
pygame.draw.rect(overlayScreen, (170, 170, 170), scaleRect([0, 458, 824, 100], self.screen))
self.screen.blit(overlayScreen, scaleRect([100, 80], self.screen))
self.textarea.draw(self.screen, mousePos)
# Draw the outline boxes
pygame.draw.rect(self.screen, (40, 40, 40), scaleRect([100, 538, 824, 100], self.screen), 4)
pygame.draw.rect(self.screen, (40, 40, 40), scaleRect([100, 80, 824, 558], self.screen), 4)
pygame.draw.rect(self.screen, (40, 40, 40), scaleRect([718, 538, 206, 100], self.screen), 4)
# Generate a font object
fontLarge = pygame.font.Font('resources/font/main.ttf', 20)
# Generate a smaller font object
fontSmall = pygame.font.Font('resources/font/main.ttf', 12)
# Draw the title outline box
title = fontLarge.render(self.tab, True, (0, 0, 0))
# Calculate the leftmost position of the text
leftXPos = (self.screen.get_width() - title.get_width())//2
# Calculate all of the points for the box around the title
pointList = [
[leftXPos - 35, 80],
[leftXPos + title.get_width() + 35, 80],
[leftXPos + 5 + title.get_width(), 50],
[leftXPos - 5, 50]
]
# Fill in the title background shape, then draw the outline around it
pygame.draw.polygon(self.screen, (140, 140, 140), pointList)
pygame.draw.lines(self.screen, (40, 40, 40), True, pointList, 4)
# Lastly, draw the channel title at the top
titlePos = [(self.screen.get_width() - title.get_width())//2, scaleVal(52, self.screen)]
self.screen.blit(title, titlePos)
# Blank out the scrollbox
self.scrollScreen.innerScreen.fill(pygame.Color(127, 127, 127, 0))
# Iterate and blit the messages into the scrollbox
# Messages containing NUL bytes are filtered out before rendering.
messages = [a for a in messages if '\x00' not in a]
for m, message in enumerate(messages):
text = fontSmall.render(message, True, (0, 0, 0))
self.scrollScreen.blit(text, [0, 15*m])
# Then draw the scrollbox onto the main screen
self.scrollScreen.draw(self.screen, mousePos)
super().drawForegroundLayer(mousePos)
# On Enter, send the typed text (wrapping plain text as a /message
# command for the current tab); all keys are forwarded to the textarea.
def doKeyPress(self, event):
if event.key == pygame.K_RETURN:
# Adjust the message
message = self.textarea.text
# Skip blank messages
if not message:
return
# Format a non-command as required
if message[0] != '/':
message = '/message '+self.tab+' '+message
# Create the packet
# Send the message
packet = SendCommandPacket(message)
self.game.packetPipeline.sendToServer(packet)
self.textarea.text = ''
# Pass the button press to the textarea
self.textarea.doKeyPress(event)
| 40.062827 | 127 | 0.595792 | 958 | 7,652 | 4.719207 | 0.263048 | 0.075205 | 0.030967 | 0.02787 | 0.304357 | 0.210573 | 0.201504 | 0.16921 | 0.076753 | 0.072329 | 0 | 0.080835 | 0.267642 | 7,652 | 190 | 128 | 40.273684 | 0.72591 | 0.122321 | 0 | 0.151261 | 0 | 0 | 0.035463 | 0.017208 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07563 | false | 0.008403 | 0.05042 | 0 | 0.159664 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a488cd89e65f252e4c293f2398293943079200dc | 11,362 | py | Python | JumpscaleLib/tools/docsite/Doc.py | threefoldtech/jumpscale_lib9 | 03c1451133d777e5af106fcc6f75c1138bb997f2 | [
"Apache-2.0"
] | null | null | null | JumpscaleLib/tools/docsite/Doc.py | threefoldtech/jumpscale_lib9 | 03c1451133d777e5af106fcc6f75c1138bb997f2 | [
"Apache-2.0"
] | 220 | 2018-07-29T08:37:17.000Z | 2019-08-05T15:01:27.000Z | JumpscaleLib/tools/docsite/Doc.py | threefoldtech/jumpscale_lib9 | 03c1451133d777e5af106fcc6f75c1138bb997f2 | [
"Apache-2.0"
] | 1 | 2018-08-20T09:16:08.000Z | 2018-08-20T09:16:08.000Z | from .Link import Link
from jumpscale import j
import toml
import copy
JSBASE = j.application.jsbase_get_class()
class Doc(JSBASE):
"""
A single markdown document inside a docsite.

Wraps one file on disk: lazily loads its markdown source, parses it,
resolves links/images, executes embedded macros and merges data
definitions (from the document itself and the docsite defaults).
"""
def __init__(self, path, name, docsite):
JSBASE.__init__(self)
self.path = path
self.docsite = docsite
# Categorize by directory name: blog posts and definition pages.
self.cat = ""
if "/blogs/" in path or "/blog/" in path:
self.cat = "blog"
if "/defs/" in path or "/def/" in path:
self.cat = "def"
self.path_dir = j.sal.fs.getDirName(self.path)
self.path_dir_rel = j.sal.fs.pathRemoveDirPart(self.path_dir, self.docsite.path).strip("/")
self.name = self._clean(name)
if self.name == "":
raise RuntimeError("name cannot be empty")
self.name_original = name
self.path_rel = j.sal.fs.pathRemoveDirPart(path, self.docsite.path).strip("/")
# NOTE(review): name_dot is computed but never stored/used — confirm.
name_dot = "%s/%s" % (self.path_dir_rel, self.name)
self.name_dot_lower = self._clean("%s/%s" % (self.path_dir_rel, self.name))
# self.markdown_source = ""
# self.show = True
self.errors = []
# Documents in directories starting with '_' are hidden.
if j.sal.fs.getDirName(self.path).strip("/").split("/")[-1][0] == "_":
# means the subdir starts with _
self.show = False
self._processed = False
# Lazily-populated caches, filled by the properties below.
self._extension = None
self._data = {} # is all data, from parents as well, also from default data
self._md = None
self._content = None
self._images = []
self._links_external = []
self._links_doc = []
self._links = []
# Normalize a name: slashes to dots, trimmed, ASCII-dense lowercase.
def _clean(self, name):
name = name.replace("/", ".")
name = name.strip(".")
return j.data.text.strip_to_ascii_dense(name)
# Find a non-clashing file path next to this doc; registers the chosen
# filename on the docsite and returns (name, full_path).
def _get_file_path_new(self, name="", extension="jpeg"):
nr = 0
if name == "":
name = self.name
dest = "%s/%s.%s" % (self.path_dir, name, extension)
found = j.sal.fs.exists(dest)
while found:
nr += 1
name = "%s_%s" % (name, nr) # to make sure we have a unique name
dest = "%s/%s.%s" % (self.path_dir, name, extension)
fname = "%s.%s" % (name, extension)
found = j.sal.fs.exists(dest) or fname in self.docsite._files
fname = "%s.%s" % (name, extension)
self.docsite._files[fname] = dest
return name, dest
@property
def links(self):
# All links found in the document, parsed lazily.
if self._links == []:
self._links_process()
return self._links
@property
def images(self):
# Image links found in the document, parsed lazily.
if not self._images:
self._links_process()
return self._images
@property
def extension(self):
if not self._extension:
self._extension = j.sal.fs.getFileExtension(self.path)
return self._extension
@property
def title(self):
# Title comes from the document's data parts; error if absent.
if "title" in self.data:
return self.data["title"]
else:
self.error_raise("Could not find title in doc.")
def error_raise(self, msg):
# Delegate error reporting to the owning docsite, tagging this doc.
return self.docsite.error_raise(msg, doc=self)
def htmlpage_get(self, htmlpage=None):
if htmlpage is None:
htmlpage = j.data.html.page_get()
htmlpage = self.markdown_obj.htmlpage_get(htmlpage=htmlpage, webparts=True)
return htmlpage
def html_get(self, htmlpage=None):
return str(self.htmlpage_get(htmlpage=htmlpage))
@property
def html(self):
return self.html_get()
@property
def data(self):
# Merge data from the document's own "data" parts with the docsite's
# defaults for matching path prefixes (shortest prefix first).
if self._data == {}:
# look for parts which are data
for part in self.parts_get(cat="data"):
for key, val in part.ddict.items():
# NOTE(review): looks like leftover debug output.
print("data update")
if j.data.types.list.check(val):
if key not in self._data:
self._data[key] = []
for subval in val:
if subval not in self._data[key] and subval != "":
self._data[key].append(subval)
else:
self._data[key] = val
# now we have all data from the document itself
keys = [part for part in self.docsite.data_default.keys()]
keys.sort(key=len)
for key in keys:
key = key.strip("/")
if self.path_rel.startswith(key):
data = self.docsite.data_default[key]
self._data_update(data)
print("data process doc")
return self._data
@property
def markdown_obj(self):
# Parsed markdown document; on parse failure the error text itself is
# wrapped in a code block so rendering still produces something.
if not self._md:
try:
self._md = j.data.markdown.document_get(self.markdown_source)
except Exception as e:
msg = "Could not parse markdown of %s" % self
msg += str(e)
self.error_raise(msg)
self._md = j.data.markdown.document_get(content="```\n%s\n```\n" % msg)
return self._md
def header_get(self, level=1, nr=0):
# Return the first header of the requested level.
# NOTE(review): 'nr' is accepted but never used to pick the nth match.
res = self.markdown_obj.parts_get(cat="header")
if len(res) < 1:
return self.error_raise("header level:%s %s could not be found" % (level, nr))
for header in res:
if header.level == level:
return header
@property
def markdown(self):
"""
markdown after processing of the full doc
(macros executed, links resolved, jinja2-rendered if needed)
"""
self._macros_process()
self._links_process()
try:
res = self.markdown_obj.markdown
except Exception as e:
msg = "Could not parse markdown of %s" % self
msg += str(e)
self.error_raise(msg)
res = msg
if "{{" in res:
# TODO:*1 rendering does not seem to be perfect ok
res = j.tools.jinja2.text_render(text=res, **self.data)
return res
@property
def markdown_source(self):
"""
markdown coming from source
"""
if not self._content:
self._content = j.sal.fs.fileGetContents(self.path)
return self._content
@property
def markdown_clean(self):
# NOTE(review): unfinished — drops into an IPython shell and returns
# None; must not be called in production code.
# remove the code blocks (comments are already gone)
print('markdown_clean')
from IPython import embed
embed(colors='Linux')
return None
@property
def markdown_clean_summary(self):
# First ~20 lines of the cleaned content, stopping early at a blank
# line (after line 10) or a header (after line 4).
c = self.content_clean
lines = c.split("\n")
counter = 0
out = ""
while counter < 20 and counter < len(lines):
line = lines[counter]
counter += 1
if line.strip() == "" and counter > 10:
return out
if len(line) > 0 and line.strip()[0] == "#" and counter > 4:
return out
out += "%s\n" % line
return out
def _data_update(self, data):
# Add default data only for keys the document did not define itself.
res = {}
for key, valUpdate2 in data.items():
# check for the keys not in the self.data yet and add them, the others are done above
if key not in self._data:
self._data[key] = copy.copy(valUpdate2) # needs to be copy.copy otherwise we rewrite source later
def link_get(self, filename=None, cat=None, nr=0, die=True):
"""
Return the nr'th link matching the filters, or None/raise when absent.
@param cat: image, doc,link, officedoc, imagelink #doc is markdown
"""
res = self.links_get(filename=filename, cat=cat)
if len(res) == 0:
if die:
raise RuntimeError("could not find link %s:%s" % (filename, cat))
else:
return None
if nr > len(res):
if die:
raise RuntimeError("could not find link %s:%s at position:%s" % (filename, cat, nr))
else:
return None
return res[nr]
def links_get(self, filename=None, cat=None):
# All links, optionally filtered by category and filename prefix.
self._links_process()
res = []
for link in self._links:
found = True
if cat is not None and not link.cat == cat:
found = False
if filename is not None and not link.filename.startswith(filename):
found = False
if found:
res.append(link)
return res
def _macros_process(self):
"""
eval the macro

NOTE(review): builds a call string and eval()s it — only safe because
macro sources are trusted docsite content; never feed untrusted input.
"""
for part in self.parts_get(cat="macro"):
line = part.method
if line.strip() == "":
return self.docsite.error_raise("empty macro cannot be executed", doc=self)
block = part.data
methodcode = line.rstrip(", )") # remove end )
methodcode = methodcode.replace("(", "(self,")
if not methodcode.strip() == line.strip():
# means there are parameters
methodcode += ",content=block)"
else:
methodcode += "(content=block)"
methodcode = methodcode.replace(",,", ",")
if methodcode.strip() == "":
raise RuntimeError("method code cannot be empty")
cmd = "j.tools.docsites.macros." + methodcode
# self.logger.debug(cmd)
# macro = eval(cmd)
try:
macro = eval(cmd) # is the result of the macro which is returned
part.result = macro
except Exception as e:
# On failure the error is shown in-place as a python code block.
block = "```python\nERROR IN MACRO*** TODO: *1 ***\ncmd:\n%s\nERROR:\n%s\n```\n" % (cmd, e)
self.logger.error(block)
self.docsite.error_raise(block, doc=self)
part.result = block
def _links_process(self):
"""
results in:
self._images = []
self._links_external = []
self._links_doc =
"""
if not self._links == []:
return
# check links for internal images
# regex = "!+\[.*\] *\([a-zA-Z0-9\.\-\_\ \/\"]+\)" # find all possible images/links
regex = "!*\[.*\] *\(.*\)"
for match in j.data.regex.yieldRegexMatches(regex, self.markdown_source, flags=0):
self.logger.debug("##:file:link:%s" % match)
link = Link(self, match.founditem)
if not link.link_source == "":
self._links.append(link)
# whats this one?
# regex = "src *= *\" */?static"
# for match in j.data.regex.yieldRegexMatches(regex, self.markdown_source, flags=0):
# self._content = self.markdown_source.replace(match.foundpart, "src = \"/")
def part_get(self, text_to_find=None, cat=None, nr=0, die=True):
"""
return part of markdown document e.g. header
@param cat is: table, header, macro, code, comment1line, comment, block, data, image
@param nr is the one you need to have 0 = first one which matches
@param text_to_find looks into the text
"""
return self.markdown_obj.part_get(text_to_find=text_to_find, cat=cat, nr=nr, die=die)
def parts_get(self, text_to_find=None, cat=None):
"""
@param cat is: table, header, macro, code, comment1line, comment, block, data, image
@param text_to_find looks into the text
"""
return self.markdown_obj.parts_get(text_to_find=text_to_find, cat=cat)
def __repr__(self):
return "doc:%s:%s" % (self.name, self.path)
__str__ = __repr__
| 33.417647 | 114 | 0.535557 | 1,378 | 11,362 | 4.285922 | 0.177794 | 0.021673 | 0.008127 | 0.006773 | 0.255842 | 0.219777 | 0.198273 | 0.174738 | 0.142736 | 0.113613 | 0 | 0.004175 | 0.346506 | 11,362 | 339 | 115 | 33.516224 | 0.791246 | 0.135011 | 0 | 0.209205 | 0 | 0.004184 | 0.065052 | 0.005847 | 0 | 0 | 0 | 0.00295 | 0 | 1 | 0.108787 | false | 0 | 0.020921 | 0.016736 | 0.259414 | 0.012552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a48bcd95f6ff3785768fc221bb5436bed3d1d5bd | 1,937 | py | Python | tally_ho/apps/tally/views/reports/races.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | tally_ho/apps/tally/views/reports/races.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | tally_ho/apps/tally/views/reports/races.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | from django.views.generic import TemplateView
from guardian.mixins import LoginRequiredMixin
from tally_ho.libs.views.exports import valid_ballots
from tally_ho.libs.permissions import groups
from tally_ho.libs.reports import progress as p
from tally_ho.libs.views import mixins
class RacesReportView(LoginRequiredMixin,
mixins.GroupRequiredMixin,
TemplateView):
group_required = groups.SUPER_ADMINISTRATOR
template_name = 'reports/races.html'
def get_per_ballot_progress(self):
data = []
tally_id = self.kwargs.get('tally_id')
archived = p.ArchivedProgressReport(tally_id)
for ballot in valid_ballots(tally_id):
archived_result = archived.for_ballot(ballot)
sc = ballot.sub_constituency
if sc:
data.append({
'ballot': ballot.number,
'district': sc.code,
'race_type': ballot.race_type_name,
'expected': archived_result['denominator'],
'complete': archived_result['number'],
'percentage': archived_result['percentage'],
'id': ballot.id,
'active': ballot.active
})
return data
def get(self, *args, **kwargs):
tally_id = kwargs['tally_id']
per_ballot = self.get_per_ballot_progress()
races = len(per_ballot)
completed = sum([1 for x in per_ballot if isinstance(
x['percentage'], float) and x['percentage'] >= 100])
overview = {
'races': races,
'completed': completed,
'percentage': p.rounded_percent(completed, races)
}
return self.render_to_response(
self.get_context_data(
overview=overview,
per_ballot=per_ballot,
tally_id=tally_id))
| 33.396552 | 64 | 0.587506 | 199 | 1,937 | 5.507538 | 0.39196 | 0.051095 | 0.040146 | 0.054745 | 0.036496 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003053 | 0.323696 | 1,937 | 57 | 65 | 33.982456 | 0.833588 | 0 | 0 | 0 | 0 | 0 | 0.083634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a48e2876e063fca41404c9b42cd9234687e02f29 | 1,251 | py | Python | psono/restapi/serializers/share_right_accept.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 48 | 2018-04-19T15:50:58.000Z | 2022-01-23T15:58:11.000Z | psono/restapi/serializers/share_right_accept.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 9 | 2018-09-13T14:56:18.000Z | 2020-01-17T16:44:33.000Z | psono/restapi/serializers/share_right_accept.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 11 | 2019-09-20T11:53:47.000Z | 2021-07-18T22:41:31.000Z | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from ..fields import UUIDField
from ..models import User_Share_Right
class ShareRightAcceptSerializer(serializers.Serializer):
    """Payload for accepting a pending share right.

    Validates the key type and resolves the (still unanswered) share right
    belonging to the requesting user, storing it in the validated data as
    ``user_share_right_obj``.
    """

    share_right_id = UUIDField(required=True)
    key = serializers.CharField(max_length=256, required=False)
    key_type = serializers.CharField(max_length=256, required=False, default='symmetric')
    key_nonce = serializers.CharField(max_length=64, required=False)

    def validate(self, attrs: dict) -> dict:
        # Only the two supported key types may be supplied.
        if attrs.get('key_type') not in ('asymmetric', 'symmetric'):
            raise exceptions.ValidationError(_("Invalid Key Type"))

        # accepted=None restricts the lookup to rights that have not been
        # answered yet; filtering by the request user enforces ownership.
        try:
            share_right = User_Share_Right.objects.get(
                pk=attrs.get('share_right_id'),
                user=self.context['request'].user,
                accepted=None,
            )
        except User_Share_Right.DoesNotExist:
            raise exceptions.ValidationError(_(
                "You don't have permission to access it or it does not exist or you already accepted or declined this share."
            ))

        attrs['user_share_right_obj'] = share_right
        return attrs
a48f32363a4214c8c84b8ccdfb70d7f2134e405c | 3,086 | py | Python | nb_cli/prompts/input.py | cdlaimin/nb-cli | b428a9a24091c072accedbeee56064c6a3cfd15a | [
"MIT"
] | 88 | 2020-10-02T07:16:06.000Z | 2022-03-30T01:24:36.000Z | nb_cli/prompts/input.py | cdlaimin/nb-cli | b428a9a24091c072accedbeee56064c6a3cfd15a | [
"MIT"
] | 13 | 2021-01-28T03:14:35.000Z | 2022-01-15T11:47:21.000Z | nb_cli/prompts/input.py | cdlaimin/nb-cli | b428a9a24091c072accedbeee56064c6a3cfd15a | [
"MIT"
] | 11 | 2021-03-11T15:12:23.000Z | 2022-01-13T10:09:18.000Z | from typing import Callable, Optional
from prompt_toolkit.styles import Style
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.layout import Layout
from prompt_toolkit.lexers import SimpleLexer
from prompt_toolkit.application import get_app
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.validation import Validator
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from . import NoAnswer, BasePrompt
class InputPrompt(BasePrompt[str]):
    """Simple Input Prompt.

    Style class guide:

    ```
    [?] Choose a choice and return? answer
    └┬┘ └──────────────┬──────────┘ └──┬─┘
    questionmark    question        answer
    ```
    """

    def __init__(
        self,
        question: str,
        question_mark: str = "[?]",
        validator: Optional[Callable[[str], bool]] = None,
    ):
        self.question: str = question
        self.question_mark: str = question_mark
        self.validator: Optional[Callable[[str], bool]] = validator

    def _reset(self):
        # Fresh state for each prompt run.
        self._answered: bool = False
        if self.validator is None:
            buffer_validator = None
        else:
            buffer_validator = Validator.from_callable(self.validator)
        self._buffer: Buffer = Buffer(
            name=DEFAULT_BUFFER,
            validator=buffer_validator,
            accept_handler=self._submit,
        )

    def _build_layout(self) -> Layout:
        self._reset()
        # Single editable line; the question text is drawn as a line prefix.
        control = BufferControl(self._buffer, lexer=SimpleLexer("class:answer"))
        window = Window(
            control,
            dont_extend_height=True,
            get_line_prefix=self._get_prompt,
        )
        return Layout(HSplit([window]))

    def _build_style(self, style: Style) -> Style:
        # User-supplied rules come last so they override the defaults.
        defaults = Style(
            [
                ("questionmark", "fg:#5F819D"),
                ("question", "bold"),
                ("answer", "fg:#5F819D"),
            ]
        )
        return Style([*defaults.style_rules, *style.style_rules])

    def _build_keybindings(self) -> KeyBindings:
        bindings = KeyBindings()

        @bindings.add("enter", eager=True)
        def accept(event: KeyPressEvent):
            # Runs the validator (if any) and then the accept handler.
            self._buffer.validate_and_handle()

        @bindings.add("c-c", eager=True)
        @bindings.add("c-q", eager=True)
        def abort(event: KeyPressEvent):
            event.app.exit(result=NoAnswer)

        return bindings

    def _get_prompt(
        self, line_number: int, wrap_count: int
    ) -> AnyFormattedText:
        # Same prefix on every (wrapped) line: "[?] question ".
        return [
            ("class:questionmark", self.question_mark),
            ("", " "),
            ("class:question", self.question.strip()),
            ("", " "),
        ]

    def _submit(self, buffer: Buffer) -> bool:
        self._answered = True
        get_app().exit(result=buffer.document.text)
        return True
| 29.390476 | 75 | 0.571614 | 304 | 3,086 | 5.736842 | 0.315789 | 0.063073 | 0.107225 | 0.039564 | 0.036697 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00381 | 0.319507 | 3,086 | 104 | 76 | 29.673077 | 0.809524 | 0.053791 | 0 | 0.025 | 0 | 0 | 0.038128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1125 | false | 0 | 0.1625 | 0.0125 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a492c95951a23587cee545058f4a9aba5d476ad7 | 4,880 | py | Python | IMU/VTK-6.2.0/IO/Geometry/Testing/Python/motor.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | IMU/VTK-6.2.0/IO/Geometry/Testing/Python/motor.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | null | null | null | IMU/VTK-6.2.0/IO/Geometry/Testing/Python/motor.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    '''
    Look up colorName in vtkNamedColors and return its red, green
    and blue components as a list of three doubles.
    '''
    components = [0.0, 0.0, 0.0]  # start from black
    vtk.vtkNamedColors().GetColorRGB(colorName, components)
    return components
# Create the RenderWindow, Renderer and interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# create cutting planes used as the implicit function for texture coords
planes = vtk.vtkPlanes()
points = vtk.vtkPoints()
norms = vtk.vtkFloatArray()
norms.SetNumberOfComponents(3)
points.InsertPoint(0, 0.0, 0.0, 0.0)
norms.InsertTuple3(0, 0.0, 0.0, 1.0)
points.InsertPoint(1, 0.0, 0.0, 0.0)
norms.InsertTuple3(1, -1.0, 0.0, 0.0)
planes.SetPoints(points)
planes.SetNormals(norms)

# thresholded texture shared by every part
texReader = vtk.vtkStructuredPointsReader()
texReader.SetFileName(VTK_DATA_ROOT + "/Data/texThres2.vtk")
texture = vtk.vtkTexture()
texture.SetInputConnection(texReader.GetOutputPort())
texture.InterpolateOff()
texture.RepeatOff()


def make_part_actor(part_number, color_name, triangulate=False):
    """Build one textured actor for a single part of Data/motor.g.

    Pipeline: BYU reader -> (optional triangle filter) -> normals ->
    implicit texture coords (cut by `planes`) -> dataset mapper -> actor.

    Args:
        part_number: BYU part number to read from motor.g.
        color_name: named color applied to the actor.
        triangulate: insert a vtkTriangleFilter before the normals
            (needed for part 3 in the original script).

    Returns:
        A configured vtkActor sharing the module-level `texture`.
    """
    reader = vtk.vtkBYUReader()
    reader.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
    reader.SetPartNumber(part_number)
    source_port = reader.GetOutputPort()
    if triangulate:
        tri = vtk.vtkTriangleFilter()
        tri.SetInputConnection(source_port)
        source_port = tri.GetOutputPort()
    normals = vtk.vtkPolyDataNormals()
    normals.SetInputConnection(source_port)
    tex = vtk.vtkImplicitTextureCoords()
    tex.SetInputConnection(normals.GetOutputPort())
    tex.SetRFunction(planes)
    # tex.FlipTextureOn()
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputConnection(tex.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.SetTexture(texture)
    actor.GetProperty().SetColor(GetRGBColor(color_name))
    return actor


# read motor parts...each part colored separately
#
byuActor = make_part_actor(1, 'cold_grey')
byuActor2 = make_part_actor(2, 'peacock')
byuActor3 = make_part_actor(3, 'raw_sienna', triangulate=True)
byuActor4 = make_part_actor(4, 'banana')
byuActor5 = make_part_actor(5, 'peach_puff')

# Add the actors to the renderer, set the background and size
#
ren1.AddActor(byuActor)
ren1.AddActor(byuActor2)
ren1.AddActor(byuActor3)
byuActor3.VisibilityOff()
ren1.AddActor(byuActor4)
ren1.AddActor(byuActor5)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)

camera = vtk.vtkCamera()
camera.SetFocalPoint(0.0286334, 0.0362996, 0.0379685)
camera.SetPosition(1.37067, 1.08629, -1.30349)
camera.SetViewAngle(17.673)
camera.SetClippingRange(1, 10)
camera.SetViewUp(-0.376306, -0.5085, -0.774482)
ren1.SetActiveCamera(camera)

# render the image
iren.Initialize()
#iren.Start()
| 28.208092 | 62 | 0.7625 | 522 | 4,880 | 7.095785 | 0.314176 | 0.012959 | 0.015389 | 0.015119 | 0.071544 | 0.070194 | 0.066415 | 0.012419 | 0 | 0 | 0 | 0.050577 | 0.112705 | 4,880 | 172 | 63 | 28.372093 | 0.80485 | 0.084836 | 0 | 0 | 0 | 0 | 0.029647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008547 | false | 0 | 0.025641 | 0 | 0.042735 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4945675fa5668b6a6e7a48d03c92355e85e8193 | 3,787 | py | Python | Liver_disease/liver_prediction.py | R3DDY97/kaggle_kernels | 8a5a456612bdae712e58188d407714c7cfd04849 | [
"MIT"
] | null | null | null | Liver_disease/liver_prediction.py | R3DDY97/kaggle_kernels | 8a5a456612bdae712e58188d407714c7cfd04849 | [
"MIT"
] | null | null | null | Liver_disease/liver_prediction.py | R3DDY97/kaggle_kernels | 8a5a456612bdae712e58188d407714c7cfd04849 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pandas as pd
# import numpy as np
from sklearn import (svm, preprocessing)
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import (recall_score, precision_score, accuracy_score, confusion_matrix,)
#precision_recall_curve,auc,roc_auc_score,roc_curve,recall_score,classification_report)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
# load and preprocess data
DATA = "/home/reddy/Documents/AI_ML_DL/2_Kaggle/Liver_disease/indian_liver_patient.csv"
def liver_data(data_path=DATA):
    """Load the Indian liver-patient CSV, preprocess it and run a classifier.

    Preprocessing: encode Gender numerically (Male=1, Female=0), drop rows
    with any missing value, and remap the "Dataset" target column from
    {1, 2} to {1, 0}.

    Args:
        data_path: path to the CSV file (defaults to the module-level DATA).
    """
    data = pd.read_csv(data_path)

    # Encode gender as a numeric feature and drop incomplete rows.
    data["Gender"] = data["Gender"].map({"Male": 1, "Female": 0})
    data.dropna(axis=0, how='any', inplace=True)

    # Features are every column except the target; labels remapped 1->1, 2->0.
    mldata = data.drop("Dataset", axis=1)
    labels = data["Dataset"].map({1: 1, 2: 0})

    classifier = LogisticRegression(multi_class='multinomial', solver='newton-cg')
    classify_data(classifier, mldata, labels)
def classify_data(classifier, mldata, labels):
    """Train `classifier` on a standardized 80/20 split and report metrics.

    Prints accuracy, precision, recall and the confusion matrix computed on
    the held-out test split, and also returns them so callers can use the
    values programmatically (previous versions returned None).

    Args:
        classifier: estimator exposing fit/predict/score (scikit-learn API).
        mldata: feature matrix.
        labels: binary target vector.

    Returns:
        dict with keys 'accuracy', 'precision', 'recall', 'confusion_matrix'.
    """
    # Hold out 20% of the data; fixed random_state keeps the split reproducible.
    train_data, test_data, train_label, test_label = train_test_split(
        mldata, labels, test_size=0.2, random_state=970)

    # Fit the scaler on the training split only so no information leaks
    # from the test set into the standardization.
    scaler = preprocessing.StandardScaler().fit(train_data)
    train_data_scaled = scaler.transform(train_data)
    test_data_scaled = scaler.transform(test_data)

    classifier.fit(train_data_scaled, train_label)
    predict_y = classifier.predict(test_data_scaled)

    metrics = {
        'accuracy': classifier.score(test_data_scaled, test_label),
        'precision': precision_score(test_label, predict_y),
        'recall': recall_score(test_label, predict_y),
        'confusion_matrix': confusion_matrix(test_label, predict_y),
    }
    print(metrics['accuracy'])
    print(metrics['precision'])
    print(metrics['recall'])
    print(metrics['confusion_matrix'])
    return metrics
# Entry point: run the full load/train/evaluate pipeline when executed as a script.
if __name__ == '__main__':
    liver_data()
| 38.252525 | 97 | 0.686295 | 480 | 3,787 | 5.1875 | 0.3375 | 0.02249 | 0.031325 | 0.027309 | 0.219277 | 0.165462 | 0.08755 | 0.060241 | 0 | 0 | 0 | 0.015873 | 0.184843 | 3,787 | 98 | 98 | 38.642857 | 0.790735 | 0.40375 | 0 | 0 | 0 | 0 | 0.139189 | 0.069369 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.12766 | 0 | 0.170213 | 0.085106 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4951993c951ee5441f92978fa0bae320459a650 | 570 | py | Python | practicalnlp/settings.py | paulomann/practical-nlp-pytorch | 7c6b3612599a4d74bf8d1acdd8a8bd25446b526b | [
"MIT"
] | null | null | null | practicalnlp/settings.py | paulomann/practical-nlp-pytorch | 7c6b3612599a4d74bf8d1acdd8a8bd25446b526b | [
"MIT"
] | null | null | null | practicalnlp/settings.py | paulomann/practical-nlp-pytorch | 7c6b3612599a4d74bf8d1acdd8a8bd25446b526b | [
"MIT"
] | 1 | 2019-09-24T17:13:35.000Z | 2019-09-24T17:13:35.000Z | from os.path import dirname, join
ROOT = dirname(dirname(__file__))
DATA = join(ROOT, 'data')
TRAIN_DATA = join(DATA, 'sst2', 'stsa.binary.phrases.train')
VALIDATION_DATA = join(DATA, 'sst2', 'stsa.binary.dev')
TEST_DATA = join(DATA, 'sst2', 'stsa.binary.test')
PRETRAINED_EMBEDDINGS_FILE = join(DATA, 'GoogleNews-vectors-negative300.bin')
CHECKPOINT_PATH = join(ROOT, "models")
WIKI_TEST_DATA = join(DATA, "wikitext-2", "wiki.test.tokens")
WIKI_VALID_DATA = join(DATA, "wikitext-2", "wiki.valid.tokens")
WIKI_TRAIN_DATA = join(DATA, "wikitext-2", "wiki.train.tokens") | 43.846154 | 77 | 0.738596 | 83 | 570 | 4.879518 | 0.361446 | 0.138272 | 0.177778 | 0.118519 | 0.377778 | 0.377778 | 0 | 0 | 0 | 0 | 0 | 0.017341 | 0.089474 | 570 | 13 | 78 | 43.846154 | 0.763006 | 0 | 0 | 0 | 0 | 0 | 0.336252 | 0.103328 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a495964d82d25e210cda079c174cec9fcd420d1c | 2,447 | py | Python | Tools/extract-sfc.py | Navasnaz/mib2-toolbox | 732f859d0dbb94dcf5c0d8388c959b7389a4c4f0 | [
"MIT"
] | 339 | 2019-09-18T21:46:50.000Z | 2022-03-31T07:50:04.000Z | Tools/extract-sfc.py | Navasnaz/mib2-toolbox | 732f859d0dbb94dcf5c0d8388c959b7389a4c4f0 | [
"MIT"
] | 188 | 2019-09-19T23:09:49.000Z | 2022-03-30T20:21:34.000Z | Tools/extract-sfc.py | Navasnaz/mib2-toolbox | 732f859d0dbb94dcf5c0d8388c959b7389a4c4f0 | [
"MIT"
] | 115 | 2019-09-19T19:49:15.000Z | 2022-03-12T21:10:00.000Z | # ----------------------------------------------------------
# --- Quick 'n' dirty CFF file extractor
#
# File: extract-sfc.py
# Author: Jille
# Revision: 1
# Purpose: MIB2 sfc file exporter
# Comments: Usage: extract-sfc.py <filename> <outdir>
# Changelog: First version
# ----------------------------------------------------------
import struct
import sys
import os
import zlib
if sys.version_info[0] < 3:
sys.exit("You need to run this with Python 3")
try:
from PIL import Image
except ImportError:
sys.exit(" You are missing the PIL module!\n"
" install it by running: \n"
" pip install image")
if len(sys.argv) != 3:
print("usage: extract-sfc.py <filename> <outdir>")
sys.exit(1)
out_dir = sys.argv[2]
if not os.path.exists(out_dir):
os.mkdir(out_dir)
def mkdir_path(path):
if not os.access(path, os.F_OK):
os.mkdir(path)
if not os.path.exists(sys.argv[1]):
print("%s not found" % (sys.argv[1]))
exit(1)
data = open(sys.argv[1], 'rb').read() # Open File with path in sys.argv[1] in mode 'r' reading and 'b' binary mode
offset = 0
counterRGBA = 0
counterL = 0
counterP = 0
offset = 16
(num_files,) = struct.unpack_from('<I', data, offset)
print("Number of files: \t %d" % (num_files))
offset = offset + 4 # offset 20
i = 0
offset_array = []
size_array = []
# go through the entire table of contents to get all paths and offsets
print("id \t offset \t unknown\t size")
while (i < num_files):
(id, unknown1, start_offset, size) = struct.unpack_from('<IIII', data, offset)
offset_array.append(start_offset)
size_array.append(size)
# go on to the next offset
offset = offset + 16
#print("%d %10x %10x %10s " % (i, start_offset, unknown1, size))
i = i + 1
j = 0
print("Extracting files...")
while (j < num_files):
offset = offset_array[j]
size = size_array[j]
file_data = data[offset:offset + size]
file_header = data[offset:offset + 4]
if file_header == b'\x89PNG':
extension = ".png"
else:
extension = ".bin"
# create path
folder = out_dir + "\\"
if not os.path.exists(folder):
os.makedirs(folder)
file = folder + "\\file_" + str(j) + extension
print("Extracting", file)
output_file = open(file, "wb+")
# read data at offset
output_file.write(file_data)
output_file.close()
j = j + 1
print("Done")
| 23.304762 | 115 | 0.591336 | 351 | 2,447 | 4.037037 | 0.390313 | 0.05928 | 0.01976 | 0.023289 | 0.079746 | 0.043754 | 0 | 0 | 0 | 0 | 0 | 0.02069 | 0.229669 | 2,447 | 104 | 116 | 23.528846 | 0.731034 | 0.251328 | 0 | 0 | 0 | 0 | 0.16097 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.09375 | 0 | 0.109375 | 0.109375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4991ee5bb3a8049313bf554b77dbf8520f3ded7 | 2,374 | py | Python | actions/lib/base.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | null | null | null | actions/lib/base.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | null | null | null | actions/lib/base.py | StackStorm-Exchange/powerdns | 13879e0e66b29a466d82c1077a1d4abde69c0d3e | [
"Apache-2.0"
] | 1 | 2021-12-01T14:49:27.000Z | 2021-12-01T14:49:27.000Z | # coding=utf-8
from st2common import log as logging
from st2common.runners.base_action import Action
from powerdns.exceptions import PDNSCanonicalError, PDNSError
import powerdns
__all__ = ["PowerDNSClient"]
LOG = logging.getLogger(__name__)
class PowerDNSClientError(Exception):
def __init__(self, message):
self.message = message
class PowerDNSClient(Action):
def __init__(self, config, timeout=5):
super(PowerDNSClient, self).__init__(config)
self.timeout = timeout
self.api_key = config.get("api_key")
self.api_url = config.get("api_url")
def _init_powerdns(self):
self.api_client = powerdns.PDNSApiClient(
api_endpoint=self.api_url,
api_key=self.api_key,
timeout=self.timeout
)
self._api = powerdns.PDNSEndpoint(self.api_client)
def _run(self, *args, **kwargs):
raise NotImplementedError
def _select_server_id(self, server_id):
for server in self._api.servers:
if str(server) == server_id:
self.api = server
return
raise PowerDNSClientError("Server not found")
def _select_zone(self, zone_name):
self.api = self.api.get_zone(zone_name)
if not self.api:
raise PowerDNSClientError("Zone not found")
def run(self, server_id, *args, **kwargs):
try:
self.timeout = kwargs.get("response_timeout")
del kwargs["response_timeout"]
except KeyError:
pass
self._init_powerdns()
# remove server_id from args
try:
args = list(args)
args.pop(args.index(server_id))
except ValueError:
pass
rrset = {}
_cpy = kwargs.copy()
for arg, value in _cpy.items():
if arg.startswith("rrset_"):
rrset[arg.split("_")[1]] = value
kwargs.pop(arg)
if rrset and any(rrset.values()):
kwargs["rrsets"] = [powerdns.interface.RRSet(**rrset)]
try:
self._select_server_id(server_id)
if "zone_name" in kwargs:
self._select_zone(kwargs.pop("zone_name"))
return (True, self._run(*args, **kwargs))
except (PowerDNSClientError, PDNSError, PDNSCanonicalError) as e:
return (False, e)
| 28.95122 | 73 | 0.603201 | 267 | 2,374 | 5.116105 | 0.314607 | 0.061493 | 0.016105 | 0.019034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00299 | 0.295703 | 2,374 | 81 | 74 | 29.308642 | 0.813995 | 0.016428 | 0 | 0.081967 | 0 | 0 | 0.051887 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0.032787 | 0.065574 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |