hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
80eb9de023cd6936eabda295cdecf52af930ed93 | 3,836 | py | Python | App/Graphic/app.py | Joey-Boivin/newegg-tracker | a0ed9337903d0b0a32ddb5713e79313eaf972e28 | [
"MIT"
] | null | null | null | App/Graphic/app.py | Joey-Boivin/newegg-tracker | a0ed9337903d0b0a32ddb5713e79313eaf972e28 | [
"MIT"
] | null | null | null | App/Graphic/app.py | Joey-Boivin/newegg-tracker | a0ed9337903d0b0a32ddb5713e79313eaf972e28 | [
"MIT"
] | null | null | null | """
This is the module containing the graphical user interface for
my Newegg tracker application
"""
from tkinter import *
from tkinter import ttk
import webbrowser
from PIL import ImageTk, Image #Tkinter's image management is outdated
root = Tk()
root.config(bg="#2D2D2D")
root.title("Newegg tracker by Joey-Boivin on GitHub")
root.geometry("1050x900")
main_frame = Frame(root)
main_frame.pack(fill=BOTH, expand=1)
my_canvas = Canvas(main_frame)
my_canvas.pack(side=LEFT, fill=BOTH, expand=1)
my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)
my_scrollbar.pack(side=RIGHT, fill=Y)
my_canvas.configure(bg='#2D2D2D', yscrollcommand=my_scrollbar.set)
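# Recompute the scrollable region whenever the canvas is resized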
my_canvas.bind('<Configure>', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox('all')))
second_frame = Frame(my_canvas)
second_frame.config(bg='#2D2D2D')
my_canvas.create_window((0,0), window=second_frame, anchor='nw')
class Application:
"""
This is the class containing the graphical user interface.
"""
def __init__(self, data:dict):
self.data = data
icons, price_widgets, name_widgets, meta_widgets, button_widgets = self.create_widgets()
self.show_widgets(icons, price_widgets, name_widgets, meta_widgets, button_widgets)
def create_widgets(self):
"""
Creates all the widgets for the gui, including icons, name labels,
metadata about the items, and a "show on Newegg" button.
"""
icons = []
price_widgets = []
name_widgets = []
meta_widgets = []
newegg_button_widgets = []
for tag, _data in self.data['items'].items():
path = f'./Graphic/Images/{_data["img-token"]}.png'
img = ImageTk.PhotoImage(Image.open(path).resize((100,100)))
icons.append(img)
price = list(_data['history'].values())[-1] #last value
price_widget = Label(second_frame, text=price, bg='#2D2D2D', fg='white')
price_widgets.append(price_widget)
metadata = _data['metadata']
display = ""
            if metadata:
                # Join the metadata entries on separate lines, without a trailing newline
                display = '\n'.join(f'{key}: {value}' for key, value in metadata.items())
display = Label(second_frame, text=display, bg='#2D2D2D', fg='white')
meta_widgets.append(display)
name = _data['product-name']
name_widget = Label(second_frame, text=name, bg='#2D2D2D', fg='white')
name_widgets.append(name_widget)
newegg_button = Button(second_frame, text='See on Newegg.ca', bg='Black', fg='white', command=lambda tag=tag: self.show_on_newegg(tag))
newegg_button_widgets.append(newegg_button)
return icons, price_widgets, name_widgets, meta_widgets, newegg_button_widgets
def show_widgets(
self, icons:list, price_widgets:list,
name_widgets:list, meta_widgets:list, button_widgets:list
):
"""
Shows the widgets for the gui
"""
for i in range(int(self.data['number-of-items'])):
panel = Label(second_frame, image=icons[i])
panel.grid(row=i, column=0, padx = '50', pady='10')
name_widgets[i].grid(row=i, column=1, padx = '50', pady='10')
price_widgets[i].grid(row=i,column=2, padx = '50', pady='10')
meta_widgets[i].grid(row=i,column=3, padx = '50', pady='10')
button_widgets[i].grid(row=i, column=4, padx = '40', pady='10')
root.mainloop()
@staticmethod
def show_on_newegg(tag:str):
"""
Opens a new tab on Newegg.ca the tracked item.
"""
webbrowser.open_new(f'www.newegg.ca/{tag}')
| 33.649123 | 147 | 0.62122 | 494 | 3,836 | 4.661943 | 0.307692 | 0.031264 | 0.017369 | 0.030395 | 0.217977 | 0.133739 | 0.095528 | 0.095528 | 0.095528 | 0.050369 | 0 | 0.02152 | 0.248957 | 3,836 | 113 | 148 | 33.946903 | 0.777855 | 0.104536 | 0 | 0 | 0 | 0 | 0.083083 | 0.012298 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.059701 | 0 | 0.149254 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ec147109819e66e84d7d1ec4c6fd44c7807724 | 1,344 | py | Python | MultiLevelForecast/TwoDayAhead/SeasonalPredict.py | AmateurZhang/EnjoyWithDataOnPowerSystems | 64227d0505012d2b5650874c65268e85d9751a17 | [
"MIT"
] | null | null | null | MultiLevelForecast/TwoDayAhead/SeasonalPredict.py | AmateurZhang/EnjoyWithDataOnPowerSystems | 64227d0505012d2b5650874c65268e85d9751a17 | [
"MIT"
] | null | null | null | MultiLevelForecast/TwoDayAhead/SeasonalPredict.py | AmateurZhang/EnjoyWithDataOnPowerSystems | 64227d0505012d2b5650874c65268e85d9751a17 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 21:31:02 2017
@author: thuzhang
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
File='DataBase/SeasonalPredict.csv'
OriginData=pd.read_table(File,sep=",",index_col=False)
Data=OriginData.to_numpy().astype(np.float32)  # DataFrame.as_matrix() was removed in modern pandas
_LengthOfFile=1095
#Get the properties and the target
Y=Data[:,1]
X=Data[:,2:]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
method=RandomForestRegressor()
method.fit(X_train,y_train.ravel())
Y_Predict=method.predict(X_test)
Y_Error=Y_Predict-y_test
_Result=np.c_[Y_Predict,y_test,Y_Error]
df = pd.DataFrame(data=_Result)
df.to_csv('RandomForestRegressorSeasonal.csv')
_Expectation=np.mean(Y_Error)
print('Expectation:%f'%_Expectation)
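# Report accuracy as 1 - normalized RMSE, followed by the normalized RMSE itself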
print(1-np.sqrt(np.mean((Y_Predict- y_test)**2))/np.mean(y_test))
print(np.sqrt(np.mean((Y_Predict- y_test)**2))/np.mean(y_test))
plt.figure(figsize=(18,8))
plt.plot(y_test,'green',label='Test')
plt.plot(Y_Predict, 'blue',label='Predict')
plt.plot(Y_Error,'orange',label='Error')
plt.legend(loc='best')
plt.show() | 24.888889 | 72 | 0.770833 | 218 | 1,344 | 4.56422 | 0.43578 | 0.045226 | 0.035176 | 0.052261 | 0.074372 | 0.074372 | 0.074372 | 0.074372 | 0.074372 | 0.074372 | 0 | 0.022727 | 0.083333 | 1,344 | 54 | 73 | 24.888889 | 0.784903 | 0.081845 | 0 | 0 | 0 | 0 | 0.088907 | 0.049755 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ed64fdce9e1b564ee1842e39bc09c80184b169 | 3,072 | py | Python | goto_cloud/migration_commander/tests/test_filesystem_mounting.py | jdepoix/goto_cloud | 59bb9923026e1b1dc6e8e08fb6b21300c8e8854a | [
"MIT"
] | 2 | 2018-02-04T23:22:17.000Z | 2019-04-15T12:06:04.000Z | goto_cloud/migration_commander/tests/test_filesystem_mounting.py | jdepoix/goto_cloud | 59bb9923026e1b1dc6e8e08fb6b21300c8e8854a | [
"MIT"
] | null | null | null | goto_cloud/migration_commander/tests/test_filesystem_mounting.py | jdepoix/goto_cloud | 59bb9923026e1b1dc6e8e08fb6b21300c8e8854a | [
"MIT"
] | null | null | null | from unittest.mock import patch, Mock
from remote_host_event_logging.public import RemoteHostEventLogger
from ..filesystem_mounting import FilesystemMountCommand
from ..device_identification import DeviceIdentificationCommand
from .utils import MigrationCommanderTestCase
class TestFilesystemMountCommand(MigrationCommanderTestCase):
def test_execute__mount_applied(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
self.assertIn('sudo mount -a', self.executed_commands)
def test_execute__fstab_edited(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
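        # Expected /etc/fstab entries: UUID, mountpoint, filesystem type, options, dump, pass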
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=549c8755-2757-446e-8c78-f76b50491f21\t'
+ DeviceIdentificationCommand._map_mountpoint('/')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=53ad2170-488d-481a-a6ab-5ce0e538f247\t'
+ DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=bcab224c-8407-4783-8cea-f9ea4be3fabf\t'
+ DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
def test_execute__mount_dirs_created(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/'),
self.executed_commands
)
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1'),
self.executed_commands
)
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2'),
self.executed_commands
)
@patch(
'migration_commander.remote_file_edit.RemoteFileEditor.append',
Mock(side_effect=Exception())
)
def test_execute__failed(self):
self._init_test_data('ubuntu16', 'target__device_identification')
with RemoteHostEventLogger.DisableLoggingContextManager():
with self.assertRaises(FilesystemMountCommand.MountingException):
FilesystemMountCommand(self.source).execute()
def test_execute__with_swap(self):
self._init_test_data('ubuntu12', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
| 34.909091 | 88 | 0.624349 | 268 | 3,072 | 6.865672 | 0.335821 | 0.045652 | 0.06087 | 0.043478 | 0.568478 | 0.501087 | 0.475 | 0.439674 | 0.407065 | 0.352717 | 0 | 0.038031 | 0.272461 | 3,072 | 87 | 89 | 35.310345 | 0.785235 | 0 | 0 | 0.471429 | 0 | 0 | 0.215495 | 0.130208 | 0 | 0 | 0 | 0 | 0.114286 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80f056ed61bb17bd4239371c1a53a38aef1a13a6 | 7,416 | py | Python | curvipy/graphing_calculator.py | dylannalex/curvipy | 04a03ae270051503cdb583ce0545152eb555a01f | [
"MIT"
] | 27 | 2022-02-22T03:09:22.000Z | 2022-03-20T21:12:52.000Z | curvipy/graphing_calculator.py | dylannalex/curvipy | 04a03ae270051503cdb583ce0545152eb555a01f | [
"MIT"
] | null | null | null | curvipy/graphing_calculator.py | dylannalex/curvipy | 04a03ae270051503cdb583ce0545152eb555a01f | [
"MIT"
] | null | null | null | from typing import Any, Callable
from math import sqrt, sin, acos, cos, pi
import turtle
def _is_int(result: Any) -> bool:
try:
int(result)
return True
except Exception:
return False
def _is_float(result: Any) -> bool:
try:
float(result)
return True
except Exception:
return False
class GraphingCalculator(turtle.Turtle):
def __init__(
self,
background_color: str = "white",
curve_color: str = "black",
curve_width: int = 4,
vector_color: str = "green",
vector_width: int = 3,
vector_head_size: int = 10,
show_axis: bool = True,
axis_color: str = "grey",
axis_width: int = 2,
) -> None:
"""
:param background_color: color name or hex code
:param curve_color: color name or hex code
:param curve_width: integer value that specifies curve width
:param show_axis: draws axis if true
:param axis_color: color name or hex code
:param axis_width: axis value that specifies curve width
"""
turtle.Turtle.__init__(self)
self.shapesize(0.1, 0.1, 0.1)
self.shape("square")
self.speed("fastest")
turtle.bgcolor(background_color)
# Screen attributes
self.background_color = background_color
# Curves attributes
self.curve_color = curve_color
self.curve_width = curve_width
# Vectors attributes
self.vector_color = vector_color
self.vector_width = vector_width
self.vector_head_size = vector_head_size
# Axis attributes
self.axis_color = axis_color
self.show_axis = show_axis
self.axis_width = axis_width
if self.show_axis:
self._draw_axis()
def draw_vector(
self,
vector: tuple[float, float],
x_axis_scale: int = 10,
y_axis_scale: int = 10,
) -> None:
        # Check if vector is the zero vector (v = <0, 0>)
vector_norm = sqrt(vector[0] ** 2 + vector[1] ** 2)
if vector_norm == 0:
return
# Graphing calculator setup
self.color(self.vector_color)
self.width(self.vector_width)
self._goto_without_drawing((0, 0))
# Draw vector
scaled_vector = (vector[0] * x_axis_scale, vector[1] * y_axis_scale)
self.goto(scaled_vector)
# Draw vector head
vector_angle = acos(vector[0] / vector_norm)
if vector[1] < 0:
vector_angle *= -1
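        # Arrowhead arms point backwards at 135 degrees (5*pi/4 rad) on either side of the vector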
left_head_vector = (
self.vector_head_size * cos(vector_angle + pi * 5 / 4),
self.vector_head_size * sin(vector_angle + pi * 5 / 4),
)
left_head_endpoint = (
scaled_vector[0] + left_head_vector[0],
scaled_vector[1] + left_head_vector[1],
)
self.goto(left_head_endpoint)
right_head_vector = (
self.vector_head_size * cos(vector_angle - pi * 5 / 4),
self.vector_head_size * sin(vector_angle - pi * 5 / 4),
)
right_head_endpoint = (
scaled_vector[0] + right_head_vector[0],
scaled_vector[1] + right_head_vector[1],
)
self._goto_without_drawing(scaled_vector)
self.goto(right_head_endpoint)
def draw_curve(
self,
curve: Callable,
domain_interval: tuple[int, int],
x_axis_scale: int = 10,
y_axis_scale: int = 10,
) -> None:
"""
Determines if a given curve is a function or a parametrized curve and
draws it.
:param curve: parametrized curve or function to draw
:param domain_interval: curve domain interval
:param x_axis_scale: x axis scaling factor
:param y_axis_scale: y axis scaling factor
"""
curve_evaluation = curve(domain_interval[0])
if _is_int(curve_evaluation) or _is_float(curve_evaluation):
self._draw_function(curve, domain_interval, x_axis_scale, y_axis_scale)
elif len(curve_evaluation) == 2:
self._draw_parametrized_function(
curve, domain_interval, x_axis_scale, y_axis_scale
)
else:
raise ValueError("'curve' should be a function or parametrized curve")
def draw_animated_curve(
self,
parametrized_function: Callable,
domain_interval: tuple[int, int],
vector_frequency: int = 2,
x_axis_scale: int = 10,
y_axis_scale: int = 10,
) -> None:
"""
        Given a parametrized function f(t) = <x(t), y(t)>, draws the
        set of vectors {<x(t), y(t)> | t in domain_interval} and then
        draws the graph of f(t).
:param parametrized_function: curve to draw
:param domain_interval: curve domain interval
:param vector_frequency: the frequency which vectors will be
drawn. The lower frequency the more vectors
:param x_axis_scale: x axis scaling factor
:param y_axis_scale: y axis scaling factor
"""
for t in range(domain_interval[0], domain_interval[1] + 1, vector_frequency):
f_vector = parametrized_function(t)
self.draw_vector(f_vector, x_axis_scale, y_axis_scale)
self._draw_parametrized_function(
parametrized_function, domain_interval, x_axis_scale, y_axis_scale
)
def _draw_function(
self,
f: Callable,
domain_interval: tuple[int, int],
x_axis_scale: int = 10,
y_axis_scale: int = 10,
) -> None:
self.color(self.curve_color)
self.width(self.curve_width)
for x in range(domain_interval[0], domain_interval[1] + 1, 1):
f_point = (
x_axis_scale * x,
y_axis_scale * f(x),
)
if x == domain_interval[0]:
self._goto_without_drawing(f_point)
else:
self.goto(f_point)
def _draw_parametrized_function(
self,
parametrized_function: Callable,
domain_interval: tuple[int, int],
x_axis_scale: int = 10,
y_axis_scale: int = 10,
) -> None:
self.color(self.curve_color)
self.width(self.curve_width)
for t in range(domain_interval[0], domain_interval[1] + 1, 1):
f_vector = parametrized_function(t)
scaled_f_vector = (
x_axis_scale * f_vector[0],
y_axis_scale * f_vector[1],
)
if t == domain_interval[0]:
self._goto_without_drawing(scaled_f_vector)
else:
self.goto(scaled_f_vector)
    def _goto_without_drawing(self, position: tuple[float, float]) -> None:
self.up()
self.goto(position)
self.down()
def _draw_axis(self) -> None:
self.width(self.axis_width)
self.color(self.axis_color)
w, h = turtle.screensize()
# y axis
self._goto_without_drawing((0, -h * 2))
self.goto(0, h * 2)
# x axis
self._goto_without_drawing((-w * 2, 0))
self.goto((w * 2, 0))
def clear(self) -> None:
"""
Clears the graphic calculator screen.
"""
turtle.clearscreen()
turtle.bgcolor(self.background_color)
if self.show_axis:
self._draw_axis()
| 31.159664 | 85 | 0.58603 | 928 | 7,416 | 4.408405 | 0.150862 | 0.061599 | 0.034221 | 0.034221 | 0.436324 | 0.356881 | 0.340259 | 0.282083 | 0.257639 | 0.215106 | 0 | 0.017554 | 0.324029 | 7,416 | 237 | 86 | 31.291139 | 0.798524 | 0.162352 | 0 | 0.310976 | 0 | 0 | 0.013745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067073 | false | 0 | 0.018293 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80f09cadf4fef85309af1bc1f2bfff6d59c73d5d | 6,820 | py | Python | hybridq/noise/channel/utils.py | jsmarsha11/hybridq-nasa | 42f2998a059e5615dce6ccdbf7ae6dc4954bbce9 | [
"Apache-2.0"
] | null | null | null | hybridq/noise/channel/utils.py | jsmarsha11/hybridq-nasa | 42f2998a059e5615dce6ccdbf7ae6dc4954bbce9 | [
"Apache-2.0"
] | null | null | null | hybridq/noise/channel/utils.py | jsmarsha11/hybridq-nasa | 42f2998a059e5615dce6ccdbf7ae6dc4954bbce9 | [
"Apache-2.0"
] | null | null | null | """
Authors: Salvatore Mandra (salvatore.mandra@nasa.gov),
Jeffrey Marshall (jeffrey.s.marshall@nasa.gov)
Copyright © 2021, United States Government, as represented by the Administrator
of the National Aeronautics and Space Administration. All rights reserved.
The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import annotations
import numpy as np
def is_dm(rho: np.ndarray, atol=1e-6) -> bool:
"""
    Check whether the given input is a valid density matrix.
"""
rho = np.asarray(rho)
d = int(np.sqrt(np.prod(rho.shape)))
rho_full = np.reshape(rho, (d, d))
hc = np.allclose(rho_full, rho_full.T.conj(), atol=atol)
tp = np.isclose(np.trace(rho_full), 1, atol=atol)
apprx_gtr = lambda y, x: np.real(y) >= x or np.isclose(y, x, atol=atol)
ev = np.linalg.eigvals(rho_full)
psd = np.all([apprx_gtr(e, 0) for e in ev])
return (hc and tp and psd)
def ptrace(state: np.ndarray,
keep: {int, list[int]},
dims: {int, list[int]} = None) -> np.ndarray:
"""
compute the partial trace of a pure state (vector) or density matrix.
state: np.array
One dimensional for pure state e.g. np.array([1,0,0,0])
or two dimensional for density matrix e.g. np.array([[1,0],[0,0]])
keep: list of int
the qubits we want to keep (all others traced out).
Can also specify a single int if only keeping one qubit.
dims: list of int, optional
List of qudit dimensions respecting the ordering of `state`.
Number of qubits is `len(dims)`, and full Hilbert space
dimension is `product(dims)`.
If unspecified, assumes 2 for all.
Returns the density matrix of the remaining qubits.
"""
state = np.asarray(state)
if len(state.shape) not in (1, 2):
raise ValueError('should be pure state (one dimensional) '
'or density matrix (two dimensional). '
f'Received dimension {len(state.shape)}')
# pure state or not
pure = len(state.shape) == 1
if not pure and state.shape[0] != state.shape[1]:
raise ValueError('invalid state input.')
full_dim = np.prod(state.shape[0])
if dims is not None and full_dim != np.prod(dims):
raise ValueError('specified dimensions inconsistent with state')
n_qubits = np.log2(full_dim) if dims is None else len(dims)
if np.isclose(n_qubits, round(n_qubits)):
n_qubits = int(round(n_qubits))
else:
raise ValueError('invalid state size')
keep = [keep] if isinstance(keep, int) else list(keep)
if not np.all([q in range(n_qubits)
for q in keep]) or len(keep) >= n_qubits:
raise ValueError('invalid axes')
if dims is None:
dims = [2] * n_qubits
# dimensions of qubits we keep
final_dims = [dims[i] for i in keep]
final_dim = np.prod(final_dims)
# dimensions to trace out
drop_dim = int(round(full_dim / final_dim))
if pure:
state = state.reshape(dims)
perm = keep + [q for q in range(n_qubits) if q not in keep]
state = np.transpose(state, perm).reshape(final_dim, drop_dim)
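        # rho_keep = Tr_drop(|psi><psi|): contract psi with its conjugate over the dropped index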
return np.einsum('ij,kj->ik', state, state.conj())
else:
# now we have to redefine things in case of a density matrix
# basically we double the sizes
density_dims = dims + dims
keep += [q + n_qubits for q in keep]
perm = keep + [q for q in range(2 * n_qubits) if q not in keep]
state = state.reshape(density_dims)
state = np.transpose(state, perm)
state = state.reshape((final_dim, final_dim, drop_dim, drop_dim))
return np.einsum('ijkk->ij', state)
def is_channel(channel: SuperGate,
atol=1e-8,
order: tuple[any, ...] = None,
**kwargs) -> bool:
"""
Checks using the Choi matrix whether or not `channel` defines
a valid quantum channel.
That is, we check it is a valid CPTP map.
Parameters
----------
channel: MatrixSuperGate or KrausSuperGate
Must have the method 'map()'.
atol: float, optional
absolute tolerance to use for determining channel is CPTP.
order: tuple[any, ...], optional
If provided, Kraus' map is ordered accordingly to `order`.
See `MatrixChannel.map()`
kwargs: kwargs for `MatrixChannel.map()`
"""
C = choi_matrix(channel, order, **kwargs)
dim = _channel_dim(channel)
# trace preserving
tp = np.isclose(C.trace(), dim, atol=atol)
# hermiticity preserving
hp = np.allclose(C, C.conj().T, atol=atol)
# completely positive
apprx_gtr = lambda e, x: np.real(e) >= x or np.isclose(e, x, atol=atol)
cp = np.all([
apprx_gtr(e, 0) and np.isclose(np.imag(e), 0, atol=atol)
for e in np.linalg.eigvals(C)
])
return tp and hp and cp
def choi_matrix(channel: SuperGate,
order: tuple[any, ...] = None,
**kwargs) -> np.ndarray:
"""
return the Choi matrix for channel, of shape (d**2, d**2)
for a d-dimensional Hilbert space.
The channel can be applied as:
Lambda(rho) = Tr_0[ (I \otimes rho^T) C]
where C is the Choi matrix.
Parameters
----------
channel: MatrixSuperGate or KrausSuperGate
Must have the method 'map()'.
order: tuple[any, ...], optional
If provided, Kraus' map is ordered accordingly to `order`.
See `MatrixChannel.map()`
kwargs: kwargs for `MatrixChannel.map()`
"""
if not hasattr(channel, 'map'):
raise ValueError("'channel' must have method 'map()'")
op = channel.map(order, **kwargs)
d = _channel_dim(channel)
C = np.zeros((d**2, d**2), dtype=complex)
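    # Build C = sum_ij E_ij (x) Lambda(E_ij), where E_ij are the matrix units of the d-dimensional space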
for ij in range(d**2):
Eij = np.zeros(d**2)
Eij[ij] = 1
        mapped = op @ Eij  # apply the channel superoperator to the vectorized matrix unit
        C += np.kron(Eij.reshape((d, d)), mapped.reshape((d, d)))
return C
def _channel_dim(channel):
# map() gives the dimension squared of the channel
full_dims = channel.map().shape
assert len(full_dims) == 2
assert full_dims[0] == full_dims[1]
d = np.sqrt(full_dims[0])
if not np.isclose(d, int(d)):
raise ValueError('invalid shape for channel')
return int(d)
| 34.795918 | 79 | 0.630645 | 1,010 | 6,820 | 4.20297 | 0.265347 | 0.018139 | 0.02073 | 0.007538 | 0.163251 | 0.139929 | 0.113545 | 0.104123 | 0.08669 | 0.08669 | 0 | 0.009057 | 0.255279 | 6,820 | 195 | 80 | 34.974359 | 0.826541 | 0.405132 | 0 | 0.044944 | 0 | 0 | 0.074557 | 0 | 0 | 0 | 0 | 0 | 0.022472 | 1 | 0.05618 | false | 0 | 0.022472 | 0 | 0.146067 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80f1c40e1c743906487eb873e07151031bc687f3 | 10,299 | py | Python | parser/team23/instruccion/insert_into.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team23/instruccion/insert_into.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team23/instruccion/insert_into.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from abstract.instruccion import *
from tools.console_text import *
from tools.tabla_tipos import *
from tools.tabla_simbolos import *
from storage import jsonMode as funciones
from error.errores import *
from instruccion.P_Key import *
from instruccion.F_Key import *
from instruccion.unique_simple import *
from instruccion.condicion_simple import *
from abstract.retorno import *
from expresion.primitivo import *
class insert_into (instruccion):
def __init__(self,dato,lista, cols_id, line,column,num_nodo):
super().__init__(line, column)
self.dato = dato
self.lista = lista
self.cols_id = cols_id
self.num_nodo = num_nodo
        # AST node for INSERT INTO
self.nodo = nodo_AST('INSERT INTO', num_nodo)
self.nodo.hijos.append(nodo_AST('INSERT INTO',num_nodo+1))
self.nodo.hijos.append(nodo_AST(dato, num_nodo + 2))
if cols_id != None:
col_index = num_nodo + 9
self.nodo.hijos.append(nodo_AST('(', num_nodo + 7))
for columna in cols_id:
self.nodo.hijos.append(nodo_AST(cols_id, col_index))
col_index += 1
self.nodo.hijos.append(nodo_AST(')', num_nodo + 8))
self.nodo.hijos.append(nodo_AST('VALUES', num_nodo + 3))
self.nodo.hijos.append(nodo_AST('(', num_nodo + 4))
for valor in lista:
self.nodo.hijos.append(valor.nodo)
self.nodo.hijos.append(nodo_AST(')', num_nodo + 6))
        # Grammar
self.grammar_ = "<TR><TD>INSTRUCCION ::= INSERT INTO list_id ID VALUES ( list_val ); </TD><TD> INSTRUCCION = new insert_into( " + dato + ", list_val, list_id );</TD></TR>\n"
if cols_id != None:
self.grammar_ += "<TR><TD> list_id ::= list_id1 , ID </TD><TD> list_id = list_id1.append(ID); </TD></TR>\n"
self.grammar_ += '<TR><TD> list_id ::= ID </TD><TD> list_id = [ID] </TD></TR>\n'
else:
self.grammar_ += "<TR><TD> list_id ::= EPSILON </TD><TD> list_id = None; </TD></TR>\n"
self.grammar_ += '<TR><TD> LIST_VAL ::= LIST_VAL1 , EXPRESSION </TD><TD> LIST_VAL = LIST_VAL1.append( EXPRESSION ); </TD></TR>\n'
self.grammar_ += '<TR><TD> LIST_VAL ::= EXPRESSION </TD><TD> LIST_VAL ::= [] </TD></TR>\n'
for valor in lista:
self.grammar_ += valor.grammar_
def ejecutar(self):
#try:
actual_db = get_actual_use()
valores_iniciales = []
lista_aux = []
for item in self.lista:
valores_iniciales.append(item.ejecutar([]))
retornos = []
index_id = 0
if self.cols_id != None:
            # Extract the table's columns
columnas_table = ts.get_cols(actual_db, self.dato)
if columnas_table != None:
for item_columna in columnas_table:
if index_id < len(self.cols_id):
if self.cols_id[index_id] == item_columna.id_:
lista_aux.append(self.lista[index_id])
retornos.append(valores_iniciales[index_id])
index_id += 1
else:
lista_aux.append(primitivo(self.line, self.column, 'NULL', tipo_primitivo.NULL, self.num_nodo + 1000000000))
retornos.append(retorno('NULL', tipo_primitivo.NULL))
else:
lista_aux.append(primitivo(self.line, self.column, 'NULL', tipo_primitivo.NULL, self.num_nodo + 1000000000))
retornos.append(retorno('NULL', tipo_primitivo.NULL))
else:
errores.append(nodo_error(self.line, self.column, 'E-42P10 invalid column reference: Cannot extract columns to insert data', 'Semántico'))
add_text('E-42P10 invalid column reference: Cannot extract columns to insert data.\n')
else:
retornos = valores_iniciales
valores = []
for item in retornos:
valores.append(item.valor)
tipo_dominante = 17
        # Validate the inserted value types against the column types
if len(retornos) == ts.count_columns(actual_db, self.dato):
count_pos = 0
for value in retornos:
if value.tipo != tipo_primitivo.NULL:
columna = ts.get_col_by_pos(actual_db, self.dato, count_pos)
tipo_dominante = tipos_tabla[value.tipo.value][columna.tipo.value]
if tipo_dominante != columna.tipo:
errores.append(nodo_error(self.line, self.column, 'E-42809 wrong object type: You cannot insert a data type ' + self.get_str_tipo(value.tipo) + ' in a column of type ' + self.get_str_tipo(columna.tipo), 'Semántico'))
add_text('E-42809 wrong object type: You cannot insert a data type ' + self.get_str_tipo(value.tipo) + ' in a column of type ' + self.get_str_tipo(columna.tipo)+ '\n')
return
count_pos += 1
                    # Validate that the value fits within the declared size
if tipo_dominante == tipo_primitivo.CHAR or tipo_dominante == tipo_primitivo.VARCHAR:
if columna.size < len(value.valor):
errores.append(nodo_error(self.line, self.column, 'E-22015 interval field overflow: Data size exceeded ' + value.valor, 'Semántico'))
add_text('E-22015 interval field overflow: Data size exceeded ' + value.valor + '\n')
return
else:
                    # Skip NULL here; column constraints are validated afterwards
count_pos += 1
else:
errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: Columns out of bounds', 'Semántico'))
add_text('E-22005 error in assignment: Columns out of bounds\n')
return
        # Validate column constraints
index_col = 0
for value in valores:
col_actual = ts.get_col_by_pos(actual_db, self.dato, index_col)
if col_actual.condiciones != None:
for restriccion in col_actual.condiciones:
valido = None
if isinstance(restriccion, unique_simple):
pos_col = ts.get_pos_col(actual_db, self.dato, col_actual.id_)
valido = restriccion.ejecutar(self.dato, value, pos_col)
elif isinstance(restriccion, condicion_simple):
pos_col = ts.get_pos_col(actual_db, self.dato, col_actual.id_)
valido = restriccion.ejecutar(value, pos_col)
if isinstance(valido, nodo_error):
errores.append(valido)
salida_consola = valido.valor + '\n'
add_text(salida_consola)
return
elif valido != None:
                        # A default value was found to replace this datum
valores[index_col] = valido
index_col += 1
aux_insert = funciones.insert(actual_db, self.dato, valores)
        # Return value: 0 success, 1 error in the operation, 2 database does not exist, 3 table does not exist, 4 duplicate primary key, 5 columns out of bounds.
if aux_insert == 0:
add_text("M-00000 successful completion: Row inserted correctly\n")
elif aux_insert == 1:
errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: Could not insert row', 'Semántico'))
add_text('E-22005 error in assignment: Could not insert row\n')
elif aux_insert == 2:
            errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: There is no database with the following ID -> ' + actual_db, 'Semántico'))
            add_text('E-22005 error in assignment: There is no database with the following ID -> ' + actual_db + '\n')
elif aux_insert == 3:
errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: The table with the following ID does not exist -> ' + self.dato, 'Semántico'))
add_text('E-22005 error in assignment: The table with the following ID does not exist -> ' + self.dato + '\n')
elif aux_insert == 4:
errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: Duplicate primary key', 'Semántico'))
add_text('E-22005 error in assignment: Duplicate primary key\n')
elif aux_insert == 5:
errores.append(nodo_error(self.line, self.column, 'E-22005 error in assignment: Columns out of bounds', 'Semántico'))
add_text('E-22005 error in assignment: Columns out of bounds\n')
#except:
        # errores.append(nodo_error(self.line, self.column, 'ERROR - Could not insert into table: ' + self.dato, 'Semántico'))
        # add_text('ERROR - Could not insert into table: ' + self.dato + '\n')
def get_str_tipo(self, tipo):
if tipo == tipo_primitivo.SMALLINT:
return "SMALLINT"
elif tipo == tipo_primitivo.INTEGER:
return "INTEGER"
elif tipo == tipo_primitivo.BIGINT:
return "BIGINT"
elif tipo == tipo_primitivo.DECIMAL:
return "DECIMAL"
elif tipo == tipo_primitivo.REAL:
return "REAL"
elif tipo == tipo_primitivo.DOUBLE_PRECISION:
return "DOUBLE PRECISION"
elif tipo == tipo_primitivo.MONEY:
return "MONEY"
elif tipo == tipo_primitivo.VARCHAR:
return "VARCHAR"
elif tipo == tipo_primitivo.CHAR:
return "CHAR"
elif tipo == tipo_primitivo.TEXT:
return "TEXT"
elif tipo == tipo_primitivo.TIMESTAMP:
return "TIMESTAMP"
elif tipo == tipo_primitivo.DATE:
return "DATE"
elif tipo == tipo_primitivo.TIME:
return "TIME"
elif tipo == tipo_primitivo.INTERVAL:
return "INTERVAL"
elif tipo == tipo_primitivo.BOOLEAN:
return "BOOLEAN" | 50.239024 | 240 | 0.585202 | 1,265 | 10,299 | 4.594466 | 0.161265 | 0.049209 | 0.0468 | 0.050585 | 0.461287 | 0.430489 | 0.402099 | 0.38627 | 0.333792 | 0.265485 | 0 | 0.020777 | 0.31304 | 10,299 | 205 | 241 | 50.239024 | 0.800707 | 0.063113 | 0 | 0.161677 | 0 | 0.035928 | 0.202656 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017964 | false | 0 | 0.071856 | 0 | 0.209581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80f7bc024b85ee0e8080b2883866b323df39445c | 3,771 | py | Python | gym_modular/sensors/gripper_velocity_2d_sensor.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/sensors/gripper_velocity_2d_sensor.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/sensors/gripper_velocity_2d_sensor.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | from typing import Dict, Tuple, List
import numpy as np
from .continuous_sensor import ContinuousSensor
from ..ball_placing_task import BallPlacingTask
class GripperVelocity2DSensor(ContinuousSensor[BallPlacingTask]):
def __init__(self, robot_name: str, linear_limit_lower: np.ndarray, linear_limit_upper: np.ndarray,
angular_limit_lower: float, angular_limit_upper: float, sense_angle: bool = True):
super(GripperVelocity2DSensor, self).__init__()
self.__robot_name = robot_name
self.__sense_angle = sense_angle
self.__linear_limit_lower = linear_limit_lower
self.__linear_limit_upper = linear_limit_upper
self.__angular_limit_lower = angular_limit_lower
self.__angular_limit_upper = angular_limit_upper
def _get_limits(self) -> Dict[str, Tuple[np.ndarray, np.ndarray]]:
lin_lims = (self.__linear_limit_lower, self.__linear_limit_upper)
ang_lims = (np.array([self.__angular_limit_lower]), np.array([self.__angular_limit_upper]))
if self.__sense_angle:
return {
"gripper_vel_xy": lin_lims,
"gripper_angular_vel_z": ang_lims
}
else:
return {
"gripper_vel_xy": lin_lims
}
def __observe(self) -> Dict[str, np.ndarray]:
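        # Rotate the TCP's (linear, angular) velocity from the world frame into the table-top frame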
vel = self.task.environment.robots["ur10"].gripper.wrapped_body.links["tcp"].velocity
lin_table_frame, ang_table_frame = self.task.table_top_center_pose.rotation.apply(vel, inverse=True)
if self.__sense_angle:
return {
"gripper_vel_xy": lin_table_frame[:2],
"gripper_angular_vel_z": ang_table_frame[2:3]
}
else:
return {"gripper_vel_xy": lin_table_frame[:2]}
def _reset_unnormalized(self) -> Dict[str, np.ndarray]:
return self.__observe()
def _observe_unnormalized(self) -> Dict[str, np.ndarray]:
return self.__observe()
@classmethod
def from_parameters(cls, robot_name: str, parameters: Dict[str, List[float]],
sense_angle: bool = True) -> "GripperVelocity2DSensor":
"""
        Create a GripperVelocity2DSensor from a parameters dictionary.
        :param robot_name: the name of the robot that is controlled
        :param parameters: a dictionary containing the entries end_effector_linear_velocity_limits_lower/upper
            and end_effector_angular_velocity_limits_lower/upper
        :param sense_angle: whether to include the angular velocity around z in the observation
        :return: a GripperVelocity2DSensor with the given parameters
"""
linear_lower = parameters["end_effector_linear_velocity_limits_lower"]
linear_upper = parameters["end_effector_linear_velocity_limits_upper"]
angular_lower = parameters["end_effector_angular_velocity_limits_lower"]
angular_upper = parameters["end_effector_angular_velocity_limits_upper"]
assert np.all(np.equal(angular_lower, angular_lower[0])) and \
np.all(np.equal(angular_upper, angular_upper[0])) and \
angular_lower[0] == -angular_upper[0], "Differing angular limits are not supported"
assert np.all(np.equal(linear_lower, linear_lower[0])) and \
np.all(np.equal(linear_upper, linear_upper[0])) and \
linear_lower[0] == -linear_upper[0], "Differing linear limits are not supported"
lin = linear_upper[0]
ang = angular_upper[0]
return GripperVelocity2DSensor(
robot_name, linear_limit_lower=np.full(2, -lin), linear_limit_upper=np.full(2, lin),
angular_limit_lower=-ang, angular_limit_upper=ang, sense_angle=sense_angle)
| 49.618421 | 118 | 0.664015 | 442 | 3,771 | 5.255656 | 0.226244 | 0.047353 | 0.034438 | 0.030994 | 0.321997 | 0.229014 | 0.141627 | 0.092553 | 0.074042 | 0 | 0 | 0.007779 | 0.250066 | 3,771 | 75 | 119 | 50.28 | 0.813649 | 0.122779 | 0 | 0.155172 | 0 | 0 | 0.115929 | 0.071033 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.103448 | false | 0 | 0.068966 | 0.034483 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80fa599a04868da515185f5ef67ddee93b107047 | 19,434 | py | Python | src/transe.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | src/transe.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | src/transe.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | # the source codes of transE are from https://github.com/mklimasz/TransE-PyTorch
from absl import app
from absl import flags
import os
import numpy as np
import torch.optim as optim
from torch.utils import data as torch_data
from torch.utils import tensorboard
from collections import Counter
from torch.utils import data
from typing import Dict, Tuple
import torch
from torch import nn
from torch.optim import optimizer
import pickle
FB15K = 'FB15K'
WORDNET = 'WordNet'
dataset = WORDNET # WordNet FB15K
FLAGS = flags.FLAGS
flags.DEFINE_float("lr", default=0.01, help="Learning rate value.")
flags.DEFINE_integer("seed", default=715, help="Seed value.")
flags.DEFINE_integer("batch_size", default=128, help="Maximum batch size.")
flags.DEFINE_integer("validation_batch_size", default=64, help="Maximum batch size during model validation.")
flags.DEFINE_integer("vector_length", default=100, help="Length of entity/relation vector.")
flags.DEFINE_float("margin", default=1.0, help="Margin value in margin-based ranking loss.")
flags.DEFINE_integer("norm", default=1, help="Norm used for calculating dissimilarity metric (usually 1 or 2).")
flags.DEFINE_integer("epochs", default=4000, help="Number of training epochs.")
flags.DEFINE_bool("use_gpu", default=True, help="Flag enabling gpu usage.")
flags.DEFINE_integer("validation_freq", default=10, help="Validate model every X epochs.")
flags.DEFINE_string("checkpoint_path", default="", help="Path to model checkpoint (by default train from scratch).")
flags.DEFINE_string("tensorboard_log_dir", default=os.path.abspath('../resource/runs/'),
help="Path for tensorboard log directory.")
flags.DEFINE_string("checkpoint_folder", default=os.path.abspath('../resource/transe_checkpoint/'),
help='checkpoint folder')
flags.DEFINE_string("load_checkpoint_file", default=None, help='checkpoint file')
if dataset == FB15K:
flags.DEFINE_string("dataset_path", default=os.path.abspath('../resource/FB15k/'), help="Path to dataset.")
elif dataset == WORDNET:
flags.DEFINE_string("dataset_path", default=os.path.abspath('../resource/wordnet/'), help="Path to dataset.")
else:
raise ValueError('Illegal Dataset')
HITS_AT_1_SCORE = float
HITS_AT_3_SCORE = float
HITS_AT_10_SCORE = float
MRR_SCORE = float
METRICS = Tuple[HITS_AT_1_SCORE, HITS_AT_3_SCORE, HITS_AT_10_SCORE, MRR_SCORE]
Mapping = Dict[str, int]
_MODEL_STATE_DICT = "model_state_dict"
_OPTIMIZER_STATE_DICT = "optimizer_state_dict"
_EPOCH = "epoch"
_STEP = "step"
_BEST_SCORE = "best_score"
class TransE(nn.Module):
def __init__(self, entity_count, relation_count, device, norm=1, dim=100, margin=1.0):
super(TransE, self).__init__()
self.entity_count = entity_count
self.relation_count = relation_count
self.device = device
self.norm = norm
self.dim = dim
self.entities_emb = self._init_entity_emb()
self.relations_emb = self._init_relation_emb()
self.criterion = nn.MarginRankingLoss(margin=margin, reduction='none')
def _init_entity_emb(self):
entities_emb = nn.Embedding(num_embeddings=self.entity_count + 1,
embedding_dim=self.dim,
padding_idx=self.entity_count)
uniform_range = 6 / np.sqrt(self.dim)
entities_emb.weight.data.uniform_(-uniform_range, uniform_range)
return entities_emb
def _init_relation_emb(self):
relations_emb = nn.Embedding(num_embeddings=self.relation_count + 1,
embedding_dim=self.dim,
padding_idx=self.relation_count)
uniform_range = 6 / np.sqrt(self.dim)
relations_emb.weight.data.uniform_(-uniform_range, uniform_range)
# -1 to avoid nan for OOV vector
relations_emb.weight.data[:-1, :].div_(relations_emb.weight.data[:-1, :].norm(p=1, dim=1, keepdim=True))
return relations_emb
def forward(self, positive_triplets: torch.LongTensor, negative_triplets: torch.LongTensor):
"""Return model losses based on the input.
:param positive_triplets: triplets of positives in Bx3 shape (B - batch, 3 - head, relation and tail)
:param negative_triplets: triplets of negatives in Bx3 shape (B - batch, 3 - head, relation and tail)
:return: tuple of the model loss, positive triplets loss component, negative triples loss component
"""
# -1 to avoid nan for OOV vector
self.entities_emb.weight.data[:-1, :].div_(self.entities_emb.weight.data[:-1, :].norm(p=2, dim=1, keepdim=True))
assert positive_triplets.size()[1] == 3
positive_distances = self._distance(positive_triplets)
assert negative_triplets.size()[1] == 3
negative_distances = self._distance(negative_triplets)
return self.loss(positive_distances, negative_distances), positive_distances, negative_distances
def predict(self, triplets: torch.LongTensor):
"""Calculated dissimilarity score for given triplets.
:param triplets: triplets in Bx3 shape (B - batch, 3 - head, relation and tail)
:return: dissimilarity score for given triplets
"""
return self._distance(triplets)
def loss(self, positive_distances, negative_distances):
target = torch.tensor([-1], dtype=torch.long, device=self.device)
return self.criterion(positive_distances, negative_distances, target)
def _distance(self, triplets):
"""Triplets should have shape Bx3 where dim 3 are head id, relation id, tail id."""
assert triplets.size()[1] == 3
heads = triplets[:, 0]
relations = triplets[:, 1]
tails = triplets[:, 2]
return (self.entities_emb(heads) + self.relations_emb(relations) - self.entities_emb(tails)).norm(p=self.norm,
dim=1)
def load_checkpoint(checkpoint_path: str, model: nn.Module, optim_: optimizer.Optimizer) -> Tuple[int, int, float]:
"""Loads training checkpoint.
:param checkpoint_path: path to checkpoint
:param model: model to update state
:param optim_: optimizer to update state
:return tuple of starting epoch id, starting step id, best checkpoint score
"""
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint[_MODEL_STATE_DICT])
optim_.load_state_dict(checkpoint[_OPTIMIZER_STATE_DICT])
start_epoch_id = checkpoint[_EPOCH] + 1
step = checkpoint[_STEP] + 1
best_score = checkpoint[_BEST_SCORE]
return start_epoch_id, step, best_score
def save_checkpoint(model: nn.Module, optim_: optimizer.Optimizer, epoch_id: int, step: int, best_score: float,
save_path: str, kg_name: str):
torch.save({
_MODEL_STATE_DICT: model.state_dict(),
_OPTIMIZER_STATE_DICT: optim_.state_dict(),
_EPOCH: epoch_id,
_STEP: step,
_BEST_SCORE: best_score
}, os.path.join(save_path, kg_name+'_'+str(epoch_id)+'_checkpoint.tar'))
def hit_at_k(predictions: torch.Tensor, ground_truth_idx: torch.Tensor, device: torch.device, k: int = 10) -> int:
"""Calculates number of hits@k.
:param predictions: BxN tensor of prediction values where B is batch size and N number of classes. Predictions
must be sorted in class ids order
:param ground_truth_idx: Bx1 tensor with index of ground truth class
:param device: device on which calculations are taking place
:param k: number of top K results to be considered as hits
:return: Hits@K score
"""
assert predictions.size(0) == ground_truth_idx.size(0)
zero_tensor = torch.tensor([0], device=device)
one_tensor = torch.tensor([1], device=device)
_, indices = predictions.topk(k=k, largest=False)
return torch.where(indices == ground_truth_idx, one_tensor, zero_tensor).sum().item()
def cal_mrr(predictions: torch.Tensor, ground_truth_idx: torch.Tensor) -> float:
"""Calculates mean reciprocal rank (MRR) for given predictions and ground truth values.
:param predictions: BxN tensor of prediction values where B is batch size and N number of classes. Predictions
must be sorted in class ids order
:param ground_truth_idx: Bx1 tensor with index of ground truth class
:return: Mean reciprocal rank score
"""
assert predictions.size(0) == ground_truth_idx.size(0)
indices = predictions.argsort()
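    # The column index where the ascending score order matches the ground truth is (rank - 1)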
return (1.0 / (indices == ground_truth_idx).nonzero()[:, 1].float().add(1.0)).sum().item()
def create_mappings(dataset_path: str) -> Tuple[Mapping, Mapping]:
"""Creates separate mappings to indices for entities and relations."""
# counters to have entities/relations sorted from most frequent
entity_counter = Counter()
relation_counter = Counter()
with open(dataset_path, "r") as f:
for line in f:
# -1 to remove newline sign
head, relation, tail = line[:-1].split("\t")
entity_counter.update([head, tail])
relation_counter.update([relation])
entity2id = {}
relation2id = {}
for idx, (mid, _) in enumerate(entity_counter.most_common()):
entity2id[mid] = idx
for idx, (relation, _) in enumerate(relation_counter.most_common()):
relation2id[relation] = idx
return entity2id, relation2id
class FB15KDataset(data.Dataset):
"""Dataset implementation for handling FB15K and FB15K-237."""
def __init__(self, data_path: str, entity2id: Mapping, relation2id: Mapping):
self.entity2id = entity2id
self.relation2id = relation2id
with open(data_path, "r") as f:
# data in tuples (head, relation, tail)
self.data = [line[:-1].split("\t") for line in f]
def __len__(self):
"""Denotes the total number of samples."""
return len(self.data)
def __getitem__(self, index):
"""Returns (head id, relation id, tail id)."""
head, relation, tail = self.data[index]
head_id = self._to_idx(head, self.entity2id)
relation_id = self._to_idx(relation, self.relation2id)
tail_id = self._to_idx(tail, self.entity2id)
return head_id, relation_id, tail_id
@staticmethod
def _to_idx(key: str, mapping: Mapping) -> int:
try:
return mapping[key]
except KeyError:
return len(mapping)
class WordNetDataset(data.Dataset):
"""Dataset implementation for handling WordNet."""
def __init__(self, kg_data: Mapping):
self.data = kg_data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
@staticmethod
def _to_idx(key: str, mapping: Mapping) -> int:
try:
return mapping[key]
except KeyError:
return len(mapping)
def test(model: torch.nn.Module, data_generator: torch_data.DataLoader, entities_count: int,
summary_writer: tensorboard.SummaryWriter, device: torch.device, epoch_id: int, metric_suffix: str,
) -> METRICS:
examples_count = 0.0
hits_at_1 = 0.0
hits_at_3 = 0.0
hits_at_10 = 0.0
mrr = 0.0
entity_ids = torch.arange(end=entities_count, device=device).unsqueeze(0)
for head, relation, tail in data_generator:
current_batch_size = head.size()[0]
head, relation, tail = head.to(device), relation.to(device), tail.to(device)
all_entities = entity_ids.repeat(current_batch_size, 1)
heads = head.reshape(-1, 1).repeat(1, all_entities.size()[1])
relations = relation.reshape(-1, 1).repeat(1, all_entities.size()[1])
tails = tail.reshape(-1, 1).repeat(1, all_entities.size()[1])
# Check all possible tails
triplets = torch.stack((heads, relations, all_entities), dim=2).reshape(-1, 3)
tails_predictions = model.predict(triplets).reshape(current_batch_size, -1)
# Check all possible heads
triplets = torch.stack((all_entities, relations, tails), dim=2).reshape(-1, 3)
heads_predictions = model.predict(triplets).reshape(current_batch_size, -1)
# Concat predictions
predictions = torch.cat((tails_predictions, heads_predictions), dim=0)
ground_truth_entity_id = torch.cat((tail.reshape(-1, 1), head.reshape(-1, 1)))
hits_at_1 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=1)
hits_at_3 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=3)
hits_at_10 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=10)
mrr += cal_mrr(predictions, ground_truth_entity_id)
examples_count += predictions.size()[0]
hits_at_1_score = hits_at_1 / examples_count * 100
hits_at_3_score = hits_at_3 / examples_count * 100
hits_at_10_score = hits_at_10 / examples_count * 100
mrr_score = mrr / examples_count * 100
summary_writer.add_scalar('Metrics/Hits_1/' + metric_suffix, hits_at_1_score, global_step=epoch_id)
summary_writer.add_scalar('Metrics/Hits_3/' + metric_suffix, hits_at_3_score, global_step=epoch_id)
summary_writer.add_scalar('Metrics/Hits_10/' + metric_suffix, hits_at_10_score, global_step=epoch_id)
summary_writer.add_scalar('Metrics/MRR/' + metric_suffix, mrr_score, global_step=epoch_id)
return hits_at_1_score, hits_at_3_score, hits_at_10_score, mrr_score
def main(_):
torch.random.manual_seed(FLAGS.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
batch_size = FLAGS.batch_size
vector_length = FLAGS.vector_length
margin = FLAGS.margin
norm = FLAGS.norm
learning_rate = FLAGS.lr
epochs = FLAGS.epochs
device = torch.device('cuda') if FLAGS.use_gpu else torch.device('cpu')
path = FLAGS.dataset_path
train_generator, validation_generator, test_generator = None, None, None
if dataset == 'FB15K':
train_path = os.path.join(path, "freebase_mtr100_mte100-train.txt")
validation_path = os.path.join(path, "freebase_mtr100_mte100-valid.txt")
test_path = os.path.join(path, "freebase_mtr100_mte100-test.txt")
entity2id, relation2id = create_mappings(train_path)
train_set = FB15KDataset(train_path, entity2id, relation2id)
train_generator = torch_data.DataLoader(train_set, batch_size=batch_size)
validation_set = FB15KDataset(validation_path, entity2id, relation2id)
validation_generator = torch_data.DataLoader(validation_set, batch_size=FLAGS.validation_batch_size)
test_set = FB15KDataset(test_path, entity2id, relation2id)
test_generator = torch_data.DataLoader(test_set, batch_size=FLAGS.validation_batch_size)
print('FB15K data loaded')
elif dataset == WORDNET:
data_obj = pickle.load(open(os.path.join(path, 'wordnet_KG.pkl'), 'rb'))
relation2id = data_obj['relation_idx_dict']
entity2id = data_obj['word_idx_dict']
train_set = WordNetDataset(data_obj['fact_list'])
train_generator = torch_data.DataLoader(train_set, batch_size=batch_size)
print('WordNet data loaded')
else:
        raise ValueError('Illegal Dataset')
model = TransE(entity_count=len(entity2id), relation_count=len(relation2id), dim=vector_length, margin=margin,
device=device, norm=norm) # type: torch.nn.Module
model = model.to(device)
optim_ = optim.SGD(model.parameters(), lr=learning_rate)
summary_writer = tensorboard.SummaryWriter(log_dir=FLAGS.tensorboard_log_dir)
start_epoch_id = 1
step = 0
best_score = 0.0
if FLAGS.load_checkpoint_file is not None:
checkpoint_path = os.path.join(FLAGS.checkpoint_folder, FLAGS.load_checkpoint_file)
start_epoch_id, step, best_score = load_checkpoint(checkpoint_path, model, optim_)
print(model)
    # Training loop
    for epoch_id in range(start_epoch_id, epochs + 1):
        print("Starting epoch: ", epoch_id)
        loss_impacting_samples_count = 0
        samples_count = 0
        model.train()
        for local_heads, local_relations, local_tails in train_generator:
            local_heads, local_relations, local_tails = (local_heads.to(device), local_relations.to(device),
                                                         local_tails.to(device))
            positive_triples = torch.stack((local_heads, local_relations, local_tails), dim=1)
            # Preparing negatives.
            # Generate binary tensor to replace either head or tail. 1 means replace head, 0 means replace tail.
            head_or_tail = torch.randint(high=2, size=local_heads.size(), device=device)
            random_entities = torch.randint(high=len(entity2id), size=local_heads.size(), device=device)
            broken_heads = torch.where(head_or_tail == 1, random_entities, local_heads)
            broken_tails = torch.where(head_or_tail == 0, random_entities, local_tails)
            negative_triples = torch.stack((broken_heads, local_relations, broken_tails), dim=1)
            optim_.zero_grad()
            loss, pd, nd = model(positive_triples, negative_triples)
            loss.mean().backward()
            summary_writer.add_scalar('Loss/train', loss.mean().data.cpu().numpy(), global_step=step)
            summary_writer.add_scalar('Distance/positive', pd.sum().data.cpu().numpy(), global_step=step)
            summary_writer.add_scalar('Distance/negative', nd.sum().data.cpu().numpy(), global_step=step)
            loss = loss.data.cpu()
            loss_impacting_samples_count += loss.nonzero().size()[0]
            samples_count += loss.size()[0]
            optim_.step()
            step += 1
        summary_writer.add_scalar('Metrics/loss_impacting_samples', loss_impacting_samples_count / samples_count * 100,
                                  global_step=epoch_id)
        print('epoch: {}, metrics/loss impacting samples: {}'
              .format(epoch_id, loss_impacting_samples_count / samples_count * 100))
        if epoch_id > 0 and epoch_id % 200 == 0:
            save_checkpoint(model, optim_, epoch_id, step, best_score, FLAGS.checkpoint_folder, dataset)
        if epoch_id % FLAGS.validation_freq == 0:
            model.eval()
            if dataset == FB15K:
                _, _, hits_at_10, _ = test(model=model, data_generator=validation_generator,
                                           entities_count=len(entity2id),
                                           device=device, summary_writer=summary_writer,
                                           epoch_id=epoch_id, metric_suffix="val")
                # `score` is only computed for FB15K, so the best-score check
                # is kept inside this branch to stay runnable for WordNet too.
                score = hits_at_10
                if score > best_score:
                    best_score = score
                    save_checkpoint(model, optim_, epoch_id, step, best_score, FLAGS.checkpoint_folder, dataset)
    if dataset == FB15K:
        # Testing the best checkpoint on the test dataset.
        # Note: this reuses FLAGS.load_checkpoint_file, so it only works when a
        # checkpoint file name was passed in; the path of the best checkpoint
        # saved above is not tracked here.
        load_checkpoint(os.path.join(FLAGS.checkpoint_folder, FLAGS.load_checkpoint_file), model, optim_)
        best_model = model.to(device)
        best_model.eval()
        scores = test(model=best_model, data_generator=test_generator, entities_count=len(entity2id), device=device,
                      summary_writer=summary_writer, epoch_id=1, metric_suffix="test")
        print("Test scores: ", scores)


if __name__ == '__main__':
    app.run(main)
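
# A minimal example invocation (the script name and the --dataset flag are
# assumptions; the remaining flag names are taken from the FLAGS reads above):
#   python train.py --dataset=FB15K --dataset_path=./FB15k \
#       --batch_size=128 --vector_length=50 --epochs=1000 --validation_freq=50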
| 45.727059 | 120 | 0.679016 | 2,533 | 19,434 | 4.96289 | 0.14094 | 0.01241 | 0.006364 | 0.014 | 0.337046 | 0.291464 | 0.233076 | 0.21478 | 0.180574 | 0.150664 | 0 | 0.019029 | 0.215807 | 19,434 | 424 | 121 | 45.834906 | 0.80584 | 0.120408 | 0 | 0.115132 | 0 | 0 | 0.074756 | 0.010417 | 0 | 0 | 0 | 0 | 0.016447 | 1 | 0.072368 | false | 0 | 0.046053 | 0.006579 | 0.190789 | 0.019737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80fb338cada946f58190ce2610fb07b263d9785b | 5,361 | py | Python | arctic_cruise/saildrone_sentinel2_true_color_image_each_usv.py | agonmer/Saildrone | 47e618390be6ef160382bd22bb3bdf2482a8644a | [
"Apache-2.0"
] | 3 | 2019-07-08T11:55:44.000Z | 2021-10-06T15:11:18.000Z | arctic_cruise/saildrone_sentinel2_true_color_image_each_usv.py | agonmer/Saildrone | 47e618390be6ef160382bd22bb3bdf2482a8644a | [
"Apache-2.0"
] | null | null | null | arctic_cruise/saildrone_sentinel2_true_color_image_each_usv.py | agonmer/Saildrone | 47e618390be6ef160382bd22bb3bdf2482a8644a | [
"Apache-2.0"
] | 3 | 2020-06-08T06:29:22.000Z | 2020-06-16T15:43:46.000Z | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import gdal
import urllib.request
import xarray as xr
import numpy as np
# `import time` was immediately shadowed by the `time` name imported from
# datetime below, and neither is used, so both have been dropped.
from datetime import datetime, date, timedelta
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import urllib
import requests
import json
# ## Read in the USV daily files and your instance code from a text file
# In[4]:
#read in USV data
file_dir = 'F:/data/cruise_data/saildrone/2019_arctic/daily_files/*.nc'
ds_usv = xr.open_mfdataset(file_dir,data_vars='minimal')
ds_usv.load()
#not using this right now, but consider putting the instance here
def get_key(file_name):
    myvars = {}
    with open(file_name) as myfile:
        for line in myfile:
            name, var = line.partition("=")[::2]
            myvars[name.strip()] = str(var).rstrip()
    return myvars
file_key = "C:/Users/gentemann/Google Drive/f_drive/secret_keys/sentinelhub_bingkun.txt"
my_vars = get_key(file_key)
file_key = "C:/Users/gentemann/Google Drive/f_drive/secret_keys/saildrone.txt"
saildrone_key = get_key(file_key)
# ## Use restful API to get USV locations
# In[5]:
endtime = datetime.today().strftime('%Y-%m-%d')
starttime = (datetime.today() + timedelta(days=-5)).strftime('%Y-%m-%d')
#all_usv = ['1041','1033','1034','1035','1036','1037']
all_usv = ['1034','1035','1036','1037']
#get token
payload={'key': saildrone_key['key'], 'secret':saildrone_key['secret']}
headers={'Content-Type':'application/json', 'Accept':'application/json'}
url = 'https://developer-mission.saildrone.com/v1/auth'
res = requests.post(url, json=payload, headers=headers)
json_data = json.loads(res.text)
names=[]
ilen = 500 #len(usv_data['data'])
usv_lats = np.empty((ilen,4))*np.nan
usv_lons = np.empty((ilen,4))*np.nan
usv_time = np.empty((ilen,4))*np.nan
for iusv in range(4):
    str_usv = all_usv[iusv]
    url = 'https://developer-mission.saildrone.com/v1/timeseries/'+str_usv+'?data_set=vehicle&interval=5&start_date='+starttime+'&end_date='+endtime+'&order_by=desc&limit=500&offset=0'
    payload = {}
    headers = {'Accept':'application/json','authorization':json_data['token']}
    res = requests.get(url, json=payload, headers=headers)
    usv_data = json.loads(res.text)
    #print(usv_data.data)
    for i in range(ilen):
        usv_lons[i,iusv]=usv_data['data'][i]['gps_lng']
        usv_lats[i,iusv]=usv_data['data'][i]['gps_lat']
        usv_time[i,iusv]=usv_data['data'][i]['gps_time']
    names.append(str_usv)
xlons = xr.DataArray(usv_lons,coords={'time':usv_time[:,0],'trajectory':names},dims=('time','trajectory'))
xlats = xr.DataArray(usv_lats,coords={'time':usv_time[:,0],'trajectory':names},dims=('time','trajectory'))
ds_usv = xr.Dataset({'lon': xlons,'lat':xlats})
# In[6]:
#plot the usv tracks
#for iusv in range(4):
# plt.plot(ds_usv.lon[:,iusv],ds_usv.lat[:,iusv],label=ds_usv.trajectory[iusv].data)
#plt.legend()
# In[19]:
endtime = datetime.today().strftime('%Y-%m-%d')
starttime = (datetime.today() + timedelta(days=-5)).strftime('%Y-%m-%d')
#use usv data to calculate bounding box
for iusv in range(4):
    subset = ds_usv.isel(trajectory=iusv)
    subset = subset.where(np.isfinite(subset.lon),drop=True)
    lonmin,lonmax = str(subset.lon[0].data-1),str(subset.lon[0].data+1)
    latmin,latmax = str(subset.lat[0].data-1),str(subset.lat[0].data+1)
    #print(lonmin,lonmax,latmin,latmax)
    url = 'https://services.sentinel-hub.com/ogc/wms/'+my_vars["instance"]+'?SERVICE=WMS&REQUEST=GetMap&SHOWLOGO=true&MAXCC=100&TIME='+starttime+'%2F'+endtime+'&CRS=EPSG%3A4326&FORMAT=image%2Ftiff%3Bdepth%3D8&BBOX='+latmax+'%2C'+lonmax+'%2C'+latmin+'%2C'+lonmin+'&evalscriptoverrides=&LAYERS=1_TRUE_COLOR&WIDTH=4076&HEIGHT=1989&NICENAME=S1.tiff&COVERAGE'
    urllib.request.urlretrieve(url,'S11.tiff')
    #Open S1 ice file
    driver=gdal.GetDriverByName('GTiff')
    driver.Register()
    ds = gdal.Open('S11.tiff')
    if ds is None:
        print('Could not open the Copernicus Sentinel-1 ice data')
        continue  # skip this USV if the download could not be opened (missing in the original)
    geotransform = ds.GetGeoTransform()
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    xmin=geotransform[0]
    ymax=geotransform[3]
    xmax=xmin+cols*geotransform[1]
    ymin=ymax+rows*geotransform[5]
    centerx=(xmin+xmax)/2
    centery=(ymin+ymax)/2
    #Raster convert to array in numpy
    bands = ds.RasterCount
    band=ds.GetRasterBand(1)
    dataimage= band.ReadAsArray(0,0,cols,rows)
    print(xmin,xmax,ymin,ymax)
    xx=xmin+np.arange(dataimage.shape[1])/dataimage.shape[1]*(xmax-xmin)
    yy=ymin+np.arange(dataimage.shape[0])/dataimage.shape[0]*(ymax-ymin)
    #print(xx.shape,yy.shape)
    #print(xx[0],xx[-1],yy[0],yy[-1])
    fig = plt.figure(figsize=(8, 8), dpi=400)
    plt.pcolormesh(xx,yy,dataimage[-1:0:-1,:])# ,vmin=10,vmax=200)
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    for itrag in range(0,ds_usv.trajectory.size):
        subset = ds_usv.isel(trajectory=itrag)
        subset = subset.where(np.isfinite(subset.lon),drop=True)
        plt.plot(subset.lon,subset.lat,label=str(ds_usv.trajectory[itrag].data))
    plt.grid(color='w')
    plt.legend(loc=2)
    #plt.colorbar()
    fig_fname = 'C:/Users/gentemann/Google Drive/public/Saildrone/arctic_zoom_'+str(ds_usv.trajectory[iusv].data)+'_'+str(endtime)+'.png'
    plt.savefig(fig_fname, transparent=False, format='png',dpi=400)
# In[ ]:
| 34.587097 | 355 | 0.688864 | 826 | 5,361 | 4.380145 | 0.341404 | 0.015202 | 0.015202 | 0.012161 | 0.268657 | 0.195965 | 0.171365 | 0.12272 | 0.12272 | 0.098397 | 0 | 0.031572 | 0.131505 | 5,361 | 154 | 356 | 34.811688 | 0.74549 | 0.132438 | 0 | 0.084211 | 0 | 0.010526 | 0.226436 | 0.114843 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010526 | false | 0 | 0.126316 | 0 | 0.147368 | 0.021053 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80fbb0bb49bd69b0d4c6b4cd19dfc72ce812fabe | 896 | py | Python | tests/snapshots/snapshots/snap_test_query_snapshots.py | adeoke/django-quarantine-workout-graphql | 7d53bb17f8ee9e5276b496d00ff92c4b458af31f | [
"MIT"
] | 1 | 2020-06-01T11:41:52.000Z | 2020-06-01T11:41:52.000Z | tests/snapshots/snapshots/snap_test_query_snapshots.py | adeoke/django-quarantine-workout-graphql | 7d53bb17f8ee9e5276b496d00ff92c4b458af31f | [
"MIT"
] | 5 | 2020-06-06T15:14:21.000Z | 2021-06-10T19:25:55.000Z | tests/snapshots/snapshots/snap_test_query_snapshots.py | adeoke/django-quarantine-workout-graphql | 7d53bb17f8ee9e5276b496d00ff92c4b458af31f | [
"MIT"
] | 1 | 2022-01-19T22:17:44.000Z | 2022-01-19T22:17:44.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TestAppQueriesSnapshot::test_equipment_against_snapshot equipment_snapshot_resp'] = {
    'data': {
        'levels': [
            {
                'difficulty': 'beginner'
            },
            {
                'difficulty': 'intermediate'
            },
            {
                'difficulty': 'advanced'
            }
        ]
    }
}

snapshots['TestAppQueriesSnapshot::test_levels_response_against_snapshot levels_snapshot_resp'] = {
    'data': {
        'levels': [
            {
                'difficulty': 'beginner'
            },
            {
                'difficulty': 'intermediate'
            },
            {
                'difficulty': 'advanced'
            }
        ]
    }
}
| 21.853659 | 99 | 0.482143 | 55 | 896 | 7.563636 | 0.509091 | 0.081731 | 0.168269 | 0.105769 | 0.384615 | 0.384615 | 0.384615 | 0.384615 | 0.384615 | 0.384615 | 0 | 0.005597 | 0.401786 | 896 | 40 | 100 | 22.4 | 0.770522 | 0.069196 | 0 | 0.30303 | 0 | 0 | 0.357401 | 0.167268 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.060606 | 0 | 0.060606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80fc711d831d4747a33d2fccf9dfb1487eab792f | 2,626 | py | Python | src/beehive/controls/datamaker.py | i-pan/honeycomb | 1f5f8c6c37f2a66b3109412e803994a9dc6ad10d | [
"MIT"
] | 2 | 2020-11-09T05:32:59.000Z | 2021-07-06T07:51:50.000Z | src/beehive/controls/datamaker.py | i-pan/honeycomb | 1f5f8c6c37f2a66b3109412e803994a9dc6ad10d | [
"MIT"
] | null | null | null | src/beehive/controls/datamaker.py | i-pan/honeycomb | 1f5f8c6c37f2a66b3109412e803994a9dc6ad10d | [
"MIT"
] | 1 | 2020-12-10T19:20:22.000Z | 2020-12-10T19:20:22.000Z | import logging
import numpy as np
import pandas as pd
import os, os.path as osp
from ..builder import build_dataset, build_dataloader
def get_train_val_test_splits(cfg, df):
    if 'split' in df.columns and cfg.data.use_fixed_splits:
        train_df = df[df.split == 'train']
        valid_df = df[df.split == 'valid']
        test_df = df[df.split == 'test']
        return train_df, valid_df, test_df
    i, o = cfg.data.inner_fold, cfg.data.outer_fold
    if isinstance(i, (int, float)):
        if cfg.local_rank == 0:
            logger = logging.getLogger('root')
            logger.info(f'<inner fold> : {i}')
            logger.info(f'<outer fold> : {o}')
        test_df = df[df.outer == o]
        df = df[df.outer != o]
        train_df = df[df[f'inner{o}'] != i]
        valid_df = df[df[f'inner{o}'] == i]
        valid_df = valid_df.drop_duplicates().reset_index(drop=True)
        test_df = test_df.drop_duplicates().reset_index(drop=True)
    else:
        if cfg.local_rank == 0:
            logger = logging.getLogger('root')
            logger.info('No inner fold specified ...')
            logger.info(f'<outer fold> : {o}')
        test_df = None
        train_df = df[df.outer != o]
        valid_df = df[df.outer == o]
        valid_df = valid_df.drop_duplicates().reset_index(drop=True)
    return train_df, valid_df, test_df
def prepend_filepath(lst, prefix):
    return np.asarray([osp.join(prefix, item) for item in lst])
def get_train_val_dataloaders(cfg):
    INPUT_COL = cfg.data.input or 'filename'
    LABEL_COL = cfg.data.target or 'Target'
    df = pd.read_csv(cfg.data.annotations)
    train_df, valid_df, _ = get_train_val_test_splits(cfg, df)
    data_dir = cfg.data.data_dir
    train_inputs = prepend_filepath(train_df[INPUT_COL], data_dir)
    train_labels = train_df[LABEL_COL].values
    valid_inputs = prepend_filepath(valid_df[INPUT_COL], data_dir)
    valid_labels = valid_df[LABEL_COL].values
    train_dataset = build_dataset(cfg,
                                  data_info=dict(inputs=train_inputs, labels=train_labels),
                                  mode='train')
    valid_dataset = build_dataset(cfg,
                                  data_info=dict(inputs=valid_inputs, labels=valid_labels),
                                  mode='valid')
    if cfg.local_rank == 0:
        logger = logging.getLogger('root')
        logger.info(f'TRAIN : n={len(train_dataset)}')
        logger.info(f'VALID : n={len(valid_dataset)}')
    train_loader = build_dataloader(cfg,
                                    dataset=train_dataset,
                                    mode='train')
    valid_loader = build_dataloader(cfg,
                                    dataset=valid_dataset,
                                    mode='valid')
    return train_loader, valid_loader
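

# A minimal usage sketch (hypothetical, for illustration: any config object
# exposing the cfg.data.* fields and cfg.local_rank read above would work):
#
#   train_loader, valid_loader = get_train_val_dataloaders(cfg)
#   for inputs, labels in train_loader:
#       ...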
| 33.240506 | 68 | 0.640518 | 380 | 2,626 | 4.184211 | 0.223684 | 0.045283 | 0.033962 | 0.027673 | 0.439623 | 0.365409 | 0.365409 | 0.260377 | 0.176101 | 0.155346 | 0 | 0.001493 | 0.234958 | 2,626 | 78 | 69 | 33.666667 | 0.789945 | 0 | 0 | 0.258065 | 0 | 0 | 0.084604 | 0.016768 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.080645 | 0.016129 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
037dc06068dc0af9522cd2267f30af8e21e1a997 | 376 | py | Python | Previous_State_On_Repo/WordLevelDataAndProcessing/English/toggler.py | rohun-tripati/pythonRepo | 91a7d536f7be05adc15e4d5add0a8a4a08c28c62 | [
"Unlicense"
] | 1 | 2018-06-25T19:20:48.000Z | 2018-06-25T19:20:48.000Z | Previous_State_On_Repo/WordLevelDataAndProcessing/English/toggler.py | rohun-tripati/pythonRepo | 91a7d536f7be05adc15e4d5add0a8a4a08c28c62 | [
"Unlicense"
] | null | null | null | Previous_State_On_Repo/WordLevelDataAndProcessing/English/toggler.py | rohun-tripati/pythonRepo | 91a7d536f7be05adc15e4d5add0a8a4a08c28c62 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import os, sys, time
import Image
import pprint
import numpy as np
def imgtotoggle(path, outpath):
    # Convert an image to pure black-and-white: grayscale conversion
    # followed by a fixed threshold at 200.
    image = Image.open(path)
    image = image.convert("L")
    pixels = image.load()
    size = image.size
    for j in range(size[0]):
        for i in range(size[1]):
            if pixels[j, i] < 200:
                pixels[j, i] = 0
            else:
                pixels[j, i] = 255
    image.save(outpath) | 16.347826 | 32 | 0.643617 | 63 | 376 | 3.84127 | 0.555556 | 0.086777 | 0.099174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030405 | 0.212766 | 376 | 23 | 33 | 16.347826 | 0.787162 | 0.053191 | 0 | 0 | 0 | 0 | 0.002809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.3125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
038103ef2240aabfd8cc0c60138c47f1f616f668 | 1,578 | py | Python | libs/Python_Server.py | yingshaoxo/E_Python | 1d971e5e16681a5a641d5362e1f2f719fd828fd5 | [
"MIT"
] | 17 | 2018-08-24T14:34:20.000Z | 2021-02-24T02:40:14.000Z | libs/Python_Server.py | yingshaoxo/E_Python | 1d971e5e16681a5a641d5362e1f2f719fd828fd5 | [
"MIT"
] | 1 | 2018-06-20T19:11:19.000Z | 2018-06-21T13:47:00.000Z | libs/Python_Server.py | yingshaoxo/E_Python | 1d971e5e16681a5a641d5362e1f2f719fd828fd5 | [
"MIT"
] | 6 | 2019-03-24T15:34:50.000Z | 2021-09-30T07:42:20.000Z | import sys
import os
import subprocess
EXEC = sys.executable #local pythonw.exe
def run_py_file(py_path):
    result = subprocess.run([EXEC, py_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    return str(result.stdout)
def run_py_codes(py_codes):
    # WARNING: this evaluates/executes arbitrary code received over HTTP;
    # only run it in a trusted, sandboxed environment.
    codes = str(py_codes)
    if codes.count('print(') == 0 and codes.count('import ') == 0:
        try:
            result = str(eval(codes))
        except Exception:
            result = 'error'
        return result
    else:
        py_path = os.path.dirname(os.path.realpath(__file__)) + '\\codes.txt'
        code_bytes = codes.encode('utf-8', 'ignore')
        open(py_path, 'wb').write(code_bytes)
        result = str(run_py_file(py_path))
        return result
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def home_page():  # http://127.0.0.1:5000
    return 'POST codes to http://127.0.0.1:5000/Python/'
@app.route('/Python/', methods = ['POST', 'GET'])
def run_python():
    if request.method == 'GET':  # http://127.0.0.1:5000/Python
        return 'Only support POST!'
    elif request.method == 'POST':  # POST codes to http://127.0.0.1:5000/Python/
        try:
            codes = request.data.decode('utf-8')
        except Exception:
            codes = request.data.decode('gb2312')
        print(codes)
        if codes == '':
            return 'You give me nothing!'
        else:
            return run_py_codes(codes)
@app.errorhandler(500)
def handle_bad_request(error):  # Flask passes the error object to 500 handlers
    return '500\nInternal Server Error'
if __name__ == '__main__':
    app.run()
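
# A minimal client-side sketch (assumes the server above is running locally on
# Flask's default port):
#
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/Python/', data=b'1 + 1')
#   print(resp.text)  # -> '2'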
| 27.206897 | 119 | 0.612801 | 215 | 1,578 | 4.32093 | 0.376744 | 0.032293 | 0.034446 | 0.038751 | 0.13563 | 0.103337 | 0.088267 | 0.066738 | 0.066738 | 0.066738 | 0 | 0.045075 | 0.240811 | 1,578 | 57 | 120 | 27.684211 | 0.730384 | 0.069075 | 0 | 0.177778 | 0 | 0 | 0.130375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0.044444 | 0.4 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0382db25417fae1cb9a82dae1650705be38a3663 | 2,062 | py | Python | paramikosshrsa/learntossh_updated_Sam.py | ryankimball/pyna | 86b67e775925953abdc8d3ecaf786dda08d91bfa | [
"MIT"
] | null | null | null | paramikosshrsa/learntossh_updated_Sam.py | ryankimball/pyna | 86b67e775925953abdc8d3ecaf786dda08d91bfa | [
"MIT"
] | null | null | null | paramikosshrsa/learntossh_updated_Sam.py | ryankimball/pyna | 86b67e775925953abdc8d3ecaf786dda08d91bfa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
## std library imports on top
import os
## 3rd party imports below
import paramiko
## work assigned to a junior programming asset on our team
from jrprogrammer import cmdissue
def get_creds():
    ip = input("IP: ")
    user = input("Username: ")
    # return [ip, user]
    # return {"ip": ip, "user": user}
    return (ip, user)


def get_cmds() -> list:
    cmds = ["ls", "ps", "ls -a", "date"]
    my_cmds = []
    while True:
        print(f"Commands to Run: {my_cmds}")
        cmd = int(input("Which command would you like to run?\n1: {}\n2: {}\n3: {}\n4: {}\nPress 5 to exit\n".format(*cmds)))
        if cmd == 5:
            break
        my_cmds.append(cmds[cmd-1])
    return my_cmds
    # while True:
    #     try:
    #         cmd = input("What command would you like to run? Press Ctrl C to quit\n")
    #         if cmd == "":
    #             break
    #         cmds.append(cmd)
    #     except KeyboardInterrupt as err:
    #         print(err)
    #         break
    # return cmds
def main():
    ## create session object
    sshsession = paramiko.SSHClient()
    sshsession.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    mykey = paramiko.RSAKey.from_private_key_file("/home/student/.ssh/id_rsa")
    ## create SSH connection
    while True:
        try:
            creds = get_creds()
            host = creds[0]
            user = creds[1]
            sshsession.connect(hostname=host, username=user, pkey=mykey)
            #our_commands = ["touch sshworked.txt", "touch create.txt", "touch file3.txt", "ls"]
            our_commands = get_cmds()
            for x in our_commands:
                ## call our imported function and save the value returned
                resp = cmdissue(x, sshsession)
                ## if returned response is not null, print it out
                if resp != "":
                    print(resp)
            ## end the SSH connection
            sshsession.close()
        except KeyboardInterrupt as err:
            print("Lunch time!!!")
            break


if __name__ == '__main__':
    main()
| 26.779221 | 125 | 0.56741 | 255 | 2,062 | 4.482353 | 0.509804 | 0.020997 | 0.020997 | 0.026247 | 0.099738 | 0.041995 | 0 | 0 | 0 | 0 | 0 | 0.008457 | 0.311833 | 2,062 | 76 | 126 | 27.131579 | 0.79704 | 0.344326 | 0 | 0.105263 | 0 | 0.026316 | 0.13767 | 0.018911 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.078947 | 0 | 0.210526 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0384d896ee9aa5e8128b188d58ec411b0796b93b | 4,909 | py | Python | src/elchempy/indexer/EC_index.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | 3 | 2022-01-04T09:06:15.000Z | 2022-03-05T08:24:01.000Z | src/elchempy/indexer/EC_index.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | null | null | null | src/elchempy/indexer/EC_index.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | 1 | 2022-03-05T12:17:49.000Z | 2022-03-05T12:17:49.000Z | """ Collects the files of index files of a folder"""
from pathlib import Path
from collections import Counter
from functools import wraps
import datetime
from typing import Tuple, List, Dict, Union, Collection
# import re
# import copy
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)  # attach the handler (it was created but never attached in the original)
from elchempy.indexer.data import DATABASE
# from elchempy.dataloaders.files_func_collector import run_func_on_files
# from elchempy.experiments.dataloaders.fetcher import ElChemData
from elchempy.indexer.helpers import find_relevant_files_in_folder
from elchempy.indexer.creator import create_index
### for Developing
from elchempy.config import LOCAL_FILES, RAW_DATA_DIR, DEST_DATA_DIR
### 3rd Party imports
import pandas as pd
from pyarrow import ArrowInvalid
#%%
class ElChemIndex:
    """
    Creates an index of files in a given folder.

    Collects the parser instances for a list of files.
    Can include metadata from file introspection or only from parsing the filename.
    """

    supported_store_types = ["feather", "sqlite"]

    def __init__(
        self,
        folder,
        dest_dir=None,
        include_metadata=False,
        multi_run=False,
        store_type="feather",
        force_reload=False,
    ):
        self._folder = Path(folder)
        self._dest_dir = Path(dest_dir)
        self._multi_run = multi_run
        self._include_metadata = include_metadata
        self._store_type = store_type
        self._force_reload = force_reload
        self.files = find_relevant_files_in_folder(self._folder)
        # = files#[500::]
        self.store_file = self.get_store_file(
            store_type=self._store_type, dest_dir=self._dest_dir
        )
        loaded_index = self.load_index(self.store_file)
        if isinstance(loaded_index, pd.DataFrame) and not self._force_reload:
            index = loaded_index
            ecpps, ecds = None, None  # class instance objects are not reloaded
        else:
            index, ecpps, ecds = create_index(
                self.files,
                multi_run=self._multi_run,
                include_metadata=self._include_metadata,
            )
            self.store_index(index, self.store_file, overwrite=True)
        self.index = index
        self.ecpps = ecpps
        self.ecds = ecds
    def add_methods(self):
        """For persistence and loading of this 'collection' in e.g. a database or pkl file."""

    def get_store_file(self, store_type="", dest_dir=None, filename="index"):
        if not (store_type and dest_dir):
            return None
        if store_type not in self.supported_store_types:
            logger.warning(f"store type {store_type} is not supported")
            return None
        if dest_dir.is_file():
            dest_dir = dest_dir.parent
        daily_filepath = dest_dir.joinpath(f"{datetime.date.today()}_{filename}")
        store_file = None  # stays None for store types without a writer yet (e.g. sqlite)
        if "feather" in store_type:
            store_file = daily_filepath.with_suffix(".feather")
        return store_file
    def store_index(self, index, store_file: Path = None, overwrite=False):
        if not store_file:
            logger.warning(f"No store file given: {store_file}")
            return None
        if not isinstance(index, pd.DataFrame):
            logger.warning(f"Index type is not pd.DataFrame: {type(index)}")
            return None
        if store_file.exists() and not overwrite:
            logger.warning(f"Index file exists and will not be overwritten.")
            return None
        index = index.reset_index()
        try:
            index.to_feather(store_file)
        except ArrowInvalid as exc:
            logger.error(f"error to_feather: {store_file}\n{exc}")
        logger.info(f"Index saved to : {store_file}")
    def load_index(self, store_file: Path = None):
        # guard against store_file being None (get_store_file can return None)
        if store_file is None or not store_file.exists():
            logger.warning(f"Store file does not exist: {store_file}")
            return None
        try:
            index = pd.read_feather(store_file)
        except ArrowInvalid as exc:
            logger.error(f"error read_feather: {store_file}\n{exc}")
            index = None
        logger.info(f"Index loaded from : {store_file}")
        return index
#%%
def _dev_testing():
    # Dev scratch helper: `self` and ElChemPathParser are expected to exist in
    # the interactive (#%% cell) session; this is not runnable on its own.
    files = self.files
    ElChemPathParser(files[159])
if __name__ == "__main__":
folder = LOCAL_FILES[0].parent.parent
# folder = '/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT'
ecppcol = ElChemIndex(
RAW_DATA_DIR,
dest_dir=DEST_DATA_DIR,
multi_run=True,
include_metadata=True,
force_reload=False,
)
self = ecppcol
idx = self.index
# print(self.index.ecpp_token_remainder.values)
# print(self.index['ecpp_token_remainder'].dropna().unique())
| 29.220238 | 94 | 0.65716 | 625 | 4,909 | 4.9312 | 0.2784 | 0.064244 | 0.016872 | 0.017521 | 0.113563 | 0.057106 | 0.03634 | 0.03634 | 0.03634 | 0.03634 | 0 | 0.002195 | 0.257486 | 4,909 | 167 | 95 | 29.39521 | 0.843347 | 0.161744 | 0 | 0.114286 | 0 | 0 | 0.103941 | 0.008374 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.114286 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0385d5fe370d2f68ea94283b4a443ec6d8a7e020 | 4,102 | py | Python | inference_evaluation/inference_utils/image_utils.py | embedded-machine-learning/eml-tools | 9c9d12f9b970a42360bc6ca350f3b67ad822b141 | [
"Apache-2.0"
] | null | null | null | inference_evaluation/inference_utils/image_utils.py | embedded-machine-learning/eml-tools | 9c9d12f9b970a42360bc6ca350f3b67ad822b141 | [
"Apache-2.0"
] | null | null | null | inference_evaluation/inference_utils/image_utils.py | embedded-machine-learning/eml-tools | 9c9d12f9b970a42360bc6ca350f3b67ad822b141 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Image handling utilities
License_info:
# ==============================================================================
# ISC License (ISC)
# Copyright 2020 Christian Doppler Laboratory for Embedded Machine Learning
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# The following script uses several method fragments from Tensorflow
https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_pascal_tf_record.py
Tensorflow has the following licence:
# ==============================================================================
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
# Futures
from __future__ import print_function
# Built-in/Generic Imports
import os
# Libs
import numpy as np
import re
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
from six import BytesIO
# Own modules
__author__ = 'Alexander Wendt'
__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \
                'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'ISC'
__version__ = '0.2.0'
__maintainer__ = 'Alexander Wendt'
__email__ = 'alexander.wendt@tuwien.ac.at'
__status__ = 'Experiental'
def load_image_into_numpy_array(path):
    """Load an image from file into a numpy array.

    Puts image into numpy array to feed into tensorflow graph.
    Note that by convention we put it into a numpy array with shape
    (height, width, channels), where channels=3 for RGB.

    Args:
        path: a file path (this can be local or on colossus)

    Returns:
        uint8 numpy array with shape (img_height, img_width, 3)
    """
    img_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(img_data))
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
def get_images_name(image_folder):
    image_folder = image_folder.replace('\\', '/')
    image_names = [f for f in os.listdir(image_folder)
                   if re.search(r'([a-zA-Z0-9\s_\\.\-\(\):])+(.jpg|.jpeg|.png)$', f)]
    return image_names
def show_save_figure(fig, output_dir=None, filename=None, show_image=True):
    '''
    Show and save an image.

    :param
        output_dir: Directory to put image
        filename: Filename to use. No file ending necessary. Png will be used. If None, then image is not saved.
            If both filename and output_dir are set, the image will be saved.
        show_image: Show image as non blocking. Default: True
    '''
    if filename:
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        fig.savefig(os.path.join(output_dir, filename))
    if show_image:
        plt.show(block=False)
        plt.pause(0.1)
        plt.close() | 35.059829 | 115 | 0.684057 | 572 | 4,102 | 4.776224 | 0.472028 | 0.021962 | 0.016105 | 0.02123 | 0.047584 | 0.047584 | 0.047584 | 0.047584 | 0.047584 | 0 | 0 | 0.008603 | 0.178206 | 4,102 | 117 | 116 | 35.059829 | 0.801839 | 0.655046 | 0 | 0 | 0 | 0 | 0.149554 | 0.054315 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.216216 | 0 | 0.351351 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
038616b45c12c7c5aab0395285a81ab4509f1b05 | 2,013 | py | Python | molecule/upgrade/tests/test_default.py | cmu-sei/ansible-role-yaf | 5fed4a5805e0751e37d8b05d459556b9367041e6 | [
"Apache-2.0"
] | null | null | null | molecule/upgrade/tests/test_default.py | cmu-sei/ansible-role-yaf | 5fed4a5805e0751e37d8b05d459556b9367041e6 | [
"Apache-2.0"
] | 1 | 2021-02-22T15:48:44.000Z | 2021-02-22T15:48:44.000Z | molecule/upgrade/tests/test_default.py | cmu-sei/ansible-role-yaf | 5fed4a5805e0751e37d8b05d459556b9367041e6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Carnegie Mellon University.
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING
# INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON
# UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS
# TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE
# OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE
# MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
# WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
# Released under a MIT (SEI)-style license, please see license.txt or contact
# permission@sei.cmu.edu for full terms.
# [DISTRIBUTION STATEMENT A] This material has been approved for public release
# and unlimited distribution. Please see Copyright notice for non-US
# Government use and distribution.
# CERT is registered in the U.S. Patent and Trademark Office by Carnegie Mellon
# University.
# This Software includes and/or makes use of the following Third-Party Software
# subject to its own license:
# 1. ansible (https://github.com/ansible/ansible/tree/devel/licenses) Copyright
# 2019 Red Hat, Inc.
# 2. molecule
# (https://github.com/ansible-community/molecule/blob/master/LICENSE) Copyright
# 2018 Red Hat, Inc.
# 3. testinfra (https://github.com/philpep/testinfra/blob/master/LICENSE)
# Copyright 2020 Philippe Pepiot.
# DM20-0509
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


@pytest.mark.parametrize('svc', [
    'yaf'
])
def test_svc(host, svc):
    service = host.service(svc)
    assert service.is_running
    # assert service.is_enabled


def test_yaf_version(host):
    version = "2.12.1"
    command = """PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig pkg-config \
--modversion libyaf"""
    cmd = host.run(command)
    assert version in cmd.stdout
| 38.711538 | 87 | 0.762047 | 286 | 2,013 | 5.311189 | 0.548951 | 0.046083 | 0.078999 | 0.02765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016959 | 0.150522 | 2,013 | 51 | 88 | 39.470588 | 0.871345 | 0.690512 | 0 | 0 | 0 | 0 | 0.243289 | 0.134228 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03895f641c1dcc508acc7bfd2ad2e29b29a5ad04 | 18,329 | py | Python | fuel_agent/fuel_agent/tests/test_nailgun.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | 1 | 2021-04-06T16:13:35.000Z | 2021-04-06T16:13:35.000Z | fuel_agent/fuel_agent/tests/test_nailgun.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | null | null | null | fuel_agent/fuel_agent/tests/test_nailgun.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent.drivers import nailgun
from fuel_agent import errors
from fuel_agent.utils import hardware_utils as hu
PROVISION_SAMPLE_DATA = {
    "profile": "ubuntu_1204_x86_64",
    "name_servers_search": "\"domain.tld\"",
    "uid": "1",
    "interfaces": {
        "eth2": {
            "static": "0",
            "mac_address": "08:00:27:b1:d7:15"
        },
        "eth1": {
            "static": "0",
            "mac_address": "08:00:27:46:43:60"
        },
        "eth0": {
            "ip_address": "10.20.0.3",
            "dns_name": "node-1.domain.tld",
            "netmask": "255.255.255.0",
            "static": "0",
            "mac_address": "08:00:27:79:da:80"
        }
    },
    "interfaces_extra": {
        "eth2": {
            "onboot": "no",
            "peerdns": "no"
        },
        "eth1": {
            "onboot": "no",
            "peerdns": "no"
        },
        "eth0": {
            "onboot": "yes",
            "peerdns": "no"
        }
    },
    "power_type": "ssh",
    "power_user": "root",
    "kernel_options": {
        "udevrules": "08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,"
                     "08:00:27:b1:d7:15_eth2",
        "netcfg/choose_interface": "08:00:27:79:da:80"
    },
    "power_address": "10.20.0.253",
    "name_servers": "\"10.20.0.2\"",
    "ks_meta": {
        "timezone": "America/Los_Angeles",
        "master_ip": "10.20.0.2",
        "mco_enable": 1,
        "mco_vhost": "mcollective",
        "mco_pskey": "unset",
        "mco_user": "mcollective",
        "puppet_enable": 0,
        "fuel_version": "5.0.1",
        "install_log_2_syslog": 1,
        "mco_password": "marionette",
        "puppet_auto_setup": 1,
        "puppet_master": "fuel.domain.tld",
        "mco_auto_setup": 1,
        "auth_key": "fake_auth_key",
        "pm_data": {
            "kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90 "
                             "nomodeset",
            "ks_spaces": [
                {
                    "name": "sda",
                    "extra": [
                        "disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-"
                        "b385c7cd",
                        "disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd"
                    ],
                    "free_space": 64907,
                    "volumes": [
                        {
                            "type": "boot",
                            "size": 300
                        },
                        {
                            "mount": "/boot",
                            "size": 200,
                            "type": "raid",
                            "file_system": "ext2",
                            "name": "Boot"
                        },
                        {
                            "mount": "/tmp",
                            "size": 200,
                            "type": "partition",
                            "file_system": "ext2",
                            "partition_guid": "fake_guid",
                            "name": "TMP"
                        },
                        {
                            "type": "lvm_meta_pool",
                            "size": 0
                        },
                        {
                            "size": 19438,
                            "type": "pv",
                            "lvm_meta_size": 64,
                            "vg": "os"
                        },
                        {
                            "size": 45597,
                            "type": "pv",
                            "lvm_meta_size": 64,
                            "vg": "image"
                        }
                    ],
                    "type": "disk",
                    "id": "sda",
                    "size": 65535
                },
                {
                    "name": "sdb",
                    "extra": [
                        "disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-"
                        "708af674",
                        "disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674"
                    ],
                    "free_space": 64907,
                    "volumes": [
                        {
                            "type": "boot",
                            "size": 300
                        },
                        {
                            "mount": "/boot",
                            "size": 200,
                            "type": "raid",
                            "file_system": "ext2",
                            "name": "Boot"
                        },
                        {
                            "type": "lvm_meta_pool",
                            "size": 64
                        },
                        {
                            "size": 0,
                            "type": "pv",
                            "lvm_meta_size": 0,
                            "vg": "os"
                        },
                        {
                            "size": 64971,
                            "type": "pv",
                            "lvm_meta_size": 64,
                            "vg": "image"
                        }
                    ],
                    "type": "disk",
                    "id": "sdb",
                    "size": 65535
                },
                {
                    "name": "sdc",
                    "extra": [
                        "disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-"
                        "84e74fdf",
                        "disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf"
                    ],
                    "free_space": 64907,
                    "volumes": [
                        {
                            "type": "boot",
                            "size": 300
                        },
                        {
                            "mount": "/boot",
                            "size": 200,
                            "type": "raid",
                            "file_system": "ext2",
                            "name": "Boot"
                        },
                        {
                            "type": "lvm_meta_pool",
                            "size": 64
                        },
                        {
                            "size": 0,
                            "type": "pv",
                            "lvm_meta_size": 0,
                            "vg": "os"
                        },
                        {
                            "size": 64971,
                            "type": "pv",
                            "lvm_meta_size": 64,
                            "vg": "image"
                        }
                    ],
                    "type": "disk",
                    "id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
                    "size": 65535
                },
                {
                    "_allocate_size": "min",
                    "label": "Base System",
                    "min_size": 19374,
                    "volumes": [
                        {
                            "mount": "/",
                            "size": 15360,
                            "type": "lv",
                            "name": "root",
                            "file_system": "ext4"
                        },
                        {
                            "mount": "swap",
                            "size": 4014,
                            "type": "lv",
                            "name": "swap",
                            "file_system": "swap"
                        }
                    ],
                    "type": "vg",
                    "id": "os"
                },
                {
                    "_allocate_size": "min",
                    "label": "Zero size volume",
                    "min_size": 0,
                    "volumes": [
                        {
                            "mount": "none",
                            "size": 0,
                            "type": "lv",
                            "name": "zero_size",
                            "file_system": "xfs"
                        }
                    ],
                    "type": "vg",
                    "id": "zero_size"
                },
                {
                    "_allocate_size": "all",
                    "label": "Image Storage",
                    "min_size": 5120,
                    "volumes": [
                        {
                            "mount": "/var/lib/glance",
                            "size": 175347,
                            "type": "lv",
                            "name": "glance",
                            "file_system": "xfs"
                        }
                    ],
                    "type": "vg",
                    "id": "image"
                }
            ]
        },
        "mco_connector": "rabbitmq",
        "mco_host": "10.20.0.2"
    },
    "name": "node-1",
    "hostname": "node-1.domain.tld",
    "slave_name": "node-1",
    "power_pass": "/root/.ssh/bootstrap.rsa",
    "netboot_enabled": "1"
}
LIST_BLOCK_DEVICES_SAMPLE = [
    {'uspec':
        {'DEVLINKS': [
            'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
            '/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
            '/dev/disk/by-id/wwn-fake_wwn_1',
            '/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
         'ID_SERIAL_SHORT': 'fake_serial_1',
         'ID_WWN': 'fake_wwn_1',
         'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
                    'target0:0:0/0:0:0:0/block/sda',
         'ID_MODEL': 'fake_id_model',
         'DEVNAME': '/dev/sda',
         'MAJOR': '8',
         'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
         },
     'startsec': '0',
     'device': '/dev/sda',
     'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
     'bspec': {
         'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
         'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
         'ra': '256', 'ro': '0', 'maxsect': '1024'
     },
     'size': 500107862016},
    {'uspec':
        {'DEVLINKS': [
            '/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
            '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
            '/dev/disk/by-id/wwn-fake_wwn_2'],
         'ID_SERIAL_SHORT': 'fake_serial_2',
         'ID_WWN': 'fake_wwn_2',
         'DEVPATH': '/devices/pci0000:00/0000:00:3f.2/ata2/host0/'
                    'target0:0:0/0:0:0:0/block/sdb',
         'ID_MODEL': 'fake_id_model',
         'DEVNAME': '/dev/sdb',
         'MAJOR': '8',
         'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
         },
     'startsec': '0',
     'device': '/dev/sdb',
     'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
     'bspec': {
         'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
         'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
         'ra': '256', 'ro': '0', 'maxsect': '1024'},
     'size': 500107862016},
    {'uspec':
        {'DEVLINKS': [
            '/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
            '/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
            '/dev/disk/by-id/wwn-fake_wwn_3',
            '/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'],
         'ID_SERIAL_SHORT': 'fake_serial_3',
         'ID_WWN': 'fake_wwn_3',
         'DEVPATH': '/devices/pci0000:00/0000:00:0d.0/ata4/host0/target0:0:0/'
                    '0:0:0:0/block/sdc',
         'ID_MODEL': 'fake_id_model',
         'DEVNAME': '/dev/sdc',
         'MAJOR': '8',
         'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'},
     'startsec': '0',
     'device': '/dev/sdc',
     'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
     'bspec': {
         'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
         'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
         'ra': '256', 'ro': '0', 'maxsect': '1024'},
     'size': 500107862016},
]
class TestNailgun(test_base.BaseTestCase):

    def setUp(self):
        super(TestNailgun, self).setUp()
        self.drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)

    def test_match_device_by_id_matches(self):
        fake_ks_disk = {
            "extra": [
                "disk/by-id/fake_scsi_matches",
                "disk/by-id/fake_ata_dont_matches"
            ]
        }
        fake_hu_disk = {
            "uspec": {
                "DEVLINKS": [
                    "/dev/disk/by-id/fake_scsi_matches",
                    "/dev/disk/by-path/fake_path"
                ]
            }
        }
        self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))

    def test_match_device_id_matches(self):
        fake_ks_disk = {
            "extra": [
                "disk/by-id/fake_scsi_dont_matches",
                "disk/by-id/fake_ata_dont_matches"
            ],
            "id": "sdd"
        }
        fake_hu_disk = {
            "uspec": {
                "DEVLINKS": [
                    "/dev/disk/by-id/fake_scsi_matches",
                    "/dev/disk/by-path/fake_path",
                    "/dev/sdd"
                ]
            }
        }
        self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))

    def test_match_device_dont_matches(self):
        fake_ks_disk = {
            "extra": [
                "disk/by-id/fake_scsi_dont_matches",
                "disk/by-id/fake_ata_dont_matches"
            ],
            "id": "sda"
        }
        fake_hu_disk = {
            "uspec": {
                "DEVLINKS": [
                    "/dev/disk/by-id/fake_scsi_matches",
                    "/dev/disk/by-path/fake_path",
                    "/dev/sdd"
                ]
            }
        }
        self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))

    def test_configdrive_scheme(self):
        cd_scheme = self.drv.configdrive_scheme()
        self.assertEqual('fake_auth_key', cd_scheme.common.ssh_auth_key)
        self.assertEqual('node-1.domain.tld', cd_scheme.common.hostname)
        self.assertEqual('node-1.domain.tld', cd_scheme.common.fqdn)
        self.assertEqual('"10.20.0.2"', cd_scheme.common.name_servers)
        self.assertEqual('"domain.tld"', cd_scheme.common.search_domain)
        self.assertEqual('10.20.0.2', cd_scheme.common.master_ip)
        self.assertEqual('http://10.20.0.2:8000/api',
                         cd_scheme.common.master_url)
        self.assertEqual('08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,'
                         '08:00:27:b1:d7:15_eth2', cd_scheme.common.udevrules)
        self.assertEqual('08:00:27:79:da:80', cd_scheme.common.admin_mac)
        self.assertEqual('10.20.0.3', cd_scheme.common.admin_ip)
        self.assertEqual('255.255.255.0', cd_scheme.common.admin_mask)
        self.assertEqual('eth0', cd_scheme.common.admin_iface_name)
        self.assertEqual('America/Los_Angeles', cd_scheme.common.timezone)
        self.assertEqual('fuel.domain.tld', cd_scheme.puppet.master)
        self.assertEqual('unset', cd_scheme.mcollective.pskey)
        self.assertEqual('mcollective', cd_scheme.mcollective.vhost)
        self.assertEqual('10.20.0.2', cd_scheme.mcollective.host)
        self.assertEqual('mcollective', cd_scheme.mcollective.user)
        self.assertEqual('marionette', cd_scheme.mcollective.password)
        self.assertEqual('rabbitmq', cd_scheme.mcollective.connector)
        self.assertEqual('ubuntu', cd_scheme.profile)

    @mock.patch.object(hu, 'list_block_devices')
    def test_partition_scheme(self, mock_lbd):
        mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
        p_scheme = self.drv.partition_scheme()
        self.assertEqual(5, len(p_scheme.fss))
        self.assertEqual(4, len(p_scheme.pvs))
        self.assertEqual(3, len(p_scheme.lvs))
        self.assertEqual(2, len(p_scheme.vgs))
        self.assertEqual(1, len(p_scheme.mds))
        self.assertEqual(3, len(p_scheme.parteds))

    @mock.patch.object(hu, 'list_block_devices')
    def test_image_scheme(self, mock_lbd):
        mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
        p_scheme = self.drv.partition_scheme()
        i_scheme = self.drv.image_scheme(p_scheme)
        self.assertEqual(1, len(i_scheme.images))
        img = i_scheme.images[0]
        self.assertEqual('gzip', img.container)
        self.assertEqual('ext4', img.image_format)
        self.assertEqual('/dev/mapper/os-root', img.target_device)
        self.assertEqual(
            'http://%s/targetimages/%s.img.gz' % (
                self.drv.data['ks_meta']['master_ip'],
                self.drv.data['profile'].split('_')[0]),
            img.uri)
        self.assertEqual(None, img.size)

    def test_getlabel(self):
        self.assertEqual('', self.drv._getlabel(None))
        long_label = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        self.assertEqual(' -L %s ' % long_label[:12],
                         self.drv._getlabel(long_label))

    @mock.patch.object(hu, 'list_block_devices')
    def test_disk_dev_not_found(self, mock_lbd):
        mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
        fake_ks_disk = {
            "name": "fake",
            "extra": [
                "disk/by-id/fake_scsi_matches",
                "disk/by-id/fake_ata_dont_matches"
            ]
        }
        self.assertRaises(errors.DiskNotFoundError, self.drv._disk_dev,
                          fake_ks_disk)
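
# These oslotest/mock-based unit tests can typically be run with a standard
# Python test runner, e.g. `python -m pytest fuel_agent/fuel_agent/tests/test_nailgun.py`
# or via the repository's tox targets (exact tooling depends on the project setup).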
| 37.482618 | 78 | 0.420809 | 1,720 | 18,329 | 4.280233 | 0.207558 | 0.07335 | 0.028253 | 0.00652 | 0.530834 | 0.513583 | 0.472698 | 0.441592 | 0.397039 | 0.320565 | 0 | 0.08067 | 0.43663 | 18,329 | 488 | 79 | 37.559426 | 0.632287 | 0.030007 | 0 | 0.397826 | 0 | 0.013043 | 0.278807 | 0.095131 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.019565 | false | 0.006522 | 0.01087 | 0 | 0.032609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
038c2d87bdf66ab34919ec05d38821c57168aa89 | 3,100 | py | Python | examples/proxy/broadcast.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 1 | 2021-04-16T14:53:47.000Z | 2021-04-16T14:53:47.000Z | examples/proxy/broadcast.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 2 | 2020-12-15T09:13:43.000Z | 2020-12-16T08:02:41.000Z | examples/proxy/broadcast.py | KangFengjian/maro | 2694a75731d5174ba5b33780670ba38d776d8c5a | [
"MIT"
] | 1 | 2021-10-01T09:17:43.000Z | 2021-10-01T09:17:43.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import multiprocessing as mp
from maro.communication import Proxy, SessionType
def worker(group_name):
    """
    The main worker logic: initialize the proxy and handle jobs from the master.

    Args:
        group_name (str): Identifier for the group of all communication components.
    """
    proxy = Proxy(group_name=group_name,
                  component_type="worker",
                  expected_peers={"master": 1})
    counter = 0
    print(f"{proxy.component_name}'s counter is {counter}.")

    # non-recurring receive: take the messages from the proxy once.
    for msg in proxy.receive(is_continuous=False):
        print(f"{proxy.component_name} receive message from {msg.source}.")
        if msg.tag == "INC":
            counter += 1
            print(f"{proxy.component_name} receive INC request, {proxy.component_name}'s count is {counter}.")
            proxy.reply(received_message=msg, tag="done")
def master(group_name: str, worker_num: int, is_immediate: bool = False):
    """
    The main master logic: initialize the proxy and allocate jobs to the workers.

    Args:
        group_name (str): Identifier for the group of all communication components.
        worker_num (int): The number of workers.
        is_immediate (bool): If True, it will be an async mode; otherwise, it will be a sync mode.
            Async mode: the proxy only returns the session ids for the sent messages. Based on the local task
                priority, you can do something with higher priority before receiving the replied messages from peers.
            Sync mode: it will block until the proxy returns all the replied messages.
    """
    proxy = Proxy(group_name=group_name,
                  component_type="master",
                  expected_peers={"worker": worker_num})

    if is_immediate:
        session_ids = proxy.ibroadcast(tag="INC",
                                       session_type=SessionType.NOTIFICATION)
        # do some tasks with higher priority here.
        replied_msgs = proxy.receive_by_id(session_ids)
    else:
        replied_msgs = proxy.broadcast(tag="INC",
                                       session_type=SessionType.NOTIFICATION)

    for msg in replied_msgs:
        print(f"{proxy.component_name} received a notification from {msg.source} with message session stage " +
              f"{msg.session_stage}.")
if __name__ == "__main__":
"""
This is a single-host multiprocess program used to simulate the communication in the distributed system.
For the completed usage experience of the distributed cluster, please use the MARO CLI.
"""
mp.set_start_method("spawn")
group_name = "proxy_broadcast_INC_example"
worker_number = 5
is_immediate = True
workers = mp.Pool(worker_number)
master_process = mp.Process(target=master, args=(group_name, worker_number, is_immediate,))
master_process.start()
workers.map(worker, [group_name] * worker_number)
workers.close()
master_process.join()
workers.join()
| 36.904762 | 117 | 0.662903 | 392 | 3,100 | 5.089286 | 0.369898 | 0.049624 | 0.045113 | 0.0401 | 0.232581 | 0.177444 | 0.106266 | 0.106266 | 0.065163 | 0.065163 | 0 | 0.001729 | 0.253871 | 3,100 | 83 | 118 | 37.349398 | 0.860787 | 0.301935 | 0 | 0.1 | 0 | 0 | 0.202444 | 0.07492 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
038e6d50ab6c21396044a16b0e1fba76ef2bb5a3 | 5,045 | py | Python | bacpypes/bvll/bip_simple.py | cbergmiller/bacpypes | 7b1f2e989787c2c1f807680fee5ee7a71b3689ab | [
"MIT"
] | 1 | 2018-01-11T13:10:15.000Z | 2018-01-11T13:10:15.000Z | bacpypes/bvll/bip_simple.py | cbergmiller/bacpypes | 7b1f2e989787c2c1f807680fee5ee7a71b3689ab | [
"MIT"
] | null | null | null | bacpypes/bvll/bip_simple.py | cbergmiller/bacpypes | 7b1f2e989787c2c1f807680fee5ee7a71b3689ab | [
"MIT"
] | null | null | null | import logging
from ..comm import Client, Server
from ..link import Address, LocalBroadcast, PDU
from .bvlpdu import ForwardedNPDU, OriginalBroadcastNPDU, OriginalUnicastNPDU, ReadBroadcastDistributionTableAck, \
    ReadForeignDeviceTableAck, Result, WriteBroadcastDistributionTable, ReadBroadcastDistributionTable, \
    RegisterForeignDevice, ReadForeignDeviceTable, DeleteForeignDeviceTableEntry, DistributeBroadcastToNetwork
from .bip_sap import BIPSAP
DEBUG = False
_logger = logging.getLogger(__name__)
__all__ = ['BIPSimple']
class BIPSimple(BIPSAP, Client, Server):

    def __init__(self, sapID=None, cid=None, sid=None):
        """A BIP node."""
        if DEBUG: _logger.debug('__init__ sapID=%r cid=%r sid=%r', sapID, cid, sid)
        BIPSAP.__init__(self, sapID)
        Client.__init__(self, cid)
        Server.__init__(self, sid)

    def indication(self, pdu):
        if DEBUG: _logger.debug('indication %r', pdu)
        # check for local stations
        if pdu.pduDestination.addrType == Address.localStationAddr:
            # make an original unicast PDU
            xpdu = OriginalUnicastNPDU(pdu, destination=pdu.pduDestination, user_data=pdu.pduUserData)
            if DEBUG: _logger.debug(' - xpdu: %r', xpdu)
            # send it downstream
            self.request(xpdu)
        # check for broadcasts
        elif pdu.pduDestination.addrType == Address.localBroadcastAddr:
            # make an original broadcast PDU
            xpdu = OriginalBroadcastNPDU(pdu, destination=pdu.pduDestination, user_data=pdu.pduUserData)
            if DEBUG: _logger.debug(' - xpdu: %r', xpdu)
            # send it downstream
            self.request(xpdu)
        else:
            _logger.warning('invalid destination address: %r', pdu.pduDestination)

    def confirmation(self, pdu):
        if DEBUG: _logger.debug('confirmation %r', pdu)
        # some kind of response to a request
        if isinstance(pdu, Result):
            # send this to the service access point
            self.sap_response(pdu)
        elif isinstance(pdu, ReadBroadcastDistributionTableAck):
            # send this to the service access point
            self.sap_response(pdu)
        elif isinstance(pdu, ReadForeignDeviceTableAck):
            # send this to the service access point
            self.sap_response(pdu)
        elif isinstance(pdu, OriginalUnicastNPDU):
            # build a vanilla PDU
            xpdu = PDU(pdu.pduData, source=pdu.pduSource, destination=pdu.pduDestination, user_data=pdu.pduUserData)
            if DEBUG: _logger.debug(' - xpdu: %r', xpdu)
            # send it upstream
            self.response(xpdu)
        elif isinstance(pdu, OriginalBroadcastNPDU):
            # build a PDU with a local broadcast address
            xpdu = PDU(pdu.pduData, source=pdu.pduSource, destination=LocalBroadcast(), user_data=pdu.pduUserData)
            if DEBUG: _logger.debug(' - xpdu: %r', xpdu)
            # send it upstream
            self.response(xpdu)
        elif isinstance(pdu, ForwardedNPDU):
            # build a PDU with the source from the real source
            xpdu = PDU(pdu.pduData, source=pdu.bvlciAddress, destination=LocalBroadcast(), user_data=pdu.pduUserData)
            if DEBUG: _logger.debug(' - xpdu: %r', xpdu)
            # send it upstream
            self.response(xpdu)
        elif isinstance(pdu, WriteBroadcastDistributionTable):
            # build a response
            xpdu = Result(code=0x0010, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        elif isinstance(pdu, ReadBroadcastDistributionTable):
            # build a response
            xpdu = Result(code=0x0020, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        elif isinstance(pdu, RegisterForeignDevice):
            # build a response
            xpdu = Result(code=0x0030, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        elif isinstance(pdu, ReadForeignDeviceTable):
            # build a response
            xpdu = Result(code=0x0040, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        elif isinstance(pdu, DeleteForeignDeviceTableEntry):
            # build a response
            xpdu = Result(code=0x0050, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        elif isinstance(pdu, DistributeBroadcastToNetwork):
            # build a response
            xpdu = Result(code=0x0060, user_data=pdu.pduUserData)
            xpdu.pduDestination = pdu.pduSource
            # send it downstream
            self.request(xpdu)
        else:
            _logger.warning('invalid pdu type: %s', type(pdu))
| 45.863636 | 117 | 0.636075 | 514 | 5,045 | 6.138132 | 0.200389 | 0.049445 | 0.038352 | 0.076704 | 0.548653 | 0.548653 | 0.471315 | 0.471315 | 0.449128 | 0.449128 | 0 | 0.008258 | 0.279881 | 5,045 | 109 | 118 | 46.284404 | 0.860171 | 0.13558 | 0 | 0.364865 | 0 | 0 | 0.043669 | 0 | 0 | 0 | 0.008318 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.067568 | 0 | 0.121622 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
038f505b334cb3a60e56c15adf216d9924d30d0c | 4,721 | py | Python | pytint/interpreters.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | 1 | 2020-08-14T19:41:45.000Z | 2020-08-14T19:41:45.000Z | pytint/interpreters.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | null | null | null | pytint/interpreters.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | null | null | null | from typing import List, Deque, Dict, Set, Iterable
Symbol = str
State = str
class FAConfiguration:

    def __init__(self, tape: List[Symbol], state: State, symbol_read: Symbol, prev_index: int, epsilon_set=None):
        if epsilon_set is None:
            epsilon_set = set()
        self.tape = tape
        self.state = state
        self.prev_index = prev_index
        self.epsilon_set = epsilon_set
        self.symbol_read = symbol_read
FAConfigurations = List[FAConfiguration]
FAComputationHistory = List[FAConfigurations]
FATransitions = Dict[State, Dict[Symbol, Set[State]]]
class FiniteAutomaton:
def __init__(self, name="FA"):
self.transitions: FATransitions = dict()
self.start_state: State = ""
self.accept_states: Set[State] = set()
self.current_configurations: FAConfigurations = list()
self.computation_history: FAComputationHistory = list()
self.complete: bool = False
self.accepted: bool = False
self.name: str = name
def add_transition(self, state: State, symbol: Symbol, next_state: State):
if state not in self.transitions:
self.transitions[state] = dict()
if symbol not in self.transitions[state]:
self.transitions[state][symbol] = set()
self.transitions[state][symbol].add(next_state)
def set_start_state(self, state: State):
self.start_state = state
def add_accepting_state(self, state: State):
self.accept_states.add(state)
def start_new_computation(self, tape: Iterable[Symbol]):
# reset internal states and prepare for new computation
self.current_configurations = list()
self.complete = False
self.accepted = False
self.computation_history = list()
# set up the starting state
self.current_configurations.append(FAConfiguration(list(tape), self.start_state, "", -1))
self.computation_history.append(self.current_configurations)
def simulate_one_step(self) -> bool:
# do not execute if computation is already completed
if self.complete:
return True
new_configurations: FAConfigurations = list()
for index in range(len(self.current_configurations)):
this_config = self.current_configurations[index]
# terminate computation if an accepting state has been reached with an empty tape
if this_config.state in self.accept_states and not this_config.tape:
self.complete = True
self.accepted = True
return True
# check if there is a entry for the state in the transition function
if this_config.state not in self.transitions:
# skip this config since there is no transition function out of this state.
continue
# check for epsilon transitions
# marked with lower case epsilon(unicode code point 03F5)
if "ϵ" in self.transitions[this_config.state]:
for next_state in self.transitions[this_config.state]["ϵ"]:
if next_state not in this_config.epsilon_set:
epsilon_set = this_config.epsilon_set.copy()
epsilon_set.add(next_state)
new_configurations.append(FAConfiguration(this_config.tape,
next_state,
"ϵ",
index,
epsilon_set))
# explore non-epsilon transitions if input tape is not empty
if this_config.tape:
new_tape = this_config.tape[1:]
symbol = this_config.tape[0]
# check if a transition function exists for the current state and symbol
if symbol in self.transitions[this_config.state]:
for next_state in self.transitions[this_config.state][symbol]:
new_configurations.append(FAConfiguration(new_tape, next_state, symbol, index))
# if no new states are possible and no accepting state has been reached,
# end computation and mark non-acceptance
if not new_configurations:
self.complete = True
self.accepted = False
return True
        else:
            self.computation_history.append(new_configurations)
            self.current_configurations = new_configurations
            # not finished yet; more configurations remain to explore
            return False
def simulate_util_completion(self):
while not self.simulate_one_step():
pass
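# --- Illustrative usage (added sketch; not part of the original file) ---
# A minimal NFA over {a, b} that accepts strings ending in "ab", assuming the
# acceptance rule implemented above: an accepting state reached on an empty tape.
if __name__ == "__main__":
    nfa = FiniteAutomaton(name="ends-in-ab")
    nfa.set_start_state("q0")
    nfa.add_accepting_state("q2")
    for sym in ("a", "b"):
        nfa.add_transition("q0", sym, "q0")  # stay in q0 on any symbol
    nfa.add_transition("q0", "a", "q1")      # nondeterministically begin the "ab" suffix
    nfa.add_transition("q1", "b", "q2")
    nfa.start_new_computation(list("aab"))
    nfa.simulate_util_completion()
    print("accepted" if nfa.accepted else "rejected")  # -> accepted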
| 38.696721 | 113 | 0.606863 | 523 | 4,721 | 5.3174 | 0.219885 | 0.053937 | 0.062927 | 0.030205 | 0.143114 | 0.054657 | 0.054657 | 0.054657 | 0.054657 | 0.054657 | 0 | 0.001881 | 0.324296 | 4,721 | 121 | 114 | 39.016529 | 0.869906 | 0.143614 | 0 | 0.0875 | 0 | 0 | 0.001242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.0125 | 0.0125 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039430910e7060e427d541a78522663bedae56d1 | 4,342 | py | Python | lejian/apis/user.py | PuZheng/LEJAIN-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | [
"MIT"
] | null | null | null | lejian/apis/user.py | PuZheng/LEJAIN-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | [
"MIT"
] | 13 | 2015-10-23T04:43:51.000Z | 2015-12-19T14:30:33.000Z | lejian/apis/user.py | PuZheng/lejian-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os.path
from flask import _request_ctx_stack, current_app, request, url_for
from flask.ext import login
from flask.ext.principal import identity_changed, Identity, AnonymousIdentity
from itsdangerous import URLSafeTimedSerializer, BadTimeSignature
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.security import check_password_hash
from genuine_ap import const
from genuine_ap.basemain import app
from genuine_ap.apis import ModelWrapper, wraps
from genuine_ap.exceptions import AuthenticateFailure
from genuine_ap.models import User
class UserWrapper(login.UserMixin, ModelWrapper):
"""
a wrapper of the actual user model
"""
__serializer__ = URLSafeTimedSerializer(
secret_key=app.config.get('SECRET_KEY'),
salt=app.config.get('SECURITY_SALT'))
@property
def permissions(self):
ret = set()
for group in self.groups:
for perm in group.permissions:
ret.add(perm)
return ret
@property
def auth_token(self):
'''
get the authentiaction token, see
`https://flask-login.readthedocs.org/en/latest/#flask.ext.login.
LoginManager.token_loader`_
'''
return self.__serializer__.dumps([self.id, self.name,
self.password])
@property
def pic_url(self):
user_pic = os.path.join('user_pics', str(self.id) + '.jpg')
if os.path.exists(os.path.join('static', user_pic)):
return url_for('static', filename=user_pic)
return ''
@property
def small_pic_url(self):
user_pic = os.path.join('user_pics', str(self.id) + '_small.jpg')
if os.path.exists(os.path.join('static', user_pic)):
return url_for('static', filename=user_pic)
return ''
@property
def default_url(self):
from genuine_ap.vendor import vendor_model_view
from genuine_ap.retailer import retailer_model_view
if self.group_id == const.VENDOR_GROUP:
if self.vendor:
return vendor_model_view.url_for_object(self.vendor)
else:
return url_for('no_vendor')
if self.group_id == const.RETAILER_GROUP:
if self.retailer:
return retailer_model_view.url_for_object(self.retailer)
else:
return url_for('no_retailer')
return self.group.default_url
def as_dict(self, include_auth_token=False):
ret = {
'id': self.id,
'name': self.name,
'group': self.group.as_dict(),
'create_time': self.create_time.strftime('%Y-%m-%d'),
'pic_url': self.pic_url,
'small_pic_url': self.small_pic_url,
}
if include_auth_token:
ret['auth_token'] = self.auth_token
return ret
class GroupWrapper(ModelWrapper):
def as_dict(self):
return {
'id': self.id,
'name': self.name,
}
def get_user(id_):
if not id_:
return None
    # TODO: this needs optimization
try:
return wraps(User.query.filter(User.id == id_).one())
except NoResultFound:
return None
def load_user_from_token():
ctx = _request_ctx_stack.top
token = request.args.get('auth_token')
identity = AnonymousIdentity()
if token is None:
ctx.user = current_app.login_manager.anonymous_user()
else:
try:
ctx.user = get_user(UserWrapper.__serializer__.loads(token)[0])
identity = Identity(ctx.user.id)
# change identity to reset permissions
except BadTimeSignature:
ctx.user = current_app.login_manager.anonymous_user()
identity_changed.send(current_app._get_current_object(), identity=identity)
def authenticate(name, password):
"""
    authenticate a user by checking that the name and password match
    :return: the authenticated user wrapper
    :rtype: UserWrapper
    :raise: exceptions.AuthenticateFailure if the name or password is wrong
"""
try:
user = User.query.filter(User.name == name).one()
if check_password_hash(user.password, password):
return UserWrapper(user)
raise AuthenticateFailure("用户名或者密码错误")
except NoResultFound:
raise AuthenticateFailure("用户名或者密码错误")
| 31.23741 | 79 | 0.638415 | 524 | 4,342 | 5.078244 | 0.278626 | 0.015784 | 0.034198 | 0.010522 | 0.19316 | 0.166103 | 0.132281 | 0.132281 | 0.100714 | 0.100714 | 0 | 0.000626 | 0.264164 | 4,342 | 138 | 80 | 31.463768 | 0.832238 | 0.098342 | 0 | 0.31 | 0 | 0 | 0.049948 | 0 | 0 | 0 | 0 | 0.007246 | 0 | 1 | 0.1 | false | 0.04 | 0.14 | 0.01 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03970b2533b2ca57d1a6281354a0bf6b80e84225 | 2,324 | py | Python | tests/fs/test_base.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | null | null | null | tests/fs/test_base.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | null | null | null | tests/fs/test_base.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | null | null | null | # Copyright 2009-2015 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from unittest import TestCase
from mrjob.fs.base import Filesystem
from tests.py2 import Mock
from tests.py2 import patch
from tests.quiet import no_handlers_for_logger
from tests.sandbox import SandboxedTestCase
class CatTestCase(SandboxedTestCase):
def test_multiple_files(self):
fs = Filesystem()
fs.ls = Mock(return_value=['path1', 'path2', 'path3'])
fs._cat_file = Mock(return_value=[b'chunk1\n', b'chunk2'])
chunks = list(fs.cat('whatever'))
self.assertEqual(
chunks,
[b'chunk1\n', b'chunk2', b'',
b'chunk1\n', b'chunk2', b'',
b'chunk1\n', b'chunk2'])
class JoinTestCase(SandboxedTestCase):
def setUp(self):
super(JoinTestCase, self).setUp()
self.fs = Filesystem()
def test_local_paths(self):
self.assertEqual(self.fs.join('foo', 'bar'),
'foo%sbar' % os.path.sep)
self.assertEqual(self.fs.join('foo', '%sbar' % os.path.sep),
'%sbar' % os.path.sep)
self.assertEqual(self.fs.join('foo', 'bar', 'baz'),
'foo%sbar%sbaz' % (os.path.sep, os.path.sep))
def test_path_onto_uri(self):
self.assertEqual(self.fs.join('hdfs://host', 'path'),
'hdfs://host/path')
def test_uri_onto_anything(self):
self.assertEqual(self.fs.join('hdfs://host', 'hdfs://host2/path'),
'hdfs://host2/path')
self.assertEqual(self.fs.join('/', 'hdfs://host2/path'),
'hdfs://host2/path')
self.assertEqual(self.fs.join('/', 'hdfs://host2/path', 'subdir'),
'hdfs://host2/path/subdir')
| 34.176471 | 74 | 0.61833 | 303 | 2,324 | 4.686469 | 0.412541 | 0.038028 | 0.093662 | 0.103521 | 0.272535 | 0.250704 | 0.247887 | 0.223944 | 0.171831 | 0.171831 | 0 | 0.017674 | 0.245267 | 2,324 | 67 | 75 | 34.686567 | 0.791904 | 0.240534 | 0 | 0.051282 | 0 | 0 | 0.163906 | 0.013706 | 0 | 0 | 0 | 0 | 0.205128 | 1 | 0.128205 | false | 0 | 0.179487 | 0 | 0.358974 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0397f4e8843e1786622b1eaaa945cb10043ae58c | 931 | py | Python | WebODM-master/app/templatetags/plugins.py | abhinavsri000/UAVision | 895fd883c1f5f492b4dcb573568b60ef03132cf6 | [
"MIT"
] | null | null | null | WebODM-master/app/templatetags/plugins.py | abhinavsri000/UAVision | 895fd883c1f5f492b4dcb573568b60ef03132cf6 | [
"MIT"
] | 8 | 2019-11-24T14:15:19.000Z | 2020-04-19T09:06:04.000Z | WebODM-master/app/templatetags/plugins.py | abhinavsri000/UAVision | 895fd883c1f5f492b4dcb573568b60ef03132cf6 | [
"MIT"
] | null | null | null | from django import template
from app.plugins import get_active_plugins
import itertools
register = template.Library()
@register.simple_tag(takes_context=False)
def get_plugins_js_includes():
# Flatten all urls for all plugins
js_urls = list(itertools.chain(*[plugin.get_include_js_urls() for plugin in get_active_plugins()]))
return "\n".join(map(lambda url: "<script src='{}'></script>".format(url), js_urls))
@register.simple_tag(takes_context=False)
def get_plugins_css_includes():
# Flatten all urls for all plugins
css_urls = list(itertools.chain(*[plugin.get_include_css_urls() for plugin in get_active_plugins()]))
return "\n".join(map(lambda url: "<link href='{}' rel='stylesheet' type='text/css'>".format(url), css_urls))
@register.simple_tag()
def get_plugins_main_menus():
# Flatten list of menus
return list(itertools.chain(*[plugin.main_menu() for plugin in get_active_plugins()]))
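# --- Illustration (added; not part of the original file) ---
# All three tags share the same flattening idiom:
#   list(itertools.chain(*[[1, 2], [3], []]))  ->  [1, 2, 3]
# and would be rendered from a template roughly as:
#   {% load plugins %}
#   {% get_plugins_js_includes %}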
| 40.478261 | 112 | 0.745435 | 137 | 931 | 4.824818 | 0.350365 | 0.054463 | 0.096823 | 0.108926 | 0.567322 | 0.567322 | 0.526475 | 0.305598 | 0.305598 | 0.163389 | 0 | 0 | 0.119227 | 931 | 22 | 113 | 42.318182 | 0.806098 | 0.093448 | 0 | 0.133333 | 0 | 0 | 0.094048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0.066667 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0399f8d11a55a533cbe1bcfb45223a3acc4ce972 | 4,132 | py | Python | omh_nbi/handlers/uni_set.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | [
"Apache-2.0"
] | null | null | null | omh_nbi/handlers/uni_set.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | [
"Apache-2.0"
] | null | null | null | omh_nbi/handlers/uni_set.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Broadband Forum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# UNI handler. Provisions per-ONU-UNI MEs
#
# Created by I.Ternovsky (Broadcom) on 17 July 2020
#
""" Set up UNI port handler.
This handler is triggered by creation of YANG object representing ONU UNI.
It performs the following sequence:
- Enable UNI
- Create MAC Bridge Port Config Data
- Create EXT_VLAN_TAG_OPER_CFG
- Set EXT_VLAN_TAG_OPER_CFG
- Create Multicast Operations Profile
- Create Multicast Subscriber Config Info
"""
from database.omci_me_types import *
from database.omci_me import ME
from encode_decode.omci_action_set import SetAction
from encode_decode.omci_action_create import CreateAction
from omh_nbi.onu_driver import OnuDriver
from omh_nbi.omh_handler import OmhHandler, OMHStatus
from omci_logger import OmciLogger
from .omh_handler_utils import get_uni, create_mac_bridge_port, create_ext_vlan_tag_oper_config_data
import copy
logger = OmciLogger.getLogger(__name__)
class UniSetHandler(OmhHandler):
def __init__(self, onu: 'OnuDriver', uni_name: str, uni_id: int):
""" Set UNI port.
Args:
uni_name : UNI interface name in the YANG model
uni_id: 0-based UNI index. Usually it is determined by parent-rel-pos attribute in the
parent hardware component. Note that unlike parent-rel-pos, uni_id is 0-based.
The corresponding ME of type PPTP_Eth_UNI=11, VEIP_InterfacePoint=329 must exist
in the ONU MIB.
        Returns:
            handler completion status (reported by run_to_completion(); __init__ itself returns nothing)
"""
super().__init__(name='set_uni', onu = onu, description='set_uni: {}.{}-{}'.format(onu.onu_id, uni_id, uni_name))
self._uni_name = uni_name
self._uni_id = uni_id
def _enable_uni(self, uni: ME) -> OMHStatus:
"""Enable UNI"""
if uni.admin_state == 'UNLOCK':
return OMHStatus.OK
uni = copy.deepcopy(uni)
uni.admin_state = 'UNLOCK'
return self.transaction(SetAction(self, uni, ('admin_state',)))
def run_to_completion(self) -> OMHStatus:
logger.info(self.info())
# Do nothing if UNI is already in the local MIB
if self._onu.get_by_name(self._uni_name) is not None:
logger.info('{} - Already configured'.format(self.info()))
return OMHStatus.OK
#
# At this point ONU MIB is freshly populated. Start the initial provisioning
#
        # 1. Find the UNI ME (PPTP Ethernet UNI or VEIP) in the ONU MIB
        uni_me = get_uni(self._onu, self._uni_id)
if uni_me is None:
return self.logerr_and_return(OMHStatus.ERROR_IN_PARAMETERS,
'UNI {} is not found'.format(self._uni_id))
# 2. Enable UNI
logger.info('{} - Enable'.format(self.info()))
status = self._enable_uni(uni_me)
if status != OMHStatus.OK:
return status
# 3. Create MAC Bridge Port Config Data ME
logger.info('{} - Create MAC Bridge Port'.format(self.info()))
status = create_mac_bridge_port(self, uni_me)
if status != OMHStatus.OK:
return status
# 4. Create EXT_VLAN_TAG_OPER_CFG
logger.info('{} - Create EXT VLAN Tagging Oper Config Data'.format(self.info()))
status = create_ext_vlan_tag_oper_config_data(self, uni_me)
if status != OMHStatus.OK:
return status
uni_me.user_name = self._uni_name
self._onu.set(uni_me)
# XXX TODO
# - Create Multicast Operations Profile
# - Create Multicast Subscriber Config Info
return OMHStatus.OK
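# --- Illustrative usage (added sketch; not part of the original file) ---
# Assuming an OnuDriver `onu` whose MIB already holds the PPTP Ethernet UNI
# (or VEIP) ME for the port, the handler would be driven roughly as:
#   handler = UniSetHandler(onu, uni_name='eth.1', uni_id=0)
#   status = handler.run_to_completion()
#   assert status == OMHStatus.OK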
| 37.225225 | 121 | 0.669409 | 569 | 4,132 | 4.662566 | 0.347979 | 0.026385 | 0.02827 | 0.035809 | 0.227667 | 0.15605 | 0.116849 | 0.094233 | 0.080663 | 0 | 0 | 0.007747 | 0.250242 | 4,132 | 110 | 122 | 37.563636 | 0.848612 | 0.413843 | 0 | 0.2 | 0 | 0 | 0.078867 | 0 | 0 | 0 | 0 | 0.009091 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039bf52827205dd5799cf017fb621c4a333c409f | 4,269 | py | Python | application/dash_application/views/runreport.py | oicr-gsi/dashi | 34bf96a7d447095df525df3ad27dbe10f4e3dde0 | [
"MIT"
] | 2 | 2020-03-27T21:20:20.000Z | 2022-02-09T07:42:51.000Z | application/dash_application/views/runreport.py | oicr-gsi/dashi | 34bf96a7d447095df525df3ad27dbe10f4e3dde0 | [
"MIT"
] | 89 | 2019-06-06T18:31:19.000Z | 2022-03-07T20:38:36.000Z | application/dash_application/views/runreport.py | oicr-gsi/dashi | 34bf96a7d447095df525df3ad27dbe10f4e3dde0 | [
"MIT"
] | 2 | 2019-06-06T17:50:53.000Z | 2019-06-06T17:52:32.000Z | import dash_html_components as html
import dash_core_components as core
from dash.dependencies import Input, Output
from ..dash_id import init_ids
import json
import pandas
import numpy
import plotly.figure_factory as ff
import gsiqcetl.load
from gsiqcetl.runreport.constants import CacheSchema
page_name = "runreport/proj_hist"
ids = init_ids([
"project",
"focused_run",
"coverage_dist",
"click-data"
])
idx = pandas.IndexSlice
rr = gsiqcetl.load.runreport(CacheSchema.v1)
rr_col = gsiqcetl.load.runreport_columns(CacheSchema.v1)
COL_PROJECT = "Project"
rr[COL_PROJECT] = rr[rr_col.Library].apply(lambda x: x.split("_", 1)[0])
rr.set_index([COL_PROJECT, rr_col.Run], inplace=True)
rr.sort_values([rr_col.Run, COL_PROJECT], ascending=False, inplace=True)
# Count how many runs per project
runs_per_proj_count = rr.reset_index().groupby(COL_PROJECT)[rr_col.Run].nunique()
proj_with_multi_run = runs_per_proj_count[runs_per_proj_count > 1].index
# Only display project that have more than one run
rr = rr.loc[idx[proj_with_multi_run, :], :]
rr = rr.groupby([COL_PROJECT, rr_col.Run]).filter(lambda x: len(x) > 1)
proj_list = list(rr.index.get_level_values(COL_PROJECT).unique())
proj_top = proj_list[0]
proj_list.sort()
run_list = list(rr.loc[idx[proj_top, :], :].index.get_level_values(rr_col.Run).unique())
layout = html.Div(
[
core.Dropdown(
id=ids["project"],
options=[{"label": v, "value": v} for v in proj_list],
value=proj_top,
clearable=False,
),
core.Dropdown(id=ids["focused_run"], clearable=False),
# core.Graph(
# id='coverage_hist'
# ),
core.Graph(id=ids["coverage_dist"]),
html.Pre(id=ids["click-data"]),
]
)
def init_callbacks(dash_app):
# When a project is selected,
# show only runs where the project is found
@dash_app.callback(
Output(ids["focused_run"], "options"),
[
Input(ids["project"], "value")
]
)
@dash_app.server.cache.memoize(timeout=60)
def set_focused_run_based_on_project(project):
runs = rr.loc[idx[project, :], :].index.get_level_values(rr_col.Run).unique()
return [{"label": v, "value": v} for v in runs]
# When a project is selected
# Set the newest run as the default selection
@dash_app.callback(
Output(ids["focused_run"], "value"),
[
Input(ids["project"], "value")
]
)
def set_focused_run_default_value_when_options_change(project):
runs = rr.loc[idx[project, :], :].index.get_level_values("Run").unique()
return list(runs)[0]
@dash_app.callback(
Output(ids["coverage_dist"], "figure"),
[
Input(ids["project"], "value"),
Input(ids["focused_run"], "value")
],
)
@dash_app.server.cache.memoize(timeout=60)
def create_coverage_dist(project, run_to_focus):
highlight = rr.loc[idx[project, run_to_focus], rr_col.Coverage]
other_runs = rr.index.get_level_values(rr_col.Run).difference(
highlight.index.get_level_values(rr_col.Run)
)
other_runs_data = rr.loc[idx[project, other_runs], rr_col.Coverage]
        if len(other_runs_data.unique()) < 2:
            return {}  # too few distinct values for a distplot; render an empty figure
try:
if len(other_runs_data) > 0:
return ff.create_distplot(
[list(highlight), list(other_runs_data)],
["Selected Run", "All Other Runs"],
)
else:
return ff.create_distplot([list(highlight)], ["Selected Run"])
# Thrown if all data points have the same value
        except numpy.linalg.LinAlgError:
return ff.create_distplot([list(other_runs_data)], ["All Other Run"])
# If data set only has one value
except ValueError:
return ff.create_distplot([list(other_runs_data)], ["All Other Run"])
@dash_app.callback(
Output(ids["click-data"], "children"),
[
Input(ids["coverage_dist"], "clickData")
],
)
@dash_app.server.cache.memoize(timeout=60)
def display_click_data(clickData):
return json.dumps(clickData, indent=2)
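# --- Illustrative wiring (added sketch; not part of the original file) ---
# The surrounding app factory would typically mount this page roughly as:
#   dash_app = dash.Dash(__name__, url_base_pathname=f"/{page_name}/")
#   dash_app.layout = layout
#   init_callbacks(dash_app)  # assumes dash_app.server.cache (e.g. Flask-Caching) exists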
| 31.160584 | 88 | 0.635278 | 570 | 4,269 | 4.536842 | 0.249123 | 0.025135 | 0.024749 | 0.044084 | 0.311292 | 0.24594 | 0.199536 | 0.138438 | 0.105955 | 0.073473 | 0 | 0.005197 | 0.233778 | 4,269 | 136 | 89 | 31.389706 | 0.785387 | 0.078707 | 0 | 0.127451 | 0 | 0 | 0.086968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04902 | false | 0 | 0.098039 | 0.009804 | 0.22549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039d26f5b0893b66b5a9af6d7de3662450287ccc | 4,570 | py | Python | app.py | learnTech/tass-calendar | c0f130438d594b5856e9f24917d864a1fa5dfeda | [
"MIT"
] | null | null | null | app.py | learnTech/tass-calendar | c0f130438d594b5856e9f24917d864a1fa5dfeda | [
"MIT"
] | null | null | null | app.py | learnTech/tass-calendar | c0f130438d594b5856e9f24917d864a1fa5dfeda | [
"MIT"
] | null | null | null | from flask import Flask, escape, request, make_response
import os
import requests
from Crypto.Cipher import AES
import urllib
import base64
from datetime import datetime, timedelta
# These variables should be configured as per your own parameters for each application in your API Gateway Maintanence portal.
# They can be configured using the .env file
# Token as generated by API Gateway
tokenKey=os.environ['TOKEN_KEY']
# Specified upon API setup in TASS API Gateway Maintenance program.
appCode=os.environ['APPCODE']
# TASS company to work with (see top right of TASS.web).
companyCode=os.environ['COMPANY_CODE']
# TASS API version.
apiVersion=os.environ['API_VERSION']
# TASS API method.
method=os.environ['METHOD']
# TASS API endpoint.
endPoint=os.environ['ENDPOINT']
# Parameters for passthrough - varies based on method.
parameters = "{\"start_date\":\"01/01/2021\",\"end_date\":\"01/01/2022\"}"
# Name of calendar
name=os.environ['NAME']
def getEncryptedToken(token, params):
#decode the token from b64 format
decoded = base64.b64decode(token)
plaintext = params
#put ECB padding in place for plaintext
length = 16 - (len(plaintext) % 16)
plaintext += chr(length)*length
rijndael = AES.new(decoded, AES.MODE_ECB)
    # encrypt the plaintext (pycryptodome's AES requires bytes, so encode first)
    ciphertext = rijndael.encrypt(plaintext.encode('utf-8'))
ciphertext = base64.b64encode(ciphertext)
return ciphertext
def getDecryptedToken(token, encrypted):
#decode from b64 for both the token and the encrypted data
decoded = base64.b64decode(token)
encoded = base64.b64decode(encrypted)
decoder = AES.new(decoded, AES.MODE_ECB)
    # decrypt the data; note the padding bytes added during encryption remain attached
    output = decoder.decrypt(encoded)
    return output
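# --- Illustrative round-trip (added sketch; not part of the original file) ---
# With a throwaway base64-encoded 16-byte key the pair above can be sanity-checked:
#   demo_key = base64.b64encode(b'0123456789abcdef')
#   ct = getEncryptedToken(demo_key, '{"x": 1}')
#   pt = getDecryptedToken(demo_key, ct)  # original JSON plus trailing padding bytes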
def getURLRequest(endPoint, method, appCode, companyCode, apiVersion, parameters, tokenKey):
encrypted = getEncryptedToken(tokenKey, parameters)
requestDict = {"method": method, "appcode": appCode, "company": companyCode, "v": apiVersion, "token": encrypted}
requestStr = urllib.parse.urlencode(requestDict)
URLString = endPoint + '?' + requestStr
return URLString
def create_calendar():
url = getURLRequest(endPoint, method, appCode, companyCode, apiVersion, parameters, tokenKey)
# Make a get request
content = requests.get(url=url).json()
ical = "BEGIN:VCALENDAR\r\n"
ical += "VERSION:2.0\r\n"
ical += "PRODID:-//" + name + "//EN\r\n"
for event in content["events"]:
# Event name and description
_id = str(event["id"])
_summary = event["title"]
_description = event["description"]
# Event categories
_category = event["cat_desc"]
# Time and date settings
_stamp = datetime.strptime(event["start"], "%Y-%m-%d %H:%M:%S").strftime("%Y%m%dT%H%M%S")
_start = datetime.strptime(event["start"], "%Y-%m-%d %H:%M:%S").strftime("%Y%m%dT%H%M%S")
_end = datetime.strptime(event["end"], "%Y-%m-%d %H:%M:%S").strftime("%Y%m%dT%H%M%S")
_all_day = bool(event["all_day"])
# Construct the ics file
ical += "BEGIN:VEVENT\r\n"
ical += "UID:" + _id + "\r\n"
ical += "SUMMARY:" + _summary + "\r\n"
ical += "DESCRIPTION:" + _description + "\r\n"
ical += "CATEGORIES:" + _category + "\r\n"
ical += "DTSTAMP:" + _stamp + "\r\n"
# All day events require a different time/end format than normal events
# This will change the format if an all day event is selected
if _all_day:
_start = datetime.strptime(_start, "%Y%m%dT%H%M%S")
# All day events are triggered when the start date is set to midnight
# and the end date is set to midnight the following day.
# Therefore we need to add 1 to the start date when forming the end date.
_end = _start + timedelta(days=1)
_start = _start.strftime("%Y%m%d")
_end = _end.strftime("%Y%m%d")
ical += "DTSTART;VALUE=DATE:" + _start + "\r\n"
ical += "DTEND;VALUE=DATE:" + _end + "\r\n"
else:
ical += "DTSTART:" + _start + "\r\n"
ical += "DTEND:" + _end + "\r\n"
ical += "END:VEVENT\r\n"
ical += "END:VCALENDAR\r\n"
return ical
app = Flask(__name__)
@app.route('/calendar.ics')
def calendar_ics():
# Get the calendar
_calendar = create_calendar()
response = make_response(_calendar)
response.headers["Content-Disposition"] = "attachment; filename=calendar.ics"
response.headers["Content-type"] = "text/calendar; charset=UTF-8"
return response
| 35.426357 | 126 | 0.650109 | 597 | 4,570 | 4.896147 | 0.341709 | 0.010263 | 0.02258 | 0.006842 | 0.135477 | 0.111529 | 0.095792 | 0.095792 | 0.039343 | 0.039343 | 0 | 0.01314 | 0.217287 | 4,570 | 128 | 127 | 35.703125 | 0.804026 | 0.232166 | 0 | 0.025641 | 0 | 0 | 0.170353 | 0.006033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.089744 | 0 | 0.217949 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039d58f8df8f1a6c6b5b34a51d35079ea0d2d111 | 948 | py | Python | oppoudpsdk/async_helpers.py | simbaja/oppoudpsdk | e2b08ce51a15ced6c4e96583a9b61b21f6a8cc59 | [
"MIT"
] | 1 | 2022-02-17T20:17:21.000Z | 2022-02-17T20:17:21.000Z | oppoudpsdk/async_helpers.py | simbaja/oppoudpsdk | e2b08ce51a15ced6c4e96583a9b61b21f6a8cc59 | [
"MIT"
] | null | null | null | oppoudpsdk/async_helpers.py | simbaja/oppoudpsdk | e2b08ce51a15ced6c4e96583a9b61b21f6a8cc59 | [
"MIT"
] | null | null | null | import asyncio
from typing import AsyncIterator
class OppoStreamIterator:
def __init__(self, reader: asyncio.StreamReader):
self._reader = reader
def __aiter__(self):
return self
async def __anext__(self):
        try:
            val = await self._reader.readuntil(b'\r')
        except asyncio.IncompleteReadError:
            # readuntil raises at EOF rather than returning b''
            raise StopAsyncIteration
        if not val:
            raise StopAsyncIteration
        return val
async def CancellableAsyncIterator(async_iterator: AsyncIterator, cancellation_event: asyncio.Event) -> AsyncIterator:
cancellation_task = asyncio.create_task(cancellation_event.wait())
result_iter = async_iterator.__aiter__()
while not cancellation_event.is_set():
done, pending = await asyncio.wait(
[cancellation_task, result_iter.__anext__()],
return_when=asyncio.FIRST_COMPLETED
)
for done_task in done:
if done_task == cancellation_task:
for pending_task in pending:
await pending_task
break
else:
yield done_task.result()
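# --- Illustrative usage (added sketch; not part of the original file) ---
# async def consume(reader: asyncio.StreamReader, stop: asyncio.Event):
#     async for raw in CancellableAsyncIterator(OppoStreamIterator(reader), stop):
#         print(raw)  # each item is a b'\r'-terminated chunk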
| 29.625 | 118 | 0.712025 | 109 | 948 | 5.816514 | 0.431193 | 0.047319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208861 | 948 | 31 | 119 | 30.580645 | 0.845333 | 0 | 0 | 0 | 0 | 0 | 0.00211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0.037037 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039ddd7ec3fbf7309dac9c7083e3a7f3f5c0a95b | 3,455 | py | Python | Project-2/Ishan Pandey/google_search.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
] | 1 | 2021-06-03T16:08:33.000Z | 2021-06-03T16:08:33.000Z | Project-2/Ishan Pandey/google_search.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
] | 16 | 2021-04-27T12:58:03.000Z | 2021-05-28T14:02:14.000Z | Project-2/Ishan Pandey/google_search.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
] | 70 | 2021-04-26T13:48:35.000Z | 2021-05-28T21:04:34.000Z | from PIL import ImageTk
import PIL.Image
from tkinter import *
import urllib
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
# -------URL generator----------
def url_gen(search_term):
search_term = (urllib.parse.quote(search_term, safe=''))
template = f"https://www.google.com/search?q={search_term}"
return template
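# e.g. (added illustration): url_gen("hello world")
# -> "https://www.google.com/search?q=hello%20world"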
# ---------GOOGLE SEARCH----------
def google(search_term):
url = url_gen(search_term)
headers = {
"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"}
page = requests.get(url, headers=headers)
global soup
soup = BeautifulSoup(page.text, "html.parser")
search_results()
def search_results():
global heading
global description
global link
global result_found
first_search = soup.find("div", attrs={"class": "hlcw0c"})
heading = first_search.find("h3", attrs={"class": "LC20lb DKV0Md"}).get_text()
description = first_search.find("span", attrs={"class": "aCOpRe"}).get_text()
a = first_search.find('a')
link = a['href']
if link[0] == '/':
link = "https://www.google.com"+link
# GUI App
root = Tk()
root.geometry("1000x650")
root.title("Google.com")
root.configure(background='white')
def open_page():
global driver
driver = webdriver.Chrome("./chromedriver.exe")
driver.get(link)
def destroy_frame():
global result_container
result_container.pack_forget()
result_container = LabelFrame(root,bg="white",borderwidth=0,highlightthickness=0)
result_container.pack(pady=20)
def display_result():
global display_frm
display_frm = LabelFrame(result_container, padx=5, pady=5)
my_label = Label(display_frm, text="Search Result",font=('Bahnschrift Light', 10, 'normal'))
heading_lable = Label(display_frm, text=heading,font=('Bahnschrift Light', 15, 'normal'))
description_label = Label(display_frm, text=description, wraplength=700, justify=LEFT, font=('Bahnschrift Light', 10, 'normal'))
visit_button = Button(display_frm, text="Visit Page", command=open_page)
close_button = Button(display_frm, text="Close", command=destroy_frame)
display_frm.pack(pady=5, padx=30)
my_label.grid(row=0, column=0, padx=0)
heading_lable.grid(row=1, column=0)
description_label.grid(row=2, column=0)
visit_button.grid(row=3, column=0)
close_button.grid(row=3, column=1)
def search():
global result_container
result_container.pack_forget()
result_container = LabelFrame(root,bg="white",borderwidth=0,highlightthickness=0)
result_container.pack(pady=20)
search_term = str(search_entry.get())
google(search_term)
display_result()
my_img1 = ImageTk.PhotoImage(PIL.Image.open('./Images/google.png'))
empty_label1 = Label(root, height=7, bg="white")
google_label = Label(root, image=my_img1, bg="white")
empty_label2 = Label(root, height=2, bg="white")
search_entry = Entry(root, width=50, font=('Bahnschrift Light', 15, 'normal'))
empty_label3 = Label(root, height=1, bg="white")
search_button = Button(root, text="Google Search",padx=10, pady=10, command=search, bg="white")
result_container = LabelFrame(root,bg="white",borderwidth=0,highlightthickness=0)
empty_label1.pack()
google_label.pack()
empty_label2.pack()
search_entry.pack(ipady=2)
empty_label3.pack()
search_button.pack()
result_container.pack(pady=20)
root.mainloop()
| 30.307018 | 139 | 0.707959 | 475 | 3,455 | 5 | 0.305263 | 0.069474 | 0.04 | 0.036632 | 0.261053 | 0.144421 | 0.144421 | 0.144421 | 0.144421 | 0.144421 | 0 | 0.033345 | 0.140666 | 3,455 | 113 | 140 | 30.575221 | 0.766588 | 0.02055 | 0 | 0.121951 | 0 | 0.012195 | 0.14497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085366 | false | 0 | 0.085366 | 0 | 0.182927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
039ffde3f9ae262159889d92dcb803cf3d4e8069 | 1,910 | py | Python | cooper/demo/sweep_weights.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | cooper/demo/sweep_weights.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | cooper/demo/sweep_weights.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | """
To run this module, execute `python -m cooper.demo.sweep_weights`
from this repository's base directory.
To save the outputs to a file, use stdout redirection:
`python -m cooper.demo.sweep_weights > data.csv`
"""
from typing import List
from typing_extensions import TypeAlias
from multiprocessing import Pool
from cooper.ctrnn import Ctrnn
from cooper.util import get_beers_fitness
from tqdm.contrib.concurrent import process_map
# Weight: TypeAlias = tuple[int, int]
# Datum: TypeAlias = tuple[float, float, float]
# Param: TypeAlias = tuple[float, float]
THREAD_COUNT = 10
BOUNDS = 16.0
STEP = 4
PROGENITOR = Ctrnn()
PROGENITOR.set_bias(0, 4.515263949538321)
PROGENITOR.set_bias(1, -9.424874214362415)
PROGENITOR.set_weight(0, 0, 5.803844919954994)
PROGENITOR.set_weight(0, 1, 16.0)
PROGENITOR.set_weight(1, 0, -16.0)
PROGENITOR.set_weight(1, 1, 3.5073044750632754)
WEIGHT_A = (0, 0)
WEIGHT_B = (1, 1)
LOG_OUTPUT = True
def get_sweep(step: float = STEP, bounds: float = BOUNDS):
    params = []
    y = -bounds
    while y <= bounds:
        x = -bounds
        while x <= bounds:
            params.append((x, y))
            x += step
        y += step
    return params
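# For example (added illustration), get_sweep(step=16.0, bounds=16.0) returns the
# row-major grid [(-16.0, -16.0), (0.0, -16.0), (16.0, -16.0), (-16.0, 0.0), ...,
# (16.0, 16.0)] of candidate (w00, w11) weight pairs.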
def main(param):
ctrnn = PROGENITOR.clone()
ctrnn.weights[WEIGHT_A] = param[0]
ctrnn.weights[WEIGHT_B] = param[1]
fitness = get_beers_fitness(ctrnn)
datum = (param[0], param[1], fitness)
if LOG_OUTPUT:
print(f"{datum[0]},{datum[1]},{datum[2]:0.4f}")
return datum
def to_csv(data):
header = "a,b,fitness\n"
lines = [f"{d[0]},{d[1]},{d[2]}" for d in data]
return header + "\n".join(lines)
if __name__ == "__main__":
print(f"w{WEIGHT_A[0]}{WEIGHT_A[1]},w{WEIGHT_B[0]}{WEIGHT_B[1]},fitness", flush=True)
    sweep = get_sweep()
    data = process_map(main, sweep, max_workers=THREAD_COUNT, chunksize=1)
    # equivalent, without the progress bar: data = Pool(THREAD_COUNT).map(main, sweep)
| 26.527778 | 89 | 0.669634 | 289 | 1,910 | 4.287197 | 0.349481 | 0.062954 | 0.06134 | 0.027441 | 0.083939 | 0.083939 | 0 | 0 | 0 | 0 | 0 | 0.069948 | 0.191623 | 1,910 | 71 | 90 | 26.901408 | 0.732513 | 0.190576 | 0 | 0 | 0 | 0.021277 | 0.09316 | 0.065147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.12766 | 0 | 0.255319 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03a0253a80d8409783b6104ce76182534fc2fd5c | 494 | py | Python | en/add_symbol.py | Gabriel-Chen/Narcissus | 998a9d9c4dfed0e537c934acd94389aeeb35cf6b | [
"MIT"
] | null | null | null | en/add_symbol.py | Gabriel-Chen/Narcissus | 998a9d9c4dfed0e537c934acd94389aeeb35cf6b | [
"MIT"
] | null | null | null | en/add_symbol.py | Gabriel-Chen/Narcissus | 998a9d9c4dfed0e537c934acd94389aeeb35cf6b | [
"MIT"
] | null | null | null | import json
with open('key_words_en.json', 'r') as words_sheet:
    reader = json.load(words_sheet)
for item in reader:
    key_word = item['Environmental Keywords in English']
    symbol = item['symbol']
    # read the per-keyword file, tag every entry, then write it back
    with open(key_word + '.json', 'r') as f:
        gf = json.load(f)
    for tweet in gf:
        tweet['marker-symbol'] = symbol
    with open(key_word + '.json', 'w') as f:
        json.dump(gf, f)
| 26 | 60 | 0.566802 | 67 | 494 | 4.044776 | 0.402985 | 0.147601 | 0.110701 | 0.103321 | 0.162362 | 0.162362 | 0 | 0 | 0 | 0 | 0 | 0 | 0.291498 | 494 | 18 | 61 | 27.444444 | 0.774286 | 0 | 0 | 0.117647 | 0 | 0 | 0.165992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03a171ff1e04756ab7e71d65f6b4386a31a3421e | 2,453 | py | Python | volatility/volatility_compare.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | volatility/volatility_compare.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | volatility/volatility_compare.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | import yfinance as yf
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from arch import arch_model
from volatility.utils import get_percent_chg, Option, set_plot, get_ATR
start = datetime(2000, 1, 1)
end = datetime(2021, 3, 17)
symbol = 'QQQ'
tickerData = yf.Ticker(symbol)
df = tickerData.history(period='1d', start=start, end=end)
df['Date'] = df.index
df['vol_5'] = 50 * np.log(df['Close'] / df['Close'].shift(1)).rolling(window=5).std() * np.sqrt(5)
df['vol_15'] = 50 * np.log(df['Close'] / df['Close'].shift(1)).rolling(window=15).std() * np.sqrt(21)
df['vol_5'] = df['vol_5'].fillna(0)
df['vol_15'] = df['vol_15'].fillna(0)
get_ATR(df, 5, f=50)
get_ATR(df, 15, f=50)
get_percent_chg(df, 5)
get_percent_chg(df, 15)
closes = df.Close
returns = df.Close.pct_change().fillna(0)
df['ret_1a'] = returns
test_size = 365*5
test_size = 300
keyList, keyList_vol, keyList_ATR = ['ret_5', 'ret_15'], ['vol_5', 'vol_15'], ['ATR_5', 'ATR_15']
fig, ax = plt.subplots(figsize=(10, 5), nrows=3, ncols=1)
k = 0
for k in range(len(keyList)):
key, key_vol, key_ATR = keyList[k], keyList_vol[k], keyList_ATR[k]
returns = 100 * df[key].dropna()
predictions = []
print('key', key, 'key_vol', key_vol)
for i in range(test_size):
train = returns[:-(test_size-i)]
model = arch_model(train, p=2, q=2)
model_fit = model.fit(disp='off')
pred_val = model_fit.forecast(horizon=1)
predictions.append(np.sqrt(pred_val.variance.values[-1,:][0]))
predictions = pd.Series(predictions, index=returns.index[-test_size:])
ax[k].plot(df['Date'][-test_size:], df[key_ATR][-test_size:], linewidth=0.5, color='g')
ax[k].plot(df['Date'][-test_size:], df['vol_5'][-test_size:], linewidth=0.5, color='b')
ax[k].plot(df['Date'][-test_size:], predictions, linewidth=0.5, color='r')
ax[k].xaxis.set_ticklabels([])
set_plot(ax[k])
ax[k].legend([key_ATR, 'vol_5', 'Garch Vol '+key], loc=2, fontsize=8)
k += 1
ax[k].set_xlabel('Date')
ax[k].plot(df['Date'][-test_size:], np.array(closes[len(closes)-test_size:])/5-50, label='Close', color='b')
ax[k].plot(df['Date'][-test_size:], 100 * df['ret_5'][-test_size:], label='ret_5', linewidth=0.5, color='r')
ax[k].plot(df['Date'][-test_size:], 100 * df['ret_15'][-test_size:], label='ret_15', linewidth=0.5, color='g')
set_plot(ax[k])
ax[k].legend(['Close', 'ret_5', 'ret_15'], loc=2, fontsize=8)
plt.show() | 42.293103 | 110 | 0.653893 | 434 | 2,453 | 3.539171 | 0.267281 | 0.083333 | 0.027344 | 0.035156 | 0.24349 | 0.231771 | 0.203776 | 0.141276 | 0.111328 | 0.089844 | 0 | 0.053296 | 0.128007 | 2,453 | 58 | 111 | 42.293103 | 0.664797 | 0 | 0 | 0.036364 | 0 | 0 | 0.085982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.127273 | 0 | 0.127273 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03a24ae5d5f90ce732d9ff0c7b909cd8a3827243 | 1,588 | py | Python | enhanced_purchase.py | RadekD/gmp | 5b413ef71f09389f80afdd94750189c4b34fde99 | [
"BSD-2-Clause"
] | null | null | null | enhanced_purchase.py | RadekD/gmp | 5b413ef71f09389f80afdd94750189c4b34fde99 | [
"BSD-2-Clause"
] | null | null | null | enhanced_purchase.py | RadekD/gmp | 5b413ef71f09389f80afdd94750189c4b34fde99 | [
"BSD-2-Clause"
] | null | null | null | from .event import event
def enhanced_item(name, unit_price, quantity=None, item_id=None,
category=None, brand=None, variant=None, **extra_data):
payload = {
'nm': name, 'pr': str(unit_price), 'qt': quantity or 1
}
if item_id:
payload['id'] = item_id
if category:
payload['ca'] = category
if brand:
payload['br'] = brand
if variant:
payload['va'] = variant
payload.update(extra_data)
return payload
def enhanced_purchase(transaction_id, items, revenue,
url_page, tax=None, shipping=None, host=None,
affiliation=None, coupon=None,
**extra_data):
if not items:
raise ValueError('You need to specify at least one item')
for e in event('ecommerce', 'purchase'):
yield e
payload = {
'pa': 'purchase', 'ti': transaction_id, 'dp': url_page,
'tr': str(revenue), 'tt': '0'}
if shipping:
payload['ts'] = str(shipping)
if tax is not None:
payload['tt'] = str(tax)
if host:
payload['dh'] = host
if affiliation:
payload['ta'] = affiliation
if coupon:
payload['tcc'] = coupon
payload.update(extra_data)
for position, item in enumerate(items):
payload.update(_finalize_enhanced_purchase_item(item, position + 1))
yield payload
def _finalize_enhanced_purchase_item(item, position):
position_prefix = 'pr{0}'.format(position)
final_item = {}
for key, value in item.items():
final_item[position_prefix + key] = value
return final_item
| 26.032787 | 76 | 0.612091 | 199 | 1,588 | 4.743719 | 0.376884 | 0.038136 | 0.027542 | 0.04661 | 0.084746 | 0.084746 | 0 | 0 | 0 | 0 | 0 | 0.003433 | 0.266373 | 1,588 | 60 | 77 | 26.466667 | 0.806867 | 0 | 0 | 0.085106 | 0 | 0 | 0.064861 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.021277 | 0 | 0.12766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03a35033c40c291fd45a784eb8bab0f9516b6496 | 5,022 | py | Python | modules/manager.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | modules/manager.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | modules/manager.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | import json
import threading
from rich import print
from modules.request import request
import modules.vars as horsy_vars
import os
import zipfile
from modules.virustotal import scan_to_cli
from modules.http_status import handle
from ezzdl import dl
def install(package):
"""
Install an app
:param package:
:return:
"""
r = request.get(f"{horsy_vars.protocol}{horsy_vars.server_url}/packages/json/{package}")
r_code = handle(r.status_code)
r = r.text
r = json.loads(r)
if r_code[1] not in [200, 201]:
return r_code[0]
try:
# Inform the user
print(f"[green]App {r['name']} found, information loaded[/]")
# Create the app directory
if not os.path.exists('{1}apps\{0}'.format(r['name'], horsy_vars.horsypath)):
os.makedirs('{1}apps\{0}'.format(r['name'], horsy_vars.horsypath))
# Get all download files urls
to_download = [r['url']]
if r['download']:
print(f"Found dependency")
to_download.append(r['download'])
# Download all files
dl(to_download, '{0}apps\{1}'.format(horsy_vars.horsypath, r['name']))
print()
# Scan main file
scan_to_cli('{2}apps\{0}\{1}'.format(r['name'], r['url'].split('/')[-1], horsy_vars.horsypath))
print()
# Unzip the main file if needed
def unzip(file, where):
with zipfile.ZipFile(file, 'r') as zip_ref:
zip_ref.extractall(where)
print(f"[green]Extracted[/]")
if r['url'].split('.')[-1] == 'zip':
print(f"Extracting {r['url'].split('/')[-1]}")
unzip('{2}apps\{0}\{1}'.format(r['name'], r['url'].split('/')[-1], horsy_vars.horsypath),
'{1}apps\{0}'.format(r['name'], horsy_vars.horsypath))
os.remove('{2}apps/{0}/{1}'.format(r['name'], r['url'].split('/')[-1], horsy_vars.horsypath))
print()
# Scan dependencies
try:
if r['download']:
if scan_to_cli('{2}apps\{0}\{1}'.format(r['name'], r['download'].split('/')[-1],
horsy_vars.horsypath))['detect']['malicious'] > 0:
print(f"[red]Dependency can be malicious. It may run now, if this added to installation "
f"config[/]")
input("Press enter if you want continue, or ctrl+c to exit")
print()
except:
pass
# Execute install script
if r['install']:
print(f"Found install option")
threading.Thread(target=os.system, args=('{2}apps\{0}\{1}'.format(r['name'], r['install'],
horsy_vars.horsypath),)).start()
print()
# Create launch script
print(f"Generating launch script")
with open('{1}apps\{0}.bat'.format(r['name'], horsy_vars.horsypath), 'w+') as f:
f.write(f"@ECHO off\n")
f.write(f"""{r['run'].replace('$appdir$', f'%horsypath%/apps/{r["name"]}')} %*\n""")
# Update versions file
with open(horsy_vars.horsypath + 'apps/versions.json', 'r') as f:
versions = json.load(f)
with open(horsy_vars.horsypath + 'apps/versions.json', 'w') as f:
versions[r['name']] = r['version']
f.write(json.dumps(versions))
print(f"Versions file updated")
# Done message
print(f"[green][OK] All done![/]")
print(f"[green]You can run your app by entering [italic white]{r['name']}[/] in terminal[/]")
except:
print("[red]Unexpected error[/]")
raise
# return
def uninstall(package):
"""
Uninstall package
:param package:
:return:
"""
if os.path.exists('{1}apps/{0}'.format(package, horsy_vars.horsypath)):
os.system('rmdir /s /q "{1}apps/{0}"'.format(package, horsy_vars.horsypath))
print(f"[green][OK] Files deleted[/]")
else:
print(f"[red]App {package} is not installed or doesn't have files[/]")
if os.path.isfile('{1}apps/{0}.bat'.format(package, horsy_vars.horsypath)):
os.remove("{1}apps/{0}.bat".format(package, horsy_vars.horsypath))
print(f"[green][OK] Launch script deleted[/]")
else:
print(f"[red]App {package} is not installed or doesn't have launch script[/]")
def apps_list(is_gui=False):
"""
List all installed apps
:param is_gui:
:return:
"""
apps = list()
if os.path.exists('{0}apps'.format(horsy_vars.horsypath)):
if not is_gui:
print(f"[green]Installed apps:[/]")
for file in os.listdir('{0}apps'.format(horsy_vars.horsypath)):
if file.endswith(".bat") and not is_gui:
print(f"{file.split('.')[0]}")
elif file.endswith(".bat"):
apps.append(file.split('.')[0])
if is_gui:
return sorted(apps)
| 35.617021 | 110 | 0.545599 | 649 | 5,022 | 4.154083 | 0.257319 | 0.070104 | 0.120178 | 0.022255 | 0.32678 | 0.299703 | 0.287463 | 0.255564 | 0.20141 | 0.137611 | 0 | 0.014432 | 0.282557 | 5,022 | 140 | 111 | 35.871429 | 0.733833 | 0.073078 | 0 | 0.141304 | 0 | 0.021739 | 0.271316 | 0.033887 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.01087 | 0.108696 | 0 | 0.173913 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03a4223d11ef6ce96b39c23302d12d9c2d63b9be | 1,310 | py | Python | setup.py | aheadlead/download_wrapper | 036bc7cb071eadc0bc24a445bcaa5c2305d48083 | [
"MIT"
] | 1 | 2021-06-20T14:22:11.000Z | 2021-06-20T14:22:11.000Z | setup.py | aheadlead/download_wrapper | 036bc7cb071eadc0bc24a445bcaa5c2305d48083 | [
"MIT"
] | null | null | null | setup.py | aheadlead/download_wrapper | 036bc7cb071eadc0bc24a445bcaa5c2305d48083 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import sys
import download_wrapper
from setuptools import setup, find_packages
cmdclass = {}
try:
from sphinx.setup_command import BuildDoc
cmdclass = {'build_sphinx': BuildDoc}
except ImportError:
pass
name = download_wrapper.__NAME__
version = download_wrapper.__VERSION__
release = download_wrapper.__RELEASE__
setup(
name=name,
cmdclass=cmdclass,
version=version,
keywords=download_wrapper.__KEYWORDS__,
description=download_wrapper.__DESC__,
license=download_wrapper.__LICENSE__,
author=download_wrapper.__AUTHOR__,
author_email=download_wrapper.__AUTHOR_EMAIL__,
url=download_wrapper.__URL__,
packages=find_packages(),
classifiers=(
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
),
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'release': ('setup.py', release),
},
},
)
| 26.734694 | 51 | 0.654198 | 131 | 1,310 | 6.129771 | 0.450382 | 0.1868 | 0.124533 | 0.129514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010859 | 0.226718 | 1,310 | 48 | 52 | 27.291667 | 0.781836 | 0.025954 | 0 | 0 | 0 | 0 | 0.262951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02439 | 0.121951 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03ac01fd1a6c4846aeb2f1e3b6476d9fc533c05d | 2,249 | py | Python | saefportal/datalakes/models/local_datalake.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/datalakes/models/local_datalake.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/datalakes/models/local_datalake.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T15:02:52.000Z | 2020-12-16T15:02:52.000Z | import io
import json
import logging
import os
import shutil
from pathlib import Path
import pandas as pd
from django.db import models
from datalakes.util import get_wanted_file
logger = logging.getLogger(__name__)
class LocalDatalake(models.Model):
root_path = models.CharField(max_length=500, default="", blank=True)
def save(self, *args, **kwargs):
# If the object is not yet created, create the root path folder structure.
if not self.pk:
if self.root_path:
Path(self.abs_path).mkdir(parents=True, exist_ok=True)
super(LocalDatalake, self).save(*args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
path = self.root_path if self.root_path else ""
self.abs_path = os.path.abspath(path)
def __str__(self):
return f"Stored locally at {self.abs_path}"
def list_objects(self, path):
return os.listdir(f"{self.abs_path}/{path}")
def create_folder(self, path, folder_name):
full_path = f"{self.abs_path}/{path}/{folder_name}" if path else f"{self.abs_path}/{folder_name}"
Path(full_path).mkdir(parents=True, exist_ok=True)
logger.info(f"Created folder '{folder_name}' in local datalake.")
def delete_path(self, path):
full_path = f"{self.abs_path}/{path}"
os.remove(full_path) if os.path.isfile(full_path) else shutil.rmtree(full_path, ignore_errors=True)
logger.info(f"Deleted path '{path}' in local datalake.")
def upload_file(self, path, filename, content):
mode = "wb" if filename.split(".")[-1] == "parquet" else "w"
with open(f"{self.abs_path}/{path}/{filename}", mode) as file:
file.write(content)
logger.info(f"Uploaded '{filename}' to '{path}' in local datalake.")
def download_file(self, path, query="latest"):
filename, timestamp = get_wanted_file(query, self.list_objects(path))
with open(f"{self.abs_path}/{path}/{filename}", "rb") as file:
data = pd.read_parquet(io.BytesIO(file.read())) if filename.split(".")[-1] == "parquet" else json.load(file)
logger.info(f"Read '{filename}' from local datalake.")
return data, timestamp
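# --- Illustrative usage (added sketch; not part of the original file) ---
# lake = LocalDatalake.objects.create(root_path="datalake")
# lake.create_folder("", "datasets")
# lake.upload_file("datasets", "snapshot.json", json.dumps({"ok": True}))
# data, ts = lake.download_file("datasets")  # whichever file get_wanted_file deems latest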
| 33.567164 | 120 | 0.655847 | 318 | 2,249 | 4.468553 | 0.323899 | 0.044335 | 0.069669 | 0.050669 | 0.202674 | 0.16045 | 0.122449 | 0.045039 | 0 | 0 | 0 | 0.002801 | 0.206314 | 2,249 | 66 | 121 | 34.075758 | 0.793277 | 0.032014 | 0 | 0 | 0 | 0 | 0.190345 | 0.08046 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.204545 | 0.045455 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03ae81522e3beb368f2e7e1784f87746e2f971d3 | 766 | py | Python | tracing/tracing/value/add_device_info.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | null | null | null | tracing/tracing/value/add_device_info.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | null | null | null | tracing/tracing/value/add_device_info.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 1 | 2020-07-24T05:13:01.000Z | 2020-07-24T05:13:01.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import tempfile
from tracing.value import add_shared_diagnostic
def AddDeviceInfo(histograms_json_filename, chrome_version, os_name, os_version,
gpu_info, arch, ram):
device_info = {
'chromeVersion': chrome_version,
'osName': os_name,
'osVersion': os_version,
'gpuInfo': gpu_info,
'arch': arch,
'ram': ram,
}
  # Open in text mode so json.dump can write str, and flush before handing the
  # filename off so the diagnostic is fully on disk when it is read back.
  with tempfile.NamedTemporaryFile(mode='w') as diagnostic_file:
    json.dump(device_info, diagnostic_file)
    diagnostic_file.flush()
    return add_shared_diagnostic.AddSharedDiagnostic(
        histograms_json_filename, 'device', diagnostic_file.name)
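# --- Illustrative usage (added sketch; argument values are made up) ---
# AddDeviceInfo('histograms.json', chrome_version='55.0.2883.87',
#               os_name='linux', os_version='4.4', gpu_info='intel',
#               arch='x86_64', ram=8 * 1024 ** 3)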
| 30.64 | 80 | 0.721932 | 96 | 766 | 5.541667 | 0.604167 | 0.078947 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006525 | 0.199739 | 766 | 24 | 81 | 31.916667 | 0.861338 | 0.20235 | 0 | 0 | 0 | 0 | 0.079077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03afdcb8ce074b115b45a4eebc1daa532aca42d3 | 9,803 | py | Python | fast_knn_nmt/data/utils.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | null | null | null | fast_knn_nmt/data/utils.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | null | null | null | fast_knn_nmt/data/utils.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
"""
@desc: data helpers for fast-knn-nmt: mmap warm-up, token frequency counting,
and token offset / alignment indexing.
"""
import numpy as np
import math
from fairseq.data import data_utils
from fairseq.tasks.translation import TranslationTask
from fairseq.data.language_pair_dataset import LanguagePairDataset
from tqdm import tqdm
from multiprocessing import Pool
from typing import Tuple
from fast_knn_nmt.utils.logger import get_logger
from .path_utils import *
LOGGING = get_logger(__name__)
def warmup_mmap_file(path, n=1000, verbose=True, use_log=True):
megabytes = 1024 * 1024
    if use_log:
LOGGING.info(f"Warming up file {path}")
total = math.floor(os.path.getsize(path)/megabytes)
pbar = tqdm(total=total, desc=f"Warm up") if verbose else None
with open(path, 'rb') as stream:
while stream.read(n * megabytes):
if pbar is not None:
update = n
if update + pbar.n > total:
update = total - pbar.n
pbar.update(update)
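# e.g. (added illustration): warmup_mmap_file("data-bin/train.en-de.en.bin")
# sequentially pages the file into the OS cache before random reads hit it.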
def count_chunk_freq(dataset, start, end, vocab_size) -> np.array:
freq = np.zeros([vocab_size], dtype=np.int32)
for sent_idx in range(start, end):
src_ids = dataset[sent_idx]
for token_idx in src_ids:
freq[token_idx] += 1
return freq
def get_token_freq(data_dir, mode, prefix, lang, dictionary=None, dataset=None, num_workers=1, max_sent=0) -> np.array:
"""
get token frequency
Returns:
token_freq: np.array of shape [num_tokens]
"""
cache_path = token_freq_path(data_dir, mode, lang, max_sent=max_sent)
if os.path.exists(cache_path):
LOGGING.info(f"Use cached token freq from {cache_path}")
return np.load(cache_path, allow_pickle=True)
dictionary = dictionary or TranslationTask.load_dictionary(dictionary_path(data_dir, lang))
dataset = dataset or data_utils.load_indexed_dataset(
fairseq_dataset_path(data_dir, mode, prefix, lang),
dictionary
)
max_sent = min(max_sent, len(dataset)) if max_sent else len(dataset)
freq = np.zeros([len(dictionary)], dtype=np.int32)
if num_workers == 1:
for sent_idx in tqdm(range(max_sent), desc="Counting token frequencies"):
src_ids = dataset[sent_idx]
for token_idx in src_ids:
freq[token_idx] += 1
else:
pool = Pool(processes=num_workers)
results = []
chunk_size = max_sent // num_workers
offset = 0
for worker_id in range(num_workers):
results.append(
pool.apply_async(
count_chunk_freq,
(dataset,
offset,
offset + chunk_size if worker_id < num_workers-1 else len(dataset),
len(dictionary),
),
)
)
offset += chunk_size
pool.close()
pool.join()
for r in results:
freq += r.get()
np.save(cache_path, freq)
return freq
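# Sketch of a typical call (directory layout, prefix, and language code are
# illustrative assumptions, not taken from this repository):
#
#   freq = get_token_freq("data-bin/de-en", mode="train", prefix="train",
#                         lang="de", num_workers=os.cpu_count())
#   print(int(freq.sum()), "tokens counted")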
def load_token_2d_offsets(data_dir, mode, prefix, lang, freq=None, dictionary=None, dataset=None, all=False, max_sent=0):
"""
build or load cached token 2d offsets
Returns:
token_2d_offsets:
if all=False, it is a list of token offsets, grouped by token idx.
token_2d_offsets[token_idx] is an array of shape [token_freq, 2],
which contains the sentence indexes and intra-sentence offsets where token_idx appears in dataset
if all = True, it is an array of shape [num_tokens, 2]
"""
cache_file = token_2d_offsets_path(data_dir, mode, lang, all_tokens=all, max_sent=max_sent)
if os.path.exists(cache_file):
LOGGING.info(f"Loading token 2d-offsets from {cache_file}")
token_2d_offsets = np.load(cache_file, allow_pickle=True)
return token_2d_offsets
dictionary = dictionary or TranslationTask.load_dictionary(dictionary_path(data_dir, lang))
dataset = dataset or data_utils.load_indexed_dataset(
fairseq_dataset_path(data_dir, mode, prefix, lang),
dictionary
)
max_sent = min(max_sent, len(dataset)) if max_sent else len(dataset)
if not all:
freq = freq if freq is not None else get_token_freq(data_dir, mode, prefix, lang, dictionary, dataset,
num_workers=os.cpu_count(), max_sent=max_sent)
token_2d_offsets = [np.zeros([freq[idx], 2], dtype=np.int32) for idx in range(len(dictionary))]
fill_offsets = np.zeros([len(dictionary)], dtype=np.int32)
offset = 0
for sent_idx in tqdm(range(max_sent), desc="Gathering token offsets"):
src_ids = dataset[sent_idx]
for intra_offset, token_idx in enumerate(src_ids):
fill_offset = fill_offsets[token_idx]
if fill_offset >= freq[token_idx]:
                    LOGGING.warning(f"token count of {token_idx} exceeds the precomputed freq {freq[token_idx]}; skipping it")
continue
token_2d_offsets[token_idx][fill_offset][0] = sent_idx
token_2d_offsets[token_idx][fill_offset][1] = intra_offset
fill_offsets[token_idx] += 1
offset += len(src_ids)
else:
num_tokens = np.sum(dataset.sizes)
token_2d_offsets = np.zeros([num_tokens, 2], dtype=np.int32)
offset = 0
for sent_idx in tqdm(range(max_sent), desc="Gathering token offsets"):
for token_idx in range(len(dataset[sent_idx])):
token_2d_offsets[offset][0] = sent_idx
token_2d_offsets[offset][1] = token_idx
offset += 1
np.save(cache_file, token_2d_offsets)
LOGGING.info(f"Saved token 2d-offsets to {cache_file}")
return token_2d_offsets
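# Sketch: looking up every occurrence of a token id with the grouped layout
# (all=False); the arguments are illustrative assumptions:
#
#   offsets = load_token_2d_offsets("data-bin/de-en", "train", "train", "de")
#   sent_idx, intra_offset = offsets[token_idx][0]   # first occurrence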
def compute_range_aligns(dataset: LanguagePairDataset, start: int, end: int, pid=0) -> Tuple[np.ndarray, np.ndarray]:
start = max(0, start)
end = min(end, len(dataset))
align_dataset = dataset.align_dataset
token_aligns_num = np.sum(align_dataset.sizes[start: end])
assert token_aligns_num % 2 == 0
token_aligns_num = token_aligns_num // 2
token_aligns = np.zeros([token_aligns_num], dtype=np.int64)
num_tokens = np.sum(dataset.src_sizes[start: end])
token_align_offsets = np.zeros([num_tokens, 2], dtype=np.int64)
offset_idx = 0
align_idx = 0
    iterator = tqdm(range(start, end), desc="Computing align array") if pid == 0 else range(start, end)
for sent_idx in iterator:
aligns = align_dataset[sent_idx].reshape(-1, 2)
src_len = dataset.src_sizes[sent_idx]
prev_src = -1
prev_start = -1
prev_end = -1
for i in range(len(aligns)):
s = aligns[i][0]
t = aligns[i][1]
if s != prev_src:
if prev_src != -1:
token_align_offsets[offset_idx] = [prev_start, prev_end]
offset_idx += 1
for j in range(prev_src + 1, s):
token_align_offsets[offset_idx] = [prev_end, prev_end]
offset_idx += 1
prev_src = s
prev_start = align_idx
prev_end = align_idx + 1
else:
prev_end += 1
token_aligns[align_idx] = t
align_idx += 1
token_align_offsets[offset_idx] = [prev_start, prev_end]
offset_idx += 1
for j in range(prev_src + 1, src_len):
token_align_offsets[offset_idx] = [prev_end, prev_end]
offset_idx += 1
return token_aligns, token_align_offsets
def get_aligns(data_dir: str, subset: str = "train", dataset: LanguagePairDataset = None, workers: int = 1) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
data_dir: path to indexed src/align data
subset: train/valid/test
dataset: LanguagePairDataset
workers: cpu cores to build array
Returns:
token_aligns: [num_aligns]
token_align_offsets: [num_tokens, 2], each token's start to end aligns in token_aligns
"""
cache_file = align_path(data_dir=data_dir, mode=subset)
if os.path.exists(cache_file):
LOGGING.info(f"Loading aligns numpy array from {cache_file}")
file = np.load(cache_file)
token_aligns, token_align_offsets = file["aligns"], file["offsets"]
return token_aligns, token_align_offsets
if workers <= 1:
token_aligns, token_align_offsets = compute_range_aligns(dataset, start=0, end=len(dataset))
else:
results = []
pool = Pool(workers)
chunk_size = math.ceil(len(dataset) / workers)
for worker_idx in range(workers):
start = worker_idx * chunk_size
end = start + chunk_size
results.append(pool.apply_async(
func=compute_range_aligns,
args=(dataset, start, end, worker_idx)
))
pool.close()
pool.join()
token_aligns_num = np.sum(dataset.align_dataset.sizes) // 2
token_aligns = np.zeros([token_aligns_num], dtype=np.int64)
num_tokens = np.sum(dataset.src_sizes)
token_align_offsets = np.zeros([num_tokens, 2], dtype=np.int64)
align_idx = 0
offset_idx = 0
for r in results:
chunk_aligns, chunk_offsets = r.get()
token_align_offsets[offset_idx: offset_idx + len(chunk_offsets)] = chunk_offsets + align_idx
offset_idx += len(chunk_offsets)
token_aligns[align_idx: align_idx + len(chunk_aligns)] = chunk_aligns
align_idx += len(chunk_aligns)
LOGGING.info(f"Saving align numpy array to {cache_file}")
np.savez(cache_file, aligns=token_aligns, offsets=token_align_offsets)
return token_aligns, token_align_offsets
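# Sketch: reading the target positions aligned to source token `t` in the
# flattened corpus (assumes the arrays built above; arguments illustrative):
#
#   aligns, align_offsets = get_aligns("data-bin/de-en", "train", dataset)
#   start, end = align_offsets[t]
#   print(aligns[start:end])   # target-side positions aligned to token t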
| 37.703846 | 137 | 0.624605 | 1,327 | 9,803 | 4.370761 | 0.141673 | 0.036034 | 0.041034 | 0.023793 | 0.410345 | 0.316897 | 0.288793 | 0.248448 | 0.24431 | 0.215862 | 0 | 0.01447 | 0.280934 | 9,803 | 259 | 138 | 37.849421 | 0.808342 | 0.078547 | 0 | 0.308901 | 0 | 0 | 0.047341 | 0 | 0 | 0 | 0 | 0 | 0.005236 | 1 | 0.031414 | false | 0 | 0.052356 | 0 | 0.125654 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03b09e85b4641e24b2163f9fb84c1ad44350f628 | 334 | py | Python | src/algorithms/crush.py | apierr/_hackerrank | 5bea44fed2fc6b454b31566c36c9bf66c04fa11e | [
"MIT"
] | null | null | null | src/algorithms/crush.py | apierr/_hackerrank | 5bea44fed2fc6b454b31566c36c9bf66c04fa11e | [
"MIT"
] | 5 | 2019-09-25T11:20:07.000Z | 2020-06-30T09:58:05.000Z | src/algorithms/crush.py | apierr/hackerrank | 5bea44fed2fc6b454b31566c36c9bf66c04fa11e | [
"MIT"
] | 1 | 2021-02-02T15:06:58.000Z | 2021-02-02T15:06:58.000Z |
# HackerRank "Array Manipulation": apply each range update to a difference
# array, then scan once and track the running-sum maximum.
n, inputs = [int(tok) for tok in input().split(" ")]
diff = [0] * (n + 1)
for _ in range(inputs):
    x, y, incr = [int(tok) for tok in input().split(" ")]
    diff[x - 1] += incr           # range starts at index x-1
    if y < len(diff):             # keep the end marker (index y) in bounds
        diff[y] -= incr
best = running = 0
for delta in diff:
    running += delta
    if best < running:
        best = running
print(best)
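# Worked micro-example of the difference-array trick (HackerRank sample):
# n=5 with ops (1,2,100), (2,5,100), (3,4,100) gives
# diff = [100, 100, 0, 0, -100, -100]; the running sums are
# 100, 200, 200, 200, 100, 0, so the answer printed is 200.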
| 17.578947 | 53 | 0.47006 | 59 | 334 | 2.644068 | 0.322034 | 0.076923 | 0.089744 | 0.102564 | 0.307692 | 0.307692 | 0.307692 | 0.307692 | 0 | 0 | 0 | 0.017316 | 0.308383 | 334 | 18 | 54 | 18.555556 | 0.658009 | 0 | 0 | 0 | 0 | 0 | 0.012012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03b48010ad3cd8943b83873c6007c3a73442461a | 9,572 | py | Python | crits/emails/email.py | dicato/crits | 8d500f7175855f1aeefa94caa783f981062ba869 | [
"MIT"
] | null | null | null | crits/emails/email.py | dicato/crits | 8d500f7175855f1aeefa94caa783f981062ba869 | [
"MIT"
] | null | null | null | crits/emails/email.py | dicato/crits | 8d500f7175855f1aeefa94caa783f981062ba869 | [
"MIT"
] | null | null | null | import datetime
from dateutil.parser import parse as date_parser
from mongoengine import Document, StringField, ListField
from django.conf import settings
from cybox.common import String, DateTime
from cybox.core import Observable
from cybox.objects.address_object import Address, EmailAddress
from cybox.objects.email_message_object import EmailHeader, EmailMessage
from crits.core.crits_mongoengine import CritsBaseAttributes, CritsSourceDocument
from crits.core.fields import CritsDateTimeField
from crits.emails.migrate import migrate_email
from crits.core.data_tools import convert_datetimes_to_string
class RawHeadersField(StringField):
"""
Raw Header class.
"""
def transform(self, value):
"""
If we receive a list instead of a string, convert it.
:param value: The raw headers.
:type value: str or list
:returns: str
"""
if isinstance(value, list):
tmp = ''
for v in value:
tmp = ' '.join([tmp, self.transform(v)])
return tmp
return value
class Email(CritsBaseAttributes, CritsSourceDocument, Document):
"""
Email Class.
"""
meta = {
# mongoengine adds fields _cls and _types and uses them to filter database
# responses unless you disallow inheritance. In other words, we
# can't see any of our old data unless we add _cls and _types
# attributes to them or turn off inheritance.
#So we'll turn inheritance off.
# (See http://mongoengine-odm.readthedocs.org/en/latest/guide/defining-documents.html#working-with-existing-data)
"collection": settings.COL_EMAIL,
"crits_type": 'Email',
"latest_schema_version": 1,
"schema_doc": {
'boundary': 'Email boundary',
'campaign': 'List [] of campaigns attributed to this email',
'cc': 'List [] of CC recipients',
'date': 'String of date header field',
'from': 'From header field',
'helo': 'HELO',
'isodate': 'ISODate conversion of date header field',
'message_id': 'Message-ID header field',
'modified': 'When this object was last modified',
'objects': 'List of objects in this email',
'originating_ip': 'Originating-IP header field',
'raw_body': 'Email raw body',
'raw_header': 'Email raw headers',
'relationships': 'List of relationships with this email',
'reply_to': 'Reply-To header field',
'sender': 'Sender header field',
'shared_with': 'Dictionary of sources that this email may be shared with and whether it has been shared already',
'source': 'List [] of sources that provided information on this email',
'subject': 'Email subject',
'to': 'To header field',
'x_originating_ip': 'X-Originating-IP header field',
'x_mailer': 'X-Mailer header field',
},
"jtable_opts": {
'details_url': 'crits.emails.views.email_detail',
'details_url_key': 'id',
'default_sort': "isodate DESC",
'searchurl': 'crits.emails.views.emails_listing',
'fields': [ "from_address", "subject", "isodate",
"source", "campaign", "id", "to",
"status", "cc" ],
'jtopts_fields': [ "details",
"from",
"recip",
"subject",
"isodate",
"source",
"campaign",
"status",
"favorite",
"id"],
'hidden_fields': [],
'linked_fields': [ "source", "campaign",
"from", "subject" ],
'details_link': 'details',
'no_sort': ['recip', 'details']
}
}
boundary = StringField()
cc = ListField(StringField())
date = StringField(required=True)
from_address = StringField(db_field="from")
helo = StringField()
# isodate is an interally-set attribute and on save will be overwritten
# with the isodate version of the email's date attribute.
isodate = CritsDateTimeField()
message_id = StringField()
originating_ip = StringField()
raw_body = StringField()
raw_header = RawHeadersField(db_field="raw_headers")
reply_to = StringField()
sender = StringField()
subject = StringField()
to = ListField(StringField())
x_originating_ip = StringField()
x_mailer = StringField()
def migrate(self):
"""
Migrate to latest schema version.
"""
migrate_email(self)
def _custom_save(self, force_insert=False, validate=True, clean=False,
write_concern=None, cascade=None, cascade_kwargs=None,
_refs=None, username=None, **kwargs):
"""
Override our core custom save. This will ensure if there is a "date"
string available for the email that we generate a corresponding
"isodate" field which is more useful for database sorting/searching.
"""
if hasattr(self, 'date'):
if self.date:
if isinstance(self.date, datetime.datetime):
self.isodate = self.date
self.date = convert_datetimes_to_string(self.date)
else:
self.isodate = date_parser(self.date, fuzzy=True)
else:
if self.isodate:
if isinstance(self.isodate, datetime.datetime):
self.date = convert_datetimes_to_string(self.isodate)
else:
self.isodate = None
return super(self.__class__, self)._custom_save(force_insert, validate,
clean, write_concern, cascade, cascade_kwargs, _refs, username)
def to_cybox_observable(self, exclude=None):
"""
Convert an email to a CybOX Observables.
Pass parameter exclude to specify fields that should not be
included in the returned object.
Returns a tuple of (CybOX object, releasability list).
To get the cybox object as xml or json, call to_xml() or
to_json(), respectively, on the resulting CybOX object.
"""
        if exclude is None:
            exclude = []
observables = []
obj = EmailMessage()
# Assume there is going to be at least one header
obj.header = EmailHeader()
if 'message_id' not in exclude:
obj.header.message_id = String(self.message_id)
if 'subject' not in exclude:
obj.header.subject = String(self.subject)
if 'sender' not in exclude:
obj.header.sender = Address(self.sender, Address.CAT_EMAIL)
if 'reply_to' not in exclude:
obj.header.reply_to = Address(self.reply_to, Address.CAT_EMAIL)
if 'x_originating_ip' not in exclude:
obj.header.x_originating_ip = Address(self.x_originating_ip,
Address.CAT_IPV4)
if 'x_mailer' not in exclude:
obj.header.x_mailer = String(self.x_mailer)
if 'boundary' not in exclude:
obj.header.boundary = String(self.boundary)
if 'raw_body' not in exclude:
obj.raw_body = self.raw_body
if 'raw_header' not in exclude:
obj.raw_header = self.raw_header
#copy fields where the names differ between objects
if 'helo' not in exclude and 'email_server' not in exclude:
obj.email_server = String(self.helo)
if ('from_' not in exclude and 'from' not in exclude and
'from_address' not in exclude):
obj.header.from_ = EmailAddress(self.from_address)
if 'date' not in exclude and 'isodate' not in exclude:
obj.header.date = DateTime(self.isodate)
observables.append(Observable(obj))
return (observables, self.releasability)
@classmethod
def from_cybox(cls, cybox_obs):
"""
Convert a Cybox DefinedObject to a MongoEngine Email object.
:param cybox_obs: The cybox object to create the Email from.
:type cybox_obs: :class:`cybox.core.Observable``
:returns: :class:`crits.emails.email.Email`
"""
cybox_obj = cybox_obs.object_.properties
email = cls()
if cybox_obj.header:
email.from_address = str(cybox_obj.header.from_)
if cybox_obj.header.to:
email.to = [str(recpt) for recpt in cybox_obj.header.to.to_list()]
for field in ['message_id', 'sender', 'reply_to', 'x_originating_ip',
'subject', 'date', 'x_mailer', 'boundary']:
setattr(email, field, str(getattr(cybox_obj.header, field)))
email.helo = str(cybox_obj.email_server)
if cybox_obj.raw_body:
email.raw_body = str(cybox_obj.raw_body)
if cybox_obj.raw_header:
email.raw_header = str(cybox_obj.raw_header)
return email
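# Illustrative round-trip sketch (assumes a configured CRITs environment and
# elides the required source/releasability handling):
#
#   email = Email(date='2015-01-01', subject='test', to=['a@example.com'])
#   observables, releasability = email.to_cybox_observable(exclude=['raw_body'])
#   clone = Email.from_cybox(observables[0])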
| 38.910569 | 125 | 0.570205 | 1,045 | 9,572 | 5.086124 | 0.244976 | 0.015052 | 0.036124 | 0.033866 | 0.071308 | 0.029727 | 0.013547 | 0 | 0 | 0 | 0 | 0.000316 | 0.337965 | 9,572 | 245 | 126 | 39.069388 | 0.838409 | 0.158483 | 0 | 0.018634 | 0 | 0 | 0.182296 | 0.010904 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031056 | false | 0 | 0.074534 | 0 | 0.254658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03b7e0e6620e7937b8035ee5d2db08dc54dcd32a | 18,147 | py | Python | scratchpad/tomostream_roi/tomostream/tomostream3d.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/tomostream_roi/tomostream/tomostream3d.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/tomostream_roi/tomostream/tomostream3d.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null |
'''
Adaptation of Tomostream orthoslice code for doing full 3d reconstructions
Then to apply DL-based image processing or computer vision steps.
'''
import pvaccess as pva
import numpy as np
import queue
import time
import h5py
import threading
import signal
import util
import log
from epics import PV
import solver3d as solver
from roi_utils.roi import load_seg_nn
# TODO(Zliu): verify these paths point at the trained model files.
torch_model_path = '/home/beams/TOMO/gas_hydrates_3dzoom_Dec2021/models/mdl-ep00230.pth'
torch_model_ADet = '/home/beams/TOMO/gas_hydrates_3dzoom_Dec2021/models/mdl-ep0200.pth'
class EncoderStream():
""" Class for streaming reconstuction of ortho-slices on a machine with GPU.
The class creates and broadcasts a pva type pv for concatenated reconstructions
of (x,y,z) ortho-slices. Reconstructons are done by the FBP formula
with direct discretization of the circular integral.
Projection data is taken from the detector pv (pva type channel)
and stored in a queue, dark and flat fields are taken from the pv broadcasted
by the server on the detector machine (see tomoscan_stream.py from Tomoscan package).
Parameters
----------
args : dict
Dictionary of pv variables.
"""
def __init__(self, pv_files, macros):
log.setup_custom_logger("./encoderstream.log")
# init pvs
self.config_pvs = {}
self.control_pvs = {}
self.pv_prefixes = {}
if not isinstance(pv_files, list):
pv_files = [pv_files]
for pv_file in pv_files:
self.read_pv_file(pv_file, macros)
self.show_pvs()
self.epics_pvs = {**self.config_pvs, **self.control_pvs}
prefix = self.pv_prefixes['TomoScan']
# tomoscan pvs
self.epics_pvs['FrameType'] = PV(prefix + 'FrameType')
self.epics_pvs['NumAngles'] = PV(prefix + 'NumAngles')
self.epics_pvs['RotationStep'] = PV(prefix + 'RotationStep')
# Replace PSOPVPrefix to link to check a TomoScanStream PV so it returns if scan IOC is down
# self.epics_pvs['PSOPVPrefix'] = PV(prefix + 'PSOPVPrefix')
# if self.epics_pvs['PSOPVPrefix'].get(as_string=True) == None:
# log.error("TomoScan is down")
# log.error("Type exit() here and start TomoScan first")
# return
# pva type channel for flat and dark fields pv broadcasted from the detector machine
self.epics_pvs['PvaDark'] = pva.Channel(self.epics_pvs['DarkPVAName'].get())
self.pva_dark = self.epics_pvs['PvaDark']
self.epics_pvs['PvaFlat'] = pva.Channel(self.epics_pvs['FlatPVAName'].get())
self.pva_flat = self.epics_pvs['PvaFlat']
self.epics_pvs['PvaTheta'] = pva.Channel(self.epics_pvs['ThetaPVAName'].get())
self.pva_theta = self.epics_pvs['PvaTheta']
# pva type channel that contains projection and metadata
image_pv_name = PV(self.epics_pvs['ImagePVAPName'].get()).get()
self.epics_pvs['PvaPImage'] = pva.Channel(image_pv_name + 'Image')
self.epics_pvs['PvaPDataType_RBV'] = pva.Channel(image_pv_name + 'DataType_RBV')
self.pva_plugin_image = self.epics_pvs['PvaPImage']
# create pva type pv for reconstrucion by copying metadata from the data pv, but replacing the sizes
# This way the ADViewer (NDViewer) plugin can be also used for visualizing reconstructions.
pva_image_data = self.pva_plugin_image.get('')
pva_image_dict = pva_image_data.getStructureDict()
self.pv_rec = pva.PvObject(pva_image_dict)
# run server for reconstruction pv
recon_pva_name = self.epics_pvs['ReconPVAName'].get()
self.server_rec = pva.PvaServer(recon_pva_name, self.pv_rec)
self.epics_pvs['StartRecon'].put('Done')
self.epics_pvs['AbortRecon'].put('Yes')
self.epics_pvs['StartRecon'].add_callback(self.pv_callback)
self.epics_pvs['AbortRecon'].add_callback(self.pv_callback)
self.slv = None
# Set ^C, ^Z interrupt to abort the stream reconstruction
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTSTP, self.signal_handler)
# Start the watchdog timer thread
thread = threading.Thread(target=self.reset_watchdog, args=(), daemon=True)
thread.start()
def pv_callback(self, pvname=None, value=None, char_value=None, **kw):
"""Callback function that is called by pyEpics when certain EPICS PVs are changed
The PVs that are handled are:
- ``StartScan`` : Calls ``run_fly_scan()``
- ``AbortScan`` : Calls ``abort_scan()``
"""
log.debug('pv_callback pvName=%s, value=%s, char_value=%s', pvname, value, char_value)
if (pvname.find('StartRecon') != -1) and (value == 1):
thread = threading.Thread(target=self.begin_stream, args=())
thread.start()
elif (pvname.find('AbortRecon') != -1) and (value == 0):
thread = threading.Thread(target=self.abort_stream, args=())
thread.start()
def signal_handler(self, sig, frame):
"""Calls abort_scan when ^C or ^Z is typed"""
if (sig == signal.SIGINT) or (sig == signal.SIGTSTP):
self.abort_stream()
def reset_watchdog(self):
"""Sets the watchdog timer to 5 every 3 seconds"""
while True:
self.epics_pvs['Watchdog'].put(5)
time.sleep(3)
def reinit_monitors(self):
"""Reinit pv monitoring functions with updating data sizes"""
log.warning('reinit monitors with updating data sizes')
# stop monitors
self.pva_dark.stopMonitor()
self.pva_flat.stopMonitor()
self.pva_plugin_image.stopMonitor()
while(self.pva_dark.isMonitorActive() or
self.pva_flat.isMonitorActive() or
self.pva_plugin_image.isMonitorActive()):
time.sleep(0.01)
        time.sleep(0.5)  # TODO: the monitors appear to need extra settling time here; investigate why
# take new data sizes
pva_image_data = self.pva_plugin_image.get('')
width = pva_image_data['dimension'][0]['size']
height = pva_image_data['dimension'][1]['size']
self.pv_rec['dimension'] = [{'size': 3*width, 'fullSize': 3*width, 'binning': 1},
{'size': width, 'fullSize': width, 'binning': 1}]
# self.theta = self.epics_pvs['ThetaArray'].get()[:self.epics_pvs['NumAngles'].get()]
self.theta = self.pva_theta.get()['value']
print(self.theta)
#exit()
# update limits on sliders
# epics_pvs['OrthoXlimit'].put(width-1)
# epics_pvs['OrthoYlimit'].put(width-1)
# epics_pvs['OrthoZlimit'].put(height-1)
## create a queue to store projections
# find max size of the queue, the size is equal to the number of angles in the interval of size pi
if(max(self.theta)<180):
buffer_size = len(self.theta)
else:
dtheta = self.theta[1]-self.theta[0]
buffer_size = np.where(self.theta-self.theta[0]>180-dtheta)[0][0]
if(buffer_size*width*height>pow(2,32)):
log.error('buffer_size %s not enough memory', buffer_size)
exit(0)
# queue
self.data_queue = queue.Queue(maxsize=buffer_size)
# self.recon_queue = queue.Queue(maxsize=1)
# take datatype
datatype_list = self.epics_pvs['PvaPDataType_RBV'].get()['value']
self.datatype = datatype_list['choices'][datatype_list['index']].lower()
# update parameters from in the GUI
center = self.epics_pvs['Center'].get()
idx = self.epics_pvs['OrthoX'].get()
idy = self.epics_pvs['OrthoY'].get()
idz = self.epics_pvs['OrthoZ'].get()
rotx = self.epics_pvs['RotX'].get()
roty = self.epics_pvs['RotY'].get()
rotz = self.epics_pvs['RotZ'].get()
fbpfilter = self.epics_pvs['FilterType'].get(as_string=True)
dezinger = self.epics_pvs['Dezinger'].get(as_string=False)
if hasattr(self,'width'): # update parameters for new sizes
self.epics_pvs['Center'].put(center*width/self.width)
self.epics_pvs['OrthoX'].put(int(idx*width/self.width))
self.epics_pvs['OrthoY'].put(int(idy*width/self.width))
self.epics_pvs['OrthoZ'].put(int(idz*width/self.width))
## create solver class on GPU
self.slv = solver.Solver(buffer_size, width, height,
center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger, self.datatype)
self.slv.dn_model = load_seg_nn(torch_model_path)
# temp buffers for storing data taken from the queue
self.proj_buffer = np.zeros([buffer_size, width*height], dtype=self.datatype)
self.theta_buffer = np.zeros(buffer_size, dtype='float32')
self.ids_buffer = np.zeros(buffer_size, dtype='int32')
self.width = width
self.height = height
self.buffer_size = buffer_size
## start PV monitoring
# start monitoring dark and flat fields pv
self.pva_dark.monitor(self.add_dark,'')
self.pva_flat.monitor(self.add_flat,'')
# start monitoring projection data
self.pva_plugin_image.monitor(self.add_data,'')
self.stream_is_running = True
def add_data(self, pv):
"""PV monitoring function for adding projection data and corresponding angle to the queue"""
        frame_type = self.epics_pvs['FrameType'].get(as_string=True)
        if self.stream_is_running and frame_type == 'Projection':
cur_id = pv['uniqueId'] # unique projection id for determining angles and places in the buffers
# write projection, theta, and id into the queue
data_item = {'projection': pv['value'][0][util.type_dict[self.datatype]],
'theta': self.theta[min(cur_id,len(self.theta)-1)],
'id': np.mod(cur_id, self.buffer_size)
}
if(not self.data_queue.full()):
self.data_queue.put(data_item)
else:
log.warning("queue is full, skip frame")
# pass
# log.info('id: %s type %s queue size %s', cur_id, frame_type, self.data_queue.qsize())
def add_dark(self, pv):
"""PV monitoring function for reading new dark fields from manually running pv server
on the detector machine"""
        if self.stream_is_running and len(pv['value']) == self.width * self.height:  # dark field pv has the correct size
            data = pv['value'].reshape(self.height, self.width)
            self.slv.set_dark(data)
            log.info('new dark fields acquired, norm %s', np.linalg.norm(data))
def add_flat(self, pv):
"""PV monitoring function for reading new flat fields from manually running pv server
on the detector machine"""
        if self.stream_is_running and len(pv['value']) == self.width * self.height:  # flat field pv has the correct size
            data = pv['value'].reshape(self.height, self.width)
            self.slv.set_flat(data)
            log.info('new flat fields acquired, norm %s', np.linalg.norm(data))
def begin_stream(self):
"""Run streaming reconstruction by sending new incoming projections from the queue to the solver class,
and broadcasting the reconstruction result to a pv variable
"""
self.reinit_monitors()
self.epics_pvs['ReconStatus'].put('Running')
while(self.stream_is_running):
# take parameters from the GUI
center = self.epics_pvs['Center'].get()
idx = self.epics_pvs['OrthoX'].get()
idy = self.epics_pvs['OrthoY'].get()
idz = self.epics_pvs['OrthoZ'].get()
rotx = self.epics_pvs['RotX'].get()
roty = self.epics_pvs['RotY'].get()
rotz = self.epics_pvs['RotZ'].get()
fbpfilter = self.epics_pvs['FilterType'].get(as_string=True)
dezinger = self.epics_pvs['Dezinger'].get(as_string=False)
# take items from the queue
nitem = 0
while ((not self.data_queue.empty()) and (nitem < self.buffer_size)):
item = self.data_queue.get()
# reinit if data sizes were updated (e.g. after data binning by ROI1)
if(len(item['projection'])!=self.width*self.height):
self.reinit_monitors()
self.proj_buffer[nitem] = item['projection']
self.theta_buffer[nitem] = item['theta']
self.ids_buffer[nitem] = item['id']
nitem += 1
if(nitem == 0):
continue
# log.info('center %s: idx, idy, idz: %s %s %s, rotx, roty, rotz: %s %s %s, filter: %s, dezinger: %s', center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger)
# reconstruct on GPU
util.tic()
# log.info("DATA SHAPE: %s"%str(self.proj_buffer[:nitem].shape))
rec = self.slv.recon_optimized(
self.proj_buffer[:nitem], self.theta_buffer[:nitem], self.ids_buffer[:nitem], center, idx, idy, idz, rotx, roty, rotz, fbpfilter, dezinger)
self.epics_pvs['ReconTime'].put(util.toc())
self.epics_pvs['BufferSize'].put(f'{nitem}/{self.buffer_size}')
# write result to pv
idz, idy, idx = self.slv.roi_pt
rec[0:self.width,idx:idx+3] = np.nan
rec[idy:idy+3,0:self.width] = np.nan
rec[0:self.width,self.width+idx:self.width+idx+3] = np.nan
rec[idz:idz+3,self.width:2*self.width] = np.nan
rec[0:self.width,2*self.width+idy:2*self.width+idy+3] = np.nan
rec[idz:idz+3,2*self.width:3*self.width] = np.nan
self.pv_rec['value'] = ({'floatValue': rec.flatten()},)
self.epics_pvs['OrthoX'].put(idx)
self.epics_pvs['OrthoY'].put(idy)
self.epics_pvs['OrthoZ'].put(idz)
self.epics_pvs['StartRecon'].put('Done')
self.epics_pvs['ReconStatus'].put('Stopped')
def abort_stream(self):
"""Aborts streaming that is running.
"""
self.epics_pvs['ReconStatus'].put('Aborting reconstruction')
if(self.slv is not None):
self.slv.free()
self.stream_is_running = False
def read_pv_file(self, pv_file_name, macros):
"""Reads a file containing a list of EPICS PVs to be used by TomoScan.
Parameters
----------
pv_file_name : str
Name of the file to read
macros: dict
Dictionary of macro substitution to perform when reading the file
"""
pv_file = open(pv_file_name)
lines = pv_file.read()
pv_file.close()
lines = lines.splitlines()
for line in lines:
is_config_pv = True
if line.find('#controlPV') != -1:
line = line.replace('#controlPV', '')
is_config_pv = False
line = line.lstrip()
# Skip lines starting with #
if line.startswith('#'):
continue
# Skip blank lines
if line == '':
continue
pvname = line
# Do macro substitution on the pvName
for key in macros:
pvname = pvname.replace(key, macros[key])
# Replace macros in dictionary key with nothing
dictentry = line
for key in macros:
dictentry = dictentry.replace(key, '')
epics_pv = PV(pvname)
if is_config_pv:
self.config_pvs[dictentry] = epics_pv
else:
self.control_pvs[dictentry] = epics_pv
# if dictentry.find('PVAPName') != -1:
# pvname = epics_pv.value
# key = dictentry.replace('PVAPName', '')
# self.control_pvs[key] = PV(pvname)
if dictentry.find('PVName') != -1:
pvname = epics_pv.value
key = dictentry.replace('PVName', '')
self.control_pvs[key] = PV(pvname)
if dictentry.find('PVPrefix') != -1:
pvprefix = epics_pv.value
key = dictentry.replace('PVPrefix', '')
self.pv_prefixes[key] = pvprefix
def show_pvs(self):
"""Prints the current values of all EPICS PVs in use.
The values are printed in three sections:
- config_pvs : The PVs that are part of the scan configuration and
are saved by save_configuration()
- control_pvs : The PVs that are used for EPICS control and status,
but are not saved by save_configuration()
- pv_prefixes : The prefixes for PVs that are used for the areaDetector camera,
file plugin, etc.
"""
print('configPVS:')
for config_pv in self.config_pvs:
print(config_pv, ':', self.config_pvs[config_pv].get(as_string=True))
print('')
print('controlPVS:')
for control_pv in self.control_pvs:
print(control_pv, ':', self.control_pvs[control_pv].get(as_string=True))
print('')
print('pv_prefixes:')
for pv_prefix in self.pv_prefixes:
print(pv_prefix, ':', self.pv_prefixes[pv_prefix])
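# Hypothetical entry point; the PV file name and macro values below are
# placeholders, not taken from this repository:
#
#   if __name__ == "__main__":
#       EncoderStream(pv_files=["encoderStream.pv"],
#                     macros={"$(P)": "32id:", "$(R)": "TomoStream:"})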
| 42.698824 | 175 | 0.58737 | 2,271 | 18,147 | 4.557023 | 0.190665 | 0.051792 | 0.070731 | 0.010146 | 0.267465 | 0.200599 | 0.17393 | 0.154604 | 0.121171 | 0.092376 | 0 | 0.006977 | 0.297074 | 18,147 | 424 | 176 | 42.799528 | 0.804327 | 0.266766 | 0 | 0.170124 | 0 | 0 | 0.09858 | 0.012342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049793 | false | 0 | 0.049793 | 0 | 0.103734 | 0.045643 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03b8734c360f66455710a790f4a8290958137378 | 1,351 | py | Python | userbot/modules/help.py | masbentoooredoo/WeebProject | d34c14b42801915518a87831f952e5ade95183e2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/help.py | masbentoooredoo/WeebProject | d34c14b42801915518a87831f952e5ade95183e2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/help.py | masbentoooredoo/WeebProject | d34c14b42801915518a87831f952e5ade95183e2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 10 | 2020-12-13T14:32:47.000Z | 2021-04-11T06:45:35.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""Userbot help command"""
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern=r"^\.help(?: |$)(.*)")
async def help_handler(event):
"""For .help command."""
args = event.pattern_match.group(1).lower()
if args:
if args in CMD_HELP:
await event.edit(str(CMD_HELP[args]))
else:
            await event.edit(f"**“{args}”** is not a valid module name.")
    else:
        head = "Please specify which module you want help for!"
        head2 = f"Loaded modules: `{len(CMD_HELP)} modules`"
        head3 = "Usage: `.help [module name]`"
        head4 = "All available commands are listed below:"
string = ""
sep1 = "`---------------------------------------------------`"
for i in sorted(CMD_HELP):
string += "`" + str(i)
string += "` ❖ "
await event.edit(
f"{head}\
\n{head2}\
\n{head3}\
\n{head4}\
\n\n{sep1}\
\n{string}\
\n{sep1}"
)
| 33.775 | 79 | 0.513694 | 153 | 1,351 | 4.496732 | 0.529412 | 0.050872 | 0.061047 | 0.043605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016358 | 0.321244 | 1,351 | 39 | 80 | 34.641026 | 0.732824 | 0.156921 | 0 | 0.068966 | 0 | 0 | 0.28424 | 0.049719 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03bb6948a075551b1b577cc28c8637a3fab83ee4 | 5,300 | py | Python | cognite/experimental/_api/transformation_notifications.py | AlexThunder/cognite-sdk-python-experimental | 468d29e7809793ed45cef5da25dca22418839972 | [
"Apache-2.0"
] | null | null | null | cognite/experimental/_api/transformation_notifications.py | AlexThunder/cognite-sdk-python-experimental | 468d29e7809793ed45cef5da25dca22418839972 | [
"Apache-2.0"
] | null | null | null | cognite/experimental/_api/transformation_notifications.py | AlexThunder/cognite-sdk-python-experimental | 468d29e7809793ed45cef5da25dca22418839972 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Union
from cognite.client import utils
from cognite.client._api_client import APIClient
from cognite.experimental._constants import HANDLER_FILE_NAME, LIST_LIMIT_CEILING, LIST_LIMIT_DEFAULT, MAX_RETRIES
from cognite.experimental.data_classes import (
OidcCredentials,
Transformation,
TransformationBlockedInfo,
TransformationDestination,
TransformationList,
TransformationNotification,
TransformationNotificationFilter,
TransformationNotificationList,
)
class TransformationNotificationsAPI(APIClient):
_RESOURCE_PATH = "/transformations/notifications"
_LIST_CLASS = TransformationNotificationList
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create(
self, notification: Union[TransformationNotification, List[TransformationNotification]]
) -> Union[TransformationNotification, TransformationNotificationList]:
"""`Subscribe for notifications on the transformation errors. <https://docs.cognite.com/api/playground/#operation/subscribeTransformationNotifications>`_
Args:
notification (Union[TransformationNotification, List[TransformationNotification]]): Notification or list of notifications to create.
Returns:
Created notification(s)
Examples:
Create new notifications:
>>> from cognite.experimental import CogniteClient
>>> from cognite.experimental.data_classes import TransformationNotification
>>> c = CogniteClient()
            >>> notifications = [TransformationNotification(transformation_id=1, destination="my@email.com"), TransformationNotification(transformation_external_id="transformation2", destination="other@email.com")]
>>> res = c.transformations.notifications.create(notifications)
"""
utils._auxiliary.assert_type(notification, "notification", [TransformationNotification, list])
return self._create_multiple(notification)
def list(
self,
transformation_id: Optional[int] = None,
transformation_external_id: str = None,
destination: str = None,
limit: Optional[int] = LIST_LIMIT_DEFAULT,
) -> TransformationNotificationList:
"""`List notification subscriptions. <https://docs.cognite.com/api/playground/#operation/listTransformationNotifications>`_
Args:
transformation_id (Optional[int]): List only notifications for the specified transformation. The transformation is identified by internal numeric ID.
transformation_external_id (str): List only notifications for the specified transformation. The transformation is identified by externalId.
destination (str): Filter by notification destination.
limit (int): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25.
Returns:
TransformationNotificationList: List of transformation notifications
Example:
List all notifications::
>>> from cognite.experimental import CogniteClient
>>> c = CogniteClient()
>>> notifications_list = c.transformations.notifications.list()
List all notifications by transformation id::
>>> from cognite.experimental import CogniteClient
>>> c = CogniteClient()
>>> notifications_list = c.transformations.notifications.list(transformation_id = 1)
List all notifications by transformation external id::
>>> from cognite.experimental import CogniteClient
>>> c = CogniteClient()
>>> notifications_list = c.transformations.notifications.list(transformation_external_id = "myExternalId")
List all notifications by destination::
>>> from cognite.experimental import CogniteClient
>>> c = CogniteClient()
>>> notifications_list = c.transformations.notifications.list(destination = "my@email.com")
"""
if limit in [float("inf"), -1, None]:
limit = LIST_LIMIT_CEILING
filter = TransformationNotificationFilter(
transformation_id=transformation_id,
transformation_external_id=transformation_external_id,
destination=destination,
).dump(camel_case=True)
return self._list(method="GET", limit=limit, filter=filter,)
def delete(self, id: Union[int, List[int]] = None,) -> None:
"""`Deletes the specified notification subscriptions on the transformation. Does nothing when the subscriptions already don't exist <https://doc.cognitedata.com/api/playground/#operation/deleteTransformationNotifications>`_
Args:
            id (Union[int, List[int]]): Id or list of ids
Returns:
None
Examples:
            Delete notifications by id::
>>> from cognite.experimental import CogniteClient
>>> c = CogniteClient()
>>> c.transformations.notifications.delete(id=[1,2,3])
"""
self._delete_multiple(
ids=id, wrap_ids=True,
)
| 42.4 | 231 | 0.675472 | 471 | 5,300 | 7.469214 | 0.280255 | 0.034395 | 0.05884 | 0.04946 | 0.333712 | 0.262081 | 0.220011 | 0.196703 | 0.196703 | 0.175668 | 0 | 0.002486 | 0.241132 | 5,300 | 124 | 232 | 42.741935 | 0.872203 | 0.562453 | 0 | 0 | 0 | 0 | 0.025052 | 0.015658 | 0 | 0 | 0 | 0 | 0.023256 | 1 | 0.093023 | false | 0 | 0.116279 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03bb90b3312fca9d9a089bd4f563f11678d9d344 | 2,110 | py | Python | 2020/day13.py | PintarM/AdventOfCode | 5837c42ab1ce98efb02aec1ac17967611e2a3851 | [
"MIT"
] | null | null | null | 2020/day13.py | PintarM/AdventOfCode | 5837c42ab1ce98efb02aec1ac17967611e2a3851 | [
"MIT"
] | null | null | null | 2020/day13.py | PintarM/AdventOfCode | 5837c42ab1ce98efb02aec1ac17967611e2a3851 | [
"MIT"
] | 1 | 2020-12-02T13:27:52.000Z | 2020-12-02T13:27:52.000Z | # -*- coding: utf-8 -*-
"""Advent Of Code 2020, Day 13
@author: Matevz
"""
from sympy.ntheory.modular import crt
def get_input(file_name):
"""Process input text file."""
try:
file = open(file_name, 'r')
content = file.read()
except IOError:
print('Cannot read file {}'.format(file_name))
finally:
file.close()
timestamp, lines = content.splitlines()
lines = [int(i) if i != 'x' else i for i in lines.split(',')]
return [int(timestamp), lines]
def waiting_time(timestamp, bus):
"""Bus waiting time."""
before = (timestamp // bus) * bus
return (before + bus) - timestamp
def earliest_bus_product(timestamp, lines):
"""Product of earliest bus time and line number."""
buses = [int(i) for i in lines if i != 'x' ]
times = [waiting_time(timestamp, bus) for bus in buses]
min_time = min(times)
bus_no = buses[times.index(min_time)]
return min_time * bus_no
def buses_delays(timestamp, lines):
"""Bus delay for certain timestamp."""
return [((timestamp + i) % int(v)) for i,v in enumerate(lines) if v != 'x']
def earliest_consecutive_timestamp_naive(lines, start_at=0):
"""Naive implementation of earliest consecutive timestamo."""
increment = int(lines[0])
timestamp = (start_at // increment) * increment
while sum(buses_delays(timestamp, lines)) != 0:
timestamp += increment
return timestamp
# https://www.geeksforgeeks.org/python-sympy-crt-method/
def earliest_consecutive_timestamp(lines):
"""Earliest consecutive timestamp using Chinese Remainder Theorem."""
data = {v: (v - i) % v for i,v in enumerate(lines) if v != 'x'}
moduli = data.keys()
remainders = data.values()
return crt(moduli, remainders)[0]
def main():
"""Solution to both parts of Advent of Code puzzle."""
timestamp, lines = get_input('input/day13.txt')
part1 = earliest_bus_product(timestamp, lines)
part2 = earliest_consecutive_timestamp(lines)
print('First answer {}, second answer {}.'.format(part1, part2))
if __name__ == "__main__":
main()
| 27.402597 | 79 | 0.652133 | 278 | 2,110 | 4.823741 | 0.377698 | 0.09396 | 0.08352 | 0.01044 | 0.1044 | 0.038777 | 0.038777 | 0.038777 | 0.038777 | 0.038777 | 0 | 0.010186 | 0.209005 | 2,110 | 76 | 80 | 27.763158 | 0.793289 | 0.196209 | 0 | 0 | 0 | 0 | 0.049607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.02439 | 0 | 0.341463 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03bd2bfd0c61d2a79ebac5bc28f8f5b65ddc6392 | 5,154 | py | Python | conveyorclient/v1/plans.py | Hybrid-Cloud/conveyorclient | 329fe37ef4fb7229e5bec9575a86c06e5d4da934 | [
"Apache-2.0"
] | null | null | null | conveyorclient/v1/plans.py | Hybrid-Cloud/conveyorclient | 329fe37ef4fb7229e5bec9575a86c06e5d4da934 | [
"Apache-2.0"
] | null | null | null | conveyorclient/v1/plans.py | Hybrid-Cloud/conveyorclient | 329fe37ef4fb7229e5bec9575a86c06e5d4da934 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from conveyorclient import base
from conveyorclient.common import constants
class Plan(base.Resource):
def __repr__(self):
return "<Plan: %s>" % self.plan_id
def reset_plan_state(self, state):
self.manager.reset_plan_state(self.plan_id, state)
class PlanManager(base.ManagerWithFind):
"""
Manage :class:`Resource` resources.
"""
resource_class = Plan
def get(self, plan):
"""
Get a plan.
:param plan: The ID of the plan.
:rtype: :class:`Plan`
"""
return self._get("/plans/%s" % plan, "plan")
def delete(self, plan):
"""
Delete a plan.
:param plan: The :class:`Plan` to delete.
"""
return self._delete("/plans/%s" % plan)
def update(self, plan, values):
"""
Update a plan.
:param plan: The :class:`Plan` to update.
:param values: key-values to update.
"""
if not values or not isinstance(values, dict):
return
body = {"plan": values}
self._update("/plans/%s" % plan, body)
def list(self, search_opts=None, marker=None, limit=None, sort_key=None,
sort_dir=None):
"""
Get a list of all plans.
:rtype: list of :class:`Plan`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in search_opts.items():
if val:
qparams[opt] = val
if marker:
qparams['marker'] = marker
if limit and limit != -1:
qparams['limit'] = limit
if sort_key is not None:
if sort_key in constants.PLAN_SORT_KEY_VALUES:
qparams['sort_key'] = sort_key
else:
raise ValueError('sort_key must be one of the following: %s.'
% ', '.join(constants.PLAN_SORT_KEY_VALUES))
if sort_dir is not None:
if sort_dir in constants.SORT_DIR_VALUES:
qparams['sort_dir'] = sort_dir
else:
raise ValueError('sort_dir must be one of the following: %s.'
% ', '.join(constants.SORT_DIR_VALUES))
if qparams:
query_string = "?%s" % urlencode(
sorted(list(qparams.items()), key=lambda x: x[0]))
else:
query_string = ""
return self._list("/plans/detail%s" % query_string, "plans")
def create(self, plan_type, resources, plan_name=None):
"""
Create a clone or migrate plan.
:param type: plan type. 'clone' or 'migrate'
:param resources: A list of resources. "
"Eg: [{'type':'OS::Nova::Server', 'id':'xx'}]
:param name: plan name.
:rtype: :class:`Plan (Actually, only plan_id and
resource_dependencies)`
"""
if not resources or not isinstance(resources, list):
raise base.exceptions.BadRequest("'resources' must be a list.")
body = {"plan": {"plan_type": plan_type, "clone_obj": resources,
"plan_name": plan_name}}
return self._create('/plans', body, 'plan')
def create_plan_by_template(self, template, plan_name=None):
"""
Create a clone or migrate plan by template.
:rtype: :class:`Plan`
"""
body = {"plan": {"template": template,
"plan_name": plan_name}}
resp, body = self.api.client.post("/plans/create_plan_by_template",
body=body)
return body['plan']
def download_template(self, plan):
"""
Create a clone or migrate plan by template.
:param plan:The ID of the plan.
:rtype: :dict
"""
return self._action('download_template', plan)
def reset_plan_state(self, plan, state):
self._action("os-reset_state", plan, {"plan_status": state})
def force_delete_plan(self, plan):
self._action('force_delete-plan', plan, {'plan_id': plan})
def _action(self, action, plan, info=None, **kwargs):
"""
Perform a plan "action" -- download_templdate etc.
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/plans/%s/action' % base.getid(plan)
return self.api.client.post(url, body=body)
| 32.828025 | 78 | 0.569461 | 626 | 5,154 | 4.557508 | 0.265176 | 0.025237 | 0.018226 | 0.018927 | 0.158079 | 0.106905 | 0.106905 | 0.106905 | 0.051875 | 0 | 0 | 0.002836 | 0.315871 | 5,154 | 156 | 79 | 33.038462 | 0.806296 | 0.269693 | 0 | 0.066667 | 0 | 0 | 0.119464 | 0.015152 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.066667 | 0.013333 | 0.386667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03bd7fc88e1ab0df9ab78abcf865b46a9d55c33e | 1,931 | py | Python | submissions/Miner/myCSPs.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | submissions/Miner/myCSPs.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | submissions/Miner/myCSPs.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | import csp
rgb = ['R', 'G', 'B']
d2 = {'A': rgb, 'B': rgb, 'C': ['R'], 'D': rgb}
v2 = d2.keys()
n2 = {'A': ['B', 'C', 'D'],
'B': ['A', 'C', 'D'],
'C': ['A', 'B'],
'D': ['A', 'B']}
def constraints(A, a, B, b):
    if A == B:  # a variable compared with itself is trivially consistent
        return True
    if a == b:  # two neighboring regions may not share a color
        return False
    return True
c2 = csp.CSP(v2, d2, n2, constraints)
c2.label = 'Really Lame'
colors = ['R', 'G', 'B', 'Y']
domains = {
'Riften': colors,
'Eastmarch': colors,
'Falkreath': colors,
'Whiterun': colors,
'Winterhold': colors,
'ThePale': colors,
'Markarth': colors,
'Solitude': colors,
'Morthal': colors
}
variables = domains.keys()
neighbors = {
'Riften': ['Eastmarch', 'Whiterun', 'Falkreath'],
'Eastmarch': ['Riften', 'Winterhold', 'Whiterun', 'ThePale'],
'Falkreath': ['Riften', 'Whiterun', 'ThePale', 'Markarth'],
'Whiterun': ['Riften', 'Falkreath', 'Markarth', 'Eastmarch', 'ThePale', 'Morthal'],
'Winterhold': ['ThePale', 'Eastmarch'],
'ThePale': ['Winterhold', 'Morthal', 'Whiterun', 'Eastmarch'],
'Markarth': ['Falkreath', 'Whiterun', 'Morthal', 'Solitude'],
'Solitude': ['Markarth', 'Morthal'],
'Morthal': ['ThePale', 'Whiterun', 'Markarth']
}
skyrim = csp.CSP(variables, domains, neighbors, constraints)
skyrim.label = 'Skyrim Districts'
myCSPs = [
{
'csp': skyrim,
},
{
'csp': skyrim,
'select_unassigned_variable': csp.mrv
},
{
'csp': skyrim,
'order_domain_values': csp.lcv
},
{
'csp': skyrim,
'inference': csp.mac
},
{
'csp': skyrim,
'inference': csp.forward_checking
},
{
'csp': skyrim,
'select_unassigned_variable': csp.mrv,
'order_domain_values': csp.lcv,
'inference': csp.mac,
# 'inference': csp.forward_checking,
}
]
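# Minimal sketch of solving each configuration (assumes the AIMA csp module's
# backtracking_search signature):
#
#   for spec in myCSPs:
#       options = dict(spec)
#       problem = options.pop('csp')
#       print(problem.label, csp.backtracking_search(problem, **options))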
| 21.943182 | 87 | 0.51942 | 200 | 1,931 | 4.965 | 0.285 | 0.012085 | 0.006042 | 0.01007 | 0.136959 | 0.07855 | 0.07855 | 0 | 0 | 0 | 0 | 0.006356 | 0.266701 | 1,931 | 87 | 88 | 22.195402 | 0.694915 | 0.037804 | 0 | 0.115942 | 0 | 0 | 0.314995 | 0.028047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.014493 | 0 | 0.072464 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03bf606f96d7db50104b5beeafeafef7cf2ccc0a | 2,504 | py | Python | bot.py | Stienvdh/IEToolie | bfa5cb35cdb63a4dceba935c5098e6ab6729228c | [
"BSD-Source-Code"
] | null | null | null | bot.py | Stienvdh/IEToolie | bfa5cb35cdb63a4dceba935c5098e6ab6729228c | [
"BSD-Source-Code"
] | null | null | null | bot.py | Stienvdh/IEToolie | bfa5cb35cdb63a4dceba935c5098e6ab6729228c | [
"BSD-Source-Code"
] | null | null | null | """
Copyright (c) 2021 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
from flask import Flask, request, jsonify
from webexteamssdk import WebexTeamsAPI
import os, re
import ie2k
# get environment variables
WT_BOT_TOKEN = os.environ['WT_BOT_TOKEN']
# uncomment next line if you are implementing a notifier bot
#WT_ROOM_ID = os.environ['WT_ROOM_ID']
# uncomment next line if you are implementing a controller bot
WT_BOT_EMAIL = os.environ['WT_BOT_EMAIL']
# start Flask and WT connection
app = Flask(__name__)
api = WebexTeamsAPI(access_token=WT_BOT_TOKEN)
# defining the decorater and route registration for incoming alerts
@app.route('/', methods=['POST'])
def alert_received():
raw_json = request.get_json()
print(raw_json)
# customize the behaviour of the bot here
welcome_message = '''
Hi, I am IE Toolie.
I can do two things:
- Run the Mac sticky script: type "Stick"
- Find a device: type "Find *IP Address*"
Have a great day ☀!
'''
message_id = raw_json['data']['id']
message_object = api.messages.get(message_id)
message_text = message_object.text.strip().lower()
message = welcome_message
if "stick" in message_text:
message = "Running Sticky MAC"
elif "find" in message_text:
IP_REGEX = r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b'
ip_list = re.findall(IP_REGEX, message_text)
ip_address = convert_list_to_string(ip_list[0],'')
message = "Finding IP address: " + ip_address
# uncomment if you are implementing a controller bot
WT_ROOM_ID = raw_json['data']['roomId']
personEmail_json = raw_json['data']['personEmail']
if personEmail_json != WT_BOT_EMAIL:
api.messages.create(roomId=WT_ROOM_ID, text=message)
return jsonify({'success': True})
def convert_list_to_string(list_object, seperator=''):
return seperator.join(list_object)
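# One-time webhook registration sketch so Webex posts new messages to this
# app; the target URL is a placeholder for your own public endpoint:
#
#   api.webhooks.create(name="IE Toolie", targetUrl="https://example.com/",
#                       resource="messages", event="created")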
if __name__=="__main__":
app.run() | 32.519481 | 72 | 0.708866 | 374 | 2,504 | 4.572193 | 0.462567 | 0.017544 | 0.018713 | 0.035088 | 0.074269 | 0.074269 | 0.074269 | 0.074269 | 0 | 0 | 0 | 0.008437 | 0.195288 | 2,504 | 77 | 73 | 32.519481 | 0.839702 | 0.395767 | 0 | 0 | 0 | 0.025641 | 0.232155 | 0.022015 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.102564 | 0.025641 | 0.205128 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c1692548de12b27e2465162cb0ac18bb61a489 | 5,224 | py | Python | stats_eddie/SVM.py | dalakada/TwiCSv2 | 40672a99a201f6e2aab9dd085e1f4a29e8253f3b | [
"MIT"
] | 2 | 2019-04-01T00:54:39.000Z | 2021-06-22T18:02:47.000Z | stats_eddie/SVM.py | dalakada/TwiCSv2 | 40672a99a201f6e2aab9dd085e1f4a29e8253f3b | [
"MIT"
] | null | null | null | stats_eddie/SVM.py | dalakada/TwiCSv2 | 40672a99a201f6e2aab9dd085e1f4a29e8253f3b | [
"MIT"
] | 2 | 2018-06-20T14:50:03.000Z | 2020-08-27T01:55:34.000Z |
# coding: utf-8
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from scipy import stats
class SVM1():
    def __init__(self, train):
        # Train the classifier once at construction time. read_csv accepts
        # either sep or delimiter (not both); a regex sep needs engine='python'.
        self.train = pd.read_csv(train, sep=r'\s*,\s*', engine='python')
self.train['normalized_cap']=self.train['cap']/self.train['cumulative']
self.train['normalized_capnormalized_substring-cap']=self.train['substring-cap']/self.train['cumulative']
self.train['normalized_s-o-sCap']=self.train['s-o-sCap']/self.train['cumulative']
self.train['normalized_all-cap']=self.train['all-cap']/self.train['cumulative']
self.train['normalized_non-cap']=self.train['non-cap']/self.train['cumulative']
self.train['normalized_non-discriminative']=self.train['non-discriminative']/self.train['cumulative']
'''self.cols = ['length','cap','substring-cap','s-o-sCap','all-cap','non-cap','non-discriminative','cumulative',
'normalized_cap',
'normalized_capnormalized_substring-cap',
'normalized_s-o-sCap',
'normalized_all-cap',
'normalized_non-cap',
'normalized_non-discriminative'
]'''
self.cols = ['length','normalized_cap',
'normalized_capnormalized_substring-cap',
'normalized_s-o-sCap',
'normalized_all-cap',
'normalized_non-cap',
'normalized_non-discriminative'
]
self.colsRes = ['class']
# self.trainArr = self.train.as_matrix(self.cols) #training array
# #print(self.trainArr)
# self.trainRes = self.train.as_matrix(self.colsRes) # training results
self.trainArr = self.train[self.cols]
self.trainRes = self.train[self.colsRes].values
        self.clf = svm.SVC(probability=True)
        # ravel() flattens the (n, 1) label column that sklearn would otherwise
        # warn about; fit the data to the algorithm.
        self.clf.fit(self.trainArr, self.trainRes.ravel())
def run(self,x_test,z_score_threshold):
x_test['normalized_cap']=x_test['cap']/x_test['cumulative']
x_test['normalized_capnormalized_substring-cap']=x_test['substring-cap']/x_test['cumulative']
x_test['normalized_s-o-sCap']=x_test['s-o-sCap']/x_test['cumulative']
x_test['normalized_all-cap']=x_test['all-cap']/x_test['cumulative']
x_test['normalized_non-cap']=x_test['non-cap']/x_test['cumulative']
x_test['normalized_non-discriminative']=x_test['non-discriminative']/x_test['cumulative']
        # Select the feature columns used at training time.
        testArr = x_test[self.cols]
        # Probability of the positive (entity) class for each candidate.
        pred_prob = self.clf.predict_proba(testArr)
        x_test['probability'] = [p[1] for p in pred_prob]
        return x_test
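# Illustrative use of the classifier (file names are placeholders):
#
#   model = SVM1("training_set.csv")
#   scored = model.run(pd.read_csv("candidates.csv"), z_score_threshold=1.5)
#   print(scored.sort_values("probability", ascending=False).head())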
'''
# Legacy notebook cells, kept for reference. Cleaned up in place: Jupyter
# "In[...]" cell markers removed, Python 2 prints converted to Python 3,
# and the undefined name `random_forest` corrected to `random_forest_logistic`.

ali.to_csv("Classifier_Results.csv", sep=',', encoding='utf-8')

pred_class = clf.predict(testArr)
print(pred_class)

# simple accuracy over the held-out set
count = 0
for i in range(len(pred_class)):
    if pred_class[i] == testRes[i]:
        count += 1
print(float(count) / len(pred_class))

# positive-class probabilities
prob_holder = []
for idx, cl in enumerate(pred_prob):
    prob_holder.append(pred_prob[idx][1])

x_test.to_csv("svm_prob.csv", sep=';', encoding='utf-8')

random_forest_logistic = pd.read_csv("random_forest_logistic.csv", delimiter=";")
random_forest_logistic.insert(len(random_forest_logistic.columns), 'svm_with_prob', prob_holder)
print(random_forest_logistic)
random_forest_logistic.to_csv("random_forest_logistic_svm_FINAL.csv", sep=';', encoding='utf-8')

# confusion-matrix counts
TP = TN = FP = FN = 0
for idx, cl in enumerate(pred_prob):
    class_x = pred_class[idx]
    if class_x == testRes[idx] and class_x == 1:
        TP += 1
    elif class_x == testRes[idx] and class_x == 0:
        TN += 1
    if class_x == 1 and testRes[idx] == 0:
        FP += 1
    if class_x == 0 and testRes[idx] == 1:
        FN += 1
print(TP, TN, FP, FN)
''' | 20.730159 | 120 | 0.626723 | 740 | 5,224 | 4.221622 | 0.187838 | 0.056018 | 0.03073 | 0.044174 | 0.429257 | 0.377401 | 0.330026 | 0.237516 | 0.173496 | 0.173496 | 0 | 0.022936 | 0.207121 | 5,224 | 252 | 121 | 20.730159 | 0.731289 | 0.126149 | 0 | 0 | 0 | 0 | 0.27905 | 0.083715 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.125 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c1918314d95df834d6cdd5e651896610e70399 | 836 | py | Python | pyviews/rendering/tests/setup_tests.py | eumis/pyviews | 9f3a3a72eddda2bd33bbb16b45d9fa4f9ebfcc0b | [
"MIT"
] | 6 | 2018-02-16T14:37:53.000Z | 2020-03-20T13:22:17.000Z | pyviews/rendering/tests/setup_tests.py | eumis/pyviews | 9f3a3a72eddda2bd33bbb16b45d9fa4f9ebfcc0b | [
"MIT"
] | null | null | null | pyviews/rendering/tests/setup_tests.py | eumis/pyviews | 9f3a3a72eddda2bd33bbb16b45d9fa4f9ebfcc0b | [
"MIT"
] | 1 | 2019-03-15T12:47:42.000Z | 2019-03-15T12:47:42.000Z | from os.path import abspath
from injectool import resolve, SingletonResolver
from pytest import mark
from pyviews.rendering import RenderingPipeline
from pyviews.rendering.setup import use_rendering
@mark.usefixtures('container_fixture')
class UseRenderingTests:
@staticmethod
def test_views_folder():
"""should add views_folder dependency"""
use_rendering()
assert resolve('views_folder') == abspath('views')
@staticmethod
def test_views_extension():
"""should add view_ext dependency"""
use_rendering()
assert resolve('view_ext') == 'xml'
def test_rendering_pipeline(self):
"""should add singleton resolver for RenderingPipeline"""
use_rendering()
assert isinstance(self.container.get_resolver(RenderingPipeline), SingletonResolver)
| 26.967742 | 92 | 0.721292 | 88 | 836 | 6.670455 | 0.454545 | 0.081772 | 0.091993 | 0.081772 | 0.11925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192584 | 836 | 30 | 93 | 27.866667 | 0.86963 | 0.139952 | 0 | 0.277778 | 0 | 0 | 0.064011 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.166667 | false | 0 | 0.277778 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c2eb282e47a2c54bf098cbd77a6d4905f87e9b | 533 | py | Python | deneme.py | github21yandex/Calismalarim-Python | 1dad342b13c3e4000d5dd8cb79d776e3fc665e3a | [
"Unlicense"
] | null | null | null | deneme.py | github21yandex/Calismalarim-Python | 1dad342b13c3e4000d5dd8cb79d776e3fc665e3a | [
"Unlicense"
] | null | null | null | deneme.py | github21yandex/Calismalarim-Python | 1dad342b13c3e4000d5dd8cb79d776e3fc665e3a | [
"Unlicense"
] | null | null | null |
import sys
import locale
locale.setlocale(locale.LC_ALL, "tr_TR.utf-8")
cumle = "merhaba\tiyi\tmisiN?"
sesli="ıouieaüöIOUİEAÜÖ"
sesliler =""
sessizler=""
gecici=""
tur = "ğıüşöçĞİÜŞÖÇ"
ing = "giusocGIUSOC"
liste = ["ali", "veli", "samet"]
liste = [[1,2,3],[4,5,6],[7,8,9,10]]
liste2 = [1, 2, 3, "mehmed"]
w = open("deneme.txt", "w")
print("ali","ayşe","fatma","veli",sep="\n", file =w)
w.close()
f = open("deneme.txt","r")
dosya = f.readlines()
tc ="12434"
print("tc = %s" %tc)
print("tc = {isim}".format(isim="onur"))
| 14.805556 | 52 | 0.60788 | 86 | 533 | 3.767442 | 0.709302 | 0.012346 | 0.018519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.133208 | 533 | 35 | 53 | 15.228571 | 0.651515 | 0 | 0 | 0 | 0 | 0 | 0.29434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c346bda5036c16e0584f1e697773c94ff92837 | 1,377 | py | Python | benchmark.py | btclib-org/btclib_libsecp256k1 | b7bdd0df121c9c674edc88a55ea6cadad75dafef | [
"MIT"
] | null | null | null | benchmark.py | btclib-org/btclib_libsecp256k1 | b7bdd0df121c9c674edc88a55ea6cadad75dafef | [
"MIT"
] | null | null | null | benchmark.py | btclib-org/btclib_libsecp256k1 | b7bdd0df121c9c674edc88a55ea6cadad75dafef | [
"MIT"
] | null | null | null | import time
import btclib.ecc.dsa
import btclib.ecc.ssa
import coincurve
from btclib.hashes import reduce_to_hlen
from btclib.to_pub_key import pub_keyinfo_from_prv_key
import btclib_libsecp256k1.dsa
import btclib_libsecp256k1.ssa
prvkey = 1
pubkey_bytes = pub_keyinfo_from_prv_key(prvkey)[0]
msg_bytes = reduce_to_hlen("Satoshi Nakamoto".encode())
dsa_signature_bytes = btclib_libsecp256k1.dsa.sign(msg_bytes, prvkey)
ssa_signature_bytes = btclib_libsecp256k1.ssa.sign(msg_bytes, prvkey)
def dsa_btclib():
assert btclib.ecc.dsa.verify_(msg_bytes, pubkey_bytes, dsa_signature_bytes)
def ssa_btclib():
assert btclib.ecc.ssa.verify_(msg_bytes, pubkey_bytes, ssa_signature_bytes)
def dsa_coincurve():
assert coincurve.PublicKey(pubkey_bytes).verify(
dsa_signature_bytes, msg_bytes, None
)
def dsa_libsecp256k1():
assert btclib_libsecp256k1.dsa.verify(msg_bytes, pubkey_bytes, dsa_signature_bytes)
def ssa_libsecp256k1():
assert btclib_libsecp256k1.ssa.verify(msg_bytes, pubkey_bytes, ssa_signature_bytes)
def benchmark(func, mult=1):
    """Print the average wall-clock time of 100 calls to ``func``."""
    start = time.time()
    for _ in range(100 * mult):
        func()
    end = time.time()
    print(f"{func.__name__}:", (end - start) / mult)  # seconds per 100 calls
benchmark(dsa_btclib, 100)
benchmark(dsa_coincurve, 100)
benchmark(dsa_libsecp256k1, 100)
benchmark(ssa_btclib, 100)
benchmark(ssa_libsecp256k1, 100)
| 25.036364 | 87 | 0.777778 | 197 | 1,377 | 5.111675 | 0.233503 | 0.063555 | 0.067527 | 0.079444 | 0.236346 | 0.196624 | 0.196624 | 0.196624 | 0.196624 | 0.196624 | 0 | 0.050791 | 0.127814 | 1,377 | 54 | 88 | 25.5 | 0.787677 | 0 | 0 | 0 | 0 | 0 | 0.023239 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 1 | 0.166667 | false | 0 | 0.222222 | 0 | 0.388889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c4437ca693f17d281bdd30bb65c08f78eb2db5 | 7,392 | py | Python | hah_classification/data.py | jieguangzhou/AIChallenger_SentimentAnalysis | 4dcd10c2e12bd266fb19c6b5cf50346766a4a37b | [
"MIT"
] | null | null | null | hah_classification/data.py | jieguangzhou/AIChallenger_SentimentAnalysis | 4dcd10c2e12bd266fb19c6b5cf50346766a4a37b | [
"MIT"
] | null | null | null | hah_classification/data.py | jieguangzhou/AIChallenger_SentimentAnalysis | 4dcd10c2e12bd266fb19c6b5cf50346766a4a37b | [
"MIT"
] | null | null | null | from collections import Counter
import logging
import random
import numpy as np
import jieba
from hah_classification.develop.IO import read_file, write_file
import pandas as pd
import os
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
PAD_IDX = 0
UNK_IDX = 1
COLUMNS = ['location_traffic_convenience', 'location_distance_from_business_district', 'location_easy_to_find',
'service_wait_time', 'service_waiters_attitude', 'service_parking_convenience', 'service_serving_speed',
'price_level', 'price_cost_effective', 'price_discount',
'environment_decoration', 'environment_noise', 'environment_space', 'environment_cleaness',
'dish_portion', 'dish_taste', 'dish_look', 'dish_recommendation',
'others_overall_experience', 'others_willing_to_consume_again']
def segment(sentence):
return [i for i in sentence if i.strip()]
def load_vocab(vocab_path):
"""
    Load a vocabulary file into a {token: index} mapping.
"""
vocab = {token: index for index, token in
enumerate(read_file(vocab_path, deal_function=lambda x: x.strip() if x != '\n' else x))}
logger.info('load vocab (size:%s) to %s' % (len(vocab), vocab_path))
return vocab
def save_vocab(vocab, vocab_path):
"""
    Save the vocabulary to disk, ordered by token index.
"""
sorted_vocab = sorted(vocab.items(), key=lambda x: x[1])
write_file(sorted_vocab, vocab_path, deal_function=lambda x: x[0] + '\n')
logger.info('save vocab (size:%s) to %s' % (len(vocab), vocab_path))
def load_data(data_path, vocab_path, label_vocab_path, create_vocab=False, create_label_vocab=False, min_freq=1,
vocab_size=None, return_label_vocab=False):
msg = 'load data from %s, ' % data_path
data_set = pd.read_csv(data_path)
vocab_ = Counter() if create_vocab else load_vocab(vocab_path)
label_vocab = {} if create_label_vocab else load_vocab(label_vocab_path)
sequences, lengths = [], []
for content in data_set.iloc[:, 1]:
tokens = segment(content)
if create_vocab:
vocab_.update(tokens)
sequences.append(tokens)
lengths.append(len(tokens))
if create_vocab:
vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}
        # vocab_size must be at least 2, reserving slots for <PAD> and <UNK>
print('ori vocab size %s' % len(vocab_))
vocab_size = max(vocab_size or len(vocab_), 2) - 2
logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))
for token, count in vocab_.most_common(vocab_size):
if not token:
continue
if count < min_freq:
break
else:
vocab[token] = len(vocab)
save_vocab(vocab, vocab_path)
else:
vocab = vocab_
columns = data_set.columns.values.tolist()[2:]
dict_labels = {}
dict_label_vocab = {}
for col in columns:
labels = [str(i) for i in data_set[col]]
col_vocab_path = label_vocab_path + '.' + col
if create_label_vocab:
label_vocab = {vocab: index for index, vocab in enumerate(sorted(set(labels)))}
save_vocab(label_vocab, col_vocab_path)
else:
label_vocab = load_vocab(col_vocab_path)
if not return_label_vocab:
labels = list(map(lambda x: label_vocab[x], labels))
dict_labels[col] = np.array(labels)
dict_label_vocab[col] = label_vocab
if create_label_vocab:
save_vocab(label_vocab, label_vocab_path)
sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]
msg += 'total : %s' % len(sequences)
logger.info(msg)
if return_label_vocab:
return np.array(sequences), dict_labels, np.array(lengths), dict_label_vocab
else:
return np.array(sequences), dict_labels, np.array(lengths)
def load_muti_label_data(data_path, vocab_path, create_vocab=False,
min_freq=1,
vocab_size=None):
msg = 'load data from %s, ' % data_path
data_set = pd.read_csv(data_path)
vocab_ = Counter() if create_vocab else load_vocab(vocab_path)
sequences, lengths = [], []
for content in data_set.iloc[:, 1]:
tokens = segment(content)
if create_vocab:
vocab_.update(tokens)
sequences.append(tokens)
lengths.append(len(tokens))
if create_vocab:
vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}
        # vocab_size must be at least 2, reserving slots for <PAD> and <UNK>
print('ori vocab size %s' % len(vocab_))
vocab_size = max(vocab_size or len(vocab_), 2) - 2
logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))
for token, count in vocab_.most_common(vocab_size):
if not token:
continue
if count < min_freq:
break
else:
vocab[token] = len(vocab)
save_vocab(vocab, vocab_path)
else:
vocab = vocab_
labels = data_set[COLUMNS].values + 2
sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]
msg += 'total : %s' % len(sequences)
logger.info(msg)
return np.array(sequences), labels, np.array(lengths)
def batch_iter(sequences, labels, lengths, batch_size=64, reverse=False, cut_length=None, shuffle=True):
"""
    Yield the data set in padded batches.
    :param sequences: token-id sequences
    :param labels: class labels
    :param lengths: sequence lengths
    :param reverse: whether to reverse each sequence
    :param cut_length: truncate sequences to this length
    :return: yields (padded_sequences, batch_labels, batch_lengths) tuples
"""
    # shuffle the data
data_num = len(sequences)
indexs = list(range(len(sequences)))
if shuffle:
random.shuffle(indexs)
batch_start = 0
shuffle_sequences = sequences[indexs]
shuffle_labels = labels[indexs]
shuffle_lengths = lengths[indexs]
while batch_start < data_num:
batch_end = batch_start + batch_size
batch_sequences = shuffle_sequences[batch_start:batch_end]
batch_labels = shuffle_labels[batch_start:batch_end]
batch_lengths = shuffle_lengths[batch_start:batch_end]
if isinstance(cut_length, int):
            # truncate the sequences
batch_sequences = [sequence[:cut_length] for sequence in batch_sequences]
batch_lengths = np.where(batch_lengths > cut_length, cut_length, batch_lengths)
        # pad every sequence in the batch to the batch max length
batch_max_length = batch_lengths.max()
batch_padding_sequences = []
for sequence, length in zip(batch_sequences, batch_lengths):
sequence += [PAD_IDX] * (batch_max_length - length)
if reverse:
sequence.reverse()
batch_padding_sequences.append(sequence)
batch_padding_sequences = np.array(batch_padding_sequences)
yield batch_padding_sequences, batch_labels, batch_lengths
batch_start = batch_end
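# Minimal usage sketch (the file names and the 'dish_taste' column are illustrative):
#   seqs, label_dict, lens = load_data('train.csv', 'vocab.txt', 'label.txt',
#                                      create_vocab=True, create_label_vocab=True)
#   for batch_x, batch_y, batch_len in batch_iter(seqs, label_dict['dish_taste'],
#                                                 lens, batch_size=32, cut_length=200):
#       ...  # feed the padded batch to a model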
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
vocab_path = '../data/vocab.txt'
label_vocab_path = '../cnews/label.txt'
data_set = load_data('../data/sentiment_analysis_validationset.csv', vocab_path, label_vocab_path,
create_vocab=True, create_label_vocab=True, vocab_size=5000)
# num = 0
# for sequences, labels, lengths in batch_iter(*data_set, batch_size=64):
# print(sequences.shape[1], lengths.max(), sequences.shape[1] == lengths.max())
| 36.235294 | 115 | 0.645157 | 948 | 7,392 | 4.75 | 0.198312 | 0.053298 | 0.027981 | 0.012436 | 0.434821 | 0.381301 | 0.356873 | 0.328892 | 0.315123 | 0.281368 | 0 | 0.005035 | 0.2477 | 7,392 | 203 | 116 | 36.413793 | 0.804711 | 0.049107 | 0 | 0.405405 | 0 | 0.006757 | 0.118096 | 0.044879 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.054054 | 0.006757 | 0.128378 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c52c6e87b48e4cd8e5aedb1e34b9798cbe5aba | 5,458 | py | Python | lumapps/cli.py | aureldent/lumapps-sdk | 9ee7ddbb0e5c94ec098f1d6c053bfd82698c2e8b | [
"MIT"
] | null | null | null | lumapps/cli.py | aureldent/lumapps-sdk | 9ee7ddbb0e5c94ec098f1d6c053bfd82698c2e8b | [
"MIT"
] | 1 | 2018-12-12T10:06:52.000Z | 2018-12-12T10:31:56.000Z | lumapps/cli.py | aureldent/lumapps-sdk | 9ee7ddbb0e5c94ec098f1d6c053bfd82698c2e8b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function, unicode_literals
import sys
import argparse
import json
from lumapps.utils import ApiCallError, get_conf, set_conf, FILTERS
from lumapps.client import ApiClient
import logging
LIST_CONFIGS = "***LIST_CONFIGS***"
def parse_args():
s = ""
for f in FILTERS:
s += "\nMethods " + f + "\n"
for pth in sorted(FILTERS[f]):
s += " " + pth + "\n"
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False,
epilog="FILTERS:\n" + s,
)
add_arg = parser.add_argument
add_arg(
"api_method",
nargs="*",
metavar="METHOD_PART",
help="API method with parameters in the form arg_name=value",
)
add_arg("--help", "-h", action="store_true")
add_arg("--debug", "-d", action="store_true")
add_arg("--api", help="JSON file", metavar="FILE")
add_arg("--user", help="user to act on behalf")
add_arg("--auth", help="JSON auth file", metavar="FILE")
add_arg(
"--token",
help="a token can be gotten with " '"getToken customerId=... email=..."',
)
add_arg("--body-file", help="JSON POST data body file", metavar="FILE")
add_arg(
"-p",
"--prune",
action="store_true",
help="Prune extraneous content based on methods being invoked. "
"See below for filters used.",
)
add_arg(
"--config",
"-c",
nargs="?",
default=None,
const=LIST_CONFIGS,
help="SAVE/READ/LIST configuration(s): if a value is provided: "
"SAVE when --auth or --api is specified, READ otherwise; if "
"no value is provided: list saved configs",
metavar="CONF_NAME",
)
add_arg(
"body",
nargs="?",
type=argparse.FileType("r"),
default=sys.stdin,
help=argparse.SUPPRESS,
)
return parser, parser.parse_args()
def list_configs():
conf = get_conf()["configs"]
if not conf:
print("There are no saved configs")
return
print("Saved configs:")
for conf_name in conf:
print(" " + conf_name)
def load_config(api_file, auth_file, user, conf_name):
if conf_name:
configs = get_conf()["configs"]
conf = configs.get(conf_name, {})
if not conf and not auth_file:
sys.exit('config "{}" not found'.format(conf_name))
else:
conf = {}
if auth_file:
with open(auth_file) as fh:
auth_info = json.load(fh)
else:
auth_info = conf.get("auth", None)
if api_file:
with open(api_file) as fh:
api_info = json.load(fh)
else:
api_info = conf.get("api", None)
if not user:
user = conf.get("user", None)
return api_info, auth_info, user
def store_config(api_info, auth_info, conf_name, user=None):
conf = get_conf()
conf["configs"][conf_name] = {"api": api_info, "auth": auth_info, "user": user}
set_conf(conf)
def cast_params(method_parts, params, api):
truths = ("True", "true", "1", "Yes", "yes", "sure", "yeah")
method = api.methods[method_parts]
method_params = method.get("parameters", {})
for param in params:
if method_params.get(param, {}).get("type", "") == "boolean":
params[param] = params[param] in truths
def setup_logger():
level = logging.DEBUG
logger = logging.getLogger()
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s [%(levelname)s]: %(message)s")
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
def main():
arg_parser, args = parse_args()
if args.debug:
setup_logger()
if not (args.auth or args.api or args.config or args.token):
arg_parser.print_help()
return
if args.config == LIST_CONFIGS:
list_configs()
return
api_info, auth_info, user = load_config(args.api, args.auth, args.user, args.config)
api = ApiClient(auth_info, api_info, user=user, token=args.token, prune=args.prune)
if args.config and (args.auth or args.api):
store_config(api_info, auth_info, args.config, args.user)
if not args.api_method:
arg_parser.print_help()
sys.exit(
"\nNo API method specified. Found these:\n"
+ api.get_method_descriptions(sorted(api.methods))
)
method_parts = tuple(p for p in args.api_method if "=" not in p)
if method_parts not in api.methods:
sys.exit(api.get_matching_methods(method_parts))
if args.help:
print(api.get_help(method_parts, args.debug))
return
params = {
p[0]: p[2] for p in (a.partition("=") for a in args.api_method if "=" in a)
}
if args.body_file:
with open(args.body_file) as fh:
params["body"] = json.load(fh)
elif "body" in params:
params["body"] = json.loads(params["body"])
# elif not sys.stdin.isatty() and args.body:
# s = args.body.read()
# print('will loads this: {}'.format(s))
# params['body'] = json.loads(s)
cast_params(method_parts, params, api)
try:
response = api.get_call(*method_parts, **params)
except ApiCallError as err:
sys.exit(err)
print(json.dumps(response, indent=4, sort_keys=True))
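# Illustrative invocation (the entry-point name, method and argument values
# below are assumptions for the sketch, not documented commands):
#   python -m lumapps.cli getToken customerId=12345 email=user@example.com \
#       --auth creds.json --config my_conf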
if __name__ == "__main__":
main()
| 30.662921 | 88 | 0.601869 | 725 | 5,458 | 4.369655 | 0.242759 | 0.022727 | 0.017361 | 0.018939 | 0.117109 | 0.051136 | 0 | 0 | 0 | 0 | 0 | 0.000989 | 0.259252 | 5,458 | 177 | 89 | 30.836158 | 0.782587 | 0.030414 | 0 | 0.104575 | 0 | 0 | 0.166068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045752 | false | 0 | 0.045752 | 0 | 0.130719 | 0.052288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c75ce6159591454805a4dc5c10c17e271ffe14 | 8,397 | py | Python | webpreview/previews.py | taghash/webpreview | b86cb6d547ddbca40fe69ca59c11a0c6e058f6d5 | [
"MIT"
] | null | null | null | webpreview/previews.py | taghash/webpreview | b86cb6d547ddbca40fe69ca59c11a0c6e058f6d5 | [
"MIT"
] | 2 | 2020-07-02T23:20:05.000Z | 2020-07-03T07:26:36.000Z | webpreview/previews.py | taghash/webpreview | b86cb6d547ddbca40fe69ca59c11a0c6e058f6d5 | [
"MIT"
] | 1 | 2022-03-05T06:33:38.000Z | 2022-03-05T06:33:38.000Z | import re
import requests
from requests.exceptions import *
from bs4 import BeautifulSoup
from .excepts import *
from .helpers import process_image_url
class PreviewBase(object):
"""
Base for all web preview.
"""
def __init__(self, url = None, properties = None, timeout=None, headers=None, content=None, parser='html.parser'):
# if no first argument raise URL required exception
if not url:
raise EmptyURL("Please pass a valid URL as the first argument.")
# raise invalid url exception for invalid URL
# taken from django https://github.com/django/django/blob/master/django/core/validators.py#L68
valid_url = re.compile(
r'^(https?://)?' # scheme is validated separately
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not valid_url.match(url):
raise InvalidURL("The URL is invalid.")
        # probe the URL once; if the scheme is missing, default to http
        try:
            requests.get(url, timeout=timeout, headers=headers)
        except (ConnectionError, HTTPError, Timeout, TooManyRedirects):
            raise URLUnreachable("The URL does not exist.")
        except MissingSchema:
            url = "http://" + url
        # if content was provided by the caller, don't fetch it from the URL
        if not content:
            content = PreviewBase.get_content(url, timeout, headers)
# its safe to assign the url
self.url = url
if not properties:
raise EmptyProperties("Please pass list of properties to be extracted.")
# its safe to assign properties
self.properties = properties
self._soup = BeautifulSoup(content, parser)
@staticmethod
def get_content(url, timeout, headers):
        # raise URLUnreachable if the fetch fails, just in case
try:
res = requests.get(url, timeout=timeout, headers=headers)
except (ConnectionError, HTTPError, Timeout, TooManyRedirects):
raise URLUnreachable("The URL is unreachable.")
if res.status_code == 404:
raise URLNotFound("The web page does not exist.")
return res.text
class GenericPreview(PreviewBase):
"""
Extracts title, description, image from a webpage's body instead of the meta tags.
"""
def __init__(self, url = None, properties = ['title', 'description', 'image'], timeout=None, headers=None, content=None, parser=None):
super(GenericPreview, self).__init__(url, properties, timeout=timeout, headers=headers, content=content, parser=parser)
self.title = self._get_title()
self.description = self._get_description()
self.image = self._get_image()
def _get_title(self):
"""
Extract title from the given web page.
"""
soup = self._soup
# if title tag is present and has text in it, return it as the title
if (soup.title and soup.title.text != ""):
return soup.title.text
# else if h1 tag is present and has text in it, return it as the title
if (soup.h1 and soup.h1.text != ""):
return soup.h1.text
# if no title, h1 return None
return None
def _get_description(self):
"""
Extract description from the given web page.
"""
soup = self._soup
# extract content preview from meta[name='description']
meta_description = soup.find('meta',attrs = {"name" : "description"})
if(meta_description and meta_description['content'] !=""):
return meta_description['content']
# else extract preview from the first <p> sibling to the first <h1>
first_h1 = soup.find('h1')
if first_h1:
first_p = first_h1.find_next('p')
if (first_p and first_p.string != ''):
return first_p.text
# else extract preview from the first <p>
first_p = soup.find('p')
if (first_p and first_p.string != ""):
return first_p.string
# else
return None
def _get_image(self):
"""
Extract preview image from the given web page.
"""
soup = self._soup
# extract the first image which is sibling to the first h1
first_h1 = soup.find('h1')
if first_h1:
first_image = first_h1.find_next_sibling('img')
if first_image and first_image['src'] != "":
return first_image['src']
return None
class SocialPreviewBase(PreviewBase):
"""
Abstract class for OpenGraph, TwitterCard and Google+.
"""
def __init__(self, *args, **kwargs):
super(SocialPreviewBase, self).__init__(*args, **kwargs)
self._set_properties()
    # OpenGraph has <meta property="" content="">
# TwitterCard has <meta name="" content="">
# Google+ has <meta itemprop="" content="">
# override this self._target_attribute
def _set_properties(self):
soup = self._soup
for property in self.properties:
property_meta = soup.find('meta', attrs = {self._target_attribute : property})
# turn "og:title" to "title" and "og:price:amount" to price_amount
if re.search(r":", property):
new_property = property.split(':',1)[1].replace(':', '_')
# turn "camelCase" to "camel_case"
elif re.search(r"[A-Z]", property):
# regex taken from 2nd answer at http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
new_property = re.sub('(?!^)([A-Z]+)', r'_\1',property).lower()
else:
new_property = property
# to fix keyError https://www.crummy.com/software/BeautifulSoup/bs4/doc/#miscellaneous
if property_meta and property_meta.get('content'):
# dynamically attach property to instance
self.__dict__[new_property] = self.__dict__.get('new_property') or property_meta.get('content')
else:
self.__dict__[new_property] = self.__dict__.get('new_property') or None
class OpenGraph(SocialPreviewBase):
"""
Gets OpenGraph meta properties of a webpage.
"""
def __init__(self, *args, **kwargs):
self._target_attribute = "property"
super(OpenGraph, self).__init__(*args, **kwargs)
class TwitterCard(SocialPreviewBase):
"""
Gets TwitterCard meta properties of a webpage.
"""
def __init__(self, *args, **kwargs):
self._target_attribute = "name"
super(TwitterCard, self).__init__(*args, **kwargs)
class Schema(SocialPreviewBase):
"""
Gets Schema meta properties from a website.
"""
def __init__(self, *args, **kwargs):
self._target_attribute = "itemprop"
super(Schema, self).__init__(*args, **kwargs)
def web_preview(url, timeout=None, headers=None, absolute_image_url=False, content=None, parser=None):
"""
Extract title, description and image from OpenGraph or TwitterCard or Schema or GenericPreview. Which ever returns first.
"""
og = OpenGraph(url, ['og:title', 'og:description', 'og:image'], timeout=timeout, headers=headers, content=content, parser=parser)
if og.title:
return og.title, og.description, process_image_url(url, og.image, absolute_image_url)
tc = TwitterCard(url, ['twitter:title', 'twitter:description', 'twitter:image'], timeout=timeout, headers=headers,
content=content, parser=parser)
if tc.title:
return tc.title, tc.description, process_image_url(url, tc.image, absolute_image_url)
s = Schema(url, ['name', 'description', 'image'], timeout=timeout, headers=headers, content=content, parser=parser)
if s.name:
return s.name, s.description, process_image_url(url, s.image, absolute_image_url)
gp = GenericPreview(url, timeout=timeout, headers=headers, content=content, parser=parser)
return gp.title, gp.description, process_image_url(url, gp.image, absolute_image_url)
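# Minimal usage sketch (the URL is illustrative and requires network access):
#   title, description, image = web_preview("https://example.com",
#                                           timeout=5, absolute_image_url=True)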
| 40.960976 | 150 | 0.617959 | 1,026 | 8,397 | 4.904483 | 0.210526 | 0.015898 | 0.029213 | 0.038951 | 0.333068 | 0.285374 | 0.274245 | 0.236089 | 0.200517 | 0.183029 | 0 | 0.009794 | 0.258307 | 8,397 | 204 | 151 | 41.161765 | 0.79817 | 0.232464 | 0 | 0.191667 | 0 | 0.016667 | 0.104377 | 0.02389 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.016667 | 0.05 | 0 | 0.316667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c782e3bc4c98e44210318ac8cb634e86e76e91 | 13,832 | py | Python | publicAPI/crest_endpoint.py | EVEprosper/ProsperAPI | 2d25b9210d32ca777204b1dddb56848d7075dd85 | [
"MIT"
] | 13 | 2017-03-27T13:10:52.000Z | 2020-07-30T09:33:11.000Z | publicAPI/crest_endpoint.py | EVEprosper/ProsperAPI | 2d25b9210d32ca777204b1dddb56848d7075dd85 | [
"MIT"
] | 19 | 2016-11-14T00:58:54.000Z | 2018-06-11T16:54:25.000Z | publicAPI/crest_endpoint.py | EVEprosper/ProsperAPI | 2d25b9210d32ca777204b1dddb56848d7075dd85 | [
"MIT"
] | 5 | 2017-04-19T01:12:06.000Z | 2021-03-07T02:23:45.000Z | """crest_endpoint.py: collection of public crest endpoints for Prosper"""
import sys
from os import path
from datetime import datetime
from enum import Enum
import logging
import ujson as json
from flask import Flask, Response, jsonify
from flask_restful import reqparse, Api, Resource, request
import publicAPI.forecast_utils as forecast_utils
import publicAPI.crest_utils as crest_utils
import publicAPI.api_utils as api_utils
import publicAPI.exceptions as exceptions
import publicAPI.config as api_config
import publicAPI.split_utils as split_utils
import prosper.common.prosper_logging as p_logging
import prosper.common.prosper_config as p_config
HERE = path.abspath(path.dirname(__file__))
CONFIG = api_config.CONFIG
LOGGER = p_logging.DEFAULT_LOGGER
DEBUG = False
TEST = api_utils.LOGGER
## Flask Handles ##
API = Api()
APP_HACK = Flask(__name__) #flask-restful CSV writer sucks
class AcceptedDataFormat(Enum):
"""enum for handling format support"""
CSV = 'csv'
JSON = 'json'
def return_supported_types():
"""parse AccpetedDataFormat.__dict__ for accepted types"""
supported_types = []
for key in AcceptedDataFormat.__dict__.keys():
if '_' not in key:
supported_types.append(key.lower())
return supported_types
## Flask Endpoints ##
@API.representation('text/csv')
def output_csv(data, status, headers=None):
"""helper for sending out CSV instead of JSON"""
resp = APP_HACK.make_response(data)
resp.headers['Content-Type'] = 'text/csv'
return resp
class OHLC_endpoint(Resource):
"""Handle calls on OHLC endpoint"""
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument(
'regionID',
type=int,
required=True,
help='regionid for market history API',
location=['args', 'headers']
)
self.reqparse.add_argument(
'typeID',
type=int,
required=True,
help='typeid for market history API',
location=['args', 'headers']
)
self.reqparse.add_argument(
'User-Agent',
type=str,
required=True,
help='User-Agent required',
location=['headers']
)
self.reqparse.add_argument(
'api',
type=str,
required=False,
help='API key for tracking requests',
location=['args', 'headers']
)
self.logger = logging.getLogger('publicAPI')
def get(self, return_type):
"""GET data from CREST and send out OHLC info"""
args = self.reqparse.parse_args()
#TODO: info archive
self.logger.info('OHLC %s Request: %s', return_type, args)
if return_type not in return_supported_types():
return 'INVALID RETURN FORMAT', 405
## Validate inputs ##
try:
crest_utils.validate_id(
'map_regions',
args.get('regionID'),
config=api_config.CONFIG,
logger=self.logger,
)
crest_utils.validate_id(
'inventory_types',
args.get('typeID'),
config=api_config.CONFIG,
logger=self.logger,
)
except exceptions.ValidatorException as err:
self.logger.warning(
'ERROR: unable to validate type/region ids' +
'\n\targs={0}'.format(args),
exc_info=True
)
return err.message, err.status
except Exception: #pragma: no cover
self.logger.error(
'ERROR: unable to validate type/region ids' +
'args={0}'.format(args),
exc_info=True
)
return 'UNHANDLED EXCEPTION', 500
## Fetch CREST ##
try:
#LOGGER.info(api_config.SPLIT_INFO)
if args.get('typeID') in api_config.SPLIT_INFO:
self.logger.info('FORK: using split utility')
data = split_utils.fetch_split_history(
args.get('regionID'),
args.get('typeID'),
config=api_config.CONFIG,
logger=self.logger,
)
else:
data = crest_utils.fetch_market_history(
args.get('regionID'),
args.get('typeID'),
config=api_config.CONFIG,
logger=LOGGER
)
data = crest_utils.data_to_ohlc(data)
except exceptions.ValidatorException as err: #pragma: no cover
self.logger.error(
'ERROR: unable to parse CREST data\n\targs=%s',
args,
exc_info=True
)
return err.message, err.status
except Exception: #pragma: no cover
self.logger.error(
'ERROR: unhandled issue in parsing CREST data\n\targs=%s',
args,
exc_info=True
)
return 'UNHANDLED EXCEPTION', 500
## Format output ##
if return_type == AcceptedDataFormat.JSON.value:
self.logger.info('rolling json response')
data_str = data.to_json(
path_or_buf=None,
orient='records'
)
message = json.loads(data_str)
elif return_type == AcceptedDataFormat.CSV.value:
self.logger.info('rolling csv response')
data_str = data.to_csv(
path_or_buf=None,
header=True,
index=False,
columns=[
'date',
'open',
'high',
'low',
'close',
'volume'
]
)
message = output_csv(data_str, 200)
else: #pragma: no cover
#TODO: CUT?
self.logger.error(
'invalid format requested' +
'\n\targs=%s' +
'\n\treturn_type=%s',
args, return_type,
exc_info=True
)
return 'UNSUPPORTED FORMAT', 500
return message
class ProphetEndpoint(Resource):
"""Handle calls on Prophet endpoint"""
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument(
'regionID',
type=int,
required=True,
help='regionid for market history API',
location=['args', 'headers']
)
self.reqparse.add_argument(
'typeID',
type=int,
required=True,
help='typeid for market history API',
location=['args', 'headers']
)
self.reqparse.add_argument(
'User-Agent',
type=str,
required=True,
help='User-Agent required',
location=['headers']
)
self.reqparse.add_argument(
'api',
type=str,
required=True,
help='API key for tracking requests',
location=['args', 'headers']
)
self.reqparse.add_argument(
'range',
type=int,
required=False,
help='Range for forecasting: default={0} max={1}'.\
format(api_config.DEFAULT_RANGE, api_config.MAX_RANGE),
location=['args', 'headers']
)
self.logger = logging.getLogger('publicAPI')
def get(self, return_type):
args = self.reqparse.parse_args()
self.logger.info('Prophet %s Request: %s', return_type, args)
if return_type not in return_supported_types():
return 'INVALID RETURN FORMAT', 405
forecast_range = api_config.DEFAULT_RANGE
if 'range' in args:
forecast_range = args.get('range')
## Validate inputs ##
try:
api_utils.check_key(
args.get('api'),
throw_on_fail=True,
logger=self.logger,
)
crest_utils.validate_id(
'map_regions',
args.get('regionID'),
config=api_config.CONFIG,
logger=self.logger,
)
crest_utils.validate_id(
'inventory_types',
args.get('typeID'),
config=api_config.CONFIG,
logger=self.logger,
)
forecast_range = forecast_utils.check_requested_range(
forecast_range,
max_range=api_config.MAX_RANGE,
raise_for_status=True
)
except exceptions.ValidatorException as err:
self.logger.warning(
'ERROR: unable to validate type/region ids\n\targs=%s',
args,
exc_info=True
)
return err.message, err.status
except Exception: #pragma: no cover
self.logger.error(
'ERROR: unable to validate type/region ids\n\targs=%s',
args,
exc_info=True
)
return 'UNHANDLED EXCEPTION', 500
## check cache ##
cache_data = forecast_utils.check_prediction_cache(
args.get('regionID'),
args.get('typeID')
)
self.logger.debug(cache_data)
if cache_data is not None:
self.logger.info('returning cached forecast')
message = forecast_reporter(
cache_data,
forecast_range,
return_type,
self.logger,
)
return message
## No cache, get data ##
try:
if args.get('typeID') in api_config.SPLIT_INFO:
LOGGER.info('FORK: using split utility')
data = split_utils.fetch_split_history(
args.get('regionID'),
args.get('typeID'),
data_range=api_config.MAX_RANGE,
config=api_config.CONFIG,
logger=self.logger,
)
data.sort_values(
by='date',
ascending=True,
inplace=True
)
else:
data = forecast_utils.fetch_extended_history(
args.get('regionID'),
args.get('typeID'),
data_range=api_config.MAX_RANGE,
config=api_config.CONFIG,
logger=self.logger,
)
data = forecast_utils.build_forecast(
data,
api_config.MAX_RANGE
)
except exceptions.ValidatorException as err:
#FIX ME: testing?
self.logger.warning(
'ERROR: unable to generate forecast\n\targs=%s',
args,
exc_info=True
)
return err.message, err.status
except Exception: #pragma: no cover
LOGGER.error(
'ERROR: unable to generate forecast\n\targs=%s',
args,
exc_info=True
)
return 'UNHANDLED EXCEPTION', 500
## Update cache ##
forecast_utils.write_prediction_cache(
args.get('regionID'),
args.get('typeID'),
data,
logger=self.logger,
)
try:
message = forecast_reporter(
data,
forecast_range,
return_type,
self.logger,
)
except Exception as err_msg: #pragma: no cover
LOGGER.error(
'invalid format requested'
'\n\targs=%s'
'\n\treturn_type=%s',
args, return_type,
exc_info=True
)
return 'UNABLE TO GENERATE REPORT', 500
return message
def forecast_reporter(
data,
forecast_range,
return_type,
logger=logging.getLogger('publicAPI')
):
"""prepares forecast response for Flask
Args:
data (:obj:`pandas.DataFrame`): Prediction data to report
forecast_range (int): range requested for return
return_type (:enum:`AcceptedDataFormat`): format of return
logger (:obj:`logging.logger`, optional): logging handle
Returns:
Flask-ready return object
"""
report_data = forecast_utils.trim_prediction(
data,
forecast_range
)
print(report_data)
if return_type == AcceptedDataFormat.JSON.value:
logger.info('rolling json response')
data_str = report_data.to_json(
path_or_buf=None,
orient='records'
)
message = json.loads(data_str)
elif return_type == AcceptedDataFormat.CSV.value:
logger.info('rolling csv response')
data_str = report_data.to_csv(
path_or_buf=None,
header=True,
index=False,
columns=[
'date',
'avgPrice',
'yhat',
'yhat_low',
'yhat_high',
'prediction'
]
)
message = output_csv(data_str, 200)
else: #pragma: no cover
raise exceptions.UnsupportedFormat(
status=500,
message='UNABLE TO GENERATE REPORT'
)
return message
## Flask Endpoints ##
API.add_resource(
OHLC_endpoint,
'/CREST/OHLC.<return_type>'
)
API.add_resource(
ProphetEndpoint,
'/CREST/prophet.<return_type>'
)
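# Example requests against these routes (the region/type ids are illustrative
# EVE Online values: 10000002 = The Forge, 34 = Tritanium):
#   GET /CREST/OHLC.json?regionID=10000002&typeID=34
#   GET /CREST/prophet.csv?regionID=10000002&typeID=34&api=<key>&range=30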
| 31.579909 | 74 | 0.52393 | 1,362 | 13,832 | 5.155653 | 0.162261 | 0.039875 | 0.018513 | 0.02421 | 0.601538 | 0.568072 | 0.552834 | 0.506124 | 0.487895 | 0.472088 | 0 | 0.004347 | 0.384688 | 13,832 | 437 | 75 | 31.652174 | 0.820703 | 0.07367 | 0 | 0.577128 | 0 | 0 | 0.132834 | 0.004178 | 0 | 0 | 0 | 0.002288 | 0 | 1 | 0.018617 | false | 0 | 0.042553 | 0 | 0.12234 | 0.00266 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03c7c1d39078369622ad7852e9cd54cf976ecdc8 | 1,536 | py | Python | script-diagnostico-covid19/conexao_db.py | am1701/estudos-python | 67b82161782a6862c3a81ab59ee3d92d7c262b36 | [
"MIT"
] | null | null | null | script-diagnostico-covid19/conexao_db.py | am1701/estudos-python | 67b82161782a6862c3a81ab59ee3d92d7c262b36 | [
"MIT"
] | null | null | null | script-diagnostico-covid19/conexao_db.py | am1701/estudos-python | 67b82161782a6862c3a81ab59ee3d92d7c262b36 | [
"MIT"
] | null | null | null | #!/bin/python3
import sys
from time import sleep
try:
import mysql.connector
except ImportError:
    print('''You need the mysql-connector module installed; README.md lists all the
steps required to install it.''')
    print('=' * 30)
    print("The script will exit in 3 seconds. Bye!!")
sleep(3)
sys.exit(1)
def Conection():
try:
mydb = mysql.connector.connect(
host="sql10.freesqldatabase.com",
database='sql10424407',
user="sql10424407",
password="xCmgJsLGDP"
)
except Exception as e:
        print(f'Error connecting to the database -> {e}')
return False
else:
return True, mydb
def Criar_tabelas():
if Conection():
try:
response, mysql = Conection()
cursor = mysql.cursor()
cursor.execute('''CREATE TABLE dados (id INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY,nome VARCHAR(255), idade VARCHAR(255), fumante CHAR(1), resultado VARCHAR(255));''')
            print('Table created successfully!')
except Exception as e:
print(f'Error -> {e}')
finally:
cursor.close()
else:
print("Error ao conectar")
def Gravar_dados(nome,idade,fumante,resultado):
query = '''INSERT INTO dados (nome, idade,fumante,resultado) VALUES (%s, %s, %s, %s)'''
values = (nome, idade, fumante, resultado)
if Conection():
response,mysql = Conection()
cursor = mysql.cursor()
try:
cursor.execute(query, values)
mysql.commit()
print("Dados gravados com sucesso!!")
except Exception as e:
print(f"Erro ao gravar dados -> {e}")
finally:
cursor.close()
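# Minimal usage sketch (the argument values are illustrative):
#   Criar_tabelas()
#   Gravar_dados('Maria', '42', 'N', 'negative')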
| 22.925373 | 172 | 0.673177 | 205 | 1,536 | 5.02439 | 0.517073 | 0.040777 | 0.049515 | 0.052427 | 0.234951 | 0.176699 | 0.100971 | 0.100971 | 0 | 0 | 0 | 0.028986 | 0.191406 | 1,536 | 66 | 173 | 23.272727 | 0.800322 | 0.008464 | 0 | 0.372549 | 0 | 0.039216 | 0.394079 | 0.032237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.039216 | 0.058824 | 0 | 0.156863 | 0.156863 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03cf66408abae7099aee97c7a650861c8b31d19d | 3,233 | py | Python | 2021/day_18.py | Javitronxo/AdventOfCode | 3a31544824881d353d899bc820d92b005af6b4eb | [
"MIT"
] | 1 | 2021-12-03T14:37:28.000Z | 2021-12-03T14:37:28.000Z | 2021/day_18.py | Javitronxo/AdventOfCode | 3a31544824881d353d899bc820d92b005af6b4eb | [
"MIT"
] | null | null | null | 2021/day_18.py | Javitronxo/AdventOfCode | 3a31544824881d353d899bc820d92b005af6b4eb | [
"MIT"
] | null | null | null | import collections
import itertools
from dataclasses import dataclass
from typing import List
@dataclass
class Node:
value: int
depth: int
def add_numbers(number_one: List[Node], number_two: List[Node]) -> List[Node]:
new_number = [
Node(node.value, node.depth + 1)
for node in number_one + number_two
]
while any(node.depth > 4 for node in new_number) or any(node.value > 9 for node in new_number):
# Explode
while any(node.depth > 4 for node in new_number):
for i in range(len(new_number)):
if new_number[i].depth > 4:
try:
if i - 1 >= 0:
new_number[i - 1].value += new_number[i].value
except IndexError:
pass
try:
new_number[i + 2].value += new_number[i + 1].value
except IndexError:
pass
new_number[i].value = 0
new_number[i].depth -= 1
new_number.pop(i + 1)
break
# Split
while any(node.value > 9 for node in new_number):
for i in range(len(new_number)):
if new_number[i].value > 9:
left_value = new_number[i].value // 2
right_value = new_number[i].value - left_value
new_number[i].value = left_value
new_number[i].depth += 1
new_node = Node(right_value, new_number[i].depth)
new_number.insert(i + 1, new_node)
break
# We might need to explode before splitting again
break
return new_number
def get_magnitude(number: List[Node]) -> int:
while len(number) > 1:
max_depth = max(node.depth for node in number)
for i in range(len(number)):
if number[i].depth == max_depth:
magnitude = number[i].value * 3 + number[i + 1].value * 2
number[i] = Node(magnitude, max_depth - 1)
number.pop(i + 1)
break
return number[0].value
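# Worked example (a standard AoC reference case): [[1,2],[[3,4],5]] has
# magnitude 143. Encoded as depth-annotated leaves it is
#   [Node(1, 2), Node(2, 2), Node(3, 3), Node(4, 3), Node(5, 2)]
# and get_magnitude reduces the deepest pair first:
#   [3,4] -> 3*3 + 2*4 = 17, then [1,2] -> 3*1 + 2*2 = 7,
#   then [17,5] -> 3*17 + 2*5 = 61, finally [7,61] -> 3*7 + 2*61 = 143.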
def main():
fish_numbers = collections.deque()
with open("day_18_input.txt") as f:
for line in f.readlines():
depth = 0
number = list()
for char in line:
if char == "[":
depth += 1
elif char == "]":
depth -= 1
elif char.isdigit():
number.append(Node(value=int(char), depth=depth))
else:
continue
fish_numbers.append(number)
    # permutations() snapshots the deque into a tuple here, before the
    # Part 1 loop below drains fish_numbers with popleft()
    permutations = itertools.permutations(fish_numbers, 2)
current_number = fish_numbers.popleft()
while fish_numbers:
current_number = add_numbers(current_number, fish_numbers.popleft())
print(f"Part 1: {get_magnitude(current_number)}")
max_magnitude = -1
for p in permutations:
number = add_numbers(p[0], p[1])
max_magnitude = max(max_magnitude, get_magnitude(number))
print(f"Part 2: {max_magnitude}")
if __name__ == "__main__":
main()
| 33.677083 | 99 | 0.519332 | 390 | 3,233 | 4.133333 | 0.212821 | 0.128412 | 0.080645 | 0.065136 | 0.336228 | 0.210298 | 0.165633 | 0.165633 | 0.165633 | 0.138337 | 0 | 0.018127 | 0.38571 | 3,233 | 95 | 100 | 34.031579 | 0.793555 | 0.018868 | 0 | 0.15 | 0 | 0 | 0.027778 | 0.009785 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0.025 | 0.05 | 0 | 0.15 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03d1137912ab50699b6179c220fe8310bb178774 | 4,428 | py | Python | server.py | Celnardur/simple_server | 930f0066b26781d5a77ae3ac60caa0ac9b685b9f | [
"MIT"
] | null | null | null | server.py | Celnardur/simple_server | 930f0066b26781d5a77ae3ac60caa0ac9b685b9f | [
"MIT"
] | null | null | null | server.py | Celnardur/simple_server | 930f0066b26781d5a77ae3ac60caa0ac9b685b9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import mimetypes
import sys
# import api
bufsize = 4096
base_path = "./www"
class server(BaseHTTPRequestHandler):
def get_payload(self):
        if 'Content-Length' not in self.headers:
            return (200, None)
try:
content_length = int(self.headers['Content-Length'])
except ValueError:
return (400, b"Malformed content_length")
if content_length <= 0:
return (200, None)
try:
return (200, json.loads(self.rfile.read(int(content_length))))
except:
return (400, b"Malformed json payload")
def file_response(self, path):
response_code = 200
if not os.path.exists(path):
response_code = 404
path = os.path.join(base_path, '404.html')
if not os.path.exists(path):
self.code_response(404, b'404')
return
if not os.path.isfile(path):
path = os.path.join(path, 'index.html')
mime_type, _ = mimetypes.guess_type(path)
self.send_response(response_code)
self.send_header('Content-Type', mime_type)
self.end_headers()
with open(path, 'rb') as out_file:
buf = out_file.read(bufsize)
while len(buf) > 0:
self.wfile.write(buf)
buf = out_file.read(bufsize)
def code_response(self, response_code, content):
self.send_response(response_code)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(content)
def do_GET(self):
url = self.path.split("?")
path = os.path.join(base_path, url[0][1:])
self.file_response(path)
def do_PUT(self):
(code, payload) = self.get_payload()
# handle malformed payload
if code == 400:
self.code_response(400, bytes(payload, 'utf-8'))
return
print(payload)
url = self.path.split("?")
path = os.path.join(base_path, url[0][1:])
self.file_response(path)
# if url[0][:4] == '/api':
# try:
# (code, body) = api.process(url[0][4:], payload)
# if code == 200:
# api.get_notifications(url[0][4:], payload)
# except:
# self.code_response(500, b'Internal Server Error')
# return
# self.send_response(code)
# self.send_header('Content-Type', 'application/json')
# self.end_headers()
# self.wfile.write(bytes(json.dumps(body), 'utf-8'))
# else:
# path = os.path.join(base_path, url[0][1:])
# self.file_response(path)
def usage():
print("Usage: ./server.py [options]")
print(" -h, --help print this help and exit")
print(" -n, --host_name set the server host name")
print(" -p, --port set the server port number")
print(" -b, --base_path set the base path to serve files from")
if __name__ == "__main__":
host_name = "localhost"
server_port = 8080
arg_n = 1
arg_len = len(sys.argv)
while arg_n < arg_len:
arg = sys.argv[arg_n]
if arg.startswith("-"):
if arg == "--host_name" or arg == "-n":
host_name = sys.argv[arg_n + 1]
elif arg == "--port" or arg == "-p":
server_port = int(sys.argv[arg_n + 1])
elif arg == "--base_path" or arg == "-b":
base_path = sys.argv[arg_n + 1]
if not os.path.isdir(base_path):
print("Specified base path does not exist")
exit(1)
elif arg == "--help" or arg == "-h":
usage()
exit(0)
else:
usage()
exit(1)
            arg_n += 1  # skip the value consumed by the flag above
        arg_n += 1
if not os.path.exists(base_path) or not os.path.isdir(base_path):
print("Specified base path " + base_path + " does not exist")
exit(1)
print("Host name: " + host_name)
print("server port: " + str(server_port))
print("base path: " + base_path)
# api.init()
httpd = HTTPServer((host_name, server_port), server)
httpd.serve_forever()
| 30.75 | 84 | 0.536811 | 546 | 4,428 | 4.20696 | 0.232601 | 0.059208 | 0.023509 | 0.023944 | 0.299521 | 0.275577 | 0.223335 | 0.162386 | 0.162386 | 0.162386 | 0 | 0.026111 | 0.334011 | 4,428 | 143 | 85 | 30.965035 | 0.752798 | 0.128275 | 0 | 0.278351 | 0 | 0 | 0.138802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061856 | false | 0 | 0.051546 | 0 | 0.195876 | 0.113402 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03d16b56e7eb71686fb7086761419a2ac514491c | 5,771 | py | Python | contents/1_command_line_reinforcement_learning/treasure_on_right.py | Pankaj-Baranwal/Reinforcement-learning-with-tensorflow | cf738d3e975aa9d2384dcd1a65dbdd156ddd970f | [
"MIT"
] | null | null | null | contents/1_command_line_reinforcement_learning/treasure_on_right.py | Pankaj-Baranwal/Reinforcement-learning-with-tensorflow | cf738d3e975aa9d2384dcd1a65dbdd156ddd970f | [
"MIT"
] | null | null | null | contents/1_command_line_reinforcement_learning/treasure_on_right.py | Pankaj-Baranwal/Reinforcement-learning-with-tensorflow | cf738d3e975aa9d2384dcd1a65dbdd156ddd970f | [
"MIT"
] | null | null | null | """
A simple example for Reinforcement Learning using table lookup Q-learning method.
An agent "o" is on the left of a 1 dimensional world, the treasure is on the rightmost location.
Run this program to see how the agent improves its strategy for finding the treasure.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
A better explanation is available at
https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
"""
"""
Notes:
Unlike policy gradient methods, which attempt to learn functions which directly map an observation to an action, Q-Learning attempts to learn the value of being in a given state, and taking a specific action there.
Q learning helps learn long term expected rewards
In it’s simplest implementation, Q-Learning is a table of values for every state (row) and action (column) possible in the environment. Within each cell of the table, we learn a value for how good it is to take a given action within a given state. In the case of the FrozenLake environment (OpenAI), we have 16 possible states (one for each block), and 4 possible actions (the four directions of movement), giving us a 16x4 table of Q-values. We start by initializing the table to be uniform (all zeros), and then as we observe the rewards we obtain for various actions, we update the table accordingly.
For making updates to Q-table values, we use the Bellman equation:
Q(s,a) = r + γ * max_a' Q(s',a')
This says that the Q-value for a given state (s) and action (a) should represent the current reward (r) plus the maximum discounted (γ) future reward expected according to our own table for the next state (s’) we would end up in. The discount variable allows us to decide how important the possible future rewards are compared to the present reward. By updating in this way, the table slowly begins to obtain accurate measures of the expected future reward for a given action in a given state.
"""
import numpy as np
import pandas as pd
import time
np.random.seed(2) # reproducible
N_STATES = 6 # the length of the 1 dimensional world
ACTIONS = ['left', 'right'] # available actions
EPSILON = 0.9   # greedy policy: act greedily with probability 0.9; the 0.1 chance of a random action lets the agent explore the world and find robust solutions
ALPHA = 0.1 # learning rate
GAMMA = 0.9 # discount factor. discount variable allows us to decide how important the possible future rewards are compared to the present reward.
MAX_EPISODES = 13 # maximum episodes
FRESH_TIME = 0.2 # fresh time for one move
def build_q_table(n_states, actions):
"""
Initialize a zero-valued q-table of states and actions
"""
table = pd.DataFrame(
np.zeros((n_states, len(actions))), # q_table initial values
columns=actions, # actions's name
)
# print(table) # show table
return table
def choose_action(state, q_table):
"""
Decide on the next move.
Act non-greedily every now and then,
or explore arena if unexplored,
else choose the state with maximum reward
"""
# This is how to choose an action
state_actions = q_table.iloc[state, :]
if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()): # act non-greedy or state-action have no value (unexplored arena)
action_name = np.random.choice(ACTIONS)
else: # act greedy
action_name = state_actions.idxmax() # replace argmax to idxmax as argmax means a different function in newer version of pandas
return action_name
def get_env_feedback(S, A):
# This is how agent will interact with the environment
if A == 'right': # move right
if S == N_STATES - 2: # terminate
S_ = 'terminal'
R = 1
else:
S_ = S + 1
R = 0
else: # move left
R = 0
if S == 0:
S_ = S # reach the wall
else:
S_ = S - 1
# New state and reward obtained
return S_, R
def update_env(S, episode, step_counter):
# This is how environment is updated
env_list = ['-']*(N_STATES-1) + ['T'] # '---------T' our environment
if S == 'terminal':
interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
print('\r{}'.format(interaction), end='')
time.sleep(2)
print('\r ', end='')
else:
env_list[S] = 'o'
interaction = ''.join(env_list)
print('\r{}'.format(interaction), end='')
time.sleep(FRESH_TIME)
def update_q_table(q_table, S, A, S_, R):
"""
Bellman equation
"""
is_terminated = False
q_predict = q_table.loc[S, A]
if S_ != 'terminal':
q_target = R + GAMMA * q_table.iloc[S_, :].max() # next state is not terminal
else:
q_target = R # next state is terminal
is_terminated = True # terminate this episode
q_table.loc[S, A] += ALPHA * (q_target - q_predict) # update
return q_table, S_, is_terminated
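# Worked example of a single update with the hyperparameters above
# (ALPHA = 0.1, GAMMA = 0.9): if Q[S, A] = 0, the move earns R = 1 and the
# episode terminates, then q_target = 1 and
#   Q[S, A] <- 0 + 0.1 * (1 - 0) = 0.1
# For a non-terminal move with R = 0 and a best next-state value of 0.5:
#   q_target = 0 + 0.9 * 0.5 = 0.45, so Q[S, A] <- 0 + 0.1 * 0.45 = 0.045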
def rl():
# main part of RL loop
q_table = build_q_table(N_STATES, ACTIONS)
for episode in range(MAX_EPISODES):
step_counter = 0
S = 0
is_terminated = False
update_env(S, episode, step_counter)
while not is_terminated:
A = choose_action(S, q_table)
S_, R = get_env_feedback(S, A) # take action & get next state and reward
q_table, S, is_terminated = update_q_table(q_table, S, A, S_, R) # move to next state
update_env(S, episode, step_counter+1)
step_counter += 1
return q_table
if __name__ == "__main__":
q_table = rl()
print('\r\nQ-table:\n')
print(q_table)
| 40.929078 | 603 | 0.666609 | 881 | 5,771 | 4.266742 | 0.315551 | 0.03352 | 0.009311 | 0.013567 | 0.142059 | 0.117584 | 0.081937 | 0.063315 | 0.063315 | 0.051609 | 0 | 0.01105 | 0.247271 | 5,771 | 140 | 604 | 41.221429 | 0.854282 | 0.300295 | 0 | 0.151899 | 0 | 0 | 0.053912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.037975 | 0 | 0.177215 | 0.063291 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03d430af1217994371a7fc92b45f322c0f6446ff | 6,427 | py | Python | common/data.py | looselyconnected/fastai | 12d38d760393f1a677cc825d99283c8efcffbef5 | [
"MIT"
] | null | null | null | common/data.py | looselyconnected/fastai | 12d38d760393f1a677cc825d99283c8efcffbef5 | [
"MIT"
] | null | null | null | common/data.py | looselyconnected/fastai | 12d38d760393f1a677cc825d99283c8efcffbef5 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from fastai.structured import apply_cats
from io import StringIO
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
import time
import os
from contextlib import contextmanager
def transform_columns(df, cat_vars, cont_vars):
for v in cat_vars:
df[v] = df[v].astype('category').cat.as_ordered()
for v in cont_vars:
df[v] = df[v].fillna(0).astype('float32')
def get_embedding_sizes(cat_vars, df):
cat_sz = [(c, len(df[c].cat.categories)) for c in cat_vars]
embedding_sizes = [(c, min(50, (c+1)//2)) for _,c in cat_sz]
return embedding_sizes
def get_validation_index(df, frac=0.25, random=True):
if random:
return df.sample(frac=frac).index
else:
total = len(df)
return list(range(int(total - total*frac), total))
def lr_find(learner, start_lr=1e-4, end_lr=1):
learner.lr_find(start_lr=start_lr, end_lr=end_lr)
learner.sched.plot(100)
# dfs is a list of dataframes; col is the name of one column to make categorical.
# This function makes sure the categorical codes for that column share a single
# mapping across all of the dataframes.
def set_common_categorical(dfs, col):
all_df = pd.DataFrame([], columns=[col])
for df in dfs:
all_df = pd.concat([all_df, df[[col]]])
all_df[col] = all_df[col].astype('category').cat.as_ordered()
for df in dfs:
apply_cats(df, all_df)
def load_file(fname):
try:
df = pd.read_hdf(fname, 'k')
return df
except Exception:
return None
def save_file(df, fname):
df.to_hdf(fname, 'k', format='table')
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.0f}s".format(title, time.time() - t0))
# rmse
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, prefix=None, nan_as_category=True):
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, prefix=prefix, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return df, new_columns
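# Minimal usage sketch (the dataframe is illustrative):
#     raw = pd.DataFrame({'city': ['a', 'b', None], 'n': [1, 2, 3]})
#     encoded, dummy_cols = one_hot_encoder(raw)
#     # dummy_cols holds the names of the newly created indicator columns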
# Display/plot feature importance
def display_importances(feature_importance_df_):
    sorted_df = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(
        by="importance", ascending=False)
    print(sorted_df)
    threshold = 40
    cols = sorted_df[:threshold].index
    best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances.png')
# reduce memory
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
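# Minimal usage sketch (file name is illustrative):
#     df = pd.read_csv('train.csv')
#     df = reduce_mem_usage(df)  # downcasts numeric columns; prints savings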
def set_to_float32(df):
    numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float64']
    for col in df.columns:
        if df[col].dtypes in numerics:
            df[col] = df[col].fillna(0.0).astype('float32')
def add_stat_features(df, cols):
    for feature in cols:
        df['mean_' + feature] = (df[feature] - df[feature].mean())
        df['z_' + feature] = df['mean_' + feature] / df[feature].std(ddof=0)
        df['sq_' + feature] = (df[feature]) ** 2
        df['sqrt_' + feature] = np.abs(df[feature]) ** (1 / 2)
        df['log_' + feature] = np.log(df['sq_' + feature] + 10) / 2
def prediction_to_df(target_col, pred):
    if len(pred.shape) == 2:
        pred_cols = [f'{target_col}_{i}' for i in range(pred.shape[1])]
    else:
        pred_cols = [target_col]
    return pd.DataFrame(pred, columns=pred_cols)
# Given a csv file, return a dataframe with just the last row
def get_last_row(filename):
    try:
        statinfo = os.stat(filename)
        with open(filename) as f:
            header = f.readline()
            # seek to (at most) the last 4096 bytes so only the tail is read
            f.seek(0 if statinfo.st_size <= 4096 else statinfo.st_size - 4096, 0)
            last_rows = f.read(4096).split('\n')
        last_row = last_rows[-1]
        if len(last_row) == 0:
            last_row = last_rows[-2]
        content = header + last_row  # avoid shadowing the builtin `str`
        return pd.read_csv(StringIO(content))
    except Exception:
        return None
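# Minimal usage sketch (file name is illustrative):
#     tail = get_last_row('prices.csv')  # one-row DataFrame, or None on error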
# Append the diffs to the end of the file
def append_diff_to_csv(filename, df, fieldname):
    orig = pd.read_csv(filename)
    combined = pd.merge(orig, df, on=df.columns.tolist(), how='outer')
    diff = combined.loc[~combined[fieldname].isin(orig[fieldname])]
    if len(diff) == 0:
        return
    diff_csv = diff.to_csv(header=False, index=False)
    with open(filename, 'a') as f:  # close the handle so the rows are flushed
        f.write(diff_csv)
| 34.005291 | 110 | 0.632644 | 976 | 6,427 | 4.009221 | 0.256148 | 0.030667 | 0.022489 | 0.020445 | 0.193202 | 0.157168 | 0.106312 | 0.012267 | 0 | 0 | 0 | 0.026497 | 0.230745 | 6,427 | 188 | 111 | 34.18617 | 0.764968 | 0.063793 | 0 | 0.115108 | 0 | 0 | 0.060929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122302 | false | 0 | 0.122302 | 0.007194 | 0.330935 | 0.028777 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03d7a95c066c675726f7f6a54e1d1bc42d1e9abc | 4,902 | py | Python | neural_network_lyapunov/examples/tinydiffsim/learn_ca_dyn.py | hongkai-dai/neural-network-lyapunov-1 | 8843c13f69f7f39cbb939ab250413e76f61843f6 | [
"MIT"
] | 58 | 2021-06-21T08:59:52.000Z | 2022-03-31T14:35:23.000Z | neural_network_lyapunov/examples/tinydiffsim/learn_ca_dyn.py | StanfordASL/neural-network-lyapunov | 9e5db1c7f91b42df729026c9aa8575bc126f66b6 | [
"MIT"
] | 8 | 2021-08-22T05:31:23.000Z | 2022-03-29T03:47:07.000Z | neural_network_lyapunov/examples/tinydiffsim/learn_ca_dyn.py | StanfordASL/neural-network-lyapunov | 9e5db1c7f91b42df729026c9aa8575bc126f66b6 | [
"MIT"
] | 11 | 2021-06-21T04:29:59.000Z | 2022-03-30T05:54:43.000Z | import neural_network_lyapunov.worlds as worlds
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.control_affine_system as mut
import pytinydiffsim as pd
import argparse
import torch
import yaml
def load_multibody(cfg):
    world = pd.TinyWorld()
    urdf_data = pd.TinyUrdfParser().load_urdf(
        worlds.urdf_path(cfg["world"]["urdf"]))
    mb = pd.TinyMultiBody(cfg["world"]["floating"])
    urdf2mb = pd.UrdfToMultiBody2()
    urdf2mb.convert2(urdf_data, world, mb)
    actuation_mask = [
        j.joint_name in cfg["world"]["actuated_joints"] for
        j in urdf_data.joints]
    return mb, actuation_mask
def generate_dataset(cfg, mb, actuation_mask):
    dtype = torch.float64
    q_dim = mb.q.size()
    qd_dim = mb.qd.size()
    tau_dim = mb.tau.size()
    gravity = pd.Vector3(0., 0., -9.81)
    q = pd.VectorX(q_dim)
    qd = pd.VectorX(qd_dim)
    tau = pd.VectorX(tau_dim)
    u_dim = sum(actuation_mask)
    xu_lo = torch.cat([torch.tensor(cfg["data"]["q_lo"], dtype=dtype),
                       torch.tensor(cfg["data"]["qd_lo"], dtype=dtype),
                       torch.tensor(cfg["data"]["u_lo"], dtype=dtype)])
    xu_up = torch.cat([torch.tensor(cfg["data"]["q_up"], dtype=dtype),
                       torch.tensor(cfg["data"]["qd_up"], dtype=dtype),
                       torch.tensor(cfg["data"]["u_up"], dtype=dtype)])
    samples = utils.uniform_sample_in_box(
        xu_lo, xu_up, cfg["data"]["num_samples"])
    data = []
    labels = []
    for k in range(samples.shape[0]):
        q_sample = samples[k, :q_dim]
        qd_sample = samples[k, q_dim:q_dim+qd_dim]
        u_sample = samples[k, q_dim+qd_dim:q_dim+qd_dim+u_dim]
        for i in range(q_dim):
            q[i] = q_sample[i]
        for i in range(qd_dim):
            qd[i] = qd_sample[i]
        j = 0
        for i in range(tau_dim):
            if actuation_mask[i]:
                tau[i] = u_sample[j]
                j += 1
            else:
                tau[i] = 0
        assert j == u_dim
        mb.q = q
        mb.qd = qd
        mb.tau = tau
        pd.forward_dynamics(mb, gravity)
        qdd_sample = torch.tensor(
            [mb.qdd[i] for i in range(mb.qdd.size())], dtype=dtype)
        label = torch.cat([qd_sample, qdd_sample])
        data.append(samples[k, :].unsqueeze(0))
        labels.append(label.unsqueeze(0))
    data = torch.cat(data, dim=0)
    labels = torch.cat(labels, dim=0)
    dataset = torch.utils.data.TensorDataset(data, labels)
    return dataset
def get_models(cfg):
    dtype = torch.float64
    x_dim = len(cfg['data']['q_lo']) + len(cfg['data']['qd_lo'])
    u_dim = len(cfg['data']['u_lo'])
    hid_f = tuple([x_dim] + cfg['train']['f_hid'] + [x_dim])
    hid_G = tuple([x_dim] + cfg['train']['G_hid'] + [x_dim * u_dim])
    forward_model_f = utils.setup_relu(hid_f,
                                       params=None,
                                       bias=True,
                                       negative_slope=0.01,
                                       dtype=dtype)
    forward_model_G = utils.setup_relu(hid_G,
                                       params=None,
                                       bias=True,
                                       negative_slope=0.01,
                                       dtype=dtype)
    return forward_model_f, forward_model_G
def train_models(cfg, dataset, forward_model_f, forward_model_G,
                 verbose=False):
    dtype = torch.float64
    x_equ = torch.tensor(cfg['train']['x_equ'], dtype=dtype)
    u_equ = torch.tensor(cfg['train']['u_equ'], dtype=dtype)
    mut.train_control_affine_forward_model(
        forward_model_f, forward_model_G, x_equ, u_equ,
        dataset, cfg['train']['epoch'], cfg['train']['lr'],
        batch_size=cfg['train']['batch_size'],
        verbose=verbose)
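# Editorial note (an inference from the control_affine_system module name and
# the two networks built in get_models, not a statement from the original
# authors): the learned dynamics are control-affine, i.e. the model
# approximates x_dot ~= f(x) + G(x) u with separate leaky-ReLU networks for
# f and G.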
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="trains control affine dynamics models using " +
                    "Tiny Differentiable Simulator (~Bullet)")
    parser.add_argument("cfg_path", type=str)
    parser.add_argument("--generate_dataset", action="store_true")
    parser.add_argument("--verbose", action="store_true")
    args = parser.parse_args()
    with open(args.cfg_path, 'r') as cfg_file:
        cfg = yaml.safe_load(cfg_file)
    if args.generate_dataset:
        mb, actuation_mask = load_multibody(cfg)
        dataset = generate_dataset(cfg, mb, actuation_mask)
        torch.save(dataset, cfg['world']['name'] + "_dataset.pt")
    else:
        dataset = torch.load(cfg['world']['name'] + "_dataset.pt")
    forward_model_f, forward_model_G = get_models(cfg)
    # train_models returns nothing, so its result is not assigned
    train_models(
        cfg, dataset, forward_model_f, forward_model_G, verbose=args.verbose)
    models = [forward_model_f, forward_model_G]
    torch.save(models, cfg['world']['name'] + "_models.pt")
| 33.806897 | 77 | 0.583639 | 657 | 4,902 | 4.112633 | 0.208524 | 0.066617 | 0.041451 | 0.03997 | 0.285344 | 0.226499 | 0.139156 | 0.072539 | 0.072539 | 0.072539 | 0 | 0.008496 | 0.279682 | 4,902 | 144 | 78 | 34.041667 | 0.756726 | 0 | 0 | 0.113043 | 0 | 0 | 0.083639 | 0 | 0 | 0 | 0 | 0 | 0.008696 | 1 | 0.034783 | false | 0 | 0.06087 | 0 | 0.121739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03da330638b8a1576e15bc62abc446e553a243a9 | 5,210 | py | Python | WF_SDK/dmm.py | Digilent/WaveForms-SDK-Getting-Started | ae33d5e19552dea41ddfca6ec038fe462a922c73 | [
"MIT"
] | 3 | 2021-12-14T18:08:11.000Z | 2022-02-26T11:06:30.000Z | WF_SDK/dmm.py | Digilent/WaveForms-SDK-Getting-Started | ae33d5e19552dea41ddfca6ec038fe462a922c73 | [
"MIT"
] | null | null | null | WF_SDK/dmm.py | Digilent/WaveForms-SDK-Getting-Started | ae33d5e19552dea41ddfca6ec038fe462a922c73 | [
"MIT"
] | null | null | null | """ DIGITAL MULTIMETER CONTROL FUNCTIONS: open, measure, close """
import ctypes # import the C compatible data types
from sys import platform, path # this is needed to check the OS type and get the PATH
from os import sep # OS specific file path separators
# load the dynamic library, get constants path (the path is OS specific)
if platform.startswith("win"):
    # on Windows
    dwf = ctypes.cdll.dwf
    constants_path = "C:" + sep + "Program Files (x86)" + sep + "Digilent" + sep + "WaveFormsSDK" + sep + "samples" + sep + "py"
elif platform.startswith("darwin"):
    # on macOS
    lib_path = sep + "Library" + sep + "Frameworks" + sep + "dwf.framework" + sep + "dwf"
    dwf = ctypes.cdll.LoadLibrary(lib_path)
    constants_path = sep + "Applications" + sep + "WaveForms.app" + sep + "Contents" + sep + "Resources" + sep + "SDK" + sep + "samples" + sep + "py"
else:
    # on Linux
    dwf = ctypes.cdll.LoadLibrary("libdwf.so")
    constants_path = sep + "usr" + sep + "share" + sep + "digilent" + sep + "waveforms" + sep + "samples" + sep + "py"
# import constants
path.append(constants_path)
import dwfconstants as constants
"""-----------------------------------------------------------------------"""
def open(device_data):
    """
        initialize the digital multimeter
    """
    # enable the DMM
    dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(0), ctypes.c_double(1.0))
    return
"""-----------------------------------------------------------------------"""
def measure(device_data, mode, ac=False, range=0, high_impedance=False):
    """
        measure a voltage/current/resistance/continuity/temperature

        parameters: - device data
                    - mode: "voltage", "low current", "high current", "resistance", "continuity", "diode", "temperature"
                    - ac: True means AC value, False means DC value, default is DC
                    - range: voltage/current/resistance/temperature range, 0 means auto, default is auto
                    - high_impedance: input impedance for DC voltage measurement, False means 10MΩ, True means 10GΩ, default is 10MΩ

        returns:    - the measured value in V/A/Ω/°C, or None on error
    """
    # set voltage mode
    if mode == "voltage":
        # set coupling
        if ac:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmACVoltage)
        else:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmDCVoltage)
        # set input impedance
        if high_impedance:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(5), ctypes.c_double(1))
        else:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(5), ctypes.c_double(0))
    # set high current mode
    elif mode == "high current":
        # set coupling
        if ac:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmACCurrent)
        else:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmDCCurrent)
    # set low current mode
    elif mode == "low current":
        # set coupling
        if ac:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmACLowCurrent)
        else:
            dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmDCLowCurrent)
    # set resistance mode
    elif mode == "resistance":
        dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmResistance)
    # set continuity mode
    elif mode == "continuity":
        dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmContinuity)
    # set diode mode
    elif mode == "diode":
        dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmDiode)
    # set temperature mode
    elif mode == "temperature":
        dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(1), constants.DwfDmmTemperature)
    # set range
    dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(2), ctypes.c_double(range))
    # fetch analog I/O status
    if dwf.FDwfAnalogIOStatus(device_data.handle) == 0:
        # signal error
        return None
    # get reading
    measurement = ctypes.c_double()
    dwf.FDwfAnalogIOChannelNodeStatus(device_data.handle, ctypes.c_int(3), ctypes.c_int(3), ctypes.byref(measurement))
    return measurement.value
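# Minimal usage sketch (assumes a device_data handle obtained elsewhere, e.g.
# from the SDK's device-management module -- an assumption, since that module
# is not part of this file):
#     open(device_data)
#     volts = measure(device_data, "voltage")      # DC voltage, auto range
#     ohms = measure(device_data, "resistance")
#     close(device_data)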
"""-----------------------------------------------------------------------"""
def close(device_data):
    """
        reset the instrument
    """
    # disable the DMM
    dwf.FDwfAnalogIOChannelNodeSet(device_data.handle, ctypes.c_int(3), ctypes.c_int(0), ctypes.c_double(0))
    # reset the instrument
    dwf.FDwfAnalogIOReset(device_data.handle)
    return
| 43.057851 | 149 | 0.638196 | 616 | 5,210 | 5.288961 | 0.238636 | 0.081645 | 0.09822 | 0.057397 | 0.391958 | 0.38981 | 0.38981 | 0.38981 | 0.38981 | 0.38981 | 0 | 0.011693 | 0.212092 | 5,210 | 120 | 150 | 43.416667 | 0.78173 | 0.234165 | 0 | 0.178571 | 0 | 0 | 0.070559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.071429 | 0 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03db746274ce14f01fd2c68bd4565066f688e0cc | 4,765 | py | Python | ActionSystem.py | Rookfighter/TextAdventure | f24176065c9be26bd3bee983c2a63215452cb314 | [
"MIT"
] | 2 | 2016-03-21T15:26:02.000Z | 2016-03-21T15:26:05.000Z | ActionSystem.py | Rookfighter/TextAdventure | f24176065c9be26bd3bee983c2a63215452cb314 | [
"MIT"
] | null | null | null | ActionSystem.py | Rookfighter/TextAdventure | f24176065c9be26bd3bee983c2a63215452cb314 | [
"MIT"
] | null | null | null | from EventSystem import Event
import utils
class ActionSystem:
    def __init__(self, player, rooms, tuiSystem, eventQueue):
        self.__player = player
        self.__rooms = rooms
        self.__tuiSystem = tuiSystem
        self.__eventQueue = eventQueue
        # a mapping for input actions to functions
        self.__actions = {
            'use': self.__use,
            'take': self.__take,
            'goto': self.__goto,
            'examine': self.__examine,
            'inventory': self.__inventory,
            'room': self.__room
        }

    def __findObject(self, param):
        currRoom = self.__rooms[self.__player.room]
        obj = utils.findObjectByName(currRoom.objects, param)
        if obj is not None:
            return obj
        return utils.findObjectByName(self.__player.inventory, param)

    def __findDirection(self, param):
        currRoom = self.__rooms[self.__player.room]
        paramUp = param.upper()
        for direction in currRoom.directions:
            roomName = self.__rooms[direction['room']].name
            if direction['visible'] and \
                    (paramUp == direction['name'].upper() or paramUp == roomName.upper()):
                return direction
        return None
    def __createOnUseEvents(self, obj):
        currRoom = self.__rooms[self.__player.room]
        if obj['name'] not in currRoom.onUse:
            self.__tuiSystem.printNoEffect()
        else:
            events = currRoom.onUse[obj['name']]
            for event in events:
                self.__eventQueue.append(Event(event['type'], event))
            # remove on use events
            del currRoom.onUse[obj['name']]

    def __use(self, param):
        """
        Callback for "use" command. Uses an item either from inventory or
        from the current room.
        """
        obj = self.__findObject(param)
        if obj is None:
            self.__tuiSystem.printInvalidObject(param)
            return
        if obj['useable']:
            self.__createOnUseEvents(obj)
        else:
            self.__tuiSystem.printUnusableObject(obj['name'])

    def __take(self, param):
        """
        Callback for "take" command. Removes an object from the current room
        and adds it to the inventory.
        """
        obj = self.__findObject(param)
        if obj is None:
            self.__tuiSystem.printInvalidObject(param)
            return
        if obj['takeable']:
            self.__rooms[self.__player.room].objects.remove(obj)
            self.__player.inventory.append(obj)
            obj['takeable'] = False
            self.__tuiSystem.printObjectTaken(obj['name'])
        else:
            self.__tuiSystem.printObjectUntakeable(obj['name'])
    def __createOnEnterEvents(self):
        currRoom = self.__rooms[self.__player.room]
        for event in currRoom.onEnter:
            self.__eventQueue.append(Event(event['type'], event))
        # remove on enter events
        del currRoom.onEnter[:]

    def __goto(self, param):
        """
        Callback for "goto" command. Moves to the next room by either specifying
        the direction or the next room name.
        """
        direction = self.__findDirection(param)
        if direction is None:
            self.__tuiSystem.printInvalidDirection(param)
            return
        if direction['locked']:
            self.__tuiSystem.printDoorLocked()
        else:
            self.__player.room = direction['room']
            self.__createOnEnterEvents()
        return

    def __examine(self, param):
        """
        Callback for "examine" command. Prints the examine field of an object.
        """
        obj = self.__findObject(param)
        if obj is None:
            self.__tuiSystem.printInvalidObject(param)
        else:
            self.__tuiSystem.printExamine(obj)

    def __inventory(self, param):
        """
        Callback for "inventory" command. Prints the current inventory.
        """
        self.__tuiSystem.printInventory()

    def __room(self, param):
        """
        Callback for "room" command. Prints the current room.
        """
        self.__tuiSystem.printRoom(self.__player.room)
    def getActions(self):
        return self.__actions.keys()

    def update(self, actStr):
        self.__player.action = None
        action = actStr
        param = ''
        # try to find a separating space
        idx = actStr.find(' ')
        if idx > 0:
            action = actStr[:idx]
            param = actStr[idx+1:]
        # check if the given action is valid
        if action not in self.__actions:
            self.__tuiSystem.printInvalidAction(action)
            return
        # execute the action
        self.__actions[action](param) | 30.741935 | 87 | 0.583631 | 484 | 4,765 | 5.485537 | 0.229339 | 0.06855 | 0.036911 | 0.045198 | 0.183804 | 0.175141 | 0.151789 | 0.151789 | 0.121657 | 0.086252 | 0 | 0.000617 | 0.320252 | 4,765 | 155 | 88 | 30.741935 | 0.81908 | 0.137671 | 0 | 0.247525 | 0 | 0 | 0.028412 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128713 | false | 0 | 0.019802 | 0.009901 | 0.257426 | 0.128713 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03deed2eb45e83f7284046e17bf2aaf69dbbad88 | 14,394 | py | Python | train_model.py | taiducvu/VNG-NSFW-DETECTION | f49b6ad21ed9c8646c402a37015a80258fb79a68 | [
"MIT"
] | null | null | null | train_model.py | taiducvu/VNG-NSFW-DETECTION | f49b6ad21ed9c8646c402a37015a80258fb79a68 | [
"MIT"
] | null | null | null | train_model.py | taiducvu/VNG-NSFW-DETECTION | f49b6ad21ed9c8646c402a37015a80258fb79a68 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import data as dt
import vng_model as md
import time
import csv
import math
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',
                           """Directory where the training set is""")
tf.app.flags.DEFINE_string('val_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',
                           """Directory where the validation set is""")
tf.app.flags.DEFINE_integer('num_steps', 500000,
                            "The number of steps in updating the weights of the model")
tf.app.flags.DEFINE_string('checkpoint_dir_resnet', '/home/taivu/workspace/Pycharm_Nudity_Detection/pretrain_weight',
                           """Directory where the Resnet checkpoint is""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/checkpoint_model',
                           """Directory where the checkpoint of the model is saved""")
tf.app.flags.DEFINE_float('learning_rate', 1e-3,
                          """Learning rate for optimization""")
tf.app.flags.DEFINE_integer('num_train_sample', 8000,
                            """The number of training samples""")
tf.app.flags.DEFINE_integer('num_val_sample', 1156,
                            """The number of validation samples""")
tf.app.flags.DEFINE_integer('batch_size', 32,
                            "The size of an image batch")
tf.app.flags.DEFINE_float('weight_decay', 0.01,
                          """Weight decay""")

# Flags for validation process
tf.app.flags.DEFINE_boolean('use_val', True,
                            """Whether to use the validation set in the training process""")
tf.app.flags.DEFINE_integer('val_batch_size', 128,
                            """The size of a validation data batch""")

# Logging the result
tf.app.flags.DEFINE_boolean('is_logging', True,
                            """Whether to log the result of training the model""")
tf.app.flags.DEFINE_string('log_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/checkpoint_model',
                           """Directory where the log file is saved""")
tf.app.flags.DEFINE_string('summaries_dir', '/home/taivu/Dropbox/Pycharm_Nudity_Detection/log',
                           """Directory where the tensorboard log is saved""")
def set_flag(flag, value):
    flag.assign(value)
def train():
    """
    :return:
    """
    # Read data
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        train_flag = tf.Variable(True, trainable=False)
        train_path = os.path.join(FLAGS.train_dir, 'transfer_learning_train.tfrecords')
        val_path = os.path.join(FLAGS.val_dir, 'transfer_learning_val.tfrecords')
        tr_samples, tr_lb = dt.input_data(train_path, FLAGS.batch_size)
        val_samples, val_lb = dt.input_data(val_path, 1156, False)
        samples, labels = tf.cond(train_flag,
                                  lambda: (tr_samples, tr_lb),
                                  lambda: (val_samples, val_lb))
        samples = tf.squeeze(samples, [1, 2])
        logits = md.inference(samples)
        loss = md.loss(logits, labels)
        correct_predict = tf.equal(tf.cast(tf.arg_max(logits, 1), tf.int32), labels)
        val_acc = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
        train_step = tf.train.RMSPropOptimizer(1e-5).minimize(loss)
        coord = tf.train.Coordinator()
        format_str = ('%d step: %.2f (%.1f examples/sec; %0.3f sec/batch)')
        with tf.Session() as sess:
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            sess.run(tf.global_variables_initializer())
            # samp_batch, lb_batch = sess.run([tr_samples, tr_lb])
            # size: samp_batch [#batch_size, 1, 1, 2048]
            #       tr_lb [#batch_size,]
            for idx in range(FLAGS.num_steps):  # the file defines num_steps; no num_epochs flag exists
                start_time = time.time()
                _, loss_value = sess.run([train_step, loss])
                duration = time.time() - start_time
                examples_per_sec = FLAGS.batch_size / float(duration)
                sec_per_batch = float(duration)
                if idx % 10 == 0:
                    set_flag(train_flag, False)
                    acc = sess.run([val_acc])
                    set_flag(train_flag, True)
                    print('Validation accuracy: %.2f' % acc[0])
                print(format_str % (idx, loss_value, examples_per_sec, sec_per_batch))
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=120)
            sess.close()
def train_resnet():
    """
    The function is used to train the model. We use the 'Stochastic Gradient Descent' algorithm to
    optimize the weights of the model. More specifically, we initialize them with the pre-trained
    weights of the Resnet model. To train the model, we freeze the first blocks of the network and
    train the rest. The learning rate of the additional layers is set 5 times larger than that of
    the trainable Resnet layers.
    :return:
    """
    batch_ls = []
    for batch in range(2, 8):
        name_batch = '4000x224x224_batch_' + str(batch) + '.tfrecords'
        train_batch = os.path.join(FLAGS.train_dir, name_batch)
        batch_ls.append(train_batch)
    val_path = os.path.join(FLAGS.train_dir, '4000x224x224_batch_1.tfrecords')
    with tf.Graph().as_default() as g:
        # ------------------------- BUILD THE GRAPH OF MODEL ---------------------------- #
        x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
        y_ = tf.placeholder(tf.int32, (None,), name='labels')
        val_x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='validate_features')
        val_y = tf.placeholder(tf.int32, (None,), name='validate_labels')
        tr_samples, tr_labels = dt.input_data(batch_ls, FLAGS.batch_size)
        val_samples, val_labels = dt.input_data([val_path], FLAGS.val_batch_size, False)
        logit = md.inference_resnet(x, is_training=False, is_log=FLAGS.is_logging)
        val_logit = md.inference_resnet(val_x, is_training=False, reuse=True, is_log=FLAGS.is_logging)

        # Define variables to output the predictions of the model and to evaluate it
        resnet_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='resnet_v1_50')
        resnet_weight_ls = []
        for idx in range(0, 159, 3):
            resnet_weight_ls.append(resnet_var_ls[idx])
        loss = md.loss(logit, y_, resnet_weight_ls)
        val_loss = md.loss(val_logit, val_y, resnet_weight_ls)
        hat_y = tf.arg_max(logit, 1, name='predict_label')
        val_pre_y = tf.arg_max(val_logit, 1, name='val_predict_label')
        correct_pre = tf.equal(tf.cast(hat_y, tf.int32), y_)
        val_correct_predict = tf.equal(tf.cast(val_pre_y, tf.int32), val_y)
        accuracy = tf.reduce_mean(tf.cast(correct_pre, tf.float32))
        val_accuracy = tf.reduce_mean(tf.cast(val_correct_predict, tf.float32))
        tf.summary.scalar('train_loss', loss)  # Log the value of the train loss
        tf.summary.scalar('accuracy', accuracy)  # Log the accuracy
        # ------------------------------------- END -------------------------------------- #

        # ------------------------------- Optimizing process ------------------------------ #
        resnet_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='resnet_v1_50')
        add_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='additional_layers')
        opt_1 = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        opt_2 = tf.train.GradientDescentOptimizer(5 * FLAGS.learning_rate)
        # Freeze the weights from the first to the third block
        grads = tf.gradients(loss, resnet_var_ls[153:] + add_var_ls)
        # Do gradient descent only on a particular weight set
        num_opt_resnet_layers = len(resnet_var_ls[153:])
        grads_1 = grads[:num_opt_resnet_layers]  # Gradients for the Resnet layers
        grads_2 = grads[num_opt_resnet_layers:]  # Gradients for the additional layers
        train_opt_1 = opt_1.apply_gradients(zip(grads_1, resnet_var_ls[153:]))
        train_opt_2 = opt_2.apply_gradients(zip(grads_2, add_var_ls))
        train_opt = tf.group(train_opt_1, train_opt_2)
        # ------------------------------------- END -------------------------------------- #

        saver_my_model = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=50)

        # ------------------ Support for loading the trained weights of Resnet ----------- #
        saver_resnet = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                                 scope='resnet_v1_50'))
        ckpt_resnet = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir_resnet)
        ###################################################################################

        # ----------------------------------- TENSORBOARD --------------------------------
        # merged = tf.summary.merge_all()
        # train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', graph=g)
        # test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
        coord = tf.train.Coordinator()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            # ------ Load pre-trained weights of the Resnet model -------------------------- #
            if ckpt_resnet and ckpt_resnet.model_checkpoint_path:
                saver_resnet.restore(sess, ckpt_resnet.model_checkpoint_path)
                print('Load pre-trained weights of Resnet successfully!')
            else:
                print('Checkpoint of Resnet not found!')
            ####################################################################################

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            format_str = ('Step %d: %0.2f (%0.1f samples/sec; %0.3f secs/batch)')
            steps_per_epoch = int(math.ceil(float(FLAGS.num_train_sample) / FLAGS.batch_size))
            for idx in range(FLAGS.num_steps):
                tr_x, tr_y = sess.run([tr_samples, tr_labels])
                start_time = time.time()
                _, loss_value = sess.run([train_opt, loss], feed_dict={x: tr_x, y_: tr_y})
                duration = time.time() - start_time
                examples_per_sec = FLAGS.batch_size / float(duration)
                sec_per_batch = float(duration)
                print(format_str % (idx, loss_value, examples_per_sec, sec_per_batch))

                mean_acc = 0
                mean_val_acc = 0
                mean_tr_loss = 0
                mean_val_loss = 0
                if (idx + 1) % steps_per_epoch == 0 or idx == 0:
                    # Logging the performance of the model in the training process
                    if FLAGS.use_val and FLAGS.is_logging:
                        val_iter = int(math.ceil(float(FLAGS.num_val_sample) / FLAGS.val_batch_size))
                        for i in range(val_iter):
                            v_x, v_y = sess.run([val_samples, val_labels])
                            val_acc, val_err = sess.run([val_accuracy, val_loss],
                                                        feed_dict={x: tr_x,
                                                                   y_: tr_y,
                                                                   val_x: v_x,
                                                                   val_y: v_y})
                            # train_writer.add_summary(summary, idx)  # Log
                            if i == 0:
                                mean_val_acc = val_acc
                                mean_val_loss = val_err
                            else:
                                mean_val_acc = 1.0 / (i + 1) * (val_acc + i * mean_val_acc)
                                mean_val_loss = 1.0 / (i + 1) * (val_err + i * mean_val_loss)
                        print('Validation accuracy: %0.2f' % mean_val_acc)

                        for i in range(steps_per_epoch):
                            eval_tr_x, eval_tr_y = sess.run([tr_samples, tr_labels])
                            tr_acc, loss_value = sess.run([accuracy, loss], feed_dict={x: eval_tr_x, y_: eval_tr_y})
                            if i == 0:
                                mean_acc = tr_acc
                                mean_tr_loss = loss_value
                            else:
                                mean_acc = 1.0 / (i + 1) * (tr_acc + i * mean_acc)
                                mean_tr_loss = 1.0 / (i + 1) * (loss_value + i * mean_tr_loss)

                        # -------------------- Writing log-file ------------------------------
                        log_path = os.path.join(FLAGS.log_dir, 'result.csv')
                        if os.path.isfile(log_path) and idx == 0:
                            os.remove(log_path)
                        with open(log_path, 'a') as csvfile:
                            print('Writing data into csv file ...')
                            csv_writer = csv.writer(csvfile, delimiter=',',
                                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
                            csv_writer.writerow([idx, mean_tr_loss, mean_val_loss, mean_acc, mean_val_acc])
                            print('Finish writing!')
                        # ---------------------------- END ------------------------------------
                    elif FLAGS.use_val:
                        print('Is training')
                    else:
                        print('Set the use_val flag to True to log the performance of the model during training!')
                    checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
                    saver_my_model.save(sess, checkpoint_path, global_step=idx)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=120)
            sess.close()
def main(argv=None):
    train_resnet()


if __name__ == '__main__':
    tf.app.run()
| 41.481268 | 118 | 0.553633 | 1,732 | 14,394 | 4.33776 | 0.170323 | 0.011314 | 0.021296 | 0.031945 | 0.388527 | 0.310928 | 0.238387 | 0.182084 | 0.157327 | 0.141887 | 0 | 0.018474 | 0.304293 | 14,394 | 346 | 119 | 41.601156 | 0.731776 | 0.133458 | 0 | 0.164103 | 0 | 0.005128 | 0.108865 | 0.039315 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020513 | false | 0 | 0.051282 | 0 | 0.071795 | 0.05641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03deff63d4fe5a3fc6c43174f1b43ca9ad27e1c4 | 1,417 | py | Python | scanpy_gene_filtering.py | granatumx/gbox-py | b3e264a22bc6a041f2dd631d952eae29c0ecae21 | [
"MIT"
] | null | null | null | scanpy_gene_filtering.py | granatumx/gbox-py | b3e264a22bc6a041f2dd631d952eae29c0ecae21 | [
"MIT"
] | 1 | 2020-06-16T17:14:45.000Z | 2020-06-16T17:14:45.000Z | scanpy_gene_filtering.py | granatumx/gbox-py | b3e264a22bc6a041f2dd631d952eae29c0ecae21 | [
"MIT"
] | 2 | 2020-06-16T16:42:40.000Z | 2020-08-28T16:59:42.000Z | import math
import scanpy.api as sc
import numpy as np
from granatum_sdk import Granatum
def main():
    gn = Granatum()

    adata = gn.ann_data_from_assay(gn.get_import("assay"))
    min_cells_expressed = gn.get_arg("min_cells_expressed")
    min_mean = gn.get_arg("min_mean")
    max_mean = gn.get_arg("max_mean")
    min_disp = gn.get_arg("min_disp")
    max_disp = gn.get_arg("max_disp")

    num_genes_before = adata.shape[1]

    sc.pp.filter_genes(adata, min_cells=min_cells_expressed)
    filter_result = sc.pp.filter_genes_dispersion(
        adata.X, flavor='seurat', min_mean=math.log(min_mean), max_mean=math.log(max_mean),
        min_disp=min_disp, max_disp=max_disp,
    )
    adata = adata[:, filter_result.gene_subset]

    sc.pl.filter_genes_dispersion(filter_result)
    gn.add_current_figure_to_results(
        "Each dot represents a gene. The gray dots are the removed genes. The x-axis is log-transformed.",
        zoom=3,
        dpi=50,
        height=400,
    )

    gn.add_result(
        "\n".join(
            [
                "Number of genes before filtering: **{}**".format(num_genes_before),
                "",
                "Number of genes after filtering: **{}**".format(adata.shape[1]),
            ]
        ),
        type="markdown",
    )

    gn.export(gn.assay_from_ann_data(adata), "Filtered Assay", dynamic=False)

    gn.commit()


if __name__ == "__main__":
    main()
| 26.735849 | 129 | 0.637262 | 200 | 1,417 | 4.205 | 0.405 | 0.035672 | 0.047562 | 0.039239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.237826 | 1,417 | 52 | 130 | 27.25 | 0.771296 | 0 | 0 | 0 | 0 | 0.025641 | 0.188426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.128205 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03e365fc58c1889f65eac6521d7aecdc3431dbf8 | 3,538 | py | Python | pagapp/application_api/album_api.py | eugeneandrienko/PyArtistsGallery | b75114955859d45d9dfb5c901213f25a6e09f488 | [
"MIT"
] | null | null | null | pagapp/application_api/album_api.py | eugeneandrienko/PyArtistsGallery | b75114955859d45d9dfb5c901213f25a6e09f488 | [
"MIT"
] | null | null | null | pagapp/application_api/album_api.py | eugeneandrienko/PyArtistsGallery | b75114955859d45d9dfb5c901213f25a6e09f488 | [
"MIT"
] | null | null | null | """Handlers for albums' API calls."""
import json
from flask import request, current_app
from flask_login import login_required
from pagapp.support_functions import remove_danger_symbols
from pagapp.application_api import application_api
from pagapp.models import db
from pagapp.models.albums import Albums
from pagapp.models.pictures import Pictures
from pagapp.application_api.html_generators import generate_action_buttons_html
def _generate_album_table_item(album):
    return {
        'name': album.album_name,
        'pics_count': Pictures.query.filter_by(album_id=album.id).count(),
        'description': album.album_description,
        'actions': generate_action_buttons_html(
            album.id, album.album_name, album.album_description,
            'editAlbumModal', 'deleteAlbum'
        )
    }
@application_api.route('/get-albums-list')
@login_required
def get_albums_list():
    """Returns list of albums.

    Returns JSON array, which contains list
    of albums. Sample result:
    [
        {
            'name': u'Test album name',
            'pics_count': 1,
            'description': u'Test album description',
            'actions': u'action buttons HTML code'
        }
    ]
    """
    return json.dumps(
        [_generate_album_table_item(album) for album in Albums.query.all()])
@application_api.route('/get-albums-list-short')
def get_albums_list_short():
    """Returns short list of albums.

    Returns JSON array, which looks like the next example:
    [
        {
            'id': 1,
            'name': 'Test album name'
        }
    ]
    """
    return json.dumps(
        [
            {
                'id': album.id,
                'name': album.album_name
            } for album in Albums.query.all()])
@application_api.route('/delete-album', methods=['POST'])
@login_required
def delete_album():
    """Deletes the album with the given ID if exactly one such album exists."""
    album_id = remove_danger_symbols(request.form['album_id'])
    album = Albums.query.filter_by(id=album_id)
    if album.count() != 1:
        current_app.logger.error(
            "Count of albums with given ID ({}) is more than 1.".format(
                album_id))
        return 'Cannot delete album, too many IDs!', 404
    else:
        current_app.logger.debug("Deleting album with ID {}.".format(album_id))
        db.session.delete(album.first())
        db.session.commit()
        return '', 200
@application_api.route('/edit-album', methods=['POST'])
@login_required
def edit_album():
    """Edit album with given ID, name and description."""
    album_id = remove_danger_symbols(request.form['album_id'])
    album = Albums.query.filter_by(id=album_id)
    if album.count() == 0:
        current_app.logger.error(
            "Album with given ID ({}) does not exist.".format(album_id))
        return 'Album does not exist!', 404
    if album.count() != 1:
        current_app.logger.error(
            "Count of albums with given ID ({}) is more than 1.".format(
                album_id))
        return 'Cannot edit album, too many IDs!', 404
    album_name = remove_danger_symbols(request.form['album_name'])
    album_description = remove_danger_symbols(request.form['album_description'])
    current_app.logger.debug(
        "Editing album with ID {}. New name: {}. New description: {}.".format(
            album_id, album_name, album_description))
    album.first().album_name = album_name
    album.first().album_description = album_description
    db.session.commit()
    return '', 200
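# Minimal client-side sketch (illustrative; the URL prefix depends on how the
# application_api blueprint is registered, and the session must be logged in):
#     curl -b cookies.txt -X POST \
#          -d 'album_id=1' -d 'album_name=Sketches' -d 'album_description=WIP' \
#          http://localhost:5000/edit-album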
| 31.589286 | 80 | 0.644997 | 444 | 3,538 | 4.952703 | 0.236486 | 0.047749 | 0.043201 | 0.047294 | 0.374261 | 0.311505 | 0.230105 | 0.200091 | 0.200091 | 0.160982 | 0 | 0.008169 | 0.238836 | 3,538 | 111 | 81 | 31.873874 | 0.808392 | 0.153759 | 0 | 0.314286 | 0 | 0 | 0.169896 | 0.007612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.128571 | 0.014286 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03e4c99b5e129d999b826ed6583329b22a13afb6 | 2,097 | py | Python | DNBC4tools/tools/utils.py | lishuangshuang0616/DNBC4dev | 2fc54e87edc5586e92d470669b6aa37661ac016a | [
"MIT"
] | null | null | null | DNBC4tools/tools/utils.py | lishuangshuang0616/DNBC4dev | 2fc54e87edc5586e92d470669b6aa37661ac016a | [
"MIT"
] | null | null | null | DNBC4tools/tools/utils.py | lishuangshuang0616/DNBC4dev | 2fc54e87edc5586e92d470669b6aa37661ac016a | [
"MIT"
] | null | null | null | import os,sys
import time
import logging
from datetime import timedelta
from subprocess import check_call
from DNBC4tools.__init__ import _root_dir
def str_mkdir(arg):
    if not os.path.exists(arg):
        os.system('mkdir -p %s' % arg)

def change_path():
    os.environ['PATH'] += ':' + '/'.join(str(_root_dir).split('/')[0:-4]) + '/bin'
    os.environ['LD_LIBRARY_PATH'] = '/'.join(str(_root_dir).split('/')[0:-4]) + '/lib'

def python_path():
    python = '/'.join(str(_root_dir).split('/')[0:-4]) + '/bin/python'
    return python

def rm_temp(*args):
    for i in args:
        os.remove(i)

def start_print_cmd(arg):
    print(arg)
    check_call(arg, shell=True)
def logging_call(popenargs, name, dir):
    today = time.strftime('%Y%m%d', time.localtime(time.time()))
    logfile = '%s/log/%s.%s.txt' % (dir, name, today)
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.setLevel(level=logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.formatter = formatter
        file_handler = logging.FileHandler(logfile, encoding="utf8")
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
    logger.info('Program start...')
    logger.info(popenargs)
    start = time.time()
    check_call(popenargs, shell=True)
    logger.info('Program end...')
    end = time.time()
    used = timedelta(seconds=end - start)
    logger.info('Program time used: %s', used)
    logger.info('\n')
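# Minimal usage sketch (command and output directory are illustrative; the
# directory is expected to already contain a 'log' subfolder):
#     logging_call('samtools sort in.bam -o out.bam', 'sort', '/path/to/outdir')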
def judgeFilexits(*args):
    for input_files in args:
        for input_file in input_files.split(','):
            if not os.path.exists(input_file):
                print(" ------------------------------------------------")
                print("Error: Cannot find input file or dir %s." % (str(input_file)))
                print(" ------------------------------------------------")
                sys.exit()
            else:
                pass
| 33.285714 | 93 | 0.593705 | 260 | 2,097 | 4.653846 | 0.373077 | 0.041322 | 0.027273 | 0.034711 | 0.091736 | 0.063636 | 0.063636 | 0.063636 | 0 | 0 | 0 | 0.004848 | 0.213162 | 2,097 | 62 | 94 | 33.822581 | 0.728485 | 0 | 0 | 0.037037 | 0 | 0 | 0.156414 | 0.04578 | 0.018519 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0.018519 | 0.12963 | 0 | 0.277778 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03e65ebb8db11f83d90833408439680a1da222bc | 3,432 | py | Python | riberry/app/backends/impl/celery/base.py | srafehi/riberry | 2ffa48945264177c6cef88512c1bc80ca4bf1d5e | [
"MIT"
] | 2 | 2019-12-09T10:24:36.000Z | 2019-12-09T10:26:56.000Z | riberry/app/backends/impl/celery/base.py | srafehi/riberry | 2ffa48945264177c6cef88512c1bc80ca4bf1d5e | [
"MIT"
] | 2 | 2018-06-11T11:34:28.000Z | 2018-08-22T12:00:19.000Z | riberry/app/backends/impl/celery/base.py | srafehi/riberry | 2ffa48945264177c6cef88512c1bc80ca4bf1d5e | [
"MIT"
] | null | null | null | from typing import Dict, AnyStr
import celery
import riberry
from . import patch, tasks, addons
from .executor import TaskExecutor
from .tracker import CeleryExecutionTracker
def send_task_process_rib_kwargs(self, *args, **kwargs):
    riberry_properties = {}
    if kwargs.get('kwargs'):
        for key, value in kwargs['kwargs'].items():
            if key.startswith('__rib_'):
                riberry_properties[key.replace('__rib_', '', 1)] = value
class CeleryBackend(riberry.app.backends.RiberryApplicationBackend):
    instance: celery.Celery

    ENTRY_POINT_TASK_NAME = 'riberry.core.app.entry_point'
    CHECK_EXTERNAL_TASK_NAME = 'riberry.core.app.check_external_task'

    def __init__(self, instance):
        super().__init__(instance=instance)
        self.executor = TaskExecutor()
        self._celery_execution_tracker = CeleryExecutionTracker(backend=self)

    def initialize(self):
        patch.patch_send_task(instance=self.instance, func=send_task_process_rib_kwargs)

        # Register "entry point" task
        self.task(
            name=self.ENTRY_POINT_TASK_NAME,
        )(self.executor.entry_point_executor())

        # Register "external task checker" task
        self.task(
            name=self.CHECK_EXTERNAL_TASK_NAME,
            max_retries=None,
        )(self.executor.external_task_executor())

    def default_addons(self) -> Dict[AnyStr, 'riberry.app.addons.Addon']:
        return {
            'scale': addons.Scale(),
            'background': addons.BackgroundTasks(),
            'external-receiver': addons.ExternalTaskReceiver(),
        }

    def register_task(self, func, **options) -> celery.Task:
        wrapped_func, options = self.executor.riberry_task_executor_wrapper(func=func, task_options=options)
        return self.instance.task(**options)(wrapped_func)

    def task_by_name(self, name: AnyStr):
        return self.instance.tasks[name]

    def start_execution(self, execution_id, root_id, entry_point) -> AnyStr:
        task = self.task_by_name(self.ENTRY_POINT_TASK_NAME)
        task_signature = task.si(
            execution_id=execution_id,
            form=entry_point.form,
            __rib_stream=entry_point.stream,
            __rib_step=entry_point.step,
        )
        callback_success = tasks.execution_complete.si(status='SUCCESS', stream=entry_point.stream)
        callback_failure = tasks.execution_complete.si(status='FAILURE', stream=entry_point.stream)
        task_signature.options['root_id'] = root_id
        callback_success.options['root_id'] = root_id
        callback_failure.options['root_id'] = root_id
        exec_signature = task_signature.on_error(callback_failure) | callback_success
        exec_signature.options['root_id'] = root_id
        riberry.app.util.events.create_event(
            name='stream',
            root_id=root_id,
            task_id=root_id,
            data={
                'stream': entry_point.stream,
                'state': 'QUEUED',
            }
        )
        return exec_signature.apply_async().id

    def create_receiver_task(self, external_task_id, validator):
        return self.task_by_name(self.CHECK_EXTERNAL_TASK_NAME).si(
            external_task_id=external_task_id,
            validator=validator,
        )

    def active_task(self):
        return celery.current_task

    def _execution_tracker(self):
        return self._celery_execution_tracker
| 34.32 | 108 | 0.668415 | 396 | 3,432 | 5.462121 | 0.242424 | 0.060102 | 0.02589 | 0.027739 | 0.192325 | 0.092926 | 0 | 0 | 0 | 0 | 0 | 0.00038 | 0.232809 | 3,432 | 99 | 109 | 34.666667 | 0.821117 | 0.018939 | 0 | 0.027027 | 0 | 0 | 0.062128 | 0.026159 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.081081 | 0.067568 | 0.364865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03e6fbea2bc31d53355d4df0ef6a940806f422a0 | 2,753 | py | Python | amitypes/hsd.py | slac-lcls/amityping | a9f9c1a0d9077ff093e05a383ecd3bd05e0ce64d | [
"BSD-3-Clause-LBNL"
] | null | null | null | amitypes/hsd.py | slac-lcls/amityping | a9f9c1a0d9077ff093e05a383ecd3bd05e0ce64d | [
"BSD-3-Clause-LBNL"
] | null | null | null | amitypes/hsd.py | slac-lcls/amityping | a9f9c1a0d9077ff093e05a383ecd3bd05e0ce64d | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-12-13T01:54:32.000Z | 2020-12-13T01:54:32.000Z | import typing
from mypy_extensions import TypedDict
from amitypes.array import Array1d
__all__ = [
    'TypedDict',
    'PeakTimes',
    'HSDSegementPeakTimes',
    'HSDPeakTimes',
    'Peaks',
    'HSDSegmentPeaks',
    'HSDPeaks',
    'HSDSegmentWaveforms',
    'HSDWaveforms',
    'HSDAssemblies',
    'HSDTypes',
]
PeakTimes = typing.List[Array1d]
HSDSegementPeakTimes = TypedDict(
    "HSDSegementPeakTimes",
    {
        '0': PeakTimes,
        '1': PeakTimes,
        '2': PeakTimes,
        '3': PeakTimes,
        '4': PeakTimes,
        '5': PeakTimes,
        '6': PeakTimes,
        '7': PeakTimes,
        '8': PeakTimes,
        '9': PeakTimes,
        '10': PeakTimes,
        '11': PeakTimes,
        '12': PeakTimes,
        '13': PeakTimes,
        '14': PeakTimes,
        '15': PeakTimes,
    },
    total=False)
HSDPeakTimes = typing.Dict[int, HSDSegementPeakTimes]
Peaks = typing.Tuple[typing.List[int], typing.List[Array1d]]
HSDSegmentPeaks = TypedDict(
    "HSDSegmentPeaks",
    {
        '0': Peaks,
        '1': Peaks,
        '2': Peaks,
        '3': Peaks,
        '4': Peaks,
        '5': Peaks,
        '6': Peaks,
        '7': Peaks,
        '8': Peaks,
        '9': Peaks,
        '10': Peaks,
        '11': Peaks,
        '12': Peaks,
        '13': Peaks,
        '14': Peaks,
        '15': Peaks,
    },
    total=False)
HSDPeaks = typing.Dict[int, HSDSegmentPeaks]
HSDSegmentWaveforms = TypedDict(
    "HSDSegmentWaveforms",
    {
        'times': Array1d,
        '0': Array1d,
        '1': Array1d,
        '2': Array1d,
        '3': Array1d,
        '4': Array1d,
        '5': Array1d,
        '6': Array1d,
        '7': Array1d,
        '8': Array1d,
        '9': Array1d,
        '10': Array1d,
        '11': Array1d,
        '12': Array1d,
        '13': Array1d,
        '14': Array1d,
        '15': Array1d,
    },
    total=False)
HSDWaveforms = typing.Dict[int, HSDSegmentWaveforms]
HSDAssemblies = typing.TypeVar('HSDAssemblies')
HSDTypes = {HSDPeakTimes, HSDPeaks, HSDWaveforms, HSDAssemblies}
| 25.256881 | 64 | 0.392663 | 174 | 2,753 | 6.183908 | 0.252874 | 0.027881 | 0.036245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062049 | 0.496549 | 2,753 | 108 | 65 | 25.490741 | 0.714286 | 0 | 0 | 0.034483 | 0 | 0 | 0.097348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03e72378fe20cb97a330f6cb48639d89ec4e8658 | 4,975 | py | Python | lib/astc-encoder/Test/astc_test_image_dl.py | atteneder/KTX-Software | 15369663a43d72972dfe0a7e3597d3ef6d90b6b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-1-Clause",
"BSD-3-Clause"
] | 619 | 2015-04-30T14:55:02.000Z | 2022-03-30T06:56:14.000Z | lib/astc-encoder/Test/astc_test_image_dl.py | atteneder/KTX-Software | 15369663a43d72972dfe0a7e3597d3ef6d90b6b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-1-Clause",
"BSD-3-Clause"
] | 283 | 2018-02-05T01:42:19.000Z | 2022-03-24T09:26:16.000Z | lib/astc-encoder/Test/astc_test_image_dl.py | atteneder/KTX-Software | 15369663a43d72972dfe0a7e3597d3ef6d90b6b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-1-Clause",
"BSD-3-Clause"
] | 167 | 2015-04-30T15:06:25.000Z | 2022-03-24T03:02:00.000Z | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2019-2020 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The ``astc_test_image_dl`` utility provides a means to programmatically download
test images that are available online, avoiding the need to duplicate them in
the git repository.
"""
import os
import sys
import urllib.request
from PIL import Image
TEST_IMAGE_DIR = os.path.join("Test", "Images")
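# A typical invocation (illustrative; the relative TEST_IMAGE_DIR above implies
# the script is run from the repository root):
#     python3 Test/astc_test_image_dl.py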
def download(testSet, index, srcUrl, dstPath):
    """
    Download a single image.

    Args:
        testSet (str): The test set name.
        index (int): The download index.
        srcUrl (str): The download URL.
        dstPath (str): The destination path.
    """
    dirName = os.path.dirname(dstPath)
    if not os.path.exists(dirName):
        os.makedirs(dirName)

    # Skip downloads if the file already exists
    if not os.path.exists(dstPath):
        print("%s image %u: Downloading" % (testSet, index))
        urllib.request.urlretrieve(srcUrl, dstPath)
    else:
        print("%s image %u: Skipping" % (testSet, index))
def make_landscape(imgPath):
    """
    Make an image on disk landscape aspect (edit in place).

    Args:
        imgPath: The path of the image on disk.
    """
    img = Image.open(imgPath)
    if img.size[0] < img.size[1]:
        img = img.rotate(90, expand=True)
        img.save(imgPath)
def make_mixed_image(imgPathA, imgPathB, dstPath):
    """
    Make image consisting of RGB from A's RGB, and alpha from B's luminance.

    Args:
        imgPathA: The path of input A on disk.
        imgPathB: The path of input B on disk.
        dstPath: The path of the destination.
    """
    imgA = Image.open(imgPathA)
    imgB = Image.open(imgPathB).convert("L")
    imgA.putalpha(imgB)

    dirs = os.path.dirname(dstPath)
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    imgA.save(dstPath)
def make_montage(imageDir, dstPath):
    """
    Make a single mosaic montage consisting of all of the Kodak images.

    Args:
        imageDir: The directory path of the Kodak images on disk.
        dstPath: The file path of the resulting montage.
    """
    cols = 6
    rows = 4
    width = 768
    height = 512

    images = os.listdir(imageDir)
    images.sort()

    montage = Image.new('RGB', (width * cols, height * rows))
    for i, src in enumerate(images):
        im = Image.open(os.path.join(imageDir, src))
        col = i % cols
        row = i // cols
        montage.paste(im, (width * col, height * row))

    dirs = os.path.dirname(dstPath)
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    montage.save(dstPath)
def retrieve_kodak_set():
    """
    Download the public domain Kodak image set.

    To make test set mosaics easier to build we rotate images to make
    everything landscape.
    """
    testSet = "Kodak"

    # Download the original RGB images
    for i in range(1, 25):
        fle = "ldr-rgb-kodak%02u.png" % i
        dst = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB", fle)
        src = "http://r0k.us/graphics/kodak/kodak/kodim%02u.png" % i
        download(testSet, i, src, dst)

        # Canonicalize image aspect
        make_landscape(dst)

    # Make some correlated alpha RGBA images
    fle = "ldr-rgb-kodak%02u.png"  # Expand later
    pattern = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB", fle)
    for i in (22, 23):
        imgA = pattern % i
        fle = "ldr-rgba-kodak%02u+ca.png" % i
        dst = os.path.join(TEST_IMAGE_DIR, "KodakSim", "LDR-RGBA", fle)
        make_mixed_image(imgA, imgA, dst)

    # Make some non-correlated alpha RGBA images
    for i, j in ((22, 24), (23, 20)):
        imgA = pattern % i
        imgB = pattern % j
        fle = "ldr-rgba-kodak%02u+%02u+nca.png" % (i, j)
        dst = os.path.join(TEST_IMAGE_DIR, "KodakSim", "LDR-RGBA", fle)
        make_mixed_image(imgA, imgB, dst)

    # Make a large montage
    srcDir = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB")
    fle = "ldr-rgb-montage.png"
    dst = os.path.join(TEST_IMAGE_DIR, "KodakMnt", "LDR-RGB", fle)
    make_montage(srcDir, dst)
def main():
    """
    The main function.

    Returns:
        int: The process return code.
    """
    retrieve_kodak_set()
    return 0


if __name__ == "__main__":
    sys.exit(main())
| 27.638889 | 79 | 0.617085 | 688 | 4,975 | 4.405523 | 0.335756 | 0.029693 | 0.026394 | 0.032333 | 0.168591 | 0.151105 | 0.138898 | 0.13065 | 0.13065 | 0.114814 | 0 | 0.01485 | 0.24201 | 4,975 | 179 | 80 | 27.793296 | 0.788915 | 0.410653 | 0 | 0.135135 | 0 | 0 | 0.116831 | 0.035779 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.054054 | 0 | 0.148649 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03ea99f6452fd0664caac74940ef228cb2d560ff | 3,549 | py | Python | cmapPy/clue_api_client/clue_api_client.py | RCBiczok/cmapPy | 580b0d656892e72f58047666a94e2769ddf63b3f | [
"BSD-3-Clause"
] | 2 | 2019-03-01T18:20:10.000Z | 2019-05-05T13:04:32.000Z | cmapPy/clue_api_client/clue_api_client.py | RCBiczok/cmapPy | 580b0d656892e72f58047666a94e2769ddf63b3f | [
"BSD-3-Clause"
] | 10 | 2022-03-14T18:40:45.000Z | 2022-03-22T12:45:02.000Z | cmapPy/clue_api_client/clue_api_client.py | RCBiczok/cmapPy | 580b0d656892e72f58047666a94e2769ddf63b3f | [
"BSD-3-Clause"
] | 2 | 2019-01-30T10:37:50.000Z | 2019-01-30T10:45:37.000Z | import requests
import logging
import cmapPy.clue_api_client.setup_logger as setup_logger
import json
__authors__ = "David L. Lahr"
__email__ = "dlahr@broadinstitute.org"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class ClueApiClient(object):
"""Basic class for running queries against CLUE api
"""
def __init__(self, base_url=None, user_key=None):
"""
Args:
base_url: specific URL to use for the CLUE api, e.g. https://dev-api.clue.io/api/
user_key: user key to use for authentication, available from CLUE account
Returns:
"""
self.base_url = base_url
self.headers = {"user_key":user_key}
def run_filter_query(self, resource_name, filter_clause):
"""run a query (get) against the CLUE api, using the API and user key fields of self and the fitler_clause provided
Args:
resource_name: str - name of the resource / collection to query - e.g. genes, perts, cells etc.
filter_clause: dictionary - contains filter to pass to API to; uses loopback specification
Returns: list of dictionaries containing the results of the query
"""
url = self.base_url + "/" + resource_name
params = {"filter":json.dumps(filter_clause)}
r = requests.get(url, headers=self.headers, params=params)
logger.debug("requests.get result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
return r.json()
def run_count_query(self, resource_name, where_clause):
"""run a query (get) against CLUE api
Args:
resource_name: str - name of the resource / collection to query - e.g. genes, perts, cells etc.
where_clause: dictionary - contains where clause to pass to API to; uses loopback specification
Returns: dictionary containing the results of the query
"""
url = self.base_url + "/" + resource_name + "/count"
params = {"where":json.dumps(where_clause)}
r = requests.get(url, headers=self.headers, params=params)
logger.debug("requests.get result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
return r.json()
def run_post(self, resource_name, data):
url = self.base_url + "/" + resource_name
r = requests.post(url, data=data, headers=self.headers)
logger.debug("requests.post result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
return r.json()
def run_delete(self, resource_name, id):
url = self.base_url + "/" + resource_name + "/" + id
r = requests.delete(url, headers=self.headers)
logger.debug("requests.delete result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
did_delete = r.json()["count"] == 1
return did_delete
def run_put(self, resource_name, id, data):
url = self.base_url + "/" + resource_name + "/" + id
r = requests.put(url, data=data, headers=self.headers)
logger.debug("requests.put result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
return r.json()
@staticmethod
def _check_request_response(response):
assert response.status_code == 200, "ClueApiClient request failed response.status_code: {} response.reason: {}".format(
response.status_code, response.reason) | 35.848485 | 130 | 0.658495 | 461 | 3,549 | 4.874187 | 0.234273 | 0.057855 | 0.048954 | 0.031153 | 0.556742 | 0.52826 | 0.489542 | 0.47441 | 0.47441 | 0.3587 | 0 | 0.001475 | 0.235841 | 3,549 | 99 | 131 | 35.848485 | 0.827065 | 0.257537 | 0 | 0.354167 | 0 | 0 | 0.138097 | 0.018065 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.145833 | false | 0 | 0.083333 | 0 | 0.354167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03ec5ee390c83bab9a9210d74914176c04449ccd | 2,467 | py | Python | src/gtk3/entrycompletion/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 42 | 2020-05-09T16:23:23.000Z | 2022-03-28T13:05:32.000Z | src/gtk3/entrycompletion/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 2 | 2020-05-27T19:23:54.000Z | 2022-03-08T01:42:59.000Z | src/gtk3/entrycompletion/MainWindow.py | alexandrebarbaruiva/gui-python-gtk | 7b8e8ab05645271ae55e1e2165eefc9c8f5b0250 | [
"MIT"
] | 8 | 2020-05-09T16:23:28.000Z | 2022-03-31T22:44:45.000Z | # -*- coding: utf-8 -*-
"""GTK.EntryCompletion()."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk, GObject, Gio
class MainWindow(Gtk.ApplicationWindow):
brazilian_states = [
(1, 'Acre'), (2, 'Alagoas'), (3, 'Amapá'), (4, 'Amazonas'),
(5, 'Bahia'), (6, 'Ceará'), (7, 'Distrito Federal'), (8, 'Espírito Santo'),
(9, 'Goiás'), (10, 'Maranhão'), (11, 'Mato Grosso'), (12, 'Mato Grosso do Sul'),
(13, 'Minas Gerais'), (14, 'Pará'), (15, 'Paraíba'), (16, 'Paraná'),
(17, 'Pernambuco'), (18, 'Piauí'), (19, 'Rio de Janeiro'),
(20, 'Rio Grande do Norte'), (21, 'Rio Grande do Sul'), (22, 'Rondônia'),
(23, 'Roraima'), (24, 'Santa Catarina'), (25, 'São Paulo'), (26, 'Sergipe'),
(27, 'Tocantins'),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_title(title='GTK.EntryCompletion')
self.set_default_size(width=1366 / 2, height=768 / 2)
self.set_position(position=Gtk.WindowPosition.CENTER)
self.set_default_icon_from_file(filename='../../assets/icons/icon.png')
vbox = Gtk.Box.new(orientation=Gtk.Orientation.VERTICAL, spacing=12)
vbox.set_border_width(border_width=12)
self.add(widget=vbox)
        label = Gtk.Label.new(str='Type the name of a Brazilian state:')
vbox.pack_start(child=label, expand=False, fill=True, padding=0)
liststore = Gtk.ListStore.new([GObject.TYPE_INT, GObject.TYPE_STRING])
for state in self.brazilian_states:
liststore.append(row=state)
completion = Gtk.EntryCompletion.new()
completion.set_model(model=liststore)
completion.set_text_column(column=1)
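        # Column 1 (the state name) is what the completion popup matches and
        # displays; column 0 (the numeric id) rides along in the model unused.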
entry = Gtk.Entry.new()
entry.set_completion(completion=completion)
vbox.add(widget=entry)
self.show_all()
class Application(Gtk.Application):
def __init__(self):
super().__init__(application_id='br.natorsc.Exemplo',
flags=Gio.ApplicationFlags.FLAGS_NONE)
def do_startup(self):
Gtk.Application.do_startup(self)
def do_activate(self):
win = self.props.active_window
if not win:
win = MainWindow(application=self)
win.present()
def do_shutdown(self):
Gtk.Application.do_shutdown(self)
if __name__ == '__main__':
import sys
app = Application()
app.run(sys.argv)
| 32.038961 | 88 | 0.615728 | 301 | 2,467 | 4.86711 | 0.561462 | 0.019113 | 0.015017 | 0.027304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032933 | 0.224564 | 2,467 | 76 | 89 | 32.460526 | 0.73288 | 0.018241 | 0 | 0 | 0 | 0 | 0.154387 | 0.011176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.057692 | 0 | 0.211538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f0e18a2fb018fcb36f1fa0b744ae71787750e6 | 4,821 | py | Python | Chapter06/chapter_6_utils.py | YongBeomKim/py-finance | 7c7830904b67cd23c47e793e1f47a9702e7765f3 | [
"MIT"
] | 1 | 2021-03-16T04:32:15.000Z | 2021-03-16T04:32:15.000Z | Chapter06/chapter_6_utils.py | YongBeomKim/py-finance | 7c7830904b67cd23c47e793e1f47a9702e7765f3 | [
"MIT"
] | null | null | null | Chapter06/chapter_6_utils.py | YongBeomKim/py-finance | 7c7830904b67cd23c47e793e1f47a9702e7765f3 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.stats import norm
def simulate_gbm(s_0, mu, sigma, n_sims, T, N, random_seed=42, antithetic_var=False):
'''
Function used for simulating stock returns using Geometric Brownian Motion.
Parameters
----------
s_0 : float
Initial stock price
mu : float
Drift coefficient
sigma : float
Diffusion coefficient
n_sims : int
Number of simulations paths
    T : float
        Length of the forecast horizon; the time increment dt is computed
        internally as T/N (most commonly a day)
N : int
Number of time increments in the forecast horizon
random_seed : int
Random seed for reproducibility
antithetic_var : bool
Boolean whether to use antithetic variates approach to reduce variance
Returns
-------
S_t : np.ndarray
Matrix (size: n_sims x (T+1)) containing the simulation results.
        Rows represent sample paths, while columns represent points in time.
'''
np.random.seed(random_seed)
# time increment
dt = T/N
# Brownian
if antithetic_var:
dW_ant = np.random.normal(scale=np.sqrt(dt),
size=(int(n_sims/2), N + 1))
dW = np.concatenate((dW_ant, -dW_ant), axis=0)
else:
dW = np.random.normal(scale=np.sqrt(dt),
size=(n_sims, N + 1))
# simulate the evolution of the process
S_t = s_0 * np.exp(np.cumsum((mu - 0.5 * sigma ** 2) * dt + sigma * dW,
axis=1))
S_t[:, 0] = s_0
return S_t
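def _demo_simulate_gbm():
    # Minimal usage sketch (illustrative parameter values, not from the book):
    # simulate 1000 daily GBM paths over one year of 252 trading days.
    paths = simulate_gbm(s_0=100.0, mu=0.05, sigma=0.2, n_sims=1000, T=1.0, N=252)
    assert paths.shape == (1000, 253)  # n_sims rows, N + 1 columns
    return paths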
def black_scholes_analytical(S_0, K, T, r, sigma, type='call'):
'''
Function used for calculating the price of European options using the analytical form of the Black-Scholes model.
Parameters
------------
    S_0 : float
Initial stock price
K : float
Strike price
T : float
Time to maturity in years
r : float
Annualized risk-free rate
sigma : float
        Standard deviation of the stock returns
    type : str
        Type of the option. Allowable: ['call', 'put']
Returns
-----------
option_premium : float
The premium on the option calculated using the Black-Scholes model
'''
d1 = (np.log(S_0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
d2 = (np.log(S_0 / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
if type == 'call':
option_premium = (S_0 * norm.cdf(d1, 0, 1) - K * np.exp(-r * T) * norm.cdf(d2, 0, 1))
elif type == 'put':
option_premium = (K * np.exp(-r * T) * norm.cdf(-d2, 0, 1) - S_0 * norm.cdf(-d1, 0, 1))
else:
raise ValueError('Wrong input for type!')
return option_premium
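def _demo_put_call_parity():
    # Sanity sketch (illustrative values): the analytical European prices must
    # satisfy put-call parity, C - P = S_0 - K * exp(-r * T).
    S_0, K, T, r, sigma = 100.0, 100.0, 1.0, 0.05, 0.2
    call = black_scholes_analytical(S_0, K, T, r, sigma, type='call')
    put = black_scholes_analytical(S_0, K, T, r, sigma, type='put')
    assert np.isclose(call - put, S_0 - K * np.exp(-r * T))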
def lsmc_american_option(S_0, K, T, N, r, sigma, n_sims, option_type, poly_degree, random_seed=42):
'''
Function used for calculating the price of American options using Least Squares Monte Carlo
algorithm of Longstaff and Schwartz (2001).
Parameters
------------
S_0 : float
Initial stock price
K : float
Strike price
T : float
Time to maturity in years
N : int
Number of time increments in the forecast horizon
r : float
Annualized risk-free rate
sigma : float
Standard deviation of the stock returns
n_sims : int
Number of paths to simulate
option_type : str
Type of the option. Allowable: ['call', 'put']
poly_degree : int
Degree of the polynomial to fit in the LSMC algorithm
random_seed : int
Random seed for reproducibility
Returns
-----------
option_premium : float
The premium on the option
'''
dt = T / N
discount_factor = np.exp(-r * dt)
gbm_simulations = simulate_gbm(s_0=S_0, mu=r, sigma=sigma,
n_sims=n_sims, T=T, N=N,
random_seed=random_seed)
if option_type == 'call':
payoff_matrix = np.maximum(
gbm_simulations - K, np.zeros_like(gbm_simulations))
elif option_type == 'put':
payoff_matrix = np.maximum(
K - gbm_simulations, np.zeros_like(gbm_simulations))
value_matrix = np.zeros_like(payoff_matrix)
value_matrix[:, -1] = payoff_matrix[:, -1]
for t in range(N - 1, 0, -1):
regression = np.polyfit(
gbm_simulations[:, t], value_matrix[:, t + 1] * discount_factor, poly_degree)
continuation_value = np.polyval(regression, gbm_simulations[:, t])
value_matrix[:, t] = np.where(payoff_matrix[:, t] > continuation_value,
payoff_matrix[:, t],
value_matrix[:, t + 1] * discount_factor)
option_premium = np.mean(value_matrix[:, 1] * discount_factor)
return option_premium
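def _demo_lsmc_vs_european():
    # Sanity sketch using the classic Longstaff-Schwartz (2001) put example
    # (S_0=36, K=40, r=6%, sigma=20%, T=1): the American put priced with LSMC
    # should be worth at least the corresponding European put, since early
    # exercise adds non-negative optionality.
    S_0, K, T, r, sigma = 36.0, 40.0, 1.0, 0.06, 0.2
    european = black_scholes_analytical(S_0, K, T, r, sigma, type='put')
    american = lsmc_american_option(S_0, K, T, N=50, r=r, sigma=sigma,
                                    n_sims=10 ** 4, option_type='put',
                                    poly_degree=5)
    assert american >= european - 0.05  # small tolerance for Monte Carlo noise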
| 31.103226 | 117 | 0.581 | 648 | 4,821 | 4.186728 | 0.257716 | 0.010321 | 0.016218 | 0.018798 | 0.382234 | 0.352009 | 0.336896 | 0.237376 | 0.214523 | 0.180612 | 0 | 0.018204 | 0.316324 | 4,821 | 154 | 118 | 31.305195 | 0.804915 | 0.417133 | 0 | 0.163265 | 0 | 0 | 0.015789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0 | 0.040816 | 0 | 0.163265 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f1cc820b06286b1457546292140f6fdb636884 | 552 | py | Python | namedtuple.py | rmayherr/python | 830aec82e3ab155b66d01032eac71bbe6f961fce | [
"MIT"
] | null | null | null | namedtuple.py | rmayherr/python | 830aec82e3ab155b66d01032eac71bbe6f961fce | [
"MIT"
] | null | null | null | namedtuple.py | rmayherr/python | 830aec82e3ab155b66d01032eac71bbe6f961fce | [
"MIT"
] | null | null | null | from collections import namedtuple
import sys
def avg_marks_manual_input():
l = int(input("How many times?"))
r = namedtuple('r',input("Headers?").split(' '),rename=True)
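    # rename=True replaces invalid or duplicate header names with positional
    # names such as _0, _1 instead of raising ValueError.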
a = [r._make(input().split(' ')) for i in range(l)]
print(f'{sum([int(i.marks) for i in a]) / l:.2f}')
first_line = sys.stdin.readline()
r = namedtuple('r',sys.stdin.readline(),rename=True)
a = [r._make((' '.join(sys.stdin.readline().strip().split())).split(' ')) for i in range(int(first_line))]
print(f'{sum([int(i.MARKS) for i in a]) / int(first_line):.2f}')
| 36.8 | 106 | 0.637681 | 91 | 552 | 3.78022 | 0.395604 | 0.046512 | 0.069767 | 0.069767 | 0.331395 | 0.145349 | 0.145349 | 0.145349 | 0.145349 | 0.145349 | 0 | 0.004175 | 0.132246 | 552 | 14 | 107 | 39.428571 | 0.713987 | 0 | 0 | 0 | 0 | 0.090909 | 0.222826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f2a4a307188ecd417f28abec6cc88bd1e7da2d | 829 | py | Python | tests/test_simulator/test_ff.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 167 | 2017-10-08T00:59:22.000Z | 2022-02-08T00:14:39.000Z | tests/test_simulator/test_ff.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 719 | 2017-08-29T17:58:28.000Z | 2022-03-31T23:39:18.000Z | tests/test_simulator/test_ff.py | leonardt/magma | d3e8c9500ec3b167df8ed067e0c0305781c94ab6 | [
"MIT"
] | 14 | 2017-09-01T03:25:16.000Z | 2021-11-05T13:30:24.000Z | from .test_primitives import PRIM_FF
import magma as m
from magma.simulator import PythonSimulator
from magma.scope import *
def test_sim_ff():
class TestCircuit(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
ff = PRIM_FF()
m.wire(io.I, ff.D)
m.wire(ff.Q, io.O)
sim = PythonSimulator(TestCircuit, TestCircuit.CLK)
sim.evaluate()
val = sim.get_value(TestCircuit.O)
assert(val is False)
sim.advance()
val = sim.get_value(TestCircuit.O)
assert(val is False)
sim.set_value(TestCircuit.I, True)
sim.evaluate()
val = sim.get_value(TestCircuit.O)
assert(val is False)
sim.advance()
val = sim.get_value(TestCircuit.O)
assert(val is True)
sim.advance()
val = sim.get_value(TestCircuit.O)
assert(val is True)
| 24.382353 | 62 | 0.651387 | 128 | 829 | 4.132813 | 0.296875 | 0.181474 | 0.085066 | 0.132325 | 0.497164 | 0.497164 | 0.497164 | 0.497164 | 0.497164 | 0.497164 | 0 | 0 | 0.221954 | 829 | 33 | 63 | 25.121212 | 0.820155 | 0 | 0 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185185 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f2f46697905dc14454de66877a20450a1d4124 | 2,223 | py | Python | literature-review/q-learning/q-learning.py | ByronDev121/literature-review | 23c276e92534793d85c7af5c24d93603f8ee7678 | [
"MIT"
] | null | null | null | literature-review/q-learning/q-learning.py | ByronDev121/literature-review | 23c276e92534793d85c7af5c24d93603f8ee7678 | [
"MIT"
] | null | null | null | literature-review/q-learning/q-learning.py | ByronDev121/literature-review | 23c276e92534793d85c7af5c24d93603f8ee7678 | [
"MIT"
] | null | null | null | import gym
import time
import os
import matplotlib.pyplot as plt
import numpy as np
from gym.envs.registration import register
from agent import QAgent
register(
id='FrozenLake8x8NoSLip-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
# kwargs={"map_name": "8x8", 'is_slippery': False},
kwargs={'is_slippery': True},
max_episode_steps=200,
reward_threshold=0.99,
# optimum = 1
)
env = gym.make("FrozenLake8x8NoSLip-v0")
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)
steps = 0
total_reward = 0
ep_rewards = []
aggr_ep_rewards = {
'ep': [],
'avg': [],
'min': [],
'max': [],
}
agent = QAgent(env)
for ep in range(200):
episode_reward = 0
state = env.reset()
done = False
while not done:
steps += 1
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
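        # Reward shaping (an inference about the author's intent): penalize a
        # terminal step without the +1 goal reward (falling into a hole) with
        # -0.1, and every other step with -0.001 to encourage shorter paths.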
        if reward != 1:
if done:
reward = -0.1
else:
reward = -0.001
episode_reward += reward
agent.train((state, action, next_state, reward, done))
state = next_state
total_reward += reward
print("s:", state, "a:", action)
print("Episode: {}, Total reward: {}, eps: {}".format(ep, total_reward, agent.eps))
env.render()
# print(agent.q_table)
time.sleep(0.05)
if not ep == 199:
os.system('cls' if os.name == 'nt' else 'clear')
ep_rewards.append(episode_reward)
average_reward = sum(ep_rewards)/len(ep_rewards)
aggr_ep_rewards['ep'].append(ep)
aggr_ep_rewards['avg'].append(average_reward)
aggr_ep_rewards['min'].append(min(ep_rewards))
aggr_ep_rewards['max'].append(max(ep_rewards))
env.close()
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'], label='avg reward')
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['min'], label='min reward')
plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['max'], label='max reward')
plt.xlabel('iterations')
plt.legend(loc=4)
plt.show()
plt.figure()
plt.plot(np.arange(0, steps, 1), agent.max_q, label='Max Q')
plt.xlabel('iterations')
plt.ylabel('max Q')
plt.legend(loc=4)
plt.show()
| 26.783133 | 91 | 0.634728 | 311 | 2,223 | 4.369775 | 0.321543 | 0.112583 | 0.105224 | 0.055188 | 0.182487 | 0.150846 | 0.086093 | 0.086093 | 0.086093 | 0.060338 | 0 | 0.02221 | 0.210076 | 2,223 | 82 | 92 | 27.109756 | 0.751708 | 0.036887 | 0 | 0.086957 | 0 | 0 | 0.124474 | 0.035096 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.101449 | 0 | 0.101449 | 0.057971 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f309a30afe79b57c7abb4ede08dda5bc7ee303 | 625 | py | Python | data/clean_test.py | iotayo/aivivn-tone | 7f3d5ce41d5a0bd659f3aaeb7d351a7a046c3004 | [
"MIT"
] | 40 | 2019-07-31T18:13:13.000Z | 2021-12-07T09:45:07.000Z | data/clean_test.py | iotayo/aivivn-tone | 7f3d5ce41d5a0bd659f3aaeb7d351a7a046c3004 | [
"MIT"
] | 5 | 2019-08-07T10:21:52.000Z | 2021-05-26T06:08:48.000Z | data/clean_test.py | iotayo/aivivn-tone | 7f3d5ce41d5a0bd659f3aaeb7d351a7a046c3004 | [
"MIT"
] | 15 | 2019-07-31T17:57:53.000Z | 2021-05-27T10:43:46.000Z | from tqdm import tqdm
input_path = "./test_word_per_line.txt"
output_path = "./test_cleaned.txt"
curr_id = ""
curr_sent = []
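# Consecutive lines sharing the same 3-character id prefix are grouped into one
# cleaned sentence per output line (a descriptive note inferred from the loop).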
with open(output_path, mode="wt", encoding="utf-8") as f:
lines = open(input_path).readlines()
for idx, line in tqdm(enumerate(lines)):
if idx == 0:
continue
line = line.strip()
line_id = line.split(",")[0][:3]
if line_id != curr_id:
if len(curr_sent) > 0:
f.write("{},{}\n".format(curr_id, ' '.join(curr_sent)))
curr_id = line_id
curr_sent = []
curr_sent.append(line.split(",")[-1])
if idx == len(lines) - 1:
f.write("{},{}\n".format(curr_id, ' '.join(curr_sent)))
| 28.409091 | 59 | 0.6288 | 101 | 625 | 3.673267 | 0.425743 | 0.12938 | 0.053908 | 0.070081 | 0.167116 | 0.167116 | 0.167116 | 0.167116 | 0.167116 | 0 | 0 | 0.013384 | 0.1632 | 625 | 21 | 60 | 29.761905 | 0.695985 | 0 | 0 | 0.2 | 0 | 0 | 0.1072 | 0.0384 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f3ee5a1e2a0e3c2f5c128492384043eac78f5c | 1,583 | py | Python | translators/test.py | uiuc-arc/Storm | afa9cc5d4ef7e163a5255140c27aca4cf1b47036 | [
"MIT"
] | 2 | 2020-01-02T22:55:04.000Z | 2021-12-10T01:21:30.000Z | translators/test.py | uiuc-arc/Storm | afa9cc5d4ef7e163a5255140c27aca4cf1b47036 | [
"MIT"
] | null | null | null | translators/test.py | uiuc-arc/Storm | afa9cc5d4ef7e163a5255140c27aca4cf1b47036 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import antlr4
from antlr4 import *
from language.antlr.Template2Lexer import Template2Lexer
from language.antlr.Template2Parser import Template2Parser
from language.antlr.Template2Listener import Template2Listener
from newbackends.pyrotranslator import PyroTranslator
from language.Reorder import ReOrder
parser = argparse.ArgumentParser()
parser.add_argument('-a')
parser.add_argument('-it', default=4000)
parser.add_argument('-samples', action='store_true')
parser.add_argument('-lr', default=None)
parser.add_argument('-ag', action='store_true')
parser.add_argument('program')
args_dict = vars(parser.parse_args())
algo = args_dict['a']
iters = args_dict['it']
samples = args_dict['samples']
learning_rate = args_dict['lr']
templatefile = args_dict['program']
autoguide = args_dict['ag']
outputname = templatefile.split("/")[-1].replace(".template", "")
class MyWalker(Template2Listener):
def __init__(self):
pass
def enterArith(self, ctx):
print(ctx)
with open('/tmp/' + outputname, 'w') as tmpfile:
    tmpfile.write(ReOrder(templatefile).reordered_code)
templatefile = '/tmp/' + outputname
template = antlr4.FileStream(templatefile)
lexer = Template2Lexer(template)
stream = antlr4.CommonTokenStream(lexer)
parser = Template2Parser(stream)
w = PyroTranslator(algo, iters, samples, autoguide)
template = parser.template()
walker = ParseTreeWalker()
walker.walk(w, template)
with open(outputname + ".py", 'w') as output:
output.write(w.output_program)
| 27.293103 | 62 | 0.759949 | 189 | 1,583 | 6.253968 | 0.386243 | 0.045685 | 0.086294 | 0.096447 | 0.054146 | 0.054146 | 0 | 0 | 0 | 0 | 0 | 0.012614 | 0.098547 | 1,583 | 57 | 63 | 27.77193 | 0.815697 | 0.012634 | 0 | 0 | 0 | 0 | 0.059012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.025 | 0.2 | 0 | 0.275 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f524b31fd0d91cd3b0e302abe39bb4f73b97a6 | 6,355 | py | Python | pytc/experiments/base.py | jharman25/pytc | d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac | [
"Unlicense"
] | 20 | 2017-04-27T16:30:03.000Z | 2021-08-12T19:42:05.000Z | pytc/experiments/base.py | jharman25/pytc | d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac | [
"Unlicense"
] | 15 | 2016-12-12T20:40:44.000Z | 2022-02-20T12:05:37.000Z | pytc/experiments/base.py | jharman25/pytc | d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac | [
"Unlicense"
] | 6 | 2016-06-23T00:54:21.000Z | 2020-05-19T05:24:20.000Z | __description__ = \
"""
experiments.py
Classes for loading experimental ITC data and associating those data with a
model.
Units:
Volumes are in microliters
Temperatures are in Kelvin
Concentrations are in molar
Energy is `units`, where `units` is specified when instantiating the
ITCExperiment class. It must be a in the AVAIL_UNITS dictionary.
"""
__author__ = "Michael J. Harms"
__date__ = "2016-06-22"
import random, string, os
import numpy as np
class BaseITCExperiment:
"""
Class that holds an experimental ITC measurement and a model that describes it.
"""
AVAIL_UNITS = {"cal/mol":1.9872036,
"kcal/mol":0.0019872036,
"J/mol":8.3144598,
"kJ/mol":0.0083144598}
def __init__(self,dh_file,model,shot_start=1,units="cal/mol",
uncertainty=0.1,**model_kwargs):
"""
Parameters
----------
dh_file: string
integrated heats file written out by origin software.
model: ITCModel subclass instance
ITCModel subclass to use for modeling
shot_start: int
what shot to use as the first real point. Shots start at 0, so
default=1 discards first point.
units : string
file units ("cal/mol","kcal/mol","J/mol","kJ/mol")
uncertainty : float > 0.0
uncertainty in integrated heats (set to same for all shots, unless
specified in something like NITPIC output file).
**model_kwargs: any keyword arguments to pass to the model. Any
keywords passed here will override whatever is
stored in the dh_file.
"""
self.dh_file = dh_file
self._shot_start = shot_start
# Deal with units
self._units = units
try:
self._R = self.AVAIL_UNITS[self._units]
except KeyError:
err = "units must be one of:\n"
for k in self.AVAIL_UNITS.keys():
err += " {}\n".format(k)
err += "\n"
raise ValueError(err)
# For numerical reasons, there should always be *some* uncertainty
self._uncertainty = uncertainty
if self._uncertainty == 0.0:
self._uncertainty = 1e-12
# Load in heats
extension = self.dh_file.split(".")[-1]
self._read_heats_file()
# Initialize model using information read from heats file
self._model = model(S_cell=self.stationary_cell_conc,
T_syringe=self.titrant_syringe_conc,
cell_volume=self.cell_volume,
shot_volumes=self._shots,**model_kwargs)
r = "".join([random.choice(string.ascii_letters) for i in range(20)])
self._experiment_id = "{}_{}".format(self.dh_file,r)
def _read_heats_file(self):
"""
Dummy heat reading file.
"""
pass
@property
def dQ(self):
"""
Return heats calculated by the model with parameters defined in params
dictionary.
"""
if len(self._model.dQ) == 0:
return np.array(())
return self._model.dQ[self._shot_start:]
@property
def dilution_heats(self):
"""
Return dilution heats calculated by the model with parameters defined
in params dictionary.
"""
if len(self._model.dilution_heats) == 0:
return np.array(())
return self._model.dilution_heats[self._shot_start:]
@property
def param_values(self):
"""
Values of fit parameters.
"""
return self._model.param_values
@property
def param_stdevs(self):
"""
Standard deviations on fit parameters.
"""
return self._model.param_stdevs
@property
def param_ninetyfives(self):
"""
        95% confidence intervals on fit parameters.
"""
return self._model.param_ninetyfives
@property
def model(self):
"""
Fitting model.
"""
return self._model
@property
def shot_start(self):
"""
Starting shot to use.
"""
return self._shot_start
@shot_start.setter
def shot_start(self,value):
"""
Change starting shot.
"""
self._shot_start = value
@property
def heats(self):
"""
Return experimental heats.
"""
return self._heats[self._shot_start:]
@heats.setter
def heats(self,heats):
"""
Set the heats.
"""
self._heats[self._shot_start:] = heats[:]
@property
def heats_stdev(self):
"""
Standard deviation on the uncertainty of the heat.
"""
return self._heats_stdev[self._shot_start:]
@heats_stdev.setter
def heats_stdev(self,heats_stdev):
"""
Set the standard deviation on the uncertainty of the heat.
"""
self._heats_stdev[self._shot_start:] = heats_stdev[:]
@property
def mol_injected(self):
"""
Return the mols injected over shots.
"""
# uL * mol/L * L/1e6 uL -> mol
return self._shots[self._shot_start:]*self.titrant_syringe_conc*1e-6
@property
def mole_ratio(self):
"""
Return the mole ratio of titrant to stationary.
"""
return self._model.mole_ratio[self._shot_start:]
@property
def experiment_id(self):
"""
Return a unique experimental id.
"""
return self._experiment_id
@property
def units(self):
"""
Units for file.
"""
return self._units
@units.setter
def units(self,units):
"""
Change the units.
"""
# Deal with units
self._units = units
try:
self._R = self.AVAIL_UNITS[self._units]
except KeyError:
err = "units must be one of:\n"
for k in self.AVAIL_UNITS.keys():
err += " {}\n".format(k)
err += "\n"
raise ValueError(err)
@property
def R(self):
"""
Experiment gas constant.
"""
return self._R
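# Subclassing sketch (hypothetical reader; the real parsers live in the
# format-specific subclasses of this package):
#   class MyITCExperiment(BaseITCExperiment):
#       def _read_heats_file(self):
#           ...  # populate self._heats, self._heats_stdev, self._shots, etc.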
| 24.631783 | 83 | 0.557671 | 722 | 6,355 | 4.728532 | 0.296399 | 0.044815 | 0.041886 | 0.02109 | 0.265378 | 0.2314 | 0.196251 | 0.179262 | 0.132982 | 0.132982 | 0 | 0.016627 | 0.346971 | 6,355 | 257 | 84 | 24.727626 | 0.806024 | 0.260425 | 0 | 0.333333 | 0 | 0 | 0.035225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.196078 | false | 0.009804 | 0.019608 | 0 | 0.392157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03f8ccce3ecf35b88bf241958bca977a39da0829 | 2,743 | py | Python | tests/_geom/test_path_bezier_3d_continual.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | 16 | 2021-04-16T02:01:29.000Z | 2022-01-01T08:53:49.000Z | tests/_geom/test_path_bezier_3d_continual.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | 613 | 2021-03-24T03:37:38.000Z | 2022-03-26T10:58:37.000Z | tests/_geom/test_path_bezier_3d_continual.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | [
"MIT"
] | 2 | 2021-06-20T07:32:58.000Z | 2021-12-26T08:22:11.000Z | import re
from random import randint
from typing import Match
from typing import Optional
from retrying import retry
import apysc as ap
from apysc._expression import var_names
from tests.testing_helper import assert_attrs
class TestPathBezier3DContinual:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test___init__(self) -> None:
path_bezier_3d_continual: ap.PathBezier3DContinual = \
ap.PathBezier3DContinual(
control_x=10, control_y=20, dest_x=30, dest_y=40,
relative=True)
assert_attrs(
expected_attrs={
'_path_label': ap.PathLabel.BEZIER_3D_CONTINUAL,
'_relative': True,
'_control_x': 10,
'_control_y': 20,
'_dest_x': 30,
'_dest_y': 40,
},
any_obj=path_bezier_3d_continual)
assert isinstance(path_bezier_3d_continual._control_x, ap.Int)
assert isinstance(path_bezier_3d_continual._control_y, ap.Int)
assert isinstance(path_bezier_3d_continual._dest_x, ap.Int)
assert isinstance(path_bezier_3d_continual._dest_y, ap.Int)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_svg_str(self) -> None:
continual: ap.PathBezier3DContinual = \
ap.PathBezier3DContinual(
control_x=10, control_y=20, dest_x=30, dest_y=40)
svg_str: str = continual._get_svg_str()
match: Optional[Match] = re.match(
pattern=(
rf'{var_names.STRING}_\d+? \+ '
rf'String\({continual._control_x.variable_name}\) \+ " " \+ '
rf'String\({continual._control_y.variable_name}\) \+ " " \+ '
rf'String\({continual._dest_x.variable_name}\) \+ " " \+ '
rf'String\({continual._dest_y.variable_name}\)'
),
string=svg_str)
assert match is not None
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_update_path_data(self) -> None:
continual: ap.PathBezier3DContinual = \
ap.PathBezier3DContinual(
control_x=10, control_y=20, dest_x=30, dest_y=40,
relative=False)
continual.update_path_data(
control_x=100, control_y=200, dest_x=300, dest_y=400,
relative=True)
assert_attrs(
expected_attrs={
'_control_x': 100,
'_control_y': 200,
'_dest_x': 300,
'_dest_y': 400,
'_relative': True,
},
any_obj=continual)
| 38.633803 | 78 | 0.582574 | 311 | 2,743 | 4.755627 | 0.244373 | 0.043272 | 0.08046 | 0.085193 | 0.633536 | 0.633536 | 0.532792 | 0.49831 | 0.49831 | 0.406356 | 0 | 0.050375 | 0.319723 | 2,743 | 70 | 79 | 39.185714 | 0.742229 | 0 | 0 | 0.285714 | 0 | 0 | 0.125327 | 0.075196 | 0 | 0 | 0 | 0 | 0.126984 | 1 | 0.047619 | false | 0 | 0.126984 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03fa298b7ad91850feb3603a0d09e4d69c6f4462 | 2,108 | py | Python | cogs/owner.py | SuperYellowSystem/FactorioQuizzBot | 55dfea657c64dd9d92bfa9657de66159cc7e79e9 | [
"MIT"
] | 1 | 2018-04-08T16:47:02.000Z | 2018-04-08T16:47:02.000Z | cogs/owner.py | SuperYellowSystem/FactorioQuizBot | 55dfea657c64dd9d92bfa9657de66159cc7e79e9 | [
"MIT"
] | null | null | null | cogs/owner.py | SuperYellowSystem/FactorioQuizBot | 55dfea657c64dd9d92bfa9657de66159cc7e79e9 | [
"MIT"
] | null | null | null | # NB: Based on the work of Rapptz on RoboDanny
# src: https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/admin.py
# ======================================================================
# imports
# ======================================================================
from discord.ext import commands as cmds
import traceback
from cogs.utils import checks
import logging
logger = logging.getLogger(__name__)
# ======================================================================
class Owner:
"""Owner-only commands that make the bot easier to debug/manage."""
def __init__(self, bot):
self.bot = bot
@checks.is_owner()
@cmds.command(hidden=True)
async def load(self, ctx, *, module):
"""Loads a module."""
try:
self.bot.load_extension(module)
except Exception as e:
logger.error(f'Error while loading cog {module}', e)
await ctx.send(f'```py\n{traceback.format_exc()}\n```')
else:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
@checks.is_owner()
@cmds.command(hidden=True)
async def unload(self, ctx, *, module):
"""Unloads a module."""
try:
self.bot.unload_extension(module)
except Exception as e:
logger.error(f'Error while unloading cog {module}', e)
await ctx.send(f'```py\n{traceback.format_exc()}\n```')
else:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
@checks.is_owner()
@cmds.command(name='reload', hidden=True)
async def _reload(self, ctx, *, module):
"""Reloads a module."""
try:
self.bot.unload_extension(module)
self.bot.load_extension(module)
except Exception as e:
logger.error(f'Error while reloading cog {module}', e)
await ctx.send(f'```py\n{traceback.format_exc()}\n```')
else:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
# ======================================================================
def setup(bot: cmds.Bot):
bot.add_cog(Owner(bot))
| 33.460317 | 72 | 0.52277 | 243 | 2,108 | 4.440329 | 0.345679 | 0.038925 | 0.036145 | 0.047266 | 0.578313 | 0.569045 | 0.569045 | 0.569045 | 0.512512 | 0.456905 | 0 | 0 | 0.223435 | 2,108 | 62 | 73 | 34 | 0.659133 | 0.221537 | 0 | 0.585366 | 0 | 0 | 0.167734 | 0.069142 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.097561 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03fac18b0bfdebf09b938bb372680e33554a0ff7 | 817 | py | Python | Problem Solving with Algorithms and Data Structures/Chapter 1/e14.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | Problem Solving with Algorithms and Data Structures/Chapter 1/e14.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | Problem Solving with Algorithms and Data Structures/Chapter 1/e14.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null |
from e14_b import MyDeck, MyCard
def demo_a():
card_2_heart = MyCard(2, '❤')
print(card_2_heart)
card_3_diamond = MyCard(3, '♦')
print(card_3_diamond)
card_13_spade = MyCard(13, '♠')
print(card_13_spade)
def demo_b():
cards_deck = MyDeck(num_of_deck=1)
cards_deck.shuffle_deck()
print(cards_deck.get_length())
num_of_player = 3
num_of_card = 2
players = cards_deck.dist_cards(num_of_player, num_of_card)
print(players)
for player_cards in players:
sum_cards = sum(card.rank for card in player_cards)
while sum_cards < 18 and len(player_cards) < 5:
card_hit = cards_deck.hit_card()
player_cards.append(card_hit)
sum_cards = sum(card.rank for card in player_cards)
print(players)
# demo_a()
demo_b()
| 24.757576 | 63 | 0.662179 | 130 | 817 | 3.830769 | 0.323077 | 0.090361 | 0.040161 | 0.060241 | 0.156627 | 0.156627 | 0.156627 | 0.156627 | 0.156627 | 0.156627 | 0 | 0.032051 | 0.23623 | 817 | 32 | 64 | 25.53125 | 0.761218 | 0.009792 | 0 | 0.166667 | 0 | 0 | 0.003722 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03ffdfae2150b0ebaf5889a8ceb87f9a3acb8bf4 | 616 | py | Python | pyrival/graphs/components.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 748 | 2018-09-27T01:08:12.000Z | 2022-03-25T17:31:56.000Z | pyrival/graphs/components.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 38 | 2019-02-24T14:50:02.000Z | 2022-03-25T01:27:50.000Z | pyrival/graphs/components.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 288 | 2018-10-29T11:55:57.000Z | 2022-03-20T04:37:27.000Z | def connected_components(n, graph):
components, visited = [], [False] * n
def dfs(start):
component, stack = [], [start]
while stack:
start = stack[-1]
if visited[start]:
stack.pop()
continue
else:
visited[start] = True
component.append(start)
for i in graph[start]:
if not visited[i]:
stack.append(i)
return component
for i in range(n):
if not visited[i]:
components.append(dfs(i))
return components
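# Usage sketch (illustrative adjacency list): two components, {0, 1, 2} and {3}.
#   graph = {0: [1], 1: [0, 2], 2: [1], 3: []}
#   connected_components(4, graph)  # -> [[0, 1, 2], [3]]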
| 22 | 41 | 0.470779 | 62 | 616 | 4.66129 | 0.403226 | 0.069204 | 0.041522 | 0.089965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002857 | 0.431818 | 616 | 27 | 42 | 22.814815 | 0.822857 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff002fdb0500e9897354e32946b348260a72d40c | 976 | py | Python | lino/modlib/uploads/__init__.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | 1 | 2019-11-13T19:38:50.000Z | 2019-11-13T19:38:50.000Z | lino/modlib/uploads/__init__.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | null | null | null | lino/modlib/uploads/__init__.py | NewRGB/lino | 43799e42107169ff173d3b8bc0324d5773471499 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Adds functionality for uploading files to the server and managing them. See
:doc:`/specs/uploads`.
"""
from lino import ad, _
class Plugin(ad.Plugin):
"See :doc:`/dev/plugins`."
verbose_name = _("Uploads")
menu_group = "office"
def setup_main_menu(self, site, user_type, m):
mg = self.get_menu_group()
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('uploads.MyUploads')
def setup_config_menu(self, site, user_type, m):
mg = self.get_menu_group()
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('uploads.Volumes')
m.add_action('uploads.UploadTypes')
def setup_explorer_menu(self, site, user_type, m):
mg = self.get_menu_group()
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('uploads.AllUploads')
m.add_action('uploads.UploadAreas')
| 27.885714 | 79 | 0.659836 | 143 | 976 | 4.272727 | 0.41958 | 0.052373 | 0.081833 | 0.139116 | 0.432079 | 0.432079 | 0.432079 | 0.432079 | 0.432079 | 0.432079 | 0 | 0.010417 | 0.213115 | 976 | 34 | 80 | 28.705882 | 0.785156 | 0.209016 | 0 | 0.315789 | 0 | 0 | 0.159033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.052632 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff01277f87c5e7fac7a733b20837d1ed277d4304 | 2,097 | py | Python | cloudferrylib/os/actions/upload_file_to_image.py | toha10/CloudFerry | 5f844a480d3326d1fea74cca35b648c32d390fab | [
"Apache-2.0"
] | null | null | null | cloudferrylib/os/actions/upload_file_to_image.py | toha10/CloudFerry | 5f844a480d3326d1fea74cca35b648c32d390fab | [
"Apache-2.0"
] | null | null | null | cloudferrylib/os/actions/upload_file_to_image.py | toha10/CloudFerry | 5f844a480d3326d1fea74cca35b648c32d390fab | [
"Apache-2.0"
] | null | null | null |
from cloudferrylib.base.action import action
from fabric.api import run, settings
from cloudferrylib.utils import utils as utl
CLOUD = 'cloud'
BACKEND = 'backend'
CEPH = 'ceph'
ISCSI = 'iscsi'
COMPUTE = 'compute'
INSTANCES = 'instances'
INSTANCE_BODY = 'instance'
INSTANCE = 'instance'
DIFF = 'diff'
EPHEMERAL = 'ephemeral'
DIFF_OLD = 'diff_old'
EPHEMERAL_OLD = 'ephemeral_old'
PATH_DST = 'path_dst'
HOST_DST = 'host_dst'
PATH_SRC = 'path_src'
HOST_SRC = 'host_src'
TEMP = 'temp'
FLAVORS = 'flavors'
class UploadFileToImage(action.Action):
def run(self, info=None, **kwargs):
cfg = self.cloud.cloud_config.cloud
img_res = self.cloud.resources[utl.IMAGE_RESOURCE]
for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
# init
image_id = info[INSTANCES][instance_id][utl.INSTANCE_BODY]['image_id']
base_file = "%s/%s" % (self.cloud.cloud_config.cloud.temp, "temp%s_base" % instance_id)
image_name = "%s-image" % instance_id
images = img_res.read_info(image_id=image_id)[utl.IMAGES_TYPE]
image_format = images[image_id][utl.IMAGE_BODY]['disk_format']
if img_res.config.image.convert_to_raw:
image_format = utl.RAW
# action
with settings(host_string=cfg.host):
out = run(("glance --os-username=%s --os-password=%s --os-tenant-name=%s " +
"--os-auth-url=http://%s:35357/v2.0 " +
"image-create --name %s --disk-format=%s --container-format=bare --file %s| " +
"grep id") %
(cfg.user,
cfg.password,
cfg.tenant,
cfg.host,
image_name,
image_format,
base_file))
image_id = out.split("|")[2].replace(' ', '')
info[INSTANCES][instance_id][INSTANCE_BODY]['image_id'] = image_id
return {
'info': info
}
| 34.95 | 106 | 0.560801 | 244 | 2,097 | 4.614754 | 0.340164 | 0.049734 | 0.026643 | 0.035524 | 0.044405 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005559 | 0.313782 | 2,097 | 59 | 107 | 35.542373 | 0.776928 | 0.005246 | 0 | 0 | 0 | 0.04 | 0.175312 | 0.011047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.04 | 0.06 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff01ccc9f186d9a1a01af517bc0fcb70db7dd2eb | 1,672 | py | Python | application.py | roniemartinez/real-time-charts-with-fastapi | f5f30f8283acafb30a97a5060803fe9d1d872b69 | [
"MIT"
] | 28 | 2022-03-20T14:10:04.000Z | 2022-03-30T21:50:15.000Z | application.py | roniemartinez/real-time-charts-with-fastapi | f5f30f8283acafb30a97a5060803fe9d1d872b69 | [
"MIT"
] | null | null | null | application.py | roniemartinez/real-time-charts-with-fastapi | f5f30f8283acafb30a97a5060803fe9d1d872b69 | [
"MIT"
] | 1 | 2022-03-21T21:50:27.000Z | 2022-03-21T21:50:27.000Z | import json
import logging
import random
import sys
from datetime import datetime
from typing import Iterator
import asyncio
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.requests import Request
from fastapi.templating import Jinja2Templates
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)
application = FastAPI()
templates = Jinja2Templates(directory="templates")
random.seed() # Initialize the random number generator
@application.get("/", response_class=HTMLResponse)
async def index(request: Request) -> templates.TemplateResponse:
return templates.TemplateResponse("index.html", {"request": request})
async def generate_random_data(request: Request) -> Iterator[str]:
"""
    Generates a random value between 0 and 100 once per second.
:return: String containing current timestamp (YYYY-mm-dd HH:MM:SS) and randomly generated data.
"""
client_ip = request.client.host
logger.info("Client %s connected", client_ip)
while True:
json_data = json.dumps(
{
"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"value": random.random() * 100,
}
)
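        # Server-Sent Events framing: each message is a "data:" line followed
        # by a blank line, which the browser's EventSource API parses.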
yield f"data:{json_data}\n\n"
await asyncio.sleep(1)
@application.get("/chart-data")
async def chart_data(request: Request) -> StreamingResponse:
response = StreamingResponse(generate_random_data(request), media_type="text/event-stream")
response.headers["Cache-Control"] = "no-cache"
response.headers["X-Accel-Buffering"] = "no"
return response
| 30.962963 | 106 | 0.711124 | 199 | 1,672 | 5.899497 | 0.502513 | 0.037479 | 0.030664 | 0.042589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007225 | 0.172249 | 1,672 | 53 | 107 | 31.54717 | 0.84104 | 0.022727 | 0 | 0 | 0 | 0 | 0.133559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.297297 | 0 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff0349321664c71151b8b6a6936853d728c7a48f | 2,666 | py | Python | Source/GeniusLyricsGUI.py | Fingolfin7/Music-Metadata-and-Lyrics | 6b26c048b9a0905f0097d2bd212285296ab3ba54 | [
"MIT"
] | 4 | 2021-08-07T00:15:12.000Z | 2021-12-12T20:45:15.000Z | Source/GeniusLyricsGUI.py | Fingolfin7/Music-Metadata-and-Lyrics | 6b26c048b9a0905f0097d2bd212285296ab3ba54 | [
"MIT"
] | 1 | 2021-08-06T23:29:47.000Z | 2021-08-06T23:29:47.000Z | Source/GeniusLyricsGUI.py | Fingolfin7/Music-Metadata-and-Lyrics | 6b26c048b9a0905f0097d2bd212285296ab3ba54 | [
"MIT"
] | 3 | 2021-08-07T01:53:20.000Z | 2021-08-07T23:21:02.000Z | import os
import threading
from tkinter import *
from tkinter import messagebox
from GeniusLyrics import search_song_lyrics, song_dict
class GeniusLyricsGUI:
def __init__(self, tk_root: Tk):
tk_root.resizable(width=False, height=False)
self.lyrics_frame = Frame(tk_root)
self.songName = StringVar()
self.artistName = StringVar()
self.top_section = LabelFrame(self.lyrics_frame, font="Calibri", pady=2)
Label(self.top_section, text="Song").pack(side=LEFT, padx=4)
self.songEntry = Entry(self.top_section, textvariable=self.songName)
self.songEntry.pack(side=LEFT, padx=4)
Label(self.top_section, text="Artist").pack(side=LEFT, padx=4)
self.artistName = Entry(self.top_section, textvariable=self.artistName)
self.artistName.pack(side=LEFT, padx=4)
self.searchButton = Button(self.top_section, text="Search")
self.searchButton.bind("<Button-1>", self.search)
tk_root.bind("<Return>", self.search)
self.searchButton.pack(side=LEFT)
# create scrollbar
self.scrollbar = Scrollbar(self.lyrics_frame)
self.top_section.pack(side=TOP)
self.output = Text(self.lyrics_frame, font="Calibri 11", width=self.top_section.winfo_width(), height=25)
self.lyrics_frame.pack()
def search(self, event=None):
def thread_func():
os.system("cls")
print(f"Song: {self.songName.get()}\nArtist: {self.artistName.get()}\n")
lyrics = search_song_lyrics(self.songName.get(), self.artistName.get())
if lyrics:
if lyrics[0] == "\n" and lyrics[1] == "\n":
lyrics = lyrics[2:]
print(lyrics)
# pack and attach to textbox
self.scrollbar.pack(side=RIGHT, fill=Y)
self.output.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.output.yview)
# pack output
self.output.pack(side=BOTTOM, fill=BOTH)
self.output.delete(1.0, "end")
self.output.insert(1.0, lyrics)
else:
messagebox.showinfo(
"Failed",
f"Couldn't find lyrics for\n'{self.songName.get()}' by '{self.artistName.get()}'"
)
search_thread = threading.Thread(target=thread_func)
search_thread.start()
def main():
root = Tk()
root.title('Music Metadata & Lyrics')
root.resizable(width=False, height=False)
GeniusLyricsGUI(root)
root.mainloop()
if __name__ == "__main__":
main()
| 33.325 | 113 | 0.609152 | 317 | 2,666 | 5 | 0.318612 | 0.035331 | 0.070662 | 0.040379 | 0.199369 | 0.126814 | 0 | 0 | 0 | 0 | 0 | 0.008678 | 0.265191 | 2,666 | 79 | 114 | 33.746835 | 0.800408 | 0.02063 | 0 | 0 | 0 | 0.017857 | 0.091293 | 0.041427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.089286 | 0 | 0.178571 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff03dab57925319f515c5c11149dcf10058593ae | 1,124 | py | Python | raspy/components/motors/motor_state_change_event.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/components/motors/motor_state_change_event.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/components/motors/motor_state_change_event.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | """This module contains the MotorStateChangeEvent type."""
from raspy.components.motors import motor_state
class MotorStateChangeEvent(object):
"""The event that gets raised when a motor changes state."""
def __init__(self, old_state, new_state):
"""Initialize a new instance of MotorStateChangeEvent.
:param int old_state: The state the motor was in prior to the change.
:param int new_state: The current state of the motor since the change.
"""
self.__oldState = old_state
if self.__oldState is None:
self.__oldState = motor_state.STOP
self.__newState = new_state
if self.__newState is None:
self.__newState = motor_state.STOP
@property
def old_state(self):
"""Get the state the motor was in prior to the change.
:returns: The previous state.
:rtype: int
"""
return self.__oldState
@property
def new_state(self):
"""Get the new (current) state.
:returns: The new (current) state.
:rtype: int
"""
return self.__newState
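# Usage sketch (FORWARD is a hypothetical state constant): handlers typically
# compare the two fields to decide how to react.
#   evt = MotorStateChangeEvent(motor_state.STOP, motor_state.FORWARD)
#   evt.old_state, evt.new_state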
| 27.414634 | 78 | 0.634342 | 139 | 1,124 | 4.906475 | 0.366906 | 0.046921 | 0.032258 | 0.046921 | 0.175953 | 0.108504 | 0.108504 | 0.108504 | 0.108504 | 0.108504 | 0 | 0 | 0.286477 | 1,124 | 40 | 79 | 28.1 | 0.850374 | 0.420819 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff04665b138db9fa8f8563e4a316caa05474ffd6 | 2,537 | py | Python | contrib/PyTorch/Official/cv/image_object_detection/RetinaNet/tests/test_model_analysis.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/tests/test_model_analysis.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/tests/test_model_analysis.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import detectron2.model_zoo as model_zoo
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.utils.analysis import flop_count_operators, parameter_count
def get_model_zoo(config_path):
"""
Like model_zoo.get, but do not load any weights (even pretrained)
"""
cfg_file = model_zoo.get_config_file(config_path)
cfg = get_cfg()
cfg.merge_from_file(cfg_file)
if not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
return build_model(cfg)
class RetinaNetTest(unittest.TestCase):
def setUp(self):
self.model = get_model_zoo("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
def test_flop(self):
# RetinaNet supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800)}]
res = flop_count_operators(self.model, inputs)
self.assertTrue(int(res["conv"]), 146) # 146B flops
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 37915572)
self.assertTrue(res["backbone"], 31452352)
class FasterRCNNTest(unittest.TestCase):
def setUp(self):
self.model = get_model_zoo("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
def test_flop(self):
# Faster R-CNN supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800)}]
res = flop_count_operators(self.model, inputs)
# This only checks flops for backbone & proposal generator
# Flops for box head is not conv, and depends on #proposals, which is
# almost 0 for random inputs.
self.assertTrue(int(res["conv"]), 117)
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 41699936)
self.assertTrue(res["backbone"], 26799296)
| 35.236111 | 81 | 0.707923 | 356 | 2,537 | 4.912921 | 0.441011 | 0.032018 | 0.038879 | 0.018296 | 0.317896 | 0.317896 | 0.287021 | 0.287021 | 0.287021 | 0.256146 | 0 | 0.036346 | 0.197477 | 2,537 | 71 | 82 | 35.732394 | 0.822692 | 0.382736 | 0 | 0.342857 | 0 | 0 | 0.079085 | 0.054902 | 0 | 0 | 0 | 0 | 0.171429 | 1 | 0.2 | false | 0 | 0.171429 | 0 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff07394e395b62d46e9233ac28ffd68f59cd8a96 | 649 | py | Python | app.py | tommorris/ftfyweb | c9bb5b2381b8dc4d831ac7113041988b9e1d2629 | [
"MIT"
] | null | null | null | app.py | tommorris/ftfyweb | c9bb5b2381b8dc4d831ac7113041988b9e1d2629 | [
"MIT"
] | null | null | null | app.py | tommorris/ftfyweb | c9bb5b2381b8dc4d831ac7113041988b9e1d2629 | [
"MIT"
] | null | null | null | import ftfy
from flask import Flask, request, Response
import os
app = Flask(__name__)
@app.route("/", methods=["GET"])
def index():
    return '<!DOCTYPE html>\n<html><head><title>FTFY on the web</title></head><body><form method="POST" action="/"><textarea name="text"></textarea><br /><input type="submit" /></form></body></html>'
@app.route("/", methods=["POST"])
def translate():
data = unicode(request.form['text'])
headers = {"Content-Type": "text/plain; charset=UTF-8"}
return Response(ftfy.fix_text(data), 200, headers=headers)
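# Example request (assuming the app runs locally on the default port):
#   curl -X POST --data-urlencode "text=The Mona Lisa doesnÃ¢â‚¬â„¢t frown." \
#        http://localhost:5000/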
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| 32.45 | 185 | 0.654854 | 93 | 649 | 4.430108 | 0.580645 | 0.014563 | 0.072816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021201 | 0.127889 | 649 | 19 | 186 | 34.157895 | 0.706714 | 0 | 0 | 0 | 0 | 0.066667 | 0.371341 | 0.120185 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0.066667 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff08e3833e5406d35fb9da8d119e002439370636 | 1,399 | py | Python | python/test.py | yushulx/myget-barcode-sample | f47d79794f439cae4baf33e6299802dd35587da7 | [
"CNRI-Python"
] | null | null | null | python/test.py | yushulx/myget-barcode-sample | f47d79794f439cae4baf33e6299802dd35587da7 | [
"CNRI-Python"
] | null | null | null | python/test.py | yushulx/myget-barcode-sample | f47d79794f439cae4baf33e6299802dd35587da7 | [
"CNRI-Python"
] | null | null | null |
from dbr import DynamsoftBarcodeReader
dbr = DynamsoftBarcodeReader()
def InitLicense(license):
dbr.InitLicense(license)
def DecodeFile(fileName):
try:
results = dbr.DecodeFile(fileName)
textResults = results["TextResults"]
resultsLength = len(textResults)
print("count: " + str(resultsLength))
if resultsLength != 0:
for textResult in textResults:
print(textResult["BarcodeFormatString"])
print(textResult["BarcodeText"])
localizationResult = textResult["LocalizationResult"]
x1 = localizationResult["X1"]
y1 = localizationResult["Y1"]
x2 = localizationResult["X2"]
y2 = localizationResult["Y2"]
x3 = localizationResult["X3"]
y3 = localizationResult["Y3"]
x4 = localizationResult["X4"]
y4 = localizationResult["Y4"]
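                # The four (x, y) pairs are the corner points of the barcode's
                # bounding quadrilateral as reported by the SDK.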
localizationPoints = [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]
print(localizationPoints)
        else:
print("No barcode detected")
except Exception as err:
print(err)
if __name__ == "__main__":
#you can change the following three variables' value to your own value.
licenseKey = "Input your own license"
fileName = r"../test.jpg"
InitLicense(licenseKey)
DecodeFile(fileName) | 34.121951 | 71 | 0.593281 | 120 | 1,399 | 6.85 | 0.5 | 0.065693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025667 | 0.303788 | 1,399 | 41 | 72 | 34.121951 | 0.818275 | 0.050036 | 0 | 0 | 0 | 0 | 0.106928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.029412 | 0 | 0.088235 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff094fcc3bfffb49faac08b0936857ef5e95c080 | 3,963 | py | Python | examples/plotting/AdaptiveW_process_SA.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 2 | 2020-01-07T04:12:42.000Z | 2021-12-21T22:25:31.000Z | examples/plotting/AdaptiveW_process_SA.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 11 | 2019-11-29T02:59:34.000Z | 2022-03-12T00:07:28.000Z | examples/plotting/AdaptiveW_process_SA.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 1 | 2020-04-28T12:06:40.000Z | 2020-04-28T12:06:40.000Z | from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
import os
from matplotlib.lines import Line2D
from exp_variant_class import exp_variant  # ,PCA
from sklearn.decomposition import PCA
import argparse
from scipy import integrate
import csv
from scipy.stats.stats import pearsonr
import pandas as pd
def gauss(x, mu, a = 1, sigma = 1/6):
return a * np.exp(-(x - mu)**2 / (2*sigma**2))
def R2():
return r'R^{{{e:d}}}'.format(e=int(2))
cmap = plt.cm.viridis
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplen=len(cmaplist)
color_list=['b','r','g','c','m','y','k','#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf']
plt.rcParams["figure.figsize"] = (10,8)
parser = argparse.ArgumentParser()
parser.add_argument('--agentt',
type=str,choices=['HCheavy','HC','A','Antheavy','FC','Ctp','G','HC_E1'])
args = parser.parse_args()
tif=False
sortt=False
standscale=True
temporal=True
manual_pca=False
recon_num=8
ori_total_vec_rsq=9
truncated_start=200
dll=50
std=True
agentt=args.agentt
precheck=False
if 'HC' in agentt:
total_vec = 6
total_chk=30
ori_final = 3000
ori_begin = 100
ori_step = 100
x_speed_index=8
desired_dist=500
if 'E1' in agentt:
agentt_folder='HC_E1'
elif 'heavy' in agentt:
agentt_folder = 'HCheavy'
else:
agentt_folder = 'HC'
elif 'FC' in agentt:
total_vec = 12
total_chk=30
ori_final = 3000
ori_begin = 100
ori_step = 100
x_speed_index=14
desired_dist=500
agentt_folder='FC'
top_folder=agentt
file_path=os.path.abspath(os.getcwd())
path_list=file_path.split('/')
while path_list[-1] !="synergyDRL":
path_list.pop(-1)
cwd="/".join(path_list)
path_to_folder=cwd+'/experiments_results/Synergy/all_csv/raw_csv'
final = ori_final
begin = ori_begin
step = ori_step
path_to_csv=path_to_folder+'/'+agentt_folder
output_folder=cwd+'/experiments_results/Synergy/all_csv/process_SA_intermediate/'+agentt
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
process_csv = open(output_folder+ '/' + agentt +'_process_all_surface.csv', 'w')
writer = csv.writer(process_csv, lineterminator='\n')
writer.writerow(['Trials', 'Corr SA_P', 'Corr SA_PI', 'Corr SA_E','FSA', 'DSA', 'ASA','FP', 'FPI', 'FE'])
TD3_data=[]
for csv_ in os.listdir(path_to_csv):
current_csv = pd.read_csv(path_to_csv + '/' + csv_)
current_name_list=csv_.split('_')
current_name_list=current_name_list[0:-2]
name=''
for cn in current_name_list:
name=name+cn+'_'
name=name[0:-1]
P_list = current_csv['P']
PI_list = current_csv['PI']
E_list = current_csv['E']
SA_list = current_csv['Surface Area']
Checkpoint_list = current_csv['Checkpoint']
P_list = np.asarray(P_list)
PI_list = np.asarray(PI_list)
E_list = np.asarray(E_list)
SA_list = np.asarray(SA_list)
Checkpoint_list = np.asarray(Checkpoint_list)
corr_SA_P = np.corrcoef(SA_list, P_list)[0, 1]
corr_SA_PI = np.corrcoef(SA_list, PI_list)[0, 1]
corr_SA_E = np.corrcoef(SA_list, E_list)[0, 1]
FP = P_list[0]
FPI = PI_list[0]
FE = E_list[0]
FSA = SA_list[0]
DSA = SA_list[0] - SA_list[-1]
SA_list2 = np.copy(SA_list)
ASA = 0
neg_ASA = 0
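    # ASA is the largest absolute pairwise difference between any two surface
    # area values in the trajectory (a description inferred from the loop below).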
for sa in SA_list:
for sa2 in SA_list2:
diff = sa - sa2
if diff >= 0 and diff > ASA:
ASA = diff
elif diff < 0:
if diff < neg_ASA:
neg_ASA = diff
if np.abs(neg_ASA) > ASA:
ASA = neg_ASA
if 'TD3' not in name:
writer.writerow([name,corr_SA_P,corr_SA_PI,corr_SA_E,FSA,DSA,ASA,FP,FPI,FE])
else:
TD3_data.append([name,corr_SA_P,corr_SA_PI,corr_SA_E,FSA,DSA,ASA,FP,FPI,FE])
for row in TD3_data:
writer.writerow(row)
process_csv.close()
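# Usage sketch (hypothetical layout -- assumes a checkout whose top-level
# directory is named "synergyDRL" and that raw per-trial CSVs already exist
# under experiments_results/Synergy/all_csv/raw_csv/<agent>/):
#   python examples/plotting/AdaptiveW_process_SA.py --agentt HC
# writes HC_process_all_surface.csv with one summary row per trial.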
| 23.873494 | 127 | 0.661368 | 637 | 3,963 | 3.872841 | 0.309262 | 0.029185 | 0.028375 | 0.013377 | 0.135387 | 0.125659 | 0.125659 | 0.093231 | 0.093231 | 0.093231 | 0 | 0.036255 | 0.199596 | 3,963 | 165 | 128 | 24.018182 | 0.741488 | 0.001009 | 0 | 0.096 | 0 | 0 | 0.096057 | 0.032609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016 | false | 0 | 0.104 | 0.016 | 0.136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff098e38a6ea9af6ee303beb724496a6885a6fff | 1,100 | py | Python | p886m/possible_bipartition.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | 1 | 2020-02-20T12:04:46.000Z | 2020-02-20T12:04:46.000Z | p886m/possible_bipartition.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | p886m/possible_bipartition.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | from typing import List
from collections import defaultdict
class Solution:
    def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:
        # Build an undirected "dislike" graph over people 1..N.
        graph = defaultdict(list)
        for a, b in dislikes:
            graph[a].append(b)
            graph[b].append(a)
        RED, BLUE = 0, 1
        color = dict()

        def dfs(person: int, c: int) -> bool:
            if person in color:
                return color[person] == c
            color[person] = c
            # Every disliked neighbor must take the opposite color (c ^ 1).
            return all(dfs(nb, c ^ 1) for nb in graph[person])

        return all(dfs(n, RED) for n in range(1, N + 1) if n not in color)
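# The check succeeds iff the dislike graph is bipartite: the DFS two-colors
# each connected component and fails on any odd cycle. It runs in O(N + E)
# time for N people and E dislike pairs.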
# TESTS
tests = [
    {"N": 4, "dislikes": [[1, 2], [1, 3], [2, 4]], "expected": True},
    {"N": 3, "dislikes": [[1, 2], [1, 3], [2, 3]], "expected": False},
    {
        "N": 5,
        "dislikes": [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5]],
        "expected": False,
    },
]
for t in tests:
    sol = Solution()
    actual = sol.possibleBipartition(t["N"], t["dislikes"])
    print("Possible bipartition in", t["dislikes"], "->", actual)
    assert actual == t["expected"]
| 28.205128 | 77 | 0.513636 | 151 | 1,100 | 3.741722 | 0.337748 | 0.047788 | 0.053097 | 0.038938 | 0.046018 | 0.046018 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 0.304545 | 1,100 | 38 | 78 | 28.947368 | 0.699346 | 0.004545 | 0 | 0 | 0 | 0 | 0.092406 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.266667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff0ad96003ab263c05c0ef76f377fb269621afc1 | 1,315 | py | Python | misc/wysalem.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | 1 | 2022-03-21T21:24:40.000Z | 2022-03-21T21:24:40.000Z | misc/wysalem.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | null | null | null | misc/wysalem.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Wenchang Yang (wenchang@princeton.edu)
# Fri May 15 13:45:34 EDT 2020
if __name__ == '__main__':
    from misc.timer import Timer
    tt = Timer(f'start {__file__}')
import sys, os.path, os, glob
import xarray as xr, numpy as np, pandas as pd
import matplotlib.pyplot as plt  # used by the demo block below (plt.show)
#more imports
import salem
#
if __name__ == '__main__':
    tt.check('end import')
#start from here
def get_world_shape(cntry_names=None):
    '''Return a GeoDataFrame of country borders, optionally filtered by country name(s).'''
    dfgeo = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))
    if cntry_names is None:
        return dfgeo
    elif isinstance(cntry_names, str):
        if cntry_names.lower() == 'china':
            return dfgeo[dfgeo['CNTRY_NAME'].isin(['China', 'Taiwan'])]
        else:
            return dfgeo[dfgeo['CNTRY_NAME'] == cntry_names]
    else:
        return dfgeo[dfgeo['CNTRY_NAME'].isin(cntry_names)]
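# Example calls (assuming salem can fetch its demo shapefile):
#   get_world_shape()                     # all country polygons
#   get_world_shape('China')              # China + Taiwan rows
#   get_world_shape(['France', 'Spain'])  # just the listed countries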
if __name__ == '__main__':
    from wyconfig import *
    print('world')
    shdf = get_world_shape()
    print(shdf)
    shdf.plot()
    print('China')
    shdf = get_world_shape('China')
    print(shdf)
    shdf.plot()
    print('United States')
    shdf = get_world_shape('United States')
    print(shdf)
    shdf.plot()
    tt.check('**Done**')
    plt.show()
| 24.811321 | 74 | 0.642586 | 178 | 1,315 | 4.47191 | 0.466292 | 0.075377 | 0.065327 | 0.079146 | 0.169598 | 0.114322 | 0 | 0 | 0 | 0 | 0 | 0.011776 | 0.225095 | 1,315 | 52 | 75 | 25.288462 | 0.769382 | 0.152091 | 0 | 0.314286 | 0 | 0 | 0.146872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.171429 | 0 | 0.314286 | 0.171429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff0b2afd610385ef75e98eb2924a9e2596709742 | 4,120 | py | Python | ops/update_version.py | DropD/sparkplug | be817103ef32cbc908de004659b538993d14eeb4 | [
"MIT"
] | 5 | 2017-11-28T14:57:53.000Z | 2019-01-16T08:48:10.000Z | ops/update_version.py | DropD/sparkplug | be817103ef32cbc908de004659b538993d14eeb4 | [
"MIT"
] | 49 | 2017-04-11T11:18:58.000Z | 2021-02-10T23:06:20.000Z | ops/update_version.py | DropD/sparkplug | be817103ef32cbc908de004659b538993d14eeb4 | [
"MIT"
] | 8 | 2017-06-16T17:01:33.000Z | 2021-02-09T10:28:03.000Z | """Update version numbers everywhere based on git tags."""
from __future__ import print_function
import os
import re
import json
import fileinput
import contextlib
import subprocess
try:
    # prefer the backport for Python <3.5
    from pathlib2 import Path
except ImportError:
    from pathlib import Path
import collections
from packaging import version
def subpath(*args):
    return os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', *args))
@contextlib.contextmanager
def file_input(*args, **kwargs):
    """Context manager for a FileInput object."""
    input_fo = fileinput.FileInput(*args, **kwargs)
    try:
        yield input_fo
    finally:
        input_fo.close()
class VersionUpdater(object):
    """
    Version number synchronisation interface.

    Updates the version information in

    * setup.json
    * reentry/__init__.py

    to the current version number.

    The current version number is either parsed from the output of ``git describe --tags --match v*.*.*`` or, if the command fails for
    any reason, read from setup.json. The current version number is decided on init; synchronization can be executed by calling ``.sync()``.
    """

    version_pat = re.compile(r'\d+.\d+.\d+(-(alpha|beta|rc)(.\d+){0,3}){0,1}')
    init_version_pat = re.compile(r'(__version__ = )([\'"])(.*?)([\'"])', re.DOTALL | re.MULTILINE)
    replace_tmpl = r'\1\g<2>{}\4'
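    # Illustrative match for init_version_pat:
    #   "__version__ = '1.0.3'"  ->  groups ("__version__ = ", "'", "1.0.3", "'")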
    def __init__(self):
        """Initialize with documents that should be kept up to date and actual version."""
        self.top_level_init = Path(subpath('reentry', '__init__.py'))
        self.setup_json = Path(subpath('setup.json'))
        self.version = self.get_version()

    def write_to_init(self):
        init_content = self.top_level_init.read_text()
        # The flags are already compiled into init_version_pat, so they must
        # not be passed to re.sub (its fourth positional argument is `count`).
        self.top_level_init.write_text(re.sub(self.init_version_pat, self.new_version_str, init_content))
    def write_to_setup(self):
        """Write the updated version number to setup.json."""
        with open(str(self.setup_json), 'r') as setup_fo:
            # preserve order
            setup = json.load(setup_fo, object_pairs_hook=collections.OrderedDict)
        setup['version'] = str(self.version)
        with open(str(self.setup_json), 'w') as setup_fo:
            json.dump(setup, setup_fo, indent=4, separators=(',', ': '))

    @property
    def new_version_str(self):
        return self.replace_tmpl.format(str(self.version))

    @property
    def setup_version(self):
        """Grab the parsed version from setup.json."""
        with open(str(self.setup_json), 'r') as setup_fo:
            setup = json.load(setup_fo)
        try:
            version_string = setup['version']
        except KeyError:
            raise AttributeError('No version found in setup.json')
        return version.parse(version_string)

    @property
    def init_version(self):
        """Grab the parsed version from the init file."""
        match = re.search(self.init_version_pat, self.top_level_init.read_text())
        if not match:
            raise AttributeError('No __version__ found in top-level __init__.py')
        return version.parse(match.groups()[2])
    @property
    def tag_version(self):
        """Get the current version number from ``git describe``, fall back to setup.json."""
        try:
            describe_byte_string = subprocess.check_output(['git', 'describe', '--tags', '--match', 'v*.*.*'])
            match = re.search(self.version_pat, describe_byte_string.decode(encoding='UTF-8'))
            # match.group(0) is the exact "X.Y.Z[-pre]" substring that matched.
            version_string = match.group(0)
            return version.parse(version_string)
        except subprocess.CalledProcessError:
            return self.setup_version
    def get_version(self):
        return max(self.setup_version, self.init_version, self.tag_version)

    def sync(self):
        if self.version > self.init_version:
            self.write_to_init()
        if self.version > self.setup_version:
            self.write_to_setup()
if __name__ == '__main__':
    VERSION_UPDATER = VersionUpdater()
    VERSION_UPDATER.sync()
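# Typical invocation, e.g. from a release hook (assuming the repo carries
# annotated tags of the form vX.Y.Z):
#   python ops/update_version.py
# The highest of the {git tag, setup.json, __init__.py} versions wins and is
# written back to the other two locations.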
| 34.049587 | 134 | 0.65801 | 539 | 4,120 | 4.816327 | 0.315399 | 0.045069 | 0.023112 | 0.035439 | 0.24037 | 0.114792 | 0.060092 | 0.033128 | 0.033128 | 0.033128 | 0 | 0.004042 | 0.219417 | 4,120 | 120 | 135 | 34.333333 | 0.803172 | 0.205097 | 0 | 0.157895 | 0 | 0.026316 | 0.0794 | 0.014067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144737 | false | 0 | 0.157895 | 0.039474 | 0.447368 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff0b92cac5191bfb5cafcf2ad2b6f9c3f1b55cd2 | 1,516 | py | Python | my_twitter_bot.py | Tomonori-Yoshino/Automating_tweets | 117b09973f22a2438335facfdb427dbb2e23d005 | [
"MIT"
] | null | null | null | my_twitter_bot.py | Tomonori-Yoshino/Automating_tweets | 117b09973f22a2438335facfdb427dbb2e23d005 | [
"MIT"
] | null | null | null | my_twitter_bot.py | Tomonori-Yoshino/Automating_tweets | 117b09973f22a2438335facfdb427dbb2e23d005 | [
"MIT"
] | null | null | null | import tweepy
import time
print("this is my twitter bot")
# Credentials are read from the environment instead of being hard-coded;
# real API keys should never be committed to a repository.
CONSUMER_KEY = os.environ.get('CONSUMER_KEY')
CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')
ACCESS_KEY = os.environ.get('ACCESS_KEY')
ACCESS_SECRET = os.environ.get('ACCESS_SECRET')
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
FILE_NAME = 'last_seen_id.txt'
def retrieve_last_seen_id(file_name):
    # Fall back to 1 on the first run, when the state file does not exist yet.
    try:
        with open(file_name, 'r') as f_read:
            return int(f_read.read().strip())
    except (FileNotFoundError, ValueError):
        return 1
def save_last_seen_id(last_seen_id, file_name):
    with open(file_name, 'w') as f_write:
        f_write.write(str(last_seen_id))
def reply_to_tweets():
    print('Retrieving and replying to tweets...')
    last_seen_id = retrieve_last_seen_id(FILE_NAME)
    # since_id makes the intent explicit: only fetch mentions newer than the
    # last one we handled.
    mentions = api.mentions_timeline(
        since_id=last_seen_id,
        tweet_mode='extended'
    )
    for mention in reversed(mentions):
        print(str(mention.id) + '-' + mention.full_text)
        last_seen_id = mention.id
        save_last_seen_id(last_seen_id, FILE_NAME)
        if '#helloworld' in mention.full_text.lower():
            print('found #helloworld!')
            print('responding back...')
            api.update_status(
                '@' + mention.user.screen_name + ' #HelloWorld back to you!!',
                in_reply_to_status_id=mention.id
            )
while True:
    reply_to_tweets()
    time.sleep(10)
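# On the first run there is no last_seen_id.txt, so the bot starts from
# since_id=1 and may reply to a page of older mentions once; seeding the file
# with a recent tweet ID skips them. Afterwards it polls every 10 seconds.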
| 28.074074 | 103 | 0.718338 | 196 | 1,516 | 5.229592 | 0.382653 | 0.101463 | 0.126829 | 0.054634 | 0.115122 | 0.115122 | 0.062439 | 0.062439 | 0.062439 | 0 | 0 | 0.0392 | 0.175462 | 1,516 | 53 | 104 | 28.603774 | 0.7808 | 0 | 0 | 0 | 0 | 0 | 0.215842 | 0.112211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.05 | 0 | 0.175 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff0ff3bb7018607eef270c5c0dfe4b9eddf207b1 | 6,850 | py | Python | crypto.py | souravdas142/LiveChain | f590060e8d77f29694e746459c4e4f780c37295c | [
"MIT"
] | 1 | 2021-05-08T08:12:34.000Z | 2021-05-08T08:12:34.000Z | crypto.py | souravdas142/LiveChain | f590060e8d77f29694e746459c4e4f780c37295c | [
"MIT"
] | null | null | null | crypto.py | souravdas142/LiveChain | f590060e8d77f29694e746459c4e4f780c37295c | [
"MIT"
] | 1 | 2021-05-08T08:13:18.000Z | 2021-05-08T08:13:18.000Z | import random
class crypto:
P = 2 ** 256 - 2 ** 32 - 2 ** 9 - 2 ** 8 - 2 ** 7 - 2 ** 6 - 2 ** 4 - 1 # prime number for modulus operations
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 # order for the elliptic curve y^2=x^3+7 ,used in bitcoin
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 # x co-ordinate of generating point of secp256k1 i.e. curve used in bitcoin
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 # y co-ordinate of generating point of secp256k1 i.e. curve used in bitcoin
    # Modular inverse via the extended Euclidean algorithm:
    # returns the x with (a * x) % n == 1.
    def modinv(self, a, n):
        lm, hm = 1, 0
        low, high = a % n, n
        while low > 1:
            ratio = high // low
            nm = hm - lm * ratio
            new = high - low * ratio
            hm = lm
            high = low
            lm = nm
            low = new
        return lm % n
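    # e.g. modinv(3, 7) == 5, since 3 * 5 = 15 == 1 (mod 7).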
    # Elliptic curve point addition
    def ECadd(self, xp, yp, xq, yq):
        # The chord formula breaks down when both points coincide, so fall
        # back to point doubling in that case.
        if xp == xq and yp == yq:
            return self.ECdouble(xp, yp)
        m = ((yq - yp) * self.modinv(xq - xp, self.P)) % self.P
        xr = (m * m - xp - xq) % self.P
        yr = (m * (xp - xr) - yp) % self.P
        return (xr, yr)
    # Elliptic curve point doubling (tangent formula; the curve has a = 0)
    def ECdouble(self, xp, yp):
        l = ((3 * xp * xp) * self.modinv(2 * yp, self.P)) % self.P
        X = (l * l - 2 * xp) % self.P
        Y = (l * (xp - X) - yp) % self.P
        return X, Y
    # Elliptic curve scalar multiplication
    ## 1. scalar multiplication with the generator point
    def ECmult(self, scaler):
        if scaler == 0:
            return 0, 0
        # Precompute 2^i * G for i = 0..256, then add the table entries that
        # correspond to the set bits of the scalar (double-and-add).
        _2pX = [0] * 258
        _2pY = [0] * 258
        _2pX[0], _2pY[0] = self.Gx, self.Gy
        _X = self.Gx
        _Y = self.Gy
        for i in range(1, 257):
            _2pX[i], _2pY[i] = self.ECdouble(_2pX[i - 1], _2pY[i - 1])
        index = 0
        while not (scaler & 1):
            index += 1
            scaler >>= 1
        _X = _2pX[index]
        _Y = _2pY[index]
        scaler >>= 1
        index += 1
        while scaler > 0:
            if scaler & 1:
                _X, _Y = self.ECadd(_X, _Y, _2pX[index], _2pY[index])
            scaler >>= 1
            index += 1
        return _X, _Y
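    # Cost note: the table above is rebuilt on every call, so each call does
    # 256 doublings up front plus one addition per set bit of the scalar;
    # caching the table across calls would amortize the doublings.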
    # Fast EC scalar multiplication, ~4x faster than the table method above;
    # could be improved further by caching precomputed groups of a few bits.
    def ECmult_fast(self, scaler):
        if scaler == 0:
            return 0, 0
        if scaler == 1:
            return self.Gx, self.Gy
        # Start the accumulator at G, then process the remaining (scaler - 1)
        # with interleaved double-and-add.
        scaler -= 1
        _2px = self.Gx
        _2py = self.Gy
        _x = self.Gx
        _y = self.Gy
        while scaler:
            if scaler & 1:
                _x, _y = self.ECadd(_x, _y, _2px, _2py)
            scaler >>= 1
            _2px, _2py = self.ECdouble(_2px, _2py)
        return _x, _y
    ## 2. scalar multiplication with an arbitrary point on the secp256k1 curve
    ### Example: 2 * (4G) = 8G, 4 * (5G) = 20G, etc.
    def ECmultp(self, Sx, Sy, scaler):
        _2pX = [0] * 258
        _2pY = [0] * 258
        _2pX[0], _2pY[0] = Sx, Sy
        _X = Sx
        _Y = Sy
        for i in range(1, 257):
            _2pX[i], _2pY[i] = self.ECdouble(_2pX[i - 1], _2pY[i - 1])
        index = 0
        while not (scaler & 1):
            index += 1
            scaler >>= 1
        _X = _2pX[index]
        _Y = _2pY[index]
        scaler >>= 1
        index += 1
        while scaler > 0:
            if scaler & 1:
                _X, _Y = self.ECadd(_X, _Y, _2pX[index], _2pY[index])
            scaler >>= 1
            index += 1
        return _X, _Y
    # Generate a private key
    def generate_private_key(self):
        st = 1 << 254
        en = self.order - 1
        rn = random.randrange(st, en)
        # Mix the system randomness through two rounds of scalar multiplication
        # and take a resulting y coordinate as the key.
        (rx, ry) = self.ECmult(rn)
        rn = (rx + ry) % self.order
        (rx, ry) = self.ECmult(rn)
        return ry
    # Private key to public key conversion, i.e. the x coordinate of priv * G
    def private_to_public(self, priv_k):
        return self.ECmult(priv_k)[0]
    ## Sign a message; returns the random part r and the signature s
    ## (an ECDSA-style scheme: s = (r * priv + msg) * k^-1 mod order)
    def sign_message(self, msg, prv_k):
        print("signing....")
        st = 1 << 254
        en = self.order - 1
        rn = random.randrange(st, en)  # per-signature nonce k
        (rx, ry) = self.ECmult(rn)
        sign = (((rx * prv_k) % self.order + msg) * self.modinv(rn, self.order)) % self.order
        print("rx:", rx)
        print("sign:", sign)
        return (rx, sign)
    # Verify a message: check that (msg/s) * G + (r/s) * Pub has x coordinate r
    def verify_message(self, s, r, msg, pub_x, pub_y):
        print("verifying...")
        m_s = (msg * self.modinv(s, self.order)) % self.order
        r_s = (r * self.modinv(s, self.order)) % self.order
        (x, y) = self.ECmult(m_s)
        (x2, y2) = self.ECmultp(pub_x, pub_y, r_s)
        (x3, y3) = self.ECadd(x, y, x2, y2)
        print("(x,y):", (x, y))
        print("(x2,y2):", (x2, y2))
        print("(x3,y3):", (x3, y3))
        if x3 == r:
            return True
        else:
            return False
    # Encryption of the message to be sent.
    # Currently this is only XOR with a shared point coordinate (NOT SECURE);
    # it is to be changed to an ECadd/ECsub-based scheme.
    def encrypt_message(self, msg, r, prv_k, pub_k):
        # Shared secret: r * (prv_k * Pub); both sides can derive rx.
        (px, py) = self.ECmultp(pub_k[0], pub_k[1], prv_k)
        (rx, ry) = self.ECmultp(px, py, r)
        enc_msg = 0
        l = 0
        cnt = 0
        # Pack the text into 30-character (240-bit) chunks, XOR each chunk
        # with rx, and concatenate the chunks into one big integer.
        for ch in msg:
            cnt += 1
            l <<= 8
            l += ord(ch)
            if cnt == 30:
                cnt = 0
                l = l ^ rx
                enc_msg <<= 256
                enc_msg |= l
                l = 0
        if l:
            # Handle the final partial chunk, if any.
            l = l ^ rx
            enc_msg <<= 256
            enc_msg |= l
        return enc_msg
    # Decryption: derive the same rx and XOR the 256-bit chunks back.
    # Currently this is only XOR (NOT SECURE); it is to be changed to an
    # ECadd/ECsub-based scheme.
    def decrypt_message(self, enc_msg, r, prv_k, pub_k):
        (px, py) = self.ECmultp(pub_k[0], pub_k[1], prv_k)
        (rx, ry) = self.ECmultp(px, py, r)
        msg = ""
        while enc_msg:
            l = (enc_msg & ((1 << 256) - 1)) ^ rx
            while l:
                ch_i = l & 255
                msg = chr(ch_i) + msg
                l >>= 8
            enc_msg >>= 256
        return msg
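# Quick self-check (illustrative only -- this toy scheme is for study, not
# production use; it relies on nothing beyond the methods above):
#   c = crypto()
#   priv = c.generate_private_key()
#   pub = c.ECmult(priv)                  # (x, y) public point
#   r, s = c.sign_message(12345, priv)
#   assert c.verify_message(s, r, 12345, pub[0], pub[1])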
| 32.464455 | 164 | 0.500292 | 912 | 6,850 | 3.638158 | 0.212719 | 0.008439 | 0.0217 | 0.023508 | 0.330018 | 0.312236 | 0.312236 | 0.280892 | 0.280892 | 0.268837 | 0 | 0.105611 | 0.38073 | 6,850 | 210 | 165 | 32.619048 | 0.676568 | 0.208467 | 0 | 0.408805 | 0 | 0 | 0.010251 | 0 | 0 | 0 | 0.012766 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.006289 | 0.006289 | 0.220126 | 0.044025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff1042f2a23ccb08afabbfa3dbb2d4b96cc2e564 | 2,062 | py | Python | fio.py | fabrickit-fablib/benchmark | 48670230ab67192074047dbbc397c2b5e13edcdf | [
"MIT"
] | null | null | null | fio.py | fabrickit-fablib/benchmark | 48670230ab67192074047dbbc397c2b5e13edcdf | [
"MIT"
] | null | null | null | fio.py | fabrickit-fablib/benchmark | 48670230ab67192074047dbbc397c2b5e13edcdf | [
"MIT"
] | null | null | null | # coding: utf-8
import copy
import json
from fabkit import * # noqa
from fablib.base import SimpleBase
class Fio(SimpleBase):
def __init__(self):
self.data_key = 'sysbench'
self.data = {
'mysql': {
'user': 'sysbench',
'password': 'sysbench',
'database': 'sysbench',
},
'oltp_option': {
'range_size': 100,
'table_size': 10000000,
'tables': 2,
'threads': 1,
'events': 0,
'time': 60,
},
}
self.packages = {
'CentOS Linux 7.*': [
'epel-release',
'fio',
],
}
self.services = {}
def prepare(self):
self.init()
self.install_packages()
filer.mkdir('/tmp/fio')
filer.template('/tmp/fio_job.ini')
def bench(self):
self.init()
sudo('fio --output=/tmp/fio_out.json --output-format=json /tmp/fio_job.ini')
with api.hide('stdout'):
result = run('cat /tmp/fio_out.json')
data = {}
result = json.loads(result)
global_options = result['global options']
for job in result['jobs']:
options = copy.deepcopy(global_options)
options.update(job['job options'])
rw = options['rw']
bs = options['bs']
iodepth = options['iodepth']
numjobs = options.get('numjobs', 1)
name = '{0}_{1}_qd{2}_j{3}'.format(rw, bs, iodepth, numjobs)
data[name] = {
'read_iops': job['read']['iops'],
'read_latency': job['read']['clat_ns']['percentile']['99.000000'],
'write_iops': job['write']['iops'],
'write_latency': job['write']['clat_ns']['percentile']['99.000000'],
'cpu_usr': job['usr_cpu'],
'cpu_sys': job['sys_cpu'],
'cpu_ctx': job['ctx'],
}
return data
| 28.246575 | 84 | 0.461203 | 206 | 2,062 | 4.475728 | 0.470874 | 0.032538 | 0.02603 | 0.02603 | 0.052061 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030685 | 0.383608 | 2,062 | 72 | 85 | 28.638889 | 0.694729 | 0.008729 | 0 | 0.033333 | 0 | 0.016667 | 0.238119 | 0.012739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.016667 | 0.066667 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff11450e5548a704cb271296b84792ba3051678a | 5,433 | py | Python | knowledgebase/db/mongo.py | linkdd/knowledgebase | 89cd5a6802efd1359863f91b742a43684354b8b2 | [
"MIT"
] | 1 | 2017-03-20T08:48:33.000Z | 2017-03-20T08:48:33.000Z | knowledgebase/db/mongo.py | linkdd/knowledgebase | 89cd5a6802efd1359863f91b742a43684354b8b2 | [
"MIT"
] | null | null | null | knowledgebase/db/mongo.py | linkdd/knowledgebase | 89cd5a6802efd1359863f91b742a43684354b8b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from knowledgebase.db.base import Vertex as BaseVertex
from knowledgebase.db.base import Edge as BaseEdge
from knowledgebase.db.base import ElementView as BaseElementView
from knowledgebase.db.base import Graph as BaseGraph
from bson.objectid import ObjectId
from pymongo import MongoClient
from copy import deepcopy
VERTEX_COLLECTION = 'kb_vertices'
EDGE_COLLECTION = 'kb_edges'
class Vertex(BaseVertex):
__collection__ = VERTEX_COLLECTION
@classmethod
def get(cls, graph, eid):
elt = graph.db[VERTEX_COLLECTION].find_one({'_id': ObjectId(eid)})
if elt is not None:
elt['eid'] = str(elt.pop('_id'))
elt = cls(graph, data=elt)
return elt
@classmethod
def get_all(cls, graph):
elts = graph.db[VERTEX_COLLECTION].find()
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = cls(graph, data=elt)
yield elt
def save(self):
doc = self.data()
if self.eid is not None:
_id = ObjectId(doc.pop('eid'))
self._graph.db[VERTEX_COLLECTION].replace_one(
{'_id': _id},
doc
)
else:
ret = self._graph.db[VERTEX_COLLECTION].insert_one(doc)
self.eid = str(ret.inserted_id)
def outE(self, **properties):
properties['source'] = ObjectId(self.eid)
elts = self._graph.db[EDGE_COLLECTION].find(properties)
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.edge_class(self._graph, data=elt)
yield elt
def inE(self, **properties):
properties['target'] = ObjectId(self.eid)
elts = self._graph.db[EDGE_COLLECTION].find(properties)
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.edge_class(self._graph, data=elt)
yield elt
def bothE(self, **properties):
src_properties = deepcopy(properties)
src_properties['source'] = ObjectId(self.eid)
tgt_properties = deepcopy(properties)
tgt_properties['target'] = ObjectId(self.eid)
elts = self._graph.db[EDGE_COLLECTION].find({
'$or': [
src_properties,
tgt_properties
]
})
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.edge_class(self._graph, data=elt)
yield elt
class Edge(BaseEdge):
__collection__ = EDGE_COLLECTION
@classmethod
def get(cls, graph, eid):
elt = graph.db[EDGE_COLLECTION].find_one({'_id': ObjectId(eid)})
if elt is not None:
elt['eid'] = str(elt.pop('_id'))
elt = cls(graph, data=elt)
return elt
@classmethod
def get_all(cls, graph):
elts = graph.db[EDGE_COLLECTION].find()
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = cls(graph, data=elt)
yield elt
def save(self):
doc = self.data()
if self.eid is not None:
_id = ObjectId(doc.pop('eid'))
self._graph.db[EDGE_COLLECTION].replace_one(
{'_id': _id},
doc
)
else:
ret = self._graph.db[EDGE_COLLECTION].insert_one(doc)
self.eid = str(ret.inserted_id)
def outV(self):
elt = self._graph.db[VERTEX_COLLECTION].find_one({
'_id': ObjectId(self.target)
})
if elt is not None:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.vertex_class(self._graph, data=elt)
return elt
def inV(self):
elt = self._graph.db[VERTEX_COLLECTION].find_one({
'_id': ObjectId(self.source)
})
if elt is not None:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.vertex_class(self._graph, data=elt)
return elt
def bothV(self):
elts = self._graph.db[VERTEX_COLLECTION].find({
'_id': {'$in': [
ObjectId(self.source),
ObjectId(self.target)
]}
})
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = self._graph.vertex_class(self._graph, data=elt)
yield elt
class ElementView(BaseElementView):
def find(self, **properties):
elts = self.graph.db[self.cls.__collection__].find(properties)
for elt in elts:
elt['eid'] = str(elt.pop('_id'))
elt = self.cls(self.graph, data=elt)
yield elt
def update(self, eid, **data):
self.graph.db[self.cls.__collection__].update_one(
{'_id': ObjectId(eid)},
{'$set': data}
)
def delete(self, eid):
self.graph.db[self.cls.__collection__].remove_one(
{'_id': ObjectId(eid)}
)
class Graph(BaseGraph):
elementview_class = ElementView
vertex_class = Vertex
edge_class = Edge
def __init__(self, *args, **kwargs):
super(Graph, self).__init__(*args, **kwargs)
self.conn = MongoClient(self.conf.uri)
self.db = self.conn.get_default_database()
if self.conf.username and self.conf.password:
self.db.authenticate(self.conf.username, self.conf.password)
def close(self):
self.conn.close()
| 27.029851 | 74 | 0.561936 | 653 | 5,433 | 4.48392 | 0.134763 | 0.079918 | 0.048839 | 0.045082 | 0.684085 | 0.616803 | 0.580943 | 0.571721 | 0.563525 | 0.563525 | 0 | 0.000267 | 0.311798 | 5,433 | 200 | 75 | 27.165 | 0.78283 | 0.003865 | 0 | 0.510204 | 0 | 0 | 0.028096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115646 | false | 0.013605 | 0.047619 | 0 | 0.251701 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff13684383add06888abbfe0e42239e9acfeb522 | 1,591 | py | Python | visualization/example_and_references/plot_vector_field.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | visualization/example_and_references/plot_vector_field.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | visualization/example_and_references/plot_vector_field.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | """
Drawing Vector Fields
https://stackoverflow.com/questions/25342072/computing-and-drawing-vector-fields
Adding colorbar to existing axis
https://stackoverflow.com/questions/32462881/add-colorbar-to-existing-axis
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
"""
Create data
"""
y, x = np.mgrid[10:-10:100j, 10:-10:100j]
x_obstacle, y_obstacle = 0.0, 0.0
alpha_obstacle, a_obstacle, b_obstacle = 1.0, 1e3, 2e3
p = -alpha_obstacle * np.exp(-((x - x_obstacle)**2 / a_obstacle
                               + (y - y_obstacle)**2 / b_obstacle))
# For the absolute values of "dx" and "dy" to mean anything, we'll need to
# specify the "cellsize" of our grid. For purely visual purposes, though,
# we could get away with just "dy, dx = np.gradient(p)".
dy, dx = np.gradient(p, np.diff(y[:2, 0])[0], np.diff(x[0, :2])[0])
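# (np.diff(y[:2, 0])[0] and np.diff(x[0, :2])[0] are the uniform grid spacings
# along each axis, so "dx" and "dy" come out in true data units per the note
# above.)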
"""
Version one
"""
skip = (slice(None, None, 10), slice(None, None, 10))
fig, axes = plt.subplots(2)
ax = axes[0]
im = ax.imshow(
    p,
    extent=[x.min(), x.max(), y.min(), y.max()],
    cmap=plt.get_cmap('plasma'),
)
ax.quiver(x[skip], y[skip], dx[skip], dy[skip])
# fig.colorbar(im)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im, cax=cax, orientation='vertical')
ax.set(aspect=1, title='Quiver Plot')
"""
Version two
"""
ax = axes[1]
ax.streamplot(x, y, dx, dy, color=p, density=0.5, cmap='gist_earth')
cont = ax.contour(x, y, p, cmap='gist_earth')
ax.clabel(cont)
ax.set(aspect=1, title='Streamplot with contours')
plt.show()
| 25.66129 | 80 | 0.670647 | 267 | 1,591 | 3.921348 | 0.438202 | 0.007641 | 0.036294 | 0.057307 | 0.061127 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047337 | 0.15022 | 1,591 | 61 | 81 | 26.081967 | 0.727071 | 0.27027 | 0 | 0 | 0 | 0 | 0.069533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |