hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2df5218e49c9b42a48032a2f8b0a70abe590d7e1 | 5,354 | py | Python | meteo.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null | meteo.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null | meteo.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null | import logging
import sys
import xarray as xr # http://xarray.pydata.org/
from metpy.units import units
import metpy.calc as mpcalc
from czml3 import Packet
from czml3.properties import (
# Billboard,
# Clock,
Color,
# Label,
# Point,
# Material,
# Model,
# ViewFrom,
# Orientation,
# Path,
# Position,
PositionList,
Polyline,
SolidColorMaterial,
# PolylineOutlineMaterial,
PolylineArrowMaterial,
# PolylineDashMaterial,
# PolylineMaterial
)
import seaborn as sns
from geographiclib.constants import Constants
from geographiclib.geodesic import Geodesic
# https://stackoverflow.com/questions/33001420/find-destination-coordinates-given-starting-coordinates-bearing-and-distance
# https://stackoverflow.com/a/33026930/2468365
# arguments: lat1 (degrees), lon1 (degrees), bearing (degrees), distance (meters)
def getEndpoint(lat1, lon1, bearing, distance):
geod = Geodesic(Constants.WGS84_a, Constants.WGS84_f)
d = geod.Direct(lat1, lon1, bearing, distance)
return d['lat2'], d['lon2']
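# Illustrative check of getEndpoint (values are approximate and not from the
# original source): starting at 47.0 N, 15.0 E and moving 1000 m on bearing 90
# (due east) along the WGS84 geodesic returns roughly (47.0, 15.0132).
#   lat2, lon2 = getEndpoint(47.0, 15.0, 90.0, 1000.0)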
class Meteo:
def __init__(self,
netcdf=None,
bbox=None,
windcolors="viridis"):
self.wind_colormap = sns.color_palette(windcolors, as_cmap=True)
self.bbox = bbox
if self.bbox:
ds = xr.open_dataset(netcdf)
self.ds = ds.where((ds.t.latitude > self.bbox['min_latitude']) &
(ds.t.latitude < self.bbox['max_latitude']) &
(ds.t.longitude > self.bbox['min_longitude']) &
(ds.t.longitude < self.bbox['max_longitude']), drop=True)
ds.close()
else:
self.ds = xr.open_dataset(netcdf)
logging.debug("%s: data_vars %s", netcdf, ds.data_vars)
logging.debug("valid_time %s", ds.valid_time)
logging.debug("time %s", ds.time)
logging.debug("step %s", ds.step)
def czml_wind_vectors(self, layer):
wind_packets = []
speed_scale = 100
for lat in self.ds.coords['latitude']:
for lon in self.ds.coords['longitude']:
u = self.ds.u.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
v = self.ds.v.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
w = self.ds.wz.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
t = self.ds.t.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
qv = self.ds.q.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
p = self.ds.pres.sel(latitude=lat, longitude=lon,
generalVerticalLayer=layer)
tempK = float(t) * units.K
celsius = tempK.to('degC').magnitude
# Dewpoint K
dewpt = mpcalc.dewpoint_from_specific_humidity(qv, t, p)
# relative humidity
relhum = mpcalc.relative_humidity_from_dewpoint(t, dewpt)
height = self.ds.h.sel(
latitude=lat, longitude=lon, generalVerticalLayer=layer)
                # assuming u/v are in m/s (standard for GRIB wind components), convert to knots
                ms = (mpcalc.wind_speed(u, v) * units('m/s')).to(units.kt)
wdir = mpcalc.wind_direction(u, v, convention='from')
logging.debug("%f %f %f %3.1f %3.1f", float(lat), float(
lon), float(height), ms.magnitude, wdir.magnitude)
logging.debug("wind direction %f", wdir.magnitude) # radians
logging.debug("wind speed %f", ms.magnitude)
                latend, lonend = getEndpoint(lat, lon, wdir.magnitude,
                                             ms.magnitude * speed_scale)
plist = PositionList(cartographicDegrees=[float(lon),
float(lat),
float(height),
lonend, latend, float(height)])
if True:
# norm = matplotlib.colors.Normalize(vmin=10.0, vmax=20.0)
# print(norm(15.0)) # 0.5
ratio = float(height)/6000.
arrow_color = SolidColorMaterial(color=Color(rgbaf=self.wind_colormap(ratio)))
else:
arrow_rgba = [200, 200, 0, 255]
arrow_color = SolidColorMaterial(color=Color(rgba=arrow_rgba))
mat = PolylineArrowMaterial(polylineArrow=arrow_color)
pl = Polyline(width=5,
#show=True,
#clampToGround=False,
material=mat,
positions=plist)
p3 = Packet(id="%4.fm wind %3.1fkt %3.0f' %3.1f°/%3.1f° rh %2.0f%%" % (
height,
ms.magnitude,
wdir.magnitude,
                    celsius, dewpt.to('degC').magnitude, relhum * 100),
polyline=pl)
wind_packets.append(p3)
return wind_packets
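# Minimal usage sketch (the file name, bbox values and layer index below are
# illustrative, not from the original source):
#
#   met = Meteo(netcdf='icon_forecast.nc',
#               bbox={'min_latitude': 46.5, 'max_latitude': 47.5,
#                     'min_longitude': 15.0, 'max_longitude': 16.0})
#   packets = met.czml_wind_vectors(layer=60)  # one Polyline Packet per grid point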
| 36.924138 | 123 | 0.520732 | 528 | 5,354 | 5.208333 | 0.359848 | 0.024 | 0.035636 | 0.058545 | 0.222909 | 0.129818 | 0.129818 | 0 | 0 | 0 | 0 | 0.025225 | 0.378035 | 5,354 | 144 | 124 | 37.180556 | 0.8 | 0.102167 | 0 | 0.085106 | 0 | 0.010638 | 0.049571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031915 | false | 0 | 0.106383 | 0 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2df5f7eab6052f3e891da6a10f332205f7943272 | 4,616 | py | Python | models/small_unet.py | ajithvallabai/Circle-U-Net | 45733fea2f567837f890d6d93c503ad23b013715 | [
"MIT"
] | 1 | 2021-06-05T09:47:55.000Z | 2021-06-05T09:47:55.000Z | models/small_unet.py | ajithvallabai/Circle-U-Net | 45733fea2f567837f890d6d93c503ad23b013715 | [
"MIT"
] | null | null | null | models/small_unet.py | ajithvallabai/Circle-U-Net | 45733fea2f567837f890d6d93c503ad23b013715 | [
"MIT"
] | 1 | 2021-09-19T21:17:27.000Z | 2021-09-19T21:17:27.000Z | from tensorflow.keras.layers import Input, Add, Dropout, Permute, add, concatenate, UpSampling2D
from tensorflow.keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D, Cropping2D, Conv2D, BatchNormalization
from tensorflow.compat.v1.layers import conv2d_transpose
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
def UNet(n_filters=16, bn=True, dilation_rate=1):
    '''Build a small U-Net Keras model.
Inputs:
n_filters - base convolution filters
bn - flag to set batch normalization
dilation_rate - convolution dilation rate
Output: Unet keras Model
'''
# Define input batch shape
batch_shape = (256, 256, 3)
inputs = Input(batch_shape=(5, 256, 256, 3))
print(inputs)
conv1 = Conv2D(n_filters * 1, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(inputs)
if bn:
conv1 = BatchNormalization()(conv1)
conv1 = Conv2D(n_filters * 1, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv1)
if bn:
conv1 = BatchNormalization()(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv1)
conv2 = Conv2D(n_filters * 2, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(pool1)
if bn:
conv2 = BatchNormalization()(conv2)
conv2 = Conv2D(n_filters * 2, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv2)
if bn:
conv2 = BatchNormalization()(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv2)
conv3 = Conv2D(n_filters * 4, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(pool2)
if bn:
conv3 = BatchNormalization()(conv3)
conv3 = Conv2D(n_filters * 4, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv3)
if bn:
conv3 = BatchNormalization()(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv3)
conv4 = Conv2D(n_filters * 8, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(pool3)
if bn:
conv4 = BatchNormalization()(conv4)
conv4 = Conv2D(n_filters * 8, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv4)
if bn:
conv4 = BatchNormalization()(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv4)
conv5 = Conv2D(n_filters * 16, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(pool4)
if bn:
conv5 = BatchNormalization()(conv5)
conv5 = Conv2D(n_filters * 16, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv5)
if bn:
conv5 = BatchNormalization()(conv5)
up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)
conv6 = Conv2D(n_filters * 8, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(up6)
if bn:
conv6 = BatchNormalization()(conv6)
conv6 = Conv2D(n_filters * 8, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv6)
if bn:
conv6 = BatchNormalization()(conv6)
up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)
conv7 = Conv2D(n_filters * 4, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(up7)
if bn:
conv7 = BatchNormalization()(conv7)
conv7 = Conv2D(n_filters * 4, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv7)
if bn:
conv7 = BatchNormalization()(conv7)
up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)
conv8 = Conv2D(n_filters * 2, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(up8)
if bn:
conv8 = BatchNormalization()(conv8)
conv8 = Conv2D(n_filters * 2, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv8)
if bn:
conv8 = BatchNormalization()(conv8)
up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3)
conv9 = Conv2D(n_filters * 1, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(up9)
if bn:
conv9 = BatchNormalization()(conv9)
conv9 = Conv2D(n_filters * 1, (3, 3), activation='relu', padding='same', dilation_rate=dilation_rate)(conv9)
if bn:
conv9 = BatchNormalization()(conv9)
conv10 = Conv2D(24, (1, 1), activation='softmax', padding='same', dilation_rate=dilation_rate)(conv9)
model = Model(inputs=inputs, outputs=conv10)
return model | 41.214286 | 118 | 0.67266 | 581 | 4,616 | 5.215146 | 0.153184 | 0.162376 | 0.119142 | 0.144224 | 0.744224 | 0.49538 | 0.49538 | 0.480528 | 0.480528 | 0.422442 | 0 | 0.060976 | 0.182842 | 4,616 | 112 | 119 | 41.214286 | 0.742312 | 0.047227 | 0 | 0.486486 | 0 | 0 | 0.047608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.067568 | 0 | 0.094595 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
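# Minimal usage sketch (illustrative; assumes TensorFlow 2.x is installed):
#
#   model = UNet(n_filters=16, bn=True, dilation_rate=1)
#   model.summary()  # fixed batches of 5 RGB 256x256 inputs, 24-class softmax output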
2df7323affd2ebec4b1620e5d4d2839848f89cdd | 2,546 | py | Python | ui/__init__.py | qenops/dGraph | b67c835bf60f1627a79d3e22183301f34431c5b3 | [
"Apache-2.0"
] | 1 | 2019-03-20T18:17:49.000Z | 2019-03-20T18:17:49.000Z | ui/__init__.py | qenops/dGraph | b67c835bf60f1627a79d3e22183301f34431c5b3 | [
"Apache-2.0"
] | null | null | null | ui/__init__.py | qenops/dGraph | b67c835bf60f1627a79d3e22183301f34431c5b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# pylint: disable=bad-whitespace, line-too-long
'''User interface submodule for dGraph scene description module based on glfw
David Dunn
Feb 2017 - created
ALL UNITS ARE IN METRIC
ie 1 cm = .01
www.qenops.com
'''
__author__ = ('David Dunn')
__version__ = '1.6'
__all__ = []
from dGraph.ui import dglfw as fw
from dGraph.ui.dglfw import *
import numpy as np
#from . import dglfw as fw
#from .dglfw import *
WINDOWSTACKS = {} # Each window can have 1 associated renderGraph
WINDOWS = []
# should look at glfw Monitor objects
class Display(object):
''' A class that defines the physical properties of a display AKA a monitor'''
def __init__(self, name, monitor, bezel=None,location=(0.,0.,0.)):
self.name = name
self.classifier = 'display'
self.resolution, self.colorDepth, self.fps = [np.array(a) for a in fw.get_video_mode(monitor)]
self.glResolution = np.flipud(self.resolution)
#print(self.resolution,self.colorDepth, self.fps)
self.fps = 60 if self.fps == 59 else 30 if self.fps == 29 else self.fps # fix rounding down errors
self.size = np.array(fw.get_monitor_physical_size(monitor))/1000.
self.screenPosition = np.array(fw.get_monitor_pos(monitor))
self.bezel = None if bezel is None else np.array(bezel)
self.location = None if location is None else np.array(location) # the top left corner of the display (not the bezel)
@property
def width(self):
return self.resolution[0]
@property
def height(self):
return self.resolution[1]
def pixelSize(self):
return self.size/self.resolution
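    # Worked example (illustrative numbers, not a real monitor query): a
    # 1920x1080 panel reported as 0.598 m x 0.336 m gives pixelSize() of
    # roughly [0.000311, 0.000311] metres per pixel (0.598/1920, 0.336/1080).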
def resize_window_callback(window, w, h):
    ''' BROKEN - DON'T USE
    Need to figure out how to track this;
    what is the renderStack -> window relationship??? '''
renderGraph = WINDOWSTACKS[window]
width = w if w > 1 else 2
height = h if h > 1 else 2
renderGraph._width = None
renderGraph._height = None
for cam in cameras:
cam.setResolution((width/2, height)) # for binocular ???
for node in renderGraph:
node.setup(renderGraph.width, renderGraph.height)
def get_window_id(window):
try:
id = WINDOWS.index(window)
except ValueError:
id = len(WINDOWS)
WINDOWS.append(window)
return id
def close_window(window):
id = get_window_id(window)
rg = WINDOWSTACKS.get(id, None)
if rg is not None:
rg.removeWindow(window)
WINDOWS[id] = None
fw.set_window_should_close(window, True) | 33.064935 | 126 | 0.673213 | 364 | 2,546 | 4.612637 | 0.423077 | 0.05003 | 0.025015 | 0.017868 | 0.107207 | 0.041691 | 0 | 0 | 0 | 0 | 0 | 0.016235 | 0.225844 | 2,546 | 77 | 127 | 33.064935 | 0.835616 | 0.265122 | 0 | 0.039216 | 0 | 0 | 0.010899 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0 | 0.058824 | 0.058824 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2df84c3e6613791a1bd1619fefe6e749c4454933 | 1,855 | py | Python | Server/src/quadradiusr_server/notification.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
] | 6 | 2022-02-08T11:16:39.000Z | 2022-03-27T10:41:19.000Z | Server/src/quadradiusr_server/notification.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
] | 60 | 2022-02-08T10:33:36.000Z | 2022-03-27T15:30:57.000Z | Server/src/quadradiusr_server/notification.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
] | 2 | 2022-02-11T12:50:39.000Z | 2022-02-17T00:11:32.000Z | import abc
import asyncio
import fnmatch
from abc import ABC
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Tuple
@dataclass
class Notification:
topic: str
subject_id: str
data: dict
class Handler(ABC):
@abc.abstractmethod
async def handle(self, notification: Notification):
pass
class NotificationService:
def __init__(self) -> None:
self.handlers: Dict[str, List[Tuple[str, Handler]]] = \
defaultdict(lambda: [])
def register_handler(
self, subject_id: str,
topic: str, handler: Handler):
self.handlers[subject_id].append((topic, handler))
def unregister_handler(
self, subject_id: str,
topic: str, handler: Handler):
self.handlers[subject_id].remove((topic, handler))
    def notify(self, notification: Notification):
        asyncio.create_task(self.notify_now(notification))
async def notify_now(self, notification: Notification):
handlers = set()
for subject_id, tpl in self.handlers.items():
for topic, handler in tpl:
if self._subject_matches(notification.subject_id, subject_id) and \
self._topic_matches(notification.topic, topic):
handlers.add(handler)
await asyncio.gather(*[h.handle(notification) for h in handlers])
def _subject_matches(self, subject_id: str, subject_id_wildcard: str):
if subject_id_wildcard == '*':
return True
return subject_id == subject_id_wildcard
def _topic_matches(self, topic: str, topic_wildcard: str):
if topic_wildcard == '*':
return True
        return bool(fnmatch.filter([topic], topic_wildcard))
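if __name__ == '__main__':
    # Minimal usage sketch; the subject id, topic and handler below are
    # illustrative, not part of the original module.
    class _PrintHandler(Handler):
        async def handle(self, notification: Notification):
            print(notification.topic, notification.subject_id, notification.data)

    async def _demo():
        service = NotificationService()
        service.register_handler('user-1', 'game.*', _PrintHandler())
        # 'game.move' matches the 'game.*' wildcard registered above
        await service.notify_now(Notification('game.move', 'user-1', {'x': 1}))

    asyncio.run(_demo())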
| 29.919355 | 83 | 0.646361 | 210 | 1,855 | 5.538095 | 0.271429 | 0.100602 | 0.041273 | 0.041273 | 0.1135 | 0.1135 | 0.1135 | 0.1135 | 0.1135 | 0.1135 | 0 | 0 | 0.265768 | 1,855 | 61 | 84 | 30.409836 | 0.853891 | 0 | 0 | 0.183673 | 0 | 0 | 0.001078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0.020408 | 0.163265 | 0 | 0.489796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dfab7efee4a5b2c160f01c920e607475e00542c | 12,475 | py | Python | pytorch_unet/processing/augments.py | mukeshmithrakumar/UNet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | 11 | 2019-02-03T14:20:24.000Z | 2021-06-28T15:18:59.000Z | pytorch_unet/processing/augments.py | mukeshmithrakumar/radnet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | null | null | null | pytorch_unet/processing/augments.py | mukeshmithrakumar/radnet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | 2 | 2019-07-19T20:00:24.000Z | 2020-02-18T04:49:49.000Z | import random
import cv2
import numpy as np
import torch
from torchvision.transforms import RandomApply, Compose
class PrepareImageAndMask(object):
"""Prepare images and masks like fixing channel numbers."""
def __call__(self, data):
img = data['input']
img = img[:, :, :3] # max 3 channels
img = img / 255
if 'mask' in data:
mask = data['mask']
else:
mask = np.zeros(img.shape[:2], dtype=img.dtype)
data['input'] = img.astype(np.float32)
data['mask'] = mask.astype(np.float32)
return data
def to_tensor(pic):
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic.transpose((0, 1, 2)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
class ConvertToTensor(object):
""" Converts the image to tensor.
Note:
Modified from PyTorch vision ToTensor. Converts a PIL Image or numpy.ndarray (H x W x C) in the
range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] by calling the
to_tensor function.
"""
def __call__(self, data):
trans_images_arr = np.expand_dims(data['input'], axis=0)
trans_labels_arr = np.expand_dims(data['mask'], axis=0)
data['input'] = to_tensor(trans_images_arr)
data['mask'] = to_tensor(trans_labels_arr)
return data
class ResizeToNxN(object):
"""Resize input images to rgb NxN and the masks into gray NxN.
Note:
uses cv2.INTER_LINEAR which implements bilinear interpolation for resizing.
"""
def __init__(self, n=128):
self.n = n
def __call__(self, data):
n = self.n
data['input'] = cv2.resize(data['input'], (n, n), interpolation=cv2.INTER_LINEAR)
data['mask'] = cv2.resize(data['mask'], (n, n), interpolation=cv2.INTER_NEAREST)
return data
def compute_padding(h, w, n=128):
if h % n == 0:
dy0, dy1 = 0, 0
else:
dy = n - h % n
dy0 = dy // 2
dy1 = dy - dy0
if w % n == 0:
dx0, dx1 = 0, 0
else:
dx = n - w % n
dx0 = dx // 2
dx1 = dx - dx0
return dy0, dy1, dx0, dx1
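# Worked example (illustrative): compute_padding(100, 300, n=128) returns
# (14, 14, 42, 42), i.e. height 100 is padded up to 128 and width 300 up to 384.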
class PadToNxN(object):
"""Apply Pad to image size NxN using border reflection.
Note:
uses copyMakeBorder which and BORDER_REFLECT_101 which basically reflects the border of the image to pad.
"""
def __init__(self, n=128):
self.n = n
def __call__(self, data):
n = self.n
h, w = data['input'].shape[:2]
dy0, dy1, dx0, dx1 = compute_padding(h, w, n)
data['input'] = cv2.copyMakeBorder(data['input'], dy0, dy1, dx0, dx1, cv2.BORDER_REFLECT_101)
data['mask'] = cv2.copyMakeBorder(data['mask'], dy0, dy1, dx0, dx1, cv2.BORDER_REFLECT_101)
return data
class HorizontalFlip(object):
"""Flip input and masks horizontally."""
def __call__(self, data):
data['input'] = cv2.flip(data['input'], 1)
data['mask'] = cv2.flip(data['mask'], 1)
return data
class BrightnessShift(object):
"""Applies Brightness shift to the images.
Note:
        When changing the brightness of an image, a constant is added to or subtracted from the luminance of all
        sample values. Here we shift the histogram left (subtraction) or right (addition) by up to max_value.
"""
def __init__(self, max_value=0.1):
self.max_value = max_value
def __call__(self, data):
img = data['input']
img += np.random.uniform(-self.max_value, self.max_value)
data['input'] = np.clip(img, 0, 1)
return data
class BrightnessScaling(object):
"""Applies Brightness scaling to the images.
Note:
        Brightness scaling multiplies all sample values by a random factor in [1 - max_value, 1 + max_value].
"""
def __init__(self, max_value=0.08):
self.max_value = max_value
def __call__(self, data):
img = data['input']
img *= np.random.uniform(1 - self.max_value, 1 + self.max_value)
data['input'] = np.clip(img, 0, 1)
return data
class GammaChange(object):
"""Applies Gamma change to the images.
Note:
        gamma correction is a nonlinear operation used to encode and decode luminance values in images.
"""
def __init__(self, max_value=0.08):
self.max_value = max_value
def __call__(self, data):
img = data['input']
img = img ** (1.0 / np.random.uniform(1 - self.max_value, 1 + self.max_value))
data['input'] = np.clip(img, 0, 1)
return data
def do_elastic_transform(image, mask, grid=10, distort=0.2):
height, width = image.shape[:2]
x_step = int(grid)
xx = np.zeros(width, np.float32)
prev = 0
for x in range(0, width, x_step):
start = x
end = x + x_step
if end > width:
end = width
cur = width
else:
cur = prev + x_step * (1 + random.uniform(-distort, distort))
xx[start:end] = np.linspace(prev, cur, end - start)
prev = cur
y_step = int(grid)
yy = np.zeros(height, np.float32)
prev = 0
for y in range(0, height, y_step):
start = y
end = y + y_step
if end > height:
end = height
cur = height
else:
cur = prev + y_step * (1 + random.uniform(-distort, distort))
yy[start:end] = np.linspace(prev, cur, end - start)
prev = cur
# grid
map_x, map_y = np.meshgrid(xx, yy)
map_x = map_x.astype(np.float32)
map_y = map_y.astype(np.float32)
image = cv2.remap(image, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101,
borderValue=(0, 0, 0,))
mask = cv2.remap(mask, map_x, map_y, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT_101,
borderValue=(0, 0, 0,))
# mask = (mask > 0.5).astype(np.float32)
return image, mask
class ElasticDeformation(object):
"""Applies Elastic deformation to the images.
Note:
Elastic deformation of images as described in [Simard2003]_ (with modifications).
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
"""
def __init__(self, grid=10, max_distort=0.15):
self.grid = grid
self.max_distort = max_distort
def __call__(self, data):
distort = np.random.uniform(0, self.max_distort)
img, mask = do_elastic_transform(data['input'], data['mask'], self.grid, distort)
data['input'] = img
data['mask'] = mask
return data
def do_rotation_transform(image, mask, angle=0):
height, width = image.shape[:2]
cc = np.cos(angle / 180 * np.pi)
ss = np.sin(angle / 180 * np.pi)
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2, height / 2])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101,
borderValue=(0, 0, 0,))
mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_REFLECT_101,
borderValue=(0, 0, 0,))
# mask = (mask > 0.5).astype(np.float32)
return image, mask
class Rotation(object):
"""Applies to the Rotation to the images.
Note:
        Rotates the image and mask by a random angle of up to max_angle degrees.
"""
def __init__(self, max_angle=15):
self.max_angle = max_angle
def __call__(self, data):
angle = np.random.uniform(-self.max_angle, self.max_angle)
img, mask = do_rotation_transform(data['input'], data['mask'], angle)
data['input'] = img
data['mask'] = mask
return data
def do_horizontal_shear(image, mask, scale=0):
height, width = image.shape[:2]
dx = int(scale * width)
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
box1 = np.array([[+dx, 0], [width + dx, 0], [width - dx, height], [-dx, height], ], np.float32)
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
# mask = (mask > 0.5).astype(np.float32)
return image, mask
class HorizontalShear(object):
"""Applies Horizontal Shear to the images.
Note:
horizontal shear (or shear parallel to the x axis) is a function that takes a generic point with coordinates
(x,y) to the point (x+my,y); where m is a fixed parameter, called the shear factor.
"""
def __init__(self, max_scale=0.2):
self.max_scale = max_scale
def __call__(self, data):
scale = np.random.uniform(-self.max_scale, self.max_scale)
img, mask = do_horizontal_shear(data['input'], data['mask'], scale)
data['input'] = img
data['mask'] = mask
return data
class HWCtoCHW(object):
"""Converts HWC to CHW."""
def __call__(self, data):
data['input'] = data['input'].transpose((2, 0, 1))
return data
def augmentations(args):
"""Applies random augmentations for the input images based on the transform probability.
Note:
Many methods are taken from https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/63974.
The user can specify between geometric, image or both types of transforms to the images since sometimes
some transformations work well for certain datasets.
:param args:
image_size (int) : size of the image to be resized.
transform_prob (float) : probability to apply transformations on the data.
:return:
a compose of transformations.
"""
augment_type = 'geometric'
transform_prob = args.transform_prob
if augment_type == 'geometric':
geometric_transforms = Compose([RandomApply([HorizontalShear(max_scale=0.07)], p=transform_prob),
RandomApply([Rotation(max_angle=15)], p=transform_prob),
RandomApply([ElasticDeformation(max_distort=0.15)], p=transform_prob),
ResizeToNxN(args.image_size),
ConvertToTensor()
])
return geometric_transforms
elif augment_type == 'image':
brightness_transform = Compose([RandomApply([BrightnessShift(max_value=0.1)], p=transform_prob),
RandomApply([BrightnessScaling(max_value=0.08)], p=transform_prob),
RandomApply([GammaChange(max_value=0.08)], p=transform_prob),
ResizeToNxN(args.image_size),
ConvertToTensor()
])
return brightness_transform
elif augment_type == 'both':
both_transforms = Compose([RandomApply([HorizontalShear(max_scale=0.07)], p=transform_prob),
RandomApply([Rotation(max_angle=15)], p=transform_prob),
RandomApply([ElasticDeformation(max_distort=0.15)], p=transform_prob),
RandomApply([BrightnessShift(max_value=0.1)], p=transform_prob),
RandomApply([BrightnessScaling(max_value=0.08)], p=transform_prob),
RandomApply([GammaChange(max_value=0.08)], p=transform_prob),
ResizeToNxN(args.image_size),
ConvertToTensor()
])
return both_transforms
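if __name__ == '__main__':
    # Minimal usage sketch; the attribute values below are illustrative, not
    # from the original source.
    from argparse import Namespace
    print(augmentations(Namespace(image_size=128, transform_prob=0.5)))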
| 34.271978 | 116 | 0.587735 | 1,579 | 12,475 | 4.499683 | 0.181127 | 0.031668 | 0.018578 | 0.025334 | 0.430683 | 0.389163 | 0.361295 | 0.355384 | 0.341872 | 0.330472 | 0 | 0.036818 | 0.294589 | 12,475 | 363 | 117 | 34.366391 | 0.770568 | 0.196954 | 0 | 0.457014 | 0 | 0 | 0.022529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.022624 | 0 | 0.289593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dfb60993df01b905cc7b936dd1d08f5223caffa | 21,391 | py | Python | bdscan/classComponentList.py | matthewb66/test_python_action | c6c8ff47982584a8bc124d343094ff12aedcea09 | [
"Apache-2.0"
] | null | null | null | bdscan/classComponentList.py | matthewb66/test_python_action | c6c8ff47982584a8bc124d343094ff12aedcea09 | [
"Apache-2.0"
] | null | null | null | bdscan/classComponentList.py | matthewb66/test_python_action | c6c8ff47982584a8bc124d343094ff12aedcea09 | [
"Apache-2.0"
] | null | null | null | import re
import os
# import shutil
import sys
import tempfile
import hashlib
# import sys
import json
from operator import itemgetter
from bdscan import classComponent, classNugetComponent, classNpmComponent, classMavenComponent, classPyPiComponent, \
classConanComponent, classCargoComponent, classHexComponent, classGoLangComponent, classCondaComponent, \
classDartComponent
from bdscan import utils, globals
class ComponentList:
md_directdeps_header = \
f"\n## SUMMARY Direct Dependencies with vulnerabilities:\n\n" \
f"| Direct Dependency | Changed | Num Direct Vulns | Max Direct Vuln Severity | Num Indirect Vulns | " \
f"Max Indirect Vuln Severity | Upgrade to |\n| --- | --- | --- | --- | --- | --- | --- |\n"
md_comp_lic_hdr = \
"\n## SUMMARY License violations:\n\n" \
"| Parent | Child Component | License | Policy Violated | Direct Dep Changed |\n" \
"| --- | --- | --- | --- | --- |\n"
def __init__(self):
self.compids = []
self.components = []
def add(self, compid):
if compid in self.compids:
return self.components[self.compids.index(compid)]
globals.printdebug(f"DEBUG: add(compid={compid})")
arr = re.split('[/:]', compid)
ns = arr[0]
if ns == 'npmjs':
component = classNpmComponent.NpmComponent(compid, arr[1], arr[2], ns)
elif ns == 'nuget':
component = classNugetComponent.NugetComponent(compid, arr[1], arr[2], ns)
elif ns == 'maven':
component = classMavenComponent.MavenComponent(compid, arr[1], arr[2], arr[3], ns)
elif ns == 'pypi':
component = classPyPiComponent.PyPiComponent(compid, arr[1], arr[2], ns)
elif ns == 'conan':
component = classConanComponent.ConanComponent(compid, arr[1], arr[2], ns)
elif ns == 'crates':
component = classCargoComponent.CargoComponent(compid, arr[1], arr[2], ns)
elif ns == 'hex':
component = classHexComponent.HexComponent(compid, arr[1], arr[2], ns)
elif ns == 'golang':
component = classGoLangComponent.GoLangComponent(compid, arr[1], arr[2], ns)
elif ns == 'anaconda':
component = classCondaComponent.CondaComponent(compid, arr[1], arr[2], ns)
elif ns == 'dart':
component = classDartComponent.DartComponent(compid, arr[1], arr[2], ns)
else:
component = classComponent.Component(compid, arr[1], arr[2], ns)
raise ValueError(f'Unsupported package manager {ns}')
self.components.append(component)
self.compids.append(component.compid)
return component
def set_data_in_comp(self, compid, fieldname, data):
if compid in self.compids:
index = self.compids.index(compid)
comp = self.components[index]
return comp.set_data(fieldname, data)
return False
def add_origins_to_comp(self, compid, ver, data):
if compid in self.compids:
index = self.compids.index(compid)
comp = self.components[index]
comp.set_origins(ver, data)
def get_component(self, compid):
if compid in self.compids:
return self.components[self.compids.index(compid)]
return None
def find_upgrade_versions(self, upgrade_major):
for comp in self.components:
comp.find_upgrade_versions(upgrade_major)
def validate_upgrades(self):
detect_jar = utils.get_detect_jar()
bd_output_path = 'upgrade-tests'
detect_connection_opts = [
f'--blackduck.url={globals.args.url}',
f'--blackduck.api.token={globals.args.token}',
"--detect.blackduck.scan.mode=RAPID",
# "--detect.detector.buildless=true",
# detect_connection_opts.append("--detect.maven.buildless.legacy.mode=false")
f"--detect.output.path={bd_output_path}",
"--detect.cleanup=false"
]
if globals.args.trustcert:
detect_connection_opts.append('--blackduck.trust.cert=true')
max_upgrade_count = 0
for comp in self.components:
if len(comp.potentialupgrades) > max_upgrade_count:
max_upgrade_count = len(comp.potentialupgrades)
upgrade_index = 0
while upgrade_index <= max_upgrade_count:
print(f'BD-Scan-Action: Validating upgrades cycle {upgrade_index+1} ...')
# dirname = "snps-upgrade-" + direct_name + "-" + direct_version
dirname = tempfile.TemporaryDirectory()
# os.mkdir(dirname)
origdir = os.getcwd()
os.chdir(dirname.name)
test_upgrade_list = []
test_origdeps_list = []
for comp in self.components:
# Do not process components in package managers not supported by direct upgrade guidance, but use
# regular upgrade guidance if available
if not comp.supports_direct_upgrades():
if globals.debug: print(f"DEBUG: Component {comp.name} via package manager {comp.pm} does not"
"support direct upgrades, skipping")
if comp.upgradeguidance and comp.upgradeguidance[0]:
comp.goodupgrade = comp.upgradeguidance[0]
elif comp.upgradeguidance and comp.upgradeguidance[1]:
comp.goodupgrade = comp.upgradeguidance[1]
continue
if comp.goodupgrade == '' and len(comp.potentialupgrades) > upgrade_index:
if comp.prepare_upgrade(upgrade_index):
test_upgrade_list.append([comp.org, comp.name, comp.potentialupgrades[upgrade_index]])
globals.printdebug(f"Will test upgrade {comp.name}/{comp.version} to "
f"{comp.potentialupgrades[upgrade_index]}")
test_origdeps_list.append(comp.compid)
if len(test_origdeps_list) == 0:
os.chdir(origdir)
dirname.cleanup()
upgrade_index += 1
continue
pm_list = []
for comp in self.components:
if comp.pm not in pm_list and comp.compid in test_origdeps_list:
pm_list.append(comp.pm)
comp.finalise_upgrade()
if len(pm_list) == 1 and pm_list[0] == 'maven' and \
"--detect.detector.buildless=true" not in detect_connection_opts:
detect_connection_opts.append("--detect.detector.buildless=true")
output = False
if globals.debug > 0:
output = True
pvurl, projname, vername, retval = utils.run_detect(detect_jar, detect_connection_opts, output)
if retval == 3:
# Policy violation returned
rapid_scan_data, dep_dict, direct_deps_vuln = utils.process_scan(bd_output_path, globals.bd)
# process_scan(scan_folder, bd, baseline_comp_cache, incremental, upgrade_indirect):
last_vulnerable_dirdeps = []
for vulndep in direct_deps_vuln.components:
#
# find comp in depver_list
for upgradedep, origdep in zip(test_upgrade_list, test_origdeps_list):
if upgradedep[1] == vulndep.name:
# vulnerable_upgrade_list.append([origdep, upgradedep[2]])
last_vulnerable_dirdeps.append(origdep)
break
elif retval != 0:
# Other Detect failure - no upgrades determined
last_vulnerable_dirdeps = []
for upgradedep, origdep in zip(test_upgrade_list, test_origdeps_list):
# vulnerable_upgrade_list.append([origdep, upgradedep[2]])
last_vulnerable_dirdeps.append(origdep)
else:
# Detect returned 0
# All tested upgrades not vulnerable
last_vulnerable_dirdeps = []
for lcomp in self.components:
if (lcomp.compid in test_origdeps_list and lcomp.compid not in last_vulnerable_dirdeps and
len(lcomp.potentialupgrades) >= upgrade_index and lcomp.goodupgrade == ''):
lcomp.set_data('goodupgrade', lcomp.potentialupgrades[upgrade_index])
os.chdir(origdir)
dirname.cleanup()
upgrade_index += 1
return
def check_in_baselineproj(self, baseline_data):
for basecomp in baseline_data:
for baseorig in basecomp['origins']:
if baseorig['externalNamespace'] != '':
basecompid = f"{baseorig['externalNamespace']}:{baseorig['externalId']}"
else:
basecompid = baseorig['externalId']
if basecompid in self.compids:
comp = self.get_component(basecompid)
comp.set_data('inbaseline', True)
break
# def check_projfiles(self):
# for comp in self.components:
# package_file, package_line = comp.get_package_file()
# if package_file == 'Unknown' or package_line <= 0:
# # component doesn't exist in pkgfile - skip
# continue
# package_file = utils.remove_cwd_from_filename(package_file)
# if package_file not in comp.projfiles:
# comp.set_data('projfiles', package_file)
# comp.set_data('projfilelines', package_line)
def get_children(self, dep_dict):
for comp in self.components:
children = []
for alldep in dep_dict.keys():
if comp.compid in dep_dict[alldep]['directparents']:
children.append(alldep)
comp.set_data('children', children)
def calc_vulns(self, rapid_scan_data):
for comp in self.components:
max_vuln_severity = 0
max_vuln_severity_children = 0
existing_vulns = []
existing_vulns_children = []
existing_lic_violations = []
existing_lic_violations_children = []
for rscanitem in rapid_scan_data['items']:
child = False
parent = False
if rscanitem['componentIdentifier'] == comp.compid:
parent = True
else:
for childid in comp.children:
if rscanitem['componentIdentifier'] == childid:
child = True
break
if not parent and not child:
continue
for vuln in rscanitem['policyViolationVulnerabilities']:
# print(f"vuln={vuln}")
parent_name = '-'
parent_ver = '-'
if parent:
if vuln['name'] in existing_vulns:
continue
if max_vuln_severity < vuln['overallScore']:
max_vuln_severity = vuln['overallScore']
elif child:
if vuln['name'] in existing_vulns_children:
continue
if max_vuln_severity_children < vuln['overallScore']:
max_vuln_severity_children = vuln['overallScore']
parent_name = comp.name
parent_ver = comp.version
child_ns, child_name, child_ver = comp.parse_compid(rscanitem['componentIdentifier'])
desc = vuln['description'].replace('\n', ' ')
if len(desc) > 200:
desc = desc[:196]
desc += ' ...'
name = vuln['name']
link = f"{globals.args.url}/api/vulnerabilities/{name}/overview"
vulnname = f'<a href="{link}" target="_blank">{name}</a>'
if comp.inbaseline:
changed = 'No'
else:
changed = 'Yes'
vuln_item = [
f"{parent_name}/{parent_ver}",
f"{child_name}/{child_ver}",
vulnname,
str(vuln['overallScore']),
vuln['violatingPolicies'][0]['policyName'],
desc,
changed
]
if parent and vuln['name'] not in existing_vulns:
comp.add_vuln(name, vuln_item)
comp.set_data('maxvulnscore', max_vuln_severity)
if child and vuln['name'] not in existing_vulns_children:
comp.add_child_vuln(name, vuln_item)
comp.set_data('maxchildvulnscore', max_vuln_severity_children)
# TODO: Revisit license violations
for lic in rscanitem['policyViolationLicenses']:
parent_name = '-'
parent_ver = '-'
if parent:
print(f"lic={lic}")
if lic['name'] in existing_lic_violations:
continue
#if max_vuln_severity < vuln['overallScore']:
# max_vuln_severity = vuln['overallScore']
elif child:
if lic['name'] in existing_lic_violations_children:
continue
#if max_vuln_severity_children < vuln['overallScore']:
# max_vuln_severity_children = vuln['overallScore']
parent_name = comp.name
parent_ver = comp.version
child_ns, child_name, child_ver = comp.parse_compid(rscanitem['componentIdentifier'])
name = lic['name']
# TODO: This link is not user friendly; follow to generate correct link
link = lic['_meta']['href']
#link = f"{globals.args.url}/api/vulnerabilities/{name}/overview"
licname = f'<a href="{link}" target="_blank">{name}</a>'
if comp.inbaseline:
changed = 'No'
else:
changed = 'Yes'
lic_item = [
f"{parent_name}/{parent_ver}",
f"{child_name}/{child_ver}",
licname,
lic['violatingPolicies'][0]['policyName'],
changed
]
if parent and lic['name'] not in existing_lic_violations:
comp.add_lic_violation(name, lic_item)
#comp.set_data('maxvulnscore', max_vuln_severity)
if child and lic['name'] not in existing_lic_violations_children:
comp.add_child_lic_violation(name, lic_item)
#comp.set_data('maxchildvulnscore', max_vuln_severity_children)
# Sort the tables
# vuln_list = sorted(vuln_list, key=itemgetter(3), reverse=True)
# vuln_list_children = sorted(vuln_list_children, key=itemgetter(3), reverse=True)
return
def write_sarif(self, sarif_file):
if os.path.exists(sarif_file):
os.remove(sarif_file)
if os.path.exists(sarif_file):
print(f'BD-Scan-Action: ERROR: Unable to write SARIF file {sarif_file}')
return False
sarif_result = []
sarif_tool_rule = []
for comp in self.components:
# md_comp_vulns_table = comp.md_table()
projfile = ''
projfileline = 1
if len(comp.projfiles) > 0:
projfile = comp.projfiles[0]
if len(comp.projfilelines) > 0:
projfileline = comp.projfilelines[0]
sarif_result.append(
{
'ruleId': comp.name,
'message': {
'text': comp.shorttext()
},
'locations': [
{
'physicalLocation': {
'artifactLocation': {
'uri': projfile,
},
'region': {
'startLine': projfileline,
}
}
}
],
'partialFingerprints': {
'primaryLocationLineHash': hashlib.sha224(b"{compid}").hexdigest(),
}
}
)
if comp.maxchildvulnscore >= 7 or comp.maxvulnscore >= 7:
level = "error"
elif comp.maxchildvulnscore >= 4 or comp.maxvulnscore >= 4:
level = "warning"
else:
level = "note"
if comp.goodupgrade != '':
uhelp = f"{comp.longtext_md()}\n\nRecommended to upgrade to version {comp.goodupgrade}.\n\n"
else:
uhelp = f"{comp.longtext_md()}\n\nNo upgrade available at this time.\n\n"
sarif_tool_rule.append(
{
'id': comp.name,
'shortDescription': {
'text': comp.shorttext(),
},
'fullDescription': {
'text': comp.longtext(),
},
'help': {
'text': '',
'markdown': uhelp,
},
'defaultConfiguration': {
'level': level,
},
'properties': {
'tags': ["security"],
'security-severity': str(comp.maxvulnscore)
}
}
)
code_security_scan_report = {
'$schema': "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
'version': "2.1.0",
'runs': [
{
'tool': {
'driver': {
'name': 'Synopsys Black Duck',
'organization': 'Synopsys',
'version': globals.scan_utility_version,
'rules': sarif_tool_rule,
}
},
'results': sarif_result,
}
],
}
try:
with open(sarif_file, "w") as fp:
json.dump(code_security_scan_report, fp, indent=4)
except Exception as e:
print(f"BD-Scan-Action: ERROR: Unable to write to SARIF output file '{sarif_file} - '" + str(e))
return False
return True
def get_comments(self, incremental):
md_main_table = []
md_comp_data_string = ''
md_lic_table_string = ''
for comp in self.components:
if incremental and comp.inbaseline:
continue
if comp.get_num_vulns() > 0:
md_main_table.append(comp.md_summary_table_row())
md_comp_data_string += f"\n### Direct Dependency: {comp.name}/{comp.version}" + comp.md_table()
md_lic_table_string += comp.md_lic_table()
# Sort main table here
md_main_table = sorted(md_main_table, key=itemgetter(4), reverse=True)
md_main_table = sorted(md_main_table, key=itemgetter(6), reverse=True)
sep = ' | '
md_main_table_string = ''
for row in md_main_table:
md_main_table_string += '| ' + sep.join(row) + ' |\n'
md_comments = ''
if len(md_main_table) > 0:
md_comments += self.md_directdeps_header + md_main_table_string
if (len(md_lic_table_string) > 1):
md_comments += self.md_comp_lic_hdr + md_lic_table_string
if len(md_main_table) > 0:
md_comments += '\n\nVulnerable Direct Dependencies listed below:\n\n' + md_comp_data_string
return md_comments
def print_upgrade_summary(self):
print('\n------------------------------------------------------------------------------------')
print('SUMMARY UPGRADE GUIDANCE:')
for comp in self.components:
if comp.goodupgrade != '':
upg = f'Upgrade to {comp.goodupgrade}'
else:
upg = 'No Upgrade Available'
print(f'- {comp.name}/{comp.version}: {upg}')
print('------------------------------------------------------------------------------------\n')
def supports_direct_upgrades(self):
return False
| 42.867735 | 120 | 0.511757 | 2,018 | 21,391 | 5.238355 | 0.18781 | 0.022514 | 0.019866 | 0.013528 | 0.314824 | 0.262984 | 0.236118 | 0.217293 | 0.171602 | 0.137357 | 0 | 0.00616 | 0.385255 | 21,391 | 498 | 121 | 42.953815 | 0.797719 | 0.088402 | 0 | 0.233503 | 0 | 0.005076 | 0.149157 | 0.049959 | 0 | 0 | 0 | 0.002008 | 0 | 1 | 0.035533 | false | 0 | 0.022843 | 0.002538 | 0.098985 | 0.032995 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dfc29d7013519f59daa6312e09bc8cdb96754d9 | 1,334 | py | Python | apps/posts/models.py | DiceNameIsMy/starnavi-task | e2e8d20889d9b4d5cf02e332d88b7b9ec5f4aee4 | [
"MIT"
] | 1 | 2021-10-04T03:08:25.000Z | 2021-10-04T03:08:25.000Z | apps/posts/models.py | DiceNameIsMy/starnavi-task | e2e8d20889d9b4d5cf02e332d88b7b9ec5f4aee4 | [
"MIT"
] | null | null | null | apps/posts/models.py | DiceNameIsMy/starnavi-task | e2e8d20889d9b4d5cf02e332d88b7b9ec5f4aee4 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
from django.utils import timezone
# Saving media locally is not good practice
# for a big social network; Google Cloud or AWS
# might be a better alternative.
def get_user_image_path(instance, filename):
return f'users/{instance.author}/posts/{instance.id}/{filename}'
class Post(models.Model):
author = models.ForeignKey(
to=get_user_model(),
null=True,
on_delete=models.SET_NULL,
related_name='posts'
)
image = models.ImageField(
upload_to=get_user_image_path,
blank=True,
null=True,
)
text = models.CharField(
max_length=8192,
blank=True
)
likes = models.ManyToManyField(
to=get_user_model(),
blank=True,
through='Like'
)
created_at = models.DateTimeField(default=timezone.now)
updated_at = models.DateTimeField(auto_now=True)
    def count_likes(self):
        return self.likes.count()
class Like(models.Model):
author = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE
)
post = models.ForeignKey(
Post,
on_delete=models.CASCADE
)
date = models.DateField(default=timezone.now)
time = models.TimeField(default=timezone.now)
| 25.653846 | 68 | 0.664918 | 166 | 1,334 | 5.192771 | 0.493976 | 0.048724 | 0.055684 | 0.037123 | 0.076566 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003941 | 0.23913 | 1,334 | 51 | 69 | 26.156863 | 0.84532 | 0.096702 | 0 | 0.243902 | 0 | 0 | 0.052456 | 0.044963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0.04878 | 0.463415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dfd2b75b22705c1bffc7af8fe1e8acd1e0693c7 | 350 | py | Python | ABC104/ABC104b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC104/ABC104b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC104/ABC104b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | # ABC104b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
s = input()[:-1]
#print(s.count('C', 2, -1))
if (s[0] != 'A' or s.count('C', 2, -1) != 1):
print('WA')
# print('hi')
exit()
cPos = s.find('C', 2, -1)
if (s[1:cPos].islower() and s[cPos + 1:].islower()):
# print('hi')
print('AC')
exit()
print('WA')
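# Illustrative cases (reasoning only; AtCoder supplies the input on stdin):
# 'AtCoder' -> AC (starts with 'A', exactly one 'C' in s[2:-1], rest lowercase)
# 'ACoder'  -> WA (its 'C' is too early to be counted by s.count('C', 2, -1))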
| 19.444444 | 52 | 0.525714 | 59 | 350 | 3.118644 | 0.457627 | 0.032609 | 0.048913 | 0.086957 | 0.146739 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060498 | 0.197143 | 350 | 17 | 53 | 20.588235 | 0.594306 | 0.162857 | 0 | 0.333333 | 0 | 0 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9300b3516d09a5957e2dfc67d70aa1ba14f76b6f | 2,761 | py | Python | HMM_Construction_Scripts/s01-make_tree_dataset.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | HMM_Construction_Scripts/s01-make_tree_dataset.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | HMM_Construction_Scripts/s01-make_tree_dataset.py | dantaslab/resfams_update | 982091818a299d316811fe98c7656762be7284fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
File Name : s01-make_tree_dataset.py
Author : Max Bernstein
Created On : 2019-07-12
Last Modified : 2019-12-17
Description     : A program to prep and rename sequences in order to make tree
                  analysis easier
Dependencies : py-biopython
Usage : s01-make_tree_dataset.py --infile blactamA.fasta
--blast blactamA_card_blast.txt --family blactamA
--out_path path/to/output/directory/
CHANGE LOG :
TODO :
"""
import sys
import os
import argparse
import csv
import re
from Bio import SeqIO
def main(argv):
    args = parse_arguments(argv)
    infile = args.infile
    # read the tabular BLAST results once (query id in column 0, subject id in
    # column 1; assumes standard tab-separated output such as -outfmt 6)
    with open(args.blast) as blast_handle:
        blast_hits = list(csv.reader(blast_handle, delimiter='\t'))

    #retrieve blast data
    seqInfo = {}
    sequences = {}
    for record in SeqIO.parse(infile, 'fasta'):
        array = []
        for hit in blast_hits:
            if record.id == hit[0] and hit[1] not in array:
                array.append(hit[1])
        seqInfo[record.id] = array
        sequences[record.id] = str(record.seq)
    #rename sequences
seq_file = open("{}/{}_tree_dataset.faa".format(args.out_path,args.family), 'w+')
mapping_file = open("{}/{}_tree_mappingFile.txt".format(args.out_path,args.family), 'w+')
counter = 1
    for seq, data in seqInfo.items():
        outHeaders = []
        seqHeader = seq.split("|")
        outSeq = "RF-" + str(counter) + "|" + seq
        bHitCount = 1
        for bData in data:
            outSeq = "RF-" + str(counter) + "-" + str(bHitCount) + "|" + seqHeader[-1] + "|" + bData.split("|")[-1]
            outHeaders.append(outSeq)
            bHitCount += 1
        if len(outHeaders) < 1:
            outHeaders.append(outSeq)
        for header in outHeaders:
            print(header + "\t" + seq)
            mapping_file.write(header + "\t" + seq + "\n")
            seq_file.write(">" + header + "\n")
            seq_file.write(sequences[seq] + "\n")
        counter += 1
def parse_arguments(argv):
parser = argparse.ArgumentParser(
prog = 'make_tree_dataset.py',
        description = 'A program to prep and rename sequences in order to make tree analysis easier')
parser.add_argument(
'-i', '--infile',
help = 'path to input sequences to make trees',
required = True
)
parser.add_argument(
'-b', '--blast',
help = 'path to input blast file',
required = True
)
parser.add_argument(
'-f', '--family',
help = 'assumed resistance family of input sequences',
required = True
)
parser.add_argument(
'-o', '-outpath',
dest = 'out_path',
help = 'Enter path to output directory'
)
return parser.parse_args()
if __name__=="__main__":
main(sys.argv[1:])
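# Expected --blast input (assumption: standard tab-separated BLAST output, e.g. -outfmt 6):
# query_id<TAB>subject_id<TAB>pident<TAB>...   (only the first two columns are used above)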
| 25.803738 | 115 | 0.572619 | 331 | 2,761 | 4.661631 | 0.374622 | 0.025924 | 0.04407 | 0.033053 | 0.214517 | 0.13221 | 0.13221 | 0.095917 | 0.095917 | 0.095917 | 0 | 0.016054 | 0.300616 | 2,761 | 106 | 116 | 26.04717 | 0.783014 | 0.202825 | 0 | 0.140625 | 0 | 0 | 0.167123 | 0.021918 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0.03125 | false | 0 | 0.09375 | 0 | 0.140625 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9301758f56b6b3c8022d921d1f16d3e4bc73189b | 39,391 | py | Python | modules/morsecraft.py | Crazy-Ginger/MOSAR | 74f1a7ca1f17a90ede61f37d223a2ae4de6a1088 | [
"MIT"
] | null | null | null | modules/morsecraft.py | Crazy-Ginger/MOSAR | 74f1a7ca1f17a90ede61f37d223a2ae4de6a1088 | [
"MIT"
] | null | null | null | modules/morsecraft.py | Crazy-Ginger/MOSAR | 74f1a7ca1f17a90ede61f37d223a2ae4de6a1088 | [
"MIT"
] | 2 | 2020-09-18T00:02:16.000Z | 2021-02-22T23:42:30.000Z | #!/usr/bin/env python3.5
"""Spacecraft made up of Modules used in conjunction with morse for simulation"""
import math
import operator as op
import numpy as np
import jsonpickle as pickler
from .craftmodule import Module as Module
from .scripts import modControl as modCon
_authors_ = ["Mark A Post", "Rebecca Wardle"]
_copyright_ = "Copyright 2020 Rebecca Wardle"
_license_ = "MIT License"
_credit_ = [
"Mark A Post",
"Rebecca Wardle",
"Robert Fitch",
"Daniela Rus",
"Zachary Butler",
]
_version_ = "-0.1"
class Spacecraft:
"""
    A spacecraft class that stores a dictionary of modules and manages their connections and rearrangement.
terms:
modules: cubesats (10cm x 10cm x 10cm) with a port on each face
base_ports: these are the ports of the modules assuming the module has no rotation applied to it
they are laid out so that 0->2, 1->3, 4->5
x axis passes through 0 -> 2
y axis passes through 3 -> 1
z axis passes through 5 -> 4
+---+
| 4 |
+---+---+---+---+
| 0 | 3 | 2 | 1 |
+---+---+---+---+
| 5 |
+---+
"""
def __get_coord_path(self, mod_path, final_port, clearance=None):
"""
pass a list of modules and the port the module will be connected to, returns a list of coordinates around the path
:param mod_path: path of modules from the root to the module being relocated
:param final_port: the port the module will be connected to
:param clearance: (optional) how far to keep the module from the structure
:returns: numpy array of floating point numbers that should be external to the modules
TODO
----
        find out why it sometimes outputs duplicate coords for the first 3 or so lines of the path
refactor to remove some for loops
take into account the orientation of the modules (will cause problems with different sized mods)
"""
if clearance is None:
clearance = self.precision
if type(mod_path) != list:
mod_path = list(mod_path)
# initial variables and conditions
mod_path = mod_path[::-1]
moving_mod = mod_path[0]
path = np.array([np.round(self.modules[moving_mod].pos, 2)])
final_pos = self._get_new_position(mod_path[-1], moving_mod, final_port)
moving_mod = self.modules[mod_path[0]]
# get the direction of clearance to place the module clear of the structure
# also ensures that the direction of movement is counter after 2nd connection
# for j in range(2):
# diff = np.round(list(map(op.sub, list(self.modules[mod_path[j]].pos), list(self.modules[mod_path[j+1]].pos))), 3)
# for index in range(len(diff)):
# if abs(diff[index]) >= 0.1:
# axis_of_movement = index
# if j != 0:
# break
# offset = np.round(clearance * np.sign(self.modules[mod_path[0]].pos[index] - self.modules[mod_path[1]].pos[index]), 4)
# finds the vector the first connection moves in so the vector of clearance can be found
diff = np.round(list(map(op.sub, list(self.modules[mod_path[0]].pos), list(self.modules[mod_path[1]].pos),)), 3)
for index in range(len(diff)):
if abs(diff[index]) >= 0.1:
axis_of_movement = index
offset = np.round(clearance * np.sign(self.modules[mod_path[0]].pos[index] - self.modules[mod_path[1]].pos[index]), 4)
# if the path is long enough, ignore the first connection when finding the axis of movement
if len(mod_path) > 2:
diff = np.round(list(map(op.sub, list(self.modules[mod_path[1]].pos), list(self.modules[mod_path[2]].pos))), 3)
for index in range(len(diff)):
if abs(diff[index]) >= 0.1:
axis_of_movement = index
# gets the corners in the path
# still using 0.1 for module size; needs altering to take the module dimensions into account
# also doesn't account for module rotation
for index in range(2, len(mod_path)):
diff = np.round(list(map(op.sub, self.modules[mod_path[index - 1]].pos, self.modules[mod_path[index]].pos)), 3)
if abs(diff[axis_of_movement]) > 0.1:
path = np.concatenate([path, np.array([list(np.round(self.modules[mod_path[index - 1]].pos, 2))])])
for index in range(len(diff)):
if abs(diff[index]) > clearance:
axis_of_movement = index
break
# if a chain then add motions up and over the chain (not sure if necessary, chains don't seem to require it)
if len(path) == 1:
# appends the final position as a waypoint to move the module around the chain
over = np.array([final_pos])
path = np.concatenate((path, over))
else:
# add the final destination to the path
path = np.concatenate((path, np.array([final_pos])))
# make the first movement to place the module clear of the structure
path[0][axis_of_movement] += offset
mod_coords = np.array([self.modules[x].pos for x in mod_path])
# now with corners in mod_path, extrapolate external coordinates (seems to add a duplicate of the first movement)
for i in range(1, len(path)):
# take the previous offset to ensure that clearance is maintained in that axis
dims = [0, 1, 2]
path[i][axis_of_movement] = path[i - 1][axis_of_movement]
dims.remove(axis_of_movement)
for dim in dims:
# if no change from previous axis skip it
if np.round(path[i][dim], 2) == np.round(path[i - 1][dim], 2):
break
axis_of_movement = dim
# checks if the module is the last and if so just add the offset
if i == len(path) - 1:
path[i][dim] += np.round(clearance * np.sign(path[i][dim] - path[i - 1][dim]), 4)
# adds the clearance
else:
try:
cur_mod = mod_path[np.where((mod_coords == path[i]).all(axis=1))[0][0]]
except IndexError:
# note: execution falls through here with cur_mod unset or stale, so the offset below may be wrong
print("%s coordinates don't appear to be related to another module in the path" % (path[i],))
offset = np.round((clearance + self.modules[cur_mod].dims[dim] / 2 + moving_mod.dims[dim] / 2) * np.sign(path[i][dim] - path[i - 1][dim]), 4)
path[i][dim] += offset
path = np.concatenate((path, np.array([final_pos])))
# finally move the module around the last corner and onto the docking position
for dim in range(len(path[-1])):
if np.round(abs(path[-2][dim] - path[-1][dim]), 4) == clearance:
path[-1][dim] = path[-2][dim]
continue
else:
path[-1][dim] = final_pos[dim]
path = np.concatenate((path, np.array([final_pos])))
final_mod_path = np.array(path)
return np.round(final_mod_path, 2)
def __get_isolated_mod(self, root):
"""
finds the first singly-connected ("isolated") module via BFS from root, and the path from root to it
:param root: the root module in the rearrangement
:returns: module key, list of module keys
"""
to_visit = [[root]]
visited = set()
while to_visit:
path = to_visit.pop(0)
current_node = str(path[-1])
to_return = True
# TODO consider if this will skip anything
if current_node in visited:
continue
# checks if current_node is only connected by 1 link
if sum(x is None for x in self.modules[current_node].cons) == 5 and current_node != root:
return current_node, path
# add the children nodes in order
elif current_node not in visited:
for child in self.modules[current_node].cons:
if child is not None and child not in visited:
new_path = list(path)
new_path.append(child)
to_visit.append(new_path)
to_return = False
visited.add(current_node)
# print(current_node, ": ", path, "\nvisited: ", visited, "\n")
if to_return is True:
return current_node, visited
def __init__(self, tag_length=3, precision=0.01, is_goal=False):
"""
constructor
:param tag_length: int, length of the tags at the end of the module names that describe their speciality
:param precision: float, general precision of the movements to be made
:param is_goal: bool, whether the craft being created is a goal template (its movements are never executed)
"""
self._root = None
self.modules = {}
self.goal = None
self.tag_len = tag_length
self.precision = precision
self.is_goal = is_goal
def add_mod(self, new_id, position, size=(0.1, 0.1, 0.1), rotation=(0, 0, 0)):
"""
Add an unconnected module to the craft dictionary
:param new_id: string, id of the new module
:param position: tuple(floats), x, y, z coordinates of the module
:param size: tuple(floats), x, y, z dimensions of the module
:param rotation: tuple, rotation of the module in x, y, z (cartesian)
"""
position = np.round(position, 4)
new_mod = Module(new_id, size, position)
new_mod.type = new_id[-self.tag_len:]
if not self._root:
self._root = new_mod
x = math.radians(rotation[0]) / 2
y = math.radians(rotation[1]) / 2
z = math.radians(rotation[2]) / 2
cos = math.cos
sin = math.sin
new_mod.rotation = [
cos(x) * cos(y) * cos(z) + sin(x) * sin(y) * sin(z),
sin(x) * cos(y) * cos(z) - cos(x) * sin(y) * sin(z),
cos(x) * sin(y) * cos(z) + sin(x) * cos(y) * sin(z),
cos(x) * cos(y) * sin(z) - sin(x) * sin(y) * cos(z),
]
self.modules[str(new_id)] = new_mod
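# Sketch (added note, not original code): the four terms above are a standard
# half-angle Euler-to-quaternion conversion, q = [w, x, y, z]. A quick sanity
# check, assuming a Spacecraft instance:
#
#   craft = Spacecraft()
#   craft.add_mod("mod0_std", (0, 0, 0), rotation=(0, 0, 0))
#   craft.modules["mod0_std"].rotation   # -> [1.0, 0.0, 0.0, 0.0] (identity)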
def set_rotation(self, mod_id, rotation):
""""
Set a module's rotation, cannot be done if the module is already connected to another
:param mod_id: id of the module to rotate
:param rotation: rotation of the module in x, y, z format
:raises KeyError: raises an exception if already connected to structure
"""
if sum(x is None for x in self.modules[mod_id].cons) != 6:
raise KeyError("%s is connected to another module" % (mod_id))
x = math.radians(rotation[0]) / 2
y = math.radians(rotation[1]) / 2
z = math.radians(rotation[2]) / 2
cos = math.cos
sin = math.sin
self.modules[mod_id].rotation = [
cos(x) * cos(y) * cos(z) + sin(x) * sin(y) * sin(z),
sin(x) * cos(y) * cos(z) - cos(x) * sin(y) * sin(z),
cos(x) * sin(y) * cos(z) + sin(x) * cos(y) * sin(z),
cos(x) * cos(y) * sin(z) - sin(x) * sin(y) * cos(z),
]
# if rotation is added to modController uncomment to implement the effects on the simulator
# modCon.set_rotation(rotation)
def create_goal(self, add_mods=True, mod_root=False):
"""
creates a sub-object that can then be manipulated to set the goal state of the spacecraft
:param add_mods: (optional) to add all the modules in the current craft
:param mod_root: (optional) select a module to maintain position; if not set, an isolated module found by BFS is used
"""
self.goal = Spacecraft(self.tag_len, self.precision, is_goal=True)
# adds the modules to the goal, preserving names, positions and dimensions
if add_mods:
for key in self.modules.keys():
self.goal.add_mod(str(key), self.modules[key].pos, self.modules[key].dims)
# sets the root of the goal
if mod_root:
self.goal._root = mod_root
else:
self.goal._root, dump_path = self.__get_isolated_mod(next(iter(self.modules)))
def _get_new_position(self, fixed_mod, moving_mod, port_id):
"""
finds the new positional coordinates of the module being moved
:param fixed_mod: module that is not being moved
:param moving_mod: module that is being moved to connect to the fixed module
:param port_id: base port on the fixed module the moving module will be attached via
:returns: tuple of x, y, z coords of new position for moving module
"""
fixed_mod = self.modules[fixed_mod]
moving_mod = self.modules[moving_mod]
# detect modules with more than one port per face (change offset)
# first get x, y, z diffs to be added
x_diff = (fixed_mod.dims[0] / 2) + (moving_mod.dims[0] / 2)
y_diff = (fixed_mod.dims[1] / 2) + (moving_mod.dims[1] / 2)
z_diff = (fixed_mod.dims[2] / 2) + (moving_mod.dims[2] / 2)
# lookup table indexed by port_id to select the correct offset
ports = [
[-x_diff, 0, 0],
[0, y_diff, 0],
[x_diff, 0, 0],
[0, -y_diff, 0],
[0, 0, z_diff],
[0, 0, -z_diff],
]
# convert quaternions to rotation matrix which can be applied upon the ports
q = fixed_mod.rotation
rotation = np.array(
[
[
1 - 2 * (q[2] ** 2 + q[3] ** 2),
2 * (q[1] * q[2] - q[3] * q[0]),
2 * (q[1] * q[3] + q[2] * q[0]),
0,
],
[
2 * (q[1] * q[2] + q[3] * q[0]),
1 - 2 * (q[1] ** 2 + q[3] ** 2),
2 * (q[2] * q[3] - q[1] * q[0]),
0,
],
[
2 * (q[1] * q[3] - q[2] * q[0]),
2 * (q[2] * q[3] + q[1] * q[0]),
1 - 2 * (q[1] ** 2 + q[2] ** 2),
0,
],
[0, 0, 0, 1],
]
)
# select the port from port_id
diff = np.array(ports[port_id] + [0])
# apply rotation matrix to get new direction of offset then add to fixed mod position
return tuple(map(op.add, fixed_mod.pos, tuple(rotation.dot(diff))[:3]))
def _check_adjacency(self, mod_a, mod_b):
"""
finds if 2 modules are adjacent to each other based on coordinates and dimensions
:param mod_a: primary module key
:param mod_b: secondary module key
:returns: base port id if adjacent, None if not adjacent
"""
mod_a = self.modules[mod_a]
mod_b = self.modules[mod_b]
for i in range(len(mod_a.pos) * 2):
# ensures that both directions of each dim are tested
if i % 2 == 0:
mul = 1
else:
mul = -1
# sets only one dim to the offset the modules would be
difference = [0] * len(mod_a.pos)
difference[i % len(mod_a.pos)] = mul * ((mod_a.dims[i % 3] / 2) + (mod_b.dims[i % 3] / 2))
mod_position = tuple(map(op.add, tuple(mod_a.pos), tuple(difference)))
# refactor into single line without for loop (using sum)
to_return = True
for j in range(len(mod_position)):
if abs(mod_position[j] - mod_b.pos[j]) >= self.precision:
to_return = False
break
if to_return:
port_ids = [2, 3, 4, 0, 1, 5]
return port_ids[i]
return None
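# Worked example (added, illustrative): two default 0.1-unit cubes placed
# 0.1 apart along +x are adjacent through base port 2 (the +x face):
#
#   craft = Spacecraft()
#   craft.add_mod("a_std", (0.0, 0.0, 0.0))
#   craft.add_mod("b_std", (0.1, 0.0, 0.0))
#   craft._check_adjacency("a_std", "b_std")   # -> 2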
def _check_chain(self, mod):
"""
calculate max length of chain around given module
:param mod: module to check as origin of the chain
:returns: the base port on which the chain starts, the number of modules contained in the chain
"""
max_length = 0
for port in self.modules[mod].cons:
if port is not None:
max_length += 1
# diff = list(map(op.sub, self.modules[mod].pos, self.modules[port].pos))
# cont = True
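# NOTE (added): _check_chain is unfinished; it tallies the occupied ports
# into max_length but never returns the (port, length) pair its docstring
# promises.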
def _get_port(self, mod, base_port):
"""
returns the actual port to connect with, given a module and the port it would connect through if it had no rotation
:param mod: module which has the rotation checked
:param base_port: the port without rotation (gets axis/direction in which port points)
:returns: port that now points in the direction of the base ports
:raises: ValueError
"""
base_direcs = np.array(
[
[-1, 0, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, -1, 0],
]
)
direction = np.array(base_direcs[base_port])
q = self.modules[mod].rotation
rotation = np.array(
[
[
1 - 2 * (q[2] ** 2 + q[3] ** 2),
2 * (q[1] * q[2] - q[3] * q[0]),
2 * (q[1] * q[3] + q[2] * q[0]),
0,
],
[
2 * (q[1] * q[2] + q[3] * q[0]),
1 - 2 * (q[1] ** 2 + q[3] ** 2),
2 * (q[2] * q[3] - q[1] * q[0]),
0,
],
[
2 * (q[1] * q[3] - q[2] * q[0]),
2 * (q[2] * q[3] + q[1] * q[0]),
1 - 2 * (q[1] ** 2 + q[2] ** 2),
0,
],
[0, 0, 0, 1],
]
)
rotated = np.round(base_direcs.dot(rotation))
# now check which port has the same vector as the base port
for index in range(len(base_direcs)):
if np.array_equal(rotated[index], direction):
return index
raise ValueError("Apperntly no ports point in that direction someone is wrong (blame the writer)")
def connect(self, mod_a, mod_a_port, mod_b, mod_b_port):
"""
Connects the 2 passed modules through the specified ports and ensures that the modules are adjacent
:param mod_a: first module key
:param mod_a_port: port id to connect second module to
:param mod_b: second module key
:param mod_b_port: port id to connect first module to
:raises: ValueError
:raises: IndexError
"""
# checks the modules are not already connected
if self.modules[mod_a].cons[mod_a_port] == mod_b:
if self.modules[mod_b].cons[mod_b_port] == mod_a:
return
# checks that the ports are not already in use
try:
if self.modules[mod_a].cons[mod_a_port] is not None:
raise ValueError("The port %d on %s is already connected to %s" % (mod_a_port, mod_a, self.modules[mod_a].cons[mod_a_port]))
except IndexError:
raise IndexError("Port %d does not exist in this dimension" % (mod_a_port))
try:
if self.modules[mod_b].cons[mod_b_port] is not None:
raise ValueError("The port %d on %s is already in use" % (mod_b_port, mod_b))
except IndexError:
raise IndexError("Port %d does not exist in this dimension" % (mod_b_port))
# give positions to the connected module
if self.is_goal:
if mod_a == self._root:
self.modules[mod_b].pos = self._get_new_position(mod_a, mod_b, mod_a_port)
elif mod_b == self._root:
self.modules[mod_a].pos = self._get_new_position(mod_b, mod_a, mod_b_port)
elif (self.modules[mod_a].pos is not None) and (self.modules[mod_b].pos is not None):
# checks modules are next to each other
if self._check_adjacency(mod_a, mod_b) is None:
raise ValueError("Modules %s, %s are not adjecent" % (mod_a, mod_b))
elif self.modules[mod_a].pos is not None:
self.modules[mod_b].pos = self._get_new_position(mod_a, mod_b, mod_a_port)
elif self.modules[mod_b].pos is not None:
self.modules[mod_a].pos = self._get_new_position(mod_b, mod_a, mod_b_port)
self.modules[mod_a].cons[mod_a_port] = mod_b
self.modules[mod_b].cons[mod_b_port] = mod_a
# move the cubes to the correct positions
# won't move modules already in place
# checks that modules should actually be moved
if not self.is_goal:
# move the modules into position and ensure they are there
self._move_mod(mod_a, self.modules[mod_a].pos)
self._move_mod(mod_b, self.modules[mod_b].pos)
# links the modules together
modCon.link(mod_a, mod_b)
def connect_all(self, mod_id):
"""
given a mod id, checks all adjacent positions and connects to any modules found there
:param mod_id: module key to connect modules to
"""
if self.modules[mod_id].pos is None:
raise IndexError("%s does not have a position so it not yet connected" % (mod_id))
for mod in self.modules:
if mod != mod_id:
adja = self._check_adjacency(mod_id, mod)
if adja is not None:
# use the returned port to get the actual ports to connect with and then connect the mods
mod_a_port = self._get_port(mod, adja)
# skips if the module is already connected at that port
if self.modules[mod_id].cons[mod_a_port] == mod:
continue
base_cons = [2, 3, 0, 1, 5, 4]
mod_b_port = self._get_port(mod, base_cons[adja])
self.connect(mod_id, mod_a_port, mod, mod_b_port)
def disconnect(self, mod_id, port_id):
"""
Disconnects 2 modules connected together through a specific port on one and unlinks them both
:param mod_id: primary module key which disconnected through
:param port_id: port id to disconnect
:raises: ValueError
"""
if self.modules[mod_id].cons[port_id] is None:
raise ValueError("Port %d on module: %s is not connected" % (port_id, mod_id))
# unlinks modules
# TODO investigate issues with unlinking modules (some modules prefer to unlink one way)
modCon.unlink(mod_id, self.modules[mod_id].cons[port_id])
modCon.unlink(self.modules[mod_id].cons[port_id], mod_id)
# disconnects port on other module
flag = False
for i in range(len(self.modules[self.modules[mod_id].cons[port_id]].cons)):
if self.modules[self.modules[mod_id].cons[port_id]].cons[i] == mod_id:
self.modules[self.modules[mod_id].cons[port_id]].cons[i] = None
flag = True
if not flag:
print("Error Disconnect_all:")
print(mod_id, ": ", self.modules[mod_id].cons)
print(self.modules[mod_id].cons[port_id], ": ", self.modules[self.modules[mod_id].cons[port_id]].cons)
print()
# raise RuntimeError("%s was not connected to %s in both directions" % (mod_id, self.modules[mod_id].cons[port_id]))
self.modules[mod_id].cons[port_id] = None
def disconnect_all(self, mod_id):
"""
Loops through all connections of a given module and if connected runs disconnect
"""
# add a way to avoid disconnect from arm/tug
for port_id in range(len(self.modules[mod_id].cons)):
if self.modules[mod_id].cons[port_id] is not None:
self.disconnect(mod_id, port_id)
def _get_goal_order(self):
"""
Uses BFS to find the order of the goal structure
:returns: linear array of modules
"""
root = self.goal._root
to_visit = [root]
visited = []
while to_visit:
current_node = to_visit[0]
visited.append(current_node)
for child in self.goal.modules[current_node].cons:
# broken?
if child is not None and child not in to_visit and child not in visited:
to_visit.append(child)
to_visit.pop(0)
return visited
def _get_mod_path(self, root, goal):
"""
Dijkstra implementation: finds the path from root to goal as module ids
:param root: root module key
:param goal: goal module key
:returns: list of module keys that form path from root to goal
"""
to_visit = {root}
est_cost = {root: 0}
final_cost = {}
visited = set()
back_track = {}
while to_visit:
current_node = None
current_score = None
for mod in to_visit:
if current_node is None or est_cost[mod] < current_score:
current_node = mod
current_score = est_cost[mod]
# checks if reached goal
if current_node == goal:
path = [current_node]
while current_node in back_track:
current_node = back_track[current_node]
path.append(current_node)
# if goal[-self._mod_type:] == path[0][-self._mod_type:]:
path.reverse()
return path
to_visit.remove(current_node)
visited.add(current_node)
for neighbour in self.goal.modules[current_node].cons:
# unconnected ports hold None and must be skipped or the lookup below fails
if neighbour is None or neighbour in visited:
continue
tmp_cost = est_cost[current_node] + 1
if neighbour not in to_visit:
to_visit.add(neighbour)
elif tmp_cost >= final_cost[neighbour]:
continue
back_track[neighbour] = current_node
final_cost[neighbour] = tmp_cost
est_cost[neighbour] = final_cost[neighbour] + 1
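# Note (added): every edge weight here is 1, so this Dijkstra search reduces
# to a breadth-first search; the returned list is a shortest module-to-module
# route through the goal's connection graph, e.g.
#
#   path = craft._get_mod_path("root_std", "tip_std")   # hypothetical names
#   # -> ["root_std", ..., "tip_std"]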
def import_from_json(self, file_name, goal=True):
"""
Decode a json file into a craft or craft goal
:param file_name: file name
:param goal: (optional) boolean
:returns: (optional) new craft
"""
with open(file_name, "r") as file:
data = file.read().replace("\n", "")
if goal is False:
new_craft = pickler.decode(data)
try:
new_craft.tag_len
except AttributeError:
new_craft.tag_len = 3
return new_craft
else:
self.goal = pickler.decode(data)
def export_to_json(self, file_name):
"""
Exports current spacecraft as a json file
:param file_name: output file name (".json" is appended automatically)
"""
with open(file_name + ".json", "w") as write_file:
write_file.write(pickler.encode(self))
def _move_mod(self, mod_id, dest, precision=None):
"""
Moves the module to dest (within precision)
:param mod_id: module key
:param dest: coordinates of the destination (x, y, z)
:param precision: (optional) integer/float offset
"""
if precision is None:
precision = self.precision
cont = False
loop_checker = 0
prev_x = 0
prev_y = 0
prev_z = 0
while cont is False:
modCon.setDest(mod_id=mod_id, x=dest[0], y=dest[1], z=dest[2])
pose = modCon.getPose(mod_id)
if round(prev_x - pose["x"], 3) == 0 and round(prev_y - pose["y"], 3) == 0 and round(prev_z - pose["z"], 3) == 0:
loop_checker += 1
if loop_checker > 200:
print("Failed to move: ", mod_id, " to: ", dest)
return
else:
prev_x, prev_y, prev_z = pose["x"], pose["y"], pose["z"]
if dest[0] - precision <= pose["x"] <= dest[0] + precision:
if dest[1] - precision <= pose["y"] <= dest[1] + precision:
if dest[2] - precision <= pose["z"] <= dest[2] + precision:
self.modules[mod_id].pos = tuple(dest)
cont = True
def melt(self, root=None):
"""
Places all modules in a chain
:param root: the module to rearrange all the other cubes around
:returns: list of module keys in new order
"""
# get most extreme module or check passed module
if root is None:
root, dump_path = self.__get_isolated_mod(next(iter(self.modules)))
else:
if root not in self.modules:
raise ValueError("%s is not a valid module" % (root))
good_root = False
for port in self.modules[root].cons:
if port is None:
good_root = True
if good_root is False:
raise ValueError("%s is not a valid root" % (root))
# connect all modules together to ensure optimum paths
for node in self.modules:
self.connect_all(node)
print("root: ", root, "\t", self.modules[root].cons)
# find coords of free space next to root
port_id = None
for i in range(len(self.modules[root].cons)):
if self.modules[root].cons[i] is None:
port_id = i
break
if port_id is None:
raise TypeError("port_id has not been set, check root validity")
base_cons = [2, 3, 0, 1, 5, 4]
# moves all modules into chain
moved = []
to_move = set(self.modules.keys())
while len(to_move) != 0:
# gets an isolated mod and the path of modules that connect it to the root
current_node, current_path = self.__get_isolated_mod(root)
print("Melting: ", current_node)
# gets the path of coordinates for the module to travel along
coord_path = self.__get_coord_path(current_path, base_cons[port_id])
# disconnect the module and move it
self.disconnect_all(current_node)
print(current_node, ": ", self.modules[current_node].cons)
# modCon.setDest(current_node, x=2, y=2, z=2)
# tmp = input()
# move current node over path by getting positions outside of modules
for coords in coord_path:
self._move_mod(current_node, coords)
# connect module to chain (needs revisiting to account for modules that must sit in certain orientations)
self.connect(current_node, self._get_port(current_node, base_cons[port_id]), root, port_id)
moved.append(current_node)
to_move.remove(current_node)
root = current_node
return moved
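# Usage sketch (added, not original code; a plausible end-to-end flow):
#
#   order = craft.melt()        # straighten the structure into one chain
#   order = craft.sort(order)   # reorder the chain to match the goal types
#   craft.grow(order)           # fold the chain into the goal structure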
def sort(self, current_order=None):
"""
Sorts the chain of modules into the order in which they must be placed to build the goal structure
:param current_order: (optional) module keys in current order of the chain
"""
# if no current order is passed, find it
if current_order is None:
end_mod, dump = self.__get_isolated_mod(next(iter(self.modules)))
opposite_end, current_order = self.__get_isolated_mod(end_mod)
del end_mod, dump, opposite_end
if self.goal is None:
raise TypeError("goal is not set and therefore cannot be achieved")
# get order for goal then take only module types
goal_order = self._get_goal_order()
goal_order = [elem[-self.tag_len:] for elem in goal_order]
final_places = {}
if len(goal_order) != len(current_order):
# handle this (write later)
raise ValueError("Goal and spacecraft contain different number of modules")
tmp_order = current_order.copy()
# finds where each module type needs to be moved
for pos in range(len(goal_order)):
try:
index = [
idx for idx, s in enumerate(tmp_order) if goal_order[pos] in s
][0]
except IndexError:
raise IndexError("%s doesn't exist in craft" % (goal_order[pos]))
final_places[tmp_order[index]] = pos
del tmp_order[index]
base_cons = [2, 3, 0, 1, 5, 4]
# find the occupied ports and make a list of the unused ones
mid_mod = current_order[len(current_order) // 2]
used = []
for port_id in range(len(self.modules[mid_mod].cons)):
if self.modules[mid_mod].cons[port_id] is not None:
used.append(port_id)
if len(used) != 2:
raise IndexError("The modules are not in a chain")
unused = [0, 1, 2, 3, 4, 5]
unused.remove(used[0])
unused.remove(used[1])
print("\nSplitting in 2")
# splits the row in 2
current_order = [current_order]
for i in range(len(current_order[0]) // 2):
self.disconnect_all(current_order[0][0])
popped_mod = current_order[0].pop(0)
path = current_order[0][-i-1::-1] + [popped_mod]
# print("splitting: ", popped_mod)
if i == 0:
current_order.append([popped_mod])
else:
current_order[1].insert(0, popped_mod)
# moves the module to the new position
# path currently moves the module towards nearest module (oops)
path = self.__get_coord_path(path, unused[0])
path = np.unique(path, axis=0)
# print(path, "\n")
for coord in path:
# print("moving to:", coord)
self._move_mod(popped_mod, coord)
# final_pose = modCon.getPose(popped_mod)
# final_pose = [final_pose["x"]] + [final_pose["y"]] + [final_pose["z"]]
# final_pose = np.round(final_pose, 3)
# connects to row above/below
# self.connect(popped_mod, unused[0], current_order[0][-i - 1], base_cons[unused[0]])
# connect to modules on its own
self.connect_all(popped_mod)
# for testing: prints out mods and their connections
# for mod in self.modules:
# print(mod, ": ", self.modules[mod].cons)
# remove the now used ports so that bubble sort only uses the remaining dimension
unused.remove(base_cons[unused[0]])
unused.remove(unused[0])
print(goal_order)
print(final_places)
print(current_order[0])
print(current_order[1])
print("beginning bubble")
tmp = input()
# sort each row separately (could run in parallel?)
for sub_list in current_order:
# sorts each row
for i in range(len(sub_list) - 1):
for j in range(0, len(sub_list) - i - 1):
if final_places[sub_list[j]] > final_places[sub_list[j + 1]]:
# final positions of each module
pos1 = self.modules[sub_list[j]].pos
pos2 = self.modules[sub_list[j + 1]].pos
self.disconnect_all(sub_list[j + 1])
self.disconnect_all(sub_list[j])
# self.move_mod(sub_list[j],)
# take first mod, get unused dimension
# move the first mod up and then on top of the second module
# move the second mod along to pos of 1st
# move 1st mod down into position
self.connect_all(sub_list[j + 1])
self.connect_all(sub_list[j])
sub_list[j], sub_list[j + 1] = sub_list[j + 1], sub_list[j]
# connect structure together
for key in self.modules:
self.connect_all(key)
# merge sorted rows
if final_places[current_order[0][0]] < final_places[current_order[1][0]]:
root = current_order[0][0]
del current_order[0][0]
else:
root = current_order[1][0]
del current_order[1][0]
if final_places[current_order[0][-1]] > final_places[current_order[1][-1]]:
self.disconnect_all(root)
self.connect(current_order[0][-1], 2, root, 0)
else:
self.disconnect_all(root)
self.connect(current_order[1][-1], 2, root, 0)
final_order = [root]
while len(current_order[0]) > 0 and len(current_order[1]) > 0:
if final_places[current_order[0][0]] < final_places[current_order[1][0]]:
self.disconnect_all(current_order[0][0])
self.connect(root, 2, current_order[0][0], 0)
root = current_order[0][0]
del current_order[0][0]
final_order.append(root)
else:
self.disconnect_all(current_order[1][0])
self.connect(root, 2, current_order[1][0], 0)
root = current_order[1][0]
del current_order[1][0]
final_order.append(root)
for mod in current_order[0]:
self.disconnect_all(mod)
self.connect(root, 2, mod, 0)
root = mod
final_order.append(root)
for mod in current_order[1]:
self.disconnect_all(mod)
self.connect(root, 2, mod, 0)
root = mod
final_order.append(root)
return final_order
def grow(self, order):
"""
Rearranges a sorted module chain to form the goal structure
:param order: module keys in current order of the chain
"""
for idx in range(len(order)):
base_cons = [2, 3, 0, 1, 5, 4]
mod_type = order[idx][-self.tag_len:]
path = order[idx + 1:]
if idx == 0:
self.disconnect_all(order[idx])
self.connect(order[-1], 2, order[idx], 0)
# self.goal.modules[order[idx].replace("_", "-")] = self.goal.modules.pop(order[idx])
# order[idx] = order[idx].replace("_", "-")
continue
path = path + self._get_mod_path(order[0], order[idx])
success = False
last_mod = path[-2]
for port in range(len(self.goal.modules[last_mod].cons)):
if self.goal.modules[last_mod].cons[port] is None:
continue
elif (self.goal.modules[last_mod].cons[port][-self.tag_len:] == mod_type):
self.disconnect_all(order[idx])
self.connect(order[idx], base_cons[port], path[-1], port)
success = True
self.display()
if not success:
raise ValueError("Growing failed for module %s" % (order[idx]))
| 38.280855 | 161 | 0.550786 | 5,383 | 39,391 | 3.895411 | 0.105703 | 0.047213 | 0.036053 | 0.012972 | 0.288521 | 0.228671 | 0.204349 | 0.178502 | 0.141065 | 0.120797 | 0 | 0.018845 | 0.346653 | 39,391 | 1,028 | 162 | 38.318093 | 0.795928 | 0.277805 | 0 | 0.249561 | 0 | 0 | 0.035902 | 0 | 0 | 0 | 0 | 0.002918 | 0 | 1 | 0.038664 | false | 0 | 0.012302 | 0 | 0.077329 | 0.026362 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9304974cceaa6c0815b75d4d6551003da77ce3ad | 5,625 | py | Python | yumina/syosetu.py | jeffswt/yumina | cdb18dc97e38028f6866b98d3ae43bc375440836 | [
"MIT"
] | 3 | 2017-12-10T03:35:30.000Z | 2018-12-15T23:13:28.000Z | yumina/syosetu.py | jeffswt/yumina | cdb18dc97e38028f6866b98d3ae43bc375440836 | [
"MIT"
] | null | null | null | yumina/syosetu.py | jeffswt/yumina | cdb18dc97e38028f6866b98d3ae43bc375440836 | [
"MIT"
] | null | null | null |
import bs4
import json
import os
import re
import requests
import sqlite3
from . import renderer
def get_webpage(*args, **kwargs):
""" get_webpage(...) -- request webpage content / text """
# the page is UTF-8 but requests decodes it as ISO-8859-1 (no charset in
# the response headers), so the round-trip below recovers the UTF-8 text
return requests.get(*args, **kwargs).text.encode('ISO-8859-1').decode('utf-8')
def map_num(s):
""" map_num(str) -- change all full-width characters to half-width. """
s = s.replace('0', '0')\
.replace('1', '1')\
.replace('2', '2')\
.replace('3', '3')\
.replace('4', '4')\
.replace('5', '5')\
.replace('6', '6')\
.replace('7', '7')\
.replace('8', '8')\
.replace('9', '9')\
.replace('\u3000', ' ')
return s
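# Example (added): map_num normalises the full-width digits and ideographic
# spaces that appear in syosetu chapter titles, e.g.
#
#   map_num('第１２話')   # -> '第12話'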
def get_chapter_list(web_id):
sel_1 = r'<div class="chapter_title">.*?</div>'
sel_2 = r'<dd class="subtitle">\n<a href="/%s/\d+/">.*?</a>\n</dd>' % web_id
q1 = map_num(get_webpage('http://ncode.syosetu.com/%s/' % web_id))
q2 = re.findall('(%s|%s)' % (sel_1, sel_2), q1)
q3 = []
for i in q2:
if re.findall(sel_1, i) != []:
sel_3 = r'^<div class="chapter_title">第(\d+)章 (.*?)</div>$'
j = int(re.sub(sel_3, r'\1', i))
k = re.sub(sel_3, r'\2', i)
q3.append(('chapter_title', j, k))
else:
sel_3 = r'^<dd class="subtitle">\n<a href="/%s/(\d+)/">(.*?)</a>\n</dd>$' % web_id
k = int(re.sub(sel_3, r'\1', i))
l = re.sub(r'^[##].*? (.*?)$', r'\1', re.sub(sel_3, r'\2', i))
q3.append(('subtitle', k, l))
return q3
def get_chapter(web_id, chap_id):
q1 = map_num(get_webpage('http://ncode.syosetu.com/%s/%d/' % (web_id, chap_id)))
q2 = bs4.BeautifulSoup(q1, 'html5lib')
q3 = q2.find_all(id='novel_honbun')[0].text
# stylize paragraphs
q3 = re.sub(r'\n +', r'\n', q3)
q3 = re.sub(r'\n\n+', r'\n\n', q3)
q3 = re.sub(r'(^\n+|\n+$)', r'', q3)
# split into lines
q4 = q3.split('\n')
q5 = []
for i in q4:
if re.findall(r'^ *$', i) != []:
q5.append(('break',))
else:
q5.append(('line', [('regular', i.replace(' ', ''))]))
return q5
class SyosetuDatabase:
def __init__(self, filename, syosetu_id, force_clear=False):
found = os.path.exists(filename)
self.base = sqlite3.connect(filename)
self.cur = self.base.cursor()
self.sid = syosetu_id
if not found or force_clear:
self.cur.execute("DROP TABLE IF EXISTS toc;")
self.cur.execute("DROP TABLE IF EXISTS cont;")
self.cur.execute("""
CREATE TABLE toc (
e_type TEXT,
e_id INTEGER,
e_title TEXT
);""");
self.cur.execute("""
CREATE TABLE cont (
t_idx INTEGER,
t_jpn JSONB,
t_jpn_lit JSONB
);""");
return
def get_contents(self):
q1 = []
for i in self.cur.execute("SELECT * FROM toc;"):
q1.append((i[0], i[1], i[2]))
return q1
def get_chapter_title(self, typ, num):
for i in self.get_contents():
if i[0] == typ and i[1] == num:
return i
return (typ, num, '無題')
def get_contents_chapters_id(self):
q1 = []
for i in self.get_contents():
if i[0] == 'subtitle':
q1.append(i[1])
return sorted(list(set(q1)))
def update_contents(self):
toc = get_chapter_list(self.sid)
self.cur.execute("DELETE FROM toc;")
for i in toc:
self.cur.execute("INSERT INTO toc (e_type, e_id, e_title) VALUES (?, ?, ?)",
(i[0], i[1], i[2]))
return
def get_chapter(self, chap_id):
q1 = []
for i in self.cur.execute("SELECT * FROM cont WHERE t_idx = ?", (chap_id,)):
q1.append(i)
if q1 == []:
return []
q = [[], []]
for num in range(0, 2):
for i in json.loads(q1[0][num + 1]):
if i[0] == 'line':
q[num].append(('line', [tuple(seg) for seg in i[1]]))
else:
q[num].append(('break',))
return q[0], q[1]
def has_chapter(self, chap_id):
q1 = []
for i in self.cur.execute("SELECT * FROM cont WHERE t_idx = ?", (chap_id,)):
q1.append(i)
return q1 != []
def update_chapter(self, chap_id, phonogram_renderer=None):
chap1 = get_chapter(self.sid, chap_id)
cj1 = json.dumps(chap1)
chap2 = renderer.phoneticize(chap1, phonogram_renderer=phonogram_renderer)
cj2 = json.dumps(chap2)
self.cur.execute("DELETE FROM cont WHERE t_idx = ?;", (chap_id,))
self.cur.execute("INSERT INTO cont (t_idx, t_jpn, t_jpn_lit) VALUES (?, ?, ?)",
(chap_id, cj1, cj2))
return
def update_all(self, phonogram_renderer=None, display_progress_bar=False):
self.update_contents()
self.commit()
ch = self.get_contents_chapters_id()
for i in ch:
if not self.has_chapter(i):
self.update_chapter(i, phonogram_renderer=phonogram_renderer)
self.commit()
if display_progress_bar:
print('%s|%s\r' % (str(i).rjust(4), ('=' * int(i / len(ch) * 70)).ljust(70, '.')), end='')
return
def commit(self):
self.base.commit()
return
def close(self):
self.commit()
self.base.close()
return
| 35.377358 | 110 | 0.497778 | 764 | 5,625 | 3.534031 | 0.219895 | 0.031111 | 0.024444 | 0.018519 | 0.292593 | 0.221111 | 0.217407 | 0.177037 | 0.165926 | 0.11037 | 0 | 0.032326 | 0.323556 | 5,625 | 158 | 111 | 35.601266 | 0.677267 | 0.0272 | 0 | 0.176871 | 0 | 0.013605 | 0.19956 | 0.030603 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0.006803 | 0.047619 | 0 | 0.272109 | 0.006803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9305e2e159b395e28159191379c582290d786c32 | 2,112 | py | Python | cogs/administrator.py | jagadeesh70/arose-discord-bot | de7b8c9d3c01e3028b5dd063c10a372d3a4a3225 | [
"MIT"
] | null | null | null | cogs/administrator.py | jagadeesh70/arose-discord-bot | de7b8c9d3c01e3028b5dd063c10a372d3a4a3225 | [
"MIT"
] | null | null | null | cogs/administrator.py | jagadeesh70/arose-discord-bot | de7b8c9d3c01e3028b5dd063c10a372d3a4a3225 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
def mods_or_owner():
"""
Check that the invoking user is the bot owner or has the Moderator role
"""
return commands.check_any(commands.is_owner(), commands.has_role("Moderator"))
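# Usage note (added): returning commands.check_any(...) directly yields a
# check decorator, so @mods_or_owner() on a command passes whenever the
# invoker is the bot owner or holds the "Moderator" role.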
class Moderation(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(help='''Just kick that kid''')
@mods_or_owner()
@commands.guild_only()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member = None, reason: str = "Because you were bad. We kicked you."):
if member is not None:
await ctx.guild.kick(member, reason=reason)
await ctx.send(f'**{member}** has been kicked....**reason: {reason}**')
else:
await ctx.send("Please specify user to kick via mention")
@commands.command(help='''Just ban that notorious guy''')
@mods_or_owner()
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member = None, reason: str = "Because you are naughty. We banned you."):
if member is not None:
await ctx.guild.ban(member, reason=reason)
else:
await ctx.send("Please specify user to kick via mention")
@commands.command(help='''unban the guy u banned :)''')
@mods_or_owner()
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member: str = "", reason: str = "You have been unbanned. Time is over. Please behave"):
if member == "":
await ctx.send("Please specify username as text")
return
bans = await ctx.guild.bans()
for b in bans:
if b.user.name == member:
await ctx.guild.unban(b.user, reason=reason)
await ctx.send("User was unbanned")
return
await ctx.send("User was not found in ban list.")
def setup(client):
client.add_cog(Moderation(client))
| 35.2 | 118 | 0.633996 | 281 | 2,112 | 4.676157 | 0.320285 | 0.060883 | 0.054795 | 0.043379 | 0.469559 | 0.394216 | 0.394216 | 0.394216 | 0.394216 | 0.305936 | 0 | 0 | 0.247633 | 2,112 | 59 | 119 | 35.79661 | 0.826935 | 0.028883 | 0 | 0.347826 | 0 | 0 | 0.20344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.065217 | 0.021739 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9305fcafb01d19b524fa60ee8115fba60d45f909 | 815 | py | Python | examples/Saxs_Cube/genCubeData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | examples/Saxs_Cube/genCubeData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | examples/Saxs_Cube/genCubeData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | import modelexp
from modelexp.experiments.sas import Saxs
from modelexp.models.sas import Cube
import numpy as np
import random
app = modelexp.Cli()
app.setExperiment(Saxs)
modelRef = app.setModel(Cube)
modelRef.addModel(np.linspace(1e-2, 0.5, 300))
modelRef.setParam('a', 50)
modelRef.setParam('sldCube', 45e-6)
modelRef.setParam('sldSolvent', 10e-6)
modelRef.setParam('sigA', 0.05)
modelRef.setParam('i0', 1)
modelRef.setParam('bg', 0)
modelRef.calcModel()
q = modelRef.getModelset(0).getDomain()
I = modelRef.getModelset(0).getValues()
sig_y = 0.05*I
randomized_y = []
for i in range(len(I)):
randomized_y.append(random.gauss(I[i], 0.10*I[i]))
randomized_y = np.array(randomized_y)
with open('saxsCubeData.xye', 'w') as f:
for i in range(len(I)):
f.write(f'{q[i]}\t{randomized_y[i]}\t{sig_y[i]}\n')
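# Read-back sketch (added, not part of the original script). Note the noise
# is drawn with a 10% standard deviation while the recorded error column
# sig_y is 5% of the model intensity.
#
#   q_r, I_r, sig_r = np.loadtxt('saxsCubeData.xye', unpack=True)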
| 25.46875 | 55 | 0.722699 | 135 | 815 | 4.311111 | 0.466667 | 0.164948 | 0.061856 | 0.037801 | 0.051546 | 0.051546 | 0 | 0 | 0 | 0 | 0 | 0.039617 | 0.10184 | 815 | 31 | 56 | 26.290323 | 0.755464 | 0 | 0 | 0.076923 | 0 | 0 | 0.100614 | 0.047853 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.192308 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
930630b81faeff458c2406841098e8b0305b1be5 | 3,621 | py | Python | common-scripts/msys2-binary-convert.py | derickl/conda-recipes | 52df2d8fe92f2c18da8500cdc49b75a8f261f304 | [
"BSD-3-Clause"
] | 1 | 2015-01-30T05:25:29.000Z | 2015-01-30T05:25:29.000Z | common-scripts/msys2-binary-convert.py | derickl/conda-recipes | 52df2d8fe92f2c18da8500cdc49b75a8f261f304 | [
"BSD-3-Clause"
] | null | null | null | common-scripts/msys2-binary-convert.py | derickl/conda-recipes | 52df2d8fe92f2c18da8500cdc49b75a8f261f304 | [
"BSD-3-Clause"
] | null | null | null | import os
from conda_build.metadata import MetaData
import requests
import hashlib
import tarfile
import tempfile
from glob import glob
from shutil import move, copy
from os.path import join, normpath, dirname
from os import makedirs, getenv
from sys import exit
import patch
import re
def get_tar_xz(url, md5):
tmpdir = tempfile.mkdtemp()
urlparts = requests.packages.urllib3.util.url.parse_url(url)
fname = urlparts.path.split('/')[-1]
sig = hashlib.md5()
tmp_tar_xz = join(tmpdir, fname)
if urlparts.scheme == 'file':
path = re.compile('^file://').sub('', url).replace('/', os.sep)
copy(path, tmp_tar_xz)
with open(tmp_tar_xz, "rb") as tar_xz:
for block in iter(lambda: tar_xz.read(1024), b""):
sig.update(block)
else:
with open(tmp_tar_xz, 'wb') as tar_xz:
response = requests.get(url, stream=True)
for block in response.iter_content(1024):
sig.update(block)
tar_xz.write(block)
if sig.hexdigest() != md5:
print(
'ERROR: md5 sum mismatch expected %s, got %s' %
(md5, sig.hexdigest()))
exit(1)
return tmp_tar_xz
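# Usage sketch (added; the URL and md5 below are hypothetical placeholders):
#
#   tmp_path = get_tar_xz('https://example.org/pkg.tar.xz',
#                         '0123456789abcdef0123456789abcdef')
#
# The archive is downloaded (or copied for file:// URLs), md5-verified, and
# its temporary path returned; on a checksum mismatch an error is printed
# and the process exits with status 1.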
def main():
recipe_dir = os.environ["RECIPE_DIR"]
conda_platform = 'win-32' if os.environ["ARCH"] == '32' else 'win-64'
prefix = os.environ['PREFIX']
metadata = MetaData(recipe_dir)
msys2_tar_xz_url = metadata.get_section(
'extra')['msys2-binaries'][conda_platform]['url']
msys2_md5 = metadata.get_section(
'extra')['msys2-binaries'][conda_platform]['md5']
mv_srcs_list = metadata.get_section(
'extra')['msys2-binaries'][conda_platform]['mv-srcs']
mv_dsts_list = metadata.get_section(
'extra')['msys2-binaries'][conda_platform]['mv-dsts']
msys2_tar_xz = get_tar_xz(msys2_tar_xz_url, msys2_md5)
tar = tarfile.open(msys2_tar_xz, 'r|xz')
tar.extractall(path=prefix)
try:
patches = metadata.get_section(
'extra')['msys2-binaries'][conda_platform]['patches']
except KeyError:
patches = []
if len(patches):
for patchname in patches:
patchset = patch.fromfile(join(getenv('RECIPE_DIR'), patchname))
patchset.apply(1, root=prefix)
# shutil is a bit funny (like mv) with regard to how it treats
# the destination, depending on whether it is an existing directory or not
# (i.e. moving into that versus moving as that).
# Therefore, the rules employed are:
# 1. If mv_dst ends with a '/' it is a directory that you want mv_src
# moved into.
# 2. If mv_src has a wildcard, mv_dst is a directory that you want mv_src
# moved into.
# In these cases we makedirs(mv_dst) and then call move(mv_src, mv_dst)
# .. otherwise we makedirs(dirname(mv_dst)) and call move(mv_src, mv_dst)
# .. however, if no mv_srcs exist we don't makedirs at all.
for mv_src, mv_dst in zip(mv_srcs_list, mv_dsts_list):
mv_dst_definitely_dir = False
mv_srcs = glob(join(prefix, normpath(mv_src)))
if '*' in mv_src or mv_dst.endswith('/') or len(mv_srcs) > 1:
mv_dst_definitely_dir = True
if len(mv_srcs):
mv_dst = join(prefix, normpath(mv_dst))
mv_dst_mkdir = mv_dst
if not mv_dst_definitely_dir:
mv_dst_mkdir = dirname(mv_dst_mkdir)
try:
makedirs(mv_dst_mkdir)
except OSError:
# the directory may already exist
pass
for mv_src in mv_srcs:
move(mv_src, mv_dst)
tar.close()
if __name__ == "__main__":
main()
| 35.851485 | 77 | 0.626899 | 517 | 3,621 | 4.181818 | 0.324952 | 0.043941 | 0.018501 | 0.053191 | 0.191027 | 0.16975 | 0.153099 | 0.153099 | 0.085106 | 0.085106 | 0 | 0.014937 | 0.260425 | 3,621 | 100 | 78 | 36.21 | 0.792382 | 0.161558 | 0 | 0.074074 | 0 | 0 | 0.079696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0.012346 | 0.160494 | 0 | 0.197531 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9306e30b393e2b459c7f4ab524c75866193a41b4 | 4,404 | py | Python | cyolo_score_following/eval.py | CPJKU/cyolo_score_following | 4b34947a9b7cc19a139ce3768eac6079aaff5cfe | [
"MIT"
] | 7 | 2021-05-23T22:14:30.000Z | 2022-03-07T16:46:18.000Z | cyolo_score_following/eval.py | CPJKU/cyolo_score_following | 4b34947a9b7cc19a139ce3768eac6079aaff5cfe | [
"MIT"
] | null | null | null | cyolo_score_following/eval.py | CPJKU/cyolo_score_following | 4b34947a9b7cc19a139ce3768eac6079aaff5cfe | [
"MIT"
] | 3 | 2021-05-23T22:38:59.000Z | 2021-12-02T19:07:01.000Z |
import argparse
import os
import torch
import numpy as np
from cyolo_score_following.dataset import load_dataset, collate_wrapper, iterate_dataset, CLASS_MAPPING
from cyolo_score_following.utils.data_utils import FPS
from cyolo_score_following.models.yolo import load_pretrained_model
from torch.utils.data import DataLoader
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluation Script')
parser.add_argument('--param_path', help='path to the stored network', type=str)
parser.add_argument('--test_dirs', help='path to test dataset.', nargs='+')
parser.add_argument('--only_onsets', help='only evaluate onset frames', default=False, action='store_true')
parser.add_argument('--batch_size', help='batch size', type=int, default=32)
parser.add_argument('--split_files', help='split file to only evaluate a subset from the test dirs',
default=None, nargs='+')
parser.add_argument('--scale_width', help='sheet image scale factor', type=float, default=416)
parser.add_argument('--num_workers', default=4, type=int, help="number of parallel datapool worker")
parser.add_argument('--load_audio', default=False, action='store_true', help="preload audio files for datapool")
parser.add_argument('--print_piecewise', default=False, action='store_true', help="print statistics for each piece")
parser.add_argument('--save_tag', default=None)
parser.add_argument('--save_dir', type=str, default="")
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
network, criterion = load_pretrained_model(args.param_path)
predict_sb = network.nc == 3
print(network)
print(f"Putting model to {device}")
network.to(device)
network.eval()
dataset = load_dataset(args.test_dirs, augment=False, scale_width=args.scale_width, split_files=args.split_files,
only_onsets=args.only_onsets, load_audio=args.load_audio, predict_sb=predict_sb)
dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers,
collate_fn=collate_wrapper, pin_memory=True)
stats = iterate_dataset(network, dataloader, optimizer=None, criterion=criterion, device=device)
if args.save_tag is not None:
with open(os.path.join(args.save_dir, args.save_tag + "_stats.npy"), "wb") as f:
np.save(f, stats)
ordering = []
max_str_len = 0
for piece in stats['piece_stats'].keys():
ordering.append((piece, np.mean(stats['piece_stats'][piece]['frame_diff'])))
# store maximum string length for printing (currently unused below)
str_len = len(piece)
if str_len > max_str_len:
max_str_len = str_len
ordering = sorted(ordering, key=lambda k: k[1], reverse=False)
thresholds = [0.05, 0.1, 0.5, 1.0, 5.0]
if args.print_piecewise:
print("Piecewise frame tracking ratios")
for piece, _ in ordering:
piece_stat = stats['piece_stats'][piece]
print(f"{piece}:")
if 'frame_diff' in piece_stat:
diffs = np.array(piece_stat['frame_diff'])
diffs = diffs / FPS
total = len(diffs)
cumulative_percentage = []
for th in thresholds:
cumulative_percentage.append(np.round(100 * np.sum(diffs <= th) / total, 1))
print("\tTracked Frame Ratios", cumulative_percentage)
for value in CLASS_MAPPING.values():
if value + "_accuracy" in piece_stat:
print(f"\t{value} Accuracy: {piece_stat[value + '_accuracy']:.3f}")
print()
for value in CLASS_MAPPING.values():
if value + "_accuracy" in stats:
print(f'Average accuracy for {value}: {stats[value + "_accuracy"]:.3f}')
frame_diffs = np.concatenate([piece_stats['frame_diff'] for piece_stats in stats['piece_stats'].values()]) / FPS
total_frames = len(frame_diffs)
ratio_str = ""
print('Average frame tracking ratios:')
for th in thresholds:
ratio = np.sum(frame_diffs <= th) / total_frames
percentage = np.round(100 * ratio, 1)
ratio_str += f"& {ratio:.3f} "
print(f'<= {th}: {percentage}')
# string for latex table
ratio_str += "\\\\"
print(ratio_str)
| 40.40367 | 120 | 0.660082 | 578 | 4,404 | 4.821799 | 0.292388 | 0.035522 | 0.067097 | 0.024758 | 0.072838 | 0.054539 | 0.032293 | 0.032293 | 0.032293 | 0.032293 | 0 | 0.008986 | 0.216621 | 4,404 | 108 | 121 | 40.777778 | 0.798841 | 0.014305 | 0 | 0.051282 | 0 | 0 | 0.195296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.102564 | 0 | 0.102564 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93074a881ac1b60007d9201c550e6e05a4b71ade | 16,344 | py | Python | segmentation_net/segmentation_class/segmentation_train.py | PeterJackNaylor/segmentation_net | 9af94854a662d9529ca6f4bb774bf2603a434a3a | [
"MIT"
] | null | null | null | segmentation_net/segmentation_class/segmentation_train.py | PeterJackNaylor/segmentation_net | 9af94854a662d9529ca6f4bb774bf2603a434a3a | [
"MIT"
] | null | null | null | segmentation_net/segmentation_class/segmentation_train.py | PeterJackNaylor/segmentation_net | 9af94854a662d9529ca6f4bb774bf2603a434a3a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""segnet package file tf_record
Segmentation_base_class -> SegmentationInput -> SegmentationCompile ->
SegmentationSummaries -> Segmentation_model_utils -> Segmentation_train
"""
from datetime import datetime
from tqdm import trange
from ..net_utils import ScoreRecorder
from .segmentation_model_utils import *
def verbose_range(beg, end, word, verbose, verbose_thresh):
"""Monitores the time in range with tqdm
If verbose, use tqdm to take care of estimating end of training.
Args:
beg: integer, where to start iterating
end: integer, where to end iteration (not included)
word: string, to print in the displayed progress_bar
verbose: integer, value of verbose, most likely supplied by the object itself
verbose_thresh: integer, will display progress bar if verbose > verbose_thresh
Returns:
An iterable that, depending on the value of verbose, may print
a progress bar to stdout while being consumed.
"""
returned_range = None
if verbose > verbose_thresh:
returned_range = trange(beg, end, desc=word)
else:
returned_range = range(beg, end)
return returned_range
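# Example (added, illustrative): with verbose above the threshold a tqdm
# progress bar labelled "training " is displayed, otherwise a plain range:
#
#   for step in verbose_range(0, 100, "training ", verbose=1, verbose_thresh=0):
#       pass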
class SegmentationTrain(SegmentationModelUtils):
def train(self, train_record, test_record=None,
learning_rate=0.001, lr_procedure="1epoch",
weight_decay=0.0005, batch_size=1,
decay_ema=0.9999, k=0.96, n_epochs=10,
early_stopping=3, loss_func=tf.nn.l2_loss,
save_weights=True, new_log=None,
num_parallele_batch=8, restore=False,
track_variable="loss", track_training=False,
tensorboard=True, save_best=True, return_best=False,
decode=tf.float32):
""" Trains the model on train record, optionnaly you can monitor
the training by evaluation the test record
Args:
train_record: string, path to a tensorflow record file for training.
test_record: string or None, if given, the model will be evaluated on
the test data at every epoch.
learning_rate: float (default: 0.001) Initial learning rate for the
gradient descent update.
lr_procedure : string (default: "1epoch") period of the learning rate
decay, e.g. "10epoch" decays the rate every 10 epochs.
weight_decay : float (default: 0.0005) Initial value given to the weight
decay, the loss is computed:
loss = loss + weight_decay * sum(loss_func(W)) where W are
training parameters of the model.
batch_size : integer (default: 1) size of the batch fed at each
iteration.
decay_ema : float (default: 0.9999) if 0 or None it is ignored, else the
exponential moving average decay parameter to apply to weights
over time for more robust convergence.
k : float (default: 0.96) value by which the learning rate decays every
update.
n_epochs : integer (default: 10) number of epochs to perform
early_stopping : integer, if 0 or None ignored, else the model will stop
training if the tracked variable doesn't go in the right
direction in under early_stopping epochs.
loss_func : tensorflow function (default: l2_loss) to apply on the weights
for the weight decay in the loss function.
save_weights : bool (default: True) whether to store the weights
new_log : string (default: None) whether to save the model in a different folder
than the one from which the variables were restored.
num_parallele_batch : integer (default: 8) number of workers to use to
perform parallel computing.
restore : bool (default: False) whether to restore from the new_log given.
track_variable : str (default: loss) which variable to track in order to
perform early stopping.
track_training : bool (default: False) whether to track track_variable on the
training data or on the test data.
tensorboard : bool (default: True) whether to monitor the model via tensorboard.
save_best : bool (default: True) whether to save the best model as last weights
in case of early stopping or if there is a better possible model
with respect to the test set.
return_best : bool (default: False) whether to return the best model in case of early
stopping or if there is a better possible model with respect to
the test set.
decode: tensorflow dtype (default: tf.float32) how to decode the bytes in
the tensorflow records for the input rgb data.
Returns:
A python dictionary recapping the training and, if present, the test history.
"""
steps_in_epoch = max(ut.record_size(train_record) // batch_size, 1)
test_steps = ut.record_size(test_record) // batch_size if test_record is not None else None
max_steps = steps_in_epoch * n_epochs
self.tensorboard = tensorboard
if new_log is None:
new_log = self.log
else:
check_or_create(new_log)
stop_early = early_stopping is not None and early_stopping != 0
if not stop_early:
early_stopping = 0
if early_stopping not in [0, 3]:
## this saver is to ensure that we can restore to the best weights at the end
self.saver = self.saver_object(keep=early_stopping + 1,
log=new_log,
restore=restore)
self.score_recorder = ScoreRecorder(self.saver, self.sess,
new_log, stop_early=stop_early,
lag=early_stopping)
if not (k == 0 or k is None or lr_procedure is None or lr_procedure == ""):
with tf.name_scope('learning_rate_scheduler'):
lrs = self.learning_rate_scheduler(learning_rate, k, lr_procedure,
steps_in_epoch)
if self.verbose:
msg = "learning_rate_scheduler added \
with initial_value = {}, k = {} \
and decrease every = {}"
tqdm.write(msg.format(learning_rate, k, lr_procedure))
self.learning_rate = lrs
else:
lrs = learning_rate
if self.verbose:
tqdm.write("Learning_rate fixed to :{}".format(lrs))
if self.tensorboard:
sw, ms, stw, mts = self.setup_summary(new_log, test_record)
self.summary_writer = sw
self.merged_summaries = ms
if test_record:
self.summary_test_writer = stw
self.merged_summaries_test = mts
if self.verbose:
tqdm.write("summaries added")
if weight_decay != 0:
with tf.name_scope('regularization'):
self.loss = self.regularize_model(self.loss, loss_func, weight_decay)
if self.verbose:
tqdm.write('regularization weight decay added: {}'.format(weight_decay))
with tf.name_scope('optimization'):
opt = self.optimization(lrs, self.loss, self.training_variables)
if decay_ema != 0 and decay_ema is not None:
with tf.name_scope('exponential_moving_average'):
training_op = self.exponential_moving_average(opt,
self.training_variables,
decay_ema)
if self.verbose:
tqdm.write("Exponential moving average added to prediction")
else:
training_op = opt
with tf.name_scope('input_from_queue'):
image_out, anno_out = self.setup_queues(train_record, test_record,
batch_size, num_parallele_batch,
decode=decode)
# To plug in the queue to the main graph
# with tf.control_dependencies([image_out, anno_out]):
with tf.name_scope('queue_assigning'):
# Control the dependency to allow the flow through the data queues
assign_rgb_to_queue = tf.assign(self.rgb_v, image_out,
validate_shape=False)
assign_lbl_to_queue = tf.assign(self.lbl_v, anno_out,
validate_shape=False)
assign_to_variable = [assign_rgb_to_queue, assign_lbl_to_queue]
to_control = tf.tuple(assign_to_variable, control_inputs=[image_out, anno_out])
blank = tf.tuple([self.is_training], name=None, control_inputs=to_control)
train_op = tf.tuple([training_op], name=None, control_inputs=to_control)
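# Note (added): tf.tuple with control_inputs ties the variable assignments
# to the queue dequeue ops, so running `blank` pulls a fresh batch into the
# rgb/lbl variables without training, while `train_op` pulls a batch and
# applies one optimisation step.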
self.init_uninit([])
begin_iter = 0
begin_epoch = begin_iter // steps_in_epoch
last_epoch = begin_epoch + n_epochs
last_iter = max_steps + begin_iter
range_ = verbose_range(begin_iter, last_iter, "training ",
self.verbose, 0)
self.sess.run(blank)
for step in range_:
self.sess.run(train_op)
if (step - begin_iter + 1) % steps_in_epoch == 0 and (step - begin_iter) != 0:
# If we are at the end of an epoch
epoch_number = step // steps_in_epoch
if self.verbose:
i = datetime.now()
msg = i.strftime('[%Y/%m/%d %H:%M:%S]: ')
msg += ' Epoch {} / {}'.format(epoch_number + 1, last_epoch)
tqdm.write(msg)
if save_weights:
self.saver.save(self.sess, new_log + '/' + "model.ckpt",
global_step=epoch_number + 1)
dic_train_record = self.infer_train_step(epoch_number, control=to_control)
self.score_recorder.diggest(epoch_number, dic_train_record)
if test_record:
self.sess.run(blank, feed_dict={self.is_training:False})
dic_test_record = self.infer_test_set(epoch_number, test_steps,
during_training=True, control=to_control)
self.sess.run(blank, feed_dict={self.is_training:True})
self.score_recorder.diggest(epoch_number, dic_test_record, train=False)
if self.score_recorder.stop(track_variable, train_set=track_training):
if self.verbose > 0:
tqdm.write('stopping early')
break
if save_best:
self.score_recorder.save_best(track_variable, save_weights, train_set=track_training)
if return_best:
# restoring the best model only works when save_best is set
tqdm.write("restore_best NOT IMPLEMENTED")
return self.score_recorder.all_tables()
| 45.781513 | 198 | 0.523067 | 1,980 | 16,344 | 4.177273 | 0.181313 | 0.017773 | 0.012695 | 0.015959 | 0.276992 | 0.220409 | 0.195865 | 0.185225 | 0.159836 | 0.150647 | 0 | 0.020157 | 0.374694 | 16,344 | 356 | 199 | 45.910112 | 0.789139 | 0.456987 | 0 | 0.108527 | 0 | 0 | 0.040654 | 0.005894 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015504 | false | 0 | 0.031008 | 0 | 0.069767 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9307a12d7ab7ec9787a35aa2cc8ebd65e88b80b2 | 757 | py | Python | examples/mlbasics_learn_to_add.py | JonathanRaiman/dali-cython-stub | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 7 | 2016-06-20T17:50:06.000Z | 2019-12-13T17:27:46.000Z | examples/mlbasics_learn_to_add.py | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 6 | 2015-08-04T07:25:38.000Z | 2015-08-13T22:06:22.000Z | examples/mlbasics_learn_to_add.py | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 2 | 2016-07-04T21:38:14.000Z | 2016-08-31T02:53:19.000Z | from test_dali import Mat, random, MatOps, Graph
num_examples = 100
example_size = 3
iterations = 150
lr = 0.01
X = random.uniform(
0.0,
1.0 / example_size,
size=(num_examples, example_size)
)
ones = Mat.ones((X.shape[1], 1))
Y = X.dot(ones)
X = MatOps.consider_constant(X)
Y = MatOps.consider_constant(Y)
W = random.uniform(-1.0, 1.0, (example_size, 1))
print(repr(W))
for i in range(iterations):
predY = X.dot(W)
error = ((predY - Y) ** 2).sum()
print(repr(error))
# line below can be replaced by simply error.grad()
error.dw += 1
Graph.backward()
# there are much nicer solvers in Dali,
# but here we write out gradient descent
# explicitly
W.w -= W.dw * lr
W.dw = 0
print(repr(W))
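# For comparison, a minimal NumPy sketch of the same explicit gradient-descent
# update (illustrative only; it reuses the constants above but needs nothing
# beyond numpy -- the Dali Mat/Graph machinery is not required for this check):
import numpy as np
Xn = np.random.uniform(0.0, 1.0 / example_size, size=(num_examples, example_size))
Yn = Xn.sum(axis=1, keepdims=True)
Wn = np.random.uniform(-1.0, 1.0, (example_size, 1))
for _ in range(iterations):
    grad = 2.0 * Xn.T.dot(Xn.dot(Wn) - Yn)  # gradient of sum((X W - Y) ** 2) w.r.t. W
    Wn -= lr * grad
print(repr(Wn))  # should also move towards a column of ones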
| 22.264706 | 55 | 0.627477 | 124 | 757 | 3.758065 | 0.5 | 0.094421 | 0.012876 | 0.042918 | 0.060086 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041308 | 0.232497 | 757 | 33 | 56 | 22.939394 | 0.760757 | 0.180978 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
930ea439321e8015beffc7c1651e06f8c268d480 | 1,628 | py | Python | arakat-core/src/pipeline_generator/family_base/Join.py | sopaoglu/arakat | efa32fcc93076801cad24ab850ecdf9048a824e8 | [
"Apache-2.0"
] | 23 | 2018-08-18T17:32:40.000Z | 2021-10-05T22:57:06.000Z | arakat-core/src/pipeline_generator/family_base/Join.py | sopaoglu/arakat | efa32fcc93076801cad24ab850ecdf9048a824e8 | [
"Apache-2.0"
] | 23 | 2018-09-22T08:47:07.000Z | 2021-08-04T07:08:34.000Z | arakat-core/src/pipeline_generator/family_base/Join.py | sopaoglu/arakat | efa32fcc93076801cad24ab850ecdf9048a824e8 | [
"Apache-2.0"
] | 22 | 2018-08-17T10:33:31.000Z | 2021-10-05T22:57:07.000Z | import os
from src.domain.ErrorTypes import ErrorTypes
from src.utils.code_generation import CodeGenerationUtils
from src.validity import IncomingEdgeValidityChecker
# Add other join options as well
# How about join cascades
# Add necessary checks for stream-stream, stream-batch joins...
# Note that for stream-static joins, stream df must be on left.
def generate_code(args):
node = args["node"]
requireds_info = args["requireds_info"]
edges = args["edges"]
    checklist = {"df_count": {2}, "model_count": {0}}
    error, extra = IncomingEdgeValidityChecker.check_validity(node["id"], requireds_info, edges, checklist)
    code = []
    shared_function_set = set()
    if error == ErrorTypes.NO_ERROR:
        df_names = __get_dfs_to_join(extra)
code.extend(["df_" + node["id"] + "=" + df_names[0] + ".join(" + df_names[1] + ", " + CodeGenerationUtils.handle_primitive(node["parameters"]["join_column"]["value"]) + ")", os.linesep])
return code, shared_function_set, error
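# Illustration (hypothetical inputs): for a node with id "n3" whose two
# upstream dataframes are df_n1 and df_n2 and whose join_column parameter is
# "user_id", generate_code emits a line of the form
#     df_n3=df_n1.join(df_n2, "user_id")
# followed by os.linesep; the actual ids and column name come from the graph.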
def __get_dfs_to_join(extra):
    # IncomingEdgeValidityChecker returns a list of df info sorted by the order given by the user.
    # For now, we allow only two dataframes to be joined. However, we can handle a cascade of joins as well.
    # For this purpose, change node-specs of join to get more than one column name to join, and then generate each join-statement code...
    df_names = []
    for elem in extra["dfs"]:
        if "portion" in elem:
            df_names.append("df_" + elem["source_id"] + "[" + str(elem["portion"]) + "]")
        else:
            df_names.append("df_" + elem["source_id"])
return df_names | 42.842105 | 194 | 0.690418 | 225 | 1,628 | 4.831111 | 0.475556 | 0.045078 | 0.033119 | 0.038638 | 0.080957 | 0.049678 | 0.049678 | 0 | 0 | 0 | 0 | 0.003023 | 0.187346 | 1,628 | 38 | 195 | 42.842105 | 0.818594 | 0.30774 | 0 | 0 | 0 | 0 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
930fa7bf77bac97e46a52354c3d1f9d8c81fb05f | 464 | py | Python | examples/python/cpu/tensors/ocean_auto_cast_01.py | kant/ocean-tensor-package | fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d | [
"Apache-2.0"
] | 27 | 2018-08-16T21:32:49.000Z | 2021-11-30T10:31:08.000Z | examples/python/cpu/tensors/ocean_auto_cast_01.py | kant/ocean-tensor-package | fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d | [
"Apache-2.0"
] | null | null | null | examples/python/cpu/tensors/ocean_auto_cast_01.py | kant/ocean-tensor-package | fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d | [
"Apache-2.0"
] | 13 | 2018-08-17T17:33:16.000Z | 2021-11-30T10:31:09.000Z | ## Automatic type cast and broadcast
import pyOcean_cpu as ocean
import sys
def exceptionMsg():
    print("Expected error: %s" % str(sys.exc_info()[1]))
def failTest(command):
    try:
        eval(command)
    except:
        exceptionMsg()
a = ocean.int16([1,2,3])
b = ocean.float([1])
print(a+b)
ocean.setAutoTypecast(False)
print(a+3)
failTest("a+3.2")
print(b+3)
print(b+3.2)
failTest("a+b")
ocean.setAutoBroadcast(False)
print(a+3)
failTest("a+[3]")
| 13.647059 | 55 | 0.657328 | 73 | 464 | 4.150685 | 0.479452 | 0.026403 | 0.046205 | 0.079208 | 0.145215 | 0.145215 | 0.145215 | 0 | 0 | 0 | 0 | 0.03876 | 0.165948 | 464 | 33 | 56 | 14.060606 | 0.744186 | 0.071121 | 0 | 0.095238 | 0 | 0 | 0.0726 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9310e77db1e72d4d632caad266185a2b309d6ee4 | 2,140 | py | Python | examples/stresstest_logger.py | felfel/logging-py | 62e836da8f666286e190dfc1a4428eb04375d08c | [
"MIT"
] | 2 | 2018-08-24T12:45:56.000Z | 2020-02-23T07:59:34.000Z | examples/stresstest_logger.py | felfel/logging-py | 62e836da8f666286e190dfc1a4428eb04375d08c | [
"MIT"
] | 6 | 2018-07-10T11:43:09.000Z | 2018-10-22T11:34:49.000Z | examples/stresstest_logger.py | felfel/logging-py | 62e836da8f666286e190dfc1a4428eb04375d08c | [
"MIT"
] | 1 | 2018-07-13T09:32:58.000Z | 2018-07-13T09:32:58.000Z | from loggingpy import Logger
from loggingpy import BundlingHttpSink
import time
import random
import logging
import sys
from examples import uris # you must provide these uri strings (just any uri that accepts requests.post(...) requests)
if __name__ == "__main__":
# Sumologic token url (just a basic string)
sumoUri = uris.sumoUri
# Logz.io token url (just a basic string)
elasticUri = uris.elasticUri
# these are two sinks of type BatchedHttpSink, which extend the logging.Handler class
sumoSink = BundlingHttpSink('test_app', sumoUri)
elasticSink = BundlingHttpSink('test_app', elasticUri)
# however, you can use basic logging.Handler derived classes together with the ones here
stdoutSink = logging.StreamHandler(sys.stdout)
# configure the logger with a list of handlers to which it pushes the messages
Logger.with_sinks([sumoSink, elasticSink, stdoutSink])
# get logger of context
logger = Logger("Calculator")
# this is just some basic code that generates different types of exceptions and then pushes different messages
try:
for i in range(0, 100000):
try:
div = random.randint(0, 20)
x = 123 / div
if div == 1:
raise Exception("Because I can")
logger.info(payload_type="MathOperation", message='You performed a division', payload={
"OperationType": "Division",
"OperationDetails": {
"Div": div,
"Result": x
}
}
)
except Exception as e:
logger.fatal(message='What the fck just happened???', exception=e, payload_type="CalculationError")
if i % 100 == 0:
time.sleep(random.randint(10, 5000)/1000)
if i % 500 == 0:
print("Got " + str(i))
    except BaseException as e:  # this catch is required in order to shut down the logger properly
pass
print("Flushing logger...")
Logger.flush()
print('...Done.')
| 33.968254 | 119 | 0.606075 | 245 | 2,140 | 5.240816 | 0.55102 | 0.020249 | 0.029595 | 0.020249 | 0.037383 | 0.037383 | 0 | 0 | 0 | 0 | 0 | 0.021754 | 0.312617 | 2,140 | 62 | 120 | 34.516129 | 0.851122 | 0.287383 | 0 | 0.04878 | 0 | 0 | 0.135403 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02439 | 0.170732 | 0 | 0.170732 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
931746dd6e36cf03f12c48ba2eadffc6ec81a1e9 | 903 | py | Python | single-results/parse_file.py | andrewevans0102/sensor-sampling-with-react-recharts | b5491ad30e8bb633c79ea3c6b6c51f70eb197f41 | [
"MIT"
] | null | null | null | single-results/parse_file.py | andrewevans0102/sensor-sampling-with-react-recharts | b5491ad30e8bb633c79ea3c6b6c51f70eb197f41 | [
"MIT"
] | null | null | null | single-results/parse_file.py | andrewevans0102/sensor-sampling-with-react-recharts | b5491ad30e8bb633c79ea3c6b6c51f70eb197f41 | [
"MIT"
] | null | null | null | ## write files originally copied from stack abuse at
## https://stackabuse.com/writing-files-using-python/
## read files originally copied from stack abuse at
## https://stackabuse.com/reading-files-with-python/
def writeOutput(line):
appendFilehandle = open('clean_history.log','a')
appendFilehandle.write(line)
appendFilehandle.close()
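# A with-statement variant of the helper above (sketch, functionally
# equivalent; the context manager closes the handle even if write() raises):
def writeOutputSafe(line):
    with open('clean_history.log', 'a') as handle:
        handle.write(line)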
# define the name of the file to read from
filename = "working_history_daily.log"
# open the file for reading
readFilehandle = open(filename, 'r')
while True:
# read a single line
line = readFilehandle.readline()
if not line:
break
    # cleanup: keep only 03/18/2021 entries so a clean 24-hour block is
    # recorded, dropping sensor error/noise lines
    if ("03/18/2021" in line and "00" in line and "error" not in line and "Checksum" not in line and "DHT" not in line and "buffer" not in line):
writeOutput(line)
# close the pointer to that file
readFilehandle.close() | 33.444444 | 144 | 0.712071 | 130 | 903 | 4.923077 | 0.507692 | 0.05625 | 0.070313 | 0.05625 | 0.171875 | 0.171875 | 0.171875 | 0.171875 | 0.171875 | 0.171875 | 0 | 0.016416 | 0.190476 | 903 | 27 | 145 | 33.444444 | 0.859097 | 0.415282 | 0 | 0 | 0 | 0 | 0.151751 | 0.048638 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93190146b61f7d460199298bec31da8c98e4b313 | 2,528 | py | Python | tests/test_multiple_pool.py | UT-Covid/compartmental_model_case_studies | 324e2c92453c928e64c637d6e6fbe570fb714cdb | [
"BSD-3-Clause-Clear"
] | 1 | 2021-02-04T14:59:32.000Z | 2021-02-04T14:59:32.000Z | tests/test_multiple_pool.py | UT-Covid/compartmental_model_case_studies | 324e2c92453c928e64c637d6e6fbe570fb714cdb | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/test_multiple_pool.py | UT-Covid/compartmental_model_case_studies | 324e2c92453c928e64c637d6e6fbe570fb714cdb | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import sys
import pytest
import pickle
import numpy as np
import xarray as xr
from .pytest_utils import fp, md5sum, call_with_legacy_params, assert_objects_equal
from SEIRcity.simulate.multiple_pool import multiple_pool
from SEIRcity.simulate.multiple_serial import multiple_serial
from SEIRcity.param import aggregate_params_and_data
class TestPool(object):
@pytest.mark.parametrize("legacy_pickle,yaml_fp", [
(fp("tests/data/multiple_serial_result1.pckl"),
fp("tests/data/configs/multiple_reps_single_scenario5.yaml")),
])
def test_mp_can_return_xarray(self, legacy_pickle, yaml_fp, tmp_path):
"""Can generate same outputs as serial version.
"""
assert os.path.isfile(legacy_pickle)
assert os.path.isfile(yaml_fp)
config = aggregate_params_and_data(yaml_fp=yaml_fp)
kwargs = {
'legacy_pickle': legacy_pickle,
'func': multiple_pool
}
kwargs['override_args'] = [config]
legacy_result, new_result = call_with_legacy_params(**kwargs)
assert isinstance(legacy_result.outcomes, xr.DataArray)
assert isinstance(new_result.outcomes, xr.DataArray)
assert legacy_result.outcomes.dims == new_result.outcomes.dims
# print("legacy: ", legacy_result.outcomes)
# print("new: ", new_result.outcomes)
# print("legacy coords: ", legacy_result.outcomes.coords)
# print("new coords: ", new_result.outcomes.coords)
@pytest.mark.slow
@pytest.mark.parametrize("legacy_pickle,yaml_fp", [
(fp("tests/data/multiple_serial_result6.pckl"),
fp("tests/data/configs/multiple_scenario2.yaml"))
])
def test_mp_can_return_xarray_slow(self, legacy_pickle, yaml_fp):
"""same as above but slow"""
assert os.path.isfile(legacy_pickle)
assert os.path.isfile(yaml_fp)
config = aggregate_params_and_data(yaml_fp=yaml_fp)
legacy_result, new_result = call_with_legacy_params(
legacy_pickle=legacy_pickle,
func=multiple_pool,
override_args=[config])
assert isinstance(legacy_result.outcomes, xr.DataArray)
assert isinstance(new_result.outcomes, xr.DataArray)
assert legacy_result.outcomes.dims == new_result.outcomes.dims
# print("legacy: ", legacy_result.outcomes)
# print("new: ", new_result.outcomes)
print("legacy coords: ", legacy_result.outcomes.coords)
print("new coords: ", new_result.outcomes.coords)
| 42.133333 | 83 | 0.697785 | 315 | 2,528 | 5.32381 | 0.234921 | 0.133572 | 0.095408 | 0.042934 | 0.711986 | 0.685748 | 0.64997 | 0.568873 | 0.519976 | 0.519976 | 0 | 0.002478 | 0.201741 | 2,528 | 59 | 84 | 42.847458 | 0.828543 | 0.134098 | 0 | 0.347826 | 0 | 0 | 0.125633 | 0.099402 | 0 | 0 | 0 | 0 | 0.23913 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.282609 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
931a12e506bc76c7a5c3ea40e50db2b8ec6b9495 | 3,668 | py | Python | core/polyaxon/client/client.py | jjasonkal/polyaxon | 8454b29b2b971b965de8a7bf63afdd48f07d6d53 | [
"Apache-2.0"
] | null | null | null | core/polyaxon/client/client.py | jjasonkal/polyaxon | 8454b29b2b971b965de8a7bf63afdd48f07d6d53 | [
"Apache-2.0"
] | null | null | null | core/polyaxon/client/client.py | jjasonkal/polyaxon | 8454b29b2b971b965de8a7bf63afdd48f07d6d53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from polyaxon import settings
from polyaxon.client.transport import Transport
class PolyaxonClient:
def __init__(self, config=None, token=None):
self._config = config or settings.CLIENT_CONFIG
self._config.token = token or settings.AUTH_CONFIG.token
self._transport = None
self.api_client = polyaxon_sdk.ApiClient(
self.config.sdk_config, **self.config.client_header
)
self._projects_v1 = None
self._runs_v1 = None
self._auth_v1 = None
self._users_v1 = None
self._versions_v1 = None
self._agents_v1 = None
self._components_v1 = None
self._models_v1 = None
def reset(self):
self._transport = None
self._projects_v1 = None
self._runs_v1 = None
self._auth_v1 = None
self._users_v1 = None
self._versions_v1 = None
self._agents_v1 = None
self._components_v1 = None
self._models_v1 = None
self.api_client = polyaxon_sdk.ApiClient(
self.config.sdk_config, **self.config.client_header
)
def set_health_check(self, url):
self.transport.set_health_check(url)
def unset_health_check(self, url):
self.transport.unset_health_check(url)
@property
def transport(self):
if not self._transport:
self._transport = Transport(config=self.config)
return self._transport
@property
def config(self):
return self._config
@property
def projects_v1(self):
if not self._projects_v1:
self._projects_v1 = polyaxon_sdk.ProjectsV1Api(self.api_client)
return self._projects_v1
@property
def runs_v1(self):
if not self._runs_v1:
self._runs_v1 = polyaxon_sdk.RunsV1Api(self.api_client)
return self._runs_v1
@property
def auth_v1(self):
if not self._auth_v1:
self._auth_v1 = polyaxon_sdk.AuthV1Api(self.api_client)
return self._auth_v1
@property
def users_v1(self):
if not self._users_v1:
self._users_v1 = polyaxon_sdk.UsersV1Api(self.api_client)
return self._users_v1
@property
def versions_v1(self):
if not self._versions_v1:
self._versions_v1 = polyaxon_sdk.VersionsV1Api(self.api_client)
return self._versions_v1
@property
def agents_v1(self):
if not self._agents_v1:
self._agents_v1 = polyaxon_sdk.AgentsV1Api(self.api_client)
return self._agents_v1
@property
def components_v1(self):
if not self._components_v1:
self._components_v1 = polyaxon_sdk.HubComponentsV1Api(self.api_client)
return self._components_v1
@property
def models_v1(self):
if not self._models_v1:
self._models_v1 = polyaxon_sdk.HubModelsV1Api(self.api_client)
return self._models_v1
def sanitize_for_serialization(self, value):
return self.api_client.sanitize_for_serialization(value)
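if __name__ == "__main__":
    # Minimal usage sketch (assumes settings.CLIENT_CONFIG and
    # settings.AUTH_CONFIG are already populated by the surrounding app):
    client = PolyaxonClient()
    runs_api = client.runs_v1  # lazily instantiates the underlying SDK API
    print(client.sanitize_for_serialization({"run": "uuid-1234"}))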
| 30.065574 | 82 | 0.67121 | 479 | 3,668 | 4.847599 | 0.235908 | 0.062016 | 0.064599 | 0.050388 | 0.335917 | 0.204996 | 0.178295 | 0.178295 | 0.178295 | 0.178295 | 0 | 0.024863 | 0.254362 | 3,668 | 121 | 83 | 30.31405 | 0.824132 | 0.156489 | 0 | 0.372093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174419 | false | 0 | 0.034884 | 0.023256 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
931bcf76748e923ca4a633a3cbeb069fcf538af1 | 1,898 | py | Python | metrics/rabbitmq.py | ONSdigital/ras-rm-metrics | 16931ad6b479b6b30f4ba8934e79d8633ebd8032 | [
"MIT"
] | null | null | null | metrics/rabbitmq.py | ONSdigital/ras-rm-metrics | 16931ad6b479b6b30f4ba8934e79d8633ebd8032 | [
"MIT"
] | 1 | 2018-12-03T11:10:34.000Z | 2018-12-03T11:10:34.000Z | metrics/rabbitmq.py | ONSdigital/ras-rm-metrics | 16931ad6b479b6b30f4ba8934e79d8633ebd8032 | [
"MIT"
] | 2 | 2018-08-23T15:39:25.000Z | 2021-04-11T08:10:53.000Z | import json
import cfenv
import requests
import logging
from structlog import wrap_logger
logger = wrap_logger(logging.getLogger(__name__))
class RabbitMQ:
def __init__(self, cf_service_name):
self.cf_service_name = cf_service_name
self.services = self._get_services_from_cf()
def log_metrics(self):
for service_name, uri in self.services.items():
queues = self._fetch_metrics_for_all_queues(service_name, uri)
for queue_metrics in queues:
self._log_queue_metrics(service_name, queue_metrics)
def _get_services_from_cf(self):
return {s.name: s.credentials['http_api_uri']
for s in cfenv.AppEnv().services
if cfenv.match_all(s.env, {'label': self.cf_service_name})}
def _fetch_metrics_for_all_queues(self, service, uri):
uri = uri.rstrip('/') + '/queues'
response = requests.get(uri)
if response.status_code != requests.codes.ok:
logger.error(
self._service_logger_name(service),
f'GET {uri} - [{response.status_code}] - {response.text}')
return []
return [self._prepare_metrics(service, metrics) for metrics in
response.json()]
def _prepare_metrics(self, service, raw):
return {
'service': service,
'name': raw['name'],
'messages': raw['messages']
}
def _log_queue_metrics(self, service_name, queue_metrics):
logger.info(
queue=self._queue_logger_name(service_name, queue_metrics['name']),
messages=queue_metrics['messages'])
def _queue_logger_name(self, service_name, queue_name):
return f'{self._service_logger_name(service_name)}.{queue_name}'
def _service_logger_name(self, service):
return f'sdc.metrics.{self.cf_service_name}.{service}'
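if __name__ == "__main__":
    # Usage sketch: inside a Cloud Foundry app this discovers every bound
    # service whose label matches and logs per-queue message counts.
    # "p-rabbitmq" is an assumed label; use whatever your marketplace exposes.
    RabbitMQ("p-rabbitmq").log_metrics()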
| 32.169492 | 79 | 0.643836 | 232 | 1,898 | 4.909483 | 0.232759 | 0.125549 | 0.057068 | 0.059701 | 0.122037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25079 | 1,898 | 58 | 80 | 32.724138 | 0.800985 | 0 | 0 | 0 | 0 | 0 | 0.115911 | 0.064278 | 0 | 0 | 0 | 0 | 0 | 1 | 0.186047 | false | 0 | 0.116279 | 0.093023 | 0.465116 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
931d4f107dcc3a3742cb0972921784cc9ef9d306 | 4,026 | py | Python | Resnet/resnet.py | yt4766269/pytorch_zoo | fef877a15c3541771512e9f9489c3023aee20819 | [
"Apache-2.0"
] | 1 | 2021-07-22T02:56:13.000Z | 2021-07-22T02:56:13.000Z | Resnet/resnet.py | yt4766269/pytorch_zoo | fef877a15c3541771512e9f9489c3023aee20819 | [
"Apache-2.0"
] | null | null | null | Resnet/resnet.py | yt4766269/pytorch_zoo | fef877a15c3541771512e9f9489c3023aee20819 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as function
def conv3x3(in_features: int, out_features: int, stride: int = 1) -> nn.Conv2d:
    '''3x3 conv with padding (bias-free, since a BatchNorm layer follows)'''
    return nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, input_planes, planes, stride = 1, downsample = None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_features=input_planes, out_features=planes, stride = stride)
self.bn1 = nn.BatchNorm2d(num_features=planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(in_features=planes, out_features=planes)
self.bn2 = nn.BatchNorm2d(num_features=planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.downsample is not None:
residual = self.downsample(residual)
x = x+residual
out = self.relu(x)
return out
class Bottleneck(nn.Module):
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # keep the spatial stride on the 3x3 conv only; striding both conv1
        # and conv2 would downsample twice and break the residual addition
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(num_features=planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.downsample = downsample
        self.stride = stride
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
if self.downsample is not None:
residual = self.downsample(residual)
x = x + residual
x = self.relu(x)
return x
class Resnet(nn.Module):
    def __init__(self, block: nn.Module, layer_num: list, num_class: int) -> None:
super(Resnet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layer_num[0])
self.layer2 = self._make_layer(block, 128, layer_num[1], stride=2)
self.layer3 = self._make_layer(block, 256, layer_num[2], stride=2)
self.layer4 = self._make_layer(block, 512, layer_num[3], stride=2)
self.avgpool = nn.AvgPool2d(kernel_size=7)
self.fc = nn.Linear(512 * block.expansion, num_class)
    def _make_layer(self, block, planes, blocks, stride=1) -> nn.Module:
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )
        # the first block of a stage carries the stride and the projection
        # shortcut; later blocks start from the expanded channel count
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
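if __name__ == "__main__":
    # Sanity-check sketch: a ResNet-50-style configuration (assumed stage
    # depths [3, 4, 6, 3]) on a single 224x224 RGB input.
    net = Resnet(Bottleneck, [3, 4, 6, 3], num_class=1000)
    dummy = torch.randn(1, 3, 224, 224)
    print(net(dummy).shape)  # expected: torch.Size([1, 1000])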
| 35.946429 | 113 | 0.605812 | 549 | 4,026 | 4.324226 | 0.15847 | 0.021904 | 0.055602 | 0.021062 | 0.393008 | 0.346672 | 0.303707 | 0.259056 | 0.226201 | 0.226201 | 0 | 0.037151 | 0.271237 | 4,026 | 111 | 114 | 36.27027 | 0.771984 | 0.005216 | 0 | 0.414894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.031915 | 0 | 0.223404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932488970519b338525f68c216759facdd4c7e7e | 13,443 | py | Python | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/system/zfs.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 1 | 2020-10-14T00:06:54.000Z | 2020-10-14T00:06:54.000Z | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/system/zfs.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/system/zfs.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 2 | 2015-08-06T07:45:48.000Z | 2017-01-04T17:47:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties.
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume.
required: true
choices: [present, absent]
aclinherit:
description:
- The aclinherit property.
required: False
choices: [discard,noallow,restricted,passthrough,passthrough-x]
aclmode:
description:
- The aclmode property.
required: False
choices: [discard,groupmask,passthrough]
atime:
description:
- The atime property.
required: False
choices: ['on','off']
canmount:
description:
- The canmount property.
required: False
choices: ['on','off','noauto']
casesensitivity:
description:
- The casesensitivity property.
required: False
choices: [sensitive,insensitive,mixed]
checksum:
description:
- The checksum property.
required: False
choices: ['on','off',fletcher2,fletcher4,sha256]
compression:
description:
- The compression property.
required: False
choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle]
copies:
description:
- The copies property.
required: False
choices: [1,2,3]
dedup:
description:
- The dedup property.
required: False
choices: ['on','off']
devices:
description:
- The devices property.
required: False
choices: ['on','off']
exec:
description:
- The exec property.
required: False
choices: ['on','off']
jailed:
description:
- The jailed property.
required: False
choices: ['on','off']
logbias:
description:
- The logbias property.
required: False
choices: [latency,throughput]
mountpoint:
description:
- The mountpoint property.
required: False
nbmand:
description:
- The nbmand property.
required: False
choices: ['on','off']
normalization:
description:
- The normalization property.
required: False
choices: [none,formC,formD,formKC,formKD]
primarycache:
description:
- The primarycache property.
required: False
choices: [all,none,metadata]
quota:
description:
- The quota property.
required: False
readonly:
description:
- The readonly property.
required: False
choices: ['on','off']
recordsize:
description:
- The recordsize property.
required: False
refquota:
description:
- The refquota property.
required: False
refreservation:
description:
- The refreservation property.
required: False
reservation:
description:
- The reservation property.
required: False
secondarycache:
description:
- The secondarycache property.
required: False
choices: [all,none,metadata]
setuid:
description:
- The setuid property.
required: False
choices: ['on','off']
shareiscsi:
description:
- The shareiscsi property.
required: False
choices: ['on','off']
sharenfs:
description:
- The sharenfs property.
required: False
sharesmb:
description:
- The sharesmb property.
required: False
snapdir:
description:
- The snapdir property.
required: False
choices: [hidden,visible]
sync:
description:
- The sync property.
required: False
choices: ['on','off']
utf8only:
description:
- The utf8only property.
required: False
choices: ['on','off']
volsize:
description:
- The volsize property.
required: False
volblocksize:
description:
- The volblocksize property.
required: False
vscan:
description:
- The vscan property.
required: False
choices: ['on','off']
xattr:
description:
- The xattr property.
required: False
choices: ['on','off']
zoned:
description:
- The zoned property.
required: False
choices: ['on','off']
author: Johan Wiren
'''
EXAMPLES = '''
# Create a new file system called myfs in pool rpool
- zfs: name=rpool/myfs state=present
# Create a new volume called myvol in pool rpool.
- zfs: name=rpool/myvol state=present volsize=10M
# Create a snapshot of rpool/myfs file system.
- zfs: name=rpool/myfs@mysnapshot state=present
# Create a new file system called myfs2 with the snapshot directory visible
- zfs: name=rpool/myfs2 state=present snapdir=visible
'''
import os
class Zfs(object):
def __init__(self, module, name, properties):
self.module = module
self.name = name
self.properties = properties
self.changed = False
self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ]
def exists(self):
cmd = [self.module.get_bin_path('zfs', True)]
cmd.append('list')
cmd.append('-t all')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create(self):
if self.module.check_mode:
self.changed = True
return
properties=self.properties
volsize = properties.pop('volsize', None)
volblocksize = properties.pop('volblocksize', None)
if "@" in self.name:
action = 'snapshot'
else:
action = 'create'
cmd = [self.module.get_bin_path('zfs', True)]
cmd.append(action)
if volblocksize:
cmd.append('-b %s' % volblocksize)
if properties:
for prop, value in properties.iteritems():
cmd.append('-o %s="%s"' % (prop, value))
if volsize:
cmd.append('-V')
cmd.append(volsize)
cmd.append(self.name)
        # AnsibleModule.run_command returns (rc, stdout, stderr)
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
def destroy(self):
if self.module.check_mode:
self.changed = True
return
cmd = [self.module.get_bin_path('zfs', True)]
cmd.append('destroy')
cmd.append(self.name)
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
def set_property(self, prop, value):
if self.module.check_mode:
self.changed = True
return
cmd = self.module.get_bin_path('zfs', True)
args = [cmd, 'set', prop + '=' + value, self.name]
        (rc, out, err) = self.module.run_command(args)
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
def set_properties_if_changed(self):
current_properties = self.get_current_properties()
for prop, value in self.properties.iteritems():
if current_properties[prop] != value:
if prop in self.immutable_properties:
self.module.fail_json(msg='Cannot change property %s after creation.' % prop)
else:
self.set_property(prop, value)
def get_current_properties(self):
def get_properties_by_name(propname):
cmd = [self.module.get_bin_path('zfs', True)]
cmd += ['get', '-H', propname, self.name]
rc, out, err = self.module.run_command(cmd)
return [l.split('\t')[1:3] for l in out.splitlines()]
properties = dict(get_properties_by_name('all'))
if 'share.*' in properties:
# Some ZFS pools list the sharenfs and sharesmb properties
# hierarchically as share.nfs and share.smb respectively.
del properties['share.*']
for p, v in get_properties_by_name('share.all'):
alias = p.replace('.', '') # share.nfs -> sharenfs (etc)
properties[alias] = v
return properties
def run_command(self, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
def main():
# FIXME: should use dict() constructor like other modules, required=False is default
module = AnsibleModule(
argument_spec = {
'name': {'required': True},
'state': {'required': True, 'choices':['present', 'absent']},
'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']},
'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']},
'atime': {'required': False, 'choices':['on', 'off']},
'canmount': {'required': False, 'choices':['on', 'off', 'noauto']},
'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']},
'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']},
'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']},
'copies': {'required': False, 'choices':['1', '2', '3']},
'dedup': {'required': False, 'choices':['on', 'off']},
'devices': {'required': False, 'choices':['on', 'off']},
'exec': {'required': False, 'choices':['on', 'off']},
# Not supported
#'groupquota': {'required': False},
'jailed': {'required': False, 'choices':['on', 'off']},
'logbias': {'required': False, 'choices':['latency', 'throughput']},
'mountpoint': {'required': False},
'nbmand': {'required': False, 'choices':['on', 'off']},
'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']},
'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']},
'quota': {'required': False},
'readonly': {'required': False, 'choices':['on', 'off']},
'recordsize': {'required': False},
'refquota': {'required': False},
'refreservation': {'required': False},
'reservation': {'required': False},
'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']},
'setuid': {'required': False, 'choices':['on', 'off']},
'shareiscsi': {'required': False, 'choices':['on', 'off']},
'sharenfs': {'required': False},
'sharesmb': {'required': False},
'snapdir': {'required': False, 'choices':['hidden', 'visible']},
'sync': {'required': False, 'choices':['on', 'off']},
# Not supported
#'userquota': {'required': False},
'utf8only': {'required': False, 'choices':['on', 'off']},
'volsize': {'required': False},
'volblocksize': {'required': False},
'vscan': {'required': False, 'choices':['on', 'off']},
'xattr': {'required': False, 'choices':['on', 'off']},
'zoned': {'required': False, 'choices':['on', 'off']},
},
supports_check_mode=True
)
state = module.params.pop('state')
name = module.params.pop('name')
# Get all valid zfs-properties
properties = dict()
for prop, value in module.params.iteritems():
if prop in ['CHECKMODE']:
continue
if value:
properties[prop] = value
result = {}
result['name'] = name
result['state'] = state
zfs=Zfs(module, name, properties)
if state == 'present':
if zfs.exists():
zfs.set_properties_if_changed()
else:
zfs.create()
elif state == 'absent':
if zfs.exists():
zfs.destroy()
result.update(zfs.properties)
result['changed'] = zfs.changed
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| 32.160287 | 196 | 0.575839 | 1,424 | 13,443 | 5.392556 | 0.206461 | 0.12697 | 0.135434 | 0.097409 | 0.447584 | 0.417502 | 0.243652 | 0.164214 | 0.164214 | 0.113556 | 0 | 0.006348 | 0.285204 | 13,443 | 417 | 197 | 32.23741 | 0.792798 | 0.081306 | 0 | 0.365651 | 0 | 0.00831 | 0.502232 | 0.026378 | 0 | 0 | 0 | 0.002398 | 0 | 1 | 0.027701 | false | 0.01108 | 0.00554 | 0 | 0.058172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93257115dcc4e6c8454010d2189516afc8ea523a | 4,491 | py | Python | stlearn/plotting/deconvolution_plot.py | duypham2108/dev_st | 47adcfa5803eba7549b1185ec69d2317b386d9ff | [
"BSD-3-Clause"
] | null | null | null | stlearn/plotting/deconvolution_plot.py | duypham2108/dev_st | 47adcfa5803eba7549b1185ec69d2317b386d9ff | [
"BSD-3-Clause"
] | null | null | null | stlearn/plotting/deconvolution_plot.py | duypham2108/dev_st | 47adcfa5803eba7549b1185ec69d2317b386d9ff | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional, Union
from anndata import AnnData
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
import numpy as np
def deconvolution_plot(
adata: AnnData,
library_id: str = None,
use_label: str = "louvain",
cluster: [int, str] = None,
celltype: str = None,
celltype_threshold: float = 0,
data_alpha: float = 1.0,
threshold: float = 0.0,
cmap: str = "tab20",
tissue_alpha: float = 1.0,
title: str = None,
spot_size: Union[float, int] = 10,
show_axis: bool = False,
show_legend: bool = True,
cropped: bool = True,
margin: int = 100,
name: str = None,
dpi: int = 150,
output: str = None,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Clustering plot for sptial transcriptomics data. Also it has a function to display trajectory inference.
Parameters
----------
adata
Annotated data matrix.
library_id
Library id stored in AnnData.
use_label
Use label result of clustering method.
    cluster
        Restrict the plot to spots assigned to this cluster. If None, all spots are used.
data_alpha
Opacity of the spot.
tissue_alpha
Opacity of the tissue.
cmap
Color map to use.
spot_size
Size of the spot.
show_axis
Show axis or not.
show_legend
Show legend or not.
    cropped
        Whether to crop the plot to the bounding box of the spots plus ``margin``.
    margin
        Margin (in pixels) kept around the spots when cropping.
name
Name of the output figure file.
dpi
DPI of the output figure.
output
Save the figure as file or not.
copy
Return a copy instead of writing to adata.
Returns
-------
Nothing
"""
# plt.rcParams['figure.dpi'] = dpi
imagecol = adata.obs["imagecol"]
imagerow = adata.obs["imagerow"]
fig, ax = plt.subplots()
label = adata.obsm["deconvolution"].T
tmp = label.sum(axis=1)
label_filter = label.loc[tmp[tmp >= np.quantile(tmp, threshold)].index]
if cluster is not None:
base = adata.obs[adata.obs[use_label] == str(cluster)][["imagecol", "imagerow"]]
else:
base = adata.obs[["imagecol", "imagerow"]]
if celltype is not None:
base = base.loc[
adata.obs_names[adata.obsm["deconvolution"][celltype] > celltype_threshold]
]
label_filter_ = label_filter[base.index]
color_vals = list(range(0, len(label_filter_), 1))
my_norm = mpl.colors.Normalize(0, len(label_filter_))
my_cmap = mpl.cm.get_cmap(cmap, len(color_vals))
for i, xy in enumerate(base.values):
_ = ax.pie(
label_filter_.T.iloc[i].values,
colors=my_cmap.colors,
center=(xy[0], xy[1]),
radius=spot_size,
frame=True,
)
ax.autoscale()
if library_id is None:
library_id = list(adata.uns["spatial"].keys())[0]
image = adata.uns["spatial"][library_id]["images"][
adata.uns["spatial"][library_id]["use_quality"]
]
ax_pie = fig.add_axes([0.5, -0.4, 0.03, 0.5])
def my_autopct(pct):
return ("%1.0f%%" % pct) if pct >= 4 else ""
ax_pie.pie(
label_filter_.sum(axis=1),
colors=my_cmap.colors,
radius=5,
frame=True,
autopct=my_autopct,
pctdistance=1.1,
startangle=90,
wedgeprops=dict(width=(2), edgecolor="w", antialiased=True),
textprops={"fontsize": 5},
)
ax_pie.set_axis_off()
ax_cb = fig.add_axes([0.9, 0.25, 0.03, 0.5], axisbelow=False)
cb = mpl.colorbar.ColorbarBase(ax_cb, cmap=my_cmap, norm=my_norm, ticks=color_vals)
cb.ax.tick_params(size=0)
loc = np.array(color_vals) + 0.5
cb.set_ticks(loc)
cb.set_ticklabels(label_filter_.index)
cb.outline.set_visible(False)
# Overlay the tissue image
ax.imshow(
image,
alpha=1,
zorder=-1,
)
ax.axis("off")
if cropped:
ax.set_xlim(imagecol.min() - margin, imagecol.max() + margin)
ax.set_ylim(imagerow.min() - margin, imagerow.max() + margin)
ax.set_ylim(ax.get_ylim()[::-1])
# plt.gca().invert_yaxis()
if name is None:
name = use_label
if output is not None:
fig.savefig(output + "/" + name, dpi=dpi, bbox_inches="tight", pad_inches=0)
plt.show()
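# Example call (sketch): assumes `adata` was produced upstream by the stLearn
# pipeline and already carries obsm["deconvolution"], obs[use_label] and
# uns["spatial"]; the argument values below are placeholders.
#
# deconvolution_plot(adata, use_label="louvain", cluster=3,
#                    threshold=0.1, output=".", name="deconv.png")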
| 26.110465 | 108 | 0.609441 | 608 | 4,491 | 4.373355 | 0.320724 | 0.033095 | 0.010154 | 0.009026 | 0.060925 | 0.029334 | 0.029334 | 0 | 0 | 0 | 0 | 0.018643 | 0.271432 | 4,491 | 171 | 109 | 26.263158 | 0.79401 | 0.226898 | 0 | 0.041237 | 0 | 0 | 0.045083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020619 | false | 0 | 0.061856 | 0.010309 | 0.092784 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9326eba3e2a32b8391b62c5d9ad30ac89d990e0b | 5,536 | py | Python | g2p_train/train_and_export.py | gitter-badger/TensorVox | a87a326249e5e31f15c25c41458df01638fd5fd0 | [
"MIT"
] | 132 | 2020-08-14T04:37:23.000Z | 2022-03-30T04:49:58.000Z | g2p_train/train_and_export.py | gitter-badger/TensorVox | a87a326249e5e31f15c25c41458df01638fd5fd0 | [
"MIT"
] | 7 | 2020-11-19T03:55:14.000Z | 2022-03-18T00:54:58.000Z | g2p_train/train_and_export.py | gitter-badger/TensorVox | a87a326249e5e31f15c25c41458df01638fd5fd0 | [
"MIT"
] | 14 | 2020-08-16T10:25:14.000Z | 2021-12-21T06:32:09.000Z | from tqdm import tqdm
import os
import argparse
import tensorflow as tf
import yaml
import shutil
global_max = 0
cumodel = None
def safemkdir(dirn):
if not os.path.isdir(dirn):
os.mkdir(dirn)
def preprocess(in_fname,char_phn_tok):
words = list()
phn = list()
print("Opening file...")
with open(in_fname,"r",encoding="utf-8") as f:
for li in tqdm(f.readlines()):
spl = li.strip().split("\t")
if len(spl) > 1:
words.append(spl[0].lower()) #convert to lowercase for re-exporting later
phn.append(spl[1])
if char_phn_tok:
print("Tokenizing phoneme strings in char level too")
phntok = tf.keras.preprocessing.text.Tokenizer(lower=False,filters='"\t\n',char_level=char_phn_tok)
txttok = tf.keras.preprocessing.text.Tokenizer(char_level=True)
print("Fitting on texts...")
phntok.fit_on_texts(phn)
txttok.fit_on_texts(words)
print("Converting to sequences")
txtseqs = txttok.texts_to_sequences(words)
phnseqs = phntok.texts_to_sequences(phn)
txt_max = len(max(txtseqs, key=len))
phn_max = len(max(phnseqs,key=len))
global global_max
global_max = max(txt_max,phn_max)
print("Common padding index is " + str(global_max))
txtpadded = tf.keras.preprocessing.sequence.pad_sequences(txtseqs,padding="post",maxlen=global_max)
phnpadded = tf.keras.preprocessing.sequence.pad_sequences(phnseqs,padding="post",maxlen=global_max)
txtsize = len(txttok.word_index)
phnsize = len(phntok.word_index)
return txtpadded, phnpadded, txtsize, phnsize, phntok.word_index, txttok.word_index, words, phn
def getmodel(input_shape, in_vocab_size, out_vocab_size,gru_size,in_lr):
model = tf.keras.models.Sequential([tf.keras.layers.Embedding(in_vocab_size, gru_size, input_length=input_shape[1], input_shape=input_shape[1:]),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru_size,input_shape=input_shape[1:],return_sequences=True)),
tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1024,activation="relu")),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(out_vocab_size,activation="softmax"))])
model.compile(loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(in_lr),
metrics=['accuracy'])
return model
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec([None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec([1,], dtype=tf.int32, name="input_len"),
tf.TensorSpec([1,], dtype=tf.float32, name="input_temperature"),
],
)
def callg2p(input_ids,input_len,input_temperature):
#Generate padding
pad = tf.zeros([global_max - input_len[0]],dtype=tf.int32)
#Add padding to input_ids and reshape
input_ids = tf.concat([input_ids,pad],0)
input_ids = tf.reshape(input_ids,[-1,global_max])
#Predict
pred = cumodel(input_ids)
    # Apply temperature. Note that tf.random.categorical below interprets its
    # input as unnormalized log-probabilities, so this divides the softmax
    # outputs rather than logits.
    predx = tf.squeeze(pred, 0)
    predx /= input_temperature
#Select IDs
retids = tf.random.categorical(predx, 1)
#Remove padding
bool_mask = tf.not_equal(retids, 0)
phn_ids = tf.boolean_mask(retids, bool_mask)
return tf.cast(phn_ids,tf.int32)
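# Sketch (illustrative helper, never called by training): shows how the
# temperature divide in callg2p reshapes what tf.random.categorical samples;
# tf.random.categorical weights class i proportionally to exp(value_i).
def _temperature_demo(values, temperature=1.0, num_samples=5):
    scaled = tf.convert_to_tensor([values], tf.float32) / temperature
    return tf.random.categorical(scaled, num_samples)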
def exportdict(indict, outf):
    f = open(outf, "w", encoding="utf-8")
    for de in indict:
        f.write(de + "\t" + str(indict[de]) + "\n")
    f.close()
def export_model(folname,in_model,in_phnwi,in_charwi):
safemkdir(folname)
exportdict(in_phnwi,os.path.join(folname,"phn2id.txt"))
exportdict(in_charwi,os.path.join(folname,"char2id.txt"))
print("Exporting model...")
in_model.save(os.path.join(folname,"model"),save_format="tf",signatures=callg2p)
def main():
parser = argparse.ArgumentParser(description="Train and export a G2P model")
parser.add_argument(
"--config-path",
default="config/default.yaml",
type=str,
help="Path of config",
)
parser.add_argument(
"--dict-path",
default="dict.txt",
type=str,
help="Path of dictionary",
)
parser.add_argument(
"--out-path",
default="model1",
type=str,
help="Output path of model",
)
parser.add_argument(
"--char-tok-phn",
action="store_true",
help="Whether to tokenize phoneme strings by char. Turn this on if using IPA or some other phoneme with no spaces inbetween",
)
args = parser.parse_args()
txtpadded, phnpadded, txtsize, phnsize, phn_wi, txt_wi, words, phns = preprocess(args.dict_path,args.char_tok_phn)
yf = open(args.config_path,"r")
    config = yaml.safe_load(yf)
yf.close()
print("Finished preprocessing. Getting model")
global cumodel
cumodel = getmodel(txtpadded.shape,txtsize + 1,phnsize + 1,config["gru_dims"],config["learning_rate"])
x_train = txtpadded
y_train = phnpadded
print("Starting training...")
cumodel.fit(x_train, y_train, batch_size=config["batch_size"], epochs=config["epochs"],validation_split=config["val_per"])
print("Starting export...")
export_model(args.out_path,cumodel,phn_wi,txt_wi)
print("Re-exporting dict...")
outdict = open(os.path.join(args.out_path,"dict.txt"),"w",encoding="utf-8")
for idx, w in enumerate(words):
outdict.write(w + "\t" + phns[idx] + "\n")
outdict.close()
print("Done!")
if __name__ == "__main__":
main()
| 29.291005 | 151 | 0.665643 | 755 | 5,536 | 4.715232 | 0.313907 | 0.027528 | 0.029213 | 0.014326 | 0.123876 | 0.048315 | 0.025843 | 0.025843 | 0 | 0 | 0 | 0.009232 | 0.197796 | 5,536 | 188 | 152 | 29.446809 | 0.792389 | 0.025831 | 0 | 0.054688 | 0 | 0.007813 | 0.137604 | 0.005757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054688 | false | 0 | 0.046875 | 0 | 0.125 | 0.085938 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9328f45eec569820bd8aed41540890adb6c0a93e | 1,299 | py | Python | e2xgrader/preprocessors/overwritecells.py | mhwasil/e2xgrader | 14c57c0b8e4bd7a689a9f98066c700b83818e954 | [
"MIT"
] | null | null | null | e2xgrader/preprocessors/overwritecells.py | mhwasil/e2xgrader | 14c57c0b8e4bd7a689a9f98066c700b83818e954 | [
"MIT"
] | null | null | null | e2xgrader/preprocessors/overwritecells.py | mhwasil/e2xgrader | 14c57c0b8e4bd7a689a9f98066c700b83818e954 | [
"MIT"
] | null | null | null | import json
from nbformat.notebooknode import NotebookNode
from nbconvert.exporters.exporter import ResourcesDict
from typing import Tuple
from nbgrader.preprocessors import OverwriteCells as NbgraderOverwriteCells
from nbgrader.api import MissingEntry  # raised by the gradebook lookup below
from ..utils.extra_cells import is_extra_cell
class OverwriteCells(NbgraderOverwriteCells):
def preprocess_cell(self,
cell: NotebookNode,
resources: ResourcesDict,
cell_index: int
) -> Tuple[NotebookNode, ResourcesDict]:
if not is_extra_cell(cell):
return super().preprocess_cell(cell, resources, cell_index)
grade_id = cell.metadata.get('nbgrader', {}).get('grade_id', None)
if grade_id is None:
return cell, resources
try:
source_cell = self.gradebook.find_source_cell(
grade_id,
self.notebook_id,
self.assignment_id
)
except MissingEntry:
self.log.warning(f'Cell {grade_id} does not exist in database')
del cell.metadata.nbgrader['grade_id']
return cell, resources
cell.metadata.extended_cell.source = json.loads(source_cell.source)
return cell, resources | 35.108108 | 75 | 0.617398 | 133 | 1,299 | 5.864662 | 0.413534 | 0.053846 | 0.073077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.315627 | 1,299 | 37 | 76 | 35.108108 | 0.87739 | 0 | 0 | 0.103448 | 0 | 0 | 0.050769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932b0148fc6f49141d5f5ef655554fa68bc9e82e | 1,462 | py | Python | qtvscodestyle/examples/widget_gallery/ui/dock.py | Greatness7/QtVSCodeStyle | 2654ca967c7ae5db3ce3fb46657ace9f1104f6b9 | [
"MIT"
] | 8 | 2021-10-04T00:21:25.000Z | 2022-03-14T19:57:03.000Z | qtvscodestyle/examples/widget_gallery/ui/dock.py | Greatness7/QtVSCodeStyle | 2654ca967c7ae5db3ce3fb46657ace9f1104f6b9 | [
"MIT"
] | null | null | null | qtvscodestyle/examples/widget_gallery/ui/dock.py | Greatness7/QtVSCodeStyle | 2654ca967c7ae5db3ce3fb46657ace9f1104f6b9 | [
"MIT"
] | 3 | 2021-11-15T23:58:33.000Z | 2022-02-01T18:50:01.000Z | from qtvscodestyle.qtpy.QtCore import Qt
from qtvscodestyle.qtpy.QtWidgets import QDockWidget, QMainWindow, QTextEdit
class DockUI:
def _setup_ui(self, main_win: QMainWindow) -> None:
        # Create the dock widgets
left_dock = QDockWidget("Left dock")
right_dock = QDockWidget("Right dock")
top_dock = QDockWidget("Top dock")
bottom_dock = QDockWidget("Bottom dock")
docks = [left_dock, right_dock, top_dock, bottom_dock]
# Setup ui
left_dock.setWidget(QTextEdit("This is the left widget."))
right_dock.setWidget(QTextEdit("This is the right widget."))
top_dock.setWidget(QTextEdit("This is the top widget."))
bottom_dock.setWidget(QTextEdit("This is the bottom widget."))
for dock in docks:
dock.setAllowedAreas(
Qt.DockWidgetArea.LeftDockWidgetArea
| Qt.DockWidgetArea.RightDockWidgetArea
| Qt.DockWidgetArea.BottomDockWidgetArea
| Qt.DockWidgetArea.TopDockWidgetArea
)
# Layout
main_win.setCentralWidget(QTextEdit("This is the central widget."))
main_win.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, left_dock)
main_win.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, right_dock)
main_win.addDockWidget(Qt.DockWidgetArea.TopDockWidgetArea, top_dock)
main_win.addDockWidget(Qt.DockWidgetArea.BottomDockWidgetArea, bottom_dock)
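if __name__ == "__main__":
    # Minimal usage sketch (assumes the qtvscodestyle.qtpy shim re-exports
    # QApplication like qtpy does; exec() is the Qt6-style spelling):
    from qtvscodestyle.qtpy.QtWidgets import QApplication
    app = QApplication([])
    window = QMainWindow()
    DockUI()._setup_ui(window)
    window.show()
    app.exec()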
| 44.30303 | 83 | 0.688782 | 152 | 1,462 | 6.467105 | 0.282895 | 0.130214 | 0.076297 | 0.091556 | 0.284842 | 0.24822 | 0 | 0 | 0 | 0 | 0 | 0 | 0.22777 | 1,462 | 32 | 84 | 45.6875 | 0.870682 | 0.0171 | 0 | 0 | 0 | 0 | 0.113747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.08 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932c3dccec809da71964fb1b9ece1362885627e8 | 7,563 | py | Python | interpretdl/interpreter/gradient_cam.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | 1 | 2021-03-11T02:38:51.000Z | 2021-03-11T02:38:51.000Z | interpretdl/interpreter/gradient_cam.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | null | null | null | interpretdl/interpreter/gradient_cam.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | null | null | null | import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import os, sys
from PIL import Image
from .abc_interpreter import Interpreter
from ..data_processor.readers import preprocess_image, read_image, restore_image, preprocess_inputs
from ..data_processor.visualizer import visualize_heatmap
class GradCAMInterpreter(Interpreter):
"""
Gradient CAM Interpreter.
More details regarding the GradCAM method can be found in the original paper:
https://arxiv.org/abs/1610.02391
"""
def __init__(self,
paddle_model,
trained_model_path,
use_cuda=True,
model_input_shape=[3, 224, 224]) -> None:
"""
Initialize the GradCAMInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
use_cuda (bool, optional): Whether or not to use cuda. Default: True
model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.use_cuda = use_cuda
self.model_input_shape = model_input_shape
self.paddle_prepared = False
def interpret(self,
inputs,
target_layer_name,
labels=None,
visual=True,
save_path=None):
"""
Main function of the interpreter.
Args:
inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.
target_layer_name (str): The target layer to calculate gradients.
labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None
visual (bool, optional): Whether or not to visualize the processed image. Default: True
save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. Default: None
:return: interpretations/heatmap for each image
:rtype: numpy.ndarray
Example::
import interpretdl as it
def paddle_model(data):
import paddle.fluid as fluid
class_num = 1000
model = ResNet50()
                logits = model.net(input=data, class_dim=class_num)
probs = fluid.layers.softmax(logits, axis=-1)
return probs
            gradcam = it.GradCAMInterpreter(paddle_model, "assets/ResNet50_pretrained", True)
gradcam.interpret(
'assets/catdog.png',
'res5c.add.output.5.tmp_0',
                labels=None,
visual=True,
save_path='assets/gradcam_test.jpg')
"""
imgs, data, save_path = preprocess_inputs(inputs, save_path,
self.model_input_shape)
self.target_layer_name = target_layer_name
if not self.paddle_prepared:
self._paddle_prepare()
bsz = len(data)
if labels is None:
_, _, out = self.predict_fn(
data, np.zeros(
(bsz, 1), dtype='int64'))
labels = np.argmax(out, axis=1)
labels = np.array(labels).reshape((bsz, 1))
feature_map, gradients, _ = self.predict_fn(data, labels)
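        # Grad-CAM recap: the weight of each feature-map channel is the
        # spatial mean of its gradients (global average pooling over H, W),
        # and the heatmap is ReLU(sum_k w_k * A_k), normalized per image below.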
f = np.array(feature_map)
g = np.array(gradients)
mean_g = np.mean(g, (2, 3))
heatmap = f.transpose([0, 2, 3, 1])
dim_array = np.ones((1, heatmap.ndim), int).ravel()
dim_array[heatmap.ndim - 1] = -1
dim_array[0] = bsz
heatmap = heatmap * mean_g.reshape(dim_array)
heatmap = np.mean(heatmap, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap_max = np.max(heatmap, axis=tuple(np.arange(1, heatmap.ndim)))
heatmap /= heatmap_max.reshape((bsz, ) + (1, ) * (heatmap.ndim - 1))
for i in range(bsz):
visualize_heatmap(heatmap[i], imgs[i], visual, save_path[i])
return heatmap
def _paddle_prepare(self, predict_fn=None):
if predict_fn is None:
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_prog):
with fluid.unique_name.guard():
image_op = fluid.data(
name='image',
shape=[None] + self.model_input_shape,
dtype='float32')
label_op = fluid.layers.data(
name='label', shape=[None, 1], dtype='int64')
probs = self.paddle_model(image_op)
if isinstance(probs, tuple):
probs = probs[0]
# manually switch the model to test mode
for op in main_program.global_block().ops:
if op.type == 'batch_norm':
op._set_attr('use_global_stats', True)
elif op.type == 'dropout':
op._set_attr('dropout_prob', 0.0)
# fetch the target layer
trainable_vars = list(main_program.list_vars())
for v in trainable_vars:
if v.name == self.target_layer_name:
conv = v
class_num = probs.shape[-1]
one_hot = fluid.layers.one_hot(label_op, class_num)
one_hot = fluid.layers.elementwise_mul(probs, one_hot)
target_category_loss = fluid.layers.reduce_sum(
one_hot, dim=1)
# target_category_loss = - fluid.layers.cross_entropy(probs, label_op)[0]
                    # add back-propagation
p_g_list = fluid.backward.append_backward(
target_category_loss)
# calculate the gradients w.r.t. the target layer
gradients_map = fluid.gradients(target_category_loss,
conv)[0]
if self.use_cuda:
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
else:
place = fluid.CPUPlace()
exe = fluid.Executor(place)
fluid.io.load_persistables(exe, self.trained_model_path,
main_program)
def predict_fn(data, labels):
feature_map, gradients, out = exe.run(
main_program,
feed={'image': data,
'label': labels},
fetch_list=[conv, gradients_map, probs])
return feature_map, gradients, out
self.predict_fn = predict_fn
self.paddle_prepared = True
| 40.44385 | 228 | 0.553087 | 861 | 7,563 | 4.672474 | 0.289199 | 0.021874 | 0.022371 | 0.014169 | 0.058663 | 0.021377 | 0 | 0 | 0 | 0 | 0 | 0.014146 | 0.364406 | 7,563 | 186 | 229 | 40.66129 | 0.822758 | 0.293005 | 0 | 0 | 0 | 0 | 0.020132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038095 | false | 0 | 0.085714 | 0 | 0.152381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932c64f215335ed9010eadefdd112aeb33d21c55 | 740 | py | Python | examples/ec2_role_example/ec2_role_example.py | YuvalShaul/easyawslib | 59284f8b408ccb5a1846f6c2a2982a0b7a5e28dd | [
"MIT"
] | null | null | null | examples/ec2_role_example/ec2_role_example.py | YuvalShaul/easyawslib | 59284f8b408ccb5a1846f6c2a2982a0b7a5e28dd | [
"MIT"
] | null | null | null | examples/ec2_role_example/ec2_role_example.py | YuvalShaul/easyawslib | 59284f8b408ccb5a1846f6c2a2982a0b7a5e28dd | [
"MIT"
] | 1 | 2021-04-13T10:39:16.000Z | 2021-04-13T10:39:16.000Z | from easyaws.ec2_vm import Ec2Tool
from easyaws.s3_bucket import S3Bucket
def get_metadata_creds():
creds = Ec2Tool.get_credentials()
print('metadata credentials:', creds)
return creds
def get_metadata_role_arn():
role_arn = Ec2Tool.get_role()
print('role arn: ', role_arn)
return role_arn
def list_s3_buckets():
my_s3 = S3Bucket(bucket_name='my-first-bucket-84629694625')
ans = my_s3.s3_client.list_buckets()
print(ans)
return ans
def do_all():
region = 'us-east-1'
aws_access_key_id, aws_secret_access_key, token = get_metadata_creds()
role_arn = get_metadata_role_arn()
try:
list_s3_buckets()
except Exception as e:
print('Creds not good!!!', e)
do_all() | 23.870968 | 74 | 0.698649 | 108 | 740 | 4.462963 | 0.416667 | 0.10166 | 0.058091 | 0.074689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040404 | 0.197297 | 740 | 31 | 75 | 23.870968 | 0.771044 | 0 | 0 | 0 | 0 | 0 | 0.11336 | 0.036437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.375 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932e61c9520a7b1646e51a8e609370789b6fc95a | 972 | py | Python | utils/set_user_to_trusted.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
] | 2 | 2020-05-07T13:58:31.000Z | 2021-01-27T09:33:07.000Z | utils/set_user_to_trusted.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
] | 47 | 2019-09-17T19:20:07.000Z | 2022-03-20T12:33:28.000Z | utils/set_user_to_trusted.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
] | 1 | 2019-10-02T10:42:35.000Z | 2019-10-02T10:42:35.000Z | import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import argparse
from app.db import User
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--database', type=str, required=True, help="Database.")
parser.add_argument('-e', '--email', type=str, required=True, help="Email of user.")
args = parser.parse_args()
return args
def main():
args = parseargs()
database_url = 'sqlite:///' + args.database
engine = create_engine(database_url, convert_unicode=True, connect_args={'check_same_thread': False})
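    # Note: convert_unicode is a legacy SQLAlchemy flag (deprecated in 1.3 and
    # removed in 1.4); on a modern SQLAlchemy this argument should be dropped.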
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
user = db_session.query(User).filter(User.email == args.email).first()
user.trusted = 1
db_session.commit()
if __name__ == '__main__':
sys.exit(main())
| 30.375 | 105 | 0.646091 | 113 | 972 | 5.345133 | 0.486726 | 0.044702 | 0.082781 | 0.062914 | 0.076159 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001337 | 0.230453 | 972 | 31 | 106 | 31.354839 | 0.80615 | 0 | 0 | 0 | 0 | 0 | 0.081276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932e660d4a56e07ffab941905822671642e41cf2 | 2,025 | py | Python | tests/test_backends/test_redis.py | theruziev/aio_pubsub | 7629c6c5fe02218e5300fa11c6f0cf2beaf8aaf5 | [
"MIT"
] | 7 | 2019-06-11T12:39:39.000Z | 2021-03-23T13:41:01.000Z | tests/test_backends/test_redis.py | hugokernel/aio_pubsub | 992762bd316793d588de055075eedf70f2087870 | [
"MIT"
] | 150 | 2019-05-30T09:18:07.000Z | 2022-02-04T17:21:17.000Z | tests/test_backends/test_redis.py | theruziev/aio_pubsub | 7629c6c5fe02218e5300fa11c6f0cf2beaf8aaf5 | [
"MIT"
] | 3 | 2019-07-12T13:37:13.000Z | 2021-02-20T20:53:12.000Z | import aioredis
import pytest
from aio_pubsub.backends.redis import RedisPubSub
@pytest.fixture
async def create_pub_sub_conn():
pub = await aioredis.create_redis("redis://localhost:6379/0?encoding=utf-8")
sub = await aioredis.create_redis("redis://localhost:6379/0?encoding=utf-8")
yield pub, sub
pub.close()
sub.close()
@pytest.mark.asyncio
async def test_subscriber_isinstance(create_pub_sub_conn):
from aio_pubsub.backends.redis import RedisSubscriber
pubsub = RedisPubSub(*create_pub_sub_conn)
subscriber = await pubsub.subscribe("a_chan")
assert isinstance(subscriber, RedisSubscriber)
@pytest.mark.asyncio
async def test_iteration_protocol(create_pub_sub_conn):
pubsub = RedisPubSub(*create_pub_sub_conn)
subscriber = await pubsub.subscribe("a_chan")
await pubsub.publish("a_chan", "hello world!")
subscriber = subscriber.__aiter__()
assert await subscriber.__anext__() == "hello world!"
@pytest.mark.asyncio
async def test_pubsub(create_pub_sub_conn):
pubsub = RedisPubSub(*create_pub_sub_conn)
subscriber = await pubsub.subscribe("a_chan")
await pubsub.publish("a_chan", "hello world!")
await pubsub.publish("a_chan", "hello universe!")
subscriber = subscriber.__aiter__()
assert await subscriber.__anext__() == "hello world!"
assert await subscriber.__anext__() == "hello universe!"
@pytest.mark.asyncio
async def test_not_subscribed_chan(create_pub_sub_conn):
pubsub = RedisPubSub(*create_pub_sub_conn)
subscriber_a_chan = await pubsub.subscribe("a_chan")
subscriber_c_chan = await pubsub.subscribe("c_chan")
await pubsub.publish("a_chan", "hello world!")
await pubsub.publish("b_chan", "junk message")
await pubsub.publish("c_chan", "hello universe!")
subscriber_a_chan = subscriber_a_chan.__aiter__()
subscriber_c_chan = subscriber_c_chan.__aiter__()
assert await subscriber_a_chan.__anext__() == "hello world!"
assert await subscriber_c_chan.__anext__() == "hello universe!"
| 35.526316 | 80 | 0.749136 | 263 | 2,025 | 5.380228 | 0.186312 | 0.042403 | 0.076325 | 0.101767 | 0.669965 | 0.64523 | 0.470671 | 0.470671 | 0.470671 | 0.384452 | 0 | 0.006885 | 0.139259 | 2,025 | 56 | 81 | 36.160714 | 0.804934 | 0 | 0 | 0.409091 | 0 | 0 | 0.142222 | 0.038519 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932f2c5b9f0c92ca43e0d3230731f07ab383efe2 | 1,223 | py | Python | financescraper/extractors.py | kpitzen/financescraper | cfe32af33e1903a5725813f3d604f56025b21634 | [
"MIT"
] | 1 | 2020-07-23T10:58:28.000Z | 2020-07-23T10:58:28.000Z | financescraper/extractors.py | kpitzen/financescraper | cfe32af33e1903a5725813f3d604f56025b21634 | [
"MIT"
] | null | null | null | financescraper/extractors.py | kpitzen/financescraper | cfe32af33e1903a5725813f3d604f56025b21634 | [
"MIT"
] | null | null | null | '''Contains classes and methods dedicated to scraping web finance data'''
from json.decoder import JSONDecodeError
import demjson
import requests
class BaseStockDataPump():
'''Base class for intake of stock data'''
    def __init__(self, url, stock_name, output_queue=None, chunk_size: int = 5):
self._url = url
self._data = None
self._get_stock_data()
self._stock_name = stock_name
self._output_queue = output_queue
self._chunk_size = chunk_size
def _get_stock_data(self):
data_request = requests.get(self._url)
try:
assert 'application/json' in data_request.headers['Content-Type']
except AssertionError:
print(data_request)
raise NotImplementedError('We require JSON returns!')
try:
request_data = data_request.json()
except JSONDecodeError:
request_data = data_request.text
request_data = demjson.decode(request_data)
self._data = request_data
def feed_data(self):
print('>>Feeding {} data...'.format(self._stock_name))
self._output_queue.put((self._stock_name, self._data))
self._output_queue.put('kill')
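# A minimal consumer sketch (not part of the original file); the URL, queue and
# ticker name are hypothetical, and any object with put()/get() would do:
#
# from queue import Queue
# q = Queue()
# pump = BaseStockDataPump('https://example.com/quote.json', 'EXAMPLE', output_queue=q)
# pump.feed_data()
# while (item := q.get()) != 'kill':
#     name, payload = item
#     print(name, payload)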
| 32.184211 | 82 | 0.654947 | 146 | 1,223 | 5.171233 | 0.410959 | 0.087417 | 0.051656 | 0.042384 | 0.063576 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001095 | 0.253475 | 1,223 | 37 | 83 | 33.054054 | 0.825849 | 0.084219 | 0 | 0.071429 | 0 | 0 | 0.06853 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.107143 | false | 0 | 0.107143 | 0 | 0.25 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932f6e9ea473e0bd5ce0911e26562b22f5c7faf1 | 596 | py | Python | backend/settings/prod.py | DataHack-CSCE606/django-vue-template | 9dd1b1bf91223383938b844ed484de2d3b949a4d | [
"MIT"
] | null | null | null | backend/settings/prod.py | DataHack-CSCE606/django-vue-template | 9dd1b1bf91223383938b844ed484de2d3b949a4d | [
"MIT"
] | 1 | 2021-04-26T04:48:16.000Z | 2021-04-26T04:48:16.000Z | backend/settings/prod.py | DataHack-CSCE606/django-vue-template | 9dd1b1bf91223383938b844ed484de2d3b949a4d | [
"MIT"
] | null | null | null | """ Production Settings """
import os
import dj_database_url
#import django_heroku
from .dev import *
############
# DATABASE #
############
DATABASES = {
'default': dj_database_url.config(
default=os.getenv('DATABASE_URL')
)
}
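# DATABASE_URL follows the usual dj-database-url format, e.g. (hypothetical
# credentials): postgres://user:password@host:5432/dbname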
############
# SECURITY #
############
DEBUG = bool(os.getenv('DJANGO_DEBUG', ''))
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', SECRET_KEY)
# Set to your Domain here (eg. 'django-vue-template-demo.herokuapp.com')
ALLOWED_HOSTS = ['portfoliotradingassistant.herokuapp.com', 'localhost:8000', 'localhost:8080']
#django_heroku.settings(locals())
| 18.625 | 95 | 0.654362 | 67 | 596 | 5.626866 | 0.567164 | 0.087533 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015504 | 0.134228 | 596 | 31 | 96 | 19.225806 | 0.715116 | 0.275168 | 0 | 0 | 0 | 0 | 0.309973 | 0.105121 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
932fdc8a4b8b337b35f8ecfe153137f737d06999 | 5,561 | py | Python | backend/crud.py | jphacks/A_2111 | 22624f3f6bb4cc4eb40cc16a2113b7e860d5159e | [
"MIT"
] | 8 | 2021-10-31T06:45:27.000Z | 2021-11-30T04:33:17.000Z | backend/crud.py | jphacks/A_2111 | 22624f3f6bb4cc4eb40cc16a2113b7e860d5159e | [
"MIT"
] | 55 | 2021-10-29T18:25:09.000Z | 2022-02-27T19:42:48.000Z | backend/crud.py | jphacks/A_2111 | 22624f3f6bb4cc4eb40cc16a2113b7e860d5159e | [
"MIT"
] | 5 | 2021-11-23T05:41:59.000Z | 2021-12-20T02:20:19.000Z | from fastapi import HTTPException, status
import os
from uuid import uuid4
from firebase import db
from firebase_admin import firestore
import numpy as np
# Fetch all registered members
async def get_all_members():
docs = db.collection("members").stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Fetch a specific member
async def get_member(uuid: str):
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="This ID was not found")
return data
# Fetch all relations
async def get_all_familiars():
docs = db.collection("familiars").stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Fetch the relations of a specific member
async def get_familiar(uuid: str):
docs = db.collection("familiars").where("start", "==", uuid).stream()
docs2 = db.collection("familiars").where("end", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Register a member
async def create_member(name: str, size: str, vector: str) -> str:
size_width = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
if size not in size_width:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Please enter an integer between 1 and 10 (half-width digits)")
uuid = str(uuid4())
doc_ref = db.collection("members").document()
doc_ref.set({
"uuid": uuid,
"name": name,
"size": size,
"vector": vector
})
return uuid
# Register a relation
async def create_familiar(start: str, end: str):
doc_ref = db.collection("familiars").document()
doc_ref.set({
"start": start,
"end": end
})
return True
# Check whether the relation already exists
async def existed_familiar(start: str, end: str):
docs = db.collection("familiars").where("start", "==", start).where("end", "==", end).stream()
docs2 = db.collection("familiars").where("start", "==", end).where("end", "==", start).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) != 0:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="This ID is already registered")
return True
# Update a member's registration
async def update_member(uuid: str, name: str, size: str):
size_width = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
if size not in size_width:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Please enter an integer between 1 and 10 (half-width digits)")
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Your ID was not found")
doc_ref = db.collection("members").document(data[0]["id"])
result = doc_ref.update({"name": name, "size": size})
return result
# Delete a member's registration
async def remove_member(uuid: str):
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Your ID was not found")
result = db.collection("members").document(data[0]["id"]).delete()
return result
# When deleting a member, also delete every relation attached to them
async def remove_familiar_related_member(uuid: str):
docs = db.collection("familiars").where("start", "==", uuid).stream()
docs2 = db.collection("familiars").where("end", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
    for item in data:
        db.collection("familiars").document(item["id"]).delete()
return True
# Delete a specific relation
async def remove_familiar(start: str, end: str):
docs = db.collection("familiars").where("start", "==", start).where("end", "==", end).stream()
docs2 = db.collection("familiars").where("start", "==", end).where("end", "==", start).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No registered ID found")
result = db.collection("familiars").document(data[0]["id"]).delete()
return result
# Compute cosine similarity
async def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
# Match the received vector against the vector registered in the DB
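# Note: create_member() above annotates `vector` as str, while the cosine
# computation below treats the stored value as a numeric sequence; a numeric
# list is assumed to actually be stored for this comparison to work.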
async def login(uuid: str, vector: list):
already_registered_vector = db.collection("members").where("uuid", "==", uuid).stream()
for vec in already_registered_vector:
post = {"id": vec.id, **vec.to_dict()}
cosine_result = await cosine_similarity(vector, post["vector"])
return cosine_result
| 30.387978 | 103 | 0.608344 | 724 | 5,561 | 4.569061 | 0.160221 | 0.039299 | 0.031439 | 0.043229 | 0.654172 | 0.631197 | 0.617594 | 0.570133 | 0.570133 | 0.570133 | 0 | 0.016099 | 0.218126 | 5,561 | 182 | 104 | 30.554945 | 0.74471 | 0.032368 | 0 | 0.625 | 0 | 0 | 0.089858 | 0.008949 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.044118 | 0 | 0.139706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
933070cff398c58c442f2e8174797188b26d5e04 | 1,429 | py | Python | modules/html.py | fagcinsk/pytools | 772172451f0b27c7c09508698d24bee2ef40ddb5 | [
"MIT"
] | 1 | 2021-01-05T20:49:02.000Z | 2021-01-05T20:49:02.000Z | modules/html.py | fagcinsk/pytools | 772172451f0b27c7c09508698d24bee2ef40ddb5 | [
"MIT"
] | 1 | 2021-01-13T20:15:02.000Z | 2021-01-14T19:16:10.000Z | modules/html.py | fagcinsk/pytools | 772172451f0b27c7c09508698d24bee2ef40ddb5 | [
"MIT"
] | 1 | 2021-01-05T13:59:08.000Z | 2021-01-05T13:59:08.000Z |
class Html:
"""HTML utilities"""
@staticmethod
def ahrefs(url):
"""Get <a> hrefs related to domain"""
from lib.pt_html import get_page_ahrefs
for href in get_page_ahrefs(url):
print(href)
def sel(self, url, selector, fmt=None):
"""Shows some part of source by selector
selector -- css selector, ex.: ul>li
"""
for res in self._soup(url).select(selector):
if fmt == 'csv':
for tr in res.find_all('tr'):
print(tr.get_text(','))
return
print(res.prettify())
def xpath(self, url, xpath):
"""Shows some part of source by xpath
xpath -- ex.: //a/@href
"""
from lxml import etree
for res in self._lxml(url).xpath(xpath):
if isinstance(res, etree._ElementUnicodeResult):
print(res)
else:
print(etree.tostring(res, pretty_print=True).decode())
def src(self, url):
"""Shows prettified html source,"""
print(self._soup(url).prettify())
@staticmethod
def _soup(url):
from bs4 import BeautifulSoup
from requests import get
return BeautifulSoup(get(url).text, 'html.parser')
@staticmethod
def _lxml(url):
from lxml import html
from requests import get
return html.fromstring(get(url).text)
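# A minimal usage sketch (not part of the original file); the URL is a
# placeholder:
#
# h = Html()
# h.sel('https://example.com', 'title')        # print matching elements
# h.xpath('https://example.com', '//a/@href')  # print all link targets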
| 28.019608 | 70 | 0.554934 | 171 | 1,429 | 4.555556 | 0.380117 | 0.057766 | 0.033376 | 0.038511 | 0.12837 | 0.05905 | 0 | 0 | 0 | 0 | 0 | 0.001048 | 0.3324 | 1,429 | 50 | 71 | 28.58 | 0.815514 | 0.148355 | 0 | 0.15625 | 0 | 0 | 0.014719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.1875 | 0 | 0.5 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9330b5b56e6e3955dd86252f7beed42138f64599 | 2,959 | py | Python | loss.py | aadhithya/pytorch-yolo-v1 | c362ab4305d22ccf1c0481f9b693e32bf50bd46e | [
"MIT"
] | null | null | null | loss.py | aadhithya/pytorch-yolo-v1 | c362ab4305d22ccf1c0481f9b693e32bf50bd46e | [
"MIT"
] | null | null | null | loss.py | aadhithya/pytorch-yolo-v1 | c362ab4305d22ccf1c0481f9b693e32bf50bd46e | [
"MIT"
] | null | null | null | from model import YOLOv1
import torch
import torch.nn as nn
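# NOTE (assumption, not in the original file): get_iou used below is expected
# to be an IoU helper over (..., 4) box tensors, such as the
# intersection_over_union utility that commonly accompanies YOLOv1 tutorials.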
class YOLOv1Loss(nn.Module):
def __init__(self, S=7, B=2, C=20):
"""
__init__ initialize YOLOv1 Loss.
Args:
S (int, optional): split_size. Defaults to 7.
B (int, optional): number of boxes. Defaults to 2.
C (int, optional): number of classes. Defaults to 20.
"""
super().__init__()
self.mse = nn.MSELoss(reduction="sum")
self.S = S
self.B = B
self.C = C
self.l_noobl = 0.5
self.l_coord = 5
def forward(self, predictions, target):
        predictions = predictions.reshape(-1, self.S, self.S, self.C + self.B*5)
iou_b1 = get_iou(predictions[...,21:25], target[...,21:25])
iou_b2 = get_iou(predictions[...,26:30], target[...,21:25])
ious = torch.stack([iou_b1, iou_b2], 0)
_, max_iou = torch.max(ious, dim=0)
        exists_box = target[..., 20].unsqueeze(3)  # objectness indicator: 1 where the target cell contains an object
# * Box Coordinates Loss
# Select the bounding boxes with highest IoU
box_predictions = exists_box * (
(
max_iou * predictions[..., 26:30] +
(1 - max_iou) * predictions[..., 21:25]
)
)
# Select targets which has an object
box_targets = exists_box * target[...,21:25]
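        # The paper regresses sqrt(w) and sqrt(h) so that large boxes do not
        # dominate the loss; sign()/abs() keep the operation defined for
        # negative predictions, and 1e-6 avoids sqrt's infinite slope at 0.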
box_predictions[...,2:4] = torch.sign(box_predictions[...,2:4]) * torch.sqrt(
torch.abs(box_predictions[..., 2:4]) + 1e-6
)
box_targets[..., 2:4] = torch.sqrt(box_targets[..., 2:4])
box_loss = self.mse(
torch.flatten(box_predictions, end_dim=-2),
torch.flatten(box_targets, end_dim=-2)
)
        # * Object Loss
pred_box = (
max_iou * predictions[..., 25:26] +
(1-max_iou) * predictions[..., 20:21]
)
object_loss = self.mse(
torch.flatten(exists_box * pred_box),
torch.flatten(exists_box * target[..., 20:21])
)
# * No Object Loss
# For the first box
        no_object_loss = self.mse(
torch.flatten((1-max_iou) * predictions[...,20:21], start_dim=1),
torch.flatten((1-max_iou) * target[...,20:21], start_dim=1)
)
# For the second box
        no_object_loss += self.mse(
torch.flatten(max_iou * predictions[...,25:26], start_dim=1),
torch.flatten(max_iou * target[...,20:21], start_dim=1)
)
# * Class prediction Loss
class_loss = self.mse(
torch.flatten(exists_box * predictions[...,:20], end_dim=-2),
torch.flatten(exists_box * target[...,:20], end_dim=-2)
)
# * Total Loss
loss = (
self.l_coord * box_loss
+ object_loss
            + self.l_noobl * no_object_loss
+ class_loss
)
return loss
| 31.147368 | 85 | 0.528219 | 370 | 2,959 | 4.032432 | 0.254054 | 0.080429 | 0.068365 | 0.053619 | 0.308311 | 0.190349 | 0.121984 | 0.079088 | 0 | 0 | 0 | 0.054995 | 0.330179 | 2,959 | 94 | 86 | 31.478723 | 0.69778 | 0.149375 | 0 | 0 | 0 | 0 | 0.001223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.04918 | 0 | 0.114754 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93310a9f4d736b657493fe8d8dcee72bcfd8191c | 5,332 | py | Python | Inverter.glyphsFilter/Contents/Resources/plugin.py | mekkablue/Inverter | 8ed3ba6199c738c7621da7a6a223af2f6b021828 | [
"Apache-2.0"
] | 1 | 2015-01-12T10:24:58.000Z | 2015-01-12T10:24:58.000Z | Inverter.glyphsFilter/Contents/Resources/plugin.py | mekkablue/Inverter | 8ed3ba6199c738c7621da7a6a223af2f6b021828 | [
"Apache-2.0"
] | 2 | 2016-01-29T16:56:59.000Z | 2018-01-01T14:51:21.000Z | Inverter.glyphsFilter/Contents/Resources/plugin.py | mekkablue/Inverter | 8ed3ba6199c738c7621da7a6a223af2f6b021828 | [
"Apache-2.0"
] | 1 | 2017-12-30T21:20:14.000Z | 2017-12-30T21:20:14.000Z | # encoding: utf-8
from __future__ import division, print_function, unicode_literals
###########################################################################################################
#
#
# Filter with dialog Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Filter%20with%20Dialog
#
# For help on the use of Interface Builder:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates
#
#
###########################################################################################################
import objc
from GlyphsApp import *
from GlyphsApp.plugins import *
from AppKit import NSAffineTransform, NSPoint
from math import tan, pi
class Inverter(FilterWithDialog):
dialog = objc.IBOutlet()
topEdgeField = objc.IBOutlet()
bottomEdgeField = objc.IBOutlet()
overlapField = objc.IBOutlet()
@objc.python_method
def settings(self):
self.menuName = Glyphs.localize({
'en': 'Inverter',
'de': 'Umkehren',
			'fr': 'Inverser',
			'es': 'Invertir',
'it': 'Invertire',
'pt': 'Inverter',
})
self.actionButtonLabel = Glyphs.localize({
'en': 'Invert',
'de': 'Umkehren',
			'fr': 'Inverser',
			'es': 'Invertir',
'it': 'Invertire',
'pt': 'Inverter',
})
Glyphs.registerDefault( "com.mekkablue.Inverter.topEdge", 800.0 )
Glyphs.registerDefault( "com.mekkablue.Inverter.bottomEdge", -200.0 )
Glyphs.registerDefault( "com.mekkablue.Inverter.overlap", 5.0 )
# Load dialog from .nib (without .extension)
self.loadNib('IBdialog', __file__)
# On dialog show
@objc.python_method
def start(self):
# Set value of text field
self.topEdgeField.setFloatValue_( Glyphs.defaults['com.mekkablue.Inverter.topEdge'] )
self.bottomEdgeField.setFloatValue_( Glyphs.defaults['com.mekkablue.Inverter.bottomEdge'] )
self.overlapField.setFloatValue_( Glyphs.defaults['com.mekkablue.Inverter.overlap'] )
self.topEdgeField.becomeFirstResponder()
@objc.IBAction
def setTopEdge_( self, sender ):
# Store value coming in from dialog
Glyphs.defaults['com.mekkablue.Inverter.topEdge'] = sender.floatValue()
# Trigger redraw
self.update()
@objc.IBAction
def setBottomEdge_( self, sender ):
Glyphs.defaults['com.mekkablue.Inverter.bottomEdge'] = sender.floatValue()
self.update()
@objc.IBAction
def setOverlap_( self, sender ):
Glyphs.defaults['com.mekkablue.Inverter.overlap'] = sender.floatValue()
self.update()
@objc.python_method
def pathRect( self, bottomLeft, topRight, italicAngle=0.0, downShift=0.0 ):
try:
# coordinates of rectangle:
myCoordinates = (
NSPoint( bottomLeft.x, bottomLeft.y ),
NSPoint( topRight.x, bottomLeft.y ),
NSPoint( topRight.x, topRight.y ),
NSPoint( bottomLeft.x, topRight.y )
)
# build the path:
rectangle = GSPath()
for thisPoint in myCoordinates:
newNode = GSNode()
newNode.type = 1 # GSLINE
newNode.position = thisPoint
rectangle.nodes.append( newNode )
rectangle.closed = True
# skew if there is an italic angle:
if not italicAngle == 0.0:
# calculate & build skew transformation:
skewTangens = tan( italicAngle/180*pi )
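				# NSAffineTransform structs are (m11, m12, m21, m22, tX, tY);
				# m21 = tan(italicAngle) shears x in proportion to y, and the
				# translate below moves the shear origin up to y = downShift.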
skew = NSAffineTransform.transform()
skew.setTransformStruct_( (1.0, 0.0, skewTangens, 1.0, 0.0, downShift) )
skew.translateXBy_yBy_( 0.0, -downShift )
# apply transformation to points of rectangle:
for thisNode in rectangle.nodes:
thisNode.position = skew.transformPoint_( thisNode.position )
return rectangle
except Exception as e:
import traceback
print(traceback.format_exc())
print("pathRect: %s" % str(e))
@objc.python_method
def filter(self, layer, inEditView, customParameters):
topEdge = float( Glyphs.defaults['com.mekkablue.Inverter.topEdge'] )
bottomEdge = float( Glyphs.defaults['com.mekkablue.Inverter.bottomEdge'] )
overlap = float( Glyphs.defaults['com.mekkablue.Inverter.overlap'] )
# Called on font export, override with values from customParameters:
if 'top' in customParameters:
topEdge = customParameters['top']
if 'bottom' in customParameters:
bottomEdge = customParameters['bottom']
if 'overlap' in customParameters:
overlap = customParameters['overlap']
# upper and lower edges of rectangle:
bottomLeft = NSPoint( -overlap, bottomEdge )
topRight = NSPoint( layer.width+overlap, topEdge )
# check italic angle and skew origin:
thisMaster = layer.associatedFontMaster()
skewAngle = thisMaster.italicAngle
halfXHeight = thisMaster.xHeight * 0.5
# build the rectangle path:
rectangle = self.pathRect( bottomLeft, topRight, skewAngle, halfXHeight )
# add it to the decomposed glyph:
if rectangle:
layer.decomposeComponents()
layer.removeOverlap()
try:
# GLYPHS 3
layer.shapes.append( rectangle )
except:
# GLYPHS 2
layer.paths.append( rectangle )
layer.correctPathDirection()
@objc.python_method
def generateCustomParameter( self ):
return "%s; top:%s; bottom:%s; overlap:%s" % (
self.__class__.__name__,
Glyphs.defaults['com.mekkablue.Inverter.topEdge'],
Glyphs.defaults['com.mekkablue.Inverter.bottomEdge'],
Glyphs.defaults['com.mekkablue.Inverter.overlap'],
)
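	# With the default values registered above, generateCustomParameter()
	# yields: "Inverter; top:800.0; bottom:-200.0; overlap:5.0"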
@objc.python_method
def __file__(self):
"""Please leave this method unchanged"""
return __file__
| 30.820809 | 107 | 0.683796 | 576 | 5,332 | 6.255208 | 0.350694 | 0.049958 | 0.083264 | 0.086595 | 0.30086 | 0.262837 | 0.08826 | 0.063836 | 0.063836 | 0.02831 | 0 | 0.009594 | 0.159415 | 5,332 | 172 | 108 | 31 | 0.794288 | 0.153788 | 0 | 0.226087 | 0 | 0 | 0.157166 | 0.109078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078261 | false | 0 | 0.06087 | 0.008696 | 0.208696 | 0.026087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9336657b701c8f38490d58496d625f32174dfce9 | 1,832 | py | Python | rlrunner/termination/dynamic_tc.py | PriestTheBeast/RLRunner | 0626508a8133b67947afc1039c30b4fd512656a2 | [
"MIT"
] | 8 | 2020-07-06T19:32:30.000Z | 2020-08-11T05:50:32.000Z | rlrunner/termination/dynamic_tc.py | PriestTheBeast/RLRunner | 0626508a8133b67947afc1039c30b4fd512656a2 | [
"MIT"
] | null | null | null | rlrunner/termination/dynamic_tc.py | PriestTheBeast/RLRunner | 0626508a8133b67947afc1039c30b4fd512656a2 | [
"MIT"
] | null | null | null | from rlrunner.termination.base_termination_condition import BaseTerminationCondition
from collections import deque
class DynamicTC(BaseTerminationCondition):
"""
This is a more complex and dynamic termination condition
It will see if there has been sufficient progress in the last X episodes
and if not it will assume the agent has stopped learning and terminate the run
"""
def __init__(self, epi_interval_for_progress=50, nr_exploits_in_interval=10):
super().__init__()
self.epi_interval_for_progress = epi_interval_for_progress
self.nr_exploits_in_interval = nr_exploits_in_interval
# this will calculate how frequent the exploit episodes will be to match the requirements wanted
# in the default case it will be 50//10 = 5, so in every 5 episodes one of them will be an exploit episode
self.exploit_every_x_epi = self.epi_interval_for_progress // self.nr_exploits_in_interval
# info about the progress in the last X episodes
self.info = deque(maxlen=self.nr_exploits_in_interval)
self.cumulative_rewards = 0
def is_exploit_episode(self, episode_number):
return episode_number % self.exploit_every_x_epi == 0
def update_info(self, episode_number, transition):
# It will be more precise to measure the progress only from exploit episodes
if self.is_exploit_episode(episode_number):
_, _, reward, _, done = transition
self.cumulative_rewards += reward
if done:
self.info.append(self.cumulative_rewards)
self.cumulative_rewards = 0
def check_termination(self, episode_number):
# that "3" reward difference is kinda hardcoded for the simple_env reward function
# but you get the point
if episode_number > self.epi_interval_for_progress:
avg = sum(self.info) / len(self.info)
best_value = max(self.info)
if best_value - avg < 3:
return True
return False
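# A minimal usage sketch (not part of the original file); the reward values and
# the surrounding episode loop are hypothetical:
#
# tc = DynamicTC(epi_interval_for_progress=50, nr_exploits_in_interval=10)
# for episode in range(1000):
#     done = False
#     while not done:
#         transition = (None, None, 1.0, None, True)  # (s, a, reward, s', done)
#         tc.update_info(episode, transition)
#         done = transition[-1]
#     if tc.check_termination(episode):
#         break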
| 38.978723 | 108 | 0.782205 | 276 | 1,832 | 4.945652 | 0.384058 | 0.057143 | 0.051282 | 0.080586 | 0.254945 | 0.149451 | 0.067399 | 0.067399 | 0.067399 | 0 | 0 | 0.009734 | 0.158843 | 1,832 | 46 | 109 | 39.826087 | 0.876055 | 0.34607 | 0 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0.038462 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
933cb13bc7fe5bd1b62885cb8b25ce8a810ed468 | 2,082 | py | Python | Packs/CrowdStrikeFalcon/Scripts/ReadNetstatFile/ReadNetstatFile.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | null | null | null | Packs/CrowdStrikeFalcon/Scripts/ReadNetstatFile/ReadNetstatFile.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | 40 | 2022-03-03T07:34:00.000Z | 2022-03-31T07:38:35.000Z | Packs/CrowdStrikeFalcon/Scripts/ReadNetstatFile/ReadNetstatFile.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | null | null | null | from CommonServerPython import *
COMMAND_NAME = 'netstat'
def get_netstat_file_name(command_files):
if command_files and isinstance(command_files, dict):
netstat_files = command_files.get(COMMAND_NAME, [])
if netstat_files:
if isinstance(netstat_files, list):
# we want to get the last file name
                return netstat_files[-1].get('Filename')
elif isinstance(netstat_files, dict):
return netstat_files.get('Filename') # type:ignore
def get_file_name_from_context() -> str:
crowdstrike_context = demisto.context().get('CrowdStrike', {})
all_command_files = []
if isinstance(crowdstrike_context, list):
for ctx in crowdstrike_context:
if cmd_ctx := ctx.get('Command'):
all_command_files.append(cmd_ctx)
elif isinstance(crowdstrike_context, dict) and (cmd_ctx := crowdstrike_context.get('Command')):
all_command_files.append(cmd_ctx)
for command_file in all_command_files[::-1]: # get last file in context
if file_name := get_netstat_file_name(command_file):
return file_name
return ""
def get_file_entry_id(file_name):
file_entry_id = ""
if file_name:
entries = demisto.executeCommand('getEntries', {})
for entry in entries:
file_entry = demisto.get(entry, 'File')
is_correct_file = file_name.lower() == file_entry.lower()
if is_correct_file:
file_entry_id = entry['ID']
break
return file_entry_id
def get_file_content(file_entry_id):
if file_entry_id:
res = execute_command('getFilePath', {'id': file_entry_id})
file_path = res.get('path')
with open(file_path, 'r') as f:
file_content = f.read()
return file_content
def main():
file_name = get_file_name_from_context()
if file_name:
demisto.results(get_file_content(get_file_entry_id(file_name)))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 33.580645 | 99 | 0.650336 | 265 | 2,082 | 4.735849 | 0.230189 | 0.082869 | 0.07012 | 0.035857 | 0.196016 | 0.094024 | 0.058964 | 0.058964 | 0 | 0 | 0 | 0.00128 | 0.24976 | 2,082 | 61 | 100 | 34.131148 | 0.802177 | 0.033622 | 0 | 0.085106 | 0 | 0 | 0.054283 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.021277 | 0 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93448c13d9f2db38fbc78aac40bc93d67e0581e9 | 5,241 | py | Python | src/app/bert.py | korney3/ARES_RVision_Hack | 86b53b5e9c5495e988951cc3a11afe61c883d2a6 | [
"MIT"
] | 1 | 2021-09-08T16:17:32.000Z | 2021-09-08T16:17:32.000Z | src/app/bert.py | korney3/ARES_RVision_Hack | 86b53b5e9c5495e988951cc3a11afe61c883d2a6 | [
"MIT"
] | null | null | null | src/app/bert.py | korney3/ARES_RVision_Hack | 86b53b5e9c5495e988951cc3a11afe61c883d2a6 | [
"MIT"
] | 3 | 2021-03-31T09:11:59.000Z | 2021-08-18T07:18:51.000Z | import pandas as pd
import glob
from tqdm import tqdm, trange
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
import json
import os
import requests
from flask import Flask, request, Response
from flask_cors import CORS
from requests import Request, Session
import transformers
from transformers import BertForTokenClassification, AdamW
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
app = Flask(__name__)
cors = CORS(app)
@app.route(('/annotate_text'))
def annotate_text():
data = request.args.get('text', default='APT', type=str)
text = []
for para in data.strip().split('\n\n'):
para = ' '.join(para.strip().replace("\n", " ").split())
if para!='':
text.extend(sent_tokenize(para))
annotation = []
for test_sentence in text:
prev_label='O'
tokenized_sentence = tokenizer.encode(test_sentence)
input_ids = torch.tensor([tokenized_sentence])#.cuda()
with torch.no_grad():
output = model(input_ids)
label_indices = np.argmax(output[0].to('cpu').numpy(), axis=2)
tokens = tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
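        # Re-assemble WordPiece sub-tokens: a piece starting with "##" continues
        # the previous token, so it is glued back on and its label is dropped
        # (the first sub-token's label wins).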
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(tag_values[label_idx])
new_tokens.append(token)
from nltk import pos_tag
from nltk.tree import Tree
from nltk.chunk import conlltags2tree
tokens = new_tokens
tags = new_labels
# tag each token with pos
pos_tags = [pos for token, pos in pos_tag(tokens)]
# convert the BIO / IOB tags to tree
conlltags = [(token, pos, tg) for token, pos, tg in zip(tokens, pos_tags, tags)]
ne_tree = conlltags2tree(conlltags)
# parse the tree to get our original text
original_text = []
for subtree in ne_tree:
# checking for 'O' tags
if type(subtree) == Tree:
original_label = subtree.label()
original_string = " ".join([token for token, pos in subtree.leaves()])
if (original_string!='[CLS]' and original_string!='[SEP]'):
if original_label==prev_label:
original_text.append(original_string)
else:
original_text.append('<'+original_label.upper()+'>'+original_string)
prev_label = original_label
elif type(subtree)==tuple:
if (subtree[0]!='[CLS]' and subtree[0]!='[SEP]'):
if prev_label!='O':
original_text[-1]+='</'+original_label.upper()+'>'
prev_label='O'
original_text.append(subtree[0])
annotation+=[tokenizer.convert_tokens_to_string(original_text)]
json_string = json.dumps({'parse':'\n'.join(annotation),'f1_macro':macro_f1[-1], 'prec_macro':macro_prec[-1], 'rec_macro':macro_rec[-1]}, ensure_ascii=False)
response = Response(json_string, content_type="application/json; charset=utf-8")
    return response
@app.route(('/kill_flask'))
def kill_flask():
raise ValueError('Server was killed')
if __name__ == '__main__':
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
DATA_PATH = '../../data/processed/'
LOG_PATH = '../../models/BERT_baseline/'
with open(os.path.join(LOG_PATH,'macro_prec.txt'),'r') as f:
macro_prec = f.read().strip().split('\n')
with open(os.path.join(LOG_PATH,'macro_rec.txt'),'r') as f:
macro_rec = f.read().strip().split('\n')
with open(os.path.join(LOG_PATH,'macro_f1.txt'),'r') as f:
macro_f1 = f.read().strip().split('\n')
tokenizer = BertTokenizer.from_pretrained(os.path.join(LOG_PATH), do_lower_case=False)
model = BertForTokenClassification.from_pretrained(LOG_PATH)
tag_values = ['B-identity',
'I-malware',
'B-org',
'B-industry',
'I-org',
'I-city',
'I-user',
'B-software',
'I-cve',
'B-file',
'I-mitre_attack',
'B-theat_actor',
'I-appdata',
'B-ioc',
'B-mitre_attack',
'B-cve',
'B-technique',
'B-name',
'I-technique',
'I-program',
'I-tool',
'B-user',
'B-major',
'B-city',
'B-appdata',
'I-identity',
'I-ioc',
'O',
'B-timestamp',
'B-pid',
'B-program',
'I-name',
'I-country',
'I-campaign',
'I-local',
'B-country',
'B-campaign',
'B-local',
'I-windows',
'B-attack_pattern',
'B-excel',
'B-n',
'I-timestamp',
'I-software',
'I-industry',
'B-update',
'B-threat_actor',
'B-tool',
'I-type',
'B-windows',
'I-file',
'B-malware',
'B-type',
'I-input',
'B-input',
'I-threat_actor',
'PAD']
app.run(host='0.0.0.0', port=5002)
| 26.336683 | 159 | 0.655218 | 717 | 5,241 | 4.616457 | 0.304045 | 0.025378 | 0.02719 | 0.036254 | 0.106042 | 0.036858 | 0.036858 | 0.036858 | 0.027795 | 0.027795 | 0 | 0.007091 | 0.192711 | 5,241 | 198 | 160 | 26.469697 | 0.77523 | 0.024423 | 0 | 0.038217 | 0 | 0 | 0.149961 | 0.014487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012739 | false | 0 | 0.197452 | 0 | 0.216561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9348323aeab818c58eea18c65f534561d0ac86e0 | 1,026 | py | Python | codewars-python/Kingdoms-E-2-The-curse-(simplified).py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | 2 | 2021-09-06T22:13:12.000Z | 2021-11-22T08:50:04.000Z | codewars-python/Kingdoms-E-2-The-curse-(simplified).py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null | codewars-python/Kingdoms-E-2-The-curse-(simplified).py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null |
#https://www.codewars.com/kata/6159dda246a119001a7de465/train/python
def translate(s, voc):
s = s.split(' ')
output = []
for speech in s:
for vocabulary in voc:
real_speech = speech_decoder(speech, vocabulary)
if real_speech:
output.append(real_speech)
return ' '.join(output)
def speech_decoder(speech, vocabulary):
copy_speech = speech
length_raw_string = len(copy_speech.translate(str.maketrans('','','?!,.')))
if length_raw_string != len(vocabulary):
return False
i = 0
output = []
    for char in speech:
        if char == '*':
            output.append(vocabulary[i])
            i += 1
            continue
        if char in '?!,.':
            output.append(char)
            continue
        if char == vocabulary[i]:
            output.append(vocabulary[i])
        else:
            return False
        i += 1
result = ''.join(output)
output.clear()
return result
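# Worked example (illustrative, not part of the original solution):
# translate("he*lo w*rld!", ["hello", "world"]) -> "hello world!"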
| 19 | 79 | 0.531189 | 108 | 1,026 | 4.944444 | 0.37037 | 0.089888 | 0.071161 | 0.108614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03012 | 0.352827 | 1,026 | 53 | 80 | 19.358491 | 0.774096 | 0.065302 | 0 | 0.3125 | 0 | 0 | 0.011494 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
934873d20743eecdf15b1e37621d9c7a3c1fcae4 | 3,878 | py | Python | locs/datasets/charged_data.py | mkofinas/locs | 4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c | [
"MIT"
] | 16 | 2021-11-04T07:57:58.000Z | 2022-03-01T17:45:32.000Z | locs/datasets/charged_data.py | mkofinas/locs | 4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c | [
"MIT"
] | null | null | null | locs/datasets/charged_data.py | mkofinas/locs | 4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
from torch.utils.data import Dataset
class ChargedData(Dataset):
def __init__(self, data_path, mode, params):
self.mode = mode
self.data_path = data_path
if self.mode == 'train':
path = os.path.join(data_path, 'train_feats')
edge_path = os.path.join(data_path, 'train_edges')
elif self.mode == 'val':
path = os.path.join(data_path, 'valid_feats')
edge_path = os.path.join(data_path, 'valid_edges')
elif self.mode == 'test':
path = os.path.join(data_path, 'test_feats')
edge_path = os.path.join(data_path, 'test_edges')
self.feats = torch.load(path)
self.edges = torch.load(edge_path)
self.same_norm = params['same_data_norm']
self.symmetric_norm = params['symmetric_data_norm']
self.no_norm = params['no_data_norm']
self.vel_norm_norm = params['vel_norm_norm']
if not self.no_norm:
self._normalize_data()
def _normalize_data(self):
train_data = torch.load(os.path.join(self.data_path, 'train_feats'))
if self.same_norm:
self.feat_max = train_data.max()
self.feat_min = train_data.min()
self.feats = (self.feats - self.feat_min)*2/(self.feat_max-self.feat_min) - 1
elif self.vel_norm_norm:
self.vel_norm_max = np.linalg.norm(train_data[..., 3:], axis=-1).max()
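            # Positions (cols 0:3) and velocities (cols 3:6) are divided by the
            # same max velocity norm, preserving their relative scale instead of
            # normalizing each block independently.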
self.feats[..., :3] = self.feats[..., :3] / self.vel_norm_max
self.feats[..., 3:] = self.feats[..., 3:] / self.vel_norm_max
else:
if self.symmetric_norm:
self.loc_max = train_data[:, :, :, :3].abs().max()
self.loc_min = -self.loc_max
self.vel_max = train_data[:, :, :, 3:].abs().max()
self.vel_min = -self.vel_max
else:
self.loc_max = train_data[:, :, :, :3].max()
self.loc_min = train_data[:, :, :, :3].min()
self.vel_max = train_data[:, :, :, 3:].max()
self.vel_min = train_data[:, :, :, 3:].min()
            self.feats[:, :, :, :3] = (self.feats[:, :, :, :3] - self.loc_min) * 2 / (self.loc_max - self.loc_min) - 1
            self.feats[:, :, :, 3:] = (self.feats[:, :, :, 3:] - self.vel_min) * 2 / (self.vel_max - self.vel_min) - 1
def unnormalize(self, data):
if self.no_norm:
return data.numpy()
elif self.same_norm:
return (data + 1) * (self.feat_max - self.feat_min) / 2. + self.feat_min
elif self.vel_norm_norm:
result1 = data[..., :3] * self.vel_norm_max
result2 = data[..., 3:] * self.vel_norm_max
return np.concatenate([result1, result2], axis=-1)
else:
result1 = (data[:, :, :, :3] + 1) * (self.loc_max - self.loc_min) / 2. + self.loc_min
result2 = (data[:, :, :, 3:] + 1) * (self.vel_max - self.vel_min) / 2. + self.vel_min
return np.concatenate([result1, result2], axis=-1)
def torch_unnormalize(self, data):
if self.no_norm:
return data
elif self.same_norm:
return (data + 1) * (self.feat_max - self.feat_min) / 2. + self.feat_min
elif self.vel_norm_norm:
result1 = data[..., :3] * self.vel_norm_max
result2 = data[..., 3:] * self.vel_norm_max
return torch.cat([result1, result2], axis=-1)
else:
result1 = (data[:, :, :, :3] + 1) * (self.loc_max - self.loc_min) / 2. + self.loc_min
result2 = (data[:, :, :, 3:] + 1) * (self.vel_max - self.vel_min) / 2. + self.vel_min
return torch.cat([result1, result2], axis=-1)
def __getitem__(self, idx):
return {'inputs': self.feats[idx], 'edges': self.edges[idx]}
def __len__(self):
return len(self.feats)
| 44.574713 | 107 | 0.550542 | 524 | 3,878 | 3.832061 | 0.108779 | 0.087151 | 0.060259 | 0.055777 | 0.616036 | 0.603586 | 0.511454 | 0.38994 | 0.330179 | 0.289343 | 0 | 0.022399 | 0.28623 | 3,878 | 86 | 108 | 45.093023 | 0.703035 | 0 | 0 | 0.320513 | 0 | 0 | 0.040227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.051282 | 0.025641 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
934b67cb1527f74ff3f23c604c7293d16bac3821 | 30,891 | py | Python | acq4/util/database/database.py | aleonlein/acq4 | 4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555 | [
"MIT"
] | 1 | 2020-06-04T17:04:53.000Z | 2020-06-04T17:04:53.000Z | acq4/util/database/database.py | aleonlein/acq4 | 4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555 | [
"MIT"
] | 24 | 2016-09-27T17:25:24.000Z | 2017-03-02T21:00:11.000Z | acq4/util/database/database.py | sensapex/acq4 | 9561ba73caff42c609bd02270527858433862ad8 | [
"MIT"
] | 4 | 2016-10-19T06:39:36.000Z | 2019-09-30T21:06:45.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pickle, re, os
import acq4.Manager
import collections
import acq4.util.functions as functions
import acq4.util.advancedTypes as advancedTypes
import acq4.util.debug as debug
from acq4.util import Qt
import six
from six.moves import range
import sqlite3
class SqliteDatabase:
"""Encapsulates an SQLITE database to add more features.
Arbitrary SQL may be executed by calling the db object directly, eg: db('select * from table')
    Using the select() and insert() methods will do automatic type conversions and allow
any picklable objects to be directly stored in BLOB type columns. (it is not necessarily
safe to store pickled objects in TEXT columns)
NOTE: Data types in SQLITE work differently than in most other DBs--each value may take any type
regardless of the type specified by its column.
"""
def __init__(self, fileName=':memory:'):
## decide on an appropriate name for this connection.
## For file connections, the name should always be the name of the file
## to avoid opening more than one connection to the same file.
if fileName != ':memory:':
fileName = os.path.abspath(fileName)
self._connectionName = fileName
self.db = sqlite3.connect(self._connectionName)
self.db.row_factory = sqlite3.Row
self.db.isolation_level = None
self.tables = None
self._transactions = []
self._readTableList()
def close(self):
if self.db is None:
return
self.db.close()
self.db = None
## no need to remove the connection entirely.
#import gc
#gc.collect() ## try to convince python to clean up the db immediately so we can remove the connection
#Qt.QSqlDatabase.removeDatabase(self._connectionName)
def exe(self, cmd, data=None, batch=False, toDict=True, toArray=False):
"""Execute an SQL query. If data is provided, it should be a list of dicts and each will
be bound to the query and executed sequentially. Returns the query object.
Arguments:
cmd - The SQL query to execute
data - List of dicts, one per record to be processed
For each record, data is bound to the query by key name
{"key1": "value1"} => ":key1"="value1"
batch - If True, then all input data is processed in a single execution.
In this case, data must be provided as a dict-of-lists or record array.
toDict - If True, return a list-of-dicts representation of the query results
toArray - If True, return a record array representation of the query results
"""
p = debug.Profiler('SqliteDatabase.exe', disabled=True)
p.mark('Command: %s' % cmd)
if data is None:
cur = self.db.execute(cmd)
p.mark("Executed with no data")
else:
data = TableData(data)
            if batch:
                cur = self.db.executemany(cmd, iter(data))
            else:
                for d in data:
                    p.mark("bound values for record")
                    cur = self.db.execute(cmd, d)
                    p.mark("executed with data")
if cmd is not None:
if str(cmd)[:6].lower() == 'create':
self.tables = None ## clear table cache
if toArray:
ret = self._queryToArray(cur)
elif toDict:
ret = self._queryToDict(cur)
else:
ret = cur
p.finish()
return ret
def __call__(self, *args, **kargs):
return self.exe(*args, **kargs)
def select(self, table, columns='*', where=None, sql='', toDict=True, toArray=False, distinct=False, limit=None, offset=None):
"""
Construct and execute a SELECT statement, returning the results.
============== ================================================================
**Arguments:**
table The name of the table from which to read data
columns (list or str) List of column names to read from table. The default is '*', which reads all columns
If *columns* is given as a string, it is inserted verbatim into the SQL command.
If *columns* is given as a list, it is converted to a string of comma-separated, quoted names.
where Optional dict of {column: value} pairs. only results where column=value will be returned
distinct (bool) If true, omit all redundant results
limit (int) Limit the number of results that may be returned (best used with offset argument)
offset (int) Omit a certain number of results from the beginning of the list
sql Optional string to be appended to the SQL query (will be inserted before limit/offset arguments)
toDict If True, return a list-of-dicts (this is the default)
toArray if True, return a numpy record array
============== ================================================================
"""
p = debug.Profiler("SqliteDatabase.select", disabled=True)
if columns != '*':
#if isinstance(columns, six.string_types):
#columns = columns.split(',')
if not isinstance(columns, six.string_types):
qf = []
for f in columns:
if f == '*':
qf.append(f)
else:
qf.append('"'+f+'"')
columns = ','.join(qf)
#columns = quoteList(columns)
whereStr = self._buildWhereClause(where, table)
distinct = "distinct" if (distinct is True) else ""
limit = ("limit %d" % limit) if (limit is not None) else ""
offset = ("offset %d" % offset) if (offset is not None) else ""
cmd = "SELECT %s %s FROM %s %s %s %s %s" % (distinct, columns, table, whereStr, sql, limit, offset)
p.mark("generated command")
q = self.exe(cmd, toDict=toDict, toArray=toArray)
p.finish()
return q
def iterSelect(self, *args, **kargs):
"""
Return a generator that iterates through the results of a select query using limit/offset arguments.
This is useful for select queries that would otherwise return a very large list of results.
All arguments are passed through to select(). By default, limit=1000 and offset=0.
Note that if you specify limit or offset, they MUST be given as keyword arguments.
"""
if 'chunkSize' in kargs: ## for compatibility with iterInsert
kargs['limit'] = kargs['chunkSize']
del kargs['chunkSize']
if 'offset' not in kargs:
kargs['offset'] = 0
if 'limit' not in kargs:
kargs['limit'] = 1000
while True:
res = self.select(*args, **kargs)
if res is None or len(res) == 0:
break
yield res
kargs['offset'] += kargs['limit']
def insert(self, table, records=None, replaceOnConflict=False, ignoreExtraColumns=False, **args):
"""Insert records (a dict or list of dicts) into table.
If records is None, a single record may be specified via keyword arguments.
==================== =======================================
**Arguments:**
table Name of the table to insert into
records Data to insert. May be a variety of formats: numpy record array, list of dicts,
dict of lists, dict of values (single record)
replaceOnConflict If True, inserts that conflict with pre-existing data will overwrite the
pre-existing data. This occurs, for example, when a column has a 'unique'
constraint.
ignoreExtraColumns If True, ignore any extra columns in the data that do not exist in the table
==================== =======================================
"""
for n,nmax in self.iterInsert(table=table, records=records, replaceOnConflict=replaceOnConflict, ignoreExtraColumns=ignoreExtraColumns, chunkAll=True, **args):
pass
def iterInsert(self, table, records=None, replaceOnConflict=False, ignoreExtraColumns=False, chunkSize=500, chunkAll=False, **args):
"""
Iteratively insert chunks of data into a table while yielding a tuple (n, max)
indicating progress. This *must* be used inside a for loop::
for n,nmax in db.iterInsert(table, data):
print("Insert %d%% complete" % (100. * n / nmax))
Use the chunkSize argument to determine how many records are inserted per iteration.
See insert() for a description of all other options.
"""
p = debug.Profiler("SqliteDatabase.insert", disabled=True)
if records is None:
records = [args]
#if type(records) is not list:
#records = [records]
if len(records) == 0:
return
ret = []
with self.transaction():
## Remember that _prepareData may change the number of columns!
records = TableData(self._prepareData(table, records, ignoreUnknownColumns=ignoreExtraColumns, batch=True))
p.mark("prepared data")
columns = list(records.keys())
insert = "INSERT"
if replaceOnConflict:
insert += " OR REPLACE"
#print "Insert:", columns
cmd = "%s INTO %s (%s) VALUES (%s)" % (insert, table, quoteList(columns), ','.join([':'+f for f in columns]))
numRecs = len(records)
if chunkAll: ## insert all records in one go.
self.exe(cmd, records, batch=True)
yield (numRecs, numRecs)
return
chunkSize = int(chunkSize) ## just make sure
offset = 0
i = 0
while offset < len(records):
#print len(columns), len(records[0]), len(self.tableSchema(table))
chunk = records[offset:offset+chunkSize]
self.exe(cmd, chunk, batch=True)
offset += len(chunk)
yield (offset, numRecs)
p.mark("Transaction done")
p.finish()
def delete(self, table, where):
with self.transaction():
whereStr = self._buildWhereClause(where, table)
cmd = "DELETE FROM %s %s" % (table, whereStr)
return self(cmd)
def update(self, table, vals, where=None, rowid=None, sql=''):
"""Update records in the DB.
Arguments:
vals: dict of {column: value} pairs
where: SQL clause specifying rows to update
rowid: (int) row ID; used instead of 'where'
sql: SQL string to append to the end of the statement"""
if rowid is not None:
if where is not None:
raise Exception("'where' and 'rowid' are mutually exclusive arguments.")
where = {'rowid': rowid}
with self.transaction():
whereStr = self._buildWhereClause(where, table)
setStr = ', '.join(['"%s"=:%s' % (k, k) for k in vals])
cmd = "UPDATE %s SET %s %s %s" % (table, setStr, whereStr, sql)
data = self._prepareData(table, [vals], batch=True)
return self(cmd, data, batch=True)
def transaction(self, name=None):
"""Return an enterable Transaction instance.
Use thusly::
with db.transaction():
db.doStuff()
db.doMoreStuff()
If an exception is raised while the transaction is active, all changes will be rolled back.
Note that wrapping multiple database operations in a transaction can *greatly* increase
performance.
"""
return Transaction(self, name)
def lastInsertRow(self):
q = self("select last_insert_rowid()")
return list(q[0].values())[0]
def replace(self, *args, **kargs):
return self.insert(*args, replaceOnConflict=True, **kargs)
def createTable(self, table, columns, sql=""):
"""Create a table in the database.
table: (str) the name of the table to create
columns: (list) a list of tuples (name, type, constraints) defining columns in the table.
All 3 elements of the tuple are strings; constraints are optional.
Types may be any string, but are typically int, real, text, or blob.
(see sqlite 'CREATE TABLE')
"""
#print "create table", table, ', '.join(columns)
columns = parseColumnDefs(columns)
columnStr = []
for name, conf in columns.items():
columnStr.append('"%s" %s %s' % (name, conf['Type'], conf.get('Constraints', '')))
columnStr = ','.join(columnStr)
self('CREATE TABLE "%s" (%s) %s' % (table, columnStr, sql))
self._readTableList()
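# Example usage (a sketch -- the table and column names are hypothetical):
#   db.createTable('samples', [
#       ('id', 'int', 'unique'),
#       ('name', 'text'),
#       ('data', 'blob'),
#   ])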
def createIndex(self, table, columns, ifNotExist=True):
"""
Create an index on table (columns)
*columns* may be the name of a single column or a list of column names.
(see sqlite 'CREATE INDEX')
"""
ine = "IF NOT EXISTS" if ifNotExist else ""
if isinstance(columns, six.string_types):
columns = [columns]
name = table + '__' + '_'.join(columns)
colStr = quoteList(columns)
cmd = 'CREATE INDEX %s "%s" ON "%s" (%s)' % (ine, name, table, colStr)
self(cmd)
def addColumn(self, table, colName, colType, constraints=None):
"""
Add a column to a table.
"""
if constraints is None:
constraints = ''
self('ALTER TABLE "%s" ADD COLUMN "%s" %s %s' % (table, colName, colType, constraints))
self.tables = None
def listTables(self):
"""
Return a list of the names of tables in the DB.
"""
if self.tables is None:
self._readTableList()
return list(self.tables.keys())
def removeTable(self, table):
self('DROP TABLE "%s"' % table)
def hasTable(self, table):
self.listTables() ## make sure table list has been generated
return table in self.tables ## this is a case-insensitive operation
def tableSchema(self, table):
"""
Return a dict {'columnName': 'type', ...} for the specified table.
"""
if self.tables is None:
self._readTableList()
return self.tables[table].copy() ## this is a case-insensitive operation
def tableLength(self, table):
return self('select count(*) from "%s"' % table)[0]['count(*)']
def _buildWhereClause(self, where, table):
if where is None or len(where) == 0:
return ''
where = self._prepareData(table, where)[0]
conds = []
for k,v in where.items():
if isinstance(v, six.string_types):
conds.append('"%s"=\'%s\'' % (k, v))
else:
conds.append('"%s"=%s' % (k,v))
whereStr = "WHERE " + " AND ".join(conds)
return whereStr
def _prepareData(self, table, data, ignoreUnknownColumns=False, batch=False):
## Massage data so it is ready for insert into the DB. (internal use only)
## - data destined for BLOB columns is pickled
## - numerical columns convert to int or float
## - text columns convert to unicode
## converters may be a dict of {'columnName': function}
## that overrides the default conversion functions.
## Returns a dict-of-lists if batch=True, otherwise list-of-dicts
data = TableData(data)
converters = {}
## determine the conversion functions to use for each column.
schema = self.tableSchema(table)
for k in schema:
if k in converters:
continue
typ = schema[k].lower()
if typ == 'blob':
converters[k] = lambda obj: buffer(pickle.dumps(obj))
elif typ == 'int':
converters[k] = int
elif typ == 'real':
converters[k] = float
elif typ == 'text':
converters[k] = str
else:
converters[k] = lambda obj: obj
if batch:
newData = dict([(k,[]) for k in data.columnNames() if not (ignoreUnknownColumns and (k not in schema))])
else:
newData = []
for rec in data:
newRec = {}
for k in rec:
if k not in schema:
if ignoreUnknownColumns:
continue
#if addUnknownColumns: ## Is this just a bad idea?
#dtyp = self.suggestColumnType(rec[k])
#self.addColumn(table, k, dtyp)
if rec[k] is None:
newRec[k] = None
else:
try:
newRec[k] = converters[k](rec[k])
except:
newRec[k] = rec[k]
if k.lower() != 'rowid':
if k not in schema:
raise Exception("Column '%s' not present in table '%s'" % (k, table))
print("Warning: Setting %s column %s.%s with type %s" % (schema[k], table, k, str(type(rec[k]))))
if batch:
for k in newData:
newData[k].append(newRec.get(k, None))
else:
newData.append(newRec)
#print "new data:", newData
return newData
def _queryToDict(self, q):
prof = debug.Profiler("_queryToDict", disabled=True)
res = []
for rec in q:
res.append(self._readRecord(rec))
return res
def _queryToArray(self, q):
prof = debug.Profiler("_queryToArray", disabled=True)
recs = self._queryToDict(q)
prof.mark("got records")
if len(recs) < 1:
#return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None
return None
rec1 = recs[0]
dtype = functions.suggestRecordDType(rec1, singleRecord=True)
#print rec1, dtype
arr = np.empty(len(recs), dtype=dtype)
arr[0] = tuple(rec1.values())
for i in range(1, len(recs)):
arr[i] = tuple(recs[i].values())
prof.mark('converted to array')
prof.finish()
return arr
def _readRecord(self, rec):
prof = debug.Profiler("_readRecord", disabled=True)
data = collections.OrderedDict()
names = list(rec.keys())
for i in range(len(rec)):
val = rec[i]
name = names[i]
## Unpickle byte arrays into their original objects.
## (Hopefully they were stored as pickled data in the first place!)
if isinstance(val, buffer):
val = pickle.loads(str(val))
data[name] = val
prof.finish()
return data
def _readTableList(self):
"""Reads the schema for each table, extracting the column names and types."""
names = self("select name from sqlite_master where type='table' or type='view'")
tables = advancedTypes.CaselessDict()
for table in names:
table = table['name']
columns = advancedTypes.CaselessDict()
recs = self('PRAGMA table_info(%s)' % table)
for rec in recs:
columns[rec['name']] = rec['type']
tables[table] = columns
self.tables = tables
def quoteList(strns):
"""Given a list of strings, return a single string like '"string1", "string2",...'
Note: in SQLite, double quotes are for escaping table and column names;
single quotes are for string literals.
"""
return ','.join(['"'+s+'"' for s in strns])
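# e.g. quoteList(['id', 'name']) returns '"id","name"'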
class Transaction:
"""See SQLiteDatabase.transaction()"""
def __init__(self, db, name=None):
self.db = db
self.name = name
def __enter__(self):
if self.name is None:
self.name = 'transaction%d' % len(self.db._transactions)
self.db('SAVEPOINT %s' % self.name)
self.db._transactions.append(self)
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.db('RELEASE SAVEPOINT %s' % self.name)
else:
try:
self.db('ROLLBACK TRANSACTION TO %s' % self.name)
self.db.tables = None ## make sure we are forced to re-read the table list after the rollback.
except Exception:
print("WARNING: Error occurred during transaction and rollback failed.")
if self.db._transactions[-1] is not self:
print(self, self.db._transactions)
raise Exception('Tried to exit transaction before another nested transaction has finished.')
self.db._transactions.pop(-1)
class TableData:
"""
Class for presenting multiple forms of tabular data through a consistent interface.
May contain:
- numpy record array
- list-of-dicts (all dicts are _not_ required to have the same keys)
- dict-of-lists
- dict (single record)
Note: if all the values in this record are lists, it will be interpreted as multiple records
Data can be accessed and modified by column, by row, or by value
data[columnName] # returns list or array
data[rowId] # returns dict or ordereddict
data[columnName, rowId] = value
data[columnName] = [value, value, ...]
data[rowId] = {columnName: value, ...}
"""
def __init__(self, data):
self.data = data
if isinstance(data, np.ndarray):
self.mode = 'array'
elif isinstance(data, list):
self.mode = 'list'
elif isinstance(data, dict):
types = set(map(type, list(data.values())))
## dict may be a dict-of-lists or a single record
types -= set([list, np.ndarray]) ## if dict contains any non-sequence values, it is probably a single record.
if len(types) != 0:
self.data = [self.data]
self.mode = 'list'
else:
self.mode = 'dict'
elif isinstance(data, TableData) or 'TableData' in str(type(data)):
self.data = data.data
self.mode = data.mode
else:
raise Exception("Cannot create TableData from object '%s' (type='%s')" % (str(data), type(data)))
for fn in ['__getitem__', '__setitem__']:
setattr(self, fn, getattr(self, '_TableData'+fn+self.mode))
self.copy = getattr(self, 'copy_' + self.mode)
def originalData(self):
return self.data
def toArray(self):
if self.mode == 'array':
return self.data
if len(self) < 1:
#return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None
return None
rec1 = self[0]
#dtype = functions.suggestRecordDType(self)
## Need to look through all data before deciding on dtype.
## It is not sufficient to look at just the first record,
## nor to look at the column types.
types = {k:set() for k in self.keys()}
for rec in self:
for k,v in rec.items():
types[k].add(type(v))
dtype = []
for k in self.keys():
t = types[k]
if t == set([float]) or t == set([float, type(None)]):
dtype.append((k, float))
elif t == set([int]):
dtype.append((k, int))
else:
dtype.append((k, object))
#print rec1, dtype
arr = np.empty(len(self), dtype=dtype)
arr[0] = tuple(rec1.values())
for i in range(1, len(self)):
arr[i] = tuple(self[i].values())
return arr
def __getitem__array(self, arg):
if isinstance(arg, six.string_types):
return self.data[arg]
elif isinstance(arg, int):
return collections.OrderedDict([(k, self.data[k][arg]) for k in self.columnNames()])
elif isinstance(arg, tuple):
return self.data[arg[0]][arg[1]]
elif isinstance(arg, slice):
return TableData(self.data[arg])
else:
raise Exception("Cannot index TableData with object '%s' (type='%s')" % (str(arg), type(arg)))
def __getitem__list(self, arg):
if isinstance(arg, six.string_types):
return [d.get(arg, None) for d in self.data]
elif isinstance(arg, int):
return self.data[arg]
elif isinstance(arg, tuple):
arg = self._orderArgs(arg)
return self.data[arg[0]][arg[1]]
elif isinstance(arg, slice):
return TableData(self.data[arg])
else:
raise Exception("Cannot index TableData with object '%s' (type='%s')" % (str(arg), type(arg)))
def __getitem__dict(self, arg):
if isinstance(arg, six.string_types):
return self.data[arg]
elif isinstance(arg, int):
return collections.OrderedDict([(k, v[arg]) for k, v in self.data.items()])
elif isinstance(arg, tuple):
arg = self._orderArgs(arg)
return self.data[arg[1]][arg[0]]
elif isinstance(arg, slice):
return TableData(collections.OrderedDict([(k, v[arg]) for k, v in self.data.items()]))
else:
raise Exception("Cannot index TableData with object '%s' (type='%s')" % (str(arg), type(arg)))
def __setitem__array(self, arg, val):
if isinstance(arg, tuple):
self.data[arg[0]][arg[1]] = val
else:
self.data[arg] = val
def __setitem__list(self, arg, val):
if isinstance(arg, six.string_types):
if len(val) != len(self.data):
raise Exception("Values (%d) and data set (%d) are not the same length." % (len(val), len(self.data)))
for i, rec in enumerate(self.data):
rec[arg] = val[i]
elif isinstance(arg, int):
self.data[arg] = val
elif isinstance(arg, tuple):
arg = self._orderArgs(arg)
self.data[arg[0]][arg[1]] = val
else:
raise TypeError(type(arg))
def __setitem__dict(self, arg, val):
if isinstance(arg, six.string_types):
if len(val) != len(self.data[arg]):
raise Exception("Values (%d) and data set (%d) are not the same length." % (len(val), len(self.data[arg])))
self.data[arg] = val
elif isinstance(arg, int):
for k in self.data:
self.data[k][arg] = val[k]
elif isinstance(arg, tuple):
arg = self._orderArgs(arg)
self.data[arg[1]][arg[0]] = val
else:
raise TypeError(type(arg))
def _orderArgs(self, args):
## return args in (int, str) order
if isinstance(args[0], six.string_types):
return (args[1], args[0])
else:
return args
def copy_array(self):
return TableData(self.data.copy())
def copy_list(self):
return TableData([rec.copy() for rec in self.data])
def copy_dict(self):
return TableData({k:v[:] for k,v in self.data.items()})
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __len__(self):
if self.mode == 'array' or self.mode == 'list':
return len(self.data)
else:
return max(list(map(len, self.data.values())))
def columnNames(self):
"""returns column names in no particular order"""
if self.mode == 'array':
return self.data.dtype.names
elif self.mode == 'list':
if len(self.data) == 0:
return []
return list(self.data[0].keys()) ## all records must have all keys.
#names = set()
#for row in self.data:
#names.update(row.keys())
#return list(names)
elif self.mode == 'dict':
return list(self.data.keys())
def keys(self):
return self.columnNames()
def parseColumnDefs(defs, keyOrder=None):
"""
Translate a few different forms of column definitions into a single common format.
These formats are accepted for all methods which request column definitions (createTable,
checkTable, etc)
list of tuples: [(name, type, <constraints>), ...]
dict of strings: {name: type, ...}
dict of tuples: {name: (type, <constraints>), ...}
dict of dicts: {name: {'Type': type, ...}, ...}
Returns dict of dicts as the common format.
"""
if keyOrder is None:
keyOrder = ['Type', 'Constraints']
def isSequence(x):
return isinstance(x, list) or isinstance(x, tuple)
def toDict(args):
d = collections.OrderedDict()
for i,v in enumerate(args):
d[keyOrder[i]] = v
if i >= len(keyOrder) - 1:
break
return d
if isSequence(defs) and all(map(isSequence, defs)):
return collections.OrderedDict([(c[0], toDict(c[1:])) for c in defs])
if isinstance(defs, dict):
ret = collections.OrderedDict()
for k, v in defs.items():
if isSequence(v):
ret[k] = toDict(v)
elif isinstance(v, dict):
ret[k] = v
elif isinstance(v, six.string_types):
ret[k] = {'Type': v}
else:
raise Exception("Invalid column-list specification: %s" % str(defs))
return ret
else:
raise Exception("Invalid column-list specification: %s" % str(defs))
| 40.592641 | 167 | 0.547182 | 3,648 | 30,891 | 4.594846 | 0.155154 | 0.020045 | 0.0105 | 0.002506 | 0.197172 | 0.159945 | 0.145925 | 0.123076 | 0.092352 | 0.089846 | 0 | 0.003966 | 0.338901 | 30,891 | 760 | 168 | 40.646053 | 0.816815 | 0.294358 | 0 | 0.241164 | 0 | 0 | 0.081334 | 0.002024 | 0.002079 | 0 | 0 | 0 | 0 | 1 | 0.106029 | false | 0.002079 | 0.024948 | 0.018711 | 0.251559 | 0.008316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
934b94904687667744b39ca9e00a4c7308a9eefb | 7,381 | py | Python | HighLevelAnalyzer.py | mikeITMattersMost/saleae_si443x_decoder | 00693ca7580f449355bc3f83f8f48038684e34b4 | [
"MIT"
] | null | null | null | HighLevelAnalyzer.py | mikeITMattersMost/saleae_si443x_decoder | 00693ca7580f449355bc3f83f8f48038684e34b4 | [
"MIT"
] | null | null | null | HighLevelAnalyzer.py | mikeITMattersMost/saleae_si443x_decoder | 00693ca7580f449355bc3f83f8f48038684e34b4 | [
"MIT"
] | null | null | null | from typing import Iterable, Optional, Union
from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting
si_registers = {
0x00: "Device Type (R)",
0x01: "Device Version (R)",
0x02: "Device Status (R)",
0x03: "Interrupt Status 1 (R)",
0x04: "Interrupt Status 2 (R)",
0x05: "Interrupt Enable 1 (R/W)",
0x06: "Interrupt Enable 2 (R/W)",
0x07: "Operating & Function Control 1 (R/W)",
0x08: "Operating & Function Control 2 (R/W)",
0x09: "Crystal Oscillator Load Capacitance (R/W)",
0x0A: "Microcontroller Output Clock (R/W)",
0x0B: "GPIO0 Configuration (R/W)",
0x0C: "GPIO1 Configuration (R/W)",
0x0D: "GPIO2 Configuration (R/W)",
0x0E: "I/O Port Configuration (R/W)",
0x0F: "ADC Configuration (R/W)",
0x10: "ADC Sensor Amplifier Offset (R/W)",
0x11: "ADC Value (R)",
0x12: "Temperature Sensor Control (R/W)",
0x13: "Temperature Value Offset (R/W)",
0x14: "Wake-Up Timer Period 1 (R/W)",
0x15: "Wake-Up Timer Period 2 (R/W)",
0x16: "Wake-Up Timer Period 3 (R/W)",
0x17: "Wake-Up Timer Value 1 (R)",
0x18: "Wake-Up Timer Value 2 (R)",
0x19: "Low-Duty Cycle Mode Duration (R/W)",
0x1A: "Low Battery Detector Threshold (R/W)",
0x1B: "Battery Voltage Level (R)",
0x1C: "IF Filter Bandwidth (R/W)",
0x1D: "AFC Loop Gearshift Override (R/W)",
0x1E: "AFC Timing Control (R/W)",
0x1F: "Clock Recovery Gearshift Override (R/W)",
0x20: "Clock Recovery Oversampling Ratio (R/W)",
0x21: "Clock Recovery Offset 2 (R/W)",
0x22: "Clock Recovery Offset 1 (R/W)",
0x23: "Clock Recovery Offset 0 (R/W)",
0x24: "Clock Recovery Timing Loop Gain 1 (R/W)",
0x25: "Clock Recovery Timing Loop Gain 0 (R/W)",
0x26: "Received Signal Strength Indicator (R)",
0x27: "RSSI Threshold for Clear Channel Indicator (R/W)",
0x28: "Antenna Diversity Register 1 (R)",
0x29: "Antenna Diversity Register 2 (R)",
0x2A: "AFC Limiter (R/W)",
0x2B: "AFC Correction Read (R)",
0x2C: "OOK Counter Value 1 (R/W)",
0x2D: "OOK Counter Value 2 (R/W)",
0x2E: "Slicer Peak Hold (R/W)",
0x2F: "Reserved (0x2F)",
0x30: "Data Access Control (R/W)",
0x31: "EzMAC status 0 (R)",
0x32: "Header Control 1 (R/W)",
0x33: "Header Control 2 (R/W)",
0x34: "Preamble Length (R/W)",
0x35: "Preamble Detection Control (R/W)",
0x36: "Sync Word 3 (R/W)",
0x37: "Sync Word 2 (R/W)",
0x38: "Sync Word 1 (R/W)",
0x39: "Sync Word 0 (R/W)",
0x3A: "Transmit Header 3 (R/W)",
0x3B: "Transmit Header 2 (R/W)",
0x3C: "Transmit Header 1 (R/W)",
0x3D: "Transmit Header 0 (R/W)",
0x3E: "Transmit Packet Length (R/W)",
0x3F: "Check Header 3 (R/W)",
0x40: "Check Header 2 (R/W)",
0x41: "Check Header 1 (R/W)",
0x42: "Check Header 0 (R/W)",
0x43: "Header Enable 3 (R/W)",
0x44: "Header Enable 2 (R/W)",
0x45: "Header Enable 1 (R/W)",
0x46: "Header Enable 0 (R/W)",
0x47: "Received Header 3 (R)",
0x48: "Received Header 2 (R)",
0x49: "Received Header 1 (R)",
0x4A: "Received Header 0 (R)",
0x4B: "Received Packet Length (R)",
0x4C: "Reserved (0x4C)",
0x4D: "Reserved (0x4D)",
0x4E: "Reserved (0x4E)",
0x4F: "ADC8 Control (R/W)",
0x50: "Reserved (0x50)",
0x51: "Reserved (0x51)",
0x52: "Reserved (0x52)",
0x53: "Reserved (0x53)",
0x54: "Reserved (0x54)",
0x55: "Reserved (0x55)",
0x56: "Reserved (0x56)",
0x57: "Reserved (0x57)",
0x58: "Reserved (0x58)",
0x59: "Reserved (0x59)",
0x5A: "Reserved (0x5A)",
0x5B: "Reserved (0x5B)",
0x5C: "Reserved (0x5C)",
0x5D: "Reserved (0x5D)",
0x5E: "Reserved (0x5E)",
0x5F: "Reserved (0x5F)",
0x60: "Channel Filter Coefficient Address (R/W)",
0x61: "Reserved (0x61)",
0x62: "Crystal Oscillator/Control Test (R/W)",
0x63: "Reserved (0x63)",
0x64: "Reserved (0x64)",
0x65: "Reserved (0x65)",
0x66: "Reserved (0x66)",
0x67: "Reserved (0x67)",
0x68: "Reserved (0x68)",
0x69: "AGC Override 1 (R/W)",
0x6A: "Reserved (0x6A)",
0x6B: "Reserved (0x6B)",
0x6C: "Reserved (0x6C)",
0x6D: "TX Power (R/W)",
0x6E: "TX Data Rate 1 (R/W)",
0x6F: "TX Data Rate 0 (R/W)",
0x70: "Modulation Mode Control 1 (R/W)",
0x71: "Modulation Mode Control 2 (R/W)",
0x72: "Frequency Deviation (R/W)",
0x73: "Frequency Offset 1 (R/W)",
0x74: "Frequency Offset 2 (R/W)",
0x75: "Frequency Band Select (R/W)",
0x76: "Nominal Carrier Frequency 1 (R/W)",
0x77: "Nominal Carrier Frequency 0 (R/W)",
0x78: "Reserved (0x78)",
0x79: "Frequency Hopping Channel Select (R/W)",
0x7A: "Frequency Hopping Step Size (R/W)",
0x7B: "Reserved (0x7B)",
0x7C: "TX FIFO Control 1 (R/W)",
0x7D: "TX FIFO Control 2 (R/W)",
0x7E: "RX FIFO Control (R/W)",
0x7F: "FIFO Access (R/W)"
}
def get_register_name(register_addr: int) -> str:
try:
return si_registers[register_addr]
except KeyError:
return "UNKNOWN_REGISTER"
class Hla(HighLevelAnalyzer):
result_types = {
"si_address": {"format": "{{data.rw}} {{data.reg}} {{data.value}}"},
"si_read": {"format": "{{data.rw}} {{data.reg}} {{data.value}}"},
"si_write": {"format": "{{data.rw}} {{data.reg}} {{data.value}}"},
}
def __init__(self):
# Previous frame type
# https://support.saleae.com/extensions/analyzer-frame-types/spi-analyzer
self._previous_type: str = ""
# current address
self._address: Optional[int] = None
# current access type
self._rw: str = ""
def decode(self, frame: AnalyzerFrame) -> Optional[Union[Iterable[AnalyzerFrame], AnalyzerFrame]]:
""" Decode frames. """
is_first_byte: bool = self._previous_type == "enable"
self._previous_type: str = frame.type
if frame.type != "result":
return None
mosi: bytes = frame.data["mosi"]
miso: bytes = frame.data["miso"]
#print("mosi bytes: ", mosi)
#print("miso bytes: ", miso)
if is_first_byte:
try:
self._address = mosi[0]
except IndexError:
return None
self._rw = "Write" if self._address & 0x80 != 0 else "Read"
# normalize the address, removing the read/write bit
self._address &= 0x7F
return AnalyzerFrame(
"si_address",
start_time=frame.start_time,
end_time=frame.end_time,
data={"reg": get_register_name(self._address), "rw": self._rw, "value": "reg_"+f"0x{self._address:02X}"},
)
else:
if self._rw.lower() == "write":
try:
byte = mosi[0]
except IndexError:
return None
else:
try:
byte = miso[0]
except IndexError:
return None
ret = AnalyzerFrame(
"si_"+self._rw.lower(),
start_time=frame.start_time,
end_time=frame.end_time,
data={
"reg": get_register_name(self._address),
"rw": self._rw,
"value": self._rw.lower()+"-> "+f"0x{byte:02X}" + " = ASCII "+chr(byte),
},
)
if self._address != 0x7F: # FIFO register (0x7F) does not auto-increment
self._address += 1
self._address &= 0x7F
return ret
| 33.55 | 132 | 0.578377 | 978 | 7,381 | 4.306748 | 0.322086 | 0.036562 | 0.012108 | 0.009497 | 0.096629 | 0.077398 | 0.062678 | 0.05603 | 0.041785 | 0.041785 | 0 | 0.090312 | 0.257418 | 7,381 | 219 | 133 | 33.703196 | 0.678161 | 0.034413 | 0 | 0.098958 | 0 | 0 | 0.459019 | 0.002952 | 0 | 0 | 0.092226 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.010417 | 0 | 0.078125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
934d55cab0f4fe65121120a091896d64ce49a6d5 | 3,719 | py | Python | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | 3 | 2017-10-13T18:40:37.000Z | 2020-02-05T07:36:04.000Z | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | null | null | null | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | 5 | 2017-03-07T03:53:55.000Z | 2020-08-12T13:11:17.000Z | #!/usr/bin/python2.7
# Copyright (c) 2013 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import select
import signal
import subprocess
import threading
import time
import unittest
script_path = os.path.abspath('%s/../../bin/block-until-url' % __file__)
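# Behavior under test, inferred from the cases below (a sketch, not taken
# from the script's own docs): exit 1 for a missing or invalid URL, exit 0
# once the URL yields any HTTP response (200 and 404 both count), and block
# indefinitely while the server sends nothing back. Hypothetical invocation:
#   block-until-url http://localhost:8080/ok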
class UsageTestCase(unittest.TestCase):
def test_no_url(self):
proc = subprocess.Popen([script_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEquals(proc.returncode, 1)
self.assertEquals(out, '')
self.assertIn('invalid url', err)
def test_invalid_url(self):
proc = subprocess.Popen([script_path, 'fooshizzle'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEquals(proc.returncode, 1)
self.assertEquals(out, '')
self.assertIn('invalid url', err)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_test_data(self):
if self.path == '/ok':
ok_data = 'OK!\n'
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Content-Length', str(len(ok_data)))
if self.command != 'HEAD':
self.wfile.write(ok_data)
elif self.path == '/404':
self.send_error(404)
else:
# send nothing so curl fails
pass
def do_GET(self):
self.send_test_data()
def do_HEAD(self):
self.send_test_data()
def log_message(self, format, *args):
pass
class HttpTestCase(unittest.TestCase):
def setUp(self):
self.server = BaseHTTPServer.HTTPServer(
('localhost', 0), TestRequestHandler)
self.server_url = 'http://%s:%s' % self.server.server_address
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
def tearDown(self):
self.server.shutdown()
def test_quick_ok(self):
proc = subprocess.Popen([script_path, '%s/ok' % self.server_url],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEquals(proc.returncode, 0)
self.assertEquals(out, '')
self.assertEquals(err, '')
def test_quick_404(self):
proc = subprocess.Popen([script_path, '%s/404' % self.server_url],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEquals(proc.returncode, 0)
self.assertEquals(out, '')
self.assertEquals(err, '')
def test_timeout(self):
proc = subprocess.Popen([script_path, '%s/bogus' % self.server_url],
bufsize=4096,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timeout = time.time() + 2 # kill after 2 seconds
while time.time() < timeout:
time.sleep(0.1)
self.assertIs(proc.poll(), None, 'script terminated early!')
proc.terminate()
out, err = proc.communicate()
self.assertEquals(proc.returncode, -signal.SIGTERM)
self.assertEquals(out, '')
self.assertEquals(err, '')
if __name__ == '__main__':
unittest.main()
| 32.622807 | 76 | 0.579726 | 402 | 3,719 | 5.233831 | 0.350746 | 0.098859 | 0.042776 | 0.054658 | 0.437738 | 0.437738 | 0.378802 | 0.296103 | 0.271863 | 0.271863 | 0 | 0.013194 | 0.307072 | 3,719 | 113 | 77 | 32.911504 | 0.80326 | 0.0605 | 0 | 0.37931 | 0 | 0 | 0.052752 | 0.008028 | 0 | 0 | 0 | 0 | 0.183908 | 1 | 0.126437 | false | 0.022989 | 0.091954 | 0 | 0.252874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93502650f25d22a96c9c76f56f389213f7e2df2e | 1,883 | py | Python | fasttest.py | godkillok/node2vec | 17b65e1adad01e3881078bc6e9d9eb34e671a296 | [
"MIT"
] | 1 | 2019-12-09T09:14:11.000Z | 2019-12-09T09:14:11.000Z | fasttest.py | godkillok/node2vec | 17b65e1adad01e3881078bc6e9d9eb34e671a296 | [
"MIT"
] | null | null | null | fasttest.py | godkillok/node2vec | 17b65e1adad01e3881078bc6e9d9eb34e671a296 | [
"MIT"
] | null | null | null | import os
import json
from collections import defaultdict
corpus_folder="/data/tanggp/all_text_data_text_pipe/eval_golden"
tags_dic=defaultdict(int)
co=0
node=defaultdict(int)
tags_text=[]
for root, _, files in os.walk(corpus_folder):
for file in files:
raw_corpus_file_path = os.path.join(root, file)
with open(raw_corpus_file_path,"r",encoding="utf8") as f:
lines=f.readlines()
for li in lines:
li=json.loads(li)
tags=li.get("tags",[])
tags=[ta.lower().strip() for ta in tags]
tags=sorted(tags)
tags_text.append(' ;'.join(tags))
with open("/data/tanggp/tmp/tags_text",'w',encoding="utf8") as f:
for ta in tags_text:
f.writelines(ta+'\n')
import fastText as ft
FASTTEXT_SOFTWARE = '/data/tanggp/fastText-0.1.0'
#os.system("cd {} && ./fasttext skipgram -input /data/tanggp/tmp/tags_text -dim 100 -output /data/tanggp/tmp/tags_w2v".format(FASTTEXT_SOFTWARE))
FAST_TEXT_MODEL_PATH='/data/tanggp/tmp/tags_w2v.bin'
model = ft.load_model(FAST_TEXT_MODEL_PATH)
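# Note: fastText's get_sentence_vector() averages the (L2-normalized) word
# vectors of the line, so each tag string maps to one 100-dim embedding
# ("-dim 100" in the training command above). Sketch, with a made-up tag
# string:
#   vec = model.get_sentence_vector("funny ;music")  # numpy array, shape (100,)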
sentor_vetor_list=[]
with open("/data/tanggp/tmp/in_node2vec", "r", encoding="utf8") as f:
in_node2vec=f.readlines()
for text in in_node2vec:
try:
text=text.strip()
sentor_vetor_array=model.get_sentence_vector(text)
sentor_vetor = ','.join([str(w) for w in list(sentor_vetor_array)])
sentor_vetor_list.append(sentor_vetor)
except Exception as e:
print("wrong text ---{}".format(text))
print(e)
with open("/data/tanggp/tmp/w2vec_fast_id", "w", encoding="utf8") as f:
for i,text in enumerate(in_node2vec):
text=text.strip()
f.writelines(text+'\t'+str(i) + '\n')
with open("/data/tanggp/tmp/w2vec_embed", "w", encoding="utf8") as f:
for i in sentor_vetor_list:
f.writelines(i+'\n')
| 33.625 | 145 | 0.650558 | 283 | 1,883 | 4.144876 | 0.310954 | 0.076726 | 0.077579 | 0.063939 | 0.216539 | 0.094629 | 0.034101 | 0 | 0 | 0 | 0 | 0.013324 | 0.202868 | 1,883 | 55 | 146 | 34.236364 | 0.768155 | 0.076474 | 0 | 0.044444 | 0 | 0 | 0.156682 | 0.124424 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088889 | 0 | 0.088889 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
935806f4923d4948dc0c54a63cff487df1a38a40 | 4,205 | py | Python | simulations/structure_function_xray_sims.py | st--/Mrk_335 | 4e9afd0f9b1904ac11209220d4f6896d9be33a0d | [
"MIT"
] | 12 | 2021-03-11T21:27:34.000Z | 2022-01-03T09:37:04.000Z | simulations/structure_function_xray_sims.py | st--/Mrk_335 | 4e9afd0f9b1904ac11209220d4f6896d9be33a0d | [
"MIT"
] | null | null | null | simulations/structure_function_xray_sims.py | st--/Mrk_335 | 4e9afd0f9b1904ac11209220d4f6896d9be33a0d | [
"MIT"
] | 2 | 2021-03-20T22:29:28.000Z | 2021-10-01T03:12:39.000Z | # Copyright Ryan-Rhys Griffiths 2021
# Author: Ryan-Rhys Griffiths
"""
Comparison of GP-interpolated X-ray and true structure functions where the GP interpolated
structure functions are computed following the introduction of gaps into lightcurves.
"""
import numpy as np
from matplotlib import pyplot as plt
from simulation_utils import load_sim_data
from structure_function_utils import compute_gp_structure_function
TIMINGS_FILE = '../processed_data/xray_simulations/x_ray_sim_times.pickle'
GAPPED_FILE = 'sim_curves/xray_lightcurves.dat'
GROUND_TRUTH_FILE = 'sim_curves/xray_lightcurves_no_gaps.dat'
resolution = 5.3
nsims = 1000 # number of simulated curves, i.e. the length of gapped_file
kernel = 'Matern' # ['Matern', 'RQ']
f_plot = False
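# Background note (an assumption for orientation, not taken from this repo):
# the structure function of a lightcurve f(t) is commonly defined as the mean
# squared flux difference SF(tau) = <[f(t + tau) - f(t)]^2> at time lag tau;
# the exact binned estimator used here is implemented in
# compute_gp_structure_function (see structure_function_utils).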
if __name__ == '__main__':
if kernel == 'Matern':
tag = 'Matern_12'
else:
tag = 'Rational Quadratic'
# Load the times for gap points, times for full curves, count rates for gap points and count rates for full curves
# Matrix because second dimension corresponds to nsims.
time, test_times, gapped_count_rates_matrix, ground_truth_count_rates_matrix = load_sim_data(TIMINGS_FILE,
GAPPED_FILE,
GROUND_TRUTH_FILE)
for i in range(0, 15):
# file handle for GP lightcurve
handle = f'SF_xray_samples_{tag} Kernel_iteration_{i}.txt'
gapped_count_rates = np.reshape(gapped_count_rates_matrix[i, :], (-1, 1))
count_rates = np.reshape(ground_truth_count_rates_matrix[i, :], (-1, 1))
gp_count_rates = np.reshape(np.loadtxt(fname=f'SF_samples/xray/{handle}'), (-1, 1))
gapped_tao_plot, gapped_structure_function_vals = compute_gp_structure_function(gapped_count_rates, time, resolution=resolution)
ground_truth_tao_plot, ground_truth_structure_function_vals = compute_gp_structure_function(count_rates, test_times, resolution=resolution)
gp_tao_plot, gp_structure_function_vals = compute_gp_structure_function(gp_count_rates, test_times, resolution=resolution)
np.savetxt(f'saved_sf_values/xray/gapped_tao_plot_{i}.txt', gapped_tao_plot, fmt='%.15f')
np.savetxt(f'saved_sf_values/xray/gapped_structure_function_vals_{i}.txt', gapped_structure_function_vals, fmt='%.15f')
np.savetxt(f'saved_sf_values/xray/{kernel}_gp_tao_plot_{i}.txt', gp_tao_plot, fmt='%.15f')
np.savetxt(f'saved_sf_values/xray/ground_truth_structure_function_vals_{i}.txt', ground_truth_structure_function_vals, fmt='%.15f')
np.savetxt(f'saved_sf_values/xray/ground_truth_tao_plot_{i}.txt', ground_truth_tao_plot, fmt='%.15f')
np.savetxt(f'saved_sf_values/xray/{kernel}_gp_structure_function_vals_{i}.txt', gp_structure_function_vals, fmt='%.15f')
if f_plot:
fig, ax = plt.subplots(1)
plt.scatter(gapped_tao_plot, gapped_structure_function_vals, s=10, marker='+', label='Gapped')
plt.scatter(ground_truth_tao_plot, ground_truth_structure_function_vals, s=10, marker='+', label='Ground Truth')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$\tau$' + ' (days)')
plt.ylabel('SF')
plt.xlim([10, 700])
plt.title('X-ray Gapped Structure Function')
plt.tight_layout()
plt.legend()
plt.savefig(f'SF_sims_figures/xray/gapped_structure_function_{i}')
plt.close()
fig, ax = plt.subplots(1)
plt.scatter(gp_tao_plot, gp_structure_function_vals, s=10, marker='+', label='GP')
plt.scatter(ground_truth_tao_plot, ground_truth_structure_function_vals, s=10, marker='+', label='Ground Truth')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$\tau$' + ' (days)')
plt.ylabel('SF')
plt.xlim([10, 700])
plt.title(f'X-ray GP {kernel} Structure Function')
plt.tight_layout()
plt.legend()
plt.savefig(f'SF_sims_figures/xray/gp_{kernel}_structure_function_{i}')
plt.close()
| 48.895349 | 147 | 0.670392 | 565 | 4,205 | 4.649558 | 0.244248 | 0.142368 | 0.103921 | 0.03426 | 0.564903 | 0.466311 | 0.422155 | 0.306433 | 0.291587 | 0.272554 | 0 | 0.016183 | 0.221165 | 4,205 | 85 | 148 | 49.470588 | 0.785954 | 0.120571 | 0 | 0.333333 | 0 | 0 | 0.232356 | 0.171824 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
935b2e44b7b2c45351f62e76c25c2670f2799100 | 3,223 | py | Python | 3-2.TextLSTM/TextLSTM-Torch.py | aserron/nlp-tutorial | 299dc4369a3c9597b5ac2042c606afe7da67b72f | [
"MIT"
] | 5 | 2020-08-28T02:45:56.000Z | 2021-11-23T07:03:52.000Z | 3-2.TextLSTM/TextLSTM-Torch.py | aserron/nlp-tutorial | 299dc4369a3c9597b5ac2042c606afe7da67b72f | [
"MIT"
] | null | null | null | 3-2.TextLSTM/TextLSTM-Torch.py | aserron/nlp-tutorial | 299dc4369a3c9597b5ac2042c606afe7da67b72f | [
"MIT"
] | 5 | 2020-09-28T01:13:22.000Z | 2021-05-21T01:13:47.000Z | '''
code by Tae Hwan Jung(Jeff Jung) @graykode
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import sys
dtype = torch.FloatTensor
char_arr = [c for c in 'abcdefghijklmnopqrstuvwxyz']
word_dict = {n: i for i, n in enumerate(char_arr)}
number_dict = {i: w for i, w in enumerate(char_arr)}
n_class = len(word_dict) # number of class(=number of vocab)
seq_data = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']
# TextLSTM Parameters
n_step = 3
n_hidden = 128
def make_batch(seq_data):
input_batch, target_batch = [], []
for seq in seq_data:
input = [word_dict[n] for n in seq[:-1]] # 'm', 'a' , 'k' is input
target = word_dict[seq[-1]] # 'e' is target
input_batch.append(np.eye(n_class)[input])
target_batch.append(target)
return torch.Tensor(input_batch), torch.LongTensor(target_batch)
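# e.g. for the word 'make': the input is the stack of one-hot rows for
# 'm', 'a', 'k' (shape [3, n_class] with n_class = 26) and the target is
# word_dict['e'].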
class TextLSTM(nn.Module):
# MOSTLY THE SAME EXCEPT FOR THE USAGE OF THE CELL STATE
def __init__(self):
super(TextLSTM, self).__init__()
self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden)
self.W = nn.Parameter(torch.randn([n_hidden, n_class]).type(dtype))
self.b = nn.Parameter(torch.randn([n_class]).type(dtype))
def forward(self, hidden_state, cell_state, X):
input = X.transpose(0, 1) # X : [n_step, batch_size, n_class]
# hidden_state: [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
# cell_state: [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
# hidden_state_size: the same with the RNN - hidden state
# cell_state_size: the same with the RNN - hidden state
outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
# outputs: [n_step, batch_size, num_directions(=1) * n_hidden]
# outputs_size: the same with the RNN - outputs
outputs = outputs[-1] # [batch_size, n_hidden]
model = torch.mm(outputs, self.W) + self.b # model : [batch_size, n_class]
return model
input_batch, target_batch = make_batch(seq_data)
print("*"*30)
print("input_batch_size:", input_batch.size())
print("*"*30)
print("target_batch_size:", target_batch.size())
print("*"*30)
model = TextLSTM()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
for name, param in model.named_parameters():
print("PARAM: ", name, param.size())
print("-"*30)
# Training
for epoch in range(1000):
optimizer.zero_grad()
hidden_state = torch.zeros(1, len(input_batch), n_hidden)
cell_state = torch.zeros(1, len(input_batch), n_hidden)
output = model(hidden_state, cell_state, input_batch)
loss = criterion(output, target_batch)
if (epoch + 1) % 100 == 0:
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
optimizer.step()
inputs = [sen[:3] for sen in seq_data]
hidden_state_t = torch.zeros(1, len(input_batch), n_hidden)
cell_state_t = torch.zeros(1, len(input_batch), n_hidden)
predict = model(hidden_state_t, cell_state_t, input_batch).data.max(1, keepdim=True)[1]
print(inputs, '->', [number_dict[n.item()] for n in predict.squeeze()])
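# Expected behavior (a sketch, not a captured run): after 1000 epochs the
# model memorizes the ten training words, so the final print should map each
# 3-letter prefix to its last letter, e.g. 'mak' -> 'e', 'nee' -> 'd',
# 'sta' -> 'r'.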
| 31.910891 | 91 | 0.665529 | 486 | 3,223 | 4.195473 | 0.269547 | 0.058852 | 0.024522 | 0.039235 | 0.196175 | 0.166258 | 0.155959 | 0.155959 | 0.155959 | 0.102011 | 0 | 0.017544 | 0.186472 | 3,223 | 100 | 92 | 32.23 | 0.760107 | 0.201365 | 0 | 0.05 | 0 | 0 | 0.053333 | 0.010196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.083333 | 0 | 0.183333 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
935bc89978bfdfe24ff5ac32092e0b5efcbf9ef8 | 5,366 | py | Python | userbot/modules/filemanager.py | elevenrin/WeebProject | 68e3b342afb7fa55293652f458d7366289856f38 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/filemanager.py | elevenrin/WeebProject | 68e3b342afb7fa55293652f458d7366289856f38 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/filemanager.py | elevenrin/WeebProject | 68e3b342afb7fa55293652f458d7366289856f38 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Credits to Userge for Remove and Rename
import io
import os
import os.path
import shutil
import time
from os.path import dirname, exists, isdir, isfile, join
from shutil import rmtree
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import humanbytes
MAX_MESSAGE_SIZE_LIMIT = 4095
@register(outgoing=True, pattern=r"^\.ls ?(.*)")
async def lst(event):
if event.fwd_from:
return
cat = event.pattern_match.group(1)
path = cat if cat else os.getcwd()
if not exists(path):
await event.edit(
f"`Tidak ada direktori atau file seperti itu dengan nama` **{cat}**, `periksa lagi!`"
)
return
if isdir(path):
if cat:
msg = "`Folder dan file di `{}` :\n\n".format(path)
else:
msg = "`Folder dan file di direktori saat ini` :\n\n"
lists = os.listdir(path)
files = ""
folders = ""
for contents in sorted(lists):
catpath = path + "/" + contents
if not isdir(catpath):
size = os.stat(catpath).st_size
if contents.endswith((".mp3", ".flac", ".wav", ".m4a")):
files += "🎵 "
elif contents.endswith((".opus")):
files += "🎙 "
elif contents.endswith(
(".mkv", ".mp4", ".webm", ".avi", ".mov", ".flv")
):
files += "🎞 "
elif contents.endswith(
(".zip", ".tar", ".tar.gz", ".rar", ".7z", ".xz")
):
files += "🗜 "
elif contents.endswith(
(".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ico", ".webp")
):
files += "🖼 "
elif contents.endswith((".exe", ".deb")):
files += "⚙️ "
elif contents.endswith((".iso", ".img")):
files += "💿 "
elif contents.endswith((".apk", ".xapk")):
files += "📱 "
elif contents.endswith((".py")):
files += "🐍 "
else:
files += "📄 "
files += f"`{contents}` - __{humanbytes(size)}__\n"
else:
folders += f"📁 `{contents}`\n"
msg = msg + folders + files if files or folders else msg + "__empty directory__"
else:
size = os.stat(path).st_size
msg = "The details of given file :\n\n"
if path.endswith((".mp3", ".flac", ".wav", ".m4a")):
mode = "🎵 "
elif path.endswith((".opus")):
mode = "🎙 "
elif path.endswith((".mkv", ".mp4", ".webm", ".avi", ".mov", ".flv")):
mode = "🎞 "
elif path.endswith((".zip", ".tar", ".tar.gz", ".rar", ".7z", ".xz")):
mode = "🗜 "
elif path.endswith((".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ico", ".webp")):
mode = "🖼 "
elif path.endswith((".exe", ".deb")):
mode = "⚙️ "
elif path.endswith((".iso", ".img")):
mode = "💿 "
elif path.endswith((".apk", ".xapk")):
mode = "📱 "
elif path.endswith((".py")):
mode = "🐍 "
else:
mode = "📄 "
time2 = time.ctime(os.path.getmtime(path))
time3 = time.ctime(os.path.getatime(path))
msg += f"**Location** : `{path}`\n"
msg += f"**Icon** : `{mode}`\n"
msg += f"**Size** : `{humanbytes(size)}`\n"
msg += f"**Last modified** : `{time2}`\n"
msg += f"**Last accessed** : `{time3}`"
if len(msg) > MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(msg)) as out_file:
out_file.name = "ls.txt"
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=path,
)
await event.delete()
else:
await event.edit(msg)
@register(outgoing=True, pattern=r"^\.rm ?(.*)")
async def rmove(event):
"""Removing Directory/File"""
cat = event.pattern_match.group(1)
if not cat:
await event.edit("`Jalur file tidak ada!`")
return
if not exists(cat):
await event.edit("`Jalur file tidak ada!`")
return
if isfile(cat):
os.remove(cat)
else:
rmtree(cat)
await event.edit(f"**{cat}** `dihapus!`")
@register(outgoing=True, pattern=r"^\.rn ([^|]+)\|([^|]+)")
async def rname(event):
"""Renaming Directory/File"""
cat = str(event.pattern_match.group(1)).strip()
new_name = str(event.pattern_match.group(2)).strip()
if not exists(cat):
await event.edit(f"`Jalur file` : **{cat}** `tidak ada!`")
return
new_path = join(dirname(cat), new_name)
shutil.move(cat, new_path)
await event.edit(f"`Nama diganti dari` **{cat}** `menjadi` **{new_path}**")
CMD_HELP.update(
{
"file": "`.ls [direktori]`"
"\n➥ Dapatkan daftar file di dalam direktori."
"\n\n`.rm [direktori/file]`"
"\n➥ Hapus file atau direktori."
"\n\n`.rn [direktori/file] | [nama baru]`"
"\n➥ Mengubah nama file atau direktori."
}
)
| 33.962025 | 98 | 0.479873 | 607 | 5,366 | 4.227348 | 0.314662 | 0.035074 | 0.062354 | 0.034295 | 0.252143 | 0.132892 | 0.112627 | 0.077163 | 0.032736 | 0.032736 | 0 | 0.005664 | 0.341968 | 5,366 | 157 | 99 | 34.178344 | 0.713679 | 0.007268 | 0 | 0.169014 | 0 | 0 | 0.216062 | 0.008734 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070423 | 0 | 0.105634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
935d0c7ba01171407513e36657e2685947f91c56 | 1,946 | py | Python | test/test_input_data.py | simonpf/qprof | 3a501ca7dc694a3455be928453afa13c236ad492 | [
"MIT"
] | 1 | 2020-09-19T12:00:57.000Z | 2020-09-19T12:00:57.000Z | test/test_input_data.py | simonpf/qprof | 3a501ca7dc694a3455be928453afa13c236ad492 | [
"MIT"
] | null | null | null | test/test_input_data.py | simonpf/qprof | 3a501ca7dc694a3455be928453afa13c236ad492 | [
"MIT"
] | null | null | null | """
Tests for the qprof.input_data module.
"""
import numpy as np
import pytest
from qprof.models import get_normalizer, get_model
from qprof.input_data import BinInputData
NETCDF4_AVAILABLE = False
try:
import netCDF4
NETCDF4_AVAILABLE = True
except ImportError:
pass
def test_bin_input_data(test_data):
"""
Ensure that bin data is correctly converted to retrieval input.
"""
normalizer = get_normalizer()
input_file = BinInputData(test_data["bin_file"],
normalizer)
batch = input_file.get_batch()
batch = normalizer.invert(batch)
bin_data = input_file.bin_file.handle
assert np.all(np.isclose(batch[:, :3],
bin_data["brightness_temperatures"][:, :3]))
assert np.all(np.isclose(batch[:, 15],
bin_data["two_meter_temperature"]))
assert np.all(np.isclose(batch[:, 16],
bin_data["total_column_water_vapor"]))
st = np.where(batch[:, 17:36])[1]
assert np.all(np.isclose(st, input_file.bin_file.surface_type))
at = np.where(batch[:, 36:])[1]
assert np.all(np.isclose(at, input_file.bin_file.airmass_type))
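# Batch layout implied by the assertions above (a sketch, inferred from this
# test rather than from documentation):
#   cols 0-14  brightness temperatures (only the first 3 are checked here)
#   col  15    two-meter temperature
#   col  16    total column water vapor
#   cols 17-35 one-hot surface type
#   cols 36-   one-hot airmass type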
@pytest.mark.skipif(not NETCDF4_AVAILABLE, reason="netCDF4 package missing.")
def test_bin_retrieval(test_data):
"""
Ensure that the retrieval runs on the bin data and that the results are
written to file correctly.
"""
normalizer = get_normalizer()
model = get_model()
input_file = test_data["bin_file"]
folder = input_file.parent
input_data = BinInputData(input_file,
normalizer)
results = input_data.run_retrieval(model)
retrieval_file = input_data.write_retrieval_results(folder, results)
assert retrieval_file.name[:-2] == input_file.name[:-3]
data = netCDF4.Dataset(retrieval_file)
y = data["truth"][:].data
assert np.all(np.isclose(input_data.bin_file.handle["surface_precip"], y))
| 29.044776 | 78 | 0.661357 | 251 | 1,946 | 4.896414 | 0.322709 | 0.065907 | 0.053702 | 0.063466 | 0.251424 | 0.235151 | 0.174125 | 0.136697 | 0.136697 | 0.136697 | 0 | 0.014579 | 0.224563 | 1,946 | 66 | 79 | 29.484848 | 0.799867 | 0.085303 | 0 | 0.097561 | 0 | 0 | 0.072947 | 0.039058 | 0 | 0 | 0 | 0 | 0.170732 | 1 | 0.04878 | false | 0.02439 | 0.146341 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9366ef921a5f620eac2a027b90f615edf00495a8 | 57,075 | py | Python | vmware_nsx/common/config.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | vmware_nsx/common/config.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | vmware_nsx/common/config.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from neutron.conf.db import l3_hamode_db
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsxv_constants
from vmware_nsx.dvs import dvs_utils
from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.extensions import routersize
LOG = logging.getLogger(__name__)
DEFAULT_VDR_TRANSIT_NETWORK = "169.254.2.0/28"
DEFAULT_PLR_ADDRESS = "169.254.2.3"
class AgentModes(object):
AGENT = 'agent'
AGENTLESS = 'agentless'
COMBINED = 'combined'
class MetadataModes(object):
DIRECT = 'access_network'
INDIRECT = 'dhcp_host_route'
class ReplicationModes(object):
SERVICE = 'service'
SOURCE = 'source'
base_opts = [
cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
deprecated_group='NVP',
help=_("Maximum number of ports of a logical switch on a "
"bridged transport zone. The recommended value for "
"this parameter varies with NSX version.\nPlease use:\n"
"NSX 2.x -> 64\nNSX 3.0, 3.1 -> 5000\n"
"NSX 3.2 -> 10000")),
cfg.IntOpt('max_lp_per_overlay_ls', default=256,
deprecated_group='NVP',
help=_("Maximum number of ports of a logical switch on an "
"overlay transport zone")),
cfg.IntOpt('concurrent_connections', default=10,
deprecated_group='NVP',
help=_("Maximum concurrent connections to each NSX "
"controller.")),
cfg.IntOpt('nsx_gen_timeout', default=-1,
deprecated_name='nvp_gen_timeout',
deprecated_group='NVP',
help=_("Number of seconds a generation id should be valid for "
"(default -1 meaning do not time out)")),
cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
deprecated_group='NVP',
help=_("If set to access_network this enables a dedicated "
"connection to the metadata proxy for metadata server "
"access via Neutron router. If set to dhcp_host_route "
"this enables host route injection via the dhcp agent. "
"This option is only useful if running on a host that "
"does not support namespaces otherwise access_network "
"should be used.")),
cfg.StrOpt('default_transport_type', default='stt',
deprecated_group='NVP',
help=_("The default network tranport type to use (stt, gre, "
"bridge, ipsec_gre, or ipsec_stt)")),
cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
deprecated_group='NVP',
help=_("Specifies in which mode the plugin needs to operate "
"in order to provide DHCP and metadata proxy services "
"to tenant instances. If 'agent' is chosen (default) "
"the NSX plugin relies on external RPC agents (i.e. "
"dhcp and metadata agents) to provide such services. "
"In this mode, the plugin supports API extensions "
"'agent' and 'dhcp_agent_scheduler'. If 'agentless' "
"is chosen (experimental in Icehouse), the plugin will "
"use NSX logical services for DHCP and metadata proxy. "
"This simplifies the deployment model for Neutron, in "
"that the plugin no longer requires the RPC agents to "
"operate. When 'agentless' is chosen, the config option "
"metadata_mode becomes ineffective. The 'agentless' "
"mode works only on NSX 4.1. Furthermore, a 'combined' "
"mode is also provided and is used to support existing "
"deployments that want to adopt the agentless mode. "
"With this mode, existing networks keep being served by "
"the existing infrastructure (thus preserving backward "
"compatibility, whereas new networks will be served by "
"the new infrastructure. Migration tools are provided "
"to 'move' one network from one model to another; with "
"agent_mode set to 'combined', option "
"'network_auto_schedule' in neutron.conf is ignored, as "
"new networks will no longer be scheduled to existing "
"dhcp agents.")),
cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE,
choices=(ReplicationModes.SERVICE, ReplicationModes.SOURCE),
help=_("Specifies which mode packet replication should be done "
"in. If set to service a service node is required in "
"order to perform packet replication. This can also be "
"set to source if one wants replication to be performed "
"locally (NOTE: usually only useful for testing if one "
"does not want to deploy a service node). In order to "
"leverage distributed routers, replication_mode should "
"be set to 'service'.")),
cfg.FloatOpt('qos_peak_bw_multiplier', default=2.0, min=1.0,
help=_("The QoS rules peak bandwidth value will be the "
"configured maximum bandwidth of the QoS rule, "
"multiplied by this value. Value must be bigger than"
" 1")),
]
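# e.g. with qos_peak_bw_multiplier = 2.0 (the default), a QoS rule whose
# configured maximum bandwidth is 100 Mbps is programmed with a peak
# bandwidth of 200 Mbps.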
connection_opts = [
cfg.StrOpt('nsx_user',
default='admin',
deprecated_name='nvp_user',
help=_('User name for NSX controllers in this cluster')),
cfg.StrOpt('nsx_password',
default='admin',
deprecated_name='nvp_password',
secret=True,
help=_('Password for NSX controllers in this cluster')),
cfg.IntOpt('http_timeout',
default=75,
help=_('Time before aborting a request on an '
'unresponsive controller (Seconds)')),
cfg.IntOpt('retries',
default=2,
help=_('Maximum number of times a particular request '
'should be retried')),
cfg.IntOpt('redirects',
default=2,
help=_('Maximum number of times a redirect response '
'should be followed')),
cfg.ListOpt('nsx_controllers',
default=[],
deprecated_name='nvp_controllers',
help=_('Comma-separated list of NSX controller '
'endpoints (<ip>:<port>). When port is omitted, '
'443 is assumed. This option MUST be specified. '
'e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80')),
cfg.IntOpt('conn_idle_timeout',
default=900,
help=_('Reconnect connection to nsx if not used within this '
'amount of time.')),
]
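# Example (a sketch): the connection options above map to ini entries such
# as the following. The section name and every value here are illustrative
# assumptions, not prescriptive:
#
# [DEFAULT]
# nsx_user = admin
# nsx_password = secret
# nsx_controllers = 10.0.0.2:443,10.0.0.3
# http_timeout = 75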
cluster_opts = [
cfg.StrOpt('default_tz_uuid',
help=_("This is uuid of the default NSX Transport zone that "
"will be used for creating tunneled isolated "
"\"Neutron\" networks. It needs to be created in NSX "
"before starting Neutron with the nsx plugin.")),
cfg.StrOpt('default_l3_gw_service_uuid',
help=_("(Optional) UUID of the NSX L3 Gateway "
"service which will be used for implementing routers "
"and floating IPs")),
cfg.StrOpt('default_l2_gw_service_uuid',
help=_("(Optional) UUID of the NSX L2 Gateway service "
"which will be used by default for network gateways")),
cfg.StrOpt('default_service_cluster_uuid',
help=_("(Optional) UUID of the Service Cluster which will "
"be used by logical services like dhcp and metadata")),
cfg.StrOpt('nsx_default_interface_name', default='breth0',
deprecated_name='default_interface_name',
help=_("Name of the interface on a L2 Gateway transport node "
"which should be used by default when setting up a "
"network connection")),
]
nsx_common_opts = [
cfg.StrOpt('nsx_l2gw_driver',
help=_("Specify the class path for the Layer 2 gateway "
"backend driver (i.e. NSX-T/NSX-V). This field will be "
"used when a L2 Gateway service plugin is configured.")),
cfg.StrOpt('locking_coordinator_url',
help=_("(Optional) URL for distributed locking coordination "
"resource for lock manager. This value is passed as a "
"parameter to tooz coordinator. By default, value is "
"None and oslo_concurrency is used for single-node "
"lock management.")),
cfg.BoolOpt('api_replay_mode',
default=False,
help=_("If true, the server then allows the caller to "
"specify the id of resources. This should only "
"be enabled in order to allow one to migrate an "
"existing install of neutron to a new VMWare plugin.")),
cfg.ListOpt('nsx_extension_drivers',
default=[],
help=_("An ordered list of extension driver "
"entrypoints to be loaded from the "
"vmware_nsx.extension_drivers namespace.")),
cfg.StrOpt('smtp_gateway',
help=_("(Optional) IP address of SMTP gateway to use for"
"admin warnings.")),
cfg.StrOpt('smtp_from_addr',
help=_("(Optional) email address to use for outgoing admin"
"notifications.")),
cfg.ListOpt('snmp_to_list',
default=[],
help=_("(Optional) List of email addresses for "
"notifications.")),
cfg.IntOpt('octavia_stats_interval',
default=10,
help=_("Interval in seconds for Octavia statistics reporting. "
"0 means no reporting")),
]
nsx_v3_and_p = [
cfg.ListOpt('nsx_api_user',
default=['admin'],
help=_('User names for the NSX managers')),
cfg.ListOpt('nsx_api_password',
default=['default'],
secret=True,
help=_('Passwords for the NSX managers')),
cfg.ListOpt('nsx_api_managers',
default=[],
help=_("IP address of one or more NSX managers separated "
"by commas. The IP address should be of the form:\n"
"[<scheme>://]<ip_address>[:<port>]\nIf scheme is not "
"provided https is used. If port is not provided port "
"80 is used for http and port 443 for https.")),
cfg.BoolOpt('nsx_use_client_auth',
default=False,
help=_("Use client certificate in NSX manager "
"authentication")),
cfg.StrOpt('nsx_client_cert_file',
default='',
help=_("File to contain client certificate and private key")),
cfg.StrOpt('nsx_client_cert_pk_password',
default="",
secret=True,
help=_("password for private key encryption")),
cfg.StrOpt('nsx_client_cert_storage',
default='nsx-db',
choices=['nsx-db', 'none'],
help=_("Storage type for client certificate sensitive data")),
cfg.IntOpt('retries',
default=10,
help=_('Maximum number of times to retry API requests upon '
'stale revision errors.')),
cfg.ListOpt('ca_file',
                help=_('Specify CA bundle files to use in verifying the NSX '
'Managers server certificate. This option is ignored '
'if "insecure" is set to True. If "insecure" is set to '
'False and ca_file is unset, the system root CAs will '
'be used to verify the server certificate.')),
cfg.BoolOpt('insecure',
default=True,
help=_('If true, the NSX Manager server certificate is not '
'verified. If false the CA bundle specified via '
'"ca_file" will be used or if unsest the default '
'system root CAs will be used.')),
cfg.IntOpt('http_timeout',
default=10,
help=_('The time in seconds before aborting a HTTP connection '
'to a NSX manager.')),
cfg.IntOpt('http_read_timeout',
default=180,
help=_('The time in seconds before aborting a HTTP read '
'response from a NSX manager.')),
cfg.IntOpt('http_retries',
default=3,
help=_('Maximum number of times to retry a HTTP connection.')),
cfg.IntOpt('concurrent_connections', default=10,
help=_("Maximum concurrent connections to each NSX "
"manager.")),
cfg.IntOpt('conn_idle_timeout',
default=10,
help=_("The amount of time in seconds to wait before ensuring "
"connectivity to the NSX manager if no manager "
"connection has been used.")),
cfg.IntOpt('redirects',
default=2,
help=_('Number of times a HTTP redirect should be followed.')),
cfg.BoolOpt('log_security_groups_blocked_traffic',
default=False,
help=_("(Optional) Indicates whether distributed-firewall "
"rule for security-groups blocked traffic is logged.")),
cfg.BoolOpt('log_security_groups_allowed_traffic',
default=False,
help=_("(Optional) Indicates whether distributed-firewall "
"security-groups rules are logged.")),
cfg.ListOpt('network_vlan_ranges',
default=[],
help=_("List of <TZ UUID>:<vlan_min>:<vlan_max> "
"specifying Transport Zone UUID usable for VLAN "
"provider networks, as well as ranges of VLAN "
"tags on each available for allocation to networks.")),
cfg.ListOpt('availability_zones',
default=[],
                help=_('Optional parameter defining the availability zone '
                       'names of networks for the native dhcp configuration. '
                       'The configuration of each zone will be under a '
                       'group named [az:<name>]')),
cfg.StrOpt('metadata_proxy',
help=_("This is the name or UUID of the NSX Metadata Proxy "
"that will be used to enable native metadata service. "
"It needs to be created in NSX before starting Neutron "
"with the NSX plugin.")),
cfg.StrOpt('native_metadata_route',
default="169.254.169.254/31",
help=_("The metadata route used for native metadata proxy "
"service.")),
cfg.BoolOpt('windows_metadata_route',
default=True,
help=_("Inject a route for allowing windows guest access NSX "
"native metadata proxy service")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_("Domain to use for building the hostnames.")),
cfg.ListOpt('nameservers',
default=[],
help=_("List of nameservers to configure for the DHCP "
"binding entries. These will be used if there are no "
"nameservers defined on the subnet.")),
cfg.StrOpt('edge_cluster',
help=_("(Optional) Specifying an edge cluster for Tier1 "
"routers to connect other that the one connected to"
" the Tier0 router")),
cfg.ListOpt('transit_networks',
default=['100.64.0.0/16', 'fc3d:e3c3:7b93::/48'],
help=_("List of transit networks used by NSX tier0 routers. "
"Neutron subnets will not be allowed to use those "
"cidrs")),
cfg.BoolOpt('init_objects_by_tags',
default=False,
help=_("When True, the configured transport zones, router and "
"profiles will be found by tags on the NSX. The scope "
"of the tag will be the value of search_objects_"
"scope. The value of the search tag will be the name "
"configured in each respective configuration.")),
cfg.StrOpt('search_objects_scope',
help=_("This is the scope of the tag that will be used for "
"finding the objects uuids on the NSX during plugin "
"init.")),
cfg.IntOpt('dhcp_lease_time',
default=86400,
help=_("DHCP default lease time.")),
cfg.BoolOpt('support_nsx_port_tagging',
default=False,
help=_("If true, adding neutron tags to ports will also add "
"tags on the NSX logical ports. This feature requires "
"oslo_messaging_notifications driver to be "
"configured.")),
]
nsx_v3_opts = nsx_v3_and_p + [
cfg.StrOpt('dhcp_profile',
help=_("This is the name or UUID of the NSX DHCP Profile "
"that will be used to enable native DHCP service. It "
"needs to be created in NSX before starting Neutron "
"with the NSX plugin")),
cfg.StrOpt('default_overlay_tz',
help=_("This is the name or UUID of the default NSX overlay "
"transport zone that will be used for creating "
"tunneled isolated Neutron networks. It needs to be "
"created in NSX before starting Neutron with the NSX "
"plugin.")),
cfg.StrOpt('default_vlan_tz',
help=_("(Optional) Only required when creating VLAN or flat "
"provider networks. Name or UUID of default NSX VLAN "
"transport zone that will be used for bridging between "
"Neutron networks, if no physical network has been "
"specified")),
cfg.StrOpt('default_bridge_cluster',
deprecated_for_removal=True,
help=_("(Optional) Name or UUID of the default NSX bridge "
"cluster that will be used to perform L2 gateway "
"bridging between VXLAN and VLAN networks. If default "
"bridge cluster UUID is not specified, admin will have "
"to manually create a L2 gateway corresponding to a "
"NSX Bridge Cluster using L2 gateway APIs. This field "
"must be specified on one of the active neutron "
"servers only.")),
cfg.StrOpt('default_bridge_endpoint_profile',
help=_("(Optional) Name or UUID of the default NSX bridge "
"endpoint profile that will be used to perform L2 "
"bridging between networks in the NSX fabric and "
"VLANs external to NSX. If not specified, operators "
"will need to explicitly create a layer-2 gateway in "
"Neutron using the L2 gateway APIs.")),
cfg.StrOpt('default_tier0_router',
help=_("Name or UUID of the default tier0 router that will be "
"used for connecting to tier1 logical routers and "
"configuring external networks")),
cfg.IntOpt('number_of_nested_groups',
default=8,
help=_("(Optional) The number of nested groups which are used "
"by the plugin, each Neutron security-groups is added "
"to one nested group, and each nested group can contain "
"as maximum as 500 security-groups, therefore, the "
"maximum number of security groups that can be created "
"is 500 * number_of_nested_groups. The default is 8 "
"nested groups, which allows a maximum of 4k "
"security-groups, to allow creation of more "
"security-groups, modify this figure.")),
cfg.StrOpt('metadata_mode',
default=MetadataModes.DIRECT,
help=_("If set to access_network this enables a dedicated "
"connection to the metadata proxy for metadata server "
"access via Neutron router. If set to dhcp_host_route "
"this enables host route injection via the dhcp agent. "
"This option is only useful if running on a host that "
"does not support namespaces otherwise access_network "
"should be used.")),
cfg.BoolOpt('metadata_on_demand',
default=False,
help=_("If true, an internal metadata network will be created "
"for a router only when the router is attached to a "
"DHCP-disabled subnet.")),
cfg.BoolOpt('native_dhcp_metadata',
default=True,
help=_("If true, DHCP and metadata proxy services will be "
"provided by NSX backend.")),
cfg.ListOpt('switching_profiles',
default=[],
help=_("Optional parameter defining a list switching profiles "
"uuids that will be attached to all neutron created "
"nsx ports.")),
cfg.BoolOpt('ens_support',
default=False,
help=_("(Optional) Indicates whether ENS transport zones can "
"be used")),
cfg.BoolOpt('disable_port_security_for_ens',
# This flag was relevant only for NSX version that did not
# support ENS with security features
deprecated_for_removal=True,
default=False,
help=_("When True, port security will be set to False for "
"newly created ENS networks and ports, overriding "
"user settings")),
cfg.StrOpt('dhcp_relay_service',
help=_("(Optional) This is the name or UUID of the NSX dhcp "
"relay service that will be used to enable DHCP relay "
"on router ports.")),
cfg.ListOpt('housekeeping_jobs',
default=['orphaned_dhcp_server', 'orphaned_logical_switch',
'orphaned_logical_router', 'mismatch_logical_port',
'orphaned_firewall_section'],
help=_("List of the enabled housekeeping jobs")),
cfg.ListOpt('housekeeping_readonly_jobs',
default=[],
help=_("List of housekeeping jobs which are enabled in read "
"only mode")),
cfg.BoolOpt('housekeeping_readonly',
default=True,
help=_("Housekeeping will only warn about breakage.")),
]
nsx_p_opts = nsx_v3_and_p + [
cfg.StrOpt('dhcp_profile',
help=_("This is the name or UUID of the NSX DHCP Profile, "
"or the name or ID of the Policy DHCP server config "
"that will be used to enable native DHCP service. It "
"needs to be created in NSX before starting Neutron "
"with the NSX plugin")),
cfg.StrOpt('default_tier0_router',
help=_("Name or UUID of the default tier0 router that will be "
"used for connecting to tier1 logical routers and "
"configuring external networks. If only one tier0 "
" router is present on backend, it will be assumed "
"as default unless this value is provided")),
cfg.StrOpt('default_overlay_tz',
help=_("This is the name or UUID of the default NSX overlay "
"transport zone that will be used for creating "
"tunneled isolated Neutron networks. It needs to be "
"created in NSX before starting Neutron with the NSX "
"plugin. If only one overlay transport zone is present "
"on backend, it will be assumed as default unless this "
"value is provided")),
cfg.StrOpt('default_vlan_tz',
help=_("(Optional) Only required when creating VLAN or flat "
"provider networks. Name or UUID of default NSX VLAN "
"transport zone that will be used for bridging between "
"Neutron networks, if no physical network has been "
"specified. If only one VLAN transport zone is present "
"on backend, it will be assumed as default unless this "
"value is provided")),
cfg.StrOpt('waf_profile',
deprecated_for_removal=True,
help=_("(Optional) Name or UUID of the default WAF profile to "
"be attached to L7 loadbalancer listeners")),
cfg.BoolOpt('allow_passthrough',
default=True,
help=_("If True, use nsx manager api for cases which are not "
"supported by the policy manager api")),
cfg.IntOpt('realization_max_attempts',
default=50,
help=_("(Optional) Maximum number of times to retry while "
"waiting for a resource to be realized")),
    cfg.FloatOpt('realization_wait_sec',
                 default=1.0,
                 help=_("(Optional) Number of seconds to wait between "
                        "attempts for a resource to be realized")),
cfg.BoolOpt('firewall_match_internal_addr',
default=True,
help=_("If True, edge firewall rules will match internal "
"addresses. Else they will match the external "
"addresses")),
]
DEFAULT_STATUS_CHECK_INTERVAL = 2000
DEFAULT_MINIMUM_POOLED_EDGES = 1
DEFAULT_MAXIMUM_POOLED_EDGES = 3
DEFAULT_MAXIMUM_TUNNELS_PER_VNIC = 20
nsxv_opts = [
cfg.StrOpt('user',
default='admin',
help=_('User name for NSXv manager')),
cfg.StrOpt('password',
default='default',
secret=True,
help=_('Password for NSXv manager')),
cfg.StrOpt('manager_uri',
help=_('URL for NSXv manager')),
cfg.StrOpt('ca_file',
help=_('Specify a CA bundle file to use in verifying the NSXv '
'server certificate.')),
cfg.BoolOpt('insecure',
default=True,
help=_('If true, the NSXv server certificate is not verified. '
'If false, then the default CA truststore is used for '
'verification. This option is ignored if "ca_file" is '
'set.')),
cfg.ListOpt('cluster_moid',
default=[],
help=_('(Required) Parameter listing the IDs of the clusters '
'which are used by OpenStack.')),
cfg.StrOpt('datacenter_moid',
help=_('Required parameter identifying the ID of datacenter '
'to deploy NSX Edges')),
cfg.StrOpt('deployment_container_id',
help=_('Optional parameter identifying the ID of datastore to '
'deploy NSX Edges')),
cfg.StrOpt('resource_pool_id',
help=_('Optional parameter identifying the ID of resource to '
'deploy NSX Edges')),
cfg.ListOpt('availability_zones',
default=[],
                help=_('Optional parameter defining the availability zone '
                       'names for deploying NSX Edges. The configuration of '
                       'each zone will be under a group named [az:<name>]')),
cfg.StrOpt('datastore_id',
help=_('Optional parameter identifying the ID of datastore to '
'deploy NSX Edges')),
cfg.StrOpt('ha_datastore_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges in addition to datastore_id in case '
                      'edge_ha is True')),
cfg.BoolOpt('ha_placement_random',
default=False,
help=_('When True and in case edge_ha is True, half of the '
'edges will be placed in the primary datastore as '
'active and the other half will be placed in the '
'ha_datastore')),
cfg.ListOpt('edge_host_groups',
default=[],
help=_('(Optional) If edge HA is used then this will ensure '
'that active/backup edges are placed in the listed '
'host groups. At least 2 predefined host groups need '
'to be configured.')),
cfg.StrOpt('external_network',
help=_('(Required) Network ID for physical network '
'connectivity')),
cfg.IntOpt('task_status_check_interval',
default=DEFAULT_STATUS_CHECK_INTERVAL,
help=_("(Optional) Asynchronous task status check interval. "
"Default is 2000 (millisecond)")),
cfg.StrOpt('vdn_scope_id',
help=_('(Optional) Network scope ID for VXLAN virtual wires')),
cfg.StrOpt('dvs_id',
help=_('(Optional) DVS MoRef ID for DVS connected to '
'Management / Edge cluster')),
cfg.IntOpt('maximum_tunnels_per_vnic',
default=DEFAULT_MAXIMUM_TUNNELS_PER_VNIC,
min=1, max=110,
help=_('(Optional) Maximum number of sub interfaces supported '
'per vnic in edge.')),
cfg.ListOpt('backup_edge_pool',
default=['service:compact:4:10',
'vdr:compact:4:10'],
help=_("Defines edge pool's management range with the format: "
"<edge_type>:[edge_size]:<min_edges>:<max_edges>."
"edge_type: service,vdr. "
"edge_size: compact, large, xlarge, quadlarge "
"and default is compact. By default, edge pool manager "
"would manage service edge with compact size "
"and distributed edge with compact size as following: "
"service:compact:4:10,vdr:compact:"
"4:10")),
cfg.IntOpt('retries',
default=20,
help=_('Maximum number of API retries on endpoint.')),
cfg.StrOpt('mgt_net_moid',
help=_('(Optional) Portgroup MoRef ID for metadata proxy '
'management network')),
cfg.ListOpt('mgt_net_proxy_ips',
default=[],
help=_('(Optional) Comma separated list of management network '
'IP addresses for metadata proxy.')),
cfg.StrOpt('mgt_net_proxy_netmask',
help=_("(Optional) Management network netmask for metadata "
"proxy.")),
cfg.StrOpt('mgt_net_default_gateway',
help=_("(Optional) Management network default gateway for "
"metadata proxy.")),
cfg.ListOpt('nova_metadata_ips',
default=[],
help=_("(Optional) IP addresses used by Nova metadata "
"service.")),
cfg.PortOpt('nova_metadata_port',
default=8775,
help=_("(Optional) TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_shared_secret',
secret=True,
help=_("(Optional) Shared secret to sign metadata requests.")),
cfg.BoolOpt('metadata_insecure',
default=True,
help=_("(Optional) If True, the end to end connection for "
"metadata service is not verified. If False, the "
"default CA truststore is used for verification.")),
cfg.StrOpt('metadata_nova_client_cert',
help=_('(Optional) Client certificate to use when metadata '
'connection is to be verified. If not provided, '
'a self signed certificate will be used.')),
cfg.StrOpt('metadata_nova_client_priv_key',
help=_("(Optional) Private key of client certificate.")),
cfg.BoolOpt('spoofguard_enabled',
default=True,
help=_("(Optional) If True then plugin will use NSXV "
"spoofguard component for port-security feature.")),
cfg.BoolOpt('use_exclude_list',
default=True,
help=_("(Optional) If True then plugin will use NSXV exclude "
"list component when port security is disabled and "
"spoofguard is enabled.")),
cfg.ListOpt('tenant_router_types',
default=['shared', 'distributed', 'exclusive'],
help=_("Ordered list of router_types to allocate as tenant "
"routers. It limits the router types that the Nsxv "
"can support for tenants:\ndistributed: router is "
"supported by distributed edge at the backend.\n"
"shared: multiple routers share the same service "
"edge at the backend.\nexclusive: router exclusively "
"occupies one service edge at the backend.\nNsxv would "
"select the first available router type from "
"tenant_router_types list if router-type is not "
"specified. If the tenant defines the router type with "
"'--distributed','--router_type exclusive' or "
"'--router_type shared', Nsxv would verify that the "
"router type is in tenant_router_types. Admin supports "
"all these three router types.")),
cfg.StrOpt('edge_appliance_user',
secret=True,
help=_("(Optional) Username to configure for Edge appliance "
"login.")),
cfg.StrOpt('edge_appliance_password',
secret=True,
help=_("(Optional) Password to configure for Edge appliance "
"login.")),
cfg.IntOpt('dhcp_lease_time',
default=86400,
help=_("(Optional) DHCP default lease time.")),
cfg.BoolOpt('metadata_initializer',
default=True,
help=_("If True, the server instance will attempt to "
"initialize the metadata infrastructure")),
cfg.ListOpt('metadata_service_allowed_ports',
item_type=types.Port(),
default=[],
help=_('List of tcp ports, to be allowed access to the '
'metadata proxy, in addition to the default '
'80,443,8775 tcp ports')),
cfg.BoolOpt('edge_ha',
default=False,
help=_("(Optional) Enable HA for NSX Edges.")),
cfg.StrOpt('exclusive_router_appliance_size',
default="compact",
choices=routersize.VALID_EDGE_SIZES,
help=_("(Optional) Edge appliance size to be used for creating "
"exclusive router. Valid values: "
"['compact', 'large', 'xlarge', 'quadlarge']. This "
"exclusive_router_appliance_size will be picked up if "
"--router-size parameter is not specified while doing "
"neutron router-create")),
cfg.StrOpt('shared_router_appliance_size',
default="compact",
choices=routersize.VALID_EDGE_SIZES,
help=_("(Optional) Edge appliance size to be used for creating "
"shared router edge. Valid values: "
"['compact', 'large', 'xlarge', 'quadlarge'].")),
cfg.StrOpt('dns_search_domain',
help=_("(Optional) Use this search domain if there is no "
"search domain configured on the subnet.")),
cfg.ListOpt('nameservers',
default=[],
help=_('List of nameservers to configure for the DHCP binding '
'entries. These will be used if there are no '
'nameservers defined on the subnet.')),
cfg.BoolOpt('use_dvs_features',
default=False,
help=_('If True, dvs features will be supported which '
'involves configuring the dvs backing nsx_v directly. '
'If False, only features exposed via nsx_v will be '
'supported')),
cfg.BoolOpt('log_security_groups_blocked_traffic',
default=False,
help=_("(Optional) Indicates whether distributed-firewall "
"rule for security-groups blocked traffic is logged.")),
cfg.BoolOpt('log_security_groups_allowed_traffic',
default=False,
help=_("(Optional) Indicates whether distributed-firewall "
"security-groups allowed traffic is logged.")),
cfg.StrOpt('service_insertion_profile_id',
help=_("(Optional) The profile id of the redirect firewall "
"rules that will be used for the Service Insertion "
"feature.")),
cfg.BoolOpt('service_insertion_redirect_all', default=False,
help=_("(Optional) If set to True, the plugin will create "
"a redirect rule to send all the traffic to the "
"security partner")),
cfg.BoolOpt('use_nsx_policies', default=False,
help=_("If set to True, the plugin will use NSX policies "
"in the neutron security groups.")),
cfg.StrOpt('default_policy_id',
help=_("(Optional) If use_nsx_policies is True, this policy "
"will be used as the default policy for new tenants.")),
cfg.BoolOpt('allow_tenant_rules_with_policy', default=False,
help=_("(Optional) If use_nsx_policies is True, this value "
"will determine if a tenants can add rules to their "
"security groups.")),
cfg.StrOpt('vdr_transit_network', default=DEFAULT_VDR_TRANSIT_NETWORK,
help=_("(Optional) Sets the network address for distributed "
"router TLR-PLR connectivity, with "
"<network IP>/<prefix> syntax")),
cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False,
help=_("If set to False, router will associate floating ip "
"with external interface of only, thus denying "
"connectivity between hosts on same network via "
"their floating ips. If True, floating ip will "
"be associated with all router interfaces.")),
cfg.BoolOpt('exclusive_dhcp_edge',
default=False,
help=_("(Optional) Have exclusive DHCP edge per network.")),
cfg.IntOpt('bgp_neighbour_hold_down_timer',
default=4,
help=_("(Optional) Set the interval (Seconds) for BGP "
"neighbour hold down time.")),
cfg.IntOpt('bgp_neighbour_keep_alive_timer',
default=1,
help=_("(Optional) Set the interval (Seconds) for BGP "
"neighbour keep alive time.")),
cfg.IntOpt('ecmp_wait_time',
default=2,
help=_("(Optional) Set the wait time (Seconds) between "
"enablement of ECMP.")),
cfg.ListOpt('network_vlan_ranges',
default=[],
help=_("List of <DVS MoRef ID>:<vlan_min>:<vlan_max> "
"specifying DVS MoRef ID usable for VLAN provider "
"networks, as well as ranges of VLAN tags on each "
"available for allocation to networks.")),
cfg.IntOpt('nsx_transaction_timeout',
default=240,
help=_("Timeout interval for NSX backend transactions.")),
cfg.BoolOpt('share_edges_between_tenants',
default=True,
help=_("If False, different tenants will not use the same "
"DHCP edge or router edge.")),
cfg.ListOpt('housekeeping_jobs',
default=['error_dhcp_edge', 'error_backup_edge'],
help=_("List of the enabled housekeeping jobs")),
cfg.ListOpt('housekeeping_readonly_jobs',
default=[],
help=_("List of housekeeping jobs which are enabled in read "
"only mode")),
cfg.BoolOpt('housekeeping_readonly',
default=True,
help=_("Housekeeping will only warn about breakage.")),
cfg.BoolOpt('use_default_block_all',
default=False,
help=_("Use default block all rule when no security groups "
"are set on a port and port security is enabled")),
cfg.BoolOpt('use_routers_as_lbaas_platform',
default=False,
help=_("Use subnet's exclusive router as a platform for "
"LBaaS")),
cfg.BoolOpt('allow_multiple_ip_addresses',
default=False,
help=_("Allow associating multiple IPs to VMs "
"without spoofguard limitations")),
cfg.StrOpt('nsx_sg_name_format',
default='%(name)s (%(id)s)',
help=_("(Optional) Format for the NSX name of an openstack "
"security group")),
cfg.BoolOpt('init_validation',
default=True,
help=_("Set to False to skip plugin init validation")),
cfg.BoolOpt('loadbalancer_pool_transparency',
default=False,
help=_("Create LBaaS pools with transparent mode on. Use with "
"use_routers_as_lbaas_platform enabled")),
cfg.ListOpt('default_edge_size',
default=[],
help=_("(Optional) Defines the default edge size for router, "
"dhcp and loadbalancer edges with the format: "
"<purpose>:<edge_size>. "
"purpose: router, dhcp, lb. "
"edge_size: compact, large, xlarge, quadlarge")),
]
# define the configuration of each NSX-V availability zone.
# the list of expected zones is under nsxv group: availability_zones
# Note: if any of the optional arguments is missing - the global one will be
# used instead.
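# For illustration, a hypothetical ini fragment wiring up two such zones
# (zone names and ids below are placeholders, not defaults):
#
#     [nsxv]
#     availability_zones = az-east,az-west
#
#     [az:az-east]
#     resource_pool_id = resgroup-101
#     datastore_id = datastore-201
#     edge_ha = true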
nsxv_az_opts = [
cfg.StrOpt('resource_pool_id',
help=_('Identifying the ID of resource to deploy NSX Edges')),
cfg.StrOpt('datastore_id',
help=_('Identifying the ID of datastore to deploy NSX Edges')),
cfg.BoolOpt('edge_ha',
default=False,
help=_("(Optional) Enable HA for NSX Edges.")),
cfg.StrOpt('ha_datastore_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges in addition to datastore_id in case '
                      'edge_ha is True')),
cfg.BoolOpt('ha_placement_random',
help=_('When True and in case edge_ha is True, half of the '
'edges will be placed in the primary datastore as '
'active and the other half will be placed in the '
'ha_datastore. If this value is not set, the global '
'one will be used')),
cfg.ListOpt('edge_host_groups',
default=[],
help=_('(Optional) If edge HA is used then this will ensure '
'that active/backup edges are placed in the listed '
'host groups. At least 2 predefined host groups need '
'to be configured.')),
cfg.StrOpt('datacenter_moid',
help=_('(Optional) Identifying the ID of datacenter to deploy '
'NSX Edges')),
cfg.ListOpt('backup_edge_pool',
help=_("(Optional) Defines edge pool's management range for "
"the availability zone. If not defined, the global one "
"will be used")),
cfg.StrOpt('mgt_net_moid',
help=_('(Optional) Portgroup MoRef ID for metadata proxy '
'management network')),
cfg.ListOpt('mgt_net_proxy_ips',
default=[],
help=_('(Optional) Comma separated list of management network '
'IP addresses for metadata proxy.')),
cfg.StrOpt('mgt_net_proxy_netmask',
help=_("(Optional) Management network netmask for metadata "
"proxy.")),
cfg.StrOpt('mgt_net_default_gateway',
help=_("(Optional) Management network default gateway for "
"metadata proxy.")),
cfg.StrOpt('external_network',
help=_('(Optional) Network ID for physical network '
'connectivity')),
cfg.StrOpt('vdn_scope_id',
help=_('(Optional) Network scope ID for VXLAN virtual wires')),
cfg.StrOpt('dvs_id',
help=_('(Optional) DVS MoRef ID for DVS connected to '
'Management / Edge cluster')),
cfg.BoolOpt('exclusive_dhcp_edge',
default=False,
help=_("(Optional) Have exclusive DHCP edge per network.")),
cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False,
help=_("If set to False, router will associate floating ip "
"with external interface of only, thus denying "
"connectivity between hosts on same network via "
"their floating ips. If True, floating ip will "
"be associated with all router interfaces.")),
]
# define the configuration of each NSX-V3 availability zone.
# the list of expected zones is under nsx_v3 group: availability_zones
# Note: if any of the optional arguments is missing - the global one will be
# used instead.
nsx_v3_and_p_az_opts = [
cfg.StrOpt('metadata_proxy',
help=_("The name or UUID of the NSX Metadata Proxy "
"that will be used to enable native metadata service. "
"It needs to be created in NSX before starting Neutron "
"with the NSX plugin.")),
cfg.StrOpt('dhcp_profile',
help=_("The name or UUID of the NSX DHCP Profile "
"that will be used to enable native DHCP service. It "
"needs to be created in NSX before starting Neutron "
"with the NSX plugin")),
cfg.StrOpt('native_metadata_route',
help=_("(Optional) The metadata route used for native metadata "
"proxy service.")),
cfg.StrOpt('dns_domain',
help=_("(Optional) Domain to use for building the hostnames.")),
cfg.ListOpt('nameservers',
help=_("(Optional) List of nameservers to configure for the "
"DHCP binding entries. These will be used if there are "
"no nameservers defined on the subnet.")),
cfg.StrOpt('default_overlay_tz',
help=_("(Optional) This is the name or UUID of the default NSX "
"overlay transport zone that will be used for creating "
"tunneled isolated Neutron networks. It needs to be "
"created in NSX before starting Neutron with the NSX "
"plugin.")),
cfg.StrOpt('default_vlan_tz',
help=_("(Optional) Only required when creating VLAN or flat "
"provider networks. Name or UUID of default NSX VLAN "
"transport zone that will be used for bridging between "
"Neutron networks, if no physical network has been "
"specified")),
cfg.StrOpt('default_tier0_router',
help=_("Name or UUID of the default tier0 router that will be "
"used for connecting to tier1 logical routers and "
"configuring external networks")),
cfg.StrOpt('edge_cluster',
help=_("(Optional) Specifying an edge cluster for Tier1 "
"routers to connect other that the one connected to"
" the Tier0 router")),
]
nsxv3_az_opts = nsx_v3_and_p_az_opts + [
cfg.ListOpt('switching_profiles',
help=_("(Optional) list switching profiles uuids that will be "
"attached to all neutron created nsx ports.")),
cfg.StrOpt('dhcp_relay_service',
help=_("(Optional) This is the name or UUID of the NSX dhcp "
"relay service that will be used to enable DHCP relay "
"on router ports.")),
]
nsxp_az_opts = nsx_v3_and_p_az_opts
nsx_tvd_opts = [
cfg.ListOpt('nsx_v_extension_drivers',
default=[],
help=_("An ordered list of NSX-V extension driver "
"entrypoints to be loaded from the "
"vmware_nsx.extension_drivers namespace.")),
cfg.ListOpt('nsx_v3_extension_drivers',
default=[],
help=_("An ordered list of NSX-T extension driver "
"entrypoints to be loaded from the "
"vmware_nsx.extension_drivers namespace.")),
cfg.ListOpt('dvs_extension_drivers',
default=[],
help=_("An ordered list of DVS extension driver "
"entrypoints to be loaded from the "
"vmware_nsx.extension_drivers namespace.")),
cfg.StrOpt('default_plugin',
default=projectpluginmap.NsxPlugins.NSX_T,
choices=projectpluginmap.VALID_TYPES,
help=_("The default plugin that will be used for new projects "
"that were not added to the projects plugin mapping.")),
cfg.ListOpt('enabled_plugins',
default=[projectpluginmap.NsxPlugins.NSX_T,
projectpluginmap.NsxPlugins.NSX_V,
projectpluginmap.NsxPlugins.DVS],
help=_("The list of plugins that the TVD core plugin will "
"load")),
cfg.ListOpt('nsx_v_default_availability_zones',
default=[],
help=_("The default availability zones that will be used for "
"NSX-V networks and routers creation under the TVD "
"plugin.")),
cfg.ListOpt('nsx_v3_default_availability_zones',
default=[],
help=_("The default availability zones that will be used for "
"NSX-V3 networks and routers creation under the TVD "
"plugin.")),
cfg.IntOpt('init_retries',
default=3,
help=_('Maximum number of times a particular plugin '
'initialization should be retried')),
]
# Register the configuration options
cfg.CONF.register_opts(connection_opts)
cfg.CONF.register_opts(cluster_opts)
cfg.CONF.register_opts(nsx_common_opts)
cfg.CONF.register_opts(nsx_p_opts, group="nsx_p")
cfg.CONF.register_opts(nsx_v3_opts, group="nsx_v3")
cfg.CONF.register_opts(nsxv_opts, group="nsxv")
cfg.CONF.register_opts(nsx_tvd_opts, group="nsx_tvd")
cfg.CONF.register_opts(base_opts, group="NSX")
# register l3_ha config opts. This is due to commit
# a7c633dc8e8a67e65e558ecbdf9ea8efc5468251
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
def _register_nsx_azs(conf, availability_zones, az_opts):
# first verify that the availability zones are in the format of a
# list of names. The old format was a list of values for each az,
# separated with ':'
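    # e.g. new style: availability_zones = az1,az2
    #      old style packed several ':'-separated values per zone, so any
    #      ':' in the first entry marks the old format and is skipped here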
if not availability_zones or len(availability_zones[0].split(':')) > 1:
return
for az in availability_zones:
az_group = 'az:%s' % az
conf.register_group(cfg.OptGroup(
name=az_group,
title="Configuration for availability zone %s" % az))
conf.register_opts(az_opts, group=az_group)
# register a group for each nsxv/v3/p availability zone
def register_nsxv_azs(conf, availability_zones):
_register_nsx_azs(conf, availability_zones, nsxv_az_opts)
def register_nsxv3_azs(conf, availability_zones):
_register_nsx_azs(conf, availability_zones, nsxv3_az_opts)
def register_nsxp_azs(conf, availability_zones):
_register_nsx_azs(conf, availability_zones, nsxp_az_opts)
register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones)
register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones)
register_nsxp_azs(cfg.CONF, cfg.CONF.nsx_p.availability_zones)
def _get_nsx_az_opts(az, opts):
az_info = {}
group = 'az:%s' % az
if group not in cfg.CONF:
raise nsx_exc.NsxInvalidConfiguration(
opt_name=group,
opt_value='None',
reason=(_("Configuration group \'%s\' must be defined") % group))
for opt in opts:
az_info[opt.name] = cfg.CONF[group][opt.name]
return az_info
def get_nsxv_az_opts(az):
return _get_nsx_az_opts(az, nsxv_az_opts)
def get_nsxv3_az_opts(az):
return _get_nsx_az_opts(az, nsxv3_az_opts)
def get_nsxp_az_opts(az):
return _get_nsx_az_opts(az, nsxp_az_opts)
def validate_nsxv_config_options():
if (cfg.CONF.nsxv.manager_uri is None or
cfg.CONF.nsxv.user is None or
cfg.CONF.nsxv.password is None):
error = _("manager_uri, user, and password must be configured!")
raise nsx_exc.NsxPluginException(err_msg=error)
if cfg.CONF.nsxv.dvs_id is None:
LOG.warning("dvs_id must be configured to support VLANs!")
if cfg.CONF.nsxv.vdn_scope_id is None:
LOG.warning("vdn_scope_id must be configured to support VXLANs!")
if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled(
dvs_id=cfg.CONF.nsxv.dvs_id):
error = _("dvs host/vcenter credentials must be defined to use "
"dvs features")
raise nsx_exc.NsxPluginException(err_msg=error)
for purpose_def in cfg.CONF.nsxv.default_edge_size:
(p, s) = purpose_def.split(':')
if p not in ['lb', 'router', 'dhcp']:
error = _('Invalid service edge purpose %s') % p
raise nsx_exc.NsxPluginException(err_msg=error)
if s not in nsxv_constants.VALID_EDGE_SIZE:
error = _('Invalid service edge size %s') % s
raise nsx_exc.NsxPluginException(err_msg=error)
def validate_nsx_config_options():
if cfg.CONF.nsx_extension_drivers:
error = _("nsx_extension_drivers should not be configured!")
raise nsx_exc.NsxPluginException(err_msg=error)

# ---- File: ico/kyc.py (repo: miohtama/Smart-Contracts, license: Apache-2.0) ----
"""AML data passing helpers."""
from binascii import hexlify
from uuid import UUID
from eth_utils import is_checksum_address
def pack_kyc_dataframe(whitelisted_address: str, customer_id: UUID, min_eth_10k: int, max_eth_10k: int) -> bytes:
"""Pack KYC information to the smart contract.
See KYCPayloadDeserializer for the matching Solidity code.
.. note ::
        In the long term this will be deprecated in favor of the function below.
    :param whitelisted_address: Must be a whitelisted address in Ethereum checksummed format
    :param customer_id: Customer id as UUIDv8
    :param min_eth_10k: Min investment for this customer. Expressed in parts of 1/10000.
    :param max_eth_10k: Max investment for this customer. Expressed in parts of 1/10000.
    :return: Raw bytes to send to the contract as a function argument
"""
assert is_checksum_address(whitelisted_address)
assert isinstance(customer_id, UUID)
assert type(min_eth_10k) == int
assert type(max_eth_10k) == int
addr_value = int(whitelisted_address, 16)
addr_b = addr_value.to_bytes(20, byteorder="big") # Ethereum address is 20 bytes
customer_b = customer_id.bytes
min_b = min_eth_10k.to_bytes(4, byteorder="big")
max_b = max_eth_10k.to_bytes(4, byteorder="big")
data = addr_b + customer_b + min_b + max_b
assert len(data) == 44, "Got length: {}".format(len(data))
return data
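
# A minimal usage sketch for the packer above (values are hypothetical; the
# zero address passes is_checksum_address because it contains no letters):
#
#     from uuid import uuid4
#     payload = pack_kyc_dataframe(
#         '0x0000000000000000000000000000000000000000', uuid4(), 5000, 100000)
#     assert len(payload) == 44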
def pack_kyc_pricing_dataframe(whitelisted_address: str, customer_id: UUID, min_eth_10k: int, max_eth_10k: int, pricing_info: int) -> bytes:
"""Pack KYC presale information to the smart contract.
Same as above, but with pricing info included.
See KYCPayloadDeserializer for the matching Solidity code.
    :param whitelisted_address: Must be a whitelisted address in Ethereum checksummed format
    :param customer_id: Customer id as UUIDv8
    :param min_eth_10k: Min investment for this customer. Expressed in parts of 1/10000.
    :param max_eth_10k: Max investment for this customer. Expressed in parts of 1/10000.
:param pricing_info: Tier identifier or directly one token price in wei.
:return: Raw bytes to send to the contract as a function argument
"""
assert is_checksum_address(whitelisted_address)
assert isinstance(customer_id, UUID)
assert type(min_eth_10k) == int
assert type(max_eth_10k) == int
assert type(pricing_info) == int
addr_value = int(whitelisted_address, 16)
addr_b = addr_value.to_bytes(20, byteorder="big") # Ethereum address is 20 bytes
customer_b = customer_id.bytes
min_b = min_eth_10k.to_bytes(4, byteorder="big")
max_b = max_eth_10k.to_bytes(4, byteorder="big")
pricing_data = pricing_info.to_bytes(32, byteorder="big")
data = addr_b + customer_b + min_b + max_b + pricing_data
assert len(data) == 76, "Got length: {}".format(len(data))
return data
def unpack_kyc_pricing_dataframe(b: bytes) -> dict:
"""Unpack a KYC payloda for diagnostics purposes.
Useful to troubleshoot live transactions. Grab the transaction hex data from Etherscan, starting on [5], make it a single string and use this function to see what parameters where given to the user.
Example::
import binascii
from ico.kyc import unpack_kyc_pricing_dataframe
h = "83dcb...40000000000000000000000000000000000000000000000000000000000000001"
b = binascii.unhexlify(h)
unpack_kyc_pricing_dataframe(b)
"""
assert len(b) == 76, "Got byte array of length: {}".format(len(b))
addr_value = b[0:20]
customer_id = b[20:36]
min_b = b[36:40]
max_b = b[40:44]
pricing_data = b[44:76]
return {
"address": "0x" + hexlify(addr_value).decode("ascii"),
"customer_id": UUID(int=int(hexlify(customer_id), 16)),
"min_payment_eth": int(hexlify(min_b), 16) / 10000.0,
"max_payment_eth": int(hexlify(max_b), 16) / 10000.0,
"pricing_data": int(hexlify(pricing_data), 16),
}

# ---- File: model_example/3_Feature.py (repo: kn45/ml-flow, license: MIT) ----
#!/usr/bin/env python
import sys
import numpy as np
from mlfutil import *
port_encoder = None
def init():
global port_encoder
port_encoder = PortEncoder()
port_encoder.init()
def build_feat():
infile = sys.argv[1]
outfile = sys.argv[2]
fo = open(outfile, 'w')
data = None
with open(infile) as f:
data = np.array([l.rstrip('\r\n').split('\t') for l in f.readlines()])
data_size = len(data)
for nr, inst in enumerate(data):
feats = []
label = inst[0]
uid = inst[1]
pclass = inst[2] # number
name = inst[3] # string
sex = inst[4] # cat
age = inst[5] # number
sbisp = inst[6] # number
parch = inst[7] # number
ticket = inst[8] # string
fare = inst[9] # number
cabin = inst[10] # string
port = inst[11] # cat
feats += [pclass]
feats += sex_encoder(sex)
feats += [age]
feats += [sbisp]
feats += [parch]
feats += [fare]
feats += port_encoder.encode(port)
print >> fo, '\t'.join(map(str, [label] + feats))
draw_progress(nr, data_size-1)
if __name__ == '__main__':
init()
build_feat()
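
# Invocation sketch (file names are placeholders; the input is expected to be
# a 12-column tab-separated file, unpacked column by column above):
#     python 3_Feature.py titanic_train.tsv titanic_train.feat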

# ---- File: api/permissions.py (repo: GeRDI-Project/HarvesterControlCenter, license: Apache-2.0) ----
"""
Permission Module
"""
from rest_framework import permissions
from .models import Harvester
__author__ = "Jan Frömberg"
__copyright__ = "Copyright 2018, GeRDI Project"
__credits__ = ["Jan Frömberg"]
__license__ = "Apache 2.0"
__maintainer__ = "Jan Frömberg"
__email__ = "jan.froemberg@tu-dresden.de"
class IsOwner(permissions.BasePermission):
"""Custom permission class to allow only harvester owners to edit them."""
def has_object_permission(self, request, view, obj):
"""Return True if permission is granted to the harvester owner."""
if isinstance(obj, Harvester):
return obj.owner == request.user
# Write permissions are only allowed to the owner of the harvester.
return obj.owner == request.user
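
# A minimal usage sketch (hypothetical DRF view; HarvesterSerializer and the
# generics import are assumptions, not defined in this module):
#
#     from rest_framework import generics
#
#     class HarvesterDetail(generics.RetrieveUpdateDestroyAPIView):
#         queryset = Harvester.objects.all()
#         serializer_class = HarvesterSerializer
#         permission_classes = (permissions.IsAuthenticated, IsOwner)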

# ---- File: augment.py (repo: luminide/example-darkmatter, license: BSD-3-Clause) ----
import albumentations as A
from albumentations.pytorch import ToTensorV2
import cv2
def make_augmenters(conf):
p = conf.aug_prob
crop_size = round(conf.image_size*conf.crop_size)
aug_list = [
A.ShiftScaleRotate(
shift_limit=0.0625, scale_limit=0.2, rotate_limit=25,
interpolation=cv2.INTER_AREA, p=p),
A.RandomCrop(height=crop_size, width=crop_size, always_apply=True),
A.Flip(p=0.5*p),
A.OneOf([
A.MotionBlur(p=0.2*p),
A.MedianBlur(blur_limit=3, p=0.1*p),
A.Blur(blur_limit=3, p=0.1*p),
], p=0.2*p),
A.Perspective(p=0.2*p),
]
if conf.strong_aug:
aug_list.extend([
A.GaussNoise(p=0.2*p),
A.OneOf([
A.OpticalDistortion(p=0.3*p),
A.GridDistortion(p=0.1*p),
A.PiecewiseAffine(p=0.3*p),
], p=0.2*p),
A.OneOf([
A.CLAHE(clip_limit=2, p=0.2*p),
A.Sharpen(p=0.2*p),
A.Emboss(p=0.2*p),
A.RandomBrightnessContrast(p=0.2*p),
], p=0.3*p),
])
aug_list.extend([
A.Normalize(),
ToTensorV2()
])
train_aug = A.Compose(aug_list)
test_aug = A.Compose([
A.CenterCrop(height=crop_size, width=crop_size),
A.Normalize(),
ToTensorV2()
])
return train_aug, test_aug
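
# A minimal usage sketch (assumes a `conf` object exposing aug_prob,
# image_size, crop_size and strong_aug, none of which this module defines):
#
#     train_aug, test_aug = make_augmenters(conf)
#     out = train_aug(image=image)   # image: HxWxC uint8 numpy array
#     tensor = out['image']          # CxHxW tensor from Normalize + ToTensorV2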

# ---- File: setup.py (repo: AjithRamachandran/yamm, license: MIT) ----
VERSION = '0.9dev0'
from setuptools import setup, Extension
with open("README.md", "r") as doc:
long_description = doc.read()
doc.close()
yammpy = Extension('yammpy', sources=['yammpy/yammpy.c'], include_dirs=['yammpy/include'])
setup(
name="yammpy",
version=VERSION,
author="Ajith Ramachandran",
author_email="ajithar204@gmail.com",
description="Yet Another Math Module",
long_description=long_description,
long_description_content_type='text/markdown',
url="https://github.com/AjithRamachandran/yamm",
keywords='Mathematics',
license='MIT',
packages=['yammpy'],
tests_require=['unittest'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: Implementation :: CPython",
],
ext_modules=[yammpy],
python_requires='>=3.7',
)
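
# Illustrative build/install commands for this C-extension package:
#     python setup.py build_ext --inplace   # compiles yammpy/yammpy.c in place
#     pip install .                         # builds and installs the package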

# ---- File: main.py (repo: shellrazer/Project_1_kaikeba, license: MIT) ----
import os
import argparse
from data_loader import pip_data
from train import train_test_interface
from predict import predict
def main():
parser = argparse.ArgumentParser()
    # parameters defined for pip_data
parser.add_argument("--mode", help="pip_data, train, test or predict", default="predict", type=str)
parser.add_argument("--data_dir", help="Data Folder", default="./data", type=str)
parser.add_argument("--max_df", help="tfidf term: max frequency to keep in vocab", default=0.75, type=float)
parser.add_argument("--min_df", help="tfidf term: min counts to keep in vocab", default=2, type=int)
parser.add_argument("--min_tfidf", help="tfidf term: min tfidf to keep in vocab", default=0.1, type=float)
parser.add_argument("--embedding_size", default=256, help="Words embeddings dimension", type=int)
# parameters defined for train and test model
parser.add_argument("--max_lens", default=[98,100,34,103],
help="a list of max lens for merged_train_test,train_X,train_y,test_X", nargs='+', type=int)
parser.add_argument("--batch_sz", default=128, help="batch size", type=int)
parser.add_argument("--test_percent", default=0.05, help="proportion of test samples", type=float)
    # encoder is bidirectional: gru_units/2 for each direction
    parser.add_argument("--gru_units", default=512, help="Encoder and decoder GRU cell units", type=int)
parser.add_argument("--att_units", default=64, help="attention weights", type=int)
parser.add_argument("--learning_rate", default=0.001, help="Learning rate", type=float)
parser.add_argument("--clipvalue", default=2.0, help="gradient clip value", type=float)
parser.add_argument("--checkpoint_dir", help="Checkpoint directory", default='./training_checkpoints', type=str)
parser.add_argument("--save_chkp_epoch", help="Checkpoint save every # epoch", default=5, type=int)
parser.add_argument("--use_checkpoint", help="for train and test, restore from checkpoint?", default=True, type=bool)
parser.add_argument("--train_epoch", help="train epoch", default=15, type=int)
parser.add_argument("--cov_loss_wt", help="coverage loss weight", default=0.5, type=float)
# parameters defined for predict
parser.add_argument("--max_len_y", default=40, help="max words of the predicted abstract", type=int)
parser.add_argument("--min_len_y", default=5, help="min words of the predicted abstract", type=int)
parser.add_argument("--beam_size", default=3, help="beam size for beam search", type=int)
parser.add_argument("--prediction_path", help="Path to save prediction results", default="./prediction.txt", type=str)
args = parser.parse_args()
params = vars(args)
print(params)
assert params["mode"] in ["pip_data","train", "test", "predict"], "The mode must be pip_data, train, test or predict"
assert os.path.exists(params["data_dir"]), "data_dir doesn't exist"
if params["mode"] == "pip_data":
pip_data(params)
elif params["mode"] in ['train','test']:
train_test_interface(params)
elif params["mode"] == "predict":
predict(params)
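
# Example invocations built from the flags above (paths are placeholders):
#     python main.py --mode pip_data --data_dir ./data
#     python main.py --mode train --batch_sz 128 --train_epoch 15
#     python main.py --mode predict --beam_size 3 --prediction_path ./prediction.txt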
if __name__ == "__main__":
    main()

# ---- File: amplify/agent/collectors/plus/meta.py (repo: dp92987/nginx-amplify-agent, license: BSD-2-Clause) ----
# -*- coding: utf-8 -*-
from amplify.agent.common.context import context
from amplify.agent.collectors.abstract import AbstractMetaCollector
from amplify.agent.objects.plus.api import TYPE_MAP
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"
class PlusStatusObjectMetaCollector(AbstractMetaCollector):
short_name = 'status_meta'
def __init__(self, **kwargs):
super(PlusStatusObjectMetaCollector, self).__init__(**kwargs)
self.register(
self.version
)
@property
def default_meta(self):
zone = self.object.type if self.object.type != 'server_zone' else 'status_zone'
meta = {
'type': self.object.type_template % zone,
'local_name': self.object.local_name,
'local_id': self.object.local_id,
'root_uuid': context.uuid,
'hostname': context.app_config['credentials']['imagename'] or context.hostname,
'version': None
}
return meta
def version(self):
parent = context.objects.find_parent(obj=self.object)
self.meta['version'] = parent.version if parent else None
class PlusApiObjectMetaCollector(AbstractMetaCollector):
short_name = 'api_meta'
def __init__(self, **kwargs):
super(PlusApiObjectMetaCollector, self).__init__(**kwargs)
self.register(
self.version
)
@property
def default_meta(self):
mapped_type = TYPE_MAP.get(self.object.type, self.object.type)
zone = mapped_type if mapped_type != 'server_zone' else 'status_zone'
meta = {
'type': self.object.type_template % zone,
'local_name': self.object.local_name,
'local_id': self.object.local_id,
'root_uuid': context.uuid,
'hostname': context.app_config['credentials']['imagename'] or context.hostname,
'version': None
}
return meta
def version(self):
parent = context.objects.find_parent(obj=self.object)
self.meta['version'] = parent.version if parent else None

# ---- File: tests/boxes/annotations/test_vatic.py (repo: WildbookOrg/wbia-deprecate-tpl-brambox, license: MIT) ----
# -*- coding: utf-8 -*-
import unittest
from brambox.boxes.annotations.annotation import Annotation
from brambox.boxes.annotations import VaticAnnotation, VaticParser
vatic_string = """-1 0 0 0 0 0 0 0 0 ?
-1 0 0 0 0 0 0 0 0 ?
-1 0 0 0 0 0 0 0 0 person
-1 0 0 0 0 1 0 0 0 person"""
class TestVaticAnnotation(unittest.TestCase):
def setUp(self):
self.anno = VaticAnnotation()
def tearDown(self):
pass
def test_serialize(self):
""" test if major fields: label, x, y, w, h, object_id, frame_nr are serialized """
frame_nr = 100
self.anno.class_label = 'person'
self.anno.object_id = 3
self.anno.x_top_left = 13
self.anno.y_top_left = 14
self.anno.width = 15
self.anno.height = 16
string = self.anno.serialize(frame_nr)
self.assertEqual(string, '3 13 14 28 30 100 0 0 0 person')
def test_serialize_round(self):
""" test if serialize rounds the x,y,w,h values correctly """
self.anno.x_top_left = 12.8
self.anno.y_top_left = 14.4
self.anno.width = 14.56
self.anno.height = 16.1
string = self.anno.serialize()
self.assertEqual(string, '-1 13 14 27 30 0 0 0 0 ?')
def test_serialize_occluded(self):
""" test if occluded flag is serialized """
self.anno.occluded = 1
string = self.anno.serialize()
self.assertEqual(string, '-1 0 0 0 0 0 0 1 0 ?')
def test_serialize_lost(self):
""" test if lost flag is serialized """
self.anno.lost = 1
string = self.anno.serialize()
self.assertEqual(string, '-1 0 0 0 0 0 1 0 0 ?')
def test_deserialize(self):
""" test if major fields: label, x, y, w, h, object_id, frame_nr are processed """
string = '3 13 14 28 30 100 0 0 0 person'
self.anno.deserialize(string)
self.assertEqual(self.anno.object_id, 3)
self.assertAlmostEqual(self.anno.x_top_left, 13)
self.assertAlmostEqual(self.anno.y_top_left, 14)
self.assertAlmostEqual(self.anno.width, 15)
self.assertAlmostEqual(self.anno.height, 16)
self.assertFalse(self.anno.lost)
self.assertFalse(self.anno.occluded)
self.assertEqual(self.anno.class_label, 'person')
def test_deserialize_occluded(self):
""" test if occluded flag is processed """
string = '-1 0 0 0 0 0 0 1 0 ?'
self.anno.deserialize(string)
self.assertTrue(self.anno.occluded)
def test_deserialize_lost(self):
""" test if lost flag is processed """
string = '-1 0 0 0 0 0 1 0 0 ?'
self.anno.deserialize(string)
self.assertTrue(self.anno.lost)
class TestVaticParser(unittest.TestCase):
def setUp(self):
self.parser = VaticParser()
def tearDown(self):
pass
def test_serialize(self):
""" test if basic serialize works """
testanno1 = Annotation()
testanno2 = Annotation()
testanno2.class_label = 'person'
obj = {}
obj['0'] = [testanno1, testanno1, testanno2]
obj['1'] = [testanno2]
string = self.parser.serialize(obj)
self.assertEqual(string, vatic_string)
def test_deserialize(self):
""" test if basic deserialize works """
obj = self.parser.deserialize(vatic_string)
self.assertEqual(type(obj), dict)
self.assertEqual(len(obj), 2)
self.assertEqual(len(obj['0']), 3)
self.assertEqual(len(obj['1']), 1)
self.assertEqual(obj['0'][0].class_label, '')
self.assertEqual(obj['1'][0].class_label, 'person')
if __name__ == '__main__':
unittest.main()

# ---- File: tests/test_supplemental.py (repo: silnrsi/langtags, license: MIT) ----
#!/usr/bin/python
# -*- encoding: utf-8

import unittest, os, re, json
from xml.etree import ElementTree as et

from palaso.sldr.langtags_full import LangTags, LangTag
from itertools import product

langtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub', 'langtags.json')
exceptions = set(["aii-Cyrl"])


class Supplemental(unittest.TestCase):
    ''' Tests alltags.txt for discrepancies against likelySubtags.xml '''

    def setUp(self):
        with open(langtagjson, "r") as inf:
            self.data = json.load(inf)
        self.ltags = {}
        for j in self.data:
            if j['tag'].startswith("_"):
                continue
            self.ltags[j['tag']] = j
            self.ltags[j['full']] = j
            if 'tags' in j:
                for t in j['tags']:
                    self.ltags[t] = j
        thisdir = os.path.dirname(__file__)
        self.doc = et.parse(os.path.join(thisdir, "supplementalData.xml"))

    def test_languageData(self):
        failures = []
        for e in self.doc.findall('./languageData/language'):
            lang = e.get('type')
            if lang == "und":
                continue
            scripts = e.get('scripts', '').split(' ')
            regions = e.get('territories', '').split(' ')
            for s in scripts:
                tag = lang + ("-" + s if len(s) else "")
                if tag not in self.ltags:
                    failures.append(tag)
                    continue
                for r in regions:
                    if not len(r):
                        continue
                    t = tag + "-" + r
                    if t in self.ltags:
                        continue
                    if r not in self.ltags[tag].get('regions', []):
                        failures.append(t)
        if len(failures):
            self.fail("Missing tags from supplementalData: " + str(failures))

    def test_names(self):
        for r in self.data:
            if r['tag'].startswith("_"):
                continue
            if 'names' in r:
                if any(x == u'↑↑↑' for x in r['names']):
                    self.fail("Inherited names item in " + str(r['tag']))
            if 'name' in r:
                if r['name'] == u'↑↑↑':
                    # fixed: the original indexed a literal list (str(['tag'])) instead of the record
                    self.fail("Inherited name in " + str(r['tag']))
| 35.492308 | 83 | 0.483745 | 268 | 2,307 | 4.13806 | 0.358209 | 0.056808 | 0.029757 | 0.030658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000698 | 0.378847 | 2,307 | 64 | 84 | 36.046875 | 0.769016 | 0.042913 | 0 | 0.111111 | 0 | 0 | 0.107322 | 0.010459 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.074074 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9372409c6e9d457358e3990049706136e6ed0a9a | 5,178 | py | Python | trafficmonitor/gui/secondary.py | Sumanth007/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | null | null | null | trafficmonitor/gui/secondary.py | Sumanth007/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | 1 | 2022-03-22T21:21:19.000Z | 2022-03-22T21:21:19.000Z | trafficmonitor/gui/secondary.py | SumanthTirumale/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | null | null | null | from pathlib import Path
from PyQt5.QtWidgets import QDialog, QLineEdit, QCheckBox, QPushButton, QApplication
from PyQt5.QtWidgets import QLabel, QMessageBox, QHBoxLayout, QFormLayout
from PyQt5.QtGui import QIcon
from PyQt5.Qt import Qt

from trafficmonitor.helper_functions import create_path, ping


class Secondary(QDialog):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setWindowTitle("Traffic Monitor")
        self.image_path = str(Path(__file__).absolute().parent.parent/"images")
        self.setWindowIcon(QIcon(str(Path(self.image_path)/"logo.ico")))
        self.resize(400, 200)
        self.center()

        # initialize values
        self.path = create_path()
        self.data = {}

        # initialize all widgets
        self.edit_execution_name = QLineEdit()
        self.edit_ip_address = QLineEdit()
        self.edit_proxy_address = QLineEdit("127.0.0.1")
        self.edit_proxy_port = QLineEdit("9090")
        self.check_box_upstream_proxy = QCheckBox("Enable Upstream proxy")
        self.button_start = QPushButton("Start")

        self.bind_signals()
        self.check_upstream_proxy()
        self.init_ui()

    def center(self):
        """Method to center the QMainWindow"""
        frame_gm = self.frameGeometry()
        screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
        center_point = QApplication.desktop().screenGeometry(screen).center()
        frame_gm.moveCenter(center_point)
        self.move(frame_gm.topLeft())

    def init_ui(self):
        form_layout = QFormLayout()
        horizontal_box1 = QHBoxLayout()
        horizontal_box2 = QHBoxLayout()

        form_layout.addRow(QLabel("Execution Name: "), self.edit_execution_name)
        form_layout.addRow(QLabel("Host Address: "), self.edit_ip_address)

        horizontal_box1.addStretch()
        horizontal_box1.addWidget(self.check_box_upstream_proxy)
        horizontal_box1.addStretch()
        form_layout.addRow(horizontal_box1)

        form_layout.addRow(QLabel("Proxy Address: "), self.edit_proxy_address)
        form_layout.addRow(QLabel("Proxy Port: "), self.edit_proxy_port)

        horizontal_box2.addStretch()
        horizontal_box2.addWidget(self.button_start)
        horizontal_box2.addStretch()
        form_layout.addRow(horizontal_box2)

        self.setLayout(form_layout)
        self.setWindowModality(Qt.ApplicationModal)
        self.show()

    def bind_signals(self):
        self.check_box_upstream_proxy.stateChanged.connect(self.check_upstream_proxy)
        self.button_start.clicked.connect(self.evt_button_start)

    def check_upstream_proxy(self):
        if self.check_box_upstream_proxy.isChecked():
            self.edit_proxy_address.setDisabled(False)
            self.edit_proxy_port.setDisabled(False)
        else:
            self.edit_proxy_address.setDisabled(True)
            self.edit_proxy_port.setDisabled(True)

    def evt_button_start(self):
        execution_name = self.edit_execution_name.text()
        ip_address = self.edit_ip_address.text()
        proxy_address = self.edit_proxy_address.text()
        proxy_port = self.edit_proxy_port.text()
        empty_values = [None, '']

        # validate the values
        if execution_name not in empty_values:
            is_file_exists = Path(f"{self.path}/{execution_name}.db")
            if not is_file_exists.exists():
                if ip_address not in empty_values:
                    if ping(ip_address):
                        if self.check_box_upstream_proxy.isChecked():
                            if proxy_address not in empty_values:
                                if proxy_port not in empty_values:
                                    self.data['UPSTREAM_PROXY_IP'] = proxy_address
                                    self.data['UPSTREAM_PROXY_PORT'] = proxy_port
                                    self.data['EXECUTION_NAME'] = execution_name
                                    self.data['IP_ADDRESS'] = ip_address
                                    self.close()
                                else:
                                    QMessageBox.information(self, "Warning", "Please enter proxy port")
                            else:
                                QMessageBox.information(self, "Warning", "Please enter proxy address")
                        else:
                            self.data['EXECUTION_NAME'] = execution_name
                            self.data['IP_ADDRESS'] = ip_address
                            self.close()
                    else:
                        QMessageBox.information(self, "Warning", f"'{ip_address} is unreachable!!'")
                else:
                    QMessageBox.information(self, "Warning", "Please enter host address")
            else:
                QMessageBox.information(self, "Warning", f"'{execution_name}' already exists!!")
        else:
            QMessageBox.information(self, "Warning", "Please enter execution name")
| 39.830769 | 104 | 0.595597 | 531 | 5,178 | 5.546139 | 0.242938 | 0.043464 | 0.044143 | 0.061121 | 0.397284 | 0.245161 | 0.152462 | 0.095416 | 0.071986 | 0.071986 | 0 | 0.008427 | 0.312476 | 5,178 | 129 | 105 | 40.139535 | 0.81882 | 0.018154 | 0 | 0.210526 | 0 | 0 | 0.09078 | 0.006268 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063158 | false | 0 | 0.063158 | 0 | 0.136842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9373925e0184fcb2bd8e59c3fee2f516b9d8c7c9 | 1,350 | py | Python | fly.py | anyaevostinar/FlyYeastModel | 22a81a30407cc20dd3491c558cd4266d9a73870f | [
"MIT"
] | null | null | null | fly.py | anyaevostinar/FlyYeastModel | 22a81a30407cc20dd3491c558cd4266d9a73870f | [
"MIT"
] | null | null | null | fly.py | anyaevostinar/FlyYeastModel | 22a81a30407cc20dd3491c558cd4266d9a73870f | [
"MIT"
] | 1 | 2015-05-27T19:00:14.000Z | 2015-05-27T19:00:14.000Z | """
A class library to model fly behavior.
"""
from random import *

WORLD_SIZE = 200


class Fly(object):
    '''Doc string'''
    def __init__(self, location):
        # A list for holding the collection of yeast spores in the fly gut
        self.stomach = []
        self.location = location
        self.fitness = 0

    def move(self, yeast_pop):
        # WORLD_SIZE // 4 keeps randint's upper bound an int (WORLD_SIZE / 4 is a float in Python 3)
        self.location += randint(0, WORLD_SIZE // 4)
        if self.location >= WORLD_SIZE:
            self.location = self.location - WORLD_SIZE
        if not yeast_pop[self.location] and len(self.stomach):
            hatched = self.stomach.pop()
            hatched.is_spore = False
            yeast_pop[self.location] = hatched

    def eat(self, yeast):
        if yeast.is_spore:
            self.stomach.append(yeast)
        else:
            self.fitness += 1

    def reproduce(self):
        # int(...) because round(x, 0) returns a float, which cannot be used as a list index
        new_loc = int(round(gauss(self.location, 2)))
        if new_loc < 0:
            new_loc = 0
        elif new_loc > WORLD_SIZE - 1:
            new_loc = WORLD_SIZE - 1
        # how best to keep fly location within yeast world??
        return Fly(new_loc)

    def update(self, yeast_pop):
        if yeast_pop[self.location]:
            self.eat(yeast_pop[self.location])
        if self.fitness == 50:
            return self.reproduce()
        else:
            self.move(yeast_pop)
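
# Added example (not part of the original file): a minimal sketch of driving one Fly
# against a yeast world. `FakeYeast` is a hypothetical stand-in providing only the
# `is_spore` attribute that the Fly methods above rely on.
if __name__ == '__main__':
    class FakeYeast(object):
        def __init__(self, is_spore=True):
            self.is_spore = is_spore

    yeast_pop = [None] * WORLD_SIZE   # one slot per world position
    yeast_pop[0] = FakeYeast()        # seed a single spore at position 0
    fly = Fly(0)
    for _ in range(10):
        fly.update(yeast_pop)
    print(fly.location, fly.fitness, len(fly.stomach))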
| 27.55102 | 73 | 0.577037 | 175 | 1,350 | 4.308571 | 0.36 | 0.175066 | 0.079576 | 0.132626 | 0.04244 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016484 | 0.325926 | 1,350 | 48 | 74 | 28.125 | 0.812088 | 0.121481 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.029412 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9375c927126dada2e11408b96c12b7feda3db9d6 | 18,074 | py | Python | lib/googlecloudsdk/compute/subcommands/instances/create.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/compute/subcommands/instances/create.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/compute/subcommands/instances/create.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for creating instances."""
import collections
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import addresses_utils
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import image_utils
from googlecloudsdk.compute.lib import instance_utils
from googlecloudsdk.compute.lib import metadata_utils
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.compute.lib import windows_password
from googlecloudsdk.compute.lib import zone_utils
from googlecloudsdk.core import log
DISK_METAVAR = (
'name=NAME [mode={ro,rw}] [boot={yes,no}] [device-name=DEVICE_NAME] '
'[auto-delete={yes,no}]')
class Create(base_classes.BaseAsyncCreator,
image_utils.ImageExpander,
addresses_utils.AddressExpander,
zone_utils.ZoneResourceFetcher):
"""Create Google Compute Engine virtual machine instances."""
@staticmethod
def Args(parser):
metadata_utils.AddMetadataArgs(parser)
instance_utils.AddDiskArgs(parser)
instance_utils.AddLocalSsdArgs(parser)
instance_utils.AddImageArgs(parser)
instance_utils.AddCanIpForwardArgs(parser)
instance_utils.AddAddressArgs(parser, instances=True)
instance_utils.AddMachineTypeArgs(parser)
instance_utils.AddMaintenancePolicyArgs(parser)
instance_utils.AddNetworkArgs(parser)
instance_utils.AddNoRestartOnFailureArgs(parser)
instance_utils.AddScopeArgs(parser)
instance_utils.AddTagsArgs(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
parser.add_argument(
'names',
metavar='NAME',
nargs='+',
help='The names of the instances to create.')
utils.AddZoneFlag(
parser,
resource_type='instances',
operation_type='create')
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'instances'
def ValidateLocalSsdFlags(self, args):
for local_ssd in args.local_ssd or []:
interface = local_ssd.get('interface')
if interface and interface not in instance_utils.LOCAL_SSD_INTERFACES:
raise exceptions.ToolException(
'Unexpected local SSD interface: [{given}]. '
'Legal values are [{ok}].'
.format(given=interface,
ok=', '.join(instance_utils.LOCAL_SSD_INTERFACES)))
def ValidateDiskFlags(self, args):
"""Validates the values of all disk-related flags."""
boot_disk_specified = False
for disk in args.disk or []:
disk_name = disk.get('name')
if not disk_name:
raise exceptions.ToolException(
'[name] is missing in [--disk]. [--disk] value must be of the form '
'[{0}].'.format(DISK_METAVAR))
mode_value = disk.get('mode')
if mode_value and mode_value not in ('rw', 'ro'):
raise exceptions.ToolException(
'Value for [mode] in [--disk] must be [rw] or [ro], not [{0}].'
.format(mode_value))
# Ensures that the user is not trying to attach a read-write
# disk to more than one instance.
if len(args.names) > 1 and mode_value == 'rw':
raise exceptions.ToolException(
'Cannot attach disk [{0}] in read-write mode to more than one '
'instance.'.format(disk_name))
boot_value = disk.get('boot')
if boot_value and boot_value not in ('yes', 'no'):
raise exceptions.ToolException(
'Value for [boot] in [--disk] must be [yes] or [no], not [{0}].'
.format(boot_value))
auto_delete_value = disk.get('auto-delete')
if auto_delete_value and auto_delete_value not in ['yes', 'no']:
raise exceptions.ToolException(
'Value for [auto-delete] in [--disk] must be [yes] or [no], not '
'[{0}].'.format(auto_delete_value))
# If this is a boot disk and we have already seen a boot disk,
# we need to fail because only one boot disk can be attached.
if boot_value == 'yes':
if boot_disk_specified:
raise exceptions.ToolException(
'Each instance can have exactly one boot disk. At least two '
'boot disks were specified through [--disk].')
else:
boot_disk_specified = True
if args.image and boot_disk_specified:
raise exceptions.ToolException(
'Each instance can have exactly one boot disk. One boot disk '
'was specified through [--disk] and another through [--image].')
if boot_disk_specified:
if args.boot_disk_device_name:
raise exceptions.ToolException(
'[--boot-disk-device-name] can only be used when creating a new '
'boot disk.')
if args.boot_disk_type:
raise exceptions.ToolException(
'[--boot-disk-type] can only be used when creating a new boot '
'disk.')
if args.boot_disk_size:
raise exceptions.ToolException(
'[--boot-disk-size] can only be used when creating a new boot '
'disk.')
if args.no_boot_disk_auto_delete:
raise exceptions.ToolException(
'[--no-boot-disk-auto-delete] can only be used when creating a '
'new boot disk.')
def UseExistingBootDisk(self, args):
"""Returns True if the user has specified an existing boot disk."""
return any(disk.get('boot') == 'yes' for disk in args.disk or [])
def CreatePersistentAttachedDiskMessages(self, args, instance_ref):
"""Returns a list of AttachedDisk messages and the boot disk's reference."""
disks = []
boot_disk_ref = None
for disk in args.disk or []:
name = disk['name']
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = self.messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
boot = disk.get('boot') == 'yes'
auto_delete = disk.get('auto-delete') == 'yes'
disk_ref = self.CreateZonalReference(
name, instance_ref.zone,
resource_type='disks')
if boot:
boot_disk_ref = disk_ref
attached_disk = self.messages.AttachedDisk(
autoDelete=auto_delete,
boot=boot,
deviceName=disk.get('device-name'),
mode=mode,
source=disk_ref.SelfLink(),
type=self.messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
# The boot disk must end up at index 0.
if boot:
disks = [attached_disk] + disks
else:
disks.append(attached_disk)
return disks, boot_disk_ref
def CreateLocalSsdMessage(self, zone, device_name, interface):
disk_type_ref = self.CreateZonalReference('local-ssd', zone,
resource_type='diskTypes')
maybe_interface_enum = (
self.messages.AttachedDisk.InterfaceValueValuesEnum(interface)
if interface else None)
return self.messages.AttachedDisk(
type=self.messages.AttachedDisk.TypeValueValuesEnum.SCRATCH,
autoDelete=True,
deviceName=device_name,
interface=maybe_interface_enum,
mode=self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
initializeParams=self.messages.AttachedDiskInitializeParams(
diskType=disk_type_ref.SelfLink()),
)
def CreateDefaultBootAttachedDiskMessage(
self, args, boot_disk_size_gb, image_uri, instance_ref):
"""Returns an AttachedDisk message for creating a new boot disk."""
if args.boot_disk_type:
disk_type_ref = self.CreateZonalReference(
args.boot_disk_type, instance_ref.zone,
resource_type='diskTypes')
disk_type_uri = disk_type_ref.SelfLink()
else:
disk_type_ref = None
disk_type_uri = None
return self.messages.AttachedDisk(
autoDelete=not args.no_boot_disk_auto_delete,
boot=True,
deviceName=args.boot_disk_device_name,
initializeParams=self.messages.AttachedDiskInitializeParams(
sourceImage=image_uri,
diskSizeGb=boot_disk_size_gb,
diskType=disk_type_uri),
mode=self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
type=self.messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
def FetchDiskResources(self, disk_refs):
"""Returns a list of disk resources corresponding to the disk references."""
requests = []
for disk_ref in disk_refs:
requests.append((
self.compute.disks,
'Get',
self.messages.ComputeDisksGetRequest(
disk=disk_ref.Name(),
project=disk_ref.project,
zone=disk_ref.zone)))
errors = []
res = list(request_helper.MakeRequests(
requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch some boot disks:')
return res
def CreateServiceAccountMessages(self, args):
"""Returns a list of ServiceAccount messages corresponding to --scopes."""
if args.no_scopes:
scopes = []
else:
scopes = args.scopes or constants.DEFAULT_SCOPES
accounts_to_scopes = collections.defaultdict(list)
for scope in scopes:
parts = scope.split('=')
if len(parts) == 1:
account = 'default'
scope_uri = scope
elif len(parts) == 2:
account, scope_uri = parts
else:
raise exceptions.ToolException(
'[{0}] is an illegal value for [--scopes]. Values must be of the '
'form [SCOPE] or [ACCOUNT=SCOPE].'.format(scope))
# Expands the scope if the user provided an alias like
# "compute-rw".
scope_uri = constants.SCOPES.get(scope_uri, scope_uri)
accounts_to_scopes[account].append(scope_uri)
res = []
for account, scopes in sorted(accounts_to_scopes.iteritems()):
res.append(self.messages.ServiceAccount(
email=account,
scopes=sorted(scopes)))
return res
def CreateNetworkInterfaceMessage(self, args, instance_refs):
"""Returns a new NetworkInterface message."""
network_ref = self.CreateGlobalReference(
args.network, resource_type='networks')
network_interface = self.messages.NetworkInterface(
network=network_ref.SelfLink())
if not args.no_address:
access_config = self.messages.AccessConfig(
name=constants.DEFAULT_ACCESS_CONFIG_NAME,
type=self.messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)
# If the user provided an external IP, populate the access
# config with it.
if len(instance_refs) == 1:
region = utils.ZoneNameToRegionName(instance_refs[0].zone)
address = self.ExpandAddressFlag(args, region)
if address:
access_config.natIP = address
network_interface.accessConfigs = [access_config]
return network_interface
def CreateRequests(self, args):
self.ValidateDiskFlags(args)
self.ValidateLocalSsdFlags(args)
if args.maintenance_policy:
on_host_maintenance = (
self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
args.maintenance_policy))
else:
on_host_maintenance = None
scheduling = self.messages.Scheduling(
automaticRestart=not args.no_restart_on_failure,
onHostMaintenance=on_host_maintenance)
service_accounts = self.CreateServiceAccountMessages(args)
if args.tags:
tags = self.messages.Tags(items=args.tags)
else:
tags = None
metadata = metadata_utils.ConstructMetadataMessage(
self.messages,
metadata=args.metadata,
metadata_from_file=args.metadata_from_file)
# If the user already provided an initial Windows password and
# username through metadata, then there is no need to check
# whether the image or the boot disk is Windows.
windows_username_present = False
windows_password_present = False
for kv in metadata.items:
if kv.key == constants.INITIAL_WINDOWS_USER_METADATA_KEY_NAME:
windows_username_present = True
if kv.key == constants.INITIAL_WINDOWS_PASSWORD_METADATA_KEY_NAME:
windows_password_present = True
check_for_windows_image = (not windows_username_present or
not windows_password_present)
boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)
instance_refs = self.CreateZonalReferences(args.names, args.zone)
# Check if the zone is deprecated or has maintenance coming.
self.WarnForZonalCreation(instance_refs)
network_interface = self.CreateNetworkInterfaceMessage(args, instance_refs)
# The element at index i is the machine type URI for instance
# i. We build this list here because we want to delay work that
# requires API calls as much as possible. This leads to a better
# user experience because the tool can fail fast upon a spelling
# mistake instead of delaying the user by making API calls whose
# purpose has already been rendered moot by the spelling mistake.
machine_type_uris = []
for instance_ref in instance_refs:
machine_type_uris.append(self.CreateZonalReference(
args.machine_type, instance_ref.zone,
resource_type='machineTypes').SelfLink())
create_boot_disk = not self.UseExistingBootDisk(args)
add_windows_credentials_to_metadata = False
if create_boot_disk:
image_uri, image_resource = self.ExpandImageFlag(
args, return_image_resource=check_for_windows_image)
if (check_for_windows_image and
image_utils.HasWindowsLicense(image_resource, self.resources)):
log.debug('[%s] is a Windows image.', image_resource.selfLink)
add_windows_credentials_to_metadata = True
else:
image_uri = None
# A list of lists where the element at index i contains a list of
# disk messages that should be set for the instance at index i.
disks_messages = []
# A mapping of zone to boot disk references for all existing boot
# disks that are being attached.
existing_boot_disks = {}
for instance_ref in instance_refs:
persistent_disks, boot_disk_ref = (
self.CreatePersistentAttachedDiskMessages(args, instance_ref))
local_ssds = [
self.CreateLocalSsdMessage(
instance_ref.zone, x.get('device-name'), x.get('interface'))
for x in args.local_ssd or []]
if create_boot_disk:
boot_disk = self.CreateDefaultBootAttachedDiskMessage(
args, boot_disk_size_gb, image_uri, instance_ref)
persistent_disks = [boot_disk] + persistent_disks
else:
existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
disks_messages.append(persistent_disks + local_ssds)
# Now for every existing boot disk being attached, we have to
# figure out whether it has a Windows license.
if check_for_windows_image and existing_boot_disks:
# Sorts the disk references by zone, so the code behaves
# deterministically.
disk_resources = self.FetchDiskResources(
disk_ref for _, disk_ref in sorted(existing_boot_disks.iteritems()))
for disk_resource in disk_resources:
if image_utils.HasWindowsLicense(disk_resource, self.resources):
log.debug('[%s] has a Windows image.', disk_resource.selfLink)
add_windows_credentials_to_metadata = True
if add_windows_credentials_to_metadata:
if not windows_username_present:
username = self.project.split(':')[-1][
:constants.MAX_WINDOWS_USERNAME_LENGTH]
metadata.items.append(self.messages.Metadata.ItemsValueListEntry(
key=constants.INITIAL_WINDOWS_USER_METADATA_KEY_NAME,
value=username))
if not windows_password_present:
metadata.items.append(self.messages.Metadata.ItemsValueListEntry(
key=constants.INITIAL_WINDOWS_PASSWORD_METADATA_KEY_NAME,
value=windows_password.Generate()))
requests = []
for instance_ref, machine_type_uri, disks in zip(
instance_refs, machine_type_uris, disks_messages):
requests.append(self.messages.ComputeInstancesInsertRequest(
instance=self.messages.Instance(
canIpForward=args.can_ip_forward,
disks=disks,
description=args.description,
machineType=machine_type_uri,
metadata=metadata,
name=instance_ref.Name(),
networkInterfaces=[network_interface],
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags,
),
project=self.project,
zone=instance_ref.zone))
return requests
Create.detailed_help = {
'brief': 'Create Compute Engine virtual machine instances',
'DESCRIPTION': """\
*{command}* facilitates the creation of Google Compute Engine
virtual machines. For example, running:
$ {command} example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a
will create three instances called 'example-instance-1',
'example-instance-2', and 'example-instance-3' in the
``us-central1-a'' zone.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES': """\
To create an instance with the latest ``Red Hat Enterprise Linux
6'' image available, run:
$ {command} example-instance --image rhel-6 --zone us-central1-a
""",
}
| 37.1893 | 99 | 0.676441 | 2,122 | 18,074 | 5.583883 | 0.18426 | 0.035784 | 0.03072 | 0.023631 | 0.256477 | 0.165162 | 0.100937 | 0.086758 | 0.066672 | 0.060427 | 0 | 0.002033 | 0.238077 | 18,074 | 485 | 100 | 37.265979 | 0.858398 | 0.108886 | 0 | 0.168022 | 0 | 0.01355 | 0.138348 | 0.008857 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03794 | false | 0.02168 | 0.03523 | 0.00813 | 0.105691 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9376ac585073afdf1faa0ba83d55f23d93bb24fe | 6,215 | py | Python | messidge/broker/identity.py | RantyDave/messidge | ada4dfb1f4df5bcbe3c0920fdf4c75b030624c88 | [
"BSD-2-Clause"
] | 1 | 2017-10-26T00:09:49.000Z | 2017-10-26T00:09:49.000Z | messidge/broker/identity.py | 20ft/messidge | ada4dfb1f4df5bcbe3c0920fdf4c75b030624c88 | [
"BSD-2-Clause"
] | null | null | null | messidge/broker/identity.py | 20ft/messidge | ada4dfb1f4df5bcbe3c0920fdf4c75b030624c88 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017 David Preece - davep@polymath.tech, All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging
import os
import shortuuid

from threading import Thread
from bottle import Bottle, run, request
from litecache.cache import SqlCache

ident_init = """
CREATE TABLE nodes (pk TEXT NOT NULL UNIQUE, json BLOB);
CREATE TABLE users (pk TEXT NOT NULL UNIQUE, email TEXT NOT NULL, json BLOB);
CREATE TABLE pending (token TEXT NOT NULL UNIQUE, email TEXT NOT NULL);
"""


class Identity:
    """A default provider of identity and configuration"""

    def __init__(self, directory="~"):
        """Construct the identity database if it's not there.

        :param directory: The directory in which to place the database (identity.sqlite3)"""
        self.db = SqlCache(os.path.expanduser(directory), 'identity', ident_init)

    def stop(self):
        """Stop the background (SqlCache) thread before closing"""
        logging.debug("Closing Identity")
        self.db.close()

    def create_pending_user(self, email) -> str:
        """Registers the intention for someone to become a registered user

        :param email: email address of the user.
        :return: confirmation token to give to the user."""
        token = shortuuid.uuid()
        self.db.mutate("INSERT INTO pending (token, email) VALUES (?, ?)", (token, email))
        return token

    def pending_users_for_token(self, token) -> []:
        """Return the pending users for the given token (may well be zero).

        :param token: the token a user was given in order to be able to confirm their account.
        :return: The list of pending users for that token."""
        return self.db.query("SELECT email FROM pending WHERE token=?", (token,))

    def register_user(self, pk_b64: str, email: str, config: str):
        """Registers a user as being valid.

        :param pk_b64: The user's primary key - base64 encoded string.
        :param email: The user's email address.
        :param config: A json description of any configuration to be associated with the user."""
        self.db.mutate("DELETE FROM pending WHERE email=?", (email,))
        self.db.mutate("INSERT INTO users (pk, email, json) VALUES (?, ?, ?)", (pk_b64, email, config))

    def user_config_from_db(self, pk_b64: str) -> (str, str):  # is used to check for presence in the db, too
        """Returns the json configuration for a user.

        :param pk_b64: The user's primary key - base64 encoded string.
        :return: A tuple of email address and the json configuration."""
        return self.db.query_one("SELECT email, json FROM users WHERE pk=?", (pk_b64,), "Unknown user")

    def raise_for_no_user(self, email: str):
        """Raises an error if this email address does not have an account.

        :param email: email address of the user."""
        self.db.query_one("SELECT * FROM users WHERE email=?", (email,), "no validated account")

    def register_node(self, pk_b64: str, config: str):
        """Writes a node's configuration into the database.

        :param pk_b64: The node's primary key - base64 encoded string.
        :param config: A json description of any configuration to be associated with the node."""
        self.db.mutate("INSERT INTO nodes (pk, json) VALUES (?, ?)", (pk_b64, config))

    def node_config_from_db(self, pk_b64: str) -> str:
        """Returns the json configuration of a node.

        :param pk_b64: The node's primary key - base64 encoded string.
        :return: The json configuration for the node."""
        return self.db.query_one("SELECT json FROM nodes WHERE pk=?", (pk_b64,), "Unknown node")[0]


confirmation_server = Bottle()


class AccountConfirmationServer(Thread):
    """A simple HTTP server for confirming accounts"""
    # has single use tokens so no real need to SSL
    identity = None
    pk = None
    port = None

    def __init__(self, identity, keys, port):
        super().__init__(name=str("Account Confirmation Server"), daemon=True)
        AccountConfirmationServer.identity = identity
        AccountConfirmationServer.pk = keys.public
        AccountConfirmationServer.port = port
        self.start()

    def stop(self):
        logging.debug("Stopping AccountConfirmationServer")
        confirmation_server.close()

    @staticmethod
    @confirmation_server.route('/', method='POST')
    def account():
        # de-HTTP the request
        try:
            token, user_pk = request.body.read().decode().split()
        except Exception:  # narrowed from a bare except so SystemExit/KeyboardInterrupt still propagate
            logging.warning("Off-spec request to account creation server: " + request.body.read().decode())
            return None

        # valid token?
        pending_records = AccountConfirmationServer.identity.pending_users_for_token(token)
        if len(pending_records) == 0:
            logging.warning("An attempt was made to confirm an account with an incorrect token: " + token)
            return "Fail: this token is either incorrect or has been used already."
        user_email = pending_records[0][0]

        # all good
        AccountConfirmationServer.identity.register_user(user_pk, user_email, "{}")
        logging.info("Confirmed an account for: " + user_email)
        return AccountConfirmationServer.pk

    def run(self):
        try:
            logging.info("Started account confirmation server: 0.0.0.0:" + str(AccountConfirmationServer.port))
            run(app=confirmation_server, host='0.0.0.0', port=AccountConfirmationServer.port, quiet=True)
        except OSError:
            logging.critical("Could not bind account confirmation server, exiting")
            exit(1)
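
# Added example (not part of the original module): a minimal client-side sketch of
# the confirmation handshake handled by account() above. The host/port defaults are
# assumptions, and the third-party `requests` library is assumed to be installed.
def example_confirm_account(token, user_pk_b64, host="localhost", port=8000):
    """POST '<token> <user_pk>' to the confirmation server and return the broker's public key."""
    import requests  # imported lazily so this module does not require requests at import time

    # The server expects the raw request body to be the token and the user's
    # public key, separated by whitespace (see account() above).
    reply = requests.post("http://{}:{}/".format(host, port),
                          data="{} {}".format(token, user_pk_b64))
    return reply.text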
| 42.568493 | 111 | 0.677715 | 832 | 6,215 | 4.989183 | 0.298077 | 0.014454 | 0.01325 | 0.011563 | 0.172489 | 0.137557 | 0.12503 | 0.105999 | 0.07709 | 0.07709 | 0 | 0.010393 | 0.225905 | 6,215 | 145 | 112 | 42.862069 | 0.852422 | 0.370877 | 0 | 0.056338 | 0 | 0 | 0.260355 | 0.006724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.183099 | false | 0 | 0.084507 | 0 | 0.43662 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
937afaaa0ef96d58eb433c0d7ddeed51e90f25a2 | 9,947 | py | Python | src/cc_catalog_airflow/dags/common/storage/media.py | sarayourfriend/openverse-catalog | b12ba815de782032f72ffa4f5620cfc8de8c84bd | [
"MIT"
] | null | null | null | src/cc_catalog_airflow/dags/common/storage/media.py | sarayourfriend/openverse-catalog | b12ba815de782032f72ffa4f5620cfc8de8c84bd | [
"MIT"
] | null | null | null | src/cc_catalog_airflow/dags/common/storage/media.py | sarayourfriend/openverse-catalog | b12ba815de782032f72ffa4f5620cfc8de8c84bd | [
"MIT"
] | null | null | null | import abc
from datetime import datetime
import logging
import os
from typing import Optional, Union

from common.licenses.licenses import is_valid_license_info
from common.storage import util

logger = logging.getLogger(__name__)

# Filter out tags that exactly match these terms. All terms should be
# lowercase.
TAG_BLACKLIST = {"no person", "squareformat"}

# Filter out tags that contain the following terms. All entries should be
# lowercase.
TAG_CONTAINS_BLACKLIST = {
    "flickriosapp",
    "uploaded",
    ":",
    "=",
    "cc0",
    "by",
    "by-nc",
    "by-nd",
    "by-sa",
    "by-nc-nd",
    "by-nc-sa",
    "pdm",
}

COMMON_CRAWL = 'commoncrawl'
PROVIDER_API = 'provider_api'


class MediaStore(metaclass=abc.ABCMeta):
    """
    An abstract base class that stores media information from a given provider.

    Optional init arguments:
    provider:       String marking the provider in the `media`
                    (`image`, `audio` etc) table of the DB.
    output_file:    String giving a temporary .tsv filename (*not* the
                    full path) where the media info should be stored.
    output_dir:     String giving a path where `output_file` should be placed.
    buffer_length:  Integer giving the maximum number of media information rows
                    to store in memory before writing them to disk.
    """

    def __init__(
            self,
            provider: Optional[str] = None,
            output_file: Optional[str] = None,
            output_dir: Optional[str] = None,
            buffer_length: int = 100,
            media_type: Optional[str] = "generic",
    ):
        logger.info(f"Initialized {media_type} MediaStore"
                    f" with provider {provider}")
        self.media_type = media_type
        self._media_buffer = []
        self._total_items = 0
        self._PROVIDER = provider
        self._BUFFER_LENGTH = buffer_length
        self._NOW = datetime.now()
        self._OUTPUT_PATH = self._initialize_output_path(
            output_dir,
            output_file,
            provider,
        )
        self.columns = None

    def save_item(self, media) -> None:
        """
        Appends item data to the buffer as a tsv row,
        only if data is valid.

        Args:
            media: a namedtuple with validated media metadata
        """
        tsv_row = self._create_tsv_row(media)
        if tsv_row:
            self._media_buffer.append(tsv_row)
            self._total_items += 1
        if len(self._media_buffer) >= self._BUFFER_LENGTH:
            self._flush_buffer()

    @abc.abstractmethod
    def add_item(self, **kwargs):
        """
        Abstract method to clean the item data and add it to the store
        """
        pass

    def clean_media_metadata(self, **media_data) -> Optional[dict]:
        """
        Cleans and enriches the base media metadata common for all media types.

        Even though we clean license info in the provider API scripts,
        we validate it here, too, to make sure we don't have
        invalid license information in the database.

        Returns a dictionary: media_type-specific fields are untouched,
        and for common metadata we:
        - validate `license_info`
        - enrich `metadata`,
        - replace `raw_tags` with enriched `tags`,
        - validate `source`,
        - add `provider`,
        - add `filesize` (with value of None)

        Returns None if license is invalid
        """
        if (
                media_data['license_info'].license is None
                or not is_valid_license_info(media_data['license_info'])
        ):
            logger.debug("Discarding media due to invalid license")
            return None
        media_data['source'] = util.get_source(
            media_data.get('source'),
            self._PROVIDER
        )
        # Add ingestion_type column value based on `source`.
        # The implementation is based on `ingestion_column`
        if media_data.get('ingestion_type') is None:
            if media_data['source'] == 'commoncrawl':
                media_data['ingestion_type'] = 'commoncrawl'
            else:
                media_data['ingestion_type'] = 'provider_api'
        media_data['tags'] = self._enrich_tags(
            media_data.pop('raw_tags', None)
        )
        media_data['meta_data'] = self._enrich_meta_data(
            media_data.pop('meta_data', None),
            media_data['license_info'].url,
            media_data['license_info'].raw_url,
        )
        media_data['license_'] = media_data['license_info'].license
        media_data['license_version'] = media_data['license_info'].version

        media_data.pop('license_info', None)
        media_data['provider'] = self._PROVIDER
        media_data['filesize'] = None
        return media_data

    def commit(self):
        """Writes all remaining media items in the buffer to disk."""
        self._flush_buffer()
        return self.total_items

    def _initialize_output_path(
            self,
            output_dir: Optional[str],
            output_file: Optional[str],
            provider: str,
    ) -> str:
        """Creates the path for the tsv file.

        If output_dir and output_file are not given,
        the following filename is used:
        `/tmp/{provider_name}_{media_type}_{timestamp}.tsv`

        Returns:
            Path of the tsv file to write media data pulled from providers
        """
        if output_dir is None:
            logger.info("No given output directory. "
                        "Using OUTPUT_DIR from environment.")
            output_dir = os.getenv("OUTPUT_DIR")
        if output_dir is None:
            logger.warning(
                "OUTPUT_DIR is not set in the environment. "
                "Output will go to /tmp."
            )
            output_dir = "/tmp"

        if output_file is not None:
            output_file = str(output_file)
        else:
            datetime_string = datetime.strftime(
                self._NOW, '%Y%m%d%H%M%S')
            output_file = (
                f"{provider}_{self.media_type}"
                f"_{datetime_string}.tsv"
            )

        output_path = os.path.join(output_dir, output_file)
        logger.info(f"Output path: {output_path}")
        return output_path

    @property
    def total_items(self):
        """Get total items for directly using in scripts."""
        return self._total_items

    def _create_tsv_row(self, item):
        row_length = len(self.columns)
        prepared_strings = [
            self.columns[i].prepare_string(item[i]) for i in range(row_length)
        ]
        logger.debug(f"Prepared strings list:\n{prepared_strings}")
        for i in range(row_length):
            if self.columns[i].REQUIRED and prepared_strings[i] is None:
                logger.warning(f"Row missing required {self.columns[i].NAME}")
                return None
        else:
            return (
                "\t".join(
                    [s if s is not None else "\\N"
                     for s in prepared_strings])
                + "\n"
            )

    def _flush_buffer(self) -> int:
        buffer_length = len(self._media_buffer)
        if buffer_length > 0:
            logger.info(f"Writing {buffer_length} lines from buffer to disk.")
            with open(self._OUTPUT_PATH, "a") as f:
                f.writelines(self._media_buffer)
            self._media_buffer = []
            logger.debug(
                f"Total Media Items Processed so far: {self._total_items}"
            )
        else:
            logger.debug("Empty buffer! Nothing to write.")

        return buffer_length

    @staticmethod
    def _tag_blacklisted(tag: Union[str, dict]) -> bool:
        """
        Tag is banned or contains a banned substring.

        :param tag: the tag to be verified against the blacklist
        :return: true if tag is blacklisted, else returns false
        """
        if type(tag) == dict:  # check if the tag is already enriched
            tag = tag.get("name")
        if tag in TAG_BLACKLIST:
            return True
        for blacklisted_substring in TAG_CONTAINS_BLACKLIST:
            if blacklisted_substring in tag:
                return True
        return False

    @staticmethod
    def _enrich_meta_data(
            meta_data, license_url, raw_license_url) -> dict:
        """
        Makes sure that meta_data is a dictionary, and contains
        license_url and raw_license_url
        """
        if type(meta_data) != dict:
            logger.debug(f"`meta_data` is not a dictionary: {meta_data}")
            enriched_meta_data = {
                "license_url": license_url,
                "raw_license_url": raw_license_url,
            }
        else:
            enriched_meta_data = meta_data
            enriched_meta_data.update(
                license_url=license_url, raw_license_url=raw_license_url
            )
        return enriched_meta_data

    def _enrich_tags(self, raw_tags) -> Optional[list]:
        """Takes a list of tags and adds provider information to them

        Args:
            raw_tags: List of strings or dictionaries

        Returns:
            A list of 'enriched' tags:
            {"name": "tag_name", "provider": self._PROVIDER}
        """
        if type(raw_tags) != list:
            logger.debug("`tags` is not a list.")
            return None
        else:
            return [
                self._format_raw_tag(tag)
                for tag in raw_tags
                if not self._tag_blacklisted(tag)
            ]

    def _format_raw_tag(self, tag):
        if type(tag) == dict and tag.get("name") and tag.get("provider"):
            logger.debug(f"Tag already enriched: {tag}")
            return tag
        else:
            logger.debug(f"Enriching tag: {tag}")
            return {"name": tag, "provider": self._PROVIDER}
| 34.065068 | 79 | 0.579672 | 1,184 | 9,947 | 4.663007 | 0.217061 | 0.039123 | 0.023184 | 0.021735 | 0.063213 | 0.032241 | 0.016664 | 0.016664 | 0.016664 | 0.016664 | 0 | 0.001053 | 0.331457 | 9,947 | 291 | 80 | 34.182131 | 0.829173 | 0.23957 | 0 | 0.123711 | 0 | 0 | 0.15183 | 0.013599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06701 | false | 0.005155 | 0.036082 | 0 | 0.190722 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
937ca990f86faab5de19956b6f0ea3640d942692 | 1,864 | py | Python | files/scrapli/app_async.py | cremsburg/SRX-get-security-zones | 6bd0522b956f2e69d7d86a01ffd57802c392a666 | [
"Apache-2.0"
] | null | null | null | files/scrapli/app_async.py | cremsburg/SRX-get-security-zones | 6bd0522b956f2e69d7d86a01ffd57802c392a666 | [
"Apache-2.0"
] | null | null | null | files/scrapli/app_async.py | cremsburg/SRX-get-security-zones | 6bd0522b956f2e69d7d86a01ffd57802c392a666 | [
"Apache-2.0"
] | 2 | 2021-10-30T00:55:47.000Z | 2021-11-16T16:20:54.000Z | import asyncio
import xmltodict
from scrapli_netconf.driver import AsyncNetconfDriver
from scrapli.logging import enable_basic_logging
from jinja2 import Environment, FileSystemLoader

# Enable logging. Create a log file in the current directory.
enable_basic_logging(file=True, level="debug")

GALVESTON = {
    "host": "192.168.105.137",
    "auth_username": "scrapli",
    "auth_password": "juniper123",
    "auth_strict_key": False,
    "transport": "asyncssh"
}

SANANTONIO = {
    "host": "192.168.105.146",
    "auth_username": "scrapli",
    "auth_password": "juniper123",
    "auth_strict_key": False,
    "transport": "asyncssh"
}

DEVICES = [GALVESTON, SANANTONIO]

RPC = """
<get-zones-information>
</get-zones-information>
"""

# jinja2 parameters
env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)
template = env.get_template('test.j2')


# async function to open a connection and return the output of our RPC
async def gather_security_zones(device):
    conn = AsyncNetconfDriver(**device)
    await conn.open()
    result = await conn.rpc(filter_=RPC)
    await conn.close()
    return result


# primary function
async def main():
    """Function to gather coroutines, await them and print results"""
    coroutines = [gather_security_zones(device) for device in DEVICES]
    results = await asyncio.gather(*coroutines)
    for each in results:
        reply_as_dict = xmltodict.parse(each.result)
        security_zones = reply_as_dict["rpc-reply"]["zones-information"]["zones-security"]
        # template output with jinja2 and save to file
        output_from_parsed_template = template.render(security_zones=security_zones)
        with open(f"./output/{each.host}.yaml", "w") as fh:
            fh.write(output_from_parsed_template)


if __name__ == "__main__":
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main()) | 29.125 | 90 | 0.716738 | 232 | 1,864 | 5.564655 | 0.443966 | 0.050349 | 0.027885 | 0.020139 | 0.117738 | 0.117738 | 0.117738 | 0.117738 | 0.117738 | 0.117738 | 0 | 0.021893 | 0.166845 | 1,864 | 64 | 91 | 29.125 | 0.809401 | 0.111588 | 0 | 0.181818 | 0 | 0 | 0.209962 | 0.045397 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.045455 | 0.113636 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
937ea31fc0d7d3bea616809be52b9e4bc8a4e803 | 5,245 | py | Python | hwsushy/hwsushy/tests/unit/resources/test_base.py | saintifly/Server_Manage_Plugin | ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9 | [
"Apache-2.0"
] | null | null | null | hwsushy/hwsushy/tests/unit/resources/test_base.py | saintifly/Server_Manage_Plugin | ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9 | [
"Apache-2.0"
] | null | null | null | hwsushy/hwsushy/tests/unit/resources/test_base.py | saintifly/Server_Manage_Plugin | ae272e7e3ca065236cc7bc86c296ff9eb83f1bb9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from hwsushy import exceptions
from hwsushy.resources import base as resource_base
from hwsushy.tests.unit import base


class BaseResouce(resource_base.ResourceBase):

    def _parse_attributes(self):
        pass


class ResourceBaseTestCase(base.TestCase):

    def setUp(self):
        super(ResourceBaseTestCase, self).setUp()
        self.conn = mock.Mock()
        self.base_resource = BaseResouce(connector=self.conn, path='/Foo',
                                         redfish_version='1.0.2')
        # refresh() is called in the constructor
        self.conn.reset_mock()

    def test_refresh(self):
        self.base_resource.refresh()
        self.conn.get.assert_called_once_with(path='/Foo')

    def test_refresh_resource_not_found(self):
        # a 404 from the connector should be translated to ResourceNotFoundError
        self.conn.get.side_effect = exceptions.HTTPError(
            method='GET', url='http://foo.bar:8000/redfish/v1', error='boom',
            status_code=404)
        self.assertRaises(exceptions.ResourceNotFoundError,
                          self.base_resource.refresh)
        self.conn.get.assert_called_once_with(path='/Foo')

    def test_refresh_http_error_reraised(self):
        # non-404 HTTP errors should be re-raised unchanged
        self.conn.get.side_effect = exceptions.HTTPError(
            method='GET', url='http://foo.bar:8000/redfish/v1', error='boom',
            status_code=400)
        self.assertRaises(exceptions.HTTPError, self.base_resource.refresh)
        self.conn.get.assert_called_once_with(path='/Foo')


class TestResouce(resource_base.ResourceBase):
    """A concrete Test Resource to test against"""

    def __init__(self, connector, identity, redfish_version=None):
        """Ctor of TestResouce

        :param connector: A Connector instance
        :param identity: The id of the Resource
        :param redfish_version: The version of RedFish. Used to construct
            the object according to schema of the given version.
        """
        super(TestResouce, self).__init__(connector, 'Fakes/%s' % identity,
                                          redfish_version)
        self.identity = identity

    def _parse_attributes(self):
        pass


class TestResouceCollection(resource_base.ResourceCollectionBase):
    """A concrete Test Resource Collection to test against"""

    @property
    def _resource_type(self):
        return TestResouce

    def __init__(self, connector, redfish_version=None):
        """Ctor of TestResourceCollection

        :param connector: A Connector instance
        :param redfish_version: The version of RedFish. Used to construct
            the object according to schema of the given version.
        """
        super(TestResouceCollection, self).__init__(connector, 'Fakes',
                                                    redfish_version)


class ResourceCollectionBaseTestCase(base.TestCase):

    def setUp(self):
        super(ResourceCollectionBaseTestCase, self).setUp()
        self.conn = mock.MagicMock()
        self.test_resource_collection = TestResouceCollection(
            self.conn, redfish_version='1.0.x')
        self.conn.reset_mock()

    def test_get_member(self):
        # | GIVEN |
        # setting a valid member identity
        self.test_resource_collection.members_identities = ('1',)
        # | WHEN |
        result = self.test_resource_collection.get_member('1')
        # | THEN |
        self.assertTrue(isinstance(result, TestResouce))
        self.assertEqual('1', result.identity)
        self.assertEqual('1.0.x', result.redfish_version)

    def test_get_member_for_invalid_id(self):
        # | GIVEN |
        # setting a valid member identity
        self.test_resource_collection.members_identities = ('1',)
        self.conn.get.side_effect = exceptions.HTTPError(
            method='GET', url='http://foo.bar:8000/redfish/v1/Fakes/2',
            error='boom', status_code=404)
        # | WHEN & THEN |
        self.assertRaises(exceptions.ResourceNotFoundError,
                          self.test_resource_collection.get_member, '2')
        self.conn.get.assert_called_once_with(path='Fakes/2')

    def test_get_members(self):
        # | GIVEN |
        # setting some valid member paths
        member_ids = ('1', '2')
        self.test_resource_collection.members_identities = member_ids
        # | WHEN |
        result = self.test_resource_collection.get_members()
        # | THEN |
        self.assertTrue(isinstance(result, list))
        for val in result:
            self.assertTrue(isinstance(val, TestResouce))
            self.assertTrue(val.identity in member_ids)
            self.assertEqual('1.0.x', val.redfish_version)
| 37.198582 | 78 | 0.654909 | 609 | 5,245 | 5.474548 | 0.272578 | 0.031194 | 0.052789 | 0.054589 | 0.477205 | 0.383323 | 0.287043 | 0.263647 | 0.253149 | 0.253149 | 0 | 0.012716 | 0.250334 | 5,245 | 140 | 79 | 37.464286 | 0.835198 | 0.253003 | 0 | 0.297297 | 0 | 0 | 0.04792 | 0 | 0 | 0 | 0 | 0 | 0.189189 | 1 | 0.175676 | false | 0.027027 | 0.054054 | 0.013514 | 0.310811 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9380c40a504e6bed441a01b8170bbbf1c938fe3e | 2,719 | py | Python | main.py | Tiggax/SoundSnake | 9d841fff431d37beeb30e73276f645bf422987bb | [
"MIT"
] | null | null | null | main.py | Tiggax/SoundSnake | 9d841fff431d37beeb30e73276f645bf422987bb | [
"MIT"
] | null | null | null | main.py | Tiggax/SoundSnake | 9d841fff431d37beeb30e73276f645bf422987bb | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
from gui.uis.windows.main_window.functions_main_window import *
import sys
import os

from qt_core import *
from gui.core.json_settings import Settings
from gui.uis.windows.main_window import *
from gui.widgets import *

os.environ["QT_FONT_DPI"] = "96"


class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()

        # Load widgets from "gui\uis\main_window\ui_main.py"
        self.ui = UI_MainWindow()
        self.ui.setup_ui(self)

        # LOAD SETTINGS
        settings = Settings()
        self.settings = settings.items

        # SETUP MAIN WINDOW
        self.hide_grips = True  # Show/Hide resize grips
        SetupMainWindow.setup_gui(self)

        # SHOW MAIN WINDOW
        self.show()

    # LEFT MENU BTN IS CLICKED
    # Run function when btn is clicked
    # Check function by object name / btn_id
    # ///////////////////////////////////////////////////////////////
    def btn_clicked(self):
        # GET BTN CLICKED
        btn = SetupMainWindow.setup_btns(self)

        # click events
        # HOME
        if btn.objectName() == "btn_home":
            self.ui.left_menu.select_only_one(btn.objectName())
            MainFunctions.set_page(self, self.ui.load_pages.page_1)

        # Search
        if btn.objectName() == "btn_search":
            self.ui.left_menu.select_only_one(btn.objectName())
            MainFunctions.set_page(self, self.ui.load_pages.page_2)

        # Settings
        if btn.objectName() == "btn_settings":
            self.ui.left_menu.select_only_one(btn.objectName())
            MainFunctions.set_page(self, self.ui.load_pages.page_3)

    # TITLE BAR MENU
    # ///////////////////////////////////////////////////////////////
    # SETTINGS TITLE BAR

    # LEFT MENU BTN IS RELEASED
    # Run function when btn is released
    # Check function by object name / btn_id
    # ///////////////////////////////////////////////////////////////
    def btn_released(self):
        # GET BTN CLICKED
        btn = SetupMainWindow.setup_btns(self)

        # DEBUG
        print(f"Button {btn.objectName()}, released!")

    # RESIZE EVENT
    # ///////////////////////////////////////////////////////////////
    def resizeEvent(self, event):
        SetupMainWindow.resize_grips(self)

    # MOUSE CLICK EVENTS
    # ///////////////////////////////////////////////////////////////
    def mousePressEvent(self, event):
        # SET DRAG POS WINDOW
        self.dragPos = event.globalPos()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon("icon.ico"))
    window = MainWindow()
sys.exit(app.exec()) | 28.621053 | 73 | 0.551673 | 299 | 2,719 | 4.822742 | 0.344482 | 0.033287 | 0.020804 | 0.037448 | 0.351595 | 0.323856 | 0.286408 | 0.286408 | 0.286408 | 0.172677 | 0 | 0.002933 | 0.247517 | 2,719 | 95 | 74 | 28.621053 | 0.701857 | 0.309305 | 0 | 0.121951 | 0 | 0 | 0.051379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.170732 | 0 | 0.317073 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93814b0f89541fdaac833798b0100b3575df0b96 | 6,158 | py | Python | checkCardReaders.py | hansliss/IECTools | 96a348f6488eaf3a1263646c77862b0f9a68294b | [
"BSD-2-Clause"
] | null | null | null | checkCardReaders.py | hansliss/IECTools | 96a348f6488eaf3a1263646c77862b0f9a68294b | [
"BSD-2-Clause"
] | null | null | null | checkCardReaders.py | hansliss/IECTools | 96a348f6488eaf3a1263646c77862b0f9a68294b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
"""checkCardReaders.py: Use a local MySQL database to keep track of card readers and produce a 'changes' JSon document."""
__author__ = "Hans Liss"
__copyright__ = "Copyright 2020, Hans Liss"
__license__ = "BSD 2-Clause License"
__version__ = "1.1"
__maintainer__ = "Hans Liss"
__email__ = "Hans@Liss.nu"
__status__ = "Example code"
from zeep import Client
import MySQLdb
import uuid
import sys
import datetime
import configparser
import json
import argparse
## Read command-line parameters and configuration file
parser = argparse.ArgumentParser(description='Find changes in CardReaders')
parser.add_argument('-c', '--configfile', required=True,
help='path to configuration file')
parser.add_argument('-i', '--instance', required=True,
help='name of the instance to use from the config file')
parser.add_argument('-l', '--logprefix',
help='prefix for log files. Datestamp will be added')
parser.add_argument('-f', '--force', action='store_true',
help='register changes even when deletes exceeds 200')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.configfile)
if args.logprefix is not None:
logfile = open(datetime.date.today().strftime(args.logprefix + "%Y-%m-%d"), "a")
def log(str):
logfile.write(datetime.datetime.now().strftime("%H:%M:%S\t") + str + "\n")
else:
def log(str):
pass
wsdl = config[args.instance]['wsdl']
endpoint = config[args.instance]['endpoint']
sessiontoken = config[args.instance]['sessiontoken']
# We do some fairly ugly string concatenation to create SQL queries below.
readerFields = ['Id', 'ParentFolderPath', 'Name', 'Description', 'AccessPointId', 'CardReaderType', 'SecurityLevel']
try:
conn = MySQLdb.connect(
host = config[args.instance]['db_host'],
port = 3365,
user = config[args.instance]['db_user'],
password = config[args.instance]['db_password'],
database = config[args.instance]['db_db'],
)
dbCursor = conn.cursor()
dbCursor.execute("CREATE TEMPORARY TABLE readersTemp like readers")
except MySQLdb.Error as e:
print(f"Error connecting to MySQL Platform: {e}")
sys.exit(1)
## Create a SOAP client and from that, create a new service with the correct endpoint
client = Client(wsdl)
client.service._binding_options['address'] = endpoint
## Request data should contain whatever is in the 'request' subdocument within the
## SOAP request XML
request_data={'request' : {'SessionToken' : uuid.UUID('{' + sessiontoken + '}'),
'MessageId' : uuid.uuid4(),
'PageSize' : 100}}
done=False
pageNo=0
doneCount=0
while (not done):
request_data['request']['PageIndex']=pageNo
## Call the method and get a response object
try:
response=client.service.GetCardReadersList(**request_data)
except:
e = sys.exc_info()[0]
print(f"SOAP Error: {e}")
sys.exit(1)
totalCount = response.TotalCount
batch = []
for reader in response.Results.__values__['CardReaderModel']:
values = []
for fieldName in readerFields:
values.append(reader[fieldName])
batch.append(values)
try:
queryString = "INSERT INTO readersTemp ("
first = True
for fieldName in readerFields:
if(first):
first = False
else:
queryString += ","
queryString += fieldName
queryString += ") values (%s,%s,%s,%s,%s,%s,%s)"
dbCursor.executemany(queryString, batch)
except MySQLdb.Error as e:
print(f"Error on insert: {e}")
sys.exit(1)
doneCount = doneCount + len(response.Results.__values__['CardReaderModel'])
pageNo = pageNo + 1
#print("Done %d out of %d, at page %d" % (doneCount, totalCount, pageNo))
if doneCount >= totalCount:
done = True
deleted = [];
added = [];
modified = [];
# These are deleted
dbCursor.execute('SELECT r.Id from readers r left join readersTemp rt on rt.Id = r.Id where rt.Id IS NULL')
for row in dbCursor:
deleted.append(row[0])
log("Deleted: %d" % row[0])
# select all from readersTemp left join readers
fieldListRt = ""
fieldListR = ""
fieldListChanged = ""
first = True
for fieldName in readerFields:
if(first):
first = False
else:
fieldListRt += ","
fieldListR += ","
fieldListChanged += " OR "
fieldListRt += "rt." + fieldName
fieldListR += "r." + fieldName
fieldListChanged += "NOT ( rt." + fieldName + " <=> r." + fieldName + ")"
queryString = "SELECT " + fieldListRt + "," + fieldListR + " from readersTemp rt left join readers r on rt.Id = r.Id where r.Id IS NULL or " + fieldListChanged
dbCursor.execute(queryString)
for row in dbCursor:
# If right side of JOIN is null, the reader has been added
if row[len(readerFields)] is None:
reader = {}
for i in range(len(readerFields)):
reader[readerFields[i]] = row[i]
added.append(reader)
log("Added: %d" % row[0])
else:
reader = {}
reader['Id'] = row[0]
for i in range(len(readerFields)):
if row[i] != row[i + len(readerFields)]:
reader[readerFields[i]] = row[i]
log("Modified: %d field %s changed from \"%s\" to \"%s\"" % (row[0], readerFields[i], row[i + len(readerFields)], row[i]))
modified.append(reader)
if len(deleted) < 200 or args.force:
if len(deleted) > 0 or len(added) > 0 or len(modified) > 0:
dbCursor.execute("DELETE FROM readers")
dbCursor.execute("INSERT INTO readers SELECT * FROM readersTemp")
conn.commit()
update={}
update['timestamp'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")  # UTC, to match the trailing Z
update['deleted'] = deleted
update['added'] = added
update['modified'] = modified
print(json.dumps(update, indent=2))
else:
print("The number of deletes is large (%d) and the -f flag was not given. Doing nothing." % len(deleted))
conn.close()
| 34.402235 | 159 | 0.631049 | 751 | 6,158 | 5.105193 | 0.340879 | 0.018258 | 0.032864 | 0.020866 | 0.091028 | 0.084507 | 0.065728 | 0.045905 | 0.029212 | 0.029212 | 0 | 0.008066 | 0.234979 | 6,158 | 178 | 160 | 34.595506 | 0.805774 | 0.110263 | 0 | 0.222222 | 0 | 0.020833 | 0.222975 | 0.007878 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0.013889 | 0.055556 | 0 | 0.069444 | 0.034722 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9383005dd0c349133f874e23088ec259d85b45a7 | 8,460 | py | Python | final/180401040.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 2 | 2020-05-20T19:25:37.000Z | 2021-04-01T21:26:54.000Z | final/180401040.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 15 | 2020-05-18T14:53:18.000Z | 2020-06-26T09:20:50.000Z | final/180401040.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 155 | 2020-04-28T16:14:38.000Z | 2020-06-26T09:46:59.000Z | # name-surname : Ramazan AYDIN --- student number : 180401040
print("\n")
"""
In this Python code we:
fit the data in the file given as a parameter (veriler.txt) to polynomials of degree 1, 2, 3, 4, 5 and 6 in turn,
and work out which of these polynomials reproduces the data with the least error.
We then compute the integral of the polynomial we identified between a (the last digit of the student number)
and b (the number of lines in the file).
We compute the same integral using the data in the veriler.txt file directly.
Finally, we explain in the yorum.txt file why the results of these two integrals come out different.
"""
# Open the veriler.txt file and read all of the data in it. ***********
with open("veriler.txt", "r", encoding='utf-8') as file:
dizi = [] # create a list named dizi
for i in file.read().split():
dizi.append(int(i)) # add the values read from veriler.txt to the list
# Helper function that returns the size of the list.
def size(dizi):
return len(dizi)
n = size(dizi) # assign the size of the list to n
# Square-matrix solver: elimination on the augmented system.
def karematris(calculateMATRIS):
dizi_m = calculateMATRIS.copy()
column = len(calculateMATRIS[0])
line = len(calculateMATRIS)
# forward elimination towards a triangular matrix
for s in range(line - 1):
for y in range(line - 1 - s):
multiplier = dizi_m[y][s] / dizi_m[y + 1][s]
for b in range(column):
dizi_m[y][b] += -multiplier * dizi_m[y + 1][b]
# backward elimination towards a diagonal matrix
for m in range(line - 1, 0, -1):
for n in range(line - 1, line - 1 - m, -1):
multiplier = dizi_m[n][m] / dizi_m[n - 1][m]
for h in range(column):
dizi_m[n][h] += -multiplier * dizi_m[n - 1][h]
cozum = []
for s in range(line - 1, -1, -1):
x = dizi_m[s][line] / dizi_m[s][line - s - 1]
cozum.append(x)
return cozum
# compute the total of the y values
def totalY(dizi):
y = sum(dizi)
return y
totalyi = totalY(dizi)
# list holding the sums of the powers of x_i
def totalxi(n):
total_x_kare = [] # create a list named total_x_kare
for j in range(1, 13, 1):
kare_x = 0 # initialize kare_x to 0
for p in range(n):
kare_x += (p + 1) ** j # sum the j-th powers of x over all elements of the list
total_x_kare.append(kare_x)
total_x_kare.insert(0, n) # add the computed values to the list
return total_x_kare
# list holding the x^j (degree 1..6) sums weighted by y for all the polynomials
def xiyiToplam(n, dizi, totalY):
derece_x_totaly = []
for j in range(1, 7, 1): # run 6 times, once for each of the 6 polynomials
deger = 0
for eleman in range(n):
deger += (eleman + 1) ** j * dizi[eleman]
derece_x_totaly.append(deger)
derece_x_totaly.insert(0, totalY)
return derece_x_totaly
"""
Since we go up to the 6th-degree polynomial, up to 7 'a' coefficients are produced; therefore we run the loop up to m=8:
a0, a1, a2, a3, a4, a5, a6, a7
"""
def value_of_a(n, dizi, m=8):
cozum = []
total_x_kare = totalxi(n)
y = totalY(dizi)
derece_x_totaly = xiyiToplam(n, dizi, y)
for x in range(2, m, 1):
yenidizi = []
for i in range(x):
yenidizi.append([])
for j in range(x):
yenidizi[i].append(total_x_kare[j + i])
yenidizi[i].append(derece_x_totaly[i])
if (i == x - 1):
cozum.append(karematris(yenidizi))
yenidizi.clear()
return cozum
"""
The deger_a list
holds the 'a' coefficients of each n-th degree polynomial as a list
"""
deger_a = value_of_a(n, dizi)
"""
Correlation expresses a linear relationship between two variables.
The correlation coefficient is a value used to quantify the relationship between the variables.
The correlation coefficient:
** the closer it gets to 1, the stronger the same-direction relationship; as one variable increases, so does the other.
** the closer it gets to -1, the stronger the opposite-direction relationship; as one variable increases, the other decreases.
** the closer it gets to 0, the weaker the relationship between the two variables.
Since this assignment works on data that increase in the same direction, we will take the polynomial whose
correlation coefficient is closest to 1 as the best-fitting polynomial.
"""
"""
now we compute the correlation values
"""
def Hata_Hesaplama(x, dizi, n, totalY):
S_R = 0
S_T = 0
y = totalY / n
size = len(x)
for i in range(n):
gecici = 0
for j in range(size):
if j == 0:
gecici += x[j]
else:
gecici += x[j] * (i + 1) ** j
S_R += (dizi[i] - gecici) ** 2
S_T += (dizi[i] - y) ** 2
r = ((S_T - S_R) / S_T) ** (1 / 2)
return r
"""
among the computed correlation values we must find the one closest to 1 and return it
"""
'''
Here we build a list and store the correlation values we obtained in it.
'''
KorelasyonValue = []
for i in deger_a:
e = Hata_Hesaplama(i,dizi,n , totalyi)
KorelasyonValue.append(e)
def en_iyi_kolerasyon(dizi): # This function returns the correlation value closest to 1
    sirali_dizi = sorted(dizi) # sort the list with the sorted() function
biggest = sirali_dizi[-1]
b = 1
while (biggest != dizi[b - 1]):
b = b + 1
return b, biggest
sayici, en_iyi_korelasyon_degeri = en_iyi_kolerasyon(KorelasyonValue)
print("Sonucu en düşük hata payi ile hesaplayan polinomun derecesi : ", sayici)
print("Sonucu en düşük hata payi ile hesaplayan polinomun Korelasyon değeri : ", en_iyi_korelasyon_degeri)
print("\n")
polinom = deger_a[sayici - 1]
def fonksiyon(w , polinom1 = polinom ):
u = polinom1
total_value = 0
for i in range(len(u)):
total_value += u[i] * (w ** i)
return total_value
"""
This function answers question 2.
We compute the integral using the polynomial with the best correlation value that we identified, and print the result.
"""
def polinom_ile_integral_hesaplama(n):
# my student number is 180401040, so its last digit is 0; therefore we take a = 10.
a = 10
b = n
deltax = 0.001
integral = 0
size = int((b - a) / deltax)
for i in range(size):
integral += deltax * (fonksiyon(a) + fonksiyon(a + deltax)) / 2
a += deltax
print("Polinom kullanarak hesaplanan sonuc : ", integral)
"""
This function answers question 3.
We compute the integral using the data in the veriler.txt file (without using the polynomial) and print that result as well.
"""
def veriler_ile_integral_hesaplama(n, dizi):
# 180401040 ends in 0, so we take the value of a as 10.
a = 10
b = n
integral = 0
for i in range(a - 1, b - 1):
integral += (dizi[i] + dizi[i + 1]) / 2
print("Veriler kullanılarak hesaplanan sonuc : ", integral)
"""
In the yorum.txt file we explain why the two integral values we computed give different results.
"""
def yorumlarim():
with open("180401040_yorum.txt", "w", encoding='utf-8') as dosya :
dosya.write("ad - soyad : Ramazan AYDIN \n")
dosya.write("NUMARA : 180401040 \n")
dosya.write(" Hesaplamalarımda yamuk metodunu kullandım.\n ")
dosya.write("Hesapladigimiz 2 integral değeri de öngördüğümüz gibi birbirinden farkli çikmistir. \n")
dosya.write(" Bunun nedeni ; \n")
dosya.write("İntegral Hesabi yapılırken , verilen polinomu küçük dikdörtgenlere bölerek ve bunların alanlarını toplayarak hesaplamaya çalışırız. \n ")
dosya.write("Deltax(dikdörtgenin eni) değerini ne kadar küçültürsek ,işleme katılacak alan sayısı artar ve bulacağımız değer o kadar gerçeğe yakın olur.\n ")
dosya.write("Ancak bu iki integral arasındaki farkın temel sebebi , birinci integrali polinom haline getirirken \n ")
dosya.write("belirli bir korelasyon sayısına göre polinoma yaklaştırmamızdandır.\n ")
dosya.write("Bu sebepten, deltax değerlerini eşit aldığımızda bile sonuç farklı olur. \n ")
polinom_ile_integral_hesaplama(n)
veriler_ile_integral_hesaplama(n, dizi)
yorumlarim()
| 33.046875 | 167 | 0.643026 | 1,125 | 8,460 | 4.761778 | 0.304 | 0.024827 | 0.01848 | 0.0112 | 0.11723 | 0.070189 | 0.01792 | 0.01792 | 0.01792 | 0 | 0 | 0.024312 | 0.260993 | 8,460 | 255 | 168 | 33.176471 | 0.832054 | 0.109574 | 0 | 0.088235 | 0 | 0.007353 | 0.194739 | 0.004515 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0 | 0.007353 | 0.154412 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
93843a5470b274737095d437c24a1bad935e4e23 | 1,757 | py | Python | hailo_model_zoo/utils/downloader.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | 2 | 2021-07-20T15:09:51.000Z | 2021-11-17T11:05:02.000Z | hailo_model_zoo/utils/downloader.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | null | null | null | hailo_model_zoo/utils/downloader.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | null | null | null | """
model files downloader
Usage:
>>> from downloader import download
>>> from logging import getLogger
>>> model_files_dir = '.'
>>> hailo_storage = 'https://hailo-modelzoo-pub.s3.eu-west-2.amazonaws.com/'
>>> model_path = 'Classification/mobilenet_v1/pretrained/mobilenet_v1_1_0_224.ckpt.zip'
>>> download(hailo_storage+model_path, model_files_dir, getLogger())
"""
import logging
import zipfile
from pathlib import Path
from requests import get
from tqdm.auto import tqdm
from typing import Union
def _download(url: str, dst: Path) -> None:
resp = get(url, allow_redirects=True, stream=True)
with dst.open('wb') as fout:
with tqdm(
desc=dst.name,
miniters=1,
total=int(resp.headers.get('content-length', 0)),
unit='B',
unit_divisor=1024,
unit_scale=True,
) as progress_bar:
for chunk in resp.iter_content(chunk_size=4096):
fout.write(chunk)
progress_bar.update(len(chunk))
def download(url: str, dst_dir: Union[str, Path], logger: logging.Logger) -> str:
"""downloads a file from given url, and returns the downloaded file name"""
dst_dir = Path(dst_dir)  # accept str as well as Path, as the type hint promises
dst_dir.mkdir(parents=True, exist_ok=True)
dst = dst_dir / Path(url).name
if not (dst.exists() and dst.is_file()):
logger.debug(f'downloading {url} into {dst_dir}')
_download(url, dst)
else:
logger.debug(f'{dst.name} already exists inside {dst_dir}. Skipping download')
if len(list(dst.parent.iterdir())) == 1:
logger.debug(f'unzipping {dst} into {dst_dir}')
with zipfile.ZipFile(dst, 'r') as zip_fp:
zip_fp.extractall(dst_dir)
return dst.name
| 31.945455 | 91 | 0.638589 | 240 | 1,757 | 4.5375 | 0.466667 | 0.038567 | 0.033058 | 0.031221 | 0.036731 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01551 | 0.229368 | 1,757 | 54 | 92 | 32.537037 | 0.788774 | 0.258395 | 0 | 0 | 0 | 0 | 0.110078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa730a81c19db224339c86a776c2dccf38b63307 | 3,059 | py | Python | tests/test_resource_hh.py | ollis-nwcouncil/NRELWindData | bc5146ea5577e4ab1b86587d15b783fd302f3895 | [
"BSD-3-Clause"
] | null | null | null | tests/test_resource_hh.py | ollis-nwcouncil/NRELWindData | bc5146ea5577e4ab1b86587d15b783fd302f3895 | [
"BSD-3-Clause"
] | null | null | null | tests/test_resource_hh.py | ollis-nwcouncil/NRELWindData | bc5146ea5577e4ab1b86587d15b783fd302f3895 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pytests for resource handlers with a single hub height
"""
import numpy as np
import os
import pytest
from rex.renewable_resource import WindResource
from rex import TESTDATADIR
def test_single_hh():
"""Test that resource with data at a single hub height will always return
the data at that hub height (and also return a warning)"""
h5 = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012_incomplete_1.h5')
with WindResource(h5) as wind:
# Existing datasets are P0m and T80m
assert np.array_equal(wind['pressure_80m'], wind['pressure_0m'])
assert np.array_equal(wind['temperature_10m'], wind['temperature_80m'])
def test_check_hh():
"""Test that check hub height method will return the hh at the single
windspeed"""
h5 = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012_incomplete_2.h5')
msg = ('Wind resource method _check_hub_height() failed! Should have '
       'returned 100 because there is only windspeed at 100m')
with WindResource(h5) as wind:
assert (wind._check_hub_height(120) == 100), msg
def test_sam_df_hh():
"""Test that if there's only windspeed at one HH, all data is returned
from that hh
"""
h5 = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012_incomplete_2.h5')
with WindResource(h5) as wind:
sam_df = wind._get_SAM_df('pressure_80m', 0)
arr1 = wind['pressure_100m', :, 0] * 9.86923e-6
arr2 = sam_df['pressure_100m'].values
msg1 = ('Error: pressure should have been loaded at 100m '
'b/c there is only windspeed at 100m.')
assert np.array_equal(arr1, arr2), msg1
def test_preload_sam_hh():
"""Test the preload_SAM method with a single hub height windspeed in res.
In this case, all variables should be loaded at the single windspeed hh
"""
h5 = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012_incomplete_2.h5')
sites = slice(0, 200)
hub_heights = 80
SAM_res = WindResource.preload_SAM(h5, sites, hub_heights)
with WindResource(h5) as wind:
p = wind['pressure_100m'] * 9.86923e-6
t = wind['temperature_100m']
msg1 = ('Error: pressure should have been loaded at 100m '
'b/c there is only windspeed at 100m.')
msg2 = ('Error: temperature should have been loaded at 100m '
'b/c there is only windspeed at 100m.')
assert np.allclose(SAM_res['pressure', :, :].values, p), msg1
assert np.allclose(SAM_res['temperature', :, :].values, t), msg2
def execute_pytest(capture='all', flags='-rapP'):
"""Execute module as pytest with detailed summary report.
Parameters
----------
capture : str
Log or stdout/stderr capture option. ex: log (only logger),
all (includes stdout/stderr)
flags : str
Which tests to show logs and results for.
"""
fname = os.path.basename(__file__)
pytest.main(['-q', '--show-capture={}'.format(capture), fname, flags])
if __name__ == '__main__':
execute_pytest()
| 33.25 | 79 | 0.662308 | 440 | 3,059 | 4.434091 | 0.322727 | 0.032291 | 0.038442 | 0.024603 | 0.325474 | 0.239364 | 0.213737 | 0.213737 | 0.213737 | 0.213737 | 0 | 0.055838 | 0.227198 | 3,059 | 91 | 80 | 33.615385 | 0.769459 | 0.257601 | 0 | 0.272727 | 0 | 0 | 0.312213 | 0.064279 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.113636 | false | 0 | 0.113636 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa763da6a9be5df8b6085a6402d291d454f9e88f | 2,482 | py | Python | qdb_cloudwatch/cloudwatch.py | bureau14/qdb-cloudwatch-exporter | ef6e433f7a2085a01db341122315c0612ce97354 | [
"BSD-3-Clause"
] | null | null | null | qdb_cloudwatch/cloudwatch.py | bureau14/qdb-cloudwatch-exporter | ef6e433f7a2085a01db341122315c0612ce97354 | [
"BSD-3-Clause"
] | null | null | null | qdb_cloudwatch/cloudwatch.py | bureau14/qdb-cloudwatch-exporter | ef6e433f7a2085a01db341122315c0612ce97354 | [
"BSD-3-Clause"
] | null | null | null | import boto3
def _get_client():
return boto3.client('cloudwatch')
def _metric_suffix(s):
return s.rsplit('.', 1)[1]
def _coerce_metric(k, v):
if (k.startswith('cpu.')):
# We don't expose CPU metrics through Cloudwatch, as this is already collected
# by the regular metrics.
return None
elif (k == 'license.memory'):
return ('Bytes', float(v))
sufx = _metric_suffix(k)
if sufx == 'total_ns':
return ('Microseconds', float(v) / 1000)
elif (sufx == 'duration_us' or
sufx == 'time_us'):
return ('Microseconds', float(v))
elif (sufx == 'remaining_days'):
return ('Seconds', float(v * 86400))
elif (sufx.startswith('bytes') or sufx.endswith('bytes')):
return ('Bytes', float(v))
elif (sufx.endswith('count')):
return ('Count', float(v))
else:
print('unknown suffix: ', sufx, ', k: ', k, ', v: ', v)
return ('None', float(v))
def _to_metric(k, v):
try:
x = _coerce_metric(k, v)
if x:
(u, v_) = x
return {'MetricName': k,
'Value': v_,
'Unit': u}
except Exception:
    return None
def _qdb_to_cloudwatch(stats):
# We want to flatten all metrics into a tuple of 3 items:
# - node_id
# - user_id
# - measurement
ret = list()
for node_id,xs in stats.items():
for user_id,xs_ in xs['by_uid'].items():
dims = [{'Name': 'UserId',
'Value': str(user_id)},
{'Name': 'NodeId',
'Value': str(node_id)}]
for k,v in xs_.items():
m = _to_metric(k, v)
if m:
m['Dimensions'] = dims
ret.append(m)
dims = [{'Name': 'NodeId',
'Value': str(node_id)}]
for k,v in xs['cumulative'].items():
m = _to_metric(k, v)
if m:
m['Dimensions'] = dims
ret.append(m)
return ret
def push_stats(stats, namespace):
client = _get_client()
stats_ = _qdb_to_cloudwatch(stats)
metrics_per_req = 20
metrics = [stats_[i:i+metrics_per_req] for i in range(0, len(stats_), metrics_per_req)]
for metric in metrics:
client.put_metric_data(Namespace=namespace,
                       MetricData=metric)
print("Pushed {} metrics".format(len(stats_)))
| 25.587629 | 91 | 0.514102 | 301 | 2,482 | 4.063123 | 0.362126 | 0.013083 | 0.032706 | 0.032706 | 0.152085 | 0.12592 | 0.12592 | 0.12592 | 0.12592 | 0.12592 | 0 | 0.010468 | 0.345689 | 2,482 | 96 | 92 | 25.854167 | 0.742611 | 0.076551 | 0 | 0.215385 | 0 | 0 | 0.121225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.015385 | 0.030769 | 0.307692 | 0.030769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa7b2b7882cb8f5c695df5b6229adb9f484bbb44 | 812 | py | Python | tests/test_grid.py | alexras/boomslang | 62b6dc3a183fd8686b165c4abdb55d10d537b4ab | [
"BSD-3-Clause"
] | 4 | 2015-02-24T06:50:08.000Z | 2020-08-08T03:23:32.000Z | tests/test_grid.py | alexras/boomslang | 62b6dc3a183fd8686b165c4abdb55d10d537b4ab | [
"BSD-3-Clause"
] | 13 | 2017-07-17T15:52:09.000Z | 2017-07-17T15:52:09.000Z | tests/test_grid.py | alexras/boomslang | 62b6dc3a183fd8686b165c4abdb55d10d537b4ab | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from boomslang import Line, Plot
from ImageComparisonTestCase import ImageComparisonTestCase
import unittest
class GridTest(ImageComparisonTestCase, unittest.TestCase):
def __init__(self, testCaseName):
super(GridTest,self).__init__(testCaseName)
self.imageName = "grid.png"
def constructImage(self):
plot = Plot()
line = Line()
line.yValues = [25, 40, 30, 23, 10, 50]
line.xValues = range(len(line.yValues))
plot.add(line)
plot.xLabel = "X Label"
plot.yLabel = "Y Label"
plot.yLimits = (0, 60)
plot.grid = True
plot.save(self.imageName)
ImageComparisonTestCase.register(GridTest)
if __name__ == "__main__":
test = GridTest("testImageComparison")
test.constructImage()
| 24.606061 | 59 | 0.657635 | 87 | 812 | 5.954023 | 0.574713 | 0.030888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024077 | 0.232759 | 812 | 32 | 60 | 25.375 | 0.807384 | 0.024631 | 0 | 0 | 0 | 0 | 0.061947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa7c44876b17f83341951ab9a347d918c59e572a | 1,496 | py | Python | ppb_timing.py | ironfroggy/ppb_timing | 4add69342d7e78146c99a604957129161d97dfed | [
"MIT"
] | null | null | null | ppb_timing.py | ironfroggy/ppb_timing | 4add69342d7e78146c99a604957129161d97dfed | [
"MIT"
] | null | null | null | ppb_timing.py | ironfroggy/ppb_timing | 4add69342d7e78146c99a604957129161d97dfed | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from types import FunctionType
from typing import Optional
import ppb
from ppb.systemslib import System
from ppb.utils import get_time
@dataclass
class Timer:
end_time: float
callback: FunctionType
repeating: float = 0
clear: bool = False
until: float = None
def __hash__(self):
return hash(id(self))
def cancel(self):
self.clear = True
class Timers(System):
timers = set()
@classmethod
def delay(cls, seconds, func):
t = Timer(get_time() + seconds, func)
cls.timers.add(t)
return t
@classmethod
def repeat(cls, seconds, func, until=None):
n = get_time()
# Guard against until=None: `n + until` would raise a TypeError when no end time is given.
t = Timer(n + seconds, func, repeating=seconds,
          until=None if until is None else n + until)
cls.timers.add(t)
return t
@classmethod
def on_idle(cls, idle, signal):
clear = []
for t in list(cls.timers):
if t.clear:
clear.append(t)
else:
now = get_time()
if now >= t.end_time:
if t.until is None or t.until > now:
t.callback()
if t.repeating > 0:
if t.until is None or t.until > now:
t.end_time += t.repeating
continue
clear.append(t)
for t in clear:
cls.timers.remove(t)
delay = Timers.delay
repeat = Timers.repeat
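# A minimal usage sketch (added; assumes a running ppb game loop, which is what
# drives Timers.on_idle via Idle events):
#
#     delay(1.0, lambda: print("fires once, one second from now"))
#     t = repeat(0.5, lambda: print("fires every half second"), until=3.0)
#     t.cancel()  # would stop the repeating timer early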
| 23.746032 | 72 | 0.532754 | 181 | 1,496 | 4.337017 | 0.320442 | 0.035669 | 0.035669 | 0.033121 | 0.152866 | 0.152866 | 0.152866 | 0.152866 | 0.066242 | 0.066242 | 0 | 0.002162 | 0.381684 | 1,496 | 62 | 73 | 24.129032 | 0.846486 | 0 | 0 | 0.22 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.12 | 0.02 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa7f5aafe91ba7d0f8bafd8e2dd6804a61920b1e | 1,974 | py | Python | molot/context.py | sydspace/molot | 61541817a189af7340c5fa171c78bbfba1c07836 | [
"MIT"
] | 3 | 2019-08-28T23:46:45.000Z | 2019-10-03T05:46:32.000Z | molot/context.py | sydspace/molot | 61541817a189af7340c5fa171c78bbfba1c07836 | [
"MIT"
] | null | null | null | molot/context.py | sydspace/molot | 61541817a189af7340c5fa171c78bbfba1c07836 | [
"MIT"
] | null | null | null | import os
import shutil
import logging
import urllib.request
import subprocess
from molot.builder import git_hash
class Context:
"""Base context with common operations."""
def ensure_dir(self, path: str, keep_files: bool):
"""Ensures directory path exists.
Arguments:
path {str} -- Directory path.
keep_files {bool} -- Keep existing files if already exists if true.
"""
if os.path.exists(path):
if not keep_files:
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def download_files(
self, file_urls: dict, out_path: str, ignore_existing: bool = False
):
"""Downloads files into target directory.
Arguments:
file_urls {dict} -- Dict of filename => url for download.
out_path {str} -- Output directory path.
Keyword Arguments:
ignore_existing {bool} -- Ignores existing files and re-downloads if true. (default: {False})
"""
if not ignore_existing:
existing_files = os.listdir(out_path)
for f in existing_files:
if f in file_urls:
logging.info("Already exists %s", f)
file_urls.pop(f, None)
else:
os.remove(os.path.join(out_path, f))
for filename in file_urls:
url = file_urls[filename]
logging.info("Downloading %s", url)
urllib.request.urlretrieve(url, os.path.join(out_path, filename))
def add_git_hash(self, out_path: str):
"""Adds Git hash to output.
Arguments:
out_path {str} -- Output directory path.
"""
output = git_hash()
logging.info("Writing Git hash %s", output)
out_file_path = os.path.join(out_path, "git-hash")
with open(out_file_path, "w") as file:
file.write(output)
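# A minimal usage sketch of Context (added; the URL and directory are placeholders):
#
#     ctx = Context()
#     ctx.ensure_dir("/tmp/build", keep_files=False)
#     ctx.download_files({"a.txt": "https://example.com/a.txt"}, "/tmp/build")
#     ctx.add_git_hash("/tmp/build")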
| 29.462687 | 105 | 0.572948 | 237 | 1,974 | 4.637131 | 0.329114 | 0.050955 | 0.036397 | 0.035487 | 0.099181 | 0.052775 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33232 | 1,974 | 66 | 106 | 29.909091 | 0.833839 | 0.275076 | 0 | 0.114286 | 0 | 0 | 0.04528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.171429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa7fad14befa168a6dc9f22fdd5717589f466dc3 | 2,720 | py | Python | process.py | ravsa/data_scraping | e7684030e1ff65537fc337f21057053df0e6add0 | [
"Apache-2.0"
] | 2 | 2018-02-18T18:48:44.000Z | 2018-02-22T13:21:21.000Z | process.py | ravsa/data_scraping | e7684030e1ff65537fc337f21057053df0e6add0 | [
"Apache-2.0"
] | 1 | 2018-02-21T17:27:22.000Z | 2018-02-21T17:27:22.000Z | process.py | ravsa/data_scraping | e7684030e1ff65537fc337f21057053df0e6add0 | [
"Apache-2.0"
] | 2 | 2018-06-14T06:09:29.000Z | 2019-01-08T18:41:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from base_functions import BaseFunctions
class SpringIO(BaseFunctions):
def __init__(self):
self.sub_eco = 'spring'
self._output_file = None
super().__init__(self.sub_eco)
def process(self):
for item in self._config.get('dependencies', []):
for version, content in item.items():
print("VERSION: ", version)
for group, dependencies in content.items():
pkg_query = '&style=' + \
'&style='.join([dep['id'] for dep in dependencies])
resp = self.get_query_result(version, pkg_query)
self.processed_data[group] += [dict(pkg, **{'categories': group,
'version': version})
for pkg in resp]
print("GROUP: ", group)
def __str__(self):
return self.sub_eco
class VertxIO(BaseFunctions):
def __init__(self):
self.sub_eco = 'vertx'
self._output_file = None
super().__init__(self.sub_eco)
def process(self):
for version in self._versions:
print("VERSION: ", version)
for item in self._config.get('dependencies', []):
group = item.get('category')
items = item.get('items')
pkg_query = ','.join([it['artifactId']
for it in items if it.get('artifactId')])
resp = self.get_query_result(version, pkg_query)
self.processed_data[group] += [dict(pkg, **{'categories': group,
'version': version
})
for pkg in resp]
print("GROUP: ", group)
def __str__(self):
return self.sub_eco
class WildflyIO(BaseFunctions):
def __init__(self):
self.sub_eco = 'wildfly'
self._output_file = None
super().__init__(self.sub_eco)
def process(self):
for item in self._config.get('dependencies', []):
group = item.get('category')
pkg_query = '&d=' + \
('&d='.join([frac['artifactId']
for frac in item.get('fractions')]))
resp = self.get_query_result('', pkg_query)
self.processed_data[group] += [dict(pkg, **{'categories': group})
for pkg in resp]
print("GROUP: ", group)
def __str__(self):
return self.sub_eco
| 35.324675 | 84 | 0.479412 | 264 | 2,720 | 4.670455 | 0.234848 | 0.051095 | 0.072993 | 0.058394 | 0.675588 | 0.657745 | 0.657745 | 0.57502 | 0.57502 | 0.57502 | 0 | 0.001232 | 0.402941 | 2,720 | 76 | 85 | 35.789474 | 0.758005 | 0.015809 | 0 | 0.603448 | 0 | 0 | 0.082243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155172 | false | 0 | 0.017241 | 0.051724 | 0.275862 | 0.086207 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa7fd567c727259e88fdafdda00e4c3ee2228b36 | 3,923 | py | Python | recipes/lightgbm/all/conanfile.py | maksim-0/conan-center-index | 4bf032cd73ed8f7bfe379dcd463430ec145b9e80 | [
"MIT"
] | null | null | null | recipes/lightgbm/all/conanfile.py | maksim-0/conan-center-index | 4bf032cd73ed8f7bfe379dcd463430ec145b9e80 | [
"MIT"
] | null | null | null | recipes/lightgbm/all/conanfile.py | maksim-0/conan-center-index | 4bf032cd73ed8f7bfe379dcd463430ec145b9e80 | [
"MIT"
] | null | null | null | from conans import CMake, ConanFile, tools
from conan.tools.microsoft import is_msvc
import functools
required_conan_version = ">=1.33.0"
class LightGBMConan(ConanFile):
name = "lightgbm"
description = "A fast, distributed, high performance gradient boosting (GBT, GBDT, GBRT, GBM or MART) framework based on decision tree algorithms, used for ranking, classification and many other machine learning tasks."
topics = ("machine-learning", "boosting")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/microsoft/LightGBM"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake", "cmake_find_package"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_openmp": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"with_openmp": True
}
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("eigen/3.4.0")
self.requires("fast_double_parser/0.6.0")
self.requires("fmt/8.1.1")
if self.options.with_openmp and self.settings.compiler in ("clang", "apple-clang"):
self.requires("llvm-openmp/11.1.0")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
@functools.lru_cache(1)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["BUILD_STATIC_LIB"] = not self.options.shared
cmake.definitions["USE_DEBUG"] = self.settings.build_type == "Debug"
cmake.definitions["USE_OPENMP"] = self.options.with_openmp
if self.settings.os == "Macos":
cmake.definitions["APPLE_OUTPUT_DYLIB"] = True
cmake.configure()
return cmake
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "LightGBM")
self.cpp_info.set_property("cmake_target_name", "LightGBM::LightGBM")
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.names["cmake_find_package"] = "LightGBM"
self.cpp_info.names["cmake_find_package_multi"] = "LightGBM"
self.cpp_info.libs = ["lib_lightgbm"] if is_msvc(self) else ["_lightgbm"]
if self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi"])
elif self.settings.os == "Linux":
self.cpp_info.system_libs.append("pthread")
if not self.options.shared and self.options.with_openmp:
if is_msvc(self):
openmp_flags = ["-openmp"]
elif self.settings.compiler == "gcc":
openmp_flags = ["-fopenmp"]
elif self.settings.compiler in ("clang", "apple-clang"):
openmp_flags = ["-Xpreprocessor", "-fopenmp"]
else:
openmp_flags = []
self.cpp_info.exelinkflags.extend(openmp_flags)
self.cpp_info.sharedlinkflags.extend(openmp_flags)
| 37.009434 | 223 | 0.629875 | 466 | 3,923 | 5.124464 | 0.356223 | 0.045226 | 0.041457 | 0.020101 | 0.19598 | 0.080402 | 0.057789 | 0 | 0 | 0 | 0 | 0.008059 | 0.240887 | 3,923 | 105 | 224 | 37.361905 | 0.793821 | 0.018098 | 0 | 0.068966 | 0 | 0.011494 | 0.225195 | 0.012468 | 0 | 0 | 0 | 0.009524 | 0 | 1 | 0.126437 | false | 0 | 0.034483 | 0.011494 | 0.321839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa8744aeb897b1dd532ef4bcf6032ca2b74ba54a | 2,574 | py | Python | src/letter_classifier/take_pic.py | tomoya777773/KB_1815 | fb71711113cc79cc93809f96799dfb1ba9f5b1ed | [
"MIT"
] | 2 | 2018-10-18T17:38:49.000Z | 2018-10-21T09:57:43.000Z | src/letter_classifier/take_pic.py | tomoya777773/KB_1815 | fb71711113cc79cc93809f96799dfb1ba9f5b1ed | [
"MIT"
] | null | null | null | src/letter_classifier/take_pic.py | tomoya777773/KB_1815 | fb71711113cc79cc93809f96799dfb1ba9f5b1ed | [
"MIT"
] | 3 | 2018-10-20T03:33:07.000Z | 2018-10-28T08:12:58.000Z | from argparse import ArgumentParser
from pathlib import Path
import time
import numpy as np
import cv2
from datetime import datetime
import requests
import json  # needed for json.dumps() when posting to the LINE endpoint below
DIFF_THRESHOLD = 30
DEFAULT_SLEEP = 1
LINE_ENDPOINT = 'https://uketori.herokuapp.com/important'
VISION_ENDPOINT = 'https://southcentralus.api.cognitive.microsoft.com/customvision/v2.0/Prediction/2d6dff05-36fb-493e-a387-1093bbbb175b/image'
TMP_DIR = Path('../../src/public/images/')
vision_headers = {
    'Content-Type': 'application/octet-stream',
    # The original dict listed 'Prediction-Key' twice (once empty); duplicate
    # dict keys silently overwrite each other, so only one entry is kept.
    'Prediction-Key': '4fdd8e3729b04880af66cdb52d0b5c73',
}
line_headers = {'Content-Type': 'application/json'}
IMPORTANT_TAG = 'important'
NOT_IMPORTANT_TAG = 'not_important'
def detect_diff(img_before, img_after, diff_threshold=DIFF_THRESHOLD):
'''
detect difference between two images.
:param img_before: image to be compared.
:param img_after: image to compare with.
:return: whether the two images differ.
'''
gray_before = cv2.cvtColor(img_before, cv2.COLOR_RGB2GRAY)
gray_after = cv2.cvtColor(img_after, cv2.COLOR_RGB2GRAY)
(width, height) = gray_before.shape
pix_num = width * height
diff = cv2.absdiff(gray_before, gray_after)
mean_diff = np.sum(diff) / pix_num
return mean_diff > diff_threshold
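# A quick sanity check of detect_diff (added; uses synthetic frames rather than
# camera input):
#
#     black = np.zeros((10, 10, 3), dtype=np.uint8)
#     white = np.full((10, 10, 3), 255, dtype=np.uint8)
#     assert not detect_diff(black, black.copy())
#     assert detect_diff(black, white)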
def main(cam_device=0):
if not TMP_DIR.exists():
TMP_DIR.mkdir(parents=True)
cap = cv2.VideoCapture(cam_device)
frame = None
while True:
frame_before = frame
ret, frame = cap.read()
if frame_before is None:
frame_before = frame
cv2.imshow('frame', frame)
diff = detect_diff(frame_before, frame)
if diff:
print(True)
now_time = datetime.now().strftime('%Y%m%d%H%M%S')
target_name = '%s/%s.jpg' % (str(TMP_DIR), now_time)
cv2.imwrite(target_name, frame)
r = requests.post(
VISION_ENDPOINT,
data=open(target_name, "rb"),
headers=vision_headers
).json()
results = r['predictions']
result_tag = results[0]['tagName']
print(result_tag)
if result_tag == IMPORTANT_TAG:
payload = {'result': '%s.jpg' % now_time}
requests.post(LINE_ENDPOINT, data=json.dumps(payload), headers=line_headers)
time.sleep(DEFAULT_SLEEP)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--cam_device', type=int, default=0)
args = parser.parse_args()
main(args.cam_device)
| 28.285714 | 142 | 0.652681 | 320 | 2,574 | 5.034375 | 0.425 | 0.014898 | 0.029795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02889 | 0.233489 | 2,574 | 90 | 143 | 28.6 | 0.787633 | 0.060995 | 0 | 0.032258 | 0 | 0.016129 | 0.171201 | 0.033487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.177419 | 0 | 0.225806 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa88dadbcf6756dea815702084c2d113a57d337c | 7,286 | py | Python | aerosols/mie.py | loic-rossi/python-titan-aerosols | 6c55be3c54e8078844b3268943d8451e378742d4 | [
"MIT"
] | 1 | 2020-11-25T03:12:57.000Z | 2020-11-25T03:12:57.000Z | aerosols/mie.py | loic-rossi/python-titan-aerosols | 6c55be3c54e8078844b3268943d8451e378742d4 | [
"MIT"
] | 1 | 2021-12-10T17:15:26.000Z | 2021-12-10T17:47:45.000Z | aerosols/mie.py | loic-rossi/python-titan-aerosols | 6c55be3c54e8078844b3268943d8451e378742d4 | [
"MIT"
] | 1 | 2021-12-10T15:48:05.000Z | 2021-12-10T15:48:05.000Z | """Mie module."""
import numpy as np
NANG = 91
NMXX = 150e3
def mie_bohren_huffman(x, refrel, nang=NANG):
"""
Compute Mie scattering based on Bohren and Huffman theory.
Parameters
----------
x: float
Size parameter = k*radius = 2π/λ * radius
(λ is the wavelength in the medium around the scatterers).
refrel: complex
Refractive index in complex form, for example 1.5 + 0.02j.
nang: int, optional
Number of angles for S1 and S2 function in range from 0 to π/2.
Returns
-------
S1, S2: numpy.ndarray
Complex scattering amplitude functions.
Qext:
Extinction efficiency.
Qsca:
Scattering efficiency.
Qback:
Backscatter efficiency.
gsca:
Asymmetry parameter.
Raises
------
ValueError
If the input argument are outside the validity range.
Note
----
This file is converted from [mie.m](http://atol.ucsd.edu/scatlib/index.htm)
Bohren and Huffman originally published the code in their book on light scattering.
Source: http://scatterlib.googlecode.com/files/bhmie_herbert_kaiser_july2012.py
""" # pylint: disable=too-many-locals
if nang > 1_000:
raise ValueError(f"Require NANG = {nang} <= 1000")
if nang < 2:
raise ValueError(
f"Require NANG = {nang} > 1 in order to calculate scattering intensities")
ang = .5 * np.pi / (nang - 1)
mu = np.cos(np.arange(0, nang, 1) * ang)
# Series expansion terminated after NSTOP terms
# Logarithmic derivatives calculated from NMX on down
xstop = x + 4 * np.power(x, 1 / 3) + 2
# xstop = x + 4 * np.power(x, 1/3) + 10 # Old form
ymod = abs(x * refrel)
nmx = np.fix(max(xstop, ymod) + 15)
# BTD experiment 91/1/15: add one more term to series and compare results
# NMX = AMAX1(XSTOP, YMOD) + 16
# test: compute 7001 wavelen > hs between .0001 and 1000 micron
# for a = 1.0 micron SiC grain. When NMX increased by 1, only a single
# computed number changed (out of 4*7001) and it only changed by 1/8387
# Conclusion: we are indeed retaining enough terms in series!
if nmx > NMXX:
raise ValueError(f"nmx = {nmx} > NMXX = {NMXX} for |m|x = {ymod}")
s1_1 = np.zeros(nang, dtype=np.complex128)
s1_2 = np.zeros(nang, dtype=np.complex128)
s2_1 = np.zeros(nang, dtype=np.complex128)
s2_2 = np.zeros(nang, dtype=np.complex128)
pi = np.zeros(nang, dtype=np.complex128)
tau = np.zeros(nang, dtype=np.complex128)
pi0 = np.zeros(nang, dtype=np.complex128)
pi1 = np.ones(nang, dtype=np.complex128)
# Logarithmic derivative D(J) calculated by downward recurrence
# beginning with initial value (0,0) at J = NMX
nn = int(nmx) - 1
d = np.zeros(nn + 1, dtype=np.complex128)
for n in range(0, nn):
en = (nmx - n) / (x * refrel)
d[nn - n - 1] = en - 1 / (d[nn - n] + en)
# Riccati-Bessel functions with real argument X
# calculated by upward recurrence
an, bn = None, None
psi0 = np.cos(x)
psi1 = np.sin(x)
chi0 = -np.sin(x)
chi1 = np.cos(x)
xi1 = psi1 - chi1 * 1j
qsca = 0
gsca = 0
p = -1
nstop = int(xstop)
for n in range(0, nstop):
en = n + 1
fn = (2 * en + 1) / (en * (en + 1))
# for given N, PSI = psi_n CHI = chi_n
# PSI1 = psi_{n-1} CHI1 = chi_{n-1}
# PSI0 = psi_{n-2} CHI0 = chi_{n-2}
# Calculate psi_n and chi_n
psi = (2 * en - 1) * psi1 / x - psi0
chi = (2 * en - 1) * chi1 / x - chi0
xi = psi - chi * 1j
# Store previous values of AN and BN for use
# in computation of g=<np.cos(theta)>
if n > 0:
an1 = an
bn1 = bn
# Compute AN and BN:
an = (d[n] / refrel + en / x) * psi - psi1
an = an / ((d[n] / refrel + en / x) * xi - xi1)
bn = (refrel * d[n] + en / x) * psi - psi1
bn = bn / ((refrel * d[n] + en / x) * xi - xi1)
# Augment sums for Qsca and g=<np.cos(theta)>
qsca += (2 * en + 1) * (abs(an) ** 2 + abs(bn) ** 2)
gsca += ((2 * en + 1) / (en * (en + 1))) * \
(np.real(an) * np.real(bn) + np.imag(an) * np.imag(bn))
if n > 0:
gsca += ((en - 1) * (en + 1) / en) * \
(np.real(an1) * np.real(an) + np.imag(an1) * np.imag(an)
+ np.real(bn1) * np.real(bn) + np.imag(bn1) * np.imag(bn))
# Now calculate scattering intensity pattern
# First do angles from 0 to 90
pi = np.copy(pi1)
tau = en * mu * pi - (en + 1) * pi0
s1_1 += fn * (an * pi + bn * tau)
s2_1 += fn * (an * tau + bn * pi)
# Now do angles greater than 90 using PI and TAU from
# angles less than 90.
# P=1 for N=1,3,...% P=-1 for N=2,4,...
# remember that we have to reverse the order of the elements
# of the second part of s1 and s2 after the calculation
p = -p
s1_2 += fn * p * (an * pi - bn * tau)
s2_2 += fn * p * (bn * pi - an * tau)
psi0 = psi1
psi1 = psi
chi0 = chi1
chi1 = chi
xi1 = psi1 - chi1 * 1j
# Compute pi_n for next value of n
# For each angle J, compute pi_n+1
# from PI = pi_n , PI0 = pi_n-1
pi1 = ((2 * en + 1) * mu * pi - (en + 1) * pi0) / en
pi0 = np.copy(pi)
# Have summed sufficient terms.
# Now compute QSCA, QEXT, QBACK and GSCA
# We have to reverse the order of the elements of the second part of s1 and s2
s1 = np.concatenate((s1_1, s1_2[-2::-1]))
s2 = np.concatenate((s2_1, s2_2[-2::-1]))
gsca = 2 * gsca / qsca
qsca = 2 / x ** 2 * qsca
qext = 4 / x ** 2 * np.real(s1[0])
# More common definition of the backscattering efficiency,
# so that the backscattering cross section really
# has dimension of length squared
qback = 4 * (abs(s1[2 * (nang - 1)]) / x) ** 2
# qback = ((abs( s1[2 * nang - 2])/x )**2 )/np.pi # Old form
return s1, s2, qext, qsca, qback, gsca
def mie(wvln, nr, ni, r, nang=NANG):
"""Compute Mie cross-sections and phase function based on Bohren and Huffman theory.
Parameters
----------
wvln: float
Wavelength (m).
nr: float
Particle real optical index.
ni: float
Particle imaginary optical index.
r: float
Particle radius (m).
nang: int, optional
Number of angles for the phase function (range from 0 to π/2)
Returns
-------
qsct: float
Scattering cross section (m^-2).
qext: float
Extinction cross section (m^-2).
qabs: float
Absorption cross section (m^-2).
gg: float
Asymmetry parameter.
theta: numpy.ndarray
Phase function angles (radians).
P: numpy.ndarray
Phase function.
"""
Xm = 2 * np.pi * r / wvln
s1, s2, Qe, Qs, _, gg = mie_bohren_huffman(Xm, complex(nr, ni), nang)
qsct = Qs * np.pi * r ** 2
qext = Qe * np.pi * r ** 2
qabs = qext - qsct
S11 = .5 * (abs(s2) ** 2 + abs(s1) ** 2)
theta = np.linspace(0, np.pi, len(s1))
norm = .5 * np.trapz(S11 * np.sin(theta), x=theta)
P = S11 / norm
return qsct, qext, qabs, gg, theta, P
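if __name__ == '__main__':
    # A minimal usage sketch (added for illustration; the wavelength, indices and
    # radius below are arbitrary example values, not taken from any dataset):
    # a 1 micron radius particle at 550 nm with optical index 1.5 + 0.01i.
    qsct, qext, qabs, gg, theta, P = mie(550e-9, 1.5, 0.01, 1e-6)
    print("Csca = %.3e m^2, Cext = %.3e m^2, g = %.3f" % (qsct, qext, gg))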
| 30.742616 | 88 | 0.554625 | 1,108 | 7,286 | 3.617329 | 0.26444 | 0.009731 | 0.038174 | 0.041916 | 0.191617 | 0.168164 | 0.114271 | 0.038423 | 0.02994 | 0.02994 | 0 | 0.056392 | 0.316086 | 7,286 | 236 | 89 | 30.872881 | 0.747943 | 0.462119 | 0 | 0.043956 | 0 | 0 | 0.039757 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021978 | false | 0 | 0.010989 | 0 | 0.054945 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa8b02af1478c1f96b1b0bc909f7b0a37212823b | 2,361 | py | Python | edx_course_team_api/views.py | ibm-skills-network/edx-course-team-api | 662a756f61047bd2f1c10ede2feb6f1a24c2717d | [
"MIT"
] | null | null | null | edx_course_team_api/views.py | ibm-skills-network/edx-course-team-api | 662a756f61047bd2f1c10ede2feb6f1a24c2717d | [
"MIT"
] | null | null | null | edx_course_team_api/views.py | ibm-skills-network/edx-course-team-api | 662a756f61047bd2f1c10ede2feb6f1a24c2717d | [
"MIT"
] | null | null | null | import logging
import json
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import ParseError
from rest_framework import status
from rest_framework.authentication import BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
# edx imports
from student import auth
from student.models import CourseEnrollment
from student.roles import CourseInstructorRole, CourseStaffRole
from opaque_keys.edx.keys import CourseKey
from cms.djangoapps.contentstore.views.user import _course_team_user
log = logging.getLogger(__name__)
USERNAME = 'admin' # the user who will be associated with new courses
ROLE_TYPE_MAPPINGS = {
"staff": CourseStaffRole,
"instructor": CourseInstructorRole
}
ROLE_OPTIONS = list(ROLE_TYPE_MAPPINGS.keys())
class CourseView(APIView):
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
def post(self, request, course_key_string):
course_key = CourseKey.from_string(course_key_string)
email = request.data.get("email", None)
if not email:
msg = { "error": "Missing parameter 'email' in body." }
log.info(msg)
raise ParseError(msg)
role = request.data.get("role", None)
if not role:
msg = { "error": "Missing parameter 'role' in body." }
log.info(msg)
raise ParseError(msg)
if role not in ROLE_OPTIONS:
msg = { "error": "Parameter 'role' has to be one of '{}'".format(ROLE_OPTIONS) }
log.info(msg)
raise ParseError(msg)
try:
user = User.objects.get(email=email)
except Exception: # pylint: disable=broad-except
msg = {
"error": "Could not find user by email address '{email}'".format(email=email)
}
return Response(msg, 404)
role_type = ROLE_TYPE_MAPPINGS.get(role)(course_key)
auth.add_users(request.user, role_type, user)
CourseEnrollment.enroll(user, course_key)
msg = "'{email}' is granted '{role}' to '{course_key}'".format(email=email, role=role, course_key=course_key)
log.info(msg)
return Response({'message': "User is added to {}.".format(course_key)})
| 33.253521 | 117 | 0.681491 | 283 | 2,361 | 5.54417 | 0.371025 | 0.051625 | 0.06501 | 0.028681 | 0.061185 | 0.061185 | 0.04334 | 0.04334 | 0 | 0 | 0 | 0.001639 | 0.224905 | 2,361 | 70 | 118 | 33.728571 | 0.855738 | 0.037696 | 0 | 0.132075 | 0 | 0 | 0.120811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.264151 | 0 | 0.377358 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa8fc5d3e55cdf2ceecce40af28055451e8b8a11 | 927 | py | Python | Markov.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | 4 | 2020-04-11T09:54:27.000Z | 2021-08-18T07:06:52.000Z | Markov.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | null | null | null | Markov.py | unrealTOM/MC | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | [
"MIT"
] | 5 | 2019-01-22T03:47:17.000Z | 2022-02-14T18:09:07.000Z | # https://zhuanlan.zhihu.com/p/25610149
import numpy as np
import matplotlib.pyplot as plt
import math
def p(x):
#standard normal
mu=0
sigma=1
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
#uniform proposal distribution on [-4,4]
def q(x): #uniform
return np.array([0.125 for i in range(len(x))])
x = np.linspace(-4,4,500)
M = 3.5
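# Envelope check (added note): rejection sampling needs M*q(x) >= p(x) on [-4,4];
# here max p(x) = 1/sqrt(2*pi) ~ 0.3989 <= M*q(x) = 3.5*0.125 = 0.4375, so the
# bound holds and the expected acceptance rate is roughly 1/M (about 29%).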
N=1000 #number of samples needed
i = 1
count = 0
X = np.array([])
while i < N:
u = np.random.rand(10) #evaluate 10 each loop
x = (np.random.rand(10)-0.5)*8
res = u < p(x)/q(x)/M
if any(res):
X = np.hstack((X,x[res]))
i+=len(x[res])
count+=10
count -= len(X) - 1000
X=X[:1000]
x = np.linspace(-4,4,500)
plt.plot(x,p(x))
plt.hist(X,bins=100,density=True)
plt.title('Rejection Sampling')
plt.savefig('result.png', dpi=100)  # save before show(); saving afterwards writes a blank figure
plt.show()
print(N / count)  # proportion of raw samples accepted
| 21.55814 | 66 | 0.594391 | 172 | 927 | 3.203488 | 0.482558 | 0.027223 | 0.039927 | 0.043557 | 0.058076 | 0.058076 | 0 | 0 | 0 | 0 | 0 | 0.091413 | 0.221143 | 927 | 42 | 67 | 22.071429 | 0.671745 | 0.185545 | 0 | 0.0625 | 0 | 0 | 0.039716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0.03125 | 0.21875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa92751ba744314eb889ed22b1f78c58218f77cd | 4,573 | py | Python | utils/awrams/utils/messaging/general.py | Zac-HD/awra_cms | ebc51df859ee665d936cf9600ea29dc8e45321d7 | [
"NetCDF"
] | 2 | 2020-05-10T05:27:16.000Z | 2021-01-20T02:14:23.000Z | utils/awrams/utils/messaging/general.py | Zac-HD/awra_cms | ebc51df859ee665d936cf9600ea29dc8e45321d7 | [
"NetCDF"
] | null | null | null | utils/awrams/utils/messaging/general.py | Zac-HD/awra_cms | ebc51df859ee665d936cf9600ea29dc8e45321d7 | [
"NetCDF"
] | 2 | 2019-12-26T13:36:44.000Z | 2020-03-24T12:23:23.000Z | from io import StringIO
import traceback
import numpy as np
import zmq
import errno
import uuid
import logging
from awrams.utils.metatypes import ObjectDict as o
import subprocess
class Chunk:
def __init__(self,x,y):
self.x = x
self.y = y
self.shape = (1,self.y.stop - self.y.start)
def __repr__(self):
return str((self.x,self.y))
def contains(self,cell):
if cell[0] == self.x:
if cell[1] >= self.y.start and cell[1] < self.y.stop:
return True
return False
def idx(self,cell):
'''
Warning - this doesn't check for validity, only passes back what it thinks is a local index
'''
return cell[1]-self.y.start
NULL_CHUNK = Chunk(-1,slice(-1,-1))
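# Brief illustration (added): a Chunk covers a single row x and a slice of columns y.
#
#     c = Chunk(3, slice(10, 20))
#     c.contains((3, 15))  # -> True
#     c.idx((3, 15))       # -> 5, the column index local to the chunk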
def gen_ipc_handle(prefix='awra'):
suffix = uuid.uuid4().hex
return "ipc:///tmp/" + prefix + '_' + suffix
def send_array(socket, A, flags=0, copy=False, track=True):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_pyobj(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=False, track=True):
"""recv a numpy array"""
md = socket.recv_pyobj(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = msg
A = np.frombuffer(buf, dtype=md['dtype'])
return A.reshape(md['shape'])
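# A minimal round-trip sketch for send_array/recv_array (added; uses an in-process
# PAIR socket pair purely for illustration):
#
#     ctx = zmq.Context.instance()
#     a, b = ctx.socket(zmq.PAIR), ctx.socket(zmq.PAIR)
#     a.bind('inproc://demo'); b.connect('inproc://demo')
#     send_array(a, np.arange(6).reshape(2, 3))
#     assert recv_array(b).shape == (2, 3)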
def message(subject,**kwargs):
m = dict(subject=subject,content=dict())
m['content'].update(kwargs)
return m
def get_traceback():
import sys
sio = StringIO()
traceback.print_exc(file=sio)
tb_text = sio.getvalue()
tb = sys.exc_info()[2]
while 1:
if not tb.tb_next:
break
tb = tb.tb_next
frame = tb.tb_frame
tb_text += "Locals:\n"
for key, value in list(frame.f_locals.items()):
if key.startswith('__'):
tb_text += '%s (skipped)\n'%(key)
continue
try:
lstr = " %s = %s" % (key,value)
except:
lstr = " %s (unprintable)" % (key)
tb_text += lstr + '\n'
return tb_text
'''
Support classes and methods
'''
class ZMQLogger(logging.Handler):
def __init__(self, socket, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.socket = socket
def emit(self,record):
'''
Send a message to the socket
'''
try:
self.socket.send_pyobj(message('log_message',content=record.getMessage(),level=record.levelno))
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
#+++ System interrupt, retry
self.emit(record)
else:
raise
class MPLogger:
def __init__(self,queue):
self.queue = queue
def write(self,msg):
self.queue.put(message('log_message',content=msg))
def flush(self):
pass
class QueingLogHandler(logging.Handler):
def __init__(self, queue, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.queue = queue
def emit(self,record):
self.queue.put(message('log_message',content=record.getMessage(),level=record.levelno))
class QueuedLogCollector(object):
def __init__(self,queue):
self.reset()
self.queue = queue
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
def harvest(self):
while not self.queue.empty():
record = self.queue.get()
self.messages[record.levelname.lower()].append(record.getMessage())
def configure_logging_to_zmq_client(channel):
return _configure_client_logging(ZMQLogger(channel),True)
def configure_logging_to_mp_client(queue):
return _configure_client_logging(QueingLogHandler(queue))
def _configure_client_logging(handler,format=False):
from awrams.utils.settings import LOGFORMAT #pylint: disable=no-name-in-module
import logging
import awrams.utils.awrams_log
logger = awrams.utils.awrams_log.establish_logging()
#client_logger = log_writer
#handler = logging.StreamHandler(client_logger)
#if format:
# handler.setFormatter(logging.Formatter(LOGFORMAT))
logger.addHandler(handler)
return handler
def term(msg):
'''
Terminator handler for Managed listeners
'''
return 1
def term_print(msg):
print(msg)
return 1
| 25.405556 | 107 | 0.609009 | 578 | 4,573 | 4.676471 | 0.32526 | 0.033296 | 0.020348 | 0.011099 | 0.173881 | 0.112468 | 0.09471 | 0.076952 | 0.076952 | 0 | 0 | 0.004452 | 0.263285 | 4,573 | 179 | 108 | 25.547486 | 0.797863 | 0.089657 | 0 | 0.121951 | 0 | 0 | 0.03679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0.00813 | 0.105691 | 0.02439 | 0.455285 | 0.03252 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa933e3a2220b0a9dd004d7e37da742b1bfa7167 | 1,941 | py | Python | backend/apps/users/admin.py | abodacs/django-fullstack-biolerplate | 87e8618638eb801fd061c34da9365ff50bebdf77 | [
"MIT"
] | null | null | null | backend/apps/users/admin.py | abodacs/django-fullstack-biolerplate | 87e8618638eb801fd061c34da9365ff50bebdf77 | [
"MIT"
] | null | null | null | backend/apps/users/admin.py | abodacs/django-fullstack-biolerplate | 87e8618638eb801fd061c34da9365ff50bebdf77 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from apps.projects.models import Case
from apps.users.forms import EnvoyChangeForm, UserChangeForm, UserCreationForm
from .models import Envoy
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "type",)}),
(_("Permissions"), {"fields": ("is_active", "is_superuser",),}),
(_("Important dates"), {"fields": ("last_login",)}),
)
list_filter = (
"is_active",
"type",
)
list_display = [
"username",
"name",
"type",
]
search_fields = ["name"]
@admin.register(Envoy)
class EnvoyAdmin(auth_admin.UserAdmin):
form = EnvoyChangeForm
add_form = UserCreationForm
list_filter = ("is_active",)
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "type", "areas_in_charge", "mobile",)}),
(_("Permissions"), {"fields": ("is_active",)}),
(_("Important dates"), {"fields": ("last_login",)}),
)
list_display = [
"username",
"name",
"type",
"mobile",
"show_cases_number",
]
search_fields = ["name"]
def show_cases_number(self, obj):
url = reverse("admin:projects_case_changelist") + "?" + urlencode({"envoy_id": f"{obj.id}"})
count = Case.objects.filter(envoy=obj).only("id").count()
return format_html('<a href="{}">{} Cases</a>', url, count)
show_cases_number.short_description = _("Cases Number")
| 29.861538 | 100 | 0.619268 | 207 | 1,941 | 5.589372 | 0.376812 | 0.051858 | 0.04408 | 0.038029 | 0.26102 | 0.217805 | 0.105445 | 0.105445 | 0.105445 | 0.105445 | 0 | 0 | 0.214323 | 1,941 | 64 | 101 | 30.328125 | 0.758689 | 0 | 0 | 0.351852 | 0 | 0 | 0.214323 | 0.015456 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0.037037 | 0.203704 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa936cb125d27698927a89093b09a059c5d97cd9 | 931 | py | Python | gfootball/agent.py | level-antoine/football | 516f63da0ea4696f4c8b6668c65ac4b20385a8fa | [
"Apache-2.0"
] | null | null | null | gfootball/agent.py | level-antoine/football | 516f63da0ea4696f4c8b6668c65ac4b20385a8fa | [
"Apache-2.0"
] | null | null | null | gfootball/agent.py | level-antoine/football | 516f63da0ea4696f4c8b6668c65ac4b20385a8fa | [
"Apache-2.0"
] | 1 | 2022-03-02T14:01:00.000Z | 2022-03-02T14:01:00.000Z | import time
import gfootball.env as football_env
import random


class Agent:
    def __init__(self):
        pass


if __name__ == '__main__':
    env = football_env.create_environment(env_name='1_vs_1_easy', representation='extracted', render=True)
    state = env.reset()
    action_simple = football_env.observation_preprocessing.football_action_set.action_set_dict["simple"]
    obs = env.reset()
    while True:
        action = random.choice(action_simple)
        observation, reward, done, info = env.step(action)
        print('-----------------------------------------')
        i = 1
        for obs in observation:
            print(i)
            print(obs)
            i += 1
        time.sleep(1000000000)
        print(reward)
        print(done)
        print(info)
        print('-----------------------------------------')
        if done:
            env.reset()
env.close() | 23.275 | 106 | 0.531686 | 96 | 931 | 4.875 | 0.489583 | 0.070513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021212 | 0.291085 | 931 | 40 | 107 | 23.275 | 0.687879 | 0 | 0 | 0.071429 | 0 | 0 | 0.124464 | 0.087983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.035714 | 0.107143 | 0 | 0.178571 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa94064062b6a06f6e48c481bf507660eeb96adc | 2,724 | py | Python | utils.py | reutwerber/DeepLearningHW | 89d1459139b8da294a303dbbe98fc4a48882fb56 | [
"MIT"
] | null | null | null | utils.py | reutwerber/DeepLearningHW | 89d1459139b8da294a303dbbe98fc4a48882fb56 | [
"MIT"
] | null | null | null | utils.py | reutwerber/DeepLearningHW | 89d1459139b8da294a303dbbe98fc4a48882fb56 | [
"MIT"
] | null | null | null | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt


# plot edible / poisonous
def basic_plots(data):
    plt.figure(figsize=(15, 8))
    fig, ax = plt.subplots()
    sns.countplot(x=data['odor'], hue=data['is-edible'], palette=['black', 'blue'], data=data)
    plt.ylabel('Number of Mushrooms')
    plt.legend(title=None, labels=['Poisonous', 'Edible'])
    plt.title("Edible vs. Poisonous, Sorted by Odor")
    plt.savefig("edible_vs_poisonous2_odor.png")

    new_data = data.loc[data["odor"] != "n"]
    new_data = new_data.loc[data["odor"] != "f"]
    # print(data["odor"].value_counts())
    # there is only 1 value with odor m, so we will not use it.
    new_data = new_data.loc[data["odor"] != "m"]
    fig, ax = plt.subplots()
    plt.title('Odors: Edible vs Poisonous, Partial Results')
    sns.countplot(x=new_data['odor'], hue=new_data['is-edible'], palette=['black', 'blue'], data=new_data)
    plt.ylabel('Number of Mushrooms')
    plt.legend(title=None, labels=['Poisonous', 'Edible'])
    plt.savefig("edible_vs_poisonous3_odor_partial.png")


def check_veil_type_is_zero(df):
    flag = False
    for item in df['veil-type']:
        if item != 0:
            flag = True
            print(item)
    print(flag)


def plot_corr(df):
    # Correlation
    plt.figure(figsize=(14, 12))
    sns.heatmap(df.corr(), linewidths=.1, cmap="Blues", annot=True, annot_kws={"size": 7})
    plt.yticks(rotation=0)
    plt.savefig("corr.png", format='png', dpi=400, bbox_inches='tight')


def plot_feature_importance(features_list, feature_importance):
    sorted_idx = np.argsort(feature_importance)
    plt.figure(figsize=(15, 8))
    plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align='center', color="blue")
    plt.yticks(range(len(sorted_idx)), features_list[sorted_idx], fontsize=12)
    plt.xlabel('Importance')
    plt.title('Attribute importance')
    plt.savefig("featureimp.png", format='png', dpi=500, bbox_inches='tight')


def plot_confusion_matrix(cm, title):
    x_axis_labels = odor_labels
    y_axis_labels = x_axis_labels
    f, ax = plt.subplots(figsize=(7, 7))
    hm_plot = sns.heatmap(cm, annot=True, linewidths=0.2, linecolor="black", fmt=".0f", ax=ax,
                          cmap="Blues", xticklabels=x_axis_labels, yticklabels=y_axis_labels)
    hm_plot.set_xticklabels(hm_plot.get_xmajorticklabels(), fontsize=9)
    hm_plot.set_yticklabels(hm_plot.get_ymajorticklabels(), fontsize=9)
    plt.xlabel("PREDICTED LABEL")
    plt.ylabel("TRUE LABEL")
    plt.title(title)
    plt.savefig(title + '.png', format='png', dpi=500, bbox_inches='tight')


odor_labels = ["almond", "anise", "creosote", "fishy", "foul",
"none", "pungent", "spicy"] | 37.833333 | 106 | 0.669971 | 389 | 2,724 | 4.534704 | 0.367609 | 0.031746 | 0.027211 | 0.02551 | 0.221655 | 0.183673 | 0.183673 | 0.119048 | 0.081633 | 0.081633 | 0 | 0.015432 | 0.167401 | 2,724 | 72 | 107 | 37.833333 | 0.762346 | 0.04699 | 0 | 0.148148 | 0 | 0 | 0.178241 | 0.025463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092593 | false | 0 | 0.148148 | 0 | 0.240741 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa942d0bf0283b06c38a2f2f91cc6d29defda50a | 3,499 | py | Python | fdisk.py | lypant/diskutils | 8ebc84dcb9bedd591b4d4484c9858fb71eaaa709 | [
"MIT"
] | null | null | null | fdisk.py | lypant/diskutils | 8ebc84dcb9bedd591b4d4484c9858fb71eaaa709 | [
"MIT"
] | null | null | null | fdisk.py | lypant/diskutils | 8ebc84dcb9bedd591b4d4484c9858fb71eaaa709 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3


class Fdisk():
    '''
    Composes a bash command string capable of creating partitions for a given disk
    '''
    def __init__(self, disk):
        self.disk = disk
        self.query = ''
        self.partitionsCount = 0

    def addPrimaryPartition(self, size, type):
        self.partitionsCount += 1
        # Add new partition...
        self.query += 'n\n'
        # ...primary one...
        self.query += 'p\n'
        # ...pick default number...
        self.query += '\n'
        # ...choose default starting sector...
        self.query += '\n'
        # ...set partition size
        self.query += '%s\n' % size
        # Set new partition type...
        self.query += 't\n'
        # ...for partitions other than 1 it is necessary to give the partition number
        if self.partitionsCount != 1:
            self.query += '\n'  # Use default number
        # ...set partition type
        self.query += '%s\n' % type

    def addExtendedPartition(self):
        '''
        Assumption - 3 primary partitions already exist
        '''
        # Add new partition...
        self.query += 'n\n'
        # ...extended one - necessary to select explicitly
        self.query += 'e\n'
        # ...pick partition number - done automatically for partition >= 4
        # ...choose default starting sector...
        self.query += '\n'
        # ...set partition size
        self.query += '\n'  # Take all available space for container partition
        # No need to select partition type explicitly

    def addLogicalPartition(self, size, type):
        '''
        Assumption - 3 primary and a fourth extended partition already exist
        '''
        # Add new partition...
        self.query += 'n\n'
        # ...no need to select partition number explicitly
        # ...choose default starting sector...
        self.query += '\n'
        # ...set partition size
        self.query += '%s\n' % size
        # Set new partition type
        self.query += 't\n'
        # ...choose partition number...
        self.query += '\n'  # Use default number
        # ...set partition type
        self.query += '%s\n' % type

    def setPartitionBootable(self, partitionNumber):
        # Toggle bootable flag of a partition...
        self.query += 'a\n'
        if self.partitionsCount != 1:
            self.query += '%s\n' % str(partitionNumber)

    def writePartitionTable(self):
        self.query += 'w\n'

    def getBashCommandString(self):
        # TODO Check if "cat and EOFs" are needed when using python
        return 'cat <<-EOF | fdisk {disk}\n{query}EOF'.format(
            disk=self.disk,
            query=self.query)


if __name__ == '__main__':
    fd = Fdisk('/dev/sda')

    # fd.addPrimaryPartition('+128M', '83')
    # fd.addPrimaryPartition('+80G', '83')
    # fd.addPrimaryPartition('+6G', '82')
    # fd.setPartitionBootable(1)
    # fd.writePartitionTable()

    # fd.addPrimaryPartition('+128M', '83')
    # fd.setPartitionBootable(1)
    # fd.addPrimaryPartition('+80G', '83')
    # fd.addPrimaryPartition('+6G', '82')
    # fd.writePartitionTable()

    fd.addPrimaryPartition('+128M', '83')
    fd.addPrimaryPartition('+80G', '83')
    fd.addPrimaryPartition('+6G', '82')
    fd.addExtendedPartition()
    fd.addLogicalPartition('+20G', '83')
    fd.addLogicalPartition('+20G', '83')
    fd.addLogicalPartition('', '83')
    fd.setPartitionBootable(1)
    fd.writePartitionTable()
    print(fd.getBashCommandString())
| 31.809091 | 84 | 0.573021 | 373 | 3,499 | 5.343164 | 0.294906 | 0.103864 | 0.050176 | 0.027597 | 0.52283 | 0.455595 | 0.392373 | 0.344706 | 0.344706 | 0.318113 | 0 | 0.023191 | 0.285224 | 3,499 | 109 | 85 | 32.100917 | 0.773691 | 0.402972 | 0 | 0.4 | 0 | 0 | 0.07333 | 0 | 0 | 0 | 0 | 0.009174 | 0 | 1 | 0.14 | false | 0 | 0 | 0.02 | 0.18 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
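# A minimal usage sketch for the Fdisk class above (not part of the original
# file): the composed query can be piped straight to fdisk with subprocess,
# avoiding the "cat <<-EOF | fdisk" shell wrapper that getBashCommandString()
# builds. '/dev/loop0' is an assumed disposable loop device, and root
# privileges are assumed; run this only against a throwaway disk image.
import subprocess

fd = Fdisk('/dev/loop0')
fd.addPrimaryPartition('+128M', '83')
fd.writePartitionTable()
subprocess.run(['fdisk', fd.disk], input=fd.query, text=True, check=True)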
fa9aadc524b7e245b90d1790419586ddf6c23a3b | 329 | py | Python | Hackerearth Set/PalindromicCiphers.py | Siddharth2016/PYTHON3_prog | 9dfa258d87f5b00779d39d9de9a49c1c6cea06be | [
"MIT"
] | 2 | 2019-02-26T14:06:53.000Z | 2019-02-27T17:13:01.000Z | Hackerearth Set/PalindromicCiphers.py | Siddharth2016/PYTHON3_prog | 9dfa258d87f5b00779d39d9de9a49c1c6cea06be | [
"MIT"
] | null | null | null | Hackerearth Set/PalindromicCiphers.py | Siddharth2016/PYTHON3_prog | 9dfa258d87f5b00779d39d9de9a49c1c6cea06be | [
"MIT"
] | 2 | 2017-12-26T07:59:57.000Z | 2018-06-24T03:35:05.000Z | # PALINDROMIC CIPHERS
def value(s):
    prod = 1
    for i in range(len(s)):
        prod = prod * (ord(s[i]) - 96)
    return prod


test = int(input())
for i in range(test):
    string = str(input())
    stringr = string[::-1]
    if string == stringr:
        print("Palindrome")
    else:
        print(value(string))
| 18.277778 | 36 | 0.544073 | 46 | 329 | 3.891304 | 0.565217 | 0.022346 | 0.067039 | 0.122905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 0.300912 | 329 | 17 | 37 | 19.352941 | 0.756522 | 0.057751 | 0 | 0 | 0 | 0 | 0.032362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
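# A short worked example of value() above (added for clarity, not part of the
# original submission): each lowercase letter maps to its alphabet position
# via ord(c) - 96, and the positions are multiplied together, so
# value("abc") = 1 * 2 * 3 = 6 and value("zz") = 26 * 26 = 676.
assert (ord('a') - 96, ord('z') - 96) == (1, 26)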
fa9c90bfa6f98eb6da005c0a9634a34f33cec44f | 2,897 | py | Python | pytorch_disco/nets/metric_learner.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors-for-Generalizing-Manipulation-Across-Objects-Configurations- | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | 1 | 2022-03-14T22:25:17.000Z | 2022-03-14T22:25:17.000Z | pytorch_disco/nets/metric_learner.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | null | null | null | pytorch_disco/nets/metric_learner.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from archs.encoder3D import SimpleEncoder3D
import numpy as np


class MetricLearner(nn.Module):
    def __init__(self, num_objects=150, repr_dim=128):
        """
        num_objects : number of objects for which an embedding is learned
        repr_dim    : dimensionality of the learned representation
        """
        super(MetricLearner, self).__init__()
        self.pred_dim = repr_dim
        # We need a Siamese style network
        self.encoder1 = SimpleEncoder3D(in_channel=32, pred_dim=128, chans=32).cuda()
        self.encoder2 = SimpleEncoder3D(in_channel=32, pred_dim=128, chans=32).cuda()
        # This size (first dimension) could be num_objects (if we want to learn a feature
        # per object) [seems correct to me], num_tasks, or even a completely independent
        # num_clusters (how to choose the idx key in this case?)
        self.repr = nn.Embedding(num_objects, repr_dim).cuda()
        # Initialize weights to extremely small values with mean 0 and std 0.001
        self.repr.weight.data.normal_(mean=0, std=0.001)

    # We assume that inputs have both positive labels and negative labels.
    # We assume that inputs consist of one positive sample and B-1 negative samples.
    def forward(self, feat_tensors, labels):
        # We assume that the positive sample is at index 0
        # and the other samples are negative samples.
        # Input is (1, H, W, B, C); output is (1, F)
        pos_feat = self.encoder1(feat_tensors[0].unsqueeze(0))
        # Input is (B-1, H, W, B, C); output is (B-1, F)
        neg_feat = self.encoder2(feat_tensors[1:])
        # feats is (B, F)
        feats = torch.cat([pos_feat, neg_feat], dim=0)
        if len(feats.shape) > 2:
            feats = torch.squeeze(feats)
        # We need to normalize the output of the encoders
        norm = feats.norm(p=2, dim=1, keepdim=True)
        feats_normalized = feats.div(norm)
        loss = self.compute_loss(feats_normalized, labels, loss_type="n-class")
        return loss, feats_normalized

    # Note: feats contains both negative and positive feats;
    # feats[0] belongs to the positive sample and labels[0] is the key of the
    # positive sample. We are learning an embedding per object, so we should
    # use object labels and not object category labels here!
    def compute_loss(self, feats, object_labels, loss_type, temp=1):
        loss = 0
        if loss_type == "n-class":
            indices = self.repr(object_labels)
            assert len(feats) == 2, "feats tensor should have shape (batch_size, pred_dim)"
            prod = torch.bmm(feats.view(-1, 1, self.pred_dim), indices.view(-1, self.pred_dim, 1)) / temp
            # subtract the max for numerical stability before exponentiating
            max_val = torch.max(prod)
            prod = prod - max_val
            softmax = torch.div(torch.exp(prod[0]), torch.sum(torch.exp(prod)))
            loss = -1 * torch.log(softmax)
            loss = torch.squeeze(loss)
return loss | 43.893939 | 121 | 0.64515 | 423 | 2,897 | 4.307329 | 0.371158 | 0.023052 | 0.018112 | 0.02854 | 0.090011 | 0.065862 | 0.065862 | 0.051592 | 0.051592 | 0.051592 | 0 | 0.028891 | 0.259234 | 2,897 | 66 | 122 | 43.893939 | 0.82013 | 0.319296 | 0 | 0.054054 | 0 | 0 | 0.034289 | 0 | 0 | 0 | 0 | 0 | 0.027027 | 1 | 0.081081 | false | 0 | 0.189189 | 0 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
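# A hedged alternative sketch (not from the original repo) of the same n-class
# objective written with a numerically stable log-softmax instead of the manual
# exp/sum above. `feats` and `indices` are assumed to be the normalized encoder
# outputs and the matching embedding rows from compute_loss, with the positive
# sample at index 0.
import torch
import torch.nn.functional as F

def n_class_loss(feats, indices, temp=1.0):
    # logits[i] = <feats[i], indices[i]> / temp, shape (B,)
    logits = (feats * indices).sum(dim=1) / temp
    # cross-entropy of the positive (index 0) against all B candidates;
    # equivalent to -log( exp(logits[0]) / sum(exp(logits)) )
    return -F.log_softmax(logits, dim=0)[0]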
fa9d616c56bfe36fe6cf8e62f1d946df70dc71a6 | 528 | py | Python | script/run_default_lstm.py | datadrivenempathy/who-wrote-this-training | 66ccb65837de24d004b70d39a1e8522a7d1e184f | [
"MIT"
] | 1 | 2019-08-24T04:21:15.000Z | 2019-08-24T04:21:15.000Z | script/run_default_lstm.py | datadrivenempathy/who-wrote-this-training | 66ccb65837de24d004b70d39a1e8522a7d1e184f | [
"MIT"
] | 8 | 2020-01-28T22:42:36.000Z | 2022-02-10T00:21:57.000Z | script/run_default_lstm.py | datadrivenempathy/who-wrote-this-training | 66ccb65837de24d004b70d39a1e8522a7d1e184f | [
"MIT"
] | null | null | null | import harness_util

harness_factory = harness_util.TemplateHarnessFactory()

config = {
    'corpusCol': 'description',
    'lstmSize': 64,
    'dropoutRate': 0,
    'kernelRegPenalty': 0.01,
    'method': 'sequence',
    'numWords': 2000,
    'sourceCol': 'source',
    'sourceIdCol': 'sourceId',
    'sourceIdVectorCol': 'sourceIdVector',
    'tokenVectorCol': 'tokenVector',
    'tokensCol': 'tokens',
    'maxSeqLen': 50
}

harness = harness_factory.build(config)
harness.run('who-wrote-this', 'desc-lstm-size-2', config)
| 22.956522 | 57 | 0.662879 | 49 | 528 | 7.061224 | 0.795918 | 0.063584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029748 | 0.172348 | 528 | 22 | 58 | 24 | 0.762014 | 0 | 0 | 0 | 0 | 0 | 0.418561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa9fa525a3a637ac331509b0f67a63c258937a06 | 9,124 | py | Python | pycloud/pycloud/model/message.py | SEI-AMS/pycloud | 62764e9d2aae280e019306e3b151b7218bf82f4d | [
"MIT"
] | 14 | 2015-08-20T11:54:56.000Z | 2018-05-23T21:07:44.000Z | pycloud/pycloud/model/message.py | SEI-TAS/pycloud | 62764e9d2aae280e019306e3b151b7218bf82f4d | [
"MIT"
] | 10 | 2015-10-17T07:33:54.000Z | 2018-04-27T20:50:52.000Z | pycloud/pycloud/model/message.py | SEI-AMS/pycloud | 62764e9d2aae280e019306e3b151b7218bf82f4d | [
"MIT"
] | 8 | 2016-03-31T07:04:26.000Z | 2018-04-09T18:08:10.000Z | # KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTED BY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import datetime

from pycloud.pycloud.mongo import Model, ObjectID


################################################################################################################
# Represents a mailbox where messages to devices can be put.
################################################################################################################
class DeviceMessage(Model):

    # Meta class is needed so that minimongo can map this class onto the database.
    class Meta:
        collection = "messages"
        external = ['device_id', 'service_id', 'message', 'params']
        mapping = {
        }

    ############################################################################################################
    # Constructor.
    ############################################################################################################
    def __init__(self, *args, **kwargs):
        self.device_id = None
        self.service_id = None
        self.message = None
        self.params = {}
        self.datetime = datetime.datetime.now()
        self.read = False
        super(DeviceMessage, self).__init__(*args, **kwargs)

    ############################################################################################################
    # Locate a DeviceMessage by its ID.
    ############################################################################################################
    @staticmethod
    def by_id(item_id=None):
        record_id = item_id
        if not isinstance(record_id, ObjectID):
            # noinspection PyBroadException
            try:
                record_id = ObjectID(record_id)
            except:
                return None
        return DeviceMessage.find_one({'_id': record_id})

    ############################################################################################################
    # Locate all messages for a given public device ID.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def by_device_id(did=None):
        messages = []
        try:
            message_cursor = DeviceMessage.find({'device_id': did})
            for message in message_cursor:
                messages.append(message)
        except:
            pass
        return messages

    ############################################################################################################
    # Locate all unread messages for a given device and service ID.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def unread_by_device_id(device_id, service_id):
        messages = []
        try:
            message_cursor = DeviceMessage.find({'device_id': device_id, 'service_id': service_id, 'read': False})
            for message in message_cursor:
                return_message = {}
                return_message['device_id'] = message['device_id']
                return_message['service_id'] = message['service_id']
                return_message['message'] = message['message']
                return_message['params'] = message['params']
                messages.append(return_message)
        except:
            pass
        return messages

    ############################################################################################################
    # Mark all unread messages for a given device and service ID as read.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def mark_all_as_read(device_id, service_id):
        messages = []
        try:
            message_cursor = DeviceMessage.find({'device_id': device_id, 'service_id': service_id, 'read': False})
            for message in message_cursor:
                message['read'] = True
                message.save()
        except:
            pass
        return messages

    ############################################################################################################
    # Cleanly and safely gets a DeviceMessage and removes it from the database.
    ############################################################################################################
    @staticmethod
    def find_and_remove(item_id):
        # Find the right message and remove it. find_and_modify will only return the document with matching id.
        return DeviceMessage.find_and_modify(query={'_id': item_id}, remove=True)

    ############################################################################################################
    # Removes all messages of a certain type for a given device.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def clear_all_messages(device_id, message_string):
        messages = DeviceMessage.find({'device_id': device_id,
                                       'message': message_string})
        for message in messages:
            DeviceMessage.find_and_remove(message._id)


################################################################################################################
# Particular message used to notify a device that it has to create a certain wifi profile and store credentials.
################################################################################################################
class AddTrustedCloudletDeviceMessage(DeviceMessage):
    MESSAGE = 'add-trusted-cloudlet'

    ############################################################################################################
    # Constructor.
    ############################################################################################################
    def __init__(self, paired_device_data_bundle, *args, **kwargs):
        super(AddTrustedCloudletDeviceMessage, self).__init__(*args, **kwargs)
        self.message = self.MESSAGE
        self.params = paired_device_data_bundle.__dict__

    ############################################################################################################
    # Removes all messages of this type for a given device.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def clear_messages(device_id):
        AddTrustedCloudletDeviceMessage.clear_all_messages(device_id, AddTrustedCloudletDeviceMessage.MESSAGE)


################################################################################################################
# Particular message used to notify a device that it has to move to a new cloudlet.
################################################################################################################
class ConnectToNewCloudletMessage(DeviceMessage):
    MESSAGE = 'move-to-new-cloudlet-network'

    ############################################################################################################
    # Constructor.
    ############################################################################################################
    def __init__(self, paired_device_data_bundle, *args, **kwargs):
        super(ConnectToNewCloudletMessage, self).__init__(*args, **kwargs)
        self.message = self.MESSAGE
        self.params = paired_device_data_bundle.__dict__

    ############################################################################################################
    # Removes all messages of this type for a given device.
    ############################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def clear_messages(device_id):
| 48.531915 | 344 | 0.437089 | 672 | 9,124 | 5.75 | 0.3125 | 0.045549 | 0.019928 | 0.06677 | 0.320393 | 0.254658 | 0.244048 | 0.244048 | 0.231366 | 0.231366 | 0 | 0.005129 | 0.166594 | 9,124 | 187 | 345 | 48.791444 | 0.503025 | 0.228189 | 0 | 0.431818 | 0 | 0 | 0.060441 | 0.007263 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.034091 | 0.022727 | 0.011364 | 0.284091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faa05b817f60195f3c141ce531008fcb9fb675a7 | 4,473 | py | Python | BioExp/clusters/clusters.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | BioExp/clusters/clusters.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | BioExp/clusters/clusters.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import keras
import numpy as np
import tensorflow as tf
import os
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering


class Cluster():
    """
    A class for conducting a cluster study on a trained keras model instance
    """

    def __init__(self, model, weights_pth, layer_name, max_clusters=None):
        """
        model       : keras model architecture (keras.models.Model)
        weights_pth : saved weights path (str)
        layer_name  : name of the layer whose weights are clustered
        max_clusters: maximum number of clusters
        """
        self.model = model
        self.weights = weights_pth
        self.layer = layer_name
        self.layer_idx = 0
        for idx, layer in enumerate(self.model.layers):
            if layer.name == self.layer:
                self.layer_idx = idx

    def get_distances(self, X, model, mode='l2'):
        """
        Compute merge distances and weights for the children of a fitted
        AgglomerativeClustering model.
        """
        distances = []
        weights = []
        children = model.children_
        dims = (X.shape[1], 1)
        distCache = {}
        weightCache = {}
        for childs in children:
            c1 = X[childs[0]].reshape(dims)
            c2 = X[childs[1]].reshape(dims)
            c1Dist = 0
            c1W = 1
            c2Dist = 0
            c2W = 1
            if childs[0] in distCache.keys():
                c1Dist = distCache[childs[0]]
                c1W = weightCache[childs[0]]
            if childs[1] in distCache.keys():
                c2Dist = distCache[childs[1]]
                c2W = weightCache[childs[1]]
            d = np.linalg.norm(c1 - c2)
            # d = np.squeeze(np.dot(c1.T, c2) / (np.linalg.norm(c1) * np.linalg.norm(c2)))
            # weighted centroid of the two merged clusters becomes the new node
            cc = ((c1W * c1) + (c2W * c2)) / (c1W + c2W)
            X = np.vstack((X, cc.T))
            newChild_id = X.shape[0] - 1

            # How to deal with a higher level cluster merge with lower distance:
            if mode == 'l2':  # Increase the higher level cluster size using an l2 norm
                added_dist = ((c1Dist**2 + c2Dist**2)**0.5)
                dNew = (d**2 + added_dist**2)**0.5
            elif mode == 'max':  # If the previous clusters had a higher distance, use that one
                dNew = max(d, c1Dist, c2Dist)
            elif mode == 'cosine':
                dNew = np.squeeze(np.dot(c1Dist, c2Dist) / (np.linalg.norm(c1Dist) * np.linalg.norm(c2Dist)))
            elif mode == 'actual':  # Plot the actual distance.
                dNew = d

            wNew = (c1W + c2W)
            distCache[newChild_id] = dNew
            weightCache[newChild_id] = wNew

            distances.append(dNew)
            weights.append(wNew)
        return distances, weights

    def plot_dendrogram(self, X, model, threshold=.7):
        """
        Build the linkage matrix, derive the number of clusters implied by the
        distance threshold, and refit the clustering accordingly.
        """
        # Create linkage matrix and then plot the dendrogram
        distance, weight = self.get_distances(X, model)
        linkage_matrix = np.column_stack([model.children_, distance, weight]).astype(float)
        threshold = threshold * np.max(distance)
        sorted_ = linkage_matrix[np.argsort(distance)]
        splitnode = np.max(sorted_[sorted_[:, 2] > threshold][0, (0, 1)])
        level = np.log((-.5 * splitnode) / (1. * X.shape[0]) + 1.) / np.log(.5)
        nclusters = int(np.round((1. * X.shape[0]) / (2.**level)))

        # Refit with the derived number of clusters
        model = AgglomerativeClustering(n_clusters=nclusters).fit(X)
        distance, weight = self.get_distances(X, model)
        linkage_matrix = np.column_stack([model.children_, distance, weight]).astype(float)
        labels = model.labels_
        print("===========================", nclusters, threshold, np.max(distance), np.unique(labels), [sum(labels == i) for i in np.unique(labels)])
        # Plot the corresponding dendrogram
        return linkage_matrix, labels

    def get_clusters(self, threshold=0.5, save_path=None):
        """
        Does clustering on feature space
        save_path : path to save dendrogram image
        threshold : fraction of max distance to cluster
        """
        layer_weights = np.array(self.model.layers[self.layer_idx].get_weights())
        shape = layer_weights[0].shape
        X = layer_weights[0].reshape(layer_weights[0].shape[-1], -1)
        position = np.linspace(0, X.shape[-1], X.shape[-1])
        X = X + position[None, :]
        model = AgglomerativeClustering().fit(X)

        # plot the top three levels of the dendrogram
        linkage_matrix, labels = self.plot_dendrogram(X, model, threshold=threshold)

        if save_path:
            os.makedirs(save_path, exist_ok=True)
            plt.figure(figsize=(20, 10))
            plt.title('Hierarchical Clustering Dendrogram')
            R = dendrogram(linkage_matrix, truncate_mode='level')
            plt.xlabel("Number of points in node (or index of point if no parenthesis).")
            plt.savefig(os.path.join(save_path, '{}_dendrogram.png'.format(self.layer)))
        return labels
| 29.235294 | 145 | 0.674268 | 640 | 4,473 | 4.625 | 0.301563 | 0.030743 | 0.02027 | 0.012162 | 0.067568 | 0.067568 | 0.067568 | 0.067568 | 0.067568 | 0.067568 | 0 | 0.023888 | 0.185781 | 4,473 | 152 | 146 | 29.427632 | 0.788852 | 0.224011 | 0 | 0.045977 | 0 | 0 | 0.048667 | 0.007822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.091954 | 0 | 0.183908 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
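# A small worked example (an editorial addition) of the threshold arithmetic in
# plot_dendrogram above. sklearn numbers merged nodes upward from n_samples, so
# a split-node id near the dendrogram root is around 1.5 * n; the values below
# are made up to show the level -> nclusters conversion.
import numpy as np

n = 64                              # hypothetical number of leaf points
splitnode = 1.5 * n - 1             # hypothetical node id where the cut happens
level = np.log((-.5 * splitnode) / n + 1.) / np.log(.5)
print(int(np.round(n / 2.**level)))  # implied cluster count, ~16 here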
faa2a166815fc698776e953f5e5ffa0e59a2da75 | 1,032 | py | Python | UFAL/PAA/BellmanFord/10557UVAXYZZY.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 3 | 2018-12-18T13:39:42.000Z | 2021-06-23T18:05:18.000Z | UFAL/PAA/BellmanFord/10557UVAXYZZY.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 1 | 2018-11-02T21:32:40.000Z | 2018-11-02T22:47:12.000Z | UFAL/PAA/BellmanFord/10557UVAXYZZY.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 6 | 2018-10-27T14:07:52.000Z | 2019-11-14T13:49:29.000Z | # For some unknown reason... UVA keeps returning RuntimeError for Python on this question...
DEBUG = 0
inf = 2**33

while (True):
    rooms = -2
    while (rooms == -2):
        try:
            rooms = int(input())
        except EOFError:
            rooms = -1  # treat EOF like the -1 terminator so the outer loop exits
            break
        except ValueError:
            continue
    if (rooms == -1): break
    if (rooms != 0):
        preGraph = [[] for i in range(rooms + 1)]
        energy = [0] * (rooms + 1)
        for i in range(1, rooms + 1):
            line = list(map(int, input().split()))
            energy[i] = line[0]
            for j in range(line[1]):
                preGraph[i] += [line[j + 2]]
        graph = [[] for i in range(rooms + 1)]
        for i in range(1, rooms + 1):
            for u in preGraph[i]:
                graph[i] += [[u, energy[u]]]
        cost = [inf] * (rooms + 1)
        # bellmanFord(graph, cost, path, loop, 1)
        canWin = 1
        # if (DEBUG): print("path", path, "cost", cost)
        if (canWin):
            print("winnable")
        else:
            print(canWin) if False else print("hopeless")
| 25.170732 | 89 | 0.478682 | 131 | 1,032 | 3.770992 | 0.389313 | 0.08502 | 0.048583 | 0.089069 | 0.153846 | 0.153846 | 0.097166 | 0.097166 | 0.097166 | 0 | 0 | 0.033742 | 0.368217 | 1,032 | 40 | 90 | 25.8 | 0.723926 | 0.165698 | 0 | 0.066667 | 0 | 0 | 0.01867 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
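# The bellmanFord call above is commented out and canWin is hard-coded to 1.
# Below is one possible shape such a relaxation could take, matching the
# adjacency list built above (graph[u] holds [v, energy[v]] pairs); the name,
# signature, and semantics are the editor's assumption, not the author's
# original helper.
def bellman_ford(graph, cost, source):
    cost[source] = 0
    n = len(graph) - 1
    for _ in range(n - 1):
        for u in range(1, n + 1):
            for v, w in graph[u]:
                # energy gained on entering v is modelled as a negative edge weight
                if cost[u] - w < cost[v]:
                    cost[v] = cost[u] - w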
faa38c67c94b04c9192f47b8ef3a9d3344c206b5 | 3,531 | py | Python | src/heartbeat/auth.py | pbs/django-heartbeat | 886e5b47c730498816c1458bb1f7e99e622afa9d | [
"MIT"
] | 29 | 2016-02-04T12:50:13.000Z | 2022-01-17T13:57:56.000Z | src/heartbeat/auth.py | pbs/django-heartbeat | 886e5b47c730498816c1458bb1f7e99e622afa9d | [
"MIT"
] | 2 | 2016-04-08T14:39:14.000Z | 2020-07-09T06:55:12.000Z | src/heartbeat/auth.py | pbs/django-heartbeat | 886e5b47c730498816c1458bb1f7e99e622afa9d | [
"MIT"
] | 10 | 2016-02-26T19:34:26.000Z | 2020-01-27T20:16:00.000Z | import re
import base64
import logging
from functools import wraps
from ipaddress import ip_address, ip_network

from .settings import HEARTBEAT

from django.http import HttpResponse
from django.core.exceptions import ImproperlyConfigured

logging.basicConfig(
    format='%(levelname)s - %(message)s',
    level=logging.INFO)
logger = logging.getLogger(__name__)


def auth(func):
    @wraps(func)
    def _decorator(request, *args, **kwargs):
        auth = get_auth()
        if auth.get('disable', False) is True:
            return func(request, *args, **kwargs)
        if 'authorized_ips' in auth:
            ip = get_client_ip(request)
            if is_authorized(ip, auth['authorized_ips']):
                return func(request, *args, **kwargs)
        prepare_credentials(auth)
        if request.META.get('HTTP_AUTHORIZATION'):
            authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ')
            if authmeth.lower() == 'basic':
                auth = base64.b64decode(auth).decode('utf-8')
                username, password = auth.split(':')
                if (username == HEARTBEAT['auth']['username'] and
                        password == HEARTBEAT['auth']['password']):
                    return func(request, *args, **kwargs)
        response = HttpResponse(
            "Authentication failed", status=401)
        response['WWW-Authenticate'] = 'Basic realm="Welcome to 1337"'
        return response
    return _decorator


def get_auth():
    auth = HEARTBEAT.get('auth')
    if not auth:
        raise ImproperlyConfigured('Missing auth configuration for heartbeat')
    return auth


def prepare_credentials(auth):
    if not all([auth.get('username'), auth.get('password')]):
        raise ImproperlyConfigured(
            'Username or password missing from auth configuration '
            'for heartbeat')


def get_access_route(request):
    meta = request.META
    return (
        meta.get('HTTP_X_FORWARDED_FOR') or meta.get('REMOTE_ADDR')
    ).split(',')


def get_client_ip(request):
    access_route = get_access_route(request)
    if len(access_route) == 1:
        return access_route[0]

    expression = """
        (^(?!(?:[0-9]{1,3}\.){3}[0-9]{1,3}$).*$)|  # will match non valid ipV4
        (^127\.0\.0\.1)|          # will match 127.0.0.1
        (^10\.)|                  # will match 10.0.0.0 - 10.255.255.255 IP-s
        (^172\.1[6-9]\.)|         # will match 172.16.0.0 - 172.19.255.255 IP-s
        (^172\.2[0-9]\.)|         # will match 172.20.0.0 - 172.29.255.255 IP-s
        (^172\.3[0-1]\.)|         # will match 172.30.0.0 - 172.31.255.255 IP-s
        (^192\.168\.)             # will match 192.168.0.0 - 192.168.255.255 IP-s
    """
    regex = re.compile(expression, re.X)
    for ip in access_route:
        if not ip:
            # it's possible that the first value from X_FORWARDED_FOR
            # will be null, so we need to skip that value
            continue
        if regex.search(ip):
            continue
        else:
            return ip


def is_authorized(ip, authorized_ips):
    ip = ip_address(ip)
    for item in authorized_ips:
        try:
            if ip == ip_address(item):
                return True
        except ValueError:
            try:
                if ip in ip_network(item):
                    return True
            except ValueError:
                logger.warn('The "authorized_ip" list (settings.HEARTBEAT) '
                            'contains an item that is neither an ip address '
                            'nor an ip network: {}'.format(item))
| 33.311321 | 78 | 0.580572 | 444 | 3,531 | 4.522523 | 0.310811 | 0.007968 | 0.01992 | 0.02241 | 0.088147 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061625 | 0.292268 | 3,531 | 105 | 79 | 33.628571 | 0.741897 | 0.028037 | 0 | 0.127907 | 0 | 0.069767 | 0.278798 | 0.011957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081395 | false | 0.046512 | 0.093023 | 0 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
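# A hypothetical HEARTBEAT settings block (an editorial sketch, inferred only
# from the keys this module reads: 'disable', 'authorized_ips', 'username',
# 'password'), showing how the auth decorator above would be configured:
HEARTBEAT = {
    'auth': {
        'username': 'ops',
        'password': 'change-me',
        'authorized_ips': ['10.0.0.0/8', '127.0.0.1'],
        # 'disable': True,  # would skip authentication entirely
    },
}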
faa6fad861b4fa4374ccfff188630051bf80c8fc | 14,578 | py | Python | saleor/dashboard/order/forms.py | djlowes/russell-westbark | 7c3857e8a35b3bd0fea575ef0031360a1c5c0254 | [
"BSD-3-Clause"
] | null | null | null | saleor/dashboard/order/forms.py | djlowes/russell-westbark | 7c3857e8a35b3bd0fea575ef0031360a1c5c0254 | [
"BSD-3-Clause"
] | null | null | null | saleor/dashboard/order/forms.py | djlowes/russell-westbark | 7c3857e8a35b3bd0fea575ef0031360a1c5c0254 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from django.conf import settings
from django.core.validators import MinValueValidator
from django.urls import reverse_lazy
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django_prices.forms import MoneyField
from payments import PaymentError, PaymentStatus

from ...account.i18n import (
    AddressForm as StorefrontAddressForm, PossiblePhoneNumberFormField)
from ...cart.forms import QuantityField
from ...core.exceptions import InsufficientStock
from ...discount.utils import decrease_voucher_usage
from ...order.emails import send_note_confirmation
from ...order.models import Fulfillment, FulfillmentLine, OrderLine, OrderNote
from ...order.utils import (
    add_variant_to_order, cancel_fulfillment, cancel_order,
    change_order_line_quantity, merge_duplicates_into_order_line,
    recalculate_order)
from ...product.models import Product, ProductVariant, Stock
from ...product.utils import allocate_stock, deallocate_stock
from ..forms import AjaxSelect2ChoiceField
from ..widgets import PhonePrefixWidget
from .utils import fulfill_order_line


class OrderNoteForm(forms.ModelForm):
    class Meta:
        model = OrderNote
        fields = ['content', 'is_public']
        widgets = {
            'content': forms.Textarea()}
        labels = {
            'content': pgettext_lazy('Order note', 'Note'),
            'is_public': pgettext_lazy(
                'Allow customers to see note toggle',
                'Customer can see this note')}

    def send_confirmation_email(self):
        order = self.instance.order
        send_note_confirmation.delay(order.pk)


class ManagePaymentForm(forms.Form):
    amount = MoneyField(
        label=pgettext_lazy(
            'Payment management form (capture, refund, release)', 'Amount'),
        max_digits=12,
        decimal_places=2,
        currency=settings.DEFAULT_CURRENCY)

    def __init__(self, *args, **kwargs):
        self.payment = kwargs.pop('payment')
        super().__init__(*args, **kwargs)

    def clean(self):
        if self.payment.status != self.clean_status:
            raise forms.ValidationError(self.clean_error)

    def payment_error(self, message):
        self.add_error(
            None, pgettext_lazy(
                'Payment form error', 'Payment gateway error: %s') % message)

    def try_payment_action(self, action):
        money = self.cleaned_data['amount']
        try:
            action(money.amount)
        except (PaymentError, ValueError) as e:
            self.payment_error(str(e))
            return False
        return True


class CapturePaymentForm(ManagePaymentForm):
    clean_status = PaymentStatus.PREAUTH
    clean_error = pgettext_lazy('Payment form error',
                                'Only pre-authorized payments can be captured')

    def capture(self):
        return self.try_payment_action(self.payment.capture)


class RefundPaymentForm(ManagePaymentForm):
    clean_status = PaymentStatus.CONFIRMED
    clean_error = pgettext_lazy('Payment form error',
                                'Only confirmed payments can be refunded')

    def refund(self):
        return self.try_payment_action(self.payment.refund)


class ReleasePaymentForm(forms.Form):
    def __init__(self, *args, **kwargs):
        self.payment = kwargs.pop('payment')
        super().__init__(*args, **kwargs)

    def clean(self):
        if self.payment.status != PaymentStatus.PREAUTH:
            raise forms.ValidationError(
                pgettext_lazy(
                    'Payment form error',
                    'Only pre-authorized payments can be released'))

    def payment_error(self, message):
        self.add_error(
            None, pgettext_lazy(
                'Payment form error', 'Payment gateway error: %s') % message)

    def release(self):
        try:
            self.payment.release()
        except (PaymentError, ValueError) as e:
            self.payment_error(str(e))
            return False
        return True


class CancelOrderLineForm(forms.Form):
    def __init__(self, *args, **kwargs):
        self.line = kwargs.pop('line')
        super().__init__(*args, **kwargs)

    def cancel_line(self):
        if self.line.stock:
            deallocate_stock(self.line.stock, self.line.quantity)
        order = self.line.order
        self.line.delete()
        recalculate_order(order)


class ChangeQuantityForm(forms.ModelForm):
    quantity = QuantityField(
        validators=[MinValueValidator(1)])

    class Meta:
        model = OrderLine
        fields = ['quantity']
        labels = {
            'quantity': pgettext_lazy(
                'Integer number', 'Quantity')}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.initial_quantity = self.instance.quantity
        self.fields['quantity'].initial = self.initial_quantity

    def clean_quantity(self):
        quantity = self.cleaned_data['quantity']
        delta = quantity - self.initial_quantity
        stock = self.instance.stock
        if stock and delta > stock.quantity_available:
            raise forms.ValidationError(
                npgettext_lazy(
                    'Change quantity form error',
                    'Only %(remaining)d remaining in stock.',
                    'Only %(remaining)d remaining in stock.',
                    'remaining') % {
                        'remaining': (
                            self.initial_quantity + stock.quantity_available)})
        return quantity

    def save(self):
        quantity = self.cleaned_data['quantity']
        stock = self.instance.stock
        if stock is not None:
            # update stock allocation
            delta = quantity - self.initial_quantity
            allocate_stock(stock, delta)
        change_order_line_quantity(self.instance, quantity)
        recalculate_order(self.instance.order)
        return self.instance


class CancelOrderForm(forms.Form):
    """Allow canceling an entire order.

    Deallocate or increase corresponding stocks for each order line.
    """

    restock = forms.BooleanField(initial=True, required=False)

    def __init__(self, *args, **kwargs):
        self.order = kwargs.pop('order')
        super().__init__(*args, **kwargs)
        self.fields['restock'].label = npgettext_lazy(
            'Cancel order form action',
            'Restock %(quantity)d item',
            'Restock %(quantity)d items',
            'quantity') % {'quantity': self.order.get_total_quantity()}

    def clean(self):
        data = super().clean()
        if not self.order.can_cancel():
            raise forms.ValidationError(
                pgettext_lazy(
                    'Cancel order form error',
                    "This order can't be cancelled"))
        return data

    def cancel_order(self):
        cancel_order(self.order, self.cleaned_data.get('restock'))


class CancelFulfillmentForm(forms.Form):
    """Allow canceling an entire fulfillment.

    Increase corresponding stocks for each fulfillment line.
    """

    restock = forms.BooleanField(initial=True, required=False)

    def __init__(self, *args, **kwargs):
        self.fulfillment = kwargs.pop('fulfillment')
        super().__init__(*args, **kwargs)
        self.fields['restock'].label = npgettext_lazy(
            'Cancel fulfillment form action',
            'Restock %(quantity)d item',
            'Restock %(quantity)d items',
            'quantity') % {'quantity': self.fulfillment.get_total_quantity()}

    def clean(self):
        data = super().clean()
        if not self.fulfillment.can_edit():
            raise forms.ValidationError(
                pgettext_lazy(
                    'Cancel fulfillment form error',
                    'This fulfillment can\'t be canceled'))
        return data

    def cancel_fulfillment(self):
        cancel_fulfillment(self.fulfillment, self.cleaned_data.get('restock'))


class FulfillmentTrackingNumberForm(forms.ModelForm):
    """Update tracking number in fulfillment group."""

    send_mail = forms.BooleanField(
        initial=True, required=False, label=pgettext_lazy(
            'Send mail to customer',
            'Send notification email to customer'))

    class Meta:
        model = Fulfillment
        fields = ['tracking_number']
        labels = {
            'tracking_number': pgettext_lazy(
                'Fulfillment record', 'Tracking number')}


class RemoveVoucherForm(forms.Form):
    def __init__(self, *args, **kwargs):
        self.order = kwargs.pop('order')
        super().__init__(*args, **kwargs)

    def clean(self):
        data = super().clean()
        if not self.order.voucher:
            raise forms.ValidationError(
                pgettext_lazy(
                    'Remove voucher form error',
                    'This order has no voucher'))
        return data

    def remove_voucher(self):
        self.order.discount_amount = 0
        self.order.discount_name = ''
        decrease_voucher_usage(self.order.voucher)
        self.order.voucher = None
        recalculate_order(self.order)


PAYMENT_STATUS_CHOICES = (
    [('', pgettext_lazy('Payment status field value', 'All'))] +
    PaymentStatus.CHOICES)


class PaymentFilterForm(forms.Form):
    status = forms.ChoiceField(choices=PAYMENT_STATUS_CHOICES)


class StockChoiceField(forms.ModelChoiceField):
    def label_from_instance(self, obj):
        return obj.location.name


class ChangeStockForm(forms.ModelForm):
    stock = StockChoiceField(queryset=Stock.objects.none())

    class Meta:
        model = OrderLine
        fields = ['stock']
        labels = {
            'stock': pgettext_lazy(
                'Stock record', 'Stock')}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        sku = self.instance.product_sku
        self.fields['stock'].queryset = Stock.objects.filter(variant__sku=sku)
        self.old_stock = self.instance.stock

    def clean_stock(self):
        stock = self.cleaned_data['stock']
        if stock and stock.quantity_available < self.instance.quantity:
            raise forms.ValidationError(
                pgettext_lazy(
                    'Change stock form error',
                    'Only %(remaining)d remaining in this stock.') % {
                        'remaining': stock.quantity_available})
        return stock

    def save(self, commit=True):
        quantity = self.instance.quantity
        stock = self.instance.stock
        self.instance.stock_location = (
            stock.location.name if stock.location else '')
        if self.old_stock:
            deallocate_stock(self.old_stock, quantity)
        allocate_stock(stock, quantity)
        super().save(commit)
        merge_duplicates_into_order_line(self.instance)
        return self.instance


class AddVariantToOrderForm(forms.Form):
    """Allow adding lines with given quantity to an order."""

    variant = AjaxSelect2ChoiceField(
        queryset=ProductVariant.objects.filter(
            product__in=Product.objects.available_products()),
        fetch_data_url=reverse_lazy('dashboard:ajax-available-variants'))
    quantity = QuantityField(
        label=pgettext_lazy(
            'Add variant to order form label', 'Quantity'),
        validators=[MinValueValidator(1)])

    def __init__(self, *args, **kwargs):
        self.order = kwargs.pop('order')
        self.discounts = kwargs.pop('discounts')
        super().__init__(*args, **kwargs)

    def clean(self):
        """Check if given quantity is available in stocks."""
        cleaned_data = super().clean()
        variant = cleaned_data.get('variant')
        quantity = cleaned_data.get('quantity')
        if variant and quantity is not None:
            try:
                variant.check_quantity(quantity)
            except InsufficientStock as e:
                error = forms.ValidationError(
                    pgettext_lazy(
                        'Add item form error',
                        'Could not add item. '
                        'Only %(remaining)d remaining in stock.' %
                        {'remaining': e.item.get_stock_quantity()}))
                self.add_error('quantity', error)
        return cleaned_data

    def save(self):
        """Add variant to order.

        Updates stocks and order.
        """
        variant = self.cleaned_data.get('variant')
        quantity = self.cleaned_data.get('quantity')
        add_variant_to_order(
            self.order, variant, quantity, self.discounts)
        recalculate_order(self.order)


class AddressForm(StorefrontAddressForm):
    phone = PossiblePhoneNumberFormField(
        widget=PhonePrefixWidget, required=False)


class FulfillmentForm(forms.ModelForm):
    """Create fulfillment group for a given order."""

    send_mail = forms.BooleanField(
        initial=True, required=False, label=pgettext_lazy(
            'Send mail to customer',
            'Send shipment details to your customer now'))

    class Meta:
        model = Fulfillment
        fields = ['tracking_number']
        labels = {
            'tracking_number': pgettext_lazy(
                'Order tracking number',
                'Tracking number')}

    def __init__(self, *args, **kwargs):
        order = kwargs.pop('order')
        super().__init__(*args, **kwargs)
        self.instance.order = order


class BaseFulfillmentLineFormSet(forms.BaseModelFormSet):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for form in self.forms:
            form.empty_permitted = False


class FulfillmentLineForm(forms.ModelForm):
    """Fulfill order line with given quantity by decreasing stock."""

    class Meta:
        model = FulfillmentLine
        fields = ['order_line', 'quantity']

    def clean_quantity(self):
        quantity = self.cleaned_data.get('quantity')
        order_line = self.cleaned_data.get('order_line')
        if quantity > order_line.quantity_unfulfilled:
            raise forms.ValidationError(npgettext_lazy(
                'Fulfill order line form error',
                '%(quantity)d item remaining to fulfill.',
                '%(quantity)d items remaining to fulfill.',
                'quantity') % {
                    'quantity': order_line.quantity_unfulfilled,
                    'order_line': order_line})
        return quantity

    def save(self, commit=True):
        fulfill_order_line(self.instance.order_line, self.instance.quantity)
        return super().save(commit)
| 33.902326 | 79 | 0.624571 | 1,507 | 14,578 | 5.864632 | 0.163238 | 0.029871 | 0.013691 | 0.018669 | 0.381082 | 0.303123 | 0.269971 | 0.24157 | 0.220525 | 0.190654 | 0 | 0.000946 | 0.275072 | 14,578 | 429 | 80 | 33.981352 | 0.835352 | 0.035533 | 0 | 0.407295 | 0 | 0 | 0.129076 | 0.00236 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109422 | false | 0 | 0.057751 | 0.009119 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
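# A tiny illustration (an editorial sketch, not saleor code) of the delta logic
# in ChangeQuantityForm.save above: only the difference between the new and the
# initial quantity is passed to allocate_stock, so raising 3 -> 5 reserves two
# more units, while lowering 3 -> 1 would pass -2 and presumably release them.
initial_quantity, quantity = 3, 5
delta = quantity - initial_quantity
print(delta)  # 2; allocate_stock(stock, delta) would reserve two more units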
fab51fc18a5b7d1bb4f84ed9d207bcf5e8fe01fb | 9,740 | py | Python | mina-telegram-alert.py | Makalfo/mina-telegram-alert | 3a18b16528e43385300110f6893c6deab2ad5512 | [
"MIT"
] | 1 | 2022-01-03T13:55:36.000Z | 2022-01-03T13:55:36.000Z | mina-telegram-alert.py | Makalfo/mina-telegram-alert | 3a18b16528e43385300110f6893c6deab2ad5512 | [
"MIT"
] | null | null | null | mina-telegram-alert.py | Makalfo/mina-telegram-alert | 3a18b16528e43385300110f6893c6deab2ad5512 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import requests, configparser
import os
import pandas as pd
import json
import base58
import time
import urllib.request
from google.cloud import bigquery

pd.options.mode.chained_assignment = None

# Mina constants
MINA_DECIMALS = 1 / 1000000000
SLEEP_TIME = 60


class MinaTelegram():

    def __init__(self, config_file='config.ini'):
        # read the config and setup telegram
        self.name = os.uname()[1]
        self.read_config(config_file)
        self.setup_telegram()
        self.public_key = self.config['Mina']['public_key']

        # set the bigquery credentials before creating the client
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.config['BigQuery']['credentials']
        self.client = bigquery.Client()

        # transaction totals
        self.received = 0
        self.sent = 0

        # get the provider info
        self.providers = self.get_providers()

        # hello message
        self.send(f'{self.name}: Hello from Mina Watcher!')

        while True:
            # obtain blocks
            blocks = self.get_blocks(self.config['Mina']['last_block'])

            # check if the block is empty
            if blocks.empty:
                print(f'Empty Blocks - Sleeping for {SLEEP_TIME}')
                time.sleep(SLEEP_TIME)
                continue

            block_list = blocks['blockheight'].unique()
            block_list.sort()

            # save the datetime
            blocks['datetime'] = blocks['datetime'].apply(lambda x: pd.to_datetime(str(x)))
            blocks['receivedtime'] = blocks['receivedtime'].apply(lambda x: pd.to_datetime(str(x)))
            blocks['date'] = blocks['datetime'].dt.date
            blocks['time'] = blocks['datetime'].dt.time
            blocks['received_date'] = blocks['receivedtime'].dt.date
            blocks['received_time'] = blocks['receivedtime'].dt.time
            blocks['delta_time'] = blocks['receivedtime'] - blocks['datetime']
            blocks['delta_time'] = blocks['delta_time'].apply(lambda x: x.total_seconds())

            # parse the blocks
            for blockheight in block_list:
                # print(f'Parsing blockheight: {blockheight}')
                # obtain all the blocks of the block height
                blocks_of_height = blocks.loc[blocks['blockheight'] == blockheight]
                self.parse_blocks(blocks_of_height)

            # save / update the config file
            self.config['Mina']['last_block'] = str(blocks['blockheight'].max())
            with open(config_file, 'w') as configfile:
                self.config.write(configfile)

            print(f'Sleeping for {SLEEP_TIME}')
            time.sleep(SLEEP_TIME)

    def read_config(self, config_file):
        '''Read the configuration file'''
        config = configparser.ConfigParser()
        config.read(config_file)
        self.config = config

    def setup_telegram(self):
        '''Setup telegram'''
        self.telegram_token = self.config['Telegram']['telegram_token']
        self.telegram_chat_id = self.config['Telegram']['telegram_chat_id']

    def send(self, msg):
        '''Send a telegram message'''
        requests.post(f'https://api.telegram.org/bot{self.telegram_token}/sendMessage?chat_id={self.telegram_chat_id}&text={msg}')
        print(msg)

    def get_blocks(self, target_blockheight):
        '''Get the blocks above the target block height'''
        query = """
            SELECT blockheight,
                creator,
                canonical,
                datetime,
                receivedtime,
                transactions,
                statehash,
            FROM minaexplorer.archive.blocks
            WHERE blockheight > %s
            ORDER BY blockheight DESC""" % target_blockheight
        query_job = self.client.query(query)
        iterator = query_job.result()
        rows = list(iterator)
        # if the query returns no data, return an empty dataframe
        if len(rows) == 0:
            return pd.DataFrame()
        # Transform the rows into a nice pandas dataframe
        df = pd.DataFrame(data=[list(x.values()) for x in rows], columns=list(rows[0].keys()))
        df.drop_duplicates(subset=['statehash'])
        df.sort_values(by=['blockheight'], inplace=True)
        return df

    def get_providers(self):
        '''get the provider list'''
        output = dict()
        # staketab providers
        with urllib.request.urlopen(self.config['Providers']['staketab']) as url:
            data = json.loads(url.read().decode())
            for provider in data['staking_providers']:
                output[provider['provider_address']] = provider['provider_title']
        # Mina Foundation
        mf_data = self.get_csv(self.config['Providers']['mina_foundation'])
        for idx, address in enumerate(mf_data):
            output[address] = f'Mina Foundation {idx}'
        # O1 Labs
        mf_data = self.get_csv(self.config['Providers']['o1_labs'])
        for idx, address in enumerate(mf_data):
            output[address] = f'O1 Labs {idx}'
        # unofficial accounts
        unofficial = pd.read_csv(self.config['Providers']['unofficial'])
        for idx, row in unofficial.iterrows():
            output[row['address']] = row['identity']
        return output

    def get_csv(self, url):
        '''return the csv as a list'''
        req = urllib.request.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0')
        content = urllib.request.urlopen(req)
        data = pd.read_csv(content, header=None)
        return list(data[0])

    def get_provider(self, address):
        '''return the provider name if it is in the provider dictionary'''
        if address in self.providers.keys():
            address = self.providers[address]
        return address

    def parse_blocks(self, blocks):
        '''parse the blocks of the same blockheight'''
        # parse the canonical blocks first
        blocks.sort_values(by=['canonical'], inplace=True, ascending=False)
        for index, block in blocks.iterrows():
            self.parse_block(block)

    def parse_block(self, block):
        '''parse the block'''
        # canonical flag
        canonical = 'Canonical' if block.canonical == True else 'Non-Canonical'
        # parse the transactions
        transactions = self.parse_transactions(block.transactions)
        # check if the creator of the block is the public_key
        if block.creator == self.public_key:
            self.send(f"{canonical} {block.blockheight}: Created Block - {self.get_provider(transactions['coinbase_receiver'])} Received {transactions['coinbase_reward']} at {block.date} {block.time} [ {block.delta_time} ]")
        # check the transactions
        if canonical == 'Canonical':
            for transaction in transactions['user_commands']:
                if self.public_key in [transaction['from'], transaction['to']]:
                    # if it is a delegation, omit the amount
                    if transaction['kind'] == 'STAKE_DELEGATION':
                        self.send(f"{canonical} {block.blockheight}: Stake Delegation from {self.get_provider(transaction['from'])} to {self.get_provider(transaction['to'])} [{transaction['memo'].strip()}] at {block.date} {block.time}")
                    else:
                        self.send(f"{canonical} {block.blockheight}: {transaction['kind'].capitalize()} from {self.get_provider(transaction['from'])} to {self.get_provider(transaction['to'])} for {transaction['amount']} [{transaction['memo'].strip()}] at {block.date} {block.time}")
                    if transaction['kind'] == 'PAYMENT':
                        if transaction['from'] == self.public_key:
                            self.sent += transaction['amount']
                        else:
                            self.received += transaction['amount']
                        # Send how much has gone through the account so far
                        # self.send(f'Sent: {self.sent}\tReceived: {self.received}')

    def parse_transactions(self, transactions):
        '''parse transactions'''
        if transactions['feetransfer'] == None or transactions['usercommands'] == None:
            return {'coinbase_reward': 0,
                    'coinbase_receiver': '',
                    'user_commands': []}
        output = {}
        # parse the rewards
        output['coinbase_reward'] = transactions['coinbase'] * MINA_DECIMALS
        output['coinbase_receiver'] = transactions['coinbasereceiveraccount']['publickey']
        output['fee'] = []
        # parse the fee transfers
        for fee_transfer in json.loads(transactions['feetransfer']):
            output['fee'].append(fee_transfer['fee'] * MINA_DECIMALS)
        # user commands
        output['user_commands'] = []
        for user_command in json.loads(transactions['usercommands']):
            user_tx = {'from': user_command['from'],
                       'to': user_command['to'],
                       'amount': round(user_command['amount'] * MINA_DECIMALS, 4),
                       'fee': round(user_command['fee'] * MINA_DECIMALS, 4),
                       'kind': user_command['kind'],
                       'memo': self.decode_memo(user_command['memo'])}
            output['user_commands'].append(user_tx)
        return output

    def decode_memo(self, memo):
        '''decode the memo'''
        decoded = base58.b58decode(memo)[2:-4]
        return decoded.decode("utf-8", "strict")


mina_bot = MinaTelegram()
| 40.92437 | 291 | 0.580698 | 1,053 | 9,740 | 5.253561 | 0.242165 | 0.025307 | 0.016269 | 0.0188 | 0.12256 | 0.110629 | 0.092191 | 0.092191 | 0.051338 | 0.039046 | 0 | 0.008326 | 0.297125 | 9,740 | 237 | 292 | 41.097046 | 0.799737 | 0.114784 | 0 | 0.053333 | 0 | 0.033333 | 0.250796 | 0.031246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.053333 | 0 | 0.193333 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
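# A standalone illustration (an editorial addition) of the decode_memo logic
# above: base58-decode the memo, then drop a 2-byte prefix and a 4-byte
# checksum before UTF-8 decoding the payload. The payload bytes here are made
# up for the example; real Mina memos carry a type/length prefix and checksum.
import base58

payload = b'\x01\x05hello' + b'\x00\x00\x00\x00'  # hypothetical prefix + checksum
memo = base58.b58encode(payload).decode()
print(base58.b58decode(memo)[2:-4].decode('utf-8'))  # -> 'hello'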
fab6e383e6d4d6588e9129e39975cf189d7be4a2 | 1,935 | py | Python | genetic_algorithm/Prepare_Data_Functions.py | pozzo-research-group/HEAD | 98572c691d0dbef4da19a719427a4b946937e342 | [
"MIT"
] | 1 | 2022-03-31T04:29:54.000Z | 2022-03-31T04:29:54.000Z | genetic_algorithm/Prepare_Data_Functions.py | pozzo-research-group/HEAD | 98572c691d0dbef4da19a719427a4b946937e342 | [
"MIT"
] | null | null | null | genetic_algorithm/Prepare_Data_Functions.py | pozzo-research-group/HEAD | 98572c691d0dbef4da19a719427a4b946937e342 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
def load_df(directory):
df = pd.read_excel(directory)
return df
def subtract_baseline(df, baseline):
x = df.values
cols = df.columns
baseline_col = df[baseline].values
for i in range(1,x.shape[1]):
x[:,i] = x[:,i] - baseline_col
df = pd.DataFrame(x, columns = cols)
return df
def delete_rows(df, rows_delete):
x = df.values
cols = df.columns
x = x[rows_delete:,:]
df = pd.DataFrame(x, columns = cols)
return df
def normalize_df(df):
x = df.values #returns a numpy array
cols = df.columns
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
x_scaled = np.hstack((x[:,0].reshape(-1,1),x_scaled[:,1:]))
df = pd.DataFrame(x_scaled, columns = cols)
return df
def plot_single_graph(df, column):
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(df['Wavelength'], df[column])
ax.set_xlabel('Wavelength (nm)')
ax.set_ylabel('Absorbance')
def find_max_wavelength(df, column):
array = np.vstack((df['Wavelength'], df[column])).T
sorted_array = array[np.argsort(array[:, 1])]
max_wavelength = sorted_array[-1,0]
return max_wavelength
def plot_all_spectra_multiple(df):
for i in range(1, len(df.columns)):
plot_single_graph(df.columns[i])
def plot_all_spectra_single(df):
for i in range(1, len(df.columns)):
plt.plot(df['Wavelength'], df[df.columns[i]])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Absorbance')
def plot_some_spectra_single(df, cols):
plt.figure(figsize=(8,5))
for i in range(0, len(cols)):
plt.plot(df['Wavelength'], df[cols[i]])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Absorbance')
plt.xlim([400,800])
# plt.legend()
| 29.318182 | 63 | 0.630491 | 284 | 1,935 | 4.158451 | 0.28169 | 0.053345 | 0.037257 | 0.037257 | 0.276037 | 0.211685 | 0.174428 | 0.174428 | 0.104996 | 0 | 0 | 0.014716 | 0.22739 | 1,935 | 66 | 64 | 29.318182 | 0.775251 | 0.017571 | 0 | 0.333333 | 0 | 0 | 0.060526 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.074074 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
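# A brief usage sketch for normalize_df above (an editorial addition; assumes
# the first column is the wavelength axis, as the plotting helpers do): every
# column is min-max scaled into [0, 1] except the first, which keeps its
# original scale.
import pandas as pd

df = pd.DataFrame({'Wavelength': [400, 600, 800], 'A': [0.2, 0.5, 1.4]})
print(normalize_df(df))  # 'A' becomes [0.0, 0.25, 1.0]; Wavelength is unchanged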