hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e702ceebd6384acfed75804122d1e9b9864c6c7 | 2,776 | py | Python | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | null | null | null | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | null | null | null | add.py | plasticuproject/cert-dbms | 0a8f1d8eb69610fa1c0403c08d3d3ac057e3d698 | [
"MIT"
] | 1 | 2020-10-27T12:06:36.000Z | 2020-10-27T12:06:36.000Z | #!/usr/bin/python3
"""add.py"""
from sys import argv
import datetime
import sqlite3
import pathlib
PATH = pathlib.Path.cwd()
HELP_TEXT = '''
Usage: add.py [-h] directory
-h, --help bring up this help message
directory directory with certs to add
'''
def add_certs(cert_dir: str) -> None:
    """Add new certs to database. Initialize database if none exists.

    The database file is named ``<cert_dir>.db`` and is created in the
    current working directory (``PATH``).  Only ``.txt`` files directly
    inside *cert_dir* are treated as cert files; files whose name already
    exists in the table (primary key) are skipped.  A summary of added and
    skipped certs is printed at the end.
    """
    # If DATABASE does not exist, initialize it
    d_b = cert_dir + '.db'
    if (PATH / d_b).is_file() is False:
        con = sqlite3.connect(d_b)
        cursor_obj = con.cursor()
        # NOTE(review): this first connection is never explicitly closed;
        # sqlite auto-commits DDL so it works, but closing it would be tidier.
        cursor_obj.execute(
            'CREATE TABLE certs(id text PRIMARY KEY, date_added text, applied integer, date_applied text, banned integer, banned_date text, required_activation integer, currently_used integer)'
        )
    # Add new cert file info for all UNIQUE cert files from directory
    con = sqlite3.connect(d_b)
    cursor_obj = con.cursor()
    added_certs = []
    skipped_certs = []
    add_path = PATH / cert_dir
    for cert_file in add_path.iterdir():
        # Check that file in directory is indeed a cert file and set values
        if cert_file.is_file(
        ) and cert_file.suffix == '.txt':  # TODO find file sig
            cert_name = cert_file.name
            added = datetime.datetime.now()
            # Values for (id, date_added, applied, banned,
            # required_activation, currently_used); all flags start at 0.
            entities = (cert_name, added, 0, 0, 0, 0)
            # Try to add UNIQUE cert file to DATABASE
            try:
                cursor_obj.execute(
                    'INSERT INTO certs(id, date_added, applied, banned, required_activation, currently_used) VALUES(?, ?, ?, ?, ?, ?)',
                    entities)
                con.commit()
                added_certs.append(cert_name)
            # If cert file is already in DATABASE then skip
            except sqlite3.IntegrityError:
                skipped_certs.append(cert_name)
    con.close()
    # Print output
    if skipped_certs:
        print('\n[*] Already in DATABASE, skipping:\n')
        for _x in skipped_certs:
            print('\t' + _x)
    if added_certs:
        print('\n\n[*] Added to the DATABASE:\n')
        for _x in added_certs:
            print('\t' + _x)
    print(f'\n\n[*] Added: {len(added_certs)}')
    print(f'[*] Skipped {len(skipped_certs)}\n')
if __name__ == '__main__':
    # Check for help flag
    if len(argv) < 2 or argv[1] == '--help' or argv[1] == '-h':
        print(HELP_TEXT)
        quit()
    # Check if directory name is valid, run stuff if so
    if (PATH / argv[1]).is_dir():
        CERT_DIR = argv[1]
        # Strip one trailing slash so the db file is named "<dir>.db",
        # not "<dir>/.db".
        if CERT_DIR[-1] == '/':
            CERT_DIR = CERT_DIR[:-1]
        try:
            add_certs(CERT_DIR)
        except KeyboardInterrupt:
            # Allow Ctrl-C to abort quietly without a traceback.
            quit()
    else:
        print(f'\n[*] {argv[1]} not a valid directory\n')
| 30.844444 | 193 | 0.583934 |
6e710c139901b3edb6aaa6a1f60ac54de8da8353 | 209 | py | Python | mrq_monitor.py | HyokaChen/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | 1 | 2020-07-29T15:49:35.000Z | 2020-07-29T15:49:35.000Z | mrq_monitor.py | HyokaChen/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | 1 | 2019-12-19T10:19:57.000Z | 2019-12-19T11:15:28.000Z | mrq_monitor.py | EmptyChan/violet | b89ddb4f909c2a40e76d89b665949e55086a7a80 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created with IntelliJ IDEA.
Description:
User: jinhuichen
Date: 3/28/2018 4:17 PM
Description:
"""
from mrq.dashboard.app import main
if __name__ == '__main__':
    # Launch the MRQ dashboard web application when run as a script.
    main()
6e734b51dd3ec79fecc1a0e0800072ebad29c909 | 556 | py | Python | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | lang/string/reverse-words.py | joez/letspy | 9f653bc0071821fdb49da8c19787dc7e12921457 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
def reverse_words(s):
    """Reverse each space-separated word of *s*, keeping word order."""
    flipped = [word[::-1] for word in s.split(' ')]
    return ' '.join(flipped)
def reverse_words_ext(s):
    """Reverse each word of *s* while preserving every whitespace character.

    Unlike reverse_words, this handles tabs/newlines and keeps each
    whitespace character exactly where it was.
    """
    pieces, buf = [], []
    for ch in s:
        if ch.isspace():
            # Flush the word collected so far, then keep the whitespace as-is.
            if buf:
                pieces.append(''.join(reversed(buf)))
                buf = []
            pieces.append(ch)
        else:
            buf.append(ch)
    if buf:
        pieces.append(''.join(reversed(buf)))
    return ''.join(pieces)
if __name__ == '__main__':
    # Read one line from stdin and print both implementations' results.
    s = input()
    for f in (reverse_words, reverse_words_ext):
        print(f(s))
| 19.857143 | 50 | 0.491007 |
6e74495ac01d11fb500db642fc48819334b6af0a | 140 | py | Python | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | 1 | 2019-04-18T09:52:48.000Z | 2019-04-18T09:52:48.000Z | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | null | null | null | k8s/the-project/kubeless/ok-func.py | cjimti/mk | b303e147da77776baf5fee337e356ebeccbe2c01 | [
"MIT"
] | null | null | null | import requests
def ok(event, context):
    """Kubeless handler: GET the in-cluster 'ok' service and return its body."""
    endpoint = "http://ok:8080/"
    resp = requests.request("GET", endpoint)
    return resp.text
| 15.555556 | 43 | 0.65 |
6e74bf0ffc1a010178cf010d5be1824b1235b7ba | 11,166 | py | Python | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 26 | 2020-11-16T12:49:05.000Z | 2022-03-09T20:39:22.000Z | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 47 | 2020-11-08T23:35:49.000Z | 2022-03-10T03:43:00.000Z | python-scripts/gt_generate_python_curve.py | TrevisanGMW/maya | 4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92 | [
"MIT"
] | 5 | 2021-01-27T06:10:34.000Z | 2021-10-30T23:29:44.000Z | """
Python Curve Generator
@Guilherme Trevisan - github.com/TrevisanGMW/gt-tools - 2020-01-02
1.1 - 2020-01-03
Minor patch adjustments to the script
1.2 - 2020-06-07
Fixed random window widthHeight issue.
Updated naming convention to make it clearer. (PEP8)
Added length checker for selection before running.
1.3 - 2020-06-17
Changed UI
Added help menu
Added icon
1.4 - 2020-06-27
No longer failing to generate curves with non-unique names
Tweaked the color and text for the title and help menu
1.5 - 2021-01-26
Fixed way the curve is generated to account for closed and opened curves
1.6 - 2021-05-12
Made script compatible with Python 3 (Maya 2022+)
"""
import maya.cmds as cmds
import sys
from decimal import *
from maya import OpenMayaUI as omui
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
# Script Name
script_name = "GT - Generate Python Curve"
# Version:
script_version = "1.6"
#Python Version
python_version = sys.version_info.major
# Default Settings
close_curve = False
add_import = False
# Function for the "Run Code" button
def run_output_code(out):
    """Execute the generated Python snippet *out*; warn in Maya on failure."""
    try:
        exec(out)
    except Exception as exc:
        # Surface the problem to the user instead of raising inside the UI.
        cmds.warning("Something is wrong with your code!")
        cmds.warning(exc)
# Main Form ============================================================================
def build_gui_py_curve():
    """Build and show the main Maya window for the curve-to-Python generator."""
    window_name = "build_gui_py_curve"
    # Rebuild the window from scratch if it is already open.
    if cmds.window(window_name, exists =True):
        cmds.deleteUI(window_name)

    # Main GUI Start Here =================================================================================

    build_gui_py_curve = cmds.window(window_name, title=script_name + ' (v' + script_version + ')',\
                          titleBar=True, mnb=False, mxb=False, sizeable =True)
    cmds.window(window_name, e=True, s=True, wh=[1,1])

    content_main = cmds.columnLayout(adj = True)

    # Title
    title_bgc_color = (.4, .4, .4)
    cmds.separator(h=10, style='none') # Empty Space
    cmds.rowColumnLayout(nc=1, cw=[(1, 270)], cs=[(1, 10)], p=content_main) # Window Size Adjustment
    cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 200), (3, 50)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column
    cmds.text(" ", bgc=title_bgc_color) # Tiny Empty Green Space
    cmds.text(script_name, bgc=title_bgc_color, fn="boldLabelFont", align="left")
    cmds.button( l ="Help", bgc=title_bgc_color, c=lambda x:build_gui_help_py_curve())
    cmds.separator(h=10, style='none', p=content_main) # Empty Space

    # Body ====================
    body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)

    cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)])
    # Two options: prepend the maya.cmds import line, and force-open the curve.
    settings = cmds.checkBoxGrp(columnWidth2=[150, 1], numberOfCheckBoxes=2, \
                label1 = 'Add import \"maya.cmds\" ', label2 = "Force Open", v1 = add_import, v2 = close_curve)

    cmds.rowColumnLayout(nc=1, cw=[(1, 230)], cs=[(1,0)])
    cmds.separator(h=10, style='none') # Empty Space
    cmds.button(l ="Generate", bgc=(.6, .6, .6), c=lambda x:generate_python_curve())
    cmds.separator(h=10, style='none', p=content_main) # Empty Space
    cmds.separator(h=10, p=content_main)

    # Bottom ====================
    cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
    cmds.text(label='Output Python Curve' )
    output_python = cmds.scrollField(editable=True, wordWrap=True)
    cmds.separator(h=10, style='none') # Empty Space
    cmds.button(l ="Run Code", c=lambda x:run_output_code(cmds.scrollField(output_python, query=True, text=True)))
    cmds.separator(h=10, style='none') # Empty Space

    def generate_python_curve():
        """Read the selected curve and write cmds.curve(...) code to the output field."""
        not_curve_error = "Please make sure you selected a Nurbs Curve or a Bezier Curve object before generating it"
        if len(cmds.ls(selection=True)) != 0:
            # Limit Decimal precision used when rounding CV coordinates.
            getcontext().prec = 5
            sel_one = cmds.ls(sl=1)[0]
            shape = cmds.listRelatives(sel_one, s=1 , fullPath=True)[0]
            type_checker = str(cmds.objectType(shape))

            if "nurbsCurve" in type_checker or "bezierCurve" in type_checker:
                opened_curve = cmds.checkBoxGrp (settings, q=True, value2=True)
                per_state = cmds.getAttr(shape + '.form')
                knots_string = ''
                extra_cvs_per = ''
                is_periodic = False

                # .form == 2 means the curve is periodic (closed);
                # capture its knot vector so the rebuilt curve stays closed.
                if not opened_curve and per_state == 2:
                    is_periodic=True
                    curve_info = cmds.arclen(sel_one, ch=True)
                    curve_knots = cmds.getAttr( curve_info + '.knots[*]' )
                    knots_string = ', per=True, k=' + str(curve_knots)
                    cmds.delete(curve_info)

                cvs = cmds.getAttr(shape+'.cv[*]')
                cvs_list = []
                for c in cvs:
                    # Round each CV coordinate to three decimals for compact output.
                    cvs_list.append([float(Decimal("%.3f" % c[0])),float(Decimal("%.3f" % c[1])),float(Decimal("%.3f" % c[2]))])

                # Periodic curves repeat their first CVs at the end of the list.
                if is_periodic and len(cvs) > 2:
                    extra_cvs_per = ', '
                    for i in range(3):
                        if i != 2:
                            extra_cvs_per += str(cvs_list[i]) + ', '
                        else:
                            extra_cvs_per += str(cvs_list[i])

                if cmds.checkBoxGrp(settings, q=True, value1=True):
                    out = 'import maya.cmds as cmds\n\ncmds.curve(p='
                else:
                    out = 'cmds.curve(p='

                out += '[%s' % ', '.join(map(str, cvs_list))
                out += extra_cvs_per + '], d='+str(cmds.getAttr(shape+'.degree'))+ knots_string + ')'

                # Echo to the script editor as well as the UI field.
                print ("#" * 100)
                print (out)
                print ("#" * 100)

                cmds.scrollField(output_python, edit=True, wordWrap=True, text=out ,sl=True)
                cmds.setFocus(output_python)
            else:
                cmds.warning(not_curve_error)
                cmds.scrollField(output_python, edit=True, wordWrap=True, text=not_curve_error ,sl=True)
                cmds.setFocus(output_python)
        else:
            cmds.warning(not_curve_error)

    # Show and Lock Window
    cmds.showWindow(build_gui_py_curve)
    cmds.window(window_name, e=True, s=False)

    # Set Window Icon
    qw = omui.MQtUtil.findWindow(window_name)
    if python_version == 3:
        widget = wrapInstance(int(qw), QWidget)
    else:
        # Python 2 (Maya < 2022) needs the long() cast for the pointer.
        widget = wrapInstance(long(qw), QWidget)
    icon = QIcon(':/pythonFamily.png')
    widget.setWindowIcon(icon)

    # Main GUI Ends Here =================================================================================
# Creates Help GUI
def build_gui_help_py_curve():
    """Build and show the help window describing how the tool is used."""
    window_name = "build_gui_help_py_curve"
    # Recreate the help window if it is already open.
    if cmds.window(window_name, exists=True):
        cmds.deleteUI(window_name, window=True)

    cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
    cmds.window(window_name, e=True, s=True, wh=[1,1])

    cmds.columnLayout("main_column", p= window_name)

    # Title Text
    cmds.separator(h=12, style='none') # Empty Space
    cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p="main_column") # Window Size Adjustment
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p="main_column") # Title Column
    cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
    cmds.separator(h=10, style='none', p="main_column") # Empty Space

    # Body ====================
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
    cmds.text(l='This script generates the Python code necessary to create', align="left")
    cmds.text(l='a selected curve.', align="left")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='Make sure you delete the curve\'s history before ', align="left")
    cmds.text(l='generating the code.', align="left")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='Add import "maya.cmds":', align="left", fn="boldLabelFont")
    cmds.text(l='Adds a line that imports Maya\'s API. This is necessary', align="left")
    cmds.text(l='when running python scripts.', align="left")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='Force Open: ', align="left", fn="boldLabelFont")
    cmds.text(l='Doens\'t check if the curve is periodic leaving it open.', align="left")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='"Generate" button:', align="left", fn="boldLabelFont")
    cmds.text(l='Outputs the python code necessary to create the curve', align="left")
    cmds.text(l='inside the "Output Python Curve" box.', align="left")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='Run Code:', align="left", fn="boldLabelFont")
    cmds.text(l='Attempts to run the code (or anything written) inside ', align="left")
    cmds.text(l='"Output Python Curve" box', align="left")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
    cmds.text('Guilherme Trevisan  ')
    cmds.text(l='<a href="mailto:trevisangmw@gmail.com">TrevisanGMW@gmail.com</a>', hl=True, highlightColor=[1,1,1])
    cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
    cmds.separator(h=7, style='none') # Empty Space

    # Close Button
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
    cmds.separator(h=10, style='none')
    cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
    cmds.separator(h=8, style='none')

    # Show and Lock Window
    cmds.showWindow(window_name)
    cmds.window(window_name, e=True, s=False)

    # Set Window Icon
    qw = omui.MQtUtil.findWindow(window_name)
    if python_version == 3:
        widget = wrapInstance(int(qw), QWidget)
    else:
        # Python 2 (Maya < 2022) needs the long() cast for the pointer.
        widget = wrapInstance(long(qw), QWidget)
    icon = QIcon(':/question.png')
    widget.setWindowIcon(icon)

    def close_help_gui():
        # Callback for the OK button: close this help window.
        if cmds.window(window_name, exists=True):
            cmds.deleteUI(window_name, window=True)
#Build UI
if __name__ == '__main__':
    # Open the tool window when executed directly inside Maya.
    build_gui_py_curve()
6e75ab3bf35f32714181bf627668b80eaa462378 | 1,766 | py | Python | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | client/core/scene/summary.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | [
"MIT"
] | null | null | null | import pygame
from config import Config
from core.ui import Table, Button
from core.scene import Scene
from core.manager import SceneManager
from core.scene.preload import Preload
class SummaryScene(Scene):
    """End-of-game overlay: shows the pilot's score/wave plus a Continue button."""

    def __init__(self, game):
        super().__init__(game)
        # Dim the last rendered frame and reuse it as a translucent backdrop.
        self._background = pygame.display.get_surface()
        self._background.set_alpha(180)
        self._background.fill(Config.colors['black'])
        self._elements.clear(self._canvas, self._background)

        w, h = (200, 200)
        rowData = [('Score', 'Wave'), (str(self._parent._pilot.score), str(self._parent._pilot.wave))]
        columnWidth = [100, 100]
        self._scoreBoard = Table(self, w, h, rowData, columnWidth, title='Summary', line=False, button=False)
        # Center the summary table in the window.
        self._scoreBoard.rect.centerx = Config.windowWidth//2
        self._scoreBoard.rect.centery = Config.windowHeight//2
        self.addElement(self._scoreBoard)

        def callBack():
            # Persist pilot progress, then return to the previous scene
            # via the Preload scene.
            # SceneManager.call(MainScene(self._parent), Preload(self._parent))
            self._parent._pilot.update()
            SceneManager.ret(Preload(self._parent))

        # Continue button sits flush under the table's bottom-right corner.
        self._btn = Button(self, 'Continue', callBack)
        self._btn.rect.right = self._scoreBoard.rect.right
        self._btn.rect.top = self._scoreBoard.rect.bottom
        self.addElement(self._btn)
        self.addEventListener(self._btn.handleEvent)

    def loadData(self):
        # No assets to load for this scene.
        pass

    def run(self):
        """One frame of the scene loop: events, update, draw, frame-rate tick."""
        for event in pygame.event.get():
            self._handleEvent(event)
        self.update()
        self.draw()
        self._clock.tick(Config.ticks)

    def update(self):
        super().update()

    def draw(self):
        # Repaint only the dirty rects reported by the sprite group.
        updatedRects = self._elements.draw(self._canvas)
        pygame.display.update(updatedRects)
6e7654580b77f1dbecf04a37ead830e9b06ecf31 | 198 | py | Python | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 71 | 2021-03-08T06:06:15.000Z | 2022-03-30T11:59:37.000Z | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 13 | 2021-09-07T12:38:23.000Z | 2022-03-22T15:08:16.000Z | mwptoolkit/module/Encoder/__init__.py | ShubhamAnandJain/MWP-CS229 | ce86233504fdb37e104a3944fd81d4606fbfa621 | [
"MIT"
] | 21 | 2021-02-16T07:46:36.000Z | 2022-03-23T13:41:33.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from mwptoolkit.module.Encoder import graph_based_encoder,rnn_encoder,transformer_encoder | 49.5 | 89 | 0.90404 |
6e78083845e016661893639e08ffab0d50cff621 | 546 | py | Python | src/python/intensity/components/shutdown_if_empty.py | kripken/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 31 | 2015-01-18T20:27:31.000Z | 2021-07-03T03:58:47.000Z | src/python/intensity/components/shutdown_if_empty.py | JamesLinus/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 4 | 2015-07-05T21:09:37.000Z | 2019-09-06T14:34:59.000Z | src/python/intensity/components/shutdown_if_empty.py | JamesLinus/intensityengine | 9ae352b4f526ecb180004ae4968db7f64f140762 | [
"MIT"
] | 11 | 2015-02-03T19:24:10.000Z | 2019-09-20T10:59:50.000Z |
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
from intensity.signals import client_connect, client_disconnect
from intensity.base import quit
class Data:
    # Module-level running count of currently connected clients.
    counter = 0

def add(sender, **kwargs):
    # Signal handler: a client connected, bump the counter.
    Data.counter += 1

client_connect.connect(add, weak=False)

def subtract(sender, **kwargs):
    # Signal handler: a client disconnected; shut the server down once empty.
    Data.counter -= 1
    if Data.counter <= 0:
        quit()

client_disconnect.connect(subtract, weak=False)
| 22.75 | 110 | 0.717949 |
6e780b142bddebcec890df30277381a71e204488 | 694 | py | Python | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | pyforms/utils/timeit.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
import time
from datetime import datetime, timedelta
def timeit(method):
    """Decorator that prints *method*'s wall-clock run time and returns its result.

    The elapsed time is printed as ``name: days:hours:minutes:seconds;microseconds``.

    Fix: the wrapper is now decorated with ``functools.wraps`` so the wrapped
    function keeps its ``__name__``/``__doc__`` (the original wrapper hid them).
    """
    from functools import wraps  # local import keeps the module surface unchanged

    @wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()

        # Offsetting from datetime(1,1,1) lets the elapsed seconds be read
        # back as day/hour/minute/second/microsecond fields for printing.
        time_elapsed = datetime(1, 1, 1) + timedelta(seconds=(te - ts))
        print("%s: %d:%d:%d:%d;%d" % (method.__name__, time_elapsed.day - 1,
                                      time_elapsed.hour, time_elapsed.minute,
                                      time_elapsed.second,
                                      time_elapsed.microsecond))
        return result
    return timed
6e7a4b454a8651618254290e5f7ef6b4e1cd99a9 | 1,388 | py | Python | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | 1 | 2020-08-17T05:14:58.000Z | 2020-08-17T05:14:58.000Z | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | null | null | null | cogs/botinfo.py | MM-coder/salbot-rewrite | 322c34ba85a2c852e02cd3c183d5a7a4a077ff6f | [
"Apache-2.0"
] | 1 | 2020-08-17T16:57:30.000Z | 2020-08-17T16:57:30.000Z | """
Created by vcokltfre at 2020-07-08
"""
import json
import logging
import time
from datetime import datetime
import discord
from discord.ext import commands
from discord.ext.commands import has_any_role
class BotInfo(commands.Cog):
    """Cog that counts raw gateway socket events and exposes a stats command."""

    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger("salbot.cogs.botinfo")
        # Unix timestamp of when the cog was loaded (uptime baseline).
        self.uptime_start = round(time.time())
        # Maps gateway event name -> number of payloads seen.
        self.socket_stats = {}
        # Fallback labels for payloads that carry an opcode but no event name.
        self.opcodes = {
            10: "HELLO",
            11: "HEARTBEAT",
            9: "HI",
            7: "RECONNECT"
        }

    @commands.Cog.listener()
    async def on_socket_response(self, data):
        """Count every raw gateway payload by its event type (or opcode label)."""
        t = data["t"]
        if not t:
            try:
                t = self.opcodes[data["op"]]
            except KeyError:
                # Unknown opcode: log it; the payload is still counted under
                # whatever falsy value `t` currently holds.
                self.logger.warning(f"Unknown opcode. Received: {data['op']}")
        self.socket_stats[t] = self.socket_stats.get(t, 0) + 1

    @commands.command(name="stats")
    @has_any_role("Administrator", "Moderator")
    async def stats_bot(self, ctx, typ="raw"):
        """Post the collected socket stats as a JSON embed (staff roles only)."""
        if typ == "raw":
            jsd = json.dumps(self.socket_stats, indent=4)
            desc = f"```json\n{jsd}```"
            embed = discord.Embed(title="Raw Socket Stats", color=0xFF0000, description=desc, timestamp=datetime.now())
            await ctx.channel.send(embed=embed)
def setup(bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(BotInfo(bot))
| 28.326531 | 119 | 0.591499 |
6e7b6d33ac9f184e61e6b426b75d7acfe7a99f1e | 6,486 | py | Python | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | 43 | 2021-09-09T14:20:59.000Z | 2022-03-28T00:38:52.000Z | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | 6 | 2022-01-04T10:53:11.000Z | 2022-03-28T19:53:46.000Z | ninja_extra/pagination.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | [
"MIT"
] | null | null | null | import inspect
import logging
from collections import OrderedDict
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, cast, overload
from django.core.paginator import InvalidPage, Page, Paginator
from django.db.models import QuerySet
from django.http import HttpRequest
from ninja import Schema
from ninja.constants import NOT_SET
from ninja.pagination import LimitOffsetPagination, PageNumberPagination, PaginationBase
from ninja.signature import has_kwargs
from ninja.types import DictStrAny
from pydantic import Field
from ninja_extra.conf import settings
from ninja_extra.exceptions import NotFound
from ninja_extra.schemas import PaginatedResponseSchema
from ninja_extra.urls import remove_query_param, replace_query_param
logger = logging.getLogger()
if TYPE_CHECKING:
from .controllers import ControllerBase # pragma: no cover
__all__ = [
"PageNumberPagination",
"PageNumberPaginationExtra",
"PaginationBase",
"LimitOffsetPagination",
"paginate",
"PaginatedResponseSchema",
]
def _positive_int(
    integer_string: Union[str, int], strict: bool = False, cutoff: Optional[int] = None
) -> int:
    """
    Cast a string to a strictly positive integer.

    Raises ValueError for negative values (and for zero when *strict*);
    clamps the result to *cutoff* when one is given.
    """
    value = int(integer_string)
    if value < 0:
        raise ValueError()
    if strict and value == 0:
        raise ValueError()
    if cutoff:
        value = min(value, cutoff)
    return value
class PageNumberPaginationExtra(PaginationBase):
    """Page-number pagination emitting DRF-style count/next/previous/results."""

    class Input(Schema):
        # Query parameters accepted from the client.
        page: int = Field(1, gt=0)
        page_size: int = Field(100, lt=200)

    page_query_param = "page"
    page_size_query_param = "page_size"

    max_page_size = 200
    paginator_class = Paginator

    def __init__(
        self,
        page_size: int = settings.PAGINATION_PER_PAGE,
        max_page_size: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.page_size = page_size
        self.max_page_size = max_page_size or 200
        # Rebuild the Input schema so its defaults reflect instance settings.
        self.Input = self.create_input()  # type:ignore

    def create_input(self) -> Type[Input]:
        """Return an Input schema whose default/limit match this instance."""
        class DynamicInput(PageNumberPaginationExtra.Input):
            page: int = Field(1, gt=0)
            page_size: int = Field(self.page_size, lt=self.max_page_size)

        return DynamicInput

    def paginate_queryset(
        self, items: QuerySet, request: HttpRequest, **params: Any
    ) -> Any:
        """Slice *items* for the requested page; raise NotFound (404) for a bad page."""
        pagination_input = cast(PageNumberPaginationExtra.Input, params["pagination"])
        page_size = self.get_page_size(pagination_input.page_size)
        current_page_number = pagination_input.page
        paginator = self.paginator_class(items, page_size)
        try:
            # Absolute URL is used to build the next/previous links.
            url = request.build_absolute_uri()
            page: Page = paginator.page(current_page_number)
            return self.get_paginated_response(base_url=url, page=page)
        except InvalidPage as exc:
            msg = "Invalid page. {page_number} {message}".format(
                page_number=current_page_number, message=str(exc)
            )
            raise NotFound(msg)

    def get_paginated_response(self, *, base_url: str, page: Page) -> DictStrAny:
        """Build the ordered response payload for one result page."""
        return OrderedDict(
            [
                ("count", page.paginator.count),
                ("next", self.get_next_link(base_url, page=page)),
                ("previous", self.get_previous_link(base_url, page=page)),
                ("results", list(page)),
            ]
        )

    @classmethod
    def get_response_schema(
        cls, response_schema: Union[Schema, Type[Schema], Any]
    ) -> Any:
        # Wrap the item schema in the generic paginated envelope schema.
        return PaginatedResponseSchema[response_schema]

    def get_next_link(self, url: str, page: Page) -> Optional[str]:
        """URL of the next page, or None when *page* is the last one."""
        if not page.has_next():
            return None
        page_number = page.next_page_number()
        return replace_query_param(url, self.page_query_param, page_number)

    def get_previous_link(self, url: str, page: Page) -> Optional[str]:
        """URL of the previous page, or None when *page* is the first one."""
        if not page.has_previous():
            return None
        page_number = page.previous_page_number()
        if page_number == 1:
            # Page 1 is the default, so drop the query param entirely.
            return remove_query_param(url, self.page_query_param)
        return replace_query_param(url, self.page_query_param, page_number)

    def get_page_size(self, page_size: int) -> int:
        """Clamp the client-requested page size; fall back to the default."""
        if page_size:
            try:
                return _positive_int(page_size, strict=True, cutoff=self.max_page_size)
            except (KeyError, ValueError):
                pass
        return self.page_size
@overload
def paginate() -> Callable[..., Any]:
    ...


@overload
def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    ...


def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    """Decorator that paginates a controller route's returned queryset.

    Works bare (``@paginate``), or configured with a pagination class and
    keyword parameters (``@paginate(MyPagination, page_size=50)``).
    """
    isfunction = inspect.isfunction(func_or_pgn_class)
    isnotset = func_or_pgn_class == NOT_SET

    pagination_class: Type[PaginationBase] = settings.PAGINATION_CLASS

    if isfunction:
        # Bare @paginate usage: wrap immediately with the default class.
        return _inject_pagination(func_or_pgn_class, pagination_class)

    if not isnotset:
        # Called as @paginate(SomePaginationClass, ...).
        pagination_class = func_or_pgn_class

    def wrapper(func: Callable[..., Any]) -> Any:
        return _inject_pagination(func, pagination_class, **paginator_params)

    return wrapper
def _inject_pagination(
    func: Callable[..., Any],
    paginator_class: Type[PaginationBase],
    **paginator_params: Any,
) -> Callable[..., Any]:
    """Wrap *func* so its result is paginated and the pagination Input is
    contributed to the route's signature for django-ninja to parse."""
    # Record whether the view accepts **kwargs (needed to receive the
    # pagination input); warn when it does not.
    func.has_kwargs = True  # type: ignore
    if not has_kwargs(func):
        func.has_kwargs = False  # type: ignore
        logger.debug(
            f"function {func.__name__} should have **kwargs if you want to use pagination parameters"
        )
    paginator: PaginationBase = paginator_class(**paginator_params)
    paginator_kwargs_name = "pagination"

    @wraps(func)
    def view_with_pagination(
        controller: "ControllerBase", *args: Any, **kw: Any
    ) -> Any:
        func_kwargs = dict(kw)
        if not func.has_kwargs:  # type: ignore
            # The view cannot accept the pagination input itself; strip it
            # before delegating, but still pass it to the paginator below.
            func_kwargs.pop(paginator_kwargs_name)
        items = func(controller, *args, **func_kwargs)
        assert (
            controller.context and controller.context.request
        ), "Request object is None"
        return paginator.paginate_queryset(items, controller.context.request, **kw)

    # Tell django-ninja to add the paginator's Input schema to the signature.
    view_with_pagination._ninja_contribute_args = [  # type: ignore
        (
            paginator_kwargs_name,
            paginator.Input,
            paginator.InputSource,
        ),
    ]

    return view_with_pagination
| 31.333333 | 101 | 0.665433 |
6e7bea4cb2b85ac4aa392fccc69253e8cb2356b9 | 547 | py | Python | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | null | null | null | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | 3 | 2022-01-02T18:04:24.000Z | 2022-01-12T16:35:31.000Z | text-boxes/test-textbox01.py | rajorshi-mukherjee/gui-python | 356eef26975e63de48b441d336d75a1f9c232cf3 | [
"MIT"
] | null | null | null | # !/usr/bin/python3
from tkinter import *
top = Tk()
top.geometry("400x250")
name = Label(top, text = "Name").place(x = 30,y = 50)
email = Label(top, text = "Email").place(x = 30, y = 90)
password = Label(top, text = "Password").place(x = 30, y = 130)
sbmitbtn = Button(top, text = "Submit",activebackground = "pink", activeforeground = "blue").place(x = 30, y = 170)
e1 = Entry(top).place(x = 80, y = 50)
e2 = Entry(top).place(x = 80, y = 90)
e3 = Entry(top, show="*").place(x = 95, y = 130)
top.mainloop() | 30.388889 | 117 | 0.575868 |
6e7c1a4dd0214c41c2785c1779862d06bb157d94 | 873 | py | Python | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | null | null | null | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | 13 | 2016-08-10T19:22:35.000Z | 2021-06-10T18:53:01.000Z | server/yafa/migrations/0002_auto_20160606_2216.py | mrmonkington/yafa | d15ba1fdaaa046e3bc07a7a44fb61213d686bb7d | [
"MIT"
] | 2 | 2016-06-23T09:02:20.000Z | 2021-03-22T11:39:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 22:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the free-form ``name`` field on Site and Zone with a ``slug``."""

    dependencies = [
        ('yafa', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='site',
            name='name',
        ),
        migrations.RemoveField(
            model_name='zone',
            name='name',
        ),
        migrations.AddField(
            model_name='site',
            name='slug',
            # default='' backfills existing rows; preserve_default=False
            # keeps the one-off default out of the final model state.
            field=models.SlugField(default='', max_length=250),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='zone',
            name='slug',
            field=models.SlugField(default='', max_length=250),
            preserve_default=False,
        ),
    ]
| 24.25 | 63 | 0.54181 |
6e7d261c65a6ddf389725d10b7241f84b3620572 | 501 | py | Python | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | toys/urls.py | julesc00/restful | 11b5312caf4affeaa06e3ceb5b86a7c73357eed1 | [
"MIT"
] | null | null | null | from django.urls import path
from toys.views import (toy_list_view, toy_detail_view, toy_sql_view, toy_raw_sql_view,
toy_aggregate_view)
# URL namespace for this app; reverse routes as e.g. reverse("toys:toys_list").
app_name = "toys"

# Route table mapping each URL to its view (views imported above); the
# view names suggest plain-ORM, SQL, raw-SQL and aggregate variants of
# the listing — confirm against toys/views.py.
urlpatterns = [
    path("toys/", toy_list_view, name="toys_list"),
    path("toys_sql/", toy_sql_view, name="toys_sql_list"),
    path("toys/count/", toy_aggregate_view, name="toys_count"),
    path("toys_raw/", toy_raw_sql_view, name="toys_raw_list"),
    # Detail view keyed by integer primary key.
    path("toys/<int:pk>/", toy_detail_view, name="toy_detail"),
]
| 35.785714 | 87 | 0.692615 |
6e7e694936dd85ec6e3ce90826c00f74519f89dc | 5,590 | py | Python | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | predict_image.py | sempwn/kaggle-cats-v-dogs | 0b0e50ca5208248d18b31bfdd456cdb6401060d7 | [
"MIT"
] | null | null | null | '''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 12500-13499 in data/train/dogs
- put the dog pictures index 13500-13900 in data/validation/dogs
So that we have 1000 training examples for each class, and 400 validation examples for each class.
In summary, this is our directory structure:
```
data/
train/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
validation/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
```
'''
import os
import h5py
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image as image_utils
from keras import optimizers
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
#image input utils
from Tkinter import Tk
from tkFileDialog import askopenfilename
# Paths to the pre-trained weight files: the VGG16 convolutional base and
# the bottleneck classifier trained on top of it.
weights_path = 'data/models/vgg16_weights.h5'
top_model_weights_path = 'data/models/bottleneck_fc_model.h5'
# dimensions of our images.
img_width, img_height = 150, 150
# The settings below come from the training script and are not used
# anywhere in this prediction script.
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
nb_epoch = 50
# Build the VGG16 network layer by layer.  Input is channel-first
# (3, img_width, img_height); Convolution2D(filters, rows, cols) is the
# old Keras 1.x API.
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path)
# Copy weights layer-by-layer out of the HDF5 savefile into the model.
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.0)) # Should have 0 dropout for prediction, but the layer must exist so the saved weight file's structure matches.
top_model.add(Dense(1, activation='sigmoid'))
print('[INFO] loading weights. May take a while...')
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
model.add(top_model)
# TODO: create test_data in appropriate format.
print("[INFO] loading and preprocessing image...")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
image = image_utils.load_img(filename, target_size=(img_width, img_height))
image = image_utils.img_to_array(image) #array should be (3,150,150)
image = np.expand_dims(image, axis=0) #expand to shape (1,3,150, 150)
# Sigmoid output: probability of the "dog" class.
pDOG = model.predict(image)[0][0]
pCAT = 1. - pDOG
# NB: Python 2 print statement (file also uses the Python 2 Tkinter names).
print 'Image {} percent dog and {} percent cat'.format(pDOG*100.,pCAT*100.)
| 38.287671 | 111 | 0.719499 |
6e7f3a4c08faec09d89aa387dcfdf45492ab2264 | 163 | py | Python | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 5 | 2021-03-13T16:07:26.000Z | 2021-09-09T17:00:36.000Z | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 8 | 2021-03-09T21:52:09.000Z | 2021-12-02T17:23:33.000Z | ultimate-utils-proj-src/uutils/torch_uu/training/meta_training.py | brando90/ultimate-utils | 9b7ca2e9d330333c4e49722d0708d65b22ed173a | [
"MIT"
] | 5 | 2021-03-24T20:38:43.000Z | 2022-03-17T07:54:12.000Z | """
TODO: Once the "d zero and high" paper is finished, port its meta-training code here.
TODO: Also port the epoch-based training code used for the ML vs. MAML paper with synthetic data.
""" | 40.75 | 81 | 0.730061 |
6e7ff5caf482e80185273f9434f18cc9786fbe99 | 692 | py | Python | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | 2 | 2021-10-16T12:19:50.000Z | 2022-01-20T16:50:14.000Z | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | null | null | null | setup.py | ellwise/kedro-light | 8f5a05d880f3ded23b024d5db72b5fc615e75230 | [
"MIT"
] | null | null | null | from setuptools import setup
from os import path
# Read the README so PyPI can render it as the package's long description.
curr_dir = path.abspath(path.dirname(__file__))
with open(path.join(curr_dir, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="kedro-light",
    version="0.1",
    description="A lightweight interface to Kedro",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): this URL names a different repository
    # ("naive-bayes-explainer") than the package — confirm it is intended.
    url="https://github.com/ellwise/naive-bayes-explainer",
    author="Elliott Wise",
    author_email="ell.wise@gmail.com",
    license="MIT",
    packages=["kedro_light"],
    install_requires=["kedro"],
    include_package_data=True,
    zip_safe=False,
)
| 27.68 | 67 | 0.710983 |
6e812cd9d9f3ad6325c8b7be7fb0c2f7d95ff84f | 1,217 | py | Python | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | app.py | mh-github/mh-wtgw | 0e8d9b622954e14d1e24fda6fc6a4e63af2cd822 | [
"CC0-1.0"
] | null | null | null | import random
from flask import Flask, request, render_template, jsonify
app = Flask(__name__)

# Clue/answer records, one per line in the form "<clue>|<answer>",
# loaded once at import time.  Lines keep their trailing newlines, so
# consumers strip() the split fields before use.
data_list = []
with open('data.txt', 'r') as data_file:
    data_list = data_file.readlines()
@app.route("/", methods=['GET'])
def index():
    """Serve the game page seeded with a randomly chosen clue."""
    chosen = random.randint(1, len(data_list) - 1)  # entry 0 is never picked
    clue_text = data_list[chosen].split('|')[0]
    return render_template('game.html',
                           clue=clue_text.strip(),
                           index=chosen)
@app.route("/check")
def checkAnswer():
    """Compare the submitted answer against the stored one for a clue index."""
    clue_idx = int(request.args.get("index"))
    submitted = request.args.get("answer").strip().upper()
    expected = data_list[clue_idx].split('|')[1].strip()
    if submitted == expected:
        return "You got it right!"
    return "Wrong Answer! Please try again!!"
@app.route("/show")
def showAnswer():
    """Reveal the stored answer for the requested clue index."""
    clue_idx = int(request.args.get("index"))
    record = data_list[clue_idx]
    return record.split('|')[1].strip()
@app.route("/new")
def newClue():
    """Return a fresh random clue (and its index) as JSON for the front-end."""
    chosen = random.randint(1, len(data_list) - 1)  # entry 0 is never picked
    clue_text = data_list[chosen].split('|')[0].strip()
    return jsonify({
        'index': chosen,
        'clue': clue_text
    })
if __name__ == "__main__":
    # Bind to all interfaces so the dev server is reachable from outside.
    app.run(host='0.0.0.0')
6e81c177879d88e6b010319496c61e52cdb196f1 | 13,606 | py | Python | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_cswl05.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Plotting.py for notebook 05_Preliminary_comparison_of_simulations_AGN_fraction_with_data
This python file contains all the functions used for plotting graphs and maps in the 2nd notebook (.ipynb) of the repository: 05. Preliminary comparison of the 𝑓MM between simulation and data
Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 27th April 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
import os
import importlib
# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import seaborn as sns
import Agn_incidence_from_Major_Mergers as aimm
import Comparison_simulation_with_literature_data as cswl
from scipy.stats import norm
def setLabel(ax, xlabel, ylabel, title='', xlim='default', ylim='default', legend=True):
    """
    Function defining plot properties
    @param ax :: axes to be held
    @param xlabel, ylabel :: labels of the x-y axis
    @param title :: title of the plot
    @param xlim, ylim :: x-y limits for the axis; the string 'default' is a
                         sentinel meaning "keep matplotlib's own limits"
    @param legend :: if True, draw a frameless legend and enlarge its markers
    """
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if xlim != 'default':
        ax.set_xlim(xlim)
    if ylim != 'default':
        ax.set_ylim(ylim)
    if legend:
        l = ax.legend(loc='best', fontsize=14, frameon=False)
        # NOTE(review): _legmarker is a private matplotlib attribute and may
        # break across matplotlib versions — confirm when upgrading.
        for legend_handle in l.legendHandles:
            legend_handle._legmarker.set_markersize(12)
    ax.grid(False)
    ax.set_title(title, fontsize=18)
    return
def plotFpairs(ax, r_p, f_pairs, f_pairs_err, label, color='r', errorbar = True):
    """Plot pair fractions against projected separation (converted to kpc).

    @param r_p :: projected radius bin edges in Mpc; the first edge is dropped
    @param f_pairs :: pair fractions; the `.value` access below implies an
                      astropy Quantity — TODO confirm
    @param f_pairs_err :: errors drawn as black caps when errorbar=True
    @Returns :: the axes, for chaining
    """
    # changing all unit to kpc
    r_p_kpc, f_pairs = 1e3*r_p[1:], f_pairs
    # plotting the results
    ax.plot( r_p_kpc , f_pairs, 's', ls='--', color=color, label = label)
    if errorbar:
        ax.errorbar(r_p_kpc , f_pairs.value, yerr=np.array(f_pairs_err), ecolor='k', fmt='none', capsize=4.5)
    return ax
def plotScaleMMdistribution(halo_m_scale_arr_all_r, cosmo, dt_m_arr):
    """
    Function plots the number of objects in pairs as a function of the scale of last MM
    --> the cuts on delta t_mm are overplotted to see the selection criterion
    @param halo_m_scale_arr_all_r :: presumably one array of last-MM scale
           factors per separation bin — confirm against the caller
    @param cosmo :: cosmology object forwarded to cswl.tmmToScale
    @param dt_m_arr :: lookback times [Gyr] drawn as vertical cut lines
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    bins = 20
    # stack the 20-bin histogram of each input array row-wise
    hist_all_r = np.zeros((0, bins))
    for i in range(len(halo_m_scale_arr_all_r)):
        hist_counts, a = np.histogram(halo_m_scale_arr_all_r[i], bins=bins)
        hist_all_r = np.append(hist_all_r, [hist_counts], axis=0)
        ax.plot(a[1:], hist_counts, '--', marker = 'd', color='k')
    # convert the t_MM cuts into scale factors and overplot them
    scale_mm = cswl.tmmToScale(cosmo, dt_m_arr)
    pal1 = sns.color_palette("Spectral", len(scale_mm)+1).as_hex()
    for j, l in enumerate(scale_mm):
        ax.vlines(l, np.min(hist_all_r), np.max(hist_all_r), colors=pal1[j], label=r'$t_{\rm MM}$ = %.1f Gyr'%dt_m_arr[j])
    setLabel(ax, r'Scale factor, $a$', r'Counts', '', 'default',[np.min(hist_all_r), np.max(hist_all_r)], legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', frameon=False)
    ax.set_yscale('log')
    return
def plotNpSep(ax, hd_z_halo, pairs_all, color, label, mec, errorbars = True):
    """
    Function plots the n_p as a function of separation
    @param hd_z_halo :: halo catalog forwarded to cswl.nPairsToFracPairs
    @param pairs_all :: raw pair counts, converted to number densities
    @param mec :: marker edge (and error bar) color
    @Returns :: (ax, n_pairs, n_pairs_err) so callers can reuse the values
    """
    pairs_all = np.array(pairs_all)
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # get number density of pairs with and without selection cuts
    n_pairs, n_pairs_err = cswl.nPairsToFracPairs(hd_z_halo, pairs_all)
    # changing all unit to kpc; slice radii to match the density array length
    r_p_kpc, n_pairs = 1e3*r_p[1:len(n_pairs)+1], n_pairs
    # plotting the results
    ax.plot( r_p_kpc , n_pairs, 'd', mec = mec, ms = 10, color=color, label=label)
    # errorbars
    if errorbars:
        n_pairs_err = np.array(n_pairs_err)
        ax.errorbar(r_p_kpc , np.array(n_pairs), yerr=n_pairs_err, ecolor=mec, fmt='none', capsize=4.5)
    return ax, n_pairs, n_pairs_err
def plotFracNdensityPairs(hd_z_halo, pairs_all, pairs_mm_dv_all, pairs_selected_all, plot_selected_pairs=True):
    """
    Function to plot the fractional number density of pairs for different selection criteria
    @param plot_selected_pairs :: if False, the "selected pairs" curve is
           skipped and its slots in the returned arrays are None
    @Returns :: (pairs_arr, pairs_arr_err, ax) — object arrays of the per-case
                densities/errors and the axes
    """
    flare = sns.color_palette("pastel", 5).as_hex()
    mec = ['k', '#05ad2c', '#db5807', '#a30a26', 'b']
    fig, ax = plt.subplots(1,1,figsize=(5,4))
    # plotting the 4 cases with the 4 different cuts
    ax, n_pairs, n_pairs_err = plotNpSep(ax, hd_z_halo, pairs_all[1], 'k', r' $\mathbf{\Gamma}_{m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off}}(r)\ $', mec[0])
    ax, n_mm_dv_pairs, n_pairs_mm_dv_err = plotNpSep(ax, hd_z_halo, pairs_mm_dv_all[1], flare[3], r'$\mathbf{\Gamma}_{t_{\rm MM};\ \tilde{X}_{\rm off}}(r|\ m;\ \Delta v)$', mec[3])
    # BUGFIX: these were previously only bound inside the if-branch, so
    # plot_selected_pairs=False raised a NameError at the np.array calls below.
    n_selected_pairs, n_selected_err = None, None
    if plot_selected_pairs:
        ax, n_selected_pairs, n_selected_err = plotNpSep(ax, hd_z_halo, pairs_selected_all[1], flare[2], r'$\mathbf{\Gamma}(r|\ m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off} )$'+'\n'+r'$t_{\rm MM} \in [0.6-1.2]$ Gyr, $\tilde{X}_{\rm off} \in [0.17, 0.54]$', mec[1])
    ax.set_yscale("log")
    setLabel(ax, r'Separation, $r$ [kpc]', r'$\mathbf{\Gamma}(r)$ [Mpc$^{-3}$]', '', 'default', 'default', legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=15, frameon=False)
    pairs_arr = np.array([n_pairs, n_mm_dv_pairs, n_selected_pairs], dtype=object)
    pairs_arr_err = np.array([n_pairs_err, n_pairs_mm_dv_err, n_selected_err], dtype=object)
    return pairs_arr, pairs_arr_err, ax
def plotCumulativeDist(vol, dt_m_arr, pairs_mm_all, pairs_mm_dv_all, n_pairs_mm_dt_all, n_pairs_mm_dv_dt_all, param = 't_mm'):
    """
    Function to plot the cumulative number of pairs for the total vol (<z=2) for pairs with dz and mass ratio criteria
    @param vol :: survey volume; counts are divided by 2*vol (pairs are
                  double-counted) to get densities
    @param param :: 't_mm' labels the curves in Gyr; anything else labels
                    them as Xoff bins (NOTE(review): that branch still says
                    "Gyr" — confirm the intended unit)
    @Returns :: the color palette used, so callers can match colors
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, _ = aimm.shellVolume()
    fig, ax = plt.subplots(1,2,figsize=(17,6))
    pal = sns.color_palette("coolwarm", len(dt_m_arr)+1).as_hex()
    # reference curves without any t_MM/Xoff criterion
    ax[0].plot( (1e3*r_p[1:]), (pairs_mm_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')
    ax[1].plot( (1e3*r_p[1:]), (pairs_mm_dv_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')
    for t_idx in range(len(dt_m_arr)):
        np_mm_dt, np_mm_dv_dt = n_pairs_mm_dt_all[t_idx], n_pairs_mm_dv_dt_all[t_idx]
        if param == 't_mm':
            label = r'$t_{\rm MM} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        else:
            label = r'$\tilde{X}_{\rm off} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        ax[0].plot( (1e3*r_p[1:]), (np_mm_dt[1:]/(2*vol)), 'kX', label = label, color=pal[t_idx])
        ax[1].plot( (1e3*r_p[1:]), (np_mm_dv_dt[1:]/(2*vol)), 'kX', color=pal[t_idx])
    ax[0].set_yscale('log')
    ax[1].set_yscale('log')
    setLabel(ax[0], r'Separation, $r$ [kpc]', 'Cumulative number of halo pairs\n'+r'[Mpc$^{-3}$]', r'Mass ratio 3:1, $\Delta z_{\rm R, S} < 10^{-3}$', 'default', 'default', legend=False)
    setLabel(ax[1], r'Separation, $r$ [kpc]', r'', 'Mass ratio 3:1', 'default', 'default', legend=False)
    ax[0].legend(bbox_to_anchor=(-0.5, -0.7), loc='lower left', ncol=4, frameon=False)
    return pal
def plotParameterDistributions(xoff_all, string=r'$\tilde{X}_{\rm off}$', xmax=5, filestring='xoff'):
    """
    Function to plot the parameter distribution i.e. SF and PDF
    @param xoff_all :: parameter samples; sorted before evaluating the
                       standard-normal SF/CDF/PDF at each value
    @param string :: axis/legend label; for the Xoff default the survival
                     function is drawn (and xmax is overridden by the data
                     maximum), otherwise the CDF is drawn
    @param filestring :: figure saved as ../figures/<filestring>_function.png
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    sf_xoff = norm.sf(np.sort(xoff_all))
    if string == r'$\tilde{X}_{\rm off}$':
        ax.plot(np.sort(xoff_all), sf_xoff, 'r-', label=r'Survival Function of '+string)
        xmax = np.max(xoff_all)
    else:
        ax.plot(np.sort(xoff_all), 1-sf_xoff, 'r-', label=r'CDF of '+string)
    pdf_xoff = norm.pdf(np.sort(xoff_all))
    ax.plot(np.sort(xoff_all), pdf_xoff, 'k-', label=r'PDF of '+string)
    setLabel(ax, string, 'Distribution of '+string, '', [np.min(xoff_all), xmax], 'default', legend=True)
    plt.savefig('../figures/'+filestring+'_function.png', facecolor='w', edgecolor='w', bbox_inches='tight')
    return ax
def axId(i):
    """Map a flat subplot index to (row, col) on a 2-column grid.

    Row-major order: 0->(0,0), 1->(0,1), 2->(1,0), 3->(1,1), matching the
    old if-chain, and generalizing to any non-negative i (the old version
    raised UnboundLocalError for i > 3).
    """
    return divmod(i, 2)
def plotPdf(ax, arr, string, color):
    """Overlay the standard-normal PDF evaluated at the sorted values of *arr*."""
    ordered = np.sort(arr)
    ax.plot(ordered, norm.pdf(ordered), '-', color=color,
            label=r'PDF of ' + string, lw=4)
    return
def saveFig(filename):
    """Save the current matplotlib figure under ../figures/ with white borders."""
    out_path = '../figures/' + filename
    plt.savefig(out_path, facecolor='w', edgecolor='w', bbox_inches='tight')
    return
def plotContour(u_pix, matrix_2D, xmin=10, xmax=150, ymin=0, ymax=2, ax=None, cmap='YlGnBu'):
    """
    Function plots a contour map
    @u_pix :: number of pixels in the FOV; a scalar means a square FOV on
              a 0..u_pix grid (xmin..ymax are ignored in that case), while
              a (nx, ny) sequence spans xmin..xmax by ymin..ymax
    @ax :: axes to draw on; a fresh figure is created when None
    @Returns :: (ax, contour set) — not the 2D matrix
    """
    if ax == None:
        fig, ax = plt.subplots(1,1,figsize=(7,6))
    if isinstance(u_pix, (int, float)):
        X, Y = np.meshgrid(np.linspace(0, u_pix, u_pix), np.linspace(0, u_pix, u_pix))
    if isinstance(u_pix, (list, tuple, np.ndarray)): # if FOV is a rectangle
        X, Y = np.meshgrid(np.linspace(xmin, xmax, u_pix[0]), np.linspace(ymin, ymax, u_pix[1]))
    plot = ax.contourf(X, Y, matrix_2D, cmap=cmap, origin='image')
    return ax, plot
def labelMZTmmXoff(ax, ylabel, redshift_limit=2):
    """Label a 2x2 grid of axes: stellar mass, redshift, t_MM (log x-scale)
    and Xoff panels, sharing the given y-label on the left column.
    """
    setLabel(ax[0, 0], r'Stellar mass, $\log{M^*}$', ylabel, '', 'default', 'default', legend=False)
    setLabel(ax[0, 1], 'Redshift, $z$', '', '', [0, redshift_limit], 'default', legend=False)
    setLabel(ax[1, 0], r'$t_{\rm MM}$', ylabel, '', 'default', 'default', legend=False)
    ax[1,0].set_xscale('log')
    setLabel(ax[1, 1], r'$\tilde{X}_{\rm off}$', '', '', 'default', 'default', legend=False)
    return
def plotBinsMZdistribution(mz_mat_tmm0, mz_mat_tmm1, tmm_bins, param=r'$t_{\rm MM} = $'):
    """Draw a 2x2 grid of contour maps: mass ratio (top row) and mean
    redshift (bottom row) vs separation, for two t_MM (or Xoff) bins
    (left/right columns).  Each mz_mat_* is indexed [0]=mass-ratio matrix,
    [1]=mean-redshift matrix.
    """
    fig, ax = plt.subplots(2,2,figsize=(15,15))
    ax0, pt0 = plotContour((mz_mat_tmm0[0].shape[1], mz_mat_tmm0[0].shape[0]), mz_mat_tmm0[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 0])
    ax1, pt1 = plotContour((mz_mat_tmm0[1].shape[1], mz_mat_tmm0[1].shape[0]), mz_mat_tmm0[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 0])
    setLabel(ax[0, 0], '', 'Mass ratio', param+' %.2f - %.2f'%(tmm_bins[0][0], tmm_bins[0][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 0], r'Separation, $r_p$ [kpc]', 'Mean redshift', '', 'default', 'default', legend=False)
    ax2, pt2 = plotContour((mz_mat_tmm1[0].shape[1], mz_mat_tmm1[0].shape[0]), mz_mat_tmm1[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 1])
    ax3, pt3 = plotContour((mz_mat_tmm1[1].shape[1], mz_mat_tmm1[1].shape[0]), mz_mat_tmm1[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 1])
    setLabel(ax[0, 1], '', '', param+ ' %.2f - %.2f'%(tmm_bins[1][0], tmm_bins[1][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 1], r'Separation, $r_p$ [kpc]', '', '', 'default', 'default', legend=False)
    return
def snsPlotLabels():
    """Apply the standard axis labels and tick sizes for the t_MM-vs-Xoff plots."""
    label_size = 20
    plt.xlabel(r'$t_{\rm MM}$ [Gyr]', fontsize=label_size)
    plt.ylabel(r'$\tilde{X}_{\rm off}$', fontsize=label_size)
    plt.xticks(fontsize=label_size)
    plt.yticks(fontsize=label_size)
    return
def plotGaussianKde(param_arr, Z, string, i, j, set_xy_lim=True):
    """Scatter parameter i vs parameter j with a KDE image underneath.

    @param param_arr :: sequence of parameter sample arrays
    @param Z :: precomputed 2D KDE grid; rotated 90 deg so it aligns with
                the (x, y) extent when shown with imshow
    @param string :: sequence of axis labels, indexed like param_arr
    @param set_xy_lim :: clamp the axes to the data range when True
    """
    xmin, xmax = np.min(param_arr[i]), np.max(param_arr[i])
    ymin, ymax = np.min(param_arr[j]), np.max(param_arr[j])
    fig, ax = plt.subplots(1,1,figsize=(5, 5))
    ax.plot(param_arr[i], param_arr[j], 'k.', markersize=.02)
    ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    if set_xy_lim:
        ax.set_xlim([xmin, xmax])
        ax.set_ylim([ymin, ymax])
    setLabel(ax, string[i], string[j], '', 'default', 'default', legend=False)
    return ax
def plotModelResults(ax, hd_halo, pairs_all, pairs_selected, vol):
    """
    Plots the models generated for bins of Tmm and Xoff
    @param ax :: sequence of three axes: cumulative densities, binned
                 densities, and the selected/all ratio
    @param vol :: volume used (times the halo count) to normalize counts
    @Returns :: the ratio of selected to all binned pair densities
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # plotting the cumulative pairs
    # NB: this local `norm` shadows scipy.stats.norm imported at module level.
    norm = vol*len(hd_halo)
    np_all, np_selected = pairs_all/norm, pairs_selected[1]/norm
    ax[0].plot( (1e3*r_p), (np_selected), 'rX', ls = '--', ms=9, label='Selected pairs')
    ax[0].plot( (1e3*r_p), (np_all), 'kX', ls = '--', label = 'All pairs', ms = 9)
    setLabel(ax[0], r'', r'Cumulative $n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins of radius
    np_all_bins, np_all_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_all)
    np_selected_bins, np_selected_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_selected[1])
    _ = plotFpairs(ax[1], r_p, np_all_bins, np_all_bins_err, label = 'All pairs', color='k')
    _ = plotFpairs(ax[1], r_p, np_selected_bins, np_selected_bins_err, label = 'Selected pairs')
    ax[1].set_yscale('log')
    setLabel(ax[1], r'', r'$n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins with respect to the control
    _ = plotFpairs(ax[2], r_p, np_selected_bins/np_all_bins, np_selected_bins_err, label='wrt all pairs', color='orange')
    setLabel(ax[2], r'Separation, $r$ [kpc]', r'Fraction of pairs, $f_{\rm halo\ pairs}}$ ', '', 'default', 'default', legend=False)
    return np_selected_bins/np_all_bins
6e824c90d5cc97b09e96bf2d9fa8d40cff2f3778 | 1,797 | py | Python | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | goatools/gosubdag/utils.py | camiloaruiz/goatools | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | [
"BSD-2-Clause"
] | null | null | null | """Small lightweight utilities used frequently in GOATOOLS."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
def extract_kwargs(args, exp_keys, exp_elems):
    """Split truthy user kwargs into valued arguments (dict) and flags (set)."""
    arg_dict = {key: val for key, val in args.items()
                if exp_keys is not None and key in exp_keys and val}
    # A key only becomes a flag if it was not already captured as a valued
    # argument (mirrors the original if/elif precedence).
    arg_set = {key for key, val in args.items()
               if key not in arg_dict
               and exp_elems is not None and key in exp_elems and val}
    return {'dict': arg_dict, 'set': arg_set}
def get_kwargs_set(args, exp_elem2dflt):
    """Return the set of expected flags the user enabled, plus any flags
    whose default value is True."""
    # Flags the user explicitly switched on.
    chosen = {key for key, val in args.items()
              if exp_elem2dflt is not None and key in exp_elem2dflt and val}
    # Flags that default to True (adding an existing key is a no-op).
    chosen.update(key for key, dfltval in exp_elem2dflt.items() if dfltval)
    return chosen
def get_kwargs(args, exp_keys, exp_elems):
    """Collect truthy user kwargs into one dict: expected keys keep their
    value, expected flags are stored as True."""
    selected = {}
    for name, value in args.items():
        if not value:
            continue
        if exp_keys is not None and name in exp_keys:
            selected[name] = value
        elif exp_elems is not None and name in exp_elems:
            selected[name] = True
    return selected
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| 41.790698 | 94 | 0.668893 |
6e82b8d1720684c00d864fb512765fbff3379ce5 | 309 | py | Python | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-03-26T10:30:45.000Z | 2021-03-26T10:30:45.000Z | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_ess/ymir/setups/forwarder.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-08-04T18:35:05.000Z | 2021-04-16T11:22:08.000Z | description = 'Monitors the status of the Forwarder'
# Device declarations for this NICOS setup.  `device` is supplied by the
# NICOS setup-file machinery (it is not imported here).
devices = dict(
    KafkaForwarder=device(
        'nicos_ess.devices.forwarder.EpicsKafkaForwarder',
        description='Monitors the status of the Forwarder',
        # Kafka topic on which the forwarder publishes its status.
        statustopic='UTGARD_forwarderStatus',
        # Kafka broker address — presumably the UTGARD test network; confirm.
        brokers=['172.30.242.20:9092']),
)
| 30.9 | 59 | 0.68932 |
6e83557731c2fd4923e8fa481bc7d1048e5e106e | 985 | py | Python | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | 2 | 2015-03-16T11:46:28.000Z | 2017-04-01T13:58:47.000Z | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | null | null | null | codetree/cli.py | slank/codetree | c1aad059ad31aa1b3cca80a89861c659fce217ac | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import logging
from .config import Config
import sys
def main():
    """Command-line entry point: configure logging, then build the source
    tree(s) described by the given configuration file(s).

    Exits 0 on a successful build, 1 otherwise.
    """
    ap = ArgumentParser()
    ap.add_argument("cfgfile", nargs="+", help="Codetree configuration file")
    # -v and -q are genuinely mutually exclusive verbosity choices.
    verbosity = ap.add_mutually_exclusive_group(required=False)
    verbosity.add_argument("-v", "--verbose", action="store_true", default=False)
    verbosity.add_argument("-q", "--quiet", action="store_true", default=False)
    # BUGFIX: -f/--fatality is unrelated to verbosity, so it must not live
    # in the mutually-exclusive group (previously "-v -f" was rejected).
    ap.add_argument("-f", "--fatality", action="store_true", default=False,
                    help="Any error is fatal")
    args = ap.parse_args()

    logfmt = "%(message)s"
    loglevel = logging.INFO
    if args.verbose:
        logfmt = "%(levelname)s: %(message)s"
        loglevel = logging.DEBUG
    if args.quiet:
        loglevel = logging.CRITICAL
    logging.basicConfig(format=logfmt, level=loglevel)

    config = Config(args.cfgfile)
    if config.build(args.fatality):
        sys.exit(0)
    else:
        sys.exit(1)
| 30.78125 | 82 | 0.655838 |
6e85d5b6b7bc4a9b52702783da32bcd642bd2255 | 5,862 | py | Python | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | null | null | null | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | 13 | 2017-01-31T22:54:03.000Z | 2021-02-02T21:42:33.000Z | notebooks/utils.py | cognoma/ml-workers | 781763c8361d49023222c7349350c3c4774ce4fa | [
"BSD-3-Clause"
] | 7 | 2017-06-29T14:19:11.000Z | 2018-04-08T12:06:21.000Z | """
Methods for building Cognoma mutation classifiers
Usage - Import only
"""
import pandas as pd
from sklearn.metrics import roc_curve, roc_auc_score
import plotnine as gg
def theme_cognoma(fontsize_mult=1):
    """Return the shared Cognoma plotnine theme.

    Args:
        fontsize_mult: multiplier applied to the base (14pt) and axis
            (12-13pt) font sizes.
    Returns:
        A plotnine theme (theme_bw plus Cognoma's color/grid overrides).
    """
    return (gg.theme_bw(base_size=14 * fontsize_mult) +
        gg.theme(line=gg.element_line(color="#4d4d4d"),
                 rect=gg.element_rect(fill="white", color=None),
                 text=gg.element_text(color="black"),
                 axis_ticks=gg.element_line(color="#4d4d4d"),
                 legend_key=gg.element_rect(color=None),
                 panel_border=gg.element_rect(color="#4d4d4d"),
                 panel_grid=gg.element_line(color="#b3b3b3"),
                 panel_grid_major_x=gg.element_blank(),
                 panel_grid_minor=gg.element_blank(),
                 strip_background=gg.element_rect(fill="#FEF2E2",
                                                  color="#4d4d4d"),
                 axis_text=gg.element_text(size=12 * fontsize_mult,
                                           color="#4d4d4d"),
                 axis_title_x=gg.element_text(size=13 * fontsize_mult,
                                              color="#4d4d4d"),
                 axis_title_y=gg.element_text(size=13 * fontsize_mult,
                                              color="#4d4d4d")))
def get_model_coefficients(classifier, feature_set, covariate_names):
    """
    Extract the feature names and associate them with the coefficient values
    in the final classifier object.
    * Only works for expressions only model with PCA, covariates only model,
      and a combined model
    * Assumes the PCA features come before any covariates that are included
    * Sorts the final dataframe by the absolute value of the coefficients
    Args:
        classifier: the final sklearn classifier object
        feature_set: string of the model's name {expressions, covariates, full}
        covariate_names: list of the names of the covariate features matrix
    Returns:
        pandas.DataFrame: mapping of feature name to coefficient value
    """
    # Removed the redundant function-local `import pandas as pd` (the module
    # already imports pandas at top level) and an unused `import numpy as np`.
    coefs = classifier.coef_[0]
    if feature_set == 'expressions':
        features = ['PCA_%d' % cf for cf in range(len(coefs))]
    elif feature_set == 'covariates':
        features = covariate_names
    else:
        # Combined model: PCA components first, covariates appended after.
        features = ['PCA_%d' % cf for cf in range(len(coefs) - len(covariate_names))]
        features.extend(covariate_names)
    coef_df = pd.DataFrame({'feature': features, 'weight': coefs})
    coef_df['abs'] = coef_df['weight'].abs()
    coef_df = coef_df.sort_values('abs', ascending=False)
    coef_df['feature_set'] = feature_set
    return coef_df
def get_genes_coefficients(pca_object, classifier_object,
                           expression_df, expression_genes_df,
                           num_covariates=None):
    """Identify gene coefficients from classifier after pca.
    Args:
        pca_object: The pca object from running pca on the expression_df.
        classifier_object: The logistic regression classifier object.
        expression_df: The original (pre-pca) expression data frame.
        expression_genes_df: The "expression_genes" dataframe used for gene
                             names.
        num_covariates: Optional, only needed if PCA was only performed on a
                        subset of the features. This should be the number of
                        features that PCA was not performed on. This function
                        assumes that the covariates features were at the end.
    Returns:
        gene_coefficients_df: A dataframe with entrez gene-ID, gene name,
                              coefficient, absolute value of coefficient, and
                              gene description. The dataframe is sorted by
                              absolute value of coefficient.
    """
    # Get the classifier coefficients (dropping trailing covariate
    # coefficients, which do not map back to genes).
    if num_covariates:
        coefficients = classifier_object.coef_[0][0:-num_covariates]
    else:
        coefficients = classifier_object.coef_[0]
    # Get the pca weights
    weights = pca_object.components_
    # Back-project the per-component coefficients through the PCA loadings
    # to obtain one effective coefficient per gene.
    gene_coefficients = weights.T @ coefficients.T
    # Create the dataframe with correct index
    gene_coefficients_df = pd.DataFrame(gene_coefficients, columns=['weight'])
    gene_coefficients_df.index = expression_df.columns
    gene_coefficients_df.index.name = 'entrez_id'
    # Stringify the gene index so it aligns with the expression columns.
    expression_genes_df.index = expression_genes_df.index.map(str)
    # Add gene symbol and description
    gene_coefficients_df['symbol'] = expression_genes_df['symbol']
    gene_coefficients_df['description'] = expression_genes_df['description']
    # Add absolute value and sort by highest absolute value.
    gene_coefficients_df['abs'] = gene_coefficients_df['weight'].abs()
    gene_coefficients_df.sort_values(by='abs', ascending=False, inplace=True)
    # Reorder columns
    gene_coefficients_df = gene_coefficients_df[['symbol', 'weight', 'abs',
                                                 'description']]
    return(gene_coefficients_df)
def select_feature_set_columns(X, feature_set, n_covariates):
    """Slice the combined feature matrix down to one model's feature set.

    Covariates occupy the first n_covariates columns, expressions the rest.
    """
    column_slices = {
        'covariates': slice(None, n_covariates),
        'expressions': slice(n_covariates, None),
    }
    try:
        cols = column_slices[feature_set]
    except KeyError:
        raise ValueError('feature_set not supported: {}'.format(feature_set)) from None
    return X[:, cols]
def get_threshold_metrics(y_true, y_pred):
    """Compute ROC metrics for one set of predictions.

    Args:
        y_true: Ground-truth binary labels.
        y_pred: Predicted scores/probabilities.

    Returns:
        dict with 'auroc' (area under the ROC curve) and 'roc_df' (a
        DataFrame with 'fpr', 'tpr' and 'threshold' columns).
    """
    roc_columns = ['fpr', 'tpr', 'threshold']
    # pd.DataFrame.from_items was deprecated in pandas 0.23 and removed in
    # pandas 1.0; build the frame from a column->values dict instead
    # (dict preserves insertion order, so column order is unchanged).
    roc_df = pd.DataFrame(dict(zip(roc_columns, roc_curve(y_true, y_pred))))
    auroc = roc_auc_score(y_true, y_pred)
    return {'auroc': auroc, 'roc_df': roc_df}
| 42.788321 | 85 | 0.638519 |
6e85eafe88b2abc4b10f2eb6623ed07ecab6567b | 1,740 | py | Python | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | docs/fossil-help-cmd.py | smitty1eGH/pyphlogiston | 5134be190cdb31ace04ac5ce2e699a48e54e036e | [
"MIT"
] | null | null | null | from subprocess import run
cmds = [
"3-way-merge",
"ci",
"help",
"push",
"stash",
"add",
"clean",
"hook",
"rebuild",
"status",
"addremove",
"clone",
"http",
"reconstruct",
"sync",
"alerts",
"close",
"import",
"redo",
"tag",
"all",
"co",
"info",
"remote",
"tarball",
"amend",
"commit",
"init",
"remote-url",
"ticket",
"annotate",
"configuration",
"interwiki",
"rename",
"timeline",
"artifact",
"dbstat",
"json",
"reparent",
"tls-config",
"attachment",
"deconstruct",
"leaves",
"revert",
"touch",
"backoffice",
"delete",
"login-group",
"rm",
"ui",
"backup",
"descendants",
"ls",
"rss",
"undo",
"bisect",
"diff",
"md5sum",
"scrub",
"unpublished",
"blame",
"export",
"merge",
"search",
"unset",
"branch",
"extras",
"mv",
"server",
"unversioned",
"bundle",
"finfo",
"new",
"settings",
"update",
"cache",
"forget",
"open",
"sha1sum",
"user",
"cat",
"fts-config",
"pikchr",
"sha3sum",
"uv",
"cgi",
"gdiff",
"praise",
"shell",
"version",
"changes",
"git",
"publish",
"sql",
"whatis",
"chat",
"grep",
"pull",
"sqlar",
"wiki",
"checkout",
"hash-policy",
"purge",
"sqlite3",
"zip",
]
# Dump `fossil help <command>` output for every known command into one
# org file (filename kept as-is, typo and all, for compatibility).
with open("fossile-cmds-help.org", "w") as out_file:
    for command in cmds:
        result = run(
            ["/home/osboxes/src/fossil-snapshot-20210429/fossil", "help", command],
            capture_output=True,
        )
        out_file.write(result.stdout.decode("utf-8"))
| 14.745763 | 77 | 0.440805 |
6e89094dd4c599ed774bc54e2865f3ed2293d233 | 257 | bzl | Python | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | 4 | 2021-07-21T01:43:50.000Z | 2021-11-18T03:23:18.000Z | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | null | null | null | internal/copts.bzl | zaucy/bzlws | a8f3e4b0bc168059ec92971b1ea7c214db2c5454 | [
"MIT"
] | 1 | 2022-02-03T07:53:17.000Z | 2022-02-03T07:53:17.000Z | _msvc_copts = ["/std:c++17"]
# clang-cl is MSVC-compatible and takes the same /std: switch.
_clang_cl_copts = ["/std:c++17"]
# GCC/Clang (the default case) use the GNU-style -std flag.
_gcc_copts = ["-std=c++17"]
# Toolchain-dependent C++17 compiler options, resolved at analysis time
# from the configured C++ toolchain (_msvc_copts is defined just above).
copts = select({
    "@bazel_tools//tools/cpp:msvc": _msvc_copts,
    "@bazel_tools//tools/cpp:clang-cl": _clang_cl_copts,
    "//conditions:default": _gcc_copts,
})
6e8aa5fdaccdc2cf8e079b7b4e650e213a55472a | 1,154 | py | Python | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | null | null | null | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | 1 | 2021-06-02T02:13:46.000Z | 2021-06-02T02:13:46.000Z | monitor.py | projectsbyif/trillian-demo-audit | 5bb08ae3c359698d8beb47ced39d21e793539396 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from trillian import TrillianLog
from print_helper import Print
from pprint import pprint
def main(argv):
    """Audit a Trillian log: verify the signed root, rebuild the Merkle
    tree from all entries, and show the latest log entry."""
    logging.basicConfig(level=logging.INFO)
    log = TrillianLog.load_from_environment()

    Print.status('Checking signature on signed log root')
    signed_root = log.get_log_root()
    Print.tick('Log root is signed correctly by public key')

    # * do full audit between hash[previous] and hash[current]
    # * do consistency check between hash[previous] and hash[current]
    Print.status(
        'Rebuilding Merkle tree from {} entries to get root hash'.format(
            signed_root.tree_size))
    Print.bullet('Looking for root hash: {}'.format(signed_root.root_hash))

    if log.full_audit(signed_root):
        Print.bullet('Calculated root hash: {}'.format(signed_root.root_hash))
        Print.tick('Root hashes match, Merkle tree appears correct')

    Print.status('Showing latest log entry')
    Print.normal(str(log.latest().json()))
    print()
if __name__ == '__main__':
main(sys.argv)
| 26.227273 | 70 | 0.707972 |
6e8b21d90213008722c8b31b5d6059ea9e59aa07 | 875 | py | Python | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 5 | 2021-01-28T16:45:49.000Z | 2021-08-15T06:47:17.000Z | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 8 | 2020-10-01T15:12:45.000Z | 2021-10-05T14:45:33.000Z | src/geocurrency/units/urls.py | OpenPrunus/geocurrency | 23cc075377d47ac631634cd71fd0e7d6b0a57bad | [
"MIT"
] | 2 | 2021-01-28T16:43:16.000Z | 2021-10-05T14:25:25.000Z | """
Units module URLs
"""
from django.conf.urls import url, include
from django.urls import path
from rest_framework import routers
from .viewsets import UnitSystemViewset, UnitViewset, \
    ConvertView, CustomUnitViewSet
from geocurrency.calculations.viewsets import ValidateViewSet, CalculationView
# URL namespace used for reversing (e.g. "units:...").
app_name = 'units'
# DRF router: unit systems at the root, plus per-system units and
# per-system custom units (the system name is captured from the URL).
router = routers.DefaultRouter()
router.register(r'', UnitSystemViewset, basename='unit_systems')
router.register(r'(?P<system_name>\w+)/units',
                UnitViewset, basename='units')
router.register(r'(?P<system_name>\w+)/custom',
                CustomUnitViewSet, basename='custom')
# Explicit endpoints first (conversion, formula validation/calculation),
# then everything else is handled by the router at the root.
urlpatterns = [
    path('convert/', ConvertView.as_view()),
    path('<str:unit_system>/formulas/validate/', ValidateViewSet.as_view()),
    path('<str:unit_system>/formulas/calculate/', CalculationView.as_view()),
    url(r'^', include(router.urls)),
]
6e8ba5d71602dfafef83788dd25424753fb81302 | 22 | py | Python | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | null | null | null | rtk/_reports_/__init__.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | [
"BSD-3-Clause"
] | 2 | 2020-04-03T04:14:42.000Z | 2021-02-22T05:30:35.000Z | from tabular import *
| 11 | 21 | 0.772727 |
6e8c6eb072fed5f8eeeb59211773c40061897cf1 | 383 | py | Python | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | 1 | 2021-03-09T20:49:51.000Z | 2021-03-09T20:49:51.000Z | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | 7 | 2021-05-08T11:05:15.000Z | 2021-05-08T11:12:27.000Z | backend/urls.py | starmarek/organize-me-2 | bd9b73d3e6d9a4ebc4cbb8a20c97729bdc6b1377 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import include, path
from rest_framework import routers
from .shifts.views import ShiftView
from .workers.views import WorkerView
# DRF router exposing CRUD endpoints for workers and shifts.
router = routers.DefaultRouter()
router.register("workers", WorkerView)
router.register("shifts", ShiftView)
# Django admin plus all router-generated API routes at the site root.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include(router.urls)),
]
| 23.9375 | 38 | 0.762402 |
6e8d075cdc130105dd93cb71efed865a3cfcfbc8 | 257 | py | Python | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | 2 | 2017-03-08T21:32:11.000Z | 2017-07-19T03:27:18.000Z | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | null | null | null | ssk/alpha/api.py | jobliz/solid-state-kinetics | c5767b400b19bd0256c806001664f0b369718bab | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from scipy import integrate
__all__ = ['area', 'simple']
def simple(p):
    # Placeholder conversion method (intentionally a no-op for now);
    # exported via __all__ alongside area().
    pass
def area(p):
    """Cumulative area under |gradient(p)|, normalized to end at 1.

    Integrates the absolute gradient of ``p`` with the cumulative
    trapezoid rule and rescales by the final (maximum) value. A constant
    input accumulates no area; instead of dividing by zero it is returned
    as an all-zeros array of the same length.
    """
    grad = np.abs(np.gradient(p))
    # scipy.integrate.cumtrapz was renamed cumulative_trapezoid in SciPy
    # 1.6 and the old name was removed in 1.14 -- use whichever exists.
    cumtrapz = getattr(integrate, 'cumulative_trapezoid', None)
    if cumtrapz is None:
        cumtrapz = integrate.cumtrapz
    cumul = np.hstack(([0], cumtrapz(grad)))
    total = max(cumul)
    if total == 0:
        # constant signal: no area accumulated, avoid 0/0 -> nan
        return cumul
    return cumul / total
| 17.133333 | 72 | 0.669261 |
6e8f20f780d781f8cdc23f8a2e62a4a9d0aaaf14 | 6,451 | py | Python | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | null | null | null | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | null | null | null | randominette.py | Dutesier/randominette | 2260c0f521d9fcc97f30a8cceb36c94dbee3d474 | [
"MIT"
] | 2 | 2022-01-19T00:27:59.000Z | 2022-01-19T03:46:21.000Z | # **************************************************************************** #
# #
# ::: :::::::: #
# randominette.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: ayalla, sotto & dutesier +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2022/01/13 18:14:29 by dareias- #+# #+# #
# Updated: 2022/01/20 13:10:47 by dareias- ### ########.fr #
# #
# **************************************************************************** #
import requests
import json
import random
import sys
import pprint
from decouple import config
import time
def main():
    """Pick random "victims" among the active users of a 42 cluster.

    An optional single CLI argument is scanned for flag letters (note the
    find(...) > 0 checks: the letter must not be the FIRST character, so a
    leading '-' is expected, e.g. "-lrc"):
      's' -- prompt for campus id, cluster and request delay,
      'l' -- list every active user,
      'r' -- also select everyone in the chosen user's row,
      'p' -- select a random percentage of users,
      'c' -- compact mode: print locations only.
    Requires 42-UID / 42-SECRET in the environment (python-decouple).
    """
    my_time = 1
    argc = len(sys.argv)
    pmode = 0
    if argc > 1 and sys.argv[1].find("c") > 0:
        pmode = 1
    if len(sys.argv) > 1 and sys.argv[1].find("s") > 0:
        # Get Campus ID and Cluster from user
        campus = int(input("Campus ID (38 for Lisbon): "))
        cluster = int(input("Cluster: "))
        my_time = int(input("Time between requests (change at your own risk): "))
    else :
        campus = 38
        cluster = 1
    if (my_time < 0):
        my_time = 1
        print("We're not time travelers - time set to 1 second")
    client_id = config('42-UID')
    client_secret = config('42-SECRET')
    # Get authorization token
    token_url = "https://api.intra.42.fr/oauth/token"
    data = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret
    }
    access_token = requests.post(
        token_url,
        data,
    )
    ret = access_token
    if ret.status_code != 200:
        return(print(f"Error: Failed to get OAUTH2 token: {ret.status_code}"))
    ret = ret.json()
    # Set pagination
    page = {
        "number": 1,
        "size": 100
    }
    # Pass our authorization token as a header
    headers = {
        "Authorization": f"{ret['token_type']} {ret['access_token']}",
    }
    # Pass our pagination definitions as a dict
    params = {
        "page": page
    }
    time.sleep(my_time)
    # Get info from the API
    url = f'https://api.intra.42.fr/v2/campus/{campus}/locations?sort=-end_at,host&filter[active]=true&range[host]=c{cluster}, c{cluster + 1}r00s00'
    ret = requests.get(url, headers=headers, json=params)
    if ret.status_code != 200:
        return(print(f"Error: Failed to GET from {url}: Got status code {ret.status_code}"))
    users_in_campus = ret.json()
    i = 0
    if len(sys.argv) > 1 and sys.argv[1].find("l") > 0:
        # pprint.pprint(users_in_campus)
        print_user_info(users_in_campus)
    if len(users_in_campus) == 0:
        return(print(f"There are currently {i} active users in cluster {cluster} at campus {campus}"))
    # Check if we have all elements or if there are more pages
    # (a 'Link' header plus a full page suggests more results to fetch;
    # keep requesting until a short page comes back)
    if 'Link' in ret.headers and len(users_in_campus)==page['size'] :
        while True:
            time.sleep(my_time)
            page['number'] = page['number'] + 1
            ret = requests.get(url, headers=headers, json=params)
            second_page = ret.json()
            users_in_campus = users_in_campus + second_page
            if len(second_page) != page['size']:
                break
    # Get ammount of active users
    for student in users_in_campus:
        i = i + 1
    print(f"There are currently {i} active users in cluster {cluster} at campus {campus}")
    if i == 0:
        return
    # Single random pick; in compact mode only the location is shown.
    chosen_one = random_user(users_in_campus)
    print("The Chosen One is: ")
    if pmode:
        print(users_in_campus[chosen_one]['user']['location'])
    else:
        print(users_in_campus[chosen_one]['user']['login'])
        print(users_in_campus[chosen_one]['user']['location'])
    # Pick all users from the random's user row
    if len(sys.argv) > 1 and sys.argv[1].find("r") > 0 :
        row = get_user_row(users_in_campus[chosen_one]['user']['location'])
        if row:
            print(f"The Chosen Row is {row}, and the unlucky ones are: ")
            for student in users_in_campus:
                if (get_user_row(student['user']['location'])==row):
                    if pmode:
                        print(student['user']['location'], end=" ")
                    else:
                        print(student['user']['login'])
                        print(student['user']['location'])
            if pmode:
                print("")
    # Pick a random percentage for users to be randomly selected
    if len(sys.argv) > 1 and sys.argv[1].find("p") > 0 :
        while (True):
            percentage = int(input("Percentage of victims (%): "))
            if (percentage <= 100 and percentage > 0):
                break
            else :
                print("Percentage must be between 0 and 100")
        number_users = int(len(users_in_campus) * (percentage / 100))
        if number_users <= 0:
            return (print(f"The percentage {percentage}% translates to a total of 0 users"))
        sample = random_users(users_in_campus, number_users)
        # Print chosen users
        for n in sample:
            if pmode:
                print(users_in_campus[n]['user']['location'], end=" ")
            else:
                print(users_in_campus[n]['user']['login'])
                print(users_in_campus[n]['user']['location'])
        if pmode:
            print("")
def random_users(users_in_campus, nu):
    """Pick ``nu`` distinct random indices into ``users_in_campus``.

    A single-user list always yields ``[0]``, regardless of ``nu``.
    """
    total = len(users_in_campus)
    if total == 1:
        return [0]
    return random.sample(range(total), nu)
def random_user(users_in_campus):
    """Return the index of one uniformly chosen user.

    Raises:
        ValueError: If the user list is empty (the old code crashed with
            UnboundLocalError in that case).
    """
    total = len(users_in_campus)
    if total == 0:
        raise ValueError("cannot choose from an empty user list")
    # randrange's upper bound is exclusive, so randrange(total) covers
    # every index 0..total-1. The previous randrange(0, i - 1) could
    # never select the last user.
    return random.randrange(total)
def print_user_info(users_in_campus):
    """Print one "user: <login>    loc: <location>" line per active session."""
    for entry in users_in_campus:
        user = entry['user']
        print("user: {}\tloc: {}".format(user['login'], user['location']))
def get_user_row(location):
    """Extract the row token (e.g. "r2") from a location like "c1r2s3"."""
    row_start = location.find("r")
    row_end = location.find("s")
    return location[row_start:row_end]
if __name__ == '__main__':
main()
| 38.39881 | 148 | 0.509068 |
6e918c5815dd4774b7932aa1ec3b9fffa1176641 | 750 | py | Python | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | newsman/factories.py | acapitanelli/newsman | 3f109f42afe6131383fba1e118b7b9457d76096b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module provides a way to initialize components for processing
pipeline.
Init functions are stored into a dictionary which can be used by `Pipeline` to
load components on demand.
"""
from .pipeline import Byte2html, Html2text, Html2image, Html2meta, Text2title
def build_factories():
    """Create the default component factories for `Pipeline`.

    Returns:
        dict mapping each component name to a zero-state factory
        (``config -> component instance``). Instantiation is lazy: the
        pipeline classes are only constructed when a factory is called.
    """
    # The original literal listed 'text2title' twice; duplicate dict keys
    # silently overwrite each other, so the redundant entry is removed.
    factories = {
        'byte2html': lambda config: Byte2html(config),
        'html2text': lambda config: Html2text(config),
        'html2image': lambda config: Html2image(config),
        'html2meta': lambda config: Html2meta(config),
        'text2title': lambda config: Text2title(config),
    }
    return factories
| 32.608696 | 78 | 0.698667 |
6e91c4809b083bd8e190189c7a4286818bc08e69 | 3,673 | py | Python | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null | deprecated.py | thu-fit/DCGAN-anime | da549bd45a6ca3c4c5a8894945d3242c59f823a0 | [
"MIT"
] | null | null | null |
def sampler(self, z, y=None):
'''generate iamge given z'''
with tf.variable_scope("generator") as scope:
# we hope the weights defined in generator to be reused
scope.reuse_variables()
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
h0 = tf.reshape(
linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
[-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(h0, train=False))
h1 = deconv2d(h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
h1 = tf.nn.relu(self.g_bn1(h1, train=False))
h2 = deconv2d(h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
h2 = tf.nn.relu(self.g_bn2(h2, train=False))
h3 = deconv2d(h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
h3 = tf.nn.relu(self.g_bn3(h3, train=False))
h4 = deconv2d(h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4')
return tf.nn.tanh(h4)
else:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4 = int(s_h/2), int(s_h/4)
s_w2, s_w4 = int(s_w/2), int(s_w/4)
# yb = tf.reshape(y, [-1, 1, 1, self.y_dim])
yb = tf.reshape(y, [batch_size, 1, 1, self.y_dim])
z = concat([z, y], 1)
h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
h0 = concat([h0, y], 1)
h1 = tf.nn.relu(self.g_bn1(
linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin'), train=False))
h1 = tf.reshape(h1, [batch_size, s_h4, s_w4, self.gf_dim * 2])
h1 = conv_cond_concat(h1, yb)
h2 = tf.nn.relu(self.g_bn2(
deconv2d(h1, [batch_size, s_h2, s_w2, self.gf_dim * 2], name='g_h2'), train=False))
h2 = conv_cond_concat(h2, yb)
return tf.nn.sigmoid(deconv2d(h2, [batch_size, s_h, s_w, self.c_dim], name='g_h3'))
  def sampler1(self, z, y=None, reuse=True):
    '''Generate a given number of samples using z. The first dimension of z is the number of samples'''
    # Unconditional-only variant of sampler(): the output batch size is
    # taken from z's static shape instead of an external batch_size, and
    # variable reuse is controllable via the `reuse` flag. `y` is
    # accepted but unused here.
    with tf.variable_scope("generator") as scope:
      # we hope the weights defined in generator to be reused
      if reuse:
        scope.reuse_variables()
      num_samples = z.get_shape().as_list()[0]
      # Spatial sizes for the four stride-2 deconvolutions (repeated
      # halving of the output resolution).
      s_h, s_w = self.output_height, self.output_width
      s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
      s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
      s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
      s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
      # project `z` and reshape
      h0 = tf.reshape(
          linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
          [-1, s_h16, s_w16, self.gf_dim * 8])
      h0 = tf.nn.relu(self.g_bn0(h0, train=False))
      h1 = deconv2d(h0, [num_samples, s_h8, s_w8, self.gf_dim*4], name='g_h1')
      h1 = tf.nn.relu(self.g_bn1(h1, train=False))
      h2 = deconv2d(h1, [num_samples, s_h4, s_w4, self.gf_dim*2], name='g_h2')
      h2 = tf.nn.relu(self.g_bn2(h2, train=False))
      h3 = deconv2d(h2, [num_samples, s_h2, s_w2, self.gf_dim*1], name='g_h3')
      h3 = tf.nn.relu(self.g_bn3(h3, train=False))
      h4 = deconv2d(h3, [num_samples, s_h, s_w, self.c_dim], name='g_h4')
      # tanh output: pixel values in [-1, 1]
      return tf.nn.tanh(h4)
| 39.494624 | 103 | 0.613395 |
6e922f24956d34276912f3a429414da7e22eb9ef | 14,915 | py | Python | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | 1 | 2021-02-23T07:42:42.000Z | 2021-02-23T07:42:42.000Z | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | null | null | null | Prioritize/get_HPO_similarity_score.py | mbosio85/ediva | c0a1aa4dd8951fa659483164c3706fb9374beb95 | [
"MIT"
] | 1 | 2019-09-26T01:21:06.000Z | 2019-09-26T01:21:06.000Z | ## how we measure the similarity between two lists w/ IC per each node
## we have a DAG strucutre
## goal is for each Gene !! output a 'semantic distance'
# based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2756558/ [but different]
# with this two equal nodes will have distance '0'
# maximum distance is -2log(1/tot) ~~ 25
import networkx as nx
import cPickle as pickle
import numpy as np
import math
import random
def calc_me(DG, a, b, PW =False):
#actual calculation of IC distance
#return IC(a) + IC(b) -2*IC(MICA)
# MICA = Max IC Ancestor
if any(x not in DG.nodes()for x in [a,b]):
#means one key is not in the DG nodes,
# it can happen so we need to be safe
#return max possible value
return 2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
#check for obsolete nodes
#substitute by the replacement if obsolete
a = DG.node[a].get('replaced_by',a)
b = DG.node[b].get('replaced_by',b)
if any(x not in DG.nodes()for x in [a,b]):
#means one key is not in the DG nodes,
# it can happen so we need to be safe
#return max possible value
return 2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
if a==b :
return 0.0
#
# IC_a = DG.node[a]['IC']
# IC_b = DG.node[b]['IC']
#
# ancestors_a = list(nx.ancestors(DG,a))
# ancestors_b = list(nx.ancestors(DG,b))
#
# ancestors_a.append(a)
# ancestors_b.append(b)
#
# common_ancestors = list(set(ancestors_a) & set(ancestors_b))
# ancestors_val = [DG.node[x]['IC'] for x in common_ancestors]
#
# distance = IC_a + IC_b -2.0*max(ancestors_val)
offset =1000
distance = nx.shortest_path_length(DG,a,b,weight='dist')%offset
print distance
return distance
def list_distance(DG,Q,G,Query_distances):
#idea is :
# for each query HPO calculate all distances
# store them in a dict with HPOs as keys
# value is the minimum value of distance on the query HPOs
# So than for the list of genes it's enough to
# collect the values at columns names
# and if missing set '1'
#cover cases where no HPO from Query
# or no HPO provided, or no HPO
# associated with the gene
if 'NONE' in Q or 'NONE' in G:
return (0,Query_distances)
if len(Q) <1 or len(G) < 1:
return (0,Query_distances)
offset =1000
if Query_distances == 0:
# #build it
for k_q in Q:
if k_q not in DG.nodes():
#missing node (obsolete not updated or just wrong value)
continue
k_q = DG.node[k_q].get('replaced_by',k_q)
distance = nx.shortest_path_length(DG,k_q,weight='dist')
if Query_distances ==0:
Query_distances = {key: float(value)%offset for (key, value) in distance.items()}
print 'calc whole dist'
else:
for k in Query_distances.keys():
try:
Query_distances[k] = min([Query_distances[k] , float(distance[k])%offset] )
except:
Query_distances[k] = float(Query_distances[k])%offset
if Query_distances == 0:
#can happen when the original list has no updated HPO or wrong values
return (0,0)
Query_distances['maxval']=2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
#now I have the query distances value
# map the genes HPO and extract values.
# missing one : print it and add it to the db
#results = []
maxval = Query_distances['maxval']
results = [Query_distances.get(q_g,maxval) for q_g in G]
#for q_g in G:
# q_g = DG.node[q_g].get('replaced_by',q_g)
# results.append(Query_distances.get(q_g,2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))))
final_value = np.mean(results)/maxval
if final_value > 1:
final_value = 1 #borderline cases whiere go up an down to get to the other node
return (1-final_value,Query_distances)
def calc_distance(DG,query,gene,Query_distances=0):
### DEPRECATED
## Distance (Query, Gene)
##
## Query = HPO list from user
## Gene = HPO associated to each gene
#asymmetric one
if len(query)*len(gene) ==0:
#one of the lists is empty at least
return 0
#avg [ sum_{t_i \in Q} min_{t_2 \in G} ( IC(t_1) + IC(t_2) - 2*IC(MICA(t_1,t_2) ) ) ]
#graph contains IC
distances = []
distances =[ float(min([calc_me(DG,qg,x) for x in gene])) for qg in query]
final_value = np.mean(distances)/(2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)])))
#print distances
#the division is to ensure a maximum to 1
#print final_value
return (1-final_value)
def check_qualtiy(DG):
    #find if all ancestors have IC <= sons
    # if not, why :
    # Sanity check on the ontology graph: an ancestor's information
    # content should never exceed a descendant's. Prints every violating
    # (node, ancestor, IC difference) triple. Python 2 print syntax;
    # the misspelled name "qualtiy" is kept to avoid breaking callers.
    for node in DG:
        ancestors = nx.ancestors(DG,node)
        ancestors_val = [DG.node[x]['IC'] - DG.node[node]['IC'] for x in ancestors]
        problematic = [i for i, e in enumerate(ancestors_val) if e > 0]
        for i in problematic:
            # NOTE(review): this indexes list(ancestors) with positions
            # from an earlier iteration over the same set -- relies on the
            # set iterating in the same order twice; fragile, confirm.
            print node
            print list(ancestors)[i]
            print ancestors_val[i]
    return None
def get_DG_edges(HPO, outfile):
#This one generates a dict file to generate edges of the HPO graph
#download data
#wget https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo
#then call this python ... hp.obo myHPO_edges.pk
import sys
import cPickle as pickle
listfile = HPO
out_HPO = dict()
replacements =[]
alternatives =[]
token = False
obsolete =False
with open(listfile) as rd:
for line in rd:
if line.startswith('id: HP:'):
if token and not obsolete:
out_HPO[name]=parents
if repl !='':
replacements.append((name,repl))
token=True
name = line.strip().split('id: ')[1]
parents = []
repl =''
obsolete =False
elif line.startswith('is_a:'):
parents.append(line.strip().split('is_a: ')[1].split(' !')[0])
elif line.startswith('replaced_by:'):
#add a field to say it's replaced
repl = line.strip().split('replaced_by: ')[1]
obsolete =False #means we can backtrack it
elif line.startswith('is_obsolete:'):
obsolete =True
elif line.startswith('alt_id:'):
#add alternative nodes, will be later added with
# replacement field for the most common one
alt = line.strip().split('alt_id: ')[1]
alternatives.append((name,alt))
elif line.startswith('consider:'):
#add alternative nodes, will be later added with
# replacement field for the most common one
alt = line.strip().split('consider: ')[1]
alternatives.append((alt,name))
obsolete =False #means we can backtrack it
out_HPO[name] = parents
out_HPO['replacements'] = replacements
out_HPO['alternatives'] = alternatives
pickle.dump( out_HPO, open( outfile,'wb'))
def generate_HPO_graph(edges_file,counts,output):
offset =1000 #penalization for the distance
#usage: python me edges.pk ontology.txt graph.pk
# counts as wget wget http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab
# awk -F '\t' '{print $5}' < phenotype_annotation.tab | sort |uniq -c | awk '{print $2 "\t" $1}' > HPO_counts.txt
#idea is a graph with attribute the IC value per node
# calculated
# generate graph with counts:
counts_d=dict()
tot = 0
with open(counts) as rd:
for line in rd:
ff=line.strip().split('\t')
counts_d[ff[0]] = int(ff[1])
tot += int(ff[1])
print tot
# load dict with edges
edges =pickle.load(open(edges_file,'rb'))
print( len(edges.keys()))
#get replacements of obsolete nodes
replacements = dict(edges.get('replacements',[]))
tmpval = edges.pop('replacements',None)
#let's build a graph
DG = nx.DiGraph()
#populate with alternatives
#mark alternatives as replaced, it's the same for us.
alternatives = edges.get('alternatives',[])
tmpval = edges.pop('alternatives',None)
# DG.add_edges_from([(1,2)])
for k in edges.keys():
DG.add_node(k)
DG.node[k]['count']=0.0
ancestors = [(x,k) for x in edges[k]]
DG.add_edges_from(ancestors)
if k in replacements.keys():
DG.node[k]['replaced_by']=replacements[k]
DG.node[k]['IC'] = -math.log(1.0/tot)
#nx.set_node_attributes(DG, 0,'count',)
print 'edges'
print DG.number_of_edges()
print 'nodes'
print DG.number_of_nodes()
for k in DG.nodes():
DG.node[k]['count']=0.0
#populate with raw counts
for k in counts_d.keys():
DG.node[k]['count'] = counts_d[k]
DG.nodes(data='count')
#now fill it with the actual value.
for k in edges.keys():
desc = nx.descendants(DG,k)
count = DG.node[k]['count']
for i in desc:
count += DG.node[i]['count']
if count >0 :
DG.node[k]['IC'] = -math.log(float(count)/tot)
else :
DG.node[k]['IC'] = -math.log(1.0/tot) #missing nodes, set as rare as possible
#print k
#print DG.node[k]
# add edges weight
for a,b in DG.edges():
DG[a][b]['dist']=offset+abs(DG.node[a]['IC'] - DG.node[b]['IC'])
#alternatives fill in IC and count
for node,k in alternatives:
DG.add_node(k)
DG.node[k]['count']=0.0
DG.node[k]['replaced_by']=node
DG.node[k]['IC'] = DG.node[node]['IC']
#count is the IC of the node then : IC = information content
G = DG.to_undirected()
DG= G
pickle.dump(DG,open(output,'wb'))
return None
def generate_gene_2_HPO_dict(HPO_info,outfile):
#get mapping gene -> HPOs
#download from HPO charite ALL_FREQ gene to phenotype
#wget http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/lastStableBuild/artifact/annotation/ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt
gene_2_HPO = dict()
with open(HPO_info) as rd:
for line in rd:
if line.startswith('#'):pass
else:
ff = line.strip().split('\t')
#format #Format: entrez-gene-id<tab>entrez-gene-symbol<tab>HPO-Term-Name<tab>HPO-Term-ID
key = ff[1]
HPO = ff[-1]
to_add = gene_2_HPO.get(key,[])
to_add.append(HPO)
to_add= list(set(to_add))
gene_2_HPO[key] = to_add
pickle.dump(gene_2_HPO, open(outfile,'wb'))
return None
def extract_HPO_related_to_gene(gene_2_HPO,gene):
    """Return the list of HPO terms associated with *gene*.

    *gene_2_HPO* may be either the mapping itself ({gene: [HPO terms]})
    or a path to a pickled mapping. Unknown genes yield an empty list.
    """
    if type(gene_2_HPO) is dict:
        gene_2_HPO_dict = gene_2_HPO
    else:
        # use a context manager so the pickle file is closed (the old
        # bare open() call leaked the file handle)
        with open(gene_2_HPO, 'rb') as handle:
            gene_2_HPO_dict = pickle.load(handle)
    outlist = gene_2_HPO_dict.get(gene, [])
    return outlist
def alter_HPO_list(DG,HPO):
#way to get a list of HPO
# for each one of these you can
# - keep it
# - choose an ancestor
# - choose a descendant
# - remove it
# - choose a HPO unrelated
# all with same priority
out_list =[]
toadd =''
for hpo in HPO:
if 'NONE' == hpo :
out_list = []
break
#check replacement
hpo = DG.node[hpo].get('replaced_by',hpo)
p_val = random.uniform(0,4)
if p_val < 1:
#keep it
out_list.append(hpo)
continue
elif p_val < 2:
#ancestor
ancestors = list(nx.ancestors(DG,hpo))
if len(ancestors) >0:
toadd=random.choice(ancestors)
out_list.append(toadd)
continue
elif p_val < 3:
#descendants, if none, nothing
desc = list(nx.descendants(DG,hpo))
if len(desc) >0:
toadd=random.choice(desc)
out_list.append(toadd)
continue
#remove it
else:
ancestors = nx.ancestors(DG,hpo)
desc = nx.descendants(DG,hpo)
remaining = list(set(DG.node.keys()) - (ancestors |desc |set(hpo)))
if len(remaining) >0:
toadd=random.choice(remaining)
out_list.append(toadd)
if len(out_list) <1:
out_list=['NONE']
return out_list
def attempt_graph_populate_dist(DG,offset=1000):
#attempt to use the directed graph to build a new graph with
# node1 --> ancestor [dist = length]
# node1 --> descendant [dist =length]
# so ideally then we can use that to search for shortest path length
# and get same value as calc_me
GG=nx.Graph()
for root_id in DG.nodes():
from_root = (nx.shortest_path_length(DG,root_id,weight='dist'))
for k,v in from_root.items():
GG.add_node(k)
GG.node[k]['IC '] = DG.node[k]['IC']
links = [(root_id ,k)]
GG.add_edges_from(links)
GG[root_id][k]['dist']=offset+abs(DG.node[root_id]['IC'] - DG.node[k]['IC'])
## add replaced!
replaced_nodes =[x for x in GG.nodes(data=True) if 'replaced_by'in x[1].keys()]
for node_info in replaced_nodes:
k = node_info[0]
node_dict = node_info[1]
GG.add_node(k)
GG.node[k]['IC '] = node_dict['IC']
GG.node[k]['replaced_by '] = node_dict['replaced_by']
return GG
def calc_pairwise(DG,outfile):
#generates a file too big for the moment
count =0
#select all keys
all_dists = dict()
#remove all replaced_by
offset=1000
GG = attempt_graph_populate_dist(DG,offset)
kk = [x for x in DG.node.keys() if 'replaced_by' not in DG.node[x].keys()]
DG = GG
kk_y = kk
#for all keys
for key_x in kk:
print "%s %s"%(key_x , str(count))
count +=1
#calc distance
dists = nx.shortest_path_length(GG,key_x,weight='dist')
#pop k from key_y
#Store them in a dict key_x,key_y = [val]
kk_y.pop(0)
tmp_keys =[':'.join([key_x,y]) for y in kk_y]
tmp_vals =[dists[y]%offset for y in kk_y]
tmp_dict = dict(zip(tmp_keys, tmp_vals))
all_dists.update(tmp_dict)
#if keyx == keyy dont store it: dist =0
#another time
pickle.dump(all_dists,open(outfile,'wb'))
return None
| 32.852423 | 158 | 0.578947 |
6e942a1e8c0fd4f03d779fd36629d8f97651ff14 | 364 | py | Python | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 4 | 2017-07-23T13:48:35.000Z | 2021-12-03T18:11:50.000Z | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 21 | 2017-07-23T13:15:20.000Z | 2020-09-28T02:13:11.000Z | tests/tfgraph/utils/test_datasets.py | tfgraph/tfgraph | 19ae968b3060275c631dc601757646abaf1f58a1 | [
"Apache-2.0"
] | 1 | 2017-07-28T10:28:04.000Z | 2017-07-28T10:28:04.000Z | import tfgraph
def test_data_sets_naive_4():
  # naive_4 loads with shape (8, 2) -- presumably an 8-row edge list with
  # two endpoint columns; confirm against tfgraph.DataSets docs.
  assert tfgraph.DataSets.naive_4().shape == (8, 2)
def test_data_sets_naive_6():
  # naive_6: 9 rows x 2 columns.
  assert tfgraph.DataSets.naive_6().shape == (9, 2)
def test_data_sets_compose():
  # compose_from_path on the wiki-Vote CSV (second flag True) should load
  # 65499 rows x 2 columns.
  assert tfgraph.DataSets.compose_from_path("./datasets/wiki-Vote/wiki-Vote.csv",
                                            True).shape == (65499, 2)
| 24.266667 | 81 | 0.653846 |
6e94f020370af25596b5a73fe263fae2cf996278 | 668 | py | Python | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/virenv/lib/python2.7/site-packages/haystack/outputters/__init__.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:mod:`haystack.outputs` -- classes that create an output
==============================================================================
"""
from haystack import utils
class Outputter(object):
    """Base interface for classes that render a parsed memory structure.

    Subclasses must override :meth:`parse`; this base only collects the
    target-specific helpers derived from the supplied memory handler.
    """

    def __init__(self, memory_handler):
        # Keep the handler and derive the target platform's ctypes proxy,
        # the utils helper bound to it, and the record model.
        self._memory_handler = memory_handler
        target_platform = self._memory_handler.get_target_platform()
        self._ctypes = target_platform.get_target_ctypes()
        self._utils = utils.Utils(self._ctypes)
        self._model = self._memory_handler.get_model()
        # address cache, populated by concrete outputters during traversal
        self._addr_cache = {}

    def parse(self, obj, prefix='', depth=10):
        """Render *obj*; concrete outputters must implement this."""
        raise NotImplementedError('Please define parse')
| 27.833333 | 85 | 0.591317 |
6e9649858a66821226a8387a5c2ae25467b9d1c9 | 631 | py | Python | adminmgr/media/code/python/red3/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/config/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/python/red3/BD_543_565_624_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | #!/usr/bin/python3
import sys
from collections import defaultdict


def aggregate(lines):
    """Sum score and observation count per (primary, secondary) key pair.

    Each input line is ``primary,secondary,score`` or, with four fields,
    ``p1,p2,secondary,score`` where the first two fields form one
    comma-containing primary key.  The last character of the score field
    (the trailing newline) is dropped before int-parsing, matching the
    original reducer's ``[:-1]`` behavior.

    Returns ``{(primary, secondary): [total_score, count]}``.
    """
    totals = {}
    for line in lines:
        record = line.split(",")
        if len(record) > 3:
            # first two fields together are a single comma-containing key
            record = [record[0] + "," + record[1], record[2], record[3]]
        score = int(record[2][:-1])
        key = (record[0], record[1])
        if key not in totals:
            totals[key] = [score, 1]
        else:
            totals[key][0] += score
            totals[key][1] += 1
    return totals


def best_per_group(totals):
    """For each primary key pick the secondary key with the highest rate.

    Rate is ``total * 100 / count``; ties are broken by the larger total.
    Returns ``[(primary, best_secondary), ...]`` sorted by primary key.

    Bug fix: the original seeded an empty list for an unseen primary key
    and appended only on *later* occurrences, silently dropping the first
    (secondary, rate, total) entry of every group.  Every entry is kept now.
    """
    groups = defaultdict(list)
    for key, (total, count) in totals.items():
        rate = (total * 100) / count
        groups[key[0]].append((key[1], rate, total))
    results = []
    for primary in sorted(groups):
        best = sorted(groups[primary], key=lambda item: (-item[1], -item[2]))[0]
        results.append((primary, best[0]))
    return results


def main():
    """Streaming-reducer entry point: stdin in, ``primary,best`` lines out."""
    for primary, best in best_per_group(aggregate(sys.stdin)):
        print(primary, best, sep=",")


if __name__ == '__main__':
    main()
| 24.269231 | 54 | 0.557845 |
6e9740ebd2a997095586f788ec3e7c7b37619818 | 9,622 | py | Python | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 2 | 2019-04-02T14:31:27.000Z | 2020-04-13T20:41:46.000Z | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 7 | 2019-08-07T14:44:54.000Z | 2020-06-05T17:30:51.000Z | hbgd_data_store_server/studies/management/commands/load_idx.py | pcstout/study-explorer | b49a6853d8155f1586360138ed7f87d165793184 | [
"Apache-2.0"
] | 1 | 2019-03-27T01:32:30.000Z | 2019-03-27T01:32:30.000Z | # Copyright 2017-present, Bill & Melinda Gates Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import zipfile
import fnmatch
from pandas import read_csv
from django.core.management.base import BaseCommand, CommandError
from ...models import Study, Count, Variable, Domain, EMPTY_IDENTIFIERS
# Regex file pattern defining the naming convention of IDX files
FILE_PATTERN = r'^IDX_(\w*)\.csv'
# Suffixes of domain name, code and category columns
# e.g. LB domain columns are LBTEST, LBTESTCD and LBCAT
DOMAIN_FORMAT = '{domain}TEST'
DOMAIN_CODE_FORMAT = '{domain}TESTCD'
DOMAIN_CAT_FORMAT = '{domain}CAT'
def get_study(row, study_cache=None, **kwargs):
    """
    Finds the study for an entry.

    :param row: mapping-like record holding the study-id column.
    :param study_cache: optional dict shared across calls; cache hits skip
        the database lookup.
    :param kwargs: must contain ``study_id_field``, the column name to read.
    :returns: the Study instance, or None when the id is empty/missing.
    """
    study_id_field = kwargs['study_id_field']
    # Bug fix: ``if not study_cache`` also replaced an *empty* dict supplied
    # by the caller with a throwaway local dict, so entries were never stored
    # in the shared cache and every row hit the database.  Only a truly
    # missing cache is replaced now.
    if study_cache is None:
        study_cache = {}
    study_id = row[study_id_field]
    if study_id in EMPTY_IDENTIFIERS:
        return None
    elif study_id in study_cache:
        return study_cache[study_id]
    study, _ = Study.objects.get_or_create(study_id=study_id)
    study_cache[study_id] = study
    return study
def get_domain_variable(row, domain, variable_cache=None):
    """
    Get a Variable model specifying the rows domain, category and
    code.

    :param row: mapping-like record with the domain's TEST/TESTCD/CAT columns.
    :param domain: Domain instance whose ``code`` selects the column names.
    :param variable_cache: optional dict shared across calls, keyed by
        ``(domain.id, code)``.
    :returns: the Variable instance, or None when the code is empty/missing.
    """
    # Bug fix: ``if not variable_cache`` replaced a caller-supplied empty
    # dict with a local one, defeating the shared cache entirely.
    if variable_cache is None:
        variable_cache = {}
    decode_idx = DOMAIN_FORMAT.format(domain=domain.code)
    code_idx = DOMAIN_CODE_FORMAT.format(domain=domain.code)
    cat_idx = DOMAIN_CAT_FORMAT.format(domain=domain.code)
    code = row[code_idx]
    if code in EMPTY_IDENTIFIERS:
        return None
    attrs = dict(domain=domain, code=code)
    cache_key = (domain.id, code)
    if cache_key in variable_cache:
        return variable_cache[cache_key]
    try:
        var = Variable.objects.get(**attrs)
    except Variable.DoesNotExist:
        # on first sight, also record the category column when it is set
        category = row.get(cat_idx)
        if category not in EMPTY_IDENTIFIERS:
            attrs['category'] = category
        var = Variable.objects.create(label=row[decode_idx], **attrs)
    variable_cache[cache_key] = var
    return var
def get_qualifiers(row, valid_qualifiers, qualifier_cache=None):
    """
    Extract qualifier variables from row.

    :param row: mapping-like record with one column per qualifier.
    :param valid_qualifiers: ``(qualifier_domain, column, suffix)`` tuples as
        produced by :func:`get_valid_qualifiers`.
    :param qualifier_cache: optional dict shared across calls, keyed by
        ``(qualifier.id, code)``.
    :raises ValueError: when any qualifier value is empty for this row.
    :returns: list of Variable instances, one per qualifier.
    """
    # Bug fix: ``if not qualifier_cache`` replaced a caller-supplied empty
    # dict with a local one, so the cache never accumulated entries.
    if qualifier_cache is None:
        qualifier_cache = {}
    qualifiers = []
    for qualifier, qual_code, suffix in valid_qualifiers:
        code = row.get(qual_code + suffix)
        if code in EMPTY_IDENTIFIERS:
            raise ValueError('Qualifiers cannot be empty')
        elif isinstance(code, float) and code.is_integer():
            # integral floats (a pandas artifact) are stored as plain ints
            code = int(code)
        attrs = dict(domain=qualifier, code=str(code))
        cache_key = (qualifier.id, str(code))
        if cache_key in qualifier_cache:
            qualifiers.append(qualifier_cache[cache_key])
            continue
        try:
            var = Variable.objects.get(**attrs)
        except Variable.DoesNotExist:
            var = Variable.objects.create(label=row[qual_code],
                                          **attrs)
        qualifier_cache[cache_key] = var
        qualifiers.append(var)
    return qualifiers
def get_valid_qualifiers(columns):
    """
    Returns a list of the valid qualifier columns.

    Each item is a ``(qualifier_domain, column_name, suffix)`` tuple, where
    ``suffix`` names the companion coded-value column ``column_name+suffix``
    (empty string when no companion column exists).
    """
    valid_qualifiers = []
    qualifiers = Domain.objects.filter(is_qualifier=True)
    for qual in qualifiers:
        # qualifier codes may contain fnmatch-style wildcards; translate to a
        # regex and resolve against the dataframe's columns
        wildcard_re = fnmatch.translate(qual.code)
        cols = [col for col in columns if re.match(wildcard_re, col)]
        if not cols:
            continue
        elif len(cols) > 1:
            raise Exception('Qualifier code must match only one column per file.')
        qual_code = cols[0]
        # look for a companion column named '<qual_code><word-suffix>'
        suffix_re = qual_code + r'(\w{1,})'
        potential_suffixes = [re.match(suffix_re, col).group(1) for col in columns
                              if re.match(suffix_re, col)]
        suffix = ''
        if len(potential_suffixes) > 0:
            # NOTE(review): only the first matching suffix is used — assumes at
            # most one companion column per qualifier; confirm with the IDX spec.
            suffix = potential_suffixes[0]
        valid_qualifiers.append((qual, qual_code, suffix))
    return valid_qualifiers
def process_idx_df(df, domain, **kwargs):
    """
    Process an IDX csv file, creating Code, Count and Study
    objects.

    ``kwargs`` must carry ``study_id_field``, ``count_subj_field`` and
    ``count_obs_field`` (column names).  Raises ValueError when any of the
    three mandatory columns is missing from the dataframe.
    """
    count_subj_field = kwargs['count_subj_field']
    count_obs_field = kwargs['count_obs_field']
    study_id_field = kwargs['study_id_field']
    # the three identifying columns are mandatory — bail out early otherwise
    for required in [study_id_field, count_subj_field, count_obs_field]:
        if required not in df.columns:
            raise ValueError('IDX file does not contain %s column, '
                             'skipping.' % required)
    valid_qualifiers = get_valid_qualifiers(df.columns)
    # per-call caches shared across rows to avoid repeated DB lookups
    study_cache, variable_cache, qualifier_cache = {}, {}, {}
    # normalize missing cells to the literal 'NaN' so they compare against
    # EMPTY_IDENTIFIERS below
    df = df.fillna('NaN')
    for _, row in df.iterrows():
        count = row[count_obs_field]
        subjects = row[count_subj_field]
        # rows without usable counts are skipped
        if any(c in EMPTY_IDENTIFIERS for c in (count, subjects)):
            continue
        try:
            qualifiers = get_qualifiers(row, valid_qualifiers, qualifier_cache)
        except ValueError:
            # a row with an empty qualifier value is ignored entirely
            continue
        study = get_study(row, study_cache, **kwargs)
        if not study:
            continue
        # the domain variable, when present, is prepended to the qualifiers
        variable = get_domain_variable(row, domain, variable_cache)
        if variable:
            qualifiers = [variable] + qualifiers
        query = Count.objects.create(count=count, subjects=subjects, study=study)
        query.codes = qualifiers
        query.save()
class Command(BaseCommand):
    help = """
    Loads queries into database given one or more IDX csv files or zip
    files containing IDX csv files (disregarding all zipfile structure).
    """

    def add_arguments(self, parser):
        """Register the file list, column-name overrides and --clear flag."""
        parser.add_argument('files', nargs='+', type=str,
                            help='One or more csv or zip files')
        parser.add_argument('-study_id_field', type=str, default='STUDYID',
                            help='Name of column to use as study_id.')
        parser.add_argument('-count_subj_field', type=str, default='COUNT_SUBJ',
                            help='Name of column to use as subject count.')
        parser.add_argument('-count_obs_field', type=str, default='COUNT_OBS',
                            help='Name of column to use as observation count.')
        # NOTE(review): store_true with default=True means --clear can never
        # be switched off from the command line; kept as-is to preserve
        # behavior, but confirm whether a --no-clear option was intended.
        parser.add_argument('--clear', action='store_true',
                            default=True, dest='clear',
                            help='Clear database before processing data.')

    def process_file(self, filepath, zip_file=None, **kwargs):
        """Load a single IDX csv, optionally from inside *zip_file*.

        Returns True when the file was processed, False when it was skipped
        (non-matching name, unknown domain, or unreadable content).
        """
        # Ensure the file matches the FILE_PATTERN
        basename = os.path.basename(filepath)
        match = re.search(FILE_PATTERN, basename)
        if not match:
            return False
        # Ensure that Domain exists
        domain = match.group(1)
        try:
            domain = Domain.objects.get(code=domain)
        except Domain.DoesNotExist:
            return False
        # Load file
        try:
            if zip_file:
                with zip_file.open(filepath) as f:
                    df = read_csv(f)
            else:
                with open(filepath) as f:
                    df = read_csv(f)
        except Exception:
            # narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here
            self.stderr.write('%s could not be read ensure '
                              'it is a valid csv file.' % basename)
            return False
        # Process dataframe
        self.stdout.write('Processing %s' % basename)
        try:
            process_idx_df(df, domain, **kwargs)
        except ValueError as e:
            self.stderr.write(str(e))
        return True

    def handle(self, *args, **options):
        """Optionally clear existing data, then process every input file."""
        if options['clear']:
            queries = Count.objects.all()
            self.stdout.write('Deleting %s counts' % len(queries))
            queries.delete()
            codes = Variable.objects.all()
            self.stdout.write('Deleting %s variables' % len(codes))
            codes.delete()
        # snapshot counts so the summary below reports only new entries
        n_queries = Count.objects.count()
        n_studies = Study.objects.count()
        n_codes = Variable.objects.count()
        processed = False
        for f in options['files']:
            if f.endswith('.csv'):
                if not re.search(FILE_PATTERN, os.path.basename(f)):
                    self.stderr.write('Processing %s skipped, does '
                                      'not match %s naming convention.'
                                      % (f, FILE_PATTERN))
                    continue
                # bug fix: was ``processed =`` which let one later failure
                # erase an earlier success and raise CommandError wrongly
                processed |= self.process_file(f, **options)
            elif f.endswith('.zip') or f.endswith('.upload'):
                # bug fix: the archive was never closed; use a context manager
                with zipfile.ZipFile(f) as zip_file:
                    for zf in zip_file.filelist:
                        processed |= self.process_file(zf.filename, zip_file, **options)
        if not processed:
            raise CommandError('None of the supplied files could '
                               'be processed.')
        self.stdout.write('Wrote %s Study entries' %
                          (Study.objects.count() - n_studies))
        self.stdout.write('Wrote %s Variable entries' %
                          (Variable.objects.count() - n_codes))
        self.stdout.write('Wrote %s Count entries' %
                          (Count.objects.count() - n_queries))
| 35.902985 | 84 | 0.610788 |
6e97ddc9ef075e7d004c1410ff22b946e2b0175d | 1,937 | py | Python | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 7 | 2021-08-24T14:34:33.000Z | 2021-12-10T12:43:50.000Z | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | null | null | null | setup.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 1 | 2021-09-10T17:50:38.000Z | 2021-09-10T17:50:38.000Z | from setuptools import setup, find_packages
# PyPI long description is taken verbatim from the README.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="neureca",
    version="0.0.1",
    description="A framework for building conversational recommender systems",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Hojin Yang",
    author_email="hojin.yang7@gmail.com",
    url="https://github.com/hojinYang/neureca",
    # console script: `neureca-train` dispatches to neureca.cmd
    entry_points={
        "console_scripts": [
            "neureca-train = neureca.cmd:neureca_train_command",
        ],
    },
    # dependencies are fully pinned to the tested versions
    install_requires=[
        "click==7.1.2",
        "Flask==1.1.2",
        "joblib==1.0.1",
        "numpy==1.20.2",
        "pandas==1.2.3",
        "pytorch-crf==0.7.2",
        "pytorch-lightning==1.2.7",
        "scikit-learn==0.24.1",
        "scipy==1.6.2",
        "sklearn==0.0",
        "spacy==3.0.6",
        "summarizers==1.0.4",
        "tokenizers==0.10.2",
        "toml==0.10.2",
        "torch==1.8.1",
        "TorchCRF==1.1.0",
        "torchmetrics==0.3.1",
        "tqdm==4.60.0",
        "transformers==4.5.0",
        "typer==0.3.2",
    ],
    # the demo folder is shipped separately and excluded from the package
    packages=find_packages(exclude=["demo-toronto"]),
    python_requires=">=3",
    # bundle the web-interface assets with the installed package
    package_data={"neureca": ["interface/static/*/*", "interface/templates/index.html"]},
    zip_safe=False,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development :: Libraries",
    ],
)
| 32.283333 | 89 | 0.565823 |
6e98642f2b6b958a07ac0e545cf862d4394aa56c | 786 | py | Python | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | Thread.PY/thread-rlock.py | Phoebus-Ma/Python-Helper | d880729f0bbfbc2b1503602fd74c9177ecd4e970 | [
"MIT"
] | null | null | null | ###
# Thread rlock test.
#
# License - MIT.
###
import time
from threading import Thread, RLock
# thread_test2 - Thread test2 function.
def thread_test2(rlock):
    """Sleep briefly, then take and release *rlock* once."""
    time.sleep(0.5)
    with rlock:
        print('Third acquire.')
# thread_test1 - Thread test1 function.
def thread_test1(rlock):
    """Demonstrate reentrancy: acquire *rlock* twice, release twice."""
    with rlock:
        print('First acquire.')
        with rlock:
            print('Second acquire.')
# Main function.
def main():
    """Run both demo workers concurrently on one shared reentrant lock."""
    shared_lock = RLock()
    worker_one = Thread(target=thread_test1, args=(shared_lock,))
    worker_two = Thread(target=thread_test2, args=(shared_lock,))
    worker_one.start()
    worker_two.start()
# Program entry.
if __name__ == '__main__':
    main()
| 14.290909 | 64 | 0.619593 |
6e98aa2320fefc8b613e9eb26ab879e97d03ea24 | 1,319 | py | Python | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 204 | 2015-11-06T21:34:34.000Z | 2022-03-30T16:17:01.000Z | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 509 | 2015-11-05T13:54:43.000Z | 2022-03-30T22:15:30.000Z | api/python/tests/test_bingo_nosql.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 89 | 2015-11-17T08:22:54.000Z | 2022-03-17T04:26:28.000Z | import shutil
import tempfile
from indigo.bingo import Bingo
from tests import TestIndigoBase
class TestBingo(TestIndigoBase):
    """Exercises the Bingo NoSQL substructure-search API end to end."""

    def setUp(self) -> None:
        super().setUp()
        # fresh scratch directory per test for the Bingo database files
        self.test_folder = tempfile.mkdtemp()

    def tearDown(self) -> None:
        shutil.rmtree(self.test_folder)

    def test_molecule_search_sub(self) -> None:
        """A substructure query for carbon matches the three ring molecules."""
        bingo = Bingo.createDatabaseFile(self.indigo, self.test_folder, 'molecule', '')
        self.assertTrue(bingo)
        # two identical cyclohexanes, one piperidine, and a lone nitrogen
        m1 = self.indigo.loadMolecule('C1CCCCC1')
        m2 = self.indigo.loadMolecule('C1CCCCC1')
        m3 = self.indigo.loadMolecule('C1CCNCC1')
        m4 = self.indigo.loadMolecule('N')
        m1_id = bingo.insert(m1)
        m2_id = bingo.insert(m2)
        m3_id = bingo.insert(m3)
        bingo.insert(m4)
        bingo.optimize()
        q = self.indigo.loadQueryMolecule('C')
        result = bingo.searchSub(q)
        ids = []
        while result.next():
            ids.append(result.getCurrentId())
        # only m4 (no carbon) must be excluded, in insertion order
        self.assertEqual(3, len(ids))
        self.assertEqual([m1_id, m2_id, m3_id], ids)
        # stored records round-trip to molecules equal to the originals
        self.assertTrue(self.indigo.exactMatch(m1, bingo.getRecordById(m1_id)))
        self.assertTrue(self.indigo.exactMatch(m2, bingo.getRecordById(m2_id)))
        self.assertTrue(self.indigo.exactMatch(m3, bingo.getRecordById(m3_id)))
| 33.820513 | 87 | 0.64746 |
6e9910237b294e11a1a1bbded611300e71f69a4f | 3,932 | py | Python | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/scripts/get_kit.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import json
import sys
from tortuga.exceptions.kitNotFound import KitNotFound
from tortuga.kit.kitCli import KitCli
from tortuga.wsapi.kitWsApi import KitWsApi
class GetKitCli(KitCli):
    """
    Get kit command line interface.
    """
    def parseArgs(self, usage=None):
        """Register --quiet and the output-format flags, then defer to KitCli."""
        cmd_options_group = _('Command Options')
        self.addOptionGroup(cmd_options_group, '')
        self.addOptionToGroup(cmd_options_group, '--quiet',
                              action='store_true', dest='bQuiet',
                              help=_('Return success (0) if kit exists,'
                                     ' otherwise 1.'))
        output_attr_group = _('Output formatting options')
        self.addOptionGroup(output_attr_group, None)
        self.addOptionToGroup(
            output_attr_group, '--json',
            action='store_true', default=False,
            help=_('JSON formatted output')
        )
        self.addOptionToGroup(
            output_attr_group, '--xml',
            action='store_true', default=False,
            help=_('XML formatted output')
        )
        super(GetKitCli, self).parseArgs(usage=usage)
    def runCommand(self):
        """Look the kit up via the web-service API and print it.

        Exits 0 on success; in --quiet mode exits 1 when the kit is
        missing, otherwise re-raises KitNotFound for the CLI framework.
        """
        self.parseArgs(_("""
Returns details of the specified kit
"""))
        name, version, iteration = \
            self.getKitNameVersionIteration(self.getArgs().kitspec)
        api = self.configureClient(KitWsApi)
        try:
            kit = api.getKit(name, version=version, iteration=iteration)
            if not self.getArgs().bQuiet:
                # --xml takes precedence over --json; default is console text
                if self.getArgs().xml:
                    print(kit.getXmlRep())
                elif self.getArgs().json:
                    print(json.dumps({
                        'kit': kit.getCleanDict(),
                    }, sort_keys=True, indent=4, separators=(',', ': ')))
                else:
                    self._console_output(kit)
            sys.exit(0)
        except KitNotFound:
            if self.getArgs().bQuiet:
                sys.exit(1)
            # Push the "kit not found" exception up the stack
            raise
    def _console_output(self, kit):
        """Print a human-readable listing of the kit and its components."""
        print('{0}-{1}-{2}'.format(kit.getName(),
                                   kit.getVersion(),
                                   kit.getIteration()))
        print(' ' * 2 + '- Description: {0}'.format(kit.getDescription()))
        print(' ' * 2 + '- Type: {0}'.format(
            'OS' if kit.getIsOs() else 'Application'
            if kit.getName() != 'base' else 'System'))
        print(' ' * 2 + '- Removable: {0}'.format(kit.getIsRemovable()))
        print(' ' * 2 + '- Components:')
        for component in kit.getComponentList():
            print(' ' * 4 + '- Name: {0}, Version: {1}'.format(
                component.getName(), component.getVersion()))
            print(' ' * 6 + '- Description: {0}'.format(
                component.getDescription()))
            # OS kits carry no compatible-OS listing of their own
            if not kit.getIsOs():
                compatible_os = component.getOsInfoList() +\
                    component.getOsFamilyInfoList()
            else:
                compatible_os = []
            if compatible_os:
                print(' ' * 6 + '- Operating system(s): {0}'.format(
                    ', '.join([str(item) for item in compatible_os])))
def main():
    """Console-script entry point: build the CLI object and run it."""
    GetKitCli().run()
| 31.96748 | 74 | 0.559003 |
6e99d90082f82cff092fcb68582087a7ab692e17 | 2,264 | py | Python | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | 1 | 2021-11-07T02:31:58.000Z | 2021-11-07T02:31:58.000Z | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | null | null | null | examples.py | ThyagoFRTS/power-eletric | dd26cd5ffb2aca0741d8983e57351488badc64da | [
"MIT"
] | null | null | null | from power_sizing import calculate_power_luminance
from power_sizing import calculate_number_and_power_of_tugs
from conductor_sizing import conduction_capacity
from conductor_sizing import minimum_section
from conductor_sizing import voltage_drop
from conductor_sizing import harmonic_rate
from neutral_sizing import get_neutral_section
from protection_sizing import get_conductor_protection_section
import pathlib
# IMPORTANT: all inputs are in Portuguese, remember this
# Calculate the lighting power of a room
# inputs: area (m^2)
calculate_power_luminance(12)
# Calculate the number and power of general-purpose outlets (TUGs) of a room
# inputs: ambient name (str), perimeter (m) — 'cozinha' means kitchen
calculate_number_and_power_of_tugs('cozinha',13.3)
# Size the conductor by conduction capacity
# inputs: power (W/VA); voltage: optional (default 220); power factor fp: optional, used with W (default 1);
# ft/fg: correction factors; circuit_type: optional 'mono'/'tri' (default 'mono')
section1 = conduction_capacity(21000, fp=0.9 ,ft=0.87, fg=0.8, circuit_type='tri')
# Size the conductor by minimum section
# inputs: circuit type (str) — 'forca' means power circuit
section2 = minimum_section('forca')
# Size the conductor by voltage drop
# inputs: power (W/VA), distance (m), fp: optional (default 1), circuit_type: optional 'mono'/'tri' (default 'mono'),
# isolation_type: optional, 0 = non-magnetic / 1 = magnetic conduit (default 0), drop_rate: optional (default 0.04)
section3 = voltage_drop(13000,40, drop_rate=0.02, circuit_type='tri', fp = 0.75, isolation_type = 0)
# Size the conductor by harmonic rate
# inputs: harmonics [I1, I3, I5, ...]; circuit_type: optional 'tri'/'bi' (default 'tri')
section4, thd3 = harmonic_rate(harmonics = [100,60,45,30,20], fp = 1, ft=1, fg=1 , circuit_type = 'tri', installation_method = 'B1')
# Size the neutral conductor
# inputs: phase section (mm^2), Ib: project current; balanced_circuit: optional bool (default True);
# circuit_type: optional 'mono'/'tri' (default 'mono')
neutral_section1 = get_neutral_section(95, 10, circuit_type = 'tri', index_THD3 = 0.14, balanced_circuit = True)
# Size the protection device
# inputs: phase section (mm^2), Ib: project current
# NOTE(review): the next line recomputes (and rebinds) neutral_section1 under
# the "protection" heading; the actual protection call follows it — confirm
# whether this duplication is intentional.
neutral_section1 = get_neutral_section(95, 127, index_THD3 = 0.14, circuit_type = 'tri', balanced_circuit = True, installation_method = 'B1', ft=1, fg=1)
get_conductor_protection_section(95) | 48.170213 | 153 | 0.774293 |
6e9df45528e4294de8ca5838baa62293adbb002d | 784 | py | Python | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | 1 | 2022-02-08T16:37:43.000Z | 2022-02-08T16:37:43.000Z | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | myapp/migrations/0008_doctordata_specialist.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-05-15 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the doctordata and specialist tables."""

    dependencies = [
        ('myapp', '0007_auto_20210317_1817'),
    ]
    operations = [
        # doctordata: one row per doctor; sid is a plain integer
        # (presumably referencing specialist.sid, not a ForeignKey — confirm)
        migrations.CreateModel(
            name='doctordata',
            fields=[
                ('itemid', models.IntegerField(primary_key=True, serialize=False)),
                ('dname', models.CharField(max_length=50)),
                ('sid', models.IntegerField()),
            ],
        ),
        # specialist: specialty catalogue keyed by sid
        migrations.CreateModel(
            name='specialist',
            fields=[
                ('sid', models.IntegerField(primary_key=True, serialize=False)),
                ('sname', models.CharField(max_length=50)),
            ],
        ),
    ]
| 27.034483 | 83 | 0.53699 |
6e9f10181a7ecfeffe5b3e63362769aa8677cc14 | 12,338 | py | Python | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | 1 | 2021-01-14T18:35:44.000Z | 2021-01-14T18:35:44.000Z | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | null | null | null | eventide/message.py | blakev/python-eventide | ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0 | [
"MIT"
] | 2 | 2021-04-20T22:09:02.000Z | 2021-07-29T21:52:30.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# >>
# python-eventide, 2020
# LiveViewTech
# <<
from uuid import UUID, uuid4
from datetime import datetime
from operator import attrgetter
from functools import total_ordering
from dataclasses import (
field,
asdict,
fields,
dataclass,
_process_class,
make_dataclass,
)
from typing import (
Dict,
List,
Type,
Mapping,
Callable,
Optional,
NamedTuple,
)
from pydantic import BaseModel, Field
from eventide.utils import jdumps, jloads, dense_dict
from eventide._types import JSON
f_blank = Field(default=None)
class Metadata(BaseModel):
    """A message's metadata object contains information about the stream where the
    message resides, the previous message in a series of messages that make up a
    messaging workflow, the originating process to which the message belongs, as well
    as other data that are pertinent to understanding the provenance and disposition.
    Message metadata is data about messaging machinery, like message schema version,
    source stream, positions, provenance, reply address, and the like.
    """
    class Config:
        # accept unknown metadata keys read back from the store
        extra = 'allow'
        orm_mode = True
    # yapf: disable
    stream_name: Optional[str] = f_blank
    position: Optional[int] = f_blank
    global_position: Optional[int] = f_blank
    causation_message_stream_name: Optional[str] = f_blank
    causation_message_position: Optional[int] = f_blank
    causation_message_global_position: Optional[int] = f_blank
    correlation_stream_name: Optional[str] = f_blank
    reply_stream_name: Optional[str] = f_blank
    schema_version: Optional[str] = f_blank
    time: Optional[float] = f_blank
    # yapf: enable
    def __repr__(self) -> str:
        # dynamically scan the available fields the first time this
        # object instance is printed out, looking for fields where
        # repr=True -- we then save those fields so we can dynamically
        # extract their current value each time.
        attr = '__repr_fields__'
        if not hasattr(self, attr):
            repr_fields = filter(lambda f: f.repr, fields(self))
            repr_fields = set(map(attrgetter('name'), repr_fields))
            setattr(self, attr, repr_fields)
        o = ', '.join('%s=%s' % (k, getattr(self, k)) for k in getattr(self, attr))
        return '%s(%s)' % (self.__class__.__name__, o)
    def to_dict(self) -> Dict:
        """Return only the explicitly-set fields as a plain dict."""
        return self.dict(skip_defaults=True, exclude_unset=True)
    def to_json(self) -> str:
        """Serialize :meth:`to_dict` with the project's JSON encoder."""
        return jdumps(self.to_dict())
    @property
    def identifier(self) -> str:
        # '<stream>/<position>'; %d raises TypeError when position is unset
        return '%s/%d' % (self.stream_name, self.position)
    @property
    def causation_identifier(self) -> str:
        # same '<stream>/<position>' shape, but for the causing message
        return '%s/%d' % (
            self.causation_message_stream_name, self.causation_message_position
        )
    @property
    def replies(self) -> bool:
        """True when this message requests a reply on some stream."""
        return bool(self.reply_stream_name)
    def do_not_reply(self) -> 'Metadata':
        """Clear the reply address; returns self for chaining."""
        self.reply_stream_name = None
        return self
    def follow(self, other: 'Metadata') -> 'Metadata':
        """Record *other* as this message's cause and inherit its
        correlation and reply streams; returns self for chaining."""
        self.causation_message_stream_name = other.stream_name
        self.causation_message_position = other.position
        self.causation_message_global_position = other.global_position
        self.correlation_stream_name = other.correlation_stream_name
        self.reply_stream_name = other.reply_stream_name
        return self
    def follows(self, other: 'Metadata') -> bool:
        """True when this metadata was derived from *other* via follow()."""
        return self.causation_message_stream_name == other.stream_name \
            and self.causation_message_position == other.position \
            and self.causation_message_global_position == other.global_position \
            and self.correlation_stream_name == other.correlation_stream_name \
            and self.reply_stream_name == other.reply_stream_name
    def correlates(self, stream_name: str) -> bool:
        """True when this message's workflow originates from *stream_name*."""
        return self.correlation_stream_name == stream_name
@dataclass(frozen=True, repr=True)
@total_ordering
class MessageData:
    """MessageData is the raw, low-level storage representation of a message.
    These instances are READ from the database and should not be created directly.
    """
    type: str
    stream_name: str
    data: JSON
    metadata: JSON
    id: UUID
    position: int
    global_position: int
    time: float
    @classmethod
    def from_record(cls, record: Mapping) -> 'MessageData':
        """Build a new instance from a row in the message store."""
        rec = dict(record)
        # data/metadata columns are stored as JSON strings
        rec['data'] = jloads(rec.get('data', '{}'))
        rec['metadata'] = jloads(rec.get('metadata', '{}'))
        # fall back to "now" when the row carries no timestamp
        rec['time'] = rec.get('time', datetime.utcnow()).timestamp()
        return cls(**rec)
    def __gt__(self, other: 'MessageData') -> bool:
        # ordering follows global position; total_ordering fills in the rest
        return self.global_position > other.global_position
    def __ge__(self, other: 'MessageData') -> bool:
        return self.global_position >= other.global_position
    def __eq__(self, other: 'MessageData') -> bool:
        # content-based equality: positions, id and time are ignored
        return self.stream_name == other.stream_name \
            and self.type == other.type \
            and self.data == other.data \
            and self.metadata == other.metadata
    @property
    def category(self) -> str:
        # 'category-id' -> 'category'; a bare category stream returns itself
        return self.stream_name.split('-')[0]
    @property
    def is_category(self) -> bool:
        """True when the stream name carries no '-<id>' part."""
        return '-' not in self.stream_name
    @property
    def stream_id(self) -> Optional[str]:
        """Everything after the first '-', or None for a category stream."""
        if '-' not in self.stream_name:
            return None
        return self.stream_name.split('-', 1)[1]
    @property
    def cardinal_id(self) -> Optional[str]:
        """The stream id up to the first '+' (compound ids), else None."""
        if '-' not in self.stream_name:
            return None
        return self.stream_name.split('-', 1)[1].split('+')[0]
    @property
    def command(self) -> Optional[str]:
        """The ':command' part of the category, when present."""
        if ':' not in self.category:
            return None
        return self.category.split(':', 1)[1].split('-')[0]
class SerializedMessage(NamedTuple):
    """A light representation of a Message instance before writing to message store."""
    id: str                          # message UUID rendered as a string
    stream_name: str                 # destination stream
    type: str                        # message class name
    data: str                        # JSON-encoded payload
    metadata: str                    # JSON-encoded (densified) metadata
    expected_version: Optional[int]  # optimistic-concurrency guard, if any
@dataclass(frozen=False, repr=False, init=True, eq=False)
class Message:
    """Base class for defining custom Message records for the message store.
    Messages are converted into SerializedMessage right before being written, and
    are created from MessageData instances when being deserialized.
    This class should not be instantiated directly but instead should be the parent
    class on other structures that are persisted to the database.
    """
    # NOTE(review): pydantic's Field objects are used here as dataclass
    # defaults; subclasses are normally built via @messagecls, which rebuilds
    # the ``metadata`` field — confirm direct @dataclass subclassing is
    # unsupported.
    id: UUID = Field(default_factory=uuid4, alias='_id_')
    metadata: Metadata = Field(default_factory=Metadata, alias='_metadata_')
    @classmethod
    def from_messagedata(cls, data: 'MessageData', strict: bool = False) -> 'Message':
        """Rebuild a Message subclass instance from raw MessageData.

        With ``strict=True`` the stored type must equal this class name and
        unknown metadata keys raise ValueError instead of being skipped.
        """
        if strict:
            if data.type != cls.__name__:
                raise ValueError('invalid class name, does not match type `%s`' % data.type)
        # coerce the metadata object
        # .. attempt to assign all the metadata fields and values from the
        # incoming MessageData instance onto this custom Message instance.
        # These additional attributes can be specified before the underlying Message
        # instance is created by decorating the class with @messagecls.
        meta_obj = {}
        meta_fields = cls.__dataclass_fields__['metadata'].metadata or {}
        for k, v in data.metadata.items():
            if k not in meta_fields:
                if strict:
                    raise ValueError('undefined metadata field name `%s`' % k)
                # else:
                # skipping field: value
            if k in meta_fields:
                meta_obj[k] = v
        # create instance
        msg = cls(**data.data)
        msg.id = data.id
        msg.metadata = msg.metadata.__class__(**meta_obj)
        # return instance of custom class
        return msg
    def __eq__(self, other: 'Message') -> bool:
        # loose equality: same class and agreement on every attribute except
        # ``id`` and ``metadata``; a key missing from self compares unequal
        # because the ``not v`` default can never equal v
        if not isinstance(other, self.__class__):
            return False
        attrs = self.attributes()
        for k, v in other.attributes().items():
            if k in ('id', 'metadata'):
                continue
            if attrs.get(k, not v) != v:
                return False
        return True
    @property
    def type(self) -> str:
        """The message type name persisted to the store."""
        return self.__class__.__name__
    def attributes(self) -> Dict:
        """All dataclass fields (including id and metadata) as a dict."""
        return asdict(self)
    def attribute_names(self) -> List[str]:
        return list(self.attributes().keys())
    def follow(self, other: 'Message') -> 'Message':
        """Chain this message after *other* (see Metadata.follow)."""
        self.metadata.follow(other.metadata)
        return self
    def follows(self, other: 'Message') -> bool:
        return self.metadata.follows(other.metadata)
    def serialize(
        self,
        stream_name: str,
        expected_version: Optional[int] = None,
        json_default_fn: Optional[Callable] = None,
    ) -> SerializedMessage:
        """Prepare this instance to be written to the message store.
        Returns a serialized version of this object's data.
        """
        data = self.attributes()
        # separate the metadata from the data
        meta = dense_dict(data.pop('metadata'))
        # remove the UUID, since it has its own column
        del data['id']
        # build the response instance
        return SerializedMessage(
            str(self.id),
            stream_name,
            self.type,
            jdumps(data, json_default_fn),
            jdumps(meta, json_default_fn),
            expected_version,
        )
def messagecls(
    cls_=None,
    *,
    msg_meta: Type[Metadata] = Metadata,
    init=True,
    repr=True,
    eq=True,
    order=False,
    unsafe_hash=False,
    frozen=False,
) -> Type[Message]:
    """Decorator used to build a custom Message type, with the ability to bind
    a custom Metadata class with additional fields. When these instances are built,
    serialized, or de-serialized from the database all the correct fields will be
    filled out with no interference on in-editor linters.
    The parameters for this decorator copy @dataclass with the addition of ``msg_meta``
    which allows the definition to have a custom Metadata class assigned to it.
    All @messagecls decorated classes behave like normal dataclasses.
    """
    def wrap(cls):
        # turn the wrapped class into a dataclass
        kls = dataclass(
            cls,
            init=init,
            repr=repr,
            eq=eq,
            order=order,
            unsafe_hash=unsafe_hash,
            frozen=frozen,
        )
        # extract all the field names and types from the new class definition
        m_fields = {f.name: f.type for f in fields(msg_meta)}
        # re-create the msg_meta class on the `metadata` attribute for this Message
        # object. We attach the new (and old) fields into the metadata flag for
        # this field so we don't have to process those values every time an instance
        # is de-serialized from the database.
        return make_dataclass(
            cls.__name__,
            fields=[
                (
                    'metadata',
                    msg_meta,
                    field(
                        init=False,
                        default_factory=msg_meta,
                        metadata=m_fields,
                    ),
                ),
            ],
            bases=(
                kls,
                Message,
            ),
        )
    # ensure this class definition follows basic guidelines
    if not hasattr(msg_meta, '__dataclass_fields__'):
        raise ValueError('custom message metadata class must be a @dataclass')
    if not issubclass(msg_meta, Metadata):
        raise ValueError('custom message metadata class must inherit eventide.Metadata')
    # NOTE(review): the check above requires msg_meta to already be a
    # dataclass, so the default ``Metadata`` (a pydantic model) would be
    # rejected here — confirm callers always pass a dataclass subclass.
    # "wrap" the Metadata class with @dataclass so we don't have to on its
    # definition. Fixed: this used the private dataclasses._process_class
    # helper with 7 positional arguments, a signature that changed in newer
    # Python releases (match_args/kw_only/slots were added) and so raised
    # TypeError there; the public @dataclass call below is equivalent on all
    # versions with the same flags (init=True, repr=False, eq=True,
    # order=False, unsafe_hash=False, frozen=False).
    msg_meta = dataclass(msg_meta, init=True, repr=False, eq=True,
                         order=False, unsafe_hash=False, frozen=False)
    # mimic @dataclass functionality
    if cls_ is None:
        return wrap
    return wrap(cls_)
message_cls = messagecls # alias
| 33.710383 | 92 | 0.623197 |
6e9f30208ea04fa7ad96c88e5f93a7fce170ab1e | 10,926 | py | Python | utils/minifier.py | MateuszDabrowski/elquent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | 4 | 2021-05-26T19:48:31.000Z | 2022-03-01T03:52:39.000Z | utils/minifier.py | MateuszDabrowski/ELQuent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | null | null | null | utils/minifier.py | MateuszDabrowski/ELQuent | 9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4 | [
"MIT"
] | 3 | 2021-03-05T23:06:38.000Z | 2021-10-05T19:56:28.000Z | #!/usr/bin/env python3.6
# -*- coding: utf8 -*-
'''
ELQuent.minifier
E-mail code minifier
Mateusz Dąbrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''
import os
import re
import sys
import json
import pyperclip
from colorama import Fore, Style, init
# ELQuent imports
import utils.api.api as api
# Initialize colorama
init(autoreset=True)
# Globals
naming = None
source_country = None
# Predefined messege elements
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'
def country_naming_setter(country):
'''
Sets source_country for all functions
Loads json file with naming convention
'''
global source_country
source_country = country
# Loads json file with naming convention
with open(file('naming'), 'r', encoding='utf-8') as f:
global naming
naming = json.load(f)
'''
=================================================================================
File Path Getter
=================================================================================
'''
def file(file_path, file_name=''):
'''
Returns file path to template files
'''
def find_data_file(filename, directory='outcomes'):
'''
Returns correct file path for both script and frozen app
'''
if directory == 'main': # Files in main directory
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, filename)
elif directory == 'api': # For reading api files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, 'utils', directory, filename)
elif directory == 'outcomes': # For writing outcome files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, directory, filename)
file_paths = {
'naming': find_data_file('naming.json', directory='api'),
'mail_html': find_data_file(f'WK{source_country}_{file_name}.txt')
}
return file_paths.get(file_path)
'''
=================================================================================
Code Output Helper
=================================================================================
'''
def output_method(html_code):
'''
Allows user choose how the program should output the results
Returns email_id if creation/update in Eloqua was selected
'''
# Asks which output
print(
f'\n{Fore.GREEN}New code should be:',
f'\n{Fore.WHITE}[{Fore.YELLOW}0{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}FILE{Fore.WHITE}] Only saved to Outcomes folder',
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}HTML{Fore.WHITE}] Copied to clipboard as HTML for pasting [CTRL+V]',
f'\n{Fore.WHITE}[{Fore.YELLOW}2{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}CREATE{Fore.WHITE}] Uploaded to Eloqua as a new E-mail',
f'\n{Fore.WHITE}[{Fore.YELLOW}3{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}UPDATE{Fore.WHITE}] Uploaded to Eloqua as update to existing E-mail')
email_id = ''
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice == '0':
break
elif choice == '1' and html_code:
pyperclip.copy(html_code)
print(
f'\n{SUCCESS}You can now paste the HTML code [CTRL+V]')
break
elif choice == '2':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}NAME{Fore.WHITE}] » Write or copypaste name of the E-mail:')
name = api.eloqua_asset_name()
api.eloqua_create_email(name, html_code)
break
elif choice == '3':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}ID{Fore.WHITE}] » Write or copypaste ID of the E-mail to update:')
email_id = input(' ')
if not email_id:
email_id = pyperclip.paste()
api.eloqua_update_email(email_id, html_code)
break
else:
print(f'{ERROR}Entered value does not belong to any utility!')
choice = ''
return
'''
=================================================================================
E-mail Minifier
=================================================================================
'''
def email_minifier(code):
'''
Requires html code of an e-mail
Returns minified html code of an e-mail
'''
# HTML Minifier
html_attr = ['html', 'head', 'style', 'body',
'table', 'tbody', 'tr', 'td', 'th', 'div']
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
code = re.sub(r'"\n+\s*', '" ', code)
for attr in ['alt', 'title', 'data-class']:
code = re.sub(rf'{attr}=""', '', code)
code = re.sub(r'" />', '"/>', code)
code = re.sub(r'<!--[^\[\]]*?-->', '', code)
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
# Conditional Comment Minifier
code = re.sub(
r'\s*\n*\s*<!--\[if mso \| IE\]>\s*\n\s*', '\n<!--[if mso | IE]>', code)
code = re.sub(
r'\s*\n\s*<!\[endif\]-->\s*\n\s*', '<![endif]-->\n', code)
# CSS Minifier
code = re.sub(r'{\s*\n\s*', '{', code)
code = re.sub(r';\s*\n\s*}\n\s*', '} ', code)
code = re.sub(r';\s*\n\s*', '; ', code)
code = re.sub(r'}\n+', '} ', code)
# Whitespace Minifier
code = re.sub(r'\t', '', code)
code = re.sub(r'\n+', ' ', code)
while ' ' in code:
code = re.sub(r' {2,}', ' ', code)
# Trim lines to maximum of 500 characters
count = 0
newline_indexes = []
for i, letter in enumerate(code):
if count > 450:
if letter in ['>', ' ']:
newline_indexes.append(i)
count = 0
else:
count += 1
for index in reversed(newline_indexes):
output = code[:index+1] + '\n' + code[index+1:]
code = output
# Takes care of lengthy links that extends line over 500 characters
while True:
lengthy_lines_list = re.findall(r'^.{500,}$', code, re.MULTILINE)
if not lengthy_lines_list:
break
lengthy_link_regex = re.compile(r'href=\".{40,}?\"|src=\".{40,}?\"')
for line in lengthy_lines_list:
lengthy_link_list = re.findall(lengthy_link_regex, line)
code = code.replace(
lengthy_link_list[0], f'\n{lengthy_link_list[0]}')
return code
def email_workflow(email_code=''):
'''
Minifies the e-mail code
'''
if email_code:
module = True
# Gets e-mail code if not delivered via argument
elif not email_code:
module = False
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}Code{Fore.WHITE}] » Copy code of the E-mail to minify and click [Enter]:')
input()
email_code = pyperclip.paste()
# Gets the code from the user
while True:
email_code = pyperclip.paste()
is_html = re.compile(r'<html[\s\S\n]*?</html>', re.UNICODE)
if is_html.findall(email_code):
print(f'{Fore.WHITE}» {SUCCESS}Code copied from clipboard')
break
print(
f'{Fore.WHITE}» {ERROR}Invalid HTML. Copy valid code and click [Enter]', end='')
input(' ')
# Saves original code to outcomes folder
with open(file('mail_html', file_name='original_code'), 'w', encoding='utf-8') as f:
f.write(email_code)
# Gets file size of original file
original_size = os.path.getsize(
file('mail_html', file_name='original_code'))
# Minified the code
minified_code = email_minifier(email_code)
# Saves minified code to outcomes folder
with open(file('mail_html', file_name='minified_code'), 'w', encoding='utf-8') as f:
f.write(minified_code)
# Gets file size of minified file
minified_size = os.path.getsize(
file('mail_html', file_name='minified_code'))
print(f'\n{Fore.WHITE}» {SUCCESS}E-mail was minified from {Fore.YELLOW}{round(original_size/1024)}kB'
f'{Fore.WHITE} to {Fore.YELLOW}{round(minified_size/1024)}kB'
f' {Fore.WHITE}({Fore.GREEN}-{round((original_size-minified_size)/original_size*100)}%{Fore.WHITE})!')
if not module:
# Outputs the code
output_method(minified_code)
# Asks user if he would like to repeat
print(f'\n{Fore.YELLOW}» {Fore.WHITE}Do you want to {Fore.YELLOW}minify another Email{Fore.WHITE}?',
f'{Fore.WHITE}({YES}/{NO}):', end=' ')
choice = input('')
if choice.lower() == 'y':
print(
f'\n{Fore.GREEN}-----------------------------------------------------------------------------')
email_workflow()
return
'''
=================================================================================
Minifier module menu
=================================================================================
'''
def minifier_module(country):
'''
Lets user minify the HTML code
'''
# Create global source_country and load json file with naming convention
country_naming_setter(country)
# Report type chooser
print(
f'\n{Fore.GREEN}ELQuent.minifier Utilites:'
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t» [{Fore.YELLOW}E-mail{Fore.WHITE}] Minifies e-mail code'
f'\n{Fore.WHITE}[{Fore.YELLOW}Q{Fore.WHITE}]\t» [{Fore.YELLOW}Quit to main menu{Fore.WHITE}]'
)
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice.lower() == 'q':
break
elif choice == '1':
email_workflow()
break
else:
print(f'{Fore.RED}Entered value does not belong to any utility!')
choice = ''
return
| 33.618462 | 116 | 0.532857 |
6ea22002e9ef59fb7dc0ae80af6cf9fc57e8fc02 | 2,305 | py | Python | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 48 | 2016-09-20T16:28:46.000Z | 2022-02-02T10:32:02.000Z | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 45 | 2016-12-12T23:41:12.000Z | 2022-02-09T11:48:04.000Z | doc/conf.py | safay/uta | bf3cf5a531aec4cca61f8926e79a624d01c76682 | [
"Apache-2.0"
] | 20 | 2016-10-09T10:16:44.000Z | 2021-06-18T02:19:58.000Z | ############################################################################
# Theme setup
html_theme = 'invitae'
html_theme_path = ['themes']
if html_theme == 'sphinx_rtd_theme':
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
elif html_theme == 'bootstrap':
import sphinx_bootstrap_theme
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
############################################################################
# Project config
import uta
version = uta.__version__
release = str(uta.__version__)
project = u'UTA'
authors = project + ' Contributors'
copyright = u'2015, ' + authors
extlinks = {
'issue': ('https://bitbucket.org/biocommons/uta/issue/%s', 'UTA issue '),
}
man_pages = [
('index', 'uta', u'UTA Documentation', [u'UTA Contributors'], 1)
]
############################################################################
# Boilerplate
# , 'inherited-members']
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
exclude_patterns = ['build', 'static', 'templates', 'themes']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.fulltoc',
]
html_favicon = 'static/favicon.ico'
html_logo = 'static/logo.png'
html_static_path = ['static']
html_title = '{project} {release}'.format(project=project, release=release)
intersphinx_mapping = {
'http://docs.python.org/': None,
}
master_doc = 'index'
pygments_style = 'sphinx'
source_suffix = '.rst'
templates_path = ['templates']
# <LICENSE>
# Copyright 2014 UTA Contributors (https://bitbucket.org/biocommons/uta)
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
##
# http://www.apache.org/licenses/LICENSE-2.0
##
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| 29.935065 | 77 | 0.647722 |
6ea3527b6763af10afefd4e777c572e2ac4172fc | 997 | py | Python | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null | exercises_gustguan/ex113.py | Ewerton12F/Python-Notebook | 85c4d38c35c6063fb475c25ebf4645688ec9dbcb | [
"MIT"
] | null | null | null | def leiaInt(msg):
while True:
try:
i = int(input(msg))
except (ValueError, TypeError):
print('\033[1;3;31mERRO: Por favor, digite um número inteiro válido.\033[0;0;0m')
continue
except (KeyboardInterrupt):
print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n')
return 0
else:
return i
def leiaFloat(msg):
while True:
try:
r = float(input(msg))
except (TypeError, ValueError):
print('\033[1;3;31mERRO: Por favor, digite um número real válido.\033[0;0;0m')
continue
except (KeyboardInterrupt):
print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n')
return 0
else:
return r
li = leiaInt('Digite um número inteiro: ')
lr = leiaFloat('Digite um número real: ')
print(f'\033[1;3;34mO valor inteiro foi {li} e o real foi {lr}.\033[0;0;0m') | 33.233333 | 93 | 0.565697 |
6ea43d3eb6ab1823ba2e818e55cba7f4297fc931 | 10,851 | py | Python | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | frameworks/kafka/tests/auth.py | minyk/dcos-activemq | 57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import retrying
import sdk_cmd
LOG = logging.getLogger(__name__)
def wait_for_brokers(client: str, brokers: list):
"""
Run bootstrap on the specified client to resolve the list of brokers
"""
LOG.info("Running bootstrap to wait for DNS resolution")
bootstrap_cmd = ['/opt/bootstrap',
'-print-env=false',
'-template=false',
'-install-certs=false',
'-resolve-hosts', ','.join(brokers)]
bootstrap_output = sdk_cmd.task_exec(client, ' '.join(bootstrap_cmd))
LOG.info(bootstrap_output)
assert "SDK Bootstrap successful" in ' '.join(str(bo) for bo in bootstrap_output)
def is_not_authorized(output: str) -> bool:
return "AuthorizationException: Not authorized to access" in output
def get_kerberos_client_properties(ssl_enabled: bool) -> list:
protocol = "SASL_SSL" if ssl_enabled else "SASL_PLAINTEXT"
return ['security.protocol={protocol}'.format(protocol=protocol),
'sasl.mechanism=GSSAPI',
'sasl.kerberos.service.name=kafka', ]
def get_ssl_client_properties(cn: str, has_kerberos: bool) -> list:
if has_kerberos:
client_properties = []
else:
client_properties = ["security.protocol=SSL", ]
client_properties.extend(["ssl.truststore.location = {cn}_truststore.jks".format(cn=cn),
"ssl.truststore.password = changeit",
"ssl.keystore.location = {cn}_keystore.jks".format(cn=cn),
"ssl.keystore.password = changeit", ])
return client_properties
def write_client_properties(id: str, task: str, lines: list) -> str:
"""Write a client properties file containing the specified lines"""
output_file = "{id}-client.properties".format(id=id)
LOG.info("Generating %s", output_file)
output = sdk_cmd.create_task_text_file(task, output_file, lines)
LOG.info(output)
return output_file
def write_jaas_config_file(primary: str, task: str, krb5: object) -> str:
output_file = "{primary}-client-jaas.config".format(primary=primary)
LOG.info("Generating %s", output_file)
# TODO: use kafka_client keytab path
jaas_file_contents = ['KafkaClient {',
' com.sun.security.auth.module.Krb5LoginModule required',
' doNotPrompt=true',
' useTicketCache=true',
' principal=\\"{primary}@{realm}\\"'.format(primary=primary, realm=krb5.get_realm()),
' useKeyTab=true',
' serviceName=\\"kafka\\"',
' keyTab=\\"/tmp/kafkaconfig/kafka-client.keytab\\"',
' client=true;',
'};', ]
output = sdk_cmd.create_task_text_file(task, output_file, jaas_file_contents)
LOG.info(output)
return output_file
def write_krb5_config_file(task: str, krb5: object) -> str:
output_file = "krb5.config"
LOG.info("Generating %s", output_file)
try:
# TODO: Set realm and kdc properties
krb5_file_contents = ['[libdefaults]',
'default_realm = {}'.format(krb5.get_realm()),
'',
'[realms]',
' {realm} = {{'.format(realm=krb5.get_realm()),
' kdc = {}'.format(krb5.get_kdc_address()),
' }', ]
log.info("%s", krb5_file_contents)
except Exception as e:
log.error("%s", e)
raise(e)
output = sdk_cmd.create_task_text_file(task, output_file, krb5_file_contents)
LOG.info(output)
return output_file
def setup_krb5_env(primary: str, task: str, krb5: object) -> str:
env_setup_string = "export KAFKA_OPTS=\\\"" \
"-Djava.security.auth.login.config={} " \
"-Djava.security.krb5.conf={}" \
"\\\"".format(write_jaas_config_file(primary, task, krb5), write_krb5_config_file(task, krb5))
LOG.info("Setting environment to %s", env_setup_string)
return env_setup_string
def get_bash_command(cmd: str, environment: str) -> str:
env_str = "{} && ".format(environment) if environment else ""
return "bash -c \"{}{}\"".format(env_str, cmd)
def write_to_topic(cn: str, task: str, topic: str, message: str,
client_properties: list=[], environment: str=None) -> bool:
client_properties_file = write_client_properties(cn, task, client_properties)
cmd = "echo {message} | kafka-console-producer \
--topic {topic} \
--producer.config {client_properties_file} \
--broker-list \$KAFKA_BROKER_LIST".format(message=message,
topic=topic,
client_properties_file=client_properties_file)
write_cmd = get_bash_command(cmd, environment)
def write_failed(output) -> bool:
LOG.info("Checking write output: %s", output)
rc = output[0]
stderr = output[2]
if rc:
LOG.error("Write failed with non-zero return code")
return True
if "UNKNOWN_TOPIC_OR_PARTITION" in stderr:
LOG.error("Write failed due to stderr: UNKNOWN_TOPIC_OR_PARTITION")
return True
if "LEADER_NOT_AVAILABLE" in stderr and "ERROR Error when sending message" in stderr:
LOG.error("Write failed due to stderr: LEADER_NOT_AVAILABLE")
return True
LOG.info("Output check passed")
return False
@retrying.retry(wait_exponential_multiplier=1000,
wait_exponential_max=60 * 1000,
retry_on_result=write_failed)
def write_wrapper():
LOG.info("Running: %s", write_cmd)
rc, stdout, stderr = sdk_cmd.task_exec(task, write_cmd)
LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
return rc, stdout, stderr
rc, stdout, stderr = write_wrapper()
rc_success = rc is 0
stdout_success = ">>" in stdout
stderr_success = not is_not_authorized(stderr)
return rc_success and stdout_success and stderr_success
def read_from_topic(cn: str, task: str, topic: str, messages: int,
client_properties: list=[], environment: str=None) -> str:
client_properties_file = write_client_properties(cn, task, client_properties)
cmd = "kafka-console-consumer \
--topic {topic} \
--consumer.config {client_properties_file} \
--bootstrap-server \$KAFKA_BROKER_LIST \
--from-beginning --max-messages {messages} \
--timeout-ms {timeout_ms}".format(topic=topic,
client_properties_file=client_properties_file,
messages=messages,
timeout_ms=60000)
read_cmd = get_bash_command(cmd, environment)
def read_failed(output) -> bool:
LOG.info("Checking read output: %s", output)
rc = output[0]
stderr = output[2]
if rc:
LOG.error("Read failed with non-zero return code")
return True
if "kafka.consumer.ConsumerTimeoutException" in stderr:
return True
LOG.info("Output check passed")
return False
@retrying.retry(wait_exponential_multiplier=1000,
wait_exponential_max=60 * 1000,
retry_on_result=read_failed)
def read_wrapper():
LOG.info("Running: %s", read_cmd)
rc, stdout, stderr = sdk_cmd.task_exec(task, read_cmd)
LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
return rc, stdout, stderr
output = read_wrapper()
assert output[0] is 0
return " ".join(str(o) for o in output)
log = LOG
def create_tls_artifacts(cn: str, task: str) -> str:
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
log.info("Generating certificate. cn={}, task={}".format(cn, task))
output = sdk_cmd.task_exec(
task,
'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
'-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(priv_path, cn))
log.info(output)
assert output[0] is 0
rc, raw_csr, _ = sdk_cmd.task_exec(task, 'cat request.csr')
assert rc is 0
request = {
"certificate_request": raw_csr
}
token = sdk_cmd.run_cli("config show core.dcos_acs_token")
output = sdk_cmd.task_exec(
task,
"curl --insecure -L -X POST "
"-H 'Authorization: token={}' "
"leader.mesos/ca/api/v2/sign "
"-d '{}'".format(token, json.dumps(request)))
log.info(output)
assert output[0] is 0
# Write the public cert to the client
certificate = json.loads(output[1])["result"]["certificate"]
output = sdk_cmd.task_exec(task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
log.info(output)
assert output[0] is 0
create_keystore_truststore(cn, task)
return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)
def create_keystore_truststore(cn: str, task: str):
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
keystore_path = "{}_keystore.jks".format(cn)
truststore_path = "{}_truststore.jks".format(cn)
log.info("Generating keystore and truststore, task:{}".format(task))
output = sdk_cmd.task_exec(task, "curl -L -k -v leader.mesos/ca/dcos-ca.crt -o dcos-ca.crt")
# Convert to a PKCS12 key
output = sdk_cmd.task_exec(
task,
'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
'openssl pkcs12 -export -in {} -inkey {} '
'-out keypair.p12 -name keypair -passout pass:export '
'-CAfile dcos-ca.crt -caname root"'.format(pub_path, priv_path))
log.info(output)
assert output[0] is 0
log.info("Generating certificate: importing into keystore and truststore")
# Import into the keystore and truststore
output = sdk_cmd.task_exec(
task,
"keytool -importkeystore "
"-deststorepass changeit -destkeypass changeit -destkeystore {} "
"-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
"-alias keypair".format(keystore_path))
log.info(output)
assert output[0] is 0
output = sdk_cmd.task_exec(
task,
"keytool -import -trustcacerts -noprompt "
"-file dcos-ca.crt -storepass changeit "
"-keystore {}".format(truststore_path))
log.info(output)
assert output[0] is 0
| 35.345277 | 117 | 0.599853 |
6ea45f9b51639f8a0b82e891df2cc0bae0501648 | 1,242 | py | Python | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | 1 | 2015-12-18T13:25:41.000Z | 2015-12-18T13:25:41.000Z | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | python/problem-060.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import prime
description = '''
Prime pair sets
Problem 60
The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime. For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
'''
prime.loadPrimes('primes.bin')
def digitconcat(a, b):
return int(str(a) + str(b))
def isconnected(a, b):
return prime.isPrime(digitconcat(a,b)) and prime.isPrime(digitconcat(b,a))
def search(space, path, n):
if len(path) == n: return path
p = path[0]
sspace = filter(lambda x: not x in path and isconnected(p,x), sorted(space))
for c in sspace:
r = search(sspace, [c]+path, n)
if r is not None:
return r
def findPairSets(n):
for p in prime.primes():
space = [p]
for p2 in prime.primes(p):
if isconnected(p, p2):
space.append(p2)
if len(space) >= n:
r = search(space, [p], n)
if r is not None: yield r
result = next(findPairSets(5))
print(result, sum(result))
| 29.571429 | 313 | 0.681159 |
6ea54be459981a2401f315126f120b27aa749589 | 5,298 | py | Python | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | multilanguage_frappe_website/hooks.py | developmentforpeople/frappe-multilingual-website | c0bf74453f3d1de6127ad174aab6c05360cc1ec1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "multilanguage_frappe_website"
app_title = "Multilanguage Frappe Website"
app_publisher = "DFP developmentforpeople"
app_description = "Multilanguage Frappe Framework website example"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developmentforpeople@gmail.com"
app_license = "MIT"
# App name (used to override only sites with this app installed)
multilanguage_app_site_name = app_name
# Hosts/sites where this app will be enabled
multilanguage_app_site_hosts = ["mf.local", "frappe-multilingual-website.developmentforpeople.com"]
# Languages available for site
translated_languages_for_website = ["en", "es"]
# First one on list will be the default one
language_default = translated_languages_for_website[0]
# Home page
home_page = "index"
# Url 301 redirects
website_redirects = [
# Remove duplicated pages for home:
{ "source": "/index", "target": "/" },
{ "source": "/index.html", "target": "/" },
# Languages: Remove main language segment. For example,
# if "en" is first one in "translated_languages_for_website"
# then route "/en/example" will be redirected 301 to "/example"
{ "source": r"/{0}".format(language_default), "target": "/" },
{ "source": r"/{0}/(.*)".format(language_default), "target": r"/\1" },
# Foce url language for some Frappe framework dynamic pages:
{ "source": "/en/login", "target": "/login?_lang=en" },
{ "source": "/es/login", "target": "/login?_lang=es" },
{ "source": "/en/contact", "target": "/contact?_lang=en" },
{ "source": "/es/contact", "target": "/contact?_lang=es" },
# Foce url language for not language specific pages:
{ "source": "/en/translations", "target": "/translations?_lang=en" },
{ "source": "/es/translations", "target": "/translations?_lang=es" },
]
# Setup some global context variables related to languages
website_context = {
"languages": translated_languages_for_website,
"language_default": language_default,
"app_site_name": app_name,
}
# Calculate active language from url first segment
update_website_context = [
"{0}.context_extend".format(app_name),
]
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# app_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js, css files in header of web template
web_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# web_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "multilanguage_frappe_website.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "multilanguage_frappe_website.install.before_install"
# after_install = "multilanguage_frappe_website.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "multilanguage_frappe_website.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "multilanguage_frappe_website.tasks.all"
# ],
# "daily": [
# "multilanguage_frappe_website.tasks.daily"
# ],
# "hourly": [
# "multilanguage_frappe_website.tasks.hourly"
# ],
# "weekly": [
# "multilanguage_frappe_website.tasks.weekly"
# ]
# "monthly": [
# "multilanguage_frappe_website.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "multilanguage_frappe_website.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "multilanguage_frappe_website.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "multilanguage_frappe_website.task.get_dashboard_data"
# }
| 29.597765 | 99 | 0.714232 |
6ea56221c4382d050ea20b187d845407bd8d039d | 90 | py | Python | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | renormalizer/mps/tdh/__init__.py | liwt31/Renormalizer | 123a9d53f4f5f32c0088c255475f0ee60d02c745 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from renormalizer.mps.tdh.propagation import unitary_propagation
| 22.5 | 64 | 0.755556 |
6ea5d0975fd4eec1bb06ec6bc86c9a210abd074c | 398 | py | Python | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | 2 | 2017-01-08T15:53:49.000Z | 2017-01-19T17:24:53.000Z | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | null | null | null | items/Boots_Of_Speed.py | ivoryhuang/LOL_simple_text_version | 13c98721ad094c4eb6b835c838805c77dc9075c5 | [
"MIT"
] | null | null | null | from items.Item import Item
class Boots_Of_Speed(Item):
    """Tier-one boots: a basic item granting a flat +25 movement speed."""

    # Flat movement-speed bonus applied/removed by stats()/remove_stats().
    _SPEED_BONUS = 25

    def __init__(self):
        Item.__init__(self, name='Boots of Speed', code=1001, cost=300, sell=210)
        self.sub_items = None  # basic item: built from no component items

    def stats(self, champ):
        """Grant the speed bonus to *champ* and describe the change."""
        champ.move_speed += self._SPEED_BONUS
        return "%s move speed increase %d" % (champ.name, self._SPEED_BONUS)

    def remove_stats(self, champ):
        """Undo the speed bonus previously applied by ``stats``."""
        champ.move_speed -= self._SPEED_BONUS
        return "%s move speed decrease %d" % (champ.name, self._SPEED_BONUS)
6ea618363d6a6f275346b95643dd61b27b8e3d12 | 12,045 | py | Python | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | 1 | 2020-08-03T17:47:40.000Z | 2020-08-03T17:47:40.000Z | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | null | null | null | RsNet/train_models.py | gehuangyi20/random_spiking | c98b550420ae4061b9d47ca475e86c981caf5514 | [
"MIT"
] | null | null | null | ## train_models.py -- train the neural network models for attacking
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified for the needs of MagNet.
import os
import argparse
import utils
import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from RsNet.setup_mnist import MNIST, MNISTModel
from RsNet.tf_config import gpu_config, setup_visibile_gpus, CHANNELS_LAST, CHANNELS_FIRST
from RsNet.dataset_nn import model_mnist_meta
from RsNet.random_spiking.nn_ops import random_spike_sample_scaling, random_spike_sample_scaling_per_sample
def random_spike(x, sample_rate, scaling, is_batch=True):
    """Apply RandomSpiking noise to *x*, batch-wise or per individual sample."""
    if is_batch:
        spike_fn = random_spike_sample_scaling
    else:
        spike_fn = random_spike_sample_scaling_per_sample
    return spike_fn(x, sample_rate=sample_rate, scaling=scaling)
def train(data, file_name, params, rand_params, num_epochs=50, batch_size=128, is_batch=True,
          dropout=0.0, data_format=None, init_model=None, train_temp=1, data_gen=None):
    """
    Standard neural network training procedure.

    Builds a 4-conv / 2-dense classifier with a RandomSpiking Lambda layer
    after every activation and pooling step, trains it, and optionally
    saves the weights.

    params      -- six ints: filter counts of the four Conv2D layers plus
                   the widths of the two hidden Dense layers.
    rand_params -- sixteen values read pairwise as (sample_rate, scaling)
                   for the eight RandomSpiking layers.
    train_temp  -- softmax temperature applied inside the loss.
    init_model  -- optional weight file loaded before training starts.
    data_gen    -- optional Keras ImageDataGenerator; switches training to
                   fit_generator and validates on the validation split.
    Returns the trained Keras Model.
    """
    _input = Input(shape=data.train_data.shape[1:])
    x = _input
    # Conv block 1: two 3x3 convs, ReLU + RandomSpiking after each.
    x = Conv2D(params[0], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[0], "scaling": rand_params[1], "is_batch": is_batch})(x)
    x = Conv2D(params[1], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[2], "scaling": rand_params[3], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[4], "scaling": rand_params[5], "is_batch": is_batch})(x)
    # Conv block 2: same layout with the next pair of filter counts.
    x = Conv2D(params[2], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[6], "scaling": rand_params[7], "is_batch": is_batch})(x)
    x = Conv2D(params[3], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[8], "scaling": rand_params[9], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[10], "scaling": rand_params[11], "is_batch": is_batch})(x)
    # Classifier head: two hidden Dense layers, then 10 raw logits
    # (the softmax lives inside the loss function below).
    x = Flatten()(x)
    x = Dense(params[4])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[12], "scaling": rand_params[13], "is_batch": is_batch})(x)
    if dropout > 0:
        # training=True keeps dropout active at inference time as well.
        x = Dropout(dropout)(x, training=True)
    x = Dense(params[5])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[14], "scaling": rand_params[15], "is_batch": is_batch})(x)
    x = Dense(10)(x)
    model = Model(_input, x)
    model.summary()
    def fn(correct, predicted):
        # Temperature-scaled softmax cross-entropy over the raw logits.
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)
    if init_model is not None:
        model.load_weights(init_model)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])
    if data_gen is None:
        model.fit(data.train_data, data.train_labels,
                  batch_size=batch_size,
                  validation_data=(data.test_data, data.test_labels),
                  nb_epoch=num_epochs,
                  shuffle=True)
    else:
        # NOTE(review): this branch hard-codes batch_size=128 and ignores the
        # `batch_size` parameter -- confirm whether that is intended.
        data_flow = data_gen.flow(data.train_data, data.train_labels, batch_size=128, shuffle=True)
        model.fit_generator(data_flow,
                            steps_per_epoch=len(data_flow),
                            validation_data=(data.validation_data, data.validation_labels),
                            nb_epoch=num_epochs,
                            shuffle=True)
    if file_name is not None:
        model.save(file_name)
    # save idx
    # NOTE(review): runs even when file_name is None -- confirm that
    # utils.save_model_idx tolerates a None path.
    utils.save_model_idx(file_name, data)
    return model
def parse_rand_spike(_str):
    """Parse a comma-separated string such as "0.1,2" into a list of floats."""
    return [float(token) for token in _str.split(',')]
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(description='Train mnist model')
parser.add_argument('--data_dir', help='data dir, required', type=str, default=None)
parser.add_argument('--data_name', help='data name, required', type=str, default=None)
parser.add_argument('--model_dir', help='save model directory, required', type=str, default=None)
parser.add_argument('--model_name', help='save model name, required', type=str, default=None)
parser.add_argument('--validation_size', help='size of validation dataset', type=int, default=5000)
parser.add_argument('--random_spike', help='parameter used for random spiking', type=str, default=None)
parser.add_argument('--random_spike_batch', help='whether to use batch-wised random noise', type=str, default='yes')
parser.add_argument('--dropout', help='dropout rate', type=float, default=0.5)
parser.add_argument('--rotation', help='rotation angle', type=float, default=10)
parser.add_argument('--gpu_idx', help='gpu index', type=int, default=0)
parser.add_argument('--data_format', help='channels_last or channels_first', type=str, default=CHANNELS_FIRST)
parser.add_argument('--is_dis', help='whether to use distillation training', type=str, default='no')
parser.add_argument('--is_trans', help='whether do transfer training using soft label', type=str, default='no')
parser.add_argument('--is_data_gen', help='whether train on data generator, zoom, rotation', type=str, default='no')
parser.add_argument('--trans_model', help='transfer model name', type=str, default='no')
parser.add_argument('--trans_drop', help='dropout trans model name', type=float, default=0.5)
parser.add_argument('--trans_random_spike', help='random spiking parameter used for trans model',
                    type=str, default=None)
parser.add_argument('--train_sel_rand', help='whether to random select the training data', type=str, default='no')
parser.add_argument('--train_size', help='number of training example', type=int, default=0)
parser.add_argument('--pre_idx', help='predefined idx, duplicated training dataset', type=str, default=None)
parser.add_argument('--ex_data_dir', help='extra data dir, required', type=str, default=None)
parser.add_argument('--ex_data_name', help='extra data name, required', type=str, default=None)
parser.add_argument('--ex_data_size', help='number of extra training example', type=int, default=0)
parser.add_argument('--ex_data_sel_rand', help='whether to random select the extra training data',
                    type=str, default='no')
args = parser.parse_args()
# ---- Unpack arguments ('yes'/'no' string flags become booleans) ------------
data_dir = args.data_dir
data_name = args.data_name
save_model_dir = args.model_dir
save_model_name = args.model_name
validation_size = args.validation_size
train_size = args.train_size
train_sel_rand = args.train_sel_rand == 'yes'
para_random_spike = None if args.random_spike is None else parse_rand_spike(args.random_spike)
_is_batch = args.random_spike_batch == 'yes'
dropout = args.dropout
gpu_idx = args.gpu_idx
rotation = args.rotation
data_format = args.data_format
is_distillation = args.is_dis == 'yes'
is_data_gen = args.is_data_gen == 'yes'
ex_data_dir = args.ex_data_dir
ex_data_name = args.ex_data_name
ex_data_size = args.ex_data_size
ex_data_sel_rand = args.ex_data_sel_rand == 'yes'
pre_idx_path = args.pre_idx
# Pin the process to one GPU and install a TF session with the shared config.
setup_visibile_gpus(str(gpu_idx))
k.tensorflow_backend.set_session(tf.Session(config=gpu_config))
if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)
# Load the dataset; optionally re-index it and append an extra dataset.
data = MNIST(data_dir, data_name, validation_size, model_meta=model_mnist_meta,
             input_data_format=CHANNELS_LAST, output_data_format=data_format,
             train_size=train_size, train_sel_rand=train_sel_rand)
if pre_idx_path is not None:
    pre_idx = utils.load_model_idx(pre_idx_path)
    data.apply_pre_idx(pre_idx)
if ex_data_dir is not None and ex_data_name is not None and ex_data_size > 0:
    data.append_train_data(ex_data_dir, ex_data_name, ex_data_size,
                           input_data_format=CHANNELS_LAST, output_data_format=data_format, sel_rand=ex_data_sel_rand)
# config data if using transfer training here
is_trans = args.is_trans == 'yes'
if is_trans:
    # Replace the hard training labels with the transfer model's predictions.
    print("Get the soft label of the transfer model")
    trans_random_spike = None if args.trans_random_spike is None else parse_rand_spike(args.trans_random_spike)
    trans_model = MNISTModel(args.trans_model, None, output_logits=False,
                             input_data_format=data_format, data_format=data_format, dropout=0,
                             rand_params=trans_random_spike, is_batch=True)
    predicted = trans_model.model.predict(data.train_data, batch_size=500, verbose=1)
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    data.train_labels = predicted
    print("trasfer model acc on training data:", train_data_acc)
if is_data_gen:
    # Light geometric augmentation: small rotation, shear, zoom and shifts.
    data_gen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=rotation,
        shear_range=0.2,
        zoom_range=0.2,
        fill_mode='reflect',
        width_shift_range=4,
        height_shift_range=4,
        horizontal_flip=False,
        vertical_flip=False,
        data_format=data_format
    )
else:
    data_gen = None
if is_distillation:
    # Defensive distillation: 1-epoch init model -> teacher trained at
    # temperature 100 -> student trained on temperature-softened labels.
    print("train init model")
    train(data, save_model_dir + "/" + save_model_name + '_init',
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=1, is_batch=_is_batch,
          data_format=data_format, dropout=dropout, data_gen=data_gen)
    print("train teacher model")
    train(data, save_model_dir + "/" + save_model_name + '_teacher',
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout,
          init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
    # evaluate label with teacher model
    model_teacher = MNISTModel(os.path.join(save_model_dir, save_model_name + '_teacher'), None, output_logits=True,
                               input_data_format=data_format, data_format=data_format, dropout=0,
                               rand_params=para_random_spike, is_batch=True)
    predicted = model_teacher.model.predict(data.train_data, batch_size=500, verbose=1)
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    print("train teacher acc:", train_data_acc)
    # Soften the teacher's logits at the same temperature for the student.
    with tf.Session() as sess:
        y = sess.run(tf.nn.softmax(predicted/100))
        print(y)
    data.train_labels = y
    print("train student model")
    train(data, save_model_dir + "/" + save_model_name,
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout,
          init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
else:
    # Plain (non-distilled) training run.
    train(data, save_model_dir + "/" + save_model_name,
          [32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
          data_format=data_format, dropout=dropout, data_gen=data_gen)
6ea71b4513f1f9f11b82f5034de5e9e21242e450 | 3,151 | py | Python | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | link_crawler.py | Stevearzh/greedy-spider | ca8b1d892e4ac5066ab33aafe7755ee959ef630a | [
"MIT"
] | null | null | null | import datetime
import re
import time
import urllib
from urllib import robotparser
from urllib.request import urlparse
from downloader import Downloader
DEFAULT_DELAY = 5
DEFAULT_DEPTH = -1
DEFAULT_URL = -1
DEFAULT_AGENT = 'wswp'
DEFAULT_RETRY = 1
DEFAULT_TIMEOUT = 60
DEFAULT_IGNORE_ROBOTS = False
def link_crawler(seed_url, link_regex=None, delay=DEFAULT_DELAY, max_depth=DEFAULT_DEPTH,
max_urls=DEFAULT_URL, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRY,
timeout=DEFAULT_TIMEOUT, ignore_robots=DEFAULT_IGNORE_ROBOTS, scrape_callback=None, cache=None):
'''
Crawl from the given seed URL following links matched by link_regex
'''
# the queue of URL's that still need to be crawled
crawl_queue = [seed_url]
# the URL's that have been seen and at what depth
seen = {seed_url: 0}
# track how many URL's have been downloaded
num_urls = 0
rp = get_robots(seed_url)
D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
num_retries=num_retries, timeout=timeout, cache=cache)
while crawl_queue:
url = crawl_queue.pop()
depth = seen[url]
# check url passes robots.txt restrictions
if ignore_robots or rp.can_fetch(user_agent, url):
html = D(url)
links = []
if scrape_callback:
links.extend(scrape_callback(url, html) or [])
if depth != max_depth:
# can still crawl further
if link_regex:
# filter for links matching our regular expression
links.extend(link for link in get_links(html) if \
re.match(link_regex, link))
for link in links:
link = normalize(seed_url, link)
# check whether already crawled this link
if link not in seen:
seen[link] = depth + 1
# check link is within same domain
if same_domain(seed_url, link):
# success add this new link to queue
crawl_queue.append(link)
# check whether have reached downloaded maximum
num_urls += 1
if num_urls == max_urls:
break
else:
print('Blocked by robots.txt', url)
def normalize(seed_url, link):
'''
Normalize this URL by removing hash and adding domain
'''
link, _ = urllib.parse.urldefrag(link) # remove hash to avoid duplicates
return urllib.parse.urljoin(seed_url, link)
def same_domain(url1, url2):
'''
Return True if both URL's belong to same domain
'''
return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc
def get_robots(url):
'''
Initialize robots parser for this domain
'''
rp = robotparser.RobotFileParser()
rp.set_url(urllib.parse.urljoin(url, '/robots.txt'))
rp.read()
return rp
def get_links(html):
'''
Return a list of links from html
'''
# a regular expression to extract all links from the webpage
webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
# list of all links from the webpage
return webpage_regex.findall(html)
if __name__ == '__main__':
# execute only if run as a script
pass
| 28.645455 | 100 | 0.668042 |
6ea734988dbfada1408954f978d47bd46b1b2de0 | 1,994 | py | Python | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | 2 | 2020-08-01T03:31:28.000Z | 2021-02-02T15:17:31.000Z | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | null | null | null | Array.diff.py | ErosMLima/last-classes-js-py-node-php | 14775adaa3372c03c1e73d0699516f759e162dc5 | [
"MIT"
] | null | null | null | #Array.diff.py OKS
function array_diff(a, b) {
return a.filter(function(x) { return b,index(x) == -1; });
}
#solution 2 for array,diff
function array_diff(a, b) {
return a.filter(e => !b.includes(e));
}
function array_diff(a, b) {
return a.filter(e => !b.includes(e));
}
#Bouncing Balls ok
function boucingBall(h, boumce, window) {
var rebounds = -1;
if (bounce > 0 && bounce < 1) while (h > window) rebounds+=2, h *= bounce;
return rebounds;
}
#Backspaces in string ok
function cleanString(str) {
let result = [];
for(let i=0; i<str.length; i++) {
const char = str[i];
if(char === `#`) {
result.pop();
} else {
result.push(char);
}
}
return result.join('');
}
function clean_string(string) {
while (string.indexOf(`#`) >= 0)
string = string.replace(\(^|[^#])#/g, '');
return string;
}
#Expression Matter OKs
function expressionMatter(a, b, c) {
const x1 = a * (b + c);
const x2 = a * b * c;
const x3 = a + b * c;
const x4 = a + b + c;
const x5 = (a + b) * c;
return Math.max(x1, x2, x3, x4, x5);
}
function expressionMatter(a, b, c) {
return Math.max(
a+b+c,
a*b*c,
a*(b+c),
(a+b)*c,
a+b*c,
a*b+c,
);
}
#Extract the domain name from a URL
function moreZeros(s){
return s.split('')
.fliter(removeDoubles)
.map(convertToAscii)
.map(converToBinary)
.filter(ateMoreZeros)
.map(convertToDecimal)
.map(convertToChar);
}
function removeDoubles(item, idx, arr) {
return arr.indexOf(item) === idx;
}
function convertToAscii(c) {
return c.charCodeAt(0);
}
function convertToBinary(num) {
return num.toString(2);
}
function areMoreZeros(str) {
const zeros = str.replace(/1/g, '').length;
const ones = str.replace(/0/g, '').length;
return zeros > ones;
}
function convertToDecimal(bi) {
return parseInt(bi, 2);
}
function convertToChar(num) {
return String.fromCharCode(num);
} | 18.127273 | 76 | 0.587763 |
6ea869001bb831aa67afe2e798624becb8124601 | 4,535 | py | Python | scripts/bcbb_helpers/process_run_info.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 1 | 2016-03-21T14:04:09.000Z | 2016-03-21T14:04:09.000Z | scripts/bcbb_helpers/process_run_info.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 35 | 2015-01-22T08:25:02.000Z | 2020-02-17T12:09:12.000Z | scripts/bcbb_helpers/process_run_info.py | parlundin/scilifelab | e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a | [
"MIT"
] | 6 | 2015-01-16T15:32:08.000Z | 2020-01-30T14:34:40.000Z | #!/usr/bin/env python
import os
import sys
import yaml
from optparse import OptionParser
def main(run_info_yaml, lane, out_file, genome_build, barcode_type, trim, ascii, analysis, description, clear_description, verbose):
    """Load a bcbb run-info YAML, edit one lane (or all lanes when lane == 0)
    and write the result to out_file, or print it when out_file is None."""
    if verbose: print "Verifying that %s exists" % run_info_yaml
    assert os.path.exists(run_info_yaml)
    if verbose: print "Parsing %s" % run_info_yaml
    with open(run_info_yaml) as fh:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary YAML tags -- consider yaml.safe_load for untrusted files.
        run_info = yaml.load(fh)
    if verbose: print "Extracting lane info"
    if lane == 0:
        # lane 0 means "process every lane".
        lane_info = run_info
    else:
        for info in run_info:
            if (int(info.get("lane",0)) == lane):
                lane_info = [info]
                break
        # NOTE(review): if no entry matches `lane`, lane_info is never bound
        # and the loop below raises NameError -- confirm inputs always match.
    # Edits are applied to the dicts in place, so dumping run_info below
    # reflects the changes.
    for info in lane_info:
        if verbose: print "Processing lane %s" % info["lane"]
        _process_info(info,genome_build,barcode_type,trim,ascii,analysis,description,clear_description,verbose)
    if out_file is not None:
        with open(out_file,'w') as fh:
            yaml.dump(run_info, fh, allow_unicode=True, default_flow_style=False)
    else:
        print yaml.dump(run_info, allow_unicode=True, default_flow_style=False)
def _process_info(info,genome_build,barcode_type,trim,ascii,analysis,description,clear_description,verbose):
    """Apply the requested edits to a single lane dict (mutated in place).

    None-valued options leave the corresponding field untouched; `ascii`
    forces text fields through _replace_ascii.
    """
    if genome_build is not None:
        if verbose: print "\tSetting genome build: %s" % genome_build
        info['genome_build'] = genome_build
    if analysis is not None:
        if verbose: print "\tSetting analysis: %s" % analysis
        info['analysis'] = analysis
    if description is not None:
        if verbose: print "\tSetting description: %s" % description
        info['description'] = description
    if ascii and 'description' in info:
        if verbose: print "\tEnsuring ascii"
        info['description'] = _replace_ascii(info['description'])
    # Per-barcode (multiplex) edits.
    for multiplex in info.get('multiplex',[]):
        if verbose: print "\tProcessing multiplexed barcode id %s" % multiplex['barcode_id']
        if barcode_type is not None:
            if verbose: print "\t\tSetting barcode_type: %s" % barcode_type
            multiplex['barcode_type'] = barcode_type
        if trim > 0:
            if verbose: print "\t\tTrimming %s nucleotides from end of barcode" % trim
            # Drop the last `trim` characters of the barcode sequence.
            multiplex['sequence'] = multiplex['sequence'][0:(-1*trim)]
        if clear_description and 'description' in multiplex:
            del multiplex['description']
        if ascii:
            if verbose: print "\t\tEnsuring ascii"
            if 'sample_prj' in multiplex:
                multiplex['sample_prj'] = _replace_ascii(multiplex['sample_prj'])
            if 'description' in multiplex:
                multiplex['description'] = _replace_ascii(multiplex['description'])
def _replace_ascii(str):
# Substitute swedish characters for sensible counterparts
str = str.replace(u'\xc5','A')
str = str.replace(u'\xe5','a')
str = str.replace(u'\xc4','A')
str = str.replace(u'\xe4','a')
str = str.replace(u'\xd6','O')
str = str.replace(u'\xf6','o')
return str.encode('ascii','replace')
if __name__ == "__main__":
    # CLI: the single positional argument is the run-info YAML file;
    # all edits are supplied as options and default to "no change".
    parser = OptionParser()
    parser.add_option("-l", "--lane", dest="lane", default=0)
    parser.add_option("-o", "--out_file", dest="out_file", default=None)
    parser.add_option("-g", "--genome_build", dest="genome_build", default=None)
    parser.add_option("-b", "--barcode_type", dest="barcode_type", default=None)
    parser.add_option("-t", "--trim", dest="trim", default=0)
    parser.add_option("-a", "--analysis", dest="analysis", default=None)
    parser.add_option("-d", "--description", dest="description", default=None)
    parser.add_option("-c", "--clear_description", dest="clear_description", default=False, \
                      action="store_true")
    parser.add_option("-i", "--ascii", dest="ascii", default=False, \
                      action="store_true")
    parser.add_option("-v", "--verbose", dest="verbose", default=False, \
                      action="store_true")
    options, args = parser.parse_args()
    if len(args) == 1:
        run_info_yaml, = args
    else:
        # NOTE(review): __doc__ is None (this module has no docstring), so
        # this prints "None" before exiting -- consider parser.print_help().
        print __doc__
        sys.exit()
    main(run_info_yaml, int(options.lane), options.out_file,
         options.genome_build, options.barcode_type, int(options.trim), \
         options.ascii, options.analysis, options.description, options.clear_description, options.verbose)
| 44.460784 | 132 | 0.622712 |
6ea886c1faad67e0969ccc2de41ff81ea08b3480 | 196 | py | Python | app/forms.py | haibincoder/DjangoTensorflow | 7fc606fa5121f0c48d7c8e649775094d86e6387a | [
"MIT"
] | 17 | 2018-07-21T04:14:09.000Z | 2022-03-09T08:32:49.000Z | app/forms.py | haibincoder/DjangoTensorflow | 7fc606fa5121f0c48d7c8e649775094d86e6387a | [
"MIT"
] | 24 | 2020-01-28T22:11:42.000Z | 2022-03-11T23:47:43.000Z | app/forms.py | haibincoder/DjangoTensorflow | 7fc606fa5121f0c48d7c8e649775094d86e6387a | [
"MIT"
] | 7 | 2018-12-13T08:55:07.000Z | 2021-06-26T08:08:01.000Z | from django import forms
#from app.models import Image
# class ImageForm(forms.ModelForm):
# class Meta:
# model = Image
# name = ['name']
# location = ['location']
| 17.818182 | 35 | 0.591837 |
6ea8ed0768136e88f53d0dbb5391ad2ceecced0d | 3,914 | py | Python | python/create_account_with_captcha.py | shivanshbindal9/MediaWiki-Action-API-Code-Samples | 7d673e73d7cabbf4342a18d275b271e7d4456808 | [
"MIT"
] | null | null | null | python/create_account_with_captcha.py | shivanshbindal9/MediaWiki-Action-API-Code-Samples | 7d673e73d7cabbf4342a18d275b271e7d4456808 | [
"MIT"
] | null | null | null | python/create_account_with_captcha.py | shivanshbindal9/MediaWiki-Action-API-Code-Samples | 7d673e73d7cabbf4342a18d275b271e7d4456808 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
create_account_with_captcha.py
MediaWiki Action API Code Samples
Demo of `createaccount` module: Create an account on a wiki with a special
authentication extension installed. This example considers a case of a wiki
where captcha is enabled through extensions like ConfirmEdit
(https://www.mediawiki.org/wiki/Extension:ConfirmEdit)
MIT license
"""
import requests
from flask import Flask, render_template, flash, request
S = requests.Session()  # shared HTTP session so the wiki's cookies persist across API calls
WIKI_URL = "https://test.wikipedia.org"
API_ENDPOINT = WIKI_URL + "/w/api.php"
# App config.
DEBUG = True
APP = Flask(__name__)
APP.config.from_object(__name__)  # picks up the module-level DEBUG setting
APP.config['SECRET_KEY'] = 'enter_your_secret_key'  # NOTE(review): placeholder -- set a real secret before deploying
@APP.route("/", methods=['GET', 'POST'])
def show_form():
    """ Render form template and handle form submission request """
    # Field definitions come from the wiki's authmanagerinfo API.
    fields = get_form_fields()
    captcha = fields['CaptchaAuthenticationRequest']
    captcha_url = WIKI_URL + captcha['captchaInfo']['value']
    captcha_id = captcha['captchaId']['value']
    display_fields = []
    user_fields = []
    captcha_fields = []
    # Flatten the nested {request-id: {field-name: {...}}} structure into a
    # flat list of {name, type, label} dicts for the template.
    for field in fields:
        for name in fields[field]:
            details = {
                'name': name,
                'type': fields[field][name]['type'],
                'label': fields[field][name]['label']
            }
            if field != "CaptchaAuthenticationRequest":
                user_fields.append(details)
            else:
                # Only the captcha answer box is rendered as a field; the
                # captcha image itself is passed separately via captcha_url.
                if name == 'captchaWord':
                    captcha_fields.append(details)
    # User fields first, captcha answer last.
    display_fields = user_fields + captcha_fields
    if request.method == 'POST':
        create_account(request.form, captcha_id)
    return render_template('create_account_form.html', \
        captcha=captcha_url, fields=display_fields)
def get_form_fields():
    """ Fetch the form fields from `authmanagerinfo` module """
    result = {}
    response = S.get(url=API_ENDPOINT, params={
        'action': 'query',
        'meta': 'authmanagerinfo',
        'amirequestsfor': 'create',
        'format': 'json'
    })
    data = response.json()
    # `and`-chaining leaves the variable falsy (instead of raising KeyError)
    # when a level is missing from the response.
    query = data and data['query']
    authmanagerinfo = query and query['authmanagerinfo']
    fields = authmanagerinfo and authmanagerinfo['requests']
    # NOTE(review): on a malformed response `fields` can be None here and the
    # loop below raises TypeError -- confirm whether that is acceptable.
    for field in fields:
        # Keep only the user-data, captcha and password request definitions.
        if field['id'] in ('MediaWiki\\Auth\\UserDataAuthenticationRequest', \
            'CaptchaAuthenticationRequest', 'MediaWiki\\Auth\\PasswordAuthenticationRequest'):
            result[field['id']] = field['fields']
    return result
def create_account(form, captcha_id):
    """ Send a post request along with create account token, user information
    and return URL to the API to create an account on a wiki """
    createtoken = fetch_create_token()
    response = S.post(url=API_ENDPOINT, data={
        'action': 'createaccount',
        'createtoken': createtoken,
        'username': form['username'],
        'password': form['password'],
        'retype': form['retype'],
        'email': form['email'],
        'createreturnurl': 'http://127.0.0.1:5000/',
        'captchaId': captcha_id,
        'captchaWord': form['captchaWord'],
        'format': 'json'
    })
    data = response.json()
    createaccount = data['createaccount']
    # Surface the API verdict to the user via flash messages.
    if createaccount['status'] == "PASS":
        flash('Success! An account with username ' + \
            form['username'] + ' has been created!')
    else:
        flash('Oops! Something went wrong -- ' + \
            createaccount['messagecode'] + "." + createaccount['message'])
def fetch_create_token():
    """ Fetch create account token via `tokens` module """
    params = {
        'action': 'query',
        'meta': 'tokens',
        'type': 'createaccount',
        'format': 'json'
    }
    data = S.get(url=API_ENDPOINT, params=params).json()
    return data['query']['tokens']['createaccounttoken']
if __name__ == "__main__":
APP.run()
| 29.428571 | 94 | 0.625447 |
6ea998dc1fdefa4215c75aefbef6ea7363fe98e0 | 2,396 | py | Python | main.py | smartsnake/PasswordGenerator | 985f05b81271d7a18c0f99fc77870754c48102d5 | [
"MIT"
] | null | null | null | main.py | smartsnake/PasswordGenerator | 985f05b81271d7a18c0f99fc77870754c48102d5 | [
"MIT"
] | null | null | null | main.py | smartsnake/PasswordGenerator | 985f05b81271d7a18c0f99fc77870754c48102d5 | [
"MIT"
] | null | null | null | import sys,os
import argparse
from util.MongoUtil import MongoUtil
from util.Generator import Generator
#Custom help messages
def help_msg(name=None):
    """Return the usage line shown by argparse (*name* is unused)."""
    return "main.py [-h] [--length LENGTH] [--search SEARCHFIELD SEARCHTEXT]\n"
def search_usage():
    """Return example invocations of the --search option."""
    examples = (
        "python main.py --search website example.com\n"
        "python main.py --search username admin\n"
    )
    return examples
if __name__ == '__main__':
    # Argument requirements.
    parser = argparse.ArgumentParser(description='Creates new passwords and adds them to mondodb.', usage=help_msg())
    parser.add_argument('--length', '-l', action='store', default='15', dest='length',
                        help='Password length to be generated. (default=15)')
    parser.add_argument('--search','-s', nargs=2, action='store', dest='search',
                        help='Used to search for existing password records.')
    parser.add_argument('--import', '-i', action='store', dest='location',
                        help='Import LastPass csv file.')
    args = parser.parse_args()
    mongoUtil = MongoUtil()
    gen = Generator()
    # Fix: the original wrapped everything in a bare `except:` that swallowed
    # every failure (Mongo errors, file errors, KeyboardInterrupt) and always
    # printed "Pass positive integer as arg". Only the int() conversion can
    # raise that error, so only it is guarded.
    try:
        pass_len = int(args.length)
    except ValueError:
        print("Pass positive integer as arg")
        raise SystemExit(1)
    search = args.search
    importLocation = args.location
    #Import LastPass CSV data
    if importLocation is not None:
        # Explicit check instead of `assert` (asserts vanish under -O).
        if not os.path.exists(importLocation):
            print(f"File not found: {importLocation}")
            raise SystemExit(1)
        mongoUtil.importLastPass(importLocation)
        print('Imported CSV Successfully')
    # Checks if search argument was provided
    elif search is None or len(search) != 2:
        # No search requested: create and store a new record.
        website = input("Enter website: ")
        username = input("Enter username/email: ")
        password = gen.generate_password(pass_len)
        record = {"website":website, "username":username, "password":password}
        #Save into database
        if mongoUtil.addRecord(record):
            print("Record added.")
        else:
            print("Recorded failed.")
    # Dont create password, search database instead.
    else:
        if search[0] not in mongoUtil.searchableFields:
            print(f'Searchable fields are [username or website]')
            raise SystemExit(1)
        else:
            record = mongoUtil.searchRecord(search[0], search[1])
            print(record)
6eaaaf9c78bb564348f5f92937368a9dbc35cca5 | 66 | py | Python | src/clusto/drivers/devices/networkswitches/__init__.py | rongoro/clusto | d6425433e5132e8778feeb9db4b8dd80b933b030 | [
"BSD-3-Clause"
] | 5 | 2015-07-19T08:28:01.000Z | 2021-07-08T14:49:27.000Z | src/clusto/drivers/devices/networkswitches/__init__.py | wt/clusto | c114ce7c42dcfa33c1e79f4d3b49313115fea06b | [
"BSD-3-Clause"
] | null | null | null | src/clusto/drivers/devices/networkswitches/__init__.py | wt/clusto | c114ce7c42dcfa33c1e79f4d3b49313115fea06b | [
"BSD-3-Clause"
] | 5 | 2015-01-06T07:57:07.000Z | 2021-11-10T18:01:33.000Z | from basicnetworkswitch import *
from cisconetworkswitch import *
| 22 | 32 | 0.848485 |
6eabff19e23935994d6606bc2eb537d62eca55d2 | 498 | py | Python | referralnote/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | referralnote/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | 5 | 2021-03-18T20:13:38.000Z | 2022-01-13T00:35:37.000Z | referralnote/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | from django.conf.urls import url
from referralnote import views
app_name = 'referral_note'  # namespace used when reversing these URL names
#view_obj = views.ReferralNotes()
# Every route carries the patient id (p_id) and, for existing notes, the
# referral note number (notenum); handlers live in referralnote.views.
urlpatterns = [
    url(r'^(?P<p_id>[0-9]+)/delete_referralnote/(?P<notenum>[0-9]+)$', views.delete_refnote,
        name='delete_referralnote'),
    url(r'^(?P<p_id>[0-9]+)/edit_referralnote/(?P<notenum>[0-9]+)$', views.edit_referralnote, name='edit_referralnote'),
    url(r'^(?P<p_id>[0-9]+)/new_referralnote/$', views.new_referralnote, name='new_referralnote'),
]
| 31.125 | 120 | 0.688755 |
6eac2fd4f481feb51a0c938cf7a2f928ddd2ec46 | 742 | py | Python | beetlesafari/io/_imread_raw.py | haesleinhuepf/beetle-safari | a6c90d10b6b4d67c153f87c83c02bd23f2b13843 | [
"BSD-3-Clause"
] | null | null | null | beetlesafari/io/_imread_raw.py | haesleinhuepf/beetle-safari | a6c90d10b6b4d67c153f87c83c02bd23f2b13843 | [
"BSD-3-Clause"
] | null | null | null | beetlesafari/io/_imread_raw.py | haesleinhuepf/beetle-safari | a6c90d10b6b4d67c153f87c83c02bd23f2b13843 | [
"BSD-3-Clause"
] | null | null | null | # thanks to max9111, https://stackoverflow.com/questions/41651998/python-read-and-convert-raw-3d-image-file
import numpy as np
from functools import lru_cache
@lru_cache(maxsize=2)
def imread_raw(filename: str, width: int = 1, height: int = 1, depth: int = 1, dtype=np.uint16):
    """Loads a raw (headerless) 3D image file with given dimensions from disk.

    The two most recent calls are cached via ``lru_cache``; note the cache
    keys on the *arguments*, not on the file's contents.

    Parameters
    ----------
    filename : path to the raw file
    width, height, depth : voxel dimensions of the volume
    dtype : numpy dtype stored in the file (default: uint16)

    Returns
    -------
    numpy array of shape (depth, height, width) with the file's pixels
    """
    # Context manager guarantees the handle is closed even if np.fromfile or
    # the reshape raises (the original open/close pair leaked it on error).
    with open(filename, 'rb') as f:
        img_arr = np.fromfile(f, dtype=dtype)
    return img_arr.reshape(depth, height, width)
6eaca75829400aaa4a8f204a5c147dfdc9d105dc | 655 | py | Python | brain_games/games/gcd.py | belousovromnik/python-project-lvl1 | 11b4160b083b38c64b42ddd529d1379538eb9230 | [
"MIT"
] | null | null | null | brain_games/games/gcd.py | belousovromnik/python-project-lvl1 | 11b4160b083b38c64b42ddd529d1379538eb9230 | [
"MIT"
] | null | null | null | brain_games/games/gcd.py | belousovromnik/python-project-lvl1 | 11b4160b083b38c64b42ddd529d1379538eb9230 | [
"MIT"
] | null | null | null | import random
from brain_games.constants import MINIMAL_RANDOM, MAXIMAL_RANDOM
def greeting():
    """Return the rule text shown to the player at game start."""
    rule = 'Find the greatest common divisor of given numbers.'
    return rule
def main_action():
    """Build one game round: the question string and its correct answer."""
    first = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    second = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    question = '{} {}'.format(first, second)
    answer = str(gcd(first, second))
    return question, answer
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    # remainder of division
    remainder = a % b
    while remainder != 0:
        a, b = b, remainder
        remainder = a % b
    return b
| 24.259259 | 64 | 0.728244 |
6eaef18c836b626ea67de2012928ab0468afa91b | 10,336 | py | Python | highest_iso.py | chem-william/helix_fit | b9921b0068f1a3084985ca820094a0db15b6aac2 | [
"MIT"
] | null | null | null | highest_iso.py | chem-william/helix_fit | b9921b0068f1a3084985ca820094a0db15b6aac2 | [
"MIT"
] | null | null | null | highest_iso.py | chem-william/helix_fit | b9921b0068f1a3084985ca820094a0db15b6aac2 | [
"MIT"
] | null | null | null | import os
from ase.visualize import view
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.optimize import curve_fit
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(
style="ticks",
rc={
"font.family": "Arial",
"font.size": 40,
"axes.linewidth": 2,
"lines.linewidth": 5,
},
font_scale=3.5,
palette=sns.color_palette("Set2")
)
c = ["#007fff", "#ff3616", "#138d75", "#7d3c98", "#fbea6a"] # Blue, Red, Green, Purple, Yellow
import utilities
from Helix import Helix
import matplotlib
matplotlib.use("Qt5Agg")
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (rho, phi)."""
    return np.sqrt(x ** 2 + y ** 2), np.arctan2(y, x)
def center_atoms(atoms, center):
    """Translate every atom in-place so that `center` becomes the origin."""
    cx, cy, cz = center[0], center[1], center[2]
    for atom in atoms:
        atom.position[0] -= cx
        atom.position[1] -= cy
        atom.position[2] -= cz
    return atoms
def print_jmol_str(line_values, center):
    """Append a Jmol draw-curve command for the fitted helix to the
    export script, echoing it to stdout as well.

    line_values: iterable of (x, y, z) points in the centred frame.
    center: offset added back to every point (original coordinates).
    """
    out_dir = "analyzed/diffp_2me_homo-1"
    banner = "*" * 25
    print(banner)
    print(f"Writing to {out_dir}")
    print(banner)
    pieces = ["draw curve1 CURVE curve width 0.3"]
    for point in line_values:
        shifted = (point[0] + center[0],
                   point[1] + center[1],
                   point[2] + center[2])
        pieces.append(f" {{ {shifted[0]} {shifted[1]} {shifted[2]} }}")
    curve_str = "".join(pieces)
    with open(f"{out_dir}/jmol_export.spt", "a") as f:
        f.write(curve_str)
    print(curve_str)
def remove_outlier(ordered):
    """Drop the first row of `ordered` when its z-gap to the next row is
    outside one standard deviation of the consecutive z-gaps.

    `ordered` is an (n, k) array whose column 2 holds z-coordinates.
    """
    gaps = np.abs(np.diff(ordered[:, 2]))
    spread = np.std(gaps)
    centre = np.mean(gaps)
    # The first point lies much further down the z-axis than the rest.
    if not (centre - spread) < gaps[0] < (centre + spread):
        return ordered[1:]
    # No outlier found: return the original array.
    return ordered
# --- Run configuration -------------------------------------------------
# Indices into `atoms`: [center atom, bottom reference C, top reference C]
center_bottom_top = np.array([2, 9, 7])
handedness = None
truncation = [None, None]
file = "./8cum_me_homo_homo/homo.cube"
ax = plt.axes(projection='3d')
# Points farther than `radius` from the helical axis are ignored.
radius = 1.4
# Half-width of the plot axes.
limits = 3
# Check that the analysis hasn't already been done
names = file.split("/")
folder = "/".join(names[-3:-1])
print(f"foldername: {folder}")
if os.path.exists(folder):
    print(f"Found existing data files in {folder}")
    planes = np.load(folder + "/planes.npy", allow_pickle=True)
    atoms, _, _, center = np.load(
        folder + "/atom_info.npy", allow_pickle=True
    )
    xyz_vec = np.load(folder + "/xyz_vec.npy", allow_pickle=True)
else:
    # No cached results: parse the cube file and centre everything on
    # the chosen atom.
    atoms, all_info, xyz_vec = utilities.read_cube(file)
    # Sort the data after z-value
    all_info = all_info[all_info[:, 2].argsort()]
    # Center of the molecule is chosen to be Ru
    # center = atoms[3].position
    center = atoms[center_bottom_top[0]].position
    all_info[:, :3] = all_info[:, :3] - center
    atoms = center_atoms(atoms, center)
# Group the (x, y, z, iso) rows into constant-z planes.
# NOTE(review): when the cached branch above was taken, `all_info` is
# never defined and the next line raises NameError -- this loop seems
# to assume the cube file was just parsed.  Confirm intended flow.
planes = []
plane = []
prev_coord = all_info[0]
for coordinate in tqdm(all_info, desc="Finding planes.."):
    if np.equal(coordinate[2], prev_coord[2]):
        # we're in the same plane so add the coordinate
        plane.append([coordinate[0],
                      coordinate[1],
                      coordinate[2],
                      coordinate[3]])
    else:
        plane = np.array(plane)
        # Drop coordinates with isovalues == 0.0
        plane = plane[np.where(plane[:, 3] != 0.0)]
        if plane.size != 0:
            planes.append(plane)
        plane = []
    prev_coord = coordinate
# NOTE(review): planes have different row counts, so this builds a
# ragged object array -- deprecated in newer numpy; verify.
planes = np.array(planes)
# --- Extract one averaged point per (x, y) run of per-plane maxima ----
# Walk the z-planes between the two reference carbons; within each
# plane take the location of the most positive isovalue, and collapse
# consecutive planes whose maximum sits at the same (x, y) into a
# single point with the mean z.
mean_z = []
ordered = []
all_r = []
bottom_carbon = atoms[center_bottom_top[1]].position
top_carbon = atoms[center_bottom_top[2]].position
print('Cleaning values..')
for idx, plane in enumerate(planes):
    if top_carbon[2] > plane[0, 2] > bottom_carbon[2]:
        if idx < len(planes) - 1:
            # Uncomment to find points with the most positive isovalue
            # Rare cases there might be the same maximum at two locations
            # That's I just take the first one with [0][0]
            maximum = np.amax(plane[:, 3])
            max_index = np.where(plane[:, 3] == maximum)[0][0]
            next_plane = planes[idx + 1]
            next_maximum = np.amax(next_plane[:, 3])
            next_index = np.where(next_plane[:, 3] == next_maximum)[0][0]
            # Uncomment to find points with the most negative isovalue
            # minimum = np.amin(plane[:, 3])
            # min_index = np.where(plane[:, 3] == minimum)
            # next_plane = planes[idx + 1]
            # next_minimum = np.amin(next_plane[:, 3])
            # next_index = np.where(next_plane[:, 3] == next_minimum)
            current_iso_idx = max_index
            next_iso_idx = next_index
            # Check if point is within certain radius of the helical axis
            if cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0] < radius:
                current_x = plane[current_iso_idx, 0].item()
                current_y = plane[current_iso_idx, 1].item()
                current_z = plane[current_iso_idx, 2].item()
                current_iso = plane[current_iso_idx, 3].item()
                next_x = next_plane[next_index, 0].item()
                next_y = next_plane[next_index, 1].item()
                next_z = next_plane[next_index, 2].item()
                next_iso = next_plane[next_iso_idx, 3].item()
                # Current point is beneath the next point
                # NOTE(review): bitwise & on booleans works here, but
                # `and` would be the conventional operator.
                if (current_x == next_x) & (current_y == next_y):
                    delta_z = abs(next_z - current_z)
                    # Are they direcly on top of each other?
                    if round(delta_z, 4) <= 2*round(xyz_vec[2], 4):
                        mean_z.append(current_z)
                # They are not directly on top of each other
                else:
                    ax.scatter(
                        plane[current_iso_idx, 0],
                        plane[current_iso_idx, 1],
                        plane[current_iso_idx, 2],
                        # c='purple',
                        c=c[0],
                    )
                    # To be used as an estimate of
                    # the radius when fitting the helix
                    all_r.append(
                        cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0]
                    )
                    mean_z.append(current_z)
                    ordered.append(
                        [current_x, current_y, np.mean(mean_z), current_iso]
                    )
                    mean_z = []
# TODO: Maybe I'm skipping the last point? Does it even matter?
# else:
#     prev_x = current_x
#     prev_y = current_y
#     prev_z = current_z
#     prev_iso = current_iso
#     current_x = plane[max_index, 0].item()
#     current_y = plane[max_index, 1].item()
#     current_z = plane[max_index, 2].item()
#     current_iso = plane[max_index, 3].item()
#     if cart2pol(current_x, current_y)[0] < radius:
#         all_r.append(cart2pol(plane[max_index, 0], plane[max_index, 1])[0])
#         if (current_x == prev_x) & (current_y == prev_y):
#             delta_z = abs(prev_z - current_z)
#             # Are they directly on top of each other?
#             if round(delta_z, 4) <= 2*round(z_vec, 4):
#                 mean_z.append(current_z)
#                 ordered.append([current_x,
#                                 current_y,
#                                 np.mean(mean_z),
#                                 current_iso])
#             # They are not directly on top of each other
#             else:
#                 mean_z.append(current_z)
#                 ordered.append([current_x,
#                                 current_y,
#                                 np.mean(mean_z),
#                                 current_iso])
#                 mean_z = []
ordered = np.array(ordered)
mean_radius = np.mean(all_r)
# Check if the first point is an outlier
ordered = remove_outlier(ordered)
# ordered, mean_radius = np.load("orbital_16_helix.npy", allow_pickle=True)
# ax.plot([0, ordered[0, 0]], [0, ordered[0, 1]], [0, 0])
# Line that connects each data point
# ax.plot(
#     ordered[truncation[0]:truncation[1], 0],
#     ordered[truncation[0]:truncation[1], 1],
#     ordered[truncation[0]:truncation[1], 2],
#     color='blue'
# )
# --- Fit the collected points to a parametric helix (project class) ---
print('Fitting datapoints to helix..')
helix = Helix(
    ordered[0:, :3],
    fitting_method='ampgo',
    radius=mean_radius,
    handedness=handedness,
    truncation=truncation,
)
out = helix.fit_helix()
fitted_values = helix.fitted_values
# print_jmol_str(fitted_values, center)
print('RMSD: {}'.format(helix.RMSD))
print(out)
print('handedness: {}'.format(helix.handedness))
delta_z = helix.get_statistics()
print('std: {}'.format(np.std(delta_z)))
print('mean: {}'.format(np.mean(delta_z)))
print(f'p-value: {helix.p_value}')
# Draw the fitted curve and the helix frame vectors a, v, w.
ax.plot(
    fitted_values[:, 0],
    fitted_values[:, 1],
    fitted_values[:, 2],
)
ax.plot((0, helix.a[0]), (0, helix.a[1]), (0, helix.a[2]))
ax.plot((0, helix.v[0]), (0, helix.v[1]), (0, helix.v[2]))
ax.plot((0, helix.w[0]), (0, helix.w[1]), (0, helix.w[2]), color='black')
print('Plotting atoms..')
for atom in atoms:
    if atom.symbol == 'C':
        ax.scatter(
            atom.position[0],
            atom.position[1],
            atom.position[2],
            c='black'
        )
    if atom.symbol == 'Ru':
        ax.scatter(
            atom.position[0],
            atom.position[1],
            atom.position[2],
            c='turquoise'
        )
    # if atom.symbol == 'P':
    #     ax.scatter3D(atom.position[0],
    #                  atom.position[1],
    #                  atom.position[2],
    #                  c='orange')
ax.set_xlim([-limits, limits])
ax.set_ylim([-limits, limits])
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
| 31.039039 | 95 | 0.544601 |
6eb0570098060d342879612db34c154166a28bf0 | 477 | py | Python | adlibre_dms/views.py | adlibre/Adlibre-DMS | 96ce41b5699e2ea58e3ca560d46d481e954f17a4 | [
"BSD-3-Clause"
] | 48 | 2015-02-25T03:20:35.000Z | 2022-03-11T09:02:42.000Z | adlibre_dms/views.py | adlibre/Adlibre-DMS | 96ce41b5699e2ea58e3ca560d46d481e954f17a4 | [
"BSD-3-Clause"
] | 6 | 2015-01-09T08:30:53.000Z | 2016-12-05T15:03:01.000Z | adlibre_dms/views.py | adlibre/Adlibre-DMS | 96ce41b5699e2ea58e3ca560d46d481e954f17a4 | [
"BSD-3-Clause"
] | 24 | 2015-01-15T11:41:40.000Z | 2022-03-17T20:31:06.000Z | from django.conf import settings
from django import http
from django.template import RequestContext, loader
def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: `500.html`
    Context:
        MEDIA_URL
            Path of static media (e.g. "media.example.org")
    """
    t = loader.get_template(template_name)
    # BUG FIX: RequestContext takes the HttpRequest as its first
    # argument and the extra context dict second.  Previously the dict
    # was passed where the request belongs, so context processors were
    # handed a plain dict instead of the request.
    return http.HttpResponseServerError(t.render(RequestContext(request, {
        'MEDIA_URL': settings.MEDIA_URL
    })))
| 26.5 | 65 | 0.685535 |
6eb100cce3b9fcde7fec2f63e9fe107b7c59b8c7 | 372 | py | Python | operations/fleet_management/migrations/0005_merge_20171213_1548.py | kaizer88/emps | 2669b32c46befcf1a19390fb25013817e6b00980 | [
"MIT"
] | null | null | null | operations/fleet_management/migrations/0005_merge_20171213_1548.py | kaizer88/emps | 2669b32c46befcf1a19390fb25013817e6b00980 | [
"MIT"
] | null | null | null | operations/fleet_management/migrations/0005_merge_20171213_1548.py | kaizer88/emps | 2669b32c46befcf1a19390fb25013817e6b00980 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 13:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: performs no schema changes, it only reconciles
    # the two divergent leaf migrations below into one linear history.
    dependencies = [
        ('fleet_management', '0003_vehicledocument_document_type'),
        ('fleet_management', '0004_incidentdocument_upload'),
    ]
    # Intentionally empty -- nothing to apply.
    operations = [
    ]
| 21.882353 | 67 | 0.693548 |
6eb3861496ef592c07af6ae7b192e47f4452b309 | 1,481 | py | Python | secure_ml/attack/model_inversion.py | Koukyosyumei/secure_ml | 9da24f4ce4782ec2f6dd63b0437f657a0e190e40 | [
"MIT"
] | 10 | 2021-02-23T01:32:48.000Z | 2021-11-16T06:02:26.000Z | secure_ml/attack/model_inversion.py | Koukyosyumei/secure_ml | 9da24f4ce4782ec2f6dd63b0437f657a0e190e40 | [
"MIT"
] | 2 | 2021-05-16T08:38:19.000Z | 2021-06-20T09:01:45.000Z | secure_ml/attack/model_inversion.py | Koukyosyumei/secure_ml | 9da24f4ce4782ec2f6dd63b0437f657a0e190e40 | [
"MIT"
] | 4 | 2021-02-25T04:33:06.000Z | 2021-08-17T05:43:47.000Z | import torch
from ..attack.base_attack import BaseAttacker
class Model_inversion(BaseAttacker):
    """Model inversion attack.

    Reconstructs a representative input for a target class by gradient
    descent on the victim model's confidence.
    reference https://dl.acm.org/doi/pdf/10.1145/2810103.2813677
    """

    def __init__(self, target_model, input_shape):
        """
        Args:
            target_model: model of the victim
            input_shape: input shape of the target model

        Attributes:
            target_model: model of the victim
            input_shape: input shape of the target model
        """
        super().__init__(target_model)
        self.input_shape = input_shape

    def attack(self, target_label,
               lam, num_itr, process_func=lambda x: x):
        """Execute the model inversion attack on the target model.

        Args:
            target_label (int): target label
            lam (float): step size
            num_itr (int): number of iterations
            process_func (callable): post-processing applied to the
                cost before backprop (default: identity)

        Returns:
            x_numpy (np.array): reconstructed input
            log (list of float): cost value per iteration
        """
        log = []
        x = torch.zeros(self.input_shape, requires_grad=True)
        for _ in range(num_itr):
            # Cost is (1 - confidence) of the target label; minimising
            # it drives x toward a high-confidence input for that class.
            c = process_func(1 - self.target_model(x)[:, [target_label]])
            c.backward()
            with torch.no_grad():
                x -= lam * x.grad
            # BUG FIX: backward() accumulates into x.grad; without
            # resetting it every step used the running sum of all past
            # gradients instead of the current one.
            x.grad.zero_()
            log.append(c.item())
        x_numpy = x.to('cpu').detach().numpy().copy()
        return x_numpy, log
| 30.854167 | 73 | 0.574612 |
6eb49308c616d061678bacc901cf34896446490d | 448 | py | Python | tests/integration_tests/data/service_utils_integration_test/main_service.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | 1 | 2020-03-20T21:33:56.000Z | 2020-03-20T21:33:56.000Z | tests/integration_tests/data/service_utils_integration_test/main_service.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | 1 | 2020-03-22T03:48:45.000Z | 2020-03-22T03:48:45.000Z | tests/integration_tests/data/service_utils_integration_test/main_service.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | null | null | null | """ Basic service for testing the service_utils run_main """
def main(to_send, config):
    """Entry point invoked by the service framework; just prints a greeting."""
    message = 'Hello World Main...'
    print(message)
# Declares the connections this test service exposes to the framework.
connection_models = {
    # outbound connections only
    'out': {
        'out_connection_1': {
            'connection_type': 'requester',
            # fields a request payload must contain (name -> type)
            'required_arguments': {
                'this_is_a_test_arg': str,
            },
            # fields the reply payload must contain (name -> type)
            'required_return_arguments': {
                'this_is_a_return_arg': str,
            },
        }
    }
}
| 22.4 | 60 | 0.517857 |
6eba09dcdf024f11f4d2e6c7da43fa66a4daf005 | 788 | py | Python | Curso de Python/Mundo 2/aula12/ex045.py | josevini/python | 45cde6d0ae8310b1d8ebb30ae1dd17c0ad0dd02a | [
"MIT"
] | null | null | null | Curso de Python/Mundo 2/aula12/ex045.py | josevini/python | 45cde6d0ae8310b1d8ebb30ae1dd17c0ad0dd02a | [
"MIT"
] | null | null | null | Curso de Python/Mundo 2/aula12/ex045.py | josevini/python | 45cde6d0ae8310b1d8ebb30ae1dd17c0ad0dd02a | [
"MIT"
] | null | null | null | from time import sleep
from random import randint
# Jokenpo (rock-paper-scissors) against the computer.
# Options: 0 = Pedra (rock), 1 = Papel (paper), 2 = Tesoura (scissors).
itens = ('Pedra', 'Papel', 'Tesoura')
print('Suas opções: ')
print("""[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA""")
computador = randint(0,2)
jogador = int(input('Qual é a sua jogada? '))
# Countdown before revealing both plays.
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-=' * 11)
# NOTE(review): a jogador value outside 0-2 raises IndexError below
# (itens[jogador]) before the 'Opção inválida' branch can ever run --
# the input should be validated right after it is read.
print('Computador jogou {}'.format(itens[computador]))
print('Jogador jogou {}'.format(itens[jogador]))
print('-=' * 11)
# Player wins: paper>rock, rock>scissors, scissors>paper.
if computador == 0 and jogador == 1 or computador == 2 and jogador == 0 or computador == 1 and jogador == 2:
    print('JOGADOR VENCE')
elif jogador == 0 and computador == 1 or jogador == 2 and computador == 0 or jogador == 1 and computador == 2:
    # BUG FIX: the message was misspelled 'COPUTADOR VENCE'.
    print('COMPUTADOR VENCE')
elif jogador == computador:
    print('EMPATE')
else:
    print('Opção inválida')
| 29.185185 | 110 | 0.651015 |
6eba2b9c96279d6bf89fd584cbe501fd83d1e0c7 | 2,708 | py | Python | wtdb_test.py | hughgrigg/wtdb | 6614053cbbc0086d5470ee7e9f2d6a68fdb9ed94 | [
"MIT"
] | null | null | null | wtdb_test.py | hughgrigg/wtdb | 6614053cbbc0086d5470ee7e9f2d6a68fdb9ed94 | [
"MIT"
] | null | null | null | wtdb_test.py | hughgrigg/wtdb | 6614053cbbc0086d5470ee7e9f2d6a68fdb9ed94 | [
"MIT"
] | null | null | null | import wtdb
import unittest
class TestWtdbFunctions(unittest.TestCase):
    """Unit tests for the module-level helper functions in ``wtdb``."""
    def test_n_swaps_zero(self):
        # Zero swaps can never produce a word pair.
        self.assertEqual(
            frozenset(),
            wtdb.n_swaps('foo', 'bar', 0),
        )
    def test_n_swaps_single(self):
        # NOTE(review): assertSequenceEqual is applied to two sets here
        # (and below); set iteration order is not guaranteed, so
        # assertSetEqual would be the safer assertion -- confirm before
        # changing.
        self.assertSequenceEqual(
            {
                frozenset({'bar', 'foo'}), frozenset({'boo', 'far'}),
                frozenset({'oo', 'fbar'}), frozenset({'bfoo', 'ar'}),
            },
            wtdb.n_swaps('foo', 'bar', 1),
        )
    def test_n_swaps_one_double(self):
        # Every way of moving up to two leading letters between the words.
        self.assertSequenceEqual(
            {
                frozenset({'strain', 'team'}), frozenset({'train', 'steam'}),
                frozenset({'srain', 'tteam'}), frozenset({'trsteam', 'ain'}),
                frozenset({'stain', 'tream'}), frozenset({'tsteam', 'rain'}),
                frozenset({'sttrain', 'eam'}), frozenset({'sain', 'trteam'}),
            },
            wtdb.n_swaps('steam', 'train', 2),
        )
    def test_order_pair(self):
        # order_pair appears to put the longer word first -- confirm
        # against its implementation.
        self.assertSequenceEqual(
            ('national', 'rail'),
            wtdb.order_pair(('rail', 'national'))
        )
    def test_order_pair_same_length(self):
        # Equal-length words: presumably ordered lexicographically;
        # verify against wtdb.order_pair.
        self.assertSequenceEqual(
            ('steam', 'train'),
            wtdb.order_pair(('train', 'steam'))
        )
class TestWordSet(unittest.TestCase):
    """Tests for the ``wtdb.WordSet`` container."""
    def test_find_swaps_none(self):
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        # A word unrelated to the set yields no swap candidates.
        self.assertListEqual([], list(word_set.find_swaps('hello')))
    def test_find_swaps_single_letter(self):
        word_set = wtdb.WordSet()
        word_set.add('national')
        word_set.add('rail')
        word_set.add('rational')
        # Swapping one letter relates (national, rail) to (rational, nail).
        self.assertListEqual(
            [
                (('national', 'rail'), ('rational', 'nail')),
            ],
            sorted(word_set.find_swaps('nail')),
        )
    def test_find_swaps_double_letter(self):
        word_set = wtdb.WordSet()
        word_set.add('steam')
        word_set.add('train')
        word_set.add('team')
        # Moving 'st' between words relates (steam, train) to (strain, team).
        self.assertListEqual(
            [
                (('steam', 'train'), ('strain', 'team')),
            ],
            sorted(word_set.find_swaps('strain')),
        )
    def test_validate_ok(self):
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        # Every supplied word is a member of the set.
        self.assertTrue(word_set.validate('foo', 'bar'))
    def test_validate_bad(self):
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        # 'foobar' is not in the set, so validation must fail.
        self.assertFalse(word_set.validate('foo', 'bar', 'foobar'))
if __name__ == '__main__':
    # Run the doctests embedded in wtdb before the unittest suite.
    import doctest
    doctest.testmod(wtdb)
    unittest.main()
| 28.505263 | 77 | 0.531388 |
6ebb9230b1ec2e150157978f3bf6129f5b2db4e9 | 5,423 | py | Python | envisionpy/processor_network/BandstructureNetworkHandler.py | Vevn/ENVISIoN | d0e48a5ec38ed95375f632eafdc5814415f0f570 | [
"BSD-2-Clause"
] | null | null | null | envisionpy/processor_network/BandstructureNetworkHandler.py | Vevn/ENVISIoN | d0e48a5ec38ed95375f632eafdc5814415f0f570 | [
"BSD-2-Clause"
] | null | null | null | envisionpy/processor_network/BandstructureNetworkHandler.py | Vevn/ENVISIoN | d0e48a5ec38ed95375f632eafdc5814415f0f570 | [
"BSD-2-Clause"
] | 1 | 2020-05-15T14:52:19.000Z | 2020-05-15T14:52:19.000Z | # ENVISIoN
#
# Copyright (c) 2019 Jesper Ericsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
# TODO: add hdf5 validation
import sys,os,inspect
import inviwopy
import numpy as np
import h5py
from .LinePlotNetworkHandler import LinePlotNetworkHandler
class BandstructureNetworkHandler(LinePlotNetworkHandler):
    """ Handler class for charge visualization network.
        Sets up and manages the charge visualization
    """
    def __init__(self, hdf5_path, inviwoApp):
        # Build the generic line-plot network first, then specialise it
        # for band-structure data from the given HDF5 file.
        LinePlotNetworkHandler.__init__(self, inviwoApp)
        self.setup_bandstructure_network(hdf5_path)
    def get_ui_data(self):
        # Return data required to fill user interface
        return [
            "bandstructure",
            LinePlotNetworkHandler.get_ui_data(self)
        ]
    # ------------------------------------------
    # ------- Network building functions -------
    def setup_bandstructure_network(self, hdf5_path, xpos=0, ypos=0):
        """Wire the Inviwo processor chain for a band-structure plot.

        hdf5_path: path of the HDF5 file holding /Bandstructure/Bands.
        xpos, ypos: canvas position of the first processor; subsequent
        processors are stacked 75 units apart vertically.
        """
        with h5py.File(hdf5_path,"r") as h5:
            # A bool that tells if the band structure should be normalized around the fermi energy.
            has_fermi_energy = "/FermiEnergy" in h5
            # Start building the Inviwo network.
            h5source = self.add_h5source(hdf5_path, xpos, ypos)
            ypos += 75
            path_selection = self.add_processor("org.inviwo.hdf5.PathSelection", "Select Bandstructure", xpos, ypos)
            self.network.addConnection(h5source.getOutport("outport"),
                                       path_selection.getInport("inport"))
            # if has_fermi_energy:
            #     fermi_point = self.add_processor("org.inviwo.HDF5ToPoint", "Fermi energy", xpos + 175, ypos)
            #     self.network.addConnection(h5source.getOutport("outport"),
            #                                fermi_point.getInport("hdf5HandleFlatMultiInport"))
            ypos += 75
            all_children_processor = self.add_processor("org.inviwo.HDF5PathSelectionAllChildren", "Select all bands", xpos, ypos)
            self.network.addConnection(path_selection.getOutport("outport"),
                                       all_children_processor.getInport("hdf5HandleInport"))
            ypos += 75
            HDF5_to_function = self.add_processor("org.inviwo.HDF5ToFunction", "Convert to function", xpos, ypos)
            self.network.addConnection(all_children_processor.getOutport("hdf5HandleVectorOutport"),
                                       HDF5_to_function.getInport("hdf5HandleFlatMultiInport"))
            ypos += 75
            # Feed the converted functions into the line-plot dataframe
            # processor created by LinePlotNetworkHandler.
            function_to_dataframe = self.get_processor("Function to dataframe")
            self.network.addConnection(HDF5_to_function.getOutport("functionVectorOutport"),
                                       function_to_dataframe.getInport("functionFlatMultiInport"))
            # if has_fermi_energy:
            #     self.network.addConnection(fermi_point.getOutport("pointVectorOutport"),
            #                                self.get_processor("Line plot").getInport("pointInport"))
            if has_fermi_energy:
                self.set_title("Energy - Fermi energy  [eV]")
            else:
                self.set_title("Energy  [eV]")
            # energy_text_processor.font.fontSize.value = 20
            # energy_text_processor.position.value = inviwopy.glm.vec2(0.31, 0.93)
            # energy_text_processor.color.value = inviwopy.glm.vec4(0,0,0,1)
            # Start modifying properties.
            path_selection.selection.value = '/Bandstructure/Bands'
            # HDF5_to_function.yPathSelectionProperty.value = '/Energy'
            # self.toggle_all_y(True)
            self.set_y_selection_type(2)
            # background_processor.bgColor1.value = inviwopy.glm.vec4(1)
            # background_processor.bgColor2.value = inviwopy.glm.vec4(1)
            # canvas_processor.inputSize.dimensions.value = inviwopy.glm.ivec2(900, 700)
            # if has_fermi_energy:
            #     fermi_point.pathSelectionProperty.value = '/FermiEnergy'
| 47.570175 | 130 | 0.650009 |
6ebdf946af2b4bf33b3b815af562307b2b1a73fd | 26,442 | py | Python | smt/decoder/stackdecoder.py | kenkov/smt | db0a9fff15876442f1895b3ef730e91f7c84ad9b | [
"MIT"
] | 83 | 2015-01-12T14:40:08.000Z | 2022-01-07T09:41:09.000Z | smt/decoder/stackdecoder.py | HimmelStein/smt | db0a9fff15876442f1895b3ef730e91f7c84ad9b | [
"MIT"
] | 1 | 2016-12-08T21:22:23.000Z | 2016-12-08T21:22:23.000Z | smt/decoder/stackdecoder.py | HimmelStein/smt | db0a9fff15876442f1895b3ef730e91f7c84ad9b | [
"MIT"
] | 38 | 2015-04-08T04:39:13.000Z | 2021-11-14T13:16:19.000Z | #! /usr/bin/env python
# coding:utf-8
from __future__ import division, print_function
import math
# sqlalchemy
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, TEXT, REAL, INTEGER
from sqlalchemy.orm import sessionmaker
from smt.db.tables import Tables
#from pprint import pprint
# prepare classes for sqlalchemy
class Phrase(declarative_base()):
    """ORM row of the phrase table: one (lang1, lang2) phrase pair."""
    __tablename__ = "phrase"
    id = Column(INTEGER, primary_key=True)
    # phrase text in language 1
    lang1p = Column(TEXT)
    # phrase text in language 2
    lang2p = Column(TEXT)
class TransPhraseProb(declarative_base()):
    """ORM row of the phrase-probability table.

    NOTE(review): each model calls declarative_base() separately, so the
    two classes live on distinct metadata objects -- presumably
    intentional here, but it prevents create_all() covering both.
    """
    __tablename__ = "phraseprob"
    id = Column(INTEGER, primary_key=True)
    lang1p = Column(TEXT)
    lang2p = Column(TEXT)
    # Directional translation probabilities; judging by phrase_prob,
    # p2_1 is used for the 2->1 direction and p1_2 for 1->2.
    p1_2 = Column(REAL)
    p2_1 = Column(REAL)
def phrase_prob(lang1p, lang2p,
                transfrom=2,
                transto=1,
                db="sqlite:///:memory:",
                init_val=1.0e-10):
    """Look up a phrase translation probability in the phrase table.

    Args:
        lang1p: phrase in language 1
        lang2p: phrase in language 2
        transfrom, transto: translation direction, either 2->1 or 1->2
        db: SQLAlchemy database URL of the phrase table
        init_val: value returned when the phrase pair is unknown

    Returns:
        The stored probability column for the requested direction, or
        ``init_val`` when the pair is not in the table.

    Raises:
        ValueError: if (transfrom, transto) is neither (2, 1) nor (1, 2)
            (previously this silently returned None).
    """
    engine = create_engine(db)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # search
        query = session.query(TransPhraseProb).filter_by(lang1p=lang1p,
                                                         lang2p=lang2p)
        if transfrom == 2 and transto == 1:
            try:
                # Be Careful! The order of conditional prob is reversed
                # as transfrom and transto because of bayes rule
                return query.one().p2_1
            except sqlalchemy.orm.exc.NoResultFound:
                return init_val
        elif transfrom == 1 and transto == 2:
            try:
                return query.one().p1_2
            except sqlalchemy.orm.exc.NoResultFound:
                return init_val
        else:
            raise ValueError(
                "unsupported direction: transfrom={}, transto={}".format(
                    transfrom, transto))
    finally:
        # the original leaked the session; always release it
        session.close()
def available_phrases(inputs, transfrom=2, transto=1, db="sqlite:///:memory:"):
    """Collect every contiguous sub-phrase of ``inputs`` found in the
    phrase table, as tuples of (1-based position, word) pairs.

    >>> available_phrases(u"He is a teacher.".split(),
    ...                   db="sqlite:///:memory:")
    set([((1, u'He'),),
         ((1, u'He'), (2, u'is')),
         ((2, u'is'),),
         ((2, u'is'), (3, u'a')),
         ((3, u'a'),),
         ((4, u'teacher.'),)])

    Raises:
        ValueError: if (transfrom, transto) is neither (2, 1) nor (1, 2)
            (previously an invalid direction crashed later with an
            unbound ``query`` NameError).
    """
    if (transfrom, transto) not in ((2, 1), (1, 2)):
        raise ValueError(
            "unsupported direction: transfrom={}, transto={}".format(
                transfrom, transto))
    engine = create_engine(db)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        available = set()
        for i, f in enumerate(inputs):
            f_rest = ()
            for fr in inputs[i:]:
                f_rest += (fr,)
                rest_phrase = u" ".join(f_rest)
                # Direction 2->1 searches the lang2 column, 1->2 the
                # lang1 column.
                if transfrom == 2 and transto == 1:
                    query = session.query(Phrase).filter_by(lang2p=rest_phrase)
                else:
                    query = session.query(Phrase).filter_by(lang1p=rest_phrase)
                if list(query):
                    available.add(tuple(enumerate(f_rest, i+1)))
        return available
    finally:
        # always release the session (the original leaked it)
        session.close()
class HypothesisBase(object):
    """Plain container for the state of one stack-decoder hypothesis.

    Stores the translated phrase pair, the covered/remaining source
    positions, the accumulated score (with and without future cost)
    and a link to the previous hypothesis; all fields are exposed
    through read-only properties.
    """
    def __init__(self,
                 db,
                 totalnumber,
                 sentences,
                 ngram,
                 ngram_words,
                 inputps_with_index,
                 outputps,
                 transfrom,
                 transto,
                 covered,
                 remained,
                 start,
                 end,
                 prev_start,
                 prev_end,
                 remain_phrases,
                 prob,
                 prob_with_cost,
                 prev_hypo,
                 cost_dict
                 ):
        self._db = db
        self._totalnumber = totalnumber
        self._sentences = sentences
        self._ngram = ngram
        self._ngram_words = ngram_words
        self._inputps_with_index = inputps_with_index
        self._outputps = outputps
        self._transfrom = transfrom
        self._transto = transto
        self._covered = covered
        self._remained = remained
        self._start = start
        self._end = end
        self._prev_start = prev_start
        self._prev_end = prev_end
        self._remain_phrases = remain_phrases
        self._prob = prob
        self._prob_with_cost = prob_with_cost
        self._prev_hypo = prev_hypo
        self._cost_dict = cost_dict
        self._output_sentences = outputps
    # --- read-only accessors for the stored state ---
    @property
    def db(self):
        return self._db
    @property
    def totalnumber(self):
        return self._totalnumber
    @property
    def sentences(self):
        return self._sentences
    @property
    def ngram(self):
        return self._ngram
    @property
    def ngram_words(self):
        return self._ngram_words
    @property
    def inputps_with_index(self):
        return self._inputps_with_index
    @property
    def outputps(self):
        return self._outputps
    @property
    def transfrom(self):
        return self._transfrom
    @property
    def transto(self):
        return self._transto
    @property
    def covered(self):
        return self._covered
    @property
    def remained(self):
        return self._remained
    @property
    def start(self):
        return self._start
    @property
    def end(self):
        return self._end
    @property
    def prev_start(self):
        return self._prev_start
    @property
    def prev_end(self):
        return self._prev_end
    @property
    def remain_phrases(self):
        return self._remain_phrases
    @property
    def prob(self):
        return self._prob
    @property
    def prob_with_cost(self):
        return self._prob_with_cost
    @property
    def prev_hypo(self):
        return self._prev_hypo
    @property
    def cost_dict(self):
        return self._cost_dict
    @property
    def output_sentences(self):
        return self._output_sentences
    # NOTE: Python 2 API -- __unicode__/__str__/unicode() below do not
    # exist or behave differently on Python 3.
    def __unicode__(self):
        d = [("db", self._db),
             ("sentences", self._sentences),
             ("inputps_with_index", self._inputps_with_index),
             ("outputps", self._outputps),
             ("ngram", self._ngram),
             ("ngram_words", self._ngram_words),
             ("transfrom", self._transfrom),
             ("transto", self._transto),
             ("covered", self._covered),
             ("remained", self._remained),
             ("start", self._start),
             ("end", self._end),
             ("prev_start", self._prev_start),
             ("prev_end", self._prev_end),
             ("remain_phrases", self._remain_phrases),
             ("prob", self._prob),
             ("prob_with_cost", self._prob_with_cost),
             #("cost_dict", self._cost_dict),
             #("prev_hypo", ""),
             ]
        return u"Hypothesis Object\n" +\
            u"\n".join([u"    " + k + u": " +
                        unicode(v) for (k, v) in d])
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __hash__(self):
        # Hashing the full textual dump makes equal-looking hypotheses
        # collide intentionally (used for stack deduplication).
        return hash(unicode(self))
class Hypothesis(HypothesisBase):
    """
    Realize like the following class
    >>> args = {"sentences": sentences,
    ...         "inputps_with_index": phrase,
    ...         "outputps": outputps,
    ...         "covered": hyp0.covered.union(set(phrase)),
    ...         "remained": hyp0.remained.difference(set(phrase)),
    ...         "start": phrase[0][0],
    ...         "end": phrase[-1][0],
    ...         "prev_start": hyp0.start,
    ...         "prev_end": hyp0.end,
    ...         "remain_phrases": remain_phrases(phrase,
    ...                                          hyp0.remain_phrases),
    ...         "prev_hypo": hyp0
    ...         }
    >>> hyp1 = decode.HypothesisBase(**args)
    """
    def __init__(self,
                 prev_hypo,
                 inputps_with_index,
                 outputps,
                 ):
        # Source span covered by this expansion (1-based positions).
        start = inputps_with_index[0][0]
        end = inputps_with_index[-1][0]
        prev_start = prev_hypo.start
        prev_end = prev_hypo.end
        args = {"db": prev_hypo.db,
                "totalnumber": prev_hypo.totalnumber,
                "prev_hypo": prev_hypo,
                "sentences": prev_hypo.sentences,
                "ngram": prev_hypo.ngram,
                # set later
                "ngram_words": prev_hypo.ngram_words,
                "inputps_with_index": inputps_with_index,
                "outputps": outputps,
                "transfrom": prev_hypo.transfrom,
                "transto": prev_hypo.transto,
                "covered": prev_hypo.covered.union(set(inputps_with_index)),
                "remained": prev_hypo.remained.difference(
                    set(inputps_with_index)),
                "start": start,
                "end": end,
                "prev_start": prev_start,
                "prev_end": prev_end,
                "remain_phrases": self._calc_remain_phrases(
                    inputps_with_index,
                    prev_hypo.remain_phrases),
                "cost_dict": prev_hypo.cost_dict,
                # set later
                "prob": 0,
                "prob_with_cost": 0,
                }
        HypothesisBase.__init__(self, **args)
        # set ngram words
        self._ngram_words = self._set_ngram_words()
        # set the exact probability
        self._prob = self._cal_prob(start - prev_end)
        # set the exact probability with cost
        self._prob_with_cost = self._cal_prob_with_cost(start - prev_end)
        # set the output phrases
        self._output_sentences = prev_hypo.output_sentences + outputps
    def _set_ngram_words(self):
        """Keep only the last (len(outputps) - 1 + ngram) emitted words
        as the language-model scoring window."""
        lst = self._prev_hypo.ngram_words + list(self._outputps)
        o_len = len(self._outputps)
        return list(reversed(list(reversed(lst))[:o_len - 1 + self._ngram]))
    def _cal_phrase_prob(self):
        """Phrase-table score of this hypothesis' phrase pair;
        presumably a log-probability (unknown pairs score -100) --
        confirm against how the phrase table is populated."""
        # NOTE: zip(...)[1] is Python 2 only (zip returns a list there).
        inputp = u" ".join(zip(*self._inputps_with_index)[1])
        outputp = u" ".join(self._outputps)
        if self._transfrom == 2 and self._transto == 1:
            return phrase_prob(lang1p=outputp,
                               lang2p=inputp,
                               transfrom=self._transfrom,
                               transto=self._transto,
                               db=self._db,
                               init_val=-100)
        elif self._transfrom == 1 and self._transto == 2:
            return phrase_prob(lang1p=inputp,
                               lang2p=outputp,
                               transfrom=self._transfrom,
                               transto=self._transto,
                               db=self._db,
                               init_val=-100)
        else:
            raise Exception("specify transfrom and transto")
    def _cal_language_prob(self):
        """Sum trigram language-model scores over the n-gram window."""
        nw = self.ngram_words
        triwords = zip(nw, nw[1:], nw[2:])
        prob = 0
        for first, second, third in triwords:
            prob += language_model(first, second, third, self._totalnumber,
                                   transto=self._transto,
                                   db=self._db)
        return prob
    def _cal_prob(self, dist):
        """Accumulated score: previous + distortion + phrase + LM."""
        val = self._prev_hypo.prob +\
            self._reordering_model(0.1, dist) +\
            self._cal_phrase_prob() +\
            self._cal_language_prob()
        return val
    def _sub_cal_prob_with_cost(self, s_len, cvd):
        """Group the uncovered positions in 1..s_len into runs of
        consecutive indices (each run is a list of positions)."""
        insert_flag = False
        lst = []
        sub_lst = []
        for i in range(1, s_len+1):
            if i not in cvd:
                insert_flag = True
            else:
                insert_flag = False
                if sub_lst:
                    lst.append(sub_lst)
                    sub_lst = []
            if insert_flag:
                sub_lst.append(i)
        else:
            # flush the trailing run after the loop (for-else)
            if sub_lst:
                lst.append(sub_lst)
        return lst
    def _cal_prob_with_cost(self, dist):
        """Exact score plus the precomputed future-cost estimate of
        every still-uncovered source span."""
        s_len = len(self._sentences)
        cvd = set(i for i, val in self._covered)
        lst = self._sub_cal_prob_with_cost(s_len, cvd)
        prob = self._cal_prob(dist)
        prob_with_cost = prob
        for item in lst:
            start = item[0]
            end = item[-1]
            cost = self._cost_dict[(start, end)]
            prob_with_cost += cost
        return prob_with_cost
    def _reordering_model(self, alpha, dist):
        # Distance-based distortion penalty: |dist| * log(alpha).
        return math.log(math.pow(alpha, math.fabs(dist)))
    def _calc_remain_phrases(self, phrase, phrases):
        """
        >>> res = remain_phrases(((2, u'is'),),
                                 set([((1, u'he'),),
                                      ((2, u'is'),),
                                      ((3, u'a'),),
                                      ((2, u'is'),
                                       (3, u'a')),
                                      ((4, u'teacher'),)]))
        set([((1, u'he'),), ((3, u'a'),), ((4, u'teacher'),)])
        >>> res = remain_phrases(((2, u'is'), (3, u'a')),
                                 set([((1, u'he'),),
                                      ((2, u'is'),),
                                      ((3, u'a'),),
                                      ((2, u'is'),
                                       (3, u'a')),
                                      ((4, u'teacher'),)]))
        set([((1, u'he'),), ((4, u'teacher'),)])
        """
        s = set()
        for ph in phrases:
            for p in phrase:
                if p in ph:
                    break
            else:
                # ph shares no position with the newly covered phrase
                s.add(ph)
        return s
def create_empty_hypothesis(sentences, cost_dict,
                            ngram=3, transfrom=2, transto=1,
                            db="sqlite:///:memory:"):
    """Build the initial (empty) hypothesis for the stack decoder.

    Nothing has been translated yet: no words are covered, the output is
    empty and the n-gram context is padded with sentence boundary markers.
    """
    phrases = available_phrases(sentences,
                                db=db)
    totalnumber = _get_total_number(transto=transto, db=db)
    # every (position, word) pair of the input is still untranslated
    untranslated = set(enumerate(sentences, 1))
    return HypothesisBase(sentences=sentences,
                          db=db,
                          totalnumber=totalnumber,
                          inputps_with_index=(),
                          outputps=[],
                          ngram=ngram,
                          ngram_words=["</s>", "<s>"]*ngram,
                          transfrom=transfrom,
                          transto=transto,
                          covered=set(),
                          start=0,
                          end=0,
                          prev_start=0,
                          prev_end=0,
                          remained=untranslated,
                          remain_phrases=phrases,
                          prev_hypo=None,
                          prob=0,
                          cost_dict=cost_dict,
                          prob_with_cost=0)
class Stack(set):
    """A pruned set of decoding hypotheses sharing a coverage size.

    Tracks the lowest- and highest-scoring hypotheses so histogram
    pruning (cap the stack at ``size`` entries) and threshold pruning
    (drop hypotheses far below the best) stay cheap.
    """

    def __init__(self, size=10,
                 histogram_pruning=True,
                 threshold_pruning=False):
        set.__init__(self)
        self._min_hyp = None
        self._max_hyp = None
        self._size = size
        self._histogram_pruning = histogram_pruning
        self._threshold_pruning = threshold_pruning

    def add_hyp(self, hyp):
        """Seed an empty stack with its first hypothesis.

        Raises:
            Exception: when the stack already holds hypotheses.
        """
        if not self:
            self._min_hyp = hyp
            self._max_hyp = hyp
        else:
            raise Exception("Don't use add_hyp for nonempty stack")
        self.add(hyp)

    def _get_min_hyp(self):
        """Linear scan for the lowest-scoring hypothesis (stacks are small)."""
        mn = None
        for item in self:
            if mn is None or item.prob_with_cost < mn.prob_with_cost:
                mn = item
        return mn

    def add_with_combine_prune(self, hyp):
        """Insert ``hyp``, recombining equivalent hypotheses and pruning.

        Bug fixes relative to the original:
        * the recombination test compared ``hyp.end`` with itself and a
          score with a hypothesis *object*; it now compares the two
          hypotheses' end positions and scores,
        * the set is iterated over a snapshot so removal is safe, and
          ``hyp`` is never recombined with itself,
        * threshold pruning compared against the best hypothesis object
          instead of its ``prob_with_cost``.
        """
        prob_with_cost = hyp.prob_with_cost
        if not self:
            self._min_hyp = hyp
            self._max_hyp = hyp
        else:
            if self._min_hyp.prob_with_cost > prob_with_cost:
                self._min_hyp = hyp
            if self._max_hyp.prob_with_cost < prob_with_cost:
                self._max_hyp = hyp
        self.add(hyp)
        # recombine: two hypotheses with the same language-model state
        # and end position are indistinguishable for future expansion
        for other in list(self):
            if other is hyp:
                continue
            if (hyp.ngram_words[:-1] == other.ngram_words[:-1]
                    and hyp.end == other.end):
                if hyp.prob_with_cost > other.prob_with_cost:
                    self.remove(other)
                    break
        # histogram pruning: cap the stack size
        if self._histogram_pruning:
            if len(self) > self._size:
                self.remove(self._min_hyp)
                self._min_hyp = self._get_min_hyp()
        # threshold pruning: drop hypotheses far below the current best
        if self._threshold_pruning:
            alpha = 1.0e-5
            if hyp.prob_with_cost < self._max_hyp.prob_with_cost + math.log(alpha):
                # discard: hyp may already have been removed above
                self.discard(hyp)
def _get_total_number(transto=1, db="sqlite:///:memory:"):
    """Return the number of rows in the target-language trigram table.

    Args:
        transto: target language id (selects the ``lang{N}trigram`` table).
        db: SQLAlchemy database URL.
    """
    Trigram = Tables().get_trigram_table('lang{}trigram'.format(transto))
    # create connection in SQLAlchemy
    engine = create_engine(db)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    # let the database count the rows; the original len(list(query))
    # fetched and materialized the whole table just to count it
    return session.query(Trigram).count()
def language_model(first, second, third, totalnumber, transto=1,
                   db="sqlalchemy:///:memory:"):
    """Trigram log-probability P(third | first, second) for language ``transto``.

    Falls back to a (first, second) row when the full trigram is unseen,
    and finally to ``-log(totalnumber)`` for a completely unknown context.

    NOTE(review): the default db URL scheme "sqlalchemy://" looks like a
    typo for "sqlite://" -- confirm against the callers.
    """
    # ORM row for a full trigram with its log-probability
    class TrigramProb(declarative_base()):
        __tablename__ = 'lang{}trigramprob'.format(transto)
        id = Column(INTEGER, primary_key=True)
        first = Column(TEXT)
        second = Column(TEXT)
        third = Column(TEXT)
        prob = Column(REAL)
    # NOTE(review): this backoff class maps the SAME table name as
    # TrigramProb but with a (first, second) schema -- presumably a
    # dedicated backoff table was intended; verify the table name.
    class TrigramProbWithoutLast(declarative_base()):
        __tablename__ = 'lang{}trigramprob'.format(transto)
        id = Column(INTEGER, primary_key=True)
        first = Column(TEXT)
        second = Column(TEXT)
        prob = Column(REAL)
    # create session
    engine = create_engine(db)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # next line can raise error if the prob is not found
        query = session.query(TrigramProb).filter_by(first=first,
                                                     second=second,
                                                     third=third)
        item = query.one()
        return item.prob
    except sqlalchemy.orm.exc.NoResultFound:
        # back off to the (first, second) context
        query = session.query(TrigramProbWithoutLast
                              ).filter_by(first=first,
                                          second=second)
        # I have to modify the database
        item = query.first()
        if item:
            return item.prob
        else:
            # uniform fallback over the trigram vocabulary
            return - math.log(totalnumber)
class ArgumentNotSatisfied(Exception):
    """Raised when an argument does not satisfy a function's precondition."""
    pass
def _future_cost_estimate(sentences,
phrase_prob):
'''
warning:
pass the complete one_word_prob
'''
s_len = len(sentences)
cost = {}
one_word_prob = {(st, ed): prob for (st, ed), prob in phrase_prob.items()
if st == ed}
if set(one_word_prob.keys()) != set((x, x) for x in range(1, s_len+1)):
raise ArgumentNotSatisfied("phrase_prob doesn't satisfy the condition")
# add one word prob
for tpl, prob in one_word_prob.items():
index = tpl[0]
cost[(index, index)] = prob
for length in range(1, s_len+1):
for start in range(1, s_len-length+1):
end = start + length
try:
cost[(start, end)] = phrase_prob[(start, end)]
except KeyError:
cost[(start, end)] = -float('inf')
for i in range(start, end):
_val = cost[(start, i)] + cost[(i+1, end)]
if _val > cost[(start, end)]:
cost[(start, end)] = _val
return cost
def _create_estimate_dict(sentences,
phrase_prob,
init_val=-100):
one_word_prob_dict_nums = set(x for x, y in phrase_prob.keys() if x == y)
comp_dic = {}
# complete the one_word_prob
s_len = len(sentences)
for i in range(1, s_len+1):
if i not in one_word_prob_dict_nums:
comp_dic[(i, i)] = init_val
for key, val in phrase_prob.items():
comp_dic[key] = val
return comp_dic
def _get_total_number_for_fce(transto=1, db="sqlite:///:memory:"):
    """Unigram statistics used by the future-cost language model.

    Returns:
        dict with 'totalnumber' (number of distinct unigrams) and
        'sm' (sum of all unigram counts) for language ``transto``.
    """
    # create connection in SQLAlchemy
    engine = create_engine(db)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    Unigram = Tables().get_unigram_table('lang{}unigram'.format(transto))
    # accumulate vocabulary size and total token count in one pass
    vocabulary_size = 0
    token_count = 0
    for row in session.query(Unigram):
        vocabulary_size += 1
        token_count += row.count
    return {'totalnumber': vocabulary_size,
            'sm': token_count}
def _future_cost_langmodel(word,
                           tn,
                           transfrom=2,
                           transto=1,
                           alpha=0.00017,
                           db="sqlite:///:memory:"):
    """Unigram log-probability of ``word`` for the future-cost estimate.

    Unknown words fall back to an additively-smoothed estimate built
    from the unigram statistics in ``tn`` (as returned by
    _get_total_number_for_fce).
    """
    engine = create_engine(db)
    session = sessionmaker(bind=engine)()
    table = Tables().get_unigramprob_table(
        "lang{}unigramprob".format(transto))
    lookup = session.query(table).filter_by(first=word)
    try:
        return lookup.one().prob
    except sqlalchemy.orm.exc.NoResultFound:
        # unseen word: log(alpha / (sm + alpha * totalnumber))
        return math.log(alpha) - math.log(tn['sm'] + alpha * tn['totalnumber'])
def future_cost_estimate(sentences,
                         transfrom=2,
                         transto=1,
                         init_val=-100.0,
                         db="sqlite:///:memory:"):
    """Estimate the future translation cost of every input span.

    For each available phrase the best translation probability (plus a
    unigram language-model term in the 2 -> 1 direction) is collected,
    the table is completed with default values and combined by dynamic
    programming in _future_cost_estimate.

    Fixes relative to the original: ``zip(...)[i]`` subscripts wrapped
    in ``list()`` (zip is an iterator on Python 3), leftover debug
    prints removed, and an explicit error for an invalid direction
    instead of a NameError on ``query``.
    """
    engine = create_engine(db)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    phrases = available_phrases(sentences,
                                db=db)
    tn = _get_total_number_for_fce(transto=transto, db=db)
    covered = {}
    for phrase in phrases:
        # phrase is a tuple of (index, word) pairs
        indices, words = list(zip(*phrase))
        phrase_str = u" ".join(words)
        if transfrom == 2 and transto == 1:
            query = session.query(TransPhraseProb).filter_by(
                lang2p=phrase_str).order_by(
                    sqlalchemy.desc(TransPhraseProb.p2_1))
        elif transfrom == 1 and transto == 2:
            query = session.query(TransPhraseProb).filter_by(
                lang1p=phrase_str).order_by(
                    sqlalchemy.desc(TransPhraseProb.p1_2))
        else:
            raise Exception("specify transfrom and transto")
        # best-scoring translation of this phrase, if any
        val = query.first()
        if val is not None:
            start = indices[0]
            end = indices[-1]
            pos = (start, end)
            if transfrom == 2 and transto == 1:
                # add a unigram LM term for the first output word
                fcl = _future_cost_langmodel(word=val.lang1p.split()[0],
                                             tn=tn,
                                             transfrom=transfrom,
                                             transto=transto,
                                             alpha=0.00017,
                                             db=db)
                covered[pos] = val.p2_1 + fcl
            if transfrom == 1 and transto == 2:
                covered[pos] = val.p1_2
    # complete the table and run the dynamic-programming estimate
    phrase_prob = _create_estimate_dict(sentences, covered)
    return _future_cost_estimate(sentences,
                                 phrase_prob)
def stack_decoder(sentence, transfrom=2, transto=1,
                  stacksize=10,
                  searchsize=10,
                  lang1method=lambda x: x,
                  lang2method=lambda x: x,
                  db="sqlite:///:memory:",
                  verbose=False):
    """Translate ``sentence`` with phrase-based stack decoding.

    Hypotheses are grouped into stacks by number of covered input words;
    each hypothesis is expanded with up to ``searchsize`` translations of
    every remaining phrase, then recombined and pruned.

    Args:
        sentence: input sentence (untokenized).
        transfrom/transto: language ids giving the translation direction.
        stacksize: histogram-pruning capacity of each stack.
        searchsize: candidate translations tried per phrase.
        lang1method/lang2method: per-language text normalizers.
        db: SQLAlchemy database URL with the phrase/ngram tables.
        verbose: print every expansion.

    Returns:
        The list of stacks; completed hypotheses end up in the last one.
    """
    # create session
    engine = create_engine(db)
    Session = sessionmaker(bind=engine)
    session = Session()
    # tokenize with the source-language normalizer
    if transfrom == 2 and transto == 1:
        sentences = lang2method(sentence).split()
    else:
        sentences = lang1method(sentence).split()
    # one stack per number of covered input words (0..N)
    len_sentences = len(sentences)
    stacks = [Stack(size=stacksize,
                    histogram_pruning=True,
                    threshold_pruning=False,
                    ) for i in range(len_sentences+1)]
    cost_dict = future_cost_estimate(sentences,
                                     transfrom=transfrom,
                                     transto=transto,
                                     db=db)
    # create the initial hypothesis
    # bug fix: forward the requested direction; the original passed
    # hard-coded transfrom=2, transto=1 regardless of the arguments
    hyp0 = create_empty_hypothesis(sentences=sentences,
                                   cost_dict=cost_dict,
                                   ngram=3,
                                   transfrom=transfrom,
                                   transto=transto,
                                   db=db)
    stacks[0].add_hyp(hyp0)
    # main loop: expand every hypothesis with every remaining phrase
    for i, stack in enumerate(stacks):
        for hyp in stack:
            for phrase in hyp.remain_phrases:
                # list(...): zip() is not subscriptable on Python 3
                phrase_str = u" ".join(list(zip(*phrase))[1])
                if transfrom == 2 and transto == 1:
                    query = session.query(TransPhraseProb).filter_by(
                        lang2p=phrase_str).order_by(
                        sqlalchemy.desc(TransPhraseProb.p2_1))[:searchsize]
                elif transfrom == 1 and transto == 2:
                    query = session.query(TransPhraseProb).filter_by(
                        lang1p=phrase_str).order_by(
                        sqlalchemy.desc(TransPhraseProb.p1_2))[:searchsize]
                else:
                    raise Exception("specify transfrom and transto")
                for item in list(query):
                    if transfrom == 2 and transto == 1:
                        outputps = lang1method(item.lang1p).split()
                    else:
                        outputps = lang2method(item.lang2p).split()
                    # place in stack
                    # and recombine with existing hypothesis if possible
                    new_hyp = Hypothesis(prev_hypo=hyp,
                                         inputps_with_index=phrase,
                                         outputps=outputps)
                    if verbose:
                        print(phrase, u' '.join(outputps))
                        print("loop: ", i, "len:", len(new_hyp.covered))
                    stacks[len(new_hyp.covered)].add_with_combine_prune(
                        new_hyp)
    return stacks
if __name__ == '__main__':
    # Doctest hook kept for reference; the module is meant to be imported.
    #import doctest
    #doctest.testmod()
    pass
| 32.444172 | 79 | 0.514182 |
6ebe31c36f436e8ae484085447aaa84df7e9db45 | 6,711 | py | Python | amoebaelib/generate_histogram_plot.py | laelbarlow/amoebae | 3c6607bcb64a60baee2f19f0a25e14b325e9725d | [
"Apache-2.0"
] | 8 | 2020-07-16T21:36:38.000Z | 2021-11-28T08:32:05.000Z | amoebaelib/generate_histogram_plot.py | laelbarlow/amoebae | 3c6607bcb64a60baee2f19f0a25e14b325e9725d | [
"Apache-2.0"
] | null | null | null | amoebaelib/generate_histogram_plot.py | laelbarlow/amoebae | 3c6607bcb64a60baee2f19f0a25e14b325e9725d | [
"Apache-2.0"
] | 1 | 2020-07-31T21:21:15.000Z | 2020-07-31T21:21:15.000Z | #!/usr/bin/env python3
# Copyright 2018 Lael D. Barlow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains functions for generating histograms.
"""
import sys
import os
import re
import pylab
import subprocess
import numpy as np
def generate_histogram(title,
                       values,
                       num_bins,
                       output_filename
                       ):
    """Render a single histogram of ``values`` and save it to a file.

    Args:
        title: figure title.
        values: sequence of numeric values to bin.
        num_bins: number of histogram bins.
        output_filename: output path; the extension (.pdf/.png) selects
            the image format.
    """
    # Make histogram of scores.
    pylab.hist(values, bins=num_bins) #specify the number of bins for the histogram
    pylab.title(title)
    pylab.xlabel("Value")
    pylab.ylabel("Number of values in bin")
    #pylab.show() #can do this instead of the savefig method if just want to view
    pylab.savefig(output_filename) #works for pdf or png
    # close the figure so repeated calls do not accumulate plots
    pylab.close()
def generate_double_histogram(title,
                              values1,
                              label1,
                              values2,
                              label2,
                              num_bins,
                              output_filename
                              ):
    """Render two value lists as side-by-side histograms in one figure.

    Args:
        title: figure title.
        values1, values2: the two sequences of numeric values.
        label1, label2: legend labels for the two series.
        num_bins: number of histogram bins (shared by both series).
        output_filename: output path (.pdf or .png).
    """
    pylab.style.use('seaborn-deep')
    # Make histogram of scores.
    #pylab.hist(values1, bins=num_bins, label=label1) #specify the number of bins for the histogram
    #pylab.hist(values2, bins=num_bins, label=label2) #specify the number of bins for the histogram
    # passing both series at once lets pylab interleave the bars per bin
    pylab.hist([values1, values2], bins=num_bins, label=[label1, label2]) #specify the number of bins for the histogram
    pylab.title(title)
    pylab.xlabel("Value")
    pylab.ylabel("Number of values in bin")
    pylab.legend(loc='upper right')
    #pylab.show() #can do this instead of the savefig method if just want to view
    pylab.savefig(output_filename) #works for pdf or png
    # close the figure so repeated calls do not accumulate plots
    pylab.close()
def autolabel_bars(rects, ax):
    """Attach a text label above each bar in *rects*, displaying its height.

    Args:
        rects: bar container returned by ``ax.bar``.
        ax: the axes the bars were drawn on.
    """
    for rect in rects:
        height = rect.get_height()
        # center the label horizontally on the bar, 3 points above its top
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
def generate_bar_chart(title,
                       categories,
                       labels,
                       num_hits,
                       output_filename
                       ):
    """Render a grouped bar chart of sequence counts and save it.

    Args:
        title: figure title.
        categories: one name per data series (e.g. ['Prot', 'Nucl']).
        labels: one name per x-axis group (e.g. analysis stages).
        num_hits: one list per category, each with a count per label;
            requires len(num_hits) == len(labels) and every sublist of
            length len(categories) (the asserts below enforce this).
        output_filename: output path (.pdf or .png).

    NOTE(review): the alternating x - width/2 / x + width/2 placement
    below only produces a correct grouped layout for exactly two
    categories -- confirm before adding more.
    """
    ## Simple bar chart.
    #fig, ax = pylab.subplots()
    #pylab.style.use('seaborn-deep')
    #pylab.rcdefaults()
    #fig, ax = pylab.subplots()
    # Example data
    #x_pos = np.arange(len(labels))
    #ax.barh(y_pos, performance, xerr=error, align='center')
    #ax.bar(x_pos, values, align='center')
    #ax.set_xticks(x_pos)
    #ax.set_xticklabels(labels)
    #ax.set_ylabel('Positive hit count')
    #ax.set_title(title)
    #pylab.show()
    #pylab.close()
    pylab.style.use('seaborn-deep')
    #categories = ['Prot', 'Nucl']
    #labels = ['Non-redundant', 'Final positive']
    #num_hits = [[35, 30],
    #            [12, 6]]
    assert len(labels) == len(num_hits)
    for sublist in num_hits:
        assert len(sublist) == len(categories)
    x = np.arange(len(labels))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = pylab.subplots()
    rects_list = []
    num = 0
    # draw odd-numbered series left of the tick, even-numbered right
    for category, sublist in zip(categories, num_hits):
        num += 1
        if not (num % 2) == 0:
            rects = ax.bar(x - width/2, sublist, width, label=category)
            rects_list.append(rects)
        else:
            rects = ax.bar(x + width/2, sublist, width, label=category)
            rects_list.append(rects)
    # Add numbers to label individual bars.
    for r in rects_list:
        autolabel_bars(r, ax)
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Number of sequences')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()
    fig.tight_layout()
    #pylab.show()
    pylab.savefig(output_filename) #works for pdf or png
    pylab.close()
if __name__ == '__main__':
    # Manual smoke test: render an example bar chart, open it in the
    # default viewer, then delete it.  The commented-out sections show
    # the equivalent demo for generate_histogram and an older bar-chart
    # call signature, kept for reference.
    # Generate example plot.
    ## Define title for plot.
    #title = "Histogram of random values in 30 bins"
    ## Define input data for example plot.
    #mu, sigma = 0, 0.1 # mean and standard deviation
    #s = np.random.normal(mu, sigma, 1000)
    ## Define output filepath.
    #output_filepath = 'test_histogram_plot.pdf'
    ## Call function to generate plot.
    #generate_histogram(title,
    #                   s,
    #                   30,
    #                   output_filepath
    #                   )
    ## Open output file.
    #subprocess.call(['open', output_filepath])
    ## Delete output file.
    #os.remove(output_filepath)
    #
    #title = 'test bar chart'
    #values = [20, 10]
    #labels = ['prot', 'nucl']
    #output_filename = 'test_bar_chart.pdf'
    #generate_bar_chart(title,
    #                   values,
    #                   labels,
    #                   output_filename
    #                   )
    ## Open output file.
    #subprocess.call(['open', output_filepath])
    ## Delete output file.
    #os.remove(output_filepath)
    # Test bar chart.
    title = 'test bar chart'
    categories = ['Prot', 'Nucl']
    labels = ['Non-redundant', 'Final positive']
    num_hits = [[35, 30],
                [12, 6]]
    output_filename = 'test_bar_chart.pdf'
    generate_bar_chart(title,
                       categories,
                       labels,
                       num_hits,
                       output_filename
                       )
    # Open output file.
    # NOTE(review): 'open' is the macOS opener; this step fails on
    # Linux/Windows -- confirm the intended platform.
    subprocess.call(['open', output_filename])
    # Delete output file.
    os.remove(output_filename)
| 31.069444 | 119 | 0.58203 |
6ebfe39e2bd770fe4ca5a9e9da4a3af5efa9f58a | 12,838 | py | Python | fast_segmentation/core/evaluate.py | eilonshi/tevel-segmentation | bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1 | [
"MIT"
] | null | null | null | fast_segmentation/core/evaluate.py | eilonshi/tevel-segmentation | bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1 | [
"MIT"
] | null | null | null | fast_segmentation/core/evaluate.py | eilonshi/tevel-segmentation | bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import logging
import argparse
import math
import yaml
from tabulate import tabulate
from torch.utils.data import Dataset
from tqdm import tqdm
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.distributed as dist
from fast_segmentation.core.utils import get_next_file_name, delete_directory_content
from fast_segmentation.model_components.architectures import model_factory
from fast_segmentation.model_components.data_cv2 import get_data_loader
from fast_segmentation.model_components.logger import setup_logger
from fast_segmentation.core.consts import IGNORE_LABEL, NUM_CLASSES, BAD_IOU
from fast_segmentation.visualization.visualize import save_labels_mask_with_legend
def parse_args():
    """Build and run the command-line parser for evaluation.

    Returns:
        argparse.Namespace with the evaluation settings (paths, model
        name, distributed rank and port).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=-1)
    parser.add_argument('--weight-path', dest='weight_pth', type=str,
                        default='/home/bina/PycharmProjects/fast-segmentation/models/8/best_model.pth')
    parser.add_argument('--im_root', type=str, default='/home/bina/PycharmProjects/fast-segmentation/data')
    parser.add_argument('--val_im_anns', type=str,
                        default='/home/bina/PycharmProjects/fast-segmentation/data/val.txt')
    parser.add_argument('--false_analysis_path', type=str,
                        default='/home/bina/PycharmProjects/fast-segmentation/data/false_analysis')
    parser.add_argument('--log_path', type=str,
                        default='/home/bina/PycharmProjects/fast-segmentation/logs/regular_logs')
    parser.add_argument('--port', type=int, default=44553)
    parser.add_argument('--model', type=str, default='bisenetv2')
    parser.add_argument('--config_path', type=str,
                        default='/home/bina/PycharmProjects/fast-segmentation/configs/main_cfg.yaml')
    return parser.parse_args()
class MscEvalV0(object):
    """Whole-image multi-scale mIOU evaluator.

    For every scale the batch is resized, forwarded through the network
    (optionally horizontally flipped), and the softmax probabilities are
    accumulated at label resolution before taking the argmax.
    """
    def __init__(self, scales=(1.,), flip=False, ignore_label=IGNORE_LABEL):
        # scales: resize factors evaluated and averaged
        self.scales = scales
        # flip: also evaluate a horizontally flipped pass
        self.flip = flip
        # label id excluded from the confusion matrix
        self.ignore_label = ignore_label
    def __call__(self, net: nn.Module, data_loader, num_classes):
        """Return the mean IOU of ``net`` over ``data_loader``."""
        # evaluate
        hist = torch.zeros(num_classes, num_classes).cuda().detach()
        # only rank 0 shows a progress bar in distributed runs
        if dist.is_initialized() and dist.get_rank() != 0:
            d_iter = enumerate(data_loader)
        else:
            d_iter = enumerate(tqdm(data_loader))
        for i, (imgs, labels) in d_iter:
            n, _, h, w = labels.shape
            labels = labels.squeeze(1).cuda()
            size = labels.size()[-2:]
            probs = torch.zeros((n, num_classes, h, w), dtype=torch.float32).cuda().detach()
            for scale in self.scales:
                s_h, s_w = int(scale * h), int(scale * w)
                im_sc = functional.interpolate(imgs, size=(s_h, s_w), mode='bilinear', align_corners=True)
                im_sc = im_sc.cuda()
                if self.flip:
                    im_sc = torch.flip(im_sc, dims=(3,))
                logits = net(im_sc)[0]
                if self.flip:
                    # flip the logits back so they align with the labels
                    logits = torch.flip(logits, dims=(3,))
                logits = functional.interpolate(logits, size=size, mode='bilinear', align_corners=True)
                probs += torch.softmax(logits, dim=1)
            # calc histogram of the predictions in each class
            preds = torch.argmax(probs, dim=1)
            relevant_labels = labels != self.ignore_label
            # confusion matrix via bincount of label * C + prediction
            hist += torch.bincount(labels[relevant_labels] * num_classes + preds[relevant_labels],
                                   minlength=num_classes ** 2).view(num_classes, num_classes)
        if dist.is_initialized():
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        # diagonal = intersection; row + column sums - diagonal = union
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag() + 1e-6)
        ious[ious != ious] = 0  # replace nan with zero
        miou = ious.mean()
        return miou.item()
class MscEvalCrop(object):
    """Multi-scale sliding-crop mIOU evaluator.

    Each image is resized to several scales, split into overlapping
    crops, and the class probabilities of all crops (optionally flipped)
    are summed before the argmax.  Batches whose own mIOU falls below
    BAD_IOU are dumped via save_in_false_analysis for inspection.
    """

    def __init__(self, crop_size: Tuple[int, int], crop_stride: float, false_analysis_path: str, flip: bool = True,
                 scales: Tuple = (0.5, 0.75, 1, 1.25, 1.5, 1.75), label_ignore: int = IGNORE_LABEL):
        self.scales = scales
        self.ignore_label = label_ignore
        self.flip = flip
        self.distributed = dist.is_initialized()
        # accept either a single int or an (h, w) pair
        self.crop_size = crop_size if isinstance(crop_size, (list, tuple)) else (crop_size, crop_size)
        self.crop_stride = crop_stride
        self.false_analysis_path = false_analysis_path

    def pad_tensor(self, in_tensor: torch.Tensor):
        """Zero-pad ``in_tensor`` up to the crop size when it is smaller.

        Returns:
            The (possibly padded) tensor and the [hst, hed, wst, wed]
            indices of the original content inside it.
        """
        n, c, h, w = in_tensor.size()
        crop_h, crop_w = self.crop_size
        if crop_h < h and crop_w < w:
            # already larger than a crop in both dimensions: no padding
            return in_tensor, [0, h, 0, w]
        pad_h, pad_w = max(crop_h, h), max(crop_w, w)
        out_tensor = torch.zeros(n, c, pad_h, pad_w).cuda()
        out_tensor.requires_grad_(False)
        # center the original content inside the padded tensor
        margin_h, margin_w = pad_h - h, pad_w - w
        hst, hed = margin_h // 2, margin_h // 2 + h
        wst, wed = margin_w // 2, margin_w // 2 + w
        out_tensor[:, :, hst:hed, wst:wed] = in_tensor
        return out_tensor, [hst, hed, wst, wed]

    def eval_chip(self, net: nn.Module, crop: torch.Tensor):
        """Class probabilities for a single crop (plus a flipped pass)."""
        prob = net(crop)[0].softmax(dim=1)
        if self.flip:
            crop = torch.flip(crop, dims=(3,))
            prob += net(crop)[0].flip(dims=(3,)).softmax(dim=1)
        # NOTE(review): exponentiating softmax output is unusual; kept
        # as-is because changing it would alter the accumulated scores
        prob = torch.exp(prob)
        return prob

    def crop_eval(self, net: nn.Module, im: torch.Tensor, n_classes: int):
        """Accumulate probabilities over overlapping crops of one image."""
        crop_h, crop_w = self.crop_size
        stride_rate = self.crop_stride
        im, indices = self.pad_tensor(im)
        n, c, h, w = im.size()
        stride_h = math.ceil(crop_h * stride_rate)
        stride_w = math.ceil(crop_w * stride_rate)
        # number of crop positions needed to cover the image
        n_h = math.ceil((h - crop_h) / stride_h) + 1
        n_w = math.ceil((w - crop_w) / stride_w) + 1
        prob = torch.zeros(n, n_classes, h, w).cuda()
        prob.requires_grad_(False)
        for i in range(n_h):
            for j in range(n_w):
                st_h, st_w = stride_h * i, stride_w * j
                # clamp the last crop to the image border
                end_h, end_w = min(h, st_h + crop_h), min(w, st_w + crop_w)
                st_h, st_w = end_h - crop_h, end_w - crop_w
                chip = im[:, :, st_h:end_h, st_w:end_w]
                prob[:, :, st_h:end_h, st_w:end_w] += self.eval_chip(net, chip)
        # cut the padding away again
        hst, hed, wst, wed = indices
        prob = prob[:, :, hst:hed, wst:wed]
        return prob

    def scale_crop_eval(self, net: nn.Module, im: torch.Tensor, scale: Tuple, n_classes: int):
        """Crop-evaluate at one scale and resize back to the input size."""
        n, c, h, w = im.size()
        new_hw = [int(h * scale), int(w * scale)]
        im = functional.interpolate(im, new_hw, mode='bilinear', align_corners=True)
        prob = self.crop_eval(net, im, n_classes)
        prob = functional.interpolate(prob, (h, w), mode='bilinear', align_corners=True)
        return prob

    @torch.no_grad()
    def __call__(self, net: nn.Module, dl: Dataset, n_classes: int):
        """Return the mean IOU of ``net`` over data loader ``dl``."""
        # only rank 0 shows a progress bar in distributed runs
        data_loader = dl if self.distributed and not dist.get_rank() == 0 else tqdm(dl)
        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        hist.requires_grad_(False)
        for i, (images, labels) in enumerate(data_loader):
            images = images.cuda()
            labels = labels.squeeze(1).cuda()
            n, h, w = labels.shape
            probs = torch.zeros((n, n_classes, h, w)).cuda()
            probs.requires_grad_(False)
            for sc in self.scales:
                probs += self.scale_crop_eval(net, images, sc, n_classes)
            torch.cuda.empty_cache()
            preds = torch.argmax(probs, dim=1)
            keep = labels != self.ignore_label
            # per-batch confusion matrix
            cur_hist = torch.zeros(n_classes, n_classes).cuda().detach()
            bin_count = torch.bincount(labels[keep] * n_classes + preds[keep], minlength=n_classes ** 2). \
                view(n_classes, n_classes)
            cur_hist += bin_count
            # bug fix: the per-batch mIOU must come from this batch's
            # confusion matrix (cur_hist); the original used the global
            # `hist`, which is still all zeros on the first batch and
            # lags one batch behind afterwards, so the BAD_IOU dump
            # triggered on the wrong batches
            cur_miou = cur_hist.diag() / (cur_hist.sum(dim=0) + cur_hist.sum(dim=1) - cur_hist.diag())
            cur_miou[cur_miou != cur_miou] = 0  # replace nan with zero
            cur_miou = cur_miou.mean()
            if cur_miou < BAD_IOU:
                save_in_false_analysis(preds=preds, labels=labels, path=self.false_analysis_path)
            hist += bin_count
        if self.distributed:
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        ious[ious != ious] = 0  # replace nan with zero
        miou = ious.mean()
        return miou.item()
def save_in_false_analysis(preds: torch.Tensor, labels: torch.Tensor, path: str):
    """Dump prediction/label mask image pairs of a poorly-scoring batch.

    NOTE(review): the target directory is cleared on every call, so only
    the most recent bad batch survives -- confirm this is intended.
    """
    delete_directory_content(path)
    for i, (pred, label) in enumerate(zip(preds, labels)):
        # move the masks to host memory for plotting
        pred = pred.detach().cpu().numpy()
        label = label.detach().cpu().numpy()
        # next free file names with matching indices for pred/label pairs
        label_path = get_next_file_name(root_dir=path, prefix='label', suffix='.jpg')
        pred_path = get_next_file_name(root_dir=path, prefix='pred', suffix='.jpg')
        save_labels_mask_with_legend(mask=pred, save_path=pred_path)
        save_labels_mask_with_legend(mask=label, save_path=label_path)
@torch.no_grad()
def eval_model(net: nn.Module, ims_per_gpu: int, crop_size: Tuple[int, int], im_root: str, im_anns: str,
               false_analysis_path: str) -> Tuple[List[str], List[float]]:
    """Evaluate ``net`` with four protocols of increasing thoroughness.

    Runs single-scale, single-scale-crop, multi-scale-flip and
    multi-scale-flip-crop mIOU evaluation on the validation set.

    Returns:
        (heads, mious): the protocol names and their mIOU values, in order.
    """
    is_dist = dist.is_initialized()
    dl = get_data_loader(data_path=im_root, ann_path=im_anns, ims_per_gpu=ims_per_gpu, crop_size=crop_size, mode='val',
                         distributed=is_dist)
    net.eval()
    heads, mious = [], []
    logger = logging.getLogger()
    # 1) whole image, original scale only
    single_scale = MscEvalV0((1.,), False)
    miou = single_scale(net, dl, NUM_CLASSES)
    heads.append('single_scale')
    mious.append(miou)
    logger.info('single mIOU is: %s\n', miou)
    # 2) sliding crops, original scale only
    single_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=False, scales=(1.,),
                              label_ignore=IGNORE_LABEL, false_analysis_path=false_analysis_path)
    miou = single_crop(net, dl, NUM_CLASSES)
    heads.append('single_scale_crop')
    mious.append(miou)
    logger.info('single scale crop mIOU is: %s\n', miou)
    # 3) whole image, six scales plus horizontal flip
    ms_flip = MscEvalV0((0.5, 0.75, 1, 1.25, 1.5, 1.75), True)
    miou = ms_flip(net, dl, NUM_CLASSES)
    heads.append('ms_flip')
    mious.append(miou)
    logger.info('ms flip mIOU is: %s\n', miou)
    # 4) sliding crops, six scales plus horizontal flip
    ms_flip_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=True,
                               scales=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75), label_ignore=IGNORE_LABEL,
                               false_analysis_path=false_analysis_path)
    miou = ms_flip_crop(net, dl, NUM_CLASSES)
    heads.append('ms_flip_crop')
    mious.append(miou)
    logger.info('ms crop mIOU is: %s\n', miou)
    return heads, mious
def evaluate(ims_per_gpu: int, crop_size: Tuple[int, int], weight_pth: str, model_type: str, im_root: str,
             val_im_anns: str, false_analysis_path: str):
    """Restore a trained model from disk and log its validation mIOU table.

    Args:
        ims_per_gpu: validation batch size per device.
        crop_size: (h, w) crop used by the crop-based evaluators.
        weight_pth: path of the saved state dict.
        model_type: key into model_factory selecting the architecture.
        im_root / val_im_anns: dataset root and annotation list file.
        false_analysis_path: directory for bad-batch dumps.
    """
    logger = logging.getLogger()
    # model
    logger.info('setup and restore model')
    net = model_factory[model_type](NUM_CLASSES)
    net.load_state_dict(torch.load(weight_pth))
    net.cuda()
    is_dist = dist.is_initialized()
    if is_dist:
        # wrap for distributed evaluation on the local device
        local_rank = dist.get_rank()
        net = nn.parallel.DistributedDataParallel(net, device_ids=[local_rank, ], output_device=local_rank)
    # evaluator
    heads, mious = eval_model(net=net, ims_per_gpu=ims_per_gpu, im_root=im_root, im_anns=val_im_anns,
                              false_analysis_path=false_analysis_path, crop_size=crop_size)
    # org-mode table: one column per evaluation protocol
    logger.info(tabulate([mious], headers=heads, tablefmt='orgtbl'))
if __name__ == "__main__":
    # Entry point: parse CLI args, load the YAML config, optionally join
    # a distributed process group, set up logging and run the evaluation.
    args = parse_args()
    with open(args.config_path) as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    # local_rank != -1 means we were launched by a distributed launcher
    if not args.local_rank == -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl',
                                init_method='tcp://127.0.0.1:{}'.format(args.port),
                                world_size=torch.cuda.device_count(),
                                rank=args.local_rank
                                )
    if not osp.exists(args.log_path):
        os.makedirs(args.log_path)
    setup_logger('{}-eval'.format(args.model), args.log_path)
    evaluate(ims_per_gpu=cfg['ims_per_gpu'], crop_size=cfg['crop_size'], weight_pth=args.weight_pth,
             model_type=args.model, im_root=args.im_root, val_im_anns=args.val_im_anns,
             false_analysis_path=args.false_analysis_path)
| 38.668675 | 119 | 0.621436 |
6ec38b237400c36dd79206a7de24521d924c26b5 | 940 | py | Python | talent/urls.py | flannerykj/urbanapplause | c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5 | [
"MIT"
] | null | null | null | talent/urls.py | flannerykj/urbanapplause | c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5 | [
"MIT"
] | null | null | null | talent/urls.py | flannerykj/urbanapplause | c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
app_name = 'talent'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^musicians/$', views.MusicianIndex.as_view(), name='musicians'),
url(r'^musicians/(?P<pk>[0-9]+)/$', views.MusicianDetail.as_view(), name='musician-detail'),
url(r'^musicians/(?P<pk>\d+)/edit/$', views.UpdateMusician.as_view(), name='musician-edit'),
url(r'^musicians/create/$', views.MusicianCreate.as_view(), name='musician-create'),
url(r'^musicians/(?P<pk>\d+)/delete/$', views.DeleteTalent.as_view(), name='musician-delete'),
url(r'^artists/$', views.ArtistIndex.as_view(), name='artists'),
url(r'^artists/(?P<pk>[0-9]+)/$', views.ArtistDetail.as_view(), name='artist-detail'),
url(r'^artists/(?P<pk>\d+)/edit/$', views.UpdateArtist.as_view(), name='artist-edit'),
url(r'^artists/create/$', views.ArtistCreate.as_view(), name='artist-create'),
] | 55.294118 | 98 | 0.659574 |
6ec48fdaa8687076f0c91144e66c1442f92aee33 | 402 | py | Python | examples/modular_bot/bot.py | fuzzysearch404/discord-ext-modules | f97406dedee6cd8862bcc1f15ff994dde79f4c90 | [
"MIT"
] | null | null | null | examples/modular_bot/bot.py | fuzzysearch404/discord-ext-modules | f97406dedee6cd8862bcc1f15ff994dde79f4c90 | [
"MIT"
] | null | null | null | examples/modular_bot/bot.py | fuzzysearch404/discord-ext-modules | f97406dedee6cd8862bcc1f15ff994dde79f4c90 | [
"MIT"
] | null | null | null | import discord
from discord.ext.modules import ModularCommandClient
if __name__ == "__main__":
    # Example bot: no gateway intents requested beyond the minimum
    client = ModularCommandClient(intents=discord.Intents.none())

    @client.event
    async def on_ready():
        # fires once the gateway connection is established
        print("Logged on as {0}!".format(client.user))

    # load the example command modules before connecting
    client.load_extension("commands.hello_module")
    client.load_extension("commands.advanced_module")
    # placeholder token; replace with a real bot token to run
    client.run("your_bot_token")
| 26.8 | 65 | 0.731343 |
6ec4b74ab4db122d30b5b736d0e106eb22df1a59 | 11,282 | py | Python | plazma.py | caos21/Grodi | 3ae09f9283f3e1afdd641943e2244afc78511053 | [
"Apache-2.0"
] | 2 | 2019-12-11T16:19:46.000Z | 2020-08-19T20:14:18.000Z | plazma.py | caos21/Grodi | 3ae09f9283f3e1afdd641943e2244afc78511053 | [
"Apache-2.0"
] | null | null | null | plazma.py | caos21/Grodi | 3ae09f9283f3e1afdd641943e2244afc78511053 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Benjamin Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
""" This module contains the classes functions and helpers to compute
the plasma.
"""
__author__ = "Benjamin Santos"
__copyright__ = "Copyright 2019"
__credits__ = ["Benjamin Santos"]
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "Benjamin Santos"
__email__ = "caos21@gmail.com"
__status__ = "Beta"
from collections import namedtuple
import numpy as np
import scipy.constants as const
import trazar as tzr
# Frequently used physical constants (SI units, from scipy.constants)
PI = const.pi
KE = 1.0/(4.0*PI*const.epsilon_0)  # Coulomb constant 1/(4*pi*eps0)
INVKE = 1.0/KE  # 4*pi*eps0
KB = const.Boltzmann  # Boltzmann constant [J/K]
QE = const.elementary_charge  # elementary charge [C]
ME = const.electron_mass  # electron mass [kg]

# Immutable description of the plasma reactor and gas mixture used to
# construct a PlasmaChem instance (fields are consumed in __init__).
PlasmaSystem = namedtuple('System',
                          ['length',
                           'radius',
                           'temperature',
                           'ion_temperature',
                           'pressure_torr',
                           'arsih4_ratio',
                           'armass',
                           'sih4mass',
                           'power',
                           'with_tunnel'])
def constant_rate(energy, avar, bvar, cvar):
    """Energy-independent rate: an array shaped like ``energy`` filled
    with ``avar`` (``bvar``/``cvar`` are ignored, kept only so every
    rate formula shares the same signature)."""
    uniform = np.ones_like(energy)
    return avar * uniform
def arrhenius_rate(energy, avar, bvar, cvar):
    """Modified Arrhenius rate: ``avar * energy**cvar * exp(-bvar/energy)``."""
    power_term = np.power(energy, cvar)
    exponential_term = np.exp(-bvar/energy)
    return avar * power_term * exponential_term
def a1expb_rate(energy, avar, bvar, cvar):
    """Saturating rate ``avar * (1 - exp(-bvar*energy))``; ``cvar`` is
    unused and kept only for the shared rate-function signature."""
    saturation = 1.0 - np.exp(-bvar*energy)
    return avar * saturation
class RateSpec:
    """A single reaction-rate specification.

    Bundles a rate formula with its coefficients (a, b, c) and a display
    name, so the rate can be evaluated by calling the instance directly.
    """
    def __init__(self, rate_function=None, avar=0.0, bvar=0.0, cvar=0.0, name=""):
        # public attribute names are part of the interface: RatesMap and
        # the plotting code read them
        self.rate_function = rate_function
        self.avar = avar
        self.bvar = bvar
        self.cvar = cvar
        self.name = name

    def __call__(self, energy):
        """Evaluate the rate formula at the given mean electron energy."""
        formula = self.rate_function
        return formula(energy, self.avar, self.bvar, self.cvar)
class RatesMap:
    """Builds RateSpec objects from a plain dictionary description.

    Each entry of ``rates_dict`` maps a reaction name to a tuple
    ``(formula_name, a, b, c)`` where ``formula_name`` is one of
    "a1expb", "arrhenius" or "constant".
    """
    def __init__(self, rates_dict):
        """Store the raw descriptions; RateSpecs are built on demand."""
        self.rates_dict = rates_dict
        self.rates_map = dict()

    def get_ratesmap(self):
        """Instantiate a RateSpec per recognized formula name and return
        the name -> RateSpec mapping (entries with unknown formula
        names are skipped, as before)."""
        formulas = {"a1expb": a1expb_rate,
                    "arrhenius": arrhenius_rate,
                    "constant": constant_rate}
        for name, spec in self.rates_dict.items():
            rate_function = formulas.get(spec[0])
            if rate_function is not None:
                self.rates_map[name] = RateSpec(rate_function, spec[1],
                                                spec[2], spec[3], name)
        return self.rates_map

    def plot_rates(self, energy, savename="figx.eps"):
        """Plot every stored rate against energy and save the figure."""
        specs = list(self.rates_map.values())
        rates = [spec(energy) for spec in specs]
        labels = [spec.name for spec in specs]
        tzr.plot_plain(energy, rates, title="Rates",
                       axislabel=["Time (s)", r"Rate coefficient (m$^{3}$s$^{-1}$)"],
                       logx=False, logy=True, labels=labels,
                       ylim=[1e-18, 1e-12], savename=savename)
class PlasmaChem():
    """ Plasma model
    """
    def __init__(self, rates_map, plasmasystem):
        # Reaction-rate coefficients (name -> RateSpec) and reactor settings.
        self.rates_map = rates_map
        self.plasmasystem = plasmasystem
        self.electron_density = 1.0
        # Nanoparticle charge density and its rate; zero unless set externally.
        self.nano_qdens = 0.0
        self.nano_qdens_rate = 0.0
        # Thermal energies kB*T for the gas and the ions [J].
        self.kbtg = KB * self.plasmasystem.temperature
        self.ion_kbtg = KB * self.plasmasystem.ion_temperature
        # Pressure converted from Torr to Pa (133.32237 Pa per Torr).
        self.pressure = 133.32237 * self.plasmasystem.pressure_torr
        # Cylindrical reactor volume/lateral area and their surface-to-volume ratio.
        self.reactor_volume = (self.plasmasystem.length*PI*self.plasmasystem.radius
                               *self.plasmasystem.radius)
        self.reactor_area = self.plasmasystem.length*2.0*PI*self.plasmasystem.radius
        self.ratio_av = self.reactor_area / self.reactor_volume
        # Ideal-gas number density, split into Ar and SiH4 partial densities
        # by the Ar:SiH4 mixing ratio.
        self.gas_dens = self.pressure / self.kbtg
        self.nar = self.plasmasystem.arsih4_ratio * self.gas_dens
        self.nsih4 = (1.0-self.plasmasystem.arsih4_ratio) * self.gas_dens
        # Thermal velocities and wall fluxes for the neutral species.
        self.vth_ar = self.thermal_velocity(self.plasmasystem.armass)
        self.vth_sih4 = self.thermal_velocity(self.plasmasystem.sih4mass)
        self.flux_sih3 = self.flux_neutrals(self.plasmasystem.sih4mass)
        self.flux_sih2 = self.flux_neutrals(self.plasmasystem.sih4mass)
        self.flux_ar = self.flux_neutrals(self.plasmasystem.armass)
        ## From Lieberman pag 80 (117)
        self.lambdai = 1. / (330 * self.plasmasystem.pressure_torr)
        # Ion wall fluxes; SiH3+ uses a fixed mean free path of 2.9e-3
        # (units presumably metres, matching lambdai -- TODO confirm source).
        self.flux_arp = self.flux_ions(self.plasmasystem.armass, self.lambdai)
        self.flux_sih3p = self.flux_ions(self.plasmasystem.sih4mass, 2.9e-3)
        ## peak voltage
        self.vsheath = 0.25*100.0
        # Source/drain terms and density history buffers; 7 entries, one per
        # tracked quantity in system().
        self.density_sourcedrain = np.zeros(7)
        self.past_plasmadensity = np.ones(7)
        self.next_plasmadensity = np.zeros(7)
    def thermal_velocity(self, mass):
        """ computes the thermal velocity
        """
        # v_th = sqrt(2 kB Tg / m)
        return np.sqrt(2.0*self.kbtg/mass)
    def diffusion_neutrals(self, mass, lambdax=3.5*1e-3):
        """ computes the diffusion coefficient for neutrals
        """
        # D = kB Tg * lambda / (m v_th)
        return self.kbtg*lambdax/(mass*self.thermal_velocity(mass))
    def center2edge_neutrals(self, mass):
        """ center to edge ratio for neutrals
        """
        pfcn = (1.0 + (self.plasmasystem.length/2.0) * self.thermal_velocity(mass)
                / (4.0*self.diffusion_neutrals(mass)))
        return 1.0/pfcn
    def flux_neutrals(self, mass):
        """ computes the neutral flux
        """
        # Kinetic wall flux (1/4) n v_th, corrected by the center-to-edge ratio.
        return 0.25 * self.center2edge_neutrals(mass) * self.thermal_velocity(mass)
    def bohm_velocity(self, mass):
        """ computes the Bohm velocity
        """
        # u_B = sqrt(kB Ti / m)
        return np.sqrt(self.ion_kbtg/mass)
    def center2edge_ions(self, lambdax):
        """ center to edge ratio for ions
        """
        pfcn = np.sqrt(3.0+(0.5*self.plasmasystem.length/lambdax))
        return 1.0/pfcn
    def flux_ions(self, mass, lambdax):
        """ computes the ion flux
        """
        return self.center2edge_ions(lambdax) * self.bohm_velocity(mass)
    def ion_velocity(self, mass):
        """ computes the ion velocity
        """
        # Mean thermal speed sqrt(8 kB Ti / (pi m)).
        return np.sqrt(8.0*self.ion_kbtg/(PI*mass))
    def get_system(self):
        """ returns the system of equations
        """
        return self.system
    def system(self, time, nvector):
        """ system of equations for the densities

        State vector layout:
        [n_e, n_Ar+, n_Ar*, n_SiH3+, n_SiH3, n_SiH2, n_eps (energy density)]
        Returns the time derivatives in the same order.
        """
        nel = nvector[0]
        narp = nvector[1]
        narm = nvector[2]
        nsih3p = nvector[3]
        nsih3 = nvector[4]
        nsih2 = nvector[5]
        neps = nvector[6]
        # Mean electron energy = energy density / electron density.
        energy = neps/nel
        # Rate coefficients evaluated at the current mean energy.
        kel = self.rates_map["R1:kel"](energy)
        kio = self.rates_map["R2:ki"](energy)
        kex = self.rates_map["R3:kex"](energy)
        kiarm = self.rates_map["R4:kiarm"](energy)
        kelsih4 = self.rates_map["R5:kelsih4"](energy)
        kdisih4 = self.rates_map["R6:kdisih4"](energy)
        kdsih3 = self.rates_map["R7:kdsih3"](energy)
        kdsih2 = self.rates_map["R8:kdsih2"](energy)
        kisih3 = self.rates_map["R9:kisih3"](energy)
        kv13 = self.rates_map["R10:kv13"](energy)
        kv24 = self.rates_map["R11:kv24"](energy)
        k12 = self.rates_map["R12:k12"](energy)
        k13 = self.rates_map["R13:k13"](energy)
        k14 = self.rates_map["R14:k14"](energy)
        k15 = self.rates_map["R15:k15"](energy)
        # Threshold energies: the `bvar` fit parameter of each rate is used
        # as the energy lost per reaction in the energy-balance equation.
        ekio = self.rates_map["R2:ki"].bvar
        ekex = self.rates_map["R3:kex"].bvar
        ekiarm = self.rates_map["R4:kiarm"].bvar
        ekdisih4 = self.rates_map["R6:kdisih4"].bvar
        ekdsih3 = self.rates_map["R7:kdsih3"].bvar
        ekdsih2 = self.rates_map["R8:kdsih2"].bvar
        ekisih3 = self.rates_map["R9:kisih3"].bvar
        ekv13 = self.rates_map["R10:kv13"].bvar
        ekv24 = self.rates_map["R11:kv24"].bvar
        # Local aliases for speed/readability inside the long expressions.
        nar = self.nar
        nsih4 = self.nsih4
        flux_arp = self.flux_arp
        flux_ar = self.flux_ar
        flux_sih3p = self.flux_sih3p
        flux_sih3 = self.flux_sih3
        flux_sih2 = self.flux_sih2
        ratio_av = self.ratio_av
        sourcedrain = self.density_sourcedrain
        with_tunnel = self.plasmasystem.with_tunnel
        # Quasineutrality: recompute n_SiH3+ from electrons, Ar+ and the
        # nanoparticle charge density (overwrites the unpacked value above).
        nsih3p = nel - narp - self.nano_qdens
        # Electron balance: ionisation sources minus wall losses and
        # nanoparticle source/drain terms.
        dnel = (+kio*nar*nel
                + kiarm*nel*narm
                + kdisih4*nel*nsih4
                + kisih3*nel*nsih3
                - flux_arp*ratio_av*narp
                - flux_sih3p*ratio_av*nsih3p
                - sourcedrain[0]*nel
                + with_tunnel*sourcedrain[4])
        # Ar+ balance.
        dnarp = (+kio*nar*nel
                 + kiarm*nel*narm
                 - flux_arp*ratio_av*narp
                 - sourcedrain[1]*narp)
        # Ar* (metastable) balance.
        dnarm = (+ kex*nar*nel
                 - kiarm*narm*nel
                 - k12*narm*nsih4
                 - k13*narm*nsih4
                 - k14*narm*nsih3
                 - k15*narm*nsih2
                 - flux_ar*ratio_av*narm)
        # SiH3+ balance.
        dnsih3p = (+ kdisih4*nel*nsih4
                   + kisih3*nel*nsih3
                   - flux_sih3p*ratio_av*nsih3p)
        # SiH3 radical balance.
        dnsih3 = (+ kdsih3*nel*nsih4
                  - kisih3*nel*nsih3
                  + k12*narm*nsih4
                  - k14*narm*nsih3
                  - flux_sih3*ratio_av*nsih3)
        # SiH2 radical balance.
        dnsih2 = (+ kdsih2*nel*nsih4
                  + k13*narm*nsih4
                  + k14*narm*nsih3
                  - k15*narm*nsih2
                  - flux_sih2*ratio_av*nsih2)
        power = self.plasmasystem.power
        reactor_volume = self.reactor_volume
        vsheath = self.vsheath
        armass = self.plasmasystem.armass
        sih4mass = self.plasmasystem.sih4mass
        # Electron energy balance: absorbed power density minus inelastic,
        # wall, sheath and elastic (m_e/M scaled) losses.
        dneps = (power/reactor_volume
                 - ekio*kio*nar*nel
                 - ekex*kex*nar*nel
                 - ekiarm*kiarm*narm*nel
                 - (5./3.)*self.bohm_velocity(armass)*ratio_av*neps
                 - QE*vsheath*self.bohm_velocity(armass)*ratio_av*nel
                 - (5./3.)*self.bohm_velocity(sih4mass)*ratio_av*neps
                 - QE*vsheath*self.bohm_velocity(sih4mass)*ratio_av*nel
                 - 3.0*(ME/armass)*kel*neps*nar
                 - 3.0*(ME/sih4mass)*kelsih4*neps*nsih4
                 - ekisih3*kisih3*nel*nsih3
                 - ekdisih4*kdisih4*nel*nsih4
                 - ekdsih3*kdsih3*nel*nsih4
                 - ekdsih2*kdsih2*nel*nsih4
                 - ekv13*kv13*nel*nsih4
                 - ekv24*kv24*nel*nsih4
                 - sourcedrain[6]*nel
                 + with_tunnel*sourcedrain[5])
        # nan_to_num guards against 0/0 (e.g. energy = neps/nel with nel -> 0).
        return np.nan_to_num([dnel, dnarp, dnarm, dnsih3p, dnsih3, dnsih2, dneps],
                             copy=False)
| 34.713846 | 87 | 0.576139 |
6ec6806b3360ba70b659e8d4a38b4d380d150247 | 1,842 | py | Python | sftpgo_client/base/models/crypt_fs_config.py | ramnes/sftpgo-client | 016da5e11274362eb2e4c2d0e78e2b88c25ea515 | [
"MIT"
] | 5 | 2021-03-23T15:59:30.000Z | 2021-06-30T09:50:55.000Z | sftpgo_client/base/models/crypt_fs_config.py | ramnes/sftpgo-client | 016da5e11274362eb2e4c2d0e78e2b88c25ea515 | [
"MIT"
] | 2 | 2021-03-24T22:06:12.000Z | 2021-07-28T11:59:52.000Z | sftpgo_client/base/models/crypt_fs_config.py | ramnes/sftpgo-client | 016da5e11274362eb2e4c2d0e78e2b88c25ea515 | [
"MIT"
] | 1 | 2022-01-18T17:04:32.000Z | 2022-01-18T17:04:32.000Z | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.secret import Secret
from ..types import UNSET, Unset
T = TypeVar("T", bound="CryptFsConfig")
@attr.s(auto_attribs=True)
class CryptFsConfig:
    """Crypt filesystem configuration details."""

    # Encryption passphrase; UNSET means "not provided".
    passphrase: Union[Unset, Secret] = UNSET
    # Extra (non-schema) JSON properties, preserved verbatim for round-tripping.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, including any additional properties."""
        passphrase: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.passphrase, Unset):
            passphrase = self.passphrase.to_dict()

        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        # (Removed a no-op `field_dict.update({})` left over from code generation.)
        if passphrase is not UNSET:
            field_dict["passphrase"] = passphrase

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from *src_dict*; unknown keys are kept as additional properties."""
        d = src_dict.copy()
        _passphrase = d.pop("passphrase", UNSET)
        passphrase: Union[Unset, Secret]
        if isinstance(_passphrase, Unset):
            passphrase = UNSET
        else:
            passphrase = Secret.from_dict(_passphrase)

        crypt_fs_config = cls(
            passphrase=passphrase,
        )

        crypt_fs_config.additional_properties = d
        return crypt_fs_config

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (non-schema) properties."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| 29.238095 | 77 | 0.647123 |
6ec843c15f7247a66d7807de9b1b0eb37d884ff5 | 3,598 | py | Python | venvs/sitio_web/restaurantes/views.py | mmaguero/MII-SSBW16-17 | 25b6c340c63a2fbe8342b48ec7f730b68c58d1bc | [
"MIT"
] | 1 | 2017-04-22T11:02:38.000Z | 2017-04-22T11:02:38.000Z | venvs/sitio_web/restaurantes/views.py | mmaguero/MII-SSBW16-17 | 25b6c340c63a2fbe8342b48ec7f730b68c58d1bc | [
"MIT"
] | 4 | 2017-06-17T16:10:45.000Z | 2022-02-13T20:23:04.000Z | venvs/sitio_web/restaurantes/views.py | mmaguero/MII-SSBW16-17 | 25b6c340c63a2fbe8342b48ec7f730b68c58d1bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.shortcuts import render, HttpResponse,redirect
from django.http import JsonResponse
from .forms import RestaurantesForm
from .models import restaurants, addr#, image
from django.contrib.auth.decorators import login_required
import logging
log = logging.getLogger(__name__)
# Create your views here.
def index(request):
    """Render the landing page."""
    log.info("INDEX - Hey there it works!!")
    return render(request, 'index.html', {'menu': 'index'})
def test(request):
    """Template smoke test: renders a constant and the first five restaurants."""
    context = {
        'variable': 3,
        'resta': restaurants.objects[:5],
    }  # template variables
    return render(request, 'test.html', context)
@login_required
def listar(request):
    """List the first ten restaurants (login required)."""
    log.info("LIST - Hey there it works!!")
    context = {'resta': restaurants.objects[:10], 'menu': 'list'}
    return render(request, 'listar.html', context)
@login_required
def buscar(request):
    """Search restaurants whose cuisine contains the ?cocina= query string."""
    log.info("SEARCH - Hey there it works!!")
    query = request.GET.get('cocina')
    matches = restaurants.objects(cuisine__icontains=query)
    return render(request, 'listar.html', {'resta': matches})
@login_required
def add(request):
    """Create a restaurant from the submitted form; redirect to index on success."""
    log.info("ADD - Hey there it works!!")
    formu = RestaurantesForm()
    if request.method == "POST":
        formu = RestaurantesForm(request.POST, request.FILES)
        if formu.is_valid():  # otherwise validation errors are attached to the form
            cleaned = formu.cleaned_data
            # Build the embedded address document, then the restaurant itself.
            direc = addr(street=cleaned['direccion'])
            record = restaurants(name=cleaned['nombre'],
                                 cuisine=cleaned['cocina'],
                                 borough=cleaned['barrio'],
                                 address=direc,
                                 image=request.FILES['imagen'])
            record.save()
            return redirect(index)
    # GET request, or POST with errors: render the (possibly annotated) form.
    return render(request, 'form.html', {'form': formu, 'menu': 'add'})
# @login_required
# def update(request):
# log.info("UPD - Hey there it works!!")
# name = request.GET.get('name')
# obj=restaurants.objects(name=name)
# context = {
# 'resta': obj,
# }
# return render(request,'formUpdate.html', context)
# url
@login_required
def restaurant(request, name):
    """Detail page for the first restaurant matching *name* (from the URL)."""
    log.info("DETAIL - Hey there it works!!")
    match = restaurants.objects(name=name)[0]
    return render(request, 'detalle.html', {'resta': match})
# recuperar foto
@login_required
def imagen(request, name):
    """Serve the stored image blob for the restaurant called *name*."""
    log.info("IMAGE - Hey there it works!!")
    record = restaurants.objects(name=name)[0]
    payload = record.image.read()
    return HttpResponse(payload, content_type="image/" + record.image.format)
def r_ajax(request, name):
    """Return a Google-Maps iframe snippet for the named restaurant as JSON.

    *name* comes straight from the URL and the address fields come from the
    database, so the map query is URL-encoded before being embedded in the
    iframe markup to prevent HTML/URL injection.
    """
    from urllib.parse import quote_plus  # local import: stdlib only
    log.info("AJAX - Hey there it works!!")
    resta = restaurants.objects(name=name)[0]
    query = quote_plus('%s %s %s' % (name, resta.address.street, resta.borough))
    maps = ('<iframe width="450" height="300" frameborder="0" style="border:0" '
            'src="https://maps.google.com/maps?q=' + query +
            '&ie=UTF8&&output=embed" allowfullscreen></iframe>')
    return JsonResponse({'map': maps})  # could also be a string or raw HTML
| 30.491525 | 248 | 0.636465 |
6ec8b33e8b06b3f5e838904586efee4b116849d8 | 3,705 | py | Python | networking-master/CodeBlue.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 8 | 2019-02-17T20:11:46.000Z | 2019-10-18T06:27:16.000Z | networking-master/CodeBlue.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | null | null | null | networking-master/CodeBlue.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 4 | 2019-02-17T23:00:18.000Z | 2019-10-18T06:27:14.000Z | import cb, time, struct, sys, random, string
# Python 2 script: `print` statements below.
try:
    # Pythonista-only: the `console` module colours the banner when available.
    import console
    console.set_color(0.0,0.2,1)
    print """
_____ _ _____ _
| |___ _| |___| __ | |_ _ ___
| --| . | . | -_| __ -| | | | -_|
|_____|___|___|___|_____|_|___|___|
_____
_ __/ ___/
| | / / __ \
| |/ / /_/ /
|___/\____/
"""
    console.set_color()
    console.set_font()
except:
    # Fallback: plain, uncoloured banner outside Pythonista.
    print """
_____ _ _____ _
| |___ _| |___| __ | |_ _ ___
| --| . | . | -_| __ -| | | | -_|
|_____|___|___|___|_____|_|___|___|
_____
_ __/ ___/
| | / / __ \
| |/ / /_/ /
|___/\____/
"""
try:
    import printbyte
except:
    # Fallback shim used when the real printbyte module is unavailable.
    class printbyte (object):
        # Must be a staticmethod: call sites invoke printbyte.byte_pbyte(data)
        # on the class itself, which would fail as an unbound-method call
        # (Python 2) without the decorator.
        @staticmethod
        def byte_pbyte(data):
            """Render *data* as a '\\x..'-escaped hex string."""
            if len(str(data)) > 1:
                msg = list(data)
                s = 0
                for u in msg:
                    u = str(u).encode("hex")  # Python 2 hex codec
                    u = "\\x"+u
                    msg[s] = u
                    s = s + 1
                msg = "".join(msg)
            else:
                msg = data
                msg = str(msg).encode("hex")
                msg = "\\x"+msg
            return msg
shell = False  # payload written to writable characteristics; False disables writes
verbose = True  # print per-service/characteristic discovery details
devices = []  # names of peripherals already processed (avoids duplicates)
responses = []  # characteristic UUIDs whose value has already been reported
sim_names = False  # presumably: when True, re-process peripherals sharing a name -- TODO confirm
blacklist = "none", "None", "unknown", "", "Unknon"  # peripheral names to ignore ("Unknon" sic)
def ani_load(msg,amt=5,tm=0.1,rng=(1,3)):
    """Print *msg* with an animated trail of 1..amt-1 dots.

    The animation repeats a random number of times drawn from *rng*
    (inclusive), sleeping *tm* seconds between frames.
    """
    for t in range(random.randint(rng[0],rng[1])):
        for _ in range(1,amt):
            sys.stdout.write("\r"+msg+"."*_+" ")
            time.sleep(tm)
    # Python 2: bare `print` emits the trailing newline.
    print
class BlueBorne (object):
    """Delegate for Pythonista's CoreBluetooth (`cb`) central manager.

    Discovers peripherals, connects, enumerates services and
    characteristics, optionally writes the module-level `shell` payload,
    and prints any characteristic values that can be read.
    """
    def did_update_state(self):
        # Delegate hook required by cb; nothing to do on state changes.
        pass
    def did_discover_peripheral(self, p):
        # "" is a substring of every UUID, so the first test always passes;
        # effectively filters only by name (not blacklisted, not seen yet).
        if "" in p.uuid and str(p.name) not in blacklist and p.name not in devices:
            print "\n"+"="*36+"\n"
            if verbose:
                print "[+] Discovered " + str(p.name)
            self.peripheral = p
            cb.connect_peripheral(p)
    def did_disconnect_peripheral(self,p,error):
        try:
            print "[-] %s Disconnected" %(p.name)
            self.peripheral.cancel_peripheral_connection(p)
        except:
            pass
    def did_connect_peripheral(self, p):
        print "[+] Connected " + p.name
        p.discover_services()
    def did_discover_services(self, p, error):
        # Record the name so this device is not processed again
        # (skipped when sim_names is enabled).
        if not sim_names:
            devices.append(p.name)
        # NOTE(review): this rebinds a *local* `responses`; the module-level
        # list consulted by did_update_value is not actually cleared here.
        responses = []
        print
        for s in p.services:
            if "" in s.uuid:  # matches every service UUID
                if verbose:
                    print "[+] Service " + s.uuid
                p.discover_characteristics(s)
        print
    def did_discover_characteristics(self, s, error):
        for c in s.characteristics:
            if "" in c.uuid:  # matches every characteristic UUID
                if verbose:
                    print "[+] Characteristic " + c.uuid
                if shell:
                    ani_load("[+] Generating Payload")
                if shell:
                    # Write the configured payload with a write-with-response.
                    self.peripheral.write_characteristic_value(c,shell,True)
                    print "Payload -> %s//%s" %(self.peripheral.name[:6], c.uuid[:6])
                try:
                    # Request the value and subscribe to notifications;
                    # failures (e.g. non-readable characteristics) are ignored.
                    self.peripheral.read_characteristic_value(c)
                    self.peripheral.set_notify_value(c, True)
                except Exception as e:
                    pass
    def did_update_value(self, c, error):
        if c.uuid not in responses:
            print "[*] Checking Response For %s" %c.uuid[:6]
            try:
                # NOTE(review): `ten` and `resp` may be unbound when no branch
                # matches; the surrounding except then prints "[-] No Response".
                if len(c.value) == 10 and "\x70" in c.value:
                    ten = False
                    resp = str(c.value)
                if str(c.uuid) == "2A24":
                    resp = str(c.value)
                elif len(c.value) == 1:
                    # Single byte: decode "\x.." to "0x.." and eval to an int.
                    resp = eval(printbyte.byte_pbyte(c.value).replace("\\x","0x"))
                elif ten:
                    resp = printbyte.byte_pbyte(c.value)
                print "[%]",resp
                print
            except Exception as e:
                try:
                    print "[-] No Response"
                    print
                except:
                    pass
                pass
            # Remember this characteristic so it is only reported once.
            responses.append(c.uuid)
    def did_write_value(self,c,error):
        try:
            print "[+] Payload Finished %s\n[=] Scanning Info On %s" %(c.uuid[:6],self.peripheral.name[:6])
        except:
            pass
# Install the delegate, show the loading animation, and start scanning.
cb.set_central_delegate(BlueBorne())
ani_load("[*] Scanning For Devices",5,0.15,(6,8))
cb.scan_for_peripherals()
try:
    # Keep the script alive while delegate callbacks fire.
    while True: time.sleep(0.1)
except KeyboardInterrupt:
    # Ctrl-C: reset the Bluetooth stack and stop scanning.
    cb.reset()
    cb.stop_scan()
| 23.301887 | 98 | 0.57139 |
6ecb10199a77a73ed62e4b61289eba8a812c02c6 | 342 | py | Python | products/urls.py | Tsatsubii/tsatsubii-helpdesk | baee05b4fd1aedfda8e4039c45f182f29e8db348 | [
"MIT"
] | null | null | null | products/urls.py | Tsatsubii/tsatsubii-helpdesk | baee05b4fd1aedfda8e4039c45f182f29e8db348 | [
"MIT"
] | null | null | null | products/urls.py | Tsatsubii/tsatsubii-helpdesk | baee05b4fd1aedfda8e4039c45f182f29e8db348 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
app_name = 'products'
urlpatterns = [
path('create', CreateProduct.as_view(), name='create'),
path('view/<int:pk>', ProductDetail.as_view(), name='detail'),
path('list', ProductList.as_view(), name='list'),
path('<int:pk>/update', ProductUpdate.as_view(), name='update'),
]
| 28.5 | 68 | 0.666667 |
6ecb7c2943ca6eaec822d4f6b7669ca1cd39c2c1 | 13,004 | py | Python | src/panoramic/cli/errors.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-11-13T17:26:59.000Z | 2021-03-19T15:11:26.000Z | src/panoramic/cli/errors.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-10-28T10:22:35.000Z | 2021-01-27T17:33:58.000Z | src/panoramic/cli/errors.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 3 | 2021-01-26T07:58:03.000Z | 2021-03-11T13:28:34.000Z | import functools
import os
import signal
import sys
from abc import ABC
from enum import Enum
from pathlib import Path
from typing import Callable, ClassVar, List, Optional
from jsonschema.exceptions import ValidationError as JsonSchemaValidationError
from requests.exceptions import RequestException
from yaml.error import MarkedYAMLError
from panoramic.cli.paths import Paths
from panoramic.cli.print import echo_error
# Response header carrying the server-side request id, echoed in error text.
DIESEL_REQUEST_ID_HEADER = 'x-diesel-request-id'


class CliBaseException(Exception):
    """Base class for CLI errors; can carry a request id for tracing."""

    # Request id extracted from an HTTP response, if any.
    request_id: Optional[str] = None

    def add_request_id(self, request_id: str):
        # Returns self so the call can be chained when re-raising.
        self.request_id = request_id
        return self

    def extract_request_id(self, exc: RequestException):
        # getattr guards against exc.response being None (connection errors).
        headers = getattr(exc.response, 'headers', {})
        return self.add_request_id(headers.get(DIESEL_REQUEST_ID_HEADER))

    def __str__(self) -> str:
        # Append the request id to the message when one was captured.
        if self.request_id is not None:
            return f'{super().__str__()} (RequestId: {self.request_id})'
        return super().__str__()

    def __repr__(self) -> str:
        if self.request_id is not None:
            return f'{super().__repr__()} (RequestId: {self.request_id})'
        return super().__repr__()
class TimeoutException(CliBaseException):
    """Thrown when a remote operation times out."""


class IdentifierException(CliBaseException):
    """Error refreshing metadata."""

    def __init__(self, source_name: str, table_name: str):
        super().__init__(f'Identifiers could not be generated for table {table_name} in data connection {source_name}')


class JoinException(CliBaseException):
    """Error detecting joins in a dataset."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Joins could not be detected for {dataset_name}')


class RefreshException(CliBaseException):
    """Error refreshing metadata."""

    def __init__(self, source_name: str, table_name: str):
        super().__init__(f'Metadata could not be refreshed for table {table_name} in data connection {source_name}')


class SourceNotFoundException(CliBaseException):
    """Thrown when a source cannot be found."""

    def __init__(self, source_name: str):
        super().__init__(f'Data connection {source_name} not found. Has it been connected?')


class DatasetNotFoundException(CliBaseException):
    """Thrown when a dataset cannot be found."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Dataset {dataset_name} not found. Has it been created?')
class ScanException(CliBaseException):
    """Error scanning metadata."""

    def __init__(self, source_name: str, table_filter: Optional[str]):
        # Keep a single space around the optional table filter.
        table_msg = f' {table_filter} ' if table_filter is not None else ' '
        # Message fix: 'connection' was previously misspelled 'counnection'.
        super().__init__(f'Metadata could not be scanned for table(s){table_msg}in data connection: {source_name}')
class _InvalidSubmissionException(CliBaseException):
    """Shared parsing of remote validation-error responses.

    Subclasses override ``_fallback_message``, used when the error payload
    cannot be parsed.
    """

    _fallback_message = 'Invalid object submitted'

    # Human-readable validation messages extracted from the response.
    messages: List[str]

    def __init__(self, error: RequestException):
        try:
            self.messages = [
                item['msg'] for item in error.response.json()['error']['extra_data']['validation_errors']
            ]
        except Exception:
            # Malformed/absent payload: fall back to a generic message.
            self.messages = [self._fallback_message]


class InvalidModelException(_InvalidSubmissionException):
    """Invalid model submitted to remote."""

    _fallback_message = 'Invalid model submitted'


class InvalidDatasetException(_InvalidSubmissionException):
    """Invalid dataset submitted to remote."""

    _fallback_message = 'Invalid dataset submitted'


class InvalidFieldException(_InvalidSubmissionException):
    """Invalid field submitted to remote."""

    _fallback_message = 'Invalid field submitted'
class DatasetWriteException(CliBaseException):
    """Error writing dataset to remote state."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Error writing dataset {dataset_name}')


class ModelWriteException(CliBaseException):
    """Error writing model to remote state."""

    def __init__(self, dataset_name: str, model_name: str):
        super().__init__(f'Error writing model {model_name} in dataset {dataset_name}')


class FieldWriteException(CliBaseException):
    """Error writing field to remote state."""

    def __init__(self, dataset_name: Optional[str], field_name: str):
        # dataset_name may be None; the message then omits the dataset part.
        message = f'Error writing field {field_name}'
        if dataset_name is not None:
            message += f' in dataset {dataset_name}'
        super().__init__(message)
class ValidationErrorSeverity(Enum):
    # Severity levels for validation findings.
    WARNING = 'WARNING'
    ERROR = 'ERROR'


class ValidationError(CliBaseException, ABC):
    """Abstract error raised during validation step."""

    # Subclasses may override with WARNING for non-fatal findings.
    severity: ClassVar[ValidationErrorSeverity] = ValidationErrorSeverity.ERROR
class FileMissingError(ValidationError):
    """File that should exist didn't."""

    def __init__(self, *, path: Path):
        if path == Paths.context_file():
            msg = f'Context file ({path.name}) not found in current working directory. Run pano init to create it.'
        else:
            # Should not happen => we only check above files exist explicitly
            msg = f'File Missing - {path}'
        super().__init__(msg)

    def __eq__(self, o: object) -> bool:
        # Errors compare by rendered message. NOTE: defining __eq__ without
        # __hash__ makes instances unhashable.
        if not isinstance(o, FileMissingError):
            return False
        return str(self) == str(o)


class DuplicateModelNameError(ValidationError):
    """Two local models use the same model name."""

    def __init__(self, *, model_name: str, paths: List[Path]) -> None:
        try:
            paths = [path.relative_to(Path.cwd()) for path in paths]
        except ValueError:
            pass  # Use relative path when possible
        path_lines = ''.join(f'\n in {path}' for path in paths)
        super().__init__(f'Multiple model files use model name {model_name}{path_lines}')

    def __eq__(self, o: object) -> bool:
        # Compare by rendered message (see note on FileMissingError.__eq__).
        if not isinstance(o, DuplicateModelNameError):
            return False
        return str(self) == str(o)


class DuplicateFieldSlugError(ValidationError):
    """Two local models use the same model name."""

    def __init__(self, *, field_slug: str, paths: List[Path]) -> None:
        try:
            paths = [path.relative_to(Path.cwd()) for path in paths]
        except ValueError:
            pass  # Use relative path when possible
        path_lines = ''.join(f'\n in {path}' for path in paths)
        super().__init__(f'Multiple field files use slug {field_slug}{path_lines}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DuplicateFieldSlugError):
            return False
        return str(self) == str(o)
class InvalidYamlFile(ValidationError):
    """YAML syntax error."""

    def __init__(self, *, path: Path, error: MarkedYAMLError):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible
        # problem_mark carries the parser position of the syntax error.
        super().__init__(f'Invalid YAML file - {error.problem}\n on line {error.problem_mark.line}\n in {path}')

    def __eq__(self, o: object) -> bool:
        # Compare by rendered message.
        if not isinstance(o, InvalidYamlFile):
            return False
        return str(self) == str(o)


class DeprecatedAttributeWarning(ValidationError):
    # Non-fatal finding: reported but does not fail validation.
    severity = ValidationErrorSeverity.WARNING

    def __init__(self, *, attribute: str, path: Path):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible
        super().__init__(f'Deprecated attribute "{attribute}" \n in {path}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DeprecatedAttributeWarning):
            return False
        return str(self) == str(o)


class DeprecatedConfigProperty(ValidationError):
    # Non-fatal finding: reported but does not fail validation.
    severity = ValidationErrorSeverity.WARNING

    def __init__(self, property_: str, deprecation_message: Optional[str] = None):
        if deprecation_message is None:
            deprecation_message = "Property is deprecated"
        super().__init__(f"'{property_}': {deprecation_message}")

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DeprecatedConfigProperty):
            return False
        return str(self) == str(o)
class JsonSchemaError(ValidationError):
    """Wraps a jsonschema validation failure with file context."""

    def __init__(self, *, path: Path, error: JsonSchemaValidationError):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible
        # Dotted JSON path to the offending element.
        error_path = '.'.join(str(p) for p in error.path)
        super().__init__(f'{error.message}\n for path {error_path}\n in {path}')

    def __eq__(self, o: object) -> bool:
        # Compare by rendered message.
        if not isinstance(o, JsonSchemaError):
            return False
        return str(self) == str(o)


class OrphanFieldFileError(ValidationError):
    # Non-fatal: an unused field file is reported as a warning.
    severity = ValidationErrorSeverity.WARNING

    # Kept as attributes for programmatic access by callers.
    field_slug: str
    dataset_slug: str

    def __init__(
        self,
        *,
        field_slug: str,
        dataset_slug: str,
    ) -> None:
        self.field_slug = field_slug
        self.dataset_slug = dataset_slug
        super().__init__(f'Field {field_slug} under dataset {dataset_slug} not used by any model')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, OrphanFieldFileError):
            return False
        return str(self) == str(o)
class MissingFieldFileError(ValidationError):
    """A model references a field slug that has no field file."""

    # Kept as attributes for programmatic access by callers.
    field_slug: str
    dataset_slug: str
    data_reference: str
    identifier: bool
    model_name: str

    def __init__(
        self,
        *,
        field_slug: str,
        dataset_slug: str,
        data_reference: str,
        identifier: bool,
        model_name: str,
    ) -> None:
        self.field_slug = field_slug
        self.dataset_slug = dataset_slug
        self.data_reference = data_reference
        self.identifier = identifier
        self.model_name = model_name
        super().__init__(f'Missing field file for slug {field_slug} under dataset {dataset_slug}')

    def __eq__(self, o: object) -> bool:
        # Compare by rendered message.
        if not isinstance(o, MissingFieldFileError):
            return False
        return str(self) == str(o)
def handle_exception(f: Callable):
    """Decorator: report any uncaught exception and exit with status 1."""

    @functools.wraps(f)
    def guarded(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # Log with traceback, then terminate with a failure status.
            echo_error('Internal error occurred', exc_info=True)
            sys.exit(1)

    return guarded
def handle_interrupt(f: Callable):
    """Decorator: hard-exit the app on keyboard interrupt."""

    @functools.wraps(f)
    def guarded(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except KeyboardInterrupt:
            # 128 + SIGINT is the conventional exit status for Ctrl-C;
            # os._exit skips cleanup so the process stops immediately.
            os._exit(128 + signal.SIGINT)

    return guarded
class ConnectionNotFound(CliBaseException):
    """Connection not found in config."""

    def __init__(self):
        # Message fix: the previous text ('Connection was set up.')
        # contradicted the error, which is raised when no connection exists.
        super().__init__('Connection was not set up. Run: pano connection setup -h')
class ConnectionUrlNotAvailableFound(CliBaseException):
    """Connection exists but has no stored URL."""

    def __init__(self):
        super().__init__('Connection has no url stored. Please call: pano connection setup --url <url>')


class ConnectionCreateException(CliBaseException):
    """Failed to create connection due to error."""

    def __init__(self, error_message: str):
        super().__init__(f'Failed to create connection: {error_message}.')


class ConnectionUpdateException(CliBaseException):
    """Failed to update connection due to error."""

    def __init__(self, error_message: str):
        super().__init__(f'Failed to update connection: {error_message}.')


class ConnectionFormatException(CliBaseException):
    """Stored connection credentials have an invalid format."""

    def __init__(self, credential_error: str):
        super().__init__(f'Invalid credentials format FAIL: {credential_error}')
class TransformCompileException(CliBaseException):
    """Failed to compile a Transform due to an error."""

    def __init__(self, transform_name: str):
        super().__init__(f'Error compiling transform {transform_name}')


class TransformExecutionFailed(Exception):
    """Failed to execute a transform on the remote connection"""

    # NOTE(review): extends Exception rather than CliBaseException, unlike
    # every other error in this module -- confirm whether that is deliberate.

    # The SQL that failed, kept for diagnostics.
    compiled_sql: str

    def __init__(self, transform_name: str, compiled_sql: str):
        self.compiled_sql = compiled_sql
        super().__init__(f'Error executing transform {transform_name}')


class ExecuteInvalidArgumentsException(CliBaseException):
    """Failed to compile a execute due to invalid arguments."""

    def __init__(self, message: str):
        super().__init__(message)
| 30.383178 | 119 | 0.661181 |
6ecd09c39c02e164cec80cd3178067eb0c5adcdc | 336 | py | Python | server/src/__init__.py | dmitrijbozhkov/zno-library | 888ccde7163f8ac6746e4065f47d5070eadff5a7 | [
"Apache-2.0"
] | null | null | null | server/src/__init__.py | dmitrijbozhkov/zno-library | 888ccde7163f8ac6746e4065f47d5070eadff5a7 | [
"Apache-2.0"
] | null | null | null | server/src/__init__.py | dmitrijbozhkov/zno-library | 888ccde7163f8ac6746e4065f47d5070eadff5a7 | [
"Apache-2.0"
] | null | null | null | """ Builds and runs application """
from app import app, user_datastore, db
from api_module.api_routes import api
from auth_module.auth_routes import auth
app.register_blueprint(api)
app.register_blueprint(auth)
if __name__ == "__main__":
# database = create_db(connection_str)
# attach_db(g, database)
app.run(port=3000)
| 25.846154 | 42 | 0.758929 |
6ecdc7cb0a885b814b6a6f30cd78f9066a128b3b | 381 | py | Python | flask_miracle/__init__.py | tdpsk/flask-miracle-acl | 426a9845854678d00108cf5f91ada9323968b524 | [
"BSD-2-Clause"
] | 2 | 2018-01-17T15:57:38.000Z | 2018-02-06T00:03:16.000Z | flask_miracle/__init__.py | tdpsk/flask-miracle-acl | 426a9845854678d00108cf5f91ada9323968b524 | [
"BSD-2-Clause"
] | null | null | null | flask_miracle/__init__.py | tdpsk/flask-miracle-acl | 426a9845854678d00108cf5f91ada9323968b524 | [
"BSD-2-Clause"
] | null | null | null | '''
flask_miracle
-------------
This module provides a fabric layer between the Flask framework and the
Miracle ACL library.
:copyright: (c) 2017 by Timo Puschkasch.
:license: BSD, see LICENSE for more details.
'''
from .base import Acl
from .functions import check_all, check_any, set_current_roles
from .decorators import macl_check_any, macl_check_all
| 27.214286 | 75 | 0.716535 |
6ecea48303630a92cce0c4706954968c1cd05d0f | 134 | py | Python | sopy/tags/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:07:27.000Z | 2021-08-15T17:46:13.000Z | sopy/tags/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:04:16.000Z | 2021-02-21T03:52:55.000Z | sopy/tags/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T18:28:06.000Z | 2022-02-05T03:11:04.000Z | from flask import Blueprint
bp = Blueprint('tags', __name__)
@bp.record_once
def register(state):
from sopy.tags import models
| 14.888889 | 32 | 0.746269 |
6eceaf402774f48fd5e753b9693ff245ec9cc7ae | 152 | py | Python | {{cookiecutter.project_slug}}/backend/api/v1/handlers/__init__.py | devalv/cookiecutter-fastapi | c7cfd3caa14b40dcc5d8ff6bdb6e25cfed3c9d00 | [
"MIT"
] | 2 | 2021-12-26T00:10:19.000Z | 2022-01-30T21:24:31.000Z | {{cookiecutter.project_slug}}/backend/api/v1/handlers/__init__.py | devalv/cookiecutter-fastapi | c7cfd3caa14b40dcc5d8ff6bdb6e25cfed3c9d00 | [
"MIT"
] | 1 | 2021-10-10T17:38:30.000Z | 2021-10-10T18:30:24.000Z | {{cookiecutter.project_slug}}/backend/api/v1/handlers/__init__.py | devalv/cookiecutter-fastapi | c7cfd3caa14b40dcc5d8ff6bdb6e25cfed3c9d00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""{{ cookiecutter.project_slug }} rest-api handlers."""
from .security import security_router
__all__ = ("security_router",)
| 21.714286 | 56 | 0.684211 |
6eceb56b5b2b3973a3c6557e579442305e42471b | 3,374 | py | Python | asyncspotify/album.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 7 | 2020-06-16T21:24:42.000Z | 2022-03-10T20:23:29.000Z | asyncspotify/album.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 13 | 2020-03-22T12:07:04.000Z | 2021-08-15T19:06:57.000Z | asyncspotify/album.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 5 | 2020-03-22T18:21:55.000Z | 2021-10-03T06:30:30.000Z | from datetime import datetime
from .mixins import ArtistMixin, ExternalIDMixin, ExternalURLMixin, ImageMixin, TrackMixin
from .object import SpotifyObject
from .track import SimpleTrack
class _BaseAlbum(SpotifyObject, TrackMixin, ImageMixin, ExternalURLMixin, ArtistMixin):
	"""State shared by the simple and full album representations."""

	_type = 'album'
	_track_class = SimpleTrack

	# strptime formats keyed by the API's release_date_precision values.
	__date_fmt = dict(year='%Y', month='%Y-%m', day='%Y-%m-%d')

	def __init__(self, client, data):
		super().__init__(client, data)
		for mixin in (TrackMixin, ImageMixin, ExternalURLMixin, ArtistMixin):
			mixin.__init__(self, data)

		# album_group can be None; this is not specified in the API docs.
		self.album_group = data.pop('album_group', None)
		self.album_type = data.pop('album_type')
		self.available_markets = data.pop('available_markets', None)
		self.release_date_precision = data.pop('release_date_precision')

		self.release_date = None
		if self.release_date_precision is not None:
			try:
				self.release_date = datetime.strptime(
					data.pop('release_date'),
					self.__date_fmt[self.release_date_precision]
				)
			except ValueError:
				# Unparseable date string: leave release_date as None.
				self.release_date = None
class SimpleAlbum(_BaseAlbum):
	'''
	Represents an Album object as returned by the Spotify Web API.

	.. note::
	   To iterate all tracks, you have to use the ``async for`` construct or fill the object with ``.fill()`` before iterating ``.tracks``.

	id: str
		Spotify ID of the album.
	name: str
		Name of the album.
	tracks: List[:class:`Track`]
		List of tracks on the album.
	artists: List[:class:`Artist`]
		List of artists that appear on the album.
	images: List[:class:`Image`]
		List of associated images, such as the album cover in different sizes.
	track_count: int
		The expected track count as advertised by the last paging object. ``is_filled()`` can return True even if fewer tracks than this exist in ``tracks``, since some fetched tracks from the API can be None for various reasons.
	uri: str
		Spotify URI of the album.
	link: str
		Spotify URL of the album.
	type: str
		Plaintext string of object type: ``album``.
	album_type:
		Type of album, e.g. ``album``, ``single`` or ``compilation``.
	available_markets: List[str] or None
		Markets where the album is available: ISO-3166-1_.
	external_urls: dict
		Dictionary that maps type to url.
	release_date: `datetime <https://docs.python.org/3/library/datetime.html#module-datetime>`_
		Date (and maybe time) of album release; None if absent or unparseable.
	release_date_precision: str
		Precision of ``release_date``. Can be ``year``, ``month``, or ``day``.
	album_group: str or None
		Type of album, e.g. ``album``, ``single``, ``compilation`` or ``appears_on``.
	'''
class FullAlbum(_BaseAlbum, ExternalIDMixin):
	'''
	Represents a complete Album object.

	Carries everything :class:`SimpleAlbum` has, plus:

	genres: List[str]
		Genres associated with the album.
	label: str
		The record label for the album.
	popularity: int
		Popularity indicator, 0 being least popular and 100 being the most.
	copyrights: dict
		List of copyright objects.
	external_ids: dict
		Dictionary of external IDs.
	'''

	def __init__(self, client, data):
		super().__init__(client, data)
		ExternalIDMixin.__init__(self, data)

		# Fields only present on the full album payload.
		for field in ('genres', 'label', 'popularity', 'copyrights'):
			setattr(self, field, data.pop(field))
6ecf4bd8dbec5f43c3a5dbb66ff367208ec1e14c | 73 | py | Python | virustotal_intelligence/__init__.py | elastic/opencti-connector-vti | 52bd6e8c40a8b96f34316b87d4550f308844abbe | [
"Apache-2.0"
] | 1 | 2022-02-11T13:36:11.000Z | 2022-02-11T13:36:11.000Z | virustotal_intelligence/__init__.py | elastic/opencti-connector-vti | 52bd6e8c40a8b96f34316b87d4550f308844abbe | [
"Apache-2.0"
] | null | null | null | virustotal_intelligence/__init__.py | elastic/opencti-connector-vti | 52bd6e8c40a8b96f34316b87d4550f308844abbe | [
"Apache-2.0"
] | null | null | null | __version__ = "5.1.3"
LOGGER_NAME = "connector.virustotal_intelligence"
| 18.25 | 49 | 0.780822 |
6ed12b3edc7505ed891b2d8f3913b9e4dec71522 | 152 | py | Python | training/config_interface/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 5 | 2021-05-05T06:08:52.000Z | 2022-03-24T04:57:52.000Z | training/config_interface/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | null | null | null | training/config_interface/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 2 | 2021-08-07T05:18:05.000Z | 2022-03-31T03:48:37.000Z | from training.config_interface.BaseTrainingProcess import BaseTrainingProcess
from training.config_interface.BaseTrainingEpoch import BaseTrainingEpoch
| 50.666667 | 77 | 0.921053 |
6ed14769a9b989c5617ba6fd0d9b5db6ab25f970 | 2,948 | py | Python | tools_box/_hr/doctype/stationaries_log/stationaries_log.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 4 | 2017-09-25T23:34:08.000Z | 2020-07-17T23:52:26.000Z | tools_box/_hr/doctype/stationaries_log/stationaries_log.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/_hr/doctype/stationaries_log/stationaries_log.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 5 | 2017-06-02T01:58:32.000Z | 2022-02-22T16:59:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, masonarmani38@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StationariesLog(Document):
	"""Log of issued stationaries; writes bin-card entries on submission."""

	def on_submit(self):
		"""Create one bin-card record for every item issued in this log."""
		for issued_item in self.items_issued:
			_create_bin_card(issued_item, self)
def _create_bin_card(item, doc):
	"""Create and submit a Stationaries Bin Card entry for one issued item.

	Reads the most recent bin-card state via _last_bin_card_value, adds the
	issued quantity, and — when the running count reaches a full packing
	unit (ppu) — converts full units into a stock issue via _remove_unit.

	item: child row of the log; reads item_issued, pqty, ppu and
	      writes qty/ppu back when stock is removed.
	doc: the parent StationariesLog document (used for the reference link).
	"""
	import datetime
	last_value = _last_bin_card_value(item)  # (count, ppu, current_value)
	if last_value[0] < 0:
		frappe.throw("No more ")
	new_bin_card = frappe.new_doc("Stationaries Bin Card")
	new_bin_card.date = datetime.datetime.today()
	new_bin_card.item = item.item_issued
	new_bin_card.value = item.pqty
	new_bin_card.current_value = last_value[0] + item.pqty
	new_bin_card.last_value = last_value[2]
	new_bin_card.reference_doctype = doc.doctype
	new_bin_card.reference_docname = doc.name
	new_bin_card.ppu = last_value[1]
	new_bin_card.count = last_value[0] + item.pqty
	less = new_bin_card.count
	if less >= new_bin_card.ppu: # count and current has to change
		# Number of whole packing units now accumulated.
		unit = int(less / new_bin_card.ppu)
		if (less / new_bin_card.ppu) > 1:
			new_count = less % new_bin_card.ppu
		else:
			# NOTE(review): when the ratio is exactly 1 this equals the
			# modulo (0); the two branches appear equivalent — confirm.
			new_count = less - new_bin_card.ppu
		# set new values
		new_bin_card.count = new_count
		new_bin_card.current_value = new_count
		# set item values
		item.qty = unit
		item.ppu = new_bin_card.ppu
		# remove value from stock
		_remove_unit(item)
	new_bin_card.submit()
def _remove_unit(item):
	"""Issue the accumulated whole units of an item out of the
	"Stationaries - GCL" warehouse via a Material Issue Stock Entry.

	item: row carrying item_issued and qty (set by _create_bin_card).
	"""
	wh = "Stationaries - GCL"
	se = frappe.new_doc("Stock Entry")
	se.purpose = "Material Issue"
	se.title = "Material Issue"
	se.from_warehouse = wh
	# using the latest cost center for item
	last_cost_center = frappe.get_list(doctype="Stock Entry Detail",
		filters={"item_code": item.item_issued}, fields=['cost_center'],
		order_by='creation')
	d_cost_center = ""
	# NOTE(review): if no Stock Entry Detail exists for this item,
	# last_cost_center is empty and [0] raises IndexError — confirm intent.
	if last_cost_center[0].get('cost_center') != None:
		d_cost_center = last_cost_center[0].cost_center
	# NOTE(review): fields is a single string 'stock_uom, item_name' rather
	# than two list entries — verify this returns both columns as expected.
	it = frappe.get_list(doctype="Item", filters={"name": item.item_issued},
		fields=['stock_uom, item_name'])
	# set new item (rebinding the parameter name to the stock-entry row dict)
	item = dict(
		f_warehouse=wh,
		t_warehouse="",
		qty=item.qty,
		item_code=item.item_issued,
		item_name=it[0].item_name,
		uom=it[0].stock_uom,
		cost_center=d_cost_center
	)
	se.append('items', item)
	se.submit()
def _last_bin_card_value(item):
	"""Return the most recent bin-card state for an item.

	Returns a (count, ppu, current_value) row from the newest
	Stationaries Bin Card entry, or [0, item.ppu, 0] when no entry exists.
	"""
	# Use a parameterized query: the previous version interpolated
	# item.item_issued directly into the SQL string, which breaks on
	# quotes in item names and is an injection risk.
	last_value = frappe.db.sql(
		"SELECT `count`, ppu, current_value FROM `tabStationaries Bin Card` "
		"WHERE item = %s ORDER BY date DESC LIMIT 1",
		(item.item_issued,))
	if len(last_value):
		return last_value[0]
	return [0, item.ppu, 0]
6ed16212d719203dc9c8b385ee044edff5accf55 | 205 | py | Python | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 8 | 2019-04-09T21:13:05.000Z | 2021-11-23T17:25:18.000Z | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 21 | 2021-03-31T19:48:22.000Z | 2022-03-12T00:24:53.000Z | html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 11 | 2019-04-12T01:20:16.000Z | 2021-11-23T17:25:02.000Z | import time
def main(request, response):
    """Serve a module script after an artificial delay (default 500 ms)."""
    delay_ms = float(request.GET.first("ms", 500))
    time.sleep(delay_ms / 1E3);
    headers = [("Content-type", "text/javascript")]
    return headers, "export let delayedLoaded = true;"
| 25.625 | 84 | 0.663415 |
6ed320ed0b2ababd92ec43fa0249838c7f41091f | 1,119 | py | Python | build_scripts/yariv_shaders/yariv_to_hex.py | danilw/vulkan-shadertoy-launcher | 8a8a00f2f32d5c4dc64b625a9bdfe4adcdc5c4ec | [
"MIT"
] | 37 | 2020-03-16T00:21:03.000Z | 2022-03-04T23:30:30.000Z | build_scripts/yariv_shaders/yariv_to_hex.py | danilw/vulkan-shadertoy-launcher | 8a8a00f2f32d5c4dc64b625a9bdfe4adcdc5c4ec | [
"MIT"
] | 1 | 2020-06-04T12:29:24.000Z | 2021-03-14T21:31:55.000Z | example_game/shaders/yariv_to_hex.py | danilw/vulkan_shader_launcher | e41c5a9c0f35a72e12a5300f194e9faff83aa684 | [
"MIT"
] | 2 | 2021-03-27T06:28:53.000Z | 2021-09-05T20:29:36.000Z | import struct
import os
import sys
import subprocess
# Compile a GLSL shader to SPIR-V, pack it to .yariv, and dump the packed
# bytes as a C-style hex listing (*.hex), all under <inputdir>/bin/.
if len(sys.argv) != 2:
    print('Usage: python %s filename \n output is *.spv *.yariv and *.hex file \n' % sys.argv[0])
    quit()

inputfilepath = sys.argv[1]
outputname = os.path.basename(inputfilepath)
outdir = os.path.dirname(inputfilepath)
ginfile = os.path.basename(inputfilepath)
ooutdir = os.path.join(outdir, "bin")

# Tool names; Windows executables need the .exe suffix.
spirvcompiler = 'glslangValidator'
if os.name == 'nt':
    spirvcompiler += ".exe"
yariv_pack = './yariv_pack'
if os.name == 'nt':
    # BUG FIX: this branch previously appended ".exe" to spirvcompiler a
    # second time, leaving yariv_pack without its suffix on Windows.
    yariv_pack += ".exe"

if not os.path.isdir(ooutdir):
    os.mkdir(ooutdir, 0o0755)

spv_path = os.path.join(ooutdir, ginfile) + '.spv'
subprocess.call([spirvcompiler, '-V100', inputfilepath, '-o', spv_path])
subprocess.call([yariv_pack, spv_path])

# Convert the packed .yariv bytes into comma-separated hex values,
# 20 values per line. Context managers ensure both files are closed.
outfilepath = os.path.join(ooutdir, outputname + '.hex')
with open(os.path.join(ooutdir, ginfile) + '.yariv', 'rb') as infile, \
        open(outfilepath, 'w') as outfile:
    lineno = 1
    while 1:
        b = infile.read(1)
        if len(b) == 0:
            break
        d, = struct.unpack('B', b)
        outfile.write(hex(d) + ',')
        if lineno % 20 == 0:
            outfile.write('\n')
        lineno = lineno + 1
6ed529dee684fd60fac1d8d89d7b4a98c0265b6b | 3,672 | py | Python | gui/cli.py | HaoZeke/prest | eec6b34bde4e060f52a391662347918995ded245 | [
"BSD-3-Clause"
] | null | null | null | gui/cli.py | HaoZeke/prest | eec6b34bde4e060f52a391662347918995ded245 | [
"BSD-3-Clause"
] | null | null | null | gui/cli.py | HaoZeke/prest | eec6b34bde4e060f52a391662347918995ded245 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import logging
import argparse
import tqdm
import dataset.budgetary
from model import *
from test import MockWorker
from dataset import load_raw_csv
from gui.estimation import Options as EstimationOpts
from dataset.experimental_data import ExperimentalData
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class ProgressWorker:
    """Console progress reporter backed by a tqdm bar.

    The analysis code reports absolute progress values; this adapter keeps
    the previously reported value so it can feed tqdm the delta it expects.
    """

    def __init__(self):
        self.bar = None
        self.size = None
        self.last_value = 0

    def set_work_size(self, size : int) -> None:
        """Create the progress bar sized to the total amount of work."""
        self.size = size
        self.bar = tqdm.tqdm(total=size)

    def set_progress(self, value : int) -> None:
        """Advance the bar to the absolute position ``value``."""
        delta = value - self.last_value
        self.bar.update(delta)
        self.last_value = value
def budgetary_consistency(args):
    """Run the budgetary consistency analysis on a CSV dataset and export it."""
    data = dataset.budgetary.load_from_csv(args.fname_in)
    analysis = data.analysis_consistency(ProgressWorker(), None)
    variant = analysis._get_export_variant(args.export_variant)
    analysis.export(args.fname_out, '*.csv', variant, ProgressWorker())
def estimate(args):
    """Run model estimation over the input CSV dataset and export results.

    args fields used: fname_in, fname_out, models (list of model names or
    'all'), sequential, export_variant. Exits with status 1 and prints the
    model catalogue when no model was requested.
    """
    rows = load_raw_csv(args.fname_in)
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))

    # Catalogue of models the user can select by name.
    AVAILABLE_MODELS = [
        preorder(strict=True, total=True),
        preorder(strict=False, total=True),
        unattractive(strict=True, total=True),
        unattractive(strict=False, total=True),
        preorder(strict=True, total=False),
        preorder(strict=False, total=False),
        UndominatedChoice(strict=True),
        UndominatedChoice(strict=False),
        PartiallyDominantChoice(fc=True),
        PartiallyDominantChoice(fc=False),
        Overload(PreorderParams(strict=True, total=True)),
        Overload(PreorderParams(strict=False, total=True)),
        StatusQuoUndominatedChoice(),
        TopTwo(),
        SequentiallyRationalizableChoice(),
    ]

    if not args.models:
        print('Please specify a model using -m:')
        for m in AVAILABLE_MODELS:
            print(' ' + str(m))
        sys.exit(1)

    # BUG FIX: args.models is a list (argparse nargs='+'), so the old check
    # `args.models == 'all'` could never be true and `-m all` selected
    # no models at all. Test for membership instead.
    if 'all' in args.models:
        models = AVAILABLE_MODELS
    else:
        models = [
            m
            for m in AVAILABLE_MODELS
            if str(m) in args.models
        ]

    dsm = ds.analysis_estimation(ProgressWorker(), EstimationOpts(
        models=models,
        disable_parallelism=args.sequential,
    ))
    variant = dsm._get_export_variant(args.export_variant)
    dsm.export(args.fname_out, '*.csv', variant, MockWorker())
def main(args):
    """Dispatch to the handler for the requested subcommand."""
    handlers = {
        'estimate': estimate,
        'budgetary': budgetary_consistency,
    }
    try:
        handler = handlers[args.action]
    except KeyError:
        raise Exception(f'unknown action: {args.action}')
    handler(args)
if __name__ == '__main__':
    # Command-line interface: two subcommands ('estimate' and 'budgetary'),
    # each taking an input CSV, an output CSV, and an export-variant option.
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest='action', help='subcommands')
    sub.required = True  # force the user to pick a subcommand

    # Model estimation.
    apE = sub.add_parser('estimate', help='model estimation')
    apE.add_argument('fname_in', metavar='input.csv')
    apE.add_argument('fname_out', metavar='output.csv')
    apE.add_argument('-e', dest='export_variant',
        default='compact (human-friendly)',
        help='export variant [%(default)s]',
    )
    apE.add_argument('-s', '--sequential', default=False, action='store_true', help='disable paralellism')
    apE.add_argument('-m', dest='models', metavar='MODEL', nargs='+', help='model(s)')

    # Budgetary consistency analysis.
    apB = sub.add_parser('budgetary', help='budgetary consistency')
    apB.add_argument('fname_in', metavar='input.csv')
    apB.add_argument('fname_out', metavar='output.csv')
    apB.add_argument('-e', dest='export_variant',
        default='Summary',
        help='export variant [%(default)s]',
    )

    main(ap.parse_args())
6ed7f888ecc9bba08e6a0dcd86d63bb68f3e4ae3 | 12,156 | py | Python | KML.py | ncareol/PlanFlight | c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb | [
"BSD-3-Clause"
] | 1 | 2021-06-16T01:10:35.000Z | 2021-06-16T01:10:35.000Z | KML.py | NCAR/PlanFlight | c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb | [
"BSD-3-Clause"
] | null | null | null | KML.py | NCAR/PlanFlight | c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb | [
"BSD-3-Clause"
] | null | null | null | # file KML.py
#
"Produces a kml file from the track as defined in ModuleConstructor.Track."
# Strategy here is to produce two .kml files, one that references
# google.com and one that references acserver.raf.ucar.edu, the latter
# for use on the aircraft to avoid remote connections to google.com
# in flight. The latter is named PlanAC.kml, the former Plan.kml.
#
# This is awkward code that writes many things repeatedly where I'm sure
# there is an efficient way to do this. Someday should clean this up --
# but it works, so leave it for now. It was copied from a Google-Earth-
# constructed representation of the track, so I'm just taking all the
# kml that was in that file and duplicating it without understanding what
# I'm doing...
import Specs
WaypointNumber = 0  # running count of waypoints written so far
KMLFileName = 'Plan.kml'  # default name of the primary output file

# Previous waypoint position (lon, lat, alt), seeded with the takeoff
# location so the first leg is drawn from the departure point.
lonx = Specs.TakeoffLocation()[0]
latx = Specs.TakeoffLocation()[1]
galtx = Specs.TakeoffLocation()[2]
# header info for .kml file
def KMLHeader(KMLFileName):
"Opens the file and writes the required header."
# XXXX fix this
global WaypointNumber # changed here so needs to be global
KMLACFileName = KMLFileName.replace ('Plan', 'PlanAC')
print 'kml file name: ', KMLFileName, ', new name is: ', KMLACFileName
KMLFile = open(KMLFileName,'w')
KMLACFile = open(KMLACFileName,'w')
KMLFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
KMLFile.write("<kml xmlns=\"http://earth.google.com/kml/2.2\">\n")
KMLFile.write("<Document>\n")
# might need to replace .kml with .kmz here?
KMLFile.write("\t <name>"+KMLFileName+"</name>\n")
KMLFile.write("\t<StyleMap id=\"msn_triangle_copy1\">\n")
KMLFile.write("\t\t<Pair>\n")
KMLFile.write("\t\t\t<key>normal</key>\n")
KMLFile.write("\t\t\t<styleUrl>#sn_triangle_copy1"\
+ "</styleUrl>\n")
KMLFile.write("\t\t</Pair>\n")
KMLFile.write("\t\t<Pair>\n")
KMLFile.write("\t\t\t<key>highlight</key>\n")
KMLFile.write("\t\t\t<styleUrl>#sh_triangle_copy1"\
+"</styleUrl>\n")
KMLFile.write("\t\t</Pair>\n")
KMLFile.write("\t</StyleMap>\n")
KMLFile.write("\t <Style id=\"sh_triangle_copy1\">\n")
KMLFile.write("\t\t <IconStyle>\n")
KMLFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLFile.write("\t\t\t <scale>0.8</scale>\n")
KMLFile.write("\t\t\t <Icon>\n")
# KMLFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n")
KMLFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/placemark_square.png</href>\n")
KMLFile.write("\t\t\t </Icon>\n")
KMLFile.write("\t\t </IconStyle>\n")
KMLFile.write("\t\t <LabelStyle>\n")
KMLFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLFile.write("\t\t </LabelStyle>\n")
KMLFile.write("\t\t <LineStyle>\n")
KMLFile.write("\t\t\t <color>ff00aaff</color>\n")
KMLFile.write("\t\t\t <width>2</width>\n")
KMLFile.write("\t\t </LineStyle>\n")
KMLFile.write("\t\t <ListStyle>\n")
KMLFile.write("\t\t </ListStyle>\n")
KMLFile.write("\t </Style>\n")
KMLFile.write("\t <Style id=\"sn_triangle_copy1\">\n")
KMLFile.write("\t\t <IconStyle>\n")
KMLFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLFile.write("\t\t\t <scale>0.8</scale>\n")
KMLFile.write("\t\t\t <Icon>\n")
# KMLFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n")
KMLFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/placemark_square.png</href>\n")
KMLFile.write("\t\t\t </Icon>\n")
KMLFile.write("\t\t </IconStyle>\n")
KMLFile.write("\t\t <LabelStyle>\n")
KMLFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLFile.write("\t\t </LabelStyle>\n")
KMLFile.write("\t\t <LineStyle>\n")
KMLFile.write("\t\t\t <color>ff00aaff</color>\n")
KMLFile.write("\t\t\t <width>2</width>\n")
KMLFile.write("\t\t </LineStyle>\n")
KMLFile.write("\t\t <ListStyle>\n")
KMLFile.write("\t\t </ListStyle>\n")
KMLFile.write("\t </Style>\n")
KMLACFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
KMLACFile.write("<kml xmlns=\"http://earth.google.com/kml/2.2\">\n")
KMLACFile.write("<Document>\n")
# might need to replace .kml with .kmz here?
KMLACFile.write("\t <name>"+KMLACFileName+"</name>\n")
KMLACFile.write("\t<StyleMap id=\"msn_triangle_copy1\">\n")
KMLACFile.write("\t\t<Pair>\n")
KMLACFile.write("\t\t\t<key>normal</key>\n")
KMLACFile.write("\t\t\t<styleUrl>#sn_triangle_copy1"\
+ "</styleUrl>\n")
KMLACFile.write("\t\t</Pair>\n")
KMLACFile.write("\t\t<Pair>\n")
KMLACFile.write("\t\t\t<key>highlight</key>\n")
KMLACFile.write("\t\t\t<styleUrl>#sh_triangle_copy1"\
+"</styleUrl>\n")
KMLACFile.write("\t\t</Pair>\n")
KMLACFile.write("\t</StyleMap>\n")
KMLACFile.write("\t <Style id=\"sh_triangle_copy1\">\n")
KMLACFile.write("\t\t <IconStyle>\n")
KMLACFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLACFile.write("\t\t\t <scale>0.8</scale>\n")
KMLACFile.write("\t\t\t <Icon>\n")
# KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n")
KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/placemark_square.png</href>\n")
KMLACFile.write("\t\t\t </Icon>\n")
KMLACFile.write("\t\t </IconStyle>\n")
KMLACFile.write("\t\t <LabelStyle>\n")
KMLACFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLACFile.write("\t\t </LabelStyle>\n")
KMLACFile.write("\t\t <LineStyle>\n")
KMLACFile.write("\t\t\t <color>ff00aaff</color>\n")
KMLACFile.write("\t\t\t <width>2</width>\n")
KMLACFile.write("\t\t </LineStyle>\n")
KMLACFile.write("\t\t <ListStyle>\n")
KMLACFile.write("\t\t </ListStyle>\n")
KMLACFile.write("\t </Style>\n")
KMLACFile.write("\t <Style id=\"sn_triangle_copy1\">\n")
KMLACFile.write("\t\t <IconStyle>\n")
KMLACFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLACFile.write("\t\t\t <scale>0.8</scale>\n")
KMLACFile.write("\t\t\t <Icon>\n")
KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/placemark_square.png</href>\n")
# KMLACFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/triangle.png</href>\n")
KMLACFile.write("\t\t\t </Icon>\n")
KMLACFile.write("\t\t </IconStyle>\n")
KMLACFile.write("\t\t <LabelStyle>\n")
KMLACFile.write("\t\t\t <color>ff0000ff</color>\n")
KMLACFile.write("\t\t </LabelStyle>\n")
KMLACFile.write("\t\t <LineStyle>\n")
KMLACFile.write("\t\t\t <color>ff00aaff</color>\n")
KMLACFile.write("\t\t\t <width>2</width>\n")
KMLACFile.write("\t\t </LineStyle>\n")
KMLACFile.write("\t\t <ListStyle>\n")
KMLACFile.write("\t\t </ListStyle>\n")
KMLACFile.write("\t </Style>\n")
WaypointNumber = 0
return(KMLFile, KMLACFile)
def KMLclose(KMLFile, KMLACFile):
  "Writes the closing Document/kml tags to both plan files and closes them."
  for outfile in (KMLFile, KMLACFile):
    outfile.write("</Document>\n")
    outfile.write("</kml>\n")
    outfile.close()
def _write_track(outfile, points, style_url, absolute):
  "Writes one LineString Placemark for a track; optionally tags altitudes as absolute."
  outfile.write("\t <Placemark>\n")
  outfile.write("\t\t <styleUrl>"+style_url+"</styleUrl>\n")
  outfile.write("\t\t <LineString>\n")
  outfile.write("\t\t\t <tessellate>1</tessellate>\n")
  outfile.write("\t\t\t <coordinates>\n")
  for x in points:
    # Each point is a (longitude, latitude, altitude) triple.
    outfile.write ("\t\t\t\t " + format (x[0], 'f') + ','\
           + format (x[1], 'f') + ','\
           + format (x[2], 'f') + ' \n')
  outfile.write("\t\t\t </coordinates>\n")
  if absolute:
    outfile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
  outfile.write("\t\t </LineString>\n")
  outfile.write("\t </Placemark>\n")

def PlotPoints (KMLFile, KMLACFile, points):
  "Plot the set of points on the .kml file"
  # The two output files previously got near-identical hand-written blocks;
  # they differ only in style URL and the altitudeMode element (which the
  # aircraft-local file omits).
  _write_track(KMLFile, points, "#msn_triangle_copy1", True)
  _write_track(KMLACFile, points, "#sh_triangle_copy1", False)
def _write_waypoint(outfile, label, path_name, lon, lat, alt, lon0, lat0, alt0, absolute):
  "Writes a waypoint Placemark plus the leg Placemark from the previous point."
  outfile.write("\t <Placemark>\n")
  outfile.write("\t\t <name>"+label+"</name>\n")
  outfile.write("\t\t <description>WayPoint "\
      +format(round(alt/(100))*100.,'.0f')+' ft'+"</description>\n")
  outfile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n")
  outfile.write("\t\t <Point>\n")
  outfile.write("\t\t\t <coordinates>"+format(lon,'f')\
      +','+format(lat,'f')+','+format(alt,'f')+"</coordinates>\n")
  if absolute:
    outfile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
  outfile.write("\t\t </Point>\n")
  outfile.write("\t </Placemark>\n")
  # Line from the previous waypoint to this one.
  outfile.write("\t <Placemark>\n")
  outfile.write("\t\t <name>"+path_name+"</name>\n")
  outfile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n")
  outfile.write("\t\t <LineString>\n")
  outfile.write("\t\t\t <tessellate>1</tessellate>\n")
  outfile.write("\t\t\t <coordinates>\n")
  outfile.write("\t\t\t\t "+format(lon0,'f')+','+format(lat0,'f')+','\
      +format(alt0,'f')+' '+format(lon,'f')+','\
      +format(lat,'f')+','+format(alt,'f')+'\n')
  outfile.write("\t\t\t </coordinates>\n")
  if absolute:
    outfile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
  outfile.write("\t\t </LineString>\n")
  outfile.write("\t </Placemark>\n")

def PlotWaypoint (KMLFile, KMLACFile, wp, label='', symbol = 'triangle'):
  "Adds waypoint symbol to the .kml file for plotting on Google Earth etc."
  # The two output files previously received two near-identical hand-written
  # blocks; they differ only in the altitudeMode element, which the
  # aircraft-local file omits.
  global WaypointNumber, lonx, latx, galtx
  # lonx/latx/galtx hold the previous waypoint so a leg can be drawn
  # from it to this one; they are updated below.
  longitude = wp[0]
  latitude = wp[1]
  altitude = wp[2]
  WaypointNumber += 1
  if (label == ''): label="WP"+format(WaypointNumber,'d')
  path_name = "Path"+format(WaypointNumber,'d')
  _write_waypoint(KMLFile, label, path_name, longitude, latitude, altitude,
      lonx, latx, galtx, True)
  _write_waypoint(KMLACFile, label, path_name, longitude, latitude, altitude,
      lonx, latx, galtx, False)
  lonx = longitude
  latx = latitude
  galtx = altitude
  return()
| 48.430279 | 116 | 0.618707 |
6ed8e2ed4b2704d5c1be4866ec0fcfee29634e45 | 1,932 | py | Python | api.py | amagrabi/oeb-importer-proto | f93b1ac1834e10595c8d89e23cde1fadfc88d009 | [
"Apache-2.0"
] | null | null | null | api.py | amagrabi/oeb-importer-proto | f93b1ac1834e10595c8d89e23cde1fadfc88d009 | [
"Apache-2.0"
] | null | null | null | api.py | amagrabi/oeb-importer-proto | f93b1ac1834e10595c8d89e23cde1fadfc88d009 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to make API calls.
@author: amagrabi
"""
import requests
def login(client_id, client_secret, project_key, scope, host = 'EU'):
    '''Authenticate against the commercetools OAuth endpoint.

    Args:
        client_id: client_id.
        client_secret: client_secret.
        project_key: project_key (unused here; kept for interface symmetry).
        scope: Scope of access (read, write, etc.).
        host: 'EU', or 'US'/'NA' for North America.

    Returns:
        Parsed token response (dict) on success.

    Raises:
        Exception: for an unknown host or a non-200 token response.
    '''
    headers = { 'Content-Type' : 'application/x-www-form-urlencoded' }
    body = "grant_type=client_credentials&scope=%s" % scope
    if host == 'EU':
        url = "https://auth.sphere.io/oauth/token"
    elif host in ('US', 'NA'):
        # The docstring historically advertised 'NA' while the code only
        # accepted 'US'; accept both spellings.
        url = "https://auth.commercetools.co/oauth/token"
    else:
        raise Exception("Host is unknown (has to be 'EU', 'US' or 'NA').")
    auth = (client_id, client_secret)
    r = requests.post(url, data=body, headers=headers, auth=auth)
    # Use == rather than 'is' for the int comparison.
    if r.status_code == 200:
        return r.json()
    else:
        raise Exception("Failed to get an access token. Are you sure you have added them to config.py?")
def query(endpoint, project_key, auth, host = 'EU'):
    '''Fetch data for one API endpoint and return the parsed JSON.

    Args:
        endpoint: API endpoint (products, orders, etc.).
        project_key: project_key.
        auth: Token response from login() (must contain 'access_token').
        host: 'EU', or 'US'/'NA' for North America.

    Returns:
        Query output as a nested dict/list structure.

    Raises:
        Exception: if the host is not recognized.
    '''
    headers = { "Authorization" : "Bearer %s" % auth["access_token"] }
    if host == 'EU':
        url = "https://api.sphere.io/%s/%s" % (project_key, endpoint)
    elif host in ('US', 'NA'):
        # The docstring historically advertised 'NA' while the code only
        # accepted 'US'; accept both spellings (matches login()).
        url = "https://api.commercetools.co/%s/%s" % (project_key, endpoint)
    else:
        raise Exception("Host is unknown (has to be 'EU', 'US' or 'NA').")
    r = requests.get(url, headers=headers)
    data_json = r.json() # json-format as nested dict-/list-structure
    return data_json
6ed978e5e0ccff6910a7ff36922b214818cfc125 | 769 | py | Python | model/addParams.py | thegricean/modals | 9bb267a64542ee30e2770d79d9cd5d9cce890be8 | [
"MIT"
] | null | null | null | model/addParams.py | thegricean/modals | 9bb267a64542ee30e2770d79d9cd5d9cce890be8 | [
"MIT"
] | null | null | null | model/addParams.py | thegricean/modals | 9bb267a64542ee30e2770d79d9cd5d9cce890be8 | [
"MIT"
] | null | null | null | import sys, re, string, numpy
# Sweep a grid of (p-strong, p-mod, p-weak, cost) parameters (probabilities
# sum to 1) and emit one Church model file per grid point, each consisting
# of the parameter definitions followed by the template model.
p_strongs = numpy.arange(0.1, 0.9, 0.1)
costs = range(3, 10, 1)

# Read the template once instead of re-opening it for every grid point,
# and use context managers so every output file is flushed and closed
# (the output handle was previously never closed).
with open(sys.argv[1], "r") as template_file:
    template = template_file.read()

for p_s in p_strongs:
    p_meds = numpy.arange(0.1, 1-p_s, 0.1)
    for p_m in p_meds:
        p_w = 1 - p_s - p_m
        for cost in costs:
            filename = str(p_s) + "_" + str(p_m) + "_" + str(p_w) + "_" + str(cost) + ".church"
            with open("model_fits/" + filename, "w") as wF:
                wF.write("(define p-strong " + str(p_s) + ")\n" + "(define p-mod " + str(p_m) + ")\n" + "(define p-weak " + str(p_w) + ")\n" + "(define cost " + str(cost) + ")\n")
                wF.write(template)
            #print str(p_s) + "," + str(p_v) + "," + str(p_a) + "," + str(alpha)
6ed9b7c639af766ee8c222459a703935071b14fd | 1,625 | py | Python | shuttl/Storage.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | 2 | 2017-06-26T18:06:58.000Z | 2017-10-11T21:45:29.000Z | shuttl/Storage.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | shuttl/Storage.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | import boto3 as aws
import botocore
from shuttl import app
## Class for AWS S3 storage
## Class for AWS S3 storage
class Storage:
    """Thin class-level wrapper around AWS S3 for file objects.

    All operations are no-ops when app.config["TESTING"] is set, and
    missing-credentials errors are deliberately swallowed (best effort).
    """

    bucket = None  ##< the bucket the file belongs to (set lazily)
    s3 = aws.resource("s3")  ##< The s3 instance

    @classmethod
    def GetBucket(cls, bucketName):
        """Cache a handle to the named bucket; ignore missing credentials."""
        try:
            cls.bucket = cls.s3.Bucket(bucketName)
        except botocore.exceptions.NoCredentialsError:
            pass

    @classmethod
    def Upload(cls, fileObj):
        """Upload fileObj.filePath to S3 under the same key."""
        if app.config["TESTING"]:
            return
        if cls.bucket is None:
            cls.GetBucket("shuttl.io")
        try:
            return cls.bucket.upload_file(fileObj.filePath, fileObj.filePath)
        except botocore.exceptions.NoCredentialsError:
            pass

    @classmethod
    def Delete(cls, fileObj, bucketName="shuttl.io"):
        """Delete the object for fileObj.filePath, ignoring S3 errors."""
        if app.config["TESTING"]:
            return
        try:
            obj = cls.s3.Object(bucketName, fileObj.filePath)
            return obj.delete()
        # BUG FIX: `except A, B:` is a SyntaxError in Python 3 (and in
        # Python 2 it binds the exception instance to B rather than
        # catching B); catch both exception types with a tuple.
        except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError):
            pass

    @classmethod
    def Download(cls, fileObj, bucketName="shuttl.io"):
        """Download the object for fileObj.filePath to the local path.

        Raises FileNotFoundError when the object does not exist.
        """
        if app.config["TESTING"]:
            return
        try:
            obj = cls.s3.Object(bucketName, fileObj.filePath)
            return obj.download_file(fileObj.filePath)
        except botocore.exceptions.ClientError:
            raise FileNotFoundError("No such file or directory: {}".format(fileObj.filePath))
        except botocore.exceptions.NoCredentialsError:
            pass
6edba8dc188676342b22f5209b14d0e344cc743f | 21,822 | py | Python | util/process_results.py | blackoutjack/jamweaver | 31533556617e9190e565b2c9edd7e7752ce71f32 | [
"BSD-3-Clause"
] | 2 | 2015-08-14T12:34:27.000Z | 2015-10-15T04:07:17.000Z | util/process_results.py | blackoutjack/jamweaver | 31533556617e9190e565b2c9edd7e7752ce71f32 | [
"BSD-3-Clause"
] | null | null | null | util/process_results.py | blackoutjack/jamweaver | 31533556617e9190e565b2c9edd7e7752ce71f32 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
#
# This script is used to analyze, tabulate, and graph data generated by
# the JAM weaver and by JAMScript performance instrumentation. It was
# used to produce figures presented in the experimental results section
# of ``Efficient Runtime Enforcement Techniques for Policy Weaving,''
# published at FSE 2014.
#
import sys
MAJOR = sys.version_info[0]
import os
import re
import subprocess
from subprocess import PIPE
import shutil
import time
import imp
from optparse import OptionParser
#import warnings
import grapher
from resultsutil import AppStats, SourceVariant, Action, Section
def collect_results_from_file(filepath, results):
  "Appends the file's lines to |results| if the file holds profile output."
  lines = cfg.get_lines(filepath)
  # A file counts as results when any line begins with the profile marker.
  if any(ln.startswith(cfg.PROFILE_MARKER) for ln in lines):
    results.extend(lines)
def collect_results(filelist):
    """Gather profile-result lines from every path in |filelist| into one list.

    Asserts that each path exists; warns (but still returns an empty list)
    when no profile data was found anywhere.
    """
    gathered = []
    for path in filelist:
        assert os.path.exists(path), "Results source does not exist: %s" % path
        collect_results_from_file(path, gathered)
    if not gathered:
        cfg.warn("No results found")
    return gathered
#/collect_results
def collect_separate_results(filelist):
    """Like collect_results, but keep each file's lines in a separate list.

    Returns a list parallel to |filelist|: entry i holds the profile lines
    harvested from filelist[i] (possibly empty, in which case a warning is
    emitted but the empty list is still included).
    """
    per_file = []
    for path in filelist:
        file_lines = []
        assert os.path.exists(path), "Results source does not exist: %s" % path
        collect_results_from_file(path, file_lines)
        if not file_lines:
            cfg.warn("No results found for file: %s" % path)
        per_file.append(file_lines)
    return per_file
#/collect_separate_results
def parse_profile_header(ln):
    """Extract the action name from a profile header line.

    Headers have the form <PROFILE_MARKER><action><PROFILE_MARKER_TAIL>.
    An empty action name is reported via cfg.err but still returned.
    """
    assert ln.startswith(cfg.PROFILE_MARKER)
    head_len = len(cfg.PROFILE_MARKER)
    tail_len = len(cfg.PROFILE_MARKER_TAIL)
    action = ln[head_len:-tail_len]
    if not action:
        cfg.err("Action name is empty: %s" % ln)
    return action
#/parse_profile_header
def parse_section_header(ln):
    """Split a section header line into (section_name, extra_info).

    Lines look like <SECTION_MARKER><name><SECTION_MARKER>.  For the time
    section, anything following the section name (minus a ": " separator)
    is returned as extra info; for all other sections info is None.
    An empty section name is reported via cfg.err but still returned.
    """
    assert ln.startswith(cfg.SECTION_MARKER)
    marker_len = len(cfg.SECTION_MARKER)
    section = ln[marker_len:-marker_len]
    info = None
    if section.startswith(cfg.TIME_SECTION_NAME):
        info = section[len(cfg.TIME_SECTION_NAME):].lstrip(": ")
        section = cfg.TIME_SECTION_NAME
    if not section:
        cfg.err("Section name is empty: %s" % ln)
    return section, info
#/parse_section_header
def parse_stack(lines):
    """Parse a block of stack-marker lines into (appinfo, stackinfo).

    Each line is expected to look like "<marker> <frame> <file>:<lineno>".
    Returns cfg's file-info dict for the outermost frame that belongs to
    the application itself (skipping runtime-library frames) along with
    the ordered list of (file, lineno) pairs.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    appinfo = None
    stackinfo = []  # Will contain (file, lineno) pairs in order.
    for ln in lines:
        parts = ln.split(None)
        # This can happen for unknown stack frames.
        if len(parts) < 3: continue
        flln = parts[2]
        # Split "<file>:<lineno>" from the right so colons inside the
        # path/URL itself are preserved.
        srcfl, lineno = flln.rsplit(':', 1)
        stackinfo.append((srcfl, lineno))
    # Walk the stack from the outermost frame inward, looking for the
    # first frame belonging to the application proper.
    for i in range(0, len(stackinfo)):
        idx = len(stackinfo) - 1 - i
        lastsrc = stackinfo[idx][0]
        if lastsrc.startswith('http://'):
            lastsrc = lastsrc[7:]
        # Get the init time.
        # %%% Ugly string matching
        if lastsrc.startswith(cfg.TEST_DIR):
            lastsrc = lastsrc[len(cfg.TEST_DIR):]
        # Strip the test-harness URL wrapper to recover the raw script name.
        for marker in ['/test.php?script=', '/test.php?sources[]=']:
            begin = -1
            begin = lastsrc.find(marker)
            if begin > -1:
                begin += len(marker)
                lastsrc = lastsrc[begin:]
                end = lastsrc.find("&policy=")
                if end > -1:
                    lastsrc = lastsrc[:end]
                break
        appinfo = cfg.get_file_info(lastsrc)
        # Frames from the transaction library / auto-generated policy code
        # are not the application; keep walking inward past them.
        if appinfo['app'] != 'libTx' and appinfo['app'] != 'auto' and appinfo['app'] != 'autoextra':
            break
    return appinfo, stackinfo
#/parse_stack
def process_data(appstats, variant, action, section, dataline):
    """Split a '/'-delimited data line and feed the fields to |section|.

    Only |section| and |dataline| are used; the remaining arguments name
    the surrounding context and are accepted for interface symmetry.
    The first line handed to a section is assumed to carry column headers.
    """
    fields = dataline.split('/')
    section.addData(fields)
#/process_data
def parse_results(lines):
    """Parse raw profile-output lines into a dict of AppStats keyed by app.

    The input is a flat stream of marker-prefixed lines: profile headers
    name an action, stack markers identify the app/variant the following
    data belongs to, section headers open a data section, and every other
    non-junk, non-error line is a data row for the current section.

    Fix: the action-level error message previously read "Error inkey
    action ..." (typo); it now reads "Error in action ...".
    """
    curAppStats = None
    curVariant = None
    curActionDesc = None
    curAction = None
    curSection = None
    curBig = False
    stats = {}
    # Iterate by index so stack lines can be consumed via look-ahead below.
    idxes = iter(range(0, len(lines)))
    for idx in idxes:
        ln = lines[idx].strip()
        if ln == '':
            continue
        # Skip known junk output (harness/browser chatter).
        isjunk = False
        for junk in cfg.JUNK_MARKERS:
            if ln.startswith(junk):
                isjunk = True
        if isjunk:
            continue
        if ln.startswith(cfg.ERROR_MARKER):
            # Attach the error to the most specific context seen so far.
            errtxt = ln[len(cfg.ERROR_MARKER):]
            if curAction is not None:
                curAction.addError(errtxt)
                cfg.err("Error in action %s, variant %s, app %s: %s" % (curAction.description, curVariant.descriptor(), curAppStats.name, errtxt))
            elif curVariant is not None:
                curVariant.addError(errtxt)
                cfg.err("Error in variant %s, app %s: %s" % (curVariant.descriptor(), curAppStats.name, errtxt))
            elif curAppStats is not None:
                curAppStats.addError(errtxt)
                cfg.err("Error in app %s: %s" % (curAppStats.name, errtxt))
            else:
                cfg.err("Unassociated error: %s" % errtxt)
            continue
        if ln.startswith(cfg.PROFILE_MARKER):
            curActionDesc = parse_profile_header(ln)
        elif ln.startswith(cfg.SECTION_MARKER):
            sect, sectinfo = parse_section_header(ln)
            curSection = Section(sect, sectinfo)
            assert curAction is not None
            curAction.addSection(curSection, cfg.TIME_SECTION_NAME)
        elif ln.startswith(cfg.STACK_MARKER):
            stack = [ln]
            # Look ahead to get whole stack.
            nextidx = idx + 1
            while True:
                try:
                    nextln = lines[nextidx]
                    if nextln.startswith(cfg.STACK_MARKER):
                        stack.append(nextln)
                        next(idxes)  # Exhaust the line from the iterator.
                        nextidx += 1
                    else:
                        break
                except Exception as e:
                    cfg.err('While parsing stack: %s' % str(e))
                    break
            appinfo, stackInfo = parse_stack(stack)
            # Generate or retrieve the AppStats object.
            appname = appinfo['app']
            # Parse the body HTML to determine whether it's a "big" test case.
            # %%% Yikes!
            if curActionDesc == 'init' and appname.startswith(cfg.SMS2PREFIX):
                if ln.find('.big.body.html') > -1 or appname.endswith('.big'):
                    curBig = True
                else:
                    curBig = False
            # Collapse naming variations so all flavors of an app share stats.
            appkey = appname
            if appkey.startswith(cfg.SMS2PREFIX):
                if appkey.endswith('.big') or appkey.endswith('-big'):
                    appkey = appkey[:-4]
                if appkey.endswith('-newcall'):
                    appkey = appkey[:-8]
            elif appkey == 'jsqrcode-call':
                appkey = 'jsqrcode'
            if appkey in stats:
                curAppStats = stats[appkey]
            else:
                curAppStats = AppStats(appname)
                stats[appkey] = curAppStats
            descparts = appinfo['desc']
            # "profile" is expected for all of these data.
            try: descparts.remove('profile')
            except: cfg.err('No "profile" component of variant description: %r' % appinfo)
            curVariant = curAppStats.getVariant(descparts)
            # The app/variant info is assumed to come right after the action.
            if appname.startswith(cfg.SMS2PREFIX) and curActionDesc == "compute":
                if curBig:
                    curActionDesc = "bigcompute"
            curAction = curVariant.getAction(curActionDesc, stackInfo)
        else:
            # Anything else is a data row for the currently open section.
            assert curSection is not None, "No section info: %s" % ln
            process_data(curAppStats, curVariant, curAction, curSection, ln)
    return stats
#/parse_results
def print_all_times(stats):
    """Dump every app's name followed by each of its variants' stats."""
    for stat in stats.values():
        cfg.out(stat.name)
        for variant in stat.variants.values():
            cfg.out(str(variant))
def print_times(app, actdesc, timemap):
    """Print one "app/action variant:time ..." summary line via cfg.out."""
    pieces = ['%s/%s' % (app, actdesc)]
    for vardesc, tm in timemap.items():
        pieces.append('%s:%s' % (vardesc, str(tm)))
    cfg.out(' '.join(pieces))
def compare_sections(sect0, sect1, action, variant, app):
    """Compare the data rows of two Sections; return True iff identical.

    Rows missing from either side, and rows whose data differs, are
    reported via cfg.err.

    Fix: the message for rows present only in the 2nd section previously
    said "not found in 2nd section" (copy/paste error); such rows are
    missing from the *1st* section.
    """
    allsame = True
    section = sect0.name
    # Compare keys for the two dicts.
    keys0 = set(sect0.rows.keys())
    keys1 = set(sect1.rows.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Row '%s' not found in 2nd section '%s', action '%s', variant '%s' for app '%s'" % (a0, section, action, variant, app))
        allsame = False
    for a1 in addl1:
        cfg.err("Row '%s' not found in 1st section '%s', action '%s', variant '%s' for app '%s'" % (a1, section, action, variant, app))
        allsame = False
    common = keys0 & keys1
    for rowdesc in common:
        rowdata0 = sect0.rows[rowdesc]
        rowdata1 = sect1.rows[rowdesc]
        if rowdata0 != rowdata1:
            cfg.err("Data is inconsistent: %r != %r in section '%s', action '%s', variant '%s', app '%s'" % (rowdata0, rowdata1, section, action, variant, app))
            allsame = False
    return allsame
#/compare_sections
def compare_actions(act0, act1, variant, app):
    """Compare two Action objects section-by-section; return True iff equal.

    Sections present in only one action are reported via cfg.err; shared
    section names are compared pairwise (every section in the first list
    against every section in the second, since each name maps to a list).
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    allsame = True
    action = act0.description
    # Compare keys for the two dicts.
    keys0 = set(act0.sections.keys())
    keys1 = set(act1.sections.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Section '%s' not found for 2nd action '%s' in variant '%s' for app '%s'" % (a0, action, variant, app))
        allsame = False
    for a1 in addl1:
        cfg.err("Section '%s' not found for 1st action '%s' in variant '%s' for app '%s'" % (a1, action, variant, app))
        allsame = False
    common = keys0 & keys1
    for sectdesc in common:
        sectlist0 = act0.sections[sectdesc]
        sectlist1 = act1.sections[sectdesc]
        # Cross-product comparison of the two section lists.
        for sect0 in sectlist0:
            for sect1 in sectlist1:
                if not compare_sections(sect0, sect1, action, variant, app):
                    allsame = False
    return allsame
#/compare_actions
def compare_variants(var0, var1, app):
    """Compare two SourceVariants action-by-action; return True iff equal.

    Actions present in only one variant are reported via cfg.err; shared
    actions are compared with compare_actions.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    allsame = True
    variant = var0.descriptor()
    # Compare keys for the two dicts.
    keys0 = set(var0.actions.keys())
    keys1 = set(var1.actions.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Action '%s' not found in 2nd variant '%s' for app '%s'" % (a0, variant, app))
        allsame = False
    for a1 in addl1:
        cfg.err("Action '%s' not found in 1st variant '%s' for app '%s'" % (a1, variant, app))
        allsame = False
    common = keys0 & keys1
    for actdesc in common:
        act0 = var0.actions[actdesc]
        act1 = var1.actions[actdesc]
        if not compare_actions(act0, act1, variant, app):
            allsame = False
    return allsame
def compare_stats(statsobj0, statsobj1):
    """Compare two AppStats objects variant-by-variant; True iff identical.

    Variants present on only one side are reported via cfg.err and count
    as a mismatch; shared variants are compared with compare_variants.
    """
    app = statsobj0.name
    vardescs0 = set(statsobj0.variants.keys())
    vardescs1 = set(statsobj1.variants.keys())
    identical = True
    for missing in vardescs0 - vardescs1:
        cfg.err("Variant '%s' not found in 2nd variant list for app '%s'" % (missing, app))
        identical = False
    for missing in vardescs1 - vardescs0:
        cfg.err("Variant '%s' not found in 1st variant list for app '%s'" % (missing, app))
        identical = False
    for vardesc in vardescs0 & vardescs1:
        if not compare_variants(statsobj0.variants[vardesc], statsobj1.variants[vardesc], app):
            identical = False
    return identical
#/compare_stats
def compare_results(stats0, stats1):
    """Top-level structural comparison of two parsed results dicts.

    Every discrepancy is reported via cfg.err; a confirmation line is
    printed when the two data sets match exactly.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    allsame = True
    # Compare keys for the two dicts.
    keys0 = set(stats0.keys())
    keys1 = set(stats1.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Application '%s' not found in 2nd stats list" % a0)
        allsame = False
    for a1 in addl1:
        cfg.err("Application '%s' not found in 1st stats list" % a1)
        allsame = False
    common = keys0 & keys1
    for app in common:
        stats0obj = stats0[app]
        stats1obj = stats1[app]
        if not compare_stats(stats0obj, stats1obj):
            allsame = False
    if allsame:
        cfg.out("Statistics match exactly")
#/compare_results
def compare_actions_times(act0, act1, variant, app):
    """Compare the average running time of two Action objects.

    Prints which action is faster via cfg.out and returns
    (fast, diff, ratio) where fast is -1 if act0 is faster, 1 if act1 is
    faster, 0 if equal; diff is the absolute time difference; and ratio is
    faster-time / slower-time (always <= 1.0).

    Fix: removed an unused 'allsame' local that was assigned but never read.
    """
    action = act0.description
    t0 = act0.avg_time()
    t1 = act1.avg_time()
    diff = t0 - t1
    if diff < 0.0:
        fast = -1
        faststr = '0 is faster'
        diff = -diff
        ratio = t0 / t1
    elif diff > 0.0:
        fast = 1
        faststr = '1 is faster'
        ratio = t1 / t0
    else:
        fast = 0
        faststr = 'Same time'
        ratio = 1.0
    cfg.out("%s for app '%s', variant '%s', action '%s', difference: %.2f, ratio: %.2f" % (faststr, app, variant, action, diff, ratio))
    return (fast, diff, ratio)
#/compare_actions_times
def compare_variants_times(var0, var1, app):
    """Compare per-action average times of two matching SourceVariants.

    Actions present on only one side are reported via cfg.err; shared
    actions are timed against each other via compare_actions_times.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    allsame = True  # NOTE(review): never updated or returned.
    variant = var0.descriptor()
    # Compare keys for the two dicts.
    keys0 = set(var0.actions.keys())
    keys1 = set(var1.actions.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Action '%s' not found in 2nd variant '%s' for app '%s'" % (a0, variant, app))
    for a1 in addl1:
        cfg.err("Action '%s' not found in 1st variant '%s' for app '%s'" % (a1, variant, app))
    common = keys0 & keys1
    for actdesc in common:
        act0 = var0.actions[actdesc]
        act1 = var1.actions[actdesc]
        fast, diff, ratio = compare_actions_times(act0, act1, variant, app)
        # %%% Do something with these
#/compare_variants_times
def compare_stats_times(statsobj0, statsobj1):
    """Compare per-variant timings for one app across two result sets.

    Variants present on only one side are reported via cfg.err; shared
    variants are forwarded to compare_variants_times.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    app = statsobj0.name
    # Compare keys for the two dicts.
    keys0 = set(statsobj0.variants.keys())
    keys1 = set(statsobj1.variants.keys())
    addl0 = keys0 - keys1
    addl1 = keys1 - keys0
    for a0 in addl0:
        cfg.err("Variant '%s' not found in 2nd variant list for app '%s'" % (a0, app))
    for a1 in addl1:
        cfg.err("Variant '%s' not found in 1st variant list for app '%s'" % (a1, app))
    common = keys0 & keys1
    for vardesc in common:
        variant0 = statsobj0.variants[vardesc]
        variant1 = statsobj1.variants[vardesc]
        compare_variants_times(variant0, variant1, app)
#/compare_stats_times
def compare_times(stats0, stats1):
    """Report timing differences between two parsed stats dicts, app by app.

    Apps present in only one dict are reported via cfg.err and skipped;
    apps present in both are compared with compare_stats_times.
    """
    apps0 = set(stats0)
    apps1 = set(stats1)
    for missing in apps0 - apps1:
        cfg.err("Application '%s' not found in 2nd stats list" % missing)
    for missing in apps1 - apps0:
        cfg.err("Application '%s' not found in 1st stats list" % missing)
    for app in apps0 & apps1:
        compare_stats_times(stats0[app], stats1[app])
#/compare_times
# Get a list of actions recorded in the given |AppStats| object.
def load_actions(stat):
    """Return the distinct action names recorded across |stat|'s variants.

    Order of first appearance is preserved (duplicate names are filtered).
    """
    seen = []
    for variant in stat.variants.values():
        for actdesc in variant.actions:
            if actdesc not in seen:
                seen.append(actdesc)
    return seen
# Get a filtered list of variants for the given application/action.
# An exception may be thrown if some variants are not available.
def load_variants(stat, actdesc):
    """Return {variant-desc: SourceVariant} for every configured variant.

    Raises Exception if any variant listed in cfg.VARIANTS is missing from
    |stat| or lacks data for |actdesc|, so callers get all-or-nothing data.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    app = stat.name
    variants = {}
    for vardesc in cfg.VARIANTS:
        if vardesc in stat.variants:
            variant = stat.variants[vardesc]
            if actdesc not in variant.actions:
                raise Exception("Data not available for action: %s %s %s" % (app, vardesc, actdesc))
            variants[vardesc] = variant
        else:
            raise Exception("Variant data not available: %s %s" % (app, vardesc))
    assert len(variants) == len(cfg.VARIANTS)
    return variants
def print_time_comparison(stats):
    """Print, per app/action, the average time under each source variant.

    Fixes two bugs in the original:
    - `stats.keys()` followed by `.sort()` raises AttributeError on
      Python 3 (dict views have no .sort()); now uses sorted().
    - The error message was missing the '%' operator
      (`'...' (args)` called the string object, raising TypeError).
    """
    for app in sorted(stats.keys()):
        stat = stats[app]
        actdescs = load_actions(stat)
        for actdesc in actdescs:
            try:
                variants = load_variants(stat, actdesc)
                times = {}
                for vardesc, variant in variants.items():
                    times[vardesc] = variant.actions[actdesc].avg_time()
                print_times(app, actdesc, times)
            except Exception as e:
                cfg.err('Time comparison for %s/%s: %s' % (app, actdesc, str(e)))
def updateMinMax(minmax, tm, sub, desc):
    """Fold time |tm| observed for |desc| into the |sub| bucket of |minmax|.

    Each bucket tracks 'mintime'/'minapp' and 'maxtime'/'maxapp'; the app
    description is recorded alongside any new extreme.  Mutates |minmax|
    in place.
    """
    bucket = minmax[sub]
    if tm < bucket['mintime']:
        bucket['mintime'] = tm
        bucket['minapp'] = desc
    if tm > bucket['maxtime']:
        bucket['maxtime'] = tm
        bucket['maxapp'] = desc
# Create graphs.
def generate_graphs(stats):
    """Build the overhead graphs from |stats| and report timing extremes.

    Walks every enabled app/action, collects the average time under each
    source variant, sanity-checks the numbers (warning about outliers),
    hands the rows to the grapher module, and prints min/max times.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    timelist = []
    apps = list(stats.keys())
    apps.sort()
    # Min/max average times observed, tracked separately for 'init' actions
    # and for all actions overall, with the app/action/variant that hit each.
    minmax = {
        'overall': {
            'mintime': float('inf'),
            'maxtime': 0.0,
            'minapp': None,
            'maxapp': None,
        },
        'init': {
            'mintime': float('inf'),
            'maxtime': 0.0,
            'minapp': None,
            'maxapp': None,
        }
    }
    for app in apps:
        if app in cfg.DISABLED:
            continue
        stat = stats[app]
        actdescs = load_actions(stat)
        for actdesc in actdescs:
            # Optionally group init and load times together
            if cfg.INCLUDE_INIT and actdesc == 'init': continue
            if cfg.SUPPRESS_SMS2_LOAD and actdesc == 'load' and app.startswith(cfg.SMS2PREFIX): continue
            variants = load_variants(stat, actdesc)
            times = {}
            times['action'] = actdesc
            for vardesc, variant in variants.items():
                tm = variant.actions[actdesc].avg_time()
                desc = app + '/' + actdesc + '/' + vardesc
                # Optionally add on policy.js and libTx.js load time.
                if cfg.INCLUDE_INIT:
                    if actdesc == 'load' and vardesc != 'input':
                        if 'init' in variant.actions:
                            inittm = variant.actions['init'].avg_time()
                            updateMinMax(minmax, inittm, 'init', desc)
                            tm += inittm
                        else:
                            cfg.warn("No init time for load: %s/%s" % (app, vardesc))
                    elif actdesc == 'init' and vardesc != 'input':
                        # NOTE(review): this branch appears unreachable ('init'
                        # is skipped above when cfg.INCLUDE_INIT) and 'inittm'
                        # would be unbound here if it ever ran -- confirm.
                        updateMinMax(minmax, inittm, 'init', desc)
                times[vardesc] = tm
                updateMinMax(minmax, tm, 'overall', desc)
            # Check for zero/negative times.
            ok = True
            # NOTE(review): the loop variable 'time' shadows the imported
            # 'time' module within this function.
            for vardesc, time in times.items():
                if vardesc == 'action': continue
                if time <= 0:
                    cfg.err("NON-POSITIVE TIME: %s/%s/%s/%.2f" % (app, actdesc, vardesc, time))
                    ok = False
            if not ok: continue
            # Check for cases where woven performs worse than modular
            # on long-duration base case.
            time0 = float(times[cfg.VARIANTS[0]])
            time1 = float(times[cfg.VARIANTS[1]])
            time2 = float(times[cfg.VARIANTS[2]])
            disp0 = cfg.VARIANT_DISPLAY[0].upper()
            disp1 = cfg.VARIANT_DISPLAY[1].upper()
            disp2 = cfg.VARIANT_DISPLAY[2].upper()
            if time2 > time1 and time0 > 100.0:
                cfg.warn("LONG-DURATION OUTLIER: %s/%s/%.2f/%.2f/%.2f" % (app, actdesc, time0, time1, time2))
            # NOTE(review): 'and' binds tighter than 'or', so the init
            # exclusion below guards only the time0 test -- confirm intended.
            if actdesc != 'init' and time0 < 0.1 or time1 < 0.1 or time2 < 0.1:
                cfg.warn("TINY TIME: %s/%s/%.2f/%.2f/%.2f" % (app, actdesc, time0, time1, time2))
            # Check for cases where secure code is faster than unprotected.
            if actdesc != 'init' and time2 / time0 <= 0.90:
                cfg.warn("%s UNDERLIER: %s/%s %.2f" % (disp2, app, actdesc, time2 / time0))
            if actdesc != 'init' and time1 / time0 < 0.90:
                cfg.warn("%s UNDERLIER: %s/%s %.2f" % (disp1, app, actdesc, time1 / time0))
            if time2 / time1 > 1.5:
                cfg.warn("LARGE %s/%s RATIO: %s/%s/%.2f/%.2f" % (disp2, disp1, app, actdesc, time2 / time1, time0))
            if actdesc != 'init' and actdesc != 'load' and time2 / time0 > 5:
                cfg.warn("LARGE %s/%s RATIO: %s/%s/%.2f" % (disp2, disp0, app, actdesc, time2 / time0))
            timelist.append(times)
    grapher.modularVsWovenOverheadByOriginal(timelist, cfg.VARIANTS, cfg.VARIANT_DISPLAY)
    #grapher.modularVsWovenOverhead(timelist, False)
    grapher.modularVsWovenOverhead(timelist, True)
    grapher.wovenOverheadByOriginal(timelist, cfg.VARIANTS, cfg.VARIANT_DISPLAY)
    cfg.out("MIN INIT TIME: %s/%s" % (minmax['init']['mintime'], minmax['init']['minapp']))
    cfg.out("MAX INIT TIME: %s/%s" % (minmax['init']['maxtime'], minmax['init']['maxapp']))
    cfg.out("MIN ACTION TIME: %s/%s" % (minmax['overall']['mintime'], minmax['overall']['minapp']))
    cfg.out("MAX ACTION TIME: %s/%s" % (minmax['overall']['maxtime'], minmax['overall']['maxapp']))
def generate_output(stats):
    """Emit the configured reports for |stats|; currently only the graphs."""
    #print_all_times(stats)
    #print_time_comparison(stats)
    generate_graphs(stats)
#/generate_output
def main():
    """Entry point: parse options, gather results files, run analyses.

    -a t: tabulate/graph fine- vs. coarse-grained runtime (default)
    -a c: compare profile data across exactly two results files
    -a m: compare running times across exactly two results files
    -a a: all of the above
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    parser = OptionParser(usage="%prog results.txt")
    parser.add_option('-c', '--config', action='store', default=os.path.join(os.path.dirname(__file__), 'resultsconfig.py'), dest='config', help='configuration.py file')
    parser.add_option('-v', '--verbose', action='store_true', default=False, dest='verbose', help='generate verbose output')
    parser.add_option('-a', '--analysis', action='store', default='t', dest='analysis', help='t: fine-grained vs. coarsed-grain runtime; c: compare profile information across results files; m: compare running time across results files; a: all')
    opts, args = parser.parse_args()
    #warnings.simplefilter('error', UserWarning)
    # The config module is loaded dynamically and shared as a module-level
    # global with every other function in this script.
    global cfg
    cfg = imp.load_source("cfg", opts.config)
    global VERBOSE
    VERBOSE = opts.verbose
    analysis = opts.analysis
    if analysis not in ['a', 't', 'c', 'm']:
        parser.error("Invalid analysis identifier: %s" % analysis)
    if len(args) == 0:
        # No explicit files: use everything in the configured results dir.
        assert os.path.exists(cfg.RESULTS_SOURCE), "Default results path %s doesn't exist." % cfg.RESULTS_SOURCE
        resultsfiles = [os.path.join(cfg.RESULTS_SOURCE, fl) for fl in os.listdir(cfg.RESULTS_SOURCE)]
    else:
        resultsfiles = []
        for resfile in args:
            if not os.path.exists(resfile):
                cfg.warn("results file %s doesn't exist." % resfile)
            if os.path.isdir(resfile):
                # Doesn't recurse.
                resultsfiles.extend([os.path.join(resfile, filename) for filename in os.listdir(resfile)])
            else:
                resultsfiles.append(resfile)
    if len(resultsfiles) == 0:
        parser.error("No results files found")
    if analysis == 'a' or analysis == 't':
        # Single combined analysis over all files.
        resultstxt = collect_results(resultsfiles)
        stats = parse_results(resultstxt)
        generate_output(stats)
    if analysis in ['a', 'c', 'm']:
        # Pairwise comparisons require exactly two results files.
        if len(resultsfiles) != 2: parser.error("Size of results list != 2")
        resultslist = collect_separate_results(resultsfiles)
        stats0 = parse_results(resultslist[0])
        stats1 = parse_results(resultslist[1])
        if analysis in ['a', 'c']:
            compare_results(stats0, stats1)
        if analysis in ['a', 'm']:
            compare_times(stats0, stats1)
#/main
if __name__ == "__main__":
    main()
| 32.091176 | 242 | 0.645129 |
6edbba4a74356991de5aa46330579ce20ab0026e | 245 | py | Python | Controller/hone_control.py | pupeng/hone | 8fb2618a51478049c73158f1d54e7165a37dffcf | [
"BSD-3-Clause"
] | 5 | 2017-02-18T12:39:13.000Z | 2021-03-29T09:21:58.000Z | Controller/hone_control.py | pupeng/hone | 8fb2618a51478049c73158f1d54e7165a37dffcf | [
"BSD-3-Clause"
] | null | null | null | Controller/hone_control.py | pupeng/hone | 8fb2618a51478049c73158f1d54e7165a37dffcf | [
"BSD-3-Clause"
] | 7 | 2015-08-12T10:08:21.000Z | 2018-08-30T12:55:25.000Z | # Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# hone_control.py
# a placeholder file for any control jobs HONE runtime generates
| 35 | 72 | 0.767347 |