hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37b92a6bf223f5761c53562b6e0bd7327e57e2bf | 2,373 | py | Python | uiworld.py | touilleMan/trimps | 603335009a1768f104e4ed317d24a75579f1aeb1 | [
"WTFPL"
] | 2 | 2021-11-08T02:46:09.000Z | 2021-11-08T09:41:00.000Z | uiworld.py | touilleMan/trimps | 603335009a1768f104e4ed317d24a75579f1aeb1 | [
"WTFPL"
] | null | null | null | uiworld.py | touilleMan/trimps | 603335009a1768f104e4ed317d24a75579f1aeb1 | [
"WTFPL"
] | null | null | null | from PyQt4 import QtCore, QtGui
class UiWorld(QtGui.QWidget):
"""Qt widget representing the world
"""
def __init__(self, parent):
super(UiWorld, self).__init__(parent)
self.__last_point = None
self.image = QtGui.QImage(800, 600, QtGui.QImage.Format_ARGB32)
self.image.fill(QtCore.Qt.white)
self.pen = QtGui.QPen(QtCore.Qt.black, 10, QtCore.Qt.SolidLine)
# Create a timer to refresh the image
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.update)
        self.timer.start(1000 // 60)  # ~60 fps; QTimer.start() expects an int interval in ms
self.robot = None
def clear(self):
self.image.fill(QtCore.Qt.white)
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
qp.drawImage(e.rect(), self.image, e.rect())
if self.robot is not None:
# Rotate the robot sprite before drawing it
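            # (translate to the sprite's center, rotate, then translate back, so the
            # rotation pivots on the sprite's center rather than its top-left corner)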
rot_sprite = QtGui.QPixmap(self.robot.sprite.size())
rot_sprite.fill(QtCore.Qt.transparent)
rp = QtGui.QPainter()
rp.begin(rot_sprite)
rp.translate(self.robot.half_width, self.robot.half_height)
rp.rotate(-self.robot.rotation)
rp.translate(-self.robot.half_width, -self.robot.half_height)
rp.drawPixmap(0, 0, self.robot.sprite)
rp.end()
qp.drawPixmap(self.robot.img_x(),
self.robot.img_y(),
rot_sprite)
qp.end()
def mousePressEvent(self, e):
# Draw line on left click
if e.button() == QtCore.Qt.LeftButton:
self.__last_point = e.pos()
self.__drawto(e.pos())
        if e.button() == QtCore.Qt.RightButton and self.robot is not None:
# Move the robot on right click
self.robot.pos_x = e.pos().x()
self.robot.pos_y = e.pos().y()
def mouseMoveEvent(self, e):
# Draw line on left click
if e.buttons() == QtCore.Qt.LeftButton:
self.__drawto(e.pos())
        if e.buttons() == QtCore.Qt.RightButton and self.robot is not None:
# Move the robot on right click
self.robot.pos_x = e.pos().x()
self.robot.pos_y = e.pos().y()
def __drawto(self, pos):
painter = QtGui.QPainter(self.image)
painter.setPen(self.pen)
painter.drawLine(self.__last_point, pos)
self.__last_point = pos
self.update()
| 35.954545 | 73 | 0.581542 | 309 | 2,373 | 4.333333 | 0.313916 | 0.100822 | 0.038835 | 0.033607 | 0.348021 | 0.31068 | 0.271845 | 0.271845 | 0.271845 | 0.231516 | 0 | 0.01135 | 0.294564 | 2,373 | 65 | 74 | 36.507692 | 0.78853 | 0.094395 | 0 | 0.196078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.019608 | 0 | 0.156863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37b9584c00e84d083226a073ff4103362f7643d1 | 7,215 | py | Python | src/kblue/rfcomm.py | tulare/kblue | 731aa3c4600f3b7c0e53efb51075335ca266b665 | [
"MIT"
] | null | null | null | src/kblue/rfcomm.py | tulare/kblue | 731aa3c4600f3b7c0e53efb51075335ca266b665 | [
"MIT"
] | null | null | null | src/kblue/rfcomm.py | tulare/kblue | 731aa3c4600f3b7c0e53efb51075335ca266b665 | [
"MIT"
] | null | null | null | # -*- encoding: utf8 -*-
import logging
import re
import socket
import subprocess
__all__ = [ 'RFComm' ]
class RFCommNotConnected(Exception) :
    pass
class RFCommError(Exception) :
    pass
class RFComm :
def __init__(self, bdaddr, port, timeout=5, encoding='utf-8') :
self.logger = logging.getLogger(self.__class__.__name__)
self._sock = None
self.bdaddr = bdaddr
self.port = port
self.timeout = timeout
self.encoding = encoding
def __enter__(self) :
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback) :
self.close()
        return False  # do not silently swallow exceptions raised inside the with-block
@property
def bdaddr(self) :
return self._bdaddr
@bdaddr.setter
def bdaddr(self, bdaddr) :
self._bdaddr = bdaddr
@property
def port(self) :
return self._port
@port.setter
def port(self, port) :
self._port = port
@property
def timeout(self) :
return self._timeout
@timeout.setter
def timeout(self, timeout) :
self._timeout = timeout
if self.connected :
self._sock.settimeout(self._timeout)
@property
def encoding(self) :
return self._encoding
@encoding.setter
def encoding(self, encoding) :
self._encoding = encoding
@property
def connected(self) :
return self._sock is not None
def connect(self) :
if not self.connected :
self._create_socket()
self.logger.debug('connect to {} port={}'.format(self._bdaddr, self._port))
self._sock.connect((self._bdaddr, self._port))
self.logger.debug('connected')
def close(self) :
if self.connected :
self.logger.debug('close socket')
self._sock.close()
self._sock = None
def recv(self, text=False) :
if not self.connected :
raise RFCommNotConnected()
try :
data = self._sock.recv(1024)
self.logger.debug('receive {} bytes, text={}'.format(len(data), text))
if text :
return data.decode(self._encoding)
return data
except (ConnectionResetError, ConnectionAbortedError) as e :
self.logger.error('{}'.format(e))
self.close()
raise RFCommError(e)
except socket.timeout as e :
self.logger.error('{}'.format(e))
raise RFCommError(e)
except BlockingIOError as e :
self.logger.error('{}'.format(e))
raise RFCommError(e)
def send(self, data) :
if not self.connected :
raise RFCommNotConnected()
try :
if isinstance(data, str) :
data = data.encode(self._encoding)
self.logger.debug('send {} bytes : {}'.format(len(data), data))
return self._sock.send(data)
except (ConnectionResetError, ConnectionAbortedError) as e :
self.logger.error('{}'.format(e))
self.close()
raise RFCommError(e)
except socket.timeout as e :
self.logger.error('{}'.format(e))
raise RFCommError('timeout')
@property
def services(self) :
command = [ 'sdptool', 'browse', '--l2cap', self.bdaddr ]
output = subprocess.check_output(command, universal_newlines=True)
service = None
services = {}
for line in output.splitlines() :
            m = re.search(r'Service Name:\s+(.+)', line)
if m :
service = m.group(1)
            m = re.search(r'Channel:\s+([0-9]+)', line)
if m and service :
services[service] = int(m.group(1))
return services
def _create_socket(self) :
self._sock = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
self._sock.settimeout(self._timeout)
self.logger.debug('create {}'.format(self._sock))
def send_to_gateway(self, text=False) :
self.timeout = 5
self.close()
self.connect()
self.timeout = 1
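        # HFP (Hands-Free Profile) service-level connection setup: AT+BRSF exchanges
        # supported-feature bitmasks, AT+CIND queries the gateway's indicators, and
        # AT+CMER enables indicator event reporting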
self.send(b'AT+BRSF=023\r')
print(self.recv(text))
print(self.recv(text))
self.send(b'AT+CIND=?\r')
print(self.recv(text))
print(self.recv(text))
self.send(b'AT+CIND?\r')
print(self.recv(text))
print(self.recv(text))
self.send(b'AT+CMER=3,0,0,1\r')
print(self.recv(text))
self.send(b'AT+CHLD=?\r')
print(self.recv(text))
print(self.recv(text))
self.send(b'AT+CMEE=1\r')
print(self.recv(text))
self.send(b'AT+CLIP=1\r')
print(self.recv(text))
self.send(b'AT+CCWA=1\r')
print(self.recv(text))
self.send(b'AT+NREC=0\r')
print(self.recv(text))
self.send(b'AT+VGS=15\r')
print(self.recv(text))
self.send(b'AT+VGM=15\r')
print(self.recv(text))
self.send(b'AT+XAPL=ABCD-1234-0100,10\r')
print(self.recv(text))
self.send(b'AT+IPHONEACCEV=1,1,4\r')
print(self.recv(text))
def recv_from_gateway(self, text=False) :
self.timeout = 5
self.close()
self.connect()
self.timeout = 1
print(self.recv(text))
self.send(b'\r\n+BRSF: 871\r\n')
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(
b'\r\n+CIND: ("call",(0,1)),("callsetup",(0-3)),("service",(0-1)),("signal",(0-5)),("roam",(0,1)),("battchg",(0-5)),("callheld",(0-2))\r\n'
)
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\n+CIND: 0,0,0,0,0,1,0\r\n')
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\n+CHLD: (0,1,2,3)\r\n')
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
print(self.recv(text))
self.send(b'\r\nOK\r\n')
"""
> b'AT+BRSF=023\r'
< b'\r\n+BRSF: 871\r\n'
< b'\r\nOK\r\n'
> b'AT+CIND=?\r'
< b'\r\n+CIND: ("call",(0,1)),("callsetup",(0-3)),("service",(0-1)),("signal",(0-5)),("roam",(0,1)),("battchg",(0-5)),("callheld",(0-2))\r\n'
< b'\r\nOK\r\n'
> b'AT+CIND?\r'
< b'\r\n+CIND: 0,0,0,0,0,1,0\r\n'
< b'\r\nOK\r\n'
> b'AT+CMER=3,0,0,1\r'
< b'\r\nOK\r\n'
> b'AT+CHLD=?\r'
< b'\r\n+CHLD: (0,1,2,3)\r\n'
< b'\r\nOK\r\n'
> b'AT+CMEE=1\r'
< b'\r\nOK\r\n'
> b'AT+CLIP=1\r'
< b'\r\nOK\r\n'
> b'AT+CCWA=1\r'
< b'\r\nOK\r\n'
> b'AT+NREC=0\r'
< b'\r\nOK\r\n'
> b'AT+VGS=15\r'
< b'\r\nOK\r\n'
> b'AT+VGM=15\r'
< b'\r\nOK\r\n'
> b'AT+XAPL=ABCD-1234-0100,10\r'
< b'\r\nOK\r\n'
> b'AT+IPHONEACCEV=1,1,4\r'
< b'\r\nOK\r\n'
"""
| 27.643678 | 151 | 0.537076 | 1,028 | 7,215 | 3.705253 | 0.131323 | 0.022053 | 0.070885 | 0.133893 | 0.551851 | 0.503807 | 0.501969 | 0.465739 | 0.446836 | 0.39958 | 0 | 0.024845 | 0.285932 | 7,215 | 260 | 152 | 27.75 | 0.71448 | 0.003049 | 0 | 0.475936 | 0 | 0.010695 | 0.107115 | 0.026856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106952 | false | 0.010695 | 0.02139 | 0.026738 | 0.203209 | 0.160428 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37bb688f46ffd73cd913e78b3be6c48784ad0dc1 | 10,517 | py | Python | SplineMeasurement/engine/vtk_widgets/ro_psi_spline_widget.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | SplineMeasurement/engine/vtk_widgets/ro_psi_spline_widget.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | SplineMeasurement/engine/vtk_widgets/ro_psi_spline_widget.py | TiNezlobinsky/SplineLV | 7281bca555f8eda802091cfbe3687b8ab59bfa4b | [
"MIT"
] | null | null | null | from vtk import vtkSplineWidget, vtkLineSource, vtkActor, vtkPolyDataMapper
from numpy import linspace
from math import pi, asin, sqrt, sin
from scipy import interpolate
import numpy as np
# CODE REGIONS:
# 1) Spline computing
# 2) Spline redrawing
# 3) Setters
# 4) Getters
# 5) Coordinates transformation
# 6) Handles management
# 7) Neighboring spline connection
class RoPsiSplineWidget(vtkSplineWidget):
"""
Interactive spline for contouring left ventricle wall.
Handles (spline nodes) used to manage spline interactively
"""
def __init__(self, side, spline_type):
# side = 'right' or 'left'
# spline_type = 'endo' or 'epi' only
if side == "left":
self._sign = -1
else:
self._sign = 1
self._spline_type = spline_type
self.psi_interval_points = 60
self.ro_nodes_array = np.array([])
self.psi_nodes_array = np.array([])
self.z_nodes_array = np.array([])
self.ro_array = np.array([])
self.psi_array = np.array([])
self.output_spline_points = []
self.line_source_list = []
self.actor_list = []
self.mapper_list = []
self._handles_position_list = []
self._neighboring_spline = None
self.AddObserver("InteractionEvent", self._vtk_observer_remember_handles_position)
self.AddObserver("InteractionEvent", self._vtk_observer_compute)
self.AddObserver("InteractionEvent", self._vtk_observer_move_neighboring_spline_handle)
def Off(self):
vtkSplineWidget.Off(self)
for actor in self.actor_list:
self.render.RemoveActor(actor)
self.GetInteractor().Initialize()
# SPLINE COMPUTING:
def _compute_ro_psi_spline(self):
self.psi_nodes_array = self.psi_nodes_array[::-1] # reverse array
self.ro_nodes_array = self.ro_nodes_array[::-1]
self.z_nodes_array = self.z_nodes_array[::-1]
psi_0 = 0.
psi_1 = pi/2.
psi_array = linspace(psi_0, psi_1, self.psi_interval_points)
psi_array = sorted(psi_array)
self._tck = interpolate.splrep(self.psi_nodes_array, self.ro_nodes_array, s=0) # for b-spline
output_ro = interpolate.splev(psi_array, self._tck)
self._interpolate = interpolate.splev
self.ro_array = output_ro
self.psi_array = psi_array
def compute(self):
"""
Compute spline for current handles
"""
try:
if self.GetNumberOfHandles() > 3: # we need at least 4 point to build cubic spline
spline_handles = self.GetNumberOfHandles()
pos = self.GetHandlePosition(spline_handles - 1)
self.Z = pos[1]
self._fix_first_handle()
self._handles_coordinates_to_ropsi()
self._compute_ro_psi_spline()
self._ropsi_to_xyz()
self._update_spline()
except Exception:
pass
def _vtk_observer_compute(self, obj, event):
self.compute()
# SPLINE REDRAWING:
def _update_spline(self):
for actor in self.actor_list:
self.render.RemoveActor(actor)
self.line_source_list = []
self.actor_list = []
self.mapper_list = []
self._draw_spline()
def _draw_spline(self):
for i in range(len(self.output_spline_points[0]) - 1):
self.line_source_list.append(vtkLineSource())
self.actor_list.append(vtkActor())
self.mapper_list.append(vtkPolyDataMapper())
spline_color = self.GetLineProperty().GetColor()
spline_width = self.GetLineProperty().GetLineWidth()
for i, line in enumerate(self.line_source_list):
x1 = self.output_spline_points[0][i]
y1 = self.output_spline_points[1][i]
z1 = self.output_spline_points[2][i]
x2 = self.output_spline_points[0][i + 1]
y2 = self.output_spline_points[1][i + 1]
z2 = self.output_spline_points[2][i + 1]
line.SetPoint1(x1, y1, 0)
line.SetPoint2(x2, y2, 0)
self.mapper_list[i].SetInputConnection(line.GetOutputPort())
self.actor_list[i].SetMapper(self.mapper_list[i])
self.actor_list[i].GetProperty().SetColor(spline_color)
self.actor_list[i].GetProperty().SetLineWidth(spline_width)
self.render.AddActor(self.actor_list[i])
self.GetLineProperty().SetOpacity(0.01)
self.GetInteractor().Initialize()
# SETTERS:
def set_psi_interval_points(self, n):
self.psi_interval_points = n
def set_spline_nodes(self, node_list, compute_=True):
for i in range(len(node_list)):
self.SetHandlePosition(i, *node_list[i])
if compute_:
self.compute()
def set_h(self, h):
self.h = h
def set_Z(self, z):
spline_handles = self.GetNumberOfHandles()
pos = list(self.GetHandlePosition(spline_handles - 1))
pos[1] = z
self.SetHandlePosition(spline_handles - 1, pos)
self.Z = z
self._remember_handles_position()
if self.GetEnabled():
self.compute()
def set_gamma(self, gamma):
self.gamma = gamma
def set_render(self, render):
self.render = render
# GETTERS:
def get_h(self):
return self.h
def get_Z(self):
return self.Z
def get_ropsi_handles_coordinates(self):
return [self.ro_nodes_array, self.psi_nodes_array, self.z_nodes_array]
def get_ropsi_set(self):
return [self.ro_array, self.psi_array]
def get_z_set(self):
z_set = self.Z - (self.Z - self.h * self.gamma) * np.sin(self.psi_array)
return z_set
def get_ro_set(self):
return self.ro_array
def get_psi_set(self):
return self.psi_array
def get_psi_coordinates(self):
return self.psi_nodes_array
def get_ro_coordinates(self):
return self.ro_nodes_array
def get_z_coordinates(self):
return self.z_nodes_array
def get_handles_position_list(self):
return self._handles_position_list
def get_handles_number(self):
        # Maybe we should use the original GetNumberOfHandles()?
return len(self._handles_position_list)
def get_spline_set(self):
return [list(self.ro_array), list(self.psi_array)]
def get_spline_object(self):
return [self._tck, self._interpolate]
# COORDINATES TRANSFORMATION:
def _handles_coordinates_to_ropsi(self):
self.psi_nodes_array = np.array([])
self.ro_nodes_array = np.array([])
self.z_nodes_array = np.array([])
number_of_points = self.GetNumberOfHandles()
for i in range(number_of_points):
            # unpack all three coordinates in a single call
            x, y, z = self.GetHandlePosition(i)
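            # map the handle's height y to a meridional angle psi in [0, pi/2]:
            # psi = asin((Z - y) / (Z - h*gamma)), clamping the argument at 1;
            # ro is the handle's distance from the long axis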
arg = (self.Z - y) / (self.Z - self.h * self.gamma)
if arg > 1.0:
arg = 1.0
psi = asin(arg)
ro = sqrt(x ** 2)
self.psi_nodes_array = np.append(self.psi_nodes_array, psi)
self.ro_nodes_array = np.append(self.ro_nodes_array, ro)
self.z_nodes_array = np.append(self.z_nodes_array, y)
def _ropsi_to_xyz(self):
x = []
y = []
z = []
for i in range(len(self.psi_array)):
y.append(self.Z - (self.Z - self.h * self.gamma) * sin(self.psi_array[i]))
x.append(self._sign * sqrt(self.ro_array[i] ** 2))
z.append(self.GetHandlePosition(0)[2])
self.output_spline_points = [x, y, z]
# HANDLES MANAGEMENT:
def _fix_first_handle(self):
        # The subsequent algorithm requires the first point
        # of the spline to be fixed
if self._spline_type == 'endo':
self.SetHandlePosition(0, 0., self.h, 0.)
else:
self.SetHandlePosition(0, 0., 0., 0.)
def _remember_handles_position(self):
        # We have to dynamically track the positions of the spline handles and record them in a list
handles_number = len(self._handles_position_list)
self._handles_position_list = []
for i in range(handles_number):
self._handles_position_list.append(self.GetHandlePosition(i))
def _vtk_observer_remember_handles_position(self, obj, event):
self._remember_handles_position()
def add_handle(self, position):
"""
Add new handle in specified position
Parameters
----------
position : array_like
(x, y, z) to append to handles list
"""
position = list(position)
position[2] = 0
self._handles_position_list.append(position)
self._update_spline_handles_position()
def delete_handle(self, position):
"""
        Delete a handle at the specified position
Parameters
----------
position : array_like
(x, y, z) to delete from handles list
"""
position = list(position)
position[2] = 0
self._handles_position_list.remove(position)
self._update_spline_handles_position()
def _update_spline_handles_position(self):
self._handles_position_list.sort(key=lambda k: k[1]) # we use sorting by y component
handles_number = len(self._handles_position_list)
        if handles_number > 1: # otherwise it raises a warning when the point count drops below 2
self.SetNumberOfHandles(handles_number)
for i in range(handles_number):
self.SetHandlePosition(i, self._handles_position_list[i])
if self.GetEnabled():
self.compute()
def remove_all_handles(self):
"""
Remove all handles from handles list
"""
self._handles_position_list = []
# NEIGHBORING SPLINE CONNECTION:
def connect_with_spline(self, spline):
"""
        Connect with the neighboring endo/epi spline to maintain an equal height (Z)
on the same meridian
Parameters
----------
spline : RoPsiSplineWidget object
"""
self._neighboring_spline = spline
self.set_Z(spline.get_Z())
def _move_neighboring_spline_handle(self):
self._neighboring_spline.set_Z(self.get_Z())
def _vtk_observer_move_neighboring_spline_handle(self, obj, event):
self._move_neighboring_spline_handle()
| 32.865625 | 102 | 0.625464 | 1,312 | 10,517 | 4.745427 | 0.166159 | 0.04176 | 0.039672 | 0.04433 | 0.379859 | 0.274655 | 0.146482 | 0.089945 | 0.082236 | 0.082236 | 0 | 0.010617 | 0.274603 | 10,517 | 319 | 103 | 32.968652 | 0.805479 | 0.127698 | 0 | 0.207729 | 0 | 0 | 0.00628 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.188406 | false | 0.004831 | 0.024155 | 0.062802 | 0.285024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37c5b7a57373382792f04fb19c487676bd6c5d39 | 12,537 | py | Python | csaws_creation/train_val_creation/generate_patches.py | ChrisMats/seemingly_uninformative_labels | bcbe060f8be89d731626e3f37752d5906c0a6752 | [
"MIT"
] | 4 | 2020-10-14T03:57:52.000Z | 2021-09-23T13:34:03.000Z | csaws_creation/train_val_creation/generate_patches.py | ChrisMats/seemingly_uninformative_labels | bcbe060f8be89d731626e3f37752d5906c0a6752 | [
"MIT"
] | 1 | 2021-06-04T10:34:32.000Z | 2021-06-07T04:54:35.000Z | csaws_creation/train_val_creation/generate_patches.py | ChrisMats/seemingly_uninformative_labels | bcbe060f8be89d731626e3f37752d5906c0a6752 | [
"MIT"
] | 4 | 2021-02-23T07:05:31.000Z | 2021-09-08T19:48:57.000Z | """This script creates the patched dataset"""
import sys
import glob
import json
from tqdm import tqdm
import numpy as np
from PIL import Image
import multiprocessing
from datetime import datetime
from joblib import Parallel, delayed
from scipy.interpolate import interp1d
from scipy.ndimage import generic_filter
from multiprocessing import Process, Manager
from settings import NUM_TO_LABEL, CLASSES
from utils import calculate_num_crops
import os
from utils import get_train_validation_split
from settings import (
SEGMENTATIONS_DIRECTORY,
ANONYMIZED_DATA_DIRECTORY,
RECORD_DIRECTORY,
DATASET_SPECS,
RANDOM_VALIDATION_SPLIT,
)
np.random.seed(2019)
NUM_CLASSES = len(NUM_TO_LABEL)
to_range_256 = interp1d([0, NUM_CLASSES - 1], [0, 255])
to_range_num_classes = interp1d([0, 255], [0, NUM_CLASSES - 1])
LABEL_TO_NUM = {v: k for k, v in NUM_TO_LABEL.items()}
SEGMENTATIONS_LIST = sorted(glob.glob(os.path.join(SEGMENTATIONS_DIRECTORY, "*.png")))
def process_image(target_folder, image_addrs, stuff_addrs, mode, crop_size,
crops_per_class):
""" given an image, generates patches and saves them
Parameters:
-----------
writer: writer object
Path to file
image_addrs: str
Path to image
stuff_addrs: str
Path to annotations
i: int
image number in the dataset
mode: str
train, val or test
Returns:
--------
crops_of_each_label: array_like
if mode is 'train', number of crops with central pixel of each
label type. If mode is 'test', 1.
pixels_of_each_label: array_like
number of pixels of each label among the crops generated
"""
# Open image and array
img = np.array(Image.open(image_addrs))
label = np.array(Image.open(stuff_addrs))
img_ID = image_addrs.split("/")[-1][:-4]
# Make sure is int16
img = img.astype(np.uint16)
annotations = label.astype(np.uint8)
# Define width and height
width = img.shape[0]
height = img.shape[1]
# Define variables to save labels information
crops_of_each_label = np.zeros(NUM_CLASSES)
pixels_of_each_label = np.zeros(NUM_CLASSES)
    if mode == 'train':  # note: `mode in ('train')` was a substring test, since ('train') is just a string
# create one list per each label with the positions
positions = [[] for _ in range(NUM_CLASSES)]
for pixel_col in range(width):
for pixel_row in range(height):
label = annotations[pixel_col, pixel_row]
positions[label].append([pixel_col, pixel_row])
# define dict
positions_dict = {}
for pos, _ in enumerate(positions):
if positions[pos]:
positions_dict[str(pos)] = positions[pos]
# list of labels contained in this image
unique_labels = list(np.unique(annotations))
# remove background and mammary gland
if ["mammary_gland"] in CLASSES:
if LABEL_TO_NUM['background'] in unique_labels:
unique_labels.remove(LABEL_TO_NUM['background'])
if LABEL_TO_NUM["mammary_gland"] in unique_labels:
unique_labels.remove(LABEL_TO_NUM['mammary_gland'])
for unique_label in unique_labels:
for crop_number in range(crops_per_class):
# Sample random pixel of class unique_label
sampled_pixel = np.random.randint(low=0, high=len(
positions_dict.get(str(unique_label))))
# Get pixel coordinates
coordinates = positions_dict.get(
str(unique_label))[sampled_pixel]
# Find upper left corner of the crop
x_coordinate = np.clip(
coordinates[0] - (crop_size // 2), 0, width)
y_coordinate = np.clip(
coordinates[1] - (crop_size // 2), 0, height)
# Check coordinates not too close from right or bottom side
if x_coordinate + crop_size >= width:
x_coordinate = width - crop_size
if y_coordinate + crop_size >= height:
y_coordinate = height - crop_size
# Get crop
img_crop = img[x_coordinate:x_coordinate + crop_size,
y_coordinate:y_coordinate + crop_size]
annotation_crop = annotations[
x_coordinate:x_coordinate + crop_size,
y_coordinate: y_coordinate + crop_size]
# Save img and mask patches in foler
img_crop = Image.fromarray(img_crop.astype(np.uint16))
annotation_crop = Image.fromarray(annotation_crop.astype(np.uint8))
img_crop.save(os.path.join(target_folder, 'images',
'{}-{}-{}.png'.format(img_ID,unique_label, crop_number)))
annotation_crop.save(os.path.join(target_folder, 'masks',
'{}-{}-{}.png'.format(img_ID,unique_label, crop_number)))
# Increase the number of crops of type unique_label
crops_of_each_label[unique_label] += 1
else:
overlapping = 0
img = Image.fromarray(img.astype(np.uint16))
annotations = Image.fromarray(annotations.astype(np.uint8))
# save full images
full_img_save_path = os.path.join(RECORD_DIRECTORY, 'images_full', '{}.png'.format(img_ID))
full_mask_save_path = os.path.join(RECORD_DIRECTORY, 'masks_full', '{}.png'.format(img_ID))
img.save(full_img_save_path)
annotations.save(full_mask_save_path)
# get image and segments and start the patching
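        # worked example (illustrative numbers): with crop_size=512 and overlapping=0
        # the stride is 512, so a 1000x1400 image produces ceil(1000/512) *
        # ceil(1400/512) = 2 * 3 = 6 patches; edge patches are padded with background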
x_max, y_max = img.size
x0 = 0
while (x0 + crop_size) < (x_max + crop_size):
y0 = 0
while (y0 + crop_size) < (y_max + crop_size):
## if patch exceeds img size then pad
if ((y0 + crop_size) - y_max > 0) or ((x0 + crop_size) - x_max > 0):
cropped_img = Image.fromarray(np.zeros((crop_size, crop_size), dtype=np.uint16))
cropped_mask = Image.fromarray(np.ones((crop_size, crop_size), dtype=np.uint8)*LABEL_TO_NUM['background'])
x1 = x0 + crop_size
y1 = y0 + crop_size
area = (x0, y0, x1, y1)
str_area = 'x'.join(map(str, area))
if (y0 + crop_size) - y_max > 0:
y1 = y_max
if (x0 + crop_size) - x_max > 0:
x1 = x_max
area = (x0, y0, x1, y1)
t_cropped_img = img.crop(area)
t_cropped_mask = annotations.crop(area)
cropped_img.paste(t_cropped_img)
cropped_mask.paste(t_cropped_mask)
unique_labels = list(np.unique(cropped_mask))
# remove blank images
if [LABEL_TO_NUM['background']] != unique_labels:
img_crop_path = os.path.join(target_folder, 'images','{}-{}.png'.format(img_ID, str_area))
mask_crop_path = os.path.join(target_folder, 'masks','{}-{}.png'.format(img_ID, str_area))
cropped_img.save(img_crop_path)
cropped_mask.save(mask_crop_path)
else:
area = (x0, y0, x0 + crop_size, y0 + crop_size)
str_area = 'x'.join(map(str, area))
cropped_img = img.crop(area)
cropped_mask = annotations.crop(area)
unique_labels = list(np.unique(cropped_mask))
# remove blank images
if [LABEL_TO_NUM['background']] != unique_labels:
img_crop_path = os.path.join(target_folder, 'images','{}-{}.png'.format(img_ID, str_area))
mask_crop_path = os.path.join(target_folder, 'masks','{}-{}.png'.format(img_ID, str_area))
cropped_img.save(img_crop_path)
cropped_mask.save(mask_crop_path)
y0 += crop_size - overlapping
x0 += crop_size - overlapping
print("{} -- done ".format(img_ID))
sys.stdout.flush()
def generate_dataset(original_imgs_address, segmentation_addrs, target_folder,
mode, crop_size, crops_per_class):
""" generates dataset according to defined mode
Parameters:
-----------
segmentation_addrs: list
List containing all annotations paths.
target_folder: str
Folder to save the datasets
name: str
Dataset name
mode: str
train, val or test
"""
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
if not os.path.isdir(os.path.join(target_folder, mode)):
os.mkdir(os.path.join(target_folder, mode))
if not os.path.isdir(os.path.join(target_folder, mode, 'images')):
os.mkdir(os.path.join(target_folder, mode, 'images'))
if not os.path.isdir(os.path.join(target_folder, mode, 'masks')):
os.mkdir(os.path.join(target_folder, mode, 'masks'))
if not os.path.isdir(os.path.join(RECORD_DIRECTORY, 'images_full')):
os.mkdir(os.path.join(RECORD_DIRECTORY, 'images_full'))
if not os.path.isdir(os.path.join(RECORD_DIRECTORY, 'masks_full')):
os.mkdir(os.path.join(RECORD_DIRECTORY, 'masks_full'))
# Read addresses and labels from the 'train' folder
image_addrs = [os.path.join(original_imgs_address,
segmentation.split("/")[-1][0:3],
segmentation.split("/")[-1][:-16] + ".png")
for segmentation in segmentation_addrs]
# Sort the list of addresses
train_image_addrs = sorted(image_addrs)
train_stuff_addrs = sorted(segmentation_addrs)
# Check that train_image_addrs and train_stuff_addrs have the same length
if len(train_image_addrs) != len(train_stuff_addrs):
print("Error: image address list length and label address list"
" length are different")
sys.exit(1)
# Define number of images
n_images = len(train_stuff_addrs)
if n_images < 1:
print("no registered data found for {}".format(mode))
return
num_cores = multiprocessing.cpu_count()
n_jobs = n_images if n_images < num_cores else -3
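    # joblib convention: negative n_jobs counts back from the CPU total
    # (n_jobs=-1 means all cores, so -3 means all cores but two)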
print('Patching starts . . .')
Parallel(n_jobs=n_jobs, verbose=1)(delayed(process_image)(
target_folder=os.path.join(target_folder , mode),
image_addrs=train_image_addrs[i],
stuff_addrs=train_stuff_addrs[i],
mode=mode,
crop_size=crop_size,
crops_per_class=crops_per_class)
for i in range(n_images))
if __name__ == "__main__":
"""Creates network datasets"""
np.random.seed(2019)
split_screenings = get_train_validation_split(
SEGMENTATIONS_LIST,
percent_test=0,
percent_validation=10,
random_split=RANDOM_VALIDATION_SPLIT
)
datasets_to_generate_parameters = []
for crop_size in DATASET_SPECS["crop_sizes"]:
for mode in ["train", "val"]:
dataset_folder = os.path.join(RECORD_DIRECTORY,
"crop_size_{}".format(crop_size))
if not os.path.isdir(dataset_folder):
os.mkdir(dataset_folder)
datasets_to_generate_parameters.append(
{
"original_imgs_address": ANONYMIZED_DATA_DIRECTORY,
"segmentation_addrs": split_screenings[mode],
"target_folder": dataset_folder,
"mode": mode,
"crop_size": crop_size,
"crops_per_class": calculate_num_crops(crop_size),
}
)
start = datetime.now()
for dataset_parameters in datasets_to_generate_parameters:
generate_dataset(**dataset_parameters)
end = datetime.now()
delta = end - start
print(
'\n\tDatasets generated in %d hours, %d minutes and %d seconds' % (
delta.seconds // 3600, ((delta.seconds // 60) % 60),
delta.seconds % 60))
| 38.457055 | 126 | 0.582755 | 1,510 | 12,537 | 4.587417 | 0.170199 | 0.043886 | 0.03176 | 0.030027 | 0.325393 | 0.265483 | 0.228815 | 0.187383 | 0.156633 | 0.133824 | 0 | 0.013045 | 0.321289 | 12,537 | 325 | 127 | 38.575385 | 0.801034 | 0.127223 | 0 | 0.116505 | 0 | 0 | 0.0575 | 0.00196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009709 | false | 0 | 0.082524 | 0 | 0.097087 | 0.024272 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80615ede9944c60d7f347d9b93800d3c39e08d0f | 1,258 | py | Python | tests/test_value.py | DanielTOsborne/repgen5 | a13e0005dc2a471bb9c112b53ab5e2e0d2596f72 | [
"MIT"
] | null | null | null | tests/test_value.py | DanielTOsborne/repgen5 | a13e0005dc2a471bb9c112b53ab5e2e0d2596f72 | [
"MIT"
] | 1 | 2021-12-17T16:45:56.000Z | 2022-02-02T20:40:57.000Z | tests/test_value.py | DanielTOsborne/repgen5 | a13e0005dc2a471bb9c112b53ab5e2e0d2596f72 | [
"MIT"
] | 1 | 2021-03-31T21:38:55.000Z | 2021-03-31T21:38:55.000Z | import unittest
from nose2.tools import params
import sys
import datetime
sys.path.append("../")
from repgen.data import Value
from repgen.util import TZ
def test_gents_scalar():
t_end = datetime.datetime.now().replace(minute=0,second=0,microsecond=0,tzinfo=TZ("UTC"))
t_start = t_end-datetime.timedelta(hours=2)
v = Value(dbtype="gents",value=2, tz="PST8PDT", start=t_start,end=t_end, interval=datetime.timedelta(minutes=15), picture="%0.02f")
assert len( v.values ) == 9
assert v.values[0][1] == 2
assert v.pop() == "2.00"
def test_gents_generator():
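	# data() below is a stateful callable: each call returns the next element of
	# thedata, with the cursor kept in function attributes; Value(dbtype="gents")
	# is expected to call it once per generated timestamp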
def data():
data.index+=1
return data.thedata[data.index-1]
data.index = 0
data.thedata = range(9)
t_end = datetime.datetime.now().replace(minute=0,second=0,microsecond=0,tzinfo=TZ("UTC"))
t_start = t_end-datetime.timedelta(hours=2)
v = Value(dbtype="gents",value = data,tz="PST8PDT", start=t_start,end=t_end, interval=datetime.timedelta(minutes=15), picture="%0.02f")
assert len( v.values ) == 9
assert v.pop() == "0.00"
assert v.values[0][1] == 0
assert v.values[1][1] == 1
assert v.values[2][1] == 2
assert v.values[3][1] == 3
assert v.values[4][1] == 4
assert v.values[8][1] == 8
| 34 | 139 | 0.645469 | 202 | 1,258 | 3.950495 | 0.277228 | 0.078947 | 0.114035 | 0.050125 | 0.575188 | 0.546366 | 0.546366 | 0.546366 | 0.546366 | 0.546366 | 0 | 0.053502 | 0.18283 | 1,258 | 36 | 140 | 34.944444 | 0.722763 | 0 | 0 | 0.193548 | 0 | 0 | 0.04213 | 0 | 0 | 0 | 0 | 0 | 0.354839 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8061f8f83585386d7f3cee51d2a8ec30b9f44859 | 9,096 | py | Python | Data Science Project/Mall Customer Segmentation & Analysis/Mall Customer Segmentation & Analysis.py | jrderek/Data-science-master-resources | 95adab02dccbf5fbe6333389324a1f8d032d3165 | [
"MIT"
] | 14 | 2020-09-17T17:04:04.000Z | 2021-08-19T05:08:49.000Z | Data Science Project/Mall Customer Segmentation & Analysis/Mall Customer Segmentation & Analysis.py | jrderek/Data-science-master-resources | 95adab02dccbf5fbe6333389324a1f8d032d3165 | [
"MIT"
] | 85 | 2020-10-01T16:53:21.000Z | 2021-07-08T17:44:17.000Z | Data Science Project/Mall Customer Segmentation & Analysis/Mall Customer Segmentation & Analysis.py | jrderek/Data-science-master-resources | 95adab02dccbf5fbe6333389324a1f8d032d3165 | [
"MIT"
] | 5 | 2020-09-18T08:53:01.000Z | 2021-08-19T05:12:52.000Z | #!/usr/bin/env python
# coding: utf-8
# ### Author : Sanjoy Biswas
# ### Topic : Mall Customer Segmentation & Analysis
# ### Email : sanjoy.eee32@gmail.com
# # **Data Import And Preprocessing**
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
import warnings
warnings.filterwarnings("ignore")
# In[2]:
df=pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
# In[3]:
df.head(5)
# As you can see we have five columns: CustomerID, Gender, Age, Annual Income and Spending Score.
# In[4]:
len(df)
# And there are 200 rows. So this is quite a small dataset, with few rows and columns. Now let's begin working with all the data that we have
# In[5]:
df.isnull().sum()
# But first we need to check whether there are any missing values. It turns out our dataset is clean, without any null values, so we don't have to worry about filling any missing columns or rows
# In[6]:
import missingno as msno
msno.matrix(df)
# You can see that our graph shows continuous dark lines without any horizontal interruptions. This supports our earlier observation that we don't have any missing values
# Now let's check the Gender column. There might be two possibilities: either gender is just classified as male and female, or there are other classifications that identify the LGBTQ community.
# In[7]:
df['Gender'].unique()
# So we only have two genders listed.
# Now let's run a quick check so that we don't have any duplicate values.
# In[8]:
print(sum(df.duplicated()))
df = df.drop_duplicates()
# Now let's look at the distribution of age and income of our customers
# **DATA WRANGLING AND VISUALIZATION**
# In[9]:
ig, axes = plt.subplots(1,2, figsize=(21,6))
sns.distplot(df['Age'], ax=axes[0])
sns.distplot(df['Annual Income (k$)'], ax=axes[1])
# The first figure shows us that the average age of our customers is around 35. The age of our customers is typically between 20 and 70.
# As for annual income, the majority of our customers earn around 80k a year; incomes range from 15k to 135k
# Let's see which gender makes up the majority of people visiting our store
# In[10]:
sns.countplot(x='Gender', data=df, palette='viridis')
# Out of 200 people, around 115 were women while 85 were men. The bar chart above clearly illustrates that we have more female
# customers than male. Perhaps we stock more household products, or maybe our stores place more emphasis on items that interest the
# female population
# Now let's look at the spending scores of males and females individually
# In[11]:
sns.stripplot(x='Gender', y = 'Spending Score (1-100)', data = df)
# Roughly, males and females have similar spending scores. We see more dots on the female side simply because of the
# larger number of female customers. Both the male and female populations have two gaps in their distribution: around 20 and 60. These two gaps divide the population into three chunks: people with scores below 20, people with scores between 30 and 60, and people with spending scores between 60 and 80. So we conclude that there are three types of customers in both genders on the basis of their spending habits. We are most interested in the topmost group.
# Now let's see if there are any differences in income between the two genders that might explain their spending habits
# In[12]:
sns.boxplot( x= 'Gender', y = 'Annual Income (k$)', data = df )
# There are a few outliers at the top of the male population. Surprisingly, the females have a slightly lower average income, yet our store has more female customers, which indicates that females visit our stores regardless of their income. Perhaps men are more interested in saving than spending.
# Now let's look at a more complex plot
# In[13]:
x = df['Annual Income (k$)']
y = df['Age']
z = df['Spending Score (1-100)']
sns.lineplot(x, y, color = 'blue')
sns.lineplot(x, z, color = 'pink')
plt.title('Annual Income vs Age and Spending Score', fontsize = 20)
plt.show()
# We can see that the people with the highest spending scores are the ones with an annual income of around 50k. Perhaps our store is a retail store with only a few luxury brands and mostly household and daily products. Despite large incomes, some people appear to have decreasing spending scores, which is clear on the right edge of the graph.
# In[14]:
df['Gender'].replace({'Male': 0, 'Female': 1},inplace = True)
# Here we have replaced Male with 0 and Female with 1, matching the mapping in the code above. Quantifying the variable will enable us to apply machine learning methods for future predictions. Now let's check if we have successfully encoded these values.
#
# In[15]:
df.head(5)
# In[16]:
df.drop('CustomerID', axis=1, inplace = True)
# Since the customer IDs are nothing more than unique numbers assigned to each customer, we have removed this column. Let's check
# In[17]:
df.head(5)
# Now let's see whether any of our columns are correlated with each other.
# In[18]:
sns.heatmap(df.corr(), annot=True)
# The figure above indicates that none of our columns are strongly correlated with each other, so the machine learning technique of linear regression won't give us an accurate predictive outcome
# In[19]:
def binarize_spending_score(score):
    # 1 marks a likely target customer (spending score above 55), 0 otherwise
    if score > 55:
        return 1
    else:
        return 0
df['Spending Score (1-100)'] = df['Spending Score (1-100)'].apply(binarize_spending_score)
df.head(5)
# Now, let's make a fair assumption that people with a spending score of more than 55 (the topmost chunk in the aforementioned plot)
# are our target customers, as they are highly likely to make purchases. Once we can accurately predict this group of customers by looking at their age, gender and income, we can apply several tactics, like emailing new offers, to increase the number of purchases
# **Machine Learning: Logistic Regression**
# In[20]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test= train_test_split(df.drop('Spending Score (1-100)',axis=1), df['Spending Score (1-100)'], test_size=0.30, random_state=101)
from sklearn.linear_model import LogisticRegression
log=LogisticRegression()
log.fit(X_train,y_train)
pred=log.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, pred))
# As we can see, logistic regression only predicts the target customers with a probability of 0.6, which is pretty low, so we will apply another machine learning technique (K-Nearest Neighbors) to our data.
# In[21]:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop('Spending Score (1-100)',axis=1))
scaled_features = scaler.transform(df.drop('Spending Score (1-100)',axis=1))
df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1])
df_feat.head()
# Here we have standardized our data for further processing.
# **Machine Learning: K-Nearest Neighbors**
# In[22]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(scaled_features,df['Spending Score (1-100)'],
test_size=0.30)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test,pred))
print(classification_report(y_test,pred))
# With K=1, we see that our predictive probability is just 0.68. Let's check which K value between 1 and 40 gives us the most accurate result.
# In[23]:
error_rate = []
for i in range(1,40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed', marker='o',
markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# We can see from the figure above that at K=3 the error rate is low. So now, let's look at the classification report with K=3.
# In[24]:
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
print('WITH K=3')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
# So with the K-Nearest Neighbors algorithm, we can predict whether a customer is a target customer with a probability of roughly 8/10, which is acceptable. Hope you found this analysis helpful. Feel free to ask if you have any questions about the code above. There are other algorithms that you can apply to see if they are more accurate.
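# As one illustrative follow-up (not part of the original analysis), a random forest can be scored on the same standardized split used for KNN above:

# In[25]:


from sklearn.ensemble import RandomForestClassifier

# reuse X_train/y_train/X_test/y_test from the earlier train_test_split
rf = RandomForestClassifier(n_estimators=100, random_state=101)
rf.fit(X_train, y_train)
print(classification_report(y_test, rf.predict(X_test)))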
# **THANK YOU**
# In[ ]:
| 30.626263 | 498 | 0.744173 | 1,493 | 9,096 | 4.487609 | 0.316142 | 0.029104 | 0.018806 | 0.022836 | 0.131493 | 0.102687 | 0.085373 | 0.07194 | 0.067761 | 0.035224 | 0 | 0.024277 | 0.171284 | 9,096 | 296 | 499 | 30.72973 | 0.864553 | 0.610158 | 0 | 0.206897 | 0 | 0 | 0.142898 | 0.01963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011494 | false | 0 | 0.172414 | 0 | 0.206897 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80624346f155364d7ebf81125c59e583fc168c60 | 4,319 | py | Python | colorlight-5a-75b/uart-probe/colorlight-uart-probe.py | TomKeddie/prj-litex | cc79c041d22ad552a12b49f531d007491b536521 | [
"MIT"
] | 2 | 2019-08-26T13:49:22.000Z | 2019-11-11T18:43:29.000Z | colorlight-5a-75b/uart-probe/colorlight-uart-probe.py | TomKeddie/prj-litex | cc79c041d22ad552a12b49f531d007491b536521 | [
"MIT"
] | null | null | null | colorlight-5a-75b/uart-probe/colorlight-uart-probe.py | TomKeddie/prj-litex | cc79c041d22ad552a12b49f531d007491b536521 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This file is Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
# Disclaimer: This SoC is still a Proof of Concept with large timings violations on the IP/UDP and
# Etherbone stack that need to be optimized. It was initially just used to validate the reversed
# pinout but happens to work on hardware...
import argparse
import sys
import math
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import colorlight_5a_75b
from litex.soc.cores.clock import *
from litex.soc.cores.uart import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.build.generic_platform import Pins, IOStandard, Misc, Subsignal
from litex.soc.interconnect import stream  # explicit import for stream.Endpoint below
_serial = [
("serial", 0,
Subsignal("rx", Pins("M1")),
Subsignal("tx", Pins("M2"), Misc("PULLUP=TRUE")),
IOStandard("LVCMOS33")
),
]
_test = [
("test", 0,
Subsignal("tx", Pins("F3"), Misc("PULLUP=TRUE")),
IOStandard("LVCMOS33")
),
]
# ----------------------------------------------------------------------------------------------
class RS232TextSender(Module):
def __init__(self, pads, clk_freq, text, baudrate=115200):
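        # tuning_word is a DDS-style phase increment: RS232PHYTX adds it to a 32-bit
        # accumulator every sys-clock cycle, so the effective baud rate is
        # clk_freq * tuning_word / 2**32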
tuning_word = Signal(32, reset=int((baudrate/clk_freq)*2**32))
self.source = stream.Endpoint([("data", 8)])
self.submodules.tx = RS232PHYTX(pads, tuning_word)
        text = text + "\r\n"
        text_ascii = Array(Constant(ord(character), bits_sign=8) for character in list(text))
        # size the index after the newline is appended, so ix can count up to len(text_ascii)
        ix = Signal(int(math.log2(len(text))+1))
self.comb += [
self.tx.sink.valid.eq(1),
self.tx.sink.data.eq(text_ascii[ix]),
]
self.sync += [
If(ix == len(text_ascii),
ix.eq(0),
).Elif(self.tx.sink.ready,
ix.eq(ix+1),
)
]
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
# Clk / Rst
clk25 = platform.request("clk25")
platform.add_period_constraint(clk25, 1e9/25e6)
# PLL
self.submodules.pll = pll = ECP5PLL()
pll.register_clkin(clk25, 25e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~pll.locked)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, revision, **kwargs):
platform = colorlight_5a_75b.Platform(revision=revision)
# try for 11.52MHz but 25MHz*16/35=11.43MHz, use accurate value here to ensure uart is as close as possible
# ie. 43287859*11430000/115200=4294967260 (0xFFFFFFDC)
sys_clk_freq = int(11.43e6)
# SoCCore ----------------------------------------------------------------------------------
platform.add_extension(_serial)
SoCCore.__init__(self, platform, clk_freq=sys_clk_freq, **kwargs, cpu_variant="standard")
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
print(self.crg.pll.config)
# uarts ------------------------------------------------------------------------------------
platform.add_extension(_test)
self.submodules.test0 = RS232TextSender(platform.request("test"), sys_clk_freq, "F3")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Colorlight 5A-75B")
builder_args(parser)
soc_core_args(parser)
parser.add_argument("--revision", default="7.0", type=str, help="Board revision 7.0 (default) or 6.1")
args = parser.parse_args()
argdict = soc_core_argdict(args)
soc = BaseSoC(args.revision, **argdict)
argdict = builder_argdict(args)
argdict["output_dir"]="build"
builder = Builder(soc, **argdict)
builder.csr_csv="csr.csv"
builder.build()
if __name__ == "__main__":
main()
| 34.277778 | 115 | 0.552674 | 478 | 4,319 | 4.828452 | 0.453975 | 0.027296 | 0.025997 | 0.023397 | 0.05286 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036909 | 0.197036 | 4,319 | 125 | 116 | 34.552 | 0.628604 | 0.274138 | 0 | 0.050633 | 0 | 0 | 0.06254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.139241 | 0 | 0.227848 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8063b17195dc598c722e984e0438f0d945c3c21d | 4,109 | py | Python | src/jig/commands/tests/test_sticky.py | robmadole/jig | 6596e15afb0bb7f69850a71d9071440ba101f539 | [
"BSD-2-Clause"
] | 16 | 2015-04-07T19:26:01.000Z | 2020-03-05T21:09:07.000Z | src/jig/commands/tests/test_sticky.py | robmadole/jig | 6596e15afb0bb7f69850a71d9071440ba101f539 | [
"BSD-2-Clause"
] | 2 | 2015-02-11T13:29:35.000Z | 2015-03-02T21:03:08.000Z | src/jig/commands/tests/test_sticky.py | robmadole/jig | 6596e15afb0bb7f69850a71d9071440ba101f539 | [
"BSD-2-Clause"
] | 2 | 2020-05-29T06:48:16.000Z | 2020-05-29T06:54:36.000Z | # coding=utf-8
import git
from mock import patch, MagicMock
from jig.tests.testcase import CommandTestCase, result_with_hint
from jig.commands import sticky
from jig.exc import (
ForcedExit, JigUserDirectoryError, GitConfigError,
InitTemplateDirAlreadySet, GitTemplatesMissing, GitHomeTemplatesExists)
from jig.commands.hints import (
INIT_TEMPLATE_DIR_ALREADY_SET, GIT_TEMPLATES_MISSING,
GIT_HOME_TEMPLATES_EXISTS)
class TestStickyCommand(CommandTestCase):
"""
Test the sticky command.
"""
command = sticky.Command
def setUp(self):
super(TestStickyCommand, self).setUp()
self.mocks = {
'create_auto_init_templates': MagicMock(),
'set_templates_directory': MagicMock()
}
self._patches = []
def _start_patches(self):
assert len(self._patches) == 0
for function, mock_function in self.mocks.items():
patched = patch(
'jig.commands.sticky.{0}'.format(function),
new=mock_function
)
patched.start()
self._patches.append(patched)
def run_command(self, *args, **kwargs):
"""
Make sure that our patches have started before we run a command.
"""
self._start_patches()
return super(TestStickyCommand, self).run_command(*args, **kwargs)
def tearDown(self):
for patches in self._patches:
patches.stop()
def test_command_succeeds(self):
"""
Successful command returns a message that informs the user.
"""
self.run_command()
self.assertResults(
u'Jig has been setup to run everytime you clone.',
self.output)
def test_fails_create_auto_init_templates(self):
"""
A failure to auto-init is formatted correctly.
"""
self.mocks['create_auto_init_templates'].side_effect = \
JigUserDirectoryError('Error')
with self.assertRaises(ForcedExit):
self.run_command()
self.assertResults(
u'Error',
self.error)
def test_templates_missing(self):
"""
No Git templates can be found.
"""
self.mocks['create_auto_init_templates'].side_effect = \
GitTemplatesMissing()
with self.assertRaises(ForcedExit):
self.run_command()
self.assertResults(
result_with_hint(
u'Unable to find templates.',
GIT_TEMPLATES_MISSING),
self.error)
def test_home_templates_exist(self):
"""
A templates directory already exists in ~/.jig/git
"""
self.mocks['create_auto_init_templates'].side_effect = \
GitHomeTemplatesExists('~/.jig/git/templates')
with self.assertRaises(ForcedExit):
self.run_command()
self.assertResults(
result_with_hint(
u'~/.jig/git/templates already exists',
GIT_HOME_TEMPLATES_EXISTS),
self.error)
def test_init_templatesdir_already_set(self):
"""
Git is already configured with a init.templatedir
"""
self.mocks['set_templates_directory'].side_effect = \
InitTemplateDirAlreadySet('/tmp/templates')
with self.assertRaises(ForcedExit):
self.run_command()
self.assertResults(
result_with_hint(
u'Git configuration for init.templatedir is /tmp/templates',
INIT_TEMPLATE_DIR_ALREADY_SET),
self.error)
def test_git_config_error(self):
"""
A failure to read or write to the Git config.
"""
self.mocks['set_templates_directory'].side_effect = \
GitConfigError(git.exc.GitCommandError(
'git config', 1, 'error'))
with self.assertRaises(ForcedExit):
self.run_command()
self.assertResults(
u'Problem when running git config: error',
self.error)
| 28.534722 | 76 | 0.607204 | 423 | 4,109 | 5.695035 | 0.293144 | 0.033209 | 0.040681 | 0.044832 | 0.290577 | 0.269822 | 0.243254 | 0.210046 | 0.157742 | 0.157742 | 0 | 0.001395 | 0.302263 | 4,109 | 143 | 77 | 28.734266 | 0.838856 | 0.094427 | 0 | 0.344828 | 0 | 0 | 0.128422 | 0.05532 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.114943 | false | 0 | 0.08046 | 0 | 0.229885 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8063da22cb97bc1092b0a5274319e07381b8faeb | 5,624 | py | Python | model/utils.py | yhygao/CBIM-Medical-Image-Segmentation | 5586f705156ef3c442393276d184e4d51d2a2408 | [
"Apache-2.0"
] | 20 | 2022-03-02T08:47:25.000Z | 2022-03-30T11:18:26.000Z | model/utils.py | yhygao/CBIM-Medical-Image-Segmentation | 5586f705156ef3c442393276d184e4d51d2a2408 | [
"Apache-2.0"
] | 3 | 2022-03-04T04:23:10.000Z | 2022-03-05T17:29:52.000Z | model/utils.py | yhygao/CBIM-Medical-Image-Segmentation | 5586f705156ef3c442393276d184e4d51d2a2408 | [
"Apache-2.0"
] | 5 | 2022-03-02T08:47:32.000Z | 2022-03-30T11:18:53.000Z | import numpy as np
import torch
import torch.nn as nn
import pdb
def get_model(args, pretrain=False):
if args.dimension == '2d':
if args.model == 'unet':
from .dim2 import UNet
if pretrain:
raise ValueError('No pretrain model available')
return UNet(args.in_chan, args.classes, args.base_chan, block=args.block)
        elif args.model == 'unet++':
from .dim2 import UNetPlusPlus
if pretrain:
raise ValueError('No pretrain model available')
return UNetPlusPlus(args.in_chan, args.classes, args.base_chan)
        elif args.model == 'attention_unet':
from .dim2 import AttentionUNet
if pretrain:
raise ValueError('No pretrain model available')
return AttentionUNet(args.in_chan, args.classes, args.base_chan)
elif args.model == 'resunet':
from .dim2 import UNet
if pretrain:
raise ValueError('No pretrain model available')
return UNet(args.in_chan, args.classes, args.base_chan, block=args.block)
elif args.model == 'daunet':
from .dim2 import DAUNet
if pretrain:
raise ValueError('No pretrain model available')
return DAUNet(args.in_chan, args.classes, args.base_chan, block=args.block)
elif args.model in ['utnetv2']:
from .dim2 import UTNetV2
if pretrain:
raise ValueError('No pretrain model available')
return UTNetV2(args.in_chan, args.classes, args.base_chan, conv_block=args.conv_block, conv_num=args.conv_num, trans_num=args.trans_num, num_heads=args.num_heads, fusion_depth=args.fusion_depth, fusion_dim=args.fusion_dim, fusion_heads=args.fusion_heads, map_size=args.map_size, proj_type=args.proj_type, act=nn.GELU, expansion=args.expansion, attn_drop=args.attn_drop, proj_drop=args.proj_drop)
elif args.model == 'transunet':
from .dim2 import VisionTransformer as ViT_seg
from .dim2.transunet import CONFIGS as CONFIGS_ViT_seg
config_vit = CONFIGS_ViT_seg['R50-ViT-B_16']
config_vit.n_classes = args.classes
config_vit.n_skip = 3
config_vit.patches.grid = (int(args.training_size[0]/16), int(args.training_size[1]/16))
net = ViT_seg(config_vit, img_size=args.training_size[0], num_classes=args.classes)
if pretrain:
net.load_from(weights=np.load(args.init_model))
return net
elif args.model == 'swinunet':
from .dim2 import SwinUnet
from .dim2.swin_unet import SwinUnet_config
config = SwinUnet_config()
net = SwinUnet(config, img_size=224, num_classes=args.classes)
if pretrain:
net.load_from(args.init_model)
return net
elif args.dimension == '3d':
if args.model == 'vnet':
from .dim3 import VNet
if pretrain:
raise ValueError('No pretrain model available')
return VNet(args.in_chan, args.classes, scale=args.downsample_scale, baseChans=args.base_chan)
elif args.model == 'resunet':
from .dim3 import UNet
if pretrain:
raise ValueError('No pretrain model available')
return UNet(args.in_chan, args.base_chan, num_classes=args.classes, scale=args.down_scale, norm=args.norm, kernel_size=args.kernel_size, block=args.block)
elif args.model == 'unet':
from .dim3 import UNet
return UNet(args.in_chan, args.base_chan, num_classes=args.classes, scale=args.down_scale, norm=args.norm, kernel_size=args.kernel_size, block=args.block)
elif args.model == 'unet++':
from .dim3 import UNetPlusPlus
return UNetPlusPlus(args.in_chan, args.base_chan, num_classes=args.classes, scale=args.down_scale, norm=args.norm, kernel_size=args.kernel_size, block=args.block)
elif args.model == 'attention_unet':
from .dim3 import AttentionUNet
return AttentionUNet(args.in_chan, args.base_chan, num_classes=args.classes, scale=args.down_scale, norm=args.norm, kernel_size=args.kernel_size, block=args.block)
elif args.model == 'utnetv2':
from .dim3 import UTNetV2
return UTNetV2(args.in_chan, args.classes, args.base_chan, map_size=args.map_size, conv_block=args.conv_block, conv_num=args.conv_num, trans_num=args.trans_num, num_heads=args.num_heads, fusion_depth=args.fusion_depth, fusion_dim=args.fusion_dim, fusion_heads=args.fusion_heads, expansion=args.expansion, attn_drop=args.attn_drop, proj_drop=args.proj_drop, proj_type=args.proj_type, norm=args.norm, act=args.act, kernel_size=args.kernel_size, scale=args.down_scale)
elif args.model == 'unetr':
from .dim3 import UNETR
model = UNETR(args.in_chan, args.classes, args.training_size, feature_size=16, hidden_size=768, mlp_dim=3072, num_heads=12, pos_embed='perceptron', norm_name='instance', res_block=True)
if pretrain:
weight = torch.load(args.init_model)
model.load_state_dict(weight)
return model
elif args.model == 'vtunet':
from .dim3 import VTUNet
model = VTUNet(args, args.classes)
if pretrain:
model.load_from(args)
return model
else:
raise ValueError('Invalid dimension, should be \'2d\' or \'3d\'')
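# --- usage sketch (illustrative, not part of the original module) ---
# the attribute names below mirror the ones read by get_model; the values
# are hypothetical examples only
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(dimension='2d', model='unet', in_chan=1,
#                            classes=4, base_chan=32, block='BasicBlock')
#     net = get_model(args)    # returns a 2D UNet ready for training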
| 48.068376 | 477 | 0.641536 | 733 | 5,624 | 4.742156 | 0.150068 | 0.053797 | 0.037399 | 0.052359 | 0.657652 | 0.610472 | 0.585443 | 0.555524 | 0.520426 | 0.418872 | 0 | 0.013025 | 0.262802 | 5,624 | 116 | 478 | 48.482759 | 0.825374 | 0 | 0 | 0.365591 | 0 | 0 | 0.070959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010753 | false | 0 | 0.236559 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80647f5099199c99b0e0a984c775048c1fbf6fda | 8,731 | py | Python | parser.py | envlh/henry | 53a1097a8650b99a8145b16853dbfece13922cb4 | [
"CC0-1.0"
] | 2 | 2022-01-10T12:36:21.000Z | 2022-01-18T11:13:40.000Z | parser.py | envlh/henry | 53a1097a8650b99a8145b16853dbfece13922cb4 | [
"CC0-1.0"
] | null | null | null | parser.py | envlh/henry | 53a1097a8650b99a8145b16853dbfece13922cb4 | [
"CC0-1.0"
] | 1 | 2022-01-10T13:15:43.000Z | 2022-01-10T13:15:43.000Z | import json
import re
import requests
import unidecode
import urllib.parse
def normalize_lemma(lemma):
return re.sub(r'[^a-z]', '', unidecode.unidecode(lemma))
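# illustrative: normalize_lemma("c'hoari") -> 'choari'; accented characters are
# transliterated first (unidecode), then anything outside a-z is stripped, so
# callers are expected to pass lowercase lemmas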
def get_existing_entries(user_agent):
url = 'https://query.wikidata.org/sparql?{}'.format(urllib.parse.urlencode({'query': 'SELECT DISTINCT (REPLACE(?statedAs, "’", "\'") AS ?statedAs) { ?lexeme p:P1343 [ ps:P1343 wd:Q19216625 ; pq:P1932 ?statedAs ] . }', 'format': 'json'}))
raw = requests.get(url, headers={'User-Agent': user_agent}).content
res = json.loads(raw)['results']['bindings']
existing_entries = []
for value in res:
existing_entries.append(value['statedAs']['value'])
return existing_entries
def load_json_file(filename):
return json.loads(file_get_contents(filename))
def file_get_contents(filename):
with open(filename, 'r', encoding='UTF-8') as f:
s = f.read()
return s
def build_lexeme(lemma, lexical_category, gender, number, forms, dialects, page_number, stated_as):
lexeme = {'type': 'lexeme', 'language': 'Q12107', 'lemmas': {'br': {'language': 'br', 'value': lemma}}, 'lexicalCategory': lexical_category, 'forms': []}
# forms + dialect / variety of form (P7481)
for f in forms:
claims = {}
if len(dialects) >= 1:
cl = []
for dialect in dialects:
cl.append({'mainsnak': {'snaktype': 'value', 'property': 'P7481', 'datavalue': {'value': {'entity-type': 'item', 'numeric-id': dialect[1:], 'id': dialect}, 'type': 'wikibase-entityid'}, 'datatype': 'wikibase-item'}, 'type': 'statement', 'rank': 'normal'})
claims['P7481'] = cl
form = {'representations': {'br': {'language': 'br', 'value': f}}, 'grammaticalFeatures': [], 'claims': claims, 'add': ''}
# positive for adjectives
if lexical_category == 'Q34698':
form['grammaticalFeatures'] = ['Q3482678']
# infinitive for verbs
elif lexical_category == 'Q24905':
form['grammaticalFeatures'] = ['Q179230']
# number for nouns
elif lexical_category == 'Q1084' and number is not None:
form['grammaticalFeatures'] = [number]
lexeme['forms'].append(form)
# described by source (P1343)
first_letter = normalize_lemma(lemma)[:1]
if lemma[:3] == 'c\'h':
first_letter = 'c\'h'
elif lemma[:2] == 'ch':
first_letter = 'ch'
first_letter = first_letter.upper()
lexeme['claims'] = {
'P1343': [{
'mainsnak': {'snaktype': 'value', 'property': 'P1343', 'datavalue': {'value': {'entity-type': 'item', 'numeric-id': 19216625, 'id': 'Q19216625'}, 'type': 'wikibase-entityid'}, 'datatype': 'wikibase-item'},
'type': 'statement',
'qualifiers': {
'P304': [{'snaktype': 'value', 'property': 'P304', 'datavalue': {'value': str(page_number), 'type': 'string'}, 'datatype': 'string'}],
'P953': [{'snaktype': 'value', 'property': 'P953', 'datavalue': {'value': 'https://fr.wikisource.org/wiki/Lexique_%C3%A9tymologique_du_breton_moderne/{}#{}'.format(first_letter, page_number), 'type': 'string'}, 'datatype': 'url'}],
'P1932': [{'snaktype': 'value', 'property': 'P1932', 'datavalue': {'value': stated_as, 'type': 'string'}, 'datatype': 'string'}],
},
'qualifiers-order': ['P304', 'P953', 'P1932'],
'rank': 'normal'
}]
}
# gender (P5185)
if gender is not None:
lexeme['claims']['P5185'] = [{'mainsnak': {'snaktype': 'value', 'property': 'P5185', 'datavalue': {'value': {'entity-type': 'item', 'numeric-id': int(gender[1:]), 'id': gender}, 'type': 'wikibase-entityid'}, 'datatype': 'wikibase-item'}, 'type': 'statement', 'rank': 'normal'}]
# reconstructed word (P31)
if lemma[0] == '*':
lexeme['claims']['P31'] = [{'mainsnak': {'snaktype': 'value', 'property': 'P31', 'datavalue': {'value': {'entity-type': 'item', 'numeric-id': 55074511, 'id': 'Q55074511'}, 'type': 'wikibase-entityid'}, 'datatype': 'wikibase-item'}, 'type': 'statement', 'rank': 'normal'}]
return lexeme
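# illustrative call (all values hypothetical): build_lexeme('dour', 'Q1084',
# 'Q499327', 'Q110786', ['dour'], [], 12, 'dour') returns a dict shaped for the
# wbeditentity API, with the lemma, one form, the P1343 source claim and a
# P5185 gender statement filled in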
def main():
conf = load_json_file('conf/general.json')
ref_lexical_categories = load_json_file('conf/lexical_categories.json')
ref_genders = load_json_file('conf/genders.json')
ref_numbers = load_json_file('conf/numbers.json')
ref_dialects = load_json_file('conf/dialects.json')
# already existing
existing_entries = get_existing_entries(conf['user_agent'])
content = file_get_contents('data/{}/stripped_{}.txt'.format(conf['iteration'], conf['iteration']))
lines = content.split('\n')
lexemes = []
lexemes_error = []
monograms = {}
bigrams = {}
page_number = 1
with open('data/{}/lexemes_{}.txt'.format(conf['iteration'], conf['iteration']), 'w', encoding='utf-8') as out:
out.write('lemma,lexical_category,gender,number,forms,dialects,page_number\n')
for line in lines:
line = line.strip()
# line starting with a lemma (starting string surrounded by 3 single quotes)
output = re.search(r'^\'\'\'(.*?)\'\'\'(.*)', line)
if output is not None:
# STATED AS (entry label)
stated_as = output.group(1).strip()
if stated_as in existing_entries:
lexemes_error.append({stated_as: 'entry already used in Wikidata'})
continue
# LEMMA and FORMS
# removing definition number
forms = re.sub(r'^[0-9]+ ', '', stated_as.lower())
forms = forms.split(',')
forms = [x.strip() for x in forms]
# do not compute already existing lemmas
lemma = forms[0]
# DEFINITION
definition = output.group(2)
match = re.search(r'^( \([CLTV., ]+\))?, ([a-zéè\' .]+)', definition)
if match is None:
lexemes_error.append({stated_as: 'unable to parse definition'})
continue
# DIALECTS
dialects = match.group(1)
if dialects is None:
dialects = []
else:
dialects = re.findall(r'[CLTV]', dialects)
dialects = [ref_dialects[x] for x in dialects]
# LEXICOGRAPHICAL CATEGORY
parsed_lexical_category = match.group(2).strip()
if parsed_lexical_category not in ref_lexical_categories:
lexemes_error.append({stated_as: 'unknown lexical category ({})'.format(parsed_lexical_category)})
continue
lexical_category = ref_lexical_categories[parsed_lexical_category]
# GENDER
gender = None
if parsed_lexical_category in ref_genders:
gender = ref_genders[parsed_lexical_category]
# NUMBER
number = None
if parsed_lexical_category in ref_numbers:
number = ref_numbers[parsed_lexical_category]
lexeme = build_lexeme(lemma, lexical_category, gender, number, forms, dialects, page_number, stated_as)
lexemes.append(lexeme)
out.write('{},{},{},{},{},{},{}\n'.format(lemma, lexical_category, gender, number, forms, dialects, page_number))
for c in lemma:
if c not in monograms:
monograms[c] = 0
monograms[c] += 1
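                # non-overlapping character pairs; classic overlapping bigrams
                # would use zip(lemma, lemma[1:]) instead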
for (a, b) in zip(lemma[0::2], lemma[1::2]):
if (a + b) not in bigrams:
bigrams[a + b] = 0
bigrams[a + b] += 1
output = re.search(r'(?i)^{{nr\|', line)
if output is not None:
page_number += 1
with open('data/{}/lexemes_{}.json'.format(conf['iteration'], conf['iteration']), 'w', encoding='utf-8') as myfile:
json.dump(lexemes, myfile, ensure_ascii=False)
with open('data/{}/errors_{}.json'.format(conf['iteration'], conf['iteration']), 'w', encoding='utf-8') as myfile:
json.dump(lexemes_error, myfile, ensure_ascii=False)
with open('data/{}/monograms_{}.json'.format(conf['iteration'], conf['iteration']), 'w', encoding='utf-8') as myfile:
json.dump(monograms, myfile, ensure_ascii=False)
with open('data/{}/bigrams_{}.json'.format(conf['iteration'], conf['iteration']), 'w', encoding='utf-8') as myfile:
json.dump(bigrams, myfile, ensure_ascii=False)
print('{} lexemes'.format(len(lexemes)))
if __name__ == '__main__':
main()
| 45.712042 | 285 | 0.569465 | 948 | 8,731 | 5.113924 | 0.232068 | 0.055693 | 0.034653 | 0.017327 | 0.290635 | 0.262995 | 0.246493 | 0.169348 | 0.158416 | 0.135726 | 0 | 0.029348 | 0.262398 | 8,731 | 190 | 286 | 45.952632 | 0.723447 | 0.049021 | 0 | 0.036496 | 0 | 0 | 0.233824 | 0.030541 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043796 | false | 0 | 0.036496 | 0.014599 | 0.116788 | 0.007299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8064b62c6658077a658035b75bf939d6a102f7cb | 1,591 | py | Python | tests/test_solidarity_tax_credit.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 1 | 2021-11-22T18:15:09.000Z | 2021-11-22T18:15:09.000Z | tests/test_solidarity_tax_credit.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 3 | 2021-05-10T18:46:16.000Z | 2021-06-01T16:51:48.000Z | tests/test_solidarity_tax_credit.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 1 | 2021-05-05T17:20:06.000Z | 2021-05-05T17:20:06.000Z | import pytest
from math import isclose
import sys
sys.path.append('/Users/pyann/Dropbox (CEDIA)/srd/Model')  # machine-specific path to the srd package
import srd
from srd import quebec
qc_form = quebec.form(2016)
@pytest.mark.parametrize('income, amount', [(0, 966), (33e3, 966), (51e3, 0),
(34e3+(51e3-34e3)/2, 480),
(100e3, 0)])
def test_single(income, amount):
p = srd.Person(age=45, othtax=income)
hh = srd.Hhold(p, prov='qc')
qc_form.file(hh)
assert isclose(qc_form.solidarity(p, hh), amount, abs_tol=50)
@pytest.mark.parametrize('income, amount', [(0, 1231), (33e3, 1231), (56e3, 0),
(34e3+(56e3-34e3)/2, 620),
(100e3, 0)])
def test_couple(income, amount):
p0 = srd.Person(age=45, othtax=income/2)
p1 = srd.Person(age=45, othtax=income/2)
hh = srd.Hhold(p0, p1, prov='qc')
qc_form.file(hh)
assert isclose(qc_form.solidarity(p0, hh), amount/2, abs_tol=50)
@pytest.mark.parametrize('income, amount', [(0, 1200), (33e3, 1200), (55e3, 0),
(34e3+(55e3-34e3)/2, 600),
(100e3, 0)])
def test_single_2kids(income, amount):
p = srd.Person(age=45, othtax=income)
hh = srd.Hhold(p, prov='qc')
d0 = srd.Dependent(age=12)
d1 = srd.Dependent(age=12)
hh.add_dependent(d0, d1)
qc_form.file(hh)
assert isclose(qc_form.solidarity(p, hh), amount, abs_tol=50)
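# The midpoint cases above reflect a linear phase-out: the full credit applies
# below roughly 34k of family income and shrinks linearly to zero at the upper
# threshold (51k/55k/56k depending on the household), so about half the credit
# remains at the midpoint. This reading is inferred from the parametrized
# values, not from the srd documentation.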
| 32.469388 | 80 | 0.529855 | 208 | 1,591 | 3.980769 | 0.298077 | 0.050725 | 0.057971 | 0.067633 | 0.576087 | 0.530193 | 0.48913 | 0.423913 | 0.423913 | 0.332126 | 0 | 0.124654 | 0.319296 | 1,591 | 48 | 81 | 33.145833 | 0.639889 | 0 | 0 | 0.342857 | 0 | 0 | 0.055772 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 1 | 0.085714 | false | 0 | 0.142857 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
806566fc4d9daeabfef5f2000f79ccd69c7d32a1 | 49,754 | py | Python | 2021/BondwireProfileEditor_Win_Linux.py | zhangjq933/HowtoSim_Script | d958cc6cc743106e8f6ddf58dead6551a8ac7784 | [
"MIT"
] | 79 | 2019-04-01T04:35:01.000Z | 2022-03-30T10:59:32.000Z | 2021/BondwireProfileEditor_Win_Linux.py | raflzhang/HowtoSim_Script | 90fb8cca87d47d2c45b8ff5d07a35e8a6c846685 | [
"MIT"
] | 1 | 2020-03-29T20:52:06.000Z | 2020-03-30T05:35:30.000Z | 2021/BondwireProfileEditor_Win_Linux.py | raflzhang/HowtoSim_Script | 90fb8cca87d47d2c45b8ff5d07a35e8a6c846685 | [
"MIT"
] | 73 | 2019-05-07T10:26:53.000Z | 2022-03-24T02:25:08.000Z | # coding=utf-8
import os, re, sys, clr, json, math, logging, random, time
from itertools import combinations
os.chdir(os.path.dirname(__file__))
logging.basicConfig(filename='gui.log', filemode='w', encoding='utf-8', level=logging.DEBUG)
clr.AddReference('System.Drawing')
clr.AddReference('System.Windows.Forms')
from System import Drawing, Array, ComponentModel, Diagnostics, IO
from System.Drawing import Color
from System.Windows import Forms
import System.Object as object
import System.String as string
from System.Windows.Forms import DialogResult, OpenFileDialog ,SaveFileDialog, FolderBrowserDialog, MessageBox
#----------------------------------------------------------------------------
import ScriptEnv
import clr
clr.AddReference('Ansys.Ansoft.Edb')
clr.AddReference('Ansys.Ansoft.SimSetupData')
import Ansys.Ansoft.Edb as edb
import Ansys.Ansoft.Edb.Definition as edbd
ScriptEnv.Initialize("Ansoft.ElectronicsDesktop")
oDesktop.RestoreWindow()
oDesktop.ClearMessages("", "", 2)
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oEditor = oDesign.GetActiveEditor()
oDefinitionManager = oProject.GetDefinitionManager()
oBondwireManager = oDefinitionManager.GetManager("Bondwire")
DB = edb.Database.Attach(int(oProject.GetEDBHandle()))
def changeJEDECType(bondwirenames, profile, jtype):
jvalue = {1: "Cadence APD/Allegro:JEDEC4Bondwire",
2: "Cadence APD/Allegro:JEDEC5Bondwire"}
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
] + bondwirenames,
[
"NAME:ChangedProps",
[
"NAME:Type",
"Value:=" , jvalue[jtype]
],
[
"NAME:Profile",
"Value:=" , "\"{}\"".format(profile)
]
]
]
])
def getExistingProfiles():
return oBondwireManager.GetNames()
def getCategory():
category = {}
for p in oBondwireManager.GetNames():
category[p] = []
for i in oEditor.FindObjects('type', 'bondwire'):
profile = oEditor.GetPropertyValue('BaseElementTab', i, 'Profile')[1:-1]
try:
category[profile] +=[i]
except:
category[profile] = [i]
return category
def getProfileInfo():
result = {i:(-1, '0', '0', '0') for i in getCategory()}
for i in oBondwireManager.GetNames():
data = oBondwireManager.GetData(i)
bondwire_type = data[2]
if bondwire_type not in [1, 2]:
continue
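        # strip the unit suffixes: h is stored as '<value>um', a and b as '<value>deg'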
h = data[8][0][:-2]
a = data[10][0][:-3]
b = data[12][0][:-3]
result[i] = (bondwire_type, h, a, b)
return result
def removeProfile(names):
for name in names:
oBondwireManager.Remove(name, True, "", "Project")
def addProfile(name, profile_type, h="500", a="90", b="30"):
    # profile_type 1: Jedec4Bondwire, 2: Jedec5Bondwire
oBondwireManager.Add(
[
"NAME:{}".format(name),
"Type:=" , profile_type,
"ModifiedOn:=" , str(time.time()).split('.')[0],
"Library:=" , "",
"h:=" , [h+'um'],
"a:=" , [a+'deg'],
"b:=" , [b+'deg']
])
if profile_type == 1:
result = edbd.Jedec4BondwireDef.Create(DB, name, float(h)*1e-6)
elif profile_type == 2:
result = edbd.Jedec5BondwireDef.Create(DB, name, float(h)*1e-6, float(a), float(b))
setBondwireProfile(name, profile_type)
AddWarningMessage('{} is added!'.format(name))
return result
def setBondwireProfile(name, profile_type):
x = getCategory()
bondwires = x[name]
if bondwires:
changeJEDECType(bondwires, name, profile_type)
def editProfile(name, profile_type, h='500', a='90', b='30'):
    # profile_type 1: Jedec4Bondwire, 2: Jedec5Bondwire
a = '90' if a == '' else a
b = '30' if b == '' else b
if name not in getExistingProfiles():
addProfile(name, profile_type, h, a, b)
else:
oBondwireManager.Edit(name,
[
"NAME:{}".format(name),
"Type:=" , profile_type,
"ModifiedOn:=" , str(time.time()).split('.')[0],
"Library:=" , "",
"h:=" , [h+'um'],
"a:=" , [a+'deg'],
"b:=" , [b+'deg']
])
if profile_type == 1:
result = edbd.Jedec4BondwireDef.Create(DB, name, float(h)*1e-6)
elif profile_type == 2:
result = edbd.Jedec5BondwireDef.Create(DB, name, float(h)*1e-6, float(a), float(b))
setBondwireProfile(name, profile_type)
AddWarningMessage('{} is set!'.format(name))
return result
def isfloat(x):
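    # parses x and returns True only for strictly positive numbers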
try:
return (float(x) > 0)
except:
return False
def getPW():
result = {}
for i in oEditor.FindObjects('type', 'bondwire'):
pw = oEditor.GetPropertyValue('BaseElementTab', i, 'PathWidth')
if pw in ['0fm']:
continue
else:
result[i] = pw
return result
def changeBondwirePathWidth(bondwires, pathwidth = '0fm'):
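    # the GUI below uses this to emphasise a selection: chosen bondwires keep
    # their stored width while all the others are set to '0fm' (drawn as zero width)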
if len(bondwires) == 0:
return None
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
] + bondwires,
[
"NAME:ChangedProps",
[
"NAME:PathWidth",
"Value:=" , pathwidth
]
]
]
])
def change(bondwire_name, direction, distance, point="Pt1"):
if bondwire_name not in oEditor.FindObjects('Type', 'bondwire'):
return
pt0 = oEditor.GetPropertyValue("BaseElementTab", bondwire_name, 'pt0')
pt1 = oEditor.GetPropertyValue("BaseElementTab", bondwire_name, 'pt1')
x0, y0 = map(float, pt0.strip().split(','))
x1, y1 = map(float, pt1.strip().split(','))
length = math.sqrt((x1-x0)**2 + (y1-y0)**2)
dx = distance*(x1-x0)/(length)
dy = distance*(y1-y0)/(length)
dvector = { "Forward": (dx, dy),
"Backward": (-dx, -dy),
"Left":(-dy, dx),
"Right":(dy, -dx),
}
du, dv = dvector[direction]
if point == "Pt0":
x, y = x0 + du, y0 + dv
else:
x, y = x1 + du, y1 + dv
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bondwire_name
],
[
"NAME:ChangedProps",
[
"NAME:{}".format(point),
"X:=" , "{}mm".format(x),
"Y:=" , "{}mm".format(y)
]
]
]
])
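# worked example for change(): with pt0=(0,0), pt1=(3,4) (length 5) and
# distance=1, the unit step along the wire is (dx, dy) = (0.6, 0.8);
# "Left" then offsets the chosen point by (-dy, dx) = (-0.8, 0.6), i.e. a
# 90-degree rotation of the wire axis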
def reverse(bw_name):
unit = oEditor.GetActiveUnits()
start_layer = oEditor.GetPropertyValue("BaseElementTab", bw_name, 'Start Layer')
end_layer = oEditor.GetPropertyValue("BaseElementTab", bw_name, 'End Layer')
pt0 = oEditor.GetPropertyValue("BaseElementTab", bw_name, 'Pt0').split(',')
pt1 = oEditor.GetPropertyValue("BaseElementTab", bw_name, 'Pt1').split(',')
try:
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bw_name
],
[
"NAME:ChangedProps",
[
"NAME:Start Layer",
"Value:=" , end_layer
]
]
]
])
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bw_name
],
[
"NAME:ChangedProps",
[
"NAME:End Layer",
"Value:=" , start_layer
]
]
]
])
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bw_name
],
[
"NAME:ChangedProps",
[
"NAME:Pt0",
"X:=" , "{}{}".format(pt1[0], unit),
"Y:=" , "{}{}".format(pt1[1], unit)
]
]
]
])
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bw_name
],
[
"NAME:ChangedProps",
[
"NAME:Pt1",
"X:=" , "{}{}".format(pt0[0], unit),
"Y:=" , "{}{}".format(pt0[1], unit)
]
]
]
])
AddWarningMessage('{} is switched!'.format(bw_name))
except:
AddWarningMessage('{} failed in switching!'.format(bw_name))
def alignBondwireCenter(bondwire, point='Pt0'):
try:
x, y = oEditor.GetPropertyValue('BaseElementTab', bondwire, point).split(',')
x, y = float(x), float(y)
if point == 'Pt0':
layer = oEditor.GetPropertyValue('BaseElementTab', bondwire, 'Start Layer')
else:
layer = oEditor.GetPropertyValue('BaseElementTab', bondwire, 'End Layer')
objs = oEditor.FindObjectsByPoint(oEditor.Point().Set(x*1e-3, y*1e-3), layer)
for i in objs:
if oEditor.GetPropertyValue('BaseElementTab', i, 'Type') in ['Via', 'Pin']:
u, v = oEditor.GetPropertyValue('BaseElementTab', i, 'Location').split(',')
break
else:
pass
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bondwire,
],
[
"NAME:ChangedProps",
[
"NAME:{}".format(point),
"X:=" , "{}mm".format(u),
"Y:=" , "{}mm".format(v),
]
]
]
])
AddWarningMessage('{} is aligned to {} center!'.format(bondwire, i))
except:
logging.exception('error')
#Separate Code-------------------------------------------------------
def ccw(A,B,C):
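    # True iff the points A, B, C are in counter-clockwise order
    # (positive z-component of the cross product AB x AC)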
Ax, Ay = A
Bx, By = B
Cx, Cy = C
return (Cy-Ay) * (Bx-Ax) > (By-Ay) * (Cx-Ax)
def intersect(A,B,C,D):
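    # segments AB and CD properly cross iff C and D lie on opposite sides of
    # AB and A and B lie on opposite sides of CD (degenerate touching cases
    # are deliberately ignored)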
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
def checkintersection(segments):
for (A, B), (C, D) in combinations(segments, 2):
if intersect(A, B, C, D):
return True
return False
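# quick sanity checks (illustrative):
#   intersect((0, 0), (1, 1), (0, 1), (1, 0))  -> True   (crossing diagonals)
#   intersect((0, 0), (1, 1), (2, 2), (3, 3))  -> False  (collinear, no crossing)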
def getPkgGrid(pin_name):
layer = oEditor.GetPropertyValue('BaseElementTab', pin_name, 'Start Layer')
x0, y0 = oEditor.GetPropertyValue('BaseElementTab', pin_name, 'Location').split(',')
x0, y0 = float(x0), float(y0)
grid = []
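    # sample a 21x21 grid of candidate points, 40 um apart, centred on the pad,
    # and keep only the points that actually land on the pad geometry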
for i in range(-10, 11):
for j in range(-10, 11):
x = (x0 + 0.04 * i) * 1e-3
y = (y0 + 0.04 * j) * 1e-3
pt = oEditor.Point()
pt.Set(x,y)
if pin_name in oEditor.FindObjectsByPoint(pt, layer):
grid.append((x, y))
return grid
def getDieGrid(pin_name):
layer = oEditor.GetPropertyValue('BaseElementTab', pin_name, 'Start Layer')
grid = {}
for i in oEditor.FindObjects('Type', 'bondwire'):
        p1 = oEditor.Point()
        x, y = oEditor.GetPropertyValue('BaseElementTab', i, 'Pt1').split(',')
        pt = p1.Set(float(x)*1e-3, float(y)*1e-3)
        if pin_name in oEditor.FindObjectsByPoint(pt, layer):
x, y = oEditor.GetPropertyValue('BaseElementTab', i, 'Pt0').split(',')
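            # sub-nanometre jitter keeps dict keys unique when two wires share a die point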
x, y = float(x)*1e-3+random.uniform(0, 1)*1e-9 ,float(y)*1e-3+random.uniform(0, 1)*1e-9
grid[(x, y)] = i
return grid
def separate(pcb_pad):
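    # spread the pad-side endpoints (Pt1) of every bondwire landing on this pad
    # across the pad's sampled grid points, reshuffling the random pairing
    # (up to 100000 tries) until no two wire segments cross in plan view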
pkg = getPkgGrid(pcb_pad)
AddWarningMessage('Pkg Locations: {}'.format(len(pkg)))
die = getDieGrid(pcb_pad)
AddWarningMessage('die Locations: {}'.format(len(die)))
pair = {}
N = 0
while(True):
N+=1
if N > 100000:
AddWarningMessage('Failed')
segments = []
break
segments = []
random.shuffle(pkg)
for (pt0, pt1) in zip(die.keys(), pkg):
segments.append((pt0, pt1))
if checkintersection(segments) == False:
AddWarningMessage('Successful')
break
for pt0, pt1 in segments:
pair[die[pt0]] = pt1
AddWarningMessage(str(pair))
try:
for bw_name in pair:
x, y = pair[bw_name]
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
bw_name
],
[
"NAME:ChangedProps",
[
"NAME:Pt1",
"X:=" , str(x),
"Y:=" , str(y)
]
]
]
])
except:
pass
#----------------------------------------------------------------------------
class MyForm(Forms.Form):
def __init__(self):
self.tabPage1 = Forms.TabPage()
self.ok_bt = Forms.Button()
self.label2 = Forms.Label()
self.modelname_lb = Forms.Label()
self.groupBox1 = Forms.GroupBox()
self.label8 = Forms.Label()
self.label9 = Forms.Label()
self.label10 = Forms.Label()
self.label7 = Forms.Label()
self.label6 = Forms.Label()
self.label5 = Forms.Label()
self.apply_bt = Forms.Button()
self.beta_tb = Forms.TextBox()
self.alpha_tb = Forms.TextBox()
self.h1_tb = Forms.TextBox()
self.groupBox2 = Forms.GroupBox()
self.create_bt = Forms.Button()
self.name_tb = Forms.TextBox()
self.delete_bt = Forms.Button()
self.type_cb = Forms.ComboBox()
self.model_lb = Forms.ListBox()
self.switch_tab = Forms.TabControl()
self.tabPage2 = Forms.TabPage()
self.groupBox5 = Forms.GroupBox()
self.label13 = Forms.Label()
self.label12 = Forms.Label()
self.label11 = Forms.Label()
self.separate_bt = Forms.Button()
self.align_bt = Forms.Button()
self.reverse_bt = Forms.Button()
self.groupBox4 = Forms.GroupBox()
self.right_bt = Forms.Button()
self.backward_bt = Forms.Button()
self.left_bt = Forms.Button()
self.forward_bt = Forms.Button()
self.groupBox3 = Forms.GroupBox()
self.unit_lb = Forms.Label()
self.label3 = Forms.Label()
self.step_tb = Forms.TextBox()
self.pt1_rb = Forms.RadioButton()
self.pt0_rb = Forms.RadioButton()
self.tabPage1.SuspendLayout()
self.groupBox1.SuspendLayout()
self.groupBox2.SuspendLayout()
self.switch_tab.SuspendLayout()
self.tabPage2.SuspendLayout()
self.groupBox5.SuspendLayout()
self.groupBox4.SuspendLayout()
self.groupBox3.SuspendLayout()
self.SuspendLayout()
# tabPage1
self.tabPage1.BackColor = Drawing.Color.Transparent
self.tabPage1.Controls.Add(self.ok_bt)
self.tabPage1.Controls.Add(self.label2)
self.tabPage1.Controls.Add(self.modelname_lb)
self.tabPage1.Controls.Add(self.groupBox1)
self.tabPage1.Controls.Add(self.groupBox2)
self.tabPage1.Controls.Add(self.delete_bt)
self.tabPage1.Controls.Add(self.type_cb)
self.tabPage1.Controls.Add(self.model_lb)
self.tabPage1.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.tabPage1.Location = Drawing.Point(4, 25)
self.tabPage1.Name = "tabPage1"
self.tabPage1.Padding = Forms.Padding(3)
self.tabPage1.Size = Drawing.Size(417, 506)
self.tabPage1.TabIndex = 0
self.tabPage1.Text = "Profile Edit"
# ok_bt
self.ok_bt.Anchor = (((Forms.AnchorStyles.Bottom | Forms.AnchorStyles.Right)))
self.ok_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.ok_bt.Location = Drawing.Point(304, 458)
self.ok_bt.Name = "ok_bt"
self.ok_bt.Size = Drawing.Size(100, 40)
self.ok_bt.TabIndex = 14
self.ok_bt.Text = "Interact"
self.ok_bt.UseVisualStyleBackColor = True
self.ok_bt.Click += self.ok_bt_Click
# label2
self.label2.AutoSize = True
self.label2.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label2.Location = Drawing.Point(222, 8)
self.label2.Name = "label2"
self.label2.Size = Drawing.Size(47, 16)
self.label2.TabIndex = 10
self.label2.Text = "Profile:"
# modelname_lb
self.modelname_lb.AutoSize = True
self.modelname_lb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.modelname_lb.Location = Drawing.Point(12, 8)
self.modelname_lb.Name = "modelname_lb"
self.modelname_lb.Size = Drawing.Size(84, 16)
self.modelname_lb.TabIndex = 7
self.modelname_lb.Text = "Model Name:"
# groupBox1
self.groupBox1.Anchor = (((Forms.AnchorStyles.Top | Forms.AnchorStyles.Right)))
self.groupBox1.Controls.Add(self.label8)
self.groupBox1.Controls.Add(self.label9)
self.groupBox1.Controls.Add(self.label10)
self.groupBox1.Controls.Add(self.label7)
self.groupBox1.Controls.Add(self.label6)
self.groupBox1.Controls.Add(self.label5)
self.groupBox1.Controls.Add(self.apply_bt)
self.groupBox1.Controls.Add(self.beta_tb)
self.groupBox1.Controls.Add(self.alpha_tb)
self.groupBox1.Controls.Add(self.h1_tb)
self.groupBox1.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.groupBox1.Location = Drawing.Point(222, 99)
self.groupBox1.Name = "groupBox1"
self.groupBox1.Size = Drawing.Size(182, 209)
self.groupBox1.TabIndex = 12
self.groupBox1.TabStop = False
self.groupBox1.Text = "Dimension"
# label8
self.label8.AutoSize = True
self.label8.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label8.Location = Drawing.Point(133, 108)
self.label8.Name = "label8"
self.label8.Size = Drawing.Size(28, 16)
self.label8.TabIndex = 20
self.label8.Text = "deg"
# label9
self.label9.AutoSize = True
self.label9.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label9.Location = Drawing.Point(133, 72)
self.label9.Name = "label9"
self.label9.Size = Drawing.Size(28, 16)
self.label9.TabIndex = 19
self.label9.Text = "deg"
# label10
self.label10.AutoSize = True
self.label10.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label10.Location = Drawing.Point(133, 33)
self.label10.Name = "label10"
self.label10.Size = Drawing.Size(25, 16)
self.label10.TabIndex = 18
self.label10.Text = "um"
# label7
self.label7.AutoSize = True
self.label7.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label7.Location = Drawing.Point(13, 108)
self.label7.Name = "label7"
self.label7.Size = Drawing.Size(36, 16)
self.label7.TabIndex = 17
self.label7.Text = "beta:"
# label6
self.label6.AutoSize = True
self.label6.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label6.Location = Drawing.Point(7, 72)
self.label6.Name = "label6"
self.label6.Size = Drawing.Size(42, 16)
self.label6.TabIndex = 16
self.label6.Text = "alpha:"
# label5
self.label5.AutoSize = True
self.label5.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label5.Location = Drawing.Point(24, 33)
self.label5.Name = "label5"
self.label5.Size = Drawing.Size(25, 16)
self.label5.TabIndex = 15
self.label5.Text = "h1:"
# apply_bt
self.apply_bt.Anchor = (((Forms.AnchorStyles.Bottom | Forms.AnchorStyles.Right)))
self.apply_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.apply_bt.Location = Drawing.Point(40, 150)
self.apply_bt.Name = "apply_bt"
self.apply_bt.Size = Drawing.Size(100, 40)
self.apply_bt.TabIndex = 15
self.apply_bt.Text = "Apply"
self.apply_bt.UseVisualStyleBackColor = True
self.apply_bt.Click += self.apply_bt_Click
# beta_tb
self.beta_tb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.beta_tb.Location = Drawing.Point(54, 108)
self.beta_tb.Name = "beta_tb"
self.beta_tb.Size = Drawing.Size(73, 22)
self.beta_tb.TabIndex = 6
self.beta_tb.TextChanged += self.beta_tb_TextChanged
# alpha_tb
self.alpha_tb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.alpha_tb.Location = Drawing.Point(54, 69)
self.alpha_tb.Name = "alpha_tb"
self.alpha_tb.Size = Drawing.Size(73, 22)
self.alpha_tb.TabIndex = 5
self.alpha_tb.TextChanged += self.alpha_tb_TextChanged
# h1_tb
self.h1_tb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.h1_tb.Location = Drawing.Point(54, 30)
self.h1_tb.Name = "h1_tb"
self.h1_tb.Size = Drawing.Size(73, 22)
self.h1_tb.TabIndex = 4
self.h1_tb.TextChanged += self.h1_tb_TextChanged
# groupBox2
self.groupBox2.Anchor = (((Forms.AnchorStyles.Bottom | Forms.AnchorStyles.Right)))
self.groupBox2.Controls.Add(self.create_bt)
self.groupBox2.Controls.Add(self.name_tb)
self.groupBox2.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.groupBox2.Location = Drawing.Point(222, 314)
self.groupBox2.Name = "groupBox2"
self.groupBox2.Size = Drawing.Size(182, 133)
self.groupBox2.TabIndex = 13
self.groupBox2.TabStop = False
self.groupBox2.Text = "New Profile"
# create_bt
self.create_bt.Anchor = (((Forms.AnchorStyles.Bottom | Forms.AnchorStyles.Right)))
self.create_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.create_bt.Location = Drawing.Point(40, 71)
self.create_bt.Name = "create_bt"
self.create_bt.Size = Drawing.Size(100, 40)
self.create_bt.TabIndex = 16
self.create_bt.Text = "Add"
self.create_bt.UseVisualStyleBackColor = True
self.create_bt.Click += self.create_bt_Click
# name_tb
self.name_tb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.name_tb.Location = Drawing.Point(24, 31)
self.name_tb.Name = "name_tb"
self.name_tb.Size = Drawing.Size(134, 22)
self.name_tb.TabIndex = 7
self.name_tb.TextChanged += self.name_tb_TextChanged
# delete_bt
self.delete_bt.Anchor = (((Forms.AnchorStyles.Bottom | Forms.AnchorStyles.Left)))
self.delete_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.delete_bt.Location = Drawing.Point(104, 458)
self.delete_bt.Name = "delete_bt"
self.delete_bt.Size = Drawing.Size(100, 40)
self.delete_bt.TabIndex = 8
self.delete_bt.Text = "Delete"
self.delete_bt.UseVisualStyleBackColor = True
self.delete_bt.Click += self.delete_bt_Click
# type_cb
self.type_cb.Anchor = (((Forms.AnchorStyles.Top | Forms.AnchorStyles.Right)))
self.type_cb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.type_cb.FormattingEnabled = True
self.type_cb.Location = Drawing.Point(222, 43)
self.type_cb.Name = "type_cb"
self.type_cb.Size = Drawing.Size(182, 24)
self.type_cb.TabIndex = 11
self.type_cb.Text = "None"
self.type_cb.SelectedIndexChanged += self.type_cb_SelectedIndexChanged
# model_lb
self.model_lb.Anchor = ((((Forms.AnchorStyles.Top | Forms.AnchorStyles.Bottom)| Forms.AnchorStyles.Left)))
self.model_lb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.model_lb.FormattingEnabled = True
self.model_lb.ItemHeight = 16
self.model_lb.Location = Drawing.Point(12, 43)
self.model_lb.Name = "model_lb"
self.model_lb.ScrollAlwaysVisible = True
self.model_lb.Size = Drawing.Size(192, 404)
self.model_lb.TabIndex = 9
self.model_lb.SelectedIndexChanged += self.model_lb_SelectedIndexChanged
# switch_tab
self.switch_tab.Controls.Add(self.tabPage1)
self.switch_tab.Controls.Add(self.tabPage2)
self.switch_tab.Dock = Forms.DockStyle.Fill
self.switch_tab.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.switch_tab.Location = Drawing.Point(0, 0)
self.switch_tab.Margin = Forms.Padding(5)
self.switch_tab.Name = "switch_tab"
self.switch_tab.SelectedIndex = 0
self.switch_tab.Size = Drawing.Size(425, 535)
self.switch_tab.TabIndex = 0
# tabPage2
self.tabPage2.BackColor = Drawing.Color.Transparent
self.tabPage2.Controls.Add(self.groupBox5)
self.tabPage2.Controls.Add(self.groupBox4)
self.tabPage2.Controls.Add(self.groupBox3)
self.tabPage2.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.tabPage2.Location = Drawing.Point(4, 25)
self.tabPage2.Name = "tabPage2"
self.tabPage2.Padding = Forms.Padding(3)
self.tabPage2.Size = Drawing.Size(417, 506)
self.tabPage2.TabIndex = 1
self.tabPage2.Text = "Bondwire Move"
# groupBox5
self.groupBox5.Anchor = ((((Forms.AnchorStyles.Top | Forms.AnchorStyles.Left)| Forms.AnchorStyles.Right)))
self.groupBox5.Controls.Add(self.label13)
self.groupBox5.Controls.Add(self.label12)
self.groupBox5.Controls.Add(self.label11)
self.groupBox5.Controls.Add(self.separate_bt)
self.groupBox5.Controls.Add(self.align_bt)
self.groupBox5.Controls.Add(self.reverse_bt)
self.groupBox5.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.groupBox5.Location = Drawing.Point(6, 314)
self.groupBox5.Name = "groupBox5"
self.groupBox5.Size = Drawing.Size(405, 182)
self.groupBox5.TabIndex = 2
self.groupBox5.TabStop = False
self.groupBox5.Text = "Functions"
# label13
self.label13.AutoSize = True
self.label13.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label13.Location = Drawing.Point(141, 85)
self.label13.Name = "label13"
self.label13.Size = Drawing.Size(207, 16)
self.label13.TabIndex = 5
self.label13.Text = "\"Select Bondwires and Pt to Align\""
# label12
self.label12.AutoSize = True
self.label12.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label12.Location = Drawing.Point(141, 139)
self.label12.Name = "label12"
self.label12.Size = Drawing.Size(216, 16)
self.label12.TabIndex = 4
self.label12.Text = "\"Select Pad to Separate Bondwires\""
# label11
self.label11.AutoSize = True
self.label11.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label11.Location = Drawing.Point(141, 33)
self.label11.Name = "label11"
self.label11.Size = Drawing.Size(183, 16)
self.label11.TabIndex = 3
self.label11.Text = "\"Select Bondwires to Reverse\""
# separate_bt
self.separate_bt.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.separate_bt.Location = Drawing.Point(15, 128)
self.separate_bt.Name = "separate_bt"
self.separate_bt.Size = Drawing.Size(120, 40)
self.separate_bt.TabIndex = 2
self.separate_bt.Text = "Separate"
self.separate_bt.UseVisualStyleBackColor = True
self.separate_bt.Click += self.separate_bt_Click
# align_bt
self.align_bt.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.align_bt.Location = Drawing.Point(15, 74)
self.align_bt.Name = "align_bt"
self.align_bt.Size = Drawing.Size(120, 40)
self.align_bt.TabIndex = 1
self.align_bt.Text = "Aligh Center"
self.align_bt.UseVisualStyleBackColor = True
self.align_bt.Click += self.align_bt_Click
# reverse_bt
self.reverse_bt.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.reverse_bt.Location = Drawing.Point(15, 22)
self.reverse_bt.Name = "reverse_bt"
self.reverse_bt.Size = Drawing.Size(120, 40)
self.reverse_bt.TabIndex = 0
self.reverse_bt.Text = "Reverse"
self.reverse_bt.UseVisualStyleBackColor = True
self.reverse_bt.Click += self.reverse_bt_Click
# groupBox4
self.groupBox4.Anchor = ((((Forms.AnchorStyles.Top | Forms.AnchorStyles.Left)| Forms.AnchorStyles.Right)))
self.groupBox4.Controls.Add(self.right_bt)
self.groupBox4.Controls.Add(self.backward_bt)
self.groupBox4.Controls.Add(self.left_bt)
self.groupBox4.Controls.Add(self.forward_bt)
self.groupBox4.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.groupBox4.Location = Drawing.Point(6, 97)
self.groupBox4.Name = "groupBox4"
self.groupBox4.Size = Drawing.Size(405, 211)
self.groupBox4.TabIndex = 1
self.groupBox4.TabStop = False
self.groupBox4.Text = "Move"
# right_bt
self.right_bt.BackColor = Drawing.Color.Navy
self.right_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.right_bt.ForeColor = Drawing.SystemColors.ButtonHighlight
self.right_bt.Location = Drawing.Point(262, 65)
self.right_bt.Name = "right_bt"
self.right_bt.Size = Drawing.Size(100, 80)
self.right_bt.TabIndex = 3
self.right_bt.Text = "Right"
self.right_bt.UseVisualStyleBackColor = False
self.right_bt.Click += self.right_bt_Click
# backward_bt
self.backward_bt.BackColor = Drawing.Color.Navy
self.backward_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.backward_bt.ForeColor = Drawing.SystemColors.ButtonHighlight
self.backward_bt.Location = Drawing.Point(156, 117)
self.backward_bt.Name = "backward_bt"
self.backward_bt.Size = Drawing.Size(100, 80)
self.backward_bt.TabIndex = 2
self.backward_bt.Text = "Backward"
self.backward_bt.UseVisualStyleBackColor = False
self.backward_bt.Click += self.backward_bt_Click
# left_bt
self.left_bt.BackColor = Drawing.Color.Navy
self.left_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.left_bt.ForeColor = Drawing.SystemColors.ButtonHighlight
self.left_bt.Location = Drawing.Point(50, 65)
self.left_bt.Name = "left_bt"
self.left_bt.Size = Drawing.Size(100, 80)
self.left_bt.TabIndex = 1
self.left_bt.Text = "Left"
self.left_bt.UseVisualStyleBackColor = False
self.left_bt.Click += self.left_bt_Click
# forward_bt
self.forward_bt.BackColor = Drawing.Color.Navy
self.forward_bt.Font = Drawing.Font("Arial", 12, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.forward_bt.ForeColor = Drawing.SystemColors.ButtonHighlight
self.forward_bt.Location = Drawing.Point(156, 21)
self.forward_bt.Name = "forward_bt"
self.forward_bt.Size = Drawing.Size(100, 80)
self.forward_bt.TabIndex = 0
self.forward_bt.Text = "Forward"
self.forward_bt.UseVisualStyleBackColor = False
self.forward_bt.Click += self.forward_bt_Click
# groupBox3
self.groupBox3.Anchor = ((((Forms.AnchorStyles.Top | Forms.AnchorStyles.Left)| Forms.AnchorStyles.Right)))
self.groupBox3.Controls.Add(self.unit_lb)
self.groupBox3.Controls.Add(self.label3)
self.groupBox3.Controls.Add(self.step_tb)
self.groupBox3.Controls.Add(self.pt1_rb)
self.groupBox3.Controls.Add(self.pt0_rb)
self.groupBox3.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.groupBox3.Location = Drawing.Point(6, 6)
self.groupBox3.Name = "groupBox3"
self.groupBox3.Size = Drawing.Size(405, 85)
self.groupBox3.TabIndex = 0
self.groupBox3.TabStop = False
self.groupBox3.Text = "Point To Move"
# unit_lb
self.unit_lb.AutoSize = True
self.unit_lb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.unit_lb.Location = Drawing.Point(348, 40)
self.unit_lb.Name = "unit_lb"
self.unit_lb.Size = Drawing.Size(29, 16)
self.unit_lb.TabIndex = 5
self.unit_lb.Text = "mm"
# label3
self.label3.AutoSize = True
self.label3.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.label3.Location = Drawing.Point(227, 40)
self.label3.Name = "label3"
self.label3.Size = Drawing.Size(36, 16)
self.label3.TabIndex = 4
self.label3.Text = "step:"
# step_tb
self.step_tb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.step_tb.Location = Drawing.Point(269, 38)
self.step_tb.Name = "step_tb"
self.step_tb.Size = Drawing.Size(73, 22)
self.step_tb.TabIndex = 3
self.step_tb.Text = "0.01"
self.step_tb.TextAlign = Forms.HorizontalAlignment.Center
self.step_tb.TextChanged += self.step_tb_TextChanged
# pt1_rb
self.pt1_rb.AutoSize = True
self.pt1_rb.Checked = True
self.pt1_rb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.pt1_rb.Location = Drawing.Point(70, 38)
self.pt1_rb.Name = "pt1_rb"
self.pt1_rb.Size = Drawing.Size(45, 20)
self.pt1_rb.TabIndex = 2
self.pt1_rb.TabStop = True
self.pt1_rb.Text = "Pt1"
self.pt1_rb.UseVisualStyleBackColor = True
# pt0_rb
self.pt0_rb.AutoSize = True
self.pt0_rb.Font = Drawing.Font("Arial", 9.75, Drawing.FontStyle.Regular, Drawing.GraphicsUnit.Point)
self.pt0_rb.Location = Drawing.Point(15, 38)
self.pt0_rb.Name = "pt0_rb"
self.pt0_rb.Size = Drawing.Size(45, 20)
self.pt0_rb.TabIndex = 1
self.pt0_rb.Text = "Pt0"
self.pt0_rb.UseVisualStyleBackColor = True
# Form1
self.AutoScaleDimensions = Drawing.SizeF(7, 15)
self.AutoScaleMode = Forms.AutoScaleMode.Font
self.ClientSize = Drawing.Size(425, 535)
self.Controls.Add(self.switch_tab)
self.FormBorderStyle = Forms.FormBorderStyle.FixedSingle
self.MaximizeBox = False
self.MinimizeBox = False
self.Name = "Form1"
self.Text = "Bondwire Profile Editor"
self.TopMost = True
self.FormClosed += self.Form1_FormClosed
self.Load += self.Form1_Load
self.tabPage1.ResumeLayout(False)
self.tabPage1.PerformLayout()
self.groupBox1.ResumeLayout(False)
self.groupBox1.PerformLayout()
self.groupBox2.ResumeLayout(False)
self.groupBox2.PerformLayout()
self.switch_tab.ResumeLayout(False)
self.tabPage2.ResumeLayout(False)
self.groupBox5.ResumeLayout(False)
self.groupBox5.PerformLayout()
self.groupBox4.ResumeLayout(False)
self.groupBox3.ResumeLayout(False)
self.groupBox3.PerformLayout()
self.ResumeLayout(False)
def forward_bt_Click(self, sender, e):
try:
direction = sender.Text
distance = float(self.step_tb.Text)
bondwires = oEditor.GetSelections()
point = 'Pt0' if self.pt0_rb.Checked else 'Pt1'
for bondwire in bondwires:
change(bondwire, direction, distance, point)
oEditor.Select(bondwires)
except:
MessageBox.Show("Please Select Bondwires First!", 'Wrong Selection!')
def alpha_tb_TextChanged(self, sender, e):
self.checkInputValue(sender)
def create_bt_Click(self, sender, e):
name = self.name_tb.Text
profile_type = self.type_cb.SelectedIndex
h = self.h1_tb.Text
a = self.alpha_tb.Text
b = self.beta_tb.Text
x = addProfile(name, profile_type, h, a, b)
self.db[name] = x
self.refreshListBox()
self.name_tb.Text = ''
def backward_bt_Click(self, sender, e):
self.forward_bt_Click(sender, e)
def left_bt_Click(self, sender, e):
self.forward_bt_Click(sender, e)
def align_bt_Click(self, sender, e):
all_bondwires = oEditor.FindObjects('type', 'bondwire')
bondwires = set(oEditor.GetSelections()).intersection(set(all_bondwires))
point = 'Pt0' if self.pt0_rb.Checked else 'Pt1'
for i in bondwires:
alignBondwireCenter(i, point)
oEditor.Select(list(bondwires))
def step_tb_TextChanged(self, sender, e):
pass
def Form1_FormClosed(self, sender, e):
all_bondwires = oEditor.FindObjects('type', 'bondwire')
self.changePathWidth(list(all_bondwires))
def ok_bt_Click(self, sender, e):
self.ok_bt.Enabled = False
oDesktop.PauseScript("You can interact with AEDT now.")
def delete_bt_Click(self, sender, e):
selected_profiles = [i for i in self.model_lb.SelectedItems]
removeProfile(selected_profiles)
self.refreshListBox()
self.delete_bt.Enabled = False
self.modelname_lb.Text = 'Model Name:'
def name_tb_TextChanged(self, sender, e):
self.checkCreateValid()
def type_cb_SelectedIndexChanged(self, sender, e):
try:
bondwire_type = { 0: (False, False, False, False),
1: (True, False, False, self.checkApplyValid()),
2: (True, True, True, self.checkApplyValid()) }
( self.h1_tb.Enabled,
self.alpha_tb.Enabled,
self.beta_tb.Enabled,
self.apply_bt.Enabled,) = bondwire_type[sender.SelectedIndex]
self.checkCreateValid()
except:
pass
def reverse_bt_Click(self, sender, e):
all_bondwires = oEditor.FindObjects('type', 'bondwire')
bondwires = set(oEditor.GetSelections()).intersection(set(all_bondwires))
for i in bondwires:
reverse(i)
oEditor.Select(list(bondwires))
def Form1_Load(self, sender, e):
try:
self.typemap = {-1: "None", 1: "JEDEC4", 2: "JEDEC5"}
#self.x0 = self.model_lb.Items[0]
self.delete_bt.Enabled = False
self.create_bt.Enabled = False
self.pw_info = {}
self.refreshListBox()
self.db = {}
self.type_cb.Items.Add('None')
self.type_cb.Items.Add('JEDEC4')
self.type_cb.Items.Add('JEDEC5')
except:
logging.exception('error')
def model_lb_SelectedIndexChanged(self, sender, e):
try:
selected_bondwires = []
for i in range(len(self.model_lb.SelectedItems)):
selected_bondwires += self.category[self.model_lb.SelectedItems[i]]
N = len(selected_bondwires)
self.modelname_lb.Text = 'Bondwires: #{}'.format(N)
self.changePathWidth(selected_bondwires)
oEditor.Select(selected_bondwires)
            self.delete_bt.Enabled = (N == 0)
info = []
for i in self.model_lb.SelectedItems:
info.append(self.info[i])
bw_type, h1, alpha, beta = zip(*info)
if len(set(bw_type)) == 1:
self.type_cb.Text = self.typemap[bw_type[0]]
else:
self.type_cb.Text = ''
if len(set(h1)) == 1:
self.h1_tb.Text = h1[0]
else:
self.h1_tb.Text = ''
if len(set(alpha)) == 1:
self.alpha_tb.Text = alpha[0]
else:
self.alpha_tb.Text = ''
if len(set(beta)) == 1:
self.beta_tb.Text = beta[0]
else:
self.beta_tb.Text = ''
except:
logging.exception('error')
def beta_tb_TextChanged(self, sender, e):
self.checkInputValue(sender)
def right_bt_Click(self, sender, e):
self.forward_bt_Click(sender, e)
def separate_bt_Click(self, sender, e):
try:
sele = oEditor.GetSelections()
for s in sele:
separate(s)
oEditor.Select(sele)
except:
MessageBox.Show("Please Select Package Pad!", 'Wrong Selection!')
logging.exception('error')
def apply_bt_Click(self, sender, e):
try:
profile_type = self.type_cb.SelectedIndex
h = self.h1_tb.Text
a = self.alpha_tb.Text
b = self.beta_tb.Text
selected_profiles = [i.Text for i in self.model_lb.SelectedItems]
for name in selected_profiles:
try:
self.db[name].Delete()
except:
pass
x = editProfile(name, profile_type, h, a, b)
self.db[name] = x
self.refreshListBox()
for i in self.model_lb.Items:
if i.Text in selected_profiles:
self.model_lb.SelectedItems.Add(i)
except:
logging.exception('error')
def h1_tb_TextChanged(self, sender, e):
self.checkInputValue(sender)
def refreshListBox(self):
self.modelname_lb.Text = 'Model Name:'
self.category = getCategory()
self.info = getProfileInfo()
self.model_lb.Items.Clear()
for i in sorted(self.category):
self.model_lb.Items.Add(i)
self.delete_bt.Enabled = False
def changePathWidth(self, selected_bondwires):
x = getPW()
for i in x:
self.pw_info[i] = x[i]
all_bondwires = oEditor.FindObjects('type', 'bondwire')
result = {}
for i in selected_bondwires:
try:
result[self.pw_info[i]] += [i]
except:
result[self.pw_info[i]] = [i]
result['0fm'] = list(set(all_bondwires).difference(set(selected_bondwires)))
for diameter in result:
changeBondwirePathWidth(result[diameter], diameter)
def checkApplyValid(self):
condition = [isfloat(self.h1_tb.Text),
self.type_cb.SelectedIndex == 1,
len(self.model_lb.SelectedItems)]
if all(condition):
self.apply_bt.Enabled = True
return True
condition = [isfloat(self.h1_tb.Text),
isfloat(self.alpha_tb.Text),
isfloat(self.beta_tb.Text),
self.type_cb.SelectedIndex == 2,
len(self.model_lb.SelectedItems)
]
if all(condition):
self.apply_bt.Enabled = True
return True
else:
self.apply_bt.Enabled = False
return False
def checkCreateValid(self):
condition = [isfloat(self.h1_tb.Text),
self.type_cb.SelectedIndex == 1,
len(self.name_tb.Text) > 0,
self.name_tb.Text.lower() not in [i.lower() for i in self.category]
]
if all(condition):
self.create_bt.Enabled = True
return True
condition = [isfloat(self.h1_tb.Text),
isfloat(self.alpha_tb.Text),
isfloat(self.beta_tb.Text),
self.type_cb.SelectedIndex == 2,
len(self.name_tb.Text) > 0,
self.name_tb.Text.lower() not in [i.lower() for i in self.category]
]
if all(condition):
self.create_bt.Enabled = True
return True
else:
self.create_bt.Enabled = False
return False
def checkInputValue(self, sender):
if isfloat(sender.Text) and float(sender.Text) > 0:
sender.BackColor = Color.White
else:
sender.BackColor = Color.Red
self.checkCreateValid()
self.checkApplyValid()
if __name__ == '__main__':
try:
form = MyForm()
form.ShowDialog()
        form.Dispose()
#form.Show()
#oDesktop.PauseScript()
except:
logging.exception('ERROR!')
| 39.8032 | 116 | 0.552116 | 5,292 | 49,754 | 5.088624 | 0.091837 | 0.010918 | 0.022838 | 0.03045 | 0.458242 | 0.344424 | 0.287757 | 0.258346 | 0.240039 | 0.225927 | 0 | 0.034494 | 0.328757 | 49,754 | 1,249 | 117 | 39.835068 | 0.771836 | 0.015215 | 0 | 0.228332 | 0 | 0 | 0.057783 | 0.002139 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04287 | false | 0.00466 | 0.011184 | 0.001864 | 0.075489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
806640663332b26791d299631d7a07702f2f99ab | 1,738 | py | Python | nodedge/blocks/custom/input_block.py | Nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | 7 | 2020-03-25T19:54:56.000Z | 2021-06-09T04:43:58.000Z | nodedge/blocks/custom/input_block.py | Nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | 9 | 2020-01-17T10:47:54.000Z | 2021-05-30T12:40:28.000Z | nodedge/blocks/custom/input_block.py | nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import List
from nodedge.blocks.block import Block
from nodedge.blocks.block_config import BLOCKS_ICONS_PATH, OP_NODE_INPUT, registerNode
from nodedge.blocks.graphics_block import GraphicsBlock
from nodedge.blocks.graphics_input_block_content import GraphicsInputBlockContent
from nodedge.socket_type import SocketType
@registerNode(OP_NODE_INPUT)
class InputBlock(Block):
icon = f"{BLOCKS_ICONS_PATH}/input.png"
operationCode = OP_NODE_INPUT
operationTitle = "Input"
contentLabel = "In"
contentLabelObjectName = "InputBlockContent"
library = "input/output"
inputSocketTypes: List[SocketType] = []
outputSocketTypes: List[SocketType] = [
SocketType.Any,
]
def __init__(self, scene):
super().__init__(
scene,
inputSocketTypes=self.__class__.inputSocketTypes,
outputSocketTypes=self.__class__.outputSocketTypes,
)
self.eval()
# noinspection PyAttributeOutsideInit
def initInnerClasses(self):
self.content = GraphicsInputBlockContent(self)
self.graphicsNode = GraphicsBlock(self)
self.content.edit.textChanged.connect(self.onInputChanged)
def evalImplementation(self):
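        # assumption: the edit field holds numeric text; float() would raise on
        # empty or non-numeric input, which is not guarded here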
rawValue = self.content.edit.text()
convertedValue = float(rawValue)
self.value = convertedValue
self.isDirty = False
self.isInvalid = False
self.markDescendantsInvalid(False)
self.markDescendantsDirty(True)
return self.value
def generateCode(self, currentVarIndex: int, inputVarIndexes: List[int]):
generatedCode: str = f"var_{str(currentVarIndex)} = {str(self.eval())}\n"
return generatedCode
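        # illustrative: with currentVarIndex=3 and a value of 2.5 this yields
        # "var_3 = 2.5\n"; inputVarIndexes is unused since an input block has
        # no upstream variables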
| 30.491228 | 86 | 0.705984 | 169 | 1,738 | 7.065089 | 0.443787 | 0.046064 | 0.056951 | 0.036851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000725 | 0.206559 | 1,738 | 56 | 87 | 31.035714 | 0.86512 | 0.032796 | 0 | 0 | 0 | 0 | 0.067938 | 0.032777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.146341 | 0 | 0.512195 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8066624d5dffeae87c4031c186ee89c3a0ab8dcd | 5,890 | py | Python | app/core.py | JulienPetit-1/DataTools_Project | 60dc787e219e3a00a4a0b14808e8ad32a7e0f878 | [
"MIT"
] | null | null | null | app/core.py | JulienPetit-1/DataTools_Project | 60dc787e219e3a00a4a0b14808e8ad32a7e0f878 | [
"MIT"
] | null | null | null | app/core.py | JulienPetit-1/DataTools_Project | 60dc787e219e3a00a4a0b14808e8ad32a7e0f878 | [
"MIT"
] | null | null | null | import pandas as pd
class Core:
def __init__(self, players):
        # drop any player row containing missing values before analysis
        index_with_nan = players.index[players.isnull().any(axis=1)]
        players.drop(index_with_nan, axis=0, inplace=True)
self.Players = players
def roi_top_players(self):
'''
        Sort the player list by ROI, best first
:return: List of all players sorted by ROI descending
:rtype: list(dict)
'''
return self.Players.sort_values(by=['ROI'], ascending=False).to_dict("records")
def roi_bottom_players(self):
'''
        Sort the player list by ROI, worst first
:return: List of all players sorted by ROI ascending
:rtype: list(dict)
'''
return self.Players.sort_values(by=['ROI'], ascending=True).to_dict("records")
    def average_player_roi(self):
        '''
        Compute the average ROI over all players
        :return: Mean ROI rounded to two decimal places
        :rtype: float
        '''
        return round(float(self.Players['ROI'].mean()), 2)
def points_top_players(self):
'''
        Sort the players by goals scored
        :return: List of all players sorted by goals, descending
:rtype: list(dict)
'''
return self.Players.sort_values(by=['Goals'], ascending=False).to_dict("records")
def players_by_status(self, status):
'''
        Filter the players by status
        :return: List of all players whose status matches
:rtype: list(dict)
'''
return self.Players[self.Players['Status'].str.match(status)].to_dict("records")
def roi_filter_by_position(self, position, number = 10):
'''
        Filter players by position and rank them by ROI
        :return: Top `number` players at the given position, sorted by ROI descending
:rtype: list(dict)
'''
return self.Players[self.Players['Position'].str.match(position)].sort_values(by=['ROI'], ascending=False)[:number].to_dict("records")
def points_filter_by_position(self, position, number = 10):
'''
        Filter players by position and rank them by points
        :return: Top `number` players at the given position, sorted by goals descending
:rtype: list(dict)
'''
return self.Players[self.Players['Position'].str.match(position)].sort_values(by=['Goals'], ascending=False)[:number].to_dict("records")
def team_list(self):
'''
        Count the players of each club
        :return: Number of players per club, as a pandas Series indexed by club
        :rtype: pandas.Series
'''
return self.Players.groupby('Club')['Position'].count()
def player_list(self):
'''
        List all the players with their information
        :return: List of all players' information
:rtype: List(dict)
'''
return self.Players.to_dict("records")
def build_team_by_roi(self, budget = 100, count_limit = 2, gk = 2, df = 5, md = 5, atk = 3):
'''
        Build the final team from the budget, star-count and position constraints
:param budget: Budget to allow for the team
:type budget: integer
        :param count_limit: Number of star players (picked by goals) in the team
:type count_limit: integer
:param gk: Number of goalkeepers
:type gk: integer
:param df: Number of defenders
:type df: integer
:param md: Number of midfielders
:type md: integer
:param atk: Number of attackers
:type atk: integer
        :return: List of all players chosen for the final team
:rtype: list(dict)
'''
        money_team = []
        final_team = []
        # 'injuried' is the status string exactly as it appears in the source data
        injured = self.players_by_status('injuried')
        positions = {'Goalkeeper': gk, 'Defender': df, 'Midfielder': md, 'Attacker': atk}
        # vertical pitch coordinates used below for display placement
        y = {'Goalkeeper': 410, 'Defender': 300, 'Midfielder': 50, 'Attacker': -190}
        teams = self.team_list()
for player in self.points_top_players():
if len(money_team) < count_limit and player not in injured and budget >= player['Cost'] and positions[player['Position']] > 0 and teams[player['Club']] > 0:
money_team.append(player)
budget -= player['Cost']
positions[player['Position']] = positions[player['Position']] - 1
teams[player['Club']] = teams[player['Club']] - 1
            else:
                # star slots filled (or player not eligible): complete the squad by ROI
for player in self.roi_top_players():
if player not in money_team and budget >= player['Cost'] and positions[player['Position']] > 0 and teams[player['Club']] > 0 :
money_team.append(player)
budget -= player['Cost']
positions[player['Position']] = positions[player['Position']] - 1
teams[player['Club']] = teams[player['Club']] - 1
pos = None
i = 0
for player in money_team:
player['ROI'] = round(float(player['ROI']), 2)
player['y'] = y[player['Position']]
if pos is not player['Position'] :
i = 1
pos = player['Position']
else:
i = i + 1
row_team = sum(value['Position'] == pos for value in money_team)
player['x'] = (i/(row_team+1))* 600 - 300
final_team.append(player)
total_points = sum([item['Goals'] for item in money_team])
print('Budget: ' + str(round(budget, 2)))
print('Points: ' + str(total_points))
return final_team | 38.496732 | 168 | 0.543633 | 682 | 5,890 | 4.593842 | 0.180352 | 0.052665 | 0.034472 | 0.04309 | 0.479094 | 0.442387 | 0.412384 | 0.361315 | 0.3045 | 0.291095 | 0 | 0.011917 | 0.344652 | 5,890 | 153 | 169 | 38.496732 | 0.799741 | 0.261969 | 0 | 0.16129 | 0 | 0 | 0.095119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.177419 | false | 0 | 0.016129 | 0 | 0.370968 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80676fec9d54f2f82f75da34b314f6afd4212486 | 3,799 | py | Python | main/classify_program.py | Abel-Huang/simple-image-classifier | 89d2822c2b06cdec728f734d43d9638f4b601348 | [
"MIT"
] | 4 | 2017-05-17T08:01:38.000Z | 2018-07-22T11:13:55.000Z | main/classify_program.py | Abel-Huang/ImageClassifier | 89d2822c2b06cdec728f734d43d9638f4b601348 | [
"MIT"
] | null | null | null | main/classify_program.py | Abel-Huang/ImageClassifier | 89d2822c2b06cdec728f734d43d9638f4b601348 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from sklearn import svm
from sklearn.externals import joblib
from main import data_set as ds
from main import feature_program as fp
from util import save_2_db as db
from util import file_manage as fm
# Train the SVM classifiers
def train_classifier(feature_type):
train_data = np.float32([]).reshape(0, 50)
response = np.float32([])
dict_idx = 0
for name, count in ds.trainset_info.items():
dir = '../data/train_set/' + name + '/'
        file_name = fm.generic_fea_filename(feature_type) + '/vocabulary/' + name + '.npy'
labels, centers = np.load(file_name)
print('Init training data of ' + name + '...')
for i in range(1, count + 1):
filename = dir + name + ' (' + str(i) + ').jpg'
img = cv2.imread(filename)
print(filename)
features = fp.cal_feature_info(img, feature_type)
feat_vec = fp.cal_feature_vec(features, centers)
train_data = np.append(train_data, feat_vec, axis=0)
res = np.repeat(np.float32([dict_idx]), count)
response = np.append(response, res)
dict_idx += 1
print('Done\n')
print('Now train svm classifier...')
train_data = np.float32(train_data)
response = response.reshape(-1, 1)
print('trainData \n')
print(train_data)
print('response \n')
print(response)
    # SVMs from sklearn
    h = .02  # step size in the mesh (unused in this function)
    # we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(train_data, response)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_data, response)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_data, response)
lin_svc = svm.LinearSVC(C=C).fit(train_data, response)
    # save the trained models
joblib.dump(svc, fm.generic_ml_filename(feature_type, 'svc'))
joblib.dump(rbf_svc, fm.generic_ml_filename(feature_type, 'rbf'))
joblib.dump(poly_svc, fm.generic_ml_filename(feature_type, 'poly'))
joblib.dump(lin_svc, fm.generic_ml_filename(feature_type, 'lin'))
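# Usage sketch (the feature type and kernel names are illustrative; they must
# match whatever fm.generic_fea_filename / fm.generic_ml_filename expect):
#   train_classifier('sift')
#   classify('sift', 'rbf', unitag='run-001')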
# Run classification with a previously trained classifier
def classify(feature_type, ml_method, unitag):
    # SVM from sklearn
    # load the trained classifier
    svc = joblib.load(fm.generic_ml_filename(feature_type, ml_method))
    total = 0  # total number of test images
    correct = 0  # number of correctly classified images
    dict_idx = 0  # class index
for name, count in ds.testset_info.items():
crt = 0
dir = '../data/test_set/' + name + '/'
file_name = fm.generic_fea_filename(feature_type) + '/vocabulary/' + name + '.npy'
labels, centers = np.load(file_name)
print('Classify on test_set ' + name + ':')
for i in range(1, count + 1):
            # predict the class of each image
filename = dir + name + ' (' + str(i) + ').jpg'
img = cv2.imread(filename)
features = fp.cal_feature_info(img, feature_type)
feat_vec = fp.cal_feature_vec(features, centers)
case = np.float32(feat_vec)
            if dict_idx == svc.predict(case):
                db.store_single(filename, name, ml_method, feature_type, 1, unitag)
                log = filename + ': is in this class'
                print(log)
crt += 1
else:
db.store_single(filename, name, ml_method, feature_type, 0, unitag)
log = filename + ': is not in this class'
print(log)
print('Accuracy: ' + str(crt) + ' / ' + str(count) + '\n')
db.store_total(name, ml_method, feature_type, crt, count, unitag)
total += count
correct += crt
dict_idx += 1
print('Total accuracy: ' + str(correct) + ' / ' + str(total))
db.store_total('total', ml_method, feature_type, correct, total, unitag)
| 37.245098 | 90 | 0.61253 | 521 | 3,799 | 4.297505 | 0.272553 | 0.073694 | 0.059402 | 0.04243 | 0.405092 | 0.363555 | 0.310853 | 0.251898 | 0.232247 | 0.192943 | 0 | 0.014612 | 0.261385 | 3,799 | 101 | 91 | 37.613861 | 0.783321 | 0.063701 | 0 | 0.25641 | 0 | 0 | 0.082366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.102564 | 0 | 0.128205 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8069c80b4ba47527c4c176a2e51cb7a78d306b86 | 4,616 | py | Python | momoichigo/app/views/resource_queue_view.py | nothink/momoichigo | 85710c31a4dddb85fc1597ceb31c80d97779502b | [
"MIT"
] | null | null | null | momoichigo/app/views/resource_queue_view.py | nothink/momoichigo | 85710c31a4dddb85fc1597ceb31c80d97779502b | [
"MIT"
] | 174 | 2021-06-21T08:19:03.000Z | 2022-03-30T23:44:55.000Z | momoichigo/app/views/resource_queue_view.py | nothink/momoichigo | 85710c31a4dddb85fc1597ceb31c80d97779502b | [
"MIT"
] | 1 | 2021-09-24T13:40:53.000Z | 2021-09-24T13:40:53.000Z | """momoichigo views."""
from __future__ import annotations
import io
import logging
from typing import Any, List, Tuple
from urllib.parse import urlparse
import pendulum
import requests
from django.core.exceptions import ValidationError
from django.db import transaction
from rest_framework import mixins, status, viewsets
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from slack_sdk.errors import SlackApiError
from slack_sdk.web.client import WebClient
from momoichigo import settings
from momoichigo.app import models, serializers
logger = logging.getLogger(__name__)
class ResourceQueueViewSet(
mixins.ListModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet
):
"""Request Queue Views."""
queryset = models.ResourceQueue.objects.all()
serializer_class = serializers.ResourceQueueSerializer
def get_serializer(
self: ResourceQueueViewSet, *args: Any, **kwargs: Any
) -> Serializer:
"""Get serializers.
        Overridden to support custom ListSerializers.
        see: https://medium.com/swlh/f73da6af7ddc
        ** warning: this override degrades the Browsable API. **
"""
kwargs["context"] = self.get_serializer_context()
if "data" in kwargs and isinstance(kwargs["data"], list):
kwargs["many"] = True
data = []
for item in kwargs["data"]:
if isinstance(item, str):
data.append({"source": item})
else:
data.append(item)
kwargs["data"] = data
return self.get_serializer_class()(*args, **kwargs)
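    # Example of the normalization above: a POST body of
    #   ["https://example.com/a.png", {"source": "https://example.com/b.png"}]
    # is turned into [{"source": ...}, {"source": ...}] before validation
    # (the URLs here are illustrative only).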
def list(
self: ResourceQueueViewSet, request: Request, *args: Any, **kwargs: Any
) -> Response:
"""List method's overwrite."""
with transaction.atomic():
            # de-duplicate sources
sources = list(self.queryset.distinct().values_list("source", flat=True))
            # clean up the queue
self.queryset.delete()
        # nothing more to do if the queue was empty
if len(sources) == 0:
return Response(data=sources, status=status.HTTP_200_OK)
collected, covered = self.__fetch_resources(sources)
        # re-enqueue anything that could not be collected this round
        remains = list(set(sources) - set(collected) - set(covered))
        remain_models = [models.ResourceQueue(source=r) for r in remains]
        models.ResourceQueue.objects.bulk_create(remain_models)
if len(collected) == 0:
return Response(data=collected, status=status.HTTP_200_OK)
self.__send_slack_message(self.__build_slack_msg(collected))
return Response(data=collected, status=status.HTTP_201_CREATED)
# ----------------- utility functions -----------------
@staticmethod
def __fetch_resources(urls: List[str]) -> Tuple[List[str], List[str]]:
"""Fetch and create Resource instance from source path.
limit: 30 sec.
"""
begin = pendulum.now()
collected = []
covered = []
for url in urls:
            # create a Resource record, then fetch the
            # target resource with a GET request
instance = models.Resource()
try:
instance.source = url
instance.validate_unique(exclude=["file"])
except ValidationError:
                # a ValidationError means this source already exists, so skip it
covered.append(instance.source)
continue
res = requests.get(url)
if res.status_code == 200 and len(res.content) > 0:
path = urlparse(url).path[1:]
                # the file destination follows the storage backend's key generation rules
instance.file.save(path, io.BytesIO(res.content))
logger.info("[fetch] " + instance.source)
instance.full_clean(validate_unique=True)
instance.save()
collected.append(instance.source)
            # stop working the queue once the total time exceeds 30 seconds
if pendulum.now().diff(begin).in_seconds() > 30:
break
        # return the collected sources and the skipped (already known) ones
return (collected, covered)
@staticmethod
def __send_slack_message(body: str) -> None:
"""Send messages to slack."""
try:
client = WebClient(token=settings.SLACK_API_TOKEN)
client.chat_postMessage(text=body, channel="#resources")
except SlackApiError as e:
logger.error(e)
@staticmethod
def __build_slack_msg(sources: List[str]) -> str:
"""Create message strings for send to slack."""
return ":strawberry: \n" + " \n".join(sources) + "\n :strawberry: "
| 34.192593 | 85 | 0.620234 | 471 | 4,616 | 5.951168 | 0.388535 | 0.011416 | 0.02426 | 0.011416 | 0.039957 | 0.030681 | 0.030681 | 0 | 0 | 0 | 0 | 0.007773 | 0.275347 | 4,616 | 134 | 86 | 34.447761 | 0.830194 | 0.129983 | 0 | 0.057471 | 0 | 0 | 0.024235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057471 | false | 0 | 0.195402 | 0 | 0.356322 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
806ddac5cfe116c67e0d9529de64b5b850440192 | 347 | py | Python | Algoritmo(Python)/Alg_S7_4.py | Daniel-Conte/Exercicios-de-Algoritmo | 5a42722516097d0aec14d80549e18501b182eebd | [
"MIT"
] | null | null | null | Algoritmo(Python)/Alg_S7_4.py | Daniel-Conte/Exercicios-de-Algoritmo | 5a42722516097d0aec14d80549e18501b182eebd | [
"MIT"
] | null | null | null | Algoritmo(Python)/Alg_S7_4.py | Daniel-Conte/Exercicios-de-Algoritmo | 5a42722516097d0aec14d80549e18501b182eebd | [
"MIT"
] | null | null | null | # variables
media = 0     # running average
maior = 0     # largest value seen
menor = 9999  # smallest value seen
# input + process
for i in range(1, 11):
    N = int(input("Enter a number: "))
    if N > maior:
        maior = N
    if N < menor:
        menor = N
    media = media + (N / 10)  # each of the 10 inputs contributes N/10 to the mean
print("Largest number: {0}".format(maior))
print("Smallest number: {0}".format(menor))
print("Media: {0}".format(media)) | 23.133333 | 40 | 0.570605 | 52 | 347 | 3.807692 | 0.442308 | 0.106061 | 0.131313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05283 | 0.236311 | 347 | 15 | 41 | 23.133333 | 0.69434 | 0.069164 | 0 | 0 | 0 | 0 | 0.192547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
806f5dbad8693bef1f42f7424619c01bd49c62cd | 2,464 | py | Python | test_depth_cityscapes.py | sanweiliti/Segmentation-MonoDepth-Pytorch | d1a3de8d10c60fe9d3b86b585e0f0089555fc8a6 | [
"MIT"
] | 25 | 2019-02-09T21:19:15.000Z | 2022-01-24T22:11:20.000Z | test_depth_cityscapes.py | sanweiliti/Segmentation-MonoDepth-Pytorch | d1a3de8d10c60fe9d3b86b585e0f0089555fc8a6 | [
"MIT"
] | null | null | null | test_depth_cityscapes.py | sanweiliti/Segmentation-MonoDepth-Pytorch | d1a3de8d10c60fe9d3b86b585e0f0089555fc8a6 | [
"MIT"
] | 4 | 2019-02-21T07:08:06.000Z | 2022-01-25T12:43:24.000Z | import yaml
import torch
import argparse
from torch.utils import data
from tqdm import tqdm
from ptsemseg.models import get_model
from ptsemseg.loader import get_loader
from ptsemseg.metrics import runningScoreDepth, averageMeter
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def test(cfg, args):
# Setup device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Setup Dataloader
data_loader = get_loader(cfg['data']['dataset'], cfg['task'])
data_path = cfg['data']['path']
loader = data_loader(
data_path,
split=cfg['data']['test_split'],
is_transform=True,
img_size=(cfg['data']['img_rows'],
cfg['data']['img_cols']),
img_norm=cfg['data']['img_norm']
)
n_classes = 0
running_metrics_val = runningScoreDepth(cfg['data']['dataset'])
testloader = data.DataLoader(loader,
batch_size=cfg['training']['batch_size'],
num_workers=0)
# Load Model
model = get_model(cfg['model'], cfg['task'], n_classes=n_classes).to(device)
    # map_location keeps the checkpoint tensors on CPU storage
    weights = torch.load(cfg['testing']['trained_model'], map_location=lambda storage, loc: storage)
model.load_state_dict(weights["model_state"])
model.eval()
model.to(device)
with torch.no_grad():
for i, (images, labels, img_path) in tqdm(enumerate(testloader)):
images = images.to(device)
labels = labels.to(device)
outputs = model(images) # [batch_size, n_classes, height, width]
if cfg['model']['arch'] == "dispnet" and cfg['task'] == "depth":
outputs = 1 / outputs
pred = outputs.squeeze(1).data.cpu().numpy()
gt = labels.data.squeeze(1).cpu().numpy()
running_metrics_val.update(gt=gt, pred=pred)
val_result = running_metrics_val.get_scores()
for k, v in val_result.items():
print(k, v)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Hyperparams")
parser.add_argument(
"--config",
nargs="?",
type=str,
default="configs/fcn_cityscapes_depth.yml",
help="Config file to be used",
)
args = parser.parse_args()
with open(args.config) as fp:
        cfg = yaml.safe_load(fp)  # safe_load avoids constructing arbitrary Python objects
test(cfg, args)
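# Invocation sketch (the config path is the default defined above):
#   python test_depth_cityscapes.py --config configs/fcn_cityscapes_depth.yml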
| 28.988235 | 100 | 0.616477 | 308 | 2,464 | 4.753247 | 0.399351 | 0.03347 | 0.020492 | 0.025956 | 0.051913 | 0.051913 | 0.051913 | 0 | 0 | 0 | 0 | 0.00269 | 0.245536 | 2,464 | 84 | 101 | 29.333333 | 0.784831 | 0.053571 | 0 | 0 | 0 | 0 | 0.110108 | 0.013763 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0.017241 | 0.189655 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
806fe60353ed1a2e39330d425617b5fb47e04792 | 1,951 | py | Python | tests/test_lambda.py | ZhukovAlexander/lambdify | e291c15bacffc871cd1c10aefe9f132420259dfd | [
"Apache-2.0"
] | 51 | 2016-04-07T12:50:08.000Z | 2020-05-19T14:56:47.000Z | tests/test_lambda.py | ZhukovAlexander/easy-lambda | e291c15bacffc871cd1c10aefe9f132420259dfd | [
"Apache-2.0"
] | null | null | null | tests/test_lambda.py | ZhukovAlexander/easy-lambda | e291c15bacffc871cd1c10aefe9f132420259dfd | [
"Apache-2.0"
] | 8 | 2016-04-08T10:05:30.000Z | 2020-01-20T14:01:05.000Z | import unittest
import zipfile
from StringIO import StringIO
import tempfile
import shutil
import boto3
import dill
import moto
import mock
import pip
from easy_lambda.deployment import Lambda, DeploymentPackage
@moto.mock_lambda
class Test(unittest.TestCase):
def setUp(self):
super(Test, self).setUp()
self.client = boto3.client('lambda', region_name='us-west-2')
@mock.patch('easy_lambda.deployment.DeploymentPackage.copy_env')
def test_create(self, mock):
value = 1
function_name = 'test_function'
@Lambda(name=function_name, bucket='test', key='test', client=self.client)
def foo():
return value
package = DeploymentPackage(foo)
zfp = zipfile.ZipFile(StringIO(package.zip_bytes(foo.dumped_code)), "r")
func = dill.load(zfp.open('.lambda.dump'))
self.assertEqual(func(), value)
resp_create = foo.create()
self.assertEqual(resp_create['FunctionName'], function_name)
# moto doesn't support ZipFile only lambda deployments, while
        # aws doesn't allow other arguments when specifying the ZipFile argument
#resp_get = foo.get()
#self.assertEqual(resp_get['Configuration']['FunctionName'], function_name)
@unittest.skip('slow')
class PackageTestCase(unittest.TestCase):
def setUp(self):
self.venv = tempfile.mkdtemp()
# <http://stackoverflow.com/a/19404371/2183102>
pip.main(['install', 'requests', '-t', self.venv])
shutil.copytree(self.venv, self.venv + '/lib/python2.7/site-packages')
def test_copy_env(self):
package = DeploymentPackage(None, None, None)
with zipfile.ZipFile(StringIO(), 'w', zipfile.ZIP_DEFLATED) as dest:
package.copy_env(dest, venv_path=self.venv)
self.assertTrue(dest.namelist(), 'For now just test that it is not empty')
def tearDown(self):
shutil.rmtree(self.venv)
| 28.691176 | 86 | 0.671963 | 240 | 1,951 | 5.370833 | 0.454167 | 0.037238 | 0.031032 | 0.037238 | 0.043445 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01361 | 0.209124 | 1,951 | 67 | 87 | 29.119403 | 0.821776 | 0.136853 | 0 | 0.047619 | 0 | 0 | 0.118068 | 0.045915 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.142857 | false | 0 | 0.261905 | 0.02381 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
807024c63669049ff37fa7c1466e2b39243f3485 | 2,397 | py | Python | command_executor/command.py | stephrdev/python-command-executor | 87b43da25e86cd60ca29b31fe5d0202e7be53cf9 | [
"MIT"
] | null | null | null | command_executor/command.py | stephrdev/python-command-executor | 87b43da25e86cd60ca29b31fe5d0202e7be53cf9 | [
"MIT"
] | 2 | 2021-06-01T22:31:14.000Z | 2021-06-01T22:32:14.000Z | command_executor/command.py | stephrdev/python-command-executor | 87b43da25e86cd60ca29b31fe5d0202e7be53cf9 | [
"MIT"
] | null | null | null | import shlex
import subprocess
from .exceptions import CommandExecutionError, CommandParameterError
class Command(object):
process = None
command = 'true'
ignore_output = True
fail_silently = False
required_parameters = None
stdout = subprocess.PIPE
stderr = subprocess.PIPE
def __init__(self, **kwargs):
self.parameters = kwargs
if not self.validate_parameters():
raise CommandParameterError(
'Parameter(s) missing, required parameters: {0}'.format(
', '.join(self.required_parameters)
)
)
def execute(self, ignore_output=None, fail_silently=None, stdin=None, **kwargs):
command = self.get_command()
ignore_output = ignore_output if ignore_output is not None else self.ignore_output
fail_silently = fail_silently if fail_silently is not None else self.fail_silently
# Don't automatically merge with os.environ for security reasons.
# Make this forwarding explicit rather than implicit.
environ = kwargs.pop('environ', None)
shell = kwargs.pop('shell', False)
try:
self.process = subprocess.Popen(
command,
shell=shell,
universal_newlines=True,
env=environ,
stdout=kwargs['stdout'] if 'stdout' in kwargs else self.stdout,
stderr=kwargs['stderr'] if 'stderr' in kwargs else self.stderr,
stdin=subprocess.PIPE,
)
stdout, stderr = self.process.communicate(input=stdin)
except OSError as exc:
raise CommandExecutionError(1, str(exc), self)
if not fail_silently and (stderr or self.process.returncode != 0):
raise CommandExecutionError(self.process.returncode, stderr or '', self)
return True if ignore_output else self.handle_output(stdout)
def validate_parameters(self):
return all(k in self.parameters for k in self.required_parameters or [])
def get_parameters(self):
return self.parameters
def get_command(self):
command = self.command.format(**self.get_parameters())
return shlex.split(str(command))
def handle_output(self, output):
return output
@property
def pid(self):
return self.process.pid if self.process else None
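# Usage sketch (hypothetical subclass; `echo` is only an illustrative command):
#   class Echo(Command):
#       command = 'echo {message}'
#       required_parameters = ['message']
#       ignore_output = False
#       def handle_output(self, output):
#           return output.strip()
#   Echo(message='hello').execute()  # -> 'hello'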
| 34.242857 | 90 | 0.637046 | 269 | 2,397 | 5.565056 | 0.315985 | 0.056112 | 0.029392 | 0.017368 | 0.022712 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001741 | 0.281185 | 2,397 | 69 | 91 | 34.73913 | 0.867092 | 0.047977 | 0 | 0 | 0 | 0 | 0.038613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132075 | false | 0 | 0.056604 | 0.075472 | 0.45283 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80727ce2ec20ae44ac4f84444e1d4ed99b47a36d | 2,926 | py | Python | tests/test_types.py | tonysimpson/pointbreak | 04e59cdda19a797b926b9541607077ad77522503 | [
"MIT"
] | 6 | 2018-07-13T09:52:14.000Z | 2019-11-27T12:39:27.000Z | tests/test_types.py | tonysimpson/pointbreak | 04e59cdda19a797b926b9541607077ad77522503 | [
"MIT"
] | 10 | 2018-07-12T14:44:44.000Z | 2019-02-07T18:59:02.000Z | tests/test_types.py | tonysimpson/pointbreak | 04e59cdda19a797b926b9541607077ad77522503 | [
"MIT"
] | null | null | null | import struct
import pointbreak
import pointbreak.types as types
from pointbreak.types import TestAccessor as Accessor
def test_type_get_simple_value():
accessor = Accessor(b"\x01\x00\x00\x00")
ref = types.reference(types.int32, 0, accessor)
assert ref.value == 1
def test_type_set_simple_value():
accessor = Accessor(b"\x00\x00\x00\x00")
ref = types.reference(types.int32, 0, accessor)
ref.value = 22
assert ref.value == 22
def test_type_get_array_value_unchecked():
accessor = Accessor(b"\x01\x02\x03")
uint8_array_unchecked = types.array_type(0, types.uint8, checked=False)
ref = types.reference(uint8_array_unchecked, 0, accessor)
assert ref.value[2] == 3
def test_type_get_array_value():
accessor = Accessor(b"\x01\x02\x03")
uint8_array_3 = types.array_type(3, types.uint8)
ref = types.reference(uint8_array_3, 0, accessor)
assert ref.value[0] == 1
assert ref.value[1] == 2
assert ref.value[2] == 3
def test_type_set_array_value():
accessor = Accessor(b"\x00\x00\x00")
uint8_array_3 = types.array_type(3, types.uint8)
ref = types.reference(uint8_array_3, 0, accessor)
ref.value[1] = 5
assert ref.value[1] == 5
def test_set_whole_array():
accessor = Accessor(b"\x00\x00\x00\x00\x00")
uint8_array_5 = types.array_type(5, types.uint8)
ref = types.reference(uint8_array_5, 0, accessor)
value = [5,4,3,2,1]
ref.value = value
assert list(ref.value) == value
def test_mulitdimensional_array():
accessor = Accessor(b'\x00\x00\x00\x00\x00\x00')
uint8_array_3_2 = types.array_type(3, types.array_type(2, types.uint8))
ref = types.reference(uint8_array_3_2, 0, accessor)
ref.value[0][0] = 244
ref.value[2][1] = 221
assert ref.value[0][0] == 244
assert ref.value[2][1] == 221
def test_array_detach():
accessor = Accessor(b"\x00\x01\x00\x05\x00")
uint8_array_5 = types.array_type(5, types.uint8)
ref = types.reference(uint8_array_5, 0, accessor)
assert ref.detach() == [0, 1, 0, 5, 0]
def test_pointer_get():
accessor = Accessor(b"\x08" + (b'\x00' * 7) + b'\x10')
uint8_pointer = types.pointer_type(types.uint8)
ref = types.reference(uint8_pointer, 0, accessor)
assert ref.value.address == 8
assert ref.value.value == 16
def test_struct():
accessor = Accessor(b'\x00' * 100)
complex_struct = types.struct_type(
('value', types.int64),
('pvalue', types.pointer_type(types.int64)),
('avalue', types.array_type(12, types.char))
)
ref = types.reference(complex_struct, 0, accessor)
ref.value.pvalue = 64
ref.value.pvalue.value = 32432424
ref.value.value = 321
assert ref.value.pvalue.value == 32432424
def test_c_string():
accessor = Accessor(b"bobbins\x00")
c_string = types.c_string_type(9)
ref = types.reference(c_string, 0, accessor)
assert ref.value == b"bobbins"
| 32.153846 | 75 | 0.679084 | 445 | 2,926 | 4.289888 | 0.14382 | 0.092195 | 0.095338 | 0.080671 | 0.53955 | 0.393924 | 0.357255 | 0.321111 | 0.24044 | 0.203248 | 0 | 0.089084 | 0.182843 | 2,926 | 90 | 76 | 32.511111 | 0.709327 | 0 | 0 | 0.189189 | 0 | 0 | 0.062564 | 0.008205 | 0 | 0 | 0 | 0 | 0.202703 | 1 | 0.148649 | false | 0 | 0.054054 | 0 | 0.202703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8072dab55c7898746ef42113226d03eadb2ebafb | 9,853 | py | Python | data/bonecell.py | edocoh87/ssd.pytorch | 09fe21af84976dd6ab09ff0c5649db2793e47468 | [
"MIT"
] | null | null | null | data/bonecell.py | edocoh87/ssd.pytorch | 09fe21af84976dd6ab09ff0c5649db2793e47468 | [
"MIT"
] | null | null | null | data/bonecell.py | edocoh87/ssd.pytorch | 09fe21af84976dd6ab09ff0c5649db2793e47468 | [
"MIT"
] | null | null | null | """
Author: Edo Cohen-Karlik
"""
from __future__ import division
# import os.path as osp
import json
# import sys
import os
import torch
import torch.utils.data as data
import cv2
import numpy as np
#from augmentations import SSDAugmentation, SSDBoneCellAugmentation
# ignore classes with label value -1.
BONE_CELL_CLASSES_MAP = {
'p': 1,
'g': 2,
'0_1': 3,
'0_2': 4,
}
# format: BGR
CLASS_COLOR_MAP = {
'p': (200,0,180), # purple
'g': (0,200,0), # green
'0_1': (255,0,0), # blue
'0_2': (0,0,255), # red
}
idx_to_class = {}
for k, v in BONE_CELL_CLASSES_MAP.items():
if v != -1:
idx_to_class[v] = k
def convert_circle_to_bbox(points, width, height):
center = points[0]
edge = points[1]
radius = int(np.sqrt(np.power(center[0]-edge[0], 2) + np.power(center[1]-edge[1], 2)))
# xmin, ymin, xmax, ymax
xmin = (center[0] - radius) / width
ymin = (center[1] - radius) / height
xmax = (center[0] + radius) / width
ymax = (center[1] + radius) / height
return [xmin, ymin, xmax, ymax]
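# Worked example: a circle annotated by center (50, 50) and edge point (50, 60)
# in a 100x100 image has radius 10, giving the normalized box [0.4, 0.4, 0.6, 0.6].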
def convert_polygon_to_bbox(points, width, height):
points = np.array(points)
xmin = np.min(points[:,0]) / width
ymin = np.min(points[:,1]) / height
xmax = np.max(points[:,0]) / width
ymax = np.max(points[:,1]) / height
return [xmin, ymin, xmax, ymax]
def convert_shape_to_bbox(points, shape_type, width, height):
if shape_type == 'circle':
return convert_circle_to_bbox(points, width, height)
elif shape_type == 'polygon':
return convert_polygon_to_bbox(points, width, height)
def mark_area(mat, start_point, end_point):
# print(start_point, end_point)
mat[start_point[1]:end_point[1], start_point[0]:end_point[0]] = 1
return mat
def draw_bbox_on_img(img, bboxes, bbox_format='gt', get_area=False):
img = img.numpy()
img = np.transpose(img, (1,2,0))
area_mat = np.zeros((img.shape[0], img.shape[1]))
# print(img.shape)
# print(area_mat.shape)
# exit()
img = img.astype(np.uint8).copy()
height, width = img.shape[:2]
if bbox_format == 'pred':
width = height = 1.0
for rec in bboxes:
cls_idx = rec[-1]
start_point = (int(rec[0]*width), int(rec[1]*height))
end_point = (int(rec[2]*width), int(rec[3]*height))
area_mat = mark_area(area_mat, start_point, end_point)
img = cv2.rectangle(img, start_point, end_point, CLASS_COLOR_MAP[idx_to_class[cls_idx]], 2)
# print(area_mat.sum()/(area_mat.shape[0]*area_mat.shape[1]))
# area = area_mat.sum()/(area_mat.shape[0]*area_mat.shape[1])
area = area_mat.mean()
if get_area:
return img, area
return img
class BoneCellAnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
# def __init__(self):
# self.class_to_ind = dict(zip(BONE_CELL_CLASSES, range(len(BONE_CELL_CLASSES))))
def __call__(self, target, width, height):
"""
Arguments:
            target (dict) : the parsed JSON annotation to be made usable,
                a dict carrying a 'shapes' list
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
orig_res = []
skipped = 0
# for obj in target.iter('object'):
for obj in target['shapes']:
label_idx = BONE_CELL_CLASSES_MAP[obj['label']]
if label_idx == -1:
skipped += 1
continue
# pts = ['xmin', 'ymin', 'xmax', 'ymax']
if obj['shape_type'] == 'polygon':
orig_plgn = obj['points']
else:
orig_plgn = None
bndbox = convert_shape_to_bbox(obj['points'], obj['shape_type'], width, height)
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
orig_res += [orig_plgn]
## img_id = target.find('filename').text[:-4]
# print('skipped {} objects'.format(skipped))
return res, orig_res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class BoneCellDetection(data.Dataset):
"""BoneCell Detection Dataset Object
input is image, target is annotation
Arguments:
        root (string): filepath to the BoneCell folder.
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
        dataset_name (string, optional): which dataset to load
            (default: 'BONECELL')
"""
def __init__(self, root, transform=None, target_transform=BoneCellAnnotationTransform(),
dataset_name='BONECELL'):
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
self.ids = list()
# for line in open(osp.join(root, mode, mode + '.txt')):
for f in os.listdir(root):
if f.endswith('.png'):
_line = f.split('.')
fname = '.'.join(_line[:-1])
# fname = os.path.join(*fname)
self.ids.append(os.path.join(root, fname))
print('loaded Bonecell dataset with {} images'.format(len(self.ids)))
# for line in open(osp.join(root, 'file_list.txt')):
# _line = line.split('.')
# fname = '.'.join(_line[:-1])
# # fname = os.path.join(*fname)
# self.ids.append(osp.join(root, fname))
def __getitem__(self, index):
im, gt, orig_gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self.ids)
def get_img_idx(self, img_id):
img_id, _ = img_id.split('.')
for idx, _id in enumerate(self.ids):
_id = _id.split('/')[-1]
if img_id == _id:
return idx
def pull_item(self, index):
img_id = self.ids[index]
# print('image id: {}'.format(img_id))
with open(img_id + '.json', 'r') as f:
target = json.loads(f.read())
img = cv2.imread(img_id + '.png')
height, width, channels = img.shape
if self.target_transform is not None:
target, orig_target = self.target_transform(target, width, height)
if self.transform is not None:
if len(target) > 0:
target = np.array(target)
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
return torch.from_numpy(img).permute(2, 0, 1), target, orig_target, height, width
def draw_gt(self, index, get_area=False):
img, target, original_target, height, width = self.pull_item(index)
return draw_bbox_on_img(img, target, get_area=get_area)
def draw_pred(self, index, pred_bbox, get_area=False):
img, target, original_target, height, width = self.pull_item(index)
return draw_bbox_on_img(img, pred_bbox, bbox_format='pred', get_area=get_area)
class BoneCellInfer(BoneCellDetection):
"""BoneCell Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to BoneCell folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, transform, target_transform=None, dataset_name='INFER'):
super().__init__(root=root, transform=transform,
target_transform=target_transform, dataset_name=dataset_name)
def pull_item(self, index):
img_id = self.ids[index]
img = cv2.imread(img_id + '.png')
height, width, channels = img.shape
img, _, _ = self.transform(img, None, None)
img = img[:, :, (2, 1, 0)]
return torch.from_numpy(img).permute(2, 0, 1), None, None, height, width
def draw_gt(self, index, get_area=False):
img, target, original_target, height, width = self.pull_item(index)
return draw_bbox_on_img(img, [], get_area=False)
def draw_pred(self, index, pred_bbox, get_area=False):
img, target, original_target, height, width = self.pull_item(index)
return draw_bbox_on_img(img, pred_bbox, bbox_format='pred', get_area=False)
# def base_transform(image, size, mean):
# x = cv2.resize(image, (size, size)).astype(np.float32)
# x -= mean
# x = x.astype(np.float32)
# return x
# class BaseTransform:
# def __init__(self, size, mean):
# self.size = size
# self.mean = np.array(mean, dtype=np.float32)
# def __call__(self, image, boxes=None, labels=None):
# return base_transform(image, self.size, self.mean), boxes, labels | 36.492593 | 99 | 0.603978 | 1,309 | 9,853 | 4.362872 | 0.194041 | 0.014708 | 0.014708 | 0.01681 | 0.376642 | 0.366661 | 0.349501 | 0.316757 | 0.303449 | 0.291893 | 0 | 0.017027 | 0.266822 | 9,853 | 270 | 100 | 36.492593 | 0.773533 | 0.32792 | 0 | 0.136054 | 0 | 0 | 0.026084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115646 | false | 0 | 0.047619 | 0.006803 | 0.29932 | 0.006803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80757abfd788a13520ba7245a84a078943b84c38 | 2,556 | py | Python | ABONO/__init__.py | SalahEddineLahniche/MLC-Kaggle-2017 | 489b76182227cbf51812c051381da4e58098d338 | [
"MIT"
] | null | null | null | ABONO/__init__.py | SalahEddineLahniche/MLC-Kaggle-2017 | 489b76182227cbf51812c051381da4e58098d338 | [
"MIT"
] | 1 | 2018-04-25T20:48:35.000Z | 2020-06-19T00:48:49.000Z | ABONO/__init__.py | SalahEddineLahniche/MLC-Kaggle-2017 | 489b76182227cbf51812c051381da4e58098d338 | [
"MIT"
] | null | null | null | import functools
import pandas as pd
from ABONO.Regressor import Regressor
from ABONO.Processer import Processer
from ABONO.Session import Session
TRAIN_PATH = 'data/train.csv'
TEST_PATH = 'data/test.csv'
def timed(session):
def innertimed(f):
import time
@functools.wraps(f)
def wrapped(*args):
            t = time.time()  # record the start time
            rslt = f(*args)
            # log the elapsed time: current time minus the recorded start time
            session.log("Time of execution is: {t:.0f} s".format(t=time.time() - t))
return rslt
return wrapped
return innertimed
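# Usage sketch (assumes `session` exposes a .log(str) method, as Session does):
#   @timed(session)
#   def heavy_step():
#       ...
#   heavy_step()  # logs "Time of execution is: <n> s"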
class model:
def __init__(self, processer, session, offset=0, dcols=None, length=None, model=None, **kwargs):
self.pr = processer
self.session = session
self.model = model
self.offset = offset
self.length = length
self.dcols = dcols
self.m_args = kwargs
def run(self, cross_validate=False, processed_train_data=None, processed_test_data=None):
if not processed_train_data:
self.session.log("raw train dataset: {file}".format(file=TRAIN_PATH))
self.session.init_train()
tmp_train = self.session.get_train_filename()
self.session.log("structured train dataset: {file}--".format(file=tmp_train))
with open(TRAIN_PATH) as f:
with open(tmp_train, 'w') as g:
self.pr.process(f, g, length=self.length, offset=self.offset)
processed_train_data = tmp_train
if not processed_test_data and not cross_validate:
self.session.log("raw train dataset: {file}".format(file=TEST_PATH))
self.session.init_test()
tmp_test = self.session.get_test_filename()
self.session.log("structured train dataset: {file}".format(file=tmp_test))
with open(TEST_PATH) as f:
with open(tmp_test, 'w') as g:
self.pr.process(f, g, length=self.length, offset=self.offset)
processed_test_data = tmp_test
self.df = pd.read_csv(processed_train_data)
if not cross_validate:
self.tdf = pd.read_csv(processed_test_data)
else:
self.tdf = None
reg = Regressor(self.session, self.df, self.tdf, self.dcols, self.model, **self.m_args)
if cross_validate:
return reg.cross_validate()
else:
y = reg.predict()
mse = reg.cross_validate(fit=False)
return y, mse
| 38.149254 | 100 | 0.621283 | 338 | 2,556 | 4.550296 | 0.242604 | 0.071521 | 0.046814 | 0.057217 | 0.236671 | 0.236671 | 0.213264 | 0.213264 | 0.213264 | 0.157347 | 0 | 0.001082 | 0.276604 | 2,556 | 66 | 101 | 38.727273 | 0.830719 | 0.039906 | 0 | 0.068966 | 0 | 0 | 0.071807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.103448 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8075ab5a27998d20f0817eccb49607a1460552d7 | 29,946 | py | Python | neon/layers/recurrent.py | sjuvekar/neon | abe5d30a68663c739a97a9e657516d530c66dbd9 | [
"Apache-2.0"
] | null | null | null | neon/layers/recurrent.py | sjuvekar/neon | abe5d30a68663c739a97a9e657516d530c66dbd9 | [
"Apache-2.0"
] | 4 | 2021-03-26T00:21:20.000Z | 2022-03-12T00:46:11.000Z | neon/layers/recurrent.py | huamichaelchen/neon | abe5d30a68663c739a97a9e657516d530c66dbd9 | [
"Apache-2.0"
] | 1 | 2016-08-12T09:05:04.000Z | 2016-08-12T09:05:04.000Z | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon.layers.layer import ParameterLayer, Layer
def get_steps(x, shape):
"""
Convert a (vocab_size, steps * batch_size) array
into a [(vocab_size, batch_size)] * steps list of views
"""
steps = shape[1]
if x is None:
return [None for step in range(steps)]
xs = x.reshape(shape + (-1,))
return [xs[:, step, :] for step in range(steps)]
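# Illustrative shapes: with vocab_size=4, steps=3 and batch_size=2, x has shape
# (4, 6) and get_steps(x, (4, 3)) yields three (4, 2) views, one per time step.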
class Recurrent(ParameterLayer):
"""
Basic recurrent layer
Arguments:
output_size (int): Number of hidden/output units
init (Initializer): Function for initializing the model's input to hidden weights. By
default, this initializer will also be used for recurrent parameters
unless init_inner is also specified. Biases will always be
initialized to zero.
init_inner (Initializer, optional): Function for initializing the model's recurrent
parameters. If absent, will default to using same
initializer provided to init.
activation (Transform): Activation function for the input modulation
Attributes:
W_input (Tensor): weights from inputs to output units
(input_size, output_size)
        W_recur (Tensor): weights for recurrent connections
(output_size, output_size)
b (Tensor): Biases on output units (output_size, 1)
"""
def __init__(self, output_size, init, init_inner=None, activation=None,
reset_cells=False, name="RecurrentLayer"):
super(Recurrent, self).__init__(init, name)
self.x = None
self.in_deltas = None
self.nout = output_size
self.h_nout = output_size
self.activation = activation
self.outputs = None
self.W_input = None
self.ngates = 1
self.reset_cells = reset_cells
self.init_inner = init_inner
def configure(self, in_obj):
super(Recurrent, self).configure(in_obj)
(self.nin, self.nsteps) = self.in_shape
self.out_shape = (self.nout, self.nsteps)
self.gate_shape = (self.nout * self.ngates, self.nsteps)
if self.weight_shape is None:
self.weight_shape = (self.nout, self.nin)
return self
def allocate(self, shared_outputs=None):
super(Recurrent, self).allocate(shared_outputs)
self.h = get_steps(self.outputs, self.out_shape)
self.h_prev = self.h[-1:] + self.h[:-1]
# State deltas
self.h_delta = get_steps(self.be.iobuf(self.out_shape), self.out_shape)
self.bufs_to_reset = [self.outputs]
if self.W_input is None:
self.init_params(self.weight_shape)
def set_deltas(self, delta_buffers):
super(Recurrent, self).set_deltas(delta_buffers)
self.out_deltas_buffer = self.deltas
self.out_delta = get_steps(self.out_deltas_buffer, self.in_shape)
def init_buffers(self, inputs):
"""
Initialize buffers for recurrent internal units and outputs.
Buffers are initialized as 2D tensors with second dimension being steps * batch_size
A list of views are created on the buffer for easy manipulation of data
related to a certain time step
Arguments:
inputs (Tensor): input data as 2D tensor. The dimension is
(input_size, sequence_length * batch_size)
"""
if self.x is None or self.x is not inputs:
if self.x is not None:
for buf in self.bufs_to_reset:
buf[:] = 0
self.x = inputs
self.xs = get_steps(inputs, self.in_shape)
def init_params(self, shape):
"""
Initialize params including weights and biases.
The weight matrix and bias matrix are concatenated from the weights
for inputs and weights for recurrent inputs and bias.
Arguments:
shape (Tuple): contains number of outputs and number of inputs
"""
(nout, nin) = shape
g_nout = self.ngates * nout
doFill = False
if self.W is None:
self.W = self.be.empty((nout + nin + 1, g_nout))
self.dW = self.be.zeros_like(self.W)
doFill = True
else:
# Deserialized weights and empty grad
assert self.W.shape == (nout + nin + 1, g_nout)
assert self.dW.shape == (nout + nin + 1, g_nout)
self.W_input = self.W[:nin].reshape((g_nout, nin))
self.W_recur = self.W[nin:-1].reshape((g_nout, nout))
self.b = self.W[-1:].reshape((g_nout, 1))
if doFill:
gatelist = [g * nout for g in range(0, self.ngates + 1)]
for wtnm in ('W_input', 'W_recur'):
wtmat = getattr(self, wtnm)
                if wtnm == 'W_recur' and self.init_inner is not None:
initfunc = self.init_inner
else:
initfunc = self.init
for gb, ge in zip(gatelist[:-1], gatelist[1:]):
initfunc.fill(wtmat[gb:ge])
self.b.fill(0.)
self.dW_input = self.dW[:nin].reshape(self.W_input.shape)
self.dW_recur = self.dW[nin:-1].reshape(self.W_recur.shape)
self.db = self.dW[-1:].reshape(self.b.shape)
def fprop(self, inputs, inference=False):
"""
Forward propagation of input to recurrent layer.
Arguments:
inputs (Tensor): input to the model for each time step of
unrolling for each input in minibatch
shape: (vocab_size * steps, batch_size)
where:
* vocab_size: input size
* steps: degree of model unrolling
* batch_size: number of inputs in each mini-batch
inference (bool, optional): Set to true if you are running
inference (only care about forward
propagation without associated backward
propagation). Default is False.
Returns:
Tensor: layer output activations for each time step of
unrolling and for each input in the minibatch
shape: (output_size * steps, batch_size)
"""
self.init_buffers(inputs)
if self.reset_cells:
self.h[-1][:] = 0
# recurrent layer needs a h_prev buffer for bprop
self.h_prev_bprop = [0] + self.h[:-1]
for (h, h_prev, xs) in zip(self.h, self.h_prev, self.xs):
self.be.compound_dot(self.W_input, xs, h)
self.be.compound_dot(self.W_recur, h_prev, h, beta=1.0)
h[:] = self.activation(h + self.b)
return self.outputs
def bprop(self, deltas, alpha=1.0, beta=0.0):
"""
Backward propagation of errors through recurrent layer.
Arguments:
deltas (Tensor): tensors containing the errors for
each step of model unrolling.
shape: (output_size, * steps, batch_size)
Returns:
Tensor: back propagated errors for each step of time unrolling
for each mini-batch element
shape: (input_size * steps, batch_size)
"""
self.dW[:] = 0
if self.in_deltas is None:
self.in_deltas = get_steps(deltas, self.out_shape)
self.prev_in_deltas = self.in_deltas[-1:] + self.in_deltas[:-1]
params = (self.xs, self.h, self.h_prev_bprop, self.h_delta,
self.in_deltas, self.prev_in_deltas, self.out_delta)
for (xs, hs, h_prev, h_delta, in_deltas,
             prev_in_deltas, out_delta) in reversed(list(zip(*params))):  # list() keeps this reversible on Python 3
in_deltas[:] = self.activation.bprop(hs) * in_deltas
self.be.compound_dot(self.W_recur.T, in_deltas, h_delta)
prev_in_deltas[:] = prev_in_deltas + h_delta
if h_prev != 0:
self.be.compound_dot(in_deltas, h_prev.T, self.dW_recur, beta=1.0)
self.be.compound_dot(in_deltas, xs.T, self.dW_input, beta=1.0)
self.db[:] = self.db + self.be.sum(in_deltas, axis=1)
# save a bit of computation if not bpropping activation gradients
if out_delta:
self.be.compound_dot(self.W_input.T, in_deltas, out_delta, alpha=alpha, beta=beta)
return self.out_deltas_buffer
class LSTM(Recurrent):
"""
Long Short-Term Memory (LSTM) layer based on
Hochreiter, S. and J. Schmidhuber, Neural Computation 9(8): 1735-80 (1997).
Arguments:
output_size (int): Number of hidden/output units
init (Initializer): Function for initializing the model's input to hidden weights. By
default, this initializer will also be used for recurrent parameters
unless init_inner is also specified. Biases will always be
initialized to zero.
init_inner (Initializer, optional): Function for initializing the model's recurrent
parameters. If absent, will default to using same
initializer provided to init.
activation (Transform): Activation function for the input modulation
gate_activation (Transform): Activation function for the gates
Attributes:
x (Tensor): input data as 2D tensor. The dimension is
(input_size, sequence_length * batch_size)
W_input (Tensor): Weights on the input units
(out size * 4, input size)
W_recur (Tensor): Weights on the recursive inputs
(out size * 4, out size)
b (Tensor): Biases (out size * 4 , 1)
"""
def __init__(self, output_size, init, init_inner=None, activation=None,
gate_activation=None, reset_cells=False, name="LstmLayer"):
super(LSTM, self).__init__(output_size, init, init_inner,
activation, reset_cells, name)
self.gate_activation = gate_activation
self.ngates = 4 # Input, Output, Forget, Cell
def allocate(self, shared_outputs=None):
super(LSTM, self).allocate(shared_outputs)
# indices for slicing gate buffers
(ifo1, ifo2) = (0, self.nout * 3)
(i1, i2) = (0, self.nout)
(f1, f2) = (self.nout, self.nout * 2)
(o1, o2) = (self.nout * 2, self.nout * 3)
(g1, g2) = (self.nout * 3, self.nout * 4)
# States: hidden, cell, previous hidden, previous cell
self.c_buffer = self.be.iobuf(self.out_shape)
self.c = get_steps(self.c_buffer, self.out_shape)
self.c_prev = self.c[-1:] + self.c[:-1]
self.c_prev_bprop = [0] + self.c[:-1]
self.c_act_buffer = self.be.iobuf(self.out_shape)
self.c_act = get_steps(self.c_act_buffer, self.out_shape)
# Gates: input, forget, output, input modulation
self.ifog_buffer = self.be.iobuf(self.gate_shape)
self.ifog = get_steps(self.ifog_buffer, self.gate_shape)
self.ifo = [gate[ifo1:ifo2] for gate in self.ifog]
self.i = [gate[i1:i2] for gate in self.ifog]
self.f = [gate[f1:f2] for gate in self.ifog]
self.o = [gate[o1:o2] for gate in self.ifog]
self.g = [gate[g1:g2] for gate in self.ifog]
# State deltas
self.c_delta_buffer = self.be.iobuf((self.out_shape))
self.c_delta = get_steps(self.c_delta_buffer, self.out_shape)
self.c_delta_prev = [None] + self.c_delta[:-1]
# Pre activation gate deltas
self.ifog_delta_buffer = self.be.iobuf(self.gate_shape)
self.ifog_delta = get_steps(self.ifog_delta_buffer, self.gate_shape)
self.i_delta = [gate[i1:i2] for gate in self.ifog_delta]
self.f_delta = [gate[f1:f2] for gate in self.ifog_delta]
self.o_delta = [gate[o1:o2] for gate in self.ifog_delta]
self.g_delta = [gate[g1:g2] for gate in self.ifog_delta]
self.bufs_to_reset.append(self.c_buffer)
def fprop(self, inputs, inference=False):
"""
Apply the forward pass transformation to the input data. The input
data is a list of inputs with an element for each time step of
model unrolling.
Arguments:
inputs (Tensor): input data as 2D tensors, then being converted into a
list of 2D slices
Returns:
Tensor: LSTM output for each model time step
"""
self.init_buffers(inputs)
if self.reset_cells:
self.h[-1][:] = 0
self.c[-1][:] = 0
params = (self.h, self.h_prev, self.xs, self.ifog, self.ifo,
self.i, self.f, self.o, self.g, self.c, self.c_prev, self.c_act)
for (h, h_prev, xs, ifog, ifo, i, f, o, g, c, c_prev, c_act) in zip(*params):
self.be.compound_dot(self.W_recur, h_prev, ifog)
self.be.compound_dot(self.W_input, xs, ifog, beta=1.0)
ifog[:] = ifog + self.b
ifo[:] = self.gate_activation(ifo)
g[:] = self.activation(g)
c[:] = f * c_prev + i * g
c_act[:] = self.activation(c)
h[:] = o * c_act
return self.outputs
def bprop(self, deltas, alpha=1.0, beta=0.0):
"""
Backpropagation of errors, output delta for previous layer, and
calculate the update on model parmas
Arguments:
deltas (list[Tensor]): error tensors for each time step
of unrolling
do_acts (bool, optional): Carry out activations. Defaults to True
Attributes:
dW_input (Tensor): input weight gradients
dW_recur (Tensor): revursive weight gradients
db (Tensor): bias gradients
Returns:
Tensor: Backpropagated errors for each time step
of model unrolling
"""
self.c_delta_buffer[:] = 0
self.dW[:] = 0
if self.in_deltas is None:
self.in_deltas = get_steps(deltas, self.out_shape)
self.prev_in_deltas = self.in_deltas[-1:] + self.in_deltas[:-1]
self.ifog_delta_last_steps = self.ifog_delta_buffer[:, self.be.bsz:]
self.h_first_steps = self.outputs[:, :-self.be.bsz]
params = (self.h_delta, self.in_deltas, self.prev_in_deltas,
self.i, self.f, self.o, self.g, self.ifog_delta,
self.i_delta, self.f_delta, self.o_delta, self.g_delta,
self.c_delta, self.c_delta_prev, self.c_prev_bprop, self.c_act)
for (h_delta, in_deltas, prev_in_deltas,
i, f, o, g, ifog_delta, i_delta, f_delta, o_delta, g_delta,
             c_delta, c_delta_prev, c_prev, c_act) in reversed(list(zip(*params))):
# current cell delta
c_delta[:] = c_delta + self.activation.bprop(c_act) * (o * in_deltas)
i_delta[:] = self.gate_activation.bprop(i) * c_delta * g
f_delta[:] = self.gate_activation.bprop(f) * c_delta * c_prev
o_delta[:] = self.gate_activation.bprop(o) * in_deltas * c_act
g_delta[:] = self.activation.bprop(g) * c_delta * i
# out deltas
self.be.compound_dot(self.W_recur.T, ifog_delta, h_delta)
if c_delta_prev is not None:
c_delta_prev[:] = c_delta * f
prev_in_deltas[:] = prev_in_deltas + h_delta
# Weight deltas and accumulate
self.be.compound_dot(self.ifog_delta_last_steps, self.h_first_steps.T, self.dW_recur)
self.be.compound_dot(self.ifog_delta_buffer, self.x.T, self.dW_input)
# Bias delta and accumulate
self.db[:] = self.be.sum(self.ifog_delta_buffer, axis=1)
# out deltas
if self.out_deltas_buffer: # save a bit of computation
self.be.compound_dot(self.W_input.T, self.ifog_delta_buffer, self.out_deltas_buffer,
alpha=alpha, beta=beta)
return self.out_deltas_buffer
class GRU(Recurrent):
"""
Implementation of the Gated Recurrent Unit based on [Cho2014]
- It uses two gates: reset gate (r) and update gate (z)
- The update gate (z) decides how much the activation is updated
- The reset gate (r) decides how much to reset (when r = 0) from the previous activation
    - Activation (h_t) is a linear interpolation (by z) between the previous
      activation (h_t-1) and the new candidate activation (h_can)
    - r and z are computed the same way, using different weights
- gate activation function and unit activation function are usually different
- gate activation is usually logistic
- unit activation is usually tanh
- consider there are 3 gates: r, z, h_can
Arguments:
output_size (int): Number of hidden/output units
init (Initializer): Function for initializing the model's input to hidden weights. By
default, this initializer will also be used for recurrent parameters
unless init_inner is also specified. Biases will always be
initialized to zero.
init_inner (Initializer, optional): Function for initializing the model's recurrent
parameters. If absent, will default to using same
initializer provided to init.
        activation (Transform): Activation function for the input modulation
gate_activation (Transform): Activation function for the gates
Attributes:
x (Tensor): Input data tensor (seq len, inp size, batch size)
W_input (Tensor): Weights on the input units
(out size * 3, input size)
W_recur (Tensor): Weights on the recursive inputs
(out size * 3, out size)
b (Tensor): Biases (out size * 3 , 1)
References:
* Learning phrase representations using rnn encoder-decoder for
statistical machine translation `[Cho2014]`_
* Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling
`[Chung2014]`_
.. _[Cho2014]: http://arxiv.org/abs/1406.1078
.. _[Chung2014]: http://arxiv.org/pdf/1412.3555v1.pdf
"""
def __init__(self, output_size, init, init_inner=None, activation=None,
gate_activation=None, reset_cells=False, name="GruLayer"):
super(GRU, self).__init__(output_size, init, init_inner,
activation, reset_cells, name)
self.gate_activation = gate_activation
self.ngates = 3 # r, z, hcandidate
def allocate(self, shared_outputs=None):
super(GRU, self).allocate(shared_outputs)
self.h_prev_bprop = [0] + self.h[:-1]
# indices for slicing gate buffers
(rz1, rz2) = (0, self.nout * 2)
(r1, r2) = (0, self.nout)
(z1, z2) = (self.nout, self.nout * 2)
(c1, c2) = (self.nout * 2, self.nout * 3)
        # buffers for:
        #   rh_prev_buffer: the previous hidden state multiplied by r
        #   wrc_T_dc: Whcan_recur.T dotted with hcan_delta
self.rh_prev_buffer = self.be.iobuf(self.out_shape)
self.rh_prev = get_steps(self.rh_prev_buffer, self.out_shape)
self.wrc_T_dc = self.be.iobuf(self.nout)
# Gates: reset: r; update: z; candidate h: hcan
self.rzhcan_buffer = self.be.iobuf(self.gate_shape)
self.rzhcan = get_steps(self.rzhcan_buffer, self.gate_shape)
self.rz = [gate[rz1:rz2] for gate in self.rzhcan]
self.r = [gate[r1:r2] for gate in self.rzhcan]
self.z = [gate[z1:z2] for gate in self.rzhcan]
self.hcan = [gate[c1:c2] for gate in self.rzhcan]
# the buffer only deals with recurrent inputs to the gates
self.rzhcan_rec_buffer = self.be.iobuf(self.gate_shape)
self.rzhcan_rec = get_steps(self.rzhcan_rec_buffer, self.gate_shape)
self.rz_rec = [gate[rz1:rz2] for gate in self.rzhcan_rec]
self.hcan_rec = [gate[c1:c2] for gate in self.rzhcan_rec]
# Pre activation gate deltas
self.rzhcan_delta_buffer = self.be.iobuf(self.gate_shape)
self.rzhcan_delta = get_steps(self.rzhcan_delta_buffer, self.gate_shape)
self.rz_delta = [gate[rz1:rz2] for gate in self.rzhcan_delta]
self.r_delta = [gate[r1:r2] for gate in self.rzhcan_delta]
self.z_delta = [gate[z1:z2] for gate in self.rzhcan_delta]
self.hcan_delta = [gate[c1:c2] for gate in self.rzhcan_delta]
def init_params(self, shape):
"""
Initialize params for GRU including weights and biases.
The weight matrix and bias matrix are concatenated from the weights
for inputs and weights for recurrent inputs and bias.
The shape of the weights is (number of inputs + number of outputs + 1)
by (number of outputs * 3)
Arguments:
shape (Tuple): contains number of outputs and number of inputs
"""
super(GRU, self).init_params(shape)
(nout, nin) = shape
# indices for slicing gate buffers
(rz1, rz2) = (0, nout * 2)
(c1, c2) = (nout * 2, nout * 3)
self.Wrz_recur = self.W_recur[rz1:rz2]
self.Whcan_recur = self.W_recur[c1:c2]
self.b_rz = self.b[rz1:rz2]
self.b_hcan = self.b[c1:c2]
self.dWrz_recur = self.dW_recur[rz1:rz2]
self.dWhcan_recur = self.dW_recur[c1:c2]
def fprop(self, inputs, inference=False):
"""
Apply the forward pass transformation to the input data. The input data is a list of
inputs with an element for each time step of model unrolling.
Arguments:
inputs (Tensor): input data as 3D tensors, then converted into a list of 2D tensors
Returns:
Tensor: GRU output for each model time step
"""
self.init_buffers(inputs)
if self.reset_cells:
self.h[-1][:] = 0
self.rz[-1][:] = 0
self.hcan[-1][:] = 0
for (h, h_prev, rh_prev, xs, rz, r, z, hcan, rz_rec, hcan_rec, rzhcan) in zip(
self.h, self.h_prev, self.rh_prev, self.xs, self.rz, self.r,
self.z, self.hcan, self.rz_rec, self.hcan_rec, self.rzhcan):
# computes r, z, hcan from inputs
self.be.compound_dot(self.W_input, xs, rzhcan)
# computes r, z, hcan from recurrents
self.be.compound_dot(self.Wrz_recur, h_prev, rz_rec)
rz[:] = self.gate_activation(rz + rz_rec + self.b_rz)
rh_prev[:] = r * h_prev
self.be.compound_dot(self.Whcan_recur, rh_prev, hcan_rec)
hcan[:] = self.activation(hcan_rec + hcan + self.b_hcan)
h[:] = (1 - z) * h_prev + z * hcan
return self.outputs
def bprop(self, deltas, alpha=1.0, beta=0.0):
"""
Backpropagation of errors; computes the output deltas for the previous layer
and the updates to the model params
Arguments:
deltas (Tensor): error tensors for each time step of unrolling
alpha (float, optional): scale applied to the computed output deltas. Defaults to 1.0
beta (float, optional): scale applied to the existing contents of the output
delta buffer, so gradients can be accumulated. Defaults to 0.0
Attributes:
dW_input (Tensor): input weight gradients
dW_recur (Tensor): recurrent weight gradients
db (Tensor): bias gradients
Returns:
Tensor: Backpropagated errors for each time step of model unrolling
"""
self.dW[:] = 0
if self.in_deltas is None:
self.in_deltas = get_steps(deltas, self.out_shape)
self.prev_in_deltas = self.in_deltas[-1:] + self.in_deltas[:-1]
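# rotated view: prev_in_deltas[t] aliases in_deltas[t-1], so the hidden-state
# delta computed at step t is accumulated into step t-1's incoming delta
# (the t=0 wraparound lands on the last step, which has already been processed)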
params = (self.r, self.z, self.hcan, self.rh_prev, self.h_prev_bprop,
self.r_delta, self.z_delta, self.hcan_delta, self.rz_delta, self.rzhcan_delta,
self.h_delta, self.in_deltas, self.prev_in_deltas)
for (r, z, hcan, rh_prev, h_prev, r_delta, z_delta, hcan_delta, rz_delta,
rzhcan_delta, h_delta, in_deltas, prev_in_deltas) in reversed(list(zip(*params))):
# hcan_delta
hcan_delta[:] = self.activation.bprop(hcan) * in_deltas * z
z_delta[:] = self.gate_activation.bprop(z) * in_deltas * (hcan - h_prev)
# r_delta
self.be.compound_dot(self.Whcan_recur.T, hcan_delta, r_delta)
r_delta[:] = self.gate_activation.bprop(r) * r_delta * h_prev
# out hidden delta
h_delta[:] = in_deltas * (1 - z)
self.be.compound_dot(self.Wrz_recur.T, rz_delta, h_delta, beta=1.0)
self.be.compound_dot(self.Whcan_recur.T, hcan_delta, self.wrc_T_dc)
h_delta[:] = h_delta + r * self.wrc_T_dc
if h_prev != 0:
self.be.compound_dot(rz_delta, h_prev.T, self.dWrz_recur, beta=1.0)
self.be.compound_dot(hcan_delta, rh_prev.T, self.dWhcan_recur, beta=1.0)
prev_in_deltas[:] = prev_in_deltas + h_delta
# Weight deltas and accumulate
self.be.compound_dot(self.rzhcan_delta_buffer, self.x.T, self.dW_input) # batch
self.db[:] = self.be.sum(self.rzhcan_delta_buffer, axis=1)
# out deltas
if self.out_deltas_buffer: # save a bit of computation
self.be.compound_dot(self.W_input.T, self.rzhcan_delta_buffer, self.out_deltas_buffer,
alpha=alpha, beta=beta)
return self.out_deltas_buffer
class RecurrentOutput(Layer):
"""
A layer to combine the recurrent layer outputs over time steps. It will
collapse the time dimension in several ways. These layers do not have
parameters and do not optimize during training.
Options derived from this include:
RecurrentSum, RecurrentMean, RecurrentLast
"""
def __init__(self, name=None):
name = name if name else self.classnm
super(RecurrentOutput, self).__init__(name)
self.owns_output = self.owns_delta = True
self.x = None
def __str__(self):
return "RecurrentOutput choice %s : (%d, %d) inputs, %d outputs" % (
self.name, self.nin, self.nsteps, self.nin)
def configure(self, in_obj):
super(RecurrentOutput, self).configure(in_obj) # gives self.in_shape
(self.nin, self.nsteps) = self.in_shape
self.out_shape = (self.nin, 1)
return self
def set_deltas(self, delta_buffers):
super(RecurrentOutput, self).set_deltas(delta_buffers)
self.deltas_buffer = self.deltas
if self.deltas:
self.deltas = get_steps(self.deltas_buffer, self.in_shape)
else:
self.deltas = [] # for simplifying bprop notation
def init_buffers(self, inputs):
"""
Initialize buffers for recurrent internal units and outputs.
Buffers are initialized as 2D tensors with second dimension being steps * batch_size
A list of views are created on the buffer for easy manipulation of data
related to a certain time step
Arguments:
inputs (Tensor): input data as 2D tensor. The dimension is
(input_size, sequence_length * batch_size)
"""
if self.x is None or self.x is not inputs:
self.x = inputs
self.xs = get_steps(inputs, self.in_shape)
class RecurrentSum(RecurrentOutput):
"""
A layer that sums over the recurrent layer outputs over time
"""
def configure(self, in_obj):
super(RecurrentSum, self).configure(in_obj) # gives self.in_shape
self.sumscale = 1.
return self
def fprop(self, inputs, inference=False):
self.init_buffers(inputs)
self.outputs.fill(0)
for x in self.xs:
self.outputs[:] = self.outputs + self.sumscale * x
return self.outputs
def bprop(self, error, alpha=1.0, beta=0.0):
for delta in self.deltas:
delta[:] = alpha * self.sumscale * error + delta * beta
return self.deltas_buffer
class RecurrentMean(RecurrentSum):
"""
A layer that gets the averaged recurrent layer outputs over time
"""
def configure(self, in_obj):
super(RecurrentMean, self).configure(in_obj) # gives self.in_shape
self.sumscale = 1. / self.nsteps
return self
class RecurrentLast(RecurrentOutput):
"""
A layer that only keeps the recurrent layer output at the last time step
"""
def fprop(self, inputs, inference=False):
self.init_buffers(inputs)
self.outputs[:] = self.xs[-1]
return self.outputs
def bprop(self, error, alpha=1.0, beta=0.0):
if self.deltas:
# RNN/LSTM layers don't allocate new hidden units delta buffers and they overwrite it
# while doing bprop. So, init with zeros here.
self.deltas_buffer.fill(0)
self.deltas[-1][:] = alpha * error
return self.deltas_buffer
| 40.522327 | 98 | 0.601783 | 4,063 | 29,946 | 4.280581 | 0.108048 | 0.021159 | 0.017709 | 0.021504 | 0.589696 | 0.538466 | 0.486718 | 0.448655 | 0.391157 | 0.361373 | 0 | 0.012879 | 0.29994 | 29,946 | 738 | 99 | 40.577236 | 0.816733 | 0.367996 | 0 | 0.284431 | 0 | 0 | 0.006089 | 0 | 0 | 0 | 0 | 0 | 0.005988 | 1 | 0.086826 | false | 0 | 0.002994 | 0.002994 | 0.161677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8075ab5b7644cd6b940830cbdac14017e16f9d27 | 439 | py | Python | Exercise_7_9.py | kushrami/Python-Crash-Course-book-Excersice | 7093181940a90d9f4bab5775ef56f57963450393 | [
"Apache-2.0"
] | null | null | null | Exercise_7_9.py | kushrami/Python-Crash-Course-book-Excersice | 7093181940a90d9f4bab5775ef56f57963450393 | [
"Apache-2.0"
] | null | null | null | Exercise_7_9.py | kushrami/Python-Crash-Course-book-Excersice | 7093181940a90d9f4bab5775ef56f57963450393 | [
"Apache-2.0"
] | null | null | null | #No pastrami:
sandwich_orders = ['mexican', 'pastrami', 'aloo', 'pastrami', 'spicy potato', 'pastrami', 'lulu']
finished_sandwich = []
while sandwich_orders:
sandwich = sandwich_orders.pop()
if sandwich == 'pastrami':
print("We are out of pastrami.")
continue
print("I made your",sandwich,"sandwich")
finished_sandwich.append(sandwich)
for sandwich in finished_sandwich:
print(sandwich,"Sandwich is ready.")
| 27.4375 | 89 | 0.697039 | 50 | 439 | 6 | 0.52 | 0.14 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166287 | 439 | 15 | 90 | 29.266667 | 0.819672 | 0.027335 | 0 | 0 | 0 | 0 | 0.276995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80785349bd512737005eabd1247ba002964d3d8f | 6,811 | py | Python | keyhandler.py | egriffith/AWSKeyHandler | 9dbe1068440f801a7c522f7fd212bebef1af2a65 | [
"MIT"
] | null | null | null | keyhandler.py | egriffith/AWSKeyHandler | 9dbe1068440f801a7c522f7fd212bebef1af2a65 | [
"MIT"
] | null | null | null | keyhandler.py | egriffith/AWSKeyHandler | 9dbe1068440f801a7c522f7fd212bebef1af2a65 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import sys
from os.path import expanduser
import argparse
import boto3
import botocore
def printDebug(action, publicKeyName, publicKeyText, regionList, debug, dryRun, credProfile):
"""Placeholder for debug output; currently a no-op."""
return 0
def buildArgParser(argv):
parser = argparse.ArgumentParser(description="Upload, delete, or list SSH key pairs in AWS regions")
parser.add_argument('action',
help="Valid actions are 'upload', 'delete', and 'list' ")
parser.add_argument('--keyname', '-n',
dest="publicKeyName",
help="Identifier of the key within AWS. \
If uploading, AWS will automatically add '.pem' onto the end in all connection dialogues.\
Mandatory if uploading or removing a key. Optional for listing.")
parser.add_argument('--keyfile', '-f', dest="keyFilePath",
default=expanduser("~")+"/.ssh/id_rsa.pub",
help="Path to the public key file to upload. Required for uploading, will default to ~/.ssh/id_rsa.pub if not specified")
parser.add_argument('--regions', '-r', dest="regionList",
default="all",
help="Comma deliminated list of AWS regions to take action against. \
Defaults to all regions if not specified. Accepted by all actions.")
parser.add_argument('--profile', '-p', dest="credProfile",
default="default",
help="The profile specified in ~/.aws/credentials to use for permissions.\
Defaults to 'default' profile. Accepted by all actions.")
parser.add_argument('--dryrun', action="store_true",
help="Sets the 'DryRun' flag on the upload_key API call. ")
return parser.parse_args(argv)
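# Example invocations (illustrative; assumes AWS credentials are configured):
#   python3 keyhandler.py upload -n mykey -f ~/.ssh/id_rsa.pub -r us-east-1,eu-west-1
#   python3 keyhandler.py delete -n mykey --dryrun
#   python3 keyhandler.py list -p work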
def wipeKey(publicKeyName, regionList, credProfile, dryrun):
if publicKeyName is None:
print("argument '--keyname / -n' is required for wiping a key.")
sys.exit(1)
session = boto3.Session(profile_name=credProfile)
for region in regionList:
print("Removing key '" + publicKeyName + "' from: " + region + " --- ", end="")
try:
session.client("ec2",region_name=region).delete_key_pair(
KeyName=publicKeyName,
DryRun=dryrun)
except botocore.exceptions.ClientError as error:
print("Failed.")
if error.response['Error']['Code'] == "DryRunOperation":
print("Operation would have succeeded, but was a dry run.\n")
continue
elif error.response['Error']['Code'] == "UnauthorizedOperation":
print("Operation failed due to permissions.\n")
continue
else:
print(str(error) + "\n")
sys.exit(1)
print("Success.\n")
return 0
def uploadKey(publicKeyName, publicKeyText, regionList, dryRun, credProfile):
if publicKeyName is None:
print("argument '--keyname / -n' is required for uploading a key.")
sys.exit(1)
else:
session = boto3.Session(profile_name=credProfile)
for region in regionList:
print("Importing key to: " + region + " --- ", end="")
try:
session.client("ec2",region_name=region).import_key_pair(
DryRun=dryRun,
KeyName=publicKeyName,
PublicKeyMaterial=publicKeyText)
except botocore.exceptions.ClientError as error:
print("Failed.")
if error.response['Error']['Code'] == "DryRunOperation":
print("Operation would have succeeded, but was a dry run.\n")
continue
elif error.response['Error']['Code'] == "UnauthorizedOperation":
print("Operation failed due to permissions.\n")
continue
else:
print(str(error) + "\n")
sys.exit(1)
print("Success.\n")
return 0
def listKeys(regionList, credProfile, dryrun, publicKeyName=None):
# avoid a mutable default argument; an empty KeyNames list means "describe all key pairs"
if publicKeyName is None:
publicKeyName = []
session = boto3.Session(profile_name=credProfile)
for region in regionList:
print("======= Public Keys available in: " + region + " =======")
try:
keyList = session.client("ec2",region_name=region).describe_key_pairs(
KeyNames=publicKeyName,
DryRun=dryrun)['KeyPairs']
except botocore.exceptions.ClientError as error:
print("Failed.")
if error.response['Error']['Code'] == "DryRunOperation":
print("Operation would have succeeded, but was a dry run.\n")
continue
elif error.response['Error']['Code'] == "UnauthorizedOperation":
print("Operation failed due to permissions.\n")
continue
else:
print(str(error) + "\n")
sys.exit(1)
for item in keyList:
print(item['KeyName'] + " - " + item['KeyFingerprint'])
print("")
return 0
def manipRegionInput(regionInput):
regionInput = regionInput.lower()
regionInput = regionInput.split(",")
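# e.g. "US-East-1,eu-west-1" -> ['us-east-1', 'eu-west-1']; "all" expands below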
if regionInput[0] == "all":
regionInput = boto3.session.Session().get_available_regions("ec2")
return regionInput
def readKeyFile(keyFilePath):
try:
with open(keyFilePath, 'r') as keyFile:
publicKeyText = keyFile.read()
except FileNotFoundError:
print("ERROR: File: " + str(keyFilePath) + "' could not be found. Exiting.")
sys.exit(1)
return publicKeyText
def main(argv):
arglist = buildArgParser(argv)
if arglist.action == "upload":
uploadKey(arglist.publicKeyName,
readKeyFile(arglist.keyFilePath),
manipRegionInput(arglist.regionList),
arglist.dryrun,
arglist.credProfile)
elif arglist.action == "delete":
wipeKey(arglist.publicKeyName,
manipRegionInput(arglist.regionList),
arglist.credProfile,
arglist.dryrun)
elif arglist.action == "list":
listKeys(manipRegionInput(arglist.regionList), arglist.credProfile, arglist.dryrun)
else:
print("Action '" + arglist.action + "' not recognized.")
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| 38.050279 | 145 | 0.550286 | 645 | 6,811 | 5.75969 | 0.268217 | 0.01319 | 0.015074 | 0.035532 | 0.401346 | 0.394886 | 0.386272 | 0.331898 | 0.331898 | 0.30821 | 0 | 0.005117 | 0.340038 | 6,811 | 178 | 146 | 38.264045 | 0.821357 | 0.00323 | 0 | 0.445255 | 0 | 0.007299 | 0.182823 | 0.009281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058394 | false | 0 | 0.051095 | 0.007299 | 0.160584 | 0.175182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
807ac64b53208d8bf2363b570ae4aa35ea88e5a3 | 8,868 | py | Python | scripts/cros_list_modified_packages.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | scripts/cros_list_modified_packages.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | 2 | 2021-03-26T00:29:32.000Z | 2021-04-30T21:29:33.000Z | scripts/cros_list_modified_packages.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Calculate what workon packages have changed since the last build.
A workon package is treated as changed if any of the below are true:
1) The package is not installed.
2) A file exists in the associated repository which has a newer modification
time than the installed package.
3) The source ebuild has a newer modification time than the installed package.
Some caveats:
- We do not look at eclasses. This replicates the existing behavior of the
commit queue, which also does not look at eclass changes.
- We do not try to fallback to the non-workon package if the local tree is
unmodified. This is probably a good thing, since developers who are
"working on" a package want to compile it locally.
- Portage only stores the time that a package finished building, so we
aren't able to detect when users modify source code during builds.
"""
from __future__ import print_function
import errno
import multiprocessing
import os
import sys
from six.moves import queue as Queue
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
from chromite.lib import workon_helper
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class ModificationTimeMonitor(object):
"""Base class for monitoring last modification time of paths.
This takes a list of (keys, path) pairs and finds the latest mtime of an
object within each of the path's subtrees, populating a map from keys to
mtimes. Note that a key may be associated with multiple paths, in which case
the latest mtime among them will be returned.
Attributes:
_tasks: A list of (key, path) pairs to check.
_result_queue: A queue populated with corresponding (key, mtime) pairs.
"""
def __init__(self, key_path_pairs):
self._tasks = list(key_path_pairs)
self._result_queue = multiprocessing.Queue(len(self._tasks))
def _EnqueueModificationTime(self, key, path):
"""Calculate the last modification time of |path| and enqueue it."""
if os.path.isdir(path):
self._result_queue.put((key, self._LastModificationTime(path)))
def _LastModificationTime(self, path):
"""Returns the latest modification time for anything under |path|."""
cmd = 'find . -name .git -prune -o -printf "%T@\n" | sort -nr | head -n1'
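# prune .git, print each remaining file's mtime in epoch seconds,
# sort newest-first, and keep only the maximum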
ret = cros_build_lib.run(cmd, cwd=path, shell=True, print_cmd=False,
capture_output=True)
return float(ret.output) if ret.output else 0
def GetModificationTimes(self):
"""Get the latest modification time for each of the queued keys."""
parallel.RunTasksInProcessPool(self._EnqueueModificationTime, self._tasks)
mtimes = {}
try:
while True:
key, mtime = self._result_queue.get_nowait()
mtimes[key] = max((mtimes.get(key, 0), mtime))
except Queue.Empty:
return mtimes
class WorkonPackageInfo(object):
"""Class for getting information about workon packages.
Attributes:
cp: The package name (e.g. chromeos-base/power_manager).
mtime: The modification time of the installed package.
projects: The project(s) associated with the package.
full_srcpaths: The brick source path(s) associated with the package.
src_ebuild_mtime: The modification time of the source ebuild.
"""
def __init__(self, cp, mtime, projects, full_srcpaths, src_ebuild_mtime):
self.cp = cp
self.pkg_mtime = int(mtime)
self.projects = projects
self.full_srcpaths = full_srcpaths
self.src_ebuild_mtime = src_ebuild_mtime
def ListWorkonPackages(sysroot, all_opt=False):
"""List the packages that are currently being worked on.
Args:
sysroot: sysroot_lib.Sysroot object.
all_opt: Pass --all to cros_workon. For testing purposes.
"""
helper = workon_helper.WorkonHelper(sysroot.path)
return helper.ListAtoms(use_all=all_opt)
def ListWorkonPackagesInfo(sysroot):
"""Find the specified workon packages for the specified board.
Args:
sysroot: sysroot_lib.Sysroot object.
Returns:
A list of WorkonPackageInfo objects for unique packages being worked on.
"""
packages = ListWorkonPackages(sysroot)
if not packages:
return []
results = {}
if sysroot.path == '/':
overlays = portage_util.FindOverlays(constants.BOTH_OVERLAYS, None)
else:
overlays = sysroot.GetStandardField('PORTDIR_OVERLAY').splitlines()
vdb_path = os.path.join(sysroot.path, portage_util.VDB_PATH)
for overlay in overlays:
for filename, projects, srcpaths in portage_util.GetWorkonProjectMap(
overlay, packages):
# chromeos-base/power_manager/power_manager-9999
# cp = chromeos-base/power_manager
# cpv = chromeos-base/power_manager-9999
category, pn, p = portage_util.SplitEbuildPath(filename)
cp = '%s/%s' % (category, pn)
cpv = '%s/%s' % (category, p)
# Get the time the package finished building. TODO(build): Teach Portage
# to store the time the package started building and use that here.
pkg_mtime_file = os.path.join(vdb_path, cpv, 'BUILD_TIME')
try:
pkg_mtime = int(osutils.ReadFile(pkg_mtime_file))
except EnvironmentError as ex:
if ex.errno != errno.ENOENT:
raise
pkg_mtime = 0
# Get the modificaton time of the ebuild in the overlay.
src_ebuild_mtime = os.lstat(os.path.join(overlay, filename)).st_mtime
# Write info into the results dictionary, overwriting any previous
# values. This ensures that overlays override appropriately.
results[cp] = WorkonPackageInfo(cp, pkg_mtime, projects, srcpaths,
src_ebuild_mtime)
return list(results.values())
def WorkonProjectsMonitor(projects):
"""Returns a monitor for project modification times."""
# TODO(garnold) In order for the mtime monitor to be as accurate as
# possible, this only needs to enqueue the checkout(s) relevant for the
# task at hand, e.g. the specific ebuild we want to emerge. In general, the
# CROS_WORKON_LOCALNAME variable in workon ebuilds defines the source path
# uniquely and can be used for this purpose.
project_path_pairs = []
manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
for project in set(projects).intersection(manifest.checkouts_by_name):
for checkout in manifest.FindCheckouts(project):
project_path_pairs.append((project, checkout.GetPath(absolute=True)))
return ModificationTimeMonitor(project_path_pairs)
def WorkonSrcpathsMonitor(srcpaths):
"""Returns a monitor for srcpath modification times."""
# This class handles generators, so zip() is safe.
# pylint: disable=zip-builtin-not-iterating
return ModificationTimeMonitor(zip(srcpaths, srcpaths))
def ListModifiedWorkonPackages(sysroot):
"""List the workon packages that need to be rebuilt.
Args:
sysroot: sysroot_lib.Sysroot object.
"""
packages = ListWorkonPackagesInfo(sysroot)
if not packages:
return
# Get mtimes for all projects and source paths associated with our packages.
all_projects = [p for info in packages for p in info.projects]
project_mtimes = WorkonProjectsMonitor(all_projects).GetModificationTimes()
all_srcpaths = [s for info in packages for s in info.full_srcpaths]
srcpath_mtimes = WorkonSrcpathsMonitor(all_srcpaths).GetModificationTimes()
for info in packages:
mtime = int(max([project_mtimes.get(p, 0) for p in info.projects] +
[srcpath_mtimes.get(s, 0) for s in info.full_srcpaths] +
[info.src_ebuild_mtime]))
if mtime >= info.pkg_mtime:
yield info.cp
def _ParseArguments(argv):
parser = commandline.ArgumentParser(description=__doc__)
target = parser.add_mutually_exclusive_group(required=True)
target.add_argument('--board', help='Board name')
target.add_argument('--host', default=False, action='store_true',
help='Look at host packages instead of board packages')
target.add_argument('--sysroot', help='Sysroot path.')
flags = parser.parse_args(argv)
flags.Freeze()
return flags
def main(argv):
logging.getLogger().setLevel(logging.INFO)
flags = _ParseArguments(argv)
sysroot = None
if flags.board:
sysroot = cros_build_lib.GetSysroot(flags.board)
elif flags.host:
sysroot = '/'
else:
sysroot = flags.sysroot
modified = ListModifiedWorkonPackages(sysroot_lib.Sysroot(sysroot))
print(' '.join(sorted(modified)))
| 36.195918 | 80 | 0.726545 | 1,206 | 8,868 | 5.230514 | 0.310945 | 0.019023 | 0.023779 | 0.033291 | 0.09591 | 0.047559 | 0.015219 | 0.015219 | 0.015219 | 0 | 0 | 0.003621 | 0.190347 | 8,868 | 244 | 81 | 36.344262 | 0.87493 | 0.400992 | 0 | 0.05 | 0 | 0.008333 | 0.045779 | 0 | 0 | 0 | 0 | 0.004098 | 0.008333 | 1 | 0.1 | false | 0 | 0.133333 | 0 | 0.325 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
807e9b1b026b86213f993cb57eef6c26141e77e2 | 3,222 | py | Python | share/ttkwidgets/debugwindow.py | Marusoftware/Marutools | 2b462ea02abaf957eb037c281b62d7efe053840e | [
"MIT"
] | null | null | null | share/ttkwidgets/debugwindow.py | Marusoftware/Marutools | 2b462ea02abaf957eb037c281b62d7efe053840e | [
"MIT"
] | 5 | 2021-01-21T09:46:12.000Z | 2022-02-14T13:54:44.000Z | share/ttkwidgets/debugwindow.py | Marusoftware/Marutools | 2b462ea02abaf957eb037c281b62d7efe053840e | [
"MIT"
] | 2 | 2021-11-02T11:01:53.000Z | 2022-02-14T10:11:21.000Z | """
Author: RedFantom
License: GNU GPLv3
Source: This repository
"""
try:
import Tkinter as tk
import ttk
import tkFileDialog as fd
except ImportError:
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog as fd
import sys
from ttkwidgets import AutoHideScrollbar
class DebugWindow(tk.Toplevel):
"""
A Toplevel that shows sys.stdout and sys.stderr for Tkinter applications
"""
def __init__(self, master=None, title="Debug window", stdout=True,
stderr=False, width=70, autohidescrollbar=True, **kwargs):
"""
Create a Debug window.
:param master: master widget
:type master: widget
:param title: window title
:type title: str
:param stdout: whether to redirect stdout to the widget
:type stdout: bool
:param stderr: whether to redirect stderr to the widget
:type stderr: bool
:param width: window width (in characters)
:type width: int
:param autohidescrollbar: whether to use an :class:`~ttkwidgets.AutoHideScrollbar` or a :class:`ttk.Scrollbar`
:type autohidescrollbar: bool
:param kwargs: options to be passed on to the :class:`tk.Toplevel` initializer
"""
self._width = width
tk.Toplevel.__init__(self, master, **kwargs)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.protocol("WM_DELETE_WINDOW", self.quit)
self.wm_title(title)
self._oldstdout = sys.stdout
self._oldstderr = sys.stderr
if stdout:
sys.stdout = self
if stderr:
sys.stderr = self
self.menu = tk.Menu(self)
self.config(menu=self.menu)
self.filemenu = tk.Menu(self.menu, tearoff=0)
self.filemenu.add_command(label="Save file", command=self.save)
self.filemenu.add_command(label="Exit", command=self.quit)
self.menu.add_cascade(label="File", menu=self.filemenu)
self.text = tk.Text(self, width=width, wrap=tk.WORD)
if autohidescrollbar:
self.scroll = AutoHideScrollbar(self, orient=tk.VERTICAL, command=self.text.yview)
else:
self.scroll = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.text.yview)
self.text.config(yscrollcommand=self.scroll.set)
self.text.bind("<Key>", lambda e: "break")
self._grid_widgets()
def save(self):
"""Save widget content."""
file_name = fd.asksaveasfilename()
if file_name == "" or file_name == None:
return
with open(file_name, "w") as f:
f.write(self.text.get("1.0", tk.END))
def _grid_widgets(self):
self.text.grid(row=0, column=0, sticky="nsew")
self.scroll.grid(row=0, column=1, sticky="ns")
def write(self, line):
"""
Write line at the end of the widget.
:param line: text to insert in the widget
:type line: str
"""
self.text.insert(tk.END, line)
def flush(self):
"""No-op; present so the widget satisfies the file-like interface of sys.stdout."""
pass
def quit(self):
"""Restore previous stdout/stderr and destroy the window."""
sys.stdout = self._oldstdout
sys.stderr = self._oldstderr
self.destroy()
| 33.915789 | 118 | 0.619491 | 404 | 3,222 | 4.873762 | 0.331683 | 0.032504 | 0.019807 | 0.017268 | 0.068055 | 0.04063 | 0.04063 | 0.04063 | 0 | 0 | 0 | 0.005978 | 0.273122 | 3,222 | 94 | 119 | 34.276596 | 0.834757 | 0.251397 | 0 | 0 | 0 | 0 | 0.029372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0.017857 | 0.160714 | 0 | 0.303571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
807f29911fba7b1a336c50a170090123fe9e9f0c | 967 | py | Python | migrations/versions/e9596ed3a618_add_release_date_uk_field_and_director_.py | jimmybutton/moviedb | 61028ac4db7f58a671ab3a1c2afd3bfb53372773 | [
"MIT"
] | null | null | null | migrations/versions/e9596ed3a618_add_release_date_uk_field_and_director_.py | jimmybutton/moviedb | 61028ac4db7f58a671ab3a1c2afd3bfb53372773 | [
"MIT"
] | null | null | null | migrations/versions/e9596ed3a618_add_release_date_uk_field_and_director_.py | jimmybutton/moviedb | 61028ac4db7f58a671ab3a1c2afd3bfb53372773 | [
"MIT"
] | null | null | null | """add release_date_uk field and director_id to movie model
Revision ID: e9596ed3a618
Revises: affd804a37d8
Create Date: 2020-08-05 17:34:58.197456
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e9596ed3a618'
down_revision = 'affd804a37d8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('director_id', sa.Integer(), nullable=True))
op.add_column('movie', sa.Column('release_date_uk', sa.Date(), nullable=True))
op.create_foreign_key(None, 'movie', 'people', ['director_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'movie', type_='foreignkey')
op.drop_column('movie', 'release_date_uk')
op.drop_column('movie', 'director_id')
# ### end Alembic commands ###
| 29.30303 | 82 | 0.698035 | 128 | 967 | 5.109375 | 0.460938 | 0.061162 | 0.059633 | 0.070336 | 0.204893 | 0.204893 | 0.134557 | 0.134557 | 0 | 0 | 0 | 0.058752 | 0.155119 | 967 | 32 | 83 | 30.21875 | 0.741738 | 0.349535 | 0 | 0 | 0 | 0 | 0.228041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
808194511d3bb385bc1da7eb37a9fb429a3efa5a | 26,267 | py | Python | retro/tables/generate_tdi_table.py | ellohfin/retro | 58ec8f5b698e6140acd215717f051d99e407c4e5 | [
"Apache-2.0"
] | 1 | 2018-03-02T01:05:52.000Z | 2018-03-02T01:05:52.000Z | retro/tables/generate_tdi_table.py | ellohfin/retro | 58ec8f5b698e6140acd215717f051d99e407c4e5 | [
"Apache-2.0"
] | 30 | 2018-01-30T21:03:28.000Z | 2019-11-07T16:42:07.000Z | retro/tables/generate_tdi_table.py | ellohfin/retro | 58ec8f5b698e6140acd215717f051d99e407c4e5 | [
"Apache-2.0"
] | 6 | 2017-07-27T19:49:13.000Z | 2019-11-19T13:38:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, too-many-locals
"""
Create time- and DOM-independent (TDI) whole-detector Cartesian-binned Retro
table.
The generated table is useful for computing the total charge expected to be
deposited by a hypothesis across the entire detector (i.e., independent of time
and DOM).
Define a Cartesian grid that covers all of the IceCube fiducial volume, then
tabulate for each voxel the survival probability for photons coming from any
DOM at any time to reach that voxel. Also, tabulate the "average surviving
photon," defined by its x, y, and z components (which differs from the original
time- and DOM-dependent retro tables, wherein length, theta, and deltaphi are
used to characterize the average surviving photon).
Note that the length of the average surviving photon vector can be interpreted
as a measure of the directionality required for a photon to reach a DOM. I.e.,
if its length is 1, then only photons going exactly opposite that direction
will make it to a DOM (to within statistical and bin-size uncertainties used to
arrive at the average photon). If the length is _less_ than 1, then other
directions besides the average photon direction will be accepted, with
increasing likelihood as that length decreases towards 0.
The new table is in (x, y, z)--independent of time and DOM--and can be used to
scale the photons expected to reach any DOM at any time due to a hypothesis
that generates some number of photons (with an average direction / length) in
any of the voxel(s) of this table.
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'generate_tdi_table_meta',
'generate_tdi_table',
'parse_args'
]
__author__ = 'P. Eller, J.L. Lanfranchi'
__license__ = '''Copyright 2017 Philipp Eller and Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from collections import OrderedDict
from copy import deepcopy
from os.path import abspath, dirname, isdir, isfile, join
import sys
import time
import numpy as np
from astropy.io import fits
if __name__ == '__main__' and __package__ is None:
PARENT_DIR = dirname(dirname(abspath(__file__)))
if PARENT_DIR not in sys.path:
sys.path.append(PARENT_DIR)
from retro.const import (
DC_DOM_QUANT_EFF, IC_DOM_QUANT_EFF, POL_TABLE_RMAX, POL_TABLE_RPWR,
POL_TABLE_NRBINS, POL_TABLE_NTHETABINS, POL_TABLE_NTBINS
)
from retro.tables.generate_binmap import generate_binmap
from retro.tables.shift_and_bin import shift_and_bin
from retro.tables.dom_time_polar_tables import load_t_r_theta_table
from retro.tables.tdi_cart_tables import TDI_TABLE_FNAME_PROTO
from retro.utils.geom import generate_geom_meta
from retro.utils.misc import (
generate_anisotropy_str, hash_obj, hrlist2list, list2hrlist
)
def generate_tdi_table_meta(
binmap_hash, geom_hash, dom_tables_hash, times_str, x_min, x_max,
y_min, y_max, z_min, z_max, binwidth, anisotropy, ic_dom_quant_eff,
dc_dom_quant_eff, ic_exponent, dc_exponent
):
"""Generate a metadata dict for a time- and DOM-independent Cartesian
(x,y,z)-binned table.
Parameters
----------
binmap_hash : string
geom_hash : string
dom_tables_hash : string
times_str : string
x_min, x_max, y_min, y_max, z_min, z_max : floats
binwidth : float
anisotropy : None or tuple
ic_dom_quant_eff : float in [0, 1]
dc_dom_quant_eff : float in [0, 1]
ic_exponent : float >= 0
dc_exponent : float >= 0
Returns
-------
metadata : OrderedDict
Contains keys
'fbasename' : string
'hash' : string
'kwargs' : OrderedDict
"""
if dom_tables_hash is None:
dom_tables_hash = 'none'
kwargs = OrderedDict([
('geom_hash', geom_hash),
('binmap_hash', binmap_hash),
('dom_tables_hash', dom_tables_hash),
('times_str', times_str),
('x_min', x_min),
('x_max', x_max),
('y_min', y_min),
('y_max', y_max),
('z_min', z_min),
('z_max', z_max),
('binwidth', binwidth),
('anisotropy', anisotropy),
('ic_dom_quant_eff', ic_dom_quant_eff),
('dc_dom_quant_eff', dc_dom_quant_eff),
('ic_exponent', ic_exponent),
('dc_exponent', dc_exponent)
])
hash_params = deepcopy(kwargs)
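# round floats to a fixed precision so equivalent parameter sets hash identically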
for param in ['x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max']:
rounded_int = int(np.round(hash_params[param]*100))
hash_params[param] = rounded_int
kwargs[param] = float(rounded_int) / 100
for param in ['ic_dom_quant_eff', 'dc_dom_quant_eff',
'ic_exponent', 'dc_exponent']:
rounded_int = int(np.round(hash_params[param]*10000))
hash_params[param] = rounded_int
kwargs[param] = float(rounded_int) / 10000
hash_params['binwidth'] = int(np.round(hash_params['binwidth'] * 1e10))
tdi_hash = hash_obj(hash_params, fmt='hex')
anisotropy_str = generate_anisotropy_str(anisotropy)
fname = TDI_TABLE_FNAME_PROTO[-1].format(
tdi_hash=tdi_hash,
anisotropy_str=anisotropy_str,
table_name='',
**kwargs
)
fbasename = fname.rsplit('_.fits')[0]
metadata = OrderedDict([
('fbasename', fbasename),
('hash', tdi_hash),
('kwargs', kwargs)
])
return metadata
def generate_tdi_table(tables_dir, geom_fpath, dom_tables_hash, n_phibins,
x_lims, y_lims, z_lims,
binwidth, oversample, antialias, anisotropy,
ic_dom_quant_eff, dc_dom_quant_eff,
ic_exponent, dc_exponent,
strings=slice(None),
depths=slice(None),
times=slice(None),
recompute_binmap=False,
recompute_table=False):
"""Create a time- and DOM-independent Cartesian (x,y,z)-binned Retro
table (if it doesn't already exist or if the user requests that it be
re-computed) and save the table to disk.
The intermediate step of computing a bin mapping from polar (r, theta)
coordinates for the source (t,r,theta)-binned DOM Retro tables is also
performed if it hasn't already been saved to disk or if the user forces
its recomputation; the result of this is stored to disk for future use.
Parameters
----------
tables_dir
geom_fpath
dom_tables_hash
n_phibins : int
x_lims, y_lims, z_lims : 2-tuples of floats
binwidth : float
oversample : int
antialias : int
anisotropy : None or tuple
ic_dom_quant_eff : float in [0, 1]
dc_dom_quant_eff : float in [0, 1]
ic_exponent : float >= 0
dc_exponent : float >= 0
strings : int, sequence, slice
Select only these strings by indexing into the geom array
depths : int, sequence, slice
Select only these depth indices by indexing into the geom array
times : int, sequence, slice
Sum over only these times
recompute_binmap : bool
Force recomputation of bin mapping even if it already exists; existing
file will be overwritten
recompute_table : bool
Force recomputation of table files even if the already exist; existing
files will be overwritten
Returns
-------
tdi_data : OrderedDict
Contains following items:
'binned_sp : shape (nx,ny,nz) numpy ndarray, dtype float32
Survival probability table
'binned_px' : shape (nx,ny,nz) numpy ndarray, dtype float32
'binned_py' : shape (nx,ny,nz) numpy ndarray, dtype float32
'binned_pz' : shape (nx,ny,nz) numpy ndarray, dtype float32
Tables with average photon directionality, one each for x, y,
and z components, respectively
'ind_arrays'
'vol_arrays'
'tdi_meta' : OrderedDict
Return value from `generate_tdi_table_meta`
'binmap_meta' : OrderedDict
Return value from `generate_binmap_meta`
"""
assert isdir(tables_dir)
if dom_tables_hash is None:
dom_tables_hash = 'none'
r_max = POL_TABLE_RMAX
r_power = POL_TABLE_RPWR
n_rbins = POL_TABLE_NRBINS
n_costhetabins = POL_TABLE_NTHETABINS
n_tbins = POL_TABLE_NTBINS
else:
raise ValueError('Cannot handle non-None `dom_tables_hash`')
nx = int(np.round((x_lims[1] - x_lims[0]) / binwidth))
ny = int(np.round((y_lims[1] - y_lims[0]) / binwidth))
nz = int(np.round((z_lims[1] - z_lims[0]) / binwidth))
assert np.abs(x_lims[0] + nx * binwidth - x_lims[1]) < 1e-6
assert np.abs(y_lims[0] + ny * binwidth - y_lims[1]) < 1e-6
assert np.abs(z_lims[0] + nz * binwidth - z_lims[1]) < 1e-6
xyz_shape = (nx, ny, nz)
print('Generated/loaded TDI Cart table will have shape:', xyz_shape)
print('')
geom = np.load(geom_fpath)
depth_indices = np.atleast_1d(np.arange(60)[depths])
string_indices = np.atleast_1d(np.arange(87)[strings]) - 1
string_indices = string_indices[string_indices >= 0]
subdet_doms = {'ic': [], 'dc': []}
dc_strings = list(range(79, 86))
for string_idx in string_indices:
dom_coords = geom[string_idx:string_idx+1, depths, :]
if string_idx in dc_strings:
subdet_doms['dc'].append(dom_coords)
else:
subdet_doms['ic'].append(dom_coords)
for subdet in list(subdet_doms):  # copy keys since we may pop entries while iterating
dom_string_list = subdet_doms[subdet]
if not dom_string_list:
subdet_doms.pop(subdet)
else:
subdet_doms[subdet] = np.concatenate(dom_string_list, axis=0)
geom = geom[string_indices, :, :][:, depth_indices, :]
geom_meta = generate_geom_meta(geom)
print('Geom uses strings %s, depth indices %s for a total of %d DOMs'
% (list2hrlist([i+1 for i in string_indices]),
list2hrlist(depth_indices),
geom.shape[0] * geom.shape[1]))
print('')
ind_arrays, vol_arrays, binmap_meta = generate_binmap(
r_max=r_max, r_power=r_power,
n_rbins=n_rbins, n_costhetabins=n_costhetabins, n_phibins=n_phibins,
cart_binwidth=binwidth, oversample=oversample, antialias=antialias,
tables_dir=tables_dir, recompute=recompute_binmap
)
print('')
# Figure out which time bin(s) to use to reduce source (t,r,theta) tables
# along time axis (where reduction is one minus product of one minus
# survival probabilities and average photon directionality)
all_t_bins = list(range(n_tbins))
remaining_t_bins = np.array(all_t_bins)[times].tolist()
if all_t_bins == remaining_t_bins:
times_str = 'all'
else:
times_str = list2hrlist(remaining_t_bins)
print('Marginalizing over times in source (t,r,theta) DOM Retro tables:',
times_str)
print('')
tdi_meta = generate_tdi_table_meta(
binmap_hash=binmap_meta['hash'],
geom_hash=geom_meta['hash'],
dom_tables_hash=None, # TODO: hash for dom tables not yet implemented
times_str=times_str,
x_min=x_lims[0], x_max=x_lims[1],
y_min=y_lims[0], y_max=y_lims[1],
z_min=z_lims[0], z_max=z_lims[1],
binwidth=binwidth, anisotropy=anisotropy,
ic_dom_quant_eff=ic_dom_quant_eff,
dc_dom_quant_eff=dc_dom_quant_eff,
ic_exponent=ic_exponent, dc_exponent=dc_exponent
)
print('Generating Cartesian time- and DOM-independent (TDI) Retro table')
print('tdi_kw:', tdi_meta['kwargs'])
names = [
'survival_prob',
'avg_photon_x',
'avg_photon_y',
'avg_photon_z'
]
if not recompute_table:
for name in names:
fpath = join(tables_dir,
'%s_%s.fits' % (tdi_meta['fbasename'], name))
if not isfile(fpath):
print(' Could not find table, will (re)compute\n%s\n' % fpath)
recompute_table = True
break
if not recompute_table:
print(' Loading (x,y,z)-binned TDI Retro table from disk')
for name in names:
fpath = join(tables_dir,
tdi_meta['fbasename'] + '_' + name + '.fits')
with fits.open(fpath) as fits_file:
tmp = fits_file[0].data # pylint: disable=no-member
if name == 'survival_prob':
binned_sp = tmp
elif name == 'avg_photon_x':
binned_px = tmp
elif name == 'avg_photon_y':
binned_py = tmp
elif name == 'avg_photon_z':
binned_pz = tmp
del tmp
tdi_data = OrderedDict([ # pylint: disable=redefined-outer-name
('binned_sp', binned_sp),
('binned_px', binned_px),
('binned_py', binned_py),
('binned_pz', binned_pz),
('ind_arrays', ind_arrays),
('vol_arrays', vol_arrays),
('tdi_meta', tdi_meta),
('binmap_meta', binmap_meta)
])
return tdi_data
# Instantiate arrays for aggregation of survival probabilities and
# averaging photon direction per Cartesian bin. Note that these start as 1D
# to speed indexing operations, then are reshaped into 3D at the end.
binned_spv = np.zeros((nx*ny*nz), dtype=np.float64)
binned_px_spv = np.zeros((nx*ny*nz), dtype=np.float64)
binned_py_spv = np.zeros((nx*ny*nz), dtype=np.float64)
binned_pz_spv = np.zeros((nx*ny*nz), dtype=np.float64)
binned_one_minus_sp = np.ones((nx*ny*nz), dtype=np.float64)
t00 = time.time()
for subdet, subdet_dom_coords in subdet_doms.items():
print(' Subdetector:', subdet)
print(' -> %d strings with DOM(s) at %d depths'
% (len(subdet_dom_coords), len(subdet_dom_coords[0])))
print('')
if subdet == 'ic':
dom_quant_eff = ic_dom_quant_eff
exponent = ic_exponent
elif subdet == 'dc':
dom_quant_eff = dc_dom_quant_eff
exponent = dc_exponent
else:
raise ValueError(str(subdet))
for rel_idx, depth_idx in enumerate(depth_indices):
print(' Subdetector: %s, depth_idx: %d' % (subdet, depth_idx))
dom_coords = subdet_dom_coords[:, rel_idx, :]
t0 = time.time()
table_fname = (
'retro_nevts1000'
'_{subdet:s}'
'_DOM{depth_idx:d}'
'_r_cz_t_angles'
'.fits'.format(
subdet=subdet.upper(), depth_idx=depth_idx
)
)
# TODO: validate that bin edges match spec we're using
photon_info, _ = load_t_r_theta_table(
fpath=join(tables_dir, table_fname),
depth_idx=depth_idx,
scale=dom_quant_eff,
exponent=exponent
)
t1 = time.time()
print(' Time to load Retro DOM table: {} s'
.format(np.round(t1 - t0, 3)))
sp = photon_info.survival_prob[depth_idx].astype(np.float64)
plength = photon_info.length[depth_idx].astype(np.float64)
ptheta = photon_info.theta[depth_idx].astype(np.float64)
pdeltaphi = photon_info.deltaphi[depth_idx].astype(np.float64)
plength *= np.cos(pdeltaphi)
pz = plength * np.cos(ptheta)
prho = plength * np.sin(ptheta)
# Marginalize out time, computing the probability of a photon
# starting at any one time being detected at any other time
t_indep_sp = 1 - np.prod(1 - sp[times], axis=0)
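# probability of detection at any time: 1 - prod_t (1 - sp_t),
# i.e. the complement of the photon being missed in every time bin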
mask = t_indep_sp != 0
scale = 1 / sp.sum(axis=0)[mask]
t_indep_pz = np.zeros_like(t_indep_sp)
t_indep_prho = np.zeros_like(t_indep_sp)
t_indep_pz[mask] = (
(pz[times] * sp[times]).sum(axis=0)[mask] * scale
)
t_indep_prho[mask] = (
(prho[times] * sp[times]).sum(axis=0)[mask] * scale
)
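# direction components are averaged with each time bin's survival probability as weight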
t2 = time.time()
print(" Time to reduce Retro DOM table's time dimension: {} s"
.format(np.round(t2 - t1, 3)))
shift_and_bin(
ind_arrays=ind_arrays,
vol_arrays=vol_arrays,
dom_coords=dom_coords,
survival_prob=t_indep_sp,
prho=t_indep_prho,
pz=t_indep_pz,
nr=n_rbins,
ntheta=n_costhetabins,
r_max=r_max,
binned_spv=binned_spv,
binned_px_spv=binned_px_spv,
binned_py_spv=binned_py_spv,
binned_pz_spv=binned_pz_spv,
binned_one_minus_sp=binned_one_minus_sp,
x_min=x_lims[0],
y_min=y_lims[0],
z_min=z_lims[0],
x_max=x_lims[1],
y_max=y_lims[1],
z_max=z_lims[1],
binwidth=binwidth,
oversample=oversample,
anisotropy=None
)
print(' %d surv probs are exactly 1'
% np.sum(binned_one_minus_sp == 0))
t3 = time.time()
print(' Time to shift and bin: {} s'
.format(np.round(t3 - t2, 3)))
print('')
print('Total time to shift and bin: {} s'.format(np.round(t3 - t00, 3)))
print('')
binned_sp = 1.0 - binned_one_minus_sp
binned_sp = binned_sp.astype(np.float32).reshape(xyz_shape)
del binned_one_minus_sp
mask = binned_spv != 0
binned_px_spv[mask] /= binned_spv[mask]
binned_py_spv[mask] /= binned_spv[mask]
binned_pz_spv[mask] /= binned_spv[mask]
del mask
# Rename so as to not mislead
binned_px = binned_px_spv.astype(np.float32).reshape(xyz_shape)
binned_py = binned_py_spv.astype(np.float32).reshape(xyz_shape)
binned_pz = binned_pz_spv.astype(np.float32).reshape(xyz_shape)
del binned_px_spv, binned_py_spv, binned_pz_spv
t4 = time.time()
print('Time to normalize histograms: {} s'.format(np.round(t4 - t3, 3)))
print('')
arrays_names = [
(binned_sp, 'survival_prob'),
(binned_px, 'avg_photon_x'),
(binned_py, 'avg_photon_y'),
(binned_pz, 'avg_photon_z')
]
for array, name in arrays_names:
fname = '%s_%s.fits' % (tdi_meta['fbasename'], name)
fpath = join(tables_dir, fname)
hdulist = fits.HDUList([
fits.PrimaryHDU(array.astype(np.float32)),
fits.ImageHDU(xyz_shape),
fits.ImageHDU(np.array([x_lims, y_lims, z_lims])),
fits.ImageHDU(geom)
])
print('Saving %s to file\n%s\n' % (name, fpath))
hdulist.writeto(fpath, overwrite=True)
t5 = time.time()
print('Time to save tables to disk: {} s'.format(np.round(t5 - t4, 3)))
print('')
print('TOTAL RUN TIME: {} s'.format(np.round(t5 - t00, 3)))
tdi_data = OrderedDict([
('binned_sp', binned_sp),
('binned_px', binned_px),
('binned_py', binned_py),
('binned_pz', binned_pz),
('ind_arrays', ind_arrays),
('vol_arrays', vol_arrays),
('tdi_meta', tdi_meta),
('binmap_meta', binmap_meta)
])
return tdi_data
def parse_args(description=__doc__):
"""Parse command line args"""
parser = ArgumentParser(description=description)
parser.add_argument(
'--tables-dir', required=True,
help='Path to directory containing Retro tables'
)
parser.add_argument(
'--geom-fpath', required=True,
help='Path to geometry NPY file'
)
parser.add_argument(
'--dom-tables-hash', default=None,
help='Hash ID for source (t,r,theta)-binned DOM Retro tables'
)
# TODO: all of the following should be known by passing the hash, but we
# could also specify these specs and then figure out what source
# tables to load
#parser.add_argument(
# '--t-max', type=float,
# help='''Maximum time bin edge in the source (t,r,theta)-binnned DOM
# Retro tables (nanoseconds)'''
#)
#parser.add_argument(
# '--r-max', type=float,
# help='''Maximum radial bin edge in the source (t,r,theta)-binnned DOM
# Retro tables (meters)'''
#)
#parser.add_argument(
# '--r-power', type=float,
# help='''Power used for radial power-law binning in source
# (t,r,theta)-binned DOM Retro tables'''
#)
#parser.add_argument(
# '--n-rbins', type=int,
# help='''Number of radial bins used in source (t,r,theta)-binned DOM
# Retro tables'''
#)
#parser.add_argument(
# '--n-costhetabins', type=int,
# help='''Number of costheta bins used in source (t,r,theta)-binned DOM
# Retro tables'''
#)
#parser.add_argument(
# '--n-tbins', type=int,
# help='''Number of time bins used in source (t,r,theta)-binned DOM Retro
# tables'''
#)
parser.add_argument(
'--n-phibins', type=int, required=True,
help='''Number of phi bins to use for rotating the (r,theta) tables
about the z-axis to for effectively spherical tables'''
)
parser.add_argument(
'--x-lims', nargs=2, type=float, required=True,
help='''Limits of the produced table in the x-direction (meters)'''
)
parser.add_argument(
'--y-lims', nargs=2, type=float, required=True,
help='''Limits of the produced table in the y-direction (meters)'''
)
parser.add_argument(
'--z-lims', nargs=2, type=float, required=True,
help='''Limits of the produced table in the z-direction (meters)'''
)
parser.add_argument(
'--binwidth', type=float, required=True,
help='''Binwidth in x, y, and z directions (meters). Must divide each
of --x-lims, --y-lims, and --z-lims into an integral number of bins.'''
)
parser.add_argument(
'--oversample', type=int, required=True,
help='''Oversampling factor in the x-, y-, and z- directions (int >=
1).'''
)
parser.add_argument(
'--antialias', type=int, required=True,
help='''Antialiasing factor (int between 1 and 50).'''
)
parser.add_argument(
'--anisotropy', nargs='+', metavar='ANISOT_PARAM', required=False,
default=None,
help='''[NOT IMPLEMENTED] Simple ice anisotropy parameters to use: DIR
for azimuthal direction of low-scattering axis (radians) and MAG for
magnitude of anisotropy (unitless). If not specified, no anisotropy is
modeled.'''
)
parser.add_argument(
'--ic-quant-eff', type=float, default=IC_DOM_QUANT_EFF,
help='''IceCube (non-DeepCore) DOM quantum efficiency'''
)
parser.add_argument(
'--dc-quant-eff', type=float, default=DC_DOM_QUANT_EFF,
help='''DeepCore DOM quantum efficiency'''
)
parser.add_argument(
'--ic-exponent', type=float, default=1,
help='''IceCube (non-DeepCore) DOM probability exponent, applied as
`P = 1 - (1 - P)**exponent`; must be >= 0.'''
)
parser.add_argument(
'--dc-exponent', type=float, default=1,
help='''DeepCore DOM probability exponent, applied as
`P = 1 - (1 - P)**exponent`; must be >= 0.'''
)
parser.add_argument(
'--strings', type=str, nargs='+', required=False, default=None,
help='''Only use these strings (indices start at 1, as per the IceCube
convention). Specify a human-redable string, e.g. "80-86" to include
only DeepCore strings, or "26-27,35-37,45-46,80-86" to include the
IceCube strings that are considered to be part of DeepCore as well as
"DeepCore-proper" strings. Note that spaces are acceptable.'''
)
parser.add_argument(
'--depths', type=str, nargs='+', required=False, default=None,
help='''Only use these depths, specified as indices with shallowest at
0 and deepest at 59. Note that the actual depths of the DOMs depends
upon whether the string is in DeepCore or not. Specify a human-redable
string, e.g. "50-59" to include depths {50, 51, ..., 59}. Or one
could specify "4-59:5" to use every fifth DOM on each string. Note that
spaces are acceptable.'''
)
parser.add_argument(
'--times', type=str, nargs='+', required=False, default=None,
help='''Only use these times (specified as indices) from the source
(t,r,theta)-binned Retro DOM tables. Specify as a human-readable
sequence, similarly to --strings and --depths.'''
)
parser.add_argument(
'--recompute-binmap', action='store_true',
help='''Recompute the bin mapping even if the file exists; the existing
file will be overwritten.'''
)
parser.add_argument(
'--recompute-table', action='store_true',
help='''Recompute the Retro time- and DOM-independent (TDI) table even
if the corresponding files exist; these files will be overwritten.'''
)
kwargs = vars(parser.parse_args())
for key in ['strings', 'depths', 'times']:
val = kwargs[key]
if val is None:
kwargs[key] = slice(None)
else:
kwargs[key] = hrlist2list(','.join(val))
return kwargs
if __name__ == '__main__':
tdi_data = generate_tdi_table(**parse_args()) # pylint: disable=invalid-name
| 37.685796 | 86 | 0.619751 | 3,555 | 26,267 | 4.374684 | 0.167651 | 0.014918 | 0.019097 | 0.010867 | 0.363812 | 0.277456 | 0.234311 | 0.207819 | 0.160108 | 0.14982 | 0 | 0.012624 | 0.273195 | 26,267 | 696 | 87 | 37.739943 | 0.802001 | 0.227015 | 0 | 0.156118 | 0 | 0.006329 | 0.241334 | 0.002408 | 0 | 0 | 0 | 0.001437 | 0.008439 | 1 | 0.006329 | false | 0 | 0.033755 | 0 | 0.048523 | 0.061181 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8083cb6483b18e6dd4299dd81d56acefd37473b1 | 28,929 | py | Python | exipicrename/exipicrename.py | unixhex/exipicrename2 | b2a2f5af224c4a2c93f81e48c2622c7522d76489 | [
"MIT"
] | 1 | 2020-02-14T13:41:28.000Z | 2020-02-14T13:41:28.000Z | exipicrename/exipicrename.py | unixhex/exipicrename2 | b2a2f5af224c4a2c93f81e48c2622c7522d76489 | [
"MIT"
] | 3 | 2021-06-08T19:46:29.000Z | 2022-03-11T23:44:57.000Z | exipicrename/exipicrename.py | unixhex/exipicrename2 | b2a2f5af224c4a2c93f81e48c2622c7522d76489 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
exipicrename
Beta of the Python 3 version.
Reads EXIF data from pictures and renames them.
The EXIF tags used are:
* DateTimeOriginal
* FNumber
* ExposureTime
* FocalLength
* Model
* ISOSpeedRatings
"""
# Copyright (c) 2019 Hella Breitkopf, https://www.unixwitch.de
# MIT License -> see LICENSE file
import os
from os.path import splitext as splitext_last
import sys
import re
import csv
import time
import glob
import argparse
import copy
import logging
# PIL from Pillow
import PIL
import PIL.Image
import PIL.ExifTags
version_info = (0, 0, 0, 8) # pylint: disable=invalid-name
version = '.'.join(str(digit) for digit in version_info) # pylint: disable=invalid-name
__CAMERADICT = {} # how to rename certain camera names (load from csv)
__PIC_DICT = {} # main storage for file meta data
__CONF = {
'date_dir' : False,
'verbose' : False,
'debug' : False,
'silent' : False,
'dry_run' : False,
'use_serial' : True,
'use_duplicate' : True,
'ooc' : False,
'ooc_extension': '.ooc',
'short_names' : False,
'clean_data_after_run' : True,
'serial_length': 3,
'camera_rename_csv_file': os.path.join(os.path.dirname(__file__), "camera-model-rename.csv"),
'zero_value_ersatz': 'x',
'unwanted_character_ersatz': '-',
'decimal_delimiter_ersatz': '-',
'jpg_out_extension': '.jpg',
'jpg_input_extensions': ('.jpg', '.JPG', '.jpeg', '.JPEG'),
# source for raw_extensions: https://fileinfo.com/filetypes/camera_raw
'raw_extensions': (
'.orf', '.ORF', '.3fr', '.3FR',
'.ari', '.ARI', '.arw', '.ARW',
'.bay', '.BAY',
'.cr2', '.CR2', '.cr3', '.CR3', '.crw', '.CRW',
'.cs1', '.CS1', '.cxi', '.CXI',
'.dcr', '.DCR', '.dng', '.DNG',
'.eip', '.EIP', '.erf', '.ERF',
'.fff', '.FFF',
'.iiq', '.IIQ',
'.j6i', '.J6I',
'.k25', '.K25', '.kdc', '.KDC',
'.mef', '.MEF', '.mfw', '.MFW', '.mos', '.MOS', '.mrw', '.MRW',
'.nef', '.NEF', '.nrw', '.NRW',
'.pef', '.PEF',
'.raf', '.RAF', '.raw', '.RAW', '.rw2', '.RW2',
'.rwl', '.RWL', '.rwz', '.RWZ',
'.sr2', '.SR2', '.srf', '.SRF', '.srw', '.SRW',
'.x3f', '.X3F',
)
}
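# Example (illustrative): a typical configuration before running the renamer,
# using the setters defined below:
#   set_dry_run(True)        # simulate only, don't touch any files
#   set_use_date_dir(True)   # sort renamed pictures into per-date directories
#   set_serial_length(4)     # use four digits for the serial number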
def set_raw_extensions(ext_set: set):
"""this set of extension we use to recognize raw files
(please don't forget the delimiter)
HINT: use only if neccessary, the default is rather inclusive
"""
__CONF['raw_extensions'] = ext_set
def get_raw_extensions():
"""get set of extension to recognize input raw files
(should include the delimiter (.)"""
return __CONF['raw_extensions']
def set_jpg_input_extensions(ext_set: set):
"""this set of extension we use to recognize JPEG files
(please don't forget the delimiter)"""
__CONF['jpg_input_extensions'] = ext_set
def get_jpg_input_extensions():
"""get set of extension to recognize input JGEG files
(should include the delimiter (.)"""
return __CONF['jpg_input_extensions']
def set_jpg_out_extension(ext: str = ".jpg"):
"""this extension we use as output for JPEG files
please don't forget the delimiter (.)"""
__CONF['jpg_out_extension'] = ext
def get_jpg_out_extension():
"""get extension for output JGEG files
(should include the delimiter (.)"""
return __CONF['jpg_out_extension']
def set_ooc_extension(ext: str = ".ooc"):
"""additional extension to mark 'out of cam' pictures
comes before the jpg_out_extension
please don't forget the delimiter (.)"""
# we don't trust commandline-arguments, so we clean it ...
newext = re.sub(r'[^a-zA-Z0-9._-]+', '', ext.strip().lower())
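# e.g. ' .OoC! ' -> '.ooc' (stripped, lowercased, characters outside [a-z0-9._-] removed)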
__CONF['ooc_extension'] = newext
def get_ooc_extension():
"""additional extension to mark 'out of cam' pictures
comes before the jpg_out_extension
(should include the delimiter (.)"""
return __CONF['ooc_extension']
def set_decimal_delimiter_ersatz(dds: str):
"""which symbol should be used instead
of the decimal delimiter '.'
e.g. for aperture (blende)
(since a dot is not good in file names we use something else)"""
__CONF['decimal_delimiter_ersatz'] = dds
def get_decimal_delimiter_ersatz():
"""return substitution string for decimal delimiter"""
return __CONF['decimal_delimiter_ersatz']
def set_unwanted_character_ersatz(ucs: str):
"""if the lens is analog, the value for aperture or length might be zero
which string should be written instead?"""
__CONF['unwanted_character_ersatz'] = ucs
def get_unwanted_character_ersatz():
"""return substitution string for zero aperture or length values"""
return __CONF['unwanted_character_ersatz']
def set_zero_value_ersatz(zvs: str):
"""if the lens is analog, the value for aperture or length might be zero
which string should be written instead?"""
__CONF['zero_value_ersatz'] = zvs
def get_zero_value_ersatz():
"""return substitution string for zero aperture or length values"""
return __CONF['zero_value_ersatz']
def set_camera_rename_csv_name(filename: str):
"""set name for the 'camera-name-translation'"""
__CONF['camera_rename_csv_file'] = filename
def get_camera_rename_csv_name():
"""get name for the 'camera-name-translation'"""
return __CONF['camera_rename_csv_file']
def set_serial_length(serial_length: int = 3):
"""set the length of the serial number (to be included in the file name) """
__CONF['serial_length'] = serial_length
def get_serial_length():
"""get the length of the serial number (to be included in the file name) """
return __CONF['serial_length']
def set_clean_data_after_run(__clean: bool = True):
"""for tests we wan't to analyze the dict,
but if used as a module, it needs to be cleaned up"""
__CONF['clean_data_after_run'] = __clean
def do_clean_data_after_run():
"""for tests we wan't to analyze the dict,
but if used as a module, it needs to be cleaned up"""
return __CONF['clean_data_after_run']
def set_use_date_dir(_use_date_dir: bool = True):
"""write files to separate directory?"""
__CONF['date_dir'] = _use_date_dir
def use_date_dir():
"""write files to separate directory?"""
return __CONF['date_dir']
def set_verbose(verbose: bool = True):
"""set verbosity (bool)"""
__CONF['verbose'] = verbose
def is_verbose():
"""get verbosity (bool)"""
return __CONF['verbose']
def set_debug(debug: bool = True):
"""set debug (bool)"""
__CONF['debug'] = debug
def is_debug():
"""get debug status (bool)"""
return __CONF['debug']
def set_silent(silent: bool = True):
"""set silence (bool)"""
__CONF['silent'] = silent
def is_silent():
"""get silence (bool)"""
return __CONF['silent']
def set_dry_run(dry_run: bool = True):
"""set dry-run (simulation-mode status)"""
__CONF['dry_run'] = dry_run
def is_dry_run():
"""get dry-run (simulation-mode status)"""
return __CONF['dry_run']
def set_use_serial(use_serial: bool = True):
"""include a serial number"""
__CONF['use_serial'] = use_serial
def use_serial():
"""should we include serial number?"""
return __CONF['use_serial']
def set_use_duplicate(use_duplicate: bool = True):
"""include a duplicate number if the same timestamp occurs"""
__CONF['use_duplicate'] = use_duplicate
def use_duplicate():
"""should we include a duplicate number?"""
return __CONF['use_duplicate']
def set_use_ooc(_use_ooc: bool = True):
"""set use of ooc extension"""
__CONF['ooc'] = _use_ooc
def use_ooc():
"""get use of ooc extension"""
return __CONF['ooc']
def set_short_names(short_names: bool = True):
"""use short names (without camera exif)"""
__CONF['short_names'] = short_names
def use_short_names():
"""get usage of short names (without camera exif)"""
return __CONF['short_names']
def export_pic_dict():
"""for tests"""
return copy.deepcopy(__PIC_DICT)
def verboseprint(*msg):
"""print verbose messages"""
#print("P", *msg)
#logging.info(*msg)
for m in msg:
logging.info(str(m))
def errorprint(*args):
"""print error messages"""
#print(*args, file=sys.stderr)
for m in args:
logging.error(str(m))
def __create_new_basename(img):
"""create a new filename based on exif data"""
# fetch tagging from https://stackoverflow.com/a/4765242
try:
exif = {
PIL.ExifTags.TAGS[k]: v
for k, v in img._getexif().items() # pylint: disable=protected-access
if k in PIL.ExifTags.TAGS
}
except AttributeError:
if is_verbose():
errorprint('NO exif info in ' + img.filename)
return None, None, None
try:
_datetime = format_datetime(exif['DateTimeOriginal'])
_date = format_date(exif['DateTimeOriginal'])
if not use_short_names():
_aperture = __format_aperture_tuple(exif['FNumber'])
_exposure_time = __format_exposuretime_tuple(exif['ExposureTime'])
_focal_len = __format_focal_length_tuple(exif['FocalLength'])
_camera = __format_camera_name(exif['Model'])
_iso = (exif['ISOSpeedRatings'])
except KeyError as err:
if is_verbose():
errorprint('(Some) exif tags missing in ' + img.filename, err)
return None, None, None
if not use_short_names():
_new_basename = f"{_datetime}{{}}__{_camera}__{_focal_len}" + \
f"__{_aperture}__t{_exposure_time}__iso{_iso}"
else:
_new_basename = f"{_datetime}{{}}"
return _datetime, _new_basename, _date
def __format_camera_name(_name):
"""format camera name - substitute unwanted characters, lower case
if available, read translations for camera models from csv and apply them """
_newname = re.sub(r'[^a-zA-Z0-9]+', get_unwanted_character_ersatz(), _name.strip().lower())
__read_camera_rename_csv()
if _newname in __CAMERADICT:
return __CAMERADICT[_newname]
return _newname
def __format_aperture_tuple(_ap):
"""format aperture tuple to short printable string
new pillow might not return tuple, so check first"""
    if isinstance(_ap, tuple):
        numerator = _ap[0]
        divisor = _ap[1]
    else:
        numerator = _ap.numerator
        divisor = _ap.denominator
if numerator == 0:
return get_zero_value_ersatz()
if numerator % divisor == 0:
return "f" + str(numerator//divisor)
else:
return "f" + str(numerator/divisor).replace('.', get_decimal_delimiter_ersatz())
def __format_focal_length_tuple(_tuple):
"""format FocalLenght tuple to short printable string
we ignore the position after the decimal point
because it is usually not very essential for focal length
"""
    if isinstance(_tuple, tuple):
        numerator = _tuple[0]
        divisor = _tuple[1]
    else:
        numerator = _tuple.numerator
        divisor = _tuple.denominator
if numerator == 0:
return get_zero_value_ersatz()
if numerator % 10 == 0 and divisor % 10 == 0:
# example: change 110/10 -> 11
numerator = numerator // 10
divisor = divisor // 10
if divisor == 1:
# example: change 8/1 to 8mm
_string = f"{numerator}mm"
else:
# example: 524/10 -> 52mm
# we ignore the position after the decimal point
# because it is usually not very essential for focal length
_string = f"{numerator//divisor}mm"
return _string
def __format_exposuretime_tuple(_time):
"""format ExposureTime tuple to short printable string
fractions over or equal 1 second are marked with s, e.g. 8s
fractions below 1 second are broken down to the divisor,
this is a bit incorrect but short and common e.g. in cameras
(and we want to have a short string)
"""
    if isinstance(_time, tuple):
        numerator = _time[0]
        divisor = _time[1]
    else:
        numerator = _time.numerator
        divisor = _time.denominator
if numerator % 10 == 0 and divisor % 10 == 0:
# change 10/1250 to 1/125
numerator = numerator // 10
divisor = divisor // 10
if divisor == 1:
# change 6/1 -> 6s
# fractions => 1s with s for seconds
_string = f"{numerator}s"
else:
# change 1/125 -> 125
_string = f"{divisor}"
return _string
def format_datetime(_datetime):
"""format time string -> YYYYmmdd_HHMMSS"""
_time_struct = time.strptime(_datetime, "%Y:%m:%d %H:%M:%S")
return time.strftime("%Y%m%d_%H%M%S", _time_struct)
def format_date(_datetime):
"""format time string -> YYYY-mm-dd"""
_time_struct = time.strptime(_datetime, "%Y:%m:%d %H:%M:%S")
return time.strftime("%Y-%m-%d", _time_struct)
def __read_camera_rename_csv():
"""read the model translate csv - if available (only once)"""
if __CAMERADICT:
# we've read the csv already
return
try:
with open(get_camera_rename_csv_name()) as csvfile:
camera_model_translate = csv.reader(csvfile, delimiter=',')
for row in camera_model_translate:
__CAMERADICT[row[0]] = row[1]
    except OSError:
        if is_verbose():
            verboseprint("camera translation csv not found: ", get_camera_rename_csv_name())
def splitext_all(_filename):
"""split all extensions (after the first .) from the filename
should work similar to os.path.splitext (but that splits only the last extension)
"""
_name, _extensions = _filename.split('.')[0], '.'.join(_filename.split('.')[1:])
return(_name, "."+ _extensions)
def __picdict_set_serial_once(_pic, _serial, _serial_length):
"""set serial number in a global __PIC_DICT dictionary entry (if not set yet or if empty)"""
# make a string out of "_serial", fill it up with 0 up to _serial_length
# include it into the new file base name
try:
_ = __PIC_DICT[_pic]['serial']
return False
except KeyError:
pass
__PIC_DICT[_pic]['serial'] = _serial
if use_serial():
__PIC_DICT[_pic]['new_basename'] = \
__PIC_DICT[_pic]['new_basename'].format("__" +str(_serial).zfill(_serial_length))
else:
__PIC_DICT[_pic]['new_basename'] = \
__PIC_DICT[_pic]['new_basename'].format("")
return True
def __picdict_has_orig_filepath(filepath):
"""search if this filename is already recorded in global __PIC_DICT"""
    _dir, _filename = os.path.split(filepath)
    _basename, _ext = os.path.splitext(_filename)
for filerecord in __PIC_DICT.values():
try:
if filerecord['orig_basename'] == _basename \
and filerecord['orig_extension'] == _ext \
and filerecord['orig_dirname'] == _dir:
return True
except KeyError:
pass
return False
def __rename_files():
"""rename files (after check if we don't overwrite)"""
for k in sorted(__PIC_DICT):
oldname = "{}/{}{}".format(
__PIC_DICT[k]["orig_dirname"],
__PIC_DICT[k]["orig_basename"],
__PIC_DICT[k]["orig_extension"],
)
newname = "{}/{}{}".format(
__PIC_DICT[k]["new_dirname"],
__PIC_DICT[k]["new_basename"],
__PIC_DICT[k]["new_extension"],
)
        if oldname == newname:
            continue
        if not os.path.isfile(oldname):
            if not is_silent():
                errorprint(f"WARNING: want to rename {oldname}\n"
                           f" to {newname}\n"
                           f" but orig file not available any more")
            continue
        if os.path.isfile(newname):
            # we really really don't want to overwrite files,
            # so skip even when running silently
            if not is_silent():
                errorprint(f"WARNING: did not overwrite existing file\n"
                           f"\t{newname}\n\twith:\n \t{oldname}")
            continue
        msg = "SIMULATION| " if is_dry_run() else ""
        if is_verbose() or (is_dry_run() and not is_silent()):
            verboseprint(f"{msg}rename old: {oldname} ")
            verboseprint(f"{msg}to NEW : {newname} ")
        if not is_dry_run():
            os.rename(oldname, newname)
def __parse_args():
"read and interpret commandline arguments with argparse"
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("file", nargs='+',
help="jpeg files to rename")
parser.add_argument("-d", "--datedir", action="store_true",
help="sort and store pictures to sub-directories"
"depending on DateTimeOriginal (YYYY-MM-DD)")
parser.add_argument("-o", "--ooc", action="store_true",
help="use .ooc.jpg as filename extension (for Out Of Cam pictures)")
parser.add_argument("--oocstring", action="store",
help="use string as additional extension,"
" don't forget the '.' as delimiter")
parser.add_argument("-s", "--short", "--short-names",
action="store_true",
help="use short names: only date + serial number, "
"no exhaustive camera data")
parser.add_argument("-n", "--simulate", "--dry-run",
action="store_true",
help="don't rename, just show what would happen")
parser.add_argument("--debug",
action="store_true",
help="debug")
parser.add_argument('-V', '--version',
action='version',
version=f'%(prog)s {version}',
help='show the version number and exit')
group_number = parser.add_mutually_exclusive_group()
group_number.add_argument("--no-serial", action="store_true",
help="don't include a serial number")
group_number.add_argument("--no-duplicate", action="store_true",
help="don't attach a duplicate number if the same timestamp occurrs more than once")
group_verbose = parser.add_mutually_exclusive_group()
group_verbose.add_argument("-v", "--verbose", action="store_true")
group_verbose.add_argument("-q", "--quiet", "--silent", action="store_true")
args = parser.parse_args()
if args.no_serial:
set_use_serial(False)
set_use_duplicate(True)
if args.no_duplicate:
set_use_serial(True)
set_use_duplicate(False)
if args.quiet:
set_silent(True)
set_verbose(False)
if args.datedir:
set_use_date_dir(True)
if args.simulate:
set_dry_run(True)
if args.ooc:
set_use_ooc(True)
if args.oocstring:
set_ooc_extension(args.oocstring)
set_use_ooc(True)
if args.short:
set_short_names(True)
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
set_debug(True)
if args.verbose:
if logging.getLogger().getEffectiveLevel() >= logging.INFO:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
set_verbose(True)
verboseprint(f"""
version: {version}
FLAGS:
verbose: {is_verbose()}
silent: {is_silent()}
dry_run: {is_dry_run()}
use_date_dir: {use_date_dir()},
use_ooc: {use_ooc()}
short_names: {use_short_names()}
use_serial: {use_serial()}
use_duplicate: {use_duplicate()}
log_level: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}
""")
if logging.getLogger().getEffectiveLevel() >= logging.INFO:
logging.basicConfig(format='%(levelname)s:%(message)s')
return args.file
def __read_picture_data(_filelist):
""" READ picture exif data, put it in dictionary __PIC_DICT"""
for orig_filepath in _filelist:
# ensure we only fetch jpg and jpeg and JPG and JPEG ...
_, extension = splitext_last(orig_filepath)
        if extension not in get_jpg_input_extensions():
continue
orig_dirname, origfilename = os.path.split(orig_filepath)
orig_basename, orig_all_extensions = splitext_all(origfilename)
# the orig_dirname might be empty->absolute path
orig_dirname = os.path.abspath(os.path.expanduser(orig_dirname))
orig_filepath = os.path.join(orig_dirname, orig_basename + orig_all_extensions)
# ensure we don't read the same picture twice
if __picdict_has_orig_filepath(orig_filepath):
if is_verbose():
verboseprint(f"{orig_filepath} already processed")
continue
try:
with PIL.Image.open(orig_filepath) as picture:
timestamp, new_basename, date = __create_new_basename(picture)
except OSError:
if not is_silent():
errorprint(f"{orig_filepath} can't be opened as image")
continue
if new_basename:
duplicate = 0
            # There might be other jpgs around with the same timestamp.
            # These might be either:
            # * serial shots (same camera, same second) or
            # * parallel shots (other camera, same second) or
            # * the same camera after a clock reset
            # so we NEED to check first if this date is already claimed by another shot
            # and save both (the second one gets a number > 0 in 'duplicate')
            while f"{timestamp}_{duplicate}" in __PIC_DICT:
duplicate += 1
# last changed time of that file to see for serial pictures which is the newest
#ctime = str(os.path.getctime(orig_filepath))
#mtime = str(os.path.getmtime(orig_filepath))
__PIC_DICT[f"{timestamp}_{duplicate}"] = {
'timestamp': timestamp,
'duplicate': duplicate,
'orig_basename' : orig_basename,
'new_basename': new_basename,
'orig_dirname' : orig_dirname,
'orig_extension' : orig_all_extensions,
'date': date,
}
#'ctime' : ctime,
#'orig_filepath': orig_filepath,
def __organize_picture_data():
"""analyse what jpg files we've got and find accociate files"""
pic_list = sorted(__PIC_DICT)
# how long is my list? Is the default serial length long enough (do I have enough digits)?
    serial_min_length = len(str(len(pic_list)))
if serial_min_length > get_serial_length():
set_serial_length(serial_min_length)
# first serial NUMBER to be included into the new picture name
serial = 1
# walk now through all pictures to process them
for pic in pic_list:
orig_extension = __PIC_DICT[pic]['orig_extension']
extension = "." + orig_extension.split(".")[-1]
if extension in get_jpg_input_extensions():
__organize_jpg_files(pic, serial)
__organize_extra_files(pic)
serial += 1
def __organize_jpg_files(pic, serial):
"""organize new paths for the jpg files"""
orig_full_name = os.path.join(
__PIC_DICT[pic]['orig_dirname'],
__PIC_DICT[pic]['orig_basename'],
) + \
__PIC_DICT[pic]['orig_extension']
duplicate = __PIC_DICT[pic]['duplicate']
    # TODO BETTER DUPLICATE HANDLING pylint: disable=fixme
# -> oldest file (mtime) should win "original without marker status"
# -> check if the content seems to be really the same
# -> real duplicates could be marked with a "DUPLICATE" string
# current status is first come first serve
__picdict_set_serial_once(pic, serial, get_serial_length())
orig_dirname, origfilename = os.path.split(orig_full_name)
orig_basename, orig_all_extensions = splitext_all(origfilename)
# the orig_dirname might be empty->expand to absolute path
orig_dirname = os.path.abspath(os.path.expanduser(orig_dirname))
__PIC_DICT[pic]['orig_dirname'] = orig_dirname
__PIC_DICT[pic]['orig_basename'] = orig_basename
__PIC_DICT[pic]['orig_extension'] = orig_all_extensions
# move files to other directory
if use_date_dir():
new_dirname = os.path.join(orig_dirname, __PIC_DICT[pic]['date'])
        # Is this directory already there? Is there something else that
        # has this name but is not a directory? Create the directory if
        # needed; on a name clash, exit.
if not os.path.isdir(new_dirname):
try:
if is_dry_run():
if is_verbose():
verboseprint(f"INFO: create new directory: {new_dirname} (SIMULATION MODE)")
else:
if is_verbose():
verboseprint(f"INFO: create new directory: {new_dirname}")
os.makedirs(new_dirname)
except FileExistsError:
errorprint(f'ERROR: There is a {new_dirname}, but it is not a directory')
sys.exit()
    # don't move files to another directory
else:
new_dirname = orig_dirname
__PIC_DICT[pic]['new_dirname'] = new_dirname
if duplicate and use_duplicate():
__PIC_DICT[pic]['new_basename'] = __PIC_DICT[pic]['new_basename'] + f'_{duplicate}'
if use_ooc():
__PIC_DICT[pic]['new_extension'] = get_ooc_extension() + get_jpg_out_extension()
else:
__PIC_DICT[pic]['new_extension'] = get_jpg_out_extension()
def __organize_extra_files(pic):
"""organize new paths for the associated files"""
extracounter = 0
orig_dirname = __PIC_DICT[pic]['orig_dirname']
new_dirname = __PIC_DICT[pic]['new_dirname']
orig_basename = __PIC_DICT[pic]['orig_basename']
orig_full_name = os.path.join(
__PIC_DICT[pic]['orig_dirname'],
__PIC_DICT[pic]['orig_basename'],
) + \
__PIC_DICT[pic]['orig_extension']
duplicate = __PIC_DICT[pic]['duplicate']
for extrafile in glob.glob(f'{orig_dirname}/{orig_basename}.*'):
if extrafile == orig_full_name or os.path.isdir(extrafile):
continue # next file
# raw
_, extension = splitext_last(extrafile)
if extension in get_raw_extensions():
extra = f"{pic}_raw"
if duplicate:
# check if the first jpg (or a following) file
# already "claimed" this raw file
if __picdict_has_orig_filepath(extrafile):
continue
# ok, we did look, nobody has this file so we keep it ...
else: # if not raw
if __picdict_has_orig_filepath(extrafile):
continue
extra = f"{pic}_{extracounter}"
_, extension = splitext_all(extrafile)
__PIC_DICT[extra] = {
'orig_dirname' : orig_dirname,
'new_dirname' : new_dirname,
'orig_basename' : orig_basename,
'new_basename' : __PIC_DICT[pic]['new_basename'],
'orig_extension' : extension,
'new_extension' : extension.lower(),
}
if extension not in get_raw_extensions():
extracounter += 1
def clean_stored_data():
"""cleanup stored data"""
global __PIC_DICT # pylint: disable=global-statement
__PIC_DICT = {}
def exipicrename(filelist):
"""Read exif data from (filelist) pictures,
rename them and associated files (e.g. raw files, xmp files, ... ).
input should be a list of filenames (one single filenames as string is also accepted)"""
# read exif data from picture files and store this data in __PIC_DICT
# for single files we don't require a list
if not isinstance(filelist, list):
if isinstance(filelist, str):
filelist = [filelist]
else:
if not is_silent():
errorprint(f"Error: expected list of files ")
sys.exit(1)
__read_picture_data(filelist)
    # analyse which jpg files we've got and find associated files
# write all to __PIC_DICT
__organize_picture_data()
# now do the renaming (based on all stored data in __PIC_DICT)
__rename_files()
# for use as a module: clean up stored data from __PIC_DICT
if do_clean_data_after_run():
clean_stored_data()
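# Module-style usage sketch (hypothetical paths; normally this is driven by
# the CLI entry point below):
#   set_dry_run(True)           # simulate only
#   set_use_date_dir(True)      # sort into YYYY-MM-DD sub-directories
#   exipicrename(["/photos/IMG_0001.jpg", "/photos/IMG_0002.jpg"])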
def main():
"""main - entry point for command line call"""
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
#logging.basicConfig(format='%(levelname)s:%(message)s')
exipicrename(__parse_args())
if __name__ == '__main__':
main()
# *** THE END ***
| 35.023002 | 108 | 0.624564 | 3,649 | 28,929 | 4.67635 | 0.155385 | 0.021331 | 0.016409 | 0.009845 | 0.305556 | 0.243436 | 0.197082 | 0.165436 | 0.157876 | 0.14135 | 0 | 0.006009 | 0.257907 | 28,929 | 825 | 109 | 35.065455 | 0.788849 | 0.255246 | 0 | 0.202677 | 0 | 0 | 0.192381 | 0.027469 | 0 | 0 | 0 | 0.001212 | 0 | 1 | 0.116635 | false | 0.005736 | 0.024857 | 0 | 0.216061 | 0.030593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80855b801b71f76158fe03a357cb9349f1c0a767 | 4,324 | py | Python | api/urls.py | deka108/meas_deka | 9646b04b878f325ade0a59e41bfcb10ab962d753 | [
"Apache-2.0"
] | null | null | null | api/urls.py | deka108/meas_deka | 9646b04b878f325ade0a59e41bfcb10ab962d753 | [
"Apache-2.0"
] | 1 | 2018-06-19T16:27:31.000Z | 2018-06-21T02:57:03.000Z | api/urls.py | deka108/mathqa-server | 9646b04b878f325ade0a59e41bfcb10ab962d753 | [
"Apache-2.0"
] | null | null | null | """
# Name: api/urls.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: Nov 23, 2016
# Last Modified: Nov 23, 2016
# Modified by: Phuc Le-Sanh
"""
from django.conf.urls import url, include
# from rest_framework import routers
from rest_framework.authtoken import views as rest_views
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
# router = routers.SimpleRouter()
# router.register("question/search", QuestionSearchView, base_name="question-search")
urlpatterns = [
# url(r'^', include(router.urls)),
url(r'^topics/$', views.TopicList.as_view(), name='topic-list'),
url(r'^topics/(?P<pk>[0-9]+)/$', views.TopicDetail.as_view(),
name='topic-detail'),
url(r'^concepts/$', views.ConceptList.as_view(), name='concept-list'),
url(r'^concepts/(?P<pk>[0-9]+)/$', views.ConceptDetail.as_view(),
name='concept-detail'),
url(r'^papers/$', views.PaperList.as_view(), name='paper-list'),
url(r'^papers/(?P<pk>[0-9]+)/$', views.PaperDetail.as_view(),
name='paper-detail'),
url(r'^questions/$', views.QuestionList.as_view(), name='question-list'),
url(r'^questions/(?P<pk>[0-9]+)/$', views.QuestionDetail.as_view(),
name='question-detail'),
url(r'^answerparts/$', views.AnswerPartList.as_view(),
name='answerpart-list'),
url(r'^answerparts/(?P<pk>[0-9]+)/$', views.AnswerPartDetail.as_view(),
name='answerpart-detail'),
# education levels
url(r'^subjects/$', views.SubjectList.as_view(), name='subject-list'),
url(r'^subjects/(?P<pk>[0-9]+)/$', views.SubjectDetail.as_view(),
name='subject-detail'),
# topics
url(r'^(?P<subj_id>[0-9]+)/topics/$', views.TopicList.as_view(),
name='subj-topic-list'),
# Concepts
url(r'^(?P<subj_id>[0-9]+)/concepts/$', views.ConceptList.as_view(),
name='subj-concept-list'),
url(r'^topics/(?P<topic_id>[0-9]+)/concepts/$',
views.ConceptList.as_view(), name='topic-concept-list'),
# Questions
url(r'^(?P<subj_id>[0-9]+)/questions/$', views.QuestionList.as_view(),
name='subj-question-list'),
url(r'^topics/(?P<topic_id>[0-9]+)/questions/$',
views.QuestionList.as_view(), name='topic-question-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/questions/$',
views.QuestionList.as_view(), name='concept-question-list'),
# Keypoints
url(r'^keypoints/$', views.KeyPointList.as_view(), name='keypoint-list'),
url(r'^keypoints/(?P<pk>[0-9]+)/$', views.KeyPointDetail.as_view(),
name='keypoint-detail'),
url(r'^concepts/(?P<concept_id>[0-9]+)/keypoints/$',
views.KeyPointList.as_view(), name='concept-keypoint-list'),
# Sample Questions
url(r'^samplequestions/$', views.QuestionList.as_view(),
name='samplequestion-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/samplequestions/$',
views.QuestionList.as_view(), name='concept-samplequestion-list'),
    # Real Questions
url(r'^realquestions/$', views.QuestionList.as_view(),
name='realquestion-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/realquestions/$',
views.QuestionList.as_view(), name='concept-realquestion-list'),
# Formulas
url(r'^formulas/$', views.FormulaList.as_view(), name="formula-list"),
url(r'^formulas/(?P<pk>[0-9]+)/$', views.FormulaDetail.as_view(),
name="formula-detail"),
url(r'^formulas/reindex/all', views.reindex_all_formula,
name="formula-reindex-all"),
# FormulaIndex
url(r'^formulaindexes/$', views.FormulaIndexList.as_view(),
name="formula-index-list"),
# Search
url(r'^search/db$', views.search_text_db, name="search_db_text"),
url(r'^fsearch/$', views.search_formula, name="search_formula"),
url(r'^csearch/$', views.search_formula_cluster,
name="search_formula_cluster"),
# url(r'^searchf$', ),
# account
# url(r'^register/$', ),
# url(r'^logout/$', ),
]
urlpatterns += [
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', rest_views.obtain_auth_token),
]
# For assessment
urlpatterns += [
url(r'^check_answer/$', views.check_answer, name='check_answer'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
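# Example (assumed usage elsewhere in the project, not part of this module's
# original code): the named routes above can be reversed, e.g.
#   from django.urls import reverse
#   reverse('topic-detail', kwargs={'pk': 42})  # -> '.../topics/42/'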
| 37.275862 | 85 | 0.640148 | 556 | 4,324 | 4.859712 | 0.197842 | 0.057735 | 0.103627 | 0.014804 | 0.338268 | 0.280533 | 0.137306 | 0.125833 | 0.11695 | 0 | 0 | 0.01252 | 0.150324 | 4,324 | 115 | 86 | 37.6 | 0.722918 | 0.128816 | 0 | 0.028571 | 0 | 0 | 0.363005 | 0.187918 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8088b3c5ba94e3f16d523776ee4f502d91b3b6b5 | 1,243 | py | Python | 44.wildcard-matching.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | 44.wildcard-matching.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | 44.wildcard-matching.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=44 lang=python3
#
# [44] Wildcard Matching
#
class Solution:
    def _consume_seq(self, s: str, p: str, s_i: int, p_i: int):
        """Match literal/'?' pattern chars until the next '*' (or pattern end).
        Returns the advanced (s_i, p_i) on success, (-1, -1) on mismatch."""
while p_i < len(p) and p[p_i] != '*':
if s_i >= len(s) or (p[p_i] != s[s_i] and p[p_i] != '?'):
return -1, -1
s_i, p_i = s_i + 1, p_i + 1
if p_i == len(p) and s_i != len(s): return -1, -1
return s_i, p_i
def isMatch(self, s: str, p: str) -> bool:
if not p:
return not s
s_i, p_i = 0, 0
if p[p_i] != '*' and p[p_i] != '?':
s_i, p_i = self._consume_seq(s, p, s_i, p_i)
if s_i == -1: return False
        # could_skip is True right after a '*': on a mismatch we may slide
        # the text pointer forward instead of failing outright
        could_skip = False
while p_i < len(p):
if p[p_i] == '*':
p_i, could_skip = p_i + 1, True
elif p[p_i] == '?':
s_i, p_i = s_i + 1, p_i + 1
else:
s_i_, p_i_ = self._consume_seq(s, p, s_i, p_i)
if s_i_ == -1:
if could_skip and s_i < len(s) - 1: s_i += 1
else: return False
else:
s_i, p_i, could_skip = s_i_, p_i_, False
return (could_skip and s_i <= len(s)) or s_i == len(s)
| 34.527778 | 69 | 0.436846 | 221 | 1,243 | 2.171946 | 0.167421 | 0.104167 | 0.06875 | 0.083333 | 0.504167 | 0.25 | 0.25 | 0.1625 | 0.1625 | 0.1625 | 0 | 0.027586 | 0.416734 | 1,243 | 35 | 70 | 35.514286 | 0.634483 | 0.046661 | 0 | 0.137931 | 0 | 0 | 0.005089 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8089824c1db000c6c79126935002dddc0b661de7 | 664 | py | Python | Scripts/Utilities/linear_regg.py | aryanmangal769/UGV-DTU_ROS_Stack | 6a00c83d076361bdf171c1ad4ef383ad262da4e6 | [
"BSD-3-Clause"
] | null | null | null | Scripts/Utilities/linear_regg.py | aryanmangal769/UGV-DTU_ROS_Stack | 6a00c83d076361bdf171c1ad4ef383ad262da4e6 | [
"BSD-3-Clause"
] | null | null | null | Scripts/Utilities/linear_regg.py | aryanmangal769/UGV-DTU_ROS_Stack | 6a00c83d076361bdf171c1ad4ef383ad262da4e6 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from fractions import Fraction
if __name__ == '__main__':
    # enter coordinate vectors
    Y = np.array([[-420, -330]]).T
    X = np.array([[300, 0]]).T
    # fit y = m*x + c by least squares (normal equations)
O = np.ones(X.shape)
A = np.append(X,O,axis=1)
A_t = A.T
A_t_dot_A = A_t.dot(A)
A_t_dot_A_inv = np.linalg.inv(A_t_dot_A)
A_t_dot_A_inv_dot_A_t = A_t_dot_A_inv.dot(A_t)
ans = A_t_dot_A_inv_dot_A_t.dot(Y)
m = float(ans[0])
c = float(ans[1])
#print(type(m))
#print(m)
simple_m = Fraction(m).limit_denominator()
simple_c = Fraction(c).limit_denominator()
#np.append(x, y, axis=1)
print("slope m =",simple_m)
print("intercept c =",simple_c)
| 16.6 | 48 | 0.643072 | 133 | 664 | 2.87218 | 0.323308 | 0.062827 | 0.104712 | 0.109948 | 0.185864 | 0.185864 | 0.180628 | 0.180628 | 0.078534 | 0 | 0 | 0.026022 | 0.189759 | 664 | 39 | 49 | 17.025641 | 0.684015 | 0.120482 | 0 | 0 | 0 | 0 | 0.052083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
808a844aeabff3fdc0f7f9b10b9c6a241b07b945 | 2,159 | py | Python | pythia/pyre/inventory/FacilityArrayFacility.py | willic3/pythia | 2657b95a0c07fd3c914ab6b5f7ec89a8edba004c | [
"BSD-3-Clause"
] | 1 | 2015-11-30T08:01:39.000Z | 2015-11-30T08:01:39.000Z | pythia/pyre/inventory/FacilityArrayFacility.py | willic3/pythia | 2657b95a0c07fd3c914ab6b5f7ec89a8edba004c | [
"BSD-3-Clause"
] | 27 | 2018-05-24T18:31:25.000Z | 2021-10-16T03:57:52.000Z | pythia/pyre/inventory/FacilityArrayFacility.py | willic3/pythia | 2657b95a0c07fd3c914ab6b5f7ec89a8edba004c | [
"BSD-3-Clause"
] | 7 | 2019-07-19T02:30:56.000Z | 2021-06-02T22:00:01.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# California Institute of Technology
# (C) 2008 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pythia.pyre.inventory.Facility import Facility
class FacilityArrayFacility(Facility):
def __init__(self, name, itemFactory, **kwds):
Facility.__init__(self, name=name, **kwds)
self.itemFactory = itemFactory
return
def _retrieveComponent(self, instance, componentName):
facilityNames = self._cast(componentName)
facilityOrder = []
        attrs = {}  # renamed from 'dict' to avoid shadowing the built-in
        for index, facilityName in enumerate(facilityNames):
            # Strip leading and trailing whitespace from facility name
            facility = self.itemFactory(facilityName.strip())
            attr = "item%05d" % index
            attrs[attr] = facility
            facilityOrder.append(facilityName.strip())
        from .Inventory import Inventory
        from pythia.pyre.components.Component import Component
        Inventory = type(Inventory)("FacilityArray.Inventory", (Component.Inventory,), attrs)
        attrs = {'Inventory': Inventory}
        FacilityArray = type(Component)("FacilityArray", (Component,), attrs)
fa = FacilityArray(self.name)
fa.Inventory._facilityOrder = facilityOrder
import pythia.pyre.parsing.locators
locator = pythia.pyre.parsing.locators.builtIn()
return fa, locator
def _cast(self, text):
if isinstance(text, str):
if text and text[0] in '[({':
text = text[1:]
if text and text[-1] in '])}':
text = text[:-1]
value = text.split(",")
# allow trailing comma
if len(value) and not value[-1]:
value.pop()
else:
value = text
if isinstance(value, list):
return value
raise TypeError("facility '%s': could not convert '%s' to a list" % (self.name, text))
# end of file
| 29.986111 | 94 | 0.544697 | 198 | 2,159 | 5.878788 | 0.419192 | 0.034364 | 0.024055 | 0.042955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007124 | 0.284854 | 2,159 | 71 | 95 | 30.408451 | 0.746762 | 0.181102 | 0 | 0 | 0 | 0 | 0.061003 | 0.013113 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.102564 | 0 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
808ba59db073ed00f5b7a13b6e51d1825bca7ae9 | 2,052 | py | Python | psearch/scripts/split.py | meddwl/psearch | 58c374bdf6550ab43a8832aeaf9b18d5969640b5 | [
"BSD-3-Clause"
] | 24 | 2018-11-05T10:07:26.000Z | 2022-03-28T06:26:23.000Z | psearch/scripts/split.py | meddwl/psearch | 58c374bdf6550ab43a8832aeaf9b18d5969640b5 | [
"BSD-3-Clause"
] | 4 | 2020-01-03T21:10:16.000Z | 2021-11-04T16:47:55.000Z | psearch/scripts/split.py | meddwl/psearch | 58c374bdf6550ab43a8832aeaf9b18d5969640b5 | [
"BSD-3-Clause"
] | 10 | 2019-11-21T18:48:28.000Z | 2021-08-22T12:19:01.000Z | #!/usr/bin/env python3
# author : Alina Kutlushina
# date : 01.05.2018
# license : BSD-3
#==============================================================================
import sys
import argparse
import pandas as pd
def main(in_fname, out_act_fname, out_inact_fname):
"""
    split a dataset into active and inactive sets by the status column
:param in_fname: input .smi file
:param out_act_fname: path where an active set will be saved
:param out_inact_fname: path where an inactive set will be saved
:return: None
"""
df = pd.read_csv(in_fname, sep='\t', header=None)
df_act = df[df[2] == 'active']
df_act.to_csv(out_act_fname, sep='\t', index=None, header=None)
df_inact = df[df[2] == 'inactive']
df_inact.to_csv(out_inact_fname, sep='\t', index=None, header=None)
sys.stderr.write('actives: %i, inactives: %i.\n' % (df_act.shape[0], df_inact.shape[0]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='split compounds into active and inactive sets '
                                                 'based on the status column')
parser.add_argument('-i', '--in', metavar='input.smi', required=True,
                        help='input SMILES file name. It should contain three columns separated by tabs: '
                             'SMILES, name, activity. No header.')
parser.add_argument('-oa', '--out_act', metavar='active.smi', required=True,
help='output SMILES file name for active compounds.')
parser.add_argument('-oi', '--out_inact', metavar='inactive.smi', required=True,
help='output SMILES file name for inactive compounds.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "in": in_fname = v
if o == "out_act": out_act_fname = v
if o == "out_inact": out_inact_fname = v
main(in_fname=in_fname,
out_act_fname=out_act_fname,
out_inact_fname=out_inact_fname)
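# Example invocation (assumed file layout):
#   python3 split.py -i dataset.smi -oa active.smi -oi inactive.smi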
| 41.04 | 113 | 0.60039 | 278 | 2,052 | 4.226619 | 0.381295 | 0.040851 | 0.05617 | 0.040851 | 0.201702 | 0.181277 | 0.168511 | 0.071489 | 0.071489 | 0 | 0 | 0.009038 | 0.245127 | 2,052 | 49 | 114 | 41.877551 | 0.749516 | 0.209064 | 0 | 0 | 0 | 0 | 0.276623 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
808f76f97cbe057b74c0a81a931896d5d9eb9b7d | 2,084 | py | Python | 1024/cl/spiders/grass.py | wkias/1024 | 501e9cb2563e8dc6cad84e99db2128f2a447af91 | [
"MIT"
] | 2 | 2020-12-02T12:25:52.000Z | 2021-01-08T02:51:54.000Z | 1024/cl/spiders/grass.py | wkias/1024 | 501e9cb2563e8dc6cad84e99db2128f2a447af91 | [
"MIT"
] | null | null | null | 1024/cl/spiders/grass.py | wkias/1024 | 501e9cb2563e8dc6cad84e99db2128f2a447af91 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from ..items import ClItem
from ..settings import META_URL
from ..settings import SELECT
from ..settings import TYPE
from ..settings import DOWNLOAD_HISTORY
class GrassSpider(scrapy.Spider):
name = 'grass'
# allowed_domains = []
start_urls = [META_URL + 'thread.php?fid-' + SELECT + '.html']
def parse(self, response):
if response.url.find('thread') == -1:
item = ClItem()
item['url'] = response.url
item['title'] = response.css('h1::text').get()
item['src'] = response.css('.f14 > img::attr(src)').extract()
            if not item['src']:
                item['src'] = response.css(
                    '.f14 > a > img::attr(src)').extract()
            if not item['src']:
                item['src'] = response.css(
                    '.f14 > span > img::attr(src)').extract()
item['ext_name'] = [i.split('.')[-1] for i in item['src']]
if any(i.find('/') != -1 for i in item['ext_name']):
item['alt'] = [str(i+1) + '.' + 'jpg'
for i in range(len(item['src']))]
else:
item['alt'] = [str(i+1) + '.' + item['ext_name'][i]
for i in range(len(item['src']))]
item['domain'] = META_URL
item['path'] = response.url.replace(META_URL, '')
item['classfication'] = TYPE[SELECT]
# item['attachment'] = response.css('a[href*="download"]::attr(href)').get()
yield item
else:
pages = response.css('.subject::attr(href)').extract()
if pages.__len__() > 0:
pages.reverse()
pages.append(response.css('b+a::attr(href)').get())
self.log(response.css('.subject::text').extract())
for i in pages:
if i not in DOWNLOAD_HISTORY:
yield scrapy.Request(META_URL + i, callback=self.parse)
else:
self.log(i + ' has downloaded')
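# Run sketch (assumed standard Scrapy project layout):
#   scrapy crawl grass -o pages.json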
| 41.68 | 88 | 0.490883 | 240 | 2,084 | 4.1625 | 0.329167 | 0.088088 | 0.03003 | 0.054054 | 0.211211 | 0.144144 | 0.144144 | 0.102102 | 0.102102 | 0.102102 | 0 | 0.011494 | 0.332054 | 2,084 | 49 | 89 | 42.530612 | 0.706178 | 0.056142 | 0 | 0.209302 | 0 | 0 | 0.137035 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.139535 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8091f803a97bac3ff576f5dd377c3775b3de1ebd | 740 | py | Python | Twitoff-01/twitoff/app.py | ivan-mihailov/LS-Unit-3-Sprint-3-Module-1 | 964029740d8db34121f19e5dec4c76c23c256c01 | [
"Apache-2.0"
] | null | null | null | Twitoff-01/twitoff/app.py | ivan-mihailov/LS-Unit-3-Sprint-3-Module-1 | 964029740d8db34121f19e5dec4c76c23c256c01 | [
"Apache-2.0"
] | null | null | null | Twitoff-01/twitoff/app.py | ivan-mihailov/LS-Unit-3-Sprint-3-Module-1 | 964029740d8db34121f19e5dec4c76c23c256c01 | [
"Apache-2.0"
] | null | null | null | import os
from flask import Flask, render_template, request
from .models import db, User
def create_app():
"""Create and configure an instance of the Flask appication."""
app_dir = os.path.dirname(os.path.abspath(__file__))
database = "sqlite:///{}".format(os.path.join(app_dir, "twitoff.sqlite3"))
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = database
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
@app.route('/', methods=["GET", "POST"])
def home():
# if request.form:
# print(request.form)
        users = User.query.all()
        # reuse the queried list instead of hitting the database twice
        return render_template("home.html", title='home', users=users)
return app
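# Local run sketch (an assumption; the original package may use the
# `flask run` CLI instead):
# if __name__ == "__main__":
#     create_app().run(debug=True)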
| 29.6 | 83 | 0.644595 | 94 | 740 | 4.882979 | 0.56383 | 0.039216 | 0.082789 | 0.074074 | 0.100218 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001712 | 0.210811 | 740 | 24 | 84 | 30.833333 | 0.784247 | 0.133784 | 0 | 0 | 0 | 0 | 0.159306 | 0.083596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809321ce7f4ed89b3d9c2cee1b729e5803693f21 | 1,664 | py | Python | locations/spiders/costacoffee_pl.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/costacoffee_pl.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/costacoffee_pl.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | import scrapy
from locations.items import GeojsonPointItem
class CostaCoffeePLSpider(scrapy.Spider):
name = "costacoffee_pl"
item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
allowed_domains = ["api.costacoffee.pl"]
start_urls = ["https://api.costacoffee.pl/api/storelocator/list"]
def parse(self, response):
data = response.json()
for store in data:
properties = {
"name": store["name"],
"street": store["address"],
"city": store["city"],
"postcode": store["postCode"],
"country": "PL",
"addr_full": ", ".join(
filter(
None,
(
store["address"],
store["city"],
store["postCode"],
"PL",
),
),
),
"lat": store["gpsY"],
"lon": store["gpsX"],
"extras": {
"store_type": store["type"],
"delivery": "yes" if store["deliveryAvailable"] else "no",
},
}
# No ref in upstream data, so we just want something as unique as possible
properties["ref"] = "|".join(
(
properties["lat"],
properties["lon"],
properties["name"],
properties["addr_full"],
)
)
yield GeojsonPointItem(**properties)
| 32.627451 | 86 | 0.412861 | 121 | 1,664 | 5.61157 | 0.578512 | 0.057437 | 0.047128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006704 | 0.462139 | 1,664 | 50 | 87 | 33.28 | 0.751955 | 0.043269 | 0 | 0.069767 | 0 | 0 | 0.183019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.186047 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809358564886b7b38cfb4df0981ded339161c3b7 | 7,530 | py | Python | Recurrent Neural Network.py | Sayansree/Recurrent-Neural-Network-from-scrach | 16daa7a203b4558fecbd783d9218929561485bb3 | [
"MIT"
] | null | null | null | Recurrent Neural Network.py | Sayansree/Recurrent-Neural-Network-from-scrach | 16daa7a203b4558fecbd783d9218929561485bb3 | [
"MIT"
] | null | null | null | Recurrent Neural Network.py | Sayansree/Recurrent-Neural-Network-from-scrach | 16daa7a203b4558fecbd783d9218929561485bb3 | [
"MIT"
] | null | null | null | import numpy as np
"""
basic implementation of Recurrent Neural Networks from scratch
to train a model to learn to add any pair of numbers given in binary array format
developer --> sayaneree paria
"""
class RecurrentNeuralNetwork:
def __init__(self,hidden_size=10):
"""hidden_size is number of neurons in hidden layer"""
self.hidden_size=hidden_size
self.activation={"sigmoid":(self.sigmoid,self.sig_grad),
"RELU":(self.RELU,self.RELU_grad),
"tanh":(self.tanh,self.tanh_grad)}
def fit(self,X,Y):
"""input your training dataset
X: input array 3D
        Y: output array 3D
        axis0 - number of data samples
        axis1 - ordered steps (time steps) of data
        axis2 - input array for each step"""
#add a slot for threshold weight in each inputs
X=np.append(X,np.ones((X.shape[0],X.shape[1],1)),axis=2)
# store sizes of datasets
self.input_size=X.shape[2]
self.output_size=Y.shape[2]
self.X=X
self.Y=Y
def tanh(self,x):
"""for hyperbolic tangent activation"""
return np.tanh(x)
def tanh_grad(self,x):
"""gradiant through tanh function"""
return np.minimum(1-self.tanh(x)**2,1e2)
def RELU(self,x):
"""for RELU activation"""
return np.maximum(x,0)
def RELU_grad(self,x):
"""gradient through RELU function"""
return np.sign(x)
def sigmoid(self,x):
"""sigmoid activation"""
return 1/(1+np.exp(-x))
def sig_grad(self,x):
"""gradiant through sigmoid function"""
return x*(1-x)
def train(self,rate=1,activation="sigmoid"):
"""train the model on the dataset provided , rate: learning rate"""
activate,actv_grad=self.activation[activation]
# initialise our weights randomly for hidden and output layers and recursion of previous layers
hidden_weight=2*np.random.random((self.input_size,self.hidden_size))-1
output_weight=2*np.random.random((self.hidden_size,self.output_size))-1
recurent_weight=2*np.random.random((self.hidden_size,self.hidden_size))-1
        # iterate through all the data in the dataset
for i,X1 in enumerate(self.X):
#corosponding output
Y1=self.Y[i]
#lists to store our outputs to help find gradients of all timestep
hidden_layers=list()
output_gradients=list()
#initially we set our feedback vector to zero
hiddenlayer=np.zeros((1,self.hidden_size))
hidden_layers.append(hiddenlayer)
#keep track of error
total_errors=0
# forward propagate in time steps finding output of the RNN
for time,X in enumerate(X1):
                # the hidden state is a function of both the current input and the previous hidden state
                # note: other activations (RELU, tanh) may be used instead, which can affect performance
hiddenlayer= activate(np.dot(X,hidden_weight)+np.dot(hidden_layers[-1],recurent_weight))
outputlayer= activate(np.dot(hiddenlayer,output_weight))
                # calculate the error
error= Y1[time]-outputlayer
total_errors+=np.abs(error[0,0])
#gradient of output layer
outputGradient=error*actv_grad(outputlayer)
#we store the hidden layers and output gradients to calculate the gradients of weight vectors
hidden_layers.append(np.atleast_2d(hiddenlayer))
output_gradients.append(np.atleast_2d(outputGradient))
#initialise all gradients zero
output_weight_gradient=np.zeros_like(output_weight)
hidden_weight_gradient=np.zeros_like(hidden_weight)
recurent_weight_gradient=np.zeros_like(recurent_weight)
            # gradient of future-time costs wrt the current hidden state, carried backwards through time
future_gradients=np.zeros(self.hidden_size)
# iterate in reverse order, backpropagation through time!
for time,X in enumerate(X1[::-1]):
time=X1.shape[0]-time-1
#recursively set current gradients and all future gradients linked to this time step
hidden_layer_gradients=(np.dot(future_gradients,recurent_weight.T)+ np.dot(output_gradients[time],output_weight.T))*actv_grad(hidden_layers[time+1])
#sum of gradients of error in each time step
output_weight_gradient+=hidden_layers[time+1].T.dot(output_gradients[time])
hidden_weight_gradient+=np.atleast_2d(X).T.dot(hidden_layer_gradients)
recurent_weight_gradient+=np.dot(hidden_layers[time].T,hidden_layer_gradients)
#use this in next iteration to set gradients linked to past
future_gradients=hidden_layer_gradients
# update out weights by the learning rate
hidden_weight += rate * hidden_weight_gradient
output_weight+=rate * output_weight_gradient
recurent_weight += rate * recurent_weight_gradient
# print error in intervals
if i %1000==0:
print("iteration: {0}\t\t error: {1}".format(i,total_errors))
#we save our weights
self.hidden_weight=hidden_weight
self.output_weight=output_weight
self.recurent_weight=recurent_weight
def predict(self,X):
"""predict the output of X"""
#add slot for thresholds
X=np.append(X,np.ones((X.shape[0],X.shape[1],1)),axis=2)
output=np.zeros((X.shape[0],X.shape[1],self.output_size))
        # set the feedback vector to zero initially
        prev_hiddenlayer = np.zeros((1, self.hidden_size))
        # iterate through all input data and do the prediction
for j,X2 in enumerate(X):
for time,X1 in enumerate(X2):
hiddenlayer= self.sigmoid(np.dot(X1,self.hidden_weight)+np.dot(prev_hiddenlayer,self.recurent_weight))
outputlayer= self.sigmoid(np.dot(hiddenlayer,self.output_weight))
output[j,time]=outputlayer
prev_hiddenlayer=hiddenlayer
return output
### we train the RNN to learn how to add two numbers
# we generate 10,000 random pairs of numbers whose sum stays below 2^8
max_val = 2**8
a=np.random.randint(0,high=max_val/2,size=(10000,2,1),dtype=np.uint8)
#convert to binary format
b= np.transpose(np.unpackbits(a, axis=2),(2,1,0))
# reverse order to keep the LSB (least significant bit) first
b=b[::-1].transpose((2,0,1))
#sum the pairs with LSB first
sum=np.atleast_3d(np.unpackbits(np.sum(a,axis=1,dtype=np.uint8),axis=1).T[::-1].T)
# create an instance of our model; we use 8 hidden neurons (adjust to requirements)
rnn=RecurrentNeuralNetwork(hidden_size=8)
#train on first 9980 data
rnn.fit(b[:9980],sum[:9980])
rnn.train(rate=1)
#print prediction for last 20 row wise
print(np.round(rnn.predict(b[9980:])).astype(int).transpose(2,0,1))
#and print the actual sums
print(sum[9980:].transpose(2,0,1))
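# Optional helper (a sketch; assumes 8-bit, LSB-first rows like those above)
# to turn a predicted bit row back into an integer for easier inspection:
def bits_to_int(bits):
    """Convert an 8-element LSB-first array of 0/1 bits to a Python int."""
    return int(np.packbits(np.asarray(bits, dtype=np.uint8)[::-1])[0])
# e.g. bits_to_int(np.round(rnn.predict(b[9980:]))[0, :, 0]) gives the
# predicted sum for the first held-out pair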
| 41.147541 | 165 | 0.619389 | 1,012 | 7,530 | 4.501976 | 0.238142 | 0.026339 | 0.027656 | 0.005268 | 0.098771 | 0.065189 | 0.04741 | 0.032924 | 0.032924 | 0.015803 | 0 | 0.024004 | 0.286321 | 7,530 | 182 | 166 | 41.373626 | 0.823781 | 0.290704 | 0 | 0.023529 | 0 | 0 | 0.01052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.011765 | 0 | 0.223529 | 0.035294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8093a2725077b2b49d9c6858f567993bef3daea9 | 572 | py | Python | Asyncio/asyncio_ensure_future.py | xlui/PythonExamples | 0389efb84e01dc1310bb2bab7aa2433c0e1b45c4 | [
"MIT"
] | null | null | null | Asyncio/asyncio_ensure_future.py | xlui/PythonExamples | 0389efb84e01dc1310bb2bab7aa2433c0e1b45c4 | [
"MIT"
] | null | null | null | Asyncio/asyncio_ensure_future.py | xlui/PythonExamples | 0389efb84e01dc1310bb2bab7aa2433c0e1b45c4 | [
"MIT"
] | null | null | null | # asyncio_ensure_future.py
import asyncio
async def wrapped():
print('now in function wrapped')
return 'result'
async def inner(task):
print('now in function inner')
print('inner: waiting for {!r}'.format(task))
ret = await task
print('inner: task return: {}'.format(ret))
async def outer():
print('creating task')
task = asyncio.ensure_future(wrapped())
print('waiting for inner')
await inner(task)
print('inner returned')
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(outer())
event_loop.close()
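# Expected output (the exact Task repr varies by Python version):
#   creating task
#   waiting for inner
#   now in function inner
#   inner: waiting for <Task pending coro=<wrapped() ...>>
#   now in function wrapped
#   inner: task return: result
#   inner returned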
| 20.428571 | 49 | 0.685315 | 77 | 572 | 4.961039 | 0.415584 | 0.094241 | 0.099476 | 0.094241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 572 | 27 | 50 | 21.185185 | 0.816239 | 0.041958 | 0 | 0 | 0 | 0 | 0.254579 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.111111 | 0.388889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80940e10972b61cf1900104bd927a2163e4fae1d | 1,737 | py | Python | tests/test_db_operations.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | tests/test_db_operations.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | tests/test_db_operations.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | from psycopg2 import pool
import psycopg2
import psycopg2.extras
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
self.connection_pool = pool.ThreadedConnectionPool(1, 2, database='test', user='postgresql', password='test123',
host='localhost')
def tearDown(self):
self.connection_pool.closeall()
def test_a_ThreadedPool_Connection(self):
self.assertEqual(self.connection_pool.closed, False)
self.assertEqual(self.connection_pool.maxconn, 2)
self.assertEqual(self.connection_pool.minconn, 1)
def test_b_test_Write(self):
connection_1 = self.connection_pool.getconn()
query = "INSERT INTO metrics (url, http_status,elapsed_time, day, month, year, time,pattern_verified) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING RETURNING *"
params = ("http://test.com", '200', '0.2', '02', '10', '2021','22:21:36.168319', 'True')
cursor_1 = connection_1.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor_1.execute(query, params)
connection_1.commit()
inserted_entry = cursor_1.fetchone()
self.assertIsNotNone(inserted_entry)
cursor_1.close()
self.assertEqual(cursor_1.closed, True)
def test_c_test_Read(self):
connection_2 = self.connection_pool.getconn()
cursor_2 = connection_2.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor_2.execute("select * from metrics")
records = cursor_2.fetchmany(1)
cursor_2.close()
self.assertIsNotNone(records)
self.assertEqual(cursor_2.closed, True)
if __name__ == "__main__":
unittest.main()
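# Run sketch (assumes a local PostgreSQL with database 'test', user
# 'postgresql', password 'test123', and a matching 'metrics' table):
#   python -m unittest tests.test_db_operations -v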
| 39.477273 | 185 | 0.666091 | 209 | 1,737 | 5.315789 | 0.416268 | 0.113411 | 0.113411 | 0.018002 | 0.191719 | 0.10261 | 0.10261 | 0 | 0 | 0 | 0 | 0.038971 | 0.217041 | 1,737 | 43 | 186 | 40.395349 | 0.777941 | 0 | 0 | 0 | 0 | 0.028571 | 0.157743 | 0.027058 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.142857 | false | 0.028571 | 0.114286 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8095fed737853a16f266e29c70aa1c6f509f7dd8 | 967 | py | Python | test-framework/test-suites/integration/tests/report/test_report_discovery.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | [
"BSD-3-Clause"
] | null | null | null | test-framework/test-suites/integration/tests/report/test_report_discovery.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | [
"BSD-3-Clause"
] | null | null | null | test-framework/test-suites/integration/tests/report/test_report_discovery.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
import pytest
@pytest.mark.usefixtures("revert_discovery")
class TestReportDiscovery:
def test_report_daemon_not_running(self, host):
"Test the output when the discovery daemon is not running"
# Make sure discovery isn't running
result = host.run("stack disable discovery")
assert result.rc == 0
assert result.stdout == "Discovery daemon has stopped\n"
        # See what report says
result = host.run("stack report discovery")
assert result.rc == 0
assert result.stdout == "Discovery daemon is stopped\n"
def test_report_daemon_running(self, host):
"Test the output when the discovery daemon is running"
# We gotta start discovery
result = host.run("stack enable discovery")
assert result.rc == 0
assert result.stdout == "Discovery daemon has started\n"
# See what report says
result = host.run("stack report discovery")
assert result.rc == 0
assert result.stdout == "Discovery daemon is running\n"
| 28.441176 | 60 | 0.741468 | 137 | 967 | 5.175182 | 0.335766 | 0.135402 | 0.09591 | 0.101551 | 0.561354 | 0.561354 | 0.561354 | 0.561354 | 0.561354 | 0.561354 | 0 | 0.004988 | 0.170631 | 967 | 33 | 61 | 29.30303 | 0.879052 | 0.219235 | 0 | 0.285714 | 0 | 0 | 0.384437 | 0 | 0 | 0 | 0 | 0 | 0.380952 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809668ea6678e6fc0ac8190a7a64ddbf086a2f6c | 862 | py | Python | Python/magic_8_ball.py | rockchipgh/Hacktoberfest2020-1 | 1d1e28614aa16c1bac2560b0250ce0014e48241d | [
"MIT"
] | null | null | null | Python/magic_8_ball.py | rockchipgh/Hacktoberfest2020-1 | 1d1e28614aa16c1bac2560b0250ce0014e48241d | [
"MIT"
] | null | null | null | Python/magic_8_ball.py | rockchipgh/Hacktoberfest2020-1 | 1d1e28614aa16c1bac2560b0250ce0014e48241d | [
"MIT"
] | null | null | null | #*****MAGIC 8 BALL CODE*****
import sys
import random
while True:  # loop until the user submits an empty question
question = input("ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print ("Good:)")
elif answers == 2:
print ("Certainly:)")
elif answers == 3:
print ("You may rely on it:)")
elif answers == 4:
print ("Ask again later:)")
elif answers == 5:
print ("Concentrate and ask again:)")
elif answers == 6:
print ("Vague, try again:)")
elif answers == 7:
print ("Nope:( If that's what you were looking for then, Kudos:)")
elif answers == 8:
print ("Oops, it's a No:( If that's what you were looking for then, Kudos:)")
| 22.684211 | 88 | 0.512761 | 109 | 862 | 4.055046 | 0.513761 | 0.199095 | 0.045249 | 0.049774 | 0.167421 | 0.167421 | 0.167421 | 0.167421 | 0.167421 | 0.167421 | 0 | 0.021467 | 0.351508 | 862 | 37 | 89 | 23.297297 | 0.769231 | 0.031323 | 0 | 0 | 0 | 0.041667 | 0.332134 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8098871e0930a689062b0ccaa88626806d0cc195 | 3,372 | py | Python | venus/venus/test_venus.py | FrederichRiver/neutrino2 | 65e158f0d64046628cf2d1d52bdb3161489c7595 | [
"BSD-3-Clause"
] | null | null | null | venus/venus/test_venus.py | FrederichRiver/neutrino2 | 65e158f0d64046628cf2d1d52bdb3161489c7595 | [
"BSD-3-Clause"
] | null | null | null | venus/venus/test_venus.py | FrederichRiver/neutrino2 | 65e158f0d64046628cf2d1d52bdb3161489c7595 | [
"BSD-3-Clause"
] | null | null | null | from stock_base import StockEventBase, dataLine
def unit_test_NoneHeaderError():
    # NOTE: NoneHeaderError is never imported above; import it from its
    # defining module before enabling this test (it stays commented out
    # in __main__ below).
    try:
        raise NoneHeaderError('Test!')
except NoneHeaderError as e:
print(e)
def unit_test_stockEventBase():
from dev_global.env import GLOBAL_HEADER
import pandas as pd
event = StockEventBase(GLOBAL_HEADER)
try:
print(event)
event.update_date_time()
event.get_all_stock_list()
except Exception as e:
print(e)
def unit_test_StockList():
from stock_base import StockList
event = StockList()
event.get_sh_stock()
stock_list = event.get_sz_stock()
print(stock_list[0], stock_list[-1])
def unit_test_stock_interest():
from dev_global.env import GLOBAL_HEADER
from stock_interest import EventInterest
import numpy as np
event = EventInterest(GLOBAL_HEADER)
event.get_all_stock_list()
for stock_code in event.stock_list:
try:
print(stock_code)
tab = event.resolve_table(stock_code)
tab.replace(['--'], np.nan, inplace=True)
tab.to_sql(
'test_interest', event.mysql.engine.connect(),
if_exists="append", index=True
)
except Exception:
print(f"Error while recording interest of {stock_code}")
def unit_test_dataline():
import pandas as pd
df = pd.DataFrame({
'id': [1, 2, 3, 4, 5, 6],
'name': ['Alice', 'Bob', 'Cindy', 'Eric', 'Helen', 'Grace'],
'math': [90, 89, 99, 78, 97, 93],
'english': [89, 94, 80, 94, 94, 90]})
dt = dataLine('test_interest')
sql_list = dt.insert_sql(df)
sql_list = dt.update_sql(df, ['id', 'name'])
for sql in sql_list:
print(sql)
def unit_test_financeReport():
from dev_global.env import GLOBAL_HEADER
from finance_report import EventFinanceReport
event = EventFinanceReport(GLOBAL_HEADER)
event.update_balance_sheet("SH601818")
def unit_test_stockcode():
from venus.stock_base import StockCodeFormat
event = StockCodeFormat()
call_result = event('600000.SH')
func_result = event.net_ease_code('SH601818')
print(call_result)
print(func_result)
def unit_test_absolute_path():
from venus.stock_manager import absolute_path
x = 'path/path2/path3'
y = 'path/path2/path3/'
z = 'path4/file'
z2 = '/path4/file'
print(absolute_path(x,z))
print(absolute_path(x,z2))
print(absolute_path(y,z))
print(absolute_path(y,z2))
def unit_test_stockBase():
from venus.stock_base import StockBase
from polaris.mysql8 import GLOBAL_HEADER
event = StockBase(GLOBAL_HEADER)
result = event.get_all_stock_list()
print(result)
def unit_test_stock_manager():
from polaris.mysql8 import GLOBAL_HEADER
from venus.stock_manager2 import EventTradeDataManager
from venus.stock_base2 import resolve_stock_list
stock_list = resolve_stock_list('totalstocklist')
event = EventTradeDataManager(GLOBAL_HEADER)
result = event.get_trade_data('SH600000', event.today)
print(result)
if __name__ == "__main__":
# unit_test_NoneHeaderError()
# unit_test_stockEventBase()
# unit_test_StockList()
# unit_test_stock_interest()
# unit_test_dataline()
# unit_test_financeReport()
# unit_test_stockBase()
unit_test_stock_manager()
| 28.576271 | 68 | 0.673191 | 430 | 3,372 | 4.990698 | 0.3 | 0.067102 | 0.051258 | 0.022367 | 0.173346 | 0.102516 | 0.069897 | 0.035415 | 0 | 0 | 0 | 0.026417 | 0.225386 | 3,372 | 117 | 69 | 28.820513 | 0.795176 | 0.051008 | 0 | 0.179775 | 0 | 0 | 0.076441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11236 | false | 0 | 0.191011 | 0 | 0.303371 | 0.168539 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8099edc75a8e1289de2d8bd7684b17513889d966 | 7,592 | py | Python | io_mesh_urho/utils.py | practicing01/Urho3D-Blender | 820f03c34adda7594aa8ebc3f95cd71382a51528 | [
"Unlicense"
] | null | null | null | io_mesh_urho/utils.py | practicing01/Urho3D-Blender | 820f03c34adda7594aa8ebc3f95cd71382a51528 | [
"Unlicense"
] | null | null | null | io_mesh_urho/utils.py | practicing01/Urho3D-Blender | 820f03c34adda7594aa8ebc3f95cd71382a51528 | [
"Unlicense"
] | null | null | null |
#
# This script is licensed as public domain.
#
# http://docs.python.org/2/library/struct.html
from xml.etree import ElementTree as ET
from xml.dom import minidom
import os
import struct
import array
import logging
log = logging.getLogger("ExportLogger")
def enum(**enums):
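""" builds a simple enum-like class from keyword attributes (pre-enum-module idiom) """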
return type('Enum', (), enums)
PathType = enum(
ROOT = "ROOT-",
MODELS = "MODE-",
ANIMATIONS = "ANIM-",
TRIGGERS = "TRIG-",
MATERIALS = "MATE-",
TECHNIQUES = "TECH-",
TEXTURES = "TEXT-",
MATLIST = "MATL-",
OBJECTS = "OBJE-",
SCENES = "SCEN-")
# Options for file utils
class FOptions:
def __init__(self):
self.useSubDirs = True
self.fileOverwrite = False
self.paths = {}
self.exts = {
PathType.MODELS : "mdl",
PathType.ANIMATIONS : "ani",
PathType.TRIGGERS : "xml",
PathType.MATERIALS : "xml",
PathType.TECHNIQUES : "xml",
PathType.TEXTURES : "png",
PathType.MATLIST : "txt",
PathType.OBJECTS : "xml",
PathType.SCENES : "xml"
}
self.preserveExtTemp = False
#--------------------
# Errors container
#--------------------
class ErrorsMem:
def __init__(self):
self.errors = {}
self.seconds = []
def Get(self, name, defaultValue = None):
try:
return self.errors[name]
except KeyError:
if defaultValue is not None:
self.errors[name] = defaultValue
return defaultValue
def Delete(self, name):
if name in self.errors:
del self.errors[name]
def Cleanup(self):
emptyList = []
for name in self.errors.keys():
try:
if not self.errors[name]:
emptyList.append(name)
except TypeError:
pass
for name in emptyList:
del self.errors[name]
def Names(self):
return self.errors.keys()
def Second(self, index):
try:
return self.seconds[index]
except IndexError:
return None
def SecondIndex(self, second):
try:
return self.seconds.index(second)
except ValueError:
index = len(self.seconds)
self.seconds.append(second)
return index
def Clear(self):
self.errors.clear()
self.seconds.clear()
#--------------------
# File utilities
#--------------------
# Get a file path for the object 'name' in a folder of type 'pathType'
def GetFilepath(pathType, name, fOptions):
# Get the root path
rootPath = fOptions.paths[PathType.ROOT]
# Append the relative path to get the full path
fullPath = rootPath
if fOptions.useSubDirs:
fullPath = os.path.join(fullPath, fOptions.paths[pathType])
# Compose filename
filename = name
if type(filename) is list or type(filename) is tuple:
filename = os.path.sep.join(filename)
# Add extension to the filename, if present we can preserve the extension
ext = fOptions.exts[pathType]
if ext and (not fOptions.preserveExtTemp or os.path.extsep not in filename):
filename += os.path.extsep + ext
#filename = bpy.path.ensure_ext(filename, ".mdl")
fOptions.preserveExtTemp = False
# Replace all characters besides A-Z, a-z, 0-9 with '_'
#filename = bpy.path.clean_name(filename)
# Compose the full file path
fileFullPath = os.path.join(fullPath, filename)
# Get the Urho path (relative to root)
fileUrhoPath = os.path.relpath(fileFullPath, rootPath)
fileUrhoPath = fileUrhoPath.replace(os.path.sep, '/')
# Return full file path and relative file path
return (fileFullPath, fileUrhoPath)
# Check if 'filepath' is valid
def CheckFilepath(fileFullPaths, fOptions):
fileFullPath = fileFullPaths
if type(fileFullPaths) is tuple:
fileFullPath = fileFullPaths[0]
# Create the full path if missing
fullPath = os.path.dirname(fileFullPath)
if not os.path.isdir(fullPath):
try:
os.makedirs(fullPath)
log.info( "Created path {:s}".format(fullPath) )
except Exception as e:
log.error("Cannot create path {:s} {:s}".format(fullPath, e))
if os.path.exists(fileFullPath) and not fOptions.fileOverwrite:
log.error( "File already exists {:s}".format(fileFullPath) )
return False
return True
#--------------------
# XML formatters
#--------------------
def BoolToString(value):
return "{}".format(value)
def FloatToString(value):
return "{:g}".format(value)
def Vector3ToString(vector):
return "{:g} {:g} {:g}".format(vector[0], vector[1], vector[2])
def Vector4ToString(vector):
return "{:g} {:g} {:g} {:g}".format(vector[0], vector[1], vector[2], vector[3])
def XmlToPrettyString(elem):
rough = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough)
pretty = reparsed.toprettyxml(indent="\t")
i = pretty.rfind("?>")
if i >= 0:
pretty = pretty[i+2:]
return pretty.strip()
#--------------------
# XML writers
#--------------------
# Write XML to a text file
def WriteXmlFile(xmlContent, filepath, fOptions):
try:
file = open(filepath, "w")
except Exception as e:
log.error("Cannot open file {:s} {:s}".format(filepath, e))
return
try:
file.write(XmlToPrettyString(xmlContent))
except Exception as e:
log.error("Cannot write to file {:s} {:s}".format(filepath, e))
file.close()
#--------------------
# Binary writers
#--------------------
class BinaryFileWriter:
# We try to write the file with a single API call to avoid
# the Editor crashing while reading a not completed file.
# We set the buffer to 1Mb (if unspecified is 64Kb, and it is
# 8Kb with multiple file.write calls)
# Constructor.
def __init__(self):
self.filename = None
self.buffer = None
# Open file stream.
def open(self, filename):
self.filename = filename
self.buffer = array.array('B')
return True
def close(self):
try:
file = open(self.filename, "wb", 1024 * 1024)
except Exception as e:
log.error("Cannot open file {:s} {:s}".format(self.filename, e))
return
try:
self.buffer.tofile(file)
except Exception as e:
log.error("Cannot write to file {:s} {:s}".format(self.filename, e))
file.close()
# Writes an ASCII string without terminator
def writeAsciiStr(self, v):
self.buffer.extend(bytes(v, "ascii"))
# Writes a 32 bits unsigned int
def writeUInt(self, v):
self.buffer.extend(struct.pack("<I", v))
# Writes a 16 bits unsigned int
def writeUShort(self, v):
self.buffer.extend(struct.pack("<H", v))
# Writes one 8 bits unsigned byte
def writeUByte(self, v):
self.buffer.extend(struct.pack("<B", v))
# Writes four 32 bits floats .w .x .y .z
def writeQuaternion(self, v):
self.buffer.extend(struct.pack("<4f", v.w, v.x, v.y, v.z))
# Writes three 32 bits floats .x .y .z
def writeVector3(self, v):
self.buffer.extend(struct.pack("<3f", v.x, v.y, v.z))
# Writes a 32 bits float
def writeFloat(self, v):
self.buffer.extend(struct.pack("<f", v))
| 27.607273 | 83 | 0.576264 | 890 | 7,592 | 4.898876 | 0.291011 | 0.022936 | 0.01445 | 0.024083 | 0.155963 | 0.124771 | 0.120642 | 0.059174 | 0.059174 | 0.059174 | 0 | 0.007742 | 0.285432 | 7,592 | 274 | 84 | 27.708029 | 0.795945 | 0.18638 | 0 | 0.147929 | 0 | 0 | 0.056462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16568 | false | 0.005917 | 0.035503 | 0.035503 | 0.331361 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809cbc92d834b903ea9a7f231c4069974f14b439 | 793 | py | Python | 751_ConcatenationCoincidence.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 751_ConcatenationCoincidence.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 751_ConcatenationCoincidence.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | """
Joe Tacheron
difficulty: TBD
runtime: instant
answer: 2.223561019313554106173177
***
751 Concatenation Coincidence
Find the only value of theta for which the concatenated sequence equals theta. Give your answer rounded to 24 places after the decimal point.
"""
from math import floor
from decimal import getcontext, Decimal as D
P = 24 # precision
getcontext().prec = P+1
def concat(theta):
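# Build tau(theta) per the problem statement: b0 = theta,
# b_{n+1} = floor(b_n) * (b_n - floor(b_n) + 1), a_n = floor(b_n),
# and tau is the Decimal "a0.a1a2a3...".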
a = [floor(theta)]
b = [theta]
for _ in range(P+1):
b.append(floor(b[-1])*(b[-1]-floor(b[-1])+1))
a.append(floor(b[-1]))
tau = D(str(a[0]) + "." + "".join(str(i) for i in a[1:]))
return tau
assert str(concat(D('2.956938891377988'))).startswith('2.3581321345589')
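# Fixed-point iteration: start from theta = 2 and repeatedly replace theta
# with concat(theta) until the two agree at the working precision.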
theta = D(2)
tau = concat(theta)
while theta != tau:
theta = tau
tau = concat(theta)
print(str(round(tau, P))) | 19.341463 | 141 | 0.682219 | 126 | 793 | 4.285714 | 0.5 | 0.014815 | 0.038889 | 0.048148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108271 | 0.161412 | 793 | 41 | 142 | 19.341463 | 0.703759 | 0.351829 | 0 | 0.105263 | 0 | 0 | 0.064202 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809d6db57d7ccbeed3286156e788d7b40de4e64f | 2,991 | py | Python | apps/cuenta/views.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | apps/cuenta/views.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | apps/cuenta/views.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | import json
from datetime import datetime, timedelta
from collections import defaultdict
from django.shortcuts import redirect
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.renderers import TemplateHTMLRenderer
from api.serializers import CuentaSerializer
from apps.cuenta.models import Cuenta
from apps.grupo.models import Grupo
from apps.calendarioPago.models import CalendarioPago
from apps.transaccion.models import Transaccion
# Create your views here.
class CuentaListar(generics.ListAPIView):
serializer_class = CuentaSerializer
renderer_classes = [TemplateHTMLRenderer]
template_name = 'cuenta/cuenta_listar.html'
def get(self, request, *args, **kwargs):
grupos = Grupo.objects.all()
groups = {}
groups['grupos'] = [{"grupo_id": g.id} for g in grupos]
for c in groups['grupos']:
counts = Cuenta.objects.filter(grupo_id=c['grupo_id'])
c['cuentas'] = counts.values()
for cuentas in groups['grupos']:
for cuenta in cuentas['cuentas']:
calendario = CalendarioPago.objects.filter(cuenta_id=cuenta['id'])
cuenta['calendarioPagos'] = calendario.values()
pagos = Transaccion.objects.filter(cuenta_id=cuenta['id'])
cuenta['pagos'] = pagos.values()
return Response({'groups': groups})
class CuentaCreate(generics.CreateAPIView):
serializer_class = CuentaSerializer
renderer_classes = [TemplateHTMLRenderer]
template_name = 'cuenta/cuenta_crear.html'
def get(self, request):
queryset = Cuenta.objects.all()
serializer = CuentaSerializer(queryset, many=True)
grupos = Grupo.objects.all()
return Response({'serializer': serializer.data, 'grupos': grupos})
def post(self, request):
serializer = CuentaSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
fecha_inicio = datetime.now()
num_pagos = 4
fecha_siguiente = fecha_inicio
for pago in range(1, num_pagos + 1, 1):
fecha_siguiente += timedelta(days=7)
if fecha_siguiente.weekday() == 5:
fecha_siguiente += timedelta(days=2)
if fecha_siguiente.weekday() == 6:
fecha_siguiente += timedelta(days=1)
CalendarioPago.objects.create(
cuenta_id=Cuenta.objects.get(id=request.data['id']),
num_pago=pago,
monto=float(request.data['monto']) / num_pagos,
fecha_pago=fecha_siguiente,
estatus='PENDIENTE'
)
return redirect('cuenta_listar')
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 33.606742 | 82 | 0.63223 | 311 | 2,991 | 5.961415 | 0.321543 | 0.052859 | 0.037756 | 0.043689 | 0.157497 | 0.134844 | 0.134844 | 0.097087 | 0.097087 | 0.097087 | 0 | 0.005991 | 0.27449 | 2,991 | 88 | 83 | 33.988636 | 0.848387 | 0.00769 | 0 | 0.089552 | 0 | 0 | 0.057991 | 0.016521 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.19403 | 0 | 0.41791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
809dce66f1ae4ba19a16cb38353f61a408805045 | 2,651 | py | Python | BioClients/pubtator/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 10 | 2020-05-26T07:29:14.000Z | 2021-12-06T21:33:40.000Z | BioClients/pubtator/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 1 | 2021-10-05T12:25:30.000Z | 2021-10-05T17:05:56.000Z | BioClients/pubtator/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 2 | 2021-03-16T03:20:24.000Z | 2021-08-08T20:17:10.000Z | #!/usr/bin/env python3
"""
Pubtator REST API client
https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/RESTfulAPIs.html
Formats: JSON, PubTator, BioC.
Nomenclatures:
Gene : NCBI Gene
e.g. https://www.ncbi.nlm.nih.gov/sites/entrez?db=gene&term=145226
Disease : MEDIC (CTD, CTD_diseases.csv)
e.g. http://ctdbase.org/basicQuery.go?bqCat=disease&bq=C537775
Chemical : MESH
e.g. http://www.nlm.nih.gov/cgi/mesh/2014/MB_cgi?field=uid&term=D000596
Species : NCBI Taxonomy
e.g. https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?name=10090
Mutation : tmVar
https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/PubTator/tutorial/tmVar.html
NOTE that the API does NOT provide keyword search capability like the
webapp https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/PubTator/index.cgi
"""
import sys,os,time,json,argparse,re,logging
#
from .. import pubtator
#
API_HOST="www.ncbi.nlm.nih.gov"
API_BASE_PATH="/CBBresearch/Lu/Demo/RESTful/tmTool.cgi"
#
#############################################################################
if __name__=='__main__':
parser = argparse.ArgumentParser(description='PubTator REST API client', epilog='Reports PubMed NER annotations for specified PMID[s].')
ops=['get_annotations']
modes = ['Gene', 'Chemical', 'BioConcept']
parser.add_argument("op", choices=ops, help="operation")
parser.add_argument("--mode", choices=modes, help='mode', default='BioConcept')
parser.add_argument("--ids", help="PubMed IDs, comma-separated (ex:25533513)")
parser.add_argument("--i", dest="ifile", help="input file, PubMed IDs")
parser.add_argument("--nmax", help="list: max to return")
parser.add_argument("--api_host", default=API_HOST)
parser.add_argument("--api_base_path", default=API_BASE_PATH)
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("-v", "--verbose", default=0, action="count")
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
BASE_URL='https://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w+") if args.ofile else sys.stdout
ids = []
if args.ifile:
fin = open(args.ifile)
while True:
line = fin.readline()
if not line: break
ids.append(line.rstrip())
logging.info('Input IDs: %d'%(len(ids)))
fin.close()
elif args.ids:
ids = re.split(r'[\s,]+', args.ids.strip())
if args.op == 'get_annotations':
if not ids: logging.error('Input PMIDs required.')
pubtator.Utils.GetAnnotations(BASE_URL, args.mode, ids, fout)
else:
logging.error('Invalid operation: {0}'.format(args.op))
| 38.42029 | 138 | 0.692569 | 386 | 2,651 | 4.663212 | 0.450777 | 0.045 | 0.085 | 0.043333 | 0.106667 | 0.097778 | 0.097778 | 0.097778 | 0.072222 | 0.051111 | 0 | 0.016553 | 0.111279 | 2,651 | 68 | 139 | 38.985294 | 0.747453 | 0.296492 | 0 | 0 | 0 | 0 | 0.273495 | 0.036016 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.054054 | 0 | 0.054054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80a1edfb39244009248c251e763cdd1deed6666f | 925 | py | Python | 2. Programming Fundamentals With Python (May 2021)/18. Mid Exam Preparation/More Exercises/02_array_modifier.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 2. Programming Fundamentals With Python (May 2021)/18. Mid Exam Preparation/More Exercises/02_array_modifier.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 2. Programming Fundamentals With Python (May 2021)/18. Mid Exam Preparation/More Exercises/02_array_modifier.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | class Modifier:
def __init__(self, lst):
self.lst = lst
def swap(self, index_1, index_2):
self.lst[index_1], self.lst[index_2] = self.lst[index_2], self.lst[index_1]
def multiply(self, index_1, index_2):
self.lst[index_1] = int(self.lst[index_1]) * int(self.lst[index_2])
def decrease(self):
self.lst = [int(x) - 1 for x in self.lst]
initial_list = input().split()
command = input()
modifier = Modifier(initial_list)
while not command == "end":
cmd = command.split()[0]
if cmd == "swap":
idx_1 = int(command.split()[1])
idx_2 = int(command.split()[2])
modifier.swap(idx_1, idx_2)
elif cmd == "multiply":
idx_1 = int(command.split()[1])
idx_2 = int(command.split()[2])
modifier.multiply(idx_1, idx_2)
elif cmd == "decrease":
modifier.decrease()
command = input()
print(*modifier.lst, sep=", ")
| 26.428571 | 83 | 0.597838 | 137 | 925 | 3.854015 | 0.226277 | 0.145833 | 0.159091 | 0.098485 | 0.481061 | 0.481061 | 0.422348 | 0.350379 | 0.291667 | 0.181818 | 0 | 0.035511 | 0.238919 | 925 | 34 | 84 | 27.205882 | 0.714489 | 0 | 0 | 0.230769 | 0 | 0 | 0.027027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.192308 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80a2a8d0820253e95e290a620678375fc27af9cc | 6,023 | py | Python | undeployed/subjects/chunking/chunk_bundle.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | undeployed/subjects/chunking/chunk_bundle.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | undeployed/subjects/chunking/chunk_bundle.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z | __author__ = 'jwely'
import numpy
import os
from chunk import chunk
# from dnppy import raster # please see chunk_bundle.read() for dnppy.raster import
class chunk_bundle():
"""
Creates a chunk bundle object.
it can be used to pass smaller pieces of raster data to complex functions
and reduce memory consumption in those functions.
Presently, chunks are not saved individually, but are always bundled to form the
undivided raster image. This allows chunks to be individually passed through more
complex processing tasks and then re-assembled. They can be passed sequentially to reduce
memory consumption, or in parallel to increase performance where memory isn't as
limited (for suitable tasks).
In the future, writing chunks to disk and dropping them from memory may be a good
idea to truly maximize the data volume that a limited memory space can handle.
NOTE:
For intended functionality, this module uses a dnppy wrapper of the
arcpy.RasterToNumPyArray function that returns a numpy array and a metadata object.
This module SHOULD allow non-arcmap users to import and export images without
geospatial metadata associated with them. That requires the simple CV python module.
"""
def __init__(self, rasterpath, num_chunks = 0, chunk_list = [],
metadata = None, force_scv = False):
"""
Creates a chunk bundle.
Two probable use cases:
1) loading raster to split into smaller chunks with:
inchunks = chunk_bundle(rasterpath, num_chunks = #)
inchunks.read()
2) building new chunk_bundle with processed data, passing on old chunks metadata:
outchunks = chunk_bundle(rasterpath,
chunk_list = [chunk1, chunk2,...],
metadata = metadata)
outchunks.write(rasterpath)
"""
self.rasterpath = rasterpath # full filepath to raster location
self.num_chunks = num_chunks # number of chunks to subdivide or construct this into
self.chunk_list = chunk_list # list of chunk objects (constituent chunks)
self.metadata = metadata # raster metadata object for this bundle
self.force_scv = force_scv # forces simple CV module to be used instead of dnppy.
# good for machines without arcmap installed.
return
def __getitem__(self, index):
""" allows builtin __getitem__ to be used to get chunks by their integer ID numbers """
for chunk_obj in self.chunk_list:
if chunk_obj.index == index:
return chunk_obj.data
else: raise Exception("No chunk with chunk_id = {0}".format(index))
def __setitem__(self,index, item):
""" allows chunk data to be altered easily from the chunk bundle"""
for chunk_obj in self.chunk_list:
if chunk_obj.index == index:
chunk_obj.data = item
return
raise Exception("No chunk with chunk_id = {0}".format(index))
def _assemble_chunks(self):
""" stitches constituent chunks back together into one numpy array """
# stitch chunks together
if self.num_chunks == 1:
bundle_data = self[0]
else:
# concatenate the first two chunks
bundle_data = numpy.concatenate((self[0], self[1]), axis = 1)
# concatenate the rest of them
for i in range(2,len(self.chunk_list)):
bundle_data = numpy.concatenate((bundle_data, self[i]), axis = 1)
return bundle_data
def read(self):
""" loads a raster image and splits it into roughly equal width vertical slices"""
print("Loading input raster {0} and splitting into {1} chunks!".format(
os.path.basename(self.rasterpath), self.num_chunks))
if self.num_chunks <1:
raise Exception("Cannot split into any fewer than 1 chunk!")
# loads entire raster as numpy array with metadata object
if not self.force_scv:
from dnppy import raster
data, self.metadata = raster.to_numpy(self.rasterpath)
# uses the simpleCV module to import raster without metadata
else:
raise NotImplementedError("simple CV raster import is not implemented yet")
# split the data and add new chunks to this raster
ys, xs = data.shape
width = xs / float(self.num_chunks)
for c in range(self.num_chunks):
chunk_data = data[:, int(c * width):int((c+1) * width)]
new_chunk = chunk(c, chunk_data)
self.chunk_list.append(new_chunk)
del data
return
def write(self, rasterpath):
"""
writes the chunk_bundle to its rasterpath """
# write with metadata using dnppy and arcpy.
if self.metadata and not self.force_scv:
from dnppy import raster
raster.from_numpy(self._assemble_chunks(), self.metadata, rasterpath)
# write without metadata using simple CV
else:
raise NotImplementedError("simple CV raster export is not implemented yet")
return
def ingest(self, new_chunk_obj):
"""
places a chunk into the chunk bundle.
If chunk with that ID already exists, it will be replaced.
"""
# delete any chunk already existing at the index location of the new chunk object
for chunk_obj in self.chunk_list:
if chunk_obj.index == new_chunk_obj.index:
self.chunk_list.remove(chunk_obj)
break
self.chunk_list.append(new_chunk_obj)
return
# testing area
if __name__ == "__main__":
path = r"C:\Users\jwely\Desktop\troubleshooting\test_in_MODIS\MYD11A1.A2013001_day_clip_W05_C2014001_Avg_K_C_p_GSC.tif"
num = 2
c = chunk_bundle(path, num)
c.read()
c[0] += 10
test = c.write(r"C:\Users\jwely\Desktop\troubleshooting\chunk_test.tif") | 33.648045 | 123 | 0.62859 | 779 | 6,023 | 4.724005 | 0.322208 | 0.029348 | 0.031793 | 0.01712 | 0.1375 | 0.128804 | 0.099185 | 0.083424 | 0.063859 | 0.063859 | 0 | 0.010024 | 0.304333 | 6,023 | 179 | 124 | 33.648045 | 0.868258 | 0.442305 | 0 | 0.235294 | 0 | 0 | 0.105246 | 0.05214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0.029412 | 0.073529 | 0 | 0.294118 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80a2d988e41df3a0c79d453576a3823c0cb38741 | 58,965 | py | Python | quart_app/beyondchaosmaster/wor.py | razzlestorm/BCRandomizer-API | 2e5aec91c34b46e845bca695d3468eb8f3bae401 | [
"MIT"
] | 1 | 2021-06-15T03:54:53.000Z | 2021-06-15T03:54:53.000Z | quart_app/beyondchaosmaster/wor.py | razzlestorm/BCRandomizer-API | 2e5aec91c34b46e845bca695d3468eb8f3bae401 | [
"MIT"
] | 1 | 2021-09-13T04:32:43.000Z | 2021-09-13T04:32:43.000Z | BeyondChaos/Wor.py | razzlestorm/BeyondChaosRandomizer | 04a0acdcd9d4c3991a3e42cf1bba4299adda4435 | [
"MIT"
] | null | null | null | import dataclasses
from chestrandomizer import get_event_items
from character import get_character, get_characters
from dialoguemanager import get_dialogue, set_dialogue
from locationrandomizer import get_location, get_locations, NPCBlock
from monsterrandomizer import change_enemy_name
from utils import (WOB_TREASURE_TABLE, WOR_ITEMS_TABLE, WOB_EVENTS_TABLE,
read_multi, Substitution, utilrandom as random, write_multi, bytes_to_dialogue)
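# World of Ruin (WoR) recruitment randomization: rewrites event scripts,
# NPC placements, and dialogue so that a different character can be
# recruited at each WoR location.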
alt_zone_eater_recruit = None
def _dir_to_camera_moves(dir):
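"""Convert a signed (x, y) camera displacement into a list of event-script
camera movement opcodes: diagonal steps (0xA0-0xAB) are emitted first, then
any remaining straight run as 0x80 + (distance << 2) + direction."""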
x = dir[0]
y = dir[1]
left = x < 0
down = y < 0
if left:
x = -x
if down:
y = -y
out = []
while x != 0 and y != 0:
if x == y:
diag = 0xA0
if left:
diag += 2
if down != left:
diag += 1
out.append(diag)
x -= 1
y -= 1
else:
if x > y:
diag = 0xA5
if left:
diag += 4
if down != left:
diag += 1
out.append(diag)
x -= 2
y -= 1
else:
diag = 0xA4
if left:
diag += 4
if down != left:
diag += 3
out.append(diag)
x -= 1
y -= 2
if x == 0 and y == 0:
return out
if x != 0:
dir_add = 3 if left else 1
dist = x
else:
dir_add = 2 if down else 0
dist = y
ortho = 0x80 + (dist << 2) + dir_add
out.append(ortho)
return out
def recruit_mog_insert(fout, recruit_info):
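"""Patch in a naming cutscene for Mog when he is assigned to a WoR
recruitment spot whose original event has no rename screen."""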
maybe_name_location = 0x304000
maybe_name_low = maybe_name_location & 0xFF
maybe_name_mid = (maybe_name_location >> 8) & 0xFF
maybe_name_high = maybe_name_location >> 16
name_location = 0x304010
name_low = name_location & 0xFF
name_mid = (name_location >> 8) & 0xFF
name_high = name_location >> 16
fout.seek(recruit_info.name_pointer)
extra_bytes = fout.read(recruit_info.num_name_bytes)
level_average_bytes = bytes([0x77, 0x0A]) if recruit_info.special == zone_eater_recruit else bytes([])
maybe_name_sub = Substitution()
maybe_name_sub.set_location(maybe_name_location)
maybe_name_sub.bytestring = bytes([
0xC0, 0x9F, 0x02, name_low, name_mid, name_high - 0x0A,
]) + extra_bytes + level_average_bytes + bytes([0xFE])
maybe_name_sub.write(fout)
name_jump = Substitution()
name_jump.set_location(recruit_info.name_pointer)
name_jump.bytestring = bytes([0xB2, maybe_name_low, maybe_name_mid, maybe_name_high - 0x0A] + [0xFD] * (recruit_info.num_name_bytes-4))
name_jump.write(fout)
palette = get_character(0xA).palette
name_sub = Substitution()
name_sub.set_location(name_location)
mog_npc = recruit_info.location_npcs[0][1] + 0x10
hide_npcs = []
show_npcs = []
if recruit_info.name_camera == (0, 0):
name_camera = []
name_camera_reverse = []
else:
c = _dir_to_camera_moves(recruit_info.name_camera)
d = _dir_to_camera_moves((-recruit_info.name_camera[0], -recruit_info.name_camera[1]))
name_camera = [0x38, 0x30, 0x82 + len(c), 0xC1] + c + [0xFF]
name_camera_reverse = [0x30, 0x82 + len(d), 0xC2] + d + [0xFF, 0x39]
for npc in recruit_info.name_npcs:
hide_npcs += [0x42, 0x10 + npc]
show_npcs += [0x41, 0x10 + npc]
if recruit_info.name_show_full_party:
hide_party = [
0x42, 0x31,
0x42, 0x32,
0x42, 0x33,
0x42, 0x34,
]
show_party = [
0x41, 0x31,
0x41, 0x32,
0x41, 0x33,
0x41, 0x34,
]
else:
hide_party = [0x42, 0x31]
show_party = [0x41, 0x31]
name_sub.bytestring = bytes([
0x40, 0x0A, 0x0A, # assign mog properties to mog
0x3D, 0x0A, # create mog
0x37, 0x0A, 0x0A, # assign mog graphics to mog
0x43, 0x0A, palette, # assign mog palette to mog
0xD4, 0xEA, # Add Mog to shops/Gogo
0x45, # refresh objects
0x92, # pause for 30 frames
mog_npc, 0x82, # begin queue for mog npc
0x1F, 0xFF, # Do graphic action 1F, end
0x94, # pause for 60 frames
mog_npc, 0x82, # begin queue for mog npc
0xCE, 0xFF, # Turn down for what?, end
] + hide_party + hide_npcs + [
0xB2, 0x0F, 0xD0, 0x00, # Darken background
] + name_camera + [
0x4B, 0xE0, 0xC6, # SLAM-dancing Moogle text
0x92, # Pause for 30 frames
mog_npc, 0x82, # begin queue for mog npc
0x1D, 0xFF, # do graphical action 1D, end
0x94, # pause for 60 frames
0x97, # fade to black
0x5C, # Pause until fade is complete
0x7F, 0x0A, 0x0A, # change mog's name to mog
0x98, 0x0A, # name change screen for mog
] + show_party + show_npcs + recruit_info.name_extra + [
0x45, # refresh objects
0x96, # unfade
0x5C, # wait until unfade is complete
] + name_camera_reverse + [
0xB2, 0x15, 0xD0, 0x00, # Lighten background
0x92, # pause for 30 frames
0x3E, 0x0A, # Delete object 0A
0x45, # refresh objects
]) + extra_bytes + bytes([0xFE])
name_sub.write(fout)
def recruit_umaro_insert(fout, recruit_info):
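"""Patch in a naming cutscene for Umaro at a recruitment spot that does not
normally rename anyone."""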
name_location = 0x304400
name_low = name_location & 0xFF
name_mid = (name_location >> 8) & 0xFF
name_high = name_location >> 16
fout.seek(recruit_info.name_pointer)
extra_bytes = fout.read(recruit_info.num_name_bytes)
name_jump = Substitution()
name_jump.set_location(recruit_info.name_pointer)
name_jump.bytestring = bytes([0xB2, name_low, name_mid, name_high - 0x0A] + [0xFD] * (recruit_info.num_name_bytes-4))
name_jump.write(fout)
palette = get_character(0xD).palette
name_sub = Substitution()
name_sub.set_location(name_location)
umaro_npc = recruit_info.location_npcs[0][1] + 0x10
hide_npcs = []
show_npcs = []
if recruit_info.name_camera == (0, 0):
name_camera = []
name_camera_reverse = []
else:
c = _dir_to_camera_moves(recruit_info.name_camera)
d = _dir_to_camera_moves((-recruit_info.name_camera[0], -recruit_info.name_camera[1]))
name_camera = [0x38, 0x30, 0x82 + len(c), 0xC1] + c + [0xFF]
name_camera_reverse = [0x30, 0x82 + len(d), 0xC2] + d + [0xFF, 0x39]
for npc in recruit_info.name_npcs:
hide_npcs += [0x42, 0x10 + npc]
show_npcs += [0x41, 0x10 + npc]
name_sub.bytestring = bytes([
0x40, 0x0D, 0x0D, # assign umaro properties to umaro
0x3D, 0x0D, # create umaro
0x37, 0x0D, 0x0D, # assign umaro graphics to umaro
0x43, 0x0D, palette, # assign umaro palette to umaro
0xD4, 0xED, # Add umaro to shops/Gogo
0x45, # refresh objects
0x92, # pause for 30 frames
umaro_npc, 0x82, # begin queue for umaro npc
0xCE, 0xFF, # Turn down for what?, end
0x42, 0x31, # Hide party
0x42, 0x32, # Hide party
0x42, 0x33, # Hide party
0x42, 0x34, # Hide party
] + hide_npcs + [
0xB2, 0x0F, 0xD0, 0x00, # Darken background
] + name_camera + [
0x4B, 0xF9, 0xC5, # Admirer of bone-carvings text
0x92, # Pause for 30 frames
umaro_npc, 0x82, # begin queue for umaro npc
0x16, 0xFF, # do graphical action 16, end
0x92, # pause for 30 frames
0x97, # fade to black
0x5C, # Pause until fade is complete
0x7F, 0x0D, 0x0D, # change umaro's name to umaro
0x98, 0x0D, # name change screen for umaro
0x41, 0x31, # show party
0x41, 0x32, # show party
0x41, 0x33, # show party
0x41, 0x34, # show party
] + show_npcs + recruit_info.name_extra + [
0x45, # refresh objects
0x96, # unfade
0x5C, # wait until unfade is complete
] + name_camera_reverse + [
0xB2, 0x15, 0xD0, 0x00, # Lighten background
0x92, # pause for 30 frames
0x3E, 0x0D, # Delete object 0D
0x45, # refresh objects
]) + extra_bytes + bytes([0xFE])
name_sub.write(fout)
def recruit_gogo_insert(fout, recruit_info):
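"""Patch in a naming cutscene for Gogo at a recruitment spot that does not
normally rename anyone."""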
name_location = 0x304800
name_low = name_location & 0xFF
name_mid = (name_location >> 8) & 0xFF
name_high = name_location >> 16
fout.seek(recruit_info.name_pointer)
extra_bytes = fout.read(recruit_info.num_name_bytes)
name_jump = Substitution()
name_jump.set_location(recruit_info.name_pointer)
name_jump.bytestring = bytes([0xB2, name_low, name_mid, name_high - 0x0A] + [0xFD] * (recruit_info.num_name_bytes-4))
name_jump.write(fout)
palette = get_character(0xC).palette
name_sub = Substitution()
name_sub.set_location(name_location)
gogo_npc = recruit_info.location_npcs[0][1] + 0x10
hide_npcs = []
show_npcs = []
if recruit_info.name_camera == (0, 0):
name_camera = []
name_camera_reverse = []
else:
c = _dir_to_camera_moves(recruit_info.name_camera)
d = _dir_to_camera_moves((-recruit_info.name_camera[0], -recruit_info.name_camera[1]))
name_camera = [0x38, 0x30, 0x82 + len(c), 0xC1] + c + [0xFF]
name_camera_reverse = [0x30, 0x82 + len(d), 0xC2] + d + [0xFF, 0x39]
for npc in recruit_info.name_npcs:
hide_npcs += [0x42, 0x10 + npc]
show_npcs += [0x41, 0x10 + npc]
name_sub.bytestring = bytes([
gogo_npc, 0x82, # begin queue for gogo npc
0xCE, 0xFF, # Turn down for what?, end
0x42, 0x31, # Hide party
0x42, 0x32, # Hide party
0x42, 0x33, # Hide party
0x42, 0x34, # Hide party
] + hide_npcs + [
0xB2, 0x0F, 0xD0, 0x00, # Darken background
] + name_camera + [
0x4B, 0x0D, 0xCA, # Shrouded in odd clothing text
0x92, # Pause for 30 frames
0x40, 0x0C, 0x0C, # assign gogo properties to gogo
0x3D, 0x0C, # create gogo
0x37, 0x0C, 0x0C, # assign gogo graphics to gogo
0x43, 0x0C, palette, # assign gogo palette to gogo
0xD4, 0xEC, # Add gogo to shops/Gogo
0x7F, 0x0C, 0x0C, # change gogo's name to gogo
0x98, 0x0C, # name change screen for gogo
0x50, 0xBC, # tint screen
0x59, 0x10, # unfade screen at speed $10
0x92, # pause for 30 frames
0xB2, 0x15, 0xD0, 0x00, # Lighten background
0x41, 0x31, # show party
0x41, 0x32, # show party
0x41, 0x33, # show party
0x41, 0x34, # show party
] + show_npcs + recruit_info.name_extra + [
0x45, # refresh objects
] + name_camera_reverse + [
0x93, # pause for 45 frames
0x3E, 0x0C, # Delete object 0C
0x45, # refresh objects
]) + extra_bytes + bytes([0xFE])
name_sub.write(fout)
class WoRRecruitInfo:
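"""One WoR recruitment event: the ROM event pointers, recruited/shop-menu
bits, NPC placements, and dialogue entries that must be rewritten so that
char_id joins at the spot old_char_id normally occupies."""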
def __init__(self, label, event_pointers, recruited_bit_pointers, location_npcs,
dialogue_pointers, name_pointer, num_name_bytes, old_char_id,
shop_menu_bit_pointers=None, palette_pointers=None,
caseword_pointers=None, prerequisite=None, special=None,
name_npcs=None, name_extra=None, name_camera=(0, 0),
name_show_full_party=False):
self.label = label
self.event_pointers = event_pointers
self.recruited_bit_pointers = recruited_bit_pointers
self.location_npcs = location_npcs
self.dialogue_pointers = dialogue_pointers
self.char_id = None
self.old_char_id = old_char_id
self.name_pointer = name_pointer
self.num_name_bytes = num_name_bytes
self.caseword_pointers = caseword_pointers
self.shop_menu_bit_pointers = shop_menu_bit_pointers or []
self.palette_pointers = palette_pointers or []
self.prerequisite = prerequisite
self.special = special
self.name_npcs = name_npcs or []
self.name_extra = name_extra or []
self.name_camera = name_camera
self.name_show_full_party = name_show_full_party
def write_data(self, fout):
assert self.char_id is not None
for event_pointer in self.event_pointers:
fout.seek(event_pointer)
fout.write(bytes([self.char_id]))
for recruited_bit_pointer in self.recruited_bit_pointers:
fout.seek(recruited_bit_pointer)
fout.write(bytes([0xf0 + self.char_id]))
for shop_menu_bit_pointer in self.shop_menu_bit_pointers:
fout.seek(shop_menu_bit_pointer)
fout.write(bytes([0xe0 + self.char_id]))
palette = get_character(self.char_id).palette
for palette_pointer in self.palette_pointers:
fout.seek(palette_pointer)
fout.write(bytes([palette]))
for location_id, npc_id in self.location_npcs:
location = get_location(location_id)
npc = location.npcs[npc_id]
npc.graphics = self.char_id
npc.palette = get_character(self.char_id).palette
for index in self.dialogue_pointers:
text = get_dialogue(index)
old_name_placeholder = bytes_to_dialogue(bytes([self.old_char_id + 2]))
new_name_placeholder = bytes_to_dialogue(bytes([self.char_id + 2]))
text = text.replace(old_name_placeholder, new_name_placeholder)
set_dialogue(index, text)
if self.caseword_pointers:
for location in self.caseword_pointers:
fout.seek(location)
byte = ord(fout.read(1))
fout.seek(location)
fout.write(bytes([byte & 0x0F | (self.char_id << 4)]))
if self.special:
self.special(fout, self.char_id)
if self.char_id == 0xA and self.special != moogle_cave_recruit:
recruit_mog_insert(fout, self)
if self.char_id == 0xC and self.special not in [sasquatch_cave_recruit, moogle_cave_recruit, zone_eater_recruit]:
recruit_gogo_insert(fout, self)
if self.char_id == 0xD and self.special not in [sasquatch_cave_recruit, moogle_cave_recruit, zone_eater_recruit]:
recruit_umaro_insert(fout, self)
def falcon_recruit(fout, char_id):
falcon_recruit_sub = Substitution()
falcon_recruit_sub.set_location(0xA5324)
falcon_recruit_sub.bytestring = bytes([0xD5, 0xFB])
falcon_recruit_sub.write(fout)
falcon_recruit_sub.set_location(0xA5310 + 2 * char_id - (2 if char_id > 6 else 0))
falcon_recruit_sub.bytestring = bytes([0xD4, 0xF0 + char_id])
falcon_recruit_sub.write(fout)
def moogle_cave_recruit(fout, char_id):
if char_id == 0x0A:
return
if char_id in [0x0C, 0x0D]:
# Gogo and Umaro always get renamed, so jump to
# the never-got-Mog-in-WoB part
moogle_cave_recruit_sub = Substitution()
moogle_cave_recruit_sub.set_location(0xC3975)
moogle_cave_recruit_sub.bytestring = bytes([0x2F, 0x02])
moogle_cave_recruit_sub.write(fout)
moogle_cave_recruit_sub.set_location(0xC3AA0)
if char_id == 0x0C:
moogle_cave_recruit_sub.bytestring = bytes([0x4B, 0x0D, 0xCA]) # shrouded in odd clothing
else:
moogle_cave_recruit_sub.bytestring = bytes([0x4B, 0xF9, 0xC5]) # Admirer of bone-carvings text
moogle_cave_recruit_sub.write(fout)
return
# Don't rename, stay in got-Mog-in-WoB part
moogle_cave_recruit_sub = Substitution()
moogle_cave_recruit_sub.set_location(0xC3974)
moogle_cave_recruit_sub.bytestring = bytes([0xFD] * 7)
moogle_cave_recruit_sub.write(fout)
def sasquatch_cave_recruit(fout, char_id):
assert char_id != 0x0A
umaro_name = get_character(char_id).newname
for umaro_id in [0x10f, 0x110]:
change_enemy_name(fout, umaro_id, umaro_name)
if char_id == 0x0C:
gogo_sub = Substitution()
gogo_sub.set_location(0xCD811)
gogo_sub.bytestring = bytes([0x4B, 0x0D, 0xCA]) # shrouded in odd clothing
gogo_sub.write(fout)
gogo_sub.set_location(0xCD79A)
gogo_sub.bytestring = bytes([0x40, 0x0C, 0x0C]) # assign Gogo properties to Gogo
gogo_sub.write(fout)
return
if char_id == 0x0D:
return
sasquatch_cave_recruit_sub = Substitution()
# Level average character instead of setting Umaro's properties
sasquatch_cave_recruit_sub.set_location(0xCD79A)
sasquatch_cave_recruit_sub.bytestring = bytes([0x77, char_id, 0xFD])
sasquatch_cave_recruit_sub.write(fout)
# Skip over rename
sasquatch_cave_recruit_sub.set_location(0xCD7F5)
sasquatch_cave_recruit_sub.bytestring = bytes([
0xC0, 0x27, 0x01, 0x40, 0xD8, 0x02 # jump
])
sasquatch_cave_recruit_sub.write(fout)
def zone_eater_recruit(fout, char_id):
if char_id == 0x0C:
return
if char_id == 0x0D:
umaro_sub = Substitution()
umaro_sub.set_location(0xB81D6)
umaro_sub.bytestring = bytes([0x4B, 0xF9, 0xC5]) # Admirer of bone-carvings text
umaro_sub.write(fout)
return
prefix = [0xFD] * 4 if char_id == 0x0A else [0x77, char_id]
# Skip over rename
zone_eater_recruit_sub = Substitution()
zone_eater_recruit_sub.set_location(0xB81CF)
zone_eater_recruit_sub.bytestring = bytes(prefix + [0x3D, char_id, 0xC0, 0x27, 0x01, 0x00, 0x82, 0x01])
zone_eater_recruit_sub.write(fout)
def collapsing_house_recruit(unused_fout, unused_char_id):
pass
def manage_wor_recruitment(fout, shuffle_wor, random_treasure, include_gau, alternate_gogo):
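"""Entry point for WoR recruitment changes. Returns the id of the
character who joins for free aboard the Falcon."""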
if alternate_gogo:
_setup_alternate_zone_eater(fout, include_gau)
if shuffle_wor:
wor_free_char, collapsing_house_char = _shuffle_recruit_locations(fout, random_treasure, include_gau, alternate_gogo)
else:
wor_free_char = 0x0B
collapsing_house_char = 0x05
if alternate_gogo:
_manage_gogo_recruitment(fout, collapsing_house_char)
_start_of_wor_event(fout, alternate_gogo)
return wor_free_char
def _start_of_wor_event(fout, alternate_gogo):
new_events = [
# Set names for Mog, Gogo, Umaro in case they appear in text
0x7F, 0x0C, 0x0C, # Set name for GOGO
0x7F, 0x0D, 0x0D, # Set name for UMARO
0xC0, 0x9F, 0x82, 0xB3, 0x5E, 0x00, # If Mog recruited in WoB, jump to return
0x7F, 0x0A, 0x0A # Set name for MOG
]
if alternate_gogo:
new_events = [0xDA, 0x4B] + new_events # Set Gogo NPC bit
# bits that get set at the start of the world of ruin
wor_bits_sub = Substitution()
wor_bits_sub.set_location(0x305280)
wor_bits_sub.bytestring = [
# These bits are normally set in subroutine CB4B4B
# We could just call it as a subroutine, but we'll reuse the space later.
0xD9, 0xF2,
0xD8, 0x92,
] + new_events + [
0xFE, # Return
]
wor_bits_sub.write(fout)
next_event = wor_bits_sub.location + len(wor_bits_sub.bytestring)
# call the new subroutine above in place of CB4B4B
ptr_low = wor_bits_sub.location & 0xFF
ptr_mid = (wor_bits_sub.location & 0xFF00) >> 8
ptr_high = ((wor_bits_sub.location - 0xA0000) & 0xFF0000) >> 16
wor_bits_sub2 = Substitution()
wor_bits_sub2.set_location(0xA5334)
wor_bits_sub2.bytestring = [0xB2, ptr_low, ptr_mid, ptr_high]
wor_bits_sub2.write(fout)
def _shuffle_recruit_locations(fout, random_treasure, include_gau, alternate_gogo):
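"""Randomly assign candidate characters to the WoR recruitment events,
resolving locations with a prerequisite first and avoiding circular
requirements, so a character is never needed for their own recruitment.
Returns the Falcon free character and the Collapsing House character."""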
candidates = [0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x0A, 0x0D]
locke_event_pointers = [0xc2c48, 0xc2c51, 0xc2c91, 0xc2c9d, 0xc2c9e, 0xc2caf, 0xc2cb8, 0xc2cc5, 0xc2cca, 0xc2cd8, 0xc2ce3, 0xc2ce9, 0xc2cee, 0xc2cf4, 0xc2cfa, 0xc2d0b, 0xc2d33, 0xc2e32, 0xc2e4a, 0xc2e80, 0xc2e86, 0xc2e8b, 0xc2e91, 0xc2ea5, 0xc2eb1, 0xc2ec4, 0xc2f0b, 0xc2fe1, 0xc3102, 0xc3106, 0xc3117, 0xc311d, 0xc3124, 0xc3134, 0xc313d, 0xc3163, 0xc3183, 0xc3185, 0xc3189, 0xc318b, 0xc318e, 0xc3191, 0xc3197, 0xc31c7, 0xc31cb, 0xc31e2, 0xc31e8, 0xc31ed, 0xc31f2, 0xc31f8, 0xc3210, 0xc3215, 0xc321d, 0xc3229, 0xc322f, 0xc3235, 0xc323b]
locke_event_pointers_2 = [0xc3244, 0xc324a, 0xc324f, 0xc3258, 0xc326a]
if random_treasure:
locke_event_pointers_2 = [p + 12 for p in locke_event_pointers_2]
recruit_info = [
WoRRecruitInfo(
label="Phoenix Cave",
event_pointers=locke_event_pointers + locke_event_pointers_2,
recruited_bit_pointers=[0xc3195],
location_npcs=[(0x139, 0)],
dialogue_pointers=[0x984, 0x988, 0x989, 0xa20, 0xa21, 0xa22, 0xa23, 0xa24, 0xa28, 0xa2a, 0xa2c, 0xa2d, 0xa2e, 0xa2f, 0xa30, 0xa31, 0xa34, 0xa35],
old_char_id=1,
name_pointer=0xC2B81,
num_name_bytes=4,
name_show_full_party=True),
WoRRecruitInfo(
label="Mt. Zozo",
event_pointers=[0xc429c, 0xc429e, 0xc42a2, 0xc42a4, 0xc42a7, 0xc42aa],
recruited_bit_pointers=[0xc42ae],
location_npcs=[(0xb5, 2), (0xb4, 8)],
dialogue_pointers=[0x9f2, 0x9f9, 0x9fb, 0x9fd, 0x9fe, 0x9ff, 0xa00, 0xa01, 0xa02, 0xa03, 0xa04, 0xa05, 0xa06, 0xa08, 0xa0b, 0xa0c],
old_char_id=2,
name_pointer=0xC402A,
num_name_bytes=4),
WoRRecruitInfo(
label="Collapsing House",
event_pointers=[0xa6c0e, 0xc5aa8, 0xc5aaa, 0xc5aae, 0xc5ab0, 0xc5ab3, 0xc5ab6],
recruited_bit_pointers=[0xc5aba],
location_npcs=[(0x131, 1)],
dialogue_pointers=[0x8a7, 0x8a8, 0x8a9, 0x8aa, 0x8ab, 0x8ac, 0x8ad, 0x8ae, 0x8b1, 0x954, 0x95a],
caseword_pointers=[0xa6af1, 0xa6b0c, 0xa6bbd],
old_char_id=5,
name_pointer=0xC590B,
num_name_bytes=7,
name_npcs=[0, 2, 4, 6, 8, 10],
special=collapsing_house_recruit),
WoRRecruitInfo(
label="Fanatics' Tower",
event_pointers=[0xc5418, 0xc541a, 0xc541e, 0xc5420, 0xc5423, 0xc5426],
recruited_bit_pointers=[0xc542a],
location_npcs=[(0x16a, 3)],
prerequisite=0x08,
dialogue_pointers=[0x8c2, 0x8c3, 0x8c4, 0x8c5],
old_char_id=7,
name_pointer=0xC5316,
name_npcs=list(range(3)) + list(range(4, 10)),
num_name_bytes=4,
name_show_full_party=True),
WoRRecruitInfo(
label="Owzer's House",
event_pointers=[0xb4e09, 0xb4e0b, 0xb4e0f, 0xb4e11, 0xb4e14, 0xb4e17],
recruited_bit_pointers=[0xb4e1b],
location_npcs=[(0x161, 3), (0x15d, 21), (0xd0, 3)],
dialogue_pointers=[0xa18, 0xa8d, 0xa99, 0xa9d, 0xa9d, 0xa9e, 0xa9f, 0xaa0, 0xabd, 0xabe, 0xabe, 0xac0, 0xac1, 0xac2],
old_char_id=8,
name_pointer=0xB4D0D,
num_name_bytes=5,
name_npcs=list(range(3)) + list(range(4, 6))),
WoRRecruitInfo(
label="Mobliz",
event_pointers=[0xc49d1, 0xc49d3, 0xc49da, 0xc49de, 0xc49e2, 0xc4a01, 0xc4a03, 0xc4a0c, 0xc4a0d, 0xc4a2b, 0xc4a37, 0xc4a3a, 0xc4a43, 0xc4a79, 0xc4a7b, 0xc4ccf, 0xc4cd1, 0xc4cd5, 0xc4cd7, 0xc4cdb, 0xc4cde, 0xc4ce1, 0xc4ce5, 0xc4cf4, 0xc4cf6, 0xc5040, 0xc5042, 0xc5048, 0xc504a, 0xc504d, 0xc5050],
recruited_bit_pointers=[0xc4cd9, 0xc4cfa, 0xc5046],
location_npcs=[(0x09A, 1), (0x09A, 2), (0x096, 0), (0x09E, 13)],
dialogue_pointers=[0x8cf, 0x8d1, 0x8d2, 0x8d3, 0x8d4, 0x8d5, 0x8d6, 0x8d7, 0x8d8, 0x8d9, 0x8db, 0x8dc, 0x8dd, 0x8df, 0x8e0, 0x8e5, 0x8eb, 0x8ec, 0x8f0, 0x8f6, 0x8f7, 0x8f8, 0x8f9, 0x8fa, 0x8fb, 0x8fc, 0x8fe, 0x900, 0x903, 0x906, 0x90b],
old_char_id=0,
name_pointer=0xC446F,
num_name_bytes=4,
name_npcs=[0] + list(range(6, 15)),
name_extra=[0x73, 0x32, 0x33, 0x01, 0x02, 0x04, 0x14], # Keep door open
name_camera=(-2, 4)),
WoRRecruitInfo(
label="Moogle Cave",
event_pointers=[0xC3A2D, 0xC3A2F, 0xC3A33, 0xC3A35, 0xC3A38, 0xC3A3B, 0xC3A4D, 0xC3A4E, 0xC3A50, 0xC3A52, 0xC3A53, 0xC3A55, 0xC3AAD, 0xC3AAE, 0xC3AB0, 0xC3ACC, 0xC3AD9, 0xC3ADB, 0xC3ADF, 0xC3AE2, 0xC3AE5],
recruited_bit_pointers=[0xC3A3F, 0xC3A58],
shop_menu_bit_pointers=[0xC3A5A],
location_npcs=[(0x02C, 0)],
dialogue_pointers=[],
old_char_id=0xA,
palette_pointers=[0xC3A56],
special=moogle_cave_recruit,
name_pointer=None,
num_name_bytes=None
),
WoRRecruitInfo(
label="Sasquatch Cave",
event_pointers=[0xCD79B, 0xCD79C, 0xCD79E, 0xCD7A0, 0xCD7A1, 0xCD7A4, 0xCD81D, 0xCD820],
recruited_bit_pointers=[0xCD7A6],
shop_menu_bit_pointers=[0xCD7A8],
location_npcs=[(0x11B, 1), (0x15, 1)],
dialogue_pointers=[0x5fa],
old_char_id=0xD,
palette_pointers=[0xCD7A4],
prerequisite=0x0A,
special=sasquatch_cave_recruit,
name_pointer=None,
num_name_bytes=None
)
]
if include_gau:
candidates.append(0x0B)
if alternate_gogo:
recruit_info.append(alt_zone_eater_recruit)
else:
recruit_info.append(WoRRecruitInfo("Falcon", [], [], [], dialogue_pointers=[0xa07], old_char_id=0xB, special=falcon_recruit, name_pointer=None, num_name_bytes=None))
if not alternate_gogo:
candidates.append(0x0C)
recruit_info.append(
WoRRecruitInfo(
label="ZoneEater",
event_pointers=[0xB81DB, 0xB81DC, 0xB81DE, 0xB81E0, 0xB81E1, 0xB81E3, 0xB81E6, 0xB81E7, 0xB81E9, 0xB81EB, 0xB81EF, 0xB81F2, 0xB824A, 0xB824E],
recruited_bit_pointers=[0xB823E],
shop_menu_bit_pointers=[0xB823C],
location_npcs=[(0x116, 0)],
dialogue_pointers=[0xa0e, 0xa0f, 0xa10],
old_char_id=0xC,
palette_pointers=[0xB81E4],
special=zone_eater_recruit,
name_pointer=0xB81CF,
num_name_bytes=4,
))
prerequisite_info = [info for info in recruit_info if info.prerequisite]
noname_info = [info for info in recruit_info if info.special == falcon_recruit]
unrestricted_info = [info for info in recruit_info if info not in prerequisite_info and info not in noname_info]
random.shuffle(prerequisite_info)
recruit_info = prerequisite_info + noname_info + unrestricted_info
prerequisite_dict = dict()
wor_free_char = None
collapsing_house_char = None
for info in recruit_info:
valid_candidates = candidates
if info.prerequisite:
valid_candidates = [c for c in candidates
if c != info.prerequisite and c not in prerequisite_dict.get(info.prerequisite, [])]
if (not info.name_pointer) and info.special not in [moogle_cave_recruit, sasquatch_cave_recruit, zone_eater_recruit]:
valid_candidates = [c for c in valid_candidates if c not in [0xA, 0xC, 0xD]]
candidate = random.choice(valid_candidates)
candidates.remove(candidate)
info.char_id = candidate
if info.prerequisite:
prerequisite_dict.setdefault(candidate, []).append(info.prerequisite)
if info.special == falcon_recruit:
wor_free_char = candidate
elif info.special == collapsing_house_recruit:
collapsing_house_char = candidate
info.write_data(fout)
get_character(candidate).wor_location = info.label
return wor_free_char, collapsing_house_char
def _manage_gogo_recruitment(fout, collapsing_house_char):
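"""Hide Gogo somewhere in the world disguised as a randomly chosen
character: pick a location appropriate to that character, add an NPC with
the character's sprite and palette, and write the recruitment event."""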
character_specific_locations = {
0: {'map': 0xE2, 'x': 84, 'y': 17, 'facing': 0, 'move': True}, # Zozo tower top *Terra only*,
#1: *Locke only*
2: {'map': 0x120, 'x': 56, 'y': 40, 'facing': 3}, # Maranda inn *Cyan only*, No move
5: {'map': 0x80, 'x': 76, 'y': 31, 'facing': 1}, # Duncan's house *Sabin only*, No move
7: {'map': 0x158, 'x': 54, 'y': 18, 'facing': 0, 'move': True}, # Thamasa exterior *Strago only*,
8: {'map': 0x161, 'x': 27, 'y': 27, 'facing': 0, 'move': True}, # Cave in the Veldt *Relm only*
11: {'map': 0x83, 'x': 4, 'y': 11, 'facing': 3, 'move': True}, # Gau's dad's house *Gau only* (0xB is Gau),
#10: *Mog only*,
#13: *Umaro only*,
}
# Can't be used for collapsing_house_char
pre_falcon_locations = [
{'map': 0x14A, 'x': 12, 'y': 24, 'facing': 1}, # Albrook pub No move
{'map': 0x4E, 'x': 72, 'y': 38, 'facing': 3}, # South Figaro pub, No move
{'map': 0x3C, 'x': 100, 'y': 16, 'facing': 0, 'move': True}, # Figaro castle library
]
general_locations = [
{'map': 0x1C, 'x': 11, 'y': 39, 'facing': 2, 'move': True}, # Narshe inn
{'map': 0xCA, 'x': 51, 'y': 19, 'facing': 0, 'move': True}, # Jidoor relic shop
{'map': 0xEE, 'x': 99, 'y': 18, 'facing': 3, 'move': True}, # Opera house dressing room
]
candidates = list(set(range(0, 0xd)) - {0x3, 0x4, 0x6, 0x9, 0xc}) # Exclude mandatory chars, Shadow, and Gogo
char_index = random.choice(candidates)
location_candidates = [] + general_locations
if char_index in character_specific_locations:
location_candidates.append(character_specific_locations[char_index])
if char_index != collapsing_house_char:
location_candidates.extend(pre_falcon_locations)
location = random.choice(location_candidates)
gogo_location = get_location(location['map'])
get_character(0xc).wor_location = f"{str(gogo_location)[3:]} as {get_character(char_index).newname}"
gogo_npc = NPCBlock(None, gogo_location.locid)
gogo_npc.npcid = len(gogo_location.npcs)
gogo_npc.palette = get_character(char_index).palette
gogo_npc.bg2_scroll = 0
gogo_npc.membit = 3 # Gogo
gogo_npc.memaddr = 0x49 # Gogo
gogo_npc.event_addr = 0x2E5EF
gogo_npc.x = location['x']
gogo_npc.show_on_vehicle = 0
gogo_npc.y = location['y']
gogo_npc.speed = 2 # Normal
gogo_npc.graphics = char_index
gogo_npc.move_type = 0 # None
gogo_npc.sprite_priority = 0 # Normal
gogo_npc.vehicle = 0
gogo_npc.facing = location['facing']
gogo_npc.no_turn_when_speaking = 1
gogo_npc.layer_priority = 2 # Foreground
gogo_npc.special_anim = 0
show_npcs = []
hide_npcs = []
for i in range(len(gogo_location.npcs)):
hide_npcs.extend([0x42, 0x10 + i])
show_npcs.extend([0x41, 0x10 + i])
gogo_location.npcs.append(gogo_npc)
middle = []
middle2 = []
if location.get('move', False):
middle = [
0xC1, # Slow
0x83, # Move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCC, # Turn up
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xC3, # Fast
0x85, # Move right 2
0xCE, # turn down
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x20, # front, head down,
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x01, # Front, standing
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x20, # front, head down,
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCD, # turn right
0xE0, 0x0A, # Pause for 4 * 10 (40) frames
0xC2, # normal speed
0xC7, # stay still while moving
0x46, # walking, facing right
0x83, # move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x47, # standing facing right
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0x48, # walking, facing right 2
0x83, # move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x47, # standing facing right
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0xDC, # jump (low)
0x81, # move right 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xC6, # walk while moving
]
middle2 = [
0xC1, # Slow
0x83, # Move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCE, # Turn down
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xC3, # Fast
0x85, # Move right 2
0xCC, # turn up
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x21, # back, head down,
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x04, # back, standing
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x21, # back, head down,
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCD, # turn right
0xE0, 0x0A, # Pause for 4 * 10 (40) frames
0xC2, # normal speed
0xC7, # stay still while moving
0x46, # walking, facing right
0x83, # move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x47, # standing facing right
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0x48, # walking, facing right 2
0x83, # move left 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x47, # standing facing right
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0xDC, # jump (low)
0x81, # move right 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xC6, # walk while moving
]
recruit_event = Substitution()
recruit_event.set_location(0xCE5EF)
recruit_event.bytestring = [0xB2, 0x00, 0x50, 0x26, 0xFE] # Call subroutine, return
recruit_event.write(fout)
recruit_event = Substitution()
recruit_event.set_location(0x305000)
recruit_event.bytestring = [
0xDE, # Load caseword with current party
0xC0, 0xA0 + char_index, 0x01, 0xA6, 0x33, 0x02, # If target character is not in the party, jump to message blowing them off
0xB2, 0x8D, 0xCA, 0x00, # move party to tile below gogo
0xB2, 0x34, 0x2E, 0x01, # disable collision for party
0xB2, 0xAC, 0xC6, 0x00, # Call subroutine CAC6AC
0x3C, char_index, 0xFF, 0xFF, 0xFF, # Set up the party
0x45, # Refresh objects
0x32, 0x04,
0xC2, # Set vehicle/entity's event speed to normal
0xA1, # move right/down 1x1
0xCC, # turn up
0xFF,
0x33, 0x04,
0xC2, # Set vehicle/entity's event speed to normal
0xA2, # move left/down 1x1
0xCC, # turn up
0xFF,
0x34, 0x04,
0xC2, # Set vehicle/entity's event speed to normal
0x82, # move down 1
0xCC, # turn up
0xFF,
char_index, 0x84,
0xCC,
0xE0, 0x04,
0xFF,
0x94,
char_index, 0x8B, # begin queue for the recruit character,
0x13, # blink
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCE, # turn down
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x13, # blink
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCE, # turn down
0xFF, # end queue
0x91, # Pause for 15 frames
0x10 + gogo_npc.npcid, 0x8B, # begin queue for gogo npc
0x13, # blink
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCE, # turn down
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x13, # blink
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCE, # turn down
0xFF, # end queue
0x94, # Pause for 60 frames
char_index, 0x44 + len(middle), # begin queue for the recruit character,
0x04, # Facing up
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0x1B, # Back, right arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1C, # Back, right arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1B, # Back, right arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1C, # Back, right arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1B, # Back, right arm raise
0xE0, 0x02, # Pause for 4 * 2 (2) frames
0x04, # Facing up
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x5B, # Back, left arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5C, # Back, left arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5B, # Back, left arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5C, # Back, left arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5B, # Back, left arm raise
0xE0, 0x02, # Pause for 4 * 2 (2) frames
0x04, # Facing up
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x23, # Front, head turned left
0xE0, 0x10, # Pause for 4 * 16 (64) frames
] + middle + [
0x18, # Mad/embarrassed
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x0A, # Attack pose
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x17, # back, arms raised
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xDD, # Jump (high)
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x09, # kneeling
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x04, # facing up
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x04, # facing up
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x04, # facing up
0xE0, 0x08, # Pause for 4 * 8 (40) frames
0x1F, # shocked
0xFF,
0x10 + gogo_npc.npcid, 0xc4 + len(middle2), # begin queue for gogo, wait until finished
0x01, # Facing down
0xE0, 0x04, # Pause for 4 * 4 (16) frames
0x59, # Front, left arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5A, # Front, left arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x59, # Front, left arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x5A, # Front, left arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x59, # Front, left arm raise
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x01, # Facing down
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x19, # Front, right arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1A, # Front, right arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x19, # Front, right arm raise
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x1A, # Front, right arm raise 2
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x19, # Front, right arm raise
0xE0, 0x02, # Pause for 4 * 2 (2) frames
0x01, # Facing down
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x04, # Facing up
0xE0, 0x10, # Pause for 4 * 16 (64) frames
] + middle2 + [
0x04, # Facing up
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x0A, # Attack pose
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x16, # front, arms raised
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xDD, # Jump (high)
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x09, # kneeling
0xE0, 0x10, # Pause for 4 * 16 (64) frames
0x01, # facing down
0xE0, 0x08, # Pause for 4 * 8 (32) frames
0x14, # wink
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0x01, # facing down
0xE0, 0x08, # Pause for 4 * 8 (40) frames
0x1F, # shocked
0xFF,
0x94,
0x10 + gogo_npc.npcid, 0x1B, # begin queue for gogo npc
0x1D, #laugh 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x1E, #laugh 2
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x1D, #laugh 1
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0x1E, #laugh 2
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCD, # turn right
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCC, # turn up
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCF, # turn left
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xCE, # turn down
0xE0, 0x01, # Pause for 4 * 1 (4) frames
0xFC, 0x0C, # branch backward 12 bytes
0xFF,
0x95, # pause for 120 frames
0x37, 0x10 + gogo_npc.npcid, 0x0C, # Change npc to gogo's sprite
0x92, # pause 30 frames
0x10 + gogo_npc.npcid, 0x0D, # begin queue for gogo npc
0xCD, # turn right
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCC, # turn up
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCF, # turn left
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xCE, # turn down
0xE0, 0x02, # Pause for 4 * 2 (8) frames
0xFF,
0x42, 0x31,
0x42, 0x32,
0x42, 0x33,
0x42, 0x34,
] + hide_npcs + [
0xB2, 0xD1, 0x81, 0x01, # Branch to recruit gogo event
0x32, 0x03,
0xC2,
0x83,
0xFF,
0x33, 0x03,
0xC2,
0x81,
0xFF,
0x34, 0x03,
0xC2,
0x80,
0xFF,
0x93,
0x42, 0x32,
0x42, 0x33,
0x42, 0x34,
0xB2, 0x34, 0x2E, 0x01, # enable collision
0xFE, # Return
]
recruit_event.write(fout)
next_event = recruit_event.location + len(recruit_event.bytestring)
recruit_event = Substitution()
recruit_event.bytestring = [0x10 + gogo_npc.npcid]
for location in [0xB81CA, 0xB8204, 0xB820E, 0xB821C, 0xB8221, 0xB822D, 0xB822F, 0xB8236]:
recruit_event.set_location(location)
recruit_event.write(fout)
# Called after naming Gogo
ptr_low = next_event & 0xFF
ptr_mid = (next_event & 0xFF00) >> 8
ptr_high = ((next_event - 0xA0000) & 0xFF0000) >> 16
recruit_event = Substitution()
recruit_event.set_location(0xB81F9)
recruit_event.bytestring = [
0xB2, ptr_low, ptr_mid, ptr_high, # Call subroutine below
0xFD, 0xFD, # NOP
]
recruit_event.write(fout)
recruit_event = Substitution()
recruit_event.set_location(next_event)
recruit_event.bytestring = [
0xB2, 0x15, 0xD0, 0x00, # Call subroutine to lighten screen
0x41, 0x31, # show party members 0-3
0x41, 0x32,
0x41, 0x33,
0x41, 0x34,
] + show_npcs + [
0xFE #return
]
recruit_event.write(fout)
next_event = recruit_event.location + len(recruit_event.bytestring)
# Turn off Gogo bit at beginning of game
fout.seek(0xE0A0 + gogo_npc.memaddr)
value = ord(fout.read(1))
value &= ~(1 << gogo_npc.membit)
fout.seek(0xE0A0 + gogo_npc.memaddr)
fout.write(bytes([value]))
def _setup_alternate_zone_eater(fout, include_gau):
# replace zone eater gogo with gau, instead of giving him for free on the airship
zone_eater_loc = get_location(0x116)
gau_npc = zone_eater_loc.npcs[0]
gau_npc.graphics = 0xB # Gau
gau_npc.palette = get_character(0xB).palette
gau_npc.membit = 3
gau_npc.memaddr = 0x4D
gau_npc.event_addr = 0x14B4B
if not include_gau:
# TODO: If you turn off flags so that gau is on the veldt, what should be in zone eater instead?
return
# Turn on Gau bit at beginning of game
fout.seek(0xE0A0 + gau_npc.memaddr)
value = ord(fout.read(1))
value |= (1 << gau_npc.membit)
fout.seek(0xE0A0 + gau_npc.memaddr)
fout.write(bytes([value]))
text = '<GAU>: Uwao, aooh!<wait 60 frames> I’m <GAU>!<wait 60 frames><line>I’m your friend!<wait 60 frames><line>Let’s travel together!'
set_dialogue(0x286, text)
gau_event = Substitution()
gau_event.set_location(0x305200)
bytes_1 = [
0x4B, 0x86, 0x02, # Display text box
0xB2, 0xC1, 0xC5, 0x00, # Set caseword to number of characters in party
0xC0, 0xA3, 0x81, 0xFF, 0xFF, 0xFF # Jump to [bytes3, location to be computed shortly]
]
bytes_2 = [
0x3D, 0x0B, # Create Gau
0x3F, 0x0B, 0x01, # Add Gau to party
0x45, # Refresh objects
]
bytes_3 = [
0x77, 0x0B, # Level average Gau
0x8B, 0x0B, 0x7F, # Set Gau's HP to max
0x8C, 0x0B, 0x7F, # Set Gau's MP to max
0x88, 0x0B, 0x00, 0x00, # Remove all status effects from Gau
0xD4, 0xFB, # Set Gau as available
0x78, 0x10, # Enable ability to pass through other objects for NPC $10
0x10, 0x04, # queue for NPC $10
0xC2, # Set vehicle/entity's event speed to normal
0x82, # Move vehicle/entity down 1 tile
0xD1, # Make vehicle/entity disappear
0xFF, # End queue
0x3E, 0x10, # Delete NPC $10
0xDB, 0x6B, # Turn off NPC bit
0x45, # Refresh objects
0xFE, # Return
]
jump_location = gau_event.location + len(bytes_1) + len(bytes_2)
ptr_low = jump_location & 0xFF
ptr_mid = (jump_location & 0xFF00) >> 8
ptr_high = ((jump_location - 0xA0000) & 0xFF0000) >> 16
gau_event.bytestring = bytes_1[:-3] + [ptr_low, ptr_mid, ptr_high] + bytes_2 + bytes_3
gau_event.write(fout)
global alt_zone_eater_recruit
alt_zone_eater_recruit = WoRRecruitInfo(
label="ZoneEater",
event_pointers=[gau_event.location + len(bytes_1) + 1, gau_event.location + len(bytes_1) + 3,
gau_event.location + len(bytes_1) + len(bytes_2) + 1,
gau_event.location + len(bytes_1) + len(bytes_2) + 3,
gau_event.location + len(bytes_1) + len(bytes_2) + 6,
gau_event.location + len(bytes_1) + len(bytes_2) + 9,],
recruited_bit_pointers=[gau_event.location + len(bytes_1) + len(bytes_2) + 13],
location_npcs=[(0x116, 0)],
dialogue_pointers=[0x286],
old_char_id=0xB,
name_pointer=gau_event.location,
num_name_bytes=7
)
next_event = gau_event.location + len(gau_event.bytestring)
jump_location = gau_event.location
ptr_low = jump_location & 0xFF
ptr_mid = (jump_location & 0xFF00) >> 8
ptr_high = ((jump_location - 0xA0000) & 0xFF0000) >> 16
gau_event_shim = Substitution()
gau_event_shim.set_location(0xB4B4B)
gau_event_shim.bytestring = [
0xB2, ptr_low, ptr_mid, ptr_high, 0xFE
]
gau_event_shim.write(fout)
def manage_wor_skip(fout, wor_free_char=0xB, airship=False, dragon=False, alternate_gogo=False, esper_replacements=None):
characters = get_characters()
espers = [0x0, 0x1, 0x2, 0x3, 0x5, 0x6, 0x7, 0x8, 0x11, 0x13, 0x14, 0x17]
if esper_replacements:
espers = [esper_replacements[i].id for i in espers]
espers = [i + 0x36 for i in espers]
# jump to FC end cutscene for more space
startsub0 = Substitution()
startsub0.bytestring = bytes([0xB2, 0x1E, 0xDD, 0x00, 0xFE])
startsub0.set_location(0xC9A4F)
startsub0.write(fout)
# change code at start of game to warp to wor
wor_sub = Substitution()
wor_sub.bytestring = bytes([
0x6C, 0x01, 0x00, 0x91, 0xD3, 0x02, # make WoR the parent map
0x88, 0x00, 0x00, 0x00, # remove Magitek from Terra
0xD5, 0xF0, # flag Terra as unobtained
0xD5, 0xE0, # flag Terra as unobtained
0x3F, 0x00, 0x00, # remove Terra from party
0x3F, 0x0E, 0x00, # remove Vicks from party
0x3F, 0x0F, 0x00, # remove Wedge from party
0x3E, 0x00, # delete Terra
0x3E, 0x0E, # delete Vicks
0x3E, 0x0F, # delete Wedge
# there's no command to set a char's level, so I'ma
# do something hacky and continually set Mog/Strago's
# properties. Each of them will consider the other's
# level as the "party average". Strago will be
# boosted 2 levels above this average, and Mog will
# be boosted 5 levels, which effectively see-saws
# their levels upwards until they are around the
# level I want Celes to be at.
0xD4, 0xF7, # flag Strago as obtained
0xD4, 0xE7, # flag Strago as obtained
0xD4, 0xFA, # flag Mog as obtained
0xD4, 0xEA, # flag Mog as obtained
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x07, 0x07, # give Strago properties
]) + bytes([0x40, 0x0A, 0x0A,] if dragon else [
]) + bytes([
0x40, 0x06, 0x06, # give Celes properties
0xD5, 0xF7, # flag Strago as unobtained
0xD5, 0xE7, # flag Strago as unobtained
0xD5, 0xFA, # flag Mog as unobtained
0xD5, 0xEA, # flag Mog as unobtained
0xD4, 0xF6, # flag Celes as obtained
0xD4, 0xE6, # flag Celes as obtained
0x3D, 0x06, # create Celes
0x3F, 0x06, 0x01, # add Celes to party
0x40, 0x0C, 0x1B, # give Gogo the properties of Kamog
0x40, 0x0D, 0x1C, # give Umaro the properties of Mog (three scenario party selection)
0x8D, 0x0C, # unequip Kamog
0x8D, 0x0D, # unequip fake Mog
0x40, 0x01, 0x01, # give Locke properties
0x40, 0x02, 0x02, # give Cyan properties
0x40, 0x03, 0x03, # give Shadow properties
0x40, 0x04, 0x04, # give Edgar properties
0x40, 0x05, 0x05, # give Sabin properties
0x40, 0x07, 0x07, # give Strago properties
0x40, 0x08, 0x08, # give Relm properties
0x40, 0x09, 0x09, # give Setzer properties
0x40, 0x0A, 0x0A, # give Mog properties
0x40, 0x0B, 0x0B, # give Gau properties
0x37, 0x01, 0x01, # give Locke graphics
0x37, 0x02, 0x02, # give Cyan graphics
0x37, 0x03, 0x03, # give Shadow graphics
0x37, 0x04, 0x04, # give Edgar graphics
0x37, 0x05, 0x05, # give Sabin graphics
0x37, 0x06, 0x06, # give Celes graphics
0x37, 0x07, 0x07, # give Strago graphics
0x37, 0x08, 0x08, # give Relm graphics
0x37, 0x09, 0x09, # give Setzer graphics
0x37, 0x0A, 0x0A, # give Mog graphics
0x37, 0x0B, 0x0B, # give Gau graphics
0x7F, 0x00, 0x00, # give Terra name
0x7F, 0x01, 0x01, # give Locke name
0x7F, 0x02, 0x02, # give Cyan name
0x7F, 0x03, 0x03, # give Shadow name
0x7F, 0x04, 0x04, # give Edgar name
0x7F, 0x05, 0x05, # give Sabin name
0x7F, 0x06, 0x06, # give Celes name
0x7F, 0x07, 0x07, # give Strago name
0x7F, 0x08, 0x08, # give Relm name
0x7F, 0x09, 0x09, # give Setzer name
0x7F, 0x0A, 0x0A, # give Mog name
0x7F, 0x0B, 0x0B, # give Gau name
0x84, 0x50, 0xC3, # give party 50K Gil
] + [i for e in espers for i in (0x86, e)] + [
0xB8, 0x42, # allow Morph
0xB8, 0x43, # display AP
0xB8, 0x49, # Gau handed Meat
0xB8, 0x4B, # Shadow can't leave
0xE8, 0x06, 0x08, 0x00, # set up 8 dragons
])
# assign a palette to each character
partymembers = [c for c in characters if 1 <= c.id <= 12]
for character in partymembers:
id = character.id
palette = character.palette
wor_sub.bytestring += bytes([0x43, id, palette])
# obtain all locations with WoB treasures
wobtreasurelocs = []
for line in open(WOB_TREASURE_TABLE):
line = line.strip()
wobtreasurelocs.append(line)
# obtain a list of all treasures in these areas
wobtreasures = []
for l in get_locations():
if not l.chests:
continue
if l.area_name.upper() in wobtreasurelocs:
wobtreasures.extend(l.treasure_ids)
# give the items to the player via event code
for t in wobtreasures:
wor_sub.bytestring += bytes([0x80, t])
# give WoB event items
event_items = get_event_items()
for l in event_items:
if l.upper() in wobtreasurelocs + ["FIGARO CASTLE"]:
for e in event_items[l]:
if e.content_type == 0x40 and not e.multiple:
wor_sub.bytestring += bytes([0x80, e.contents])
# give the player a basic set of items. These items are intended to
# reflect the items a player would probably have by the time they get this
# far, so that they aren't missing basic supplies they would have in almost any seed.
for line in open(WOR_ITEMS_TABLE):
line = line.strip().split(',')
for i in range(0, int(line[1])):
wor_sub.bytestring += bytes([0x80, int(line[0], 16)])
# jump to overwriting the Ramuh cutscene because we need even more space
wor_sub.bytestring += bytes([
0xB2, 0x49, 0x97, 0x00,
0xFE
])
wor_sub.set_location(0xADD1E)
wor_sub.write(fout)
wor_sub2 = Substitution()
wor_sub2.bytestring = bytearray([])
# set most of the event bits that would have been set in the WoB
for line in open(WOB_EVENTS_TABLE):
line = line.strip().split(',')
setbit = int(line[1], 16) # if 1, set the bit from the txt file
bit = line[0] # the bit to set/clear from the txt file
if bit == "2FB":
if wor_free_char is None:
setbit = 0
else:
bit = "2F" + hex(wor_free_char)[2]
firstbyte = 0xD1 + int(bit[0:1], 16) * 2 - setbit
lastbyte = int(bit[1:], 16)
wor_sub2.bytestring += bytearray([firstbyte, lastbyte])
if alternate_gogo:
wor_sub2.bytestring += bytearray([0xDA, 0x4B]) # set event bit $54B
# This is only necessary if the random wor recruitment is on, but it's harmless if not.
wor_sub2.bytestring += bytearray([
0x7F, 0x0C, 0x0C, # Set name for GOGO
0x7F, 0x0D, 0x0D, # Set name for UMARO
0x7F, 0x0A, 0x0A # Set name for MOG
])
if airship:
wor_sub2.bytestring += bytearray([0xD2, 0xB9]) # airship appears in WoR
if dragon:
wor_sub2.bytestring += bytearray([
0xD0, 0xA7, # Talked to crimson robber
0xD0, 0xA8, # Talked to crimson robber
0xD0, 0xA9, # Talked to crimson robber
0xD0, 0xAA, # Talked to crimson robber
0xD0, 0xAB, # crimson robber left cafe
0xD7, 0x74,
0xD0, 0xAC, # boarded the crimson robbers' ship
0xD7, 0xFE,
0xD7, 0x77,
0xD7, 0x78,
0xD7, 0x7E, # talked to gerad in s figaro inn
0xD7, 0x7A,
0xD6, 0x99,
0xD2, 0x23, # Can jump on turtle in figaro cave
0xD4, 0x6E, # Saw Gerad help the injured guy
0xD0, 0xC6, # recruited Edgar in WoR
0xD4, 0xF4, # flag Edgar as obtained
0xD4, 0xE4, # flag Edgar as obtained
0x3D, 0x04, # create Edgar
0x3F, 0x04, 0x01, # add Edgar to party
0xD7, 0xF0,
0xD7, 0xF1,
0xD7, 0xF2,
0xD7, 0x82,
0xD7, 0x97,
0xD6, 0x81,
0xD0, 0xC7, # Saw Figaro Castle rise after tentacles
0xD5, 0xB7, # prison door is not open
0xD0, 0xDC, # Figaro castle is in Western desert
0xD4, 0xF9, # flag Setzer as obtained
0xD4, 0xE9, # flag Setzer as obtained
0x3D, 0x09, # create Setzer
0x3F, 0x09, 0x01, # add Setzer to party
0xDD, 0x7F,
0xDD, 0xB6,
0xD0, 0xCA, # recruited Setzer in WoR
0xD0, 0xCB, # opened Daryl's tomb
0xD4, 0xB1, # opened the door
0xD4, 0xB3, # raised the water
0xD4, 0xB5, # raised the water 2
0xD4, 0xB8, # opened the door 2
0xD4, 0xB2, # defeated dullahan
0xD7, 0xF3,
0x04, 0x05,
0xD5, 0x11, 0x08,
0xCF,
0xFF,
0x06, 0x05,
0xD5, 0x12, 0x07,
0xCF,
0xFF,
0x41, 0x04,
0x41, 0x06,
0x41, 0x09,
0xB2, 0x7B, 0x47, 0x00, # Falcon rising out of water
0xFE,
])
text = "<SETZER>: But first we need to kill the dragons!"
set_dialogue(0x9AF, text)
else:
wor_sub2.bytestring += bytearray([0x6B, 0x01, 0x00, 0x91, 0xD3, 0x00]) # go to WoR
if airship:
wor_sub2.bytestring += bytearray([0xC7, 0x91, 0xD3]) # place airship
wor_sub2.bytestring += bytearray([
0xFF, # end map script
0xFE, # return
])
wor_sub2.set_location(0xA9749)
wor_sub2.write(fout)
# set more Lores as starting Lores
odds = [True, True, False]
address = 0x26F564
fout.seek(address)
extra_known_lores = read_multi(fout, length=3)
for i in range(24):
if random.choice(odds):
extra_known_lores |= (1 << i)
if random.choice([True, False, False]):
odds.append(False)
fout.seek(address)
write_multi(fout, extra_known_lores, length=3)
if dragon:
set_alternate_dragon_locations(fout)
def set_alternate_dragon_locations(fout):
# TODO: Add more locations and randomly pick two?
# These NPCs happen to match the NPC numbers of the dragons
# in Kefka's tower so we can jump into the same event.
# A more general solution would need to copy the event
# after dragons have been randomized.
# Or just abandon the option of going into Kefka's tower
# to fight them.
# gold dragon: zone eater
zone_eater = get_location(0x114)
gold_dragon = zone_eater.npcs[0]
gold_dragon.palette = 2
gold_dragon.graphics = 57
gold_dragon.membit = 3
gold_dragon.memaddr = 0x1F56 - 0x1EE0
gold_dragon.event_addr = 0x218F3
# skull dragon: Owzer's mansion
owzer = get_location(0xd1)
# Hide the emperor and replace Ultros
# since his NPC number matches the skull dragon's.
emperor = owzer.npcs[3]
emperor.membit = 2
emperor.memaddr = 0x1F1E - 0x1EE0
skull_dragon = owzer.npcs[4]
skull_dragon.palette = 4
skull_dragon.graphics = 57
skull_dragon.membit = 4
skull_dragon.memaddr = 0x1F56 - 0x1EE0
skull_dragon.event_addr = 0x5EB3
skull_dragon_event = Substitution()
skull_dragon_event.set_location(0xB4B62)
skull_dragon_event.bytestring = bytes([
0xC0, 0xB4, 0x86, 0x20, 0x19, 0x02, # If haven't beat this dragon, branch to $CC1920
0xFE # return
])
skull_dragon_event.write(fout)
| 38.58966 | 540 | 0.602663 | 7,689 | 58,965 | 4.460918 | 0.151905 | 0.023557 | 0.022566 | 0.014927 | 0.406968 | 0.359592 | 0.326152 | 0.306997 | 0.284082 | 0.266414 | 0 | 0.121329 | 0.302501 | 58,965 | 1,527 | 541 | 38.614931 | 0.712653 | 0.218214 | 0 | 0.441132 | 0 | 0.000765 | 0.012208 | 0.001295 | 0 | 0 | 0.162612 | 0.000655 | 0.001529 | 1 | 0.013761 | false | 0.000765 | 0.005352 | 0 | 0.028287 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80a53a59d7c9cc5fc9f43fec4ffd711ec190d7c2 | 3,742 | py | Python | acs/acs/UtilitiesFWK/CommandLine.py | intel/test-framework-and-suites-for-android | 3aae8452ae931437b3b5ac30f068dc22a8dc5b85 | [
"Apache-2.0"
] | 8 | 2018-09-14T01:34:01.000Z | 2021-07-01T02:00:23.000Z | acs/acs/UtilitiesFWK/CommandLine.py | intel/test-framework-and-suites-for-android | 3aae8452ae931437b3b5ac30f068dc22a8dc5b85 | [
"Apache-2.0"
] | 3 | 2019-09-10T11:39:50.000Z | 2019-10-10T08:26:22.000Z | acs/acs/UtilitiesFWK/CommandLine.py | intel/test-framework-and-suites-for-android | 3aae8452ae931437b3b5ac30f068dc22a8dc5b85 | [
"Apache-2.0"
] | 9 | 2018-10-11T15:14:03.000Z | 2021-02-17T11:37:20.000Z | """
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import os
import sys
class CommandLine(object):
"""
Class which implement simple methods based on unix command
"""
# Dictionary to store full path of a shell command to avoid
# searching in PATH each time we call which method
__which_command_dict = {}
@staticmethod
def which(file_name):
"""
Shows the full path of (shell) commands.
Find full path of a command/file from PATH environment
:rtype: string
:return: Full path of the command
Return None in case PATH environment is not defined or command not found
"""
# Reformat command name regarding os
file_name = file_name if os.name in ['posix'] else file_name + ".exe"
if file_name in CommandLine.__which_command_dict.iterkeys():
full_path_cmd = CommandLine.__which_command_dict[file_name]
else:
# Separator is different in Linux
path_separator = ":" if os.name in ['posix'] else ";"
full_path_cmd = None
# Get PATH environment value
os_env_path = os.environ.get("PATH")
if os_env_path:
for path in os_env_path.split(path_separator):
try:
if file_name in os.listdir(r'%s' % path):
full_path_cmd = os.path.join(path, file_name)
CommandLine.__which_command_dict[file_name] = full_path_cmd
break
except OSError:
# Skip if current path is not found
# It could arrives that some path defined in the PATH environment is not found.
continue
return full_path_cmd
@staticmethod
def findfile(file2find):
"""
Find the file named file2find in the sys.path + the current working dir.
:type file2find: String
:param file2find: filename to find in the
:rtype: String or None
:return: the full path name if found, None if not found
"""
cwd = os.getcwd()
paths = [cwd] + sys.path
for dirname in paths:
possible = os.path.join(dirname, file2find)
if os.path.isfile(possible):
return possible
return None
@staticmethod
def exists(file2check):
"""
CHeck if the given (path to) file named file2check exists.
:type file2check: String
:param file2check: file path to check
:rtype: bool
:return: True if the full path if found, False otherwise if not found
"""
return os.path.exists(file2check)
@staticmethod
def chmod(file2use, mode):
"""
Change the right mode to the (path to) file named file2use
:type file2use: String
:param file2use: file path to use
:type mode: int
:param mode: The octal standard linux mode to use
:rtype: bool
:return: True if the full path if found, False otherwise if not found
"""
return os.chmod(file2use, mode)
| 32.824561 | 103 | 0.613576 | 482 | 3,742 | 4.674274 | 0.348548 | 0.04261 | 0.024412 | 0.035952 | 0.110963 | 0.110963 | 0.063027 | 0.063027 | 0.063027 | 0.063027 | 0 | 0.009862 | 0.322555 | 3,742 | 113 | 104 | 33.115044 | 0.878895 | 0.504543 | 0 | 0.105263 | 0 | 0 | 0.013845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.342105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80aa11171d981757abd23c45640f5c0e84c66506 | 9,132 | py | Python | scripts/image_dataset.py | KentJames/crocodile | 83c34c0530521774ba48063bb2357fc92a74d334 | [
"Apache-2.0"
] | 4 | 2015-02-10T17:26:50.000Z | 2019-12-28T17:14:48.000Z | scripts/image_dataset.py | KentJames/crocodile | 83c34c0530521774ba48063bb2357fc92a74d334 | [
"Apache-2.0"
] | 5 | 2015-03-19T12:15:08.000Z | 2015-06-19T12:51:26.000Z | scripts/image_dataset.py | KentJames/crocodile | 83c34c0530521774ba48063bb2357fc92a74d334 | [
"Apache-2.0"
] | 10 | 2015-03-05T18:21:19.000Z | 2018-07-30T02:04:23.000Z | #!/bin/env python3
import sys
import os
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)
import argparse
import h5py
import itertools
import numpy
import pylru
from multiprocessing import Process, Array, Queue
import ctypes
import arl.test_support
from crocodile.synthesis import *
import util.visualize
# Parse arguments
parser = argparse.ArgumentParser(description='Grid a data set')
parser.add_argument('input', metavar='input', type=argparse.FileType('r'),
help='input visibilities')
parser.add_argument('-N', dest='N', type=int, default=1,
help='Process parallelism')
parser.add_argument('--theta', dest='theta', type=float, required=True, default=0.08,
help='Field of view size')
parser.add_argument('--lambda', dest='lam', type=float, required=True, default=300000,
help='Grid size')
parser.add_argument('--grid', dest='grid', type=argparse.FileType('w'),
help='grid output file')
parser.add_argument('--image', dest='image', type=argparse.FileType('w'),
help='image output file')
parser.add_argument('--wkern', dest='wkern', type=argparse.FileType('r'),
help='w-kernel file to use for w-projection')
parser.add_argument('--akern', dest='akern', type=argparse.FileType('r'),
help='A-kernel file to use for w-projection')
parser.add_argument('--kern-cache', dest='kern_cache', type=int,
help='Size of A-kernel cache')
parser.add_argument('--quick', dest='method', const='quick', action='store_const',
help='Only use one visibility from every baseline')
parser.add_argument('--psf', dest='psf', const=True, default=False, action='store_const',
help='generate point spread function')
parser.add_argument('--show-grid', dest='show_grid', const=True, default=False, action='store_const',
help='Open a matplotlib window to inspect the result grid')
parser.add_argument('--show-image', dest='show_image', const=True, default=False, action='store_const',
help='Open a matplotlib window to inspect the result image')
args = parser.parse_args()
# Open input file
print("Reading %s..." % args.input.name)
input = h5py.File(args.input.name, "r")
# Get baselines
print("Reading baselines...")
viss = arl.test_support.import_visibility_baselines_from_hdf5(input)
print("Got %d visibility chunks" % len(viss))
# Generate UVW and visibilities
if args.method == 'quick':
# Select one visibility from every chunk
uvw = numpy.array([vis.uvw_lambda(0)[0] for vis in viss])
src = numpy.hstack([
[vis.antenna1[0] for vis in viss],
[vis.antenna2[0] for vis in viss],
[vis.time[0] for vis in viss],
[vis.frequency[0] for vis in viss]
])
vis = numpy.array([vis.vis[0,0,0] for vis in viss])
else:
# Utility to collect data from visibility blocks
def collect_blocks(prop):
result = []
for vis in viss:
vres = []
for chan in range(len(vis.frequency)):
vres.append(prop(vis, chan))
result.append(numpy.vstack(numpy.transpose(vres, (1,0,2))))
return numpy.vstack(result)
uvw = collect_blocks(lambda vis, chan: vis.uvw_lambda(chan))
src = collect_blocks(
lambda vis, chan: numpy.transpose([
vis.antenna1,
vis.antenna2,
vis.time,
vis.frequency[chan] * numpy.ones(vis.time.shape)
]))
vis = collect_blocks(lambda vis, chan: vis.vis[:,chan,:])[:,0]
# Show statistics
print()
print("Have %d visibilities" % vis.shape[0])
print("u range: %.2f - %.2f lambda" % (numpy.min(uvw[:,0]), numpy.max(uvw[:,0])))
print("v range: %.2f - %.2f lambda" % (numpy.min(uvw[:,1]), numpy.max(uvw[:,1])))
print("w range: %.2f - %.2f lambda" % (numpy.min(uvw[:,2]), numpy.max(uvw[:,2])))
print("Antennas: %d - %d" % (numpy.min(src[:,0]), numpy.max(src[:,1])))
print("t range: %.6f - %.6f MJD UTC" %(numpy.min(src[:,2]), numpy.max(src[:,2])))
print("f range: %.2f - %.2f MHz" % (numpy.min(src[:,3])/1e6, numpy.max(src[:,3])/1e6))
print()
# Initialise gridder
if args.wkern is None:
# Simple imaging without convolution. No source dependency.
print("Gridder: Simple imaging")
grid_fn = simple_imaging
grid_pars = {}
src = numpy.zeros((src.shape[0],0))
else:
# Determine w-cache steps
wkern_file = h5py.File(args.wkern.name, "r", driver='core')
wsteps = numpy.array(sorted(map(float, wkern_file['wkern/%s' % args.theta].keys())))
wstep = wsteps[1] - wsteps[0]
print("w kernels: %.2f - %.2f lambda (step %.2f lambda)" % (min(wsteps), max(wsteps), wstep))
# Make a custom kernel cache that reads from the hdf5 file
def closest(xs, x):
return xs[numpy.argmin(numpy.abs(numpy.array(xs) - x))]
def w_kernel_fn(theta, w):
kernw = closest(wsteps, w)
#print("w=", kernw)
return wkern_file['wkern/%s/%s/kern' % (theta, kernw)]
w_cache = pylru.FunctionCacheManager(w_kernel_fn, len(wsteps))
# A-kernels?
if args.akern is None:
# Just pure w-projection, also no source dependency.
print("Gridder: W-projection")
grid_fn = w_cache_imaging
grid_pars = { 'wstep': wstep, 'kernel_cache': w_cache }
src = numpy.zeros((src.shape[0],0))
else:
# Open A-kernel file
akern_file = h5py.File(args.akern.name, "r", driver='core')
times = list(map(float, akern_file['akern/%s/0' % args.theta]))
freqs = list(map(float, akern_file['akern/%s/0/%s' % (args.theta, times[0])]))
print("A kernels: %d antennas" %
max(map(int, akern_file['akern/%s' % args.theta])))
print(" \" t range: %.6f - %.6f MJD UTC (step %.2f s)" % (
numpy.min(times), numpy.max(times), (times[1] - times[0]) * 24 * 3600))
print(" \" f range: %.2f - %.2f MHz (step %.2f MHz)" % (
numpy.min(freqs)/1e6, numpy.max(freqs)/1e6, (freqs[1] - freqs[0]) /1e6))
# Make a custom kernel cache that reads from the hdf5 file
def a_kernel_fn(theta, a, t, f):
# print("a=%d, t=%f, f=%f" % (a, t, f))
return akern_file['akern/%s/%d/%s/%d/kern' % (theta, a, t, f)]
a_cache = pylru.FunctionCacheManager(a_kernel_fn, args.kern_cache)
# And yet another cache for AW-combinations
aw_cache = pylru.FunctionCacheManager(aw_kernel_fn(a_cache, w_cache), args.kern_cache)
# Round time and frequency to closest one that we actually have data for
def tf_round_fn(theta, w, a1, a2, t, f):
kernt = closest(times, t)
kernf = closest(freqs, f)
return aw_cache(theta, w, a1, a2, kernt, kernf)
# Use w-imaging function, but with AW kernels
print("Gridder: AW-projection")
grid_fn = w_cache_imaging
grid_pars = { 'wstep': wstep, 'kernel_cache': tf_round_fn }
# Generate PSF? Set all visibilities to 1
if args.psf:
vis[:] = 1.0
# Weight, mirror visibilities with negative v
print("\nWeight...")
wt = doweight(args.theta, args.lam, uvw, numpy.ones(len(uvw)))
uvw, vis = mirror_uvw(uvw, vis)
# Make grid
N = max(1, args.N)
if N == 1:
print("Gridding...")
uvgrid = grid_fn(args.theta, args.lam, uvw, src, wt * vis,
**grid_pars)
else:
# Crude attempt at parallelisation to make imaging big datasets
# at least bearable...
print("Make shared grid...")
step = vis.shape[0] // N
px = int(round(args.theta * args.lam))
grid_arr = Array(ctypes.c_double, px * px * 2) # slow!
uvgrid = numpy.frombuffer(grid_arr.get_obj(), dtype=complex).reshape((px, px))
uvgrid[:] = 0
print("Gridding using %d procs (%d visibilities each)..." % (N, step))
def do_grid(start):
uvg = grid_fn(args.theta, args.lam,
uvw[start:start+step,:],
src[start:start+step,:],
wt[start:start+step] * vis[start:start+step],
**grid_pars)
with grid_arr.get_lock():
uvgrid = numpy.frombuffer(grid_arr.get_obj(), dtype=complex).reshape((px, px))
uvgrid += uvg
print("... worker %d done" % (start / step))
procs = []
for start in range(0, vis.shape[0], step):
p = Process(target=do_grid, args=(start,))
p.start()
procs.append(p)
# Accumulate grids
for p in procs:
p.join()
# Make hermitian
uvgrid = make_grid_hermitian(uvgrid)
if args.grid is not None:
uvgrid.tofile(args.grid)
args.grid.close()
if args.show_grid:
util.visualize.show_grid(uvgrid, "result", args.theta)
# FFT, if requested
if args.image is not None or args.show_image:
print("FFT...")
img = numpy.real(ifft(uvgrid))
if args.image is not None:
img.tofile(args.image)
args.image.close()
if args.show_image:
util.visualize.show_image(img, "result", args.theta)
| 38.209205 | 103 | 0.611695 | 1,281 | 9,132 | 4.278689 | 0.210773 | 0.021346 | 0.040321 | 0.015326 | 0.27568 | 0.207809 | 0.166575 | 0.135559 | 0.108009 | 0.108009 | 0 | 0.015685 | 0.232041 | 9,132 | 238 | 104 | 38.369748 | 0.765863 | 0.102606 | 0 | 0.08046 | 0 | 0.005747 | 0.153337 | 0.002694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.074713 | 0.011494 | 0.137931 | 0.143678 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ac180f9a1f733a4e347f78769bceb79ce6c95d | 719 | py | Python | examples/api_csv_to_file.py | gjpower/python-sdk | e2f1bd7078afe0ed13364037992477a13ca8e4dc | [
"MIT"
] | 18 | 2018-09-25T11:47:28.000Z | 2021-12-14T20:28:39.000Z | examples/api_csv_to_file.py | gjpower/python-sdk | e2f1bd7078afe0ed13364037992477a13ca8e4dc | [
"MIT"
] | 57 | 2018-11-08T12:40:30.000Z | 2022-03-31T13:01:19.000Z | examples/api_csv_to_file.py | gjpower/python-sdk | e2f1bd7078afe0ed13364037992477a13ca8e4dc | [
"MIT"
] | 34 | 2018-11-05T16:09:15.000Z | 2022-03-08T10:51:34.000Z | import os
from devo.api import Client, ClientConfig, TO_BYTES
key = os.getenv('DEVO_API_KEY', None)
secret = os.getenv('DEVO_API_SECRET', None)
api = Client(auth={"key": key, "secret": secret},
address="https://apiv2-eu.devo.com/search/query",
config=ClientConfig(response="csv",
stream=True, processor=TO_BYTES))
response = api.query(query="from demo.ecommerce.data select * limit 20",
dates={'from': "today()-1*day()", 'to': "today()"})
with open("example_data/example.csv", "wb") as f:
try:
for item in response:
f.write(item)
f.write(b"\n")
except Exception as error:
print(error)
| 29.958333 | 72 | 0.584145 | 93 | 719 | 4.44086 | 0.580645 | 0.050847 | 0.058111 | 0.072639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007547 | 0.262865 | 719 | 23 | 73 | 31.26087 | 0.771698 | 0 | 0 | 0 | 0 | 0 | 0.243394 | 0.03338 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ac6e530ef3e3ca2f4277c6ca59011dab2aece5 | 2,246 | py | Python | ovs/dal/lists/rolelist.py | mflu/openvstorage_centos | 280a98d3e5d212d58297e0ffcecd325dfecef0f8 | [
"Apache-2.0"
] | 1 | 2015-08-29T16:36:40.000Z | 2015-08-29T16:36:40.000Z | ovs/dal/lists/rolelist.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | ovs/dal/lists/rolelist.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RoleList module
"""
from ovs.dal.datalist import DataList
from ovs.dal.dataobject import DataObjectList
from ovs.dal.hybrids.role import Role
from ovs.dal.helpers import Descriptor
class RoleList(object):
"""
This RoleList class contains various lists regarding to the Role class
"""
@staticmethod
def get_roles():
"""
Returns a list of all Roles
"""
roles = DataList({'object': Role,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
return DataObjectList(roles, Role)
@staticmethod
def get_role_by_code(code):
"""
Returns a single Role for the given code. Returns None if no Role was found
"""
roles = DataList({'object': Role,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('code', DataList.operator.EQUALS, code)]}}).data # noqa
if len(roles) == 1:
return Descriptor(Role, roles[0]).get_object(True)
return None
@staticmethod
def get_roles_by_codes(codes):
"""
Returns a list of Roles for a list of codes
"""
roles = DataList({'object': Role,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('code', DataList.operator.IN, codes)]}}).data # noqa
return DataObjectList(roles, Role)
| 35.650794 | 103 | 0.59528 | 262 | 2,246 | 5.061069 | 0.427481 | 0.045249 | 0.030166 | 0.052036 | 0.220211 | 0.220211 | 0.220211 | 0.220211 | 0.220211 | 0.220211 | 0 | 0.006402 | 0.304541 | 2,246 | 62 | 104 | 36.225806 | 0.84251 | 0.355744 | 0 | 0.5 | 0 | 0 | 0.059657 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ad6ed4380cff811437166d91bf4e659300cfcd | 5,776 | py | Python | urfiles/load.py | rikfaith/urfiles | 95319aae9e6400075cd5ee4a35b1f3a5e32eb571 | [
"MIT"
] | null | null | null | urfiles/load.py | rikfaith/urfiles | 95319aae9e6400075cd5ee4a35b1f3a5e32eb571 | [
"MIT"
] | null | null | null | urfiles/load.py | rikfaith/urfiles | 95319aae9e6400075cd5ee4a35b1f3a5e32eb571 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# load.py -*-python-*-
import csv
import io
import os
import re
import time
import urfiles.db
# pylint: disable=unused-import
from urfiles.log import DEBUG, INFO, ERROR, FATAL
class Load():
def __init__(self, directories, config, source=None, debug=False,
md5file='md5sum.txt', statfile='stat.txt'):
self.directories = directories
self.config = config
self.source = source
self.debug = debug
self.md5file = md5file
self.statfile = statfile
self.md5 = dict()
# List of all known md5s
self.known_md5s = set()
# Lists of data that needs updating in the database
self.path_data = []
self.meta_data = []
def _unescape(self, path):
result = path.replace(r'\\', '\\').replace(r'\n', '\n')
# if path != result:
# INFO('path=%s -> %s', path, result)
return result
def _load_md5file(self, directory):
filename = os.path.join(directory, self.md5file)
if not os.path.isfile(filename):
ERROR('Cannot find %s', filename)
return
try:
fp = open(filename, 'r', errors='ignore')
except OSError as e:
ERROR('Cannot open %s: %s', filename, repr(e))
return
INFO('Reading %s', filename)
current_time = time.time()
count = 0
for line in fp:
md5, path = line.split(' ', 1)
path = path.strip()
if md5[0] == '\\':
# The GNU version of md5sum (from Coreutils) uses an initial
# backslash on the line to indicate that the escaping in the
# filename is different for this line. The patch is here:
# http://git.savannah.gnu.org/cgit/coreutils.git/commit/\
# ?id=646902b30dee04b9454fdcaa8a30fd89fc0514ca
# and seems to escape backslashes and newlines. We undo those
# escapes here.
md5 = md5[1:]
path = self._unescape(path)
if re.search('md5sum.txt', path):
INFO(path)
self.md5[path] = md5
count += 1
if time.time() - current_time > 1.0:
INFO('%d lines read', count)
current_time = time.time()
INFO('%d lines read', count)
def _file(self, db, conn, path, source, size, mtime_ns):
# Look up the md5 for this file
try:
md5 = self.md5[path]
except KeyError as e:
ERROR('Cannot find md5 for path="%s"', path)
return
if path not in self.known_paths:
self.path_data.append([path, source, size, mtime_ns, md5])
if md5 not in self.known_md5s:
self.meta_data.append([md5, '{}'])
self.known_md5s.add(md5)
def _load_statfile(self, directory, db, conn):
if self.source is None:
source = os.path.basename(directory)
else:
source = self.source
INFO('Reading paths for source=%s', source)
self.known_paths = db.fetch_paths(source)
filename = os.path.join(directory, self.statfile)
if not os.path.isfile(filename):
ERROR('Cannot find %s', filename)
return
try:
fp = open(filename, 'r', errors='ignore')
except OSError as e:
ERROR('Cannot open %s: %s', filename, repr(e))
return
INFO('Reading %s', filename)
current_time = time.time()
count = 0
for line in fp:
try:
# Because we anchor with a number, we won' have the correct
# mode.
path, attr = re.split(r' r [0-9]', line)
except ValueError as e:
ERROR('Cannot split "%s": %s', line.strip(), repr(e))
continue
path = path.strip()
_, size, _, _, timestamp, _, tm, _ = attr.split()
ns = re.sub(r'^.*\.', '', tm)
mtime_ns = int(float(timestamp) * 1e9 + int(ns))
self._file(db, conn, path, source, size, mtime_ns)
count += 1
if time.time() - current_time > 1.0:
INFO('%d lines read', count)
current_time = time.time()
INFO('%d lines read: %d path updates and %d meta updates pending',
count, len(self.path_data), len(self.meta_data))
def _update_database(self, db, conn):
INFO('Preparing data for bulk load')
path_rows = io.StringIO()
path_writer = csv.writer(path_rows)
path_writer.writerows(self.path_data)
meta_rows = io.StringIO()
meta_writer = csv.writer(meta_rows)
meta_writer.writerows(self.meta_data)
path_rows.seek(0)
meta_rows.seek(0)
INFO('Bulk load starting')
db.bulk_insert(conn, path_rows=path_rows, meta_rows=meta_rows)
INFO('Bulk load finished')
def load(self):
try:
db = urfiles.db.DB(self.config.config)
conn = db.connect()
except Exception as e:
FATAL('Cannot connect to database: %s', repr(e))
INFO('Reading all md5s')
self.known_md5s = db.fetch_md5s()
for directory in self.directories:
self.path_data = []
self.meta_data = []
INFO('Loading data from %s', directory)
try:
self._load_md5file(directory)
except UnicodeDecodeErro as e:
FATAL('Cannot parse from %s: %s', directory, repr(e))
self._load_statfile(directory, db, conn)
self._update_database(db, conn)
INFO('Data loaded')
| 32.818182 | 77 | 0.540166 | 701 | 5,776 | 4.35378 | 0.256776 | 0.026212 | 0.019659 | 0.018349 | 0.241809 | 0.23329 | 0.197248 | 0.179554 | 0.179554 | 0.179554 | 0 | 0.019078 | 0.346607 | 5,776 | 175 | 78 | 33.005714 | 0.789613 | 0.116343 | 0 | 0.341085 | 0 | 0 | 0.09654 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054264 | false | 0 | 0.054264 | 0 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80b3ded7af8cd9841983dca1a2e3c26add8a24cb | 2,109 | py | Python | tomoproc/util/logger.py | KedoKudo/tomoproc | b20270e87af4ce7459004a6ed928037ae8573b1e | [
"MIT"
] | 1 | 2020-07-19T21:12:33.000Z | 2020-07-19T21:12:33.000Z | tomoproc/util/logger.py | KedoKudo/xproc | b20270e87af4ce7459004a6ed928037ae8573b1e | [
"MIT"
] | null | null | null | tomoproc/util/logger.py | KedoKudo/xproc | b20270e87af4ce7459004a6ed928037ae8573b1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Event and exception handeling with logger
"""
import functools
import logging
def create_logger(logfile=r"/tmp/tomoproc.log"):
"""Default logger for exception tracking"""
logger = logging.getLogger("tomoproc_logger")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(logfile)
fh.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
)
# add handler to logger object
logger.addHandler(fh)
return logger
logger_default = create_logger()
def log_exception(logger):
"""decorator for logging exception"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
args_str = ",".join(map(str, args))
kwargs_str = ",".join([f"{k}={v}" for k,v in kwargs.items()])
logger.exception(
f'Exception in calling {func.__name__}()\n\targs: {args_str}\n\tkwargs:{kwargs_str}'
)
# re-raise the exception
raise
return wrapper
return decorator
def log_event(logger):
"""decorator for verbose event logging"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
args_str = ",".join(map(str, args))
kwargs_str = ",".join([f"{k}={v}" for k,v in kwargs.items()])
logger.info(f'Executing {func.__name__}()\n\targs: {args_str}\n\tkwargs:{kwargs_str}')
return func(*args, **kwargs)
return wrapper
return decorator
@log_exception(logger_default)
# @log_event(logger_default)
def _test_logger(a, b=1, c=1):
"""testing the logger for exception"""
return a/b
if __name__ == "__main__":
print(_test_logger.__name__)
print(_test_logger.__doc__)
print(f"no exception test:\n\t{_test_logger(1, b=1)}")
print(f"exception test:\n\t{_test_logger(1, b=0)}")
| 28.12 | 104 | 0.596017 | 258 | 2,109 | 4.670543 | 0.321705 | 0.049793 | 0.029876 | 0.041494 | 0.298755 | 0.298755 | 0.298755 | 0.298755 | 0.253942 | 0.253942 | 0 | 0.004496 | 0.261735 | 2,109 | 74 | 105 | 28.5 | 0.769428 | 0.158369 | 0 | 0.340426 | 0 | 0.021277 | 0.198622 | 0.096441 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170213 | false | 0 | 0.042553 | 0 | 0.382979 | 0.085106 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80b6a23d570751524a2f07ff3ef236f65d15c194 | 674 | py | Python | trivial-forms/caller_1.py | mykespb/damba | 1e16a6823fc2b307b023388f8dd61e5a83c6431b | [
"MIT"
] | null | null | null | trivial-forms/caller_1.py | mykespb/damba | 1e16a6823fc2b307b023388f8dd61e5a83c6431b | [
"MIT"
] | null | null | null | trivial-forms/caller_1.py | mykespb/damba | 1e16a6823fc2b307b023388f8dd61e5a83c6431b | [
"MIT"
] | null | null | null | #!python
# caller_1.py
# caller file for forms
# Mikhail Kolodin, 2020
# ver. 2020-02-27 1.0
from forms_1 import *
global_values = {'max': 5000, 'min': 100, 'name': 'Vasya'}
def main(args):
local_values = {'max': 3000, 'name': 'Kirill'}
my_values = {**global_values, **local_values}
temp = templates['main_template']
print ("template: ", temp)
print ("values: ", my_values)
print (temp.format(names = my_values))
temp = templates['aux_template']
print ("template: ", temp)
print ("values: ", my_values)
print (temp.format(names = my_values))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| 21.741935 | 58 | 0.632047 | 90 | 674 | 4.5 | 0.477778 | 0.098765 | 0.093827 | 0.123457 | 0.355556 | 0.355556 | 0.355556 | 0.355556 | 0.355556 | 0.355556 | 0 | 0.052336 | 0.206231 | 674 | 30 | 59 | 22.466667 | 0.704673 | 0.123145 | 0 | 0.352941 | 0 | 0 | 0.165529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80b8a6cce37192bf40ffca015c22b4b54d5f60e3 | 3,614 | py | Python | sahara/config.py | hortonworksqe/sahara | b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270 | [
"Apache-2.0"
] | 1 | 2016-04-13T17:07:05.000Z | 2016-04-13T17:07:05.000Z | sahara/config.py | hortonworksqe/sahara | b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270 | [
"Apache-2.0"
] | null | null | null | sahara/config.py | hortonworksqe/sahara | b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from sahara import exceptions as ex
from sahara.openstack.common import log
from sahara import version
cli_opts = [
cfg.StrOpt('host', default='',
help='Hostname or IP address that will be used to listen on.'),
cfg.IntOpt('port', default=8386,
help='Port that will be used to listen on.'),
cfg.BoolOpt('log-exchange', default=False,
help='Log request/response exchange details: environ, '
'headers and bodies.')
]
edp_opts = [
cfg.IntOpt('job_binary_max_KB',
default=5120,
help='Maximum length of job binary data in kilobytes that '
'may be stored or retrieved in a single operation')
]
networking_opts = [
cfg.BoolOpt('use_floating_ips',
default=True,
help='If set to True, Sahara will use floating IPs to '
'communicate with instances. To make sure that all '
'instances have floating IPs assigned in Nova Network '
'set "auto_assign_floating_ip=True" in nova.conf. '
'If Neutron is used for networking, make sure that '
'all Node Groups have "floating_ip_pool" parameter '
'defined.'),
cfg.StrOpt('node_domain',
default='novalocal',
help="The suffix of the node's FQDN. In nova-network that is "
"the dhcp_domain config parameter."),
cfg.BoolOpt('use_neutron',
default=False,
help="Use Neutron Networking (False indicates the use of Nova "
"networking)."),
cfg.BoolOpt('use_namespaces',
default=False,
help="Use network namespaces for communication (only valid to "
"use in conjunction with use_neutron=True).")
]
cfg.set_defaults(log.log_opts, default_log_levels=[
'amqplib=WARN',
'qpid.messaging=INFO',
'stevedore=INFO',
'eventlet.wsgi.server=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'paramiko=WARN',
'requests=WARN',
'iso8601=WARN',
])
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
CONF.register_opts(networking_opts)
CONF.register_opts(edp_opts)
def parse_configs(conf_files=None):
try:
version_string = version.version_info.version_string()
CONF(project='sahara', version=version_string,
default_config_files=conf_files)
except cfg.RequiredOptError as roe:
raise ex.ConfigurationError(
"Option '%s' is required for config group '%s'" %
(roe.opt_name, roe.group.name))
validate_configs()
def validate_network_configs():
if CONF.use_namespaces and not CONF.use_neutron:
raise ex.ConfigurationError(
'use_namespaces can not be set to "True" when use_neutron is set '
'to "False"')
def validate_configs():
validate_network_configs()
| 33.775701 | 79 | 0.633647 | 454 | 3,614 | 4.936123 | 0.4163 | 0.026774 | 0.021419 | 0.014279 | 0.024096 | 0.024096 | 0.024096 | 0.024096 | 0 | 0 | 0 | 0.007634 | 0.275042 | 3,614 | 106 | 80 | 34.09434 | 0.84771 | 0.153016 | 0 | 0.052632 | 0 | 0 | 0.392646 | 0.018056 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.052632 | 0 | 0.092105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80b8af7d0e63b2441f4f96ef7b409613e1e3bbf6 | 3,308 | py | Python | scripts/python/set_districts_on_parking_places.py | grvl/grvl.github.io | 1eff80b1dc01a612cc699f5f32e8ae342153e786 | [
"MIT"
] | null | null | null | scripts/python/set_districts_on_parking_places.py | grvl/grvl.github.io | 1eff80b1dc01a612cc699f5f32e8ae342153e786 | [
"MIT"
] | null | null | null | scripts/python/set_districts_on_parking_places.py | grvl/grvl.github.io | 1eff80b1dc01a612cc699f5f32e8ae342153e786 | [
"MIT"
] | null | null | null | """set_districts_on_parkgin_places.py
set the district from SP, on the parking place.
"""
import json
import math
from sp_districts import get_districts, get_district_from_point
_VAGAS_FILE = 'data/vagas/ZonaAzuVagas_DF_ID_latlong.json'
_OUTPUT_FILE_RAW = 'data/vagas/vagas_latlong.csv'
_OUTPUT_FILE_SCORED = 'data/vagas/vaga_district_scored.csv'
def get_vagas(districts=None):
"""Returns a list of dicts with info about each reserved parking area.
"""
with open(_VAGAS_FILE, "r") as f:
vagas_json = json.load(f)
districts = districts or get_districts()
vagas = []
for vaga in vagas_json['features']:
lat = vaga['geometry']['coordinates'][1]
lng = vaga['geometry']['coordinates'][0]
dtc = get_district_from_point(
districts,
latitude=lat,
longitude=lng)
code, name, area = ('0', 'none', 0) if dtc is None else (
dtc['code'],
dtc['name'],
dtc['polygon'].area * 100000 # Area is in an arbitrary unit.
)
vagas.append({
'district_id': code,
'district_name': name,
'district_area': str(area),
'place': vaga['properties']['Local'],
'qty': str(vaga['properties']['Quantidade']),
'area': vaga['properties']['Area'],
'type': vaga['properties']['Tipo'],
'lat': str(lat),
'long': str(lng),
})
return vagas
def _export(headers, lines, outfile):
# Writes to file.
lines = sorted(lines, key=lambda el: int(el[0]))
with open(outfile, 'w') as f:
f.write('\n'.join(','.join(map(lambda el: el.encode('utf-8'), line))
for line in ([headers] + lines)))
def export_raw(vagas=None):
"""Exports to csv file the raw data about parking spaces.
Each line corresponds to a reserved parking area.
"""
vagas = vagas or get_vagas()
print('Exporting raw data...')
# Builds data.
headers = ['district_id', 'district_name', 'qty', 'area', 'lat', 'long']
lines = [
[vaga[attr] for attr in headers]
for vaga in vagas
]
_export(headers, lines, _OUTPUT_FILE_RAW)
def export_scored(districts=None, vagas=None):
"""Exports to csv file scores for each district based parking spaces info.
"""
districts = districts or get_districts()
vagas = vagas or get_vagas()
# Calculates each districts score.
scores = {
dtc['code']: math.log(1 + sum(
map(
lambda vaga: int(vaga['qty']),
filter(lambda vaga: vaga['district_id'] == dtc['code'], vagas)
)
) / dtc['polygon'].area)
for dtc in districts
}
max_score = max(scores.values()) # Normalization factor.
headers = ['district_id', 'district_name', 'score']
lines = [
[
dtc['code'],
dtc['name'],
str(scores[dtc['code']] / max_score),
]
for dtc in districts
]
print('Exporting district scores...')
_export(headers, lines, _OUTPUT_FILE_SCORED)
if __name__ == '__main__':
districts = get_districts()
vagas = get_vagas(districts)
export_raw(vagas)
export_scored(districts, vagas)
print('Done.')
| 28.273504 | 78 | 0.58283 | 398 | 3,308 | 4.673367 | 0.301508 | 0.018817 | 0.027419 | 0.021505 | 0.146774 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0.005467 | 0.281137 | 3,308 | 116 | 79 | 28.517241 | 0.776703 | 0.137243 | 0 | 0.151899 | 0 | 0 | 0.168145 | 0.037247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.037975 | 0 | 0.101266 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80b9a3fc573c3c4f2095f1c187ca7ad44b7fa13f | 10,744 | py | Python | pybilt/bilayer_analyzer/leaflet.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-07-29T16:21:53.000Z | 2022-02-02T11:44:57.000Z | pybilt/bilayer_analyzer/leaflet.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-05-15T09:30:05.000Z | 2021-07-19T16:49:59.000Z | pybilt/bilayer_analyzer/leaflet.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 9 | 2019-08-12T11:14:45.000Z | 2020-12-22T18:22:55.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
# leaflet object
class Leaflet(object):
""" Create a bilayer Leaflet representation.
This class object is used to group lipids together according to their bilayer leaflet. It is primarily meant to
store the indices of LipidCOMs as they are in a Frame.lipidcom list. This class also
creates sub-groups within the Leaflet based on the LipidCOM.type using LipidGroup objects. Instances of Leaflet
are created by the MemSys class.
"""
def __init__(self, name):
"""Initializes an instance of a Leaflet object.
Args:
name (str): The name of the bilayer leaflet being initialized ('upper' and 'lower' are used by the MemSys class).
Attributes:
name (str): The name of the Leaflet (e.g. 'upper' or 'lower').
members (list of int): A list containing the integer indices associated with the LipidCOM objects within
a Frame that are assigned to the Leaflet instance.
groups (list of obj:LipidGroup): A list of the LipidGroup objects (uniquely named) that are created by the Leaflet instance
as new members are added.
group_dict (dict): A dictionary keyed according to the names of the LipidGroup objects created, which stores the
corresponding index of that LipidGroup in self.groups.
"""
#the name of the leaflet - e.g. 'upper' or 'lower'
self.name = name
#initialize a list to store the indices of lipids assigned to this leaflet
self.members = []
#initialize a list to hold the LipidGroup objects
self.groups = []
#initialize a dictionary to store the self.groups index of LipidGroup objects
self.group_dict = {}
return
def __str__(self):
return '%s leaflet of a Membrane System with %s members and %s lipid groups' % (self.name, len(self.members), len(self.groups))
def __repr__(self):
return '%s leaflet of a Membrane System with %s members and %s lipid groups' % (self.name, len(self.members), len(self.groups))
def __len__(self):
""" Have len(Leaflet) return the number of lipids that have been added to the Leaflet instance.
Returns:
int: Number of lipids in the Leaflet.
"""
return len(self.members)
#consider changing var name of input 'resname' to something that doesn't conflict with LipidCOM.type
def add_member(self, com_index, resname, resid):
""" Add new lipids to the Leaflet.
        This function is meant to be used to add new lipids, identified by their Frame.lipidcom index,
        to the Leaflet and to a LipidGroup according to their resname/type/name.
Args:
com_index (int): The COMFrame.lipidcom index of the lipid being added to the Leaflet.
resname (str): The resname (or LipidCOM.type) of the lipid being added.
resid (int): The topological resid of the lipid being added to the leaflet.
"""
if len(self.members) == 0:
self.members.append([com_index, resname, resid])
self.groups.append(LipidGroup(resname))
self.groups[0].add_member(com_index)
self.group_dict.update({resname: 0})
else:
self.members.append([com_index, resname, resid])
addgroup = True
group_ind = 0
for rn in self.groups:
if resname == rn.lg_name:
addgroup = False
break
group_ind+=1
if addgroup:
self.groups.append(LipidGroup(resname))
ng = len(self.groups)
self.groups[ng-1].add_member(com_index)
self.group_dict.update({resname: ng-1})
else:
self.groups[group_ind].add_member(com_index)
#self.members=sorted(self.members,key=lambda self.members:self.members[1])
return
def get_group_indices(self, group_name):
""" Get the indices of lipids in the Leaflet belonging to a specific LipidGroup.
Args:
            group_name (string): The name of the LipidGroup to pull LipidCOM indices from.
                Passing the string 'all' will return indices of all the lipids assigned to
                the Leaflet instance. If the group_name is not recognised (i.e. is not in the group_dict),
                the function defaults to 'all'.
Returns:
list of int: A list containing the integer indices of lipids in the Leaflet that
belong to the specified LipidGroup.
"""
indices = []
if group_name == "all":
return self.get_member_indices()
elif group_name in self.group_dict:
gindex = self.group_dict[group_name]
indices = self.groups[gindex].lg_members
else:
            #unknown group name - print a warning and use the default "all"
print("!! Warning - request for unknown Lipid Group \'",group_name,"\' from the ",self.name," leaflet")
print("!! using the default \"all\"")
return self.get_member_indices()
return list(indices)
def get_group_indices_per_resid(self, group_name):
""" Get the indices of lipids in the Leaflet belonging to a specific LipidGroup.
Args:
group_name (string): The name of the LipidGroup pull LipidCOM indices from.
Passing the string 'all' will return indices of all the lipids assigned to
the Leaflet instance. If the group_name is not recognised (i.e. is not in the group_dict)
The function defaults to 'all'.
Returns:
list of int: A list containing the integer indices of lipids in the Leaflet that
belong to the specified LipidGroup.
"""
ret_indices = {}
if group_name == "all":
indices = self.get_member_indices()
elif group_name in self.group_dict:
gindex = self.group_dict[group_name]
indices = self.groups[gindex].lg_members
else:
            #unknown group name - print a warning and use the default "all"
print("!! Warning - request for unknown Lipid Group \'",group_name,"\' from the ",self.name," leaflet")
print("!! using the default \"all\"")
return self.get_member_indices()
for i in indices:
resid = self.get_member_resid_from_index(i)
if resid not in ret_indices.keys():
ret_indices[resid] = [i]
else:
ret_indices[resid].append(i)
return ret_indices
def get_member_indices(self):
""" Get the indices of all lipids (LipidCOM) in the Leaflet.
        Returns the list of indices for the lipids grouped in the Leaflet instance.
Returns:
list of int: A list of integer indices of the lipids associated with the Leaflet instance.
"""
indices = []
for element in self.members:
indices.append(element[0])
return list(indices)
def get_member_resids(self):
""" Get the 'resid's of all lipids in the Leaflet.
        Returns the list of 'resid's for the lipids grouped in the Leaflet instance.
Returns:
list of int: A list of integer 'resid's of the lipids associated with the Leaflet instance.
"""
return [element[2] for element in self.members]
def get_member_resnames(self):
""" Get the 'resname's of all lipids in the Leaflet.
        Returns the list of resnames for the lipids grouped in the Leaflet instance.
        Returns:
            list of str: A list of 'resname's of the lipids associated with the Leaflet instance.
"""
return [element[1] for element in self.members]
def get_member_resname_from_resid(self, resid):
resids = self.get_member_resids()
if resid in resids:
index = resids.index(resid)
return self.get_member_resnames()[index]
else:
raise ValueError('resid is not in this leaflet')
def get_member_resid_from_index(self, index):
indices = self.get_member_indices()
if index in indices:
ind = indices.index(index)
return self.get_member_resids()[ind]
else:
            raise ValueError('index is not in this leaflet')
def has_group(self, group_name):
""" Check if there is a LipidGroup with the specified name.
Args:
group_name (str): The name to checked against the names of existing LipidGroup objects.
Returns:
bool: True if there is a LipidGroup with name group_name, and False otherwise.
"""
if group_name == 'all':
return True
return group_name in list(self.group_dict.keys())
def num_groups(self):
""" Get the number of LipidGroups in the Leaflet.
Returns:
int: The number of unique LipidGroups.
"""
return len(self.groups)
def get_group_names(self):
""" Get the names of all the LipidGroup objects in the Leaflet
Returns:
list of str: A list of the names of current LipidGroup objects.
"""
return [group.lg_name for group in self.groups]
class LipidGroup(object):
""" Object to group lipid indices by type/resname/name.
Instances of this object are created by the Leaflet class.
"""
def __init__(self, name):
""" Initializes LipidGroup object.
Args:
name (str): The name/type/resname of the lipids being grouped in this object.
Attributes:
            lg_members (list of int): A list to hold the indices of lipids added to
                this LipidGroup.
lg_name (str): The name/type/resname of the lipids being grouped in this object.
"""
#initialize a list to hold the member indices
self.lg_members = []
# the name of this lipid group
self.lg_name = name
return
def add_member(self, new_mem):
""" Add lipid index to to the LipidGroup.
Args:
new_mem (int): The index of the lipid being added to this LipidGroup.
"""
self.lg_members.append(new_mem)
return
def name(self):
""" Get the name associated with this LipidGroup.
Returns:
str: The name of the lipid group (i.e. lg_name)
"""
return self.lg_name
#@classmethod
#def leaflet_from_mda_frame
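# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of how Leaflet groups lipid indices by resname; the
# indices, resnames and resids below are made up.
if __name__ == '__main__':
    upper = Leaflet('upper')
    upper.add_member(0, 'POPC', 1)
    upper.add_member(1, 'POPE', 2)
    upper.add_member(2, 'POPC', 3)
    print(upper)  # upper leaflet of a Membrane System with 3 members and 2 lipid groups
    print(upper.get_group_names())  # ['POPC', 'POPE']
    print(upper.get_group_indices('POPC'))  # [0, 2]
    print(upper.get_group_indices_per_resid('all'))  # {1: [0], 2: [1], 3: [2]}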
| 39.5 | 135 | 0.625558 | 1,427 | 10,744 | 4.608269 | 0.131044 | 0.0441 | 0.023723 | 0.010645 | 0.519465 | 0.448145 | 0.411496 | 0.395377 | 0.368461 | 0.349148 | 0 | 0.001469 | 0.302867 | 10,744 | 271 | 136 | 39.645756 | 0.876502 | 0.493206 | 0 | 0.378378 | 0 | 0 | 0.080504 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0 | 0.036036 | 0.018018 | 0.414414 | 0.045045 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ba70e30ea60ba73100e502e98b6715cae9406e | 11,797 | py | Python | train_dqn.py | jamqd/EE239AS | d1e45d8878ac61e0b6af38d6ce24b9d3a87fa285 | [
"MIT"
] | 2 | 2020-08-24T08:09:39.000Z | 2020-08-31T11:42:12.000Z | train_dqn.py | jamqd/EE239AS | d1e45d8878ac61e0b6af38d6ce24b9d3a87fa285 | [
"MIT"
] | null | null | null | train_dqn.py | jamqd/EE239AS | d1e45d8878ac61e0b6af38d6ce24b9d3a87fa285 | [
"MIT"
] | null | null | null | import torch
from torch import optim
import torch.nn.functional as F
from dqn import DQN
import run
import gym
import numpy as np
from trajectory_dataset import TrajectoryDataset
from torch.utils.tensorboard import SummaryWriter
from run import collect_trajectories
import os
import datetime
import qvalues
import random
import constants
def compute_loss(s, a, r, s_prime, done, dqn, discount_factor, dqn_prime=None):
"""
    param:
        s : batch of states, (N, |S|)
        a : batch of actions, (N,)
        r : batch of rewards, (N,)
        s_prime : batch of next states, (N, |S|)
        done : batch of terminal flags, (N,)
        dqn : the DQN being trained
        discount_factor : scalar discount factor
        dqn_prime : optional target network (used for DDQN)
return:
a scalar value representing the loss
"""
N = len(s)
q = dqn.forward(s)[torch.arange(N), a.long()]
if dqn_prime: # using ddqn and target network
bootstrap = dqn_prime.forward(s_prime)[torch.arange(N), dqn.forward_best_actions(s_prime)[0]]
else:
bootstrap = dqn.forward_best_actions(s_prime)[1]
target = discount_factor * bootstrap
done_mask = done < 0.5
target *= done_mask
target += r
    target = target.detach() # do not propagate gradients through targets
return F.mse_loss(q, target.float())
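# --- Illustrative sketch (added; not part of the original script) ---
# The TD target computed above, shown with plain tensors instead of a real DQN:
# target = r + discount_factor * bootstrap, with the bootstrap zeroed on
# terminal transitions via the done mask.
def _example_td_target():
    r = torch.tensor([1.0, 0.5])
    bootstrap = torch.tensor([2.0, 3.0])  # stand-in for max_a' Q(s', a')
    done = torch.tensor([0.0, 1.0])  # the second transition is terminal
    target = 0.99 * bootstrap
    target *= done < 0.5  # mask out the bootstrap on terminal transitions
    target += r
    return target  # tensor([2.9800, 0.5000])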
def train(
learning_rate=constants.LEARNING_RATE,
discount_factor=0.99,
env_name="LunarLander-v2",
iterations=50000,
episodes_per_iteration=100,
use_ddqn=False,
batch_size=32,
n_threads=1,
copy_params_every=100,
save_model_every=100,
max_replay_history=500000,
freq_report_log=5,
online=True,
epsilon=0.995,
render=False,
eval_episodes=16,
gd_optimizer="RMSprop",
num_episodes=50000,
    decay=None
):
"""
    Train a DQN (optionally a DDQN with a target network) on the given gym
    environment, using the hyperparameters passed as keyword arguments.
    return:
        None
"""
params = locals()
for param in params:
print(f"Using {param}={params[param]}")
ident_string = datetime.datetime.now().strftime("%Y_%m_%d_%H.%M.%S.%f")
if not os.path.isdir("./models/"):
os.mkdir("./models/")
os.mkdir("./models/{}/".format(ident_string))
if not os.path.isdir("./meta_text/"):
os.mkdir("./meta_text/")
if not os.path.isdir("./metrics/"):
os.mkdir("./metrics/")
with open(f"./meta_text/{ident_string}.txt", "w+") as text_file:
for param in params:
text_file.write(f"{param}={params[param]}\n")
env = gym.make(env_name)
if not isinstance(env.action_space, gym.spaces.discrete.Discrete):
print("Action space for env {} is not discrete".format(env_name))
raise ValueError
print("Using env: {}".format(env_name))
action_space_dim = env.action_space.n
obs_space_dim = np.prod(env.observation_space.shape)
print("Action space dimension: {}".format(action_space_dim))
print("Observation space dimension {}".format(obs_space_dim))
# initializes deep Q network
dqn = DQN(obs_space_dim, action_space_dim)
if torch.cuda.is_available():
print("DQN on GPU")
dqn = dqn.cuda()
dqn_prime=None
if use_ddqn:
print("Using DDQN")
dqn_prime = DQN(obs_space_dim, action_space_dim)
if torch.cuda.is_available():
print("DQN Prime on GPU")
dqn_prime = dqn_prime.cuda()
if gd_optimizer == "Adam":
optimizer = optim.Adam(dqn.parameters(), lr=learning_rate)
elif gd_optimizer == "SGD":
optimizer = optim.SGD(dqn.parameters(), lr=learning_rate)
elif gd_optimizer == "RMSprop":
optimizer = optim.RMSprop(dqn.parameters(), lr=learning_rate)
else:
print("Invalid gd_optimizer: {}".format(gd_optimizer))
raise ValueError
summary_writer = SummaryWriter(log_dir=f'./runs/{ident_string}')
# gradient step every time a transition is collected
epsilon_use = epsilon
if online:
# initialize dataset
observation = env.reset()
action = env.action_space.sample()
observation_, reward, done, info = env.step(action)
terminal = 1 if done else 0
replay = [observation, action, reward, observation_, terminal]
dataset = TrajectoryDataset(replay, max_replay_history=max_replay_history)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=n_threads,
sampler=torch.utils.data.RandomSampler(dataset),
)
dataset.add_transition(replay)
dataset.flush()
metrics = []
# go through episodes
for i_episode in range(num_episodes):
if torch.cuda.is_available():
print("Episode {}, Transitions {}, MemAlloc {}".format(i_episode, len(dataset), torch.cuda.memory_allocated()))
else:
print("Episode {}, Transitions {}".format(i_episode, len(dataset)))
observation = env.reset()
total_reward = 0
if decay is not None:
epsilon_use = epsilon * np.power(decay, i_episode)
if use_ddqn and i_episode % copy_params_every == 0:
print("Copying dqn to dqn_prime")
dqn_prime.load_state_dict(dqn.state_dict())
while True: # repeat
if render:
env.render()
# selecting an action
if dqn and random.random() > epsilon_use:
action = torch.squeeze(dqn.forward_best_actions([observation])[0]).item()
else:
action = env.action_space.sample() # random sample of action space
# carry out action, observe new reward and state
observation_, reward, done, info = env.step(action)
total_reward += reward
# store experience in replay memory
terminal = 1 if done else 0
dataset.add_transition([observation, action, reward, observation_, terminal])
# sample random transition from replay memory
sarsd = next(iter(dataloader))
s, a, r, s_prime, done = unpack_dataloader_sarsd(sarsd, obs_space_dim)
if torch.cuda.is_available():
s = s.cuda()
a = a.cuda()
r = r.cuda()
s_prime = s_prime.cuda()
done = done.cuda()
loss = compute_loss(s, a, r, s_prime, done, dqn, discount_factor, dqn_prime)
optimizer.zero_grad()
loss.backward()
                optimizer.step() # apply the gradient update for this sampled batch
# change current state
observation = observation_
if terminal:
break
dataset.flush()
summary_writer.add_scalar("RealReward", total_reward, i_episode)
# log evaluation metrics
if i_episode % freq_report_log == 0:
undiscounted_avg_reward, q_difference, avg_q = log_evaluate(env, dqn, eval_episodes, summary_writer, i_episode)
metrics.append([i_episode, undiscounted_avg_reward, q_difference, avg_q.cpu(), total_reward])
np.save("./metrics/" + ident_string + ".npy", np.array(metrics))
if i_episode % save_model_every == 0:
torch.save(dqn, "./models/{}/dqn_{}.pt".format(ident_string, i_episode))
env.close()
return
# collect trajectories with random policy
init_trajectories = collect_trajectories(env, episodes_per_iteration, sarsa=False, dqn=dqn)
dataset = TrajectoryDataset(init_trajectories, max_replay_history=max_replay_history, online=False)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=n_threads,
sampler=torch.utils.data.RandomSampler(dataset),
)
metrics = []
for i in range(iterations):
if torch.cuda.is_available():
print("Iteration {}, Transitions {}, MemAlloc {}".format(i, len(dataset), torch.cuda.memory_allocated()))
else:
print("Iteration {}, Transitions {}".format(i, len(dataset)))
if use_ddqn and i % copy_params_every == 0:
print("Copying dqn to dqn_prime")
dqn_prime.load_state_dict(dqn.state_dict())
# fitted Q-iteration
sarsd = next(iter(dataloader))
s, a, r, s_prime, done = unpack_dataloader_sarsd(sarsd, obs_space_dim)
if torch.cuda.is_available():
s = s.cuda()
a = a.cuda()
r = r.cuda()
s_prime = s_prime.cuda()
done = done.cuda()
loss = compute_loss(s, a, r, s_prime, done, dqn, discount_factor, dqn_prime)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# collect trajectories
if decay is not None:
epsilon_use = epsilon * np.power(decay, i)
trajectories = collect_trajectories(env, episodes_per_iteration, sarsa=False, dqn=dqn, epsilon=epsilon_use)
dataset.add(trajectories)
# log evaluation metrics
if i % freq_report_log == 0:
undiscounted_avg_reward, q_difference, avg_q = log_evaluate(env, dqn, eval_episodes, summary_writer, i)
metrics.append([i, undiscounted_avg_reward, q_difference, avg_q])
np.save("./metrics/" + ident_string + ".npy", np.array(metrics))
        if i % save_model_every == 0:
torch.save(dqn, "./models/{}/dqn_{}.pt".format(ident_string, i))
env.close()
def unpack_dataloader_sarsd(sarsd, obs_space_dim):
N = len(sarsd)
s = sarsd[:, :obs_space_dim]
s = torch.reshape(s, (N, obs_space_dim))
a = sarsd[:, obs_space_dim:obs_space_dim + 1]
a = torch.reshape(a, (N,))
r = sarsd[:, obs_space_dim + 1 : obs_space_dim + 1 + 1]
r = torch.reshape(r, (N,))
s_prime = sarsd[:, obs_space_dim + 1 + 1: obs_space_dim + 1 + 1 + obs_space_dim]
s_prime = torch.reshape(s_prime, (N, obs_space_dim))
done = sarsd[:, obs_space_dim + 1 + 1 + obs_space_dim: obs_space_dim + 1 + 1 + obs_space_dim + 1]
done = torch.reshape(done, (N,))
return s, a, r, s_prime, done
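# --- Layout note (added) ---
# Each packed row is [s (obs_space_dim) | a (1) | r (1) | s' (obs_space_dim) | done (1)].
# A quick shape check with obs_space_dim = 2 and a batch of 3 rows:
#   sarsd = torch.zeros(3, 2 + 1 + 1 + 2 + 1)
#   s, a, r, s_prime, done = unpack_dataloader_sarsd(sarsd, 2)
#   # shapes: s (3, 2), a (3,), r (3,), s_prime (3, 2), done (3,)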
def log_evaluate(env, dqn, num_episodes, summary_writer, iteration):
with torch.no_grad():
trajectories = collect_trajectories(env=env, episodes=num_episodes, dqn=dqn)
# average reward per trajectory
undiscounted_avg_reward = sum([sarsa[2] for traj in trajectories for sarsa in traj])/len(trajectories)
summary_writer.add_scalar("AvgReward", undiscounted_avg_reward, iteration)
# average difference between empirical q and q from network
q_difference = q_diff(dqn, trajectories)
summary_writer.add_scalar("QDiff", q_difference, iteration)
#average q value
#run the environment randomly, get the list of states
trajectories_random = collect_trajectories(env=env, episodes=num_episodes)
s = [sarsa[0] for traj in trajectories for sarsa in traj]
#q network on states
a, q = dqn.forward_best_actions(s)
avg_q = sum(q) / len(q)
summary_writer.add_scalar("AvgQ", avg_q, iteration)
return undiscounted_avg_reward, q_difference, avg_q
def q_diff(dqn, trajectories):
s = [sarsa[0] for traj in trajectories for sarsa in traj]
a = [sarsa[1] for traj in trajectories for sarsa in traj]
N = len(s)
q = dqn.forward(s).detach().cpu().numpy()[np.arange(N), a]
q_empirical = qvalues.cumulative_discounted_rewards(trajectories)
q_empirical = np.concatenate([q_t for q_t in q_empirical])
diff = q - q_empirical
return sum(diff) / (len(q))
| 36.981191 | 127 | 0.610325 | 1,474 | 11,797 | 4.675712 | 0.183175 | 0.029019 | 0.033517 | 0.018572 | 0.448491 | 0.386535 | 0.351857 | 0.298752 | 0.262188 | 0.25 | 0 | 0.008504 | 0.282275 | 11,797 | 318 | 128 | 37.097484 | 0.80548 | 0.080953 | 0 | 0.307359 | 0 | 0 | 0.063669 | 0.013125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021645 | false | 0 | 0.064935 | 0 | 0.108225 | 0.064935 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80ba74d0606b498a8e643910a20eeff2e8a3db5b | 4,105 | py | Python | tests/ClientServer/interop_tools/client_sc_renew.py | workerVA/S2OPC | 9a5b6008559501f46a4bc079beea2d6655b1bfe5 | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2018-09-28T16:03:55.000Z | 2021-09-23T09:07:10.000Z | tests/ClientServer/interop_tools/client_sc_renew.py | workerVA/S2OPC | 9a5b6008559501f46a4bc079beea2d6655b1bfe5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/ClientServer/interop_tools/client_sc_renew.py | workerVA/S2OPC | 9a5b6008559501f46a4bc079beea2d6655b1bfe5 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-28T08:32:27.000Z | 2020-04-28T08:32:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to Systerel under one or more contributor license
# agreements. See the NOTICE file distributed with this work
# for additional information regarding copyright ownership.
# Systerel licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Freeopcua based test client to validate the SOPC server.
Tests that the server renews the SecureChannel and revises the timeout correctly,
and does not accept messages after the specified timeout.
"""
from time import sleep
import re
import sys
import concurrent.futures
from opcua.ua import SecurityPolicy
from safety_secure_channels import secure_channels_connect
from common import sUri, create_client
from tap_logger import TapLogger
from opcua.crypto import security_policies
def secure_channel_renew_nominal(client, logger):
# Define renew time to 1 second
client.secure_channel_timeout=1000
# Renew with 1 second
client.open_secure_channel(renew=True)
print('Open Secure Channel renewed')
# Check revised time
logger.add_test('OPN renew test - renewed with given timeout value', client.secure_channel_timeout == 1000)
# Read a node to be sure we are using the new security token
nid_index = 1001
nid = u"ns=1;i={}".format(nid_index)
node = client.get_node(nid)
value = node.get_value()
print(' Value for Node {}:'.format(nid), value)
print(' Error expected on next read:')
def secure_channel_renew_test_read_failure(client, logger):
# Define renew time to 1 second
client.secure_channel_timeout=1000
# Renew with 1 second
client.open_secure_channel(renew=True)
print('Open Secure Channel renewed')
# Check revised time
logger.add_test('OPN renew test - renewed with given timeout value', client.secure_channel_timeout == 1000)
# Change revised time to avoid client to renew the security token in time
client.secure_channel_timeout=10000
# Read a node to be sure we are using the new security token
nid_index = 1001
nid = u"ns=1;i={}".format(nid_index)
node = client.get_node(nid)
value = node.get_value()
print(' Value for Node {}:'.format(nid), value)
# Wait timeout of the security token
sleep(2)
print(' Error expected on next read:')
# Try to read a node again
try:
node = client.get_node(nid)
value = node.get_value()
except:
logger.add_test('OPN renew test - read refused after timeout', True)
else:
logger.add_test('OPN renew test - read refused after timeout', False)
if __name__=='__main__':
    # tests with one connection
print('Connecting to', sUri)
client = create_client()
logger = TapLogger("sc_renew.tap")
# tests of SC renew with degraded cases
headerString = "******************* Beginning {0} test of degraded SC renew *********************"
for sp in [SecurityPolicy, security_policies.SecurityPolicyBasic256]:
logger.begin_section("security policy {0}".format(re.split("#",sp.URI)[-1]))
# secure channel connection
print(headerString.format(re.split("#",sp.URI)[-1]))
try:
secure_channels_connect(client, sp)
for i in range(0,1):
secure_channel_renew_nominal(client, logger)
secure_channel_renew_test_read_failure(client, logger)
finally:
try:
client.disconnect()
except (concurrent.futures.TimeoutError, TimeoutError, OSError):
pass
logger.finalize_report()
sys.exit(1 if logger.has_failed_tests else 0)
| 37.318182 | 111 | 0.703045 | 571 | 4,105 | 4.935201 | 0.345009 | 0.064585 | 0.038325 | 0.046132 | 0.407026 | 0.407026 | 0.351668 | 0.351668 | 0.323989 | 0.310859 | 0 | 0.016273 | 0.206577 | 4,105 | 109 | 112 | 37.66055 | 0.848941 | 0.350548 | 0 | 0.416667 | 0 | 0 | 0.185383 | 0.007994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.016667 | 0.15 | 0 | 0.183333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80bc2884ecba206ed21a7fb62256701a985367e5 | 832 | py | Python | roblox/promotionchannels.py | speer-kinjo/ro.py | 2d5b80aec8fd143b11101fbbfdf3b557f798a27f | [
"MIT"
] | 28 | 2021-11-04T11:13:38.000Z | 2022-03-11T05:00:16.000Z | roblox/promotionchannels.py | speer-kinjo/ro.py | 2d5b80aec8fd143b11101fbbfdf3b557f798a27f | [
"MIT"
] | 12 | 2021-11-24T06:25:24.000Z | 2022-03-18T14:37:01.000Z | roblox/promotionchannels.py | speer-kinjo/ro.py | 2d5b80aec8fd143b11101fbbfdf3b557f798a27f | [
"MIT"
] | 21 | 2021-10-20T16:36:55.000Z | 2022-03-27T21:43:53.000Z | """
This module contains classes intended to parse and deal with data from Roblox promotion channel endpoints.
"""
from typing import Optional
class UserPromotionChannels:
"""
Represents a user's promotion channels.
Attributes:
facebook: A link to the user's Facebook profile.
twitter: A Twitter handle.
youtube: A link to the user's YouTube channel.
        twitch: A link to the user's Twitch channel.
        guilded: A link to the user's Guilded profile.
"""
def __init__(self, data: dict):
self.facebook: Optional[str] = data["facebook"]
self.twitter: Optional[str] = data["twitter"]
self.youtube: Optional[str] = data["youtube"]
self.twitch: Optional[str] = data["twitch"]
self.guilded: Optional[str] = data["guilded"]
def __repr__(self):
return f"<{self.__class__.__name__}>"
| 27.733333 | 106 | 0.652644 | 103 | 832 | 5.116505 | 0.436893 | 0.104364 | 0.142315 | 0.056926 | 0.085389 | 0.085389 | 0 | 0 | 0 | 0 | 0 | 0 | 0.240385 | 832 | 29 | 107 | 28.689655 | 0.833861 | 0.41226 | 0 | 0 | 0 | 0 | 0.13964 | 0.060811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0.1 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80bcc9aff7166b320e85d3dea2785530a717ef20 | 1,256 | py | Python | training/Toxic_CNN1_MCD.py | jsandersen/CMT | 1be6e36b9a6042386395bc654c9dd4b579e6ce6d | [
"Apache-2.0"
] | null | null | null | training/Toxic_CNN1_MCD.py | jsandersen/CMT | 1be6e36b9a6042386395bc654c9dd4b579e6ce6d | [
"Apache-2.0"
] | null | null | null | training/Toxic_CNN1_MCD.py | jsandersen/CMT | 1be6e36b9a6042386395bc654c9dd4b579e6ce6d | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
from src.datasets.toxic import Toxic
from src.models.cnn1 import getCNN1
from src.models.predict import predict_mcdropout
def build():
# config
RANDOM_STATE = 1
VOCAB_SIZE = 20000
MAX_SEQUENCE_LENGTH = 500
NUM_SPLITS = 5
SPLIT_SIZE = 10000
    BATCH_SIZE = 100
EMBEDDING_DIM = 100
NUM_EPOCHS = 100
# get data
print('load data ...')
toxic = Toxic(clean=True)
X_train, y_train, X_test, y_test, X_val, y_val = toxic.getRankedDataSplits(
vocab_size=VOCAB_SIZE,
max_sequence_length=MAX_SEQUENCE_LENGTH,
n_splits=NUM_SPLITS,
test_size=SPLIT_SIZE,
random_state=RANDOM_STATE
)
# training
models_n = []
print('train ...')
for i in range(NUM_SPLITS):
model = tf.keras.models.load_model(f'models/toxic/CNN1_BL_{i}')
models_n.append(model)
# predict
print('predict ...')
dfs = [predict_mcdropout(models_n[i], X_val, y_val) for i in range(NUM_SPLITS)]
# save
print('save predict ...')
name = 'CNN1_MCD'
i = 0
for df in dfs:
df.to_pickle(f"pickle/toxic/df_{name}_{i}.pkl")
i = i+1 | 22.035088 | 83 | 0.636146 | 178 | 1,256 | 4.235955 | 0.410112 | 0.047745 | 0.067639 | 0.05305 | 0.05305 | 0.05305 | 0 | 0 | 0 | 0 | 0 | 0.03452 | 0.261943 | 1,256 | 57 | 84 | 22.035088 | 0.778857 | 0.030255 | 0 | 0.054054 | 0 | 0 | 0.091509 | 0.044518 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.135135 | 0 | 0.162162 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80be7ef53052c878fdc38ea2f892c9d1a8f45ee3 | 1,691 | py | Python | pal.py | pfreese/py_test | bf1cb713d63259c8b6db666924b69bd101b55674 | [
"MIT"
] | null | null | null | pal.py | pfreese/py_test | bf1cb713d63259c8b6db666924b69bd101b55674 | [
"MIT"
] | null | null | null | pal.py | pfreese/py_test | bf1cb713d63259c8b6db666924b69bd101b55674 | [
"MIT"
] | null | null | null |
def isPalindrome(r):
    # True if string r reads the same forwards and backwards.
rL = len(r)
rHalf = rL // 2
for i in range(rHalf):
if r[i] != r[rL - i - 1]:
return False
return True
def longestPalindrome(s):
    # Return the longest palindromic substring of s (seed-and-expand approach).
sLen = len(s)
if sLen < 2:
return s
if sLen == 2:
if isPalindrome(s):
return s
else:
return s[0]
palindromesTs = []
    for l in range(2, 4):  # seed with palindromes of length 2 and 3
palindromesTs += [(i, l) for i in range(sLen - l + 1) if isPalindrome(s[i:(i+l)])]
if len(palindromesTs) == 0:
return s[0]
    # print(palindromesTs)  # leftover debug output, disabled
def expandIfPossible(T):
startingIdx = T[0]
palLen = T[1]
beforeIdx = startingIdx - 1
afterIdx = startingIdx + palLen
if (beforeIdx >= 0) and (afterIdx < sLen) and (s[beforeIdx] == s[afterIdx]):
return (startingIdx - 1, palLen + 2)
else:
return None
def expandPalindromesTs(pt):
expanded = [expandIfPossible(T) for T in pt]
return [e for e in expanded if e is not None]
def maxPalLen(pt):
if len(pt) == 0:
return 0
return max([T[1] for T in pt])
expandedPalindromes = expandPalindromesTs(palindromesTs)
while maxPalLen(expandedPalindromes) > maxPalLen(palindromesTs):
palindromesTs = expandedPalindromes
expandedPalindromes = expandPalindromesTs(palindromesTs)
allPalindromes = palindromesTs + expandedPalindromes
# Get the max len.
filtMaxPalLen = [T for T in allPalindromes if T[1] == maxPalLen(allPalindromes)]
startIdx = filtMaxPalLen[0][0]
palLen = filtMaxPalLen[0][1]
return s[startIdx:(startIdx + palLen)]
print(longestPalindrome("aaaa"))
| 28.183333 | 90 | 0.591957 | 199 | 1,691 | 5.030151 | 0.246231 | 0.034965 | 0.017982 | 0.021978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020322 | 0.301597 | 1,691 | 59 | 91 | 28.661017 | 0.827265 | 0.009462 | 0 | 0.166667 | 0 | 0 | 0.002392 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104167 | false | 0 | 0 | 0 | 0.354167 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80bece861972253f223e4ca2fe5fcdbcc32983b7 | 8,437 | py | Python | ignite/contrib/handlers/time_profilers.py | Patil2099/ignite | 5d01c306150345e081b41b9b623bd04a3f599448 | [
"BSD-3-Clause"
] | null | null | null | ignite/contrib/handlers/time_profilers.py | Patil2099/ignite | 5d01c306150345e081b41b9b623bd04a3f599448 | [
"BSD-3-Clause"
] | null | null | null | ignite/contrib/handlers/time_profilers.py | Patil2099/ignite | 5d01c306150345e081b41b9b623bd04a3f599448 | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
import torch
from ignite.engine import Engine, Events
from ignite.handlers import Timer
class BasicTimeProfiler(object):
def __init__(self):
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
def _reset(self, num_epochs, total_num_iters):
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters)
}
def _as_first_started(self, engine):
num_iters = engine.state.max_epochs * len(engine.state.dataloader)
self._reset(engine.state.max_epochs, num_iters)
self.event_handlers_names = {
e: [h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]]
for e in Events if e != Events.EXCEPTION_RAISED
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (), {}))
# - add the first handlers
events = [Events.EPOCH_STARTED, Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED, Events.ITERATION_COMPLETED,
Events.COMPLETED]
fmethods = [self._as_first_epoch_started, self._as_first_epoch_completed,
self._as_first_iter_started, self._as_first_iter_completed,
self._as_first_completed]
lmethods = [self._as_last_epoch_started, self._as_last_epoch_completed,
self._as_last_iter_started, self._as_last_iter_completed,
self._as_last_completed]
for e, m in zip(events, fmethods):
engine._event_handlers[e].insert(0, (m, (), {}))
for e, m in zip(events, lmethods):
engine._event_handlers[e].append((m, (), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine):
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine):
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine):
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine):
t = self._dataflow_timer.value()
i = engine.state.iteration - 1
self.dataflow_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine):
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine):
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine):
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
self._dataflow_timer.reset()
def _as_first_epoch_completed(self, engine):
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine):
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine):
self._event_handlers_timer.reset()
def _as_last_completed(self, engine):
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
        engine.remove_event_handler(self._as_last_started, Events.STARTED)
        # - remove the first and last handlers added in _as_first_started
events = [Events.EPOCH_STARTED, Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED, Events.ITERATION_COMPLETED,
Events.COMPLETED]
fmethods = [self._as_first_epoch_started, self._as_first_epoch_completed,
self._as_first_iter_started, self._as_first_iter_completed,
self._as_first_completed]
lmethods = [self._as_last_epoch_started, self._as_last_epoch_completed,
self._as_last_iter_started, self._as_last_iter_completed,
self._as_last_completed]
        for e, m in zip(events, fmethods):
            engine.remove_event_handler(m, e)
        for e, m in zip(events, lmethods):
            engine.remove_event_handler(m, e)
def attach(self, engine):
if not isinstance(engine, Engine):
raise TypeError("Argument engine should be ignite.engine.Engine, "
"but given {}".format(type(engine)))
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (), {}))
@staticmethod
def _compute_basic_stats(data):
return OrderedDict([
('min/index', (torch.min(data).item(), torch.argmin(data).item())),
('max/index', (torch.max(data).item(), torch.argmax(data).item())),
('mean', torch.mean(data).item()),
('std', torch.std(data).item()),
('total', torch.sum(data).item())
])
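    # Example of the returned layout (added): for data = torch.tensor([1., 2., 3.]),
    # _compute_basic_stats returns OrderedDict([('min/index', (1.0, 0)),
    # ('max/index', (3.0, 2)), ('mean', 2.0), ('std', 1.0), ('total', 6.0)]).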
def get_results(self):
total_eh_time = sum([sum(self.event_handlers_times[e]) for e in Events if e != Events.EXCEPTION_RAISED])
return OrderedDict([
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats",
dict([(str(e).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events if e != Events.EXCEPTION_RAISED] + [("total_time", total_eh_time)])
),
("event_handlers_names", {str(e).replace(".", "_") + "_names": v
for e, v in self.event_handlers_names.items()})
])
@staticmethod
def print_results(results):
def odict_to_str(d):
out = ""
for k, v in d.items():
out += "\t{}: {}\n".format(k, v)
return out
others = {k: odict_to_str(v) if isinstance(v, OrderedDict) else v
for k, v in results['event_handlers_stats'].items()}
others.update(results['event_handlers_names'])
output_message = """
--------------------------------------------
- Time profiling results:
--------------------------------------------
Processing function time stats (in seconds):
{processing_stats}
Dataflow time stats (in seconds):
{dataflow_stats}
Time stats of event handlers (in seconds):
- Total time spent:
\t{total_time}
- Events.STARTED:
{Events_STARTED}
Handlers names:
{Events_STARTED_names}
- Events.EPOCH_STARTED:
{Events_EPOCH_STARTED}
Handlers names:
{Events_EPOCH_STARTED_names}
- Events.ITERATION_STARTED:
{Events_ITERATION_STARTED}
Handlers names:
{Events_ITERATION_STARTED_names}
- Events.ITERATION_COMPLETED:
{Events_ITERATION_COMPLETED}
Handlers names:
{Events_ITERATION_COMPLETED_names}
- Events.EPOCH_COMPLETED:
{Events_EPOCH_COMPLETED}
Handlers names:
{Events_EPOCH_COMPLETED_names}
- Events.COMPLETED:
{Events_COMPLETED}
Handlers names:
{Events_COMPLETED_names}
""".format(processing_stats=odict_to_str(results['processing_stats']),
dataflow_stats=odict_to_str(results['dataflow_stats']),
**others)
print(output_message)
return output_message
@staticmethod
def write_results(output_path):
try:
import pandas as pd
except ImportError:
print("Need pandas to write results as files")
return
raise NotImplementedError("")
| 35.154167 | 112 | 0.644779 | 1,009 | 8,437 | 5.021804 | 0.132805 | 0.087231 | 0.080521 | 0.056444 | 0.523584 | 0.452339 | 0.402802 | 0.3657 | 0.319518 | 0.313203 | 0 | 0.001872 | 0.24037 | 8,437 | 239 | 113 | 35.301255 | 0.788735 | 0.012919 | 0 | 0.314917 | 0 | 0 | 0.142634 | 0.057078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110497 | false | 0 | 0.033149 | 0.005525 | 0.176796 | 0.016575 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80bfb4b7a7a0c88d0a0cf52ced86fbe80cb85e15 | 4,678 | py | Python | hopla/tests/test_converter.py | AGrigis/hopla | 60147969267b8bf71aec774053d33fa797e2f668 | [
"CECILL-B"
] | null | null | null | hopla/tests/test_converter.py | AGrigis/hopla | 60147969267b8bf71aec774053d33fa797e2f668 | [
"CECILL-B"
] | null | null | null | hopla/tests/test_converter.py | AGrigis/hopla | 60147969267b8bf71aec774053d33fa797e2f668 | [
"CECILL-B"
] | null | null | null | #! /usr/bin/env python
##########################################################################
# Hopla - Copyright (C) AGrigis, 2015
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import unittest
import os
import sys
# COMPATIBILITY: since python 3.3 mock is included in unittest module
python_version = sys.version_info
if python_version[:2] <= (3, 3):
import mock
else:
import unittest.mock as mock
# Hopla import
# Apparently the 'hopla' modules must be imported after coverage is started.
from hopla.converter import hopla
import hopla as root
class TestConverterHopla(unittest.TestCase):
""" Test the converter module.
"""
def setUp(self):
""" Define some parameters.
"""
self.demodir = os.path.abspath(os.path.dirname(root.__file__))
self.script = os.path.join(self.demodir, "demo", "my_ls_script.py")
def test_notlistiter_raises(self):
""" Not a list for an iterative kwargs -> raise ValueError.
"""
self.assertRaises(
ValueError, hopla, self.script, d=["dir1", "dir2"], l=[2, 3],
fbreak=False, verbose=0, hopla_iterative_kwargs=["d", "verbose"])
def test_notitersamelength_raises(self):
""" Not iterative kwargs of same length -> raise ValueError.
"""
self.assertRaises(
ValueError, hopla, self.script, d=["dir1", "dir2"], l=[2, 3],
fbreak=False, verbose=[0, 1, 0],
hopla_iterative_kwargs=["d", "verbose"])
@mock.patch("hopla.converter.scheduler")
def test_normal_execution(self, mock_scheduler):
""" Test normal execution.
"""
# Local execution
for fbreak in (True, False):
hopla(self.script, d=["dir1"], l=[2, 3], fbreak=fbreak,
verbose=[0], hopla_iterative_kwargs=["d", "verbose"],
hopla_optional=["fbreak", "verbose"])
generated_commands = mock_scheduler.call_args_list[-1][1][
"commands"]
expected_commands = [
[self.script, "-d", "dir1", "--verbose", "0", "-l", "2", "3"]]
if fbreak:
expected_commands[0].insert(5, "--fbreak")
self.assertEqual(sorted(generated_commands),
sorted(expected_commands))
for optional in (None, "some_string"):
hopla(self.script, d=["dir1"], l=[2, 3], o=optional,
verbose=[0], hopla_iterative_kwargs=["d", "verbose"],
hopla_optional=["fbreak", "verbose"])
generated_commands = mock_scheduler.call_args_list[-1][1][
"commands"]
expected_commands = [
[self.script, "-d", "dir1", "--verbose", "0", "-l", "2", "3"]]
if optional is not None:
expected_commands[0].extend(["-o", optional])
self.assertEqual(sorted(generated_commands),
sorted(expected_commands))
# Local execution with boolean iter
for fbreak in (True, False):
hopla(self.script, d=["dir1"], l=[2, 3], fbreak=[fbreak],
verbose=0, hopla_iterative_kwargs=["d", "fbreak"],
hopla_optional=["fbreak", "verbose"])
# print(mock_scheduler.call_args_list[-1][1]["commands"])
generated_commands = mock_scheduler.call_args_list[-1][1][
"commands"]
expected_commands = [
[self.script, "-d", "dir1", "-l", "2", "3", "--verbose", "0"]]
if fbreak:
expected_commands[0].insert(3, "--fbreak")
self.assertEqual(generated_commands, expected_commands)
for optional in (None, "some_string"):
hopla(self.script, d=["dir1"], l=[2, 3], o=[optional],
verbose=0, hopla_iterative_kwargs=["d", "o"],
hopla_optional=["fbreak", "verbose"])
generated_commands = mock_scheduler.call_args_list[-1][1][
"commands"]
expected_commands = [
[self.script, "-d", "dir1", "-l", "2", "3", "--verbose", "0"]]
if optional is not None:
expected_commands[0].insert(3, "-o")
expected_commands[0].insert(4, optional)
self.assertEqual(sorted(generated_commands),
sorted(expected_commands))
if __name__ == "__main__":
unittest.main()
| 42.144144 | 78 | 0.551945 | 512 | 4,678 | 4.886719 | 0.263672 | 0.083133 | 0.043965 | 0.059952 | 0.572342 | 0.569145 | 0.534373 | 0.523181 | 0.464428 | 0.410072 | 0 | 0.021578 | 0.276828 | 4,678 | 110 | 79 | 42.527273 | 0.718002 | 0.161394 | 0 | 0.533333 | 0 | 0 | 0.088692 | 0.006699 | 0 | 0 | 0 | 0 | 0.08 | 1 | 0.053333 | false | 0 | 0.093333 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80bff1f35b026d788a822b1166f2ed86dd9836a7 | 999 | py | Python | quartz_metadata/handlers/on_mint.py | dipdup-net/quartz-metadata | 78b90319359cbc641abdbbbfbf2fec59e601429b | [
"MIT"
] | null | null | null | quartz_metadata/handlers/on_mint.py | dipdup-net/quartz-metadata | 78b90319359cbc641abdbbbfbf2fec59e601429b | [
"MIT"
] | null | null | null | quartz_metadata/handlers/on_mint.py | dipdup-net/quartz-metadata | 78b90319359cbc641abdbbbfbf2fec59e601429b | [
"MIT"
] | null | null | null | from dipdup.context import HandlerContext
from dipdup.models import Transaction
from tortoise.exceptions import IntegrityError
from quartz_metadata.manager import ResolveMetadataTaskManager
from quartz_metadata.models import ResolveToken
from quartz_metadata.types.ubisoft_quartz_minter.parameter.mint import MintParameter
from quartz_metadata.types.ubisoft_quartz_minter.storage import (
UbisoftQuartzMinterStorage,
)
async def on_mint(
ctx: HandlerContext,
mint: Transaction[MintParameter, UbisoftQuartzMinterStorage],
) -> None:
contract = mint.data.target_address
token_id = mint.parameter.tokenid
token_metadata_uri = mint.storage.token_metadata_uri
try:
await ResolveToken.create(
network=ctx.datasource.network,
contract=contract,
token_id=token_id,
token_metadata_uri=token_metadata_uri,
)
except IntegrityError:
pass
await ResolveMetadataTaskManager.process_resolve_tasks(ctx)
| 31.21875 | 84 | 0.76977 | 106 | 999 | 7.037736 | 0.433962 | 0.053619 | 0.096515 | 0.061662 | 0.112601 | 0.112601 | 0.112601 | 0 | 0 | 0 | 0 | 0 | 0.176176 | 999 | 31 | 85 | 32.225806 | 0.90644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.038462 | 0.269231 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c133884608c388783cc004a5ae950066a8bd8a | 1,161 | py | Python | app/gws/lib/ows/formats/get_feature_info_response.py | gbd-consult/gbd-websuite | 7212f41081c04614fdb4641e902d4de3424da8c5 | [
"Apache-2.0"
] | 3 | 2020-07-24T10:10:18.000Z | 2022-03-16T10:22:04.000Z | app/gws/lib/ows/formats/get_feature_info_response.py | gbd-consult/gbd-websuite | 7212f41081c04614fdb4641e902d4de3424da8c5 | [
"Apache-2.0"
] | 28 | 2020-03-03T17:35:58.000Z | 2021-07-12T12:05:47.000Z | app/gws/lib/ows/formats/get_feature_info_response.py | gbd-consult/gbd-websuite | 7212f41081c04614fdb4641e902d4de3424da8c5 | [
"Apache-2.0"
] | 1 | 2021-02-22T14:32:10.000Z | 2021-02-22T14:32:10.000Z | import gws.lib.feature
import gws.lib.shape
import gws.lib.xml2
# geoserver
#
# <GetFeatureInfoResponse>
# <Layer name="....">
# <Feature id="...">
# <Attribute name="..." value="..."/>
# <Attribute name="geometry" value="wkt"/>
def parse(text, first_el, crs=None, invert_axis=None, **kwargs):
if first_el.name.lower() != 'getfeatureinforesponse':
return None
el = gws.lib.xml2.from_string(text)
fs = []
for layer in el.all('Layer'):
for feature in layer.all('Feature'):
atts = {}
for e in feature.all('Attribute'):
name = e.attr('name')
value = e.attr('value')
if gws.as_str(value).lower() != 'null':
atts[name] = value
shape = None
if 'geometry' in atts:
shape = gws.lib.shape.from_wkt(atts.pop('geometry'), crs)
fs.append(gws.lib.feature.Feature(
uid=atts.get('uid') or feature.attr('id'),
category=layer.attr('name', ''),
shape=shape,
attributes=atts
))
return fs
| 27 | 73 | 0.511628 | 130 | 1,161 | 4.523077 | 0.361538 | 0.061224 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002581 | 0.332472 | 1,161 | 42 | 74 | 27.642857 | 0.756129 | 0.153316 | 0 | 0 | 0 | 0 | 0.083077 | 0.022564 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c2a4c56b25c8d0daa6417069d865c0369c616f | 1,287 | py | Python | imagefilter/imagefilter-rank.py | martinmcbride/python-imaging-book-examples | 37e4ccf9b7b2fc3ff75b1fdb9f772de452a843b2 | [
"MIT"
] | 1 | 2021-08-22T17:09:44.000Z | 2021-08-22T17:09:44.000Z | imagefilter/imagefilter-rank.py | sthagen/python-imaging-book-examples | 2a079c5271f9849bc90a33bed6f3288142035ea7 | [
"MIT"
] | null | null | null | imagefilter/imagefilter-rank.py | sthagen/python-imaging-book-examples | 2a079c5271f9849bc90a33bed6f3288142035ea7 | [
"MIT"
] | 1 | 2021-08-22T17:09:48.000Z | 2021-08-22T17:09:48.000Z | # Author: Martin McBride
# Created: 2021-05-23
# Copyright (C) 2021, Martin McBride
# License: MIT
# Use the ranking filters.
# Create a final image with all the filters.
from PIL import Image, ImageFilter, ImageDraw, ImageFont
image = Image.open('boat-small.jpg')
min_image = image.filter(ImageFilter.MinFilter())
max_image = image.filter(ImageFilter.MaxFilter())
median_image = image.filter(ImageFilter.MedianFilter())
mode_image = image.filter(ImageFilter.ModeFilter())
rank_image = image.filter(ImageFilter.RankFilter(3, 6))
# Place the images in a grid, with captions
output_image = Image.new('RGB', (1280, 640), 'white')
draw = ImageDraw.Draw(output_image)
font = ImageFont.truetype("Arial.ttf", 20)
x, y = 0, 0
draw.text((x+10, y+285), "Min", font=font, fill=0)
output_image.paste(min_image, (x, y))
x, y = 430, 0
draw.text((x+10, y+285), "Max", font=font, fill=0)
output_image.paste(max_image, (x, y))
x, y = 860, 0
draw.text((x+10, y+285), "Median", font=font, fill=0)
output_image.paste(median_image, (x, y))
x, y = 0, 320
draw.text((x+10, y+285), "Mode", font=font, fill=0)
output_image.paste(mode_image, (x, y))
x, y = 430, 320
draw.text((x+10, y+285), "Rank 10", font=font, fill=0)
output_image.paste(rank_image, (x, y))
output_image.save('imagefilter-rank.jpg')
| 29.25 | 56 | 0.706294 | 215 | 1,287 | 4.144186 | 0.339535 | 0.022447 | 0.089787 | 0.151515 | 0.304153 | 0.283951 | 0.257015 | 0 | 0 | 0 | 0 | 0.066313 | 0.121212 | 1,287 | 43 | 57 | 29.930233 | 0.721485 | 0.156177 | 0 | 0 | 0 | 0 | 0.068646 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c334bb39045d805035eb994c6817b18dd7c10e | 4,974 | py | Python | prepare_training_dataset.py | Koziev/masked_np_language_model | b2173682adb77b424ffa192f3030d8c8e78e88e2 | [
"CC0-1.0"
] | null | null | null | prepare_training_dataset.py | Koziev/masked_np_language_model | b2173682adb77b424ffa192f3030d8c8e78e88e2 | [
"CC0-1.0"
] | 1 | 2022-03-04T14:48:02.000Z | 2022-03-04T15:21:37.000Z | prepare_training_dataset.py | Koziev/masked_np_language_model | b2173682adb77b424ffa192f3030d8c8e78e88e2 | [
"CC0-1.0"
] | null | null | null | """
Prepare a dataset for fine-tuning ruT5 and ruGPT so that the model can fill NPs into sentences.
Uses raw (unannotated) text and the UDPipe syntactic parser to extract noun phrases.
NOTE: various local corpora are used here that I do not publish, for several reasons.
Still, I see no problem with using any other corpora instead. See the read_corpus1 function.
"""
import glob
import io
import os
import random
import pyconll
from ufal.udpipe import Model, Pipeline, ProcessingError
import extractors
def read_corpus1():
"""
    Read paragraphs from one large corpus.
"""
with io.open('/home/inkoziev/corpora/Corpus/Raw/ru/text_blocks.txt', 'r', encoding='utf-8') as rdr:
for line in rdr:
            # Yields a paragraph of several sentences; UDPipe will do the segmentation.
yield line.strip()
def read_corpora2():
"""
    Read sentences from line-by-line corpora stored in several files.
"""
fnames = []
dir1 = '/home/inkoziev/polygon/chatbot/data/SENTx'
for filename in glob.iglob(dir1 + '/*.txt'):
fnames.append(os.path.join(dir1, filename))
dir2 = '/home/inkoziev/polygon/chatbot/data'
for filename in ['facts5.txt', 'facts6.txt', 'facts7.txt', 'facts8.txt']:
fnames.append(os.path.join(dir2, filename))
sents = set()
    # Add premises from the chatbot QA dataset
print('Loading pqa_all.dat')
with io.open('/home/inkoziev/polygon/chatbot/tmp/pqa_all.dat', 'r', encoding='utf-8') as rdr:
lines = []
for line in rdr:
s = line.strip()
if s:
lines.append(s)
else:
for premise in lines[:-2]:
sents.add(premise)
lines.clear()
for i, p in enumerate(fnames, start=1):
print('Loading {}/{} file="{}"...'.format(i, len(fnames), p))
with io.open(p, 'r', encoding='utf-8') as rdr:
for line in rdr:
sents.add(line.strip())
sents = sorted(sents, key=lambda z: random.random())
return sents
def read_debug_corpus():
return ['кошка хочет съесть мышку']
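# --- Format of the generated samples (added note) ---
# For a sentence "w1 w2 w3 w4" where the parser extracts the constituent "w3 w4"
# (only constituents of 2-4 tokens are kept), the lines written below look like:
#   T5:  "w1 w2 <extra_id_0>\t<extra_id_0>w3 w4"
#   GPT: "<s>w1 w2 [<c_type>] # w3 w4</s>"
# where <c_type> is the constituent label returned by extractors.extract_constituents.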
if __name__ == '__main__':
    # A ready-made UDPipe model can be downloaded at https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3131
model = Model.load('/home/inkoziev/polygon/GramEval2020/tmp/udpipe_syntagrus.model')
pipeline = Pipeline(model, 'tokenize', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')
udp_error = ProcessingError()
print('Start parsing...')
line_count = 0
sample_count = 0
with io.open('./data/t5_dataset.txt', 'w', encoding='utf-8') as wrt_t5, \
io.open('./data/gpt_dataset.txt', 'w', encoding='utf-8') as wrt_gpt:
#for line in read_corpus1():
for line in read_corpora2():
#for line in read_debug_corpus():
line_count += 1
if 0 == (line_count % 10000):
                # Show progress from time to time.
print('{} lines, {} samples'.format(line_count, sample_count))
if sample_count >= 100000:
                # Limit the size of the training dataset
break
            # Run syntactic parsing of the next sentence
processed = pipeline.process(line, udp_error)
parsed_data = pyconll.load_from_string(processed)
for parsing in parsed_data:
                if len(parsing) < 15:  # keep only sentences shorter than 15 tokens
for c_type, c_tokens in extractors.extract_constituents(parsing):
                        if 1 < len(c_tokens) < 5:  # skip single-token and overly long constituents
                            # Collect the input context tokens, replacing the c_tokens span with a single <extra_id_0>
c_ids = [t.id for t in c_tokens]
input_tokens = []
for t in parsing:
if t.id == c_tokens[0].id:
                                    # First token of the NP
input_tokens.append('<extra_id_0>')
elif t.id in c_ids:
                                    # Skip the second and subsequent tokens of the constituent
pass
else:
input_tokens.append(t.form)
input_text = ' '.join(input_tokens)
                            # sample for T5
output_text = '<extra_id_0>' + ' '.join(t.form for t in c_tokens)
wrt_t5.write('{}\t{}\n'.format(input_text, output_text))
                            # sample for GPT
wrt_gpt.write('<s>{} # {}</s>\n'.format(input_text.replace('<extra_id_0>', '[{}]'.format(c_type)), ' '.join(t.form for t in c_tokens)))
sample_count += 1
| 40.112903 | 163 | 0.567752 | 588 | 4,974 | 4.683673 | 0.421769 | 0.017792 | 0.019608 | 0.025418 | 0.125272 | 0.082789 | 0.058097 | 0.058097 | 0.021786 | 0.021786 | 0 | 0.02 | 0.326498 | 4,974 | 123 | 164 | 40.439024 | 0.80209 | 0.237033 | 0 | 0.066667 | 0 | 0 | 0.146674 | 0.074539 | 0.013333 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0.013333 | 0.093333 | 0.013333 | 0.16 | 0.053333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c3575c2734a1ea3e2894cddf66ae5e01537fa7 | 601 | py | Python | server/loaddata.py | deb17/nearby-places | 0d05f888f3c90cd021c67d446bc16ccb59efc8bc | [
"MIT"
] | null | null | null | server/loaddata.py | deb17/nearby-places | 0d05f888f3c90cd021c67d446bc16ccb59efc8bc | [
"MIT"
] | 6 | 2021-03-09T13:19:32.000Z | 2022-02-26T15:52:16.000Z | server/loaddata.py | deb17/nearby-places | 0d05f888f3c90cd021c67d446bc16ccb59efc8bc | [
"MIT"
] | null | null | null | import csv
from app import db
from app.models import Feature
def process_row(row):
key, value = row[0], row[1]
if '/' in value:
values = [val.strip() for val in value.split('/')]
for v in values:
f = Feature(key=key, value=v)
db.session.add(f)
db.session.commit()
else:
f = Feature(key=key, value=value)
db.session.add(f)
db.session.commit()
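# Example (added): a CSV row ("color", "red / blue") creates two Feature records,
# Feature(key="color", value="red") and Feature(key="color", value="blue");
# a row whose value contains no "/" creates a single Feature record.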
if __name__ == '__main__':
with open('features.csv', newline='') as infile:
reader = csv.reader(infile)
for row in reader:
process_row(row)
| 25.041667 | 58 | 0.567388 | 84 | 601 | 3.940476 | 0.428571 | 0.108761 | 0.07855 | 0.084592 | 0.283988 | 0.169184 | 0.169184 | 0 | 0 | 0 | 0 | 0.004762 | 0.301165 | 601 | 23 | 59 | 26.130435 | 0.783333 | 0 | 0 | 0.2 | 0 | 0 | 0.036606 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c8628f774c95ee9df7395733a5d80589d0278f | 2,234 | py | Python | detection_ctpn/utils/tf_utils.py | EuphoriaYan/Chinese-ancient-book-recognition-HSK | 865736d16389037f555f0eea7ec6c4ab7e4319c9 | [
"Apache-2.0"
] | null | null | null | detection_ctpn/utils/tf_utils.py | EuphoriaYan/Chinese-ancient-book-recognition-HSK | 865736d16389037f555f0eea7ec6c4ab7e4319c9 | [
"Apache-2.0"
] | null | null | null | detection_ctpn/utils/tf_utils.py | EuphoriaYan/Chinese-ancient-book-recognition-HSK | 865736d16389037f555f0eea7ec6c4ab7e4319c9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File Name: tf_utils
   Description : tensorflow utility functions
Author : mick.yi
date: 2019/3/13
"""
import tensorflow as tf
def pad_to_fixed_size(input_tensor, fixed_size):
"""padding到固定长度, 在第二维度末位增加一个padding_flag, no_pad:1, pad:0.
Parameter:
input_tensor: 二维张量
"""
input_size = tf.shape(input_tensor)[0]
x = tf.pad(input_tensor, [[0, 0], [0, 1]], mode='CONSTANT', constant_values=1)
padding_size = tf.maximum(0, fixed_size - input_size)
x = tf.pad(x, [[0, padding_size], [0, 0]], mode='CONSTANT', constant_values=0) # padding
return x[:fixed_size]
def remove_pad(input_tensor):
"""no_pad:1, pad:0; Be in order."""
pad_tag = input_tensor[..., -1]
real_size = tf.cast(tf.reduce_sum(pad_tag), tf.int32)
return input_tensor[:real_size, :-1]
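# --- Illustrative round trip (added): pad_to_fixed_size followed by remove_pad ---
# x = tf.constant([[1., 2.], [3., 4.]])  # 2 rows of real data
# padded = pad_to_fixed_size(x, 4)       # shape (4, 3); the last column is the pad flag
# # padded == [[1., 2., 1.], [3., 4., 1.], [0., 0., 0.], [0., 0., 0.]]
# restored = remove_pad(padded)          # shape (2, 2), equal to x again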
def clip_boxes(boxes, window):
"""
    Clip boxes to the given window.
    :param boxes: box coordinates, [N,(y1,x1,y2,x2)]
    :param window: window coordinates, [(y1,x1,y2,x2)]
:return:
"""
wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)  # split keeps the rank unchanged
y1 = tf.maximum(tf.minimum(y1, wy2), wy1) # wy1<=y1<=wy2
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped_boxes = tf.concat([y1, x1, y2, x2], axis=1, name='clipped_boxes')
    # clipped_boxes.set_shape([boxes.shape[0], 4])
return clipped_boxes
def apply_regress(deltas, anchors):
"""
    Apply regression targets (deltas) to the boxes.
    :param deltas: regression targets, [N,(dy, dx, dh, dw)]
:param anchors: anchor boxes[N,(y1,x1,y2,x2)]
:return:
"""
    # heights and widths
h = anchors[:, 2] - anchors[:, 0]
w = anchors[:, 3] - anchors[:, 1]
    # center coordinates
cy = (anchors[:, 2] + anchors[:, 0]) * 0.5
cx = (anchors[:, 3] + anchors[:, 1]) * 0.5
    # regression scaling coefficients
deltas *= tf.constant([0.1, 0.1, 0.2, 0.2])
dy, dx, dh, dw = deltas[:, 0], deltas[:, 1], deltas[:, 2], deltas[:, 3]
    # regress the center coordinates
cy += dy * h
cx += dx * w
    # regress the heights and widths
h *= tf.exp(dh)
w *= tf.exp(dw)
    # convert back to y1,x1,y2,x2
y1 = cy - h * 0.5
x1 = cx - w * 0.5
y2 = cy + h * 0.5
x2 = cx + w * 0.5
return tf.stack([y1, x1, y2, x2], axis=1)
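# --- Illustrative example for apply_regress (added) ---
# With a single anchor (0, 0, 10, 10) and zero deltas the box is unchanged:
#   apply_regress(tf.zeros((1, 4)), tf.constant([[0., 0., 10., 10.]]))
#   # -> [[0., 0., 10., 10.]]
# Non-zero deltas are first scaled by (0.1, 0.1, 0.2, 0.2), then shift the center
# by (dy * h, dx * w) and scale the height/width by exp(dh), exp(dw).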
| 25.976744 | 93 | 0.562668 | 346 | 2,234 | 3.531792 | 0.289017 | 0.063011 | 0.03437 | 0.03928 | 0.06874 | 0.021277 | 0 | 0 | 0 | 0 | 0 | 0.072196 | 0.249776 | 2,234 | 85 | 94 | 26.282353 | 0.656921 | 0.252014 | 0 | 0 | 0 | 0 | 0.01859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.027778 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c94708b0f74b66e6d49ed413132347b371694e | 291 | py | Python | InterviewBit/Scripting/TransformCSV.py | CRAZYGEEKS04/competitive-programming-1 | f27b8a718761b7bfeb8ff9e294398ca1a294cb5d | [
"MIT"
] | 2 | 2022-02-08T12:37:41.000Z | 2022-03-09T03:48:56.000Z | InterviewBit/Scripting/TransformCSV.py | gauravsingh58/competitive-programming | fa5548f435cdf2aa059e1d6ab733885790c6a592 | [
"MIT"
] | 1 | 2020-10-10T16:14:54.000Z | 2020-10-10T16:14:54.000Z | InterviewBit/Scripting/TransformCSV.py | gauravsingh58/competitive-programming | fa5548f435cdf2aa059e1d6ab733885790c6a592 | [
"MIT"
] | 2 | 2021-01-23T14:35:48.000Z | 2021-03-15T05:04:24.000Z | while True :
try :
text = input()
arr = text.split(',')
for i in range(len(arr)) :
if i == 4 :
continue
if i == 6 :
print("+", end = "")
print(arr[4], end = "-")
print(arr[6], end = "")
else :
print(arr[i], end = ",")
print()
except EOFError :
break
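# Editor's note (inferred behavior, example values assumed): each input line is
# echoed field-by-field with trailing commas, except that field 4 is dropped
# and re-emitted inside field 6 as "+<field4>-<field6>". For instance:
#   input : name,a,b,c,91,d,9812345678
#   output: name,a,b,c,d,+91-9812345678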
| 17.117647 | 28 | 0.474227 | 40 | 291 | 3.45 | 0.55 | 0.173913 | 0.15942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02 | 0.312715 | 291 | 16 | 29 | 18.1875 | 0.67 | 0 | 0 | 0 | 0 | 0 | 0.013746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.3125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80c9f68534b6e93dc236ef38e36dca90fb996522 | 15,621 | py | Python | response_model/python/metric_learning/metric_eval.py | googlearchive/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
] | 1 | 2018-09-18T16:47:09.000Z | 2018-09-18T16:47:09.000Z | response_model/python/metric_learning/metric_eval.py | google/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
] | null | null | null | response_model/python/metric_learning/metric_eval.py | google/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
] | 1 | 2022-01-12T12:44:17.000Z | 2022-01-12T12:44:17.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r""""Run analyses on learnt metric/score function.
We load a learnt metric and test responses which are repeated
presentations of a short stimulus, and perform various analyses such as:
* Accuracy of triplet ordering.
* Precision recall analysis of triplet ordering.
* Evaluating clustering of responses generated due to same stimulus.
* Retrieval of nearest responses in training data and
using it to decode the stimulus corresponding to test responses.
The output of all the analyses is stored in a pickle file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import pickle
import numpy as np
import tensorflow as tf
from absl import app
from absl import gfile
import sklearn
import sklearn.manifold as manifold
import retina.response_model.python.metric_learning.analyse_metric as analyse
import retina.response_model.python.metric_learning.config as config
import retina.response_model.python.metric_learning.data_util as du
from tensorflow.python.profiler import PrintModelAnalysis
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=()):
# set random seed
np.random.seed(121)
print('random seed reset')
# Get details of stored model.
model_savepath, model_filename = config.get_filepaths()
# Load responses to two trials of long white noise.
data_wn = du.DataUtilsMetric(os.path.join(FLAGS.data_path, FLAGS.data_test))
# Quadratic score function.
with tf.Session() as sess:
# Define and restore/initialize the model.
tf.logging.info('Model : %s ' % FLAGS.model)
met = config.get_model(sess, model_savepath, model_filename,
data_wn, True)
print('IS_TRAINING = TRUE!!! ')
tf.logging.info('IS_TRAINING = TRUE!!! ')
PrintModelAnalysis(tf.get_default_graph())
# get triplets
# triplet A
outputs = data_wn.get_triplets(batch_size=FLAGS.batch_size_test,
time_window=FLAGS.time_window)
anchor_test, pos_test, neg_test, _, _, _ = outputs
triplet_a = (anchor_test, pos_test, neg_test)
# triplet B
outputs = data_wn.get_tripletsB(batch_size=FLAGS.batch_size_test,
time_window=FLAGS.time_window)
anchor_test, pos_test, neg_test, _, _, _ = outputs
triplet_b = (anchor_test, pos_test, neg_test)
triplets = [triplet_a, triplet_b]
triplet_labels = ['triplet A', 'triplet B']
analysis_results = {} # collect analysis results in a dictionary
# 1. Plot distances between positive and negative pairs.
# analyse.plot_pos_neg_distances(met, anchor_test, pos_test, neg_test)
# tf.logging.info('Distances plotted')
# 2. Accuracy of triplet orderings - fraction of triplets where
# distance with positive is smaller than distance with negative.
triplet_dict = {}
for iitriplet, itriplet in enumerate(triplets):
dist_pos, dist_neg, accuracy = analyse.compute_distances(met, *itriplet)
dist_analysis = {'pos': dist_pos,
'neg': dist_neg,
'accuracy': accuracy}
triplet_dict.update({triplet_labels[iitriplet]: dist_analysis})
analysis_results.update({'distances': triplet_dict})
tf.logging.info('Accuracy computed')
# 3. Precision-Recall analysis : declare positive if s(x,y)<t and
# negative otherwise. Vary threshold t, and plot precision-recall and
# ROC curves.
triplet_dict = {}
for iitriplet, itriplet in enumerate(triplets):
output = analyse.precision_recall(met, *itriplet, toplot=False)
precision_log, recall_log, f1_log, fpr_log, tpr_log, pr_data = output
pr = {'precision': precision_log, 'recall': recall_log,
'pr_data': pr_data}
roc = {'TPR': tpr_log, 'FPR': fpr_log}
pr_results = {'PR': pr, 'F1': f1_log, 'ROC': roc}
triplet_dict.update({triplet_labels[iitriplet]: pr_results})
analysis_results.update({'PR_analysis': triplet_dict})
tf.logging.info('Precision Recall, F1 score and ROC curves computed')
# 4. Clustering analysis: How well clustered are responses for a stimulus?
# Get all trials for a few (1000) stimuli and compute
# distances between all pairs of points.
# See how many of responses generated by same stimulus are actually
# near to each other.
n_tests = 10
p_log = []
r_log = []
s_log = []
resp_log = []
dist_log = []
embedding_log = []
for itest in range(n_tests):
n_stims = 10 # previously 100
tf.logging.info('Number of random samples is : %d' % n_stims)
resp_fcn = data_wn.get_response_all_trials
resp_all_trials, stim_id = resp_fcn(n_stims, FLAGS.time_window,
random_seed=itest)
# TODO(bhaishahster) : Remove duplicates from resp_all_trials
distance_pairs = analyse.get_pairwise_distances(met, resp_all_trials)
k_log = [1, 2, 3, 4, 5, 10, 15, 20, 50, 75, 100, 200, 300, 400, 500]
precision_log = []
recall_log = []
for k in k_log:
precision, recall = analyse.topK_retrieval(distance_pairs, k, stim_id)
precision_log += [precision]
recall_log += [recall]
p_log += [precision_log]
r_log += [recall_log]
s_log += [stim_id]
resp_log += [resp_all_trials]
dist_log += [distance_pairs]
#tf.logging.info('Getting 2D t-SNE embedding')
#model = manifold.TSNE(n_components=2)
#tSNE_embedding = model.fit_transform(distance_pairs)
#embedding_log += [tSNE_embedding]
all_trials = {'distances': dist_log, 'K': k_log,
'precision': p_log,
'recall': r_log,
'probe_stim_idx': s_log, 'probes': resp_log,
'embedding': embedding_log}
analysis_results_clustering = {'all_trials': all_trials}
pickle_file_clustering = (os.path.join(model_savepath, model_filename)
+ '_' + FLAGS.data_test +
'_analysis_clustering.pkl')
pickle.dump(analysis_results_clustering, gfile.Open(pickle_file_clustering, 'w'))
tf.logging.info('Clustering analysis done.')
'''
  # sample few/all repeats of stimuli which are continuous.
repeats = data_wn.get_repeats()
n_samples_max = 10
samples = np.random.randint(0, repeats.shape[0],
np.minimum(n_samples_max, repeats.shape[0]))
n_start_times = 5
time_window = 15
resps_cont = np.zeros((n_start_times, n_samples_max,
time_window, repeats.shape[-1]))
from IPython import embed; embed()
for istart in range(n_start_times):
start_tm = np.random.randint(repeats.shape[1] - time_window)
resps_cont[istart, :, :, :] = repeats[samples, start_tm:
start_tm+time_window, :]
resps_cont_2d = np.reshape(resps_cont, [-1, resps_cont.shape[-1]])
resps_cont_3d = np.expand_dims(resps_cont_2d, 2)
distances_cont_resp = analyse.get_pairwise_distances(met, resps_cont_3d)
n_components = 2
model = manifold.TSNE(n_components=n_components)
ts = model.fit_transform(distances_cont_resp)
tts = np.reshape(ts, [n_start_times, n_samples_max,
time_window, n_components])
from IPython import embed; embed()
plt.figure()
for istart in [1]: # range(n_start_times):
for isample in range(n_samples_max):
pts = tts[istart, isample, :, :]
plt.plot(pts[:, 0], pts[:, 1])
plt.show()
'''
# 5. Store the parameters of the score function.
score_params = met.get_parameters()
analysis_results.update({'score_params': score_params})
tf.logging.info('Got interesting parameters of score')
    # 6. Retrieval analysis on training data.
# Retrieve the nearest responses in training data for a probe test response.
# Load training data.
# data_wn_train = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
# FLAGS.data_train))
#
# out_data = data_wn_train.get_all_responses(FLAGS.time_window)
# train_all_resp, train_stim_time = out_data
#
    # # Get a few test stimuli. Here we use all repeats of a few stimuli.
# n_stims = 100
# resp_all_trials, stim_id = data_wn.get_response_all_trials(n_stims,
# FLAGS.time_window)
# k = 1000
# retrieved, retrieved_stim = analyse.topK_retrieval_probes(train_all_resp,
# train_stim_time,
# resp_all_trials,
# k, met)
# retrieval_dict = {'probe': resp_all_trials, 'probe_stim_idx': stim_id,
# 'retrieved': retrieved,
# 'retrieved_stim_idx': retrieved_stim}
# analysis_results.update({'retrieval': retrieval_dict})
# tf.logging.info('Retrieved nearest points in training data'
# ' for some probes in test data')
# TODO(bhaishahster) : Decode stimulus using retrieved responses.
# 7. Learn encoding model.
# Learn mapping from stimulus to response.
# from IPython import embed; embed()
'''
data_wn_train = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
'example_long_wn_2rep_'
'ON_OFF_with_stim.mat'))
data_wn_test = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
'example_wn_30reps_ON_'
'OFF_with_stimulus.mat'))
stimulus_test = data_wn_test.get_stimulus()
response_test = data_wn_test.get_repeats()
stimulus = data_wn_train.get_stimulus()
response = data_wn_train.get_repeats()
ttf = data_wn_train.ttf[::-1]
encoding_fcn = encoding_model.learn_encoding_model_ln
# Initialize ttf, RF using ttf and scale ttf to match firing rate
RF_np, ttf_np, model = encoding_fcn(sess, met, stimulus, response, ttf_in=ttf,
lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_all = {'RF': RF_np, 'ttf': ttf,
'firing_rate_test': firing_rate_pred}
# Initialize ttf and do no other initializations
RF_np_noinit, ttf_np_noinit, model = encoding_fcn(sess,met, stimulus, response,
ttf_in=ttf,
initialize_RF_using_ttf=False,
scale_ttf=False, lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_only_ttf = {'RF': RF_np_noinit, 'ttf': ttf_np_noinit,
'firing_rate_test': firing_rate_pred}
# Initialize ttf and do no other initializations
RF_np_noinit2, ttf_np_noinit2, model = encoding_fcn(sess, met, stimulus, response,
ttf_in=None,
initialize_RF_using_ttf=False,
scale_ttf=False, lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_none = {'RF': RF_np_noinit2, 'ttf': ttf_np_noinit2,
'firing_rate_test': firing_rate_pred}
encoding_models = {'Init_all': initialize_all,
'Init_ttf': initialize_only_ttf,
'Init_none': initialize_none,
'responses_test': response_test}
analysis_results.update({'Encoding_models': encoding_models})
'''
# 8. Is similarity in images implicitly learnt in the metric ?
# Reconstruction done in colab notebook
'''
class StimulusMetric(object):
"""Compute MSE between stimuli."""
def get_distance(self, in1, in2):
return np.sqrt(np.sum(np.sum((in1 - in2)**2, 2), 1))
# TODO(bhaishahster) : Filtering by time is remaining!
stimuli_met = StimulusMetric()
stim_distance, resp_distance, times, responses = analyse.compare_stimulus_score_similarity(data_wn, stimuli_met,
met)
compare_stim_mse_resp_met = {'stimulus_mse': stim_distance,
'response_metric': resp_distance,
'times': times,
'response_pairs': responses}
analysis_results.update({'perception': compare_stim_mse_resp_met})
'''
# 9. Retrieve nearest responses from ALL possible response patterns
# Retrieve the nearest responses in training data for a probe test response.
'''
import itertools
lst = list(map(list, itertools.product([0, 1], repeat=data_wn.n_cells)))
all_resp = np.array(lst)
all_resp = np.expand_dims(all_resp, 2)
  # Get a few test stimuli. Here we use all repeats of a few stimuli.
n_stims = 100
probe_responses, stim_id = data_wn.get_response_all_trials(n_stims,
FLAGS.time_window)
distances_corpus = analyse.compute_all_distances(all_resp, probe_responses,
met)
retrieval_dict = {'probe': probe_responses, 'probe_stim_idx': stim_id,
'corpus': all_resp,
'distance_corpus': distances_corpus}
analysis_results.update({'retrieval_ALL_responses': retrieval_dict})
tf.logging.info('Distance of probe to ALL possible response patterns')
'''
# 10. Get embedding for all possible responses,
# only if there are less than 15 cells
if data_wn.n_cells < 15:
import itertools
lst = list(map(list, itertools.product([0, 1], repeat=data_wn.n_cells)))
all_resp = np.expand_dims(np.array(lst), 2) # use time_window of 1.
all_resp_embedding = met.get_embedding(all_resp)
analysis_results.update({'all_resp_embedding': all_resp_embedding})
# save analysis in a pickle file
# from IPython import embed; embed()
pickle_file = (os.path.join(model_savepath, model_filename) + '_' +
FLAGS.data_test +
'_analysis.pkl')
pickle.dump(analysis_results, gfile.Open(pickle_file, 'w'))
# pickle.dump(analysis_results, file_io.FileIO(pickle_file, 'w'))
tf.logging.info('File: ' + pickle_file)
tf.logging.info('Analysis results saved')
print('File: ' + pickle_file)
if __name__ == '__main__':
app.run(main)
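# Editor's sketch (an assumption, not code from the original module): the
# "accuracy of triplet ordering" analysed above can be computed as the fraction
# of triplets whose anchor-positive distance beats the anchor-negative one:
#
#   def triplet_accuracy(dist_pos, dist_neg):
#       return float(np.mean(np.asarray(dist_pos) < np.asarray(dist_neg)))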
| 41.991935 | 116 | 0.626272 | 1,934 | 15,621 | 4.795243 | 0.20424 | 0.013586 | 0.018223 | 0.009165 | 0.301919 | 0.233556 | 0.206168 | 0.191611 | 0.173927 | 0.16336 | 0 | 0.011724 | 0.279239 | 15,621 | 371 | 117 | 42.105121 | 0.811973 | 0.275334 | 0 | 0.069565 | 0 | 0 | 0.090115 | 0.004369 | 0 | 0 | 0 | 0.008086 | 0 | 1 | 0.008696 | false | 0 | 0.13913 | 0 | 0.147826 | 0.034783 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cd12bb7a5b93faddb12652ac494f409752a60f | 8,192 | py | Python | apps/graph.py | csgobeta/csgobetabot | 4d37b0eb166d500869d9b271d417b61e95333824 | [
"MIT"
] | 9 | 2021-01-08T05:21:38.000Z | 2021-12-10T12:35:59.000Z | apps/graph.py | csgobeta/csgobetabot | 4d37b0eb166d500869d9b271d417b61e95333824 | [
"MIT"
] | null | null | null | apps/graph.py | csgobeta/csgobetabot | 4d37b0eb166d500869d9b271d417b61e95333824 | [
"MIT"
] | 2 | 2021-01-14T21:58:46.000Z | 2022-01-23T23:21:15.000Z | import sys
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import pandas as pd
from datetime import datetime
import time
import logging
from html_telegraph_poster.upload_images import upload_image
import config
from addons import file_manager
def graph_maker():
while True:
minutes = datetime.now().minute
seconds = datetime.now().second
microseconds = datetime.now().microsecond
if minutes not in {0, 10, 20, 30, 40, 50}:
snooze = ((10 - minutes % 10) * 60) - \
(seconds + microseconds/1000000.0)
time.sleep(snooze)
else:
try:
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
player_count = cacheFile['online_player_count']
old_player_data = pd.read_csv(
config.PLAYER_CHART_FILE_PATH, parse_dates=['DateTime'])
old_player_data.drop(0, axis=0, inplace=True)
temp_player_data = pd.DataFrame([[datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S'), player_count]], columns=['DateTime', 'Players'])
new_player_data = pd.concat(
[old_player_data, temp_player_data])
new_player_data.to_csv(
config.PLAYER_CHART_FILE_PATH, index=False)
player_data = pd.read_csv(
config.PLAYER_CHART_FILE_PATH, parse_dates=['DateTime'])
sns.set_style('whitegrid')
fig, ax = plt.subplots(figsize=(10, 2.5))
ax.plot('DateTime', 'Players', data=player_data,
color='red', linewidth=.7, marker='o', markevery=[-1])
ax.fill_between(
player_data['DateTime'], player_data['Players'], 0, facecolor='red', color='red', alpha=.4)
ax.margins(x=0)
ax.grid(b=True, axis='y', linestyle='--', alpha=.3)
ax.grid(b=False, axis='x')
ax.spines['bottom'].set_position('zero')
ax.spines['bottom'].set_color('black')
ax.set_ylabel('')
ax.set_xlabel('')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left')
ax.axhline(y=0, color='none')
ax.axhline(y=1400000, color='none')
plt.yticks(ticks=[0, 250000, 500000, 750000, 1000000, 1250000])
plt.subplots_adjust(top=1, bottom=0.077, left=0, right=1)
plt.text(0.989, 0.058, '0', transform=ax.transAxes, alpha=.3)
plt.text(0.965, 0.215, '250k',
transform=ax.transAxes, alpha=.3)
plt.text(0.965, 0.377, '500k',
transform=ax.transAxes, alpha=.3)
                plt.text(0.965, 0.54, '750k', transform=ax.transAxes, alpha=.3)
plt.text(0.951, 0.705, '1 000k',
transform=ax.transAxes, alpha=.3)
plt.text(0.951, 0.865, '1 250k',
transform=ax.transAxes, alpha=.3)
plt.text(0.156, 0.874, 'Made by @csgobeta\nupd every 10 min',
ha='center', transform=ax.transAxes, color='black', size='6')
plt.close()
fig.savefig(config.GRAPH_IMG_FILE_PATH)
trigger1 = True
while trigger1:
try:
url1 = upload_image(config.GRAPH_IMG_FILE_PATH)
if url1.startswith('http'):
trigger1 = False
except:
pass
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
if cacheFile['graph_url'] != url1:
file_manager.updateJson(
config.CACHE_FILE_PATH, url1, cache_key_list[22])
except Exception as e:
print(f' - Error:\n{e}\n')
time.sleep(70)
try:
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
dev_count = cacheFile['dev_player_count']
old_dev_data = pd.read_csv(
config.DEV_CHART_FILE_PATH, parse_dates=['DateTime'])
old_dev_data.drop(0, axis=0, inplace=True)
temp_dev_data = pd.DataFrame([[datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S'), dev_count]], columns=['DateTime', 'Devs'])
new_dev_data = pd.concat([old_dev_data, temp_dev_data])
new_dev_data.to_csv(config.DEV_CHART_FILE_PATH, index=False)
dev_data = pd.read_csv(
config.DEV_CHART_FILE_PATH, parse_dates=['DateTime'])
sns.set_style('whitegrid')
fig2, ax = plt.subplots(figsize=(10, 2.5))
ax.plot('DateTime', 'Devs', data=dev_data, color='red',
linewidth=.7, marker='o', markevery=[-1])
ax.fill_between(
dev_data['DateTime'], dev_data['Devs'], 0, facecolor='red', color='red', alpha=.4)
ax.margins(x=0)
ax.grid(b=True, axis='y', linestyle='--', alpha=.3)
ax.grid(b=False, axis='x')
ax.spines['bottom'].set_position('zero')
ax.spines['bottom'].set_color('black')
ax.set_ylabel('')
ax.set_xlabel('')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left')
ax.axhline(y=0, color='none')
ax.axhline(y=6, color='none')
plt.yticks(ticks=[0, 1, 2, 3, 4, 5])
plt.subplots_adjust(top=1, bottom=0.077, left=0, right=1)
plt.text(0.989, 0.059, '0', transform=ax.transAxes, alpha=.3)
plt.text(0.989, 0.215, '1', transform=ax.transAxes, alpha=.3)
plt.text(0.989, 0.368, '2', transform=ax.transAxes, alpha=.3)
plt.text(0.989, 0.526, '3', transform=ax.transAxes, alpha=.3)
plt.text(0.988, 0.670, '4', transform=ax.transAxes, alpha=.3)
plt.text(0.989, 0.821, '5', transform=ax.transAxes, alpha=.3)
plt.text(0.141, 0.874, 'Made by @csgobeta\nupd every 10 min',
ha='center', transform=ax.transAxes, color='black', size='6')
plt.close()
fig2.savefig(config.GRAPH2_IMG_FILE_PATH)
trigger2 = True
while trigger2:
try:
url2 = upload_image(config.GRAPH2_IMG_FILE_PATH)
if url2.startswith('http'):
trigger2 = False
except:
pass
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
if cacheFile['graph_url2'] != url2:
file_manager.updateJson(
config.CACHE_FILE_PATH, url2, cache_key_list[23])
time.sleep(70)
except Exception as e:
print(f' - Error:\n{e}\n')
time.sleep(70)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG, format='%(asctime)s | %(name)s: %(message)s', datefmt='%H:%M:%S — %d/%m/%Y')
graph_maker()
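# Editor's note -- a standalone sketch of the snooze computation used above
# (same arithmetic, shown in isolation; `now` is an assumed local name):
#
#   now = datetime.now()
#   # seconds until the next 10-minute boundary (:00, :10, :20, ...)
#   snooze = ((10 - now.minute % 10) * 60) - (now.second + now.microsecond / 1e6)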
| 41.373737 | 111 | 0.521606 | 957 | 8,192 | 4.299896 | 0.241379 | 0.031106 | 0.027218 | 0.072904 | 0.667072 | 0.64034 | 0.615796 | 0.594897 | 0.564277 | 0.553584 | 0 | 0.055451 | 0.350586 | 8,192 | 197 | 112 | 41.583756 | 0.717857 | 0 | 0 | 0.43125 | 0 | 0 | 0.070068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00625 | false | 0.0125 | 0.08125 | 0 | 0.0875 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cd62ec15badfd33866992ef09a6d8c71ac2b2f | 5,443 | py | Python | vicarui/src/vicarui/analysis/missions/cassini/set_info.py | joniumGit/moons | f5f8b7e23e707c8cf7e1081c4a1c0fcc22182d85 | [
"MIT"
] | 1 | 2021-07-16T06:30:37.000Z | 2021-07-16T06:30:37.000Z | vicarui/src/vicarui/analysis/missions/cassini/set_info.py | joniumGit/moons | f5f8b7e23e707c8cf7e1081c4a1c0fcc22182d85 | [
"MIT"
] | null | null | null | vicarui/src/vicarui/analysis/missions/cassini/set_info.py | joniumGit/moons | f5f8b7e23e707c8cf7e1081c4a1c0fcc22182d85 | [
"MIT"
] | 1 | 2021-05-26T03:53:41.000Z | 2021-05-26T03:53:41.000Z | from .config import *
from .funcs import norm, target_estimate
from .helpers import ImageHelper
from ...common import load_kernels_for_image, release_kernels
from ....support import sci_2
def set_info(
image: ImageWrapper,
image_axis=None,
analysis_axis=None,
**config
):
raw = image.raw
try:
load_kernels_for_image(raw)
helper = ImageHelper(raw, **config)
config = helper.config
target, target_id = helper.target_full
utc = helper.time_utc
pa = helper.phase_angle * spice.dpr()
title = "%s FROM: %s - %s @ UTC %s \nPA=%.2f DEG" % (helper.id, CASSINI, target, utc, pa)
try:
filters: List[str] = helper['INSTRUMENT']['FILTER_NAME']
title += " Filters: " + ','.join(filters)
exposure: float = helper['INSTRUMENT']['EXPOSURE_DURATION']
title += f" Exp: {exposure / 1000:.2f}s"
number: str = helper.id
title += f" Image n: {number}"
h1 = helper.saturn_equator_offset(CASSINI_ID)
h2 = helper.saturn_equator_offset(target_id)
sun_to_rings, shadow_in_image, shadow_to_image = helper.shadow_angles
ang_xy = f'{sun_to_rings:.2f} deg'
ang_img = f'{shadow_in_image:.2f} deg'
ang_bore = f'{shadow_to_image:.2f} deg'
title += (
"\n"
fr"Target from Ring Plane: ${sci_2(h2):}\,km$ Cassini from Ring Plane: ${sci_2(h1)}\,km$"
"\n"
f"Shadow angle in Image: {ang_img}, to Image plane: {ang_bore}, to Ring: {ang_xy}"
)
except Exception as e:
log.warning("Failed to find some data", exc_info=e)
if image_axis is not None:
try:
# noinspection PyUnresolvedReferences
from matplotlib.axes import Axes
ax: Axes = image_axis
try:
from matplotlib.ticker import AutoMinorLocator
from ....support.misc import MPL_FONT_CONFIG
second_x = ax.secondary_xaxis(location=1.07, functions=helper.size_x_transforms)
second_y = ax.secondary_yaxis(location=1.07, functions=helper.size_y_transforms)
second_y.yaxis.set_minor_locator(AutoMinorLocator(10))
second_x.xaxis.set_minor_locator(AutoMinorLocator(10))
second_y.set_ylabel(
f"At {helper.size_name} intercept "
f"$(px = {sci_2(helper.size_per_px[0])},"
f" {sci_2(helper.size_per_px[1])})$ KM",
**MPL_FONT_CONFIG
)
def mod_ax(axes: Axes, vertical: bool = False, **_):
ax2 = axes.secondary_xaxis(
location=-0.22,
functions=helper.size_y_transforms if vertical else helper.size_x_transforms
)
ax2.xaxis.set_minor_locator(AutoMinorLocator(10))
analysis_axis.axes_modifier = mod_ax
except Exception as e:
log.exception("Something happened", exc_info=e)
if config[SUN_SATURN_VECTORS] or config[TARGET_ESTIMATE]:
sun_pos = helper.trpf(SUN_ID)
if helper.target_id == SATURN_ID:
saturn_pos = helper.crpf(SATURN_ID)
else:
saturn_pos = helper.trpf(SATURN_ID)
t_sun, t_saturn = (-norm(v)[0:2] for v in (sun_pos, saturn_pos))
if config[SUN_SATURN_VECTORS]:
x = 70
y = 70
sun = np.column_stack(
(
[x, y],
[
x + t_sun[0] * 60 / np.linalg.norm(t_sun),
y + t_sun[1] * 60 / np.linalg.norm(t_sun)
]
)
)
sat = np.column_stack(
(
[x, y],
[
x + t_saturn[0] * 60 / np.linalg.norm(t_saturn),
y + t_saturn[1] * 60 / np.linalg.norm(t_saturn)
]
)
)
ax.plot(*sun, label="Sun", color=SUN_COLOR, linewidth=1)
ax.plot(*sat, label="Saturn", color=SATURN_COLOR, linewidth=1)
if config[TARGET_ESTIMATE]:
x, y = target_estimate(image, helper)
log.debug(f"Estimate {x},{y}")
ax.scatter(x, y, s=16, c=TARGET_ALT_COLOR, alpha=0.65)
except ImportError as e:
log.exception("No matplotlib", exc_info=e)
except Exception as e:
log.exception("Something bad happened", exc_info=e)
return title
except Exception as e:
log.exception("Failed to load data: %s", raw.name, exc_info=e)
return "Failed to load data"
finally:
release_kernels()
__all__ = ['set_info']
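# Editor's sketch (assumption): the Sun/Saturn overlay above draws a 60-px
# segment from (x, y) along a unit direction t; the helper name is made up:
#
#   def _direction_segment(x, y, t, length=60):
#       t = np.asarray(t, dtype=float)
#       tip = np.array([x, y]) + length * t / np.linalg.norm(t)
#       return np.column_stack(([x, y], tip))  # columns: start point, end point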
| 40.924812 | 105 | 0.482271 | 587 | 5,443 | 4.250426 | 0.277683 | 0.028056 | 0.012024 | 0.028858 | 0.235271 | 0.177555 | 0.04489 | 0 | 0 | 0 | 0 | 0.019726 | 0.422561 | 5,443 | 132 | 106 | 41.234848 | 0.774101 | 0.00643 | 0 | 0.107143 | 0 | 0.017857 | 0.119127 | 0.019423 | 0.008929 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.080357 | 0 | 0.116071 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cddf253a619a7f9e2bea2ab9271db73ba0ccb6 | 2,187 | py | Python | python/tvm/contrib/msir/core/utils/info.py | Archermmt/tvm | 8b900cec1a9c3cb453e159db4d497ebeb26ed289 | [
"Apache-2.0"
] | null | null | null | python/tvm/contrib/msir/core/utils/info.py | Archermmt/tvm | 8b900cec1a9c3cb453e159db4d497ebeb26ed289 | [
"Apache-2.0"
] | null | null | null | python/tvm/contrib/msir/core/utils/info.py | Archermmt/tvm | 8b900cec1a9c3cb453e159db4d497ebeb26ed289 | [
"Apache-2.0"
] | null | null | null | import tvm
import logging
import numpy as np
from collections.abc import Iterable
from .namespace import MSIR_COLLECTION, MSIR_NAME, MSIR_TARGET
def _get_logger():
if not MSIR_COLLECTION.get(MSIR_NAME.LOGGER):
MSIR_COLLECTION.set(MSIR_NAME.LOGGER, logging.getLogger("MSIR"))
return MSIR_COLLECTION.get(MSIR_NAME.LOGGER)
def info(msg,level=0):
env_level = MSIR_COLLECTION.get(MSIR_NAME.VERBOSE_LEVEL, 0)
if level > env_level:
_get_logger().info("[LV{}]{}".format(level,msg))
def warning(msg):
_get_logger().warning(msg)
def debug(msg):
_get_logger().debug(msg)
def check_type(obj, r_type):
    assert isinstance(obj, r_type), "Object {} ({}) is not {}".format(obj, type(obj), r_type)
def check_iterable_type(obj, r_type, length=-1):
    assert isinstance(obj, Iterable), "Object {} ({}) is not iterable".format(obj, type(obj))
    assert all(isinstance(o, r_type) for o in obj), "Some elements of {} are not instances of {}".format(obj, r_type)
    if length > 0:
        assert len(obj) == length, "Object length {} mismatches the required {}".format(len(obj), length)
def get_version(target=MSIR_TARGET.MSIR):
    if target == MSIR_TARGET.TORCH:
        import torch
        version = torch.__version__
        if '+cu' in version:
            version = version.split('+cu')[0]
    elif target == MSIR_TARGET.TF:
        import tensorflow
        version = tensorflow.__version__
    else:
        raise Exception("Unexpected target " + str(target))
    return version
def _cast_array(array):
if isinstance(array,tvm.nd.NDArray):
return "tvm.ndarray", array.asnumpy()
try:
import torch
if isinstance(array, torch.Tensor):
return "torch.Tensor", array.detach().cpu().numpy()
except:
pass
assert isinstance(array, np.ndarray), "Unexpected array type " + str(type(array))
return "ndarray", array
def array_info(array):
    array_type, array = _cast_array(array)
    return "<{}> Shape:{}, Dtype:{}, Max:{:g}, Min:{:g}, Sum:{:g}".format(
        array_type, array.shape, array.dtype, array.max(), array.min(), array.sum())
def show_array(array, name="array"):
print("{}:{}".format(name, array_info(array))) | 30.375 | 141 | 0.666667 | 303 | 2,187 | 4.640264 | 0.290429 | 0.021337 | 0.02845 | 0.044808 | 0.061878 | 0.044097 | 0 | 0 | 0 | 0 | 0 | 0.002809 | 0.1861 | 2,187 | 72 | 142 | 30.375 | 0.787079 | 0 | 0 | 0.038462 | 0 | 0 | 0.126143 | 0 | 0 | 0 | 0 | 0 | 0.096154 | 1 | 0.192308 | false | 0.019231 | 0.153846 | 0 | 0.461538 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cf8406717293e85d168ad077e6680849900737 | 2,072 | py | Python | src/python/dicomifier/bruker_to_dicom/modules/series.py | DimitriPapadopoulos/dicomifier | 708e4e1c932f6411200aa010f857823dfcc495f1 | [
"CECILL-B"
] | null | null | null | src/python/dicomifier/bruker_to_dicom/modules/series.py | DimitriPapadopoulos/dicomifier | 708e4e1c932f6411200aa010f857823dfcc495f1 | [
"CECILL-B"
] | null | null | null | src/python/dicomifier/bruker_to_dicom/modules/series.py | DimitriPapadopoulos/dicomifier | 708e4e1c932f6411200aa010f857823dfcc495f1 | [
"CECILL-B"
] | null | null | null | #########################################################################
# Dicomifier - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#########################################################################
from . import cached
def _get_series_number(data_set, generator, index):
if "VisuSeriesNumber" in data_set:
series_number = int(data_set["VisuSeriesNumber"][0])
else:
# cf. ParaVision Parameters, 2.4.11.6
experiment = int(data_set["VisuExperimentNumber"][0])
processing = int(data_set["VisuProcessingNumber"][0])
series_number = (experiment * 2**16)+processing
return [series_number]
GeneralSeries = [ # PS 3.3, C.7.3.1
# Modality is added by the specific IOD converter.
# (None, "Modality", 1, lambda d,g,i: ["MR"]),
("VisuUid", "SeriesInstanceUID", 1, None),
(None, "SeriesNumber", 2, cached("__SeriesNumber")(_get_series_number)),
(
None, "SeriesDate", 3,
cached("__SeriesDate")(
lambda d,g,i: d.get("VisuSeriesDate") or d.get("VisuAcqDate"))),
(
None, "SeriesTime", 3,
cached("__SeriesTime")(
lambda d,g,i: d.get("VisuSeriesDate") or d.get("VisuAcqDate"))),
("OWNER", "PerformingPhysicianName", 3, None),
("VisuAcquisitionProtocol", "ProtocolName", 3, None),
("VisuAcquisitionProtocol", "SeriesDescription", 3, None),
(
"VisuSubjectPosition", "PatientPosition", 2,
cached("__PatientPosition")(
lambda d,g,i: [{
"Head_Supine": "HFS", "Head_Prone": "HFP",
"Head_Left" : "HFDL", "Head_Right": "HFDR",
"Foot_Supine": "FFS", "Foot_Prone": "FFP",
"Foot_Left": "FFDL", "Foot_Right": "FFDR"
}[d["VisuSubjectPosition"][0]]])),
("VisuSubjectType", "AnatomicalOrientationType", 3, None),
]
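# Editor's note -- the fallback in _get_series_number packs the experiment and
# processing numbers into a single integer (cf. ParaVision Parameters,
# 2.4.11.6); e.g. with assumed values:
#
#   experiment, processing = 7, 3
#   (experiment * 2**16) + processing   # -> 458755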
| 42.285714 | 76 | 0.56805 | 214 | 2,072 | 5.350467 | 0.518692 | 0.052402 | 0.027948 | 0.031441 | 0.076856 | 0.076856 | 0.076856 | 0.076856 | 0.076856 | 0.076856 | 0 | 0.017263 | 0.217181 | 2,072 | 48 | 77 | 43.166667 | 0.688656 | 0.188707 | 0 | 0.058824 | 0 | 0 | 0.352131 | 0.061639 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.029412 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cf9b726d141aa64359a81571dc5cc74e78eff7 | 12,365 | py | Python | TranskribusDU/tasks/TablePrototypes/DU_ABPTableSkewed_txtTOMBS_sepSIO_line.py | Transkribus/TranskribusDU | 61028ee5f5f39f435bf9c461f8073e75bca344ac | [
"BSD-3-Clause"
] | 20 | 2017-01-24T20:08:25.000Z | 2021-10-30T15:20:44.000Z | TranskribusDU/tasks/TablePrototypes/DU_ABPTableSkewed_txtTOMBS_sepSIO_line.py | Transkribus/TranskribusDU | 61028ee5f5f39f435bf9c461f8073e75bca344ac | [
"BSD-3-Clause"
] | 11 | 2017-06-27T11:41:42.000Z | 2020-10-12T04:59:25.000Z | TranskribusDU/tasks/TablePrototypes/DU_ABPTableSkewed_txtTOMBS_sepSIO_line.py | Transkribus/TranskribusDU | 61028ee5f5f39f435bf9c461f8073e75bca344ac | [
"BSD-3-Clause"
] | 5 | 2017-01-12T15:55:34.000Z | 2019-10-10T05:13:20.000Z | # -*- coding: utf-8 -*-
"""
***
Labelling is T O M B S
It depends on the distance between the baseline and its above and below valid (S) cut
Cuts are SIO
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import numpy as np
from lxml import etree
import shapely.affinity
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_ABPTableSkewed import GraphSkewedCut_H, My_FeatureDefinition_v3, NodeType_PageXml_Cut_Shape, main_command_line
from tasks.DU_ABPTableSkewed import Edge_BL
from tasks.DU_ABPTableSkewed_txtBIO_sepSIO import NodeType_BIESO_to_BIO_Shape
from xml_formats.PageXml import MultiPageXml
from util.Shape import ShapeLoader
#------------------------------------------------------------------------------------------------------
# WE add one feature for _ishort
from crf.Transformer import Transformer
import tasks.DU_ABPTableSkewed
class Block2CutLine_EdgeTransformer_qtty(Transformer):
def transform(self, lEdge):
N = 5
a = np.zeros( ( len(lEdge), 2 * N) , dtype=np.float64)
for i, edge in enumerate(lEdge):
# z = 0 if edge._type < 0 else N # _type is -1 or 1
if edge._type < 0:
z = 0
ishort = 1 if edge.len < GraphSkewedCut_H_TOMBS_lines.iCutCloseDistanceTop else 0
else:
z = N
ishort = 1 if edge.len < GraphSkewedCut_H_TOMBS_lines.iCutCloseDistanceBot else 0
a[i, z:z+N] = (1
, len(edge.B.set_support)
, edge.A._in_edge_up
, edge.A._in_edge_down
, ishort
)
# print(a[i,:].tolist())
# traceln("Block2CutLine_EdgeTransformer", a[:min(100, len(lEdge)),])
return a
tasks.DU_ABPTableSkewed.Block2CutLine_EdgeTransformer_qtty = Block2CutLine_EdgeTransformer_qtty
class Block2CutLine_FakeEdgeTransformer(Transformer):
"""
    a fake transformer that returns as many features as the union of the real ones above
"""
def transform(self, lEdge):
assert not(lEdge)
return np.zeros( ( len(lEdge), 2*8 + 2*5) , dtype=np.float64)
tasks.DU_ABPTableSkewed.Block2CutLine_FakeEdgeTransformer = Block2CutLine_FakeEdgeTransformer
#------------------------------------------------------------------------------------------------------
class GraphSkewedCut_H_TOMBS_lines(GraphSkewedCut_H):
# reflecting text baseline as a LineString
shaper_fun = ShapeLoader.node_to_SingleLine
iCutCloseDistanceTop = 45 # any block close enough become T or S
iCutCloseDistanceBot = 45 # any block close enough become B or S
@classmethod
def showClassParam(cls):
bShown = super().showClassParam()
if bShown:
#also show ours!
traceln(" - iCutCloseDistanceTop : " , cls.iCutCloseDistanceTop)
traceln(" - iCutCloseDistanceBot : " , cls.iCutCloseDistanceBot)
def addEdgeToDoc(self):
"""
        To display the graph conveniently we add new Edge elements.
        Since we change the Baseline representation, we show the new one.
"""
super().addEdgeToDoc()
for blk in self.lNode:
assert blk.type.name in ["row", "sepH"], blk.type.name
if blk.type.name == "row":
ndBaseline = blk.node.xpath(".//pc:Baseline", namespaces=self.dNS)[0]
o = self.shaper_fun(ndBaseline)
MultiPageXml.setPoints(ndBaseline, list(o.coords))
return
"""
To compute TOMBS labels, it is better to use the built graph...
"""
def parseDocLabels(self):
"""
Parse the label of the graph from the dataset, and set the node label
return the set of observed class (set of integers in N+)
"""
# WE expect I or O for text blocks!!
setSeensLabels = super().parseDocLabels()
# now look at edges to compute T M B S
# REMEMBER, we did: edge.len = dist / self.iBlockVisibility
maxLenTop = self.iCutCloseDistanceTop / self.iBlockVisibility
maxLenBot = self.iCutCloseDistanceBot / self.iBlockVisibility
# --- ASSUMPTION !!! ---
T, _O, M, B, S = 0, 1, 2, 3, 4
sepS, _sepI, _sepO = 5, 6, 7
for edge in self.lEdge:
if type(edge) == Edge_BL and edge.B.cls == sepS:
cls = edge.A.cls
if edge._type < 0: # this short edge goes up
if edge.len <= maxLenTop:
# Ok, this will be a T or B or S!
                        # which means the text block is the 1st CRF node type
# REMEMBER, we did: edge._type = -1 if blk.y_bslne >= y else +1
if cls == M:
newcls = T
elif cls == B:
newcls = S
else:
continue
edge.A.cls = newcls
setSeensLabels.add(newcls)
                else:  # this short edge goes down
if edge.len <= maxLenBot:
if cls == M:
newcls = B
elif cls == T:
newcls = S
else:
continue
edge.A.cls = newcls
setSeensLabels.add(newcls)
# traceln(self._dClsByLabel)
return setSeensLabels
class NodeType_BIESO_to_TOMBS_Shape(NodeType_BIESO_to_BIO_Shape):
"""
Convert BIESO labeling to SIOStSmSb
"""
bColumnHeader = False # ignore headers for now
dConverter = { 'B':'M',
'I':'M',
'E':'M',
'S':'M', # St Sm Sb => specific processing to get it
'O':'O',
'CH':'CH'}
def parseDocNodeLabel(self, graph_node, defaultCls=None):
"""
Parse and set the graph node label and return its class index
raise a ValueError if the label is missing while bOther was not True, or if the label is neither a valid one nor an ignored one
"""
domnode = graph_node.node
sXmlLabel = domnode.get(self.sLabelAttr)
# in case we also deal with column headers
if self.bColumnHeader and 'CH' == domnode.get("DU_header"):
sXmlLabel = 'CH'
sXmlLabel = self.dConverter[sXmlLabel]
try:
sLabel = self.dXmlLabel2Label[sXmlLabel]
except KeyError:
raise ValueError("Invalid label '%s'"
" (from @%s or @%s) in node %s"%(sXmlLabel,
self.sLabelAttr,
self.sDefaultLabel,
etree.tostring(domnode)))
# traceln(etree.tostring(domnode), sLabel)
return sLabel
class DU_ABPTableSkewedRowCutLine(DU_CRF_Task):
"""
    We will do a CRF model for a DU task,
    with the labels below.
"""
sXmlFilenamePattern = "*.mpxml" # *_du.* files are now ignored by DU_CRF_Task
iBlockVisibility = None
iLineVisibility = None
fCutHeight = None
bCutAbove = None
lRadAngle = None
#=== CONFIGURATION ====================================================================
@classmethod
def getConfiguredGraphClass(cls):
"""
In this class method, we must return a configured graph class
"""
# Textline labels
        # T O M B S (see module docstring)
lLabels_TOMBS_blk = ['T', 'O', 'M', 'B', 'S']
# Cut lines:
        # S I O: Separator, Ignore, Outside
lLabels_SIO_Cut = ['S', 'I', 'O']
#DEFINING THE CLASS OF GRAPH WE USE
DU_GRAPH = GraphSkewedCut_H_TOMBS_lines
DU_GRAPH.iBlockVisibility = cls.iBlockVisibility
DU_GRAPH.iLineVisibility = cls.iLineVisibility
DU_GRAPH.fCutHeight = cls.fCutHeight
DU_GRAPH.bCutAbove = cls.bCutAbove
DU_GRAPH.lRadAngle = cls.lRadAngle
# ROW
ntR = NodeType_BIESO_to_TOMBS_Shape("row"
, lLabels_TOMBS_blk
, None
, False
, None
)
ntR.setLabelAttribute("DU_row")
ntR.setXpathExpr( (".//pc:TextLine" #how to find the nodes
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntR)
# CUT
ntCutH = NodeType_PageXml_Cut_Shape("sepH"
, lLabels_SIO_Cut
, None
, False
, None # equiv. to: BBoxDeltaFun=lambda _: 0
)
ntCutH.setLabelAttribute("DU_type")
ntCutH.setXpathExpr( ('.//pc:CutSeparator[@orient="0"]' #how to find the nodes
# the angle attribute give the true orientation (which is near 0)
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntCutH)
DU_GRAPH.setClassicNodeTypeList( [ntR ])
return DU_GRAPH
def __init__(self, sModelName, sModelDir,
iBlockVisibility = None,
iLineVisibility = None,
fCutHeight = None,
bCutAbove = None,
lRadAngle = None,
sComment = None,
C=None, tol=None, njobs=None, max_iter=None,
inference_cache=None):
DU_ABPTableSkewedRowCutLine.iBlockVisibility = iBlockVisibility
DU_ABPTableSkewedRowCutLine.iLineVisibility = iLineVisibility
DU_ABPTableSkewedRowCutLine.fCutHeight = fCutHeight
DU_ABPTableSkewedRowCutLine.bCutAbove = True
DU_ABPTableSkewedRowCutLine.lRadAngle = lRadAngle
DU_CRF_Task.__init__(self
, sModelName, sModelDir
, dFeatureConfig = {'row_row':{}, 'row_sepH':{},
'sepH_row':{}, 'sepH_sepH':{},
'sepH':{}, 'row':{}}
, dLearnerConfig = {
'C' : .1 if C is None else C
, 'njobs' : 4 if njobs is None else njobs
, 'inference_cache' : 50 if inference_cache is None else inference_cache
#, 'tol' : .1
, 'tol' : .05 if tol is None else tol
, 'save_every' : 50 #save every 50 iterations,for warm start
, 'max_iter' : 10 if max_iter is None else max_iter
}
, sComment=sComment
#,cFeatureDefinition=FeatureDefinition_PageXml_StandardOnes_noText
,cFeatureDefinition=My_FeatureDefinition_v3
)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main_command_line(DU_ABPTableSkewedRowCutLine)
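# Editor's sketch (an assumption about the intent, not code from the project):
# parseDocLabels above relabels a text block from its BIO-derived class using
# the short edges that reach a valid ('S') cut; roughly, per block:
#
#   up_close   = some short upward edge to an S cut (len <= maxLenTop)
#   down_close = some short downward edge to an S cut (len <= maxLenBot)
#   M -> T if up_close;  B -> S if up_close
#   M -> B if down_close; T -> S if down_close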
| 39.006309 | 135 | 0.510069 | 1,213 | 12,365 | 5.065128 | 0.309975 | 0.011393 | 0.020508 | 0.016276 | 0.125163 | 0.081055 | 0.072266 | 0.072266 | 0.072266 | 0.058919 | 0 | 0.011008 | 0.390214 | 12,365 | 316 | 136 | 39.129747 | 0.803846 | 0.228629 | 0 | 0.171123 | 0 | 0 | 0.037984 | 0.003374 | 0 | 0 | 0 | 0 | 0.010695 | 1 | 0.042781 | false | 0 | 0.085562 | 0 | 0.245989 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80cfa666e310014576d03dad3c75588c0786534a | 574 | py | Python | LeetCode/2044. Count Number of Maximum Bitwise-OR Subsets/solution.py | InnoFang/oh-my-algorithms | f559dba371ce725a926725ad28d5e1c2facd0ab2 | [
"Apache-2.0"
] | 1 | 2017-03-31T15:24:01.000Z | 2017-03-31T15:24:01.000Z | LeetCode/2044. Count Number of Maximum Bitwise-OR Subsets/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | null | null | null | LeetCode/2044. Count Number of Maximum Bitwise-OR Subsets/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | null | null | null | """
111 / 111 test cases passed.
Runtime: 440 ms
Memory Usage: 14.9 MB
"""
from typing import List


class Solution:
def countMaxOrSubsets(self, nums: List[int]) -> int:
count = largest = 0
def dfs(idx, res):
nonlocal count, largest
if idx == len(nums):
if res > largest:
largest = res
count = 1
elif res == largest:
count += 1
return 0
dfs(idx + 1, res | nums[idx])
dfs(idx + 1, res)
dfs(0, 0)
return count
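# Editor's usage sketch (assumed example): nums = [3, 1] has the non-empty
# subsets {3}, {1}, {3, 1} with ORs 3, 1, 3; the maximum OR 3 occurs twice.
#   assert Solution().countMaxOrSubsets([3, 1]) == 2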
| 26.090909 | 56 | 0.444251 | 65 | 574 | 3.923077 | 0.492308 | 0.070588 | 0.054902 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064309 | 0.458188 | 574 | 21 | 57 | 27.333333 | 0.755627 | 0.114983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80d4ab9196ae0d1428afc7cb93d38dd105b47bce | 3,717 | py | Python | ikalog/utils/icon_recoginizer/weapon.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 285 | 2015-08-15T14:38:38.000Z | 2022-02-18T15:00:06.000Z | ikalog/utils/icon_recoginizer/weapon.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 323 | 2015-09-24T12:21:34.000Z | 2018-05-06T16:34:54.000Z | ikalog/utils/icon_recoginizer/weapon.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 72 | 2015-08-22T00:18:54.000Z | 2022-02-18T14:44:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cv2
import numpy as np
from ikalog.utils.icon_recoginizer import IconRecoginizer
def get_img_custom(self, img):
(h, w) = img.shape[0:2]
return img[int(h * 0.7):, int(w * 0.7):]
def max_pooling_2d(self, img, xy=(2, 2)):
x, y = xy
oh, ow = img.shape
hw = int(ow / x)
hh = int(oh / y)
img = img[:hh * y, :hw * x]
oh, ow = img.shape
img_360p = np.max(img.reshape((oh, hw, x)),
axis=2).T.reshape((hw, hh, y))
img_360p = np.max(img_360p, axis=2).T
return img_360p
def sub_average(img):
img_f = np.asarray(img, dtype=np.float32)
for i in range(img_f.shape[2]):
avg = np.average(img_f[:, :, i])
img_f[:, :, i] = (img_f[:, :, i] - avg)
img_f[:, :, i] = img_f[:, :, i] - np.amin(img_f[:, :, i])
img_f[:, :, i] = img_f[:, :, i] / np.amax(img_f[:, :, i])
img2 = np.asarray(img_f, dtype=np.uint8)
return img2
class WeaponRecoginizer(IconRecoginizer):
def extract_main_features(self, img, debug=False):
h, w = img.shape[0:2]
img_cropped = img[2:h - 4, 10:w - 3]
img_normalized = self.normalize_icon_image(img_cropped)
return img_normalized[0]
def extract_sub_features(self, img, debug=False):
laplacian_threshold = 192
img_subavg = sub_average(img)
img_gray = cv2.cvtColor(img_subavg, cv2.COLOR_BGR2GRAY)
img_gray_laplacian = cv2.Laplacian(img_gray, cv2.CV_64F)
img_laplacian_abs = cv2.convertScaleAbs(img_gray_laplacian)
a, img_laplacian_abs_thres = cv2.threshold(
img_laplacian_abs, laplacian_threshold, 255, 0)
img_gray_custom = get_img_custom(None, img_gray)
img_gray_custom = max_pooling_2d(None, img_gray, (4, 4))
return np.array(img_gray_custom, dtype=np.float32)
# Define weapon classification specific features.
def extract_features_func(self, img, debug=False):
features_main = self.extract_main_features(img)
features_sub = self.extract_sub_features(img)
features = np.append(
features_main.reshape(-1),
features_sub.reshape(-1),
)
return features
def model_filename(self):
return 'data/weapons.knn.data'
def load_model_from_file(self, model_file=None):
if model_file is None:
model_file = self.model_filename()
super(WeaponRecoginizer, self).load_model_from_file(model_file)
def save_model_to_file(self, model_file=None):
if model_file is None:
model_file = self.model_filename()
super(WeaponRecoginizer, self).save_model_to_file(model_file)
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '__instance__'):
cls.__instance__ = super(
WeaponRecoginizer, cls).__new__(cls, *args, **kwargs)
return cls.__instance__
def __init__(self, model_file=None):
if hasattr(self, 'trained') and self.trained:
return
super(WeaponRecoginizer, self).__init__(k=5)
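# Editor's sketch (not part of the original module): max_pooling_2d above is a
# reshape-based 2-D max pooling; note the helpers are module-level despite the
# `self` parameter, hence the `None` argument. With an assumed toy input:
#
#   a = np.arange(16).reshape(4, 4)
#   max_pooling_2d(None, a, (2, 2))   # -> [[ 5,  7], [13, 15]]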
| 31.235294 | 75 | 0.641377 | 532 | 3,717 | 4.244361 | 0.327068 | 0.021258 | 0.019929 | 0.017715 | 0.153676 | 0.109832 | 0.099203 | 0.093003 | 0.093003 | 0.079717 | 0 | 0.02546 | 0.239171 | 3,717 | 118 | 76 | 31.5 | 0.772984 | 0.181598 | 0 | 0.085714 | 0 | 0 | 0.013236 | 0.006949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157143 | false | 0 | 0.042857 | 0.014286 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80d598d25cb928d4e75ce7a8868f16d8dbc96650 | 11,867 | py | Python | code/pytorch/utils/mujoco_solver.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | 1 | 2021-11-22T07:45:28.000Z | 2021-11-22T07:45:28.000Z | code/pytorch/utils/mujoco_solver.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | code/pytorch/utils/mujoco_solver.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | import math
import os
import random
import numpy as np
import torch
from tensorboardX import SummaryWriter
from tqdm import tqdm
from ..methods import DDPG, TD3, SAC
from envs.abb.models import utils
class Solver(object):
def __init__(self, args, env, project_path):
self.args = args
self.env = env
self.file_name = ''
self.project_path = project_path
self.result_path = project_path + "results/robosuite"
self.evaluations = []
# Set seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# print('action_dim :::', env._action_dim)
# print("obs :::", env._setup_observables())
state_dim = env.observation_space.shape[0]
print('state_dim', state_dim)
action_dim = env.action_space.shape[0]
print('action_dim', action_dim)
print(env.action_space.high)
max_action = float(env.action_space.high[0])
# Initialize policy
if 'DDPG' == args.policy_name:
policy = DDPG.DDPG(args, state_dim, action_dim, max_action)
elif 'SAC' == args.policy_name:
policy = SAC.SAC(args, state_dim, action_dim, max_action, self.env.action_space)
elif 'TD3' == args.policy_name:
policy = TD3.TD3(args, state_dim, action_dim, max_action)
else:
policy = TD3.TD3(args, state_dim, action_dim, max_action)
self.log_dir = '{}/{}/{}_{}_seed_{}'.format(self.result_path,
self.args.log_path,
self.args.policy_name,
self.args.env_name,
self.args.seed)
print("---------------------------------------")
print("Settings: %s" % self.log_dir)
print("---------------------------------------")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# self.log_transfer_dir = '{}/{}_transfer/{}_{}_seed_{}'.format(self.result_path,
# self.args.log_path,
# self.args.policy_name,
# self.args.env_name,
# self.args.seed)
# print("---------------------------------------")
# print("Settings: %s" % self.log_transfer_dir)
# print("---------------------------------------")
# if not os.path.exists(self.log_transfer_dir):
# os.makedirs(self.log_transfer_dir)
self.policy = policy
self.replay_buffer = utils.ReplayBuffer()
self.total_timesteps = 0
self.pre_num_steps = self.total_timesteps
self.best_reward = 0.0
self.writer_train = SummaryWriter(logdir=self.log_dir)
# self.writer_test = SummaryWriter(logdir=self.log_dir)
def reset(self):
self.obs = self.env.reset()
self.episode_reward = 0
self.episode_timesteps = 0
def train_once(self):
if self.total_timesteps != 0:
self.writer_train.add_scalar('train_ave_reward', self.episode_reward, self.total_timesteps)
self.policy.train(self.replay_buffer,
self.args.batch_size,
self.args.discount,
self.args.tau,
self.args.policy_noise,
self.args.noise_clip,
self.args.policy_freq)
def eval_once(self):
self.pbar.update(self.total_timesteps - self.pre_num_steps)
self.pre_num_steps = self.total_timesteps
# Evaluate episode
        if self.total_timesteps % self.args.eval_freq == 0:
# evaluate the policy for once
avg_reward, avg_episode_steps = evaluate_policy(self.env, self.policy, self.args)
self.evaluations.append(avg_reward)
self.writer_train.add_scalar('test_ave_reward', avg_reward, self.total_timesteps)
if self.best_reward < avg_reward:
self.best_reward = avg_reward
print("Best reward! Total T: %d Episode T: %d Reward: %f" %
(self.total_timesteps, self.episode_timesteps, avg_reward))
self.policy.save(self.file_name, directory=self.log_dir)
np.save(self.log_dir + "/test_accuracy", self.evaluations)
utils.write_table(self.log_dir + "/test_accuracy", np.asarray(self.evaluations))
def train(self):
avg_reward, _ = evaluate_policy(self.env, self.policy, self.args)
self.evaluations = [avg_reward]
self.pbar = tqdm(total=self.args.max_timesteps, initial=self.total_timesteps, position=0, leave=True)
if self.args.load_policy:
self.policy.load(self.file_name + str(self.args.load_policy_idx), self.log_dir)
done = False
self.reset()
while self.total_timesteps < self.args.max_timesteps:
self.train_once()
if done or self.episode_timesteps + 1 > self.args.max_episode_steps:
print('done', done)
print('total_timesteps', self.total_timesteps)
print('episode_reward', self.episode_reward)
self.eval_once()
self.reset()
done = False
# Select action randomly or according to policy
if self.total_timesteps < self.args.start_timesteps:
action = self.env.action_space.sample()
else:
if 'SAC' in self.args.policy_name:
action = self.policy.select_action(np.array(self.obs), eval=False)
else:
action = self.policy.select_action(np.array(self.obs))
if self.args.expl_noise != 0:
action = (action + np.random.normal(0, self.args.expl_noise,
size=self.env.action_space.shape[0])).clip(
self.env.action_space.low[0], self.env.action_space.high[0])
new_obs, reward, done, _ = self.env.step(action)
if self.args.render:
self.env.render()
self.episode_reward += reward
done_bool = 0 if self.episode_timesteps + 1 == self.args.max_episode_steps else float(done)
p = 1.0
self.replay_buffer.add((self.obs, new_obs, action, reward, done_bool, p))
self.obs = new_obs
self.episode_timesteps += 1
self.total_timesteps += 1
avg_reward, avg_episode_steps = evaluate_policy(self.env, self.policy, self.args)
self.evaluations.append(avg_reward)
if self.best_reward < avg_reward:
self.best_reward = avg_reward
print("Best reward! Total T: %d Episode T: %d Reward: %f" %
(self.total_timesteps, self.episode_timesteps, avg_reward))
self.policy.save(self.file_name, directory=self.log_dir)
if self.args.save_all_policy:
self.policy.save(self.file_name + str(int(self.args.max_timesteps)), directory=self.log_dir)
# if self.args.load_policy:
# np.save(self.log_transfer_dir + "/test_accuracy", self.evaluations)
# utils.write_table(self.log_transfer_dir + "/test_accuracy", np.asarray(self.evaluations))
# else:
np.save(self.log_dir + "/test_accuracy", self.evaluations)
utils.write_table(self.log_dir + "/test_accuracy", np.asarray(self.evaluations))
# # save the replay buffer
if self.args.save_data:
self.replay_buffer.save_buffer(self.log_dir + "/buffer_data")
self.env.reset()
def eval_only(self):
        # evaluate_policy returns (avg_reward, avg_episode_steps); keep only the reward
        avg_reward, _ = evaluate_policy(self.env, self.policy, self.args)
        self.evaluations = [avg_reward]
self.writer_test = SummaryWriter(logdir=self.log_dir + '_test')
self.pbar = tqdm(total=self.args.max_timesteps, initial=self.total_timesteps, position=0, leave=True)
if self.args.load_policy:
self.policy.load(self.file_name + str(self.args.load_policy_idx), self.log_dir)
done = False
safe_or_not = True
self.reset()
while self.total_timesteps < self.args.eval_max_timesteps:
if done or not safe_or_not or self.episode_timesteps + 1 > self.args.max_episode_steps:
print('safe_or_not', safe_or_not)
print('done', done)
print('total_timesteps', self.total_timesteps)
print('episode_reward', self.episode_reward)
self.eval_once()
self.reset()
done = False
safe_or_not = True
# Select action randomly or according to policy
if 'SAC' in self.args.policy_name:
action = self.policy.select_action(np.array(self.obs), eval=False)
else:
action = self.policy.select_action(np.array(self.obs))
new_obs, reward, done, _ = self.env.step(action)
self.episode_reward += reward
done_bool = 0 if self.episode_timesteps + 1 == self.args.max_episode_steps else float(done)
self.obs = new_obs
self.episode_timesteps += 1
self.total_timesteps += 1
        avg_reward, _ = evaluate_policy(self.env, self.policy, self.args)
self.evaluations.append(avg_reward)
print('evaluations', self.evaluations)
if self.best_reward < avg_reward:
self.best_reward = avg_reward
print("Best reward! Total T: %d Episode T: %d Reward: %f" %
(self.total_timesteps, self.episode_timesteps, avg_reward))
self.policy.save(self.file_name, directory=self.log_dir)
if self.args.save_all_policy:
self.policy.save(self.file_name + str(int(self.args.max_timesteps)), directory=self.log_dir)
        # log_transfer_dir is never created (its setup is commented out in
        # __init__), so mirror train() and always save under log_dir.
        np.save(self.log_dir + "/test_accuracy", self.evaluations)
        utils.write_table(self.log_dir + "/test_accuracy", np.asarray(self.evaluations))
self.env.reset()
def evaluate_policy(env, policy, args, eval_episodes=5):
avg_reward = 0.
avg_episode_steps = 0
for _ in range(eval_episodes):
print('eval_episodes', eval_episodes)
obs = env.reset()
# obs, state, done = env.reset()
done = False
eval_episodes_steps = 0
episode_states = []
while not done and eval_episodes_steps < args.max_episode_steps:
action = policy.select_action(np.array(obs))
# obs, _, reward, done, safe_or_not = env.step(action)
obs, reward, done, _ = env.step(action)
episode_states.append(obs)
avg_reward += reward
avg_episode_steps += 1
eval_episodes_steps += 1
avg_reward /= eval_episodes
avg_episode_steps /= eval_episodes
print('eval_avg_reward', avg_reward)
return avg_reward, avg_episode_steps
| 42.08156 | 109 | 0.555911 | 1,365 | 11,867 | 4.594139 | 0.110623 | 0.059959 | 0.033487 | 0.035082 | 0.666082 | 0.603891 | 0.589858 | 0.57455 | 0.519375 | 0.502312 | 0 | 0.005186 | 0.333783 | 11,867 | 281 | 110 | 42.231317 | 0.788009 | 0.104913 | 0 | 0.446154 | 0 | 0 | 0.055126 | 0.007363 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035897 | false | 0 | 0.046154 | 0 | 0.092308 | 0.097436 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
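# --- Editor's sketch (not from the repository above): the `done_bool` line masks the
# terminal flag at the episode time limit so the TD target keeps bootstrapping on
# timeouts. A minimal, self-contained illustration of the backup that consumes it:
def td_target(reward, done_bool, q_next, discount=0.99):
    """Bellman backup; done_bool == 0.0 on a timeout keeps the bootstrap term."""
    return reward + (1.0 - done_bool) * discount * q_next

print(td_target(1.0, 0.0, 5.0))  # timeout: 5.95 (still bootstraps)
print(td_target(1.0, 1.0, 5.0))  # true terminal: 1.0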
80d6549d6455eb3b023025b15786be0473b6bbe6 | 641 | py | Python | code_example/w2_genMeanSd.py | koonyook/unsupervised-phase-supplementary | 09ee8000c79465da8731b5323f2db9a25d7252ab | [
"MIT"
] | null | null | null | code_example/w2_genMeanSd.py | koonyook/unsupervised-phase-supplementary | 09ee8000c79465da8731b5323f2db9a25d7252ab | [
"MIT"
] | null | null | null | code_example/w2_genMeanSd.py | koonyook/unsupervised-phase-supplementary | 09ee8000c79465da8731b5323f2db9a25d7252ab | [
"MIT"
] | null | null | null | import numpy as np
import pickle

# This file will:
# 1. read the list of .pkl files that form the training data
# 2. save wMean.dat and wSD.dat

for inputDir in ["inputHeart/", "inputBow/", "inputAcrobat/"]:  # renamed from `input`, which shadowed the builtin
    fileList = [
        'data_train.pkl',
    ]
    collect = []
    for f in fileList:
        # data = np.load('input/' + f)  # (2,-)
        with open(inputDir + f, 'rb') as fh:
            dataList = pickle.load(fh)
        for data in dataList:
            collect.append(data)
    allTrainingData = np.hstack(collect)
    mean = np.mean(allTrainingData, axis=1, keepdims=True)
    sd = np.std(allTrainingData - mean, axis=1, keepdims=True)
    mean.dump(inputDir + 'wMean.dat')  # (2,1)
    sd.dump(inputDir + 'wSD.dat')      # (2,1)

print("done")
| 22.103448 | 63 | 0.687988 | 101 | 641 | 4.356436 | 0.514851 | 0.036364 | 0.059091 | 0.077273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 0.143526 | 641 | 28 | 64 | 22.892857 | 0.785064 | 0.230889 | 0 | 0 | 0 | 0 | 0.141975 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
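# --- Editor's sketch (companion to w2_genMeanSd.py above, not part of that repo):
# ndarray.dump() pickles the array, so reading the files back needs allow_pickle=True.
import numpy as np

mean = np.load("inputHeart/wMean.dat", allow_pickle=True)  # shape (2, 1)
sd = np.load("inputHeart/wSD.dat", allow_pickle=True)      # shape (2, 1)

def whiten(x):
    """Normalize a (2, N) data array with the stored statistics."""
    return (x - mean) / sd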
80d6b3fdb21cbf34c945220abf23ee4dc73841af | 10,553 | py | Python | metadrive/component/road_network/node_road_network.py | liuzuxin/metadrive | 850c207536531bc85179084acd7c30ab14a66111 | [
"Apache-2.0"
] | 125 | 2021-08-30T06:33:57.000Z | 2022-03-31T09:02:44.000Z | metadrive/component/road_network/node_road_network.py | liuzuxin/metadrive | 850c207536531bc85179084acd7c30ab14a66111 | [
"Apache-2.0"
] | 72 | 2021-08-30T16:23:41.000Z | 2022-03-31T19:17:16.000Z | metadrive/component/road_network/node_road_network.py | liuzuxin/metadrive | 850c207536531bc85179084acd7c30ab14a66111 | [
"Apache-2.0"
] | 20 | 2021-09-09T08:20:25.000Z | 2022-03-24T13:24:07.000Z | import copy
import logging
from typing import List, Tuple, Dict

import numpy as np

from metadrive.component.lane.abs_lane import AbstractLane
from metadrive.component.road_network.road import Road
from metadrive.component.road_network.base_road_network import BaseRoadNetwork
from metadrive.constants import Decoration
from metadrive.utils.math_utils import get_boxes_bounding_box
from metadrive.utils.scene_utils import get_lanes_bounding_box

logger = logging.getLogger(__name__)

LaneIndex = Tuple[str, str, int]
Route = List[LaneIndex]


class NodeRoadNetwork(BaseRoadNetwork):
    """
    This network uses two nodes to describe the road network graph; the edge between two nodes
    represents a road, which is a list of lanes connecting them.
    """
    graph: Dict[str, Dict[str, List[AbstractLane]]]

    def __init__(self, debug=False):
        super(NodeRoadNetwork, self).__init__()
        self.graph = {}
        self.indices = []
        self._graph_helper = None
        self.debug = debug
        self.is_initialized = False

    def after_init(self):
        assert not self.is_initialized
        self._update_indices()
        self._init_graph_helper()
        self.is_initialized = True

    def add(self, other, no_intersect=True):
        assert not self.is_initialized, "Adding new blocks should be done before road network initialization!"
        set_1 = set(self.graph) - {Decoration.start, Decoration.end}
        set_2 = set(other.graph) - {Decoration.start, Decoration.end}
        intersect = set_1.intersection(set_2)
        if len(intersect) != 0 and no_intersect:
            raise ValueError("Same start node {} in two road networks".format(intersect))
        # handle decoration lanes
        dec_lanes = self.get_all_decoration_lanes() + other.get_all_decoration_lanes()
        self.graph.update(copy.copy(other.graph))
        self.update_decoration_lanes(dec_lanes)
        return self

    def __isub__(self, other):
        intersection = self.graph.keys() & other.graph.keys() - {Decoration.start, Decoration.end}
        if len(intersection) != 0:
            for k in intersection:
                self.graph.pop(k, None)
        if Decoration.start in other.graph.keys():
            for lane in other.graph[Decoration.start][Decoration.end]:
                if lane in self.graph[Decoration.start][Decoration.end]:
                    self.graph[Decoration.start][Decoration.end].remove(lane)
        return self

    def get_all_decoration_lanes(self) -> List:
        if Decoration.start in self.graph:
            return self.graph[Decoration.start][Decoration.end]
        else:
            return []

    def update_decoration_lanes(self, lanes):
        if len(lanes) == 0:
            return
        if Decoration.start in self.graph:
            self.graph.pop(Decoration.start, None)
        self.graph[Decoration.start] = {Decoration.end: lanes}

    def clear(self):
        self.graph.clear()

    def get_positive_lanes(self):
        """
        To preserve the lane index, ret is a 2-dim array structured like [road_lanes[lane_1, lane_2]]
        """
        ret = []
        for _from, _to_dict in self.graph.items():
            for _to, lanes in _to_dict.items():
                road = Road(_from, _to)
                if not road.is_negative_road() and road.is_valid_road():
                    ret.append(lanes)
        return ret

    def get_negative_lanes(self):
        """
        To preserve the lane index, ret is a 2-dim array structured like [road_lanes[lane_1, lane_2]]
        """
        ret = []
        for _from, _to_dict in self.graph.items():
            for _to, lanes in _to_dict.items():
                road = Road(_from, _to)
                if road.is_negative_road() and road.is_valid_road():
                    ret.append(lanes)
        return ret

    def _get_bounding_box(self):
        """
        With this bounding box, the edge lengths in the x and y directions and the center of the
        road network can be computed easily.
        :return: minimum x value, maximum x value, minimum y value, maximum y value
        """
        boxes = []
        for _from, to_dict in self.graph.items():
            for _to, lanes in to_dict.items():
                if len(lanes) == 0:
                    continue
                boxes.append(get_lanes_bounding_box(lanes))
        res_x_max, res_x_min, res_y_max, res_y_min = get_boxes_bounding_box(boxes)
        return res_x_min, res_x_max, res_y_min, res_y_max

    def remove_all_roads(self, start_node: str, end_node: str):
        """
        Remove all roads between two road nodes
        :param start_node: start node name
        :param end_node: end node name
        :return: roads removed
        """
        ret = []
        paths = self.bfs_paths(start_node, end_node)
        for path in paths:
            for next_idx, node in enumerate(path[:-1], 1):
                road_removed = self.remove_road(Road(node, path[next_idx]))
                ret += road_removed
        return ret

    def remove_road(self, road):
        assert isinstance(road, Road), "Only Road type can be deleted"
        ret = self.graph[road.start_node].pop(road.end_node)
        if len(self.graph[road.start_node]) == 0:
            self.graph.pop(road.start_node)
        return ret

    def add_road(self, road, lanes: List):
        assert isinstance(road, Road), "Only Road type can be added to the road network"
        if road.start_node not in self.graph:
            self.graph[road.start_node] = {}
        if road.end_node not in self.graph[road.start_node]:
            self.graph[road.start_node][road.end_node] = []
        self.graph[road.start_node][road.end_node] += lanes

    def add_lane(self, _from: str, _to: str, lane: AbstractLane) -> None:
        """
        A lane is encoded as an edge in the road network.
        :param _from: the node at which the lane starts.
        :param _to: the node at which the lane ends.
        :param AbstractLane lane: the lane geometry.
        """
        if _from not in self.graph:
            self.graph[_from] = {}
        if _to not in self.graph[_from]:
            self.graph[_from][_to] = []
        self.graph[_from][_to].append(lane)

    def _init_graph_helper(self):
        self._graph_helper = GraphLookupTable(self.graph, self.debug)

    def get_lane(self, index: LaneIndex) -> AbstractLane:
        """
        Get the lane geometry corresponding to a given index in the road network.
        :param index: a tuple (origin node, destination node, lane id on the road).
        :return: the corresponding lane geometry.
        """
        _from, _to, _id = index
        if _id is None and len(self.graph[_from][_to]) == 1:
            _id = 0
        return self.graph[_from][_to][_id]

    def _update_indices(self):
        indexes = []
        for _from, to_dict in self.graph.items():
            for _to, lanes in to_dict.items():
                for _id, l in enumerate(lanes):
                    indexes.append((_from, _to, _id))
        self.indices = indexes

    def get_closest_lane_index(self, position, return_all=False):
        return self._graph_helper.get(position, return_all)

    def bfs_paths(self, start: str, goal: str) -> List[List[str]]:
        """
        Breadth-first search of all routes from start to goal.
        :param start: starting node
        :param goal: goal node
        :return: list of paths from start to goal.
        """
        queue = [(start, [start])]
        while queue:
            (node, path) = queue.pop(0)
            if node not in self.graph:
                yield []
                continue  # added: without this, the lookup below raises KeyError for unknown nodes
            for _next in set(self.graph[node].keys()) - set(path):
                if _next == goal:
                    yield path + [_next]
                elif _next in self.graph:
                    queue.append((_next, path + [_next]))

    def shortest_path(self, start: str, goal: str) -> List[str]:
        """
        Breadth-first search of the shortest checkpoints from start to goal.
        :param start: starting node
        :param goal: goal node
        :return: shortest checkpoints from start to goal.
        """
        start_road_node = start[0]
        assert start != goal
        return next(self.bfs_paths(start_road_node, goal), [])


class GraphLookupTable:
    def __init__(self, graph, debug):
        self.graph = graph
        self.debug = debug

    def get(self, position, return_all):
        log = dict()
        count = 0
        for _, (_from, to_dict) in enumerate(self.graph.items()):
            if _from == "decoration":
                continue
            for lanes_id, lanes in to_dict.items():
                lane = next(iter(lanes))
                log[count] = (lane.distance(position), (_from, lanes_id))
                count += 1

        distance_index_mapping = []
        for rank, candidate_count in enumerate(sorted(log, key=lambda key: log[key][0])):
            first_lane_distance, (section_id, lanes_id) = log[candidate_count]
            lanes = self.graph[section_id][lanes_id]
            for lane_id, lane in enumerate(lanes):
                if lanes_id == Decoration.start:
                    continue
                if lane_id == 0:
                    dist = first_lane_distance
                else:
                    dist = lane.distance(position)
                distance_index_mapping.append((dist, (section_id, lanes_id, lane_id)))
            # if rank > 10:
            #     # Take the first 5 ranked lanes into consideration. The number may be related to the
            #     # number of lanes in an intersection. We have 3 lanes in an intersection, so computing
            #     # the first 4 ranks would work; we take the first 5 to be safe.
            #     # In the future we shall refactor the whole system, so this vulnerable code would be removed.
            #     break
        if self.graph.get(Decoration.start, False):
            for id, lane in enumerate(self.graph[Decoration.start][Decoration.end]):
                dist = lane.distance(position)
                distance_index_mapping.append((dist, (Decoration.start, Decoration.end, id)))

        distance_index_mapping = sorted(distance_index_mapping, key=lambda d: d[0])
        if return_all:
            return distance_index_mapping
        else:
            ret_ind = 0
            index = distance_index_mapping[ret_ind][1]
            distance = distance_index_mapping[ret_ind][0]
            return index, distance
| 39.376866 | 120 | 0.607221 | 1,361 | 10,553 | 4.515797 | 0.166054 | 0.065897 | 0.023267 | 0.045558 | 0.318744 | 0.246827 | 0.150667 | 0.150667 | 0.150667 | 0.108363 | 0 | 0.004729 | 0.298683 | 10,553 | 267 | 121 | 39.524345 | 0.825699 | 0.16943 | 0 | 0.192308 | 0 | 0 | 0.02232 | 0 | 0 | 0 | 0 | 0 | 0.027473 | 1 | 0.120879 | false | 0 | 0.054945 | 0.005495 | 0.274725 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
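# --- Editor's sketch (standalone, not from metadrive): the same BFS-over-adjacency-dict
# idea used by NodeRoadNetwork.bfs_paths above, with plain strings standing in for nodes.
from collections import deque

graph = {"a": {"b": [], "c": []}, "b": {"d": []}, "c": {"d": []}, "d": {}}

def bfs_paths(start, goal):
    queue = deque([(start, [start])])
    while queue:
        node, path = queue.popleft()
        for nxt in set(graph.get(node, {})) - set(path):
            if nxt == goal:
                yield path + [nxt]
            else:
                queue.append((nxt, path + [nxt]))

print(list(bfs_paths("a", "d")))  # two routes: via "b" and via "c"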
80d8125a476591f35f2ad71b32650a81f028ecab | 1,468 | py | Python | generic_ui/RawTextWidget.py | STMicroelectronics/stm32ai-datalogger | 0ba92ced44248e606a5cc68139fdfdc84489fa17 | [
"BSD-3-Clause"
] | 3 | 2021-06-28T13:41:12.000Z | 2021-07-21T13:06:34.000Z | generic_ui/RawTextWidget.py | STMicroelectronics/stm32ai-datalogger | 0ba92ced44248e606a5cc68139fdfdc84489fa17 | [
"BSD-3-Clause"
] | null | null | null | generic_ui/RawTextWidget.py | STMicroelectronics/stm32ai-datalogger | 0ba92ced44248e606a5cc68139fdfdc84489fa17 | [
"BSD-3-Clause"
] | null | null | null | ###################################################################################
# Copyright (c) 2020-2021 STMicroelectronics.
# All rights reserved.
# This software is licensed under terms that can be found in the LICENSE file in
# the root directory of this software component.
# If no LICENSE file comes with this software, it is provided AS-IS.
###################################################################################
__author__ = "Romain LE DONGE"
__copyright__ = "Copyright (c) 2021 STMicroelectronics"
__license__ = """
Copyright (c) 2020-2021 STMicroelectronics.
All rights reserved.
This software is licensed under terms that can be found in the LICENSE file in
the root directory of this software component.
If no LICENSE file comes with this software, it is provided AS-IS.
"""

from PySide2.QtWidgets import QPlainTextEdit
from PySide2.QtCore import Slot, qDebug


class RawTextWidget(QPlainTextEdit):
    def __init__(self, controller, parent=None):
        super().__init__(parent)
        self.controller = controller
        self.controller.sig_newRawData.connect(self.s_appendRaw)

    @Slot(dict)
    def s_appendRaw(self, data: dict):
        self.appendPlainText(str(data) + "\n")

    def closeEvent(self, closeEvent):
        qDebug("Closing RawTextWidget")
        self.controller.sig_newRawData.disconnect(self.s_appendRaw)
        closeEvent.accept()
| 38.631579 | 84 | 0.620572 | 160 | 1,468 | 5.5375 | 0.425 | 0.081264 | 0.031603 | 0.040632 | 0.469526 | 0.469526 | 0.469526 | 0.469526 | 0.469526 | 0.469526 | 0 | 0.018739 | 0.200272 | 1,468 | 37 | 85 | 39.675676 | 0.735945 | 0.180518 | 0 | 0 | 0 | 0 | 0.356855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.086957 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
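# --- Editor's sketch (hypothetical, not in the STMicroelectronics sources): the widget
# above only needs a controller exposing a `sig_newRawData` Signal(dict); a minimal stub:
from PySide2.QtCore import QObject, Signal

class DummyController(QObject):
    sig_newRawData = Signal(dict)

# widget = RawTextWidget(DummyController())
# widget.controller.sig_newRawData.emit({"ch0": 1.23})  # line appears in the text area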
80d88468c8e7365b40222afca17976c053b6b8f3 | 7,243 | py | Python | test_QueryPharos.py | kevinxin90/RTX_BioThings_Explorer | 16de49de9e0db75c7616a85c2592166ea055faa7 | [
"Apache-2.0"
] | 1 | 2018-05-24T13:16:57.000Z | 2018-05-24T13:16:57.000Z | test_QueryPharos.py | kevinxin90/RTX_BioThings_Explorer | 16de49de9e0db75c7616a85c2592166ea055faa7 | [
"Apache-2.0"
] | 1 | 2018-06-01T02:04:23.000Z | 2018-06-01T20:21:32.000Z | test_QueryPharos.py | kevinxin90/RTX_BioThings_Explorer | 16de49de9e0db75c7616a85c2592166ea055faa7 | [
"Apache-2.0"
] | null | null | null | import unittest

from QueryPharos import QueryPharos


class QueryPharosTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.pharos = QueryPharos()

    def test_query_drug_name_to_targets(self):
        # bte_result = self.pharos.query_drug_name_to_targets('paclitaxel')
        # # TODO: BioThings Explorer API can only return short names, e.g. TUBB3 for Tubulin beta-3 chain
        # # TODO: Should a target ID be an int or a string?
        # rtx_result = [{'id': 1995, 'name': 'Tubulin beta-3 chain'},
        #               {'id': 15579, 'name': 'Tubulin beta chain'},
        #               {'id': 10262, 'name': 'Tubulin beta-1 chain'},
        #               {'id': 16012, 'name': 'Cytochrome P450 3A4'},
        #               {'id': 1906, 'name': 'Tubulin beta-4A chain'},
        #               {'id': 18746, 'name': 'Cytochrome P450 3A5'},
        #               {'id': 16739, 'name': 'Mimitin, mitochondrial'},
        #               {'id': 15851, 'name': 'Tubulin beta-4B chain'},
        #               {'id': 13919, 'name': 'Ribonucleoside-diphosphate reductase large subunit'},
        #               {'id': 5762, 'name': 'Tubulin beta-2A chain'}]
        # bte_ids = {x["id"] for x in bte_result}
        # rtx_ids = {x["id"] for x in rtx_result}
        # self.assertSetEqual(set(bte_ids), set(rtx_ids))

        # bte_result = self.pharos.query_drug_name_to_targets('lovastatin')
        # rtx_result = [{'id': 19672, 'name': '3-hydroxy-3-methylglutaryl-coenzyme A reductase'},
        #               {'id': 14711, 'name': 'Integrin alpha-L'},
        #               {'id': 3939, 'name': 'Farnesyl pyrophosphate synthase'},
        #               {'id': 14764, 'name': 'Integrin beta-3'},
        #               {'id': 13844, 'name': 'Cytochrome P450 2D6'},
        #               {'id': 16824, 'name': 'Prostacyclin receptor'},
        #               {'id': 17657, 'name': 'Serine/threonine-protein kinase mTOR'},
        #               {'id': 8600, 'name': 'Prostaglandin G/H synthase 2'},
        #               {'id': 18746, 'name': 'Cytochrome P450 3A5'},
        #               {'id': 7520, 'name': 'C-C chemokine receptor type 5'}]
        # bte_ids = {x["id"] for x in bte_result}
        # rtx_ids = {x["id"] for x in rtx_result}
        # self.assertSetEqual(set(bte_ids), set(rtx_ids))
        self.skipTest('Kevin implemented this method in a different way. Check his Google Doc.')

    def test_query_target_to_diseases(self):
        bte_result = self.pharos.query_target_to_diseases("16012")
        rtx_result = [{'id': '852', 'name': 'Hepatitis C'},
                      {'id': '35', 'name': 'osteosarcoma'},
                      {'id': '67', 'name': 'Prostatic Neoplasms'},
                      {'id': '4854', 'name': 'Torsades de Pointes'},
                      {'id': '771', 'name': 'Mammary Neoplasms'},
                      {'id': '50', 'name': 'astrocytic glioma'},
                      {'id': '51', 'name': 'ependymoma'},
                      {'id': '52', 'name': 'oligodendroglioma'},
                      {'id': '47', 'name': 'cutaneous lupus erythematosus'},
                      {'id': '42', 'name': 'psoriasis'},
                      {'id': '5', 'name': 'medulloblastoma, large-cell'},
                      {'id': '304', 'name': 'adrenocortical adenoma'},
                      {'id': '95', 'name': 'pancreatic ductal adenocarcinoma liver metastasis'},
                      {'id': '49', 'name': 'intraductal papillary-mucinous neoplasm (IPMN)'},
                      {'id': '129', 'name': 'Cancer'},
                      {'id': '849', 'name': 'Liver disease'},
                      {'id': '2102', 'name': 'Diarrhea'},
                      {'id': '745', 'name': 'Neutropenia'},
                      {'id': '1855', 'name': 'Human immunodeficiency virus infectious disease'},
                      {'id': '662', 'name': 'Exanthem'},
                      {'id': '205', 'name': 'Hypertension'},
                      {'id': '53', 'name': 'diabetes mellitus'},
                      {'id': '6190', 'name': 'Sexual dysfunction'},
                      {'id': '893', 'name': 'Leber congenital amaurosis'},
                      {'id': '349', 'name': 'Cholestasis'},
                      {'id': '300', 'name': 'Epilepsy'},
                      {'id': '203', 'name': 'Coronary artery disease'},
                      {'id': '171', 'name': 'tuberculosis'},
                      {'id': '209', 'name': 'Kidney disease'},
                      {'id': '332', 'name': 'Toxic encephalopathy'},
                      {'id': '61', 'name': 'Schizophrenia'},
                      {'id': '533', 'name': 'Pain agnosia'},
                      {'id': '9826', 'name': 'Human immunodeficiency virus infection'}]
        bte_ids = {x["id"] for x in bte_result}
        rtx_ids = {x["id"] for x in rtx_result}
        self.assertSetEqual(set(bte_ids), set(rtx_ids))

    def test_query_target_to_drugs(self):
        # bte_result = self.pharos.query_target_to_drugs("16012")
        # rtx_result = [{'action': 'INHIBITOR', 'id': 4490391, 'name': 'cobicistat'}]
        # self.assertEqual(len(bte_result), len(rtx_result))
        self.skipTest("Kevin claimed that we should use 'refid' instead of the 'Record ID'. Check his Google Doc")

    def test_query_drug_to_targets(self):
        # bte_result = self.pharos.query_drug_to_targets("254599")
        # rtx_result = list()
        # self.assertListEqual(bte_result, rtx_result)
        #
        # bte_result = self.pharos.query_drug_to_targets("218623")
        # rtx_result = [{'id': 9873512, 'name': 'HMGCR'}]
        # self.assertListEqual(bte_result, rtx_result)
        self.skipTest("Kevin claimed that we should use 'refid' instead of the 'Record ID'. Check his Google Doc")

    def test_query_target_name(self):
        bte_result = self.pharos.query_target_name("852")
        rtx_result = 'Putative uncharacterized protein ENSP00000382790'
        self.assertEqual(bte_result, rtx_result)

    def test_query_target_uniprot_accession(self):
        bte_result = self.pharos.query_target_uniprot_accession("852")
        rtx_result = 'A8MVM7'
        self.assertEqual(bte_result, rtx_result)

        bte_result = self.pharos.query_target_uniprot_accession("1")
        rtx_result = 'Q9UL59'
        self.assertEqual(bte_result, rtx_result)

    def test_query_disease_name(self):
        bte_result = self.pharos.query_disease_name("9636")
        rtx_result = 'MALARIA, SEVERE, SUSCEPTIBILITY TO'
        self.assertEqual(bte_result, rtx_result)

    def test_query_disease_id_by_name(self):
        bte_result = self.pharos.query_disease_id_by_name("MALARIA, SEVERE, SUSCEPTIBILITY TO")
        rtx_result = '936'
        self.assertEqual(bte_result, rtx_result)

    def test_query_drug_name(self):
        bte_result = self.pharos.query_drug_name("218623")
        rtx_result = 'lovastatin'
        self.assertEqual(bte_result, rtx_result)

    def test_query_drug_id_by_name(self):
        bte_result = self.pharos.query_drug_id_by_name("lovastatin")
        rtx_result = 218623
        self.assertEqual(bte_result, rtx_result)


if __name__ == '__main__':
    unittest.main()
| 52.107914 | 114 | 0.54425 | 785 | 7,243 | 4.828025 | 0.322293 | 0.061741 | 0.044591 | 0.065172 | 0.407124 | 0.391821 | 0.366755 | 0.339842 | 0.262533 | 0.173615 | 0 | 0.058312 | 0.301533 | 7,243 | 138 | 115 | 52.485507 | 0.690848 | 0.316443 | 0 | 0.113924 | 0 | 0 | 0.273839 | 0 | 0 | 0 | 0 | 0.007246 | 0.101266 | 1 | 0.139241 | false | 0 | 0.025316 | 0 | 0.177215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
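# --- Editor's note: the recurring idiom in the tests above compares result sets by id
# only, so ordering and name differences between the two APIs are ignored. Standalone:
bte_result = [{'id': '852', 'name': 'Hepatitis C'}]
rtx_result = [{'id': '852', 'name': 'Hepatitis C virus infection'}]  # names may differ
assert {x['id'] for x in bte_result} == {x['id'] for x in rtx_result}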
80d969efc16e2fceb81dc2c5f9ae3ec495eea52f | 2,016 | py | Python | setup.py | MyGodIsHe/..-pytest-neo | 5a7d3ef6754c03afeb01db189a80c55bba538de6 | [
"MIT"
] | 45 | 2019-03-07T12:12:11.000Z | 2022-02-01T09:36:30.000Z | setup.py | MyGodIsHe/..-pytest-neo | 5a7d3ef6754c03afeb01db189a80c55bba538de6 | [
"MIT"
] | 6 | 2019-03-14T09:37:51.000Z | 2020-12-01T21:30:15.000Z | setup.py | MyGodIsHe/..-pytest-neo | 5a7d3ef6754c03afeb01db189a80c55bba538de6 | [
"MIT"
] | 1 | 2019-03-30T22:45:58.000Z | 2019-03-30T22:45:58.000Z | from setuptools import setup
import codecs


# Copied from (and hacked):
# https://github.com/pypa/virtualenv/blob/develop/setup.py#L42
def get_version(filename):
    import os
    import re
    here = os.path.dirname(os.path.abspath(__file__))
    f = codecs.open(os.path.join(here, filename), encoding='utf-8')
    version_file = f.read()
    f.close()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")


with open("README.rst", "r") as fh:
    long_description = fh.read()

setup(
    name='pytest-neo',
    description=(
        'pytest-neo is a plugin for pytest that shows '
        'tests like screen of Matrix.'
    ),
    long_description=long_description,
    version=get_version('pytest_neo.py'),
    license='MIT',
    author='Ilya Chistyakov',
    author_email='ilchistyakov@gmail.com',
    py_modules=['pytest_neo'],
    entry_points={'pytest11': ['neo = pytest_neo']},
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=['pytest>=6.2.0'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    project_urls={
        'Source': 'https://github.com/MyGodIsHe/pytest-neo',
    },
)
| 31.5 | 68 | 0.609623 | 227 | 2,016 | 5.286344 | 0.572687 | 0.110833 | 0.145833 | 0.13 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01436 | 0.240079 | 2,016 | 63 | 69 | 32 | 0.76893 | 0.042163 | 0 | 0 | 0 | 0 | 0.435911 | 0.011417 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.074074 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
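# --- Editor's note: get_version() above expects a module-level assignment in
# pytest_neo.py that its regex can match; a quick standalone check of that pattern:
import re
assert re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                 "__version__ = '0.2.0'", re.M).group(1) == '0.2.0'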
80da5c32e637d7a9cc6af9fdd36bc9ca02fad468 | 10,646 | py | Python | ref_bot/cog/articlerefs.py | tser0f/ref_bot | 8945992ec8802a88546494b503d7658cc53d80c5 | [
"MIT"
] | null | null | null | ref_bot/cog/articlerefs.py | tser0f/ref_bot | 8945992ec8802a88546494b503d7658cc53d80c5 | [
"MIT"
] | 1 | 2020-07-02T13:37:44.000Z | 2020-07-07T03:09:50.000Z | ref_bot/cog/articlerefs.py | tser0f/ref_bot | 8945992ec8802a88546494b503d7658cc53d80c5 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands

from ref_bot.data_models import Article, Tag, ArticleOwner
from ref_bot.article_scraper import scrape_article


class ArticleRefs(commands.Cog):
    def __init__(self, bot, db_session):
        self.bot = bot
        self.db_session = db_session
        self._last_member = None

    def generate_embed(self, article):
        emb = discord.Embed(title=article.title, description=article.description, url=article.url)
        # emb.set_author(name=article.discord_user_id)
        emb.add_field(name='Id', value=article.id)
        emb.add_field(name='Tags', value=', '.join([str(tag.name) for tag in article.tags]))
        # emb.add_field(name='Added by', value='<@{0}>'.format(article.discord_user_id))
        # emb.add_field(name='Channel', value='<#{0}>'.format(article.discord_channel_id))
        # emb.add_field(name='Original request', value='[Link!](https://discordapp.com/channels/{0.discord_guild_id}/{0.discord_channel_id}/{0.discord_message_id})'.format(article))
        emb.set_footer(text='Created: {0.created} | Last updated: {0.last_updated}'.format(article))
        return emb

    @commands.command(name='help')
    async def show_help(self, ctx):
        emb = discord.Embed(title='Ref bot help',
                            description='''Commands:
`!ref add <article_url> <tags...>` - Adds a new article
`!ref find <keywords...>` - Searches for an article posted in the current channel using the specified keywords.
`!ref find_all <keywords...>` - Same as !ref find, but posted anywhere
`!ref id <id>` - Gets the article with the specified id
`!ref delete <id>` - Removes the article with the specified id
`!ref tag <id> <+tag -tag...>` - Adds tags specified with `+` and removes tags specified with `-`
`!ref update <id>` - Automatically updates the article from the url
`!ref owners <id>` - Shows the users that added the article to the channel

Examples:
`!ref add https://site.com/articles/23 hashing crypto`
`!ref find hash`
`!ref tag 5 +passwords +practice -dolan`
`!ref delete 8`
''')
        await ctx.send(embed=emb)

    @commands.command(name='add')
    async def add_article(self, ctx, url, *tags):
        url = url.strip()
        if url[0] == '<' and url[-1] == '>':
            url = url[1:-1]
        article_query = self.db_session.query(Article).filter_by(url=url)
        article_obj = article_query.first()
        is_new = False
        owners = []
        if article_obj is None:
            article_obj = Article(url=url)
            is_new = True
            for tag in tags:
                article_obj.tags.append(Tag(name=tag))
            if scrape_article(article_obj) == False:
                await ctx.send('Error! Could not add the article')
                return
        else:
            owners = article_obj.find_owners(ArticleOwner(discord_channel_id=ctx.message.channel.id, discord_guild_id=ctx.guild.id))

        if len(owners) == 0:
            article_obj.owners.append(ArticleOwner(discord_user_id=ctx.author.id, discord_channel_id=ctx.message.channel.id, discord_message_id=ctx.message.id, discord_guild_id=ctx.guild.id))
        elif len(owners) == 1:
            await ctx.send('Article has already been added in this channel. It is currently owned by <@{0.discord_user_id}>.'.format(owners[0]))
            return
        else:
            await ctx.send('WARNING: Article {0} has multiple ({1}) owners in this channel!'.format(article_obj.id, len(owners)))
            return

        article_obj.resolve_existing_tags(self.db_session)
        if is_new:
            self.db_session.add(article_obj)
        self.db_session.commit()
        await ctx.send('Article added successfully!', embed=self.generate_embed(article_obj))

    def find_by_id(self, query, id):
        return query.filter(Article.id == id)

    def find_by_channel(self, query, channel_id):
        return query.filter(Article.owners.any(ArticleOwner.discord_channel_id == channel_id))

    def find_like_tag(self, query, tag):
        return query.filter(Article.tags.any(Tag.name.like('%{0}%'.format(tag))))

    def find_like_title(self, query, title):
        return query.filter(Article.title.like('%{0}%'.format(title)))

    def find_by_keywords(self, query, keywords):
        articles_found = None
        for keyword in keywords:
            if articles_found is None or len(articles_found) == 0:
                articles_found = query.filter(Article.tags.any(Tag.name.like('%{0}%'.format(keyword))) | Article.title.like('%{0}%'.format(keyword))).all()
            if len(articles_found) == 1:  # only one result is left, cannot get fewer anyway
                return articles_found
            elif len(articles_found) > 1:
                articles_tag = []
                articles_title = []
                for article in articles_found:
                    for tag in article.tags:
                        if keyword in tag.name:
                            articles_tag.append(article)
                            break
                    if keyword in article.title:
                        articles_title.append(article)
                if len(articles_tag) >= len(articles_title):  # keep whichever result set is biggest
                    articles_found = articles_tag
                elif len(articles_tag) < len(articles_title):
                    articles_found = articles_title
        return articles_found

    @commands.command(name='find_all')
    async def find_article(self, ctx, *keywords):
        articles_found = self.find_by_keywords(self.db_session.query(Article), keywords)
        if articles_found is not None:
            for article in articles_found:
                await ctx.send('Found!', embed=self.generate_embed(article))
        else:
            await ctx.send('Could not find your article. :(')

    @commands.command(name='find')
    async def find_article_channel(self, ctx, *keywords):
        articles_found = self.find_by_keywords(self.db_session.query(Article), keywords)
        articles_sent = False
        if articles_found is not None and len(articles_found) != 0:
            for article in articles_found:
                if any(owner.discord_channel_id == ctx.message.channel.id for owner in article.owners):
                    articles_sent = True
                    await ctx.send('Found!', embed=self.generate_embed(article))
        if articles_sent == False:
            await ctx.send('Could not find your article. :(')

    @commands.command(name='id')
    async def find_article_id(self, ctx, id):
        article = self.find_by_id(self.db_session.query(Article), id).first()
        if article is not None:
            await ctx.send('Found!', embed=self.generate_embed(article))
        else:
            await ctx.send('Could not find the specified article.')

    @commands.command(name='delete')
    async def delete_article(self, ctx, id):
        article_query = self.find_by_id(self.db_session.query(Article), id)
        article = article_query.first()
        if article is not None:
            # owner = article_query.filter(Article.owners.any((ArticleOwner.discord_channel_id==ctx.message.channel.id) & (ArticleOwner.discord_guild_id==ctx.guild.id))).first().owners[0]
            owners = article.find_owners(ArticleOwner(discord_channel_id=ctx.message.channel.id, discord_guild_id=ctx.guild.id, discord_user_id=ctx.author.id))
            if len(owners) > 1 or ctx.message.author.guild_permissions.administrator:
                for owner in owners:
                    self.db_session.delete(owner)
                if len(article.owners) == len(owners):
                    self.db_session.delete(article)
                self.db_session.commit()
                await ctx.send('Successfully deleted article!')
            else:
                await ctx.send('Only <@{0}> can delete this article!'.format(article.discord_user_id))
        else:
            await ctx.send('Could not find the specified article.')

    @commands.command(name='tag', aliases=['tags'])
    async def tag_article(self, ctx, id, *tags):
        article = self.find_by_id(self.db_session.query(Article), id).first()
        if article is not None:
            tags_add = []
            tags_remove = []
            for tag in tags:
                if tag[0] == '-':
                    tags_remove.append(tag[1:])
                elif tag[0] == '+':
                    article.tags.append(Tag(name=tag[1:]))
                else:
                    article.tags.append(Tag(name=tag))
            for existing_tag in article.tags:
                if existing_tag.name in tags_remove:
                    article.tags.remove(existing_tag)
            article.resolve_existing_tags(self.db_session)
            self.db_session.commit()
            await ctx.send('Tags updated!', embed=self.generate_embed(article))
        else:
            await ctx.send('Could not find the specified article.')

    @commands.command(name='update')
    async def update_article(self, ctx, id):
        article = self.find_by_id(self.db_session.query(Article), id).first()
        if article is not None:
            if scrape_article(article):
                article.resolve_existing_tags(self.db_session)
                self.db_session.commit()
                await ctx.send('Article has been auto-updated!', embed=self.generate_embed(article))
            else:
                await ctx.send('Could not open the article for updating!')
        else:
            await ctx.send('Could not find the specified article.')

    @commands.command(name='owner', aliases=['owners'])
    async def list_owners(self, ctx, id):
        article = self.find_by_id(self.db_session.query(Article), id).first()
        if article is not None:
            emb = discord.Embed(title=article.title)
            owners = article.owners_without_personal()
            owners_mentions = '\r\n'.join(['<@{0.discord_user_id}>'.format(owner) for owner in owners])
            owners_channels = '\r\n'.join(['<#{0.discord_channel_id}>'.format(owner) for owner in owners])
            emb.add_field(name='Owners', value=owners_mentions)
            emb.add_field(name='Channels', value=owners_channels)
            await ctx.send('Article owners list: ', embed=emb)
| 43.453061 | 191 | 0.603231 | 1,330 | 10,646 | 4.670677 | 0.132331 | 0.030425 | 0.038635 | 0.023181 | 0.469414 | 0.368802 | 0.302962 | 0.278815 | 0.250483 | 0.22537 | 0 | 0.004974 | 0.282454 | 10,646 | 244 | 192 | 43.631148 | 0.808221 | 0.059083 | 0 | 0.245902 | 0 | 0.010929 | 0.179257 | 0.006994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038251 | false | 0.005464 | 0.021858 | 0.021858 | 0.120219 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
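# --- Editor's sketch (hypothetical wiring, not in ref_bot's cog file): the cog above
# expects a bot and a SQLAlchemy session; the names below are placeholders.
# from discord.ext import commands
# from sqlalchemy.orm import sessionmaker
#
# bot = commands.Bot(command_prefix='!ref ', help_command=None)
# db_session = sessionmaker(bind=engine)()   # `engine` created elsewhere
# bot.add_cog(ArticleRefs(bot, db_session))
# bot.run(TOKEN)                             # TOKEN supplied by the deployer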
80ddf73885657b81588970d4b5f8599da4c9b6a7 | 2,060 | py | Python | Discord Webhook Automation/discord_webhook.py | zYxDevs/Python_Scripts | 74ed7df97c9287b966b4139f585ed3a1702f2d29 | [
"MIT"
] | 14 | 2021-10-02T14:17:06.000Z | 2021-11-08T10:17:14.000Z | Discord Webhook Automation/discord_webhook.py | Naik-G/Python_Scripts | cd975036e126982aaa01da48c94cec1759af6d61 | [
"MIT"
] | 4 | 2021-10-03T05:35:11.000Z | 2021-10-06T18:05:05.000Z | Discord Webhook Automation/discord_webhook.py | Naik-G/Python_Scripts | cd975036e126982aaa01da48c94cec1759af6d61 | [
"MIT"
] | 47 | 2021-10-02T12:07:07.000Z | 2021-11-07T11:49:50.000Z | #!/usr/bin/env python3
# Path: Discord Webhook Automation/discord_webhook.py
import requests

discord_webhook_url = 'your webhook url'

Message = {
    "content": "./Hello_World",
    "username": "Name for your discord webhook",
    "avatar_url": "Your Avatar Image URL",
    "tts": False,
    "embeds": [
        {
            "title": "Title",
            "description": "Description",
            "url": "https://discordapp.com",
            "color": 16711680,
            "footer": {
                "text": "Footer Text"
            },
            "image": {
                "url": "https://discordapp.com"
            },
            "thumbnail": {
                "url": "https://discordapp.com"
            },
            "author": {
                "name": "Author Name",
                "url": "https://discordapp.com",
                "icon_url": "https://discordapp.com"
            },
            "fields": [
                {
                    "name": "Field Name",
                    "value": "Field Value",
                    "inline": True
                }
            ]
        }
    ]
}

# Use `json=` so the nested payload is serialized as JSON, which is what the Discord API
# expects; `data=Message` would form-encode it and mangle the nested embeds.
requests.post(discord_webhook_url, json=Message)

# Message can consist of the following:
# content: The message to be sent
# username: The name of the user
# avatar_url: The URL of the user's avatar
# tts: Whether or not the message should be read aloud
# embeds: An array of embeds to be sent with the message

# Each embed is formatted as follows:
# title: The title of the embed
# description: The description of the embed
# url: The URL of the embed
# timestamp: The timestamp of the embed
# color: The color of the embed
# footer: The footer of the embed
# footer_icon: The icon of the footer
# image: The image of the embed
# thumbnail: The thumbnail of the embed
# For more info check out the Discord API: https://discordapp.com/developers/docs/resources/channel#embed-object
| 29.428571 | 112 | 0.647087 | 280 | 2,060 | 4.721429 | 0.282143 | 0.083207 | 0.128593 | 0.079425 | 0.370651 | 0.360061 | 0.360061 | 0.360061 | 0.360061 | 0.360061 | 0 | 0.005736 | 0.23835 | 2,060 | 69 | 113 | 29.855072 | 0.836839 | 0.542233 | 0 | 0.105263 | 0 | 0 | 0.41402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.026316 | 0 | 0.026316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
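# --- Editor's sketch: the smallest valid webhook call, with only the required field.
# The URL is a placeholder; Discord answers 204 No Content on success.
import requests

webhook_url = "https://discord.com/api/webhooks/<id>/<token>"  # placeholder
resp = requests.post(webhook_url, json={"content": "Hello from a webhook"})
resp.raise_for_status()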
80ddf8c8d244690e44871a0fc5d1f5d9d7730557 | 298 | py | Python | Advertising/advertising.py | narenzhang/learnml | c6d5f4b84a7c9c23f93d03b06087f28772a52236 | [
"Apache-2.0"
] | null | null | null | Advertising/advertising.py | narenzhang/learnml | c6d5f4b84a7c9c23f93d03b06087f28772a52236 | [
"Apache-2.0"
] | null | null | null | Advertising/advertising.py | narenzhang/learnml | c6d5f4b84a7c9c23f93d03b06087f28772a52236 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# _*_ coding: utf-8 _*_
import pandas as pd
import matplotlib.pyplot as plt  # added: `plt` is used below but was never imported


def run_main():
    csv_path = 'Advertising.csv'
    # read the data with pandas
    data = pd.read_csv(csv_path)
    x = data[['TV', 'Radio', 'Newspaper']]
    y = data['Sales']
    # plot; the original line was the bare expression `plt.plot`, so completing it is an
    # editor's guess: one marker series per feature column against sales
    plt.plot(x, y, 'o')
    plt.show()


if __name__ == '__main__':
    run_main() | 16.555556 | 42 | 0.583893 | 41 | 298 | 3.829268 | 0.731707 | 0.089172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008969 | 0.251678 | 298 | 18 | 43 | 16.555556 | 0.695067 | 0.184564 | 0 | 0 | 0 | 0 | 0.183333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80df428c3bf27d5c6635b075ac58b6ddf1c4e21a | 561 | py | Python | python/Python - Settrade Open API Example - Equity.py | settrade/stt-open-api-sdk-example | b2644985ef41957df85a239a033a101435dff2c1 | [
"MIT"
] | 1 | 2022-03-03T20:15:34.000Z | 2022-03-03T20:15:34.000Z | python/Python - Settrade Open API Example - Equity.py | settrade/stt-open-api-sdk-example | b2644985ef41957df85a239a033a101435dff2c1 | [
"MIT"
] | null | null | null | python/Python - Settrade Open API Example - Equity.py | settrade/stt-open-api-sdk-example | b2644985ef41957df85a239a033a101435dff2c1 | [
"MIT"
] | null | null | null | import settrade.openapi
from settrade.openapi import Investor

############################# login #############################
investor = Investor(
    app_id="8uuaMP1npccDixrg",
    app_secret="APX6wnqzk/yoVLIRyQ4ps4Fm13uzbC4tL5nyjAwwCKue",
    app_code="SANDBOX",
    broker_id="SANDBOX",
    is_auto_queue=False)

############################# Equity #############################
equity = investor.Equity(account_no="settrade-E")
equity.place_order(
    symbol="PTT",
    price=38,
    volume=100,
    side="BUY",
pin="000000") | 28.05 | 67 | 0.538324 | 49 | 561 | 6 | 0.714286 | 0.102041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042827 | 0.167558 | 561 | 20 | 68 | 28.05 | 0.586724 | 0.023173 | 0 | 0 | 0 | 0 | 0.23301 | 0.106796 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80dfe6a1ff36490beaa2733bf4a9c540f4667373 | 2,135 | py | Python | tests/examples/test_examples.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | 63 | 2018-06-06T11:56:40.000Z | 2022-03-22T08:00:59.000Z | tests/examples/test_examples.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | 39 | 2018-07-04T12:40:53.000Z | 2022-02-09T23:48:44.000Z | tests/examples/test_examples.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | 34 | 2018-06-25T09:59:22.000Z | 2022-02-23T12:46:33.000Z | import codecs
import copy
import os
import re
import subprocess
import sys
import time
import unittest

from tfsnippet.utils import TemporaryDirectory, humanize_duration
from tests.examples.helper import skipUnlessRunExamplesTests


class ExamplesTestCase(unittest.TestCase):
    """
    Test case to ensure all examples can run for at least one step.
    """

    @skipUnlessRunExamplesTests()
    def test_examples_can_run_one_step(self):
        timer = -time.time()

        # discover all example scripts
        def walk(pa, dst):
            for fn in os.listdir(pa):
                fp = os.path.join(pa, fn)
                if os.path.isdir(fp):
                    walk(fp, dst)
                elif fp.endswith('.py'):
                    with codecs.open(fp, 'rb', 'utf-8') as f:
                        cnt = f.read()
                        if re.search(
                                r'''if\s+__name__\s*==\s+(['"])__main__\1:''',
                                cnt):
                            if 'max_step=config.max_step' not in cnt:
                                raise RuntimeError('Example script does not have '
                                                   'max_step configuration: {}'.
                                                   format(fp))
                            dst.append(fp)
            return dst

        examples_dir = os.path.join(
            os.path.split(os.path.abspath(__file__))[0],
            '../../tfsnippet/examples'
        )
        examples_scripts = walk(examples_dir, [])

        # run all example scripts for just max_step
        env_dict = copy.copy(os.environ)
        for example_script in examples_scripts:
            print('Run {} ...'.format(example_script))
            with TemporaryDirectory() as tempdir:
                args = [sys.executable, '-u',
                        example_script, '--max_step=1']
                subprocess.check_call(args, cwd=tempdir, env=env_dict)
                print('')

        # report finished tests
        print('Finished to run {} example scripts in {}.'.format(
            len(examples_scripts), humanize_duration(time.time() + timer)))
| 33.888889 | 78 | 0.526464 | 227 | 2,135 | 4.797357 | 0.444934 | 0.027548 | 0.025712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002978 | 0.37096 | 2,135 | 62 | 79 | 34.435484 | 0.807893 | 0.074005 | 0 | 0 | 0 | 0 | 0.09295 | 0.025065 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.304348 | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80e29a5b17063378ea11a9ba5ec63b825ffd1e08 | 2,858 | py | Python | Utils/fetcher.py | EchoAbstract/soma-fm-player | c1418033998c3fab74a649db98e230ced102e5fc | [
"MIT"
] | 1 | 2019-03-04T10:35:42.000Z | 2019-03-04T10:35:42.000Z | Utils/fetcher.py | EchoAbstract/soma-fm-player | c1418033998c3fab74a649db98e230ced102e5fc | [
"MIT"
] | null | null | null | Utils/fetcher.py | EchoAbstract/soma-fm-player | c1418033998c3fab74a649db98e230ced102e5fc | [
"MIT"
] | null | null | null | import urllib2

from bs4 import BeautifulSoup
from collections import defaultdict


def fetch_html():
    resp = urllib2.urlopen("http://somafm.com/listen/")
    html = resp.read()
    return html


def make_soup(ingredients):
    return BeautifulSoup(ingredients, 'html.parser')


def get_stations(soup):
    stations = []
    for station in soup.find_all('h3'):
        if station.get('class') != None:
            break
        print("Found station: " + station.text)
        stations.append(station.text)
    return stations


def get_image_urls(soup, stations):
    root_url = "http://www.somafm.com"
    image_count = len(stations)
    images_urls = []
    for icon in soup.find_all('img'):
        if not icon["src"].endswith("LoneDJsquare400.jpg"):
            if image_count != 0:
                images_urls.append(root_url + icon["src"])
                image_count = image_count - 1
    return images_urls


preferred_playlist_order = ["130", "64", "256", "320", "192", "32", ""]


def get_playlist_shortname(pl):
    last_bit = pl.split("/")[-1]
    basename = last_bit.replace(".pls", "")
    for suffix in preferred_playlist_order:
        if basename.endswith(suffix):
            return basename.replace(suffix, "")


def get_playlist_urls(soup):
    root_url = "https://somafm.com"
    handled = defaultdict(bool)
    playlist_urls = []
    for link in soup.find_all("a"):
        if not link.get('href'):
            continue  # was a bare `next`, which is a no-op in Python; `continue` is what was intended
        url = link['href']
        if url.endswith('.pls'):
            # Have we seen this yet?
            short_name = get_playlist_shortname(url)
            if not handled[short_name]:
                if not url.startswith(root_url):
                    url = root_url + url
                playlist_urls.append(url)
                handled[short_name] = True
    return playlist_urls


def download_images(imgs, out_dir):
    for img in imgs:
        filename = img.split("/")[-1]
        filename = out_dir + "/" + filename
        with open(filename, 'wb') as f:
            f.write(urllib2.urlopen(img).read())


if __name__ == "__main__":
    html = fetch_html()
    soup = make_soup(html)
    station_list = get_stations(soup)
    imgs = get_image_urls(soup, station_list)
    urls = get_playlist_urls(soup)
    download_images(imgs, "./img_tmp")
    for i in range(0, len(station_list)):
        icon_name = imgs[i].split("/")[-1]
        icon_name = icon_name.split(".")[0]
        prefix = '[StationInfo '
        prefix = prefix + 'stationInfoForStationNamed:@"' + station_list[i] + '" '
        prefix = prefix + 'withPlaylistLocation:@"' + urls[i] + '" '
        prefix = prefix + 'withShortKey:@""'
        prefix = prefix + 'withIconNamed:@"' + "rounded_" + icon_name + '" '
        prefix = prefix + 'atSortOrder:50],'
        print(prefix)
print(prefix) | 27.747573 | 81 | 0.586424 | 334 | 2,858 | 4.817365 | 0.341317 | 0.021753 | 0.018645 | 0.024239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016058 | 0.280966 | 2,858 | 103 | 82 | 27.747573 | 0.76691 | 0.007698 | 0 | 0 | 0 | 0 | 0.108995 | 0.018342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09589 | false | 0 | 0.041096 | 0.013699 | 0.219178 | 0.027397 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80e3d5f21275645dfb4b04506fb537a7e5daed1f | 5,015 | py | Python | Django elements/charts/data_preparation.py | LouisdeBruijn/Medium | afc66ee061c10b7107ba1661d2b9dfed0559dfc3 | [
"MIT"
] | 41 | 2020-05-03T19:32:37.000Z | 2022-02-02T22:03:07.000Z | Django elements/charts/data_preparation.py | LouisdeBruijn/Medium | afc66ee061c10b7107ba1661d2b9dfed0559dfc3 | [
"MIT"
] | 2 | 2021-11-11T03:11:52.000Z | 2021-12-16T01:51:13.000Z | Django elements/charts/data_preparation.py | LouisdeBruijn/Medium | afc66ee061c10b7107ba1661d2b9dfed0559dfc3 | [
"MIT"
] | 45 | 2020-03-29T02:43:24.000Z | 2022-03-15T02:14:27.000Z | from .models import *
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import seaborn as sns
import numpy as np
import time
import re
import json


def hashtag_demographics(route, label):
    """Return most-used hashtags."""
    dataset = {'hateval': Hashtag.objects.filter(tweet__hateval=True),
               'offenseval': Hashtag.objects.filter(tweet__offenseval=True),
               'all': Hashtag.objects.all()}
    db = dataset.get(route)
    if label == 'abuse':
        db = db.filter(tweet__pre_annotation='abuse')
    elif label == 'no-abuse':
        db = db.filter(tweet__pre_annotation='no-abuse')

    db_hashtags_c = db.distinct().order_by('-count')[:15]
    hashtag_labels = [h.text for h in db_hashtags_c]
    hashtag_values = [h.count for h in db_hashtags_c]
    hashtag_palette = sns.color_palette("Blues_r", len(hashtag_labels)).as_hex()

    return hashtag_labels, hashtag_values, hashtag_palette


def creation_date_demographics(route, label):
    """Return tweet creation dates."""
    dataset = {'hateval': Tweet.objects.filter(hateval=True),
               'offenseval': Tweet.objects.filter(offenseval=True),
               'all': Tweet.objects.all()}
    db = dataset.get(route)
    if label == 'abuse':
        db = db.filter(pre_annotation='abuse')
    elif label == 'no-abuse':
        db = db.filter(pre_annotation='no-abuse')

    db = db.values_list('created_at', flat=True)
    created = [int(time.mktime(t.timetuple())) * 1000 for t in db if t]
    created.sort()

    return created


def text_demographics(route, label, vectorizer):
    """Return most-used words by tf-idf or count."""
    dataset = {'hateval': Tweet.objects.filter(hateval=True),
               'offenseval': Tweet.objects.filter(offenseval=True),
               'all': Tweet.objects.all()}
    db = dataset.get(route)
    if label == 'abuse':
        db = db.filter(pre_annotation='abuse')
    elif label == 'no-abuse':
        db = db.filter(pre_annotation='no-abuse')

    if vectorizer == 'tfidf':
        corpus = [tw.text for tw in Tweet.objects.filter(active=True)]
        tfidf_vectorizer = TfidfVectorizer(stop_words='english')
        X = tfidf_vectorizer.fit_transform(corpus)
        scores = zip(tfidf_vectorizer.get_feature_names(),
                     np.asarray(X.sum(axis=0)).ravel())
        sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)[:15]
        items = [(score[0], round(score[1], 1)) for score in sorted_scores]
    elif vectorizer == 'count':
        db_text = db.values_list('text', flat=True)
        stop_words = stopwords.words('english')
        stop_words += ['I', 'RT', 'The']  # added own stop-words
        tknzr = TweetTokenizer()
        bow = {}
        for text in db_text:
            tokens = tknzr.tokenize(text)
            for token in tokens:
                token = re.sub(r'[^\w\s]', '', token)
                if token and token not in stop_words:
                    bow[token] = bow.get(token, 0) + 1
        items = sorted(bow.items(), key=lambda x: x[1], reverse=True)[:15]

    labels, values = zip(*items)
    colors = sns.cubehelix_palette(len(values)).as_hex()
    colors.reverse()

    return list(labels), list(values), colors


def user_demographics(route, label):
    """Return active/non-active user ratios."""
    dataset = {'hateval': Tweet.objects.filter(hateval=True, exception__isnull=False),
               'offenseval': Tweet.objects.filter(offenseval=True, exception__isnull=False),
               'all': Tweet.objects.filter(exception__isnull=False)}
    db_exc = dataset.get(route)
    if label == 'abuse':
        db_exc = db_exc.filter(pre_annotation='abuse')
    elif label == 'no-abuse':
        db_exc = db_exc.filter(pre_annotation='no-abuse')

    db_exc = db_exc.distinct().values_list('exception', flat=True)
    exceptions = {}
    for exc in db_exc:
        json_exc = json.loads(exc)
        if len(json_exc) > 1:
            string = "[{0}] {1}".format(json_exc['code'], json_exc['message'][:-1])
            exceptions[string] = exceptions.get(string, 0) + 1

    '''Get the unique/non-unique ratio'''
    unique_users_w_reply = TwitterUser.objects.filter(nr_tweets__lt=2,
                                                      twitter_user__in_reply_to_status_id__isnull=False,
                                                      twitter_user__in_reply_to_self=False).count()
    unique_users = TwitterUser.objects.filter(nr_tweets__lt=2).count()
    non_unique_users = TwitterUser.objects.filter(nr_tweets__gt=1).count()

    user_labels = ['unique users w/ in_reply_to_status_id to others', 'other unique users', 'non-unique users'] + list(exceptions.keys())
    user_values = [unique_users_w_reply, unique_users, non_unique_users] + list(exceptions.values())
    palette = ['#007bff', '#ffc107'] + sns.color_palette("Reds_r", len(user_labels) - 1).as_hex()

    return user_labels, user_values, palette
| 37.992424 | 137 | 0.637488 | 647 | 5,015 | 4.748068 | 0.239567 | 0.055013 | 0.046875 | 0.029297 | 0.361979 | 0.337891 | 0.318034 | 0.208008 | 0.17806 | 0.164388 | 0 | 0.008783 | 0.228116 | 5,015 | 131 | 138 | 38.282443 | 0.78481 | 0.031306 | 0 | 0.221053 | 0 | 0 | 0.07875 | 0.004375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0 | 0.094737 | 0 | 0.178947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
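# --- Editor's sketch (hypothetical view-side packaging, not in the source file): every
# helper above returns a (labels, values, colors) triple shaped for a chart payload.
labels, values, colors = ['#python', '#django'], [42, 17], ['#08306b', '#2171b5']
chart = {'labels': labels,
         'datasets': [{'data': values, 'backgroundColor': colors}]}
# json.dumps(chart) is then ready to hand to the template / charting library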
80e57ac43f1c3e92e78c3a47d232135e483fe654 | 2,838 | py | Python | TaxiBJ/src/model/cnn.py | panzheyi/MF-STN | 70d875d6b287a398b783e74031bb8237d44e5f8c | [
"MIT"
] | 19 | 2019-10-28T09:41:51.000Z | 2022-03-09T02:37:01.000Z | TaxiNYC/src/model/cnn.py | yoshall/MF-STN | 70d875d6b287a398b783e74031bb8237d44e5f8c | [
"MIT"
] | null | null | null | TaxiNYC/src/model/cnn.py | yoshall/MF-STN | 70d875d6b287a398b783e74031bb8237d44e5f8c | [
"MIT"
] | 8 | 2020-11-20T09:02:30.000Z | 2021-08-12T05:50:54.000Z | import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.gluon import Block, HybridBlock, nn, rnn

from config import ROWS, COLUMES, FLOW_OUTPUT_DIM, FLOW_OUTPUT_LEN
from model.structure import MFDense, ResUnit

N_LOC = ROWS * COLUMES


class CNN(Block):
    """ Convolutional neural network """

    def __init__(self, filters, hiddens, embed_dim, prefix):
        super(CNN, self).__init__(prefix=prefix)
        self.filters = filters
        with self.name_scope():
            # convolutional layers
            self.convs = nn.Sequential()
            for i, filter in enumerate(filters):
                self.convs.add(nn.Conv2D(filter, kernel_size=3, strides=1, padding=1, activation='relu', prefix='cnn%d_' % i))

            # dense layers (mf dense layers)
            self.denses = nn.Sequential()
            in_dims = [filters[-1]] + hiddens
            out_dims = hiddens + [FLOW_OUTPUT_DIM * FLOW_OUTPUT_LEN]
            for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
                activation = None if i == len(in_dims) - 1 else 'relu'
                if embed_dim == 0:
                    self.denses.add(nn.Dense(out_dim, activation, flatten=False, prefix='dense%d_' % i))
                else:
                    self.denses.add(MFDense(N_LOC, embed_dim, in_dim, out_dim, activation, prefix='mf_dense%d_' % i))

    def forward(self, data, label):
        """ Forward process of CNN.

        Parameters
        ----------
        data: NDArray with shape [b, t, row, col, d].
        label: NDArray with shape [b, t, row, col, d].

        Returns
        -------
        loss: loss for gradient descent.
        (pred, label): each of them is an NDArray with shape [n, b, t, d].
        """
        B = data.shape[0]
        data = nd.transpose(data, axes=(0, 1, 4, 2, 3))  # [b, t, d, row, col]
        data = nd.reshape(data, shape=(B, -1, ROWS, COLUMES))  # [b, t * d, row, col]

        # convolution layers
        data = self.convs(data)
        data = nd.transpose(data, axes=(2, 3, 0, 1))  # [row, col, b, d]
        data = nd.reshape(data, shape=(ROWS * COLUMES, B, -1))

        # dense layers
        data = self.denses(data)
        data = nd.reshape(data, shape=(ROWS, COLUMES, B, FLOW_OUTPUT_LEN, -1))
        data = nd.transpose(data, axes=(2, 0, 1, 3, 4))

        label = nd.transpose(label, axes=(0, 2, 3, 1, 4))  # [b, row, col, t, d]
        label = label[:, :, :, :, :FLOW_OUTPUT_DIM]

        loss = nd.sum((data - label) ** 2)
        return loss, {'flow_pred': data, 'flow_label': label}


def net(settings):
    return CNN(
        filters=settings['model']['filters'],
        hiddens=settings['model']['hiddens'],
        embed_dim=settings['model']['embed_dim'],
        prefix=settings['model']['type'] + "_"
) | 39.416667 | 125 | 0.559549 | 376 | 2,838 | 4.101064 | 0.279255 | 0.038911 | 0.025292 | 0.036965 | 0.182231 | 0.141375 | 0.076524 | 0.076524 | 0 | 0 | 0 | 0.015539 | 0.29704 | 2,838 | 72 | 126 | 39.416667 | 0.757393 | 0.157858 | 0 | 0 | 0 | 0 | 0.045086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.139535 | 0.023256 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
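# --- Editor's sketch: a hypothetical settings dict matching the keys net() reads above;
# the concrete values are illustrative only.
settings = {
    'model': {
        'type': 'cnn',
        'filters': [32, 32],  # one Conv2D per entry
        'hiddens': [64],      # widths of the (MF)Dense stack
        'embed_dim': 0,       # 0 selects plain nn.Dense instead of MFDense
    }
}
# model = net(settings); model.initialize()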
80eb165a96cff968c89b9f3c537a4b8ba8c0ae1a | 7,315 | py | Python | mujpy/muplot.py | RDeRenzi/mujpy | f7aa0eb97c3db668a1b099d00aba8e1bd41d4444 | [
"MIT"
] | 1 | 2017-09-10T15:55:23.000Z | 2017-09-10T15:55:23.000Z | mujpy/muplot.py | RDeRenzi/mujpy | f7aa0eb97c3db668a1b099d00aba8e1bd41d4444 | [
"MIT"
] | 1 | 2019-04-08T21:13:38.000Z | 2019-04-08T21:13:38.000Z | mujpy/muplot.py | RDeRenzi/mujpy | f7aa0eb97c3db668a1b099d00aba8e1bd41d4444 | [
"MIT"
] | 2 | 2019-03-26T11:47:29.000Z | 2021-02-16T22:42:31.000Z | class multiplot(object):
'''
plot class (let's see)
'''
def __init__(self,time,asymm,title,nscan,histoLength):
'''
input: if suite is the multiple run instance
time, asymm - 1d and 2d numpy arrays
e.g. from rebin(suite.time,suite.asymmetry_multirun(),(0,20000),100)
title - list
e.g. [get_title(run[0]) for run in suite._the_runs_]
nscan - list
e.g. [run[0].get_runNumber_int() for run in suite._the_runs_]
[groups = [grp['forward']+'-'+grp['backward'] for grp in the_suite.groups]
histoLength - max length of each asymmetry array
e.g. musuite.histoLength
method:
multiplot.display(anim=True) 1s sequence of run plots, paused by muose click
'''
from numpy import array
self.time = array([time])
self.asymm = asymm
self.title = title
self.scan = nscan
self.histoLength = histoLength
self.multi_offset = 0.1
def display(self,groups = False, anim = False, anim_delay = 1000):
'''
input:
produces plot
multiplot_range = self.multiplot_range
anim = True, False
anim_delay = delay between frames (ms)
output: MULTIPLOT display:
anim_multiplot to be .paused() and .resumed() by toggle_pause
'''
import matplotlib.pyplot as P
from numpy import array
from mujpy.aux.aux import set_fig, derange, rebin #, animate_multiplot, init_animate_multiplot
import matplotlib.animation as animation
###################
# PYPLOT ANIMATIONS
###################
def animate_multiplot(i):
'''
anim function
update multiplot data and its color
'''
line.set_ydata(self.asymm[i])
line.set_color(color[i])
self.ax.set_title(str(self.scan[i])+': '+ self.title[i])
return line,
def init_animate_multiplot():
'''
anim init function
to give a clean slate
'''
line.set_ydata(self.asymm[0])
line.set_color(color[0])
self.ax.set_title(str(self.scan[0])+': '+ self.title[0])
return line,
def toggle_pause(*args, **kwargs):
if self.paused:
self.anim_multiplot.resume() # event_source.start() # matplotlib.__version__ >= 3.4
# animation.event_source.start()
else:
self.anim_multiplot.pause() #event_source.stop() # if matplotlib.__version__ >= 3.4
# animation.event_source.stop()
self.paused = not self.paused
dpi = 100.
if len(self.asymm.shape)==1:
anim = False # make sure
nscans, nbins = 1, self.asymm.shape[0]
else:
nscans,nbins = self.asymm.shape
#print('start, stop, pack = {},{},{}'.format(start,stop,pack))
#print('shape time {}, asymm {}'.format(time.shape,asymm.shape))
y = 4. # normal y size in inches
x = 6. # normal x size in inches
my = 12. # try not to go beyond 12 run plots
##############################
# set figure, axes
##############################
        kwargs = {'figsize':(x,y),'dpi':dpi}
fig, self.ax = set_fig(1,1,1,'Multiplot',**kwargs)
        screen_x, screen_y = P.get_current_fig_manager().window.wm_maxsize() # screen size in pixels (Tk-specific: assumes the TkAgg backend)
y_maxinch = float(screen_y)/float(fig.dpi) # maximum y size in inches
########## note that "inches" are conventional, since they depend on the display pitch
# print('your display is y_maxinch = {:.2f} inches'.format(y_maxinch))
########## XPS 13 is 10.5 "inches" high @160 ppi (cfr. conventional fig.dpi = 100)
        bars = 1. # overhead y size (inches) for three bars (tools, window and icons)
        dy = 0. if anim else (y_maxinch - y - bars)/my # extra y size per run plot
        y = y + nscans*dy if nscans < my else y + my*dy # size; does not dilate for anim
        fig.set_size_inches(x, y, forward=True) # resize so the tiled runs fit the screen
##########################
# plot data and fit curve
##########################
        # newer matplotlib releases removed the private ax._get_lines.prop_cycler;
        # draw the colors from the public property cycle instead
        prop_colors = P.rcParams['axes.prop_cycle'].by_key()['color']
        color = [prop_colors[run % len(prop_colors)] for run in range(nscans)]
if anim:
#############
# animation
#############
##############
# initial plot
##############
ylow, yhigh = self.asymm.min()*1.02, self.asymm.max()*1.02
line, = self.ax.plot(self.time[0],self.asymm[0],'o-',ms=2,lw=0.5,color=color[0],alpha=0.5,zorder=1)
self.ax.set_title(str(self.scan[0])+': '+self.title[0])
self.ax.plot([self.time[0][0],self.time[0][-1]],[0,0],'k-',lw=0.5,alpha=0.3)
self.ax.set_xlim(self.time[0][0],self.time[0][-1])
self.ax.set_ylim(ylow,yhigh)
self.ax.set_ylabel('Asymmetry')
self.ax.set_xlabel(r'time [$\mu$s]')
#######
# anim
#######
self.anim_multiplot = animation.FuncAnimation(fig, animate_multiplot, nscans,
init_func=init_animate_multiplot,
interval=anim_delay, blit=False)
self.paused = False
fig.canvas.mpl_connect('button_press_event', toggle_pause)
P.suptitle('Click to toggle pause/resume',fontsize='small')
###############################
# tiles with offset
###############################
else:
            # aoffset has shape (nscans, 1); adding it shifts each run upward
            aoffset = self.asymm.max()*self.multi_offset*array([[run] for run in range(nscans)])
            self.asymm = self.asymm + aoffset # exploits numpy broadcasting
ylow,yhigh = min([0,self.asymm.min()+0.01]),self.asymm.max()+0.01
            if nscans>1:
                for run in range(nscans):
                    self.ax.plot(self.time[0],self.asymm[run],'o-',
                                    lw=0.5,ms=2,alpha=0.5,color=color[run],zorder=1)
                    self.ax.plot([self.time[0][0],self.time[0][-1]],
                                     [aoffset[run],aoffset[run]],'k-',lw=0.5,alpha=0.3,zorder=0)
                    # label each tile with its scan number just past the right edge
                    # (self.time has shape (1, nbins); the labels come from self.scan)
                    self.ax.text(self.time[0][-1]*1.025,aoffset[run][0],str(self.scan[run]))
                self.ax.set_title(self.title[-1]) # one title for the tiled plot
            else:
                # self.time has shape (1, nbins): index row 0 to match the 1d asymm
                self.ax.plot(self.time[0],self.asymm,'o-',lw=0.5,ms=2,alpha=0.5,color=color[0],zorder=1)
                self.ax.set_title(self.title[0] if isinstance(self.title,list) else self.title)
self.ax.set_xlim(self.time[0][0],self.time[0][-1]*9./8.)
self.ax.set_ylim(ylow,yhigh)
self.ax.set_ylabel('Asymmetry')
self.ax.set_xlabel(r'time [$\mu$s]')
fig.canvas.manager.window.tkraise()
P.draw()
        return self.anim_multiplot if anim else None # the caller must keep the animation alive
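# ----------------------------------------------------------------------
# Minimal usage sketch (an illustrative assumption, not part of the
# original module): drives multiplot with synthetic data whose shapes
# mirror the docstring (time 1d, asymm 2d of shape (nscans, nbins)).
# Run numbers and titles are made up; mujpy must be installed (set_fig
# comes from mujpy.aux.aux) and the TkAgg backend is required, since
# display() calls Tk-specific window methods.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('TkAgg')
    import matplotlib.pyplot as P
    from numpy import linspace, exp, cos, pi, array
    nscans, nbins = 4, 500
    time = linspace(0., 10., nbins)                     # microseconds
    asymm = array([0.25*exp(-0.1*k*time)*cos(2*pi*0.3*time)
                   for k in range(nscans)])             # synthetic asymmetries
    titles = ['fake scan {}'.format(k) for k in range(nscans)]
    nscan = list(range(100, 100 + nscans))              # fake run numbers
    mp = multiplot(time, asymm, titles, nscan, histoLength=nbins)
    mp.display(anim=False)                              # tiles with offsets
    P.show()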
| 43.541667 | 111 | 0.513192 | 907 | 7,315 | 4.033076 | 0.261301 | 0.036085 | 0.034445 | 0.019136 | 0.243849 | 0.220886 | 0.172499 | 0.13778 | 0.122471 | 0.122471 | 0 | 0.029193 | 0.320984 | 7,315 | 167 | 112 | 43.802395 | 0.707268 | 0.277649 | 0 | 0.204545 | 0 | 0 | 0.029979 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056818 | false | 0 | 0.056818 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
80eb66e87e9597516ec5942a88129b383f122135 | 1,308 | py | Python | tenning/layers/svdo.py | guilherme9820/Tenning | c0fe7695ef3dd791ea1083f39d6b312266fb0512 | [
"MIT"
] | null | null | null | tenning/layers/svdo.py | guilherme9820/Tenning | c0fe7695ef3dd791ea1083f39d6b312266fb0512 | [
"MIT"
] | null | null | null | tenning/layers/svdo.py | guilherme9820/Tenning | c0fe7695ef3dd791ea1083f39d6b312266fb0512 | [
"MIT"
] | null | null | null | from tensorflow.keras.layers import Layer
import tensorflow as tf
class SVDO(Layer):
""" Performs symmetric orthogonalization as detailed in the paper
'An Analysis of SVD for Deep Rotation Estimation'
(https://proceedings.neurips.cc/paper/2020/file/fec3392b0dc073244d38eba1feb8e6b7-Paper.pdf)
    This implementation follows the original code at
(https://github.com/google-research/google-research/tree/master/special_orthogonalization)
"""
def __init__(self,
**kwargs):
super().__init__(**kwargs)
def call(self, input_tensor):
# Reshapes a (batch, 9) tensor to a (batch, 3, 3) tensor.
input_tensor = tf.reshape(input_tensor, (-1, 3, 3))
_, u, v = tf.linalg.svd(input_tensor)
        # det(U V^T) is +-1; multiplying the last column of U by it forces
        # the product below to have determinant +1, i.e. a proper rotation
        det = tf.linalg.det(tf.matmul(u, v, transpose_b=True))
output = tf.matmul(
tf.concat([u[:, :, :-1], u[:, :, -1:] * tf.reshape(det, [-1, 1, 1])], 2),
v, transpose_b=True)
return output
def get_config(self):
        # Layer.get_config() already includes 'name' and 'trainable';
        # the explicit update below is redundant but harmless
        config = super().get_config()
config.update({'trainable': self.trainable,
'name': self.name})
return config
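# ----------------------------------------------------------------------
# Minimal usage sketch (an illustrative assumption, not part of the
# original module): project a random (batch, 9) tensor onto SO(3) and
# check that the result is a proper rotation. Batch size and tolerances
# are arbitrary choices for the demo.
if __name__ == '__main__':
    batch = tf.random.normal((4, 9))
    rot = SVDO()(batch)                       # (4, 3, 3) rotation matrices
    identity = tf.eye(3, batch_shape=[4])
    # R^T R should equal the identity and det(R) should be +1
    tf.debugging.assert_near(tf.matmul(rot, rot, transpose_a=True), identity, atol=1e-5)
    tf.debugging.assert_near(tf.linalg.det(rot), tf.ones(4), atol=1e-5)
    print('SVDO returned proper rotation matrices')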
| 30.418605 | 99 | 0.632263 | 163 | 1,308 | 4.944785 | 0.521472 | 0.054591 | 0.047146 | 0.062035 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0.246177 | 1,308 | 42 | 100 | 31.142857 | 0.782961 | 0.319572 | 0 | 0 | 0 | 0 | 0.015366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.190476 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |